TargetLoweringBase.cpp (LLVM 7.0.0svn)
1 //===- TargetLoweringBase.cpp - Implement the TargetLoweringBase class ----===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This implements the TargetLoweringBase class.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/ADT/BitVector.h"
15 #include "llvm/ADT/STLExtras.h"
16 #include "llvm/ADT/SmallVector.h"
17 #include "llvm/ADT/StringExtras.h"
18 #include "llvm/ADT/StringRef.h"
19 #include "llvm/ADT/Triple.h"
20 #include "llvm/ADT/Twine.h"
21 #include "llvm/CodeGen/Analysis.h"
32 #include "llvm/CodeGen/StackMaps.h"
37 #include "llvm/IR/Attributes.h"
38 #include "llvm/IR/CallingConv.h"
39 #include "llvm/IR/DataLayout.h"
40 #include "llvm/IR/DerivedTypes.h"
41 #include "llvm/IR/Function.h"
42 #include "llvm/IR/GlobalValue.h"
43 #include "llvm/IR/GlobalVariable.h"
44 #include "llvm/IR/IRBuilder.h"
45 #include "llvm/IR/Module.h"
46 #include "llvm/IR/Type.h"
48 #include "llvm/Support/Casting.h"
50 #include "llvm/Support/Compiler.h"
55 #include <algorithm>
56 #include <cassert>
57 #include <cstddef>
58 #include <cstdint>
59 #include <cstring>
60 #include <iterator>
61 #include <string>
62 #include <tuple>
63 #include <utility>
64 
65 using namespace llvm;
66 
67 static cl::opt<bool> JumpIsExpensiveOverride(
68  "jump-is-expensive", cl::init(false),
69  cl::desc("Do not create extra branches to split comparison logic."),
70  cl::Hidden);
71 
72 static cl::opt<unsigned> MinimumJumpTableEntries
73  ("min-jump-table-entries", cl::init(4), cl::Hidden,
74  cl::desc("Set minimum number of entries to use a jump table."));
75 
76 static cl::opt<unsigned> MaximumJumpTableSize
77  ("max-jump-table-size", cl::init(0), cl::Hidden,
78  cl::desc("Set maximum size of jump tables; zero for no limit."));
79 
80 /// Minimum jump table density for normal functions.
81 static cl::opt<unsigned>
82  JumpTableDensity("jump-table-density", cl::init(10), cl::Hidden,
83  cl::desc("Minimum density for building a jump table in "
84  "a normal function"));
85 
86 /// Minimum jump table density for -Os or -Oz functions.
87 static cl::opt<unsigned> OptsizeJumpTableDensity(
88  "optsize-jump-table-density", cl::init(40), cl::Hidden,
89  cl::desc("Minimum density for building a jump table in "
90  "an optsize function"));
91 
92 static bool darwinHasSinCos(const Triple &TT) {
93  assert(TT.isOSDarwin() && "should be called with darwin triple");
94  // Don't bother with 32 bit x86.
95  if (TT.getArch() == Triple::x86)
96  return false;
97  // Macos < 10.9 has no sincos_stret.
98  if (TT.isMacOSX())
99  return !TT.isMacOSXVersionLT(10, 9) && TT.isArch64Bit();
100  // iOS < 7.0 has no sincos_stret.
101  if (TT.isiOS())
102  return !TT.isOSVersionLT(7, 0);
103  // Any other darwin such as WatchOS/TvOS is new enough.
104  return true;
105 }
106 
107 // Although this default value is arbitrary, it is not random. It is assumed
108 // that a condition that evaluates the same way by a higher percentage than this
109 // is best represented as control flow. Therefore, the default value N should be
110 // set such that the win from N% correct executions is greater than the loss
111 // from (100 - N)% mispredicted executions for the majority of intended targets.
112 static cl::opt<int> MinPercentageForPredictableBranch(
113  "min-predictable-branch", cl::init(99),
114  cl::desc("Minimum percentage (0-100) that a condition must be either true "
115  "or false to assume that the condition is predictable"),
116  cl::Hidden);
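// (With the default of 99, a branch is treated as predictable only when the
// same outcome is expected at least 99 times out of 100, i.e. an expected
// misprediction rate of at most 1%.)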
117 
118 void TargetLoweringBase::InitLibcalls(const Triple &TT) {
119 #define HANDLE_LIBCALL(code, name) \
120  setLibcallName(RTLIB::code, name);
121 #include "llvm/CodeGen/RuntimeLibcalls.def"
122 #undef HANDLE_LIBCALL
123  // Initialize calling conventions to their default.
124  for (int LC = 0; LC < RTLIB::UNKNOWN_LIBCALL; ++LC)
125  setLibcallCallingConv((RTLIB::Libcall)LC, CallingConv::C);
126 
127  // A few names are different on particular architectures or environments.
128  if (TT.isOSDarwin()) {
129  // For f16/f32 conversions, Darwin uses the standard naming scheme, instead
130  // of the gnueabi-style __gnu_*_ieee.
131  // FIXME: What about other targets?
132  setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
133  setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
134 
135  // Some darwins have an optimized __bzero/bzero function.
136  switch (TT.getArch()) {
137  case Triple::x86:
138  case Triple::x86_64:
139  if (TT.isMacOSX() && !TT.isMacOSXVersionLT(10, 6))
140  setLibcallName(RTLIB::BZERO, "__bzero");
141  break;
142  case Triple::aarch64:
143  setLibcallName(RTLIB::BZERO, "bzero");
144  break;
145  default:
146  break;
147  }
148 
149  if (darwinHasSinCos(TT)) {
150  setLibcallName(RTLIB::SINCOS_STRET_F32, "__sincosf_stret");
151  setLibcallName(RTLIB::SINCOS_STRET_F64, "__sincos_stret");
152  if (TT.isWatchABI()) {
153  setLibcallCallingConv(RTLIB::SINCOS_STRET_F32,
154  CallingConv::ARM_AAPCS_VFP);
155  setLibcallCallingConv(RTLIB::SINCOS_STRET_F64,
156  CallingConv::ARM_AAPCS_VFP);
157  }
158  }
159  } else {
160  setLibcallName(RTLIB::FPEXT_F16_F32, "__gnu_h2f_ieee");
161  setLibcallName(RTLIB::FPROUND_F32_F16, "__gnu_f2h_ieee");
162  }
163 
164  if (TT.isGNUEnvironment() || TT.isOSFuchsia()) {
165  setLibcallName(RTLIB::SINCOS_F32, "sincosf");
166  setLibcallName(RTLIB::SINCOS_F64, "sincos");
167  setLibcallName(RTLIB::SINCOS_F80, "sincosl");
168  setLibcallName(RTLIB::SINCOS_F128, "sincosl");
169  setLibcallName(RTLIB::SINCOS_PPCF128, "sincosl");
170  }
171 
172  if (TT.isOSOpenBSD()) {
173  setLibcallName(RTLIB::STACKPROTECTOR_CHECK_FAIL, nullptr);
174  }
175 }
176 
177 /// getFPEXT - Return the FPEXT_*_* value for the given types, or
178 /// UNKNOWN_LIBCALL if there is none.
179 RTLIB::Libcall RTLIB::getFPEXT(EVT OpVT, EVT RetVT) {
180  if (OpVT == MVT::f16) {
181  if (RetVT == MVT::f32)
182  return FPEXT_F16_F32;
183  } else if (OpVT == MVT::f32) {
184  if (RetVT == MVT::f64)
185  return FPEXT_F32_F64;
186  if (RetVT == MVT::f128)
187  return FPEXT_F32_F128;
188  if (RetVT == MVT::ppcf128)
189  return FPEXT_F32_PPCF128;
190  } else if (OpVT == MVT::f64) {
191  if (RetVT == MVT::f128)
192  return FPEXT_F64_F128;
193  else if (RetVT == MVT::ppcf128)
194  return FPEXT_F64_PPCF128;
195  } else if (OpVT == MVT::f80) {
196  if (RetVT == MVT::f128)
197  return FPEXT_F80_F128;
198  }
199 
200  return UNKNOWN_LIBCALL;
201 }
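// For example, getFPEXT(MVT::f32, MVT::f64) returns FPEXT_F32_F64 (the
// "__extendsfdf2" routine by default), while a pairing not listed above,
// such as f16 -> f64, comes back as UNKNOWN_LIBCALL.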
202 
203 /// getFPROUND - Return the FPROUND_*_* value for the given types, or
204 /// UNKNOWN_LIBCALL if there is none.
205 RTLIB::Libcall RTLIB::getFPROUND(EVT OpVT, EVT RetVT) {
206  if (RetVT == MVT::f16) {
207  if (OpVT == MVT::f32)
208  return FPROUND_F32_F16;
209  if (OpVT == MVT::f64)
210  return FPROUND_F64_F16;
211  if (OpVT == MVT::f80)
212  return FPROUND_F80_F16;
213  if (OpVT == MVT::f128)
214  return FPROUND_F128_F16;
215  if (OpVT == MVT::ppcf128)
216  return FPROUND_PPCF128_F16;
217  } else if (RetVT == MVT::f32) {
218  if (OpVT == MVT::f64)
219  return FPROUND_F64_F32;
220  if (OpVT == MVT::f80)
221  return FPROUND_F80_F32;
222  if (OpVT == MVT::f128)
223  return FPROUND_F128_F32;
224  if (OpVT == MVT::ppcf128)
225  return FPROUND_PPCF128_F32;
226  } else if (RetVT == MVT::f64) {
227  if (OpVT == MVT::f80)
228  return FPROUND_F80_F64;
229  if (OpVT == MVT::f128)
230  return FPROUND_F128_F64;
231  if (OpVT == MVT::ppcf128)
232  return FPROUND_PPCF128_F64;
233  } else if (RetVT == MVT::f80) {
234  if (OpVT == MVT::f128)
235  return FPROUND_F128_F80;
236  }
237 
238  return UNKNOWN_LIBCALL;
239 }
240 
241 /// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or
242 /// UNKNOWN_LIBCALL if there is none.
243 RTLIB::Libcall RTLIB::getFPTOSINT(EVT OpVT, EVT RetVT) {
244  if (OpVT == MVT::f32) {
245  if (RetVT == MVT::i32)
246  return FPTOSINT_F32_I32;
247  if (RetVT == MVT::i64)
248  return FPTOSINT_F32_I64;
249  if (RetVT == MVT::i128)
250  return FPTOSINT_F32_I128;
251  } else if (OpVT == MVT::f64) {
252  if (RetVT == MVT::i32)
253  return FPTOSINT_F64_I32;
254  if (RetVT == MVT::i64)
255  return FPTOSINT_F64_I64;
256  if (RetVT == MVT::i128)
257  return FPTOSINT_F64_I128;
258  } else if (OpVT == MVT::f80) {
259  if (RetVT == MVT::i32)
260  return FPTOSINT_F80_I32;
261  if (RetVT == MVT::i64)
262  return FPTOSINT_F80_I64;
263  if (RetVT == MVT::i128)
264  return FPTOSINT_F80_I128;
265  } else if (OpVT == MVT::f128) {
266  if (RetVT == MVT::i32)
267  return FPTOSINT_F128_I32;
268  if (RetVT == MVT::i64)
269  return FPTOSINT_F128_I64;
270  if (RetVT == MVT::i128)
271  return FPTOSINT_F128_I128;
272  } else if (OpVT == MVT::ppcf128) {
273  if (RetVT == MVT::i32)
274  return FPTOSINT_PPCF128_I32;
275  if (RetVT == MVT::i64)
276  return FPTOSINT_PPCF128_I64;
277  if (RetVT == MVT::i128)
278  return FPTOSINT_PPCF128_I128;
279  }
280  return UNKNOWN_LIBCALL;
281 }
282 
283 /// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or
284 /// UNKNOWN_LIBCALL if there is none.
285 RTLIB::Libcall RTLIB::getFPTOUINT(EVT OpVT, EVT RetVT) {
286  if (OpVT == MVT::f32) {
287  if (RetVT == MVT::i32)
288  return FPTOUINT_F32_I32;
289  if (RetVT == MVT::i64)
290  return FPTOUINT_F32_I64;
291  if (RetVT == MVT::i128)
292  return FPTOUINT_F32_I128;
293  } else if (OpVT == MVT::f64) {
294  if (RetVT == MVT::i32)
295  return FPTOUINT_F64_I32;
296  if (RetVT == MVT::i64)
297  return FPTOUINT_F64_I64;
298  if (RetVT == MVT::i128)
299  return FPTOUINT_F64_I128;
300  } else if (OpVT == MVT::f80) {
301  if (RetVT == MVT::i32)
302  return FPTOUINT_F80_I32;
303  if (RetVT == MVT::i64)
304  return FPTOUINT_F80_I64;
305  if (RetVT == MVT::i128)
306  return FPTOUINT_F80_I128;
307  } else if (OpVT == MVT::f128) {
308  if (RetVT == MVT::i32)
309  return FPTOUINT_F128_I32;
310  if (RetVT == MVT::i64)
311  return FPTOUINT_F128_I64;
312  if (RetVT == MVT::i128)
313  return FPTOUINT_F128_I128;
314  } else if (OpVT == MVT::ppcf128) {
315  if (RetVT == MVT::i32)
316  return FPTOUINT_PPCF128_I32;
317  if (RetVT == MVT::i64)
318  return FPTOUINT_PPCF128_I64;
319  if (RetVT == MVT::i128)
320  return FPTOUINT_PPCF128_I128;
321  }
322  return UNKNOWN_LIBCALL;
323 }
324 
325 /// getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or
326 /// UNKNOWN_LIBCALL if there is none.
327 RTLIB::Libcall RTLIB::getSINTTOFP(EVT OpVT, EVT RetVT) {
328  if (OpVT == MVT::i32) {
329  if (RetVT == MVT::f32)
330  return SINTTOFP_I32_F32;
331  if (RetVT == MVT::f64)
332  return SINTTOFP_I32_F64;
333  if (RetVT == MVT::f80)
334  return SINTTOFP_I32_F80;
335  if (RetVT == MVT::f128)
336  return SINTTOFP_I32_F128;
337  if (RetVT == MVT::ppcf128)
338  return SINTTOFP_I32_PPCF128;
339  } else if (OpVT == MVT::i64) {
340  if (RetVT == MVT::f32)
341  return SINTTOFP_I64_F32;
342  if (RetVT == MVT::f64)
343  return SINTTOFP_I64_F64;
344  if (RetVT == MVT::f80)
345  return SINTTOFP_I64_F80;
346  if (RetVT == MVT::f128)
347  return SINTTOFP_I64_F128;
348  if (RetVT == MVT::ppcf128)
349  return SINTTOFP_I64_PPCF128;
350  } else if (OpVT == MVT::i128) {
351  if (RetVT == MVT::f32)
352  return SINTTOFP_I128_F32;
353  if (RetVT == MVT::f64)
354  return SINTTOFP_I128_F64;
355  if (RetVT == MVT::f80)
356  return SINTTOFP_I128_F80;
357  if (RetVT == MVT::f128)
358  return SINTTOFP_I128_F128;
359  if (RetVT == MVT::ppcf128)
360  return SINTTOFP_I128_PPCF128;
361  }
362  return UNKNOWN_LIBCALL;
363 }
364 
365 /// getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or
366 /// UNKNOWN_LIBCALL if there is none.
367 RTLIB::Libcall RTLIB::getUINTTOFP(EVT OpVT, EVT RetVT) {
368  if (OpVT == MVT::i32) {
369  if (RetVT == MVT::f32)
370  return UINTTOFP_I32_F32;
371  if (RetVT == MVT::f64)
372  return UINTTOFP_I32_F64;
373  if (RetVT == MVT::f80)
374  return UINTTOFP_I32_F80;
375  if (RetVT == MVT::f128)
376  return UINTTOFP_I32_F128;
377  if (RetVT == MVT::ppcf128)
378  return UINTTOFP_I32_PPCF128;
379  } else if (OpVT == MVT::i64) {
380  if (RetVT == MVT::f32)
381  return UINTTOFP_I64_F32;
382  if (RetVT == MVT::f64)
383  return UINTTOFP_I64_F64;
384  if (RetVT == MVT::f80)
385  return UINTTOFP_I64_F80;
386  if (RetVT == MVT::f128)
387  return UINTTOFP_I64_F128;
388  if (RetVT == MVT::ppcf128)
389  return UINTTOFP_I64_PPCF128;
390  } else if (OpVT == MVT::i128) {
391  if (RetVT == MVT::f32)
392  return UINTTOFP_I128_F32;
393  if (RetVT == MVT::f64)
394  return UINTTOFP_I128_F64;
395  if (RetVT == MVT::f80)
396  return UINTTOFP_I128_F80;
397  if (RetVT == MVT::f128)
398  return UINTTOFP_I128_F128;
399  if (RetVT == MVT::ppcf128)
400  return UINTTOFP_I128_PPCF128;
401  }
402  return UNKNOWN_LIBCALL;
403 }
404 
405 RTLIB::Libcall RTLIB::getSYNC(unsigned Opc, MVT VT) {
406 #define OP_TO_LIBCALL(Name, Enum) \
407  case Name: \
408  switch (VT.SimpleTy) { \
409  default: \
410  return UNKNOWN_LIBCALL; \
411  case MVT::i8: \
412  return Enum##_1; \
413  case MVT::i16: \
414  return Enum##_2; \
415  case MVT::i32: \
416  return Enum##_4; \
417  case MVT::i64: \
418  return Enum##_8; \
419  case MVT::i128: \
420  return Enum##_16; \
421  }
422 
423  switch (Opc) {
424  OP_TO_LIBCALL(ISD::ATOMIC_SWAP, SYNC_LOCK_TEST_AND_SET)
425  OP_TO_LIBCALL(ISD::ATOMIC_CMP_SWAP, SYNC_VAL_COMPARE_AND_SWAP)
426  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_ADD, SYNC_FETCH_AND_ADD)
427  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_SUB, SYNC_FETCH_AND_SUB)
428  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_AND, SYNC_FETCH_AND_AND)
429  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_OR, SYNC_FETCH_AND_OR)
430  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_XOR, SYNC_FETCH_AND_XOR)
431  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_NAND, SYNC_FETCH_AND_NAND)
432  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MAX, SYNC_FETCH_AND_MAX)
433  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMAX, SYNC_FETCH_AND_UMAX)
434  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MIN, SYNC_FETCH_AND_MIN)
435  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMIN, SYNC_FETCH_AND_UMIN)
436  }
437 
438 #undef OP_TO_LIBCALL
439 
440  return UNKNOWN_LIBCALL;
441 }
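// As a usage sketch, lowering a 32-bit __sync-style atomic add would call
//   RTLIB::Libcall LC = RTLIB::getSYNC(ISD::ATOMIC_LOAD_ADD, MVT::i32);
// and get back SYNC_FETCH_AND_ADD_4 (i.e. __sync_fetch_and_add_4); any other
// opcode, or a width that is not i8/i16/i32/i64/i128, maps to UNKNOWN_LIBCALL.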
442 
443 RTLIB::Libcall RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
444  switch (ElementSize) {
445  case 1:
446  return MEMCPY_ELEMENT_UNORDERED_ATOMIC_1;
447  case 2:
448  return MEMCPY_ELEMENT_UNORDERED_ATOMIC_2;
449  case 4:
450  return MEMCPY_ELEMENT_UNORDERED_ATOMIC_4;
451  case 8:
452  return MEMCPY_ELEMENT_UNORDERED_ATOMIC_8;
453  case 16:
454  return MEMCPY_ELEMENT_UNORDERED_ATOMIC_16;
455  default:
456  return UNKNOWN_LIBCALL;
457  }
458 }
459 
460 RTLIB::Libcall RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
461  switch (ElementSize) {
462  case 1:
463  return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_1;
464  case 2:
465  return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_2;
466  case 4:
467  return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_4;
468  case 8:
469  return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_8;
470  case 16:
471  return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_16;
472  default:
473  return UNKNOWN_LIBCALL;
474  }
475 }
476 
477 RTLIB::Libcall RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
478  switch (ElementSize) {
479  case 1:
480  return MEMSET_ELEMENT_UNORDERED_ATOMIC_1;
481  case 2:
482  return MEMSET_ELEMENT_UNORDERED_ATOMIC_2;
483  case 4:
484  return MEMSET_ELEMENT_UNORDERED_ATOMIC_4;
485  case 8:
486  return MEMSET_ELEMENT_UNORDERED_ATOMIC_8;
487  case 16:
488  return MEMSET_ELEMENT_UNORDERED_ATOMIC_16;
489  default:
490  return UNKNOWN_LIBCALL;
491  }
492 }
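// All three selectors above key only on the element size of the
// element-unordered-atomic intrinsic: e.g. an element size of 4 picks
// MEMCPY_ELEMENT_UNORDERED_ATOMIC_4, while any size other than 1, 2, 4, 8 or
// 16 reports UNKNOWN_LIBCALL and the call cannot be lowered to a libcall.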
493 
494 /// InitCmpLibcallCCs - Set default comparison libcall CC.
495 static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
496  memset(CCs, ISD::SETCC_INVALID, sizeof(ISD::CondCode)*RTLIB::UNKNOWN_LIBCALL);
497  CCs[RTLIB::OEQ_F32] = ISD::SETEQ;
498  CCs[RTLIB::OEQ_F64] = ISD::SETEQ;
499  CCs[RTLIB::OEQ_F128] = ISD::SETEQ;
500  CCs[RTLIB::OEQ_PPCF128] = ISD::SETEQ;
501  CCs[RTLIB::UNE_F32] = ISD::SETNE;
502  CCs[RTLIB::UNE_F64] = ISD::SETNE;
503  CCs[RTLIB::UNE_F128] = ISD::SETNE;
504  CCs[RTLIB::UNE_PPCF128] = ISD::SETNE;
505  CCs[RTLIB::OGE_F32] = ISD::SETGE;
506  CCs[RTLIB::OGE_F64] = ISD::SETGE;
507  CCs[RTLIB::OGE_F128] = ISD::SETGE;
508  CCs[RTLIB::OGE_PPCF128] = ISD::SETGE;
509  CCs[RTLIB::OLT_F32] = ISD::SETLT;
510  CCs[RTLIB::OLT_F64] = ISD::SETLT;
511  CCs[RTLIB::OLT_F128] = ISD::SETLT;
512  CCs[RTLIB::OLT_PPCF128] = ISD::SETLT;
513  CCs[RTLIB::OLE_F32] = ISD::SETLE;
514  CCs[RTLIB::OLE_F64] = ISD::SETLE;
515  CCs[RTLIB::OLE_F128] = ISD::SETLE;
516  CCs[RTLIB::OLE_PPCF128] = ISD::SETLE;
517  CCs[RTLIB::OGT_F32] = ISD::SETGT;
518  CCs[RTLIB::OGT_F64] = ISD::SETGT;
519  CCs[RTLIB::OGT_F128] = ISD::SETGT;
520  CCs[RTLIB::OGT_PPCF128] = ISD::SETGT;
521  CCs[RTLIB::UO_F32] = ISD::SETNE;
522  CCs[RTLIB::UO_F64] = ISD::SETNE;
523  CCs[RTLIB::UO_F128] = ISD::SETNE;
524  CCs[RTLIB::UO_PPCF128] = ISD::SETNE;
525  CCs[RTLIB::O_F32] = ISD::SETEQ;
526  CCs[RTLIB::O_F64] = ISD::SETEQ;
527  CCs[RTLIB::O_F128] = ISD::SETEQ;
528  CCs[RTLIB::O_PPCF128] = ISD::SETEQ;
529 }
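// The condition codes above say how to interpret the integer result of each
// soft-float comparison routine. For example, the OEQ_F32 libcall (__eqsf2)
// returns zero when its operands are ordered and equal, so its result is
// tested SETEQ against zero; the unordered checks (UO_*) return nonzero when
// either operand is a NaN, hence SETNE.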
530 
531 /// NOTE: The TargetMachine owns TLOF.
532 TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm) : TM(tm) {
533  initActions();
534 
535  // Perform these initializations only once.
536  MaxStoresPerMemset = MaxStoresPerMemcpy = MaxStoresPerMemmove =
537  MaxLoadsPerMemcmp = 8;
538  MaxStoresPerMemsetOptSize = MaxStoresPerMemcpyOptSize =
539  MaxStoresPerMemmoveOptSize = MaxLoadsPerMemcmpOptSize = 4;
540  UseUnderscoreSetJmp = false;
541  UseUnderscoreLongJmp = false;
542  HasMultipleConditionRegisters = false;
543  HasExtractBitsInsn = false;
544  JumpIsExpensive = JumpIsExpensiveOverride;
545  PredictableSelectIsExpensive = false;
546  EnableExtLdPromotion = false;
547  HasFloatingPointExceptions = true;
548  StackPointerRegisterToSaveRestore = 0;
549  BooleanContents = UndefinedBooleanContent;
550  BooleanFloatContents = UndefinedBooleanContent;
551  BooleanVectorContents = UndefinedBooleanContent;
552  SchedPreferenceInfo = Sched::ILP;
553  JumpBufSize = 0;
554  JumpBufAlignment = 0;
555  MinFunctionAlignment = 0;
556  PrefFunctionAlignment = 0;
557  PrefLoopAlignment = 0;
558  GatherAllAliasesMaxDepth = 18;
559  MinStackArgumentAlignment = 1;
560  // TODO: the default will be switched to 0 in the next commit, along
561  // with the Target-specific changes necessary.
562  MaxAtomicSizeInBitsSupported = 1024;
563 
564  MinCmpXchgSizeInBits = 0;
565  SupportsUnalignedAtomics = false;
566 
567  std::fill(std::begin(LibcallRoutineNames), std::end(LibcallRoutineNames), nullptr);
568 
569  InitLibcalls(TM.getTargetTriple());
570  InitCmpLibcallCCs(CmpLibcallCCs);
571 }
572 
573 void TargetLoweringBase::initActions() {
574  // All operations default to being supported.
575  memset(OpActions, 0, sizeof(OpActions));
576  memset(LoadExtActions, 0, sizeof(LoadExtActions));
577  memset(TruncStoreActions, 0, sizeof(TruncStoreActions));
578  memset(IndexedModeActions, 0, sizeof(IndexedModeActions));
579  memset(CondCodeActions, 0, sizeof(CondCodeActions));
580  std::fill(std::begin(RegClassForVT), std::end(RegClassForVT), nullptr);
581  std::fill(std::begin(TargetDAGCombineArray),
582  std::end(TargetDAGCombineArray), 0);
583 
584  // Set default actions for various operations.
585  for (MVT VT : MVT::all_valuetypes()) {
586  // Default all indexed load / store to expand.
587  for (unsigned IM = (unsigned)ISD::PRE_INC;
588  IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
589  setIndexedLoadAction(IM, VT, Expand);
590  setIndexedStoreAction(IM, VT, Expand);
591  }
592 
593  // Most backends expect to see the node which just returns the value loaded.
594  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Expand);
595 
596  // These operations default to expand.
609 
610  // Overflow operations default to expand
617 
618  // ADDCARRY operations default to expand
622 
623  // These default to Expand so they will be expanded to CTLZ/CTTZ by default.
626 
628 
629  // These library functions default to expand.
632 
633  // These operations default to expand for vector types.
634  if (VT.isVector()) {
639  }
640 
641  // For most targets @llvm.get.dynamic.area.offset just returns 0.
642  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, VT, Expand);
643  }
644 
645  // Most targets ignore the @llvm.prefetch intrinsic.
646  setOperationAction(ISD::PREFETCH, MVT::Other, Expand);
647 
648  // Most targets also ignore the @llvm.readcyclecounter intrinsic.
649  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Expand);
650 
651  // ConstantFP nodes default to expand. Targets can either change this to
652  // Legal, in which case all fp constants are legal, or use isFPImmLegal()
653  // to optimize expansions for certain constants.
654  setOperationAction(ISD::ConstantFP, MVT::f16, Expand);
655  setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
656  setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
657  setOperationAction(ISD::ConstantFP, MVT::f80, Expand);
658  setOperationAction(ISD::ConstantFP, MVT::f128, Expand);
659 
660  // These library functions default to expand.
661  for (MVT VT : {MVT::f32, MVT::f64, MVT::f128}) {
673  }
674 
675  // Default ISD::TRAP to expand (which turns it into abort).
676  setOperationAction(ISD::TRAP, MVT::Other, Expand);
677 
678  // On most systems, DEBUGTRAP and TRAP have no difference. The "Expand"
679  // here is to inform DAG Legalizer to replace DEBUGTRAP with TRAP.
680  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Expand);
681 }
682 
683 MVT TargetLoweringBase::getScalarShiftAmountTy(const DataLayout &DL,
684  EVT) const {
685  return MVT::getIntegerVT(8 * DL.getPointerSize(0));
686 }
687 
688 EVT TargetLoweringBase::getShiftAmountTy(EVT LHSTy, const DataLayout &DL,
689  bool LegalTypes) const {
690  assert(LHSTy.isInteger() && "Shift amount is not an integer type!");
691  if (LHSTy.isVector())
692  return LHSTy;
693  return LegalTypes ? getScalarShiftAmountTy(DL, LHSTy)
694  : getPointerTy(DL);
695 }
696 
697 bool TargetLoweringBase::canOpTrap(unsigned Op, EVT VT) const {
698  assert(isTypeLegal(VT));
699  switch (Op) {
700  default:
701  return false;
702  case ISD::SDIV:
703  case ISD::UDIV:
704  case ISD::SREM:
705  case ISD::UREM:
706  return true;
707  }
708 }
709 
710 void TargetLoweringBase::setJumpIsExpensive(bool isExpensive) {
711  // If the command-line option was specified, ignore this request.
712  if (!JumpIsExpensiveOverride.getNumOccurrences())
713  JumpIsExpensive = isExpensive;
714 }
715 
716 TargetLoweringBase::LegalizeKind
717 TargetLoweringBase::getTypeConversion(LLVMContext &Context, EVT VT) const {
718  // If this is a simple type, use the ComputeRegisterProp mechanism.
719  if (VT.isSimple()) {
720  MVT SVT = VT.getSimpleVT();
721  assert((unsigned)SVT.SimpleTy < array_lengthof(TransformToType));
722  MVT NVT = TransformToType[SVT.SimpleTy];
723  LegalizeTypeAction LA = ValueTypeActions.getTypeAction(SVT);
724 
725  assert((LA == TypeLegal || LA == TypeSoftenFloat ||
726  ValueTypeActions.getTypeAction(NVT) != TypePromoteInteger) &&
727  "Promote may not follow Expand or Promote");
728 
729  if (LA == TypeSplitVector)
730  return LegalizeKind(LA,
731  EVT::getVectorVT(Context, SVT.getVectorElementType(),
732  SVT.getVectorNumElements() / 2));
733  if (LA == TypeScalarizeVector)
734  return LegalizeKind(LA, SVT.getVectorElementType());
735  return LegalizeKind(LA, NVT);
736  }
737 
738  // Handle Extended Scalar Types.
739  if (!VT.isVector()) {
740  assert(VT.isInteger() && "Float types must be simple");
741  unsigned BitSize = VT.getSizeInBits();
742  // First promote to a power-of-two size, then expand if necessary.
743  if (BitSize < 8 || !isPowerOf2_32(BitSize)) {
744  EVT NVT = VT.getRoundIntegerType(Context);
745  assert(NVT != VT && "Unable to round integer VT");
746  LegalizeKind NextStep = getTypeConversion(Context, NVT);
747  // Avoid multi-step promotion.
748  if (NextStep.first == TypePromoteInteger)
749  return NextStep;
750  // Return rounded integer type.
751  return LegalizeKind(TypePromoteInteger, NVT);
752  }
753 
754  return LegalizeKind(TypeExpandInteger,
755  EVT::getIntegerVT(Context, VT.getSizeInBits() / 2));
756  }
757 
758  // Handle vector types.
759  unsigned NumElts = VT.getVectorNumElements();
760  EVT EltVT = VT.getVectorElementType();
761 
762  // Vectors with only one element are always scalarized.
763  if (NumElts == 1)
764  return LegalizeKind(TypeScalarizeVector, EltVT);
765 
766  // Try to widen vector elements until the element type is a power of two and
767  // promote it to a legal type later on, for example:
768  // <3 x i8> -> <4 x i8> -> <4 x i32>
769  if (EltVT.isInteger()) {
770  // Vectors with a number of elements that is not a power of two are always
771  // widened, for example <3 x i8> -> <4 x i8>.
772  if (!VT.isPow2VectorType()) {
773  NumElts = (unsigned)NextPowerOf2(NumElts);
774  EVT NVT = EVT::getVectorVT(Context, EltVT, NumElts);
775  return LegalizeKind(TypeWidenVector, NVT);
776  }
777 
778  // Examine the element type.
779  LegalizeKind LK = getTypeConversion(Context, EltVT);
780 
781  // If type is to be expanded, split the vector.
782  // <4 x i140> -> <2 x i140>
783  if (LK.first == TypeExpandInteger)
784  return LegalizeKind(TypeSplitVector,
785  EVT::getVectorVT(Context, EltVT, NumElts / 2));
786 
787  // Promote the integer element types until a legal vector type is found
788  // or until the element integer type is too big. If a legal type was not
789  // found, fallback to the usual mechanism of widening/splitting the
790  // vector.
791  EVT OldEltVT = EltVT;
792  while (true) {
793  // Increase the bitwidth of the element to the next pow-of-two
794  // (which is greater than 8 bits).
795  EltVT = EVT::getIntegerVT(Context, 1 + EltVT.getSizeInBits())
796  .getRoundIntegerType(Context);
797 
798  // Stop trying when getting a non-simple element type.
799  // Note that vector elements may be greater than legal vector element
800  // types. Example: X86 XMM registers hold 64bit element on 32bit
801  // systems.
802  if (!EltVT.isSimple())
803  break;
804 
805  // Build a new vector type and check if it is legal.
806  MVT NVT = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
807  // Found a legal promoted vector type.
808  if (NVT != MVT() && ValueTypeActions.getTypeAction(NVT) == TypeLegal)
809  return LegalizeKind(TypePromoteInteger,
810  EVT::getVectorVT(Context, EltVT, NumElts));
811  }
812 
813  // Reset the type to the unexpanded type if we did not find a legal vector
814  // type with a promoted vector element type.
815  EltVT = OldEltVT;
816  }
817 
818  // Try to widen the vector until a legal type is found.
819  // If there is no wider legal type, split the vector.
820  while (true) {
821  // Round up to the next power of 2.
822  NumElts = (unsigned)NextPowerOf2(NumElts);
823 
824  // If there is no simple vector type with this many elements then there
825  // cannot be a larger legal vector type. Note that this assumes that
826  // there are no skipped intermediate vector types in the simple types.
827  if (!EltVT.isSimple())
828  break;
829  MVT LargerVector = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
830  if (LargerVector == MVT())
831  break;
832 
833  // If this type is legal then widen the vector.
834  if (ValueTypeActions.getTypeAction(LargerVector) == TypeLegal)
835  return LegalizeKind(TypeWidenVector, LargerVector);
836  }
837 
838  // Widen odd vectors to next power of two.
839  if (!VT.isPow2VectorType()) {
840  EVT NVT = VT.getPow2VectorType(Context);
841  return LegalizeKind(TypeWidenVector, NVT);
842  }
843 
844  // Vectors with illegal element types are expanded.
845  EVT NVT = EVT::getVectorVT(Context, EltVT, VT.getVectorNumElements() / 2);
846  return LegalizeKind(TypeSplitVector, NVT);
847 }
848 
849 static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
850  unsigned &NumIntermediates,
851  MVT &RegisterVT,
852  TargetLoweringBase *TLI) {
853  // Figure out the right, legal destination reg to copy into.
854  unsigned NumElts = VT.getVectorNumElements();
855  MVT EltTy = VT.getVectorElementType();
856 
857  unsigned NumVectorRegs = 1;
858 
859  // FIXME: We don't support non-power-of-2-sized vectors for now. Ideally we
860  // could break down into LHS/RHS like LegalizeDAG does.
861  if (!isPowerOf2_32(NumElts)) {
862  NumVectorRegs = NumElts;
863  NumElts = 1;
864  }
865 
866  // Divide the input until we get to a supported size. This will always
867  // end with a scalar if the target doesn't support vectors.
868  while (NumElts > 1 && !TLI->isTypeLegal(MVT::getVectorVT(EltTy, NumElts))) {
869  NumElts >>= 1;
870  NumVectorRegs <<= 1;
871  }
872 
873  NumIntermediates = NumVectorRegs;
874 
875  MVT NewVT = MVT::getVectorVT(EltTy, NumElts);
876  if (!TLI->isTypeLegal(NewVT))
877  NewVT = EltTy;
878  IntermediateVT = NewVT;
879 
880  unsigned NewVTSize = NewVT.getSizeInBits();
881 
882  // Convert sizes such as i33 to i64.
883  if (!isPowerOf2_32(NewVTSize))
884  NewVTSize = NextPowerOf2(NewVTSize);
885 
886  MVT DestVT = TLI->getRegisterType(NewVT);
887  RegisterVT = DestVT;
888  if (EVT(DestVT).bitsLT(NewVT)) // Value is expanded, e.g. i64 -> i16.
889  return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits());
890 
891  // Otherwise, promotion or legal types use the same number of registers as
892  // the vector decimated to the appropriate level.
893  return NumVectorRegs;
894 }
895 
896 /// isLegalRC - Return true if the value types that can be represented by the
897 /// specified register class are all legal.
898 bool TargetLoweringBase::isLegalRC(const TargetRegisterInfo &TRI,
899  const TargetRegisterClass &RC) const {
900  for (auto I = TRI.legalclasstypes_begin(RC); *I != MVT::Other; ++I)
901  if (isTypeLegal(*I))
902  return true;
903  return false;
904 }
905 
906 /// Replace/modify any TargetFrameIndex operands with a target-dependent
907 /// sequence of memory operands that is recognized by PrologEpilogInserter.
908 MachineBasicBlock *
909 TargetLoweringBase::emitPatchPoint(MachineInstr &InitialMI,
910  MachineBasicBlock *MBB) const {
911  MachineInstr *MI = &InitialMI;
912  MachineFunction &MF = *MI->getMF();
913  MachineFrameInfo &MFI = MF.getFrameInfo();
914 
915  // We're handling multiple types of operands here:
916  // PATCHPOINT MetaArgs - live-in, read only, direct
917  // STATEPOINT Deopt Spill - live-through, read only, indirect
918  // STATEPOINT Deopt Alloca - live-through, read only, direct
919  // (We're currently conservative and mark the deopt slots read/write in
920  // practice.)
921  // STATEPOINT GC Spill - live-through, read/write, indirect
922  // STATEPOINT GC Alloca - live-through, read/write, direct
923  // The live-in vs live-through is handled already (the live through ones are
924  // all stack slots), but we need to handle the different type of stackmap
925  // operands and memory effects here.
926 
927  // MI changes inside this loop as we grow operands.
928  for(unsigned OperIdx = 0; OperIdx != MI->getNumOperands(); ++OperIdx) {
929  MachineOperand &MO = MI->getOperand(OperIdx);
930  if (!MO.isFI())
931  continue;
932 
933  // foldMemoryOperand builds a new MI after replacing a single FI operand
934  // with the canonical set of five x86 addressing-mode operands.
935  int FI = MO.getIndex();
936  MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), MI->getDesc());
937 
938  // Copy operands before the frame-index.
939  for (unsigned i = 0; i < OperIdx; ++i)
940  MIB.add(MI->getOperand(i));
941  // Add frame index operands recognized by stackmaps.cpp
942  if (MFI.isStatepointSpillSlotObjectIndex(FI)) {
943  // indirect-mem-ref tag, size, #FI, offset.
944  // Used for spills inserted by StatepointLowering. This codepath is not
945  // used for patchpoints/stackmaps at all, for these spilling is done via
946  // foldMemoryOperand callback only.
947  assert(MI->getOpcode() == TargetOpcode::STATEPOINT && "sanity");
948  MIB.addImm(StackMaps::IndirectMemRefOp);
949  MIB.addImm(MFI.getObjectSize(FI));
950  MIB.add(MI->getOperand(OperIdx));
951  MIB.addImm(0);
952  } else {
953  // direct-mem-ref tag, #FI, offset.
954  // Used by patchpoint, and direct alloca arguments to statepoints
955  MIB.addImm(StackMaps::DirectMemRefOp);
956  MIB.add(MI->getOperand(OperIdx));
957  MIB.addImm(0);
958  }
959  // Copy the operands after the frame index.
960  for (unsigned i = OperIdx + 1; i != MI->getNumOperands(); ++i)
961  MIB.add(MI->getOperand(i));
962 
963  // Inherit previous memory operands.
964  MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
965  assert(MIB->mayLoad() && "Folded a stackmap use to a non-load!");
966 
967  // Add a new memory operand for this FI.
968  assert(MFI.getObjectOffset(FI) != -1);
969 
970  auto Flags = MachineMemOperand::MOLoad;
971  if (MI->getOpcode() == TargetOpcode::STATEPOINT) {
972  Flags |= MachineMemOperand::MOStore;
973  Flags |= MachineMemOperand::MOVolatile;
974  }
975  MachineMemOperand *MMO = MF.getMachineMemOperand(
976  MachinePointerInfo::getFixedStack(MF, FI), Flags,
977  MF.getDataLayout().getPointerSize(), MFI.getObjectAlignment(FI));
978  MIB->addMemOperand(MF, MMO);
979 
980  // Replace the instruction and update the operand index.
981  MBB->insert(MachineBasicBlock::iterator(MI), MIB);
982  OperIdx += (MIB->getNumOperands() - MI->getNumOperands()) - 1;
983  MI->eraseFromParent();
984  MI = MIB;
985  }
986  return MBB;
987 }
988 
989 MachineBasicBlock *
990 TargetLoweringBase::emitXRayCustomEvent(MachineInstr &MI,
991  MachineBasicBlock *MBB) const {
992  assert(MI.getOpcode() == TargetOpcode::PATCHABLE_EVENT_CALL &&
993  "Called emitXRayCustomEvent on the wrong MI!");
994  auto &MF = *MI.getMF();
995  auto MIB = BuildMI(MF, MI.getDebugLoc(), MI.getDesc());
996  for (unsigned OpIdx = 0; OpIdx != MI.getNumOperands(); ++OpIdx)
997  MIB.add(MI.getOperand(OpIdx));
998 
999  MBB->insert(MachineBasicBlock::iterator(MI), MIB);
1000  MI.eraseFromParent();
1001  return MBB;
1002 }
1003 
1004 MachineBasicBlock *
1005 TargetLoweringBase::emitXRayTypedEvent(MachineInstr &MI,
1006  MachineBasicBlock *MBB) const {
1007  assert(MI.getOpcode() == TargetOpcode::PATCHABLE_TYPED_EVENT_CALL &&
1008  "Called emitXRayTypedEvent on the wrong MI!");
1009  auto &MF = *MI.getMF();
1010  auto MIB = BuildMI(MF, MI.getDebugLoc(), MI.getDesc());
1011  for (unsigned OpIdx = 0; OpIdx != MI.getNumOperands(); ++OpIdx)
1012  MIB.add(MI.getOperand(OpIdx));
1013 
1014  MBB->insert(MachineBasicBlock::iterator(MI), MIB);
1015  MI.eraseFromParent();
1016  return MBB;
1017 }
1018 
1019 /// findRepresentativeClass - Return the largest legal super-reg register class
1020 /// of the register class for the specified type and its associated "cost".
1021 // This function is in TargetLowering because it uses RegClassForVT which would
1022 // need to be moved to TargetRegisterInfo and would necessitate moving
1023 // isTypeLegal over as well - a massive change that would just require
1024 // TargetLowering having a TargetRegisterInfo class member that it would use.
1025 std::pair<const TargetRegisterClass *, uint8_t>
1026 TargetLoweringBase::findRepresentativeClass(const TargetRegisterInfo *TRI,
1027  MVT VT) const {
1028  const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
1029  if (!RC)
1030  return std::make_pair(RC, 0);
1031 
1032  // Compute the set of all super-register classes.
1033  BitVector SuperRegRC(TRI->getNumRegClasses());
1034  for (SuperRegClassIterator RCI(RC, TRI); RCI.isValid(); ++RCI)
1035  SuperRegRC.setBitsInMask(RCI.getMask());
1036 
1037  // Find the first legal register class with the largest spill size.
1038  const TargetRegisterClass *BestRC = RC;
1039  for (unsigned i : SuperRegRC.set_bits()) {
1040  const TargetRegisterClass *SuperRC = TRI->getRegClass(i);
1041  // We want the largest possible spill size.
1042  if (TRI->getSpillSize(*SuperRC) <= TRI->getSpillSize(*BestRC))
1043  continue;
1044  if (!isLegalRC(*TRI, *SuperRC))
1045  continue;
1046  BestRC = SuperRC;
1047  }
1048  return std::make_pair(BestRC, 1);
1049 }
1050 
1051 /// computeRegisterProperties - Once all of the register classes are added,
1052 /// this allows us to compute derived properties we expose.
1053 void TargetLoweringBase::computeRegisterProperties(
1054  const TargetRegisterInfo *TRI) {
1055  static_assert(MVT::MAX_ALLOWED_VALUETYPE >= MVT::LAST_VALUETYPE,
1056  "Too many value types for ValueTypeActions to hold!");
1057 
1058  // Everything defaults to needing one register.
1059  for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
1060  NumRegistersForVT[i] = 1;
1061  RegisterTypeForVT[i] = TransformToType[i] = (MVT::SimpleValueType)i;
1062  }
1063  // ...except isVoid, which doesn't need any registers.
1064  NumRegistersForVT[MVT::isVoid] = 0;
1065 
1066  // Find the largest integer register class.
1067  unsigned LargestIntReg = MVT::LAST_INTEGER_VALUETYPE;
1068  for (; RegClassForVT[LargestIntReg] == nullptr; --LargestIntReg)
1069  assert(LargestIntReg != MVT::i1 && "No integer registers defined!");
1070 
1071  // Every integer value type larger than this largest register takes twice as
1072  // many registers to represent as the previous ValueType.
1073  for (unsigned ExpandedReg = LargestIntReg + 1;
1074  ExpandedReg <= MVT::LAST_INTEGER_VALUETYPE; ++ExpandedReg) {
1075  NumRegistersForVT[ExpandedReg] = 2*NumRegistersForVT[ExpandedReg-1];
1076  RegisterTypeForVT[ExpandedReg] = (MVT::SimpleValueType)LargestIntReg;
1077  TransformToType[ExpandedReg] = (MVT::SimpleValueType)(ExpandedReg - 1);
1078  ValueTypeActions.setTypeAction((MVT::SimpleValueType)ExpandedReg,
1079  TypeExpandInteger);
1080  }
1081 
1082  // Inspect all of the ValueType's smaller than the largest integer
1083  // register to see which ones need promotion.
1084  unsigned LegalIntReg = LargestIntReg;
1085  for (unsigned IntReg = LargestIntReg - 1;
1086  IntReg >= (unsigned)MVT::i1; --IntReg) {
1087  MVT IVT = (MVT::SimpleValueType)IntReg;
1088  if (isTypeLegal(IVT)) {
1089  LegalIntReg = IntReg;
1090  } else {
1091  RegisterTypeForVT[IntReg] = TransformToType[IntReg] =
1092  (const MVT::SimpleValueType)LegalIntReg;
1093  ValueTypeActions.setTypeAction(IVT, TypePromoteInteger);
1094  }
1095  }
1096 
1097  // ppcf128 type is really two f64's.
1098  if (!isTypeLegal(MVT::ppcf128)) {
1099  if (isTypeLegal(MVT::f64)) {
1100  NumRegistersForVT[MVT::ppcf128] = 2*NumRegistersForVT[MVT::f64];
1101  RegisterTypeForVT[MVT::ppcf128] = MVT::f64;
1102  TransformToType[MVT::ppcf128] = MVT::f64;
1103  ValueTypeActions.setTypeAction(MVT::ppcf128, TypeExpandFloat);
1104  } else {
1105  NumRegistersForVT[MVT::ppcf128] = NumRegistersForVT[MVT::i128];
1106  RegisterTypeForVT[MVT::ppcf128] = RegisterTypeForVT[MVT::i128];
1107  TransformToType[MVT::ppcf128] = MVT::i128;
1108  ValueTypeActions.setTypeAction(MVT::ppcf128, TypeSoftenFloat);
1109  }
1110  }
1111 
1112  // Decide how to handle f128. If the target does not have native f128 support,
1113  // expand it to i128 and we will be generating soft float library calls.
1114  if (!isTypeLegal(MVT::f128)) {
1115  NumRegistersForVT[MVT::f128] = NumRegistersForVT[MVT::i128];
1116  RegisterTypeForVT[MVT::f128] = RegisterTypeForVT[MVT::i128];
1117  TransformToType[MVT::f128] = MVT::i128;
1118  ValueTypeActions.setTypeAction(MVT::f128, TypeSoftenFloat);
1119  }
1120 
1121  // Decide how to handle f64. If the target does not have native f64 support,
1122  // expand it to i64 and we will be generating soft float library calls.
1123  if (!isTypeLegal(MVT::f64)) {
1124  NumRegistersForVT[MVT::f64] = NumRegistersForVT[MVT::i64];
1125  RegisterTypeForVT[MVT::f64] = RegisterTypeForVT[MVT::i64];
1126  TransformToType[MVT::f64] = MVT::i64;
1127  ValueTypeActions.setTypeAction(MVT::f64, TypeSoftenFloat);
1128  }
1129 
1130  // Decide how to handle f32. If the target does not have native f32 support,
1131  // expand it to i32 and we will be generating soft float library calls.
1132  if (!isTypeLegal(MVT::f32)) {
1133  NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::i32];
1134  RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::i32];
1135  TransformToType[MVT::f32] = MVT::i32;
1136  ValueTypeActions.setTypeAction(MVT::f32, TypeSoftenFloat);
1137  }
1138 
1139  // Decide how to handle f16. If the target does not have native f16 support,
1140  // promote it to f32, because there are no f16 library calls (except for
1141  // conversions).
1142  if (!isTypeLegal(MVT::f16)) {
1143  NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::f32];
1144  RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::f32];
1145  TransformToType[MVT::f16] = MVT::f32;
1146  ValueTypeActions.setTypeAction(MVT::f16, TypePromoteFloat);
1147  }
1148 
1149  // Loop over all of the vector value types to see which need transformations.
1150  for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
1151  i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
1152  MVT VT = (MVT::SimpleValueType) i;
1153  if (isTypeLegal(VT))
1154  continue;
1155 
1156  MVT EltVT = VT.getVectorElementType();
1157  unsigned NElts = VT.getVectorNumElements();
1158  bool IsLegalWiderType = false;
1159  LegalizeTypeAction PreferredAction = getPreferredVectorAction(VT);
1160  switch (PreferredAction) {
1161  case TypePromoteInteger:
1162  // Try to promote the elements of integer vectors. If no legal
1163  // promotion was found, fall through to the widen-vector method.
1164  for (unsigned nVT = i + 1; nVT <= MVT::LAST_INTEGER_VECTOR_VALUETYPE; ++nVT) {
1165  MVT SVT = (MVT::SimpleValueType) nVT;
1166  // Promote vectors of integers to vectors with the same number
1167  // of elements, with a wider element type.
1168  if (SVT.getScalarSizeInBits() > EltVT.getSizeInBits() &&
1169  SVT.getVectorNumElements() == NElts && isTypeLegal(SVT)) {
1170  TransformToType[i] = SVT;
1171  RegisterTypeForVT[i] = SVT;
1172  NumRegistersForVT[i] = 1;
1173  ValueTypeActions.setTypeAction(VT, TypePromoteInteger);
1174  IsLegalWiderType = true;
1175  break;
1176  }
1177  }
1178  if (IsLegalWiderType)
1179  break;
1180  LLVM_FALLTHROUGH;
1181 
1182  case TypeWidenVector:
1183  // Try to widen the vector.
1184  for (unsigned nVT = i + 1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
1185  MVT SVT = (MVT::SimpleValueType) nVT;
1186  if (SVT.getVectorElementType() == EltVT
1187  && SVT.getVectorNumElements() > NElts && isTypeLegal(SVT)) {
1188  TransformToType[i] = SVT;
1189  RegisterTypeForVT[i] = SVT;
1190  NumRegistersForVT[i] = 1;
1191  ValueTypeActions.setTypeAction(VT, TypeWidenVector);
1192  IsLegalWiderType = true;
1193  break;
1194  }
1195  }
1196  if (IsLegalWiderType)
1197  break;
1198  LLVM_FALLTHROUGH;
1199 
1200  case TypeSplitVector:
1201  case TypeScalarizeVector: {
1202  MVT IntermediateVT;
1203  MVT RegisterVT;
1204  unsigned NumIntermediates;
1205  NumRegistersForVT[i] = getVectorTypeBreakdownMVT(VT, IntermediateVT,
1206  NumIntermediates, RegisterVT, this);
1207  RegisterTypeForVT[i] = RegisterVT;
1208 
1209  MVT NVT = VT.getPow2VectorType();
1210  if (NVT == VT) {
1211  // Type is already a power of 2. The default action is to split.
1212  TransformToType[i] = MVT::Other;
1213  if (PreferredAction == TypeScalarizeVector)
1214  ValueTypeActions.setTypeAction(VT, TypeScalarizeVector);
1215  else if (PreferredAction == TypeSplitVector)
1216  ValueTypeActions.setTypeAction(VT, TypeSplitVector);
1217  else
1218  // Set type action according to the number of elements.
1219  ValueTypeActions.setTypeAction(VT, NumElts == 1 ? TypeScalarizeVector
1220  : TypeSplitVector);
1221  } else {
1222  TransformToType[i] = NVT;
1223  ValueTypeActions.setTypeAction(VT, TypeWidenVector);
1224  }
1225  break;
1226  }
1227  default:
1228  llvm_unreachable("Unknown vector legalization action!");
1229  }
1230  }
1231 
1232  // Determine the 'representative' register class for each value type.
1233 // A representative register class is the largest (meaning one which is
1234  // not a sub-register class / subreg register class) legal register class for
1235  // a group of value types. For example, on i386, i8, i16, and i32
1236  // representative would be GR32; while on x86_64 it's GR64.
1237  for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
1238  const TargetRegisterClass* RRC;
1239  uint8_t Cost;
1240  std::tie(RRC, Cost) = findRepresentativeClass(TRI, (MVT::SimpleValueType)i);
1241  RepRegClassForVT[i] = RRC;
1242  RepRegClassCostForVT[i] = Cost;
1243  }
1244 }
1245 
1246 EVT TargetLoweringBase::getSetCCResultType(const DataLayout &DL, LLVMContext &,
1247  EVT VT) const {
1248  assert(!VT.isVector() && "No default SetCC type for vectors!");
1249  return getPointerTy(DL).SimpleTy;
1250 }
1251 
1252 MVT::SimpleValueType TargetLoweringBase::getCmpLibcallReturnType() const {
1253  return MVT::i32; // return the default value
1254 }
1255 
1256 /// getVectorTypeBreakdown - Vector types are broken down into some number of
1257 /// legal first class types. For example, MVT::v8f32 maps to 2 MVT::v4f32
1258 /// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack.
1259 /// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86.
1260 ///
1261 /// This method returns the number of registers needed, and the VT for each
1262 /// register. It also returns the VT and quantity of the intermediate values
1263 /// before they are promoted/expanded.
1264 unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
1265  EVT &IntermediateVT,
1266  unsigned &NumIntermediates,
1267  MVT &RegisterVT) const {
1268  unsigned NumElts = VT.getVectorNumElements();
1269 
1270  // If there is a wider vector type with the same element type as this one,
1271  // or a promoted vector type that has the same number of elements which
1272  // are wider, then we should convert to that legal vector type.
1273  // This handles things like <2 x float> -> <4 x float> and
1274  // <4 x i1> -> <4 x i32>.
1275  LegalizeTypeAction TA = getTypeAction(Context, VT);
1276  if (NumElts != 1 && (TA == TypeWidenVector || TA == TypePromoteInteger)) {
1277  EVT RegisterEVT = getTypeToTransformTo(Context, VT);
1278  if (isTypeLegal(RegisterEVT)) {
1279  IntermediateVT = RegisterEVT;
1280  RegisterVT = RegisterEVT.getSimpleVT();
1281  NumIntermediates = 1;
1282  return 1;
1283  }
1284  }
1285 
1286  // Figure out the right, legal destination reg to copy into.
1287  EVT EltTy = VT.getVectorElementType();
1288 
1289  unsigned NumVectorRegs = 1;
1290 
1291  // FIXME: We don't support non-power-of-2-sized vectors for now. Ideally we
1292  // could break down into LHS/RHS like LegalizeDAG does.
1293  if (!isPowerOf2_32(NumElts)) {
1294  NumVectorRegs = NumElts;
1295  NumElts = 1;
1296  }
1297 
1298  // Divide the input until we get to a supported size. This will always
1299  // end with a scalar if the target doesn't support vectors.
1300  while (NumElts > 1 && !isTypeLegal(
1301  EVT::getVectorVT(Context, EltTy, NumElts))) {
1302  NumElts >>= 1;
1303  NumVectorRegs <<= 1;
1304  }
1305 
1306  NumIntermediates = NumVectorRegs;
1307 
1308  EVT NewVT = EVT::getVectorVT(Context, EltTy, NumElts);
1309  if (!isTypeLegal(NewVT))
1310  NewVT = EltTy;
1311  IntermediateVT = NewVT;
1312 
1313  MVT DestVT = getRegisterType(Context, NewVT);
1314  RegisterVT = DestVT;
1315  unsigned NewVTSize = NewVT.getSizeInBits();
1316 
1317  // Convert sizes such as i33 to i64.
1318  if (!isPowerOf2_32(NewVTSize))
1319  NewVTSize = NextPowerOf2(NewVTSize);
1320 
1321  if (EVT(DestVT).bitsLT(NewVT)) // Value is expanded, e.g. i64 -> i16.
1322  return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits());
1323 
1324  // Otherwise, promotion or legal types use the same number of registers as
1325  // the vector decimated to the appropriate level.
1326  return NumVectorRegs;
1327 }
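// Worked example: on a target whose widest legal vector type is v4f32,
// breaking down MVT::v8f32 splits it once, so NumIntermediates == 2,
// IntermediateVT == RegisterVT == v4f32, and the function returns 2 registers
// (matching the v8f32 -> 2 x v4f32 case described in the comment above).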
1328 
1329 /// Get the EVTs and ArgFlags collections that represent the legalized return
1330 /// type of the given function. This does not require a DAG or a return value,
1331 /// and is suitable for use before any DAGs for the function are constructed.
1332 /// TODO: Move this out of TargetLowering.cpp.
1333 void llvm::GetReturnInfo(Type *ReturnType, AttributeList attr,
1334  SmallVectorImpl<ISD::OutputArg> &Outs,
1335  const TargetLowering &TLI, const DataLayout &DL) {
1336  SmallVector<EVT, 4> ValueVTs;
1337  ComputeValueVTs(TLI, DL, ReturnType, ValueVTs);
1338  unsigned NumValues = ValueVTs.size();
1339  if (NumValues == 0) return;
1340 
1341  for (unsigned j = 0, f = NumValues; j != f; ++j) {
1342  EVT VT = ValueVTs[j];
1343  ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
1344 
1345  if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::SExt))
1346  ExtendKind = ISD::SIGN_EXTEND;
1347  else if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::ZExt))
1348  ExtendKind = ISD::ZERO_EXTEND;
1349 
1350  // FIXME: C calling convention requires the return type to be promoted to
1351  // at least 32-bit. But this is not necessary for non-C calling
1352  // conventions. The frontend should mark functions whose return values
1353  // require promoting with signext or zeroext attributes.
1354  if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
1355  MVT MinVT = TLI.getRegisterType(ReturnType->getContext(), MVT::i32);
1356  if (VT.bitsLT(MinVT))
1357  VT = MinVT;
1358  }
1359 
1360  unsigned NumParts =
1361  TLI.getNumRegistersForCallingConv(ReturnType->getContext(), VT);
1362  MVT PartVT =
1363  TLI.getRegisterTypeForCallingConv(ReturnType->getContext(), VT);
1364 
1365  // 'inreg' on function refers to return value
1366  ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
1367  if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::InReg))
1368  Flags.setInReg();
1369 
1370  // Propagate extension type if any
1371  if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::SExt))
1372  Flags.setSExt();
1373  else if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::ZExt))
1374  Flags.setZExt();
1375 
1376  for (unsigned i = 0; i < NumParts; ++i)
1377  Outs.push_back(ISD::OutputArg(Flags, PartVT, VT, /*isFixed=*/true, 0, 0));
1378  }
1379 }
1380 
1381 /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
1382 /// function arguments in the caller parameter area. This is the actual
1383 /// alignment, not its logarithm.
1384 unsigned TargetLoweringBase::getByValTypeAlignment(Type *Ty,
1385  const DataLayout &DL) const {
1386  return DL.getABITypeAlignment(Ty);
1387 }
1388 
1389 bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
1390  const DataLayout &DL, EVT VT,
1391  unsigned AddrSpace,
1392  unsigned Alignment,
1393  bool *Fast) const {
1394  // Check if the specified alignment is sufficient based on the data layout.
1395  // TODO: While using the data layout works in practice, a better solution
1396  // would be to implement this check directly (make this a virtual function).
1397  // For example, the ABI alignment may change based on software platform while
1398  // this function should only be affected by hardware implementation.
1399  Type *Ty = VT.getTypeForEVT(Context);
1400  if (Alignment >= DL.getABITypeAlignment(Ty)) {
1401  // Assume that an access that meets the ABI-specified alignment is fast.
1402  if (Fast != nullptr)
1403  *Fast = true;
1404  return true;
1405  }
1406 
1407  // This is a misaligned access.
1408  return allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment, Fast);
1409 }
1410 
1411 BranchProbability TargetLoweringBase::getPredictableBranchThreshold() const {
1412  return BranchProbability(MinPercentageForPredictableBranch, 100);
1413 }
1414 
1415 //===----------------------------------------------------------------------===//
1416 // TargetTransformInfo Helpers
1417 //===----------------------------------------------------------------------===//
1418 
1419 int TargetLoweringBase::InstructionOpcodeToISD(unsigned Opcode) const {
1420  enum InstructionOpcodes {
1421 #define HANDLE_INST(NUM, OPCODE, CLASS) OPCODE = NUM,
1422 #define LAST_OTHER_INST(NUM) InstructionOpcodesCount = NUM
1423 #include "llvm/IR/Instruction.def"
1424  };
1425  switch (static_cast<InstructionOpcodes>(Opcode)) {
1426  case Ret: return 0;
1427  case Br: return 0;
1428  case Switch: return 0;
1429  case IndirectBr: return 0;
1430  case Invoke: return 0;
1431  case Resume: return 0;
1432  case Unreachable: return 0;
1433  case CleanupRet: return 0;
1434  case CatchRet: return 0;
1435  case CatchPad: return 0;
1436  case CatchSwitch: return 0;
1437  case CleanupPad: return 0;
1438  case Add: return ISD::ADD;
1439  case FAdd: return ISD::FADD;
1440  case Sub: return ISD::SUB;
1441  case FSub: return ISD::FSUB;
1442  case Mul: return ISD::MUL;
1443  case FMul: return ISD::FMUL;
1444  case UDiv: return ISD::UDIV;
1445  case SDiv: return ISD::SDIV;
1446  case FDiv: return ISD::FDIV;
1447  case URem: return ISD::UREM;
1448  case SRem: return ISD::SREM;
1449  case FRem: return ISD::FREM;
1450  case Shl: return ISD::SHL;
1451  case LShr: return ISD::SRL;
1452  case AShr: return ISD::SRA;
1453  case And: return ISD::AND;
1454  case Or: return ISD::OR;
1455  case Xor: return ISD::XOR;
1456  case Alloca: return 0;
1457  case Load: return ISD::LOAD;
1458  case Store: return ISD::STORE;
1459  case GetElementPtr: return 0;
1460  case Fence: return 0;
1461  case AtomicCmpXchg: return 0;
1462  case AtomicRMW: return 0;
1463  case Trunc: return ISD::TRUNCATE;
1464  case ZExt: return ISD::ZERO_EXTEND;
1465  case SExt: return ISD::SIGN_EXTEND;
1466  case FPToUI: return ISD::FP_TO_UINT;
1467  case FPToSI: return ISD::FP_TO_SINT;
1468  case UIToFP: return ISD::UINT_TO_FP;
1469  case SIToFP: return ISD::SINT_TO_FP;
1470  case FPTrunc: return ISD::FP_ROUND;
1471  case FPExt: return ISD::FP_EXTEND;
1472  case PtrToInt: return ISD::BITCAST;
1473  case IntToPtr: return ISD::BITCAST;
1474  case BitCast: return ISD::BITCAST;
1475  case AddrSpaceCast: return ISD::ADDRSPACECAST;
1476  case ICmp: return ISD::SETCC;
1477  case FCmp: return ISD::SETCC;
1478  case PHI: return 0;
1479  case Call: return 0;
1480  case Select: return ISD::SELECT;
1481  case UserOp1: return 0;
1482  case UserOp2: return 0;
1483  case VAArg: return 0;
1484  case ExtractElement: return ISD::EXTRACT_VECTOR_ELT;
1485  case InsertElement: return ISD::INSERT_VECTOR_ELT;
1486  case ShuffleVector: return ISD::VECTOR_SHUFFLE;
1487  case ExtractValue: return ISD::MERGE_VALUES;
1488  case InsertValue: return ISD::MERGE_VALUES;
1489  case LandingPad: return 0;
1490  }
1491 
1492  llvm_unreachable("Unknown instruction type encountered!");
1493 }
1494 
1495 std::pair<int, MVT>
1496 TargetLoweringBase::getTypeLegalizationCost(const DataLayout &DL,
1497  Type *Ty) const {
1498  LLVMContext &C = Ty->getContext();
1499  EVT MTy = getValueType(DL, Ty);
1500 
1501  int Cost = 1;
1502  // We keep legalizing the type until we find a legal kind. We assume that
1503  // the only operation that costs anything is the split. After splitting
1504  // we need to handle two types.
1505  while (true) {
1506  LegalizeKind LK = getTypeConversion(C, MTy);
1507 
1508  if (LK.first == TypeLegal)
1509  return std::make_pair(Cost, MTy.getSimpleVT());
1510 
1511  if (LK.first == TypeSplitVector || LK.first == TypeExpandInteger)
1512  Cost *= 2;
1513 
1514  // Do not loop with f128 type.
1515  if (MTy == LK.second)
1516  return std::make_pair(Cost, MTy.getSimpleVT());
1517 
1518  // Keep legalizing the type.
1519  MTy = LK.second;
1520  }
1521 }
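// Illustrative cost query: on a target where only v4f32 is legal, a type such
// as <8 x float> needs one TypeSplitVector step, so this returns
// {2, MVT::v4f32}; an already-legal type returns a cost of 1 and its own MVT.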
1522 
1523 Value *TargetLoweringBase::getDefaultSafeStackPointerLocation(IRBuilder<> &IRB,
1524  bool UseTLS) const {
1525  // compiler-rt provides a variable with a magic name. Targets that do not
1526  // link with compiler-rt may also provide such a variable.
1527  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
1528  const char *UnsafeStackPtrVar = "__safestack_unsafe_stack_ptr";
1529  auto UnsafeStackPtr =
1530  dyn_cast_or_null<GlobalVariable>(M->getNamedValue(UnsafeStackPtrVar));
1531 
1532  Type *StackPtrTy = Type::getInt8PtrTy(M->getContext());
1533 
1534  if (!UnsafeStackPtr) {
1535  auto TLSModel = UseTLS ?
1536  GlobalValue::InitialExecTLSModel :
1537  GlobalValue::NotThreadLocal;
1538  // The global variable is not defined yet, define it ourselves.
1539  // We use the initial-exec TLS model because we do not support the
1540  // variable living anywhere other than in the main executable.
1541  UnsafeStackPtr = new GlobalVariable(
1542  *M, StackPtrTy, false, GlobalValue::ExternalLinkage, nullptr,
1543  UnsafeStackPtrVar, nullptr, TLSModel);
1544  } else {
1545  // The variable exists, check its type and attributes.
1546  if (UnsafeStackPtr->getValueType() != StackPtrTy)
1547  report_fatal_error(Twine(UnsafeStackPtrVar) + " must have void* type");
1548  if (UseTLS != UnsafeStackPtr->isThreadLocal())
1549  report_fatal_error(Twine(UnsafeStackPtrVar) + " must " +
1550  (UseTLS ? "" : "not ") + "be thread-local");
1551  }
1552  return UnsafeStackPtr;
1553 }
1554 
1555 Value *TargetLoweringBase::getSafeStackPointerLocation(IRBuilder<> &IRB) const {
1556  if (!TM.getTargetTriple().isAndroid())
1557  return getDefaultSafeStackPointerLocation(IRB, true);
1558 
1559  // Android provides a libc function to retrieve the address of the current
1560  // thread's unsafe stack pointer.
1561  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
1562  Type *StackPtrTy = Type::getInt8PtrTy(M->getContext());
1563  Value *Fn = M->getOrInsertFunction("__safestack_pointer_address",
1564  StackPtrTy->getPointerTo(0));
1565  return IRB.CreateCall(Fn);
1566 }
1567 
1568 //===----------------------------------------------------------------------===//
1569 // Loop Strength Reduction hooks
1570 //===----------------------------------------------------------------------===//
1571 
1572 /// isLegalAddressingMode - Return true if the addressing mode represented
1573 /// by AM is legal for this target, for a load/store of the specified type.
1574 bool TargetLoweringBase::isLegalAddressingMode(const DataLayout &DL,
1575  const AddrMode &AM, Type *Ty,
1576  unsigned AS, Instruction *I) const {
1577  // The default implementation of this implements a conservative RISCy, r+r and
1578  // r+i addr mode.
1579 
1580  // Allows a sign-extended 16-bit immediate field.
1581  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
1582  return false;
1583 
1584  // No global is ever allowed as a base.
1585  if (AM.BaseGV)
1586  return false;
1587 
1588  // Only support r+r,
1589  switch (AM.Scale) {
1590  case 0: // "r+i" or just "i", depending on HasBaseReg.
1591  break;
1592  case 1:
1593  if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed.
1594  return false;
1595  // Otherwise we have r+r or r+i.
1596  break;
1597  case 2:
1598  if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed.
1599  return false;
1600  // Allow 2*r as r+r.
1601  break;
1602  default: // Don't allow n * r
1603  return false;
1604  }
1605 
1606  return true;
1607 }
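// Under this conservative default, a mode like {BaseReg + 8} (Scale == 0,
// HasBaseReg, BaseOffs == 8) or {BaseReg + IndexReg} (Scale == 1, no offset)
// is legal, while 2*r combined with a base register or offset, any larger
// scale, or a global base is rejected.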
1608 
1609 //===----------------------------------------------------------------------===//
1610 // Stack Protector
1611 //===----------------------------------------------------------------------===//
1612 
1613 // For OpenBSD return its special guard variable. Otherwise return nullptr,
1614 // so that SelectionDAG handles SSP.
1615 Value *TargetLoweringBase::getIRStackGuard(IRBuilder<> &IRB) const {
1616  if (getTargetMachine().getTargetTriple().isOSOpenBSD()) {
1617  Module &M = *IRB.GetInsertBlock()->getParent()->getParent();
1618  PointerType *PtrTy = Type::getInt8PtrTy(M.getContext());
1619  return M.getOrInsertGlobal("__guard_local", PtrTy);
1620  }
1621  return nullptr;
1622 }
1623 
1624 // Currently only support "standard" __stack_chk_guard.
1625 // TODO: add LOAD_STACK_GUARD support.
1626 void TargetLoweringBase::insertSSPDeclarations(Module &M) const {
1627  if (!M.getNamedValue("__stack_chk_guard"))
1628  new GlobalVariable(M, Type::getInt8PtrTy(M.getContext()), false,
1629  GlobalVariable::ExternalLinkage,
1630  nullptr, "__stack_chk_guard");
1631 }
1632 
1633 // Currently only support "standard" __stack_chk_guard.
1634 // TODO: add LOAD_STACK_GUARD support.
1635 Value *TargetLoweringBase::getSDagStackGuard(const Module &M) const {
1636  return M.getNamedValue("__stack_chk_guard");
1637 }
1638 
1639 Value *TargetLoweringBase::getSSPStackGuardCheck(const Module &M) const {
1640  return nullptr;
1641 }
1642 
1643 unsigned TargetLoweringBase::getMinimumJumpTableEntries() const {
1644  return MinimumJumpTableEntries;
1645 }
1646 
1647 void TargetLoweringBase::setMinimumJumpTableEntries(unsigned Val) {
1648  MinimumJumpTableEntries = Val;
1649 }
1650 
1651 unsigned TargetLoweringBase::getMinimumJumpTableDensity(bool OptForSize) const {
1652  return OptForSize ? OptsizeJumpTableDensity : JumpTableDensity;
1653 }
1654 
1655 unsigned TargetLoweringBase::getMaximumJumpTableSize() const {
1656  return MaximumJumpTableSize;
1657 }
1658 
1659 void TargetLoweringBase::setMaximumJumpTableSize(unsigned Val) {
1660  MaximumJumpTableSize = Val;
1661 }
1662 
1663 //===----------------------------------------------------------------------===//
1664 // Reciprocal Estimates
1665 //===----------------------------------------------------------------------===//
1666 
1667 /// Get the reciprocal estimate attribute string for a function that will
1668 /// override the target defaults.
1669 static StringRef getRecipEstimateForFunc(MachineFunction &MF) {
1670  const Function &F = MF.getFunction();
1671  return F.getFnAttribute("reciprocal-estimates").getValueAsString();
1672 }
1673 
1674 /// Construct a string for the given reciprocal operation of the given type.
1675 /// This string should match the corresponding option to the front-end's
1676 /// "-mrecip" flag assuming those strings have been passed through in an
1677 /// attribute string. For example, "vec-divf" for a division of a vXf32.
1678 static std::string getReciprocalOpName(bool IsSqrt, EVT VT) {
1679  std::string Name = VT.isVector() ? "vec-" : "";
1680 
1681  Name += IsSqrt ? "sqrt" : "div";
1682 
1683  // TODO: Handle "half" or other float types?
1684  if (VT.getScalarType() == MVT::f64) {
1685  Name += "d";
1686  } else {
1687  assert(VT.getScalarType() == MVT::f32 &&
1688  "Unexpected FP type for reciprocal estimate");
1689  Name += "f";
1690  }
1691 
1692  return Name;
1693 }
1694 
1695 /// Return the character position and value (a single numeric character) of a
1696 /// customized refinement operation in the input string if it exists. Return
1697 /// false if there is no customized refinement step count.
1698 static bool parseRefinementStep(StringRef In, size_t &Position,
1699  uint8_t &Value) {
1700  const char RefStepToken = ':';
1701  Position = In.find(RefStepToken);
1702  if (Position == StringRef::npos)
1703  return false;
1704 
1705  StringRef RefStepString = In.substr(Position + 1);
1706  // Allow exactly one numeric character for the additional refinement
1707  // step parameter.
1708  if (RefStepString.size() == 1) {
1709  char RefStepChar = RefStepString[0];
1710  if (RefStepChar >= '0' && RefStepChar <= '9') {
1711  Value = RefStepChar - '0';
1712  return true;
1713  }
1714  }
1715  report_fatal_error("Invalid refinement step for -recip.");
1716 }
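// Worked examples for the parser above (hypothetical inputs, shown only to
// illustrate the expected behaviour):
//
//   size_t Pos; uint8_t Steps;
//   parseRefinementStep("vec-divf:2", Pos, Steps);  // true, Pos == 8, Steps == 2
//   parseRefinementStep("vec-divf",   Pos, Steps);  // false (no ':' present)
//   parseRefinementStep("divf:x",     Pos, Steps);  // fatal error: invalid step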
1717 
1718 /// For the input attribute string, return one of the ReciprocalEstimate enum
1719 /// status values (enabled, disabled, or not specified) for this operation on
1720 /// the specified data type.
1721 static int getOpEnabled(bool IsSqrt, EVT VT, StringRef Override) {
1722  if (Override.empty())
1723  return TargetLoweringBase::ReciprocalEstimate::Unspecified;
1724 
1725  SmallVector<StringRef, 4> OverrideVector;
1726  SplitString(Override, OverrideVector, ",");
1727  unsigned NumArgs = OverrideVector.size();
1728 
1729  // Check if "all", "none", or "default" was specified.
1730  if (NumArgs == 1) {
1731  // Look for an optional setting of the number of refinement steps needed
1732  // for this type of reciprocal operation.
1733  size_t RefPos;
1734  uint8_t RefSteps;
1735  if (parseRefinementStep(Override, RefPos, RefSteps)) {
1736  // Split the string for further processing.
1737  Override = Override.substr(0, RefPos);
1738  }
1739 
1740  // All reciprocal types are enabled.
1741  if (Override == "all")
1742  return TargetLoweringBase::ReciprocalEstimate::Enabled;
1743 
1744  // All reciprocal types are disabled.
1745  if (Override == "none")
1746  return TargetLoweringBase::ReciprocalEstimate::Disabled;
1747 
1748  // Target defaults for enablement are used.
1749  if (Override == "default")
1750  return TargetLoweringBase::ReciprocalEstimate::Unspecified;
1751  }
1752 
1753  // The attribute string may omit the size suffix ('f'/'d').
1754  std::string VTName = getReciprocalOpName(IsSqrt, VT);
1755  std::string VTNameNoSize = VTName;
1756  VTNameNoSize.pop_back();
1757  static const char DisabledPrefix = '!';
1758 
1759  for (StringRef RecipType : OverrideVector) {
1760  size_t RefPos;
1761  uint8_t RefSteps;
1762  if (parseRefinementStep(RecipType, RefPos, RefSteps))
1763  RecipType = RecipType.substr(0, RefPos);
1764 
1765  // Ignore the disablement token for string matching.
1766  bool IsDisabled = RecipType[0] == DisabledPrefix;
1767  if (IsDisabled)
1768  RecipType = RecipType.substr(1);
1769 
1770  if (RecipType.equals(VTName) || RecipType.equals(VTNameNoSize))
1771  return IsDisabled ? TargetLoweringBase::ReciprocalEstimate::Disabled
1772  : TargetLoweringBase::ReciprocalEstimate::Enabled;
1773  }
1774 
1775  return TargetLoweringBase::ReciprocalEstimate::Unspecified;
1776 }
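// Examples of how attribute strings map to the enum, for an f32 square-root
// query (IsSqrt == true, VT == MVT::f32); hypothetical inputs for illustration:
//
//   getOpEnabled(true, MVT::f32, "")                 -> Unspecified
//   getOpEnabled(true, MVT::f32, "all")              -> Enabled
//   getOpEnabled(true, MVT::f32, "none")             -> Disabled
//   getOpEnabled(true, MVT::f32, "!sqrtf,vec-divf")  -> Disabled ('!' prefix)
//   getOpEnabled(true, MVT::f32, "sqrt:2")           -> Enabled  (size suffix omitted)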
1777 
1778 /// For the input attribute string, return the customized refinement step count
1779 /// for this operation on the specified data type. If the step count does not
1780 /// exist, return the ReciprocalEstimate enum value for unspecified.
1781 static int getOpRefinementSteps(bool IsSqrt, EVT VT, StringRef Override) {
1782  if (Override.empty())
1783  return TargetLoweringBase::ReciprocalEstimate::Unspecified;
1784 
1785  SmallVector<StringRef, 4> OverrideVector;
1786  SplitString(Override, OverrideVector, ",");
1787  unsigned NumArgs = OverrideVector.size();
1788 
1789  // Check if "all", "default", or "none" was specified.
1790  if (NumArgs == 1) {
1791  // Look for an optional setting of the number of refinement steps needed
1792  // for this type of reciprocal operation.
1793  size_t RefPos;
1794  uint8_t RefSteps;
1795  if (!parseRefinementStep(Override, RefPos, RefSteps))
1796  return TargetLoweringBase::ReciprocalEstimate::Unspecified;
1797 
1798  // Split the string for further processing.
1799  Override = Override.substr(0, RefPos);
1800  assert(Override != "none" &&
1801  "Disabled reciprocals, but specifed refinement steps?");
1802 
1803  // If this is a general override, return the specified number of steps.
1804  if (Override == "all" || Override == "default")
1805  return RefSteps;
1806  }
1807 
1808  // The attribute string may omit the size suffix ('f'/'d').
1809  std::string VTName = getReciprocalOpName(IsSqrt, VT);
1810  std::string VTNameNoSize = VTName;
1811  VTNameNoSize.pop_back();
1812 
1813  for (StringRef RecipType : OverrideVector) {
1814  size_t RefPos;
1815  uint8_t RefSteps;
1816  if (!parseRefinementStep(RecipType, RefPos, RefSteps))
1817  continue;
1818 
1819  RecipType = RecipType.substr(0, RefPos);
1820  if (RecipType.equals(VTName) || RecipType.equals(VTNameNoSize))
1821  return RefSteps;
1822  }
1823 
1824  return TargetLoweringBase::ReciprocalEstimate::Unspecified;
1825 }
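// Refinement-step examples for an f32 division query (IsSqrt == false,
// VT == MVT::f32); hypothetical inputs for illustration:
//
//   getOpRefinementSteps(false, MVT::f32, "")            -> Unspecified
//   getOpRefinementSteps(false, MVT::f32, "all:2")       -> 2
//   getOpRefinementSteps(false, MVT::f32, "divf:3")      -> 3
//   getOpRefinementSteps(false, MVT::f32, "vec-divf:3")  -> Unspecified (vector entry only)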
1826 
1827 int TargetLoweringBase::getRecipEstimateSqrtEnabled(EVT VT,
1828  MachineFunction &MF) const {
1829  return getOpEnabled(true, VT, getRecipEstimateForFunc(MF));
1830 }
1831 
1832 int TargetLoweringBase::getRecipEstimateDivEnabled(EVT VT,
1833  MachineFunction &MF) const {
1834  return getOpEnabled(false, VT, getRecipEstimateForFunc(MF));
1835 }
1836 
1837 int TargetLoweringBase::getSqrtRefinementSteps(EVT VT,
1838  MachineFunction &MF) const {
1839  return getOpRefinementSteps(true, VT, getRecipEstimateForFunc(MF));
1840 }
1841 
1842 int TargetLoweringBase::getDivRefinementSteps(EVT VT,
1843  MachineFunction &MF) const {
1844  return getOpRefinementSteps(false, VT, getRecipEstimateForFunc(MF));
1845 }
1846 
1847 void TargetLoweringBase::finalizeLowering(MachineFunction &MF) const {
1848  MF.getRegInfo().freezeReservedRegs(MF);
1849 }