TargetLoweringBase.cpp (LLVM 6.0.0svn)
1 //===- TargetLoweringBase.cpp - Implement the TargetLoweringBase class ----===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This implements the TargetLoweringBase class.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/ADT/BitVector.h"
15 #include "llvm/ADT/STLExtras.h"
16 #include "llvm/ADT/SmallVector.h"
17 #include "llvm/ADT/StringExtras.h"
18 #include "llvm/ADT/StringRef.h"
19 #include "llvm/ADT/Triple.h"
20 #include "llvm/ADT/Twine.h"
21 #include "llvm/CodeGen/Analysis.h"
33 #include "llvm/CodeGen/StackMaps.h"
38 #include "llvm/IR/Attributes.h"
39 #include "llvm/IR/CallingConv.h"
40 #include "llvm/IR/DataLayout.h"
41 #include "llvm/IR/DerivedTypes.h"
42 #include "llvm/IR/Function.h"
43 #include "llvm/IR/GlobalValue.h"
44 #include "llvm/IR/GlobalVariable.h"
45 #include "llvm/IR/IRBuilder.h"
46 #include "llvm/IR/Module.h"
47 #include "llvm/IR/Type.h"
49 #include "llvm/Support/Casting.h"
50 #include "llvm/Support/CommandLine.h"
51 #include "llvm/Support/Compiler.h"
55 #include <algorithm>
56 #include <cassert>
57 #include <cstddef>
58 #include <cstdint>
59 #include <cstring>
60 #include <iterator>
61 #include <string>
62 #include <tuple>
63 #include <utility>
64 
65 using namespace llvm;
66 
67 static cl::opt<bool> JumpIsExpensiveOverride(
68  "jump-is-expensive", cl::init(false),
69  cl::desc("Do not create extra branches to split comparison logic."),
70  cl::Hidden);
71 
72 static cl::opt<unsigned> MinimumJumpTableEntries
73  ("min-jump-table-entries", cl::init(4), cl::Hidden,
74  cl::desc("Set minimum number of entries to use a jump table."));
75 
76 static cl::opt<unsigned> MaximumJumpTableSize
77  ("max-jump-table-size", cl::init(0), cl::Hidden,
78  cl::desc("Set maximum size of jump tables; zero for no limit."));
79 
80 /// Minimum jump table density for normal functions.
81 static cl::opt<unsigned>
82  JumpTableDensity("jump-table-density", cl::init(10), cl::Hidden,
83  cl::desc("Minimum density for building a jump table in "
84  "a normal function"));
85 
86 /// Minimum jump table density for -Os or -Oz functions.
87 static cl::opt<unsigned> OptsizeJumpTableDensity(
88  "optsize-jump-table-density", cl::init(40), cl::Hidden,
89  cl::desc("Minimum density for building a jump table in "
90  "an optsize function"));
91 
92 // Although this default value is arbitrary, it is not random. It is assumed
93 // that a condition that evaluates the same way by a higher percentage than this
94 // is best represented as control flow. Therefore, the default value N should be
95 // set such that the win from N% correct executions is greater than the loss
96 // from (100 - N)% mispredicted executions for the majority of intended targets.
97 static cl::opt<int> MinPercentageForPredictableBranch(
98  "min-predictable-branch", cl::init(99),
99  cl::desc("Minimum percentage (0-100) that a condition must be either true "
100  "or false to assume that the condition is predictable"),
101  cl::Hidden);
102 
103 /// InitLibcallNames - Set default libcall names.
104 static void InitLibcallNames(const char **Names, const Triple &TT) {
105 #define HANDLE_LIBCALL(code, name) \
106  Names[RTLIB::code] = name;
107 #include "llvm/CodeGen/RuntimeLibcalls.def"
108 #undef HANDLE_LIBCALL
109 
110  // A few names are different on particular architectures or environments.
111  if (TT.isOSDarwin()) {
112  // For f16/f32 conversions, Darwin uses the standard naming scheme, instead
113  // of the gnueabi-style __gnu_*_ieee.
114  // FIXME: What about other targets?
115  Names[RTLIB::FPEXT_F16_F32] = "__extendhfsf2";
116  Names[RTLIB::FPROUND_F32_F16] = "__truncsfhf2";
117  } else {
118  Names[RTLIB::FPEXT_F16_F32] = "__gnu_h2f_ieee";
119  Names[RTLIB::FPROUND_F32_F16] = "__gnu_f2h_ieee";
120  }
121 
122  if (TT.isGNUEnvironment() || TT.isOSFuchsia()) {
123  Names[RTLIB::SINCOS_F32] = "sincosf";
124  Names[RTLIB::SINCOS_F64] = "sincos";
125  Names[RTLIB::SINCOS_F80] = "sincosl";
126  Names[RTLIB::SINCOS_F128] = "sincosl";
127  Names[RTLIB::SINCOS_PPCF128] = "sincosl";
128  }
129 
130  if (TT.isOSOpenBSD()) {
131  Names[RTLIB::STACKPROTECTOR_CHECK_FAIL] = nullptr;
132  }
133 }
134 
135 /// Set default libcall CallingConvs.
136 static void InitLibcallCallingConvs(CallingConv::ID *CCs) {
137  for (int LC = 0; LC < RTLIB::UNKNOWN_LIBCALL; ++LC)
138  CCs[LC] = CallingConv::C;
139 }
140 
141 /// getFPEXT - Return the FPEXT_*_* value for the given types, or
142 /// UNKNOWN_LIBCALL if there is none.
143 RTLIB::Libcall RTLIB::getFPEXT(EVT OpVT, EVT RetVT) {
144  if (OpVT == MVT::f16) {
145  if (RetVT == MVT::f32)
146  return FPEXT_F16_F32;
147  } else if (OpVT == MVT::f32) {
148  if (RetVT == MVT::f64)
149  return FPEXT_F32_F64;
150  if (RetVT == MVT::f128)
151  return FPEXT_F32_F128;
152  if (RetVT == MVT::ppcf128)
153  return FPEXT_F32_PPCF128;
154  } else if (OpVT == MVT::f64) {
155  if (RetVT == MVT::f128)
156  return FPEXT_F64_F128;
157  else if (RetVT == MVT::ppcf128)
158  return FPEXT_F64_PPCF128;
159  }
160 
161  return UNKNOWN_LIBCALL;
162 }
163 
164 /// getFPROUND - Return the FPROUND_*_* value for the given types, or
165 /// UNKNOWN_LIBCALL if there is none.
166 RTLIB::Libcall RTLIB::getFPROUND(EVT OpVT, EVT RetVT) {
167  if (RetVT == MVT::f16) {
168  if (OpVT == MVT::f32)
169  return FPROUND_F32_F16;
170  if (OpVT == MVT::f64)
171  return FPROUND_F64_F16;
172  if (OpVT == MVT::f80)
173  return FPROUND_F80_F16;
174  if (OpVT == MVT::f128)
175  return FPROUND_F128_F16;
176  if (OpVT == MVT::ppcf128)
177  return FPROUND_PPCF128_F16;
178  } else if (RetVT == MVT::f32) {
179  if (OpVT == MVT::f64)
180  return FPROUND_F64_F32;
181  if (OpVT == MVT::f80)
182  return FPROUND_F80_F32;
183  if (OpVT == MVT::f128)
184  return FPROUND_F128_F32;
185  if (OpVT == MVT::ppcf128)
186  return FPROUND_PPCF128_F32;
187  } else if (RetVT == MVT::f64) {
188  if (OpVT == MVT::f80)
189  return FPROUND_F80_F64;
190  if (OpVT == MVT::f128)
191  return FPROUND_F128_F64;
192  if (OpVT == MVT::ppcf128)
193  return FPROUND_PPCF128_F64;
194  }
195 
196  return UNKNOWN_LIBCALL;
197 }
198 
199 /// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or
200 /// UNKNOWN_LIBCALL if there is none.
201 RTLIB::Libcall RTLIB::getFPTOSINT(EVT OpVT, EVT RetVT) {
202  if (OpVT == MVT::f32) {
203  if (RetVT == MVT::i32)
204  return FPTOSINT_F32_I32;
205  if (RetVT == MVT::i64)
206  return FPTOSINT_F32_I64;
207  if (RetVT == MVT::i128)
208  return FPTOSINT_F32_I128;
209  } else if (OpVT == MVT::f64) {
210  if (RetVT == MVT::i32)
211  return FPTOSINT_F64_I32;
212  if (RetVT == MVT::i64)
213  return FPTOSINT_F64_I64;
214  if (RetVT == MVT::i128)
215  return FPTOSINT_F64_I128;
216  } else if (OpVT == MVT::f80) {
217  if (RetVT == MVT::i32)
218  return FPTOSINT_F80_I32;
219  if (RetVT == MVT::i64)
220  return FPTOSINT_F80_I64;
221  if (RetVT == MVT::i128)
222  return FPTOSINT_F80_I128;
223  } else if (OpVT == MVT::f128) {
224  if (RetVT == MVT::i32)
225  return FPTOSINT_F128_I32;
226  if (RetVT == MVT::i64)
227  return FPTOSINT_F128_I64;
228  if (RetVT == MVT::i128)
229  return FPTOSINT_F128_I128;
230  } else if (OpVT == MVT::ppcf128) {
231  if (RetVT == MVT::i32)
232  return FPTOSINT_PPCF128_I32;
233  if (RetVT == MVT::i64)
234  return FPTOSINT_PPCF128_I64;
235  if (RetVT == MVT::i128)
236  return FPTOSINT_PPCF128_I128;
237  }
238  return UNKNOWN_LIBCALL;
239 }
240 
241 /// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or
242 /// UNKNOWN_LIBCALL if there is none.
243 RTLIB::Libcall RTLIB::getFPTOUINT(EVT OpVT, EVT RetVT) {
244  if (OpVT == MVT::f32) {
245  if (RetVT == MVT::i32)
246  return FPTOUINT_F32_I32;
247  if (RetVT == MVT::i64)
248  return FPTOUINT_F32_I64;
249  if (RetVT == MVT::i128)
250  return FPTOUINT_F32_I128;
251  } else if (OpVT == MVT::f64) {
252  if (RetVT == MVT::i32)
253  return FPTOUINT_F64_I32;
254  if (RetVT == MVT::i64)
255  return FPTOUINT_F64_I64;
256  if (RetVT == MVT::i128)
257  return FPTOUINT_F64_I128;
258  } else if (OpVT == MVT::f80) {
259  if (RetVT == MVT::i32)
260  return FPTOUINT_F80_I32;
261  if (RetVT == MVT::i64)
262  return FPTOUINT_F80_I64;
263  if (RetVT == MVT::i128)
264  return FPTOUINT_F80_I128;
265  } else if (OpVT == MVT::f128) {
266  if (RetVT == MVT::i32)
267  return FPTOUINT_F128_I32;
268  if (RetVT == MVT::i64)
269  return FPTOUINT_F128_I64;
270  if (RetVT == MVT::i128)
271  return FPTOUINT_F128_I128;
272  } else if (OpVT == MVT::ppcf128) {
273  if (RetVT == MVT::i32)
274  return FPTOUINT_PPCF128_I32;
275  if (RetVT == MVT::i64)
276  return FPTOUINT_PPCF128_I64;
277  if (RetVT == MVT::i128)
278  return FPTOUINT_PPCF128_I128;
279  }
280  return UNKNOWN_LIBCALL;
281 }
282 
283 /// getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or
284 /// UNKNOWN_LIBCALL if there is none.
285 RTLIB::Libcall RTLIB::getSINTTOFP(EVT OpVT, EVT RetVT) {
286  if (OpVT == MVT::i32) {
287  if (RetVT == MVT::f32)
288  return SINTTOFP_I32_F32;
289  if (RetVT == MVT::f64)
290  return SINTTOFP_I32_F64;
291  if (RetVT == MVT::f80)
292  return SINTTOFP_I32_F80;
293  if (RetVT == MVT::f128)
294  return SINTTOFP_I32_F128;
295  if (RetVT == MVT::ppcf128)
296  return SINTTOFP_I32_PPCF128;
297  } else if (OpVT == MVT::i64) {
298  if (RetVT == MVT::f32)
299  return SINTTOFP_I64_F32;
300  if (RetVT == MVT::f64)
301  return SINTTOFP_I64_F64;
302  if (RetVT == MVT::f80)
303  return SINTTOFP_I64_F80;
304  if (RetVT == MVT::f128)
305  return SINTTOFP_I64_F128;
306  if (RetVT == MVT::ppcf128)
307  return SINTTOFP_I64_PPCF128;
308  } else if (OpVT == MVT::i128) {
309  if (RetVT == MVT::f32)
310  return SINTTOFP_I128_F32;
311  if (RetVT == MVT::f64)
312  return SINTTOFP_I128_F64;
313  if (RetVT == MVT::f80)
314  return SINTTOFP_I128_F80;
315  if (RetVT == MVT::f128)
316  return SINTTOFP_I128_F128;
317  if (RetVT == MVT::ppcf128)
318  return SINTTOFP_I128_PPCF128;
319  }
320  return UNKNOWN_LIBCALL;
321 }
322 
323 /// getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or
324 /// UNKNOWN_LIBCALL if there is none.
325 RTLIB::Libcall RTLIB::getUINTTOFP(EVT OpVT, EVT RetVT) {
326  if (OpVT == MVT::i32) {
327  if (RetVT == MVT::f32)
328  return UINTTOFP_I32_F32;
329  if (RetVT == MVT::f64)
330  return UINTTOFP_I32_F64;
331  if (RetVT == MVT::f80)
332  return UINTTOFP_I32_F80;
333  if (RetVT == MVT::f128)
334  return UINTTOFP_I32_F128;
335  if (RetVT == MVT::ppcf128)
336  return UINTTOFP_I32_PPCF128;
337  } else if (OpVT == MVT::i64) {
338  if (RetVT == MVT::f32)
339  return UINTTOFP_I64_F32;
340  if (RetVT == MVT::f64)
341  return UINTTOFP_I64_F64;
342  if (RetVT == MVT::f80)
343  return UINTTOFP_I64_F80;
344  if (RetVT == MVT::f128)
345  return UINTTOFP_I64_F128;
346  if (RetVT == MVT::ppcf128)
347  return UINTTOFP_I64_PPCF128;
348  } else if (OpVT == MVT::i128) {
349  if (RetVT == MVT::f32)
350  return UINTTOFP_I128_F32;
351  if (RetVT == MVT::f64)
352  return UINTTOFP_I128_F64;
353  if (RetVT == MVT::f80)
354  return UINTTOFP_I128_F80;
355  if (RetVT == MVT::f128)
356  return UINTTOFP_I128_F128;
357  if (RetVT == MVT::ppcf128)
358  return UINTTOFP_I128_PPCF128;
359  }
360  return UNKNOWN_LIBCALL;
361 }
362 
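// RTLIB::getSYNC below maps an ISD atomic opcode plus its access width to the
// matching __sync_* runtime libcall; for example, ISD::ATOMIC_LOAD_ADD on
// MVT::i32 selects SYNC_FETCH_AND_ADD_4 (i.e. __sync_fetch_and_add_4).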
363 RTLIB::Libcall RTLIB::getSYNC(unsigned Opc, MVT VT) {
364 #define OP_TO_LIBCALL(Name, Enum) \
365  case Name: \
366  switch (VT.SimpleTy) { \
367  default: \
368  return UNKNOWN_LIBCALL; \
369  case MVT::i8: \
370  return Enum##_1; \
371  case MVT::i16: \
372  return Enum##_2; \
373  case MVT::i32: \
374  return Enum##_4; \
375  case MVT::i64: \
376  return Enum##_8; \
377  case MVT::i128: \
378  return Enum##_16; \
379  }
380 
381  switch (Opc) {
382  OP_TO_LIBCALL(ISD::ATOMIC_SWAP, SYNC_LOCK_TEST_AND_SET)
383  OP_TO_LIBCALL(ISD::ATOMIC_CMP_SWAP, SYNC_VAL_COMPARE_AND_SWAP)
384  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_ADD, SYNC_FETCH_AND_ADD)
385  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_SUB, SYNC_FETCH_AND_SUB)
386  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_AND, SYNC_FETCH_AND_AND)
387  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_OR, SYNC_FETCH_AND_OR)
388  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_XOR, SYNC_FETCH_AND_XOR)
389  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_NAND, SYNC_FETCH_AND_NAND)
390  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MAX, SYNC_FETCH_AND_MAX)
391  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMAX, SYNC_FETCH_AND_UMAX)
392  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MIN, SYNC_FETCH_AND_MIN)
393  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMIN, SYNC_FETCH_AND_UMIN)
394  }
395 
396 #undef OP_TO_LIBCALL
397 
398  return UNKNOWN_LIBCALL;
399 }
400 
401 RTLIB::Libcall RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
402  switch (ElementSize) {
403  case 1:
404  return MEMCPY_ELEMENT_UNORDERED_ATOMIC_1;
405  case 2:
406  return MEMCPY_ELEMENT_UNORDERED_ATOMIC_2;
407  case 4:
408  return MEMCPY_ELEMENT_UNORDERED_ATOMIC_4;
409  case 8:
410  return MEMCPY_ELEMENT_UNORDERED_ATOMIC_8;
411  case 16:
412  return MEMCPY_ELEMENT_UNORDERED_ATOMIC_16;
413  default:
414  return UNKNOWN_LIBCALL;
415  }
416 }
417 
418 RTLIB::Libcall RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
419  switch (ElementSize) {
420  case 1:
421  return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_1;
422  case 2:
423  return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_2;
424  case 4:
425  return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_4;
426  case 8:
427  return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_8;
428  case 16:
429  return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_16;
430  default:
431  return UNKNOWN_LIBCALL;
432  }
433 }
434 
435 RTLIB::Libcall RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
436  switch (ElementSize) {
437  case 1:
438  return MEMSET_ELEMENT_UNORDERED_ATOMIC_1;
439  case 2:
440  return MEMSET_ELEMENT_UNORDERED_ATOMIC_2;
441  case 4:
442  return MEMSET_ELEMENT_UNORDERED_ATOMIC_4;
443  case 8:
444  return MEMSET_ELEMENT_UNORDERED_ATOMIC_8;
445  case 16:
446  return MEMSET_ELEMENT_UNORDERED_ATOMIC_16;
447  default:
448  return UNKNOWN_LIBCALL;
449  }
450 }
451 
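// The condition codes set up below describe how the integer result of a
// soft-float comparison libcall is interpreted: the call's return value is
// compared against zero using the stored condition code to recover the
// boolean result (e.g. __eqsf2 returns 0 on equality, so OEQ_F32 uses SETEQ).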
452 /// InitCmpLibcallCCs - Set default comparison libcall CC.
453 static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
454  memset(CCs, ISD::SETCC_INVALID, sizeof(ISD::CondCode)*RTLIB::UNKNOWN_LIBCALL);
455  CCs[RTLIB::OEQ_F32] = ISD::SETEQ;
456  CCs[RTLIB::OEQ_F64] = ISD::SETEQ;
457  CCs[RTLIB::OEQ_F128] = ISD::SETEQ;
458  CCs[RTLIB::OEQ_PPCF128] = ISD::SETEQ;
459  CCs[RTLIB::UNE_F32] = ISD::SETNE;
460  CCs[RTLIB::UNE_F64] = ISD::SETNE;
461  CCs[RTLIB::UNE_F128] = ISD::SETNE;
462  CCs[RTLIB::UNE_PPCF128] = ISD::SETNE;
463  CCs[RTLIB::OGE_F32] = ISD::SETGE;
464  CCs[RTLIB::OGE_F64] = ISD::SETGE;
465  CCs[RTLIB::OGE_F128] = ISD::SETGE;
466  CCs[RTLIB::OGE_PPCF128] = ISD::SETGE;
467  CCs[RTLIB::OLT_F32] = ISD::SETLT;
468  CCs[RTLIB::OLT_F64] = ISD::SETLT;
469  CCs[RTLIB::OLT_F128] = ISD::SETLT;
470  CCs[RTLIB::OLT_PPCF128] = ISD::SETLT;
471  CCs[RTLIB::OLE_F32] = ISD::SETLE;
472  CCs[RTLIB::OLE_F64] = ISD::SETLE;
473  CCs[RTLIB::OLE_F128] = ISD::SETLE;
474  CCs[RTLIB::OLE_PPCF128] = ISD::SETLE;
475  CCs[RTLIB::OGT_F32] = ISD::SETGT;
476  CCs[RTLIB::OGT_F64] = ISD::SETGT;
477  CCs[RTLIB::OGT_F128] = ISD::SETGT;
478  CCs[RTLIB::OGT_PPCF128] = ISD::SETGT;
479  CCs[RTLIB::UO_F32] = ISD::SETNE;
480  CCs[RTLIB::UO_F64] = ISD::SETNE;
481  CCs[RTLIB::UO_F128] = ISD::SETNE;
482  CCs[RTLIB::UO_PPCF128] = ISD::SETNE;
483  CCs[RTLIB::O_F32] = ISD::SETEQ;
484  CCs[RTLIB::O_F64] = ISD::SETEQ;
485  CCs[RTLIB::O_F128] = ISD::SETEQ;
486  CCs[RTLIB::O_PPCF128] = ISD::SETEQ;
487 }
488 
489 /// NOTE: The TargetMachine owns TLOF.
490 TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm) : TM(tm) {
491  initActions();
492 
493  // Perform these initializations only once.
494  MaxStoresPerMemset = MaxStoresPerMemcpy = MaxStoresPerMemmove =
495      MaxLoadsPerMemcmp = 8;
496  MaxStoresPerMemsetOptSize = MaxStoresPerMemcpyOptSize =
497      MaxStoresPerMemmoveOptSize = MaxLoadsPerMemcmpOptSize = 4;
498  UseUnderscoreSetJmp = false;
499  UseUnderscoreLongJmp = false;
500  HasMultipleConditionRegisters = false;
501  HasExtractBitsInsn = false;
502  JumpIsExpensive = JumpIsExpensiveOverride;
503  PredictableSelectIsExpensive = false;
504  EnableExtLdPromotion = false;
505  HasFloatingPointExceptions = true;
506  StackPointerRegisterToSaveRestore = 0;
507  BooleanContents = UndefinedBooleanContent;
508  BooleanFloatContents = UndefinedBooleanContent;
509  BooleanVectorContents = UndefinedBooleanContent;
510  SchedPreferenceInfo = Sched::ILP;
511  JumpBufSize = 0;
512  JumpBufAlignment = 0;
513  MinFunctionAlignment = 0;
514  PrefFunctionAlignment = 0;
515  PrefLoopAlignment = 0;
516  GatherAllAliasesMaxDepth = 18;
517  MinStackArgumentAlignment = 1;
518  // TODO: the default will be switched to 0 in the next commit, along
519  // with the Target-specific changes necessary.
520  MaxAtomicSizeInBitsSupported = 1024;
521 
522  MinCmpXchgSizeInBits = 0;
523 
524  std::fill(std::begin(LibcallRoutineNames), std::end(LibcallRoutineNames), nullptr);
525 
526  InitLibcallNames(LibcallRoutineNames, TM.getTargetTriple());
527  InitCmpLibcallCCs(CmpLibcallCCs);
528  InitLibcallCallingConvs(LibcallCallingConvs);
529 }
530 
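// initActions() installs the target-independent defaults; targets refine them
// in their own TargetLowering constructors by calling setOperationAction /
// setLoadExtAction / setTruncStoreAction for the operations and types they
// support natively.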
531 void TargetLoweringBase::initActions() {
532  // All operations default to being supported.
533  memset(OpActions, 0, sizeof(OpActions));
534  memset(LoadExtActions, 0, sizeof(LoadExtActions));
535  memset(TruncStoreActions, 0, sizeof(TruncStoreActions));
536  memset(IndexedModeActions, 0, sizeof(IndexedModeActions));
537  memset(CondCodeActions, 0, sizeof(CondCodeActions));
538  std::fill(std::begin(RegClassForVT), std::end(RegClassForVT), nullptr);
539  std::fill(std::begin(TargetDAGCombineArray),
540  std::end(TargetDAGCombineArray), 0);
541 
542  // Set default actions for various operations.
543  for (MVT VT : MVT::all_valuetypes()) {
544  // Default all indexed load / store to expand.
545  for (unsigned IM = (unsigned)ISD::PRE_INC;
546  IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
547  setIndexedLoadAction(IM, VT, Expand);
548  setIndexedStoreAction(IM, VT, Expand);
549  }
550 
551  // Most backends expect to see the node which just returns the value loaded.
552  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Expand);
553 
554  // These operations default to expand.
555  setOperationAction(ISD::FGETSIGN, VT, Expand);
556  setOperationAction(ISD::CONCAT_VECTORS, VT, Expand);
557  setOperationAction(ISD::FMINNUM, VT, Expand);
558  setOperationAction(ISD::FMAXNUM, VT, Expand);
559  setOperationAction(ISD::FMINNAN, VT, Expand);
560  setOperationAction(ISD::FMAXNAN, VT, Expand);
561  setOperationAction(ISD::FMAD, VT, Expand);
562  setOperationAction(ISD::SMIN, VT, Expand);
563  setOperationAction(ISD::SMAX, VT, Expand);
564  setOperationAction(ISD::UMIN, VT, Expand);
565  setOperationAction(ISD::UMAX, VT, Expand);
566  setOperationAction(ISD::ABS, VT, Expand);
567 
568  // Overflow operations default to expand
569  setOperationAction(ISD::SADDO, VT, Expand);
570  setOperationAction(ISD::SSUBO, VT, Expand);
571  setOperationAction(ISD::UADDO, VT, Expand);
572  setOperationAction(ISD::USUBO, VT, Expand);
573  setOperationAction(ISD::SMULO, VT, Expand);
574  setOperationAction(ISD::UMULO, VT, Expand);
575 
576  // ADDCARRY operations default to expand
577  setOperationAction(ISD::ADDCARRY, VT, Expand);
578  setOperationAction(ISD::SUBCARRY, VT, Expand);
579  setOperationAction(ISD::SETCCCARRY, VT, Expand);
580 
581  // These default to Expand so they will be expanded to CTLZ/CTTZ by default.
582  setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
583  setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
584 
585  setOperationAction(ISD::BITREVERSE, VT, Expand);
586 
587  // These library functions default to expand.
588  setOperationAction(ISD::FROUND, VT, Expand);
589  setOperationAction(ISD::FPOWI, VT, Expand);
590 
591  // These operations default to expand for vector types.
592  if (VT.isVector()) {
593  setOperationAction(ISD::FCOPYSIGN, VT, Expand);
594  setOperationAction(ISD::ANY_EXTEND_VECTOR_INREG, VT, Expand);
595  setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Expand);
596  setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Expand);
597  }
598 
599  // For most targets @llvm.get.dynamic.area.offset just returns 0.
600  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, VT, Expand);
601  }
602 
603  // Most targets ignore the @llvm.prefetch intrinsic.
604  setOperationAction(ISD::PREFETCH, MVT::Other, Expand);
605 
606  // Most targets also ignore the @llvm.readcyclecounter intrinsic.
607  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Expand);
608 
609  // ConstantFP nodes default to expand. Targets can either change this to
610  // Legal, in which case all fp constants are legal, or use isFPImmLegal()
611  // to optimize expansions for certain constants.
612  setOperationAction(ISD::ConstantFP, MVT::f16, Expand);
613  setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
614  setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
615  setOperationAction(ISD::ConstantFP, MVT::f80, Expand);
616  setOperationAction(ISD::ConstantFP, MVT::f128, Expand);
617 
618  // These library functions default to expand.
619  for (MVT VT : {MVT::f32, MVT::f64, MVT::f128}) {
620  setOperationAction(ISD::FLOG, VT, Expand);
621  setOperationAction(ISD::FLOG2, VT, Expand);
622  setOperationAction(ISD::FLOG10, VT, Expand);
623  setOperationAction(ISD::FEXP, VT, Expand);
624  setOperationAction(ISD::FEXP2, VT, Expand);
625  setOperationAction(ISD::FFLOOR, VT, Expand);
626  setOperationAction(ISD::FNEARBYINT, VT, Expand);
627  setOperationAction(ISD::FCEIL, VT, Expand);
628  setOperationAction(ISD::FRINT, VT, Expand);
629  setOperationAction(ISD::FTRUNC, VT, Expand);
630  setOperationAction(ISD::FROUND, VT, Expand);
631  }
632 
633  // Default ISD::TRAP to expand (which turns it into abort).
634  setOperationAction(ISD::TRAP, MVT::Other, Expand);
635 
636  // On most systems, DEBUGTRAP and TRAP have no difference. The "Expand"
637  // here is to inform DAG Legalizer to replace DEBUGTRAP with TRAP.
638  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Expand);
639 }
640 
641 MVT TargetLoweringBase::getScalarShiftAmountTy(const DataLayout &DL,
642  EVT) const {
643  return MVT::getIntegerVT(8 * DL.getPointerSize(0));
644 }
645 
646 EVT TargetLoweringBase::getShiftAmountTy(EVT LHSTy,
647  const DataLayout &DL) const {
648  assert(LHSTy.isInteger() && "Shift amount is not an integer type!");
649  if (LHSTy.isVector())
650  return LHSTy;
651  return getScalarShiftAmountTy(DL, LHSTy);
652 }
653 
654 bool TargetLoweringBase::canOpTrap(unsigned Op, EVT VT) const {
655  assert(isTypeLegal(VT));
656  switch (Op) {
657  default:
658  return false;
659  case ISD::SDIV:
660  case ISD::UDIV:
661  case ISD::SREM:
662  case ISD::UREM:
663  return true;
664  }
665 }
666 
667 void TargetLoweringBase::setJumpIsExpensive(bool isExpensive) {
668  // If the command-line option was specified, ignore this request.
669  if (!JumpIsExpensiveOverride.getNumOccurrences())
670  JumpIsExpensive = isExpensive;
671 }
672 
673 TargetLoweringBase::LegalizeKind
674 TargetLoweringBase::getTypeConversion(LLVMContext &Context, EVT VT) const {
675  // If this is a simple type, use the ComputeRegisterProp mechanism.
676  if (VT.isSimple()) {
677  MVT SVT = VT.getSimpleVT();
678  assert((unsigned)SVT.SimpleTy < array_lengthof(TransformToType));
679  MVT NVT = TransformToType[SVT.SimpleTy];
680  LegalizeTypeAction LA = ValueTypeActions.getTypeAction(SVT);
681 
682  assert((LA == TypeLegal || LA == TypeSoftenFloat ||
683  ValueTypeActions.getTypeAction(NVT) != TypePromoteInteger) &&
684  "Promote may not follow Expand or Promote");
685 
686  if (LA == TypeSplitVector)
687  return LegalizeKind(LA,
688  EVT::getVectorVT(Context, SVT.getVectorElementType(),
689  SVT.getVectorNumElements() / 2));
690  if (LA == TypeScalarizeVector)
691  return LegalizeKind(LA, SVT.getVectorElementType());
692  return LegalizeKind(LA, NVT);
693  }
694 
695  // Handle Extended Scalar Types.
696  if (!VT.isVector()) {
697  assert(VT.isInteger() && "Float types must be simple");
698  unsigned BitSize = VT.getSizeInBits();
699  // First promote to a power-of-two size, then expand if necessary.
700  if (BitSize < 8 || !isPowerOf2_32(BitSize)) {
701  EVT NVT = VT.getRoundIntegerType(Context);
702  assert(NVT != VT && "Unable to round integer VT");
703  LegalizeKind NextStep = getTypeConversion(Context, NVT);
704  // Avoid multi-step promotion.
705  if (NextStep.first == TypePromoteInteger)
706  return NextStep;
707  // Return rounded integer type.
708  return LegalizeKind(TypePromoteInteger, NVT);
709  }
710 
711  return LegalizeKind(TypeExpandInteger,
712  EVT::getIntegerVT(Context, VT.getSizeInBits() / 2));
713  }
714 
715  // Handle vector types.
716  unsigned NumElts = VT.getVectorNumElements();
717  EVT EltVT = VT.getVectorElementType();
718 
719  // Vectors with only one element are always scalarized.
720  if (NumElts == 1)
721  return LegalizeKind(TypeScalarizeVector, EltVT);
722 
723  // Try to widen vector elements until the element type is a power of two and
724  // promote it to a legal type later on, for example:
725  // <3 x i8> -> <4 x i8> -> <4 x i32>
726  if (EltVT.isInteger()) {
727  // Vectors with a number of elements that is not a power of two are always
728  // widened, for example <3 x i8> -> <4 x i8>.
729  if (!VT.isPow2VectorType()) {
730  NumElts = (unsigned)NextPowerOf2(NumElts);
731  EVT NVT = EVT::getVectorVT(Context, EltVT, NumElts);
732  return LegalizeKind(TypeWidenVector, NVT);
733  }
734 
735  // Examine the element type.
736  LegalizeKind LK = getTypeConversion(Context, EltVT);
737 
738  // If type is to be expanded, split the vector.
739  // <4 x i140> -> <2 x i140>
740  if (LK.first == TypeExpandInteger)
741  return LegalizeKind(TypeSplitVector,
742  EVT::getVectorVT(Context, EltVT, NumElts / 2));
743 
744  // Promote the integer element types until a legal vector type is found
745  // or until the element integer type is too big. If a legal type was not
746  // found, fallback to the usual mechanism of widening/splitting the
747  // vector.
748  EVT OldEltVT = EltVT;
749  while (true) {
750  // Increase the bitwidth of the element to the next pow-of-two
751  // (which is greater than 8 bits).
752  EltVT = EVT::getIntegerVT(Context, 1 + EltVT.getSizeInBits())
753  .getRoundIntegerType(Context);
754 
755  // Stop trying when getting a non-simple element type.
756  // Note that vector elements may be greater than legal vector element
757  // types. Example: X86 XMM registers hold 64bit element on 32bit
758  // systems.
759  if (!EltVT.isSimple())
760  break;
761 
762  // Build a new vector type and check if it is legal.
763  MVT NVT = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
764  // Found a legal promoted vector type.
765  if (NVT != MVT() && ValueTypeActions.getTypeAction(NVT) == TypeLegal)
766  return LegalizeKind(TypePromoteInteger,
767  EVT::getVectorVT(Context, EltVT, NumElts));
768  }
769 
770  // Reset the type to the unexpanded type if we did not find a legal vector
771  // type with a promoted vector element type.
772  EltVT = OldEltVT;
773  }
774 
775  // Try to widen the vector until a legal type is found.
776  // If there is no wider legal type, split the vector.
777  while (true) {
778  // Round up to the next power of 2.
779  NumElts = (unsigned)NextPowerOf2(NumElts);
780 
781  // If there is no simple vector type with this many elements then there
782  // cannot be a larger legal vector type. Note that this assumes that
783  // there are no skipped intermediate vector types in the simple types.
784  if (!EltVT.isSimple())
785  break;
786  MVT LargerVector = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
787  if (LargerVector == MVT())
788  break;
789 
790  // If this type is legal then widen the vector.
791  if (ValueTypeActions.getTypeAction(LargerVector) == TypeLegal)
792  return LegalizeKind(TypeWidenVector, LargerVector);
793  }
794 
795  // Widen odd vectors to next power of two.
796  if (!VT.isPow2VectorType()) {
797  EVT NVT = VT.getPow2VectorType(Context);
798  return LegalizeKind(TypeWidenVector, NVT);
799  }
800 
801  // Vectors with illegal element types are expanded.
802  EVT NVT = EVT::getVectorVT(Context, EltVT, VT.getVectorNumElements() / 2);
803  return LegalizeKind(TypeSplitVector, NVT);
804 }
805 
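// For example, on a target where v4i32 is the widest legal vector type,
// getVectorTypeBreakdownMVT below breaks MVT::v8i32 into two v4i32
// intermediate values, each carried in a single v4i32 register.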
806 static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
807  unsigned &NumIntermediates,
808  MVT &RegisterVT,
809  TargetLoweringBase *TLI) {
810  // Figure out the right, legal destination reg to copy into.
811  unsigned NumElts = VT.getVectorNumElements();
812  MVT EltTy = VT.getVectorElementType();
813 
814  unsigned NumVectorRegs = 1;
815 
816  // FIXME: We don't support non-power-of-2-sized vectors for now. Ideally we
817  // could break down into LHS/RHS like LegalizeDAG does.
818  if (!isPowerOf2_32(NumElts)) {
819  NumVectorRegs = NumElts;
820  NumElts = 1;
821  }
822 
823  // Divide the input until we get to a supported size. This will always
824  // end with a scalar if the target doesn't support vectors.
825  while (NumElts > 1 && !TLI->isTypeLegal(MVT::getVectorVT(EltTy, NumElts))) {
826  NumElts >>= 1;
827  NumVectorRegs <<= 1;
828  }
829 
830  NumIntermediates = NumVectorRegs;
831 
832  MVT NewVT = MVT::getVectorVT(EltTy, NumElts);
833  if (!TLI->isTypeLegal(NewVT))
834  NewVT = EltTy;
835  IntermediateVT = NewVT;
836 
837  unsigned NewVTSize = NewVT.getSizeInBits();
838 
839  // Convert sizes such as i33 to i64.
840  if (!isPowerOf2_32(NewVTSize))
841  NewVTSize = NextPowerOf2(NewVTSize);
842 
843  MVT DestVT = TLI->getRegisterType(NewVT);
844  RegisterVT = DestVT;
845  if (EVT(DestVT).bitsLT(NewVT)) // Value is expanded, e.g. i64 -> i16.
846  return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits());
847 
848  // Otherwise, promotion or legal types use the same number of registers as
849  // the vector decimated to the appropriate level.
850  return NumVectorRegs;
851 }
852 
853 /// isLegalRC - Return true if the value types that can be represented by the
854 /// specified register class are all legal.
855 bool TargetLoweringBase::isLegalRC(const TargetRegisterInfo &TRI,
856  const TargetRegisterClass &RC) const {
857  for (auto I = TRI.legalclasstypes_begin(RC); *I != MVT::Other; ++I)
858  if (isTypeLegal(*I))
859  return true;
860  return false;
861 }
862 
863 /// Replace/modify any TargetFrameIndex operands with a target-dependent
864 /// sequence of memory operands that is recognized by PrologEpilogInserter.
865 MachineBasicBlock *
866 TargetLoweringBase::emitPatchPoint(MachineInstr &InitialMI,
867  MachineBasicBlock *MBB) const {
868  MachineInstr *MI = &InitialMI;
869  MachineFunction &MF = *MI->getMF();
870  MachineFrameInfo &MFI = MF.getFrameInfo();
871 
872  // We're handling multiple types of operands here:
873  // PATCHPOINT MetaArgs - live-in, read only, direct
874  // STATEPOINT Deopt Spill - live-through, read only, indirect
875  // STATEPOINT Deopt Alloca - live-through, read only, direct
876  // (We're currently conservative and mark the deopt slots read/write in
877  // practice.)
878  // STATEPOINT GC Spill - live-through, read/write, indirect
879  // STATEPOINT GC Alloca - live-through, read/write, direct
880  // The live-in vs live-through is handled already (the live through ones are
881  // all stack slots), but we need to handle the different type of stackmap
882  // operands and memory effects here.
883 
884  // MI changes inside this loop as we grow operands.
885  for(unsigned OperIdx = 0; OperIdx != MI->getNumOperands(); ++OperIdx) {
886  MachineOperand &MO = MI->getOperand(OperIdx);
887  if (!MO.isFI())
888  continue;
889 
890  // foldMemoryOperand builds a new MI after replacing a single FI operand
891  // with the canonical set of five x86 addressing-mode operands.
892  int FI = MO.getIndex();
893  MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), MI->getDesc());
894 
895  // Copy operands before the frame-index.
896  for (unsigned i = 0; i < OperIdx; ++i)
897  MIB.add(MI->getOperand(i));
898  // Add frame index operands recognized by stackmaps.cpp
899  if (MFI.isStatepointSpillSlotObjectIndex(FI)) {
900  // indirect-mem-ref tag, size, #FI, offset.
901  // Used for spills inserted by StatepointLowering. This codepath is not
902  // used for patchpoints/stackmaps at all, for these spilling is done via
903  // foldMemoryOperand callback only.
904  assert(MI->getOpcode() == TargetOpcode::STATEPOINT && "sanity");
905  MIB.addImm(StackMaps::IndirectMemRefOp);
906  MIB.addImm(MFI.getObjectSize(FI));
907  MIB.add(MI->getOperand(OperIdx));
908  MIB.addImm(0);
909  } else {
910  // direct-mem-ref tag, #FI, offset.
911  // Used by patchpoint, and direct alloca arguments to statepoints
912  MIB.addImm(StackMaps::DirectMemRefOp);
913  MIB.add(MI->getOperand(OperIdx));
914  MIB.addImm(0);
915  }
916  // Copy the operands after the frame index.
917  for (unsigned i = OperIdx + 1; i != MI->getNumOperands(); ++i)
918  MIB.add(MI->getOperand(i));
919 
920  // Inherit previous memory operands.
921  MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
922  assert(MIB->mayLoad() && "Folded a stackmap use to a non-load!");
923 
924  // Add a new memory operand for this FI.
925  assert(MFI.getObjectOffset(FI) != -1);
926 
927  auto Flags = MachineMemOperand::MOLoad;
928  if (MI->getOpcode() == TargetOpcode::STATEPOINT) {
929  Flags |= MachineMemOperand::MOStore;
930  Flags |= MachineMemOperand::MOVolatile;
931  }
932  MachineMemOperand *MMO = MF.getMachineMemOperand(
933  MachinePointerInfo::getFixedStack(MF, FI), Flags,
934  MFI.getObjectSize(FI), MFI.getObjectAlignment(FI));
935  MIB->addMemOperand(MF, MMO);
936 
937  // Replace the instruction and update the operand index.
938  MBB->insert(MachineBasicBlock::iterator(MI), MIB);
939  OperIdx += (MIB->getNumOperands() - MI->getNumOperands()) - 1;
940  MI->eraseFromParent();
941  MI = MIB;
942  }
943  return MBB;
944 }
945 
946 /// findRepresentativeClass - Return the largest legal super-reg register class
947 /// of the register class for the specified type and its associated "cost".
948 // This function is in TargetLowering because it uses RegClassForVT which would
949 // need to be moved to TargetRegisterInfo and would necessitate moving
950 // isTypeLegal over as well - a massive change that would just require
951 // TargetLowering having a TargetRegisterInfo class member that it would use.
952 std::pair<const TargetRegisterClass *, uint8_t>
953 TargetLoweringBase::findRepresentativeClass(const TargetRegisterInfo *TRI,
954  MVT VT) const {
955  const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
956  if (!RC)
957  return std::make_pair(RC, 0);
958 
959  // Compute the set of all super-register classes.
960  BitVector SuperRegRC(TRI->getNumRegClasses());
961  for (SuperRegClassIterator RCI(RC, TRI); RCI.isValid(); ++RCI)
962  SuperRegRC.setBitsInMask(RCI.getMask());
963 
964  // Find the first legal register class with the largest spill size.
965  const TargetRegisterClass *BestRC = RC;
966  for (unsigned i : SuperRegRC.set_bits()) {
967  const TargetRegisterClass *SuperRC = TRI->getRegClass(i);
968  // We want the largest possible spill size.
969  if (TRI->getSpillSize(*SuperRC) <= TRI->getSpillSize(*BestRC))
970  continue;
971  if (!isLegalRC(*TRI, *SuperRC))
972  continue;
973  BestRC = SuperRC;
974  }
975  return std::make_pair(BestRC, 1);
976 }
977 
978 /// computeRegisterProperties - Once all of the register classes are added,
979 /// this allows us to compute derived properties we expose.
980 void TargetLoweringBase::computeRegisterProperties(
981  const TargetRegisterInfo *TRI) {
982  static_assert(MVT::LAST_VALUETYPE <= MVT::MAX_ALLOWED_VALUETYPE,
983  "Too many value types for ValueTypeActions to hold!");
984 
985  // Everything defaults to needing one register.
986  for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
987  NumRegistersForVT[i] = 1;
988  RegisterTypeForVT[i] = TransformToType[i] = (MVT::SimpleValueType)i;
989  }
990  // ...except isVoid, which doesn't need any registers.
991  NumRegistersForVT[MVT::isVoid] = 0;
992 
993  // Find the largest integer register class.
994  unsigned LargestIntReg = MVT::LAST_INTEGER_VALUETYPE;
995  for (; RegClassForVT[LargestIntReg] == nullptr; --LargestIntReg)
996  assert(LargestIntReg != MVT::i1 && "No integer registers defined!");
997 
998  // Every integer value type larger than this largest register takes twice as
999  // many registers to represent as the previous ValueType.
1000  for (unsigned ExpandedReg = LargestIntReg + 1;
1001  ExpandedReg <= MVT::LAST_INTEGER_VALUETYPE; ++ExpandedReg) {
1002  NumRegistersForVT[ExpandedReg] = 2*NumRegistersForVT[ExpandedReg-1];
1003  RegisterTypeForVT[ExpandedReg] = (MVT::SimpleValueType)LargestIntReg;
1004  TransformToType[ExpandedReg] = (MVT::SimpleValueType)(ExpandedReg - 1);
1005  ValueTypeActions.setTypeAction((MVT::SimpleValueType)ExpandedReg,
1006  TypeExpandInteger);
1007  }
1008 
1009  // Inspect all of the ValueType's smaller than the largest integer
1010  // register to see which ones need promotion.
1011  unsigned LegalIntReg = LargestIntReg;
1012  for (unsigned IntReg = LargestIntReg - 1;
1013  IntReg >= (unsigned)MVT::i1; --IntReg) {
1014  MVT IVT = (MVT::SimpleValueType)IntReg;
1015  if (isTypeLegal(IVT)) {
1016  LegalIntReg = IntReg;
1017  } else {
1018  RegisterTypeForVT[IntReg] = TransformToType[IntReg] =
1019  (const MVT::SimpleValueType)LegalIntReg;
1020  ValueTypeActions.setTypeAction(IVT, TypePromoteInteger);
1021  }
1022  }
1023 
1024  // ppcf128 type is really two f64's.
1025  if (!isTypeLegal(MVT::ppcf128)) {
1026  if (isTypeLegal(MVT::f64)) {
1027  NumRegistersForVT[MVT::ppcf128] = 2*NumRegistersForVT[MVT::f64];
1028  RegisterTypeForVT[MVT::ppcf128] = MVT::f64;
1029  TransformToType[MVT::ppcf128] = MVT::f64;
1030  ValueTypeActions.setTypeAction(MVT::ppcf128, TypeExpandFloat);
1031  } else {
1032  NumRegistersForVT[MVT::ppcf128] = NumRegistersForVT[MVT::i128];
1033  RegisterTypeForVT[MVT::ppcf128] = RegisterTypeForVT[MVT::i128];
1034  TransformToType[MVT::ppcf128] = MVT::i128;
1035  ValueTypeActions.setTypeAction(MVT::ppcf128, TypeSoftenFloat);
1036  }
1037  }
1038 
1039  // Decide how to handle f128. If the target does not have native f128 support,
1040  // expand it to i128 and we will be generating soft float library calls.
1041  if (!isTypeLegal(MVT::f128)) {
1042  NumRegistersForVT[MVT::f128] = NumRegistersForVT[MVT::i128];
1043  RegisterTypeForVT[MVT::f128] = RegisterTypeForVT[MVT::i128];
1044  TransformToType[MVT::f128] = MVT::i128;
1045  ValueTypeActions.setTypeAction(MVT::f128, TypeSoftenFloat);
1046  }
1047 
1048  // Decide how to handle f64. If the target does not have native f64 support,
1049  // expand it to i64 and we will be generating soft float library calls.
1050  if (!isTypeLegal(MVT::f64)) {
1051  NumRegistersForVT[MVT::f64] = NumRegistersForVT[MVT::i64];
1052  RegisterTypeForVT[MVT::f64] = RegisterTypeForVT[MVT::i64];
1053  TransformToType[MVT::f64] = MVT::i64;
1054  ValueTypeActions.setTypeAction(MVT::f64, TypeSoftenFloat);
1055  }
1056 
1057  // Decide how to handle f32. If the target does not have native f32 support,
1058  // expand it to i32 and we will be generating soft float library calls.
1059  if (!isTypeLegal(MVT::f32)) {
1060  NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::i32];
1061  RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::i32];
1062  TransformToType[MVT::f32] = MVT::i32;
1063  ValueTypeActions.setTypeAction(MVT::f32, TypeSoftenFloat);
1064  }
1065 
1066  // Decide how to handle f16. If the target does not have native f16 support,
1067  // promote it to f32, because there are no f16 library calls (except for
1068  // conversions).
1069  if (!isTypeLegal(MVT::f16)) {
1070  NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::f32];
1071  RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::f32];
1072  TransformToType[MVT::f16] = MVT::f32;
1073  ValueTypeActions.setTypeAction(MVT::f16, TypePromoteFloat);
1074  }
1075 
1076  // Loop over all of the vector value types to see which need transformations.
1077  for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
1078  i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
1079  MVT VT = (MVT::SimpleValueType) i;
1080  if (isTypeLegal(VT))
1081  continue;
1082 
1083  MVT EltVT = VT.getVectorElementType();
1084  unsigned NElts = VT.getVectorNumElements();
1085  bool IsLegalWiderType = false;
1086  LegalizeTypeAction PreferredAction = getPreferredVectorAction(VT);
1087  switch (PreferredAction) {
1088  case TypePromoteInteger:
1089  // Try to promote the elements of integer vectors. If no legal
1090  // promotion was found, fall through to the widen-vector method.
1091  for (unsigned nVT = i + 1; nVT <= MVT::LAST_INTEGER_VECTOR_VALUETYPE; ++nVT) {
1092  MVT SVT = (MVT::SimpleValueType) nVT;
1093  // Promote vectors of integers to vectors with the same number
1094  // of elements, with a wider element type.
1095  if (SVT.getScalarSizeInBits() > EltVT.getSizeInBits() &&
1096  SVT.getVectorNumElements() == NElts && isTypeLegal(SVT)) {
1097  TransformToType[i] = SVT;
1098  RegisterTypeForVT[i] = SVT;
1099  NumRegistersForVT[i] = 1;
1100  ValueTypeActions.setTypeAction(VT, TypePromoteInteger);
1101  IsLegalWiderType = true;
1102  break;
1103  }
1104  }
1105  if (IsLegalWiderType)
1106  break;
1107  LLVM_FALLTHROUGH;
1108 
1109  case TypeWidenVector:
1110  // Try to widen the vector.
1111  for (unsigned nVT = i + 1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
1112  MVT SVT = (MVT::SimpleValueType) nVT;
1113  if (SVT.getVectorElementType() == EltVT
1114  && SVT.getVectorNumElements() > NElts && isTypeLegal(SVT)) {
1115  TransformToType[i] = SVT;
1116  RegisterTypeForVT[i] = SVT;
1117  NumRegistersForVT[i] = 1;
1118  ValueTypeActions.setTypeAction(VT, TypeWidenVector);
1119  IsLegalWiderType = true;
1120  break;
1121  }
1122  }
1123  if (IsLegalWiderType)
1124  break;
1125  LLVM_FALLTHROUGH;
1126 
1127  case TypeSplitVector:
1128  case TypeScalarizeVector: {
1129  MVT IntermediateVT;
1130  MVT RegisterVT;
1131  unsigned NumIntermediates;
1132  NumRegistersForVT[i] = getVectorTypeBreakdownMVT(VT, IntermediateVT,
1133  NumIntermediates, RegisterVT, this);
1134  RegisterTypeForVT[i] = RegisterVT;
1135 
1136  MVT NVT = VT.getPow2VectorType();
1137  if (NVT == VT) {
1138  // Type is already a power of 2. The default action is to split.
1139  TransformToType[i] = MVT::Other;
1140  if (PreferredAction == TypeScalarizeVector)
1141  ValueTypeActions.setTypeAction(VT, TypeScalarizeVector);
1142  else if (PreferredAction == TypeSplitVector)
1143  ValueTypeActions.setTypeAction(VT, TypeSplitVector);
1144  else
1145  // Set type action according to the number of elements.
1146  ValueTypeActions.setTypeAction(VT, NumElts == 1 ? TypeScalarizeVector
1147  : TypeSplitVector);
1148  } else {
1149  TransformToType[i] = NVT;
1150  ValueTypeActions.setTypeAction(VT, TypeWidenVector);
1151  }
1152  break;
1153  }
1154  default:
1155  llvm_unreachable("Unknown vector legalization action!");
1156  }
1157  }
1158 
1159  // Determine the 'representative' register class for each value type.
1160 // A representative register class is the largest (meaning one which is
1161  // not a sub-register class / subreg register class) legal register class for
1162  // a group of value types. For example, on i386, i8, i16, and i32
1163  // representative would be GR32; while on x86_64 it's GR64.
1164  for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
1165  const TargetRegisterClass* RRC;
1166  uint8_t Cost;
1167  std::tie(RRC, Cost) = findRepresentativeClass(TRI, (MVT::SimpleValueType)i);
1168  RepRegClassForVT[i] = RRC;
1169  RepRegClassCostForVT[i] = Cost;
1170  }
1171 }
1172 
1173 EVT TargetLoweringBase::getSetCCResultType(const DataLayout &DL, LLVMContext &,
1174  EVT VT) const {
1175  assert(!VT.isVector() && "No default SetCC type for vectors!");
1176  return getPointerTy(DL).SimpleTy;
1177 }
1178 
1179 MVT::SimpleValueType TargetLoweringBase::getCmpLibcallReturnType() const {
1180  return MVT::i32; // return the default value
1181 }
1182 
1183 /// getVectorTypeBreakdown - Vector types are broken down into some number of
1184 /// legal first class types. For example, MVT::v8f32 maps to 2 MVT::v4f32
1185 /// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack.
1186 /// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86.
1187 ///
1188 /// This method returns the number of registers needed, and the VT for each
1189 /// register. It also returns the VT and quantity of the intermediate values
1190 /// before they are promoted/expanded.
1191 unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
1192  EVT &IntermediateVT,
1193  unsigned &NumIntermediates,
1194  MVT &RegisterVT) const {
1195  unsigned NumElts = VT.getVectorNumElements();
1196 
1197  // If there is a wider vector type with the same element type as this one,
1198  // or a promoted vector type that has the same number of elements which
1199  // are wider, then we should convert to that legal vector type.
1200  // This handles things like <2 x float> -> <4 x float> and
1201  // <4 x i1> -> <4 x i32>.
1202  LegalizeTypeAction TA = getTypeAction(Context, VT);
1203  if (NumElts != 1 && (TA == TypeWidenVector || TA == TypePromoteInteger)) {
1204  EVT RegisterEVT = getTypeToTransformTo(Context, VT);
1205  if (isTypeLegal(RegisterEVT)) {
1206  IntermediateVT = RegisterEVT;
1207  RegisterVT = RegisterEVT.getSimpleVT();
1208  NumIntermediates = 1;
1209  return 1;
1210  }
1211  }
1212 
1213  // Figure out the right, legal destination reg to copy into.
1214  EVT EltTy = VT.getVectorElementType();
1215 
1216  unsigned NumVectorRegs = 1;
1217 
1218  // FIXME: We don't support non-power-of-2-sized vectors for now. Ideally we
1219  // could break down into LHS/RHS like LegalizeDAG does.
1220  if (!isPowerOf2_32(NumElts)) {
1221  NumVectorRegs = NumElts;
1222  NumElts = 1;
1223  }
1224 
1225  // Divide the input until we get to a supported size. This will always
1226  // end with a scalar if the target doesn't support vectors.
1227  while (NumElts > 1 && !isTypeLegal(
1228  EVT::getVectorVT(Context, EltTy, NumElts))) {
1229  NumElts >>= 1;
1230  NumVectorRegs <<= 1;
1231  }
1232 
1233  NumIntermediates = NumVectorRegs;
1234 
1235  EVT NewVT = EVT::getVectorVT(Context, EltTy, NumElts);
1236  if (!isTypeLegal(NewVT))
1237  NewVT = EltTy;
1238  IntermediateVT = NewVT;
1239 
1240  MVT DestVT = getRegisterType(Context, NewVT);
1241  RegisterVT = DestVT;
1242  unsigned NewVTSize = NewVT.getSizeInBits();
1243 
1244  // Convert sizes such as i33 to i64.
1245  if (!isPowerOf2_32(NewVTSize))
1246  NewVTSize = NextPowerOf2(NewVTSize);
1247 
1248  if (EVT(DestVT).bitsLT(NewVT)) // Value is expanded, e.g. i64 -> i16.
1249  return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits());
1250 
1251  // Otherwise, promotion or legal types use the same number of registers as
1252  // the vector decimated to the appropriate level.
1253  return NumVectorRegs;
1254 }
1255 
1256 /// Get the EVTs and ArgFlags collections that represent the legalized return
1257 /// type of the given function. This does not require a DAG or a return value,
1258 /// and is suitable for use before any DAGs for the function are constructed.
1259 /// TODO: Move this out of TargetLowering.cpp.
1260 void llvm::GetReturnInfo(Type *ReturnType, AttributeList attr,
1261  SmallVectorImpl<ISD::OutputArg> &Outs,
1262  const TargetLowering &TLI, const DataLayout &DL) {
1263  SmallVector<EVT, 4> ValueVTs;
1264  ComputeValueVTs(TLI, DL, ReturnType, ValueVTs);
1265  unsigned NumValues = ValueVTs.size();
1266  if (NumValues == 0) return;
1267 
1268  for (unsigned j = 0, f = NumValues; j != f; ++j) {
1269  EVT VT = ValueVTs[j];
1270  ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
1271 
1272  if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::SExt))
1273  ExtendKind = ISD::SIGN_EXTEND;
1274  else if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::ZExt))
1275  ExtendKind = ISD::ZERO_EXTEND;
1276 
1277  // FIXME: C calling convention requires the return type to be promoted to
1278  // at least 32-bit. But this is not necessary for non-C calling
1279  // conventions. The frontend should mark functions whose return values
1280  // require promoting with signext or zeroext attributes.
1281  if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
1282  MVT MinVT = TLI.getRegisterType(ReturnType->getContext(), MVT::i32);
1283  if (VT.bitsLT(MinVT))
1284  VT = MinVT;
1285  }
1286 
1287  unsigned NumParts =
1288  TLI.getNumRegistersForCallingConv(ReturnType->getContext(), VT);
1289  MVT PartVT =
1290  TLI.getRegisterTypeForCallingConv(ReturnType->getContext(), VT);
1291 
1292  // 'inreg' on function refers to return value
1293  ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
1294  if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::InReg))
1295  Flags.setInReg();
1296 
1297  // Propagate extension type if any
1298  if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::SExt))
1299  Flags.setSExt();
1300  else if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::ZExt))
1301  Flags.setZExt();
1302 
1303  for (unsigned i = 0; i < NumParts; ++i)
1304  Outs.push_back(ISD::OutputArg(Flags, PartVT, VT, /*isFixed=*/true, 0, 0));
1305  }
1306 }
1307 
1308 /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
1309 /// function arguments in the caller parameter area. This is the actual
1310 /// alignment, not its logarithm.
1311 unsigned TargetLoweringBase::getByValTypeAlignment(Type *Ty,
1312  const DataLayout &DL) const {
1313  return DL.getABITypeAlignment(Ty);
1314 }
1315 
1316 bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
1317  const DataLayout &DL, EVT VT,
1318  unsigned AddrSpace,
1319  unsigned Alignment,
1320  bool *Fast) const {
1321  // Check if the specified alignment is sufficient based on the data layout.
1322  // TODO: While using the data layout works in practice, a better solution
1323  // would be to implement this check directly (make this a virtual function).
1324  // For example, the ABI alignment may change based on software platform while
1325  // this function should only be affected by hardware implementation.
1326  Type *Ty = VT.getTypeForEVT(Context);
1327  if (Alignment >= DL.getABITypeAlignment(Ty)) {
1328  // Assume that an access that meets the ABI-specified alignment is fast.
1329  if (Fast != nullptr)
1330  *Fast = true;
1331  return true;
1332  }
1333 
1334  // This is a misaligned access.
1335  return allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment, Fast);
1336 }
1337 
1337 
1338 BranchProbability TargetLoweringBase::getPredictableBranchThreshold() const {
1339  return BranchProbability(MinPercentageForPredictableBranch, 100);
1340 }
1341 
1342 //===----------------------------------------------------------------------===//
1343 // TargetTransformInfo Helpers
1344 //===----------------------------------------------------------------------===//
1345 
1346 int TargetLoweringBase::InstructionOpcodeToISD(unsigned Opcode) const {
1347  enum InstructionOpcodes {
1348 #define HANDLE_INST(NUM, OPCODE, CLASS) OPCODE = NUM,
1349 #define LAST_OTHER_INST(NUM) InstructionOpcodesCount = NUM
1350 #include "llvm/IR/Instruction.def"
1351  };
1352  switch (static_cast<InstructionOpcodes>(Opcode)) {
1353  case Ret: return 0;
1354  case Br: return 0;
1355  case Switch: return 0;
1356  case IndirectBr: return 0;
1357  case Invoke: return 0;
1358  case Resume: return 0;
1359  case Unreachable: return 0;
1360  case CleanupRet: return 0;
1361  case CatchRet: return 0;
1362  case CatchPad: return 0;
1363  case CatchSwitch: return 0;
1364  case CleanupPad: return 0;
1365  case Add: return ISD::ADD;
1366  case FAdd: return ISD::FADD;
1367  case Sub: return ISD::SUB;
1368  case FSub: return ISD::FSUB;
1369  case Mul: return ISD::MUL;
1370  case FMul: return ISD::FMUL;
1371  case UDiv: return ISD::UDIV;
1372  case SDiv: return ISD::SDIV;
1373  case FDiv: return ISD::FDIV;
1374  case URem: return ISD::UREM;
1375  case SRem: return ISD::SREM;
1376  case FRem: return ISD::FREM;
1377  case Shl: return ISD::SHL;
1378  case LShr: return ISD::SRL;
1379  case AShr: return ISD::SRA;
1380  case And: return ISD::AND;
1381  case Or: return ISD::OR;
1382  case Xor: return ISD::XOR;
1383  case Alloca: return 0;
1384  case Load: return ISD::LOAD;
1385  case Store: return ISD::STORE;
1386  case GetElementPtr: return 0;
1387  case Fence: return 0;
1388  case AtomicCmpXchg: return 0;
1389  case AtomicRMW: return 0;
1390  case Trunc: return ISD::TRUNCATE;
1391  case ZExt: return ISD::ZERO_EXTEND;
1392  case SExt: return ISD::SIGN_EXTEND;
1393  case FPToUI: return ISD::FP_TO_UINT;
1394  case FPToSI: return ISD::FP_TO_SINT;
1395  case UIToFP: return ISD::UINT_TO_FP;
1396  case SIToFP: return ISD::SINT_TO_FP;
1397  case FPTrunc: return ISD::FP_ROUND;
1398  case FPExt: return ISD::FP_EXTEND;
1399  case PtrToInt: return ISD::BITCAST;
1400  case IntToPtr: return ISD::BITCAST;
1401  case BitCast: return ISD::BITCAST;
1402  case AddrSpaceCast: return ISD::ADDRSPACECAST;
1403  case ICmp: return ISD::SETCC;
1404  case FCmp: return ISD::SETCC;
1405  case PHI: return 0;
1406  case Call: return 0;
1407  case Select: return ISD::SELECT;
1408  case UserOp1: return 0;
1409  case UserOp2: return 0;
1410  case VAArg: return 0;
1411  case ExtractElement: return ISD::EXTRACT_VECTOR_ELT;
1412  case InsertElement: return ISD::INSERT_VECTOR_ELT;
1413  case ShuffleVector: return ISD::VECTOR_SHUFFLE;
1414  case ExtractValue: return ISD::MERGE_VALUES;
1415  case InsertValue: return ISD::MERGE_VALUES;
1416  case LandingPad: return 0;
1417  }
1418 
1419  llvm_unreachable("Unknown instruction type encountered!");
1420 }
1421 
1422 std::pair<int, MVT>
1423 TargetLoweringBase::getTypeLegalizationCost(const DataLayout &DL,
1424  Type *Ty) const {
1425  LLVMContext &C = Ty->getContext();
1426  EVT MTy = getValueType(DL, Ty);
1427 
1428  int Cost = 1;
1429  // We keep legalizing the type until we find a legal kind. We assume that
1430  // the only operation that costs anything is the split. After splitting
1431  // we need to handle two types.
1432  while (true) {
1433  LegalizeKind LK = getTypeConversion(C, MTy);
1434 
1435  if (LK.first == TypeLegal)
1436  return std::make_pair(Cost, MTy.getSimpleVT());
1437 
1438  if (LK.first == TypeSplitVector || LK.first == TypeExpandInteger)
1439  Cost *= 2;
1440 
1441  // Do not loop with f128 type.
1442  if (MTy == LK.second)
1443  return std::make_pair(Cost, MTy.getSimpleVT());
1444 
1445  // Keep legalizing the type.
1446  MTy = LK.second;
1447  }
1448 }
1449 
1450 Value *TargetLoweringBase::getDefaultSafeStackPointerLocation(IRBuilder<> &IRB,
1451  bool UseTLS) const {
1452  // compiler-rt provides a variable with a magic name. Targets that do not
1453  // link with compiler-rt may also provide such a variable.
1454  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
1455  const char *UnsafeStackPtrVar = "__safestack_unsafe_stack_ptr";
1456  auto UnsafeStackPtr =
1457  dyn_cast_or_null<GlobalVariable>(M->getNamedValue(UnsafeStackPtrVar));
1458 
1459  Type *StackPtrTy = Type::getInt8PtrTy(M->getContext());
1460 
1461  if (!UnsafeStackPtr) {
1462  auto TLSModel = UseTLS ?
1463  GlobalValue::InitialExecTLSModel :
1464  GlobalValue::NotThreadLocal;
1465  // The global variable is not defined yet, define it ourselves.
1466  // We use the initial-exec TLS model because we do not support the
1467  // variable living anywhere other than in the main executable.
1468  UnsafeStackPtr = new GlobalVariable(
1469  *M, StackPtrTy, false, GlobalValue::ExternalLinkage, nullptr,
1470  UnsafeStackPtrVar, nullptr, TLSModel);
1471  } else {
1472  // The variable exists, check its type and attributes.
1473  if (UnsafeStackPtr->getValueType() != StackPtrTy)
1474  report_fatal_error(Twine(UnsafeStackPtrVar) + " must have void* type");
1475  if (UseTLS != UnsafeStackPtr->isThreadLocal())
1476  report_fatal_error(Twine(UnsafeStackPtrVar) + " must " +
1477  (UseTLS ? "" : "not ") + "be thread-local");
1478  }
1479  return UnsafeStackPtr;
1480 }
1481 
1482 Value *TargetLoweringBase::getSafeStackPointerLocation(IRBuilder<> &IRB) const {
1483  if (!TM.getTargetTriple().isAndroid())
1484  return getDefaultSafeStackPointerLocation(IRB, true);
1485 
1486  // Android provides a libc function to retrieve the address of the current
1487  // thread's unsafe stack pointer.
1488  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
1489  Type *StackPtrTy = Type::getInt8PtrTy(M->getContext());
1490  Value *Fn = M->getOrInsertFunction("__safestack_pointer_address",
1491  StackPtrTy->getPointerTo(0));
1492  return IRB.CreateCall(Fn);
1493 }
1494 
1495 //===----------------------------------------------------------------------===//
1496 // Loop Strength Reduction hooks
1497 //===----------------------------------------------------------------------===//
1498 
1499 /// isLegalAddressingMode - Return true if the addressing mode represented
1500 /// by AM is legal for this target, for a load/store of the specified type.
1501 bool TargetLoweringBase::isLegalAddressingMode(const DataLayout &DL,
1502  const AddrMode &AM, Type *Ty,
1503  unsigned AS, Instruction *I) const {
1504  // The default implementation of this implements a conservative RISCy, r+r and
1505  // r+i addr mode.
1506 
1507  // Allows a sign-extended 16-bit immediate field.
1508  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
1509  return false;
1510 
1511  // No global is ever allowed as a base.
1512  if (AM.BaseGV)
1513  return false;
1514 
1515  // Only support r+r,
1516  switch (AM.Scale) {
1517  case 0: // "r+i" or just "i", depending on HasBaseReg.
1518  break;
1519  case 1:
1520  if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed.
1521  return false;
1522  // Otherwise we have r+r or r+i.
1523  break;
1524  case 2:
1525  if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed.
1526  return false;
1527  // Allow 2*r as r+r.
1528  break;
1529  default: // Don't allow n * r
1530  return false;
1531  }
1532 
1533  return true;
1534 }
1535 
1536 //===----------------------------------------------------------------------===//
1537 // Stack Protector
1538 //===----------------------------------------------------------------------===//
1539 
1540 // For OpenBSD return its special guard variable. Otherwise return nullptr,
1541 // so that SelectionDAG handle SSP.
1542 Value *TargetLoweringBase::getIRStackGuard(IRBuilder<> &IRB) const {
1543  if (getTargetMachine().getTargetTriple().isOSOpenBSD()) {
1544  Module &M = *IRB.GetInsertBlock()->getParent()->getParent();
1545  PointerType *PtrTy = Type::getInt8PtrTy(M.getContext());
1546  return M.getOrInsertGlobal("__guard_local", PtrTy);
1547  }
1548  return nullptr;
1549 }
1550 
1551 // Currently only support "standard" __stack_chk_guard.
1552 // TODO: add LOAD_STACK_GUARD support.
1553 void TargetLoweringBase::insertSSPDeclarations(Module &M) const {
1554  M.getOrInsertGlobal("__stack_chk_guard", Type::getInt8PtrTy(M.getContext()));
1555 }
1556 
1557 // Currently only support "standard" __stack_chk_guard.
1558 // TODO: add LOAD_STACK_GUARD support.
1559 Value *TargetLoweringBase::getSDagStackGuard(const Module &M) const {
1560  return M.getGlobalVariable("__stack_chk_guard", true);
1561 }
1562 
1563 Value *TargetLoweringBase::getSSPStackGuardCheck(const Module &M) const {
1564  return nullptr;
1565 }
1566 
1567 unsigned TargetLoweringBase::getMinimumJumpTableEntries() const {
1568  return MinimumJumpTableEntries;
1569 }
1570 
1571 void TargetLoweringBase::setMinimumJumpTableEntries(unsigned Val) {
1572  MinimumJumpTableEntries = Val;
1573 }
1574 
1575 unsigned TargetLoweringBase::getMinimumJumpTableDensity(bool OptForSize) const {
1576  return OptForSize ? OptsizeJumpTableDensity : JumpTableDensity;
1577 }
1578 
1579 unsigned TargetLoweringBase::getMaximumJumpTableSize() const {
1580  return MaximumJumpTableSize;
1581 }
1582 
1583 void TargetLoweringBase::setMaximumJumpTableSize(unsigned Val) {
1584  MaximumJumpTableSize = Val;
1585 }
1586 
1587 //===----------------------------------------------------------------------===//
1588 // Reciprocal Estimates
1589 //===----------------------------------------------------------------------===//
1590 
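// The "reciprocal-estimates" attribute parsed by the helpers below is a
// comma-separated list of tokens such as "all", "none", "default", or
// per-operation names like "sqrtf" / "vec-divd"; a '!' prefix disables an
// entry and an optional ":N" suffix sets the refinement step count
// (for example "vec-sqrtd:3,!divf").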
1591 /// Get the reciprocal estimate attribute string for a function that will
1592 /// override the target defaults.
1593 static StringRef getRecipEstimateForFunc(MachineFunction &MF) {
1594  const Function *F = MF.getFunction();
1595  return F->getFnAttribute("reciprocal-estimates").getValueAsString();
1596 }
1597 
1598 /// Construct a string for the given reciprocal operation of the given type.
1599 /// This string should match the corresponding option to the front-end's
1600 /// "-mrecip" flag assuming those strings have been passed through in an
1601 /// attribute string. For example, "vec-divf" for a division of a vXf32.
1602 static std::string getReciprocalOpName(bool IsSqrt, EVT VT) {
1603  std::string Name = VT.isVector() ? "vec-" : "";
1604 
1605  Name += IsSqrt ? "sqrt" : "div";
1606 
1607  // TODO: Handle "half" or other float types?
1608  if (VT.getScalarType() == MVT::f64) {
1609  Name += "d";
1610  } else {
1611  assert(VT.getScalarType() == MVT::f32 &&
1612  "Unexpected FP type for reciprocal estimate");
1613  Name += "f";
1614  }
1615 
1616  return Name;
1617 }
1618 
1619 /// Return the character position and value (a single numeric character) of a
1620 /// customized refinement operation in the input string if it exists. Return
1621 /// false if there is no customized refinement step count.
1622 static bool parseRefinementStep(StringRef In, size_t &Position,
1623  uint8_t &Value) {
1624  const char RefStepToken = ':';
1625  Position = In.find(RefStepToken);
1626  if (Position == StringRef::npos)
1627  return false;
1628 
1629  StringRef RefStepString = In.substr(Position + 1);
1630  // Allow exactly one numeric character for the additional refinement
1631  // step parameter.
1632  if (RefStepString.size() == 1) {
1633  char RefStepChar = RefStepString[0];
1634  if (RefStepChar >= '0' && RefStepChar <= '9') {
1635  Value = RefStepChar - '0';
1636  return true;
1637  }
1638  }
1639  report_fatal_error("Invalid refinement step for -recip.");
1640 }
1641 
1642 /// For the input attribute string, return one of the ReciprocalEstimate enum
1643 /// status values (enabled, disabled, or not specified) for this operation on
1644 /// the specified data type.
1645 static int getOpEnabled(bool IsSqrt, EVT VT, StringRef Override) {
1646  if (Override.empty())
1647  return TargetLoweringBase::ReciprocalEstimate::Unspecified;
1648 
1649  SmallVector<StringRef, 4> OverrideVector;
1650  SplitString(Override, OverrideVector, ",");
1651  unsigned NumArgs = OverrideVector.size();
1652 
1653  // Check if "all", "none", or "default" was specified.
1654  if (NumArgs == 1) {
1655  // Look for an optional setting of the number of refinement steps needed
1656  // for this type of reciprocal operation.
1657  size_t RefPos;
1658  uint8_t RefSteps;
1659  if (parseRefinementStep(Override, RefPos, RefSteps)) {
1660  // Split the string for further processing.
1661  Override = Override.substr(0, RefPos);
1662  }
1663 
1664  // All reciprocal types are enabled.
1665  if (Override == "all")
1666  return TargetLoweringBase::ReciprocalEstimate::Enabled;
1667 
1668  // All reciprocal types are disabled.
1669  if (Override == "none")
1670  return TargetLoweringBase::ReciprocalEstimate::Disabled;
1671 
1672  // Target defaults for enablement are used.
1673  if (Override == "default")
1674  return TargetLoweringBase::ReciprocalEstimate::Unspecified;
1675  }
1676 
1677  // The attribute string may omit the size suffix ('f'/'d').
1678  std::string VTName = getReciprocalOpName(IsSqrt, VT);
1679  std::string VTNameNoSize = VTName;
1680  VTNameNoSize.pop_back();
1681  static const char DisabledPrefix = '!';
1682 
1683  for (StringRef RecipType : OverrideVector) {
1684  size_t RefPos;
1685  uint8_t RefSteps;
1686  if (parseRefinementStep(RecipType, RefPos, RefSteps))
1687  RecipType = RecipType.substr(0, RefPos);
1688 
1689  // Ignore the disablement token for string matching.
1690  bool IsDisabled = RecipType[0] == DisabledPrefix;
1691  if (IsDisabled)
1692  RecipType = RecipType.substr(1);
1693 
1694  if (RecipType.equals(VTName) || RecipType.equals(VTNameNoSize))
1695  return IsDisabled ? TargetLoweringBase::ReciprocalEstimate::Disabled
1696  : TargetLoweringBase::ReciprocalEstimate::Enabled;
1697  }
1698 
1699  return TargetLoweringBase::ReciprocalEstimate::Unspecified;
1700 }
1701 
1702 /// For the input attribute string, return the customized refinement step count
1703 /// for this operation on the specified data type. If the step count does not
1704 /// exist, return the ReciprocalEstimate enum value for unspecified.
1705 static int getOpRefinementSteps(bool IsSqrt, EVT VT, StringRef Override) {
1706  if (Override.empty())
1707  return TargetLoweringBase::ReciprocalEstimate::Unspecified;
1708 
1709  SmallVector<StringRef, 4> OverrideVector;
1710  SplitString(Override, OverrideVector, ",");
1711  unsigned NumArgs = OverrideVector.size();
1712 
1713  // Check if "all", "default", or "none" was specified.
1714  if (NumArgs == 1) {
1715  // Look for an optional setting of the number of refinement steps needed
1716  // for this type of reciprocal operation.
1717  size_t RefPos;
1718  uint8_t RefSteps;
1719  if (!parseRefinementStep(Override, RefPos, RefSteps))
1720  return TargetLoweringBase::ReciprocalEstimate::Unspecified;
1721 
1722  // Split the string for further processing.
1723  Override = Override.substr(0, RefPos);
1724  assert(Override != "none" &&
1725  "Disabled reciprocals, but specifed refinement steps?");
1726 
1727  // If this is a general override, return the specified number of steps.
1728  if (Override == "all" || Override == "default")
1729  return RefSteps;
1730  }
1731 
1732  // The attribute string may omit the size suffix ('f'/'d').
1733  std::string VTName = getReciprocalOpName(IsSqrt, VT);
1734  std::string VTNameNoSize = VTName;
1735  VTNameNoSize.pop_back();
1736 
1737  for (StringRef RecipType : OverrideVector) {
1738  size_t RefPos;
1739  uint8_t RefSteps;
1740  if (!parseRefinementStep(RecipType, RefPos, RefSteps))
1741  continue;
1742 
1743  RecipType = RecipType.substr(0, RefPos);
1744  if (RecipType.equals(VTName) || RecipType.equals(VTNameNoSize))
1745  return RefSteps;
1746  }
1747 
1748  return TargetLoweringBase::ReciprocalEstimate::Unspecified;
1749 }
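// Hedged sketch of the two helpers acting together on a hypothetical
// override string "vec-sqrtf:3": getOpEnabled() reports Enabled for the
// f32 vector square-root estimate, getOpRefinementSteps() returns 3 extra
// refinement steps for it, and every other operation/type combination
// stays at ReciprocalEstimate::Unspecified.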
1750 
1751 int TargetLoweringBase::getRecipEstimateSqrtEnabled(EVT VT,
1752  MachineFunction &MF) const {
1753  return getOpEnabled(true, VT, getRecipEstimateForFunc(MF));
1754 }
1755 
1756 int TargetLoweringBase::getRecipEstimateDivEnabled(EVT VT,
1757  MachineFunction &MF) const {
1758  return getOpEnabled(false, VT, getRecipEstimateForFunc(MF));
1759 }
1760 
1761 int TargetLoweringBase::getSqrtRefinementSteps(EVT VT,
1762  MachineFunction &MF) const {
1763  return getOpRefinementSteps(true, VT, getRecipEstimateForFunc(MF));
1764 }
1765 
1766 int TargetLoweringBase::getDivRefinementSteps(EVT VT,
1767  MachineFunction &MF) const {
1768  return getOpRefinementSteps(false, VT, getRecipEstimateForFunc(MF));
1769 }
1770 
1771 void TargetLoweringBase::finalizeLowering(MachineFunction &MF) const {
1772  MF.getRegInfo().freezeReservedRegs(MF);
1773 }