LLVM 4.0.0 - TargetLoweringBase.cpp
1 //===-- TargetLoweringBase.cpp - Implement the TargetLoweringBase class ---===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This implements the TargetLoweringBase class.
11 //
12 //===----------------------------------------------------------------------===//
13 
15 #include "llvm/ADT/BitVector.h"
16 #include "llvm/ADT/STLExtras.h"
17 #include "llvm/ADT/StringExtras.h"
18 #include "llvm/ADT/Triple.h"
19 #include "llvm/CodeGen/Analysis.h"
24 #include "llvm/CodeGen/StackMaps.h"
25 #include "llvm/IR/DataLayout.h"
26 #include "llvm/IR/DerivedTypes.h"
27 #include "llvm/IR/GlobalVariable.h"
28 #include "llvm/IR/Mangler.h"
29 #include "llvm/MC/MCAsmInfo.h"
30 #include "llvm/MC/MCContext.h"
31 #include "llvm/MC/MCExpr.h"
40 #include <cctype>
41 using namespace llvm;
42 
43 static cl::opt<bool> JumpIsExpensiveOverride(
44     "jump-is-expensive", cl::init(false),
45  cl::desc("Do not create extra branches to split comparison logic."),
46  cl::Hidden);
47 
48 static cl::opt<unsigned> MinimumJumpTableEntries
49   ("min-jump-table-entries", cl::init(4), cl::Hidden,
50  cl::desc("Set minimum number of entries to use a jump table."));
51 
52 static cl::opt<unsigned> MaximumJumpTableSize
53   ("max-jump-table-size", cl::init(0), cl::Hidden,
54  cl::desc("Set maximum size of jump tables; zero for no limit."));
55 
56 // Although this default value is arbitrary, it is not random. It is assumed
57 // that a condition that evaluates the same way by a higher percentage than this
58 // is best represented as control flow. Therefore, the default value N should be
59 // set such that the win from N% correct executions is greater than the loss
60 // from (100 - N)% mispredicted executions for the majority of intended targets.
61 static cl::opt<int> MinPercentageForPredictableBranch(
62     "min-predictable-branch", cl::init(99),
63  cl::desc("Minimum percentage (0-100) that a condition must be either true "
64  "or false to assume that the condition is predictable"),
65  cl::Hidden);
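// Editorial worked example (not part of the original source): with the default
// threshold of 99, a condition is treated as predictable only if it goes the
// same way at least 99% of the time, i.e. keeping it as a branch is assumed to
// pay off when 0.99 * (cost saved per correctly predicted branch) outweighs
// 0.01 * (misprediction penalty). Targets with a different penalty/savings
// ratio can tune this via -min-predictable-branch.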
66 
67 /// InitLibcallNames - Set default libcall names.
68 ///
69 static void InitLibcallNames(const char **Names, const Triple &TT) {
70  Names[RTLIB::SHL_I16] = "__ashlhi3";
71  Names[RTLIB::SHL_I32] = "__ashlsi3";
72  Names[RTLIB::SHL_I64] = "__ashldi3";
73  Names[RTLIB::SHL_I128] = "__ashlti3";
74  Names[RTLIB::SRL_I16] = "__lshrhi3";
75  Names[RTLIB::SRL_I32] = "__lshrsi3";
76  Names[RTLIB::SRL_I64] = "__lshrdi3";
77  Names[RTLIB::SRL_I128] = "__lshrti3";
78  Names[RTLIB::SRA_I16] = "__ashrhi3";
79  Names[RTLIB::SRA_I32] = "__ashrsi3";
80  Names[RTLIB::SRA_I64] = "__ashrdi3";
81  Names[RTLIB::SRA_I128] = "__ashrti3";
82  Names[RTLIB::MUL_I8] = "__mulqi3";
83  Names[RTLIB::MUL_I16] = "__mulhi3";
84  Names[RTLIB::MUL_I32] = "__mulsi3";
85  Names[RTLIB::MUL_I64] = "__muldi3";
86  Names[RTLIB::MUL_I128] = "__multi3";
87  Names[RTLIB::MULO_I32] = "__mulosi4";
88  Names[RTLIB::MULO_I64] = "__mulodi4";
89  Names[RTLIB::MULO_I128] = "__muloti4";
90  Names[RTLIB::SDIV_I8] = "__divqi3";
91  Names[RTLIB::SDIV_I16] = "__divhi3";
92  Names[RTLIB::SDIV_I32] = "__divsi3";
93  Names[RTLIB::SDIV_I64] = "__divdi3";
94  Names[RTLIB::SDIV_I128] = "__divti3";
95  Names[RTLIB::UDIV_I8] = "__udivqi3";
96  Names[RTLIB::UDIV_I16] = "__udivhi3";
97  Names[RTLIB::UDIV_I32] = "__udivsi3";
98  Names[RTLIB::UDIV_I64] = "__udivdi3";
99  Names[RTLIB::UDIV_I128] = "__udivti3";
100  Names[RTLIB::SREM_I8] = "__modqi3";
101  Names[RTLIB::SREM_I16] = "__modhi3";
102  Names[RTLIB::SREM_I32] = "__modsi3";
103  Names[RTLIB::SREM_I64] = "__moddi3";
104  Names[RTLIB::SREM_I128] = "__modti3";
105  Names[RTLIB::UREM_I8] = "__umodqi3";
106  Names[RTLIB::UREM_I16] = "__umodhi3";
107  Names[RTLIB::UREM_I32] = "__umodsi3";
108  Names[RTLIB::UREM_I64] = "__umoddi3";
109  Names[RTLIB::UREM_I128] = "__umodti3";
110 
111  Names[RTLIB::NEG_I32] = "__negsi2";
112  Names[RTLIB::NEG_I64] = "__negdi2";
113  Names[RTLIB::ADD_F32] = "__addsf3";
114  Names[RTLIB::ADD_F64] = "__adddf3";
115  Names[RTLIB::ADD_F80] = "__addxf3";
116  Names[RTLIB::ADD_F128] = "__addtf3";
117  Names[RTLIB::ADD_PPCF128] = "__gcc_qadd";
118  Names[RTLIB::SUB_F32] = "__subsf3";
119  Names[RTLIB::SUB_F64] = "__subdf3";
120  Names[RTLIB::SUB_F80] = "__subxf3";
121  Names[RTLIB::SUB_F128] = "__subtf3";
122  Names[RTLIB::SUB_PPCF128] = "__gcc_qsub";
123  Names[RTLIB::MUL_F32] = "__mulsf3";
124  Names[RTLIB::MUL_F64] = "__muldf3";
125  Names[RTLIB::MUL_F80] = "__mulxf3";
126  Names[RTLIB::MUL_F128] = "__multf3";
127  Names[RTLIB::MUL_PPCF128] = "__gcc_qmul";
128  Names[RTLIB::DIV_F32] = "__divsf3";
129  Names[RTLIB::DIV_F64] = "__divdf3";
130  Names[RTLIB::DIV_F80] = "__divxf3";
131  Names[RTLIB::DIV_F128] = "__divtf3";
132  Names[RTLIB::DIV_PPCF128] = "__gcc_qdiv";
133  Names[RTLIB::REM_F32] = "fmodf";
134  Names[RTLIB::REM_F64] = "fmod";
135  Names[RTLIB::REM_F80] = "fmodl";
136  Names[RTLIB::REM_F128] = "fmodl";
137  Names[RTLIB::REM_PPCF128] = "fmodl";
138  Names[RTLIB::FMA_F32] = "fmaf";
139  Names[RTLIB::FMA_F64] = "fma";
140  Names[RTLIB::FMA_F80] = "fmal";
141  Names[RTLIB::FMA_F128] = "fmal";
142  Names[RTLIB::FMA_PPCF128] = "fmal";
143  Names[RTLIB::POWI_F32] = "__powisf2";
144  Names[RTLIB::POWI_F64] = "__powidf2";
145  Names[RTLIB::POWI_F80] = "__powixf2";
146  Names[RTLIB::POWI_F128] = "__powitf2";
147  Names[RTLIB::POWI_PPCF128] = "__powitf2";
148  Names[RTLIB::SQRT_F32] = "sqrtf";
149  Names[RTLIB::SQRT_F64] = "sqrt";
150  Names[RTLIB::SQRT_F80] = "sqrtl";
151  Names[RTLIB::SQRT_F128] = "sqrtl";
152  Names[RTLIB::SQRT_PPCF128] = "sqrtl";
153  Names[RTLIB::LOG_F32] = "logf";
154  Names[RTLIB::LOG_F64] = "log";
155  Names[RTLIB::LOG_F80] = "logl";
156  Names[RTLIB::LOG_F128] = "logl";
157  Names[RTLIB::LOG_PPCF128] = "logl";
158  Names[RTLIB::LOG2_F32] = "log2f";
159  Names[RTLIB::LOG2_F64] = "log2";
160  Names[RTLIB::LOG2_F80] = "log2l";
161  Names[RTLIB::LOG2_F128] = "log2l";
162  Names[RTLIB::LOG2_PPCF128] = "log2l";
163  Names[RTLIB::LOG10_F32] = "log10f";
164  Names[RTLIB::LOG10_F64] = "log10";
165  Names[RTLIB::LOG10_F80] = "log10l";
166  Names[RTLIB::LOG10_F128] = "log10l";
167  Names[RTLIB::LOG10_PPCF128] = "log10l";
168  Names[RTLIB::EXP_F32] = "expf";
169  Names[RTLIB::EXP_F64] = "exp";
170  Names[RTLIB::EXP_F80] = "expl";
171  Names[RTLIB::EXP_F128] = "expl";
172  Names[RTLIB::EXP_PPCF128] = "expl";
173  Names[RTLIB::EXP2_F32] = "exp2f";
174  Names[RTLIB::EXP2_F64] = "exp2";
175  Names[RTLIB::EXP2_F80] = "exp2l";
176  Names[RTLIB::EXP2_F128] = "exp2l";
177  Names[RTLIB::EXP2_PPCF128] = "exp2l";
178  Names[RTLIB::SIN_F32] = "sinf";
179  Names[RTLIB::SIN_F64] = "sin";
180  Names[RTLIB::SIN_F80] = "sinl";
181  Names[RTLIB::SIN_F128] = "sinl";
182  Names[RTLIB::SIN_PPCF128] = "sinl";
183  Names[RTLIB::COS_F32] = "cosf";
184  Names[RTLIB::COS_F64] = "cos";
185  Names[RTLIB::COS_F80] = "cosl";
186  Names[RTLIB::COS_F128] = "cosl";
187  Names[RTLIB::COS_PPCF128] = "cosl";
188  Names[RTLIB::POW_F32] = "powf";
189  Names[RTLIB::POW_F64] = "pow";
190  Names[RTLIB::POW_F80] = "powl";
191  Names[RTLIB::POW_F128] = "powl";
192  Names[RTLIB::POW_PPCF128] = "powl";
193  Names[RTLIB::CEIL_F32] = "ceilf";
194  Names[RTLIB::CEIL_F64] = "ceil";
195  Names[RTLIB::CEIL_F80] = "ceill";
196  Names[RTLIB::CEIL_F128] = "ceill";
197  Names[RTLIB::CEIL_PPCF128] = "ceill";
198  Names[RTLIB::TRUNC_F32] = "truncf";
199  Names[RTLIB::TRUNC_F64] = "trunc";
200  Names[RTLIB::TRUNC_F80] = "truncl";
201  Names[RTLIB::TRUNC_F128] = "truncl";
202  Names[RTLIB::TRUNC_PPCF128] = "truncl";
203  Names[RTLIB::RINT_F32] = "rintf";
204  Names[RTLIB::RINT_F64] = "rint";
205  Names[RTLIB::RINT_F80] = "rintl";
206  Names[RTLIB::RINT_F128] = "rintl";
207  Names[RTLIB::RINT_PPCF128] = "rintl";
208  Names[RTLIB::NEARBYINT_F32] = "nearbyintf";
209  Names[RTLIB::NEARBYINT_F64] = "nearbyint";
210  Names[RTLIB::NEARBYINT_F80] = "nearbyintl";
211  Names[RTLIB::NEARBYINT_F128] = "nearbyintl";
212  Names[RTLIB::NEARBYINT_PPCF128] = "nearbyintl";
213  Names[RTLIB::ROUND_F32] = "roundf";
214  Names[RTLIB::ROUND_F64] = "round";
215  Names[RTLIB::ROUND_F80] = "roundl";
216  Names[RTLIB::ROUND_F128] = "roundl";
217  Names[RTLIB::ROUND_PPCF128] = "roundl";
218  Names[RTLIB::FLOOR_F32] = "floorf";
219  Names[RTLIB::FLOOR_F64] = "floor";
220  Names[RTLIB::FLOOR_F80] = "floorl";
221  Names[RTLIB::FLOOR_F128] = "floorl";
222  Names[RTLIB::FLOOR_PPCF128] = "floorl";
223  Names[RTLIB::FMIN_F32] = "fminf";
224  Names[RTLIB::FMIN_F64] = "fmin";
225  Names[RTLIB::FMIN_F80] = "fminl";
226  Names[RTLIB::FMIN_F128] = "fminl";
227  Names[RTLIB::FMIN_PPCF128] = "fminl";
228  Names[RTLIB::FMAX_F32] = "fmaxf";
229  Names[RTLIB::FMAX_F64] = "fmax";
230  Names[RTLIB::FMAX_F80] = "fmaxl";
231  Names[RTLIB::FMAX_F128] = "fmaxl";
232  Names[RTLIB::FMAX_PPCF128] = "fmaxl";
233  Names[RTLIB::ROUND_F32] = "roundf";
234  Names[RTLIB::ROUND_F64] = "round";
235  Names[RTLIB::ROUND_F80] = "roundl";
236  Names[RTLIB::ROUND_F128] = "roundl";
237  Names[RTLIB::ROUND_PPCF128] = "roundl";
238  Names[RTLIB::COPYSIGN_F32] = "copysignf";
239  Names[RTLIB::COPYSIGN_F64] = "copysign";
240  Names[RTLIB::COPYSIGN_F80] = "copysignl";
241  Names[RTLIB::COPYSIGN_F128] = "copysignl";
242  Names[RTLIB::COPYSIGN_PPCF128] = "copysignl";
243  Names[RTLIB::FPEXT_F32_PPCF128] = "__gcc_stoq";
244  Names[RTLIB::FPEXT_F64_PPCF128] = "__gcc_dtoq";
245  Names[RTLIB::FPEXT_F64_F128] = "__extenddftf2";
246  Names[RTLIB::FPEXT_F32_F128] = "__extendsftf2";
247  Names[RTLIB::FPEXT_F32_F64] = "__extendsfdf2";
248  if (TT.isOSDarwin()) {
249  // For f16/f32 conversions, Darwin uses the standard naming scheme, instead
250  // of the gnueabi-style __gnu_*_ieee.
251  // FIXME: What about other targets?
252  Names[RTLIB::FPEXT_F16_F32] = "__extendhfsf2";
253  Names[RTLIB::FPROUND_F32_F16] = "__truncsfhf2";
254  } else {
255  Names[RTLIB::FPEXT_F16_F32] = "__gnu_h2f_ieee";
256  Names[RTLIB::FPROUND_F32_F16] = "__gnu_f2h_ieee";
257  }
258  Names[RTLIB::FPROUND_F64_F16] = "__truncdfhf2";
259  Names[RTLIB::FPROUND_F80_F16] = "__truncxfhf2";
260  Names[RTLIB::FPROUND_F128_F16] = "__trunctfhf2";
261  Names[RTLIB::FPROUND_PPCF128_F16] = "__trunctfhf2";
262  Names[RTLIB::FPROUND_F64_F32] = "__truncdfsf2";
263  Names[RTLIB::FPROUND_F80_F32] = "__truncxfsf2";
264  Names[RTLIB::FPROUND_F128_F32] = "__trunctfsf2";
265  Names[RTLIB::FPROUND_PPCF128_F32] = "__gcc_qtos";
266  Names[RTLIB::FPROUND_F80_F64] = "__truncxfdf2";
267  Names[RTLIB::FPROUND_F128_F64] = "__trunctfdf2";
268  Names[RTLIB::FPROUND_PPCF128_F64] = "__gcc_qtod";
269  Names[RTLIB::FPTOSINT_F32_I32] = "__fixsfsi";
270  Names[RTLIB::FPTOSINT_F32_I64] = "__fixsfdi";
271  Names[RTLIB::FPTOSINT_F32_I128] = "__fixsfti";
272  Names[RTLIB::FPTOSINT_F64_I32] = "__fixdfsi";
273  Names[RTLIB::FPTOSINT_F64_I64] = "__fixdfdi";
274  Names[RTLIB::FPTOSINT_F64_I128] = "__fixdfti";
275  Names[RTLIB::FPTOSINT_F80_I32] = "__fixxfsi";
276  Names[RTLIB::FPTOSINT_F80_I64] = "__fixxfdi";
277  Names[RTLIB::FPTOSINT_F80_I128] = "__fixxfti";
278  Names[RTLIB::FPTOSINT_F128_I32] = "__fixtfsi";
279  Names[RTLIB::FPTOSINT_F128_I64] = "__fixtfdi";
280  Names[RTLIB::FPTOSINT_F128_I128] = "__fixtfti";
281  Names[RTLIB::FPTOSINT_PPCF128_I32] = "__gcc_qtou";
282  Names[RTLIB::FPTOSINT_PPCF128_I64] = "__fixtfdi";
283  Names[RTLIB::FPTOSINT_PPCF128_I128] = "__fixtfti";
284  Names[RTLIB::FPTOUINT_F32_I32] = "__fixunssfsi";
285  Names[RTLIB::FPTOUINT_F32_I64] = "__fixunssfdi";
286  Names[RTLIB::FPTOUINT_F32_I128] = "__fixunssfti";
287  Names[RTLIB::FPTOUINT_F64_I32] = "__fixunsdfsi";
288  Names[RTLIB::FPTOUINT_F64_I64] = "__fixunsdfdi";
289  Names[RTLIB::FPTOUINT_F64_I128] = "__fixunsdfti";
290  Names[RTLIB::FPTOUINT_F80_I32] = "__fixunsxfsi";
291  Names[RTLIB::FPTOUINT_F80_I64] = "__fixunsxfdi";
292  Names[RTLIB::FPTOUINT_F80_I128] = "__fixunsxfti";
293  Names[RTLIB::FPTOUINT_F128_I32] = "__fixunstfsi";
294  Names[RTLIB::FPTOUINT_F128_I64] = "__fixunstfdi";
295  Names[RTLIB::FPTOUINT_F128_I128] = "__fixunstfti";
296  Names[RTLIB::FPTOUINT_PPCF128_I32] = "__fixunstfsi";
297  Names[RTLIB::FPTOUINT_PPCF128_I64] = "__fixunstfdi";
298  Names[RTLIB::FPTOUINT_PPCF128_I128] = "__fixunstfti";
299  Names[RTLIB::SINTTOFP_I32_F32] = "__floatsisf";
300  Names[RTLIB::SINTTOFP_I32_F64] = "__floatsidf";
301  Names[RTLIB::SINTTOFP_I32_F80] = "__floatsixf";
302  Names[RTLIB::SINTTOFP_I32_F128] = "__floatsitf";
303  Names[RTLIB::SINTTOFP_I32_PPCF128] = "__gcc_itoq";
304  Names[RTLIB::SINTTOFP_I64_F32] = "__floatdisf";
305  Names[RTLIB::SINTTOFP_I64_F64] = "__floatdidf";
306  Names[RTLIB::SINTTOFP_I64_F80] = "__floatdixf";
307  Names[RTLIB::SINTTOFP_I64_F128] = "__floatditf";
308  Names[RTLIB::SINTTOFP_I64_PPCF128] = "__floatditf";
309  Names[RTLIB::SINTTOFP_I128_F32] = "__floattisf";
310  Names[RTLIB::SINTTOFP_I128_F64] = "__floattidf";
311  Names[RTLIB::SINTTOFP_I128_F80] = "__floattixf";
312  Names[RTLIB::SINTTOFP_I128_F128] = "__floattitf";
313  Names[RTLIB::SINTTOFP_I128_PPCF128] = "__floattitf";
314  Names[RTLIB::UINTTOFP_I32_F32] = "__floatunsisf";
315  Names[RTLIB::UINTTOFP_I32_F64] = "__floatunsidf";
316  Names[RTLIB::UINTTOFP_I32_F80] = "__floatunsixf";
317  Names[RTLIB::UINTTOFP_I32_F128] = "__floatunsitf";
318  Names[RTLIB::UINTTOFP_I32_PPCF128] = "__gcc_utoq";
319  Names[RTLIB::UINTTOFP_I64_F32] = "__floatundisf";
320  Names[RTLIB::UINTTOFP_I64_F64] = "__floatundidf";
321  Names[RTLIB::UINTTOFP_I64_F80] = "__floatundixf";
322  Names[RTLIB::UINTTOFP_I64_F128] = "__floatunditf";
323  Names[RTLIB::UINTTOFP_I64_PPCF128] = "__floatunditf";
324  Names[RTLIB::UINTTOFP_I128_F32] = "__floatuntisf";
325  Names[RTLIB::UINTTOFP_I128_F64] = "__floatuntidf";
326  Names[RTLIB::UINTTOFP_I128_F80] = "__floatuntixf";
327  Names[RTLIB::UINTTOFP_I128_F128] = "__floatuntitf";
328  Names[RTLIB::UINTTOFP_I128_PPCF128] = "__floatuntitf";
329  Names[RTLIB::OEQ_F32] = "__eqsf2";
330  Names[RTLIB::OEQ_F64] = "__eqdf2";
331  Names[RTLIB::OEQ_F128] = "__eqtf2";
332  Names[RTLIB::OEQ_PPCF128] = "__gcc_qeq";
333  Names[RTLIB::UNE_F32] = "__nesf2";
334  Names[RTLIB::UNE_F64] = "__nedf2";
335  Names[RTLIB::UNE_F128] = "__netf2";
336  Names[RTLIB::UNE_PPCF128] = "__gcc_qne";
337  Names[RTLIB::OGE_F32] = "__gesf2";
338  Names[RTLIB::OGE_F64] = "__gedf2";
339  Names[RTLIB::OGE_F128] = "__getf2";
340  Names[RTLIB::OGE_PPCF128] = "__gcc_qge";
341  Names[RTLIB::OLT_F32] = "__ltsf2";
342  Names[RTLIB::OLT_F64] = "__ltdf2";
343  Names[RTLIB::OLT_F128] = "__lttf2";
344  Names[RTLIB::OLT_PPCF128] = "__gcc_qlt";
345  Names[RTLIB::OLE_F32] = "__lesf2";
346  Names[RTLIB::OLE_F64] = "__ledf2";
347  Names[RTLIB::OLE_F128] = "__letf2";
348  Names[RTLIB::OLE_PPCF128] = "__gcc_qle";
349  Names[RTLIB::OGT_F32] = "__gtsf2";
350  Names[RTLIB::OGT_F64] = "__gtdf2";
351  Names[RTLIB::OGT_F128] = "__gttf2";
352  Names[RTLIB::OGT_PPCF128] = "__gcc_qgt";
353  Names[RTLIB::UO_F32] = "__unordsf2";
354  Names[RTLIB::UO_F64] = "__unorddf2";
355  Names[RTLIB::UO_F128] = "__unordtf2";
356  Names[RTLIB::UO_PPCF128] = "__gcc_qunord";
357  Names[RTLIB::O_F32] = "__unordsf2";
358  Names[RTLIB::O_F64] = "__unorddf2";
359  Names[RTLIB::O_F128] = "__unordtf2";
360  Names[RTLIB::O_PPCF128] = "__gcc_qunord";
361  Names[RTLIB::MEMCPY] = "memcpy";
362  Names[RTLIB::MEMMOVE] = "memmove";
363  Names[RTLIB::MEMSET] = "memset";
364  Names[RTLIB::MEMCPY_ELEMENT_ATOMIC_1] = "__llvm_memcpy_element_atomic_1";
365  Names[RTLIB::MEMCPY_ELEMENT_ATOMIC_2] = "__llvm_memcpy_element_atomic_2";
366  Names[RTLIB::MEMCPY_ELEMENT_ATOMIC_4] = "__llvm_memcpy_element_atomic_4";
367  Names[RTLIB::MEMCPY_ELEMENT_ATOMIC_8] = "__llvm_memcpy_element_atomic_8";
368  Names[RTLIB::MEMCPY_ELEMENT_ATOMIC_16] = "__llvm_memcpy_element_atomic_16";
369  Names[RTLIB::UNWIND_RESUME] = "_Unwind_Resume";
370  Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_1] = "__sync_val_compare_and_swap_1";
371  Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_2] = "__sync_val_compare_and_swap_2";
372  Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_4] = "__sync_val_compare_and_swap_4";
373  Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_8] = "__sync_val_compare_and_swap_8";
374  Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_16] = "__sync_val_compare_and_swap_16";
375  Names[RTLIB::SYNC_LOCK_TEST_AND_SET_1] = "__sync_lock_test_and_set_1";
376  Names[RTLIB::SYNC_LOCK_TEST_AND_SET_2] = "__sync_lock_test_and_set_2";
377  Names[RTLIB::SYNC_LOCK_TEST_AND_SET_4] = "__sync_lock_test_and_set_4";
378  Names[RTLIB::SYNC_LOCK_TEST_AND_SET_8] = "__sync_lock_test_and_set_8";
379  Names[RTLIB::SYNC_LOCK_TEST_AND_SET_16] = "__sync_lock_test_and_set_16";
380  Names[RTLIB::SYNC_FETCH_AND_ADD_1] = "__sync_fetch_and_add_1";
381  Names[RTLIB::SYNC_FETCH_AND_ADD_2] = "__sync_fetch_and_add_2";
382  Names[RTLIB::SYNC_FETCH_AND_ADD_4] = "__sync_fetch_and_add_4";
383  Names[RTLIB::SYNC_FETCH_AND_ADD_8] = "__sync_fetch_and_add_8";
384  Names[RTLIB::SYNC_FETCH_AND_ADD_16] = "__sync_fetch_and_add_16";
385  Names[RTLIB::SYNC_FETCH_AND_SUB_1] = "__sync_fetch_and_sub_1";
386  Names[RTLIB::SYNC_FETCH_AND_SUB_2] = "__sync_fetch_and_sub_2";
387  Names[RTLIB::SYNC_FETCH_AND_SUB_4] = "__sync_fetch_and_sub_4";
388  Names[RTLIB::SYNC_FETCH_AND_SUB_8] = "__sync_fetch_and_sub_8";
389  Names[RTLIB::SYNC_FETCH_AND_SUB_16] = "__sync_fetch_and_sub_16";
390  Names[RTLIB::SYNC_FETCH_AND_AND_1] = "__sync_fetch_and_and_1";
391  Names[RTLIB::SYNC_FETCH_AND_AND_2] = "__sync_fetch_and_and_2";
392  Names[RTLIB::SYNC_FETCH_AND_AND_4] = "__sync_fetch_and_and_4";
393  Names[RTLIB::SYNC_FETCH_AND_AND_8] = "__sync_fetch_and_and_8";
394  Names[RTLIB::SYNC_FETCH_AND_AND_16] = "__sync_fetch_and_and_16";
395  Names[RTLIB::SYNC_FETCH_AND_OR_1] = "__sync_fetch_and_or_1";
396  Names[RTLIB::SYNC_FETCH_AND_OR_2] = "__sync_fetch_and_or_2";
397  Names[RTLIB::SYNC_FETCH_AND_OR_4] = "__sync_fetch_and_or_4";
398  Names[RTLIB::SYNC_FETCH_AND_OR_8] = "__sync_fetch_and_or_8";
399  Names[RTLIB::SYNC_FETCH_AND_OR_16] = "__sync_fetch_and_or_16";
400  Names[RTLIB::SYNC_FETCH_AND_XOR_1] = "__sync_fetch_and_xor_1";
401  Names[RTLIB::SYNC_FETCH_AND_XOR_2] = "__sync_fetch_and_xor_2";
402  Names[RTLIB::SYNC_FETCH_AND_XOR_4] = "__sync_fetch_and_xor_4";
403  Names[RTLIB::SYNC_FETCH_AND_XOR_8] = "__sync_fetch_and_xor_8";
404  Names[RTLIB::SYNC_FETCH_AND_XOR_16] = "__sync_fetch_and_xor_16";
405  Names[RTLIB::SYNC_FETCH_AND_NAND_1] = "__sync_fetch_and_nand_1";
406  Names[RTLIB::SYNC_FETCH_AND_NAND_2] = "__sync_fetch_and_nand_2";
407  Names[RTLIB::SYNC_FETCH_AND_NAND_4] = "__sync_fetch_and_nand_4";
408  Names[RTLIB::SYNC_FETCH_AND_NAND_8] = "__sync_fetch_and_nand_8";
409  Names[RTLIB::SYNC_FETCH_AND_NAND_16] = "__sync_fetch_and_nand_16";
410  Names[RTLIB::SYNC_FETCH_AND_MAX_1] = "__sync_fetch_and_max_1";
411  Names[RTLIB::SYNC_FETCH_AND_MAX_2] = "__sync_fetch_and_max_2";
412  Names[RTLIB::SYNC_FETCH_AND_MAX_4] = "__sync_fetch_and_max_4";
413  Names[RTLIB::SYNC_FETCH_AND_MAX_8] = "__sync_fetch_and_max_8";
414  Names[RTLIB::SYNC_FETCH_AND_MAX_16] = "__sync_fetch_and_max_16";
415  Names[RTLIB::SYNC_FETCH_AND_UMAX_1] = "__sync_fetch_and_umax_1";
416  Names[RTLIB::SYNC_FETCH_AND_UMAX_2] = "__sync_fetch_and_umax_2";
417  Names[RTLIB::SYNC_FETCH_AND_UMAX_4] = "__sync_fetch_and_umax_4";
418  Names[RTLIB::SYNC_FETCH_AND_UMAX_8] = "__sync_fetch_and_umax_8";
419  Names[RTLIB::SYNC_FETCH_AND_UMAX_16] = "__sync_fetch_and_umax_16";
420  Names[RTLIB::SYNC_FETCH_AND_MIN_1] = "__sync_fetch_and_min_1";
421  Names[RTLIB::SYNC_FETCH_AND_MIN_2] = "__sync_fetch_and_min_2";
422  Names[RTLIB::SYNC_FETCH_AND_MIN_4] = "__sync_fetch_and_min_4";
423  Names[RTLIB::SYNC_FETCH_AND_MIN_8] = "__sync_fetch_and_min_8";
424  Names[RTLIB::SYNC_FETCH_AND_MIN_16] = "__sync_fetch_and_min_16";
425  Names[RTLIB::SYNC_FETCH_AND_UMIN_1] = "__sync_fetch_and_umin_1";
426  Names[RTLIB::SYNC_FETCH_AND_UMIN_2] = "__sync_fetch_and_umin_2";
427  Names[RTLIB::SYNC_FETCH_AND_UMIN_4] = "__sync_fetch_and_umin_4";
428  Names[RTLIB::SYNC_FETCH_AND_UMIN_8] = "__sync_fetch_and_umin_8";
429  Names[RTLIB::SYNC_FETCH_AND_UMIN_16] = "__sync_fetch_and_umin_16";
430 
431  Names[RTLIB::ATOMIC_LOAD] = "__atomic_load";
432  Names[RTLIB::ATOMIC_LOAD_1] = "__atomic_load_1";
433  Names[RTLIB::ATOMIC_LOAD_2] = "__atomic_load_2";
434  Names[RTLIB::ATOMIC_LOAD_4] = "__atomic_load_4";
435  Names[RTLIB::ATOMIC_LOAD_8] = "__atomic_load_8";
436  Names[RTLIB::ATOMIC_LOAD_16] = "__atomic_load_16";
437 
438  Names[RTLIB::ATOMIC_STORE] = "__atomic_store";
439  Names[RTLIB::ATOMIC_STORE_1] = "__atomic_store_1";
440  Names[RTLIB::ATOMIC_STORE_2] = "__atomic_store_2";
441  Names[RTLIB::ATOMIC_STORE_4] = "__atomic_store_4";
442  Names[RTLIB::ATOMIC_STORE_8] = "__atomic_store_8";
443  Names[RTLIB::ATOMIC_STORE_16] = "__atomic_store_16";
444 
445  Names[RTLIB::ATOMIC_EXCHANGE] = "__atomic_exchange";
446  Names[RTLIB::ATOMIC_EXCHANGE_1] = "__atomic_exchange_1";
447  Names[RTLIB::ATOMIC_EXCHANGE_2] = "__atomic_exchange_2";
448  Names[RTLIB::ATOMIC_EXCHANGE_4] = "__atomic_exchange_4";
449  Names[RTLIB::ATOMIC_EXCHANGE_8] = "__atomic_exchange_8";
450  Names[RTLIB::ATOMIC_EXCHANGE_16] = "__atomic_exchange_16";
451 
452  Names[RTLIB::ATOMIC_COMPARE_EXCHANGE] = "__atomic_compare_exchange";
453  Names[RTLIB::ATOMIC_COMPARE_EXCHANGE_1] = "__atomic_compare_exchange_1";
454  Names[RTLIB::ATOMIC_COMPARE_EXCHANGE_2] = "__atomic_compare_exchange_2";
455  Names[RTLIB::ATOMIC_COMPARE_EXCHANGE_4] = "__atomic_compare_exchange_4";
456  Names[RTLIB::ATOMIC_COMPARE_EXCHANGE_8] = "__atomic_compare_exchange_8";
457  Names[RTLIB::ATOMIC_COMPARE_EXCHANGE_16] = "__atomic_compare_exchange_16";
458 
459  Names[RTLIB::ATOMIC_FETCH_ADD_1] = "__atomic_fetch_add_1";
460  Names[RTLIB::ATOMIC_FETCH_ADD_2] = "__atomic_fetch_add_2";
461  Names[RTLIB::ATOMIC_FETCH_ADD_4] = "__atomic_fetch_add_4";
462  Names[RTLIB::ATOMIC_FETCH_ADD_8] = "__atomic_fetch_add_8";
463  Names[RTLIB::ATOMIC_FETCH_ADD_16] = "__atomic_fetch_add_16";
464  Names[RTLIB::ATOMIC_FETCH_SUB_1] = "__atomic_fetch_sub_1";
465  Names[RTLIB::ATOMIC_FETCH_SUB_2] = "__atomic_fetch_sub_2";
466  Names[RTLIB::ATOMIC_FETCH_SUB_4] = "__atomic_fetch_sub_4";
467  Names[RTLIB::ATOMIC_FETCH_SUB_8] = "__atomic_fetch_sub_8";
468  Names[RTLIB::ATOMIC_FETCH_SUB_16] = "__atomic_fetch_sub_16";
469  Names[RTLIB::ATOMIC_FETCH_AND_1] = "__atomic_fetch_and_1";
470  Names[RTLIB::ATOMIC_FETCH_AND_2] = "__atomic_fetch_and_2";
471  Names[RTLIB::ATOMIC_FETCH_AND_4] = "__atomic_fetch_and_4";
472  Names[RTLIB::ATOMIC_FETCH_AND_8] = "__atomic_fetch_and_8";
473  Names[RTLIB::ATOMIC_FETCH_AND_16] = "__atomic_fetch_and_16";
474  Names[RTLIB::ATOMIC_FETCH_OR_1] = "__atomic_fetch_or_1";
475  Names[RTLIB::ATOMIC_FETCH_OR_2] = "__atomic_fetch_or_2";
476  Names[RTLIB::ATOMIC_FETCH_OR_4] = "__atomic_fetch_or_4";
477  Names[RTLIB::ATOMIC_FETCH_OR_8] = "__atomic_fetch_or_8";
478  Names[RTLIB::ATOMIC_FETCH_OR_16] = "__atomic_fetch_or_16";
479  Names[RTLIB::ATOMIC_FETCH_XOR_1] = "__atomic_fetch_xor_1";
480  Names[RTLIB::ATOMIC_FETCH_XOR_2] = "__atomic_fetch_xor_2";
481  Names[RTLIB::ATOMIC_FETCH_XOR_4] = "__atomic_fetch_xor_4";
482  Names[RTLIB::ATOMIC_FETCH_XOR_8] = "__atomic_fetch_xor_8";
483  Names[RTLIB::ATOMIC_FETCH_XOR_16] = "__atomic_fetch_xor_16";
484  Names[RTLIB::ATOMIC_FETCH_NAND_1] = "__atomic_fetch_nand_1";
485  Names[RTLIB::ATOMIC_FETCH_NAND_2] = "__atomic_fetch_nand_2";
486  Names[RTLIB::ATOMIC_FETCH_NAND_4] = "__atomic_fetch_nand_4";
487  Names[RTLIB::ATOMIC_FETCH_NAND_8] = "__atomic_fetch_nand_8";
488  Names[RTLIB::ATOMIC_FETCH_NAND_16] = "__atomic_fetch_nand_16";
489 
490  if (TT.isGNUEnvironment()) {
491  Names[RTLIB::SINCOS_F32] = "sincosf";
492  Names[RTLIB::SINCOS_F64] = "sincos";
493  Names[RTLIB::SINCOS_F80] = "sincosl";
494  Names[RTLIB::SINCOS_F128] = "sincosl";
495  Names[RTLIB::SINCOS_PPCF128] = "sincosl";
496  }
497 
498  if (!TT.isOSOpenBSD()) {
499  Names[RTLIB::STACKPROTECTOR_CHECK_FAIL] = "__stack_chk_fail";
500  }
501 
502  Names[RTLIB::DEOPTIMIZE] = "__llvm_deoptimize";
503 }
504 
505 /// Set default libcall CallingConvs.
506 static void InitLibcallCallingConvs(CallingConv::ID *CCs) {
507   for (int LC = 0; LC < RTLIB::UNKNOWN_LIBCALL; ++LC)
508  CCs[LC] = CallingConv::C;
509 }
510 
511 /// getFPEXT - Return the FPEXT_*_* value for the given types, or
512 /// UNKNOWN_LIBCALL if there is none.
513 RTLIB::Libcall RTLIB::getFPEXT(EVT OpVT, EVT RetVT) {
514   if (OpVT == MVT::f16) {
515  if (RetVT == MVT::f32)
516  return FPEXT_F16_F32;
517  } else if (OpVT == MVT::f32) {
518  if (RetVT == MVT::f64)
519  return FPEXT_F32_F64;
520  if (RetVT == MVT::f128)
521  return FPEXT_F32_F128;
522  if (RetVT == MVT::ppcf128)
523  return FPEXT_F32_PPCF128;
524  } else if (OpVT == MVT::f64) {
525  if (RetVT == MVT::f128)
526  return FPEXT_F64_F128;
527  else if (RetVT == MVT::ppcf128)
528  return FPEXT_F64_PPCF128;
529  }
530 
531  return UNKNOWN_LIBCALL;
532 }
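// Illustrative usage sketch (added; this helper is hypothetical and not part
// of the original file). It shows how the mapping above combines with the
// names registered in InitLibcallNames via TargetLoweringBase::getLibcallName():
//
//   static const char *fpExtLibcallName(const TargetLoweringBase &TLI,
//                                       EVT OpVT, EVT RetVT) {
//     RTLIB::Libcall LC = RTLIB::getFPEXT(OpVT, RetVT); // f32->f64: FPEXT_F32_F64
//     return LC == RTLIB::UNKNOWN_LIBCALL ? nullptr
//                                         : TLI.getLibcallName(LC); // "__extendsfdf2"
//   }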
533 
534 /// getFPROUND - Return the FPROUND_*_* value for the given types, or
535 /// UNKNOWN_LIBCALL if there is none.
536 RTLIB::Libcall RTLIB::getFPROUND(EVT OpVT, EVT RetVT) {
537   if (RetVT == MVT::f16) {
538  if (OpVT == MVT::f32)
539  return FPROUND_F32_F16;
540  if (OpVT == MVT::f64)
541  return FPROUND_F64_F16;
542  if (OpVT == MVT::f80)
543  return FPROUND_F80_F16;
544  if (OpVT == MVT::f128)
545  return FPROUND_F128_F16;
546  if (OpVT == MVT::ppcf128)
547  return FPROUND_PPCF128_F16;
548  } else if (RetVT == MVT::f32) {
549  if (OpVT == MVT::f64)
550  return FPROUND_F64_F32;
551  if (OpVT == MVT::f80)
552  return FPROUND_F80_F32;
553  if (OpVT == MVT::f128)
554  return FPROUND_F128_F32;
555  if (OpVT == MVT::ppcf128)
556  return FPROUND_PPCF128_F32;
557  } else if (RetVT == MVT::f64) {
558  if (OpVT == MVT::f80)
559  return FPROUND_F80_F64;
560  if (OpVT == MVT::f128)
561  return FPROUND_F128_F64;
562  if (OpVT == MVT::ppcf128)
563  return FPROUND_PPCF128_F64;
564  }
565 
566  return UNKNOWN_LIBCALL;
567 }
568 
569 /// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or
570 /// UNKNOWN_LIBCALL if there is none.
571 RTLIB::Libcall RTLIB::getFPTOSINT(EVT OpVT, EVT RetVT) {
572   if (OpVT == MVT::f32) {
573  if (RetVT == MVT::i32)
574  return FPTOSINT_F32_I32;
575  if (RetVT == MVT::i64)
576  return FPTOSINT_F32_I64;
577  if (RetVT == MVT::i128)
578  return FPTOSINT_F32_I128;
579  } else if (OpVT == MVT::f64) {
580  if (RetVT == MVT::i32)
581  return FPTOSINT_F64_I32;
582  if (RetVT == MVT::i64)
583  return FPTOSINT_F64_I64;
584  if (RetVT == MVT::i128)
585  return FPTOSINT_F64_I128;
586  } else if (OpVT == MVT::f80) {
587  if (RetVT == MVT::i32)
588  return FPTOSINT_F80_I32;
589  if (RetVT == MVT::i64)
590  return FPTOSINT_F80_I64;
591  if (RetVT == MVT::i128)
592  return FPTOSINT_F80_I128;
593  } else if (OpVT == MVT::f128) {
594  if (RetVT == MVT::i32)
595  return FPTOSINT_F128_I32;
596  if (RetVT == MVT::i64)
597  return FPTOSINT_F128_I64;
598  if (RetVT == MVT::i128)
599  return FPTOSINT_F128_I128;
600  } else if (OpVT == MVT::ppcf128) {
601  if (RetVT == MVT::i32)
602  return FPTOSINT_PPCF128_I32;
603  if (RetVT == MVT::i64)
604  return FPTOSINT_PPCF128_I64;
605  if (RetVT == MVT::i128)
606  return FPTOSINT_PPCF128_I128;
607  }
608  return UNKNOWN_LIBCALL;
609 }
610 
611 /// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or
612 /// UNKNOWN_LIBCALL if there is none.
613 RTLIB::Libcall RTLIB::getFPTOUINT(EVT OpVT, EVT RetVT) {
614   if (OpVT == MVT::f32) {
615  if (RetVT == MVT::i32)
616  return FPTOUINT_F32_I32;
617  if (RetVT == MVT::i64)
618  return FPTOUINT_F32_I64;
619  if (RetVT == MVT::i128)
620  return FPTOUINT_F32_I128;
621  } else if (OpVT == MVT::f64) {
622  if (RetVT == MVT::i32)
623  return FPTOUINT_F64_I32;
624  if (RetVT == MVT::i64)
625  return FPTOUINT_F64_I64;
626  if (RetVT == MVT::i128)
627  return FPTOUINT_F64_I128;
628  } else if (OpVT == MVT::f80) {
629  if (RetVT == MVT::i32)
630  return FPTOUINT_F80_I32;
631  if (RetVT == MVT::i64)
632  return FPTOUINT_F80_I64;
633  if (RetVT == MVT::i128)
634  return FPTOUINT_F80_I128;
635  } else if (OpVT == MVT::f128) {
636  if (RetVT == MVT::i32)
637  return FPTOUINT_F128_I32;
638  if (RetVT == MVT::i64)
639  return FPTOUINT_F128_I64;
640  if (RetVT == MVT::i128)
641  return FPTOUINT_F128_I128;
642  } else if (OpVT == MVT::ppcf128) {
643  if (RetVT == MVT::i32)
644  return FPTOUINT_PPCF128_I32;
645  if (RetVT == MVT::i64)
646  return FPTOUINT_PPCF128_I64;
647  if (RetVT == MVT::i128)
648  return FPTOUINT_PPCF128_I128;
649  }
650  return UNKNOWN_LIBCALL;
651 }
652 
653 /// getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or
654 /// UNKNOWN_LIBCALL if there is none.
655 RTLIB::Libcall RTLIB::getSINTTOFP(EVT OpVT, EVT RetVT) {
656   if (OpVT == MVT::i32) {
657  if (RetVT == MVT::f32)
658  return SINTTOFP_I32_F32;
659  if (RetVT == MVT::f64)
660  return SINTTOFP_I32_F64;
661  if (RetVT == MVT::f80)
662  return SINTTOFP_I32_F80;
663  if (RetVT == MVT::f128)
664  return SINTTOFP_I32_F128;
665  if (RetVT == MVT::ppcf128)
666  return SINTTOFP_I32_PPCF128;
667  } else if (OpVT == MVT::i64) {
668  if (RetVT == MVT::f32)
669  return SINTTOFP_I64_F32;
670  if (RetVT == MVT::f64)
671  return SINTTOFP_I64_F64;
672  if (RetVT == MVT::f80)
673  return SINTTOFP_I64_F80;
674  if (RetVT == MVT::f128)
675  return SINTTOFP_I64_F128;
676  if (RetVT == MVT::ppcf128)
677  return SINTTOFP_I64_PPCF128;
678  } else if (OpVT == MVT::i128) {
679  if (RetVT == MVT::f32)
680  return SINTTOFP_I128_F32;
681  if (RetVT == MVT::f64)
682  return SINTTOFP_I128_F64;
683  if (RetVT == MVT::f80)
684  return SINTTOFP_I128_F80;
685  if (RetVT == MVT::f128)
686  return SINTTOFP_I128_F128;
687  if (RetVT == MVT::ppcf128)
688  return SINTTOFP_I128_PPCF128;
689  }
690  return UNKNOWN_LIBCALL;
691 }
692 
693 /// getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or
694 /// UNKNOWN_LIBCALL if there is none.
695 RTLIB::Libcall RTLIB::getUINTTOFP(EVT OpVT, EVT RetVT) {
696   if (OpVT == MVT::i32) {
697  if (RetVT == MVT::f32)
698  return UINTTOFP_I32_F32;
699  if (RetVT == MVT::f64)
700  return UINTTOFP_I32_F64;
701  if (RetVT == MVT::f80)
702  return UINTTOFP_I32_F80;
703  if (RetVT == MVT::f128)
704  return UINTTOFP_I32_F128;
705  if (RetVT == MVT::ppcf128)
706  return UINTTOFP_I32_PPCF128;
707  } else if (OpVT == MVT::i64) {
708  if (RetVT == MVT::f32)
709  return UINTTOFP_I64_F32;
710  if (RetVT == MVT::f64)
711  return UINTTOFP_I64_F64;
712  if (RetVT == MVT::f80)
713  return UINTTOFP_I64_F80;
714  if (RetVT == MVT::f128)
715  return UINTTOFP_I64_F128;
716  if (RetVT == MVT::ppcf128)
717  return UINTTOFP_I64_PPCF128;
718  } else if (OpVT == MVT::i128) {
719  if (RetVT == MVT::f32)
720  return UINTTOFP_I128_F32;
721  if (RetVT == MVT::f64)
722  return UINTTOFP_I128_F64;
723  if (RetVT == MVT::f80)
724  return UINTTOFP_I128_F80;
725  if (RetVT == MVT::f128)
726  return UINTTOFP_I128_F128;
727  if (RetVT == MVT::ppcf128)
728  return UINTTOFP_I128_PPCF128;
729  }
730  return UNKNOWN_LIBCALL;
731 }
732 
733 RTLIB::Libcall RTLIB::getSYNC(unsigned Opc, MVT VT) {
734 #define OP_TO_LIBCALL(Name, Enum) \
735  case Name: \
736  switch (VT.SimpleTy) { \
737  default: \
738  return UNKNOWN_LIBCALL; \
739  case MVT::i8: \
740  return Enum##_1; \
741  case MVT::i16: \
742  return Enum##_2; \
743  case MVT::i32: \
744  return Enum##_4; \
745  case MVT::i64: \
746  return Enum##_8; \
747  case MVT::i128: \
748  return Enum##_16; \
749  }
750 
751  switch (Opc) {
752  OP_TO_LIBCALL(ISD::ATOMIC_SWAP, SYNC_LOCK_TEST_AND_SET)
753  OP_TO_LIBCALL(ISD::ATOMIC_CMP_SWAP, SYNC_VAL_COMPARE_AND_SWAP)
754  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_ADD, SYNC_FETCH_AND_ADD)
755  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_SUB, SYNC_FETCH_AND_SUB)
756  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_AND, SYNC_FETCH_AND_AND)
757  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_OR, SYNC_FETCH_AND_OR)
758  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_XOR, SYNC_FETCH_AND_XOR)
759  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_NAND, SYNC_FETCH_AND_NAND)
760  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MAX, SYNC_FETCH_AND_MAX)
761  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMAX, SYNC_FETCH_AND_UMAX)
762  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MIN, SYNC_FETCH_AND_MIN)
763  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMIN, SYNC_FETCH_AND_UMIN)
764  }
765 
766 #undef OP_TO_LIBCALL
767 
768  return UNKNOWN_LIBCALL;
769 }
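// Illustrative note (added): the OP_TO_LIBCALL expansion above selects the
// size-suffixed __sync_* entry from the atomic node's memory type, e.g.
//   RTLIB::getSYNC(ISD::ATOMIC_LOAD_ADD, MVT::i32) == RTLIB::SYNC_FETCH_AND_ADD_4
// whose default name (set in InitLibcallNames) is "__sync_fetch_and_add_4";
// any opcode or value type not listed falls through to UNKNOWN_LIBCALL.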
770 
771 RTLIB::Libcall RTLIB::getMEMCPY_ELEMENT_ATOMIC(uint64_t ElementSize) {
772   switch (ElementSize) {
773   case 1:
774     return MEMCPY_ELEMENT_ATOMIC_1;
775   case 2:
776     return MEMCPY_ELEMENT_ATOMIC_2;
777   case 4:
778     return MEMCPY_ELEMENT_ATOMIC_4;
779   case 8:
780     return MEMCPY_ELEMENT_ATOMIC_8;
781   case 16:
782     return MEMCPY_ELEMENT_ATOMIC_16;
783   default:
784     return UNKNOWN_LIBCALL;
785   }
786 
787 }
788 
789 /// InitCmpLibcallCCs - Set default comparison libcall CC.
790 ///
791 static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
792   memset(CCs, ISD::SETCC_INVALID, sizeof(ISD::CondCode)*RTLIB::UNKNOWN_LIBCALL);
793   CCs[RTLIB::OEQ_F32] = ISD::SETEQ;
794  CCs[RTLIB::OEQ_F64] = ISD::SETEQ;
797  CCs[RTLIB::UNE_F32] = ISD::SETNE;
798  CCs[RTLIB::UNE_F64] = ISD::SETNE;
801  CCs[RTLIB::OGE_F32] = ISD::SETGE;
802  CCs[RTLIB::OGE_F64] = ISD::SETGE;
805  CCs[RTLIB::OLT_F32] = ISD::SETLT;
806  CCs[RTLIB::OLT_F64] = ISD::SETLT;
809  CCs[RTLIB::OLE_F32] = ISD::SETLE;
810  CCs[RTLIB::OLE_F64] = ISD::SETLE;
813  CCs[RTLIB::OGT_F32] = ISD::SETGT;
814  CCs[RTLIB::OGT_F64] = ISD::SETGT;
817  CCs[RTLIB::UO_F32] = ISD::SETNE;
818  CCs[RTLIB::UO_F64] = ISD::SETNE;
819  CCs[RTLIB::UO_F128] = ISD::SETNE;
821  CCs[RTLIB::O_F32] = ISD::SETEQ;
822  CCs[RTLIB::O_F64] = ISD::SETEQ;
823  CCs[RTLIB::O_F128] = ISD::SETEQ;
825 }
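// Added note: these condition codes say how the integer result of a comparison
// libcall is tested against zero. For example, OEQ_F32 is bound to __eqsf2 in
// InitLibcallNames and to SETEQ here, so an ordered "a == b" becomes
// "__eqsf2(a, b) == 0"; the UO_* entries use __unordsf2 with SETNE, whose
// result is nonzero exactly when either operand is NaN.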
826 
827 /// NOTE: The TargetMachine owns TLOF.
828 TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm) : TM(tm) {
829   initActions();
830 
831  // Perform these initializations only once.
835  UseUnderscoreSetJmp = false;
836  UseUnderscoreLongJmp = false;
837  HasMultipleConditionRegisters = false;
838  HasExtractBitsInsn = false;
839  JumpIsExpensive = JumpIsExpensiveOverride;
842  EnableExtLdPromotion = false;
843  HasFloatingPointExceptions = true;
844  StackPointerRegisterToSaveRestore = 0;
845  BooleanContents = UndefinedBooleanContent;
846  BooleanFloatContents = UndefinedBooleanContent;
847  BooleanVectorContents = UndefinedBooleanContent;
848  SchedPreferenceInfo = Sched::ILP;
849  JumpBufSize = 0;
850  JumpBufAlignment = 0;
851  MinFunctionAlignment = 0;
852  PrefFunctionAlignment = 0;
853  PrefLoopAlignment = 0;
855  MinStackArgumentAlignment = 1;
856  // TODO: the default will be switched to 0 in the next commit, along
857  // with the Target-specific changes necessary.
858  MaxAtomicSizeInBitsSupported = 1024;
859 
860  MinCmpXchgSizeInBits = 0;
861 
862  std::fill(std::begin(LibcallRoutineNames), std::end(LibcallRoutineNames), nullptr);
863 
864  InitLibcallNames(LibcallRoutineNames, TM.getTargetTriple());
865  InitCmpLibcallCCs(CmpLibcallCCs);
866  InitLibcallCallingConvs(LibcallCallingConvs);
867 }
868 
869 void TargetLoweringBase::initActions() {
870   // All operations default to being supported.
871  memset(OpActions, 0, sizeof(OpActions));
872  memset(LoadExtActions, 0, sizeof(LoadExtActions));
873  memset(TruncStoreActions, 0, sizeof(TruncStoreActions));
874  memset(IndexedModeActions, 0, sizeof(IndexedModeActions));
875  memset(CondCodeActions, 0, sizeof(CondCodeActions));
876  std::fill(std::begin(RegClassForVT), std::end(RegClassForVT), nullptr);
877  std::fill(std::begin(TargetDAGCombineArray),
878  std::end(TargetDAGCombineArray), 0);
879 
880  // Set default actions for various operations.
881  for (MVT VT : MVT::all_valuetypes()) {
882  // Default all indexed load / store to expand.
883  for (unsigned IM = (unsigned)ISD::PRE_INC;
884  IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
885  setIndexedLoadAction(IM, VT, Expand);
886  setIndexedStoreAction(IM, VT, Expand);
887  }
888 
889  // Most backends expect to see the node which just returns the value loaded.
891 
892  // These operations default to expand.
904 
905  // Overflow operations default to expand
912 
913  // These default to Expand so they will be expanded to CTLZ/CTTZ by default.
916 
918 
919  // These library functions default to expand.
921 
922  // These operations default to expand for vector types.
923  if (VT.isVector()) {
928  }
929 
930  // For most targets @llvm.get.dynamic.area.offset just returns 0.
932  }
933 
934  // Most targets ignore the @llvm.prefetch intrinsic.
936 
937  // Most targets also ignore the @llvm.readcyclecounter intrinsic.
939 
940  // ConstantFP nodes default to expand. Targets can either change this to
941  // Legal, in which case all fp constants are legal, or use isFPImmLegal()
942  // to optimize expansions for certain constants.
948 
949  // These library functions default to expand.
950  for (MVT VT : {MVT::f32, MVT::f64, MVT::f128}) {
962  }
963 
964  // Default ISD::TRAP to expand (which turns it into abort).
966 
967  // On most systems, DEBUGTRAP and TRAP have no difference. The "Expand"
968  // here is to inform DAG Legalizer to replace DEBUGTRAP with TRAP.
969  //
971 }
972 
973 MVT TargetLoweringBase::getScalarShiftAmountTy(const DataLayout &DL,
974                                                EVT) const {
975  return MVT::getIntegerVT(8 * DL.getPointerSize(0));
976 }
977 
978 EVT TargetLoweringBase::getShiftAmountTy(EVT LHSTy,
979                                          const DataLayout &DL) const {
980  assert(LHSTy.isInteger() && "Shift amount is not an integer type!");
981  if (LHSTy.isVector())
982  return LHSTy;
983  return getScalarShiftAmountTy(DL, LHSTy);
984 }
985 
986 bool TargetLoweringBase::canOpTrap(unsigned Op, EVT VT) const {
987  assert(isTypeLegal(VT));
988  switch (Op) {
989  default:
990  return false;
991  case ISD::SDIV:
992  case ISD::UDIV:
993  case ISD::SREM:
994  case ISD::UREM:
995  return true;
996  }
997 }
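// Added note: only the integer divide/remainder opcodes report true here,
// since they can fault at run time (division by zero, and INT_MIN / -1 on
// targets that trap on signed overflow); everything else is treated as
// non-trapping by default.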
998 
999 void TargetLoweringBase::setJumpIsExpensive(bool isExpensive) {
1000   // If the command-line option was specified, ignore this request.
1001  if (!JumpIsExpensiveOverride.getNumOccurrences())
1002  JumpIsExpensive = isExpensive;
1003 }
1004 
1005 TargetLoweringBase::LegalizeKind
1006 TargetLoweringBase::getTypeConversion(LLVMContext &Context, EVT VT) const {
1007  // If this is a simple type, use the ComputeRegisterProp mechanism.
1008  if (VT.isSimple()) {
1009  MVT SVT = VT.getSimpleVT();
1010  assert((unsigned)SVT.SimpleTy < array_lengthof(TransformToType));
1011  MVT NVT = TransformToType[SVT.SimpleTy];
1012     LegalizeTypeAction LA = ValueTypeActions.getTypeAction(SVT.SimpleTy);
1013 
1014  assert((LA == TypeLegal || LA == TypeSoftenFloat ||
1015             ValueTypeActions.getTypeAction(NVT) != TypePromoteInteger) &&
1016            "Promote may not follow Expand or Promote");
1017 
1018  if (LA == TypeSplitVector)
1019  return LegalizeKind(LA,
1020  EVT::getVectorVT(Context, SVT.getVectorElementType(),
1021  SVT.getVectorNumElements() / 2));
1022  if (LA == TypeScalarizeVector)
1023  return LegalizeKind(LA, SVT.getVectorElementType());
1024  return LegalizeKind(LA, NVT);
1025  }
1026 
1027  // Handle Extended Scalar Types.
1028  if (!VT.isVector()) {
1029  assert(VT.isInteger() && "Float types must be simple");
1030  unsigned BitSize = VT.getSizeInBits();
1031  // First promote to a power-of-two size, then expand if necessary.
1032  if (BitSize < 8 || !isPowerOf2_32(BitSize)) {
1033  EVT NVT = VT.getRoundIntegerType(Context);
1034  assert(NVT != VT && "Unable to round integer VT");
1035  LegalizeKind NextStep = getTypeConversion(Context, NVT);
1036  // Avoid multi-step promotion.
1037  if (NextStep.first == TypePromoteInteger)
1038  return NextStep;
1039  // Return rounded integer type.
1040  return LegalizeKind(TypePromoteInteger, NVT);
1041  }
1042 
1043     return LegalizeKind(TypeExpandInteger,
1044                         EVT::getIntegerVT(Context, VT.getSizeInBits() / 2));
1045  }
1046 
1047  // Handle vector types.
1048  unsigned NumElts = VT.getVectorNumElements();
1049  EVT EltVT = VT.getVectorElementType();
1050 
1051  // Vectors with only one element are always scalarized.
1052  if (NumElts == 1)
1053  return LegalizeKind(TypeScalarizeVector, EltVT);
1054 
1055  // Try to widen vector elements until the element type is a power of two and
1056  // promote it to a legal type later on, for example:
1057  // <3 x i8> -> <4 x i8> -> <4 x i32>
1058  if (EltVT.isInteger()) {
1059  // Vectors with a number of elements that is not a power of two are always
1060  // widened, for example <3 x i8> -> <4 x i8>.
1061  if (!VT.isPow2VectorType()) {
1062  NumElts = (unsigned)NextPowerOf2(NumElts);
1063  EVT NVT = EVT::getVectorVT(Context, EltVT, NumElts);
1064  return LegalizeKind(TypeWidenVector, NVT);
1065  }
1066 
1067  // Examine the element type.
1068  LegalizeKind LK = getTypeConversion(Context, EltVT);
1069 
1070  // If type is to be expanded, split the vector.
1071  // <4 x i140> -> <2 x i140>
1072  if (LK.first == TypeExpandInteger)
1073       return LegalizeKind(TypeSplitVector,
1074                           EVT::getVectorVT(Context, EltVT, NumElts / 2));
1075 
1076  // Promote the integer element types until a legal vector type is found
1077  // or until the element integer type is too big. If a legal type was not
1078  // found, fallback to the usual mechanism of widening/splitting the
1079  // vector.
1080  EVT OldEltVT = EltVT;
1081  while (1) {
1082  // Increase the bitwidth of the element to the next pow-of-two
1083  // (which is greater than 8 bits).
1084  EltVT = EVT::getIntegerVT(Context, 1 + EltVT.getSizeInBits())
1085  .getRoundIntegerType(Context);
1086 
1087  // Stop trying when getting a non-simple element type.
1088  // Note that vector elements may be greater than legal vector element
1089  // types. Example: X86 XMM registers hold 64bit element on 32bit
1090  // systems.
1091  if (!EltVT.isSimple())
1092  break;
1093 
1094  // Build a new vector type and check if it is legal.
1095  MVT NVT = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
1096  // Found a legal promoted vector type.
1097  if (NVT != MVT() && ValueTypeActions.getTypeAction(NVT) == TypeLegal)
1098         return LegalizeKind(TypePromoteInteger,
1099                             EVT::getVectorVT(Context, EltVT, NumElts));
1100  }
1101 
1102  // Reset the type to the unexpanded type if we did not find a legal vector
1103  // type with a promoted vector element type.
1104  EltVT = OldEltVT;
1105  }
1106 
1107  // Try to widen the vector until a legal type is found.
1108  // If there is no wider legal type, split the vector.
1109  while (1) {
1110  // Round up to the next power of 2.
1111  NumElts = (unsigned)NextPowerOf2(NumElts);
1112 
1113  // If there is no simple vector type with this many elements then there
1114  // cannot be a larger legal vector type. Note that this assumes that
1115  // there are no skipped intermediate vector types in the simple types.
1116  if (!EltVT.isSimple())
1117  break;
1118  MVT LargerVector = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
1119  if (LargerVector == MVT())
1120  break;
1121 
1122  // If this type is legal then widen the vector.
1123  if (ValueTypeActions.getTypeAction(LargerVector) == TypeLegal)
1124  return LegalizeKind(TypeWidenVector, LargerVector);
1125  }
1126 
1127  // Widen odd vectors to next power of two.
1128  if (!VT.isPow2VectorType()) {
1129  EVT NVT = VT.getPow2VectorType(Context);
1130  return LegalizeKind(TypeWidenVector, NVT);
1131  }
1132 
1133  // Vectors with illegal element types are expanded.
1134  EVT NVT = EVT::getVectorVT(Context, EltVT, VT.getVectorNumElements() / 2);
1135  return LegalizeKind(TypeSplitVector, NVT);
1136 }
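// Walk-through (added, illustrative): for a non-simple scalar such as i33 the
// code above first rounds up to the next power-of-two width and returns
// (TypePromoteInteger, i64); a vector with a non-power-of-two element count,
// such as <3 x i140>, is first widened to <4 x i140>, and the legalizer then
// re-queries the result until it reaches a legal type or a split/scalarize
// decision.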
1137 
1138 static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
1139  unsigned &NumIntermediates,
1140  MVT &RegisterVT,
1141  TargetLoweringBase *TLI) {
1142  // Figure out the right, legal destination reg to copy into.
1143  unsigned NumElts = VT.getVectorNumElements();
1144  MVT EltTy = VT.getVectorElementType();
1145 
1146  unsigned NumVectorRegs = 1;
1147 
1148  // FIXME: We don't support non-power-of-2-sized vectors for now. Ideally we
1149  // could break down into LHS/RHS like LegalizeDAG does.
1150  if (!isPowerOf2_32(NumElts)) {
1151  NumVectorRegs = NumElts;
1152  NumElts = 1;
1153  }
1154 
1155  // Divide the input until we get to a supported size. This will always
1156  // end with a scalar if the target doesn't support vectors.
1157  while (NumElts > 1 && !TLI->isTypeLegal(MVT::getVectorVT(EltTy, NumElts))) {
1158  NumElts >>= 1;
1159  NumVectorRegs <<= 1;
1160  }
1161 
1162  NumIntermediates = NumVectorRegs;
1163 
1164  MVT NewVT = MVT::getVectorVT(EltTy, NumElts);
1165  if (!TLI->isTypeLegal(NewVT))
1166  NewVT = EltTy;
1167  IntermediateVT = NewVT;
1168 
1169  unsigned NewVTSize = NewVT.getSizeInBits();
1170 
1171  // Convert sizes such as i33 to i64.
1172  if (!isPowerOf2_32(NewVTSize))
1173  NewVTSize = NextPowerOf2(NewVTSize);
1174 
1175  MVT DestVT = TLI->getRegisterType(NewVT);
1176  RegisterVT = DestVT;
1177  if (EVT(DestVT).bitsLT(NewVT)) // Value is expanded, e.g. i64 -> i16.
1178  return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits());
1179 
1180  // Otherwise, promotion or legal types use the same number of registers as
1181  // the vector decimated to the appropriate level.
1182  return NumVectorRegs;
1183 }
1184 
1185 /// isLegalRC - Return true if the value types that can be represented by the
1186 /// specified register class are all legal.
1187 bool TargetLoweringBase::isLegalRC(const TargetRegisterClass *RC) const {
1188   for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
1189  I != E; ++I) {
1190  if (isTypeLegal(*I))
1191  return true;
1192  }
1193  return false;
1194 }
1195 
1196 /// Replace/modify any TargetFrameIndex operands with a target-dependent
1197 /// sequence of memory operands that is recognized by PrologEpilogInserter.
1198 MachineBasicBlock *
1199 TargetLoweringBase::emitPatchPoint(MachineInstr &InitialMI,
1200                                    MachineBasicBlock *MBB) const {
1201  MachineInstr *MI = &InitialMI;
1202  MachineFunction &MF = *MI->getParent()->getParent();
1203  MachineFrameInfo &MFI = MF.getFrameInfo();
1204 
1205  // We're handling multiple types of operands here:
1206  // PATCHPOINT MetaArgs - live-in, read only, direct
1207  // STATEPOINT Deopt Spill - live-through, read only, indirect
1208  // STATEPOINT Deopt Alloca - live-through, read only, direct
1209  // (We're currently conservative and mark the deopt slots read/write in
1210  // practice.)
1211  // STATEPOINT GC Spill - live-through, read/write, indirect
1212  // STATEPOINT GC Alloca - live-through, read/write, direct
1213  // The live-in vs live-through is handled already (the live through ones are
1214  // all stack slots), but we need to handle the different type of stackmap
1215  // operands and memory effects here.
1216 
1217  // MI changes inside this loop as we grow operands.
1218  for(unsigned OperIdx = 0; OperIdx != MI->getNumOperands(); ++OperIdx) {
1219  MachineOperand &MO = MI->getOperand(OperIdx);
1220  if (!MO.isFI())
1221  continue;
1222 
1223  // foldMemoryOperand builds a new MI after replacing a single FI operand
1224  // with the canonical set of five x86 addressing-mode operands.
1225  int FI = MO.getIndex();
1226  MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), MI->getDesc());
1227 
1228  // Copy operands before the frame-index.
1229  for (unsigned i = 0; i < OperIdx; ++i)
1230  MIB.addOperand(MI->getOperand(i));
1231  // Add frame index operands recognized by stackmaps.cpp
1232  if (MFI.isStatepointSpillSlotObjectIndex(FI)) {
1233  // indirect-mem-ref tag, size, #FI, offset.
1234  // Used for spills inserted by StatepointLowering. This codepath is not
1235  // used for patchpoints/stackmaps at all, for these spilling is done via
1236  // foldMemoryOperand callback only.
1237  assert(MI->getOpcode() == TargetOpcode::STATEPOINT && "sanity");
1238       MIB.addImm(StackMaps::IndirectMemRefOp);
1239       MIB.addImm(MFI.getObjectSize(FI));
1240  MIB.addOperand(MI->getOperand(OperIdx));
1241  MIB.addImm(0);
1242  } else {
1243  // direct-mem-ref tag, #FI, offset.
1244  // Used by patchpoint, and direct alloca arguments to statepoints
1245       MIB.addImm(StackMaps::DirectMemRefOp);
1246       MIB.addOperand(MI->getOperand(OperIdx));
1247  MIB.addImm(0);
1248  }
1249  // Copy the operands after the frame index.
1250  for (unsigned i = OperIdx + 1; i != MI->getNumOperands(); ++i)
1251  MIB.addOperand(MI->getOperand(i));
1252 
1253  // Inherit previous memory operands.
1254  MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
1255  assert(MIB->mayLoad() && "Folded a stackmap use to a non-load!");
1256 
1257  // Add a new memory operand for this FI.
1258  assert(MFI.getObjectOffset(FI) != -1);
1259 
1260     auto Flags = MachineMemOperand::MOLoad;
1261     if (MI->getOpcode() == TargetOpcode::STATEPOINT) {
1262       Flags |= MachineMemOperand::MOStore;
1263       Flags |= MachineMemOperand::MOVolatile;
1264     }
1265     MachineMemOperand *MMO = MF.getMachineMemOperand(
1266         MachinePointerInfo::getFixedStack(MF, FI), Flags,
1267         MF.getDataLayout().getPointerSize(), MFI.getObjectAlignment(FI));
1268     MIB->addMemOperand(MF, MMO);
1269 
1270  // Replace the instruction and update the operand index.
1271  MBB->insert(MachineBasicBlock::iterator(MI), MIB);
1272  OperIdx += (MIB->getNumOperands() - MI->getNumOperands()) - 1;
1273  MI->eraseFromParent();
1274  MI = MIB;
1275  }
1276  return MBB;
1277 }
1278 
1279 /// findRepresentativeClass - Return the largest legal super-reg register class
1280 /// of the register class for the specified type and its associated "cost".
1281 // This function is in TargetLowering because it uses RegClassForVT which would
1282 // need to be moved to TargetRegisterInfo and would necessitate moving
1283 // isTypeLegal over as well - a massive change that would just require
1284 // TargetLowering having a TargetRegisterInfo class member that it would use.
1285 std::pair<const TargetRegisterClass *, uint8_t>
1286 TargetLoweringBase::findRepresentativeClass(const TargetRegisterInfo *TRI,
1287                                             MVT VT) const {
1288  const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
1289  if (!RC)
1290  return std::make_pair(RC, 0);
1291 
1292  // Compute the set of all super-register classes.
1293  BitVector SuperRegRC(TRI->getNumRegClasses());
1294  for (SuperRegClassIterator RCI(RC, TRI); RCI.isValid(); ++RCI)
1295  SuperRegRC.setBitsInMask(RCI.getMask());
1296 
1297  // Find the first legal register class with the largest spill size.
1298  const TargetRegisterClass *BestRC = RC;
1299  for (int i = SuperRegRC.find_first(); i >= 0; i = SuperRegRC.find_next(i)) {
1300  const TargetRegisterClass *SuperRC = TRI->getRegClass(i);
1301  // We want the largest possible spill size.
1302  if (SuperRC->getSize() <= BestRC->getSize())
1303  continue;
1304  if (!isLegalRC(SuperRC))
1305  continue;
1306  BestRC = SuperRC;
1307  }
1308  return std::make_pair(BestRC, 1);
1309 }
1310 
1311 /// computeRegisterProperties - Once all of the register classes are added,
1312 /// this allows us to compute derived properties we expose.
1313 void TargetLoweringBase::computeRegisterProperties(
1314     const TargetRegisterInfo *TRI) {
1315   static_assert(MVT::LAST_VALUETYPE <= MVT::MAX_ALLOWED_VALUETYPE,
1316                 "Too many value types for ValueTypeActions to hold!");
1317 
1318  // Everything defaults to needing one register.
1319  for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
1320  NumRegistersForVT[i] = 1;
1321  RegisterTypeForVT[i] = TransformToType[i] = (MVT::SimpleValueType)i;
1322  }
1323  // ...except isVoid, which doesn't need any registers.
1324  NumRegistersForVT[MVT::isVoid] = 0;
1325 
1326  // Find the largest integer register class.
1327  unsigned LargestIntReg = MVT::LAST_INTEGER_VALUETYPE;
1328  for (; RegClassForVT[LargestIntReg] == nullptr; --LargestIntReg)
1329  assert(LargestIntReg != MVT::i1 && "No integer registers defined!");
1330 
1331  // Every integer value type larger than this largest register takes twice as
1332  // many registers to represent as the previous ValueType.
1333  for (unsigned ExpandedReg = LargestIntReg + 1;
1334  ExpandedReg <= MVT::LAST_INTEGER_VALUETYPE; ++ExpandedReg) {
1335  NumRegistersForVT[ExpandedReg] = 2*NumRegistersForVT[ExpandedReg-1];
1336  RegisterTypeForVT[ExpandedReg] = (MVT::SimpleValueType)LargestIntReg;
1337  TransformToType[ExpandedReg] = (MVT::SimpleValueType)(ExpandedReg - 1);
1340  }
1341 
1342  // Inspect all of the ValueType's smaller than the largest integer
1343  // register to see which ones need promotion.
1344  unsigned LegalIntReg = LargestIntReg;
1345  for (unsigned IntReg = LargestIntReg - 1;
1346  IntReg >= (unsigned)MVT::i1; --IntReg) {
1347  MVT IVT = (MVT::SimpleValueType)IntReg;
1348  if (isTypeLegal(IVT)) {
1349  LegalIntReg = IntReg;
1350  } else {
1351  RegisterTypeForVT[IntReg] = TransformToType[IntReg] =
1352  (const MVT::SimpleValueType)LegalIntReg;
1354  }
1355  }
1356 
1357  // ppcf128 type is really two f64's.
1358  if (!isTypeLegal(MVT::ppcf128)) {
1359  if (isTypeLegal(MVT::f64)) {
1360  NumRegistersForVT[MVT::ppcf128] = 2*NumRegistersForVT[MVT::f64];
1361  RegisterTypeForVT[MVT::ppcf128] = MVT::f64;
1362  TransformToType[MVT::ppcf128] = MVT::f64;
1364  } else {
1365  NumRegistersForVT[MVT::ppcf128] = NumRegistersForVT[MVT::i128];
1366  RegisterTypeForVT[MVT::ppcf128] = RegisterTypeForVT[MVT::i128];
1367  TransformToType[MVT::ppcf128] = MVT::i128;
1369  }
1370  }
1371 
1372  // Decide how to handle f128. If the target does not have native f128 support,
1373  // expand it to i128 and we will be generating soft float library calls.
1374  if (!isTypeLegal(MVT::f128)) {
1375  NumRegistersForVT[MVT::f128] = NumRegistersForVT[MVT::i128];
1376  RegisterTypeForVT[MVT::f128] = RegisterTypeForVT[MVT::i128];
1377  TransformToType[MVT::f128] = MVT::i128;
1379  }
1380 
1381  // Decide how to handle f64. If the target does not have native f64 support,
1382  // expand it to i64 and we will be generating soft float library calls.
1383  if (!isTypeLegal(MVT::f64)) {
1384  NumRegistersForVT[MVT::f64] = NumRegistersForVT[MVT::i64];
1385  RegisterTypeForVT[MVT::f64] = RegisterTypeForVT[MVT::i64];
1386  TransformToType[MVT::f64] = MVT::i64;
1388  }
1389 
1390  // Decide how to handle f32. If the target does not have native f32 support,
1391  // expand it to i32 and we will be generating soft float library calls.
1392  if (!isTypeLegal(MVT::f32)) {
1393  NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::i32];
1394  RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::i32];
1395  TransformToType[MVT::f32] = MVT::i32;
1397  }
1398 
1399  // Decide how to handle f16. If the target does not have native f16 support,
1400  // promote it to f32, because there are no f16 library calls (except for
1401  // conversions).
1402  if (!isTypeLegal(MVT::f16)) {
1403  NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::f32];
1404  RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::f32];
1405  TransformToType[MVT::f16] = MVT::f32;
1407  }
1408 
1409  // Loop over all of the vector value types to see which need transformations.
1410  for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
1411        i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
1412     MVT VT = (MVT::SimpleValueType) i;
1413  if (isTypeLegal(VT))
1414  continue;
1415 
1416  MVT EltVT = VT.getVectorElementType();
1417  unsigned NElts = VT.getVectorNumElements();
1418  bool IsLegalWiderType = false;
1419  LegalizeTypeAction PreferredAction = getPreferredVectorAction(VT);
1420  switch (PreferredAction) {
1421  case TypePromoteInteger: {
1422  // Try to promote the elements of integer vectors. If no legal
1423  // promotion was found, fall through to the widen-vector method.
1424  for (unsigned nVT = i + 1; nVT <= MVT::LAST_INTEGER_VECTOR_VALUETYPE; ++nVT) {
1425  MVT SVT = (MVT::SimpleValueType) nVT;
1426  // Promote vectors of integers to vectors with the same number
1427  // of elements, with a wider element type.
1428  if (SVT.getScalarSizeInBits() > EltVT.getSizeInBits() &&
1429  SVT.getVectorNumElements() == NElts && isTypeLegal(SVT)) {
1430  TransformToType[i] = SVT;
1431  RegisterTypeForVT[i] = SVT;
1432  NumRegistersForVT[i] = 1;
1434  IsLegalWiderType = true;
1435  break;
1436  }
1437  }
1438  if (IsLegalWiderType)
1439  break;
1440  }
1441  case TypeWidenVector: {
1442  // Try to widen the vector.
1443  for (unsigned nVT = i + 1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
1444  MVT SVT = (MVT::SimpleValueType) nVT;
1445  if (SVT.getVectorElementType() == EltVT
1446  && SVT.getVectorNumElements() > NElts && isTypeLegal(SVT)) {
1447  TransformToType[i] = SVT;
1448  RegisterTypeForVT[i] = SVT;
1449  NumRegistersForVT[i] = 1;
1451  IsLegalWiderType = true;
1452  break;
1453  }
1454  }
1455  if (IsLegalWiderType)
1456  break;
1457  }
1458  case TypeSplitVector:
1459  case TypeScalarizeVector: {
1460  MVT IntermediateVT;
1461  MVT RegisterVT;
1462  unsigned NumIntermediates;
1463  NumRegistersForVT[i] = getVectorTypeBreakdownMVT(VT, IntermediateVT,
1464  NumIntermediates, RegisterVT, this);
1465  RegisterTypeForVT[i] = RegisterVT;
1466 
1467  MVT NVT = VT.getPow2VectorType();
1468  if (NVT == VT) {
1469  // Type is already a power of 2. The default action is to split.
1470  TransformToType[i] = MVT::Other;
1471         if (PreferredAction == TypeScalarizeVector)
1472           ValueTypeActions.setTypeAction(VT, TypeScalarizeVector);
1473         else if (PreferredAction == TypeSplitVector)
1474           ValueTypeActions.setTypeAction(VT, TypeSplitVector);
1475         else
1476           // Set type action according to the number of elements.
1477           ValueTypeActions.setTypeAction(VT, NumElts == 1 ? TypeScalarizeVector
1478                                                            : TypeSplitVector);
1479       } else {
1480         TransformToType[i] = NVT;
1481         ValueTypeActions.setTypeAction(VT, TypeWidenVector);
1482       }
1483  break;
1484  }
1485  default:
1486  llvm_unreachable("Unknown vector legalization action!");
1487  }
1488  }
1489 
1490  // Determine the 'representative' register class for each value type.
1491  // A representative register class is the largest (meaning one which is
1492  // not a sub-register class / subreg register class) legal register class for
1493  // a group of value types. For example, on i386, i8, i16, and i32
1494  // representative would be GR32; while on x86_64 it's GR64.
1495  for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
1496  const TargetRegisterClass* RRC;
1497  uint8_t Cost;
1498  std::tie(RRC, Cost) = findRepresentativeClass(TRI, (MVT::SimpleValueType)i);
1499  RepRegClassForVT[i] = RRC;
1500  RepRegClassCostForVT[i] = Cost;
1501  }
1502 }
1503 
1504 EVT TargetLoweringBase::getSetCCResultType(const DataLayout &DL, LLVMContext &,
1505                                            EVT VT) const {
1506  assert(!VT.isVector() && "No default SetCC type for vectors!");
1507  return getPointerTy(DL).SimpleTy;
1508 }
1509 
1510 MVT::SimpleValueType TargetLoweringBase::getCmpLibcallReturnType() const {
1511   return MVT::i32; // return the default value
1512 }
1513 
1514 /// getVectorTypeBreakdown - Vector types are broken down into some number of
1515 /// legal first class types. For example, MVT::v8f32 maps to 2 MVT::v4f32
1516 /// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack.
1517 /// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86.
1518 ///
1519 /// This method returns the number of registers needed, and the VT for each
1520 /// register. It also returns the VT and quantity of the intermediate values
1521 /// before they are promoted/expanded.
1522 ///
1523 unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
1524                                                     EVT &IntermediateVT,
1525  unsigned &NumIntermediates,
1526  MVT &RegisterVT) const {
1527  unsigned NumElts = VT.getVectorNumElements();
1528 
1529  // If there is a wider vector type with the same element type as this one,
1530  // or a promoted vector type with the same number of elements but wider
1531  // elements, then we should convert to that legal vector type.
1532  // This handles things like <2 x float> -> <4 x float> and
1533  // <4 x i1> -> <4 x i32>.
1534  LegalizeTypeAction TA = getTypeAction(Context, VT);
1535  if (NumElts != 1 && (TA == TypeWidenVector || TA == TypePromoteInteger)) {
1536  EVT RegisterEVT = getTypeToTransformTo(Context, VT);
1537  if (isTypeLegal(RegisterEVT)) {
1538  IntermediateVT = RegisterEVT;
1539  RegisterVT = RegisterEVT.getSimpleVT();
1540  NumIntermediates = 1;
1541  return 1;
1542  }
1543  }
1544 
1545  // Figure out the right, legal destination reg to copy into.
1546  EVT EltTy = VT.getVectorElementType();
1547 
1548  unsigned NumVectorRegs = 1;
1549 
1550  // FIXME: We don't support non-power-of-2-sized vectors for now. Ideally we
1551  // could break down into LHS/RHS like LegalizeDAG does.
1552  if (!isPowerOf2_32(NumElts)) {
1553  NumVectorRegs = NumElts;
1554  NumElts = 1;
1555  }
1556 
1557  // Divide the input until we get to a supported size. This will always
1558  // end with a scalar if the target doesn't support vectors.
1559  while (NumElts > 1 && !isTypeLegal(
1560  EVT::getVectorVT(Context, EltTy, NumElts))) {
1561  NumElts >>= 1;
1562  NumVectorRegs <<= 1;
1563  }
1564 
1565  NumIntermediates = NumVectorRegs;
1566 
1567  EVT NewVT = EVT::getVectorVT(Context, EltTy, NumElts);
1568  if (!isTypeLegal(NewVT))
1569  NewVT = EltTy;
1570  IntermediateVT = NewVT;
1571 
1572  MVT DestVT = getRegisterType(Context, NewVT);
1573  RegisterVT = DestVT;
1574  unsigned NewVTSize = NewVT.getSizeInBits();
1575 
1576  // Convert sizes such as i33 to i64.
1577  if (!isPowerOf2_32(NewVTSize))
1578  NewVTSize = NextPowerOf2(NewVTSize);
1579 
1580  if (EVT(DestVT).bitsLT(NewVT)) // Value is expanded, e.g. i64 -> i16.
1581  return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits());
1582 
1583  // Otherwise, promotion or legal types use the same number of registers as
1584  // the vector decimated to the appropriate level.
1585  return NumVectorRegs;
1586 }
1587 
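// Illustrative sketch of the breakdown above, assuming a target whose widest
// legal vector type is MVT::v4f32 (an SSE1-class target); the variable names
// are placeholders, not part of this file:
//
//   EVT IntermediateVT;
//   MVT RegisterVT;
//   unsigned NumIntermediates;
//   unsigned NumRegs = TLI.getVectorTypeBreakdown(Ctx, MVT::v8f32,
//                                                 IntermediateVT,
//                                                 NumIntermediates, RegisterVT);
//   // The halving loop stops at v4f32, so NumIntermediates == 2,
//   // IntermediateVT == RegisterVT == MVT::v4f32, and NumRegs == 2.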
1588 /// Get the EVTs and ArgFlags collections that represent the legalized return
1589 /// type of the given function. This does not require a DAG or a return value,
1590 /// and is suitable for use before any DAGs for the function are constructed.
1591 /// TODO: Move this out of TargetLowering.cpp.
1592 void llvm::GetReturnInfo(Type *ReturnType, AttributeSet attr,
1593  SmallVectorImpl<ISD::OutputArg> &Outs,
1594  const TargetLowering &TLI, const DataLayout &DL) {
1595  SmallVector<EVT, 4> ValueVTs;
1596  ComputeValueVTs(TLI, DL, ReturnType, ValueVTs);
1597  unsigned NumValues = ValueVTs.size();
1598  if (NumValues == 0) return;
1599 
1600  for (unsigned j = 0, f = NumValues; j != f; ++j) {
1601  EVT VT = ValueVTs[j];
1602  ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
1603 
1604  if (attr.hasAttribute(AttributeSet::ReturnIndex, Attribute::SExt))
1605  ExtendKind = ISD::SIGN_EXTEND;
1606  else if (attr.hasAttribute(AttributeSet::ReturnIndex, Attribute::ZExt))
1607  ExtendKind = ISD::ZERO_EXTEND;
1608 
1609  // FIXME: C calling convention requires the return type to be promoted to
1610  // at least 32-bit. But this is not necessary for non-C calling
1611  // conventions. The frontend should mark functions whose return values
1612  // require promoting with signext or zeroext attributes.
1613  if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
1614  MVT MinVT = TLI.getRegisterType(ReturnType->getContext(), MVT::i32);
1615  if (VT.bitsLT(MinVT))
1616  VT = MinVT;
1617  }
1618 
1619  unsigned NumParts = TLI.getNumRegisters(ReturnType->getContext(), VT);
1620  MVT PartVT = TLI.getRegisterType(ReturnType->getContext(), VT);
1621 
1622  // 'inreg' on function refers to return value
1623  ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
1624  if (attr.hasAttribute(AttributeSet::ReturnIndex, Attribute::InReg))
1625  Flags.setInReg();
1626 
1627  // Propagate extension type if any
1628  if (attr.hasAttribute(AttributeSet::ReturnIndex, Attribute::SExt))
1629  Flags.setSExt();
1630  else if (attr.hasAttribute(AttributeSet::ReturnIndex, Attribute::ZExt))
1631  Flags.setZExt();
1632 
1633  for (unsigned i = 0; i < NumParts; ++i)
1634  Outs.push_back(ISD::OutputArg(Flags, PartVT, VT, /*isFixed=*/true, 0, 0));
1635  }
1636 }
1637 
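// Illustrative effect of GetReturnInfo, assuming a typical 32-bit target
// where getRegisterType(Ctx, MVT::i32) == MVT::i32: for a function declared
// as
//
//   define signext i8 @f()
//
// the single value VT is i8, the 'signext' attribute selects
// ISD::SIGN_EXTEND, the promotion noted in the FIXME above widens VT to i32,
// and one ISD::OutputArg with PartVT == MVT::i32 and the SExt flag set is
// appended to Outs.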
1638 /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
1639 /// function arguments in the caller parameter area. This is the actual
1640 /// alignment, not its logarithm.
1641 unsigned TargetLoweringBase::getByValTypeAlignment(Type *Ty,
1642  const DataLayout &DL) const {
1643  return DL.getABITypeAlignment(Ty);
1644 }
1645 
1646 bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
1647  const DataLayout &DL, EVT VT,
1648  unsigned AddrSpace,
1649  unsigned Alignment,
1650  bool *Fast) const {
1651  // Check if the specified alignment is sufficient based on the data layout.
1652  // TODO: While using the data layout works in practice, a better solution
1653  // would be to implement this check directly (make this a virtual function).
1654  // For example, the ABI alignment may change based on software platform while
1655  // this function should only be affected by hardware implementation.
1656  Type *Ty = VT.getTypeForEVT(Context);
1657  if (Alignment >= DL.getABITypeAlignment(Ty)) {
1658  // Assume that an access that meets the ABI-specified alignment is fast.
1659  if (Fast != nullptr)
1660  *Fast = true;
1661  return true;
1662  }
1663 
1664  // This is a misaligned access.
1665  return allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment, Fast);
1666 }
1667 
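// Minimal usage sketch, with made-up variable names, showing the fast path
// above: an access that already satisfies the ABI alignment for its type
// never reaches allowsMisalignedMemoryAccesses().
//
//   bool Fast = false;
//   bool OK = TLI.allowsMemoryAccess(Ctx, DL, MVT::i32, /*AddrSpace=*/0,
//                                    /*Alignment=*/4, &Fast);
//   // On targets where the ABI alignment of i32 is 4 bytes, OK and Fast are
//   // both true here; with Alignment == 1 the answer instead comes from the
//   // target's allowsMisalignedMemoryAccesses() hook.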
1668 BranchProbability TargetLoweringBase::getPredictableBranchThreshold() const {
1669  return BranchProbability(MinPercentageForPredictableBranch, 100);
1670 }
1671 
1672 //===----------------------------------------------------------------------===//
1673 // TargetTransformInfo Helpers
1674 //===----------------------------------------------------------------------===//
1675 
1676 int TargetLoweringBase::InstructionOpcodeToISD(unsigned Opcode) const {
1677  enum InstructionOpcodes {
1678 #define HANDLE_INST(NUM, OPCODE, CLASS) OPCODE = NUM,
1679 #define LAST_OTHER_INST(NUM) InstructionOpcodesCount = NUM
1680 #include "llvm/IR/Instruction.def"
1681  };
1682  switch (static_cast<InstructionOpcodes>(Opcode)) {
1683  case Ret: return 0;
1684  case Br: return 0;
1685  case Switch: return 0;
1686  case IndirectBr: return 0;
1687  case Invoke: return 0;
1688  case Resume: return 0;
1689  case Unreachable: return 0;
1690  case CleanupRet: return 0;
1691  case CatchRet: return 0;
1692  case CatchPad: return 0;
1693  case CatchSwitch: return 0;
1694  case CleanupPad: return 0;
1695  case Add: return ISD::ADD;
1696  case FAdd: return ISD::FADD;
1697  case Sub: return ISD::SUB;
1698  case FSub: return ISD::FSUB;
1699  case Mul: return ISD::MUL;
1700  case FMul: return ISD::FMUL;
1701  case UDiv: return ISD::UDIV;
1702  case SDiv: return ISD::SDIV;
1703  case FDiv: return ISD::FDIV;
1704  case URem: return ISD::UREM;
1705  case SRem: return ISD::SREM;
1706  case FRem: return ISD::FREM;
1707  case Shl: return ISD::SHL;
1708  case LShr: return ISD::SRL;
1709  case AShr: return ISD::SRA;
1710  case And: return ISD::AND;
1711  case Or: return ISD::OR;
1712  case Xor: return ISD::XOR;
1713  case Alloca: return 0;
1714  case Load: return ISD::LOAD;
1715  case Store: return ISD::STORE;
1716  case GetElementPtr: return 0;
1717  case Fence: return 0;
1718  case AtomicCmpXchg: return 0;
1719  case AtomicRMW: return 0;
1720  case Trunc: return ISD::TRUNCATE;
1721  case ZExt: return ISD::ZERO_EXTEND;
1722  case SExt: return ISD::SIGN_EXTEND;
1723  case FPToUI: return ISD::FP_TO_UINT;
1724  case FPToSI: return ISD::FP_TO_SINT;
1725  case UIToFP: return ISD::UINT_TO_FP;
1726  case SIToFP: return ISD::SINT_TO_FP;
1727  case FPTrunc: return ISD::FP_ROUND;
1728  case FPExt: return ISD::FP_EXTEND;
1729  case PtrToInt: return ISD::BITCAST;
1730  case IntToPtr: return ISD::BITCAST;
1731  case BitCast: return ISD::BITCAST;
1732  case AddrSpaceCast: return ISD::ADDRSPACECAST;
1733  case ICmp: return ISD::SETCC;
1734  case FCmp: return ISD::SETCC;
1735  case PHI: return 0;
1736  case Call: return 0;
1737  case Select: return ISD::SELECT;
1738  case UserOp1: return 0;
1739  case UserOp2: return 0;
1740  case VAArg: return 0;
1741  case ExtractElement: return ISD::EXTRACT_VECTOR_ELT;
1742  case InsertElement: return ISD::INSERT_VECTOR_ELT;
1743  case ShuffleVector: return ISD::VECTOR_SHUFFLE;
1744  case ExtractValue: return ISD::MERGE_VALUES;
1745  case InsertValue: return ISD::MERGE_VALUES;
1746  case LandingPad: return 0;
1747  }
1748 
1749  llvm_unreachable("Unknown instruction type encountered!");
1750 }
1751 
1752 std::pair<int, MVT>
1753 TargetLoweringBase::getTypeLegalizationCost(const DataLayout &DL,
1754  Type *Ty) const {
1755  LLVMContext &C = Ty->getContext();
1756  EVT MTy = getValueType(DL, Ty);
1757 
1758  int Cost = 1;
1759  // We keep legalizing the type until we find a legal kind. We assume that
1760  // the only operation that costs anything is the split. After splitting
1761  // we need to handle two types.
1762  while (true) {
1763  LegalizeKind LK = getTypeConversion(C, MTy);
1764 
1765  if (LK.first == TypeLegal)
1766  return std::make_pair(Cost, MTy.getSimpleVT());
1767 
1768  if (LK.first == TypeSplitVector || LK.first == TypeExpandInteger)
1769  Cost *= 2;
1770 
1771  // Do not loop with f128 type.
1772  if (MTy == LK.second)
1773  return std::make_pair(Cost, MTy.getSimpleVT());
1774 
1775  // Keep legalizing the type.
1776  MTy = LK.second;
1777  }
1778 }
1779 
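// Worked example under an assumed target where the widest legal integer
// vector is MVT::v2i64: legalizing v8i64 splits twice (v8i64 -> v4i64 ->
// v2i64), so the loop above doubles the cost twice and returns 4 together
// with MVT::v2i64. The type names below are placeholders.
//
//   std::pair<int, MVT> LT =
//       TLI.getTypeLegalizationCost(DL, VectorType::get(Int64Ty, 8));
//   // LT.first == 4, LT.second == MVT::v2i64 under that assumption.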
1780 Value *TargetLoweringBase::getDefaultSafeStackPointerLocation(IRBuilder<> &IRB,
1781  bool UseTLS) const {
1782  // compiler-rt provides a variable with a magic name. Targets that do not
1783  // link with compiler-rt may also provide such a variable.
1784  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
1785  const char *UnsafeStackPtrVar = "__safestack_unsafe_stack_ptr";
1786  auto UnsafeStackPtr =
1787  dyn_cast_or_null<GlobalVariable>(M->getNamedValue(UnsafeStackPtrVar));
1788 
1789  Type *StackPtrTy = Type::getInt8PtrTy(M->getContext());
1790 
1791  if (!UnsafeStackPtr) {
1792  auto TLSModel = UseTLS ?
1793  GlobalValue::InitialExecTLSModel :
1794  GlobalValue::NotThreadLocal;
1795  // The global variable is not defined yet, define it ourselves.
1796  // We use the initial-exec TLS model because we do not support the
1797  // variable living anywhere other than in the main executable.
1798  UnsafeStackPtr = new GlobalVariable(
1799  *M, StackPtrTy, false, GlobalValue::ExternalLinkage, nullptr,
1800  UnsafeStackPtrVar, nullptr, TLSModel);
1801  } else {
1802  // The variable exists, check its type and attributes.
1803  if (UnsafeStackPtr->getValueType() != StackPtrTy)
1804  report_fatal_error(Twine(UnsafeStackPtrVar) + " must have void* type");
1805  if (UseTLS != UnsafeStackPtr->isThreadLocal())
1806  report_fatal_error(Twine(UnsafeStackPtrVar) + " must " +
1807  (UseTLS ? "" : "not ") + "be thread-local");
1808  }
1809  return UnsafeStackPtr;
1810 }
1811 
1812 Value *TargetLoweringBase::getSafeStackPointerLocation(IRBuilder<> &IRB) const {
1813  if (!TM.getTargetTriple().isAndroid())
1814  return getDefaultSafeStackPointerLocation(IRB, true);
1815 
1816  // Android provides a libc function to retrieve the address of the current
1817  // thread's unsafe stack pointer.
1818  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
1819  Type *StackPtrTy = Type::getInt8PtrTy(M->getContext());
1820  Value *Fn = M->getOrInsertFunction("__safestack_pointer_address",
1821  StackPtrTy->getPointerTo(0), nullptr);
1822  return IRB.CreateCall(Fn);
1823 }
1824 
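// For reference, the call created above lowers to IR of roughly this shape
// (the result name is arbitrary):
//
//   %unsafe_stack_ptr = call i8** @__safestack_pointer_address()
//
// i.e. the libc helper returns the address of the current thread's unsafe
// stack pointer rather than the pointer value itself.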
1825 //===----------------------------------------------------------------------===//
1826 // Loop Strength Reduction hooks
1827 //===----------------------------------------------------------------------===//
1828 
1829 /// isLegalAddressingMode - Return true if the addressing mode represented
1830 /// by AM is legal for this target, for a load/store of the specified type.
1831 bool TargetLoweringBase::isLegalAddressingMode(const DataLayout &DL,
1832  const AddrMode &AM, Type *Ty,
1833  unsigned AS) const {
1834  // The default implementation supports a conservative RISC-style addressing
1835  // mode: r+r and r+i.
1836 
1837  // Allows a sign-extended 16-bit immediate field.
1838  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
1839  return false;
1840 
1841  // No global is ever allowed as a base.
1842  if (AM.BaseGV)
1843  return false;
1844 
1845  // Only support r+r,
1846  switch (AM.Scale) {
1847  case 0: // "r+i" or just "i", depending on HasBaseReg.
1848  break;
1849  case 1:
1850  if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed.
1851  return false;
1852  // Otherwise we have r+r or r+i.
1853  break;
1854  case 2:
1855  if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed.
1856  return false;
1857  // Allow 2*r as r+r.
1858  break;
1859  default: // Don't allow n * r
1860  return false;
1861  }
1862 
1863  return true;
1864 }
1865 
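// Illustrative queries against the conservative default above; TLI, DL, and
// Ty are placeholders for a lowering object, data layout, and access type:
//
//   TargetLoweringBase::AddrMode AM;
//   AM.HasBaseReg = true; AM.BaseOffs = 8; AM.Scale = 0;
//   TLI.isLegalAddressingMode(DL, AM, Ty, /*AS=*/0);   // r+i   -> true
//   AM.Scale = 1; AM.BaseOffs = 0;
//   TLI.isLegalAddressingMode(DL, AM, Ty, /*AS=*/0);   // r+r   -> true
//   AM.Scale = 1; AM.BaseOffs = 8;
//   TLI.isLegalAddressingMode(DL, AM, Ty, /*AS=*/0);   // r+r+i -> false
//   AM.Scale = 2; AM.HasBaseReg = false; AM.BaseOffs = 0;
//   TLI.isLegalAddressingMode(DL, AM, Ty, /*AS=*/0);   // 2*r   -> true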
1866 //===----------------------------------------------------------------------===//
1867 // Stack Protector
1868 //===----------------------------------------------------------------------===//
1869 
1870 // For OpenBSD return its special guard variable. Otherwise return nullptr,
1871 // so that SelectionDAG can handle SSP.
1872 Value *TargetLoweringBase::getIRStackGuard(IRBuilder<> &IRB) const {
1873  if (getTargetMachine().getTargetTriple().isOSOpenBSD()) {
1874  Module &M = *IRB.GetInsertBlock()->getParent()->getParent();
1875  PointerType *PtrTy = Type::getInt8PtrTy(M.getContext());
1876  return M.getOrInsertGlobal("__guard_local", PtrTy);
1877  }
1878  return nullptr;
1879 }
1880 
1881 // Currently only support "standard" __stack_chk_guard.
1882 // TODO: add LOAD_STACK_GUARD support.
1883 void TargetLoweringBase::insertSSPDeclarations(Module &M) const {
1884  M.getOrInsertGlobal("__stack_chk_guard", Type::getInt8PtrTy(M.getContext()));
1885 }
1886 
1887 // Currently only support "standard" __stack_chk_guard.
1888 // TODO: add LOAD_STACK_GUARD support.
1889 Value *TargetLoweringBase::getSDagStackGuard(const Module &M) const {
1890  return M.getGlobalVariable("__stack_chk_guard", true);
1891 }
1892 
1893 Value *TargetLoweringBase::getSSPStackGuardCheck(const Module &M) const {
1894  return nullptr;
1895 }
1896 
1897 unsigned TargetLoweringBase::getMinimumJumpTableEntries() const {
1898  return MinimumJumpTableEntries;
1899 }
1900 
1901 void TargetLoweringBase::setMinimumJumpTableEntries(unsigned Val) {
1902  MinimumJumpTableEntries = Val;
1903 }
1904 
1905 unsigned TargetLoweringBase::getMaximumJumpTableSize() const {
1906  return MaximumJumpTableSize;
1907 }
1908 
1909 void TargetLoweringBase::setMaximumJumpTableSize(unsigned Val) {
1910  MaximumJumpTableSize = Val;
1911 }
1912 
1913 //===----------------------------------------------------------------------===//
1914 // Reciprocal Estimates
1915 //===----------------------------------------------------------------------===//
1916 
1917 /// Get the reciprocal estimate attribute string for a function that will
1918 /// override the target defaults.
1919 static StringRef getRecipEstimateForFunc(MachineFunction &MF) {
1920  const Function *F = MF.getFunction();
1921  StringRef RecipAttrName = "reciprocal-estimates";
1922  if (!F->hasFnAttribute(RecipAttrName))
1923  return StringRef();
1924 
1925  return F->getFnAttribute(RecipAttrName).getValueAsString();
1926 }
1927 
1928 /// Construct a string for the given reciprocal operation of the given type.
1929 /// This string should match the corresponding option to the front-end's
1930 /// "-mrecip" flag assuming those strings have been passed through in an
1931 /// attribute string. For example, "vec-divf" for a division of a vXf32.
1932 static std::string getReciprocalOpName(bool IsSqrt, EVT VT) {
1933  std::string Name = VT.isVector() ? "vec-" : "";
1934 
1935  Name += IsSqrt ? "sqrt" : "div";
1936 
1937  // TODO: Handle "half" or other float types?
1938  if (VT.getScalarType() == MVT::f64) {
1939  Name += "d";
1940  } else {
1941  assert(VT.getScalarType() == MVT::f32 &&
1942  "Unexpected FP type for reciprocal estimate");
1943  Name += "f";
1944  }
1945 
1946  return Name;
1947 }
1948 
1949 /// Return the character position and value (a single numeric character) of a
1950 /// customized refinement operation in the input string if it exists. Return
1951 /// false if there is no customized refinement step count.
1952 static bool parseRefinementStep(StringRef In, size_t &Position,
1953  uint8_t &Value) {
1954  const char RefStepToken = ':';
1955  Position = In.find(RefStepToken);
1956  if (Position == StringRef::npos)
1957  return false;
1958 
1959  StringRef RefStepString = In.substr(Position + 1);
1960  // Allow exactly one numeric character for the additional refinement
1961  // step parameter.
1962  if (RefStepString.size() == 1) {
1963  char RefStepChar = RefStepString[0];
1964  if (RefStepChar >= '0' && RefStepChar <= '9') {
1965  Value = RefStepChar - '0';
1966  return true;
1967  }
1968  }
1969  report_fatal_error("Invalid refinement step for -recip.");
1970 }
1971 
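// Example, using the "-mrecip" token syntax described above (the variable
// names are placeholders):
//
//   size_t Pos; uint8_t Steps;
//   parseRefinementStep("vec-divf:2", Pos, Steps); // true, Pos == 8, Steps == 2
//   parseRefinementStep("vec-divf", Pos, Steps);   // false, no ':' present
//   // A malformed step such as "divf:x" aborts via report_fatal_error.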
1972 /// For the input attribute string, return one of the ReciprocalEstimate enum
1973 /// status values (enabled, disabled, or not specified) for this operation on
1974 /// the specified data type.
1975 static int getOpEnabled(bool IsSqrt, EVT VT, StringRef Override) {
1976  if (Override.empty())
1977  return TargetLoweringBase::ReciprocalEstimate::Unspecified;
1978 
1979  SmallVector<StringRef, 4> OverrideVector;
1980  SplitString(Override, OverrideVector, ",");
1981  unsigned NumArgs = OverrideVector.size();
1982 
1983  // Check if "all", "none", or "default" was specified.
1984  if (NumArgs == 1) {
1985  // Look for an optional setting of the number of refinement steps needed
1986  // for this type of reciprocal operation.
1987  size_t RefPos;
1988  uint8_t RefSteps;
1989  if (parseRefinementStep(Override, RefPos, RefSteps)) {
1990  // Split the string for further processing.
1991  Override = Override.substr(0, RefPos);
1992  }
1993 
1994  // All reciprocal types are enabled.
1995  if (Override == "all")
1996  return TargetLoweringBase::ReciprocalEstimate::Enabled;
1997 
1998  // All reciprocal types are disabled.
1999  if (Override == "none")
2000  return TargetLoweringBase::ReciprocalEstimate::Disabled;
2001 
2002  // Target defaults for enablement are used.
2003  if (Override == "default")
2004  return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2005  }
2006 
2007  // The attribute string may omit the size suffix ('f'/'d').
2008  std::string VTName = getReciprocalOpName(IsSqrt, VT);
2009  std::string VTNameNoSize = VTName;
2010  VTNameNoSize.pop_back();
2011  static const char DisabledPrefix = '!';
2012 
2013  for (StringRef RecipType : OverrideVector) {
2014  size_t RefPos;
2015  uint8_t RefSteps;
2016  if (parseRefinementStep(RecipType, RefPos, RefSteps))
2017  RecipType = RecipType.substr(0, RefPos);
2018 
2019  // Ignore the disablement token for string matching.
2020  bool IsDisabled = RecipType[0] == DisabledPrefix;
2021  if (IsDisabled)
2022  RecipType = RecipType.substr(1);
2023 
2024  if (RecipType.equals(VTName) || RecipType.equals(VTNameNoSize))
2025  return IsDisabled ? TargetLoweringBase::ReciprocalEstimate::Disabled
2026  : TargetLoweringBase::ReciprocalEstimate::Enabled;
2027  }
2028 
2029  return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2030 }
2031 
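// Sketch of how attribute strings are interpreted by the matching above,
// assuming f32 and v4f32 operands:
//
//   getOpEnabled(false, MVT::f32,   "!divf,vec-divf:2"); // Disabled ("!divf")
//   getOpEnabled(false, MVT::v4f32, "!divf,vec-divf:2"); // Enabled ("vec-divf")
//   getOpEnabled(true,  MVT::f32,   "all:1");            // Enabled ("all")
//   getOpEnabled(true,  MVT::f64,   "sqrtf");            // Unspecified (no match)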
2032 /// For the input attribute string, return the customized refinement step count
2033 /// for this operation on the specified data type. If the step count does not
2034 /// exist, return the ReciprocalEstimate enum value for unspecified.
2035 static int getOpRefinementSteps(bool IsSqrt, EVT VT, StringRef Override) {
2036  if (Override.empty())
2037  return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2038 
2039  SmallVector<StringRef, 4> OverrideVector;
2040  SplitString(Override, OverrideVector, ",");
2041  unsigned NumArgs = OverrideVector.size();
2042 
2043  // Check if "all", "default", or "none" was specified.
2044  if (NumArgs == 1) {
2045  // Look for an optional setting of the number of refinement steps needed
2046  // for this type of reciprocal operation.
2047  size_t RefPos;
2048  uint8_t RefSteps;
2049  if (!parseRefinementStep(Override, RefPos, RefSteps))
2050  return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2051 
2052  // Split the string for further processing.
2053  Override = Override.substr(0, RefPos);
2054  assert(Override != "none" &&
2055  "Disabled reciprocals, but specified refinement steps?");
2056 
2057  // If this is a general override, return the specified number of steps.
2058  if (Override == "all" || Override == "default")
2059  return RefSteps;
2060  }
2061 
2062  // The attribute string may omit the size suffix ('f'/'d').
2063  std::string VTName = getReciprocalOpName(IsSqrt, VT);
2064  std::string VTNameNoSize = VTName;
2065  VTNameNoSize.pop_back();
2066 
2067  for (StringRef RecipType : OverrideVector) {
2068  size_t RefPos;
2069  uint8_t RefSteps;
2070  if (!parseRefinementStep(RecipType, RefPos, RefSteps))
2071  continue;
2072 
2073  RecipType = RecipType.substr(0, RefPos);
2074  if (RecipType.equals(VTName) || RecipType.equals(VTNameNoSize))
2075  return RefSteps;
2076  }
2077 
2078  return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2079 }
2080 
2081 int TargetLoweringBase::getRecipEstimateSqrtEnabled(EVT VT,
2082  MachineFunction &MF) const {
2083  return getOpEnabled(true, VT, getRecipEstimateForFunc(MF));
2084 }
2085 
2086 int TargetLoweringBase::getRecipEstimateDivEnabled(EVT VT,
2087  MachineFunction &MF) const {
2088  return getOpEnabled(false, VT, getRecipEstimateForFunc(MF));
2089 }
2090 
2091 int TargetLoweringBase::getSqrtRefinementSteps(EVT VT,
2092  MachineFunction &MF) const {
2093  return getOpRefinementSteps(true, VT, getRecipEstimateForFunc(MF));
2094 }
2095 
2096 int TargetLoweringBase::getDivRefinementSteps(EVT VT,
2097  MachineFunction &MF) const {
2098  return getOpRefinementSteps(false, VT, getRecipEstimateForFunc(MF));
2099 }