LLVM 3.7.0
TargetLoweringBase.cpp
1 //===-- TargetLoweringBase.cpp - Implement the TargetLoweringBase class ---===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This implements the TargetLoweringBase class.
11 //
12 //===----------------------------------------------------------------------===//
13 
15 #include "llvm/ADT/BitVector.h"
16 #include "llvm/ADT/STLExtras.h"
17 #include "llvm/ADT/Triple.h"
18 #include "llvm/CodeGen/Analysis.h"
23 #include "llvm/CodeGen/StackMaps.h"
24 #include "llvm/IR/DataLayout.h"
25 #include "llvm/IR/DerivedTypes.h"
26 #include "llvm/IR/GlobalVariable.h"
27 #include "llvm/IR/Mangler.h"
28 #include "llvm/MC/MCAsmInfo.h"
29 #include "llvm/MC/MCContext.h"
30 #include "llvm/MC/MCExpr.h"
38 #include <cctype>
39 using namespace llvm;
40 
41 static cl::opt<bool> JumpIsExpensiveOverride(
42  "jump-is-expensive", cl::init(false),
43  cl::desc("Do not create extra branches to split comparison logic."),
44  cl::Hidden);
45 
46 /// InitLibcallNames - Set default libcall names.
47 ///
48 static void InitLibcallNames(const char **Names, const Triple &TT) {
49  Names[RTLIB::SHL_I16] = "__ashlhi3";
50  Names[RTLIB::SHL_I32] = "__ashlsi3";
51  Names[RTLIB::SHL_I64] = "__ashldi3";
52  Names[RTLIB::SHL_I128] = "__ashlti3";
53  Names[RTLIB::SRL_I16] = "__lshrhi3";
54  Names[RTLIB::SRL_I32] = "__lshrsi3";
55  Names[RTLIB::SRL_I64] = "__lshrdi3";
56  Names[RTLIB::SRL_I128] = "__lshrti3";
57  Names[RTLIB::SRA_I16] = "__ashrhi3";
58  Names[RTLIB::SRA_I32] = "__ashrsi3";
59  Names[RTLIB::SRA_I64] = "__ashrdi3";
60  Names[RTLIB::SRA_I128] = "__ashrti3";
61  Names[RTLIB::MUL_I8] = "__mulqi3";
62  Names[RTLIB::MUL_I16] = "__mulhi3";
63  Names[RTLIB::MUL_I32] = "__mulsi3";
64  Names[RTLIB::MUL_I64] = "__muldi3";
65  Names[RTLIB::MUL_I128] = "__multi3";
66  Names[RTLIB::MULO_I32] = "__mulosi4";
67  Names[RTLIB::MULO_I64] = "__mulodi4";
68  Names[RTLIB::MULO_I128] = "__muloti4";
69  Names[RTLIB::SDIV_I8] = "__divqi3";
70  Names[RTLIB::SDIV_I16] = "__divhi3";
71  Names[RTLIB::SDIV_I32] = "__divsi3";
72  Names[RTLIB::SDIV_I64] = "__divdi3";
73  Names[RTLIB::SDIV_I128] = "__divti3";
74  Names[RTLIB::UDIV_I8] = "__udivqi3";
75  Names[RTLIB::UDIV_I16] = "__udivhi3";
76  Names[RTLIB::UDIV_I32] = "__udivsi3";
77  Names[RTLIB::UDIV_I64] = "__udivdi3";
78  Names[RTLIB::UDIV_I128] = "__udivti3";
79  Names[RTLIB::SREM_I8] = "__modqi3";
80  Names[RTLIB::SREM_I16] = "__modhi3";
81  Names[RTLIB::SREM_I32] = "__modsi3";
82  Names[RTLIB::SREM_I64] = "__moddi3";
83  Names[RTLIB::SREM_I128] = "__modti3";
84  Names[RTLIB::UREM_I8] = "__umodqi3";
85  Names[RTLIB::UREM_I16] = "__umodhi3";
86  Names[RTLIB::UREM_I32] = "__umodsi3";
87  Names[RTLIB::UREM_I64] = "__umoddi3";
88  Names[RTLIB::UREM_I128] = "__umodti3";
89 
90  // These are generally not available.
91  Names[RTLIB::SDIVREM_I8] = nullptr;
92  Names[RTLIB::SDIVREM_I16] = nullptr;
93  Names[RTLIB::SDIVREM_I32] = nullptr;
94  Names[RTLIB::SDIVREM_I64] = nullptr;
95  Names[RTLIB::SDIVREM_I128] = nullptr;
96  Names[RTLIB::UDIVREM_I8] = nullptr;
97  Names[RTLIB::UDIVREM_I16] = nullptr;
98  Names[RTLIB::UDIVREM_I32] = nullptr;
99  Names[RTLIB::UDIVREM_I64] = nullptr;
100  Names[RTLIB::UDIVREM_I128] = nullptr;
101 
102  Names[RTLIB::NEG_I32] = "__negsi2";
103  Names[RTLIB::NEG_I64] = "__negdi2";
104  Names[RTLIB::ADD_F32] = "__addsf3";
105  Names[RTLIB::ADD_F64] = "__adddf3";
106  Names[RTLIB::ADD_F80] = "__addxf3";
107  Names[RTLIB::ADD_F128] = "__addtf3";
108  Names[RTLIB::ADD_PPCF128] = "__gcc_qadd";
109  Names[RTLIB::SUB_F32] = "__subsf3";
110  Names[RTLIB::SUB_F64] = "__subdf3";
111  Names[RTLIB::SUB_F80] = "__subxf3";
112  Names[RTLIB::SUB_F128] = "__subtf3";
113  Names[RTLIB::SUB_PPCF128] = "__gcc_qsub";
114  Names[RTLIB::MUL_F32] = "__mulsf3";
115  Names[RTLIB::MUL_F64] = "__muldf3";
116  Names[RTLIB::MUL_F80] = "__mulxf3";
117  Names[RTLIB::MUL_F128] = "__multf3";
118  Names[RTLIB::MUL_PPCF128] = "__gcc_qmul";
119  Names[RTLIB::DIV_F32] = "__divsf3";
120  Names[RTLIB::DIV_F64] = "__divdf3";
121  Names[RTLIB::DIV_F80] = "__divxf3";
122  Names[RTLIB::DIV_F128] = "__divtf3";
123  Names[RTLIB::DIV_PPCF128] = "__gcc_qdiv";
124  Names[RTLIB::REM_F32] = "fmodf";
125  Names[RTLIB::REM_F64] = "fmod";
126  Names[RTLIB::REM_F80] = "fmodl";
127  Names[RTLIB::REM_F128] = "fmodl";
128  Names[RTLIB::REM_PPCF128] = "fmodl";
129  Names[RTLIB::FMA_F32] = "fmaf";
130  Names[RTLIB::FMA_F64] = "fma";
131  Names[RTLIB::FMA_F80] = "fmal";
132  Names[RTLIB::FMA_F128] = "fmal";
133  Names[RTLIB::FMA_PPCF128] = "fmal";
134  Names[RTLIB::POWI_F32] = "__powisf2";
135  Names[RTLIB::POWI_F64] = "__powidf2";
136  Names[RTLIB::POWI_F80] = "__powixf2";
137  Names[RTLIB::POWI_F128] = "__powitf2";
138  Names[RTLIB::POWI_PPCF128] = "__powitf2";
139  Names[RTLIB::SQRT_F32] = "sqrtf";
140  Names[RTLIB::SQRT_F64] = "sqrt";
141  Names[RTLIB::SQRT_F80] = "sqrtl";
142  Names[RTLIB::SQRT_F128] = "sqrtl";
143  Names[RTLIB::SQRT_PPCF128] = "sqrtl";
144  Names[RTLIB::LOG_F32] = "logf";
145  Names[RTLIB::LOG_F64] = "log";
146  Names[RTLIB::LOG_F80] = "logl";
147  Names[RTLIB::LOG_F128] = "logl";
148  Names[RTLIB::LOG_PPCF128] = "logl";
149  Names[RTLIB::LOG2_F32] = "log2f";
150  Names[RTLIB::LOG2_F64] = "log2";
151  Names[RTLIB::LOG2_F80] = "log2l";
152  Names[RTLIB::LOG2_F128] = "log2l";
153  Names[RTLIB::LOG2_PPCF128] = "log2l";
154  Names[RTLIB::LOG10_F32] = "log10f";
155  Names[RTLIB::LOG10_F64] = "log10";
156  Names[RTLIB::LOG10_F80] = "log10l";
157  Names[RTLIB::LOG10_F128] = "log10l";
158  Names[RTLIB::LOG10_PPCF128] = "log10l";
159  Names[RTLIB::EXP_F32] = "expf";
160  Names[RTLIB::EXP_F64] = "exp";
161  Names[RTLIB::EXP_F80] = "expl";
162  Names[RTLIB::EXP_F128] = "expl";
163  Names[RTLIB::EXP_PPCF128] = "expl";
164  Names[RTLIB::EXP2_F32] = "exp2f";
165  Names[RTLIB::EXP2_F64] = "exp2";
166  Names[RTLIB::EXP2_F80] = "exp2l";
167  Names[RTLIB::EXP2_F128] = "exp2l";
168  Names[RTLIB::EXP2_PPCF128] = "exp2l";
169  Names[RTLIB::SIN_F32] = "sinf";
170  Names[RTLIB::SIN_F64] = "sin";
171  Names[RTLIB::SIN_F80] = "sinl";
172  Names[RTLIB::SIN_F128] = "sinl";
173  Names[RTLIB::SIN_PPCF128] = "sinl";
174  Names[RTLIB::COS_F32] = "cosf";
175  Names[RTLIB::COS_F64] = "cos";
176  Names[RTLIB::COS_F80] = "cosl";
177  Names[RTLIB::COS_F128] = "cosl";
178  Names[RTLIB::COS_PPCF128] = "cosl";
179  Names[RTLIB::POW_F32] = "powf";
180  Names[RTLIB::POW_F64] = "pow";
181  Names[RTLIB::POW_F80] = "powl";
182  Names[RTLIB::POW_F128] = "powl";
183  Names[RTLIB::POW_PPCF128] = "powl";
184  Names[RTLIB::CEIL_F32] = "ceilf";
185  Names[RTLIB::CEIL_F64] = "ceil";
186  Names[RTLIB::CEIL_F80] = "ceill";
187  Names[RTLIB::CEIL_F128] = "ceill";
188  Names[RTLIB::CEIL_PPCF128] = "ceill";
189  Names[RTLIB::TRUNC_F32] = "truncf";
190  Names[RTLIB::TRUNC_F64] = "trunc";
191  Names[RTLIB::TRUNC_F80] = "truncl";
192  Names[RTLIB::TRUNC_F128] = "truncl";
193  Names[RTLIB::TRUNC_PPCF128] = "truncl";
194  Names[RTLIB::RINT_F32] = "rintf";
195  Names[RTLIB::RINT_F64] = "rint";
196  Names[RTLIB::RINT_F80] = "rintl";
197  Names[RTLIB::RINT_F128] = "rintl";
198  Names[RTLIB::RINT_PPCF128] = "rintl";
199  Names[RTLIB::NEARBYINT_F32] = "nearbyintf";
200  Names[RTLIB::NEARBYINT_F64] = "nearbyint";
201  Names[RTLIB::NEARBYINT_F80] = "nearbyintl";
202  Names[RTLIB::NEARBYINT_F128] = "nearbyintl";
203  Names[RTLIB::NEARBYINT_PPCF128] = "nearbyintl";
204  Names[RTLIB::ROUND_F32] = "roundf";
205  Names[RTLIB::ROUND_F64] = "round";
206  Names[RTLIB::ROUND_F80] = "roundl";
207  Names[RTLIB::ROUND_F128] = "roundl";
208  Names[RTLIB::ROUND_PPCF128] = "roundl";
209  Names[RTLIB::FLOOR_F32] = "floorf";
210  Names[RTLIB::FLOOR_F64] = "floor";
211  Names[RTLIB::FLOOR_F80] = "floorl";
212  Names[RTLIB::FLOOR_F128] = "floorl";
213  Names[RTLIB::FLOOR_PPCF128] = "floorl";
214  Names[RTLIB::FMIN_F32] = "fminf";
215  Names[RTLIB::FMIN_F64] = "fmin";
216  Names[RTLIB::FMIN_F80] = "fminl";
217  Names[RTLIB::FMIN_F128] = "fminl";
218  Names[RTLIB::FMIN_PPCF128] = "fminl";
219  Names[RTLIB::FMAX_F32] = "fmaxf";
220  Names[RTLIB::FMAX_F64] = "fmax";
221  Names[RTLIB::FMAX_F80] = "fmaxl";
222  Names[RTLIB::FMAX_F128] = "fmaxl";
223  Names[RTLIB::FMAX_PPCF128] = "fmaxl";
224  Names[RTLIB::ROUND_F32] = "roundf";
225  Names[RTLIB::ROUND_F64] = "round";
226  Names[RTLIB::ROUND_F80] = "roundl";
227  Names[RTLIB::ROUND_F128] = "roundl";
228  Names[RTLIB::ROUND_PPCF128] = "roundl";
229  Names[RTLIB::COPYSIGN_F32] = "copysignf";
230  Names[RTLIB::COPYSIGN_F64] = "copysign";
231  Names[RTLIB::COPYSIGN_F80] = "copysignl";
232  Names[RTLIB::COPYSIGN_F128] = "copysignl";
233  Names[RTLIB::COPYSIGN_PPCF128] = "copysignl";
234  Names[RTLIB::FPEXT_F64_F128] = "__extenddftf2";
235  Names[RTLIB::FPEXT_F32_F128] = "__extendsftf2";
236  Names[RTLIB::FPEXT_F32_F64] = "__extendsfdf2";
237  Names[RTLIB::FPEXT_F16_F32] = "__gnu_h2f_ieee";
238  Names[RTLIB::FPROUND_F32_F16] = "__gnu_f2h_ieee";
239  Names[RTLIB::FPROUND_F64_F16] = "__truncdfhf2";
240  Names[RTLIB::FPROUND_F80_F16] = "__truncxfhf2";
241  Names[RTLIB::FPROUND_F128_F16] = "__trunctfhf2";
242  Names[RTLIB::FPROUND_PPCF128_F16] = "__trunctfhf2";
243  Names[RTLIB::FPROUND_F64_F32] = "__truncdfsf2";
244  Names[RTLIB::FPROUND_F80_F32] = "__truncxfsf2";
245  Names[RTLIB::FPROUND_F128_F32] = "__trunctfsf2";
246  Names[RTLIB::FPROUND_PPCF128_F32] = "__trunctfsf2";
247  Names[RTLIB::FPROUND_F80_F64] = "__truncxfdf2";
248  Names[RTLIB::FPROUND_F128_F64] = "__trunctfdf2";
249  Names[RTLIB::FPROUND_PPCF128_F64] = "__trunctfdf2";
250  Names[RTLIB::FPTOSINT_F32_I8] = "__fixsfqi";
251  Names[RTLIB::FPTOSINT_F32_I16] = "__fixsfhi";
252  Names[RTLIB::FPTOSINT_F32_I32] = "__fixsfsi";
253  Names[RTLIB::FPTOSINT_F32_I64] = "__fixsfdi";
254  Names[RTLIB::FPTOSINT_F32_I128] = "__fixsfti";
255  Names[RTLIB::FPTOSINT_F64_I8] = "__fixdfqi";
256  Names[RTLIB::FPTOSINT_F64_I16] = "__fixdfhi";
257  Names[RTLIB::FPTOSINT_F64_I32] = "__fixdfsi";
258  Names[RTLIB::FPTOSINT_F64_I64] = "__fixdfdi";
259  Names[RTLIB::FPTOSINT_F64_I128] = "__fixdfti";
260  Names[RTLIB::FPTOSINT_F80_I32] = "__fixxfsi";
261  Names[RTLIB::FPTOSINT_F80_I64] = "__fixxfdi";
262  Names[RTLIB::FPTOSINT_F80_I128] = "__fixxfti";
263  Names[RTLIB::FPTOSINT_F128_I32] = "__fixtfsi";
264  Names[RTLIB::FPTOSINT_F128_I64] = "__fixtfdi";
265  Names[RTLIB::FPTOSINT_F128_I128] = "__fixtfti";
266  Names[RTLIB::FPTOSINT_PPCF128_I32] = "__fixtfsi";
267  Names[RTLIB::FPTOSINT_PPCF128_I64] = "__fixtfdi";
268  Names[RTLIB::FPTOSINT_PPCF128_I128] = "__fixtfti";
269  Names[RTLIB::FPTOUINT_F32_I8] = "__fixunssfqi";
270  Names[RTLIB::FPTOUINT_F32_I16] = "__fixunssfhi";
271  Names[RTLIB::FPTOUINT_F32_I32] = "__fixunssfsi";
272  Names[RTLIB::FPTOUINT_F32_I64] = "__fixunssfdi";
273  Names[RTLIB::FPTOUINT_F32_I128] = "__fixunssfti";
274  Names[RTLIB::FPTOUINT_F64_I8] = "__fixunsdfqi";
275  Names[RTLIB::FPTOUINT_F64_I16] = "__fixunsdfhi";
276  Names[RTLIB::FPTOUINT_F64_I32] = "__fixunsdfsi";
277  Names[RTLIB::FPTOUINT_F64_I64] = "__fixunsdfdi";
278  Names[RTLIB::FPTOUINT_F64_I128] = "__fixunsdfti";
279  Names[RTLIB::FPTOUINT_F80_I32] = "__fixunsxfsi";
280  Names[RTLIB::FPTOUINT_F80_I64] = "__fixunsxfdi";
281  Names[RTLIB::FPTOUINT_F80_I128] = "__fixunsxfti";
282  Names[RTLIB::FPTOUINT_F128_I32] = "__fixunstfsi";
283  Names[RTLIB::FPTOUINT_F128_I64] = "__fixunstfdi";
284  Names[RTLIB::FPTOUINT_F128_I128] = "__fixunstfti";
285  Names[RTLIB::FPTOUINT_PPCF128_I32] = "__fixunstfsi";
286  Names[RTLIB::FPTOUINT_PPCF128_I64] = "__fixunstfdi";
287  Names[RTLIB::FPTOUINT_PPCF128_I128] = "__fixunstfti";
288  Names[RTLIB::SINTTOFP_I32_F32] = "__floatsisf";
289  Names[RTLIB::SINTTOFP_I32_F64] = "__floatsidf";
290  Names[RTLIB::SINTTOFP_I32_F80] = "__floatsixf";
291  Names[RTLIB::SINTTOFP_I32_F128] = "__floatsitf";
292  Names[RTLIB::SINTTOFP_I32_PPCF128] = "__floatsitf";
293  Names[RTLIB::SINTTOFP_I64_F32] = "__floatdisf";
294  Names[RTLIB::SINTTOFP_I64_F64] = "__floatdidf";
295  Names[RTLIB::SINTTOFP_I64_F80] = "__floatdixf";
296  Names[RTLIB::SINTTOFP_I64_F128] = "__floatditf";
297  Names[RTLIB::SINTTOFP_I64_PPCF128] = "__floatditf";
298  Names[RTLIB::SINTTOFP_I128_F32] = "__floattisf";
299  Names[RTLIB::SINTTOFP_I128_F64] = "__floattidf";
300  Names[RTLIB::SINTTOFP_I128_F80] = "__floattixf";
301  Names[RTLIB::SINTTOFP_I128_F128] = "__floattitf";
302  Names[RTLIB::SINTTOFP_I128_PPCF128] = "__floattitf";
303  Names[RTLIB::UINTTOFP_I32_F32] = "__floatunsisf";
304  Names[RTLIB::UINTTOFP_I32_F64] = "__floatunsidf";
305  Names[RTLIB::UINTTOFP_I32_F80] = "__floatunsixf";
306  Names[RTLIB::UINTTOFP_I32_F128] = "__floatunsitf";
307  Names[RTLIB::UINTTOFP_I32_PPCF128] = "__floatunsitf";
308  Names[RTLIB::UINTTOFP_I64_F32] = "__floatundisf";
309  Names[RTLIB::UINTTOFP_I64_F64] = "__floatundidf";
310  Names[RTLIB::UINTTOFP_I64_F80] = "__floatundixf";
311  Names[RTLIB::UINTTOFP_I64_F128] = "__floatunditf";
312  Names[RTLIB::UINTTOFP_I64_PPCF128] = "__floatunditf";
313  Names[RTLIB::UINTTOFP_I128_F32] = "__floatuntisf";
314  Names[RTLIB::UINTTOFP_I128_F64] = "__floatuntidf";
315  Names[RTLIB::UINTTOFP_I128_F80] = "__floatuntixf";
316  Names[RTLIB::UINTTOFP_I128_F128] = "__floatuntitf";
317  Names[RTLIB::UINTTOFP_I128_PPCF128] = "__floatuntitf";
318  Names[RTLIB::OEQ_F32] = "__eqsf2";
319  Names[RTLIB::OEQ_F64] = "__eqdf2";
320  Names[RTLIB::OEQ_F128] = "__eqtf2";
321  Names[RTLIB::UNE_F32] = "__nesf2";
322  Names[RTLIB::UNE_F64] = "__nedf2";
323  Names[RTLIB::UNE_F128] = "__netf2";
324  Names[RTLIB::OGE_F32] = "__gesf2";
325  Names[RTLIB::OGE_F64] = "__gedf2";
326  Names[RTLIB::OGE_F128] = "__getf2";
327  Names[RTLIB::OLT_F32] = "__ltsf2";
328  Names[RTLIB::OLT_F64] = "__ltdf2";
329  Names[RTLIB::OLT_F128] = "__lttf2";
330  Names[RTLIB::OLE_F32] = "__lesf2";
331  Names[RTLIB::OLE_F64] = "__ledf2";
332  Names[RTLIB::OLE_F128] = "__letf2";
333  Names[RTLIB::OGT_F32] = "__gtsf2";
334  Names[RTLIB::OGT_F64] = "__gtdf2";
335  Names[RTLIB::OGT_F128] = "__gttf2";
336  Names[RTLIB::UO_F32] = "__unordsf2";
337  Names[RTLIB::UO_F64] = "__unorddf2";
338  Names[RTLIB::UO_F128] = "__unordtf2";
339  Names[RTLIB::O_F32] = "__unordsf2";
340  Names[RTLIB::O_F64] = "__unorddf2";
341  Names[RTLIB::O_F128] = "__unordtf2";
342  Names[RTLIB::MEMCPY] = "memcpy";
343  Names[RTLIB::MEMMOVE] = "memmove";
344  Names[RTLIB::MEMSET] = "memset";
345  Names[RTLIB::UNWIND_RESUME] = "_Unwind_Resume";
346  Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_1] = "__sync_val_compare_and_swap_1";
347  Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_2] = "__sync_val_compare_and_swap_2";
348  Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_4] = "__sync_val_compare_and_swap_4";
349  Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_8] = "__sync_val_compare_and_swap_8";
350  Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_16] = "__sync_val_compare_and_swap_16";
351  Names[RTLIB::SYNC_LOCK_TEST_AND_SET_1] = "__sync_lock_test_and_set_1";
352  Names[RTLIB::SYNC_LOCK_TEST_AND_SET_2] = "__sync_lock_test_and_set_2";
353  Names[RTLIB::SYNC_LOCK_TEST_AND_SET_4] = "__sync_lock_test_and_set_4";
354  Names[RTLIB::SYNC_LOCK_TEST_AND_SET_8] = "__sync_lock_test_and_set_8";
355  Names[RTLIB::SYNC_LOCK_TEST_AND_SET_16] = "__sync_lock_test_and_set_16";
356  Names[RTLIB::SYNC_FETCH_AND_ADD_1] = "__sync_fetch_and_add_1";
357  Names[RTLIB::SYNC_FETCH_AND_ADD_2] = "__sync_fetch_and_add_2";
358  Names[RTLIB::SYNC_FETCH_AND_ADD_4] = "__sync_fetch_and_add_4";
359  Names[RTLIB::SYNC_FETCH_AND_ADD_8] = "__sync_fetch_and_add_8";
360  Names[RTLIB::SYNC_FETCH_AND_ADD_16] = "__sync_fetch_and_add_16";
361  Names[RTLIB::SYNC_FETCH_AND_SUB_1] = "__sync_fetch_and_sub_1";
362  Names[RTLIB::SYNC_FETCH_AND_SUB_2] = "__sync_fetch_and_sub_2";
363  Names[RTLIB::SYNC_FETCH_AND_SUB_4] = "__sync_fetch_and_sub_4";
364  Names[RTLIB::SYNC_FETCH_AND_SUB_8] = "__sync_fetch_and_sub_8";
365  Names[RTLIB::SYNC_FETCH_AND_SUB_16] = "__sync_fetch_and_sub_16";
366  Names[RTLIB::SYNC_FETCH_AND_AND_1] = "__sync_fetch_and_and_1";
367  Names[RTLIB::SYNC_FETCH_AND_AND_2] = "__sync_fetch_and_and_2";
368  Names[RTLIB::SYNC_FETCH_AND_AND_4] = "__sync_fetch_and_and_4";
369  Names[RTLIB::SYNC_FETCH_AND_AND_8] = "__sync_fetch_and_and_8";
370  Names[RTLIB::SYNC_FETCH_AND_AND_16] = "__sync_fetch_and_and_16";
371  Names[RTLIB::SYNC_FETCH_AND_OR_1] = "__sync_fetch_and_or_1";
372  Names[RTLIB::SYNC_FETCH_AND_OR_2] = "__sync_fetch_and_or_2";
373  Names[RTLIB::SYNC_FETCH_AND_OR_4] = "__sync_fetch_and_or_4";
374  Names[RTLIB::SYNC_FETCH_AND_OR_8] = "__sync_fetch_and_or_8";
375  Names[RTLIB::SYNC_FETCH_AND_OR_16] = "__sync_fetch_and_or_16";
376  Names[RTLIB::SYNC_FETCH_AND_XOR_1] = "__sync_fetch_and_xor_1";
377  Names[RTLIB::SYNC_FETCH_AND_XOR_2] = "__sync_fetch_and_xor_2";
378  Names[RTLIB::SYNC_FETCH_AND_XOR_4] = "__sync_fetch_and_xor_4";
379  Names[RTLIB::SYNC_FETCH_AND_XOR_8] = "__sync_fetch_and_xor_8";
380  Names[RTLIB::SYNC_FETCH_AND_XOR_16] = "__sync_fetch_and_xor_16";
381  Names[RTLIB::SYNC_FETCH_AND_NAND_1] = "__sync_fetch_and_nand_1";
382  Names[RTLIB::SYNC_FETCH_AND_NAND_2] = "__sync_fetch_and_nand_2";
383  Names[RTLIB::SYNC_FETCH_AND_NAND_4] = "__sync_fetch_and_nand_4";
384  Names[RTLIB::SYNC_FETCH_AND_NAND_8] = "__sync_fetch_and_nand_8";
385  Names[RTLIB::SYNC_FETCH_AND_NAND_16] = "__sync_fetch_and_nand_16";
386  Names[RTLIB::SYNC_FETCH_AND_MAX_1] = "__sync_fetch_and_max_1";
387  Names[RTLIB::SYNC_FETCH_AND_MAX_2] = "__sync_fetch_and_max_2";
388  Names[RTLIB::SYNC_FETCH_AND_MAX_4] = "__sync_fetch_and_max_4";
389  Names[RTLIB::SYNC_FETCH_AND_MAX_8] = "__sync_fetch_and_max_8";
390  Names[RTLIB::SYNC_FETCH_AND_MAX_16] = "__sync_fetch_and_max_16";
391  Names[RTLIB::SYNC_FETCH_AND_UMAX_1] = "__sync_fetch_and_umax_1";
392  Names[RTLIB::SYNC_FETCH_AND_UMAX_2] = "__sync_fetch_and_umax_2";
393  Names[RTLIB::SYNC_FETCH_AND_UMAX_4] = "__sync_fetch_and_umax_4";
394  Names[RTLIB::SYNC_FETCH_AND_UMAX_8] = "__sync_fetch_and_umax_8";
395  Names[RTLIB::SYNC_FETCH_AND_UMAX_16] = "__sync_fetch_and_umax_16";
396  Names[RTLIB::SYNC_FETCH_AND_MIN_1] = "__sync_fetch_and_min_1";
397  Names[RTLIB::SYNC_FETCH_AND_MIN_2] = "__sync_fetch_and_min_2";
398  Names[RTLIB::SYNC_FETCH_AND_MIN_4] = "__sync_fetch_and_min_4";
399  Names[RTLIB::SYNC_FETCH_AND_MIN_8] = "__sync_fetch_and_min_8";
400  Names[RTLIB::SYNC_FETCH_AND_MIN_16] = "__sync_fetch_and_min_16";
401  Names[RTLIB::SYNC_FETCH_AND_UMIN_1] = "__sync_fetch_and_umin_1";
402  Names[RTLIB::SYNC_FETCH_AND_UMIN_2] = "__sync_fetch_and_umin_2";
403  Names[RTLIB::SYNC_FETCH_AND_UMIN_4] = "__sync_fetch_and_umin_4";
404  Names[RTLIB::SYNC_FETCH_AND_UMIN_8] = "__sync_fetch_and_umin_8";
405  Names[RTLIB::SYNC_FETCH_AND_UMIN_16] = "__sync_fetch_and_umin_16";
406 
407  if (TT.getEnvironment() == Triple::GNU) {
408  Names[RTLIB::SINCOS_F32] = "sincosf";
409  Names[RTLIB::SINCOS_F64] = "sincos";
410  Names[RTLIB::SINCOS_F80] = "sincosl";
411  Names[RTLIB::SINCOS_F128] = "sincosl";
412  Names[RTLIB::SINCOS_PPCF128] = "sincosl";
413  } else {
414  // These are generally not available.
415  Names[RTLIB::SINCOS_F32] = nullptr;
416  Names[RTLIB::SINCOS_F64] = nullptr;
417  Names[RTLIB::SINCOS_F80] = nullptr;
418  Names[RTLIB::SINCOS_F128] = nullptr;
419  Names[RTLIB::SINCOS_PPCF128] = nullptr;
420  }
421 
422  if (!TT.isOSOpenBSD()) {
423  Names[RTLIB::STACKPROTECTOR_CHECK_FAIL] = "__stack_chk_fail";
424  } else {
425  // These are generally not available.
426  Names[RTLIB::STACKPROTECTOR_CHECK_FAIL] = nullptr;
427  }
428 
429  // For f16/f32 conversions, Darwin uses the standard naming scheme, instead
430  // of the gnueabi-style __gnu_*_ieee.
431  // FIXME: What about other targets?
432  if (TT.isOSDarwin()) {
433  Names[RTLIB::FPEXT_F16_F32] = "__extendhfsf2";
434  Names[RTLIB::FPROUND_F32_F16] = "__truncsfhf2";
435  }
436 }
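// Note: the table above only establishes defaults. A target's own
// TargetLowering constructor may still override individual entries, e.g.
// (illustrative sketch, not taken from this file):
//   setLibcallName(RTLIB::SDIV_I32, "__aeabi_idiv");
//   setLibcallCallingConv(RTLIB::SDIV_I32, CallingConv::ARM_AAPCS);
// which is roughly how EABI targets substitute their own division helpers.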
437 
438 /// InitLibcallCallingConvs - Set default libcall CallingConvs.
439 ///
440 static void InitLibcallCallingConvs(CallingConv::ID *CCs) {
441  for (int i = 0; i < RTLIB::UNKNOWN_LIBCALL; ++i) {
442  CCs[i] = CallingConv::C;
443  }
444 }
445 
446 /// getFPEXT - Return the FPEXT_*_* value for the given types, or
447 /// UNKNOWN_LIBCALL if there is none.
448 RTLIB::Libcall RTLIB::getFPEXT(EVT OpVT, EVT RetVT) {
449  if (OpVT == MVT::f16) {
450  if (RetVT == MVT::f32)
451  return FPEXT_F16_F32;
452  } else if (OpVT == MVT::f32) {
453  if (RetVT == MVT::f64)
454  return FPEXT_F32_F64;
455  if (RetVT == MVT::f128)
456  return FPEXT_F32_F128;
457  } else if (OpVT == MVT::f64) {
458  if (RetVT == MVT::f128)
459  return FPEXT_F64_F128;
460  }
461 
462  return UNKNOWN_LIBCALL;
463 }
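// Worked example: getFPEXT(MVT::f32, MVT::f64) returns FPEXT_F32_F64, which
// InitLibcallNames maps to "__extendsfdf2"; a narrowing pair such as
// (f64, f32) is not an extension and falls through to UNKNOWN_LIBCALL.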
464 
465 /// getFPROUND - Return the FPROUND_*_* value for the given types, or
466 /// UNKNOWN_LIBCALL if there is none.
467 RTLIB::Libcall RTLIB::getFPROUND(EVT OpVT, EVT RetVT) {
468  if (RetVT == MVT::f16) {
469  if (OpVT == MVT::f32)
470  return FPROUND_F32_F16;
471  if (OpVT == MVT::f64)
472  return FPROUND_F64_F16;
473  if (OpVT == MVT::f80)
474  return FPROUND_F80_F16;
475  if (OpVT == MVT::f128)
476  return FPROUND_F128_F16;
477  if (OpVT == MVT::ppcf128)
478  return FPROUND_PPCF128_F16;
479  } else if (RetVT == MVT::f32) {
480  if (OpVT == MVT::f64)
481  return FPROUND_F64_F32;
482  if (OpVT == MVT::f80)
483  return FPROUND_F80_F32;
484  if (OpVT == MVT::f128)
485  return FPROUND_F128_F32;
486  if (OpVT == MVT::ppcf128)
487  return FPROUND_PPCF128_F32;
488  } else if (RetVT == MVT::f64) {
489  if (OpVT == MVT::f80)
490  return FPROUND_F80_F64;
491  if (OpVT == MVT::f128)
492  return FPROUND_F128_F64;
493  if (OpVT == MVT::ppcf128)
494  return FPROUND_PPCF128_F64;
495  }
496 
497  return UNKNOWN_LIBCALL;
498 }
499 
500 /// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or
501 /// UNKNOWN_LIBCALL if there is none.
502 RTLIB::Libcall RTLIB::getFPTOSINT(EVT OpVT, EVT RetVT) {
503  if (OpVT == MVT::f32) {
504  if (RetVT == MVT::i8)
505  return FPTOSINT_F32_I8;
506  if (RetVT == MVT::i16)
507  return FPTOSINT_F32_I16;
508  if (RetVT == MVT::i32)
509  return FPTOSINT_F32_I32;
510  if (RetVT == MVT::i64)
511  return FPTOSINT_F32_I64;
512  if (RetVT == MVT::i128)
513  return FPTOSINT_F32_I128;
514  } else if (OpVT == MVT::f64) {
515  if (RetVT == MVT::i8)
516  return FPTOSINT_F64_I8;
517  if (RetVT == MVT::i16)
518  return FPTOSINT_F64_I16;
519  if (RetVT == MVT::i32)
520  return FPTOSINT_F64_I32;
521  if (RetVT == MVT::i64)
522  return FPTOSINT_F64_I64;
523  if (RetVT == MVT::i128)
524  return FPTOSINT_F64_I128;
525  } else if (OpVT == MVT::f80) {
526  if (RetVT == MVT::i32)
527  return FPTOSINT_F80_I32;
528  if (RetVT == MVT::i64)
529  return FPTOSINT_F80_I64;
530  if (RetVT == MVT::i128)
531  return FPTOSINT_F80_I128;
532  } else if (OpVT == MVT::f128) {
533  if (RetVT == MVT::i32)
534  return FPTOSINT_F128_I32;
535  if (RetVT == MVT::i64)
536  return FPTOSINT_F128_I64;
537  if (RetVT == MVT::i128)
538  return FPTOSINT_F128_I128;
539  } else if (OpVT == MVT::ppcf128) {
540  if (RetVT == MVT::i32)
541  return FPTOSINT_PPCF128_I32;
542  if (RetVT == MVT::i64)
543  return FPTOSINT_PPCF128_I64;
544  if (RetVT == MVT::i128)
545  return FPTOSINT_PPCF128_I128;
546  }
547  return UNKNOWN_LIBCALL;
548 }
549 
550 /// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or
551 /// UNKNOWN_LIBCALL if there is none.
552 RTLIB::Libcall RTLIB::getFPTOUINT(EVT OpVT, EVT RetVT) {
553  if (OpVT == MVT::f32) {
554  if (RetVT == MVT::i8)
555  return FPTOUINT_F32_I8;
556  if (RetVT == MVT::i16)
557  return FPTOUINT_F32_I16;
558  if (RetVT == MVT::i32)
559  return FPTOUINT_F32_I32;
560  if (RetVT == MVT::i64)
561  return FPTOUINT_F32_I64;
562  if (RetVT == MVT::i128)
563  return FPTOUINT_F32_I128;
564  } else if (OpVT == MVT::f64) {
565  if (RetVT == MVT::i8)
566  return FPTOUINT_F64_I8;
567  if (RetVT == MVT::i16)
568  return FPTOUINT_F64_I16;
569  if (RetVT == MVT::i32)
570  return FPTOUINT_F64_I32;
571  if (RetVT == MVT::i64)
572  return FPTOUINT_F64_I64;
573  if (RetVT == MVT::i128)
574  return FPTOUINT_F64_I128;
575  } else if (OpVT == MVT::f80) {
576  if (RetVT == MVT::i32)
577  return FPTOUINT_F80_I32;
578  if (RetVT == MVT::i64)
579  return FPTOUINT_F80_I64;
580  if (RetVT == MVT::i128)
581  return FPTOUINT_F80_I128;
582  } else if (OpVT == MVT::f128) {
583  if (RetVT == MVT::i32)
584  return FPTOUINT_F128_I32;
585  if (RetVT == MVT::i64)
586  return FPTOUINT_F128_I64;
587  if (RetVT == MVT::i128)
588  return FPTOUINT_F128_I128;
589  } else if (OpVT == MVT::ppcf128) {
590  if (RetVT == MVT::i32)
591  return FPTOUINT_PPCF128_I32;
592  if (RetVT == MVT::i64)
593  return FPTOUINT_PPCF128_I64;
594  if (RetVT == MVT::i128)
595  return FPTOUINT_PPCF128_I128;
596  }
597  return UNKNOWN_LIBCALL;
598 }
599 
600 /// getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or
601 /// UNKNOWN_LIBCALL if there is none.
602 RTLIB::Libcall RTLIB::getSINTTOFP(EVT OpVT, EVT RetVT) {
603  if (OpVT == MVT::i32) {
604  if (RetVT == MVT::f32)
605  return SINTTOFP_I32_F32;
606  if (RetVT == MVT::f64)
607  return SINTTOFP_I32_F64;
608  if (RetVT == MVT::f80)
609  return SINTTOFP_I32_F80;
610  if (RetVT == MVT::f128)
611  return SINTTOFP_I32_F128;
612  if (RetVT == MVT::ppcf128)
613  return SINTTOFP_I32_PPCF128;
614  } else if (OpVT == MVT::i64) {
615  if (RetVT == MVT::f32)
616  return SINTTOFP_I64_F32;
617  if (RetVT == MVT::f64)
618  return SINTTOFP_I64_F64;
619  if (RetVT == MVT::f80)
620  return SINTTOFP_I64_F80;
621  if (RetVT == MVT::f128)
622  return SINTTOFP_I64_F128;
623  if (RetVT == MVT::ppcf128)
624  return SINTTOFP_I64_PPCF128;
625  } else if (OpVT == MVT::i128) {
626  if (RetVT == MVT::f32)
627  return SINTTOFP_I128_F32;
628  if (RetVT == MVT::f64)
629  return SINTTOFP_I128_F64;
630  if (RetVT == MVT::f80)
631  return SINTTOFP_I128_F80;
632  if (RetVT == MVT::f128)
633  return SINTTOFP_I128_F128;
634  if (RetVT == MVT::ppcf128)
635  return SINTTOFP_I128_PPCF128;
636  }
637  return UNKNOWN_LIBCALL;
638 }
639 
640 /// getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or
641 /// UNKNOWN_LIBCALL if there is none.
642 RTLIB::Libcall RTLIB::getUINTTOFP(EVT OpVT, EVT RetVT) {
643  if (OpVT == MVT::i32) {
644  if (RetVT == MVT::f32)
645  return UINTTOFP_I32_F32;
646  if (RetVT == MVT::f64)
647  return UINTTOFP_I32_F64;
648  if (RetVT == MVT::f80)
649  return UINTTOFP_I32_F80;
650  if (RetVT == MVT::f128)
651  return UINTTOFP_I32_F128;
652  if (RetVT == MVT::ppcf128)
653  return UINTTOFP_I32_PPCF128;
654  } else if (OpVT == MVT::i64) {
655  if (RetVT == MVT::f32)
656  return UINTTOFP_I64_F32;
657  if (RetVT == MVT::f64)
658  return UINTTOFP_I64_F64;
659  if (RetVT == MVT::f80)
660  return UINTTOFP_I64_F80;
661  if (RetVT == MVT::f128)
662  return UINTTOFP_I64_F128;
663  if (RetVT == MVT::ppcf128)
664  return UINTTOFP_I64_PPCF128;
665  } else if (OpVT == MVT::i128) {
666  if (RetVT == MVT::f32)
667  return UINTTOFP_I128_F32;
668  if (RetVT == MVT::f64)
669  return UINTTOFP_I128_F64;
670  if (RetVT == MVT::f80)
671  return UINTTOFP_I128_F80;
672  if (RetVT == MVT::f128)
673  return UINTTOFP_I128_F128;
674  if (RetVT == MVT::ppcf128)
675  return UINTTOFP_I128_PPCF128;
676  }
677  return UNKNOWN_LIBCALL;
678 }
679 
680 RTLIB::Libcall RTLIB::getATOMIC(unsigned Opc, MVT VT) {
681 #define OP_TO_LIBCALL(Name, Enum) \
682  case Name: \
683  switch (VT.SimpleTy) { \
684  default: \
685  return UNKNOWN_LIBCALL; \
686  case MVT::i8: \
687  return Enum##_1; \
688  case MVT::i16: \
689  return Enum##_2; \
690  case MVT::i32: \
691  return Enum##_4; \
692  case MVT::i64: \
693  return Enum##_8; \
694  case MVT::i128: \
695  return Enum##_16; \
696  }
697 
698  switch (Opc) {
699  OP_TO_LIBCALL(ISD::ATOMIC_SWAP, SYNC_LOCK_TEST_AND_SET)
700  OP_TO_LIBCALL(ISD::ATOMIC_CMP_SWAP, SYNC_VAL_COMPARE_AND_SWAP)
701  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_ADD, SYNC_FETCH_AND_ADD)
702  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_SUB, SYNC_FETCH_AND_SUB)
703  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_AND, SYNC_FETCH_AND_AND)
704  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_OR, SYNC_FETCH_AND_OR)
705  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_XOR, SYNC_FETCH_AND_XOR)
706  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_NAND, SYNC_FETCH_AND_NAND)
707  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MAX, SYNC_FETCH_AND_MAX)
708  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMAX, SYNC_FETCH_AND_UMAX)
709  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MIN, SYNC_FETCH_AND_MIN)
710  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMIN, SYNC_FETCH_AND_UMIN)
711  }
712 
713 #undef OP_TO_LIBCALL
714 
715  return UNKNOWN_LIBCALL;
716 }
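// Worked example: getATOMIC(ISD::ATOMIC_LOAD_ADD, MVT::i32) hits the
// OP_TO_LIBCALL case above and returns SYNC_FETCH_AND_ADD_4, i.e. the
// "__sync_fetch_and_add_4" routine from InitLibcallNames; any value type
// outside i8/i16/i32/i64/i128 takes the default path to UNKNOWN_LIBCALL.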
717 
718 /// InitCmpLibcallCCs - Set default comparison libcall CC.
719 ///
720 static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
721  memset(CCs, ISD::SETCC_INVALID, sizeof(ISD::CondCode)*RTLIB::UNKNOWN_LIBCALL);
722  CCs[RTLIB::OEQ_F32] = ISD::SETEQ;
723  CCs[RTLIB::OEQ_F64] = ISD::SETEQ;
724  CCs[RTLIB::OEQ_F128] = ISD::SETEQ;
725  CCs[RTLIB::UNE_F32] = ISD::SETNE;
726  CCs[RTLIB::UNE_F64] = ISD::SETNE;
727  CCs[RTLIB::UNE_F128] = ISD::SETNE;
728  CCs[RTLIB::OGE_F32] = ISD::SETGE;
729  CCs[RTLIB::OGE_F64] = ISD::SETGE;
730  CCs[RTLIB::OGE_F128] = ISD::SETGE;
731  CCs[RTLIB::OLT_F32] = ISD::SETLT;
732  CCs[RTLIB::OLT_F64] = ISD::SETLT;
733  CCs[RTLIB::OLT_F128] = ISD::SETLT;
734  CCs[RTLIB::OLE_F32] = ISD::SETLE;
735  CCs[RTLIB::OLE_F64] = ISD::SETLE;
736  CCs[RTLIB::OLE_F128] = ISD::SETLE;
737  CCs[RTLIB::OGT_F32] = ISD::SETGT;
738  CCs[RTLIB::OGT_F64] = ISD::SETGT;
739  CCs[RTLIB::OGT_F128] = ISD::SETGT;
740  CCs[RTLIB::UO_F32] = ISD::SETNE;
741  CCs[RTLIB::UO_F64] = ISD::SETNE;
742  CCs[RTLIB::UO_F128] = ISD::SETNE;
743  CCs[RTLIB::O_F32] = ISD::SETEQ;
744  CCs[RTLIB::O_F64] = ISD::SETEQ;
745  CCs[RTLIB::O_F128] = ISD::SETEQ;
746 }
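// Illustrative use of these condition codes: when an f32 "a < b" is softened,
// it becomes a call to "__ltsf2" whose integer result is then compared against
// zero with CCs[RTLIB::OLT_F32], i.e. SETLT; the table records how each
// comparison libcall's return value must be tested to recover the original
// floating-point predicate.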
747 
748 /// NOTE: The TargetMachine owns TLOF.
749 TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm) : TM(tm) {
750  initActions();
751 
752  // Perform these initializations only once.
756  UseUnderscoreSetJmp = false;
757  UseUnderscoreLongJmp = false;
758  SelectIsExpensive = false;
759  HasMultipleConditionRegisters = false;
760  HasExtractBitsInsn = false;
761  IntDivIsCheap = false;
762  FsqrtIsCheap = false;
763  Pow2SDivIsCheap = false;
764  JumpIsExpensive = JumpIsExpensiveOverride;
767  EnableExtLdPromotion = false;
768  HasFloatingPointExceptions = true;
769  StackPointerRegisterToSaveRestore = 0;
770  ExceptionPointerRegister = 0;
771  ExceptionSelectorRegister = 0;
772  BooleanContents = UndefinedBooleanContent;
773  BooleanFloatContents = UndefinedBooleanContent;
774  BooleanVectorContents = UndefinedBooleanContent;
775  SchedPreferenceInfo = Sched::ILP;
776  JumpBufSize = 0;
777  JumpBufAlignment = 0;
778  MinFunctionAlignment = 0;
779  PrefFunctionAlignment = 0;
780  PrefLoopAlignment = 0;
781  MinStackArgumentAlignment = 1;
782  InsertFencesForAtomic = false;
783  MinimumJumpTableEntries = 4;
784 
785  InitLibcallNames(LibcallRoutineNames, TM.getTargetTriple());
786  InitCmpLibcallCCs(CmpLibcallCCs);
787  InitLibcallCallingConvs(LibcallCallingConvs);
788 }
789 
790 void TargetLoweringBase::initActions() {
791  // All operations default to being supported.
792  memset(OpActions, 0, sizeof(OpActions));
793  memset(LoadExtActions, 0, sizeof(LoadExtActions));
794  memset(TruncStoreActions, 0, sizeof(TruncStoreActions));
795  memset(IndexedModeActions, 0, sizeof(IndexedModeActions));
796  memset(CondCodeActions, 0, sizeof(CondCodeActions));
797  memset(RegClassForVT, 0,MVT::LAST_VALUETYPE*sizeof(TargetRegisterClass*));
798  memset(TargetDAGCombineArray, 0, array_lengthof(TargetDAGCombineArray));
799 
800  // Set default actions for various operations.
801  for (MVT VT : MVT::all_valuetypes()) {
802  // Default all indexed load / store to expand.
803  for (unsigned IM = (unsigned)ISD::PRE_INC;
804  IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
805  setIndexedLoadAction(IM, VT, Expand);
806  setIndexedStoreAction(IM, VT, Expand);
807  }
808 
809  // Most backends expect to see the node which just returns the value loaded.
811 
812  // These operations default to expand.
822 
823  // Overflow operations default to expand
830 
831  // These library functions default to expand.
833 
834  // These operations default to expand for vector types.
835  if (VT.isVector()) {
840  }
841  }
842 
843  // Most targets ignore the @llvm.prefetch intrinsic.
844  setOperationAction(ISD::PREFETCH, MVT::Other, Expand);
845 
846  // ConstantFP nodes default to expand. Targets can either change this to
847  // Legal, in which case all fp constants are legal, or use isFPImmLegal()
848  // to optimize expansions for certain constants.
854 
855  // These library functions default to expand.
856  for (MVT VT : {MVT::f32, MVT::f64, MVT::f128}) {
870  }
871 
872  // Default ISD::TRAP to expand (which turns it into abort).
873  setOperationAction(ISD::TRAP, MVT::Other, Expand);
874 
875  // On most systems, DEBUGTRAP and TRAP have no difference. The "Expand"
876  // here is to inform DAG Legalizer to replace DEBUGTRAP with TRAP.
877  //
878  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Expand);
879 }
880 
881 MVT TargetLoweringBase::getScalarShiftAmountTy(const DataLayout &DL,
882  EVT) const {
883  return MVT::getIntegerVT(8 * DL.getPointerSize(0));
884 }
885 
886 EVT TargetLoweringBase::getShiftAmountTy(EVT LHSTy,
887  const DataLayout &DL) const {
888  assert(LHSTy.isInteger() && "Shift amount is not an integer type!");
889  if (LHSTy.isVector())
890  return LHSTy;
891  return getScalarShiftAmountTy(DL, LHSTy);
892 }
893 
894 /// canOpTrap - Returns true if the operation can trap for the value type.
895 /// VT must be a legal type.
896 bool TargetLoweringBase::canOpTrap(unsigned Op, EVT VT) const {
897  assert(isTypeLegal(VT));
898  switch (Op) {
899  default:
900  return false;
901  case ISD::FDIV:
902  case ISD::FREM:
903  case ISD::SDIV:
904  case ISD::UDIV:
905  case ISD::SREM:
906  case ISD::UREM:
907  return true;
908  }
909 }
910 
911 void TargetLoweringBase::setJumpIsExpensive(bool isExpensive) {
912  // If the command-line option was specified, ignore this request.
913  if (!JumpIsExpensiveOverride.getNumOccurrences())
914  JumpIsExpensive = isExpensive;
915 }
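// Note: the "jump-is-expensive" cl::opt defined at the top of this file takes
// precedence; once the flag appears on the command line, getNumOccurrences()
// is non-zero and calls to this hook leave JumpIsExpensive unchanged.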
916 
917 TargetLoweringBase::LegalizeKind
918 TargetLoweringBase::getTypeConversion(LLVMContext &Context, EVT VT) const {
919  // If this is a simple type, use the ComputeRegisterProp mechanism.
920  if (VT.isSimple()) {
921  MVT SVT = VT.getSimpleVT();
922  assert((unsigned)SVT.SimpleTy < array_lengthof(TransformToType));
923  MVT NVT = TransformToType[SVT.SimpleTy];
924  LegalizeTypeAction LA = ValueTypeActions.getTypeAction(SVT);
925 
926  assert((LA == TypeLegal || LA == TypeSoftenFloat ||
927  ValueTypeActions.getTypeAction(NVT) != TypePromoteInteger) &&
928  "Promote may not follow Expand or Promote");
929 
930  if (LA == TypeSplitVector)
931  return LegalizeKind(LA,
932  EVT::getVectorVT(Context, SVT.getVectorElementType(),
933  SVT.getVectorNumElements() / 2));
934  if (LA == TypeScalarizeVector)
935  return LegalizeKind(LA, SVT.getVectorElementType());
936  return LegalizeKind(LA, NVT);
937  }
938 
939  // Handle Extended Scalar Types.
940  if (!VT.isVector()) {
941  assert(VT.isInteger() && "Float types must be simple");
942  unsigned BitSize = VT.getSizeInBits();
943  // First promote to a power-of-two size, then expand if necessary.
944  if (BitSize < 8 || !isPowerOf2_32(BitSize)) {
945  EVT NVT = VT.getRoundIntegerType(Context);
946  assert(NVT != VT && "Unable to round integer VT");
947  LegalizeKind NextStep = getTypeConversion(Context, NVT);
948  // Avoid multi-step promotion.
949  if (NextStep.first == TypePromoteInteger)
950  return NextStep;
951  // Return rounded integer type.
952  return LegalizeKind(TypePromoteInteger, NVT);
953  }
954 
955  return LegalizeKind(TypeExpandInteger,
956  EVT::getIntegerVT(Context, VT.getSizeInBits() / 2));
957  }
958 
959  // Handle vector types.
960  unsigned NumElts = VT.getVectorNumElements();
961  EVT EltVT = VT.getVectorElementType();
962 
963  // Vectors with only one element are always scalarized.
964  if (NumElts == 1)
965  return LegalizeKind(TypeScalarizeVector, EltVT);
966 
967  // Try to widen vector elements until the element type is a power of two and
968  // promote it to a legal type later on, for example:
969  // <3 x i8> -> <4 x i8> -> <4 x i32>
970  if (EltVT.isInteger()) {
971  // Vectors with a number of elements that is not a power of two are always
972  // widened, for example <3 x i8> -> <4 x i8>.
973  if (!VT.isPow2VectorType()) {
974  NumElts = (unsigned)NextPowerOf2(NumElts);
975  EVT NVT = EVT::getVectorVT(Context, EltVT, NumElts);
976  return LegalizeKind(TypeWidenVector, NVT);
977  }
978 
979  // Examine the element type.
980  LegalizeKind LK = getTypeConversion(Context, EltVT);
981 
982  // If type is to be expanded, split the vector.
983  // <4 x i140> -> <2 x i140>
984  if (LK.first == TypeExpandInteger)
985  return LegalizeKind(TypeSplitVector,
986  EVT::getVectorVT(Context, EltVT, NumElts / 2));
987 
988  // Promote the integer element types until a legal vector type is found
989  // or until the element integer type is too big. If a legal type was not
990  // found, fallback to the usual mechanism of widening/splitting the
991  // vector.
992  EVT OldEltVT = EltVT;
993  while (1) {
994  // Increase the bitwidth of the element to the next pow-of-two
995  // (which is greater than 8 bits).
996  EltVT = EVT::getIntegerVT(Context, 1 + EltVT.getSizeInBits())
997  .getRoundIntegerType(Context);
998 
999  // Stop trying when getting a non-simple element type.
1000  // Note that vector elements may be greater than legal vector element
1001  // types. Example: X86 XMM registers hold 64bit element on 32bit
1002  // systems.
1003  if (!EltVT.isSimple())
1004  break;
1005 
1006  // Build a new vector type and check if it is legal.
1007  MVT NVT = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
1008  // Found a legal promoted vector type.
1009  if (NVT != MVT() && ValueTypeActions.getTypeAction(NVT) == TypeLegal)
1010  return LegalizeKind(TypePromoteInteger,
1011  EVT::getVectorVT(Context, EltVT, NumElts));
1012  }
1013 
1014  // Reset the type to the unexpanded type if we did not find a legal vector
1015  // type with a promoted vector element type.
1016  EltVT = OldEltVT;
1017  }
1018 
1019  // Try to widen the vector until a legal type is found.
1020  // If there is no wider legal type, split the vector.
1021  while (1) {
1022  // Round up to the next power of 2.
1023  NumElts = (unsigned)NextPowerOf2(NumElts);
1024 
1025  // If there is no simple vector type with this many elements then there
1026  // cannot be a larger legal vector type. Note that this assumes that
1027  // there are no skipped intermediate vector types in the simple types.
1028  if (!EltVT.isSimple())
1029  break;
1030  MVT LargerVector = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
1031  if (LargerVector == MVT())
1032  break;
1033 
1034  // If this type is legal then widen the vector.
1035  if (ValueTypeActions.getTypeAction(LargerVector) == TypeLegal)
1036  return LegalizeKind(TypeWidenVector, LargerVector);
1037  }
1038 
1039  // Widen odd vectors to next power of two.
1040  if (!VT.isPow2VectorType()) {
1041  EVT NVT = VT.getPow2VectorType(Context);
1042  return LegalizeKind(TypeWidenVector, NVT);
1043  }
1044 
1045  // Vectors with illegal element types are expanded.
1046  EVT NVT = EVT::getVectorVT(Context, EltVT, VT.getVectorNumElements() / 2);
1047  return LegalizeKind(TypeSplitVector, NVT);
1048 }
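// Worked example: an extended scalar such as i33 is first reported as
// TypePromoteInteger up to i64; if i64 is itself illegal on the target, a
// subsequent query on i64 returns TypeExpandInteger, splitting it into two
// i32 halves. Each call describes only the next single legalization step.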
1049 
1050 static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
1051  unsigned &NumIntermediates,
1052  MVT &RegisterVT,
1053  TargetLoweringBase *TLI) {
1054  // Figure out the right, legal destination reg to copy into.
1055  unsigned NumElts = VT.getVectorNumElements();
1056  MVT EltTy = VT.getVectorElementType();
1057 
1058  unsigned NumVectorRegs = 1;
1059 
1060  // FIXME: We don't support non-power-of-2-sized vectors for now. Ideally we
1061  // could break down into LHS/RHS like LegalizeDAG does.
1062  if (!isPowerOf2_32(NumElts)) {
1063  NumVectorRegs = NumElts;
1064  NumElts = 1;
1065  }
1066 
1067  // Divide the input until we get to a supported size. This will always
1068  // end with a scalar if the target doesn't support vectors.
1069  while (NumElts > 1 && !TLI->isTypeLegal(MVT::getVectorVT(EltTy, NumElts))) {
1070  NumElts >>= 1;
1071  NumVectorRegs <<= 1;
1072  }
1073 
1074  NumIntermediates = NumVectorRegs;
1075 
1076  MVT NewVT = MVT::getVectorVT(EltTy, NumElts);
1077  if (!TLI->isTypeLegal(NewVT))
1078  NewVT = EltTy;
1079  IntermediateVT = NewVT;
1080 
1081  unsigned NewVTSize = NewVT.getSizeInBits();
1082 
1083  // Convert sizes such as i33 to i64.
1084  if (!isPowerOf2_32(NewVTSize))
1085  NewVTSize = NextPowerOf2(NewVTSize);
1086 
1087  MVT DestVT = TLI->getRegisterType(NewVT);
1088  RegisterVT = DestVT;
1089  if (EVT(DestVT).bitsLT(NewVT)) // Value is expanded, e.g. i64 -> i16.
1090  return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits());
1091 
1092  // Otherwise, promotion or legal types use the same number of registers as
1093  // the vector decimated to the appropriate level.
1094  return NumVectorRegs;
1095 }
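// Worked example (assuming a target whose widest legal vector type is v4f32):
// breaking down MVT::v8f32 halves the element count once, giving
// NumIntermediates = 2 and IntermediateVT = RegisterVT = v4f32, so two
// registers are reported. On a target with no legal vector types (but a legal
// scalar element type) the loop runs down to a single element and the value
// is handed out roughly one scalar register per element.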
1096 
1097 /// isLegalRC - Return true if the value types that can be represented by the
1098 /// specified register class are all legal.
1099 bool TargetLoweringBase::isLegalRC(const TargetRegisterClass *RC) const {
1100  for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
1101  I != E; ++I) {
1102  if (isTypeLegal(*I))
1103  return true;
1104  }
1105  return false;
1106 }
1107 
1108 /// Replace/modify any TargetFrameIndex operands with a target-dependent
1109 /// sequence of memory operands that is recognized by PrologEpilogInserter.
1110 MachineBasicBlock *
1111 TargetLoweringBase::emitPatchPoint(MachineInstr *MI,
1112  MachineBasicBlock *MBB) const {
1113  MachineFunction &MF = *MI->getParent()->getParent();
1114 
1115  // MI changes inside this loop as we grow operands.
1116  for(unsigned OperIdx = 0; OperIdx != MI->getNumOperands(); ++OperIdx) {
1117  MachineOperand &MO = MI->getOperand(OperIdx);
1118  if (!MO.isFI())
1119  continue;
1120 
1121  // foldMemoryOperand builds a new MI after replacing a single FI operand
1122  // with the canonical set of five x86 addressing-mode operands.
1123  int FI = MO.getIndex();
1124  MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), MI->getDesc());
1125 
1126  // Copy operands before the frame-index.
1127  for (unsigned i = 0; i < OperIdx; ++i)
1128  MIB.addOperand(MI->getOperand(i));
1129  // Add frame index operands: direct-mem-ref tag, #FI, offset.
1130  MIB.addImm(StackMaps::DirectMemRefOp);
1131  MIB.addOperand(MI->getOperand(OperIdx));
1132  MIB.addImm(0);
1133  // Copy the operands after the frame index.
1134  for (unsigned i = OperIdx + 1; i != MI->getNumOperands(); ++i)
1135  MIB.addOperand(MI->getOperand(i));
1136 
1137  // Inherit previous memory operands.
1138  MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
1139  assert(MIB->mayLoad() && "Folded a stackmap use to a non-load!");
1140 
1141  // Add a new memory operand for this FI.
1142  const MachineFrameInfo &MFI = *MF.getFrameInfo();
1143  assert(MFI.getObjectOffset(FI) != -1);
1144 
1145  unsigned Flags = MachineMemOperand::MOLoad;
1146  if (MI->getOpcode() == TargetOpcode::STATEPOINT) {
1147  Flags |= MachineMemOperand::MOStore;
1149  }
1150  MachineMemOperand *MMO = MF.getMachineMemOperand(
1151  MachinePointerInfo::getFixedStack(FI), Flags,
1152  TM.getDataLayout()->getPointerSize(), MFI.getObjectAlignment(FI));
1153  MIB->addMemOperand(MF, MMO);
1154 
1155  // Replace the instruction and update the operand index.
1156  MBB->insert(MachineBasicBlock::iterator(MI), MIB);
1157  OperIdx += (MIB->getNumOperands() - MI->getNumOperands()) - 1;
1158  MI->eraseFromParent();
1159  MI = MIB;
1160  }
1161  return MBB;
1162 }
1163 
1164 /// findRepresentativeClass - Return the largest legal super-reg register class
1165 /// of the register class for the specified type and its associated "cost".
1166 // This function is in TargetLowering because it uses RegClassForVT which would
1167 // need to be moved to TargetRegisterInfo and would necessitate moving
1168 // isTypeLegal over as well - a massive change that would just require
1169 // TargetLowering having a TargetRegisterInfo class member that it would use.
1170 std::pair<const TargetRegisterClass *, uint8_t>
1171 TargetLoweringBase::findRepresentativeClass(const TargetRegisterInfo *TRI,
1172  MVT VT) const {
1173  const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
1174  if (!RC)
1175  return std::make_pair(RC, 0);
1176 
1177  // Compute the set of all super-register classes.
1178  BitVector SuperRegRC(TRI->getNumRegClasses());
1179  for (SuperRegClassIterator RCI(RC, TRI); RCI.isValid(); ++RCI)
1180  SuperRegRC.setBitsInMask(RCI.getMask());
1181 
1182  // Find the first legal register class with the largest spill size.
1183  const TargetRegisterClass *BestRC = RC;
1184  for (int i = SuperRegRC.find_first(); i >= 0; i = SuperRegRC.find_next(i)) {
1185  const TargetRegisterClass *SuperRC = TRI->getRegClass(i);
1186  // We want the largest possible spill size.
1187  if (SuperRC->getSize() <= BestRC->getSize())
1188  continue;
1189  if (!isLegalRC(SuperRC))
1190  continue;
1191  BestRC = SuperRC;
1192  }
1193  return std::make_pair(BestRC, 1);
1194 }
1195 
1196 /// computeRegisterProperties - Once all of the register classes are added,
1197 /// this allows us to compute derived properties we expose.
1198 void TargetLoweringBase::computeRegisterProperties(
1199  const TargetRegisterInfo *TRI) {
1200  static_assert(MVT::LAST_VALUETYPE <= MVT::MAX_ALLOWED_VALUETYPE,
1201  "Too many value types for ValueTypeActions to hold!");
1202 
1203  // Everything defaults to needing one register.
1204  for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
1205  NumRegistersForVT[i] = 1;
1206  RegisterTypeForVT[i] = TransformToType[i] = (MVT::SimpleValueType)i;
1207  }
1208  // ...except isVoid, which doesn't need any registers.
1209  NumRegistersForVT[MVT::isVoid] = 0;
1210 
1211  // Find the largest integer register class.
1212  unsigned LargestIntReg = MVT::LAST_INTEGER_VALUETYPE;
1213  for (; RegClassForVT[LargestIntReg] == nullptr; --LargestIntReg)
1214  assert(LargestIntReg != MVT::i1 && "No integer registers defined!");
1215 
1216  // Every integer value type larger than this largest register takes twice as
1217  // many registers to represent as the previous ValueType.
1218  for (unsigned ExpandedReg = LargestIntReg + 1;
1219  ExpandedReg <= MVT::LAST_INTEGER_VALUETYPE; ++ExpandedReg) {
1220  NumRegistersForVT[ExpandedReg] = 2*NumRegistersForVT[ExpandedReg-1];
1221  RegisterTypeForVT[ExpandedReg] = (MVT::SimpleValueType)LargestIntReg;
1222  TransformToType[ExpandedReg] = (MVT::SimpleValueType)(ExpandedReg - 1);
1223  ValueTypeActions.setTypeAction((MVT::SimpleValueType)ExpandedReg,
1224  TypeExpandInteger);
1225  }
1226 
1227  // Inspect all of the ValueType's smaller than the largest integer
1228  // register to see which ones need promotion.
1229  unsigned LegalIntReg = LargestIntReg;
1230  for (unsigned IntReg = LargestIntReg - 1;
1231  IntReg >= (unsigned)MVT::i1; --IntReg) {
1232  MVT IVT = (MVT::SimpleValueType)IntReg;
1233  if (isTypeLegal(IVT)) {
1234  LegalIntReg = IntReg;
1235  } else {
1236  RegisterTypeForVT[IntReg] = TransformToType[IntReg] =
1237  (const MVT::SimpleValueType)LegalIntReg;
1238  ValueTypeActions.setTypeAction(IVT, TypePromoteInteger);
1239  }
1240  }
1241 
1242  // ppcf128 type is really two f64's.
1243  if (!isTypeLegal(MVT::ppcf128)) {
1244  NumRegistersForVT[MVT::ppcf128] = 2*NumRegistersForVT[MVT::f64];
1245  RegisterTypeForVT[MVT::ppcf128] = MVT::f64;
1246  TransformToType[MVT::ppcf128] = MVT::f64;
1247  ValueTypeActions.setTypeAction(MVT::ppcf128, TypeExpandFloat);
1248  }
1249 
1250  // Decide how to handle f128. If the target does not have native f128 support,
1251  // expand it to i128 and we will be generating soft float library calls.
1252  if (!isTypeLegal(MVT::f128)) {
1253  NumRegistersForVT[MVT::f128] = NumRegistersForVT[MVT::i128];
1254  RegisterTypeForVT[MVT::f128] = RegisterTypeForVT[MVT::i128];
1255  TransformToType[MVT::f128] = MVT::i128;
1256  ValueTypeActions.setTypeAction(MVT::f128, TypeSoftenFloat);
1257  }
1258 
1259  // Decide how to handle f64. If the target does not have native f64 support,
1260  // expand it to i64 and we will be generating soft float library calls.
1261  if (!isTypeLegal(MVT::f64)) {
1262  NumRegistersForVT[MVT::f64] = NumRegistersForVT[MVT::i64];
1263  RegisterTypeForVT[MVT::f64] = RegisterTypeForVT[MVT::i64];
1264  TransformToType[MVT::f64] = MVT::i64;
1265  ValueTypeActions.setTypeAction(MVT::f64, TypeSoftenFloat);
1266  }
1267 
1268  // Decide how to handle f32. If the target does not have native f32 support,
1269  // expand it to i32 and we will be generating soft float library calls.
1270  if (!isTypeLegal(MVT::f32)) {
1271  NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::i32];
1272  RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::i32];
1273  TransformToType[MVT::f32] = MVT::i32;
1274  ValueTypeActions.setTypeAction(MVT::f32, TypeSoftenFloat);
1275  }
1276 
1277  if (!isTypeLegal(MVT::f16)) {
1278  // If the target has native f32 support, promote f16 operations to f32. If
1279  // f32 is not supported, generate soft float library calls.
1280  if (isTypeLegal(MVT::f32)) {
1281  NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::f32];
1282  RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::f32];
1283  TransformToType[MVT::f16] = MVT::f32;
1284  ValueTypeActions.setTypeAction(MVT::f16, TypePromoteFloat);
1285  } else {
1286  NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::i16];
1287  RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::i16];
1288  TransformToType[MVT::f16] = MVT::i16;
1289  ValueTypeActions.setTypeAction(MVT::f16, TypeSoftenFloat);
1290  }
1291  }
1292 
1293  // Loop over all of the vector value types to see which need transformations.
1294  for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
1295  i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
1296  MVT VT = (MVT::SimpleValueType) i;
1297  if (isTypeLegal(VT))
1298  continue;
1299 
1300  MVT EltVT = VT.getVectorElementType();
1301  unsigned NElts = VT.getVectorNumElements();
1302  bool IsLegalWiderType = false;
1303  LegalizeTypeAction PreferredAction = getPreferredVectorAction(VT);
1304  switch (PreferredAction) {
1305  case TypePromoteInteger: {
1306  // Try to promote the elements of integer vectors. If no legal
1307  // promotion was found, fall through to the widen-vector method.
1308  for (unsigned nVT = i + 1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
1309  MVT SVT = (MVT::SimpleValueType) nVT;
1310  // Promote vectors of integers to vectors with the same number
1311  // of elements, with a wider element type.
1312  if (SVT.getVectorElementType().getSizeInBits() > EltVT.getSizeInBits()
1313  && SVT.getVectorNumElements() == NElts && isTypeLegal(SVT)
1314  && SVT.getScalarType().isInteger()) {
1315  TransformToType[i] = SVT;
1316  RegisterTypeForVT[i] = SVT;
1317  NumRegistersForVT[i] = 1;
1318  ValueTypeActions.setTypeAction(VT, TypePromoteInteger);
1319  IsLegalWiderType = true;
1320  break;
1321  }
1322  }
1323  if (IsLegalWiderType)
1324  break;
1325  }
1326  case TypeWidenVector: {
1327  // Try to widen the vector.
1328  for (unsigned nVT = i + 1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
1329  MVT SVT = (MVT::SimpleValueType) nVT;
1330  if (SVT.getVectorElementType() == EltVT
1331  && SVT.getVectorNumElements() > NElts && isTypeLegal(SVT)) {
1332  TransformToType[i] = SVT;
1333  RegisterTypeForVT[i] = SVT;
1334  NumRegistersForVT[i] = 1;
1335  ValueTypeActions.setTypeAction(VT, TypeWidenVector);
1336  IsLegalWiderType = true;
1337  break;
1338  }
1339  }
1340  if (IsLegalWiderType)
1341  break;
1342  }
1343  case TypeSplitVector:
1344  case TypeScalarizeVector: {
1345  MVT IntermediateVT;
1346  MVT RegisterVT;
1347  unsigned NumIntermediates;
1348  NumRegistersForVT[i] = getVectorTypeBreakdownMVT(VT, IntermediateVT,
1349  NumIntermediates, RegisterVT, this);
1350  RegisterTypeForVT[i] = RegisterVT;
1351 
1352  MVT NVT = VT.getPow2VectorType();
1353  if (NVT == VT) {
1354  // Type is already a power of 2. The default action is to split.
1355  TransformToType[i] = MVT::Other;
1356  if (PreferredAction == TypeScalarizeVector)
1357  ValueTypeActions.setTypeAction(VT, TypeScalarizeVector);
1358  else if (PreferredAction == TypeSplitVector)
1359  ValueTypeActions.setTypeAction(VT, TypeSplitVector);
1360  else
1361  // Set type action according to the number of elements.
1362  ValueTypeActions.setTypeAction(VT, NElts == 1 ? TypeScalarizeVector
1363  : TypeSplitVector);
1364  } else {
1365  TransformToType[i] = NVT;
1366  ValueTypeActions.setTypeAction(VT, TypeWidenVector);
1367  }
1368  break;
1369  }
1370  default:
1371  llvm_unreachable("Unknown vector legalization action!");
1372  }
1373  }
1374 
1375  // Determine the 'representative' register class for each value type.
1376 // A representative register class is the largest (meaning one which is
1377 // not a sub-register class / subreg register class) legal register class for
1378 // a group of value types. For example, on i386 the representative class for
1379 // i8, i16, and i32 would be GR32; on x86_64 it is GR64.
1380  for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
1381  const TargetRegisterClass* RRC;
1382  uint8_t Cost;
1383  std::tie(RRC, Cost) = findRepresentativeClass(TRI, (MVT::SimpleValueType)i);
1384  RepRegClassForVT[i] = RRC;
1385  RepRegClassCostForVT[i] = Cost;
1386  }
1387 }
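// Illustrative outcome (assuming a 32-bit target whose largest legal integer
// type is i32): the loops above record NumRegistersForVT[i64] = 2, set
// TransformToType[i64] = i32 and mark i64 as TypeExpandInteger, while i8 and
// i16, when they have no register class of their own, are marked
// TypePromoteInteger with the next larger legal type, here i32, as their
// register type.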
1388 
1389 EVT TargetLoweringBase::getSetCCResultType(const DataLayout &DL, LLVMContext &,
1390  EVT VT) const {
1391  assert(!VT.isVector() && "No default SetCC type for vectors!");
1392  return getPointerTy(DL).SimpleTy;
1393 }
1394 
1395 MVT::SimpleValueType TargetLoweringBase::getCmpLibcallReturnType() const {
1396  return MVT::i32; // return the default value
1397 }
1398 
1399 /// getVectorTypeBreakdown - Vector types are broken down into some number of
1400 /// legal first class types. For example, MVT::v8f32 maps to 2 MVT::v4f32
1401 /// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack.
1402 /// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86.
1403 ///
1404 /// This method returns the number of registers needed, and the VT for each
1405 /// register. It also returns the VT and quantity of the intermediate values
1406 /// before they are promoted/expanded.
1407 ///
1408 unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
1409  EVT &IntermediateVT,
1410  unsigned &NumIntermediates,
1411  MVT &RegisterVT) const {
1412  unsigned NumElts = VT.getVectorNumElements();
1413 
1414  // If there is a wider vector type with the same element type as this one,
1415  // or a promoted vector type that has the same number of elements which
1416  // are wider, then we should convert to that legal vector type.
1417  // This handles things like <2 x float> -> <4 x float> and
1418  // <4 x i1> -> <4 x i32>.
1419  LegalizeTypeAction TA = getTypeAction(Context, VT);
1420  if (NumElts != 1 && (TA == TypeWidenVector || TA == TypePromoteInteger)) {
1421  EVT RegisterEVT = getTypeToTransformTo(Context, VT);
1422  if (isTypeLegal(RegisterEVT)) {
1423  IntermediateVT = RegisterEVT;
1424  RegisterVT = RegisterEVT.getSimpleVT();
1425  NumIntermediates = 1;
1426  return 1;
1427  }
1428  }
1429 
1430  // Figure out the right, legal destination reg to copy into.
1431  EVT EltTy = VT.getVectorElementType();
1432 
1433  unsigned NumVectorRegs = 1;
1434 
1435  // FIXME: We don't support non-power-of-2-sized vectors for now. Ideally we
1436  // could break down into LHS/RHS like LegalizeDAG does.
1437  if (!isPowerOf2_32(NumElts)) {
1438  NumVectorRegs = NumElts;
1439  NumElts = 1;
1440  }
1441 
1442  // Divide the input until we get to a supported size. This will always
1443  // end with a scalar if the target doesn't support vectors.
1444  while (NumElts > 1 && !isTypeLegal(
1445  EVT::getVectorVT(Context, EltTy, NumElts))) {
1446  NumElts >>= 1;
1447  NumVectorRegs <<= 1;
1448  }
1449 
1450  NumIntermediates = NumVectorRegs;
1451 
1452  EVT NewVT = EVT::getVectorVT(Context, EltTy, NumElts);
1453  if (!isTypeLegal(NewVT))
1454  NewVT = EltTy;
1455  IntermediateVT = NewVT;
1456 
1457  MVT DestVT = getRegisterType(Context, NewVT);
1458  RegisterVT = DestVT;
1459  unsigned NewVTSize = NewVT.getSizeInBits();
1460 
1461  // Convert sizes such as i33 to i64.
1462  if (!isPowerOf2_32(NewVTSize))
1463  NewVTSize = NextPowerOf2(NewVTSize);
1464 
1465  if (EVT(DestVT).bitsLT(NewVT)) // Value is expanded, e.g. i64 -> i16.
1466  return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits());
1467 
1468  // Otherwise, promotion or legal types use the same number of registers as
1469  // the vector decimated to the appropriate level.
1470  return NumVectorRegs;
1471 }
1472 
1473 /// Get the EVTs and ArgFlags collections that represent the legalized return
1474 /// type of the given function. This does not require a DAG or a return value,
1475 /// and is suitable for use before any DAGs for the function are constructed.
1476 /// TODO: Move this out of TargetLowering.cpp.
1477 void llvm::GetReturnInfo(Type *ReturnType, AttributeSet attr,
1478  SmallVectorImpl<ISD::OutputArg> &Outs,
1479  const TargetLowering &TLI, const DataLayout &DL) {
1480  SmallVector<EVT, 4> ValueVTs;
1481  ComputeValueVTs(TLI, DL, ReturnType, ValueVTs);
1482  unsigned NumValues = ValueVTs.size();
1483  if (NumValues == 0) return;
1484 
1485  for (unsigned j = 0, f = NumValues; j != f; ++j) {
1486  EVT VT = ValueVTs[j];
1487  ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
1488 
1490  ExtendKind = ISD::SIGN_EXTEND;
1492  ExtendKind = ISD::ZERO_EXTEND;
1493 
1494  // FIXME: C calling convention requires the return type to be promoted to
1495  // at least 32-bit. But this is not necessary for non-C calling
1496  // conventions. The frontend should mark functions whose return values
1497  // require promoting with signext or zeroext attributes.
1498  if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
1499  MVT MinVT = TLI.getRegisterType(ReturnType->getContext(), MVT::i32);
1500  if (VT.bitsLT(MinVT))
1501  VT = MinVT;
1502  }
1503 
1504  unsigned NumParts = TLI.getNumRegisters(ReturnType->getContext(), VT);
1505  MVT PartVT = TLI.getRegisterType(ReturnType->getContext(), VT);
1506 
1507  // 'inreg' on function refers to return value
1510  Flags.setInReg();
1511 
1512  // Propagate extension type if any
1514  Flags.setSExt();
1516  Flags.setZExt();
1517 
1518  for (unsigned i = 0; i < NumParts; ++i)
1519  Outs.push_back(ISD::OutputArg(Flags, PartVT, VT, /*isFixed=*/true, 0, 0));
1520  }
1521 }
1522 
1523 /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
1524 /// function arguments in the caller parameter area. This is the actual
1525 /// alignment, not its logarithm.
1526 unsigned TargetLoweringBase::getByValTypeAlignment(Type *Ty,
1527  const DataLayout &DL) const {
1528  return DL.getABITypeAlignment(Ty);
1529 }
1530 
1531 //===----------------------------------------------------------------------===//
1532 // TargetTransformInfo Helpers
1533 //===----------------------------------------------------------------------===//
1534 
1535 int TargetLoweringBase::InstructionOpcodeToISD(unsigned Opcode) const {
1536  enum InstructionOpcodes {
1537 #define HANDLE_INST(NUM, OPCODE, CLASS) OPCODE = NUM,
1538 #define LAST_OTHER_INST(NUM) InstructionOpcodesCount = NUM
1539 #include "llvm/IR/Instruction.def"
1540  };
1541  switch (static_cast<InstructionOpcodes>(Opcode)) {
1542  case Ret: return 0;
1543  case Br: return 0;
1544  case Switch: return 0;
1545  case IndirectBr: return 0;
1546  case Invoke: return 0;
1547  case Resume: return 0;
1548  case Unreachable: return 0;
1549  case Add: return ISD::ADD;
1550  case FAdd: return ISD::FADD;
1551  case Sub: return ISD::SUB;
1552  case FSub: return ISD::FSUB;
1553  case Mul: return ISD::MUL;
1554  case FMul: return ISD::FMUL;
1555  case UDiv: return ISD::UDIV;
1556  case SDiv: return ISD::SDIV;
1557  case FDiv: return ISD::FDIV;
1558  case URem: return ISD::UREM;
1559  case SRem: return ISD::SREM;
1560  case FRem: return ISD::FREM;
1561  case Shl: return ISD::SHL;
1562  case LShr: return ISD::SRL;
1563  case AShr: return ISD::SRA;
1564  case And: return ISD::AND;
1565  case Or: return ISD::OR;
1566  case Xor: return ISD::XOR;
1567  case Alloca: return 0;
1568  case Load: return ISD::LOAD;
1569  case Store: return ISD::STORE;
1570  case GetElementPtr: return 0;
1571  case Fence: return 0;
1572  case AtomicCmpXchg: return 0;
1573  case AtomicRMW: return 0;
1574  case Trunc: return ISD::TRUNCATE;
1575  case ZExt: return ISD::ZERO_EXTEND;
1576  case SExt: return ISD::SIGN_EXTEND;
1577  case FPToUI: return ISD::FP_TO_UINT;
1578  case FPToSI: return ISD::FP_TO_SINT;
1579  case UIToFP: return ISD::UINT_TO_FP;
1580  case SIToFP: return ISD::SINT_TO_FP;
1581  case FPTrunc: return ISD::FP_ROUND;
1582  case FPExt: return ISD::FP_EXTEND;
1583  case PtrToInt: return ISD::BITCAST;
1584  case IntToPtr: return ISD::BITCAST;
1585  case BitCast: return ISD::BITCAST;
1586  case AddrSpaceCast: return ISD::ADDRSPACECAST;
1587  case ICmp: return ISD::SETCC;
1588  case FCmp: return ISD::SETCC;
1589  case PHI: return 0;
1590  case Call: return 0;
1591  case Select: return ISD::SELECT;
1592  case UserOp1: return 0;
1593  case UserOp2: return 0;
1594  case VAArg: return 0;
1595  case ExtractElement: return ISD::EXTRACT_VECTOR_ELT;
1596  case InsertElement: return ISD::INSERT_VECTOR_ELT;
1597  case ShuffleVector: return ISD::VECTOR_SHUFFLE;
1598  case ExtractValue: return ISD::MERGE_VALUES;
1599  case InsertValue: return ISD::MERGE_VALUES;
1600  case LandingPad: return 0;
1601  }
1602 
1603  llvm_unreachable("Unknown instruction type encountered!");
1604 }
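// --- Illustrative sketch (not part of this file): the usual calling pattern
// for the table above, as cost models tend to use it. classifyForCostSketch
// is a hypothetical helper; it assumes valid TLI and I references and treats
// a return value of 0 as "no single ISD node" (control flow, calls, GEPs, ...).
#include "llvm/IR/Instruction.h"
#include "llvm/Target/TargetLowering.h"

static int classifyForCostSketch(const llvm::TargetLoweringBase &TLI,
                                 const llvm::Instruction &I) {
  int ISDOpcode = TLI.InstructionOpcodeToISD(I.getOpcode());
  if (ISDOpcode == 0)
    return -1; // no one-to-one SelectionDAG node for this IR instruction
  return ISDOpcode;
}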
1605 
1606 std::pair<unsigned, MVT>
1607 TargetLoweringBase::getTypeLegalizationCost(const DataLayout &DL,
1608                                             Type *Ty) const {
1609  LLVMContext &C = Ty->getContext();
1610  EVT MTy = getValueType(DL, Ty);
1611 
1612  unsigned Cost = 1;
1613  // We keep legalizing the type until we find a legal kind. We assume that
1614  // the only operation that costs anything is the split. After splitting
1615  // we need to handle two types.
1616  while (true) {
1617  LegalizeKind LK = getTypeConversion(C, MTy);
1618 
1619  if (LK.first == TypeLegal)
1620  return std::make_pair(Cost, MTy.getSimpleVT());
1621 
1622  if (LK.first == TypeSplitVector || LK.first == TypeExpandInteger)
1623  Cost *= 2;
1624 
1625  // Keep legalizing the type.
1626  MTy = LK.second;
1627  }
1628 }
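// --- Illustrative sketch (not part of this file): the cost-doubling loop
// above, modeled on raw vector widths under the assumption that the widest
// legal vector register is 128 bits. Each split halves the width and doubles
// the running cost; an already-legal width costs 1.
#include <cstdio>

static unsigned legalizationCostSketch(unsigned VectorBits) {
  const unsigned WidestLegalBits = 128; // assumption for this sketch
  unsigned Cost = 1;
  while (VectorBits > WidestLegalBits) {
    VectorBits /= 2; // one split: two halves to handle
    Cost *= 2;
  }
  return Cost;
}

int main() {
  std::printf("512-bit vector -> cost %u\n", legalizationCostSketch(512)); // 4
  std::printf("128-bit vector -> cost %u\n", legalizationCostSketch(128)); // 1
  return 0;
}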
1629 
1630 //===----------------------------------------------------------------------===//
1631 // Loop Strength Reduction hooks
1632 //===----------------------------------------------------------------------===//
1633 
1634 /// isLegalAddressingMode - Return true if the addressing mode represented
1635 /// by AM is legal for this target, for a load/store of the specified type.
1636 bool TargetLoweringBase::isLegalAddressingMode(const DataLayout &DL,
1637                                                const AddrMode &AM, Type *Ty,
1638  unsigned AS) const {
1639  // The default implementation supports only a conservative RISC-style
1640  // r+r and r+i addressing mode.
1641 
1642  // Allows a sign-extended 16-bit immediate field.
1643  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
1644  return false;
1645 
1646  // No global is ever allowed as a base.
1647  if (AM.BaseGV)
1648  return false;
1649 
1650  // Only support r+r,
1651  switch (AM.Scale) {
1652  case 0: // "r+i" or just "i", depending on HasBaseReg.
1653  break;
1654  case 1:
1655  if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed.
1656  return false;
1657  // Otherwise we have r+r or r+i.
1658  break;
1659  case 2:
1660  if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed.
1661  return false;
1662  // Allow 2*r as r+r.
1663  break;
1664  default: // Don't allow n * r
1665  return false;
1666  }
1667 
1668  return true;
1669 }
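// --- Illustrative sketch (not part of this file): the default checks above
// reproduced on a plain struct so the accepted forms are easy to see. The
// BaseGV test is omitted here because any global base is rejected outright.
#include <cstdint>
#include <cstdio>

struct AddrModeSketch {
  int64_t BaseOffs;   // constant offset
  bool HasBaseReg;    // base register present?
  int64_t Scale;      // scale applied to the scaled register
};

static bool defaultIsLegalSketch(const AddrModeSketch &AM) {
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16) - 1)
    return false;                                    // must fit a signed 16-bit field
  switch (AM.Scale) {
  case 0:  return true;                              // "r+i" or plain "i"
  case 1:  return !(AM.HasBaseReg && AM.BaseOffs);   // "r+r", but not "r+r+i"
  case 2:  return !(AM.HasBaseReg || AM.BaseOffs);   // "2*r" folded to "r+r" only
  default: return false;                             // no general "n*r"
  }
}

int main() {
  std::printf("r+i (off 8): %d\n", defaultIsLegalSketch({8, true, 0}));  // 1
  std::printf("r+r:         %d\n", defaultIsLegalSketch({0, true, 1}));  // 1
  std::printf("r+r+i:       %d\n", defaultIsLegalSketch({4, true, 1}));  // 0
  std::printf("4*r:         %d\n", defaultIsLegalSketch({0, false, 4})); // 0
  return 0;
}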