Bug Summary

File: llvm/include/llvm/CodeGen/SelectionDAGNodes.h
Warning: line 1110, column 10
Called C++ object pointer is null

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name WebAssemblyISelLowering.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/build-llvm/lib/Target/WebAssembly -resource-dir /usr/lib/llvm-13/lib/clang/13.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/build-llvm/lib/Target/WebAssembly -I /build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly -I /build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/build-llvm/include -I /build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/include -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem 
/usr/lib/llvm-13/lib/clang/13.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/build-llvm/lib/Target/WebAssembly -fdebug-prefix-map=/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4=. -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-07-23-023125-8721-1 -x c++ /build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp

/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp

1//=- WebAssemblyISelLowering.cpp - WebAssembly DAG Lowering Implementation -==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file implements the WebAssemblyTargetLowering class.
11///
12//===----------------------------------------------------------------------===//
13
14#include "WebAssemblyISelLowering.h"
15#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
16#include "Utils/WebAssemblyUtilities.h"
17#include "WebAssemblyMachineFunctionInfo.h"
18#include "WebAssemblySubtarget.h"
19#include "WebAssemblyTargetMachine.h"
20#include "llvm/CodeGen/CallingConvLower.h"
21#include "llvm/CodeGen/MachineInstrBuilder.h"
22#include "llvm/CodeGen/MachineJumpTableInfo.h"
23#include "llvm/CodeGen/MachineModuleInfo.h"
24#include "llvm/CodeGen/MachineRegisterInfo.h"
25#include "llvm/CodeGen/SelectionDAG.h"
26#include "llvm/CodeGen/SelectionDAGNodes.h"
27#include "llvm/CodeGen/WasmEHFuncInfo.h"
28#include "llvm/IR/DiagnosticInfo.h"
29#include "llvm/IR/DiagnosticPrinter.h"
30#include "llvm/IR/Function.h"
31#include "llvm/IR/Intrinsics.h"
32#include "llvm/IR/IntrinsicsWebAssembly.h"
33#include "llvm/Support/Debug.h"
34#include "llvm/Support/ErrorHandling.h"
35#include "llvm/Support/MathExtras.h"
36#include "llvm/Support/raw_ostream.h"
37#include "llvm/Target/TargetOptions.h"
38using namespace llvm;
39
40#define DEBUG_TYPE"wasm-lower" "wasm-lower"
41
42WebAssemblyTargetLowering::WebAssemblyTargetLowering(
43 const TargetMachine &TM, const WebAssemblySubtarget &STI)
44 : TargetLowering(TM), Subtarget(&STI) {
45 auto MVTPtr = Subtarget->hasAddr64() ? MVT::i64 : MVT::i32;
46
47 // Booleans always contain 0 or 1.
48 setBooleanContents(ZeroOrOneBooleanContent);
49 // Except in SIMD vectors
50 setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
51 // We don't know the microarchitecture here, so just reduce register pressure.
52 setSchedulingPreference(Sched::RegPressure);
53 // Tell ISel that we have a stack pointer.
54 setStackPointerRegisterToSaveRestore(
55 Subtarget->hasAddr64() ? WebAssembly::SP64 : WebAssembly::SP32);
56 // Set up the register classes.
57 addRegisterClass(MVT::i32, &WebAssembly::I32RegClass);
58 addRegisterClass(MVT::i64, &WebAssembly::I64RegClass);
59 addRegisterClass(MVT::f32, &WebAssembly::F32RegClass);
60 addRegisterClass(MVT::f64, &WebAssembly::F64RegClass);
61 if (Subtarget->hasSIMD128()) {
62 addRegisterClass(MVT::v16i8, &WebAssembly::V128RegClass);
63 addRegisterClass(MVT::v8i16, &WebAssembly::V128RegClass);
64 addRegisterClass(MVT::v4i32, &WebAssembly::V128RegClass);
65 addRegisterClass(MVT::v4f32, &WebAssembly::V128RegClass);
66 addRegisterClass(MVT::v2i64, &WebAssembly::V128RegClass);
67 addRegisterClass(MVT::v2f64, &WebAssembly::V128RegClass);
68 }
69 // Compute derived properties from the register classes.
70 computeRegisterProperties(Subtarget->getRegisterInfo());
71
72 // Transform loads and stores to pointers in address space 1 to loads and
73 // stores to WebAssembly global variables, outside linear memory.
74 for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64}) {
75 setOperationAction(ISD::LOAD, T, Custom);
76 setOperationAction(ISD::STORE, T, Custom);
77 }
78 if (Subtarget->hasSIMD128()) {
79 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
80 MVT::v2f64}) {
81 setOperationAction(ISD::LOAD, T, Custom);
82 setOperationAction(ISD::STORE, T, Custom);
83 }
84 }
85
86 setOperationAction(ISD::GlobalAddress, MVTPtr, Custom);
87 setOperationAction(ISD::GlobalTLSAddress, MVTPtr, Custom);
88 setOperationAction(ISD::ExternalSymbol, MVTPtr, Custom);
89 setOperationAction(ISD::JumpTable, MVTPtr, Custom);
90 setOperationAction(ISD::BlockAddress, MVTPtr, Custom);
91 setOperationAction(ISD::BRIND, MVT::Other, Custom);
92
93 // Take the default expansion for va_arg, va_copy, and va_end. There is no
94 // default action for va_start, so we do that custom.
95 setOperationAction(ISD::VASTART, MVT::Other, Custom);
96 setOperationAction(ISD::VAARG, MVT::Other, Expand);
97 setOperationAction(ISD::VACOPY, MVT::Other, Expand);
98 setOperationAction(ISD::VAEND, MVT::Other, Expand);
99
100 for (auto T : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
101 // Don't expand the floating-point types to constant pools.
102 setOperationAction(ISD::ConstantFP, T, Legal);
103 // Expand floating-point comparisons.
104 for (auto CC : {ISD::SETO, ISD::SETUO, ISD::SETUEQ, ISD::SETONE,
105 ISD::SETULT, ISD::SETULE, ISD::SETUGT, ISD::SETUGE})
106 setCondCodeAction(CC, T, Expand);
107 // Expand floating-point library function operators.
108 for (auto Op :
109 {ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FMA})
110 setOperationAction(Op, T, Expand);
111 // Note supported floating-point library function operators that otherwise
112 // default to expand.
113 for (auto Op :
114 {ISD::FCEIL, ISD::FFLOOR, ISD::FTRUNC, ISD::FNEARBYINT, ISD::FRINT})
115 setOperationAction(Op, T, Legal);
116 // Support minimum and maximum, which otherwise default to expand.
117 setOperationAction(ISD::FMINIMUM, T, Legal);
118 setOperationAction(ISD::FMAXIMUM, T, Legal);
119 // WebAssembly currently has no builtin f16 support.
120 setOperationAction(ISD::FP16_TO_FP, T, Expand);
121 setOperationAction(ISD::FP_TO_FP16, T, Expand);
122 setLoadExtAction(ISD::EXTLOAD, T, MVT::f16, Expand);
123 setTruncStoreAction(T, MVT::f16, Expand);
124 }
125
126 // Expand unavailable integer operations.
127 for (auto Op :
128 {ISD::BSWAP, ISD::SMUL_LOHI, ISD::UMUL_LOHI, ISD::MULHS, ISD::MULHU,
129 ISD::SDIVREM, ISD::UDIVREM, ISD::SHL_PARTS, ISD::SRA_PARTS,
130 ISD::SRL_PARTS, ISD::ADDC, ISD::ADDE, ISD::SUBC, ISD::SUBE}) {
131 for (auto T : {MVT::i32, MVT::i64})
132 setOperationAction(Op, T, Expand);
133 if (Subtarget->hasSIMD128())
134 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
135 setOperationAction(Op, T, Expand);
136 }
137
138 if (Subtarget->hasNontrappingFPToInt())
139 for (auto Op : {ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT})
140 for (auto T : {MVT::i32, MVT::i64})
141 setOperationAction(Op, T, Custom);
142
143 // SIMD-specific configuration
144 if (Subtarget->hasSIMD128()) {
145 // Hoist bitcasts out of shuffles
146 setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
147
148 // Combine extends of extract_subvectors into widening ops
149 setTargetDAGCombine(ISD::SIGN_EXTEND);
150 setTargetDAGCombine(ISD::ZERO_EXTEND);
151
152 // Combine int_to_fp or fp_extend of extract_vectors and vice versa into
153 // conversions ops
154 setTargetDAGCombine(ISD::SINT_TO_FP);
155 setTargetDAGCombine(ISD::UINT_TO_FP);
156 setTargetDAGCombine(ISD::FP_EXTEND);
157 setTargetDAGCombine(ISD::EXTRACT_SUBVECTOR);
158
159 // Combine fp_to_{s,u}int_sat or fp_round of concat_vectors or vice versa
160 // into conversion ops
161 setTargetDAGCombine(ISD::FP_TO_SINT_SAT);
162 setTargetDAGCombine(ISD::FP_TO_UINT_SAT);
163 setTargetDAGCombine(ISD::FP_ROUND);
164 setTargetDAGCombine(ISD::CONCAT_VECTORS);
165
166 // Support saturating add for i8x16 and i16x8
167 for (auto Op : {ISD::SADDSAT, ISD::UADDSAT})
168 for (auto T : {MVT::v16i8, MVT::v8i16})
169 setOperationAction(Op, T, Legal);
170
171 // Support integer abs
172 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
173 setOperationAction(ISD::ABS, T, Legal);
174
175 // Custom lower BUILD_VECTORs to minimize number of replace_lanes
176 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
177 MVT::v2f64})
178 setOperationAction(ISD::BUILD_VECTOR, T, Custom);
179
180 // We have custom shuffle lowering to expose the shuffle mask
181 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
182 MVT::v2f64})
183 setOperationAction(ISD::VECTOR_SHUFFLE, T, Custom);
184
185 // Custom lowering since wasm shifts must have a scalar shift amount
186 for (auto Op : {ISD::SHL, ISD::SRA, ISD::SRL})
187 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
188 setOperationAction(Op, T, Custom);
189
190 // Custom lower lane accesses to expand out variable indices
191 for (auto Op : {ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_VECTOR_ELT})
192 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
193 MVT::v2f64})
194 setOperationAction(Op, T, Custom);
195
196 // There is no i8x16.mul instruction
197 setOperationAction(ISD::MUL, MVT::v16i8, Expand);
198
199 // There is no vector conditional select instruction
200 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
201 MVT::v2f64})
202 setOperationAction(ISD::SELECT_CC, T, Expand);
203
204 // Expand integer operations supported for scalars but not SIMD
205 for (auto Op : {ISD::CTLZ, ISD::CTTZ, ISD::CTPOP, ISD::SDIV, ISD::UDIV,
206 ISD::SREM, ISD::UREM, ISD::ROTL, ISD::ROTR})
207 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
208 setOperationAction(Op, T, Expand);
209
210 // But we do have integer min and max operations
211 for (auto Op : {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
212 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
213 setOperationAction(Op, T, Legal);
214
215 // And we have popcnt for i8x16
216 setOperationAction(ISD::CTPOP, MVT::v16i8, Legal);
217
218 // Expand float operations supported for scalars but not SIMD
219 for (auto Op : {ISD::FCOPYSIGN, ISD::FLOG, ISD::FLOG2, ISD::FLOG10,
220 ISD::FEXP, ISD::FEXP2, ISD::FRINT})
221 for (auto T : {MVT::v4f32, MVT::v2f64})
222 setOperationAction(Op, T, Expand);
223
224 // Unsigned comparison operations are unavailable for i64x2 vectors.
225 for (auto CC : {ISD::SETUGT, ISD::SETUGE, ISD::SETULT, ISD::SETULE})
226 setCondCodeAction(CC, MVT::v2i64, Custom);
227
228 // 64x2 conversions are not in the spec
229 for (auto Op :
230 {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT})
231 for (auto T : {MVT::v2i64, MVT::v2f64})
232 setOperationAction(Op, T, Expand);
233
234 // But saturating fp_to_int converstions are
235 for (auto Op : {ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT})
236 setOperationAction(Op, MVT::v4i32, Custom);
237 }
238
239 // As a special case, these operators use the type to mean the type to
240 // sign-extend from.
241 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
242 if (!Subtarget->hasSignExt()) {
243 // Sign extends are legal only when extending a vector extract
244 auto Action = Subtarget->hasSIMD128() ? Custom : Expand;
245 for (auto T : {MVT::i8, MVT::i16, MVT::i32})
246 setOperationAction(ISD::SIGN_EXTEND_INREG, T, Action);
247 }
248 for (auto T : MVT::integer_fixedlen_vector_valuetypes())
249 setOperationAction(ISD::SIGN_EXTEND_INREG, T, Expand);
250
251 // Dynamic stack allocation: use the default expansion.
252 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
253 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
254 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVTPtr, Expand);
255
256 setOperationAction(ISD::FrameIndex, MVT::i32, Custom);
257 setOperationAction(ISD::FrameIndex, MVT::i64, Custom);
258 setOperationAction(ISD::CopyToReg, MVT::Other, Custom);
259
260 // Expand these forms; we pattern-match the forms that we can handle in isel.
261 for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64})
262 for (auto Op : {ISD::BR_CC, ISD::SELECT_CC})
263 setOperationAction(Op, T, Expand);
264
265 // We have custom switch handling.
266 setOperationAction(ISD::BR_JT, MVT::Other, Custom);
267
268 // WebAssembly doesn't have:
269 // - Floating-point extending loads.
270 // - Floating-point truncating stores.
271 // - i1 extending loads.
272 // - truncating SIMD stores and most extending loads
273 setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
274 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
275 for (auto T : MVT::integer_valuetypes())
276 for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
277 setLoadExtAction(Ext, T, MVT::i1, Promote);
278 if (Subtarget->hasSIMD128()) {
279 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v4f32,
280 MVT::v2f64}) {
281 for (auto MemT : MVT::fixedlen_vector_valuetypes()) {
282 if (MVT(T) != MemT) {
283 setTruncStoreAction(T, MemT, Expand);
284 for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
285 setLoadExtAction(Ext, T, MemT, Expand);
286 }
287 }
288 }
289 // But some vector extending loads are legal
290 for (auto Ext : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}) {
291 setLoadExtAction(Ext, MVT::v8i16, MVT::v8i8, Legal);
292 setLoadExtAction(Ext, MVT::v4i32, MVT::v4i16, Legal);
293 setLoadExtAction(Ext, MVT::v2i64, MVT::v2i32, Legal);
294 }
295 // And some truncating stores are legal as well
296 setTruncStoreAction(MVT::v8i16, MVT::v8i8, Legal);
297 setTruncStoreAction(MVT::v4i32, MVT::v4i16, Legal);
298 }
299
300 // Don't do anything clever with build_pairs
301 setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
302
303 // Trap lowers to wasm unreachable
304 setOperationAction(ISD::TRAP, MVT::Other, Legal);
305 setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
306
307 // Exception handling intrinsics
308 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
309 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
310 setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
311
312 setMaxAtomicSizeInBitsSupported(64);
313
314 // Override the __gnu_f2h_ieee/__gnu_h2f_ieee names so that the f32 name is
315 // consistent with the f64 and f128 names.
316 setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
317 setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
318
319 // Define the emscripten name for return address helper.
320 // TODO: when implementing other Wasm backends, make this generic or only do
321 // this on emscripten depending on what they end up doing.
322 setLibcallName(RTLIB::RETURN_ADDRESS, "emscripten_return_address");
323
324 // Always convert switches to br_tables unless there is only one case, which
325 // is equivalent to a simple branch. This reduces code size for wasm, and we
326 // defer possible jump table optimizations to the VM.
327 setMinimumJumpTableEntries(2);
328}
329
330TargetLowering::AtomicExpansionKind
331WebAssemblyTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
332 // We have wasm instructions for these
333 switch (AI->getOperation()) {
334 case AtomicRMWInst::Add:
335 case AtomicRMWInst::Sub:
336 case AtomicRMWInst::And:
337 case AtomicRMWInst::Or:
338 case AtomicRMWInst::Xor:
339 case AtomicRMWInst::Xchg:
340 return AtomicExpansionKind::None;
341 default:
342 break;
343 }
344 return AtomicExpansionKind::CmpXChg;
345}
346
347bool WebAssemblyTargetLowering::shouldScalarizeBinop(SDValue VecOp) const {
348 // Implementation copied from X86TargetLowering.
349 unsigned Opc = VecOp.getOpcode();
350
351 // Assume target opcodes can't be scalarized.
352 // TODO - do we have any exceptions?
353 if (Opc >= ISD::BUILTIN_OP_END)
354 return false;
355
356 // If the vector op is not supported, try to convert to scalar.
357 EVT VecVT = VecOp.getValueType();
358 if (!isOperationLegalOrCustomOrPromote(Opc, VecVT))
359 return true;
360
361 // If the vector op is supported, but the scalar op is not, the transform may
362 // not be worthwhile.
363 EVT ScalarVT = VecVT.getScalarType();
364 return isOperationLegalOrCustomOrPromote(Opc, ScalarVT);
365}
366
367FastISel *WebAssemblyTargetLowering::createFastISel(
368 FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo) const {
369 return WebAssembly::createFastISel(FuncInfo, LibInfo);
370}
371
372MVT WebAssemblyTargetLowering::getScalarShiftAmountTy(const DataLayout & /*DL*/,
373 EVT VT) const {
374 unsigned BitWidth = NextPowerOf2(VT.getSizeInBits() - 1);
375 if (BitWidth > 1 && BitWidth < 8)
376 BitWidth = 8;
377
378 if (BitWidth > 64) {
379 // The shift will be lowered to a libcall, and compiler-rt libcalls expect
380 // the count to be an i32.
381 BitWidth = 32;
382 assert(BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) &&(static_cast <bool> (BitWidth >= Log2_32_Ceil(VT.getSizeInBits
()) && "32-bit shift counts ought to be enough for anyone"
) ? void (0) : __assert_fail ("BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) && \"32-bit shift counts ought to be enough for anyone\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 383, __extension__ __PRETTY_FUNCTION__))
383 "32-bit shift counts ought to be enough for anyone")(static_cast <bool> (BitWidth >= Log2_32_Ceil(VT.getSizeInBits
()) && "32-bit shift counts ought to be enough for anyone"
) ? void (0) : __assert_fail ("BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) && \"32-bit shift counts ought to be enough for anyone\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 383, __extension__ __PRETTY_FUNCTION__))
;
384 }
385
386 MVT Result = MVT::getIntegerVT(BitWidth);
387 assert(Result != MVT::INVALID_SIMPLE_VALUE_TYPE &&(static_cast <bool> (Result != MVT::INVALID_SIMPLE_VALUE_TYPE
&& "Unable to represent scalar shift amount type") ?
void (0) : __assert_fail ("Result != MVT::INVALID_SIMPLE_VALUE_TYPE && \"Unable to represent scalar shift amount type\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 388, __extension__ __PRETTY_FUNCTION__))
388 "Unable to represent scalar shift amount type")(static_cast <bool> (Result != MVT::INVALID_SIMPLE_VALUE_TYPE
&& "Unable to represent scalar shift amount type") ?
void (0) : __assert_fail ("Result != MVT::INVALID_SIMPLE_VALUE_TYPE && \"Unable to represent scalar shift amount type\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 388, __extension__ __PRETTY_FUNCTION__))
;
389 return Result;
390}
391
392// Lower an fp-to-int conversion operator from the LLVM opcode, which has an
393// undefined result on invalid/overflow, to the WebAssembly opcode, which
394// traps on invalid/overflow.
395static MachineBasicBlock *LowerFPToInt(MachineInstr &MI, DebugLoc DL,
396 MachineBasicBlock *BB,
397 const TargetInstrInfo &TII,
398 bool IsUnsigned, bool Int64,
399 bool Float64, unsigned LoweredOpcode) {
400 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
401
402 Register OutReg = MI.getOperand(0).getReg();
403 Register InReg = MI.getOperand(1).getReg();
404
405 unsigned Abs = Float64 ? WebAssembly::ABS_F64 : WebAssembly::ABS_F32;
406 unsigned FConst = Float64 ? WebAssembly::CONST_F64 : WebAssembly::CONST_F32;
407 unsigned LT = Float64 ? WebAssembly::LT_F64 : WebAssembly::LT_F32;
408 unsigned GE = Float64 ? WebAssembly::GE_F64 : WebAssembly::GE_F32;
409 unsigned IConst = Int64 ? WebAssembly::CONST_I64 : WebAssembly::CONST_I32;
410 unsigned Eqz = WebAssembly::EQZ_I32;
411 unsigned And = WebAssembly::AND_I32;
412 int64_t Limit = Int64 ? INT64_MIN(-9223372036854775807L -1) : INT32_MIN(-2147483647-1);
413 int64_t Substitute = IsUnsigned ? 0 : Limit;
414 double CmpVal = IsUnsigned ? -(double)Limit * 2.0 : -(double)Limit;
415 auto &Context = BB->getParent()->getFunction().getContext();
416 Type *Ty = Float64 ? Type::getDoubleTy(Context) : Type::getFloatTy(Context);
417
418 const BasicBlock *LLVMBB = BB->getBasicBlock();
419 MachineFunction *F = BB->getParent();
420 MachineBasicBlock *TrueMBB = F->CreateMachineBasicBlock(LLVMBB);
421 MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVMBB);
422 MachineBasicBlock *DoneMBB = F->CreateMachineBasicBlock(LLVMBB);
423
424 MachineFunction::iterator It = ++BB->getIterator();
425 F->insert(It, FalseMBB);
426 F->insert(It, TrueMBB);
427 F->insert(It, DoneMBB);
428
429 // Transfer the remainder of BB and its successor edges to DoneMBB.
430 DoneMBB->splice(DoneMBB->begin(), BB, std::next(MI.getIterator()), BB->end());
431 DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
432
433 BB->addSuccessor(TrueMBB);
434 BB->addSuccessor(FalseMBB);
435 TrueMBB->addSuccessor(DoneMBB);
436 FalseMBB->addSuccessor(DoneMBB);
437
438 unsigned Tmp0, Tmp1, CmpReg, EqzReg, FalseReg, TrueReg;
439 Tmp0 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
440 Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
441 CmpReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
442 EqzReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
443 FalseReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
444 TrueReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
445
446 MI.eraseFromParent();
447 // For signed numbers, we can do a single comparison to determine whether
448 // fabs(x) is within range.
449 if (IsUnsigned) {
450 Tmp0 = InReg;
451 } else {
452 BuildMI(BB, DL, TII.get(Abs), Tmp0).addReg(InReg);
453 }
454 BuildMI(BB, DL, TII.get(FConst), Tmp1)
455 .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, CmpVal)));
456 BuildMI(BB, DL, TII.get(LT), CmpReg).addReg(Tmp0).addReg(Tmp1);
457
458 // For unsigned numbers, we have to do a separate comparison with zero.
459 if (IsUnsigned) {
460 Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
461 Register SecondCmpReg =
462 MRI.createVirtualRegister(&WebAssembly::I32RegClass);
463 Register AndReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
464 BuildMI(BB, DL, TII.get(FConst), Tmp1)
465 .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, 0.0)));
466 BuildMI(BB, DL, TII.get(GE), SecondCmpReg).addReg(Tmp0).addReg(Tmp1);
467 BuildMI(BB, DL, TII.get(And), AndReg).addReg(CmpReg).addReg(SecondCmpReg);
468 CmpReg = AndReg;
469 }
470
471 BuildMI(BB, DL, TII.get(Eqz), EqzReg).addReg(CmpReg);
472
473 // Create the CFG diamond to select between doing the conversion or using
474 // the substitute value.
475 BuildMI(BB, DL, TII.get(WebAssembly::BR_IF)).addMBB(TrueMBB).addReg(EqzReg);
476 BuildMI(FalseMBB, DL, TII.get(LoweredOpcode), FalseReg).addReg(InReg);
477 BuildMI(FalseMBB, DL, TII.get(WebAssembly::BR)).addMBB(DoneMBB);
478 BuildMI(TrueMBB, DL, TII.get(IConst), TrueReg).addImm(Substitute);
479 BuildMI(*DoneMBB, DoneMBB->begin(), DL, TII.get(TargetOpcode::PHI), OutReg)
480 .addReg(FalseReg)
481 .addMBB(FalseMBB)
482 .addReg(TrueReg)
483 .addMBB(TrueMBB);
484
485 return DoneMBB;
486}
487
488static MachineBasicBlock *
489LowerCallResults(MachineInstr &CallResults, DebugLoc DL, MachineBasicBlock *BB,
490 const WebAssemblySubtarget *Subtarget,
491 const TargetInstrInfo &TII) {
492 MachineInstr &CallParams = *CallResults.getPrevNode();
493 assert(CallParams.getOpcode() == WebAssembly::CALL_PARAMS)(static_cast <bool> (CallParams.getOpcode() == WebAssembly
::CALL_PARAMS) ? void (0) : __assert_fail ("CallParams.getOpcode() == WebAssembly::CALL_PARAMS"
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 493, __extension__ __PRETTY_FUNCTION__))
;
494 assert(CallResults.getOpcode() == WebAssembly::CALL_RESULTS ||(static_cast <bool> (CallResults.getOpcode() == WebAssembly
::CALL_RESULTS || CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS
) ? void (0) : __assert_fail ("CallResults.getOpcode() == WebAssembly::CALL_RESULTS || CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS"
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 495, __extension__ __PRETTY_FUNCTION__))
495 CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS)(static_cast <bool> (CallResults.getOpcode() == WebAssembly
::CALL_RESULTS || CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS
) ? void (0) : __assert_fail ("CallResults.getOpcode() == WebAssembly::CALL_RESULTS || CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS"
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 495, __extension__ __PRETTY_FUNCTION__))
;
496
497 bool IsIndirect = CallParams.getOperand(0).isReg();
498 bool IsRetCall = CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS;
499
500 unsigned CallOp;
501 if (IsIndirect && IsRetCall) {
502 CallOp = WebAssembly::RET_CALL_INDIRECT;
503 } else if (IsIndirect) {
504 CallOp = WebAssembly::CALL_INDIRECT;
505 } else if (IsRetCall) {
506 CallOp = WebAssembly::RET_CALL;
507 } else {
508 CallOp = WebAssembly::CALL;
509 }
510
511 MachineFunction &MF = *BB->getParent();
512 const MCInstrDesc &MCID = TII.get(CallOp);
513 MachineInstrBuilder MIB(MF, MF.CreateMachineInstr(MCID, DL));
514
515 // See if we must truncate the function pointer.
516 // CALL_INDIRECT takes an i32, but in wasm64 we represent function pointers
517 // as 64-bit for uniformity with other pointer types.
518 // See also: WebAssemblyFastISel::selectCall
519 if (IsIndirect && MF.getSubtarget<WebAssemblySubtarget>().hasAddr64()) {
520 Register Reg32 =
521 MF.getRegInfo().createVirtualRegister(&WebAssembly::I32RegClass);
522 auto &FnPtr = CallParams.getOperand(0);
523 BuildMI(*BB, CallResults.getIterator(), DL,
524 TII.get(WebAssembly::I32_WRAP_I64), Reg32)
525 .addReg(FnPtr.getReg());
526 FnPtr.setReg(Reg32);
527 }
528
529 // Move the function pointer to the end of the arguments for indirect calls
530 if (IsIndirect) {
531 auto FnPtr = CallParams.getOperand(0);
532 CallParams.RemoveOperand(0);
533 CallParams.addOperand(FnPtr);
534 }
535
536 for (auto Def : CallResults.defs())
537 MIB.add(Def);
538
539 if (IsIndirect) {
540 // Placeholder for the type index.
541 MIB.addImm(0);
542 // The table into which this call_indirect indexes.
543 MCSymbolWasm *Table =
544 WebAssembly::getOrCreateFunctionTableSymbol(MF.getContext(), Subtarget);
545 if (Subtarget->hasReferenceTypes()) {
546 MIB.addSym(Table);
547 } else {
548 // For the MVP there is at most one table whose number is 0, but we can't
549 // write a table symbol or issue relocations. Instead we just ensure the
550 // table is live and write a zero.
551 Table->setNoStrip();
552 MIB.addImm(0);
553 }
554 }
555
556 for (auto Use : CallParams.uses())
557 MIB.add(Use);
558
559 BB->insert(CallResults.getIterator(), MIB);
560 CallParams.eraseFromParent();
561 CallResults.eraseFromParent();
562
563 return BB;
564}
565
566MachineBasicBlock *WebAssemblyTargetLowering::EmitInstrWithCustomInserter(
567 MachineInstr &MI, MachineBasicBlock *BB) const {
568 const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
569 DebugLoc DL = MI.getDebugLoc();
570
571 switch (MI.getOpcode()) {
572 default:
573 llvm_unreachable("Unexpected instr type to insert")::llvm::llvm_unreachable_internal("Unexpected instr type to insert"
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 573)
;
574 case WebAssembly::FP_TO_SINT_I32_F32:
575 return LowerFPToInt(MI, DL, BB, TII, false, false, false,
576 WebAssembly::I32_TRUNC_S_F32);
577 case WebAssembly::FP_TO_UINT_I32_F32:
578 return LowerFPToInt(MI, DL, BB, TII, true, false, false,
579 WebAssembly::I32_TRUNC_U_F32);
580 case WebAssembly::FP_TO_SINT_I64_F32:
581 return LowerFPToInt(MI, DL, BB, TII, false, true, false,
582 WebAssembly::I64_TRUNC_S_F32);
583 case WebAssembly::FP_TO_UINT_I64_F32:
584 return LowerFPToInt(MI, DL, BB, TII, true, true, false,
585 WebAssembly::I64_TRUNC_U_F32);
586 case WebAssembly::FP_TO_SINT_I32_F64:
587 return LowerFPToInt(MI, DL, BB, TII, false, false, true,
588 WebAssembly::I32_TRUNC_S_F64);
589 case WebAssembly::FP_TO_UINT_I32_F64:
590 return LowerFPToInt(MI, DL, BB, TII, true, false, true,
591 WebAssembly::I32_TRUNC_U_F64);
592 case WebAssembly::FP_TO_SINT_I64_F64:
593 return LowerFPToInt(MI, DL, BB, TII, false, true, true,
594 WebAssembly::I64_TRUNC_S_F64);
595 case WebAssembly::FP_TO_UINT_I64_F64:
596 return LowerFPToInt(MI, DL, BB, TII, true, true, true,
597 WebAssembly::I64_TRUNC_U_F64);
598 case WebAssembly::CALL_RESULTS:
599 case WebAssembly::RET_CALL_RESULTS:
600 return LowerCallResults(MI, DL, BB, Subtarget, TII);
601 }
602}
603
// Return the textual name of a WebAssemblyISD target node for debug dumps.
// The case labels are generated from WebAssemblyISD.def via the two
// HANDLE_*_NODETYPE macros; the sentinel opcodes (and any opcode not in the
// .def file) fall through and yield nullptr, per TargetLowering convention.
 604const char *
 605WebAssemblyTargetLowering::getTargetNodeName(unsigned Opcode) const {
 606  switch (static_cast<WebAssemblyISD::NodeType>(Opcode)) {
 607  case WebAssemblyISD::FIRST_NUMBER:
 608  case WebAssemblyISD::FIRST_MEM_OPCODE:
 609    break;
 610#define HANDLE_NODETYPE(NODE)                                                  \
 611  case WebAssemblyISD::NODE:                                                   \
 612    return "WebAssemblyISD::" #NODE;
 613#define HANDLE_MEM_NODETYPE(NODE) HANDLE_NODETYPE(NODE)
 614#include "WebAssemblyISD.def"
 615#undef HANDLE_MEM_NODETYPE
 616#undef HANDLE_NODETYPE
 617  }
      // Unknown or sentinel opcode: no name available.
 618  return nullptr;
 619}
620
// Map a single-letter inline-asm constraint to a WebAssembly register class.
// Only 'r' is handled here, selected by value type: v128 (when SIMD128 is
// enabled), i32/i64 for scalar integers, f32/f64 for scalar floats. Anything
// else defers to the generic TargetLowering implementation.
 621std::pair<unsigned, const TargetRegisterClass *>
 622WebAssemblyTargetLowering::getRegForInlineAsmConstraint(
 623    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
 624  // First, see if this is a constraint that directly corresponds to a
 625  // WebAssembly register class.
 626  if (Constraint.size() == 1) {
 627    switch (Constraint[0]) {
 628    case 'r':
        // iPTR should have been resolved to a concrete pointer width before
        // this hook is called.
 629      assert(VT != MVT::iPTR && "Pointer MVT not expected here")(static_cast <bool> (VT != MVT::iPTR && "Pointer MVT not expected here"
) ? void (0) : __assert_fail ("VT != MVT::iPTR && \"Pointer MVT not expected here\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 629, __extension__ __PRETTY_FUNCTION__))
;
 630      if (Subtarget->hasSIMD128() && VT.isVector()) {
 631        if (VT.getSizeInBits() == 128)
 632          return std::make_pair(0U, &WebAssembly::V128RegClass);
 633      }
        // Narrow integers (<= 32 bits) share the I32 class; up to 64 bits
        // use I64.
 634      if (VT.isInteger() && !VT.isVector()) {
 635        if (VT.getSizeInBits() <= 32)
 636          return std::make_pair(0U, &WebAssembly::I32RegClass);
 637        if (VT.getSizeInBits() <= 64)
 638          return std::make_pair(0U, &WebAssembly::I64RegClass);
 639      }
 640      if (VT.isFloatingPoint() && !VT.isVector()) {
 641        switch (VT.getSizeInBits()) {
 642        case 32:
 643          return std::make_pair(0U, &WebAssembly::F32RegClass);
 644        case 64:
 645          return std::make_pair(0U, &WebAssembly::F64RegClass);
 646        default:
 647          break;
 648        }
 649      }
 650      break;
 651    default:
 652      break;
 653    }
 654  }
 655
      // Not a constraint we handle specially; let the generic code decide.
 656  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
 657}
658
659bool WebAssemblyTargetLowering::isCheapToSpeculateCttz() const {
660 // Assume ctz is a relatively cheap operation.
661 return true;
662}
663
664bool WebAssemblyTargetLowering::isCheapToSpeculateCtlz() const {
665 // Assume clz is a relatively cheap operation.
666 return true;
667}
668
669bool WebAssemblyTargetLowering::isLegalAddressingMode(const DataLayout &DL,
670 const AddrMode &AM,
671 Type *Ty, unsigned AS,
672 Instruction *I) const {
673 // WebAssembly offsets are added as unsigned without wrapping. The
674 // isLegalAddressingMode gives us no way to determine if wrapping could be
675 // happening, so we approximate this by accepting only non-negative offsets.
676 if (AM.BaseOffs < 0)
677 return false;
678
679 // WebAssembly has no scale register operands.
680 if (AM.Scale != 0)
681 return false;
682
683 // Everything else is legal.
684 return true;
685}
686
687bool WebAssemblyTargetLowering::allowsMisalignedMemoryAccesses(
688 EVT /*VT*/, unsigned /*AddrSpace*/, Align /*Align*/,
689 MachineMemOperand::Flags /*Flags*/, bool *Fast) const {
690 // WebAssembly supports unaligned accesses, though it should be declared
691 // with the p2align attribute on loads and stores which do so, and there
692 // may be a performance impact. We tell LLVM they're "fast" because
693 // for the kinds of things that LLVM uses this for (merging adjacent stores
694 // of constants, etc.), WebAssembly implementations will either want the
695 // unaligned access or they'll split anyway.
696 if (Fast)
697 *Fast = true;
698 return true;
699}
700
701bool WebAssemblyTargetLowering::isIntDivCheap(EVT VT,
702 AttributeList Attr) const {
703 // The current thinking is that wasm engines will perform this optimization,
704 // so we can save on code size.
705 return true;
706}
707
708bool WebAssemblyTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
709 EVT ExtT = ExtVal.getValueType();
710 EVT MemT = cast<LoadSDNode>(ExtVal->getOperand(0))->getValueType(0);
711 return (ExtT == MVT::v8i16 && MemT == MVT::v8i8) ||
712 (ExtT == MVT::v4i32 && MemT == MVT::v4i16) ||
713 (ExtT == MVT::v2i64 && MemT == MVT::v2i32);
714}
715
716EVT WebAssemblyTargetLowering::getSetCCResultType(const DataLayout &DL,
717 LLVMContext &C,
718 EVT VT) const {
719 if (VT.isVector())
720 return VT.changeVectorElementTypeToInteger();
721
722 // So far, all branch instructions in Wasm take an I32 condition.
723 // The default TargetLowering::getSetCCResultType returns the pointer size,
724 // which would be useful to reduce instruction counts when testing
725 // against 64-bit pointers/values if at some point Wasm supports that.
726 return EVT::getIntegerVT(C, 32);
727}
728
729bool WebAssemblyTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
730 const CallInst &I,
731 MachineFunction &MF,
732 unsigned Intrinsic) const {
733 switch (Intrinsic) {
734 case Intrinsic::wasm_memory_atomic_notify:
735 Info.opc = ISD::INTRINSIC_W_CHAIN;
736 Info.memVT = MVT::i32;
737 Info.ptrVal = I.getArgOperand(0);
738 Info.offset = 0;
739 Info.align = Align(4);
740 // atomic.notify instruction does not really load the memory specified with
741 // this argument, but MachineMemOperand should either be load or store, so
742 // we set this to a load.
743 // FIXME Volatile isn't really correct, but currently all LLVM atomic
744 // instructions are treated as volatiles in the backend, so we should be
745 // consistent. The same applies for wasm_atomic_wait intrinsics too.
746 Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
747 return true;
748 case Intrinsic::wasm_memory_atomic_wait32:
749 Info.opc = ISD::INTRINSIC_W_CHAIN;
750 Info.memVT = MVT::i32;
751 Info.ptrVal = I.getArgOperand(0);
752 Info.offset = 0;
753 Info.align = Align(4);
754 Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
755 return true;
756 case Intrinsic::wasm_memory_atomic_wait64:
757 Info.opc = ISD::INTRINSIC_W_CHAIN;
758 Info.memVT = MVT::i64;
759 Info.ptrVal = I.getArgOperand(0);
760 Info.offset = 0;
761 Info.align = Align(8);
762 Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
763 return true;
764 default:
765 return false;
766 }
767}
768
769//===----------------------------------------------------------------------===//
770// WebAssembly Lowering private implementation.
771//===----------------------------------------------------------------------===//
772
773//===----------------------------------------------------------------------===//
774// Lowering Code
775//===----------------------------------------------------------------------===//
776
777static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg) {
778 MachineFunction &MF = DAG.getMachineFunction();
779 DAG.getContext()->diagnose(
780 DiagnosticInfoUnsupported(MF.getFunction(), Msg, DL.getDebugLoc()));
781}
782
783// Test whether the given calling convention is supported.
784static bool callingConvSupported(CallingConv::ID CallConv) {
785 // We currently support the language-independent target-independent
786 // conventions. We don't yet have a way to annotate calls with properties like
787 // "cold", and we don't have any call-clobbered registers, so these are mostly
788 // all handled the same.
789 return CallConv == CallingConv::C || CallConv == CallingConv::Fast ||
790 CallConv == CallingConv::Cold ||
791 CallConv == CallingConv::PreserveMost ||
792 CallConv == CallingConv::PreserveAll ||
793 CallConv == CallingConv::CXX_FAST_TLS ||
794 CallConv == CallingConv::WASM_EmscriptenInvoke ||
795 CallConv == CallingConv::Swift;
796}
797
// Lower an outgoing call to WebAssemblyISD::CALL / RET_CALL. Diagnoses (via
// fail()) argument features wasm does not implement, copies byval arguments
// into fresh stack objects, materializes the varargs buffer, and fills InVals
// with the call's result values. Returns the output chain (or the RET_CALL
// node for tail calls).
 798SDValue
 799WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
 800                                     SmallVectorImpl<SDValue> &InVals) const {
 801  SelectionDAG &DAG = CLI.DAG;
 802  SDLoc DL = CLI.DL;
 803  SDValue Chain = CLI.Chain;
 804  SDValue Callee = CLI.Callee;
 805  MachineFunction &MF = DAG.getMachineFunction();
 806  auto Layout = MF.getDataLayout();
 807
 808  CallingConv::ID CallConv = CLI.CallConv;
 809  if (!callingConvSupported(CallConv))
 810    fail(DL, DAG,
 811         "WebAssembly doesn't support language-specific or target-specific "
 812         "calling conventions yet");
 813  if (CLI.IsPatchPoint)
 814    fail(DL, DAG, "WebAssembly doesn't support patch point yet");
 815
 816  if (CLI.IsTailCall) {
      // Demote the tail call to a regular call when it cannot be honored;
      // only error out if the source demanded musttail.
 817    auto NoTail = [&](const char *Msg) {
 818      if (CLI.CB && CLI.CB->isMustTailCall())
 819        fail(DL, DAG, Msg);
 820      CLI.IsTailCall = false;
 821    };
 822
 823    if (!Subtarget->hasTailCall())
 824      NoTail("WebAssembly 'tail-call' feature not enabled");
 825
 826    // Varargs calls cannot be tail calls because the buffer is on the stack
 827    if (CLI.IsVarArg)
 828      NoTail("WebAssembly does not support varargs tail calls");
 829
 830    // Do not tail call unless caller and callee return types match
 831    const Function &F = MF.getFunction();
 832    const TargetMachine &TM = getTargetMachine();
 833    Type *RetTy = F.getReturnType();
 834    SmallVector<MVT, 4> CallerRetTys;
 835    SmallVector<MVT, 4> CalleeRetTys;
 836    computeLegalValueVTs(F, TM, RetTy, CallerRetTys);
 837    computeLegalValueVTs(F, TM, CLI.RetTy, CalleeRetTys);
 838    bool TypesMatch = CallerRetTys.size() == CalleeRetTys.size() &&
 839                      std::equal(CallerRetTys.begin(), CallerRetTys.end(),
 840                                 CalleeRetTys.begin());
 841    if (!TypesMatch)
 842      NoTail("WebAssembly tail call requires caller and callee return types to "
 843             "match");
 844
 845    // If pointers to local stack values are passed, we cannot tail call
 846    if (CLI.CB) {
 847      for (auto &Arg : CLI.CB->args()) {
 848        Value *Val = Arg.get();
 849        // Trace the value back through pointer operations
 850        while (true) {
 851          Value *Src = Val->stripPointerCastsAndAliases();
 852          if (auto *GEP = dyn_cast<GetElementPtrInst>(Src))
 853            Src = GEP->getPointerOperand();
 854          if (Val == Src)
 855            break;
 856          Val = Src;
 857        }
 858        if (isa<AllocaInst>(Val)) {
 859          NoTail(
 860              "WebAssembly does not support tail calling with stack arguments");
 861          break;
 862        }
 863      }
 864    }
 865  }
 866
 867  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
 868  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
 869  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
 870
 871  // The generic code may have added an sret argument. If we're lowering an
 872  // invoke function, the ABI requires that the function pointer be the first
 873  // argument, so we may have to swap the arguments.
 874  if (CallConv == CallingConv::WASM_EmscriptenInvoke && Outs.size() >= 2 &&
 875      Outs[0].Flags.isSRet()) {
 876    std::swap(Outs[0], Outs[1]);
 877    std::swap(OutVals[0], OutVals[1]);
 878  }
 879
 880  bool HasSwiftSelfArg = false;
 881  bool HasSwiftErrorArg = false;
 882  unsigned NumFixedArgs = 0;
 883  for (unsigned I = 0; I < Outs.size(); ++I) {
 884    const ISD::OutputArg &Out = Outs[I];
 885    SDValue &OutVal = OutVals[I];
 886    HasSwiftSelfArg |= Out.Flags.isSwiftSelf();
 887    HasSwiftErrorArg |= Out.Flags.isSwiftError();
 888    if (Out.Flags.isNest())
 889      fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
 890    if (Out.Flags.isInAlloca())
 891      fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
 892    if (Out.Flags.isInConsecutiveRegs())
 893      fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
 894    if (Out.Flags.isInConsecutiveRegsLast())
 895      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
      // byval arguments get their own caller-side stack copy; the callee
      // receives a pointer to the copy instead of the original value.
 896    if (Out.Flags.isByVal() && Out.Flags.getByValSize() != 0) {
 897      auto &MFI = MF.getFrameInfo();
 898      int FI = MFI.CreateStackObject(Out.Flags.getByValSize(),
 899                                     Out.Flags.getNonZeroByValAlign(),
 900                                     /*isSS=*/false);
 901      SDValue SizeNode =
 902          DAG.getConstant(Out.Flags.getByValSize(), DL, MVT::i32);
 903      SDValue FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
 904      Chain = DAG.getMemcpy(
 905          Chain, DL, FINode, OutVal, SizeNode, Out.Flags.getNonZeroByValAlign(),
 906          /*isVolatile*/ false, /*AlwaysInline=*/false,
 907          /*isTailCall*/ false, MachinePointerInfo(), MachinePointerInfo());
 908      OutVal = FINode;
 909    }
 910    // Count the number of fixed args *after* legalization.
 911    NumFixedArgs += Out.IsFixed;
 912  }
 913
 914  bool IsVarArg = CLI.IsVarArg;
 915  auto PtrVT = getPointerTy(Layout);
 916
 917  // For swiftcc, emit additional swiftself and swifterror arguments
 918  // if there aren't. These additional arguments are also added for callee
 919  // signature. They are necessary to match callee and caller signature for
 920  // indirect call.
 921  if (CallConv == CallingConv::Swift) {
 922    if (!HasSwiftSelfArg) {
 923      NumFixedArgs++;
 924      ISD::OutputArg Arg;
 925      Arg.Flags.setSwiftSelf();
 926      CLI.Outs.push_back(Arg);
 927      SDValue ArgVal = DAG.getUNDEF(PtrVT);
 928      CLI.OutVals.push_back(ArgVal);
 929    }
 930    if (!HasSwiftErrorArg) {
 931      NumFixedArgs++;
 932      ISD::OutputArg Arg;
 933      Arg.Flags.setSwiftError();
 934      CLI.Outs.push_back(Arg);
 935      SDValue ArgVal = DAG.getUNDEF(PtrVT);
 936      CLI.OutVals.push_back(ArgVal);
 937    }
 938  }
 939
 940  // Analyze operands of the call, assigning locations to each operand.
 941  SmallVector<CCValAssign, 16> ArgLocs;
 942  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
 943
 944  if (IsVarArg) {
 945    // Outgoing non-fixed arguments are placed in a buffer. First
 946    // compute their offsets and the total amount of buffer space needed.
 947    for (unsigned I = NumFixedArgs; I < Outs.size(); ++I) {
 948      const ISD::OutputArg &Out = Outs[I];
 949      SDValue &Arg = OutVals[I];
 950      EVT VT = Arg.getValueType();
 951      assert(VT != MVT::iPTR && "Legalized args should be concrete")(static_cast <bool> (VT != MVT::iPTR && "Legalized args should be concrete"
) ? void (0) : __assert_fail ("VT != MVT::iPTR && \"Legalized args should be concrete\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 951, __extension__ __PRETTY_FUNCTION__))
;
 952      Type *Ty = VT.getTypeForEVT(*DAG.getContext());
 953      Align Alignment =
 954          std::max(Out.Flags.getNonZeroOrigAlign(), Layout.getABITypeAlign(Ty));
 955      unsigned Offset =
 956          CCInfo.AllocateStack(Layout.getTypeAllocSize(Ty), Alignment);
 957      CCInfo.addLoc(CCValAssign::getMem(ArgLocs.size(), VT.getSimpleVT(),
 958                                        Offset, VT.getSimpleVT(),
 959                                        CCValAssign::Full));
 960    }
 961  }
 962
 963  unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
 964
 965  SDValue FINode;
 966  if (IsVarArg && NumBytes) {
 967    // For non-fixed arguments, next emit stores to store the argument values
 968    // to the stack buffer at the offsets computed above.
 969    int FI = MF.getFrameInfo().CreateStackObject(NumBytes,
 970                                                 Layout.getStackAlignment(),
 971                                                 /*isSS=*/false);
 972    unsigned ValNo = 0;
 973    SmallVector<SDValue, 8> Chains;
 974    for (SDValue Arg : drop_begin(OutVals, NumFixedArgs)) {
 975      assert(ArgLocs[ValNo].getValNo() == ValNo &&(static_cast <bool> (ArgLocs[ValNo].getValNo() == ValNo
&& "ArgLocs should remain in order and only hold varargs args"
) ? void (0) : __assert_fail ("ArgLocs[ValNo].getValNo() == ValNo && \"ArgLocs should remain in order and only hold varargs args\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 976, __extension__ __PRETTY_FUNCTION__))
 976             "ArgLocs should remain in order and only hold varargs args")(static_cast <bool> (ArgLocs[ValNo].getValNo() == ValNo
&& "ArgLocs should remain in order and only hold varargs args"
) ? void (0) : __assert_fail ("ArgLocs[ValNo].getValNo() == ValNo && \"ArgLocs should remain in order and only hold varargs args\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 976, __extension__ __PRETTY_FUNCTION__))
;
 977      unsigned Offset = ArgLocs[ValNo++].getLocMemOffset();
 978      FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
 979      SDValue Add = DAG.getNode(ISD::ADD, DL, PtrVT, FINode,
 980                                DAG.getConstant(Offset, DL, PtrVT));
 981      Chains.push_back(
 982          DAG.getStore(Chain, DL, Arg, Add,
 983                       MachinePointerInfo::getFixedStack(MF, FI, Offset)));
 984    }
 985    if (!Chains.empty())
 986      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
 987  } else if (IsVarArg) {
      // Varargs call with an empty buffer: the callee still expects a buffer
      // pointer operand, so pass a null pointer.
 988    FINode = DAG.getIntPtrConstant(0, DL);
 989  }
 990
 991  if (Callee->getOpcode() == ISD::GlobalAddress) {
 992    // If the callee is a GlobalAddress node (quite common, every direct call
 993    // is) turn it into a TargetGlobalAddress node so that LowerGlobalAddress
 994    // doesn't add MO_GOT, which is not needed for direct calls.
 995    GlobalAddressSDNode* GA = cast<GlobalAddressSDNode>(Callee);
 996    Callee = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
 997                                        getPointerTy(DAG.getDataLayout()),
 998                                        GA->getOffset());
 999    Callee = DAG.getNode(WebAssemblyISD::Wrapper, DL,
 1000                         getPointerTy(DAG.getDataLayout()), Callee);
 1001  }
 1002
 1003  // Compute the operands for the CALLn node.
 1004  SmallVector<SDValue, 16> Ops;
 1005  Ops.push_back(Chain);
 1006  Ops.push_back(Callee);
 1007
 1008  // Add all fixed arguments. Note that for non-varargs calls, NumFixedArgs
 1009  // isn't reliable.
 1010  Ops.append(OutVals.begin(),
 1011             IsVarArg ? OutVals.begin() + NumFixedArgs : OutVals.end());
 1012  // Add a pointer to the vararg buffer.
 1013  if (IsVarArg)
 1014    Ops.push_back(FINode);
 1015
 1016  SmallVector<EVT, 8> InTys;
 1017  for (const auto &In : Ins) {
 1018    assert(!In.Flags.isByVal() && "byval is not valid for return values")(static_cast <bool> (!In.Flags.isByVal() && "byval is not valid for return values"
) ? void (0) : __assert_fail ("!In.Flags.isByVal() && \"byval is not valid for return values\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1018, __extension__ __PRETTY_FUNCTION__))
;
 1019    assert(!In.Flags.isNest() && "nest is not valid for return values")(static_cast <bool> (!In.Flags.isNest() && "nest is not valid for return values"
) ? void (0) : __assert_fail ("!In.Flags.isNest() && \"nest is not valid for return values\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1019, __extension__ __PRETTY_FUNCTION__))
;
 1020    if (In.Flags.isInAlloca())
 1021      fail(DL, DAG, "WebAssembly hasn't implemented inalloca return values");
 1022    if (In.Flags.isInConsecutiveRegs())
 1023      fail(DL, DAG, "WebAssembly hasn't implemented cons regs return values");
 1024    if (In.Flags.isInConsecutiveRegsLast())
 1025      fail(DL, DAG,
 1026           "WebAssembly hasn't implemented cons regs last return values");
 1027    // Ignore In.getNonZeroOrigAlign() because all our arguments are passed in
 1028    // registers.
 1029    InTys.push_back(In.VT);
 1030  }
 1031
 1032  if (CLI.IsTailCall) {
 1033    // ret_calls do not return values to the current frame
 1034    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
 1035    return DAG.getNode(WebAssemblyISD::RET_CALL, DL, NodeTys, Ops);
 1036  }
 1037
 1038  InTys.push_back(MVT::Other);
 1039  SDVTList InTyList = DAG.getVTList(InTys);
 1040  SDValue Res = DAG.getNode(WebAssemblyISD::CALL, DL, InTyList, Ops);
 1041
      // The CALL node's first Ins.size() results are the call's values; the
      // last result is the chain.
 1042  for (size_t I = 0; I < Ins.size(); ++I)
 1043    InVals.push_back(Res.getValue(I));
 1044
 1045  // Return the chain
 1046  return Res.getValue(Ins.size());
 1047}
1048
1049bool WebAssemblyTargetLowering::CanLowerReturn(
1050 CallingConv::ID /*CallConv*/, MachineFunction & /*MF*/, bool /*IsVarArg*/,
1051 const SmallVectorImpl<ISD::OutputArg> &Outs,
1052 LLVMContext & /*Context*/) const {
1053 // WebAssembly can only handle returning tuples with multivalue enabled
1054 return Subtarget->hasMultivalue() || Outs.size() <= 1;
1055}
1056
// Lower a function return into a WebAssemblyISD::RETURN node carrying the
// chain plus every outgoing value, diagnosing return-value features wasm does
// not implement. Returns the new chain.
 1057SDValue WebAssemblyTargetLowering::LowerReturn(
 1058    SDValue Chain, CallingConv::ID CallConv, bool /*IsVarArg*/,
 1059    const SmallVectorImpl<ISD::OutputArg> &Outs,
 1060    const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
 1061    SelectionDAG &DAG) const {
 1062  assert((Subtarget->hasMultivalue() || Outs.size() <= 1) &&(static_cast <bool> ((Subtarget->hasMultivalue() || Outs
.size() <= 1) && "MVP WebAssembly can only return up to one value"
) ? void (0) : __assert_fail ("(Subtarget->hasMultivalue() || Outs.size() <= 1) && \"MVP WebAssembly can only return up to one value\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1063, __extension__ __PRETTY_FUNCTION__))
 1063         "MVP WebAssembly can only return up to one value")(static_cast <bool> ((Subtarget->hasMultivalue() || Outs
.size() <= 1) && "MVP WebAssembly can only return up to one value"
) ? void (0) : __assert_fail ("(Subtarget->hasMultivalue() || Outs.size() <= 1) && \"MVP WebAssembly can only return up to one value\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1063, __extension__ __PRETTY_FUNCTION__))
;
 1064  if (!callingConvSupported(CallConv))
 1065    fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");
 1066
      // Operand 0 is the chain, followed by every returned value.
 1067  SmallVector<SDValue, 4> RetOps(1, Chain);
 1068  RetOps.append(OutVals.begin(), OutVals.end());
 1069  Chain = DAG.getNode(WebAssemblyISD::RETURN, DL, MVT::Other, RetOps);
 1070
 1071  // Record the number and types of the return values.
 1072  for (const ISD::OutputArg &Out : Outs) {
 1073    assert(!Out.Flags.isByVal() && "byval is not valid for return values")(static_cast <bool> (!Out.Flags.isByVal() && "byval is not valid for return values"
) ? void (0) : __assert_fail ("!Out.Flags.isByVal() && \"byval is not valid for return values\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1073, __extension__ __PRETTY_FUNCTION__))
;
 1074    assert(!Out.Flags.isNest() && "nest is not valid for return values")(static_cast <bool> (!Out.Flags.isNest() && "nest is not valid for return values"
) ? void (0) : __assert_fail ("!Out.Flags.isNest() && \"nest is not valid for return values\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1074, __extension__ __PRETTY_FUNCTION__))
;
 1075    assert(Out.IsFixed && "non-fixed return value is not valid")(static_cast <bool> (Out.IsFixed && "non-fixed return value is not valid"
) ? void (0) : __assert_fail ("Out.IsFixed && \"non-fixed return value is not valid\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1075, __extension__ __PRETTY_FUNCTION__))
;
 1076    if (Out.Flags.isInAlloca())
 1077      fail(DL, DAG, "WebAssembly hasn't implemented inalloca results");
 1078    if (Out.Flags.isInConsecutiveRegs())
 1079      fail(DL, DAG, "WebAssembly hasn't implemented cons regs results");
 1080    if (Out.Flags.isInConsecutiveRegsLast())
 1081      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last results");
 1082  }
 1083
 1084  return Chain;
 1085}
1086
// Lower the incoming arguments of the function being compiled: each used
// argument becomes a WebAssemblyISD::ARGUMENT node, the varargs buffer
// pointer (if any) is copied into a dedicated virtual register, and the
// function's wasm signature (params/results) is recorded in the
// WebAssemblyFunctionInfo. Returns the updated chain.
 1087SDValue WebAssemblyTargetLowering::LowerFormalArguments(
 1088    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
 1089    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
 1090    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
 1091  if (!callingConvSupported(CallConv))
 1092    fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");
 1093
 1094  MachineFunction &MF = DAG.getMachineFunction();
 1095  auto *MFI = MF.getInfo<WebAssemblyFunctionInfo>();
 1096
 1097  // Set up the incoming ARGUMENTS value, which serves to represent the liveness
 1098  // of the incoming values before they're represented by virtual registers.
 1099  MF.getRegInfo().addLiveIn(WebAssembly::ARGUMENTS);
 1100
 1101  bool HasSwiftErrorArg = false;
 1102  bool HasSwiftSelfArg = false;
 1103  for (const ISD::InputArg &In : Ins) {
 1104    HasSwiftSelfArg |= In.Flags.isSwiftSelf();
 1105    HasSwiftErrorArg |= In.Flags.isSwiftError();
 1106    if (In.Flags.isInAlloca())
 1107      fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
 1108    if (In.Flags.isNest())
 1109      fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
 1110    if (In.Flags.isInConsecutiveRegs())
 1111      fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
 1112    if (In.Flags.isInConsecutiveRegsLast())
 1113      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
 1114    // Ignore In.getNonZeroOrigAlign() because all our arguments are passed in
 1115    // registers.
      // Unused arguments still occupy a parameter slot, so emit UNDEF for them.
 1116    InVals.push_back(In.Used ? DAG.getNode(WebAssemblyISD::ARGUMENT, DL, In.VT,
 1117                                           DAG.getTargetConstant(InVals.size(),
 1118                                                                 DL, MVT::i32))
 1119                             : DAG.getUNDEF(In.VT));
 1120
 1121    // Record the number and types of arguments.
 1122    MFI->addParam(In.VT);
 1123  }
 1124
 1125  // For swiftcc, emit additional swiftself and swifterror arguments
 1126  // if there aren't. These additional arguments are also added for callee
 1127  // signature They are necessary to match callee and caller signature for
 1128  // indirect call.
 1129  auto PtrVT = getPointerTy(MF.getDataLayout());
 1130  if (CallConv == CallingConv::Swift) {
 1131    if (!HasSwiftSelfArg) {
 1132      MFI->addParam(PtrVT);
 1133    }
 1134    if (!HasSwiftErrorArg) {
 1135      MFI->addParam(PtrVT);
 1136    }
 1137  }
 1138  // Varargs are copied into a buffer allocated by the caller, and a pointer to
 1139  // the buffer is passed as an argument.
 1140  if (IsVarArg) {
 1141    MVT PtrVT = getPointerTy(MF.getDataLayout());
 1142    Register VarargVreg =
 1143        MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrVT));
 1144    MFI->setVarargBufferVreg(VarargVreg);
 1145    Chain = DAG.getCopyToReg(
 1146        Chain, DL, VarargVreg,
 1147        DAG.getNode(WebAssemblyISD::ARGUMENT, DL, PtrVT,
 1148                    DAG.getTargetConstant(Ins.size(), DL, MVT::i32)));
 1149    MFI->addParam(PtrVT);
 1150  }
 1151
 1152  // Record the number and types of arguments and results.
 1153  SmallVector<MVT, 4> Params;
 1154  SmallVector<MVT, 4> Results;
 1155  computeSignatureVTs(MF.getFunction().getFunctionType(), &MF.getFunction(),
 1156                      MF.getFunction(), DAG.getTarget(), Params, Results);
 1157  for (MVT VT : Results)
 1158    MFI->addResult(VT);
 1159  // TODO: Use signatures in WebAssemblyMachineFunctionInfo too and unify
 1160  // the param logic here with ComputeSignatureVTs
 1161  assert(MFI->getParams().size() == Params.size() &&(static_cast <bool> (MFI->getParams().size() == Params
.size() && std::equal(MFI->getParams().begin(), MFI
->getParams().end(), Params.begin())) ? void (0) : __assert_fail
("MFI->getParams().size() == Params.size() && std::equal(MFI->getParams().begin(), MFI->getParams().end(), Params.begin())"
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1163, __extension__ __PRETTY_FUNCTION__))
 1162         std::equal(MFI->getParams().begin(), MFI->getParams().end(),(static_cast <bool> (MFI->getParams().size() == Params
.size() && std::equal(MFI->getParams().begin(), MFI
->getParams().end(), Params.begin())) ? void (0) : __assert_fail
("MFI->getParams().size() == Params.size() && std::equal(MFI->getParams().begin(), MFI->getParams().end(), Params.begin())"
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1163, __extension__ __PRETTY_FUNCTION__))
 1163                    Params.begin()))(static_cast <bool> (MFI->getParams().size() == Params
.size() && std::equal(MFI->getParams().begin(), MFI
->getParams().end(), Params.begin())) ? void (0) : __assert_fail
("MFI->getParams().size() == Params.size() && std::equal(MFI->getParams().begin(), MFI->getParams().end(), Params.begin())"
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1163, __extension__ __PRETTY_FUNCTION__))
;
 1164
 1165  return Chain;
 1166}
1167
// Replace illegal-typed results of custom-lowered nodes. Leaving Results
// empty tells the legalizer to fall back to its default handling.
 1168void WebAssemblyTargetLowering::ReplaceNodeResults(
 1169    SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
 1170  switch (N->getOpcode()) {
 1171  case ISD::SIGN_EXTEND_INREG:
 1172    // Do not add any results, signifying that N should not be custom lowered
 1173    // after all. This happens because simd128 turns on custom lowering for
 1174    // SIGN_EXTEND_INREG, but for non-vector sign extends the result might be an
 1175    // illegal type.
 1176    break;
 1177  default:
 1178    llvm_unreachable(::llvm::llvm_unreachable_internal("ReplaceNodeResults not implemented for this op for WebAssembly!"
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1179)
 1179        "ReplaceNodeResults not implemented for this op for WebAssembly!")::llvm::llvm_unreachable_internal("ReplaceNodeResults not implemented for this op for WebAssembly!"
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1179)
;
 1180  }
 1181}
1182
1183//===----------------------------------------------------------------------===//
1184// Custom lowering hooks.
1185//===----------------------------------------------------------------------===//
1186
// Central dispatch for all operations marked Custom in the constructor; each
// case forwards to the matching Lower* helper. (The analyzer's reported bug
// path enters through the BUILD_VECTOR case into LowerBUILD_VECTOR, which is
// outside this excerpt.)
 1187SDValue WebAssemblyTargetLowering::LowerOperation(SDValue Op,
 1188                                                  SelectionDAG &DAG) const {
 1189  SDLoc DL(Op);
 1190  switch (Op.getOpcode()) {
1
Control jumps to 'case BUILD_VECTOR:'  at line 1227
 1191  default:
 1192    llvm_unreachable("unimplemented operation lowering")::llvm::llvm_unreachable_internal("unimplemented operation lowering"
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1192)
;
 1193    return SDValue();
 1194  case ISD::FrameIndex:
 1195    return LowerFrameIndex(Op, DAG);
 1196  case ISD::GlobalAddress:
 1197    return LowerGlobalAddress(Op, DAG);
 1198  case ISD::GlobalTLSAddress:
 1199    return LowerGlobalTLSAddress(Op, DAG);
 1200  case ISD::ExternalSymbol:
 1201    return LowerExternalSymbol(Op, DAG);
 1202  case ISD::JumpTable:
 1203    return LowerJumpTable(Op, DAG);
 1204  case ISD::BR_JT:
 1205    return LowerBR_JT(Op, DAG);
 1206  case ISD::VASTART:
 1207    return LowerVASTART(Op, DAG);
 1208  case ISD::BlockAddress:
 1209  case ISD::BRIND:
      // Computed gotos are not supported; diagnose and return no value.
 1210    fail(DL, DAG, "WebAssembly hasn't implemented computed gotos");
 1211    return SDValue();
 1212  case ISD::RETURNADDR:
 1213    return LowerRETURNADDR(Op, DAG);
 1214  case ISD::FRAMEADDR:
 1215    return LowerFRAMEADDR(Op, DAG);
 1216  case ISD::CopyToReg:
 1217    return LowerCopyToReg(Op, DAG);
 1218  case ISD::EXTRACT_VECTOR_ELT:
 1219  case ISD::INSERT_VECTOR_ELT:
 1220    return LowerAccessVectorElement(Op, DAG);
 1221  case ISD::INTRINSIC_VOID:
 1222  case ISD::INTRINSIC_WO_CHAIN:
 1223  case ISD::INTRINSIC_W_CHAIN:
 1224    return LowerIntrinsic(Op, DAG);
 1225  case ISD::SIGN_EXTEND_INREG:
 1226    return LowerSIGN_EXTEND_INREG(Op, DAG);
 1227  case ISD::BUILD_VECTOR:
 1228    return LowerBUILD_VECTOR(Op, DAG);
2
Calling 'WebAssemblyTargetLowering::LowerBUILD_VECTOR'
 1229  case ISD::VECTOR_SHUFFLE:
 1230    return LowerVECTOR_SHUFFLE(Op, DAG);
 1231  case ISD::SETCC:
 1232    return LowerSETCC(Op, DAG);
 1233  case ISD::SHL:
 1234  case ISD::SRA:
 1235  case ISD::SRL:
 1236    return LowerShift(Op, DAG);
 1237  case ISD::FP_TO_SINT_SAT:
 1238  case ISD::FP_TO_UINT_SAT:
 1239    return LowerFP_TO_INT_SAT(Op, DAG);
 1240  case ISD::LOAD:
 1241    return LowerLoad(Op, DAG);
 1242  case ISD::STORE:
 1243    return LowerStore(Op, DAG);
 1244  }
 1245}
1246
1247static bool IsWebAssemblyGlobal(SDValue Op) {
1248 if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op))
1249 return WebAssembly::isWasmVarAddressSpace(GA->getAddressSpace());
1250
1251 return false;
1252}
1253
1254static Optional<unsigned> IsWebAssemblyLocal(SDValue Op, SelectionDAG &DAG) {
1255 const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op);
1256 if (!FI)
1257 return None;
1258
1259 auto &MF = DAG.getMachineFunction();
1260 return WebAssemblyFrameLowering::getLocalForStackObject(MF, FI->getIndex());
1261}
1262
1263SDValue WebAssemblyTargetLowering::LowerStore(SDValue Op,
1264 SelectionDAG &DAG) const {
1265 SDLoc DL(Op);
1266 StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());
1267 const SDValue &Value = SN->getValue();
1268 const SDValue &Base = SN->getBasePtr();
1269 const SDValue &Offset = SN->getOffset();
1270
1271 if (IsWebAssemblyGlobal(Base)) {
1272 if (!Offset->isUndef())
1273 report_fatal_error("unexpected offset when storing to webassembly global",
1274 false);
1275
1276 SDVTList Tys = DAG.getVTList(MVT::Other);
1277 SDValue Ops[] = {SN->getChain(), Value, Base};
1278 return DAG.getMemIntrinsicNode(WebAssemblyISD::GLOBAL_SET, DL, Tys, Ops,
1279 SN->getMemoryVT(), SN->getMemOperand());
1280 }
1281
1282 if (Optional<unsigned> Local = IsWebAssemblyLocal(Base, DAG)) {
1283 if (!Offset->isUndef())
1284 report_fatal_error("unexpected offset when storing to webassembly local",
1285 false);
1286
1287 SDValue Idx = DAG.getTargetConstant(*Local, Base, MVT::i32);
1288 SDVTList Tys = DAG.getVTList(MVT::Other); // The chain.
1289 SDValue Ops[] = {SN->getChain(), Idx, Value};
1290 return DAG.getNode(WebAssemblyISD::LOCAL_SET, DL, Tys, Ops);
1291 }
1292
1293 return Op;
1294}
1295
1296SDValue WebAssemblyTargetLowering::LowerLoad(SDValue Op,
1297 SelectionDAG &DAG) const {
1298 SDLoc DL(Op);
1299 LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
1300 const SDValue &Base = LN->getBasePtr();
1301 const SDValue &Offset = LN->getOffset();
1302
1303 if (IsWebAssemblyGlobal(Base)) {
1304 if (!Offset->isUndef())
1305 report_fatal_error(
1306 "unexpected offset when loading from webassembly global", false);
1307
1308 SDVTList Tys = DAG.getVTList(LN->getValueType(0), MVT::Other);
1309 SDValue Ops[] = {LN->getChain(), Base};
1310 return DAG.getMemIntrinsicNode(WebAssemblyISD::GLOBAL_GET, DL, Tys, Ops,
1311 LN->getMemoryVT(), LN->getMemOperand());
1312 }
1313
1314 if (Optional<unsigned> Local = IsWebAssemblyLocal(Base, DAG)) {
1315 if (!Offset->isUndef())
1316 report_fatal_error(
1317 "unexpected offset when loading from webassembly local", false);
1318
1319 SDValue Idx = DAG.getTargetConstant(*Local, Base, MVT::i32);
1320 EVT LocalVT = LN->getValueType(0);
1321 SDValue LocalGet = DAG.getNode(WebAssemblyISD::LOCAL_GET, DL, LocalVT,
1322 {LN->getChain(), Idx});
1323 SDValue Result = DAG.getMergeValues({LocalGet, LN->getChain()}, DL);
1324 assert(Result->getNumValues() == 2 && "Loads must carry a chain!")(static_cast <bool> (Result->getNumValues() == 2 &&
"Loads must carry a chain!") ? void (0) : __assert_fail ("Result->getNumValues() == 2 && \"Loads must carry a chain!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1324, __extension__ __PRETTY_FUNCTION__))
;
1325 return Result;
1326 }
1327
1328 return Op;
1329}
1330
1331SDValue WebAssemblyTargetLowering::LowerCopyToReg(SDValue Op,
1332 SelectionDAG &DAG) const {
1333 SDValue Src = Op.getOperand(2);
1334 if (isa<FrameIndexSDNode>(Src.getNode())) {
1335 // CopyToReg nodes don't support FrameIndex operands. Other targets select
1336 // the FI to some LEA-like instruction, but since we don't have that, we
1337 // need to insert some kind of instruction that can take an FI operand and
1338 // produces a value usable by CopyToReg (i.e. in a vreg). So insert a dummy
1339 // local.copy between Op and its FI operand.
1340 SDValue Chain = Op.getOperand(0);
1341 SDLoc DL(Op);
1342 unsigned Reg = cast<RegisterSDNode>(Op.getOperand(1))->getReg();
1343 EVT VT = Src.getValueType();
1344 SDValue Copy(DAG.getMachineNode(VT == MVT::i32 ? WebAssembly::COPY_I32
1345 : WebAssembly::COPY_I64,
1346 DL, VT, Src),
1347 0);
1348 return Op.getNode()->getNumValues() == 1
1349 ? DAG.getCopyToReg(Chain, DL, Reg, Copy)
1350 : DAG.getCopyToReg(Chain, DL, Reg, Copy,
1351 Op.getNumOperands() == 4 ? Op.getOperand(3)
1352 : SDValue());
1353 }
1354 return SDValue();
1355}
1356
1357SDValue WebAssemblyTargetLowering::LowerFrameIndex(SDValue Op,
1358 SelectionDAG &DAG) const {
1359 int FI = cast<FrameIndexSDNode>(Op)->getIndex();
1360 return DAG.getTargetFrameIndex(FI, Op.getValueType());
1361}
1362
1363SDValue WebAssemblyTargetLowering::LowerRETURNADDR(SDValue Op,
1364 SelectionDAG &DAG) const {
1365 SDLoc DL(Op);
1366
1367 if (!Subtarget->getTargetTriple().isOSEmscripten()) {
1368 fail(DL, DAG,
1369 "Non-Emscripten WebAssembly hasn't implemented "
1370 "__builtin_return_address");
1371 return SDValue();
1372 }
1373
1374 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
1375 return SDValue();
1376
1377 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1378 MakeLibCallOptions CallOptions;
1379 return makeLibCall(DAG, RTLIB::RETURN_ADDRESS, Op.getValueType(),
1380 {DAG.getConstant(Depth, DL, MVT::i32)}, CallOptions, DL)
1381 .first;
1382}
1383
1384SDValue WebAssemblyTargetLowering::LowerFRAMEADDR(SDValue Op,
1385 SelectionDAG &DAG) const {
1386 // Non-zero depths are not supported by WebAssembly currently. Use the
1387 // legalizer's default expansion, which is to return 0 (what this function is
1388 // documented to do).
1389 if (Op.getConstantOperandVal(0) > 0)
1390 return SDValue();
1391
1392 DAG.getMachineFunction().getFrameInfo().setFrameAddressIsTaken(true);
1393 EVT VT = Op.getValueType();
1394 Register FP =
1395 Subtarget->getRegisterInfo()->getFrameRegister(DAG.getMachineFunction());
1396 return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), FP, VT);
1397}
1398
1399SDValue
1400WebAssemblyTargetLowering::LowerGlobalTLSAddress(SDValue Op,
1401 SelectionDAG &DAG) const {
1402 SDLoc DL(Op);
1403 const auto *GA = cast<GlobalAddressSDNode>(Op);
1404 MVT PtrVT = getPointerTy(DAG.getDataLayout());
1405
1406 MachineFunction &MF = DAG.getMachineFunction();
1407 if (!MF.getSubtarget<WebAssemblySubtarget>().hasBulkMemory())
1408 report_fatal_error("cannot use thread-local storage without bulk memory",
1409 false);
1410
1411 const GlobalValue *GV = GA->getGlobal();
1412
1413 // Currently Emscripten does not support dynamic linking with threads.
1414 // Therefore, if we have thread-local storage, only the local-exec model
1415 // is possible.
1416 // TODO: remove this and implement proper TLS models once Emscripten
1417 // supports dynamic linking with threads.
1418 if (GV->getThreadLocalMode() != GlobalValue::LocalExecTLSModel &&
1419 !Subtarget->getTargetTriple().isOSEmscripten()) {
1420 report_fatal_error("only -ftls-model=local-exec is supported for now on "
1421 "non-Emscripten OSes: variable " +
1422 GV->getName(),
1423 false);
1424 }
1425
1426 auto GlobalGet = PtrVT == MVT::i64 ? WebAssembly::GLOBAL_GET_I64
1427 : WebAssembly::GLOBAL_GET_I32;
1428 const char *BaseName = MF.createExternalSymbolName("__tls_base");
1429
1430 SDValue BaseAddr(
1431 DAG.getMachineNode(GlobalGet, DL, PtrVT,
1432 DAG.getTargetExternalSymbol(BaseName, PtrVT)),
1433 0);
1434
1435 SDValue TLSOffset = DAG.getTargetGlobalAddress(
1436 GV, DL, PtrVT, GA->getOffset(), WebAssemblyII::MO_TLS_BASE_REL);
1437 SDValue SymAddr = DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT, TLSOffset);
1438
1439 return DAG.getNode(ISD::ADD, DL, PtrVT, BaseAddr, SymAddr);
1440}
1441
1442SDValue WebAssemblyTargetLowering::LowerGlobalAddress(SDValue Op,
1443 SelectionDAG &DAG) const {
1444 SDLoc DL(Op);
1445 const auto *GA = cast<GlobalAddressSDNode>(Op);
1446 EVT VT = Op.getValueType();
1447 assert(GA->getTargetFlags() == 0 &&(static_cast <bool> (GA->getTargetFlags() == 0 &&
"Unexpected target flags on generic GlobalAddressSDNode") ? void
(0) : __assert_fail ("GA->getTargetFlags() == 0 && \"Unexpected target flags on generic GlobalAddressSDNode\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1448, __extension__ __PRETTY_FUNCTION__))
1448 "Unexpected target flags on generic GlobalAddressSDNode")(static_cast <bool> (GA->getTargetFlags() == 0 &&
"Unexpected target flags on generic GlobalAddressSDNode") ? void
(0) : __assert_fail ("GA->getTargetFlags() == 0 && \"Unexpected target flags on generic GlobalAddressSDNode\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1448, __extension__ __PRETTY_FUNCTION__))
;
1449 if (!WebAssembly::isValidAddressSpace(GA->getAddressSpace()))
1450 fail(DL, DAG, "Invalid address space for WebAssembly target");
1451
1452 unsigned OperandFlags = 0;
1453 if (isPositionIndependent()) {
1454 const GlobalValue *GV = GA->getGlobal();
1455 if (getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV)) {
1456 MachineFunction &MF = DAG.getMachineFunction();
1457 MVT PtrVT = getPointerTy(MF.getDataLayout());
1458 const char *BaseName;
1459 if (GV->getValueType()->isFunctionTy()) {
1460 BaseName = MF.createExternalSymbolName("__table_base");
1461 OperandFlags = WebAssemblyII::MO_TABLE_BASE_REL;
1462 }
1463 else {
1464 BaseName = MF.createExternalSymbolName("__memory_base");
1465 OperandFlags = WebAssemblyII::MO_MEMORY_BASE_REL;
1466 }
1467 SDValue BaseAddr =
1468 DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT,
1469 DAG.getTargetExternalSymbol(BaseName, PtrVT));
1470
1471 SDValue SymAddr = DAG.getNode(
1472 WebAssemblyISD::WrapperPIC, DL, VT,
1473 DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT, GA->getOffset(),
1474 OperandFlags));
1475
1476 return DAG.getNode(ISD::ADD, DL, VT, BaseAddr, SymAddr);
1477 } else {
1478 OperandFlags = WebAssemblyII::MO_GOT;
1479 }
1480 }
1481
1482 return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1483 DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT,
1484 GA->getOffset(), OperandFlags));
1485}
1486
1487SDValue
1488WebAssemblyTargetLowering::LowerExternalSymbol(SDValue Op,
1489 SelectionDAG &DAG) const {
1490 SDLoc DL(Op);
1491 const auto *ES = cast<ExternalSymbolSDNode>(Op);
1492 EVT VT = Op.getValueType();
1493 assert(ES->getTargetFlags() == 0 &&(static_cast <bool> (ES->getTargetFlags() == 0 &&
"Unexpected target flags on generic ExternalSymbolSDNode") ?
void (0) : __assert_fail ("ES->getTargetFlags() == 0 && \"Unexpected target flags on generic ExternalSymbolSDNode\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1494, __extension__ __PRETTY_FUNCTION__))
1494 "Unexpected target flags on generic ExternalSymbolSDNode")(static_cast <bool> (ES->getTargetFlags() == 0 &&
"Unexpected target flags on generic ExternalSymbolSDNode") ?
void (0) : __assert_fail ("ES->getTargetFlags() == 0 && \"Unexpected target flags on generic ExternalSymbolSDNode\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1494, __extension__ __PRETTY_FUNCTION__))
;
1495 return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1496 DAG.getTargetExternalSymbol(ES->getSymbol(), VT));
1497}
1498
1499SDValue WebAssemblyTargetLowering::LowerJumpTable(SDValue Op,
1500 SelectionDAG &DAG) const {
1501 // There's no need for a Wrapper node because we always incorporate a jump
1502 // table operand into a BR_TABLE instruction, rather than ever
1503 // materializing it in a register.
1504 const JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
1505 return DAG.getTargetJumpTable(JT->getIndex(), Op.getValueType(),
1506 JT->getTargetFlags());
1507}
1508
1509SDValue WebAssemblyTargetLowering::LowerBR_JT(SDValue Op,
1510 SelectionDAG &DAG) const {
1511 SDLoc DL(Op);
1512 SDValue Chain = Op.getOperand(0);
1513 const auto *JT = cast<JumpTableSDNode>(Op.getOperand(1));
1514 SDValue Index = Op.getOperand(2);
1515 assert(JT->getTargetFlags() == 0 && "WebAssembly doesn't set target flags")(static_cast <bool> (JT->getTargetFlags() == 0 &&
"WebAssembly doesn't set target flags") ? void (0) : __assert_fail
("JT->getTargetFlags() == 0 && \"WebAssembly doesn't set target flags\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1515, __extension__ __PRETTY_FUNCTION__))
;
1516
1517 SmallVector<SDValue, 8> Ops;
1518 Ops.push_back(Chain);
1519 Ops.push_back(Index);
1520
1521 MachineJumpTableInfo *MJTI = DAG.getMachineFunction().getJumpTableInfo();
1522 const auto &MBBs = MJTI->getJumpTables()[JT->getIndex()].MBBs;
1523
1524 // Add an operand for each case.
1525 for (auto MBB : MBBs)
1526 Ops.push_back(DAG.getBasicBlock(MBB));
1527
1528 // Add the first MBB as a dummy default target for now. This will be replaced
1529 // with the proper default target (and the preceding range check eliminated)
1530 // if possible by WebAssemblyFixBrTableDefaults.
1531 Ops.push_back(DAG.getBasicBlock(*MBBs.begin()));
1532 return DAG.getNode(WebAssemblyISD::BR_TABLE, DL, MVT::Other, Ops);
1533}
1534
1535SDValue WebAssemblyTargetLowering::LowerVASTART(SDValue Op,
1536 SelectionDAG &DAG) const {
1537 SDLoc DL(Op);
1538 EVT PtrVT = getPointerTy(DAG.getMachineFunction().getDataLayout());
1539
1540 auto *MFI = DAG.getMachineFunction().getInfo<WebAssemblyFunctionInfo>();
1541 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
1542
1543 SDValue ArgN = DAG.getCopyFromReg(DAG.getEntryNode(), DL,
1544 MFI->getVarargBufferVreg(), PtrVT);
1545 return DAG.getStore(Op.getOperand(0), DL, ArgN, Op.getOperand(1),
1546 MachinePointerInfo(SV));
1547}
1548
1549static SDValue getCppExceptionSymNode(SDValue Op, unsigned TagIndex,
1550 SelectionDAG &DAG) {
1551 // We only support C++ exceptions for now
1552 int Tag =
1553 cast<ConstantSDNode>(Op.getOperand(TagIndex).getNode())->getZExtValue();
1554 if (Tag != WebAssembly::CPP_EXCEPTION)
1555 llvm_unreachable("Invalid tag: We only support C++ exceptions for now")::llvm::llvm_unreachable_internal("Invalid tag: We only support C++ exceptions for now"
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1555)
;
1556 auto &MF = DAG.getMachineFunction();
1557 const auto &TLI = DAG.getTargetLoweringInfo();
1558 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
1559 const char *SymName = MF.createExternalSymbolName("__cpp_exception");
1560 return DAG.getNode(WebAssemblyISD::Wrapper, SDLoc(Op), PtrVT,
1561 DAG.getTargetExternalSymbol(SymName, PtrVT));
1562}
1563
1564SDValue WebAssemblyTargetLowering::LowerIntrinsic(SDValue Op,
1565 SelectionDAG &DAG) const {
1566 MachineFunction &MF = DAG.getMachineFunction();
1567 unsigned IntNo;
1568 switch (Op.getOpcode()) {
1569 case ISD::INTRINSIC_VOID:
1570 case ISD::INTRINSIC_W_CHAIN:
1571 IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
1572 break;
1573 case ISD::INTRINSIC_WO_CHAIN:
1574 IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1575 break;
1576 default:
1577 llvm_unreachable("Invalid intrinsic")::llvm::llvm_unreachable_internal("Invalid intrinsic", "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1577)
;
1578 }
1579 SDLoc DL(Op);
1580
1581 switch (IntNo) {
1582 default:
1583 return SDValue(); // Don't custom lower most intrinsics.
1584
1585 case Intrinsic::wasm_lsda: {
1586 EVT VT = Op.getValueType();
1587 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1588 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
1589 auto &Context = MF.getMMI().getContext();
1590 MCSymbol *S = Context.getOrCreateSymbol(Twine("GCC_except_table") +
1591 Twine(MF.getFunctionNumber()));
1592 return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1593 DAG.getMCSymbol(S, PtrVT));
1594 }
1595
1596 case Intrinsic::wasm_throw: {
1597 SDValue SymNode = getCppExceptionSymNode(Op, 2, DAG);
1598 return DAG.getNode(WebAssemblyISD::THROW, DL,
1599 MVT::Other, // outchain type
1600 {
1601 Op.getOperand(0), // inchain
1602 SymNode, // exception symbol
1603 Op.getOperand(3) // thrown value
1604 });
1605 }
1606
1607 case Intrinsic::wasm_catch: {
1608 SDValue SymNode = getCppExceptionSymNode(Op, 2, DAG);
1609 return DAG.getNode(WebAssemblyISD::CATCH, DL,
1610 {
1611 MVT::i32, // outchain type
1612 MVT::Other // return value
1613 },
1614 {
1615 Op.getOperand(0), // inchain
1616 SymNode // exception symbol
1617 });
1618 }
1619
1620 case Intrinsic::wasm_shuffle: {
1621 // Drop in-chain and replace undefs, but otherwise pass through unchanged
1622 SDValue Ops[18];
1623 size_t OpIdx = 0;
1624 Ops[OpIdx++] = Op.getOperand(1);
1625 Ops[OpIdx++] = Op.getOperand(2);
1626 while (OpIdx < 18) {
1627 const SDValue &MaskIdx = Op.getOperand(OpIdx + 1);
1628 if (MaskIdx.isUndef() ||
1629 cast<ConstantSDNode>(MaskIdx.getNode())->getZExtValue() >= 32) {
1630 Ops[OpIdx++] = DAG.getConstant(0, DL, MVT::i32);
1631 } else {
1632 Ops[OpIdx++] = MaskIdx;
1633 }
1634 }
1635 return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops);
1636 }
1637 }
1638}
1639
1640SDValue
1641WebAssemblyTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
1642 SelectionDAG &DAG) const {
1643 SDLoc DL(Op);
1644 // If sign extension operations are disabled, allow sext_inreg only if operand
1645 // is a vector extract of an i8 or i16 lane. SIMD does not depend on sign
1646 // extension operations, but allowing sext_inreg in this context lets us have
1647 // simple patterns to select extract_lane_s instructions. Expanding sext_inreg
1648 // everywhere would be simpler in this file, but would necessitate large and
1649 // brittle patterns to undo the expansion and select extract_lane_s
1650 // instructions.
1651 assert(!Subtarget->hasSignExt() && Subtarget->hasSIMD128())(static_cast <bool> (!Subtarget->hasSignExt() &&
Subtarget->hasSIMD128()) ? void (0) : __assert_fail ("!Subtarget->hasSignExt() && Subtarget->hasSIMD128()"
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1651, __extension__ __PRETTY_FUNCTION__))
;
1652 if (Op.getOperand(0).getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1653 return SDValue();
1654
1655 const SDValue &Extract = Op.getOperand(0);
1656 MVT VecT = Extract.getOperand(0).getSimpleValueType();
1657 if (VecT.getVectorElementType().getSizeInBits() > 32)
1658 return SDValue();
1659 MVT ExtractedLaneT =
1660 cast<VTSDNode>(Op.getOperand(1).getNode())->getVT().getSimpleVT();
1661 MVT ExtractedVecT =
1662 MVT::getVectorVT(ExtractedLaneT, 128 / ExtractedLaneT.getSizeInBits());
1663 if (ExtractedVecT == VecT)
1664 return Op;
1665
1666 // Bitcast vector to appropriate type to ensure ISel pattern coverage
1667 const SDNode *Index = Extract.getOperand(1).getNode();
1668 if (!isa<ConstantSDNode>(Index))
1669 return SDValue();
1670 unsigned IndexVal = cast<ConstantSDNode>(Index)->getZExtValue();
1671 unsigned Scale =
1672 ExtractedVecT.getVectorNumElements() / VecT.getVectorNumElements();
1673 assert(Scale > 1)(static_cast <bool> (Scale > 1) ? void (0) : __assert_fail
("Scale > 1", "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1673, __extension__ __PRETTY_FUNCTION__))
;
1674 SDValue NewIndex =
1675 DAG.getConstant(IndexVal * Scale, DL, Index->getValueType(0));
1676 SDValue NewExtract = DAG.getNode(
1677 ISD::EXTRACT_VECTOR_ELT, DL, Extract.getValueType(),
1678 DAG.getBitcast(ExtractedVecT, Extract.getOperand(0)), NewIndex);
1679 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, Op.getValueType(), NewExtract,
1680 Op.getOperand(1));
1681}
1682
1683SDValue WebAssemblyTargetLowering::LowerBUILD_VECTOR(SDValue Op,
1684 SelectionDAG &DAG) const {
1685 SDLoc DL(Op);
1686 const EVT VecT = Op.getValueType();
1687 const EVT LaneT = Op.getOperand(0).getValueType();
1688 const size_t Lanes = Op.getNumOperands();
1689 bool CanSwizzle = VecT == MVT::v16i8;
1690
1691 // BUILD_VECTORs are lowered to the instruction that initializes the highest
1692 // possible number of lanes at once followed by a sequence of replace_lane
1693 // instructions to individually initialize any remaining lanes.
1694
1695 // TODO: Tune this. For example, lanewise swizzling is very expensive, so
1696 // swizzled lanes should be given greater weight.
1697
1698 // TODO: Investigate looping rather than always extracting/replacing specific
1699 // lanes to fill gaps.
1700
1701 auto IsConstant = [](const SDValue &V) {
1702 return V.getOpcode() == ISD::Constant || V.getOpcode() == ISD::ConstantFP;
1703 };
1704
1705 // Returns the source vector and index vector pair if they exist. Checks for:
1706 // (extract_vector_elt
1707 // $src,
1708 // (sign_extend_inreg (extract_vector_elt $indices, $i))
1709 // )
1710 auto GetSwizzleSrcs = [](size_t I, const SDValue &Lane) {
1711 auto Bail = std::make_pair(SDValue(), SDValue());
1712 if (Lane->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1713 return Bail;
1714 const SDValue &SwizzleSrc = Lane->getOperand(0);
1715 const SDValue &IndexExt = Lane->getOperand(1);
1716 if (IndexExt->getOpcode() != ISD::SIGN_EXTEND_INREG)
1717 return Bail;
1718 const SDValue &Index = IndexExt->getOperand(0);
1719 if (Index->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1720 return Bail;
1721 const SDValue &SwizzleIndices = Index->getOperand(0);
1722 if (SwizzleSrc.getValueType() != MVT::v16i8 ||
1723 SwizzleIndices.getValueType() != MVT::v16i8 ||
1724 Index->getOperand(1)->getOpcode() != ISD::Constant ||
1725 Index->getConstantOperandVal(1) != I)
1726 return Bail;
1727 return std::make_pair(SwizzleSrc, SwizzleIndices);
1728 };
1729
1730 // If the lane is extracted from another vector at a constant index, return
1731 // that vector. The source vector must not have more lanes than the dest
1732 // because the shufflevector indices are in terms of the destination lanes and
1733 // would not be able to address the smaller individual source lanes.
1734 auto GetShuffleSrc = [&](const SDValue &Lane) {
1735 if (Lane->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1736 return SDValue();
1737 if (!isa<ConstantSDNode>(Lane->getOperand(1).getNode()))
1738 return SDValue();
1739 if (Lane->getOperand(0).getValueType().getVectorNumElements() >
1740 VecT.getVectorNumElements())
1741 return SDValue();
1742 return Lane->getOperand(0);
1743 };
1744
1745 using ValueEntry = std::pair<SDValue, size_t>;
1746 SmallVector<ValueEntry, 16> SplatValueCounts;
1747
1748 using SwizzleEntry = std::pair<std::pair<SDValue, SDValue>, size_t>;
1749 SmallVector<SwizzleEntry, 16> SwizzleCounts;
1750
1751 using ShuffleEntry = std::pair<SDValue, size_t>;
1752 SmallVector<ShuffleEntry, 16> ShuffleCounts;
1753
1754 auto AddCount = [](auto &Counts, const auto &Val) {
1755 auto CountIt =
1756 llvm::find_if(Counts, [&Val](auto E) { return E.first == Val; });
1757 if (CountIt == Counts.end()) {
1758 Counts.emplace_back(Val, 1);
1759 } else {
1760 CountIt->second++;
1761 }
1762 };
1763
1764 auto GetMostCommon = [](auto &Counts) {
1765 auto CommonIt =
1766 std::max_element(Counts.begin(), Counts.end(),
1767 [](auto A, auto B) { return A.second < B.second; });
1768 assert(CommonIt != Counts.end() && "Unexpected all-undef build_vector")(static_cast <bool> (CommonIt != Counts.end() &&
"Unexpected all-undef build_vector") ? void (0) : __assert_fail
("CommonIt != Counts.end() && \"Unexpected all-undef build_vector\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1768, __extension__ __PRETTY_FUNCTION__))
;
1769 return *CommonIt;
1770 };
1771
1772 size_t NumConstantLanes = 0;
1773
1774 // Count eligible lanes for each type of vector creation op
1775 for (size_t I = 0; I
2.1
'I' is < 'Lanes'
2.1
'I' is < 'Lanes'
2.1
'I' is < 'Lanes'
< Lanes
; ++I) {
3
Loop condition is true. Entering loop body
9
Assuming 'I' is >= 'Lanes'
10
Loop condition is false. Execution continues on line 1793
1776 const SDValue &Lane = Op->getOperand(I);
1777 if (Lane.isUndef())
4
Taking false branch
1778 continue;
1779
1780 AddCount(SplatValueCounts, Lane);
1781
1782 if (IsConstant(Lane))
5
Taking false branch
1783 NumConstantLanes++;
1784 if (auto ShuffleSrc = GetShuffleSrc(Lane))
6
Taking false branch
1785 AddCount(ShuffleCounts, ShuffleSrc);
1786 if (CanSwizzle
6.1
'CanSwizzle' is true
6.1
'CanSwizzle' is true
6.1
'CanSwizzle' is true
) {
7
Taking true branch
1787 auto SwizzleSrcs = GetSwizzleSrcs(I, Lane);
1788 if (SwizzleSrcs.first)
8
Taking false branch
1789 AddCount(SwizzleCounts, SwizzleSrcs);
1790 }
1791 }
1792
1793 SDValue SplatValue;
1794 size_t NumSplatLanes;
1795 std::tie(SplatValue, NumSplatLanes) = GetMostCommon(SplatValueCounts);
1796
1797 SDValue SwizzleSrc;
1798 SDValue SwizzleIndices;
1799 size_t NumSwizzleLanes = 0;
1800 if (SwizzleCounts.size())
11
Assuming the condition is false
12
Taking false branch
1801 std::forward_as_tuple(std::tie(SwizzleSrc, SwizzleIndices),
1802 NumSwizzleLanes) = GetMostCommon(SwizzleCounts);
1803
1804 // Shuffles can draw from up to two vectors, so find the two most common
1805 // sources.
1806 SDValue ShuffleSrc1, ShuffleSrc2;
1807 size_t NumShuffleLanes = 0;
1808 if (ShuffleCounts.size()) {
13
Assuming the condition is false
14
Taking false branch
1809 std::tie(ShuffleSrc1, NumShuffleLanes) = GetMostCommon(ShuffleCounts);
1810 ShuffleCounts.erase(std::remove_if(ShuffleCounts.begin(),
1811 ShuffleCounts.end(),
1812 [&](const auto &Pair) {
1813 return Pair.first == ShuffleSrc1;
1814 }),
1815 ShuffleCounts.end());
1816 }
1817 if (ShuffleCounts.size()) {
15
Taking false branch
1818 size_t AdditionalShuffleLanes;
1819 std::tie(ShuffleSrc2, AdditionalShuffleLanes) =
1820 GetMostCommon(ShuffleCounts);
1821 NumShuffleLanes += AdditionalShuffleLanes;
1822 }
1823
1824 // Predicate returning true if the lane is properly initialized by the
1825 // original instruction
1826 std::function<bool(size_t, const SDValue &)> IsLaneConstructed;
1827 SDValue Result;
1828 // Prefer swizzles over shuffles over vector consts over splats
1829 if (NumSwizzleLanes
15.1
'NumSwizzleLanes' is >= 'NumShuffleLanes'
15.1
'NumSwizzleLanes' is >= 'NumShuffleLanes'
15.1
'NumSwizzleLanes' is >= 'NumShuffleLanes'
>= NumShuffleLanes &&
17
Taking false branch
1830 NumSwizzleLanes
15.2
'NumSwizzleLanes' is >= 'NumConstantLanes'
15.2
'NumSwizzleLanes' is >= 'NumConstantLanes'
15.2
'NumSwizzleLanes' is >= 'NumConstantLanes'
>= NumConstantLanes && NumSwizzleLanes >= NumSplatLanes) {
16
Assuming 'NumSwizzleLanes' is < 'NumSplatLanes'
1831 Result = DAG.getNode(WebAssemblyISD::SWIZZLE, DL, VecT, SwizzleSrc,
1832 SwizzleIndices);
1833 auto Swizzled = std::make_pair(SwizzleSrc, SwizzleIndices);
1834 IsLaneConstructed = [&, Swizzled](size_t I, const SDValue &Lane) {
1835 return Swizzled == GetSwizzleSrcs(I, Lane);
1836 };
1837 } else if (NumShuffleLanes
17.1
'NumShuffleLanes' is >= 'NumConstantLanes'
17.1
'NumShuffleLanes' is >= 'NumConstantLanes'
17.1
'NumShuffleLanes' is >= 'NumConstantLanes'
>= NumConstantLanes &&
18
Taking false branch
1838 NumShuffleLanes
17.2
'NumShuffleLanes' is < 'NumSplatLanes'
17.2
'NumShuffleLanes' is < 'NumSplatLanes'
17.2
'NumShuffleLanes' is < 'NumSplatLanes'
>= NumSplatLanes) {
1839 size_t DestLaneSize = VecT.getVectorElementType().getFixedSizeInBits() / 8;
1840 size_t DestLaneCount = VecT.getVectorNumElements();
1841 size_t Scale1 = 1;
1842 size_t Scale2 = 1;
1843 SDValue Src1 = ShuffleSrc1;
1844 SDValue Src2 = ShuffleSrc2 ? ShuffleSrc2 : DAG.getUNDEF(VecT);
1845 if (Src1.getValueType() != VecT) {
1846 size_t LaneSize =
1847 Src1.getValueType().getVectorElementType().getFixedSizeInBits() / 8;
1848 assert(LaneSize > DestLaneSize)(static_cast <bool> (LaneSize > DestLaneSize) ? void
(0) : __assert_fail ("LaneSize > DestLaneSize", "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1848, __extension__ __PRETTY_FUNCTION__))
;
1849 Scale1 = LaneSize / DestLaneSize;
1850 Src1 = DAG.getBitcast(VecT, Src1);
1851 }
1852 if (Src2.getValueType() != VecT) {
1853 size_t LaneSize =
1854 Src2.getValueType().getVectorElementType().getFixedSizeInBits() / 8;
1855 assert(LaneSize > DestLaneSize)(static_cast <bool> (LaneSize > DestLaneSize) ? void
(0) : __assert_fail ("LaneSize > DestLaneSize", "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1855, __extension__ __PRETTY_FUNCTION__))
;
1856 Scale2 = LaneSize / DestLaneSize;
1857 Src2 = DAG.getBitcast(VecT, Src2);
1858 }
1859
1860 int Mask[16];
1861 assert(DestLaneCount <= 16)(static_cast <bool> (DestLaneCount <= 16) ? void (0)
: __assert_fail ("DestLaneCount <= 16", "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1861, __extension__ __PRETTY_FUNCTION__))
;
1862 for (size_t I = 0; I < DestLaneCount; ++I) {
1863 const SDValue &Lane = Op->getOperand(I);
1864 SDValue Src = GetShuffleSrc(Lane);
1865 if (Src == ShuffleSrc1) {
1866 Mask[I] = Lane->getConstantOperandVal(1) * Scale1;
1867 } else if (Src && Src == ShuffleSrc2) {
1868 Mask[I] = DestLaneCount + Lane->getConstantOperandVal(1) * Scale2;
1869 } else {
1870 Mask[I] = -1;
1871 }
1872 }
1873 ArrayRef<int> MaskRef(Mask, DestLaneCount);
1874 Result = DAG.getVectorShuffle(VecT, DL, Src1, Src2, MaskRef);
1875 IsLaneConstructed = [&](size_t, const SDValue &Lane) {
1876 auto Src = GetShuffleSrc(Lane);
1877 return Src == ShuffleSrc1 || (Src && Src == ShuffleSrc2);
1878 };
1879 } else if (NumConstantLanes
18.1
'NumConstantLanes' is < 'NumSplatLanes'
18.1
'NumConstantLanes' is < 'NumSplatLanes'
18.1
'NumConstantLanes' is < 'NumSplatLanes'
>= NumSplatLanes) {
19
Taking false branch
1880 SmallVector<SDValue, 16> ConstLanes;
1881 for (const SDValue &Lane : Op->op_values()) {
1882 if (IsConstant(Lane)) {
1883 ConstLanes.push_back(Lane);
1884 } else if (LaneT.isFloatingPoint()) {
1885 ConstLanes.push_back(DAG.getConstantFP(0, DL, LaneT));
1886 } else {
1887 ConstLanes.push_back(DAG.getConstant(0, DL, LaneT));
1888 }
1889 }
1890 Result = DAG.getBuildVector(VecT, DL, ConstLanes);
1891 IsLaneConstructed = [&IsConstant](size_t _, const SDValue &Lane) {
1892 return IsConstant(Lane);
1893 };
1894 } else {
1895 // Use a splat, but possibly a load_splat
1896 LoadSDNode *SplattedLoad;
1897 if ((SplattedLoad = dyn_cast<LoadSDNode>(SplatValue)) &&
20
Assuming 'SplattedLoad' is null
21
Assuming pointer value is null
22
Taking false branch
1898 SplattedLoad->getMemoryVT() == VecT.getVectorElementType()) {
1899 Result = DAG.getMemIntrinsicNode(
1900 WebAssemblyISD::LOAD_SPLAT, DL, DAG.getVTList(VecT),
1901 {SplattedLoad->getChain(), SplattedLoad->getBasePtr(),
1902 SplattedLoad->getOffset()},
1903 SplattedLoad->getMemoryVT(), SplattedLoad->getMemOperand());
1904 } else {
1905 Result = DAG.getSplatBuildVector(VecT, DL, SplatValue);
23
The value of 'SplatValue' is assigned to 'Op.Node'
24
Calling 'SelectionDAG::getSplatBuildVector'
1906 }
1907 IsLaneConstructed = [&SplatValue](size_t _, const SDValue &Lane) {
1908 return Lane == SplatValue;
1909 };
1910 }
1911
1912 assert(Result)(static_cast <bool> (Result) ? void (0) : __assert_fail
("Result", "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1912, __extension__ __PRETTY_FUNCTION__))
;
1913 assert(IsLaneConstructed)(static_cast <bool> (IsLaneConstructed) ? void (0) : __assert_fail
("IsLaneConstructed", "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1913, __extension__ __PRETTY_FUNCTION__))
;
1914
1915 // Add replace_lane instructions for any unhandled values
1916 for (size_t I = 0; I < Lanes; ++I) {
1917 const SDValue &Lane = Op->getOperand(I);
1918 if (!Lane.isUndef() && !IsLaneConstructed(I, Lane))
1919 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VecT, Result, Lane,
1920 DAG.getConstant(I, DL, MVT::i32));
1921 }
1922
1923 return Result;
1924}
1925
1926SDValue
1927WebAssemblyTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
1928 SelectionDAG &DAG) const {
1929 SDLoc DL(Op);
1930 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op.getNode())->getMask();
1931 MVT VecType = Op.getOperand(0).getSimpleValueType();
1932 assert(VecType.is128BitVector() && "Unexpected shuffle vector type")(static_cast <bool> (VecType.is128BitVector() &&
"Unexpected shuffle vector type") ? void (0) : __assert_fail
("VecType.is128BitVector() && \"Unexpected shuffle vector type\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1932, __extension__ __PRETTY_FUNCTION__))
;
1933 size_t LaneBytes = VecType.getVectorElementType().getSizeInBits() / 8;
1934
1935 // Space for two vector args and sixteen mask indices
1936 SDValue Ops[18];
1937 size_t OpIdx = 0;
1938 Ops[OpIdx++] = Op.getOperand(0);
1939 Ops[OpIdx++] = Op.getOperand(1);
1940
1941 // Expand mask indices to byte indices and materialize them as operands
1942 for (int M : Mask) {
1943 for (size_t J = 0; J < LaneBytes; ++J) {
1944 // Lower undefs (represented by -1 in mask) to zero
1945 uint64_t ByteIndex = M == -1 ? 0 : (uint64_t)M * LaneBytes + J;
1946 Ops[OpIdx++] = DAG.getConstant(ByteIndex, DL, MVT::i32);
1947 }
1948 }
1949
1950 return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops);
1951}
1952
1953SDValue WebAssemblyTargetLowering::LowerSETCC(SDValue Op,
1954 SelectionDAG &DAG) const {
1955 SDLoc DL(Op);
1956 // The legalizer does not know how to expand the unsupported comparison modes
1957 // of i64x2 vectors, so we manually unroll them here.
1958 assert(Op->getOperand(0)->getSimpleValueType(0) == MVT::v2i64)(static_cast <bool> (Op->getOperand(0)->getSimpleValueType
(0) == MVT::v2i64) ? void (0) : __assert_fail ("Op->getOperand(0)->getSimpleValueType(0) == MVT::v2i64"
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1958, __extension__ __PRETTY_FUNCTION__))
;
1959 SmallVector<SDValue, 2> LHS, RHS;
1960 DAG.ExtractVectorElements(Op->getOperand(0), LHS);
1961 DAG.ExtractVectorElements(Op->getOperand(1), RHS);
1962 const SDValue &CC = Op->getOperand(2);
1963 auto MakeLane = [&](unsigned I) {
1964 return DAG.getNode(ISD::SELECT_CC, DL, MVT::i64, LHS[I], RHS[I],
1965 DAG.getConstant(uint64_t(-1), DL, MVT::i64),
1966 DAG.getConstant(uint64_t(0), DL, MVT::i64), CC);
1967 };
1968 return DAG.getBuildVector(Op->getValueType(0), DL,
1969 {MakeLane(0), MakeLane(1)});
1970}
1971
1972SDValue
1973WebAssemblyTargetLowering::LowerAccessVectorElement(SDValue Op,
1974 SelectionDAG &DAG) const {
1975 // Allow constant lane indices, expand variable lane indices
1976 SDNode *IdxNode = Op.getOperand(Op.getNumOperands() - 1).getNode();
1977 if (isa<ConstantSDNode>(IdxNode) || IdxNode->isUndef())
1978 return Op;
1979 else
1980 // Perform default expansion
1981 return SDValue();
1982}
1983
1984static SDValue unrollVectorShift(SDValue Op, SelectionDAG &DAG) {
1985 EVT LaneT = Op.getSimpleValueType().getVectorElementType();
1986 // 32-bit and 64-bit unrolled shifts will have proper semantics
1987 if (LaneT.bitsGE(MVT::i32))
1988 return DAG.UnrollVectorOp(Op.getNode());
1989 // Otherwise mask the shift value to get proper semantics from 32-bit shift
1990 SDLoc DL(Op);
1991 size_t NumLanes = Op.getSimpleValueType().getVectorNumElements();
1992 SDValue Mask = DAG.getConstant(LaneT.getSizeInBits() - 1, DL, MVT::i32);
1993 unsigned ShiftOpcode = Op.getOpcode();
1994 SmallVector<SDValue, 16> ShiftedElements;
1995 DAG.ExtractVectorElements(Op.getOperand(0), ShiftedElements, 0, 0, MVT::i32);
1996 SmallVector<SDValue, 16> ShiftElements;
1997 DAG.ExtractVectorElements(Op.getOperand(1), ShiftElements, 0, 0, MVT::i32);
1998 SmallVector<SDValue, 16> UnrolledOps;
1999 for (size_t i = 0; i < NumLanes; ++i) {
2000 SDValue MaskedShiftValue =
2001 DAG.getNode(ISD::AND, DL, MVT::i32, ShiftElements[i], Mask);
2002 SDValue ShiftedValue = ShiftedElements[i];
2003 if (ShiftOpcode == ISD::SRA)
2004 ShiftedValue = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32,
2005 ShiftedValue, DAG.getValueType(LaneT));
2006 UnrolledOps.push_back(
2007 DAG.getNode(ShiftOpcode, DL, MVT::i32, ShiftedValue, MaskedShiftValue));
2008 }
2009 return DAG.getBuildVector(Op.getValueType(), DL, UnrolledOps);
2010}
2011
2012SDValue WebAssemblyTargetLowering::LowerShift(SDValue Op,
2013 SelectionDAG &DAG) const {
2014 SDLoc DL(Op);
2015
2016 // Only manually lower vector shifts
2017 assert(Op.getSimpleValueType().isVector())(static_cast <bool> (Op.getSimpleValueType().isVector()
) ? void (0) : __assert_fail ("Op.getSimpleValueType().isVector()"
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 2017, __extension__ __PRETTY_FUNCTION__))
;
2018
2019 auto ShiftVal = DAG.getSplatValue(Op.getOperand(1));
2020 if (!ShiftVal)
2021 return unrollVectorShift(Op, DAG);
2022
2023 // Use anyext because none of the high bits can affect the shift
2024 ShiftVal = DAG.getAnyExtOrTrunc(ShiftVal, DL, MVT::i32);
2025
2026 unsigned Opcode;
2027 switch (Op.getOpcode()) {
2028 case ISD::SHL:
2029 Opcode = WebAssemblyISD::VEC_SHL;
2030 break;
2031 case ISD::SRA:
2032 Opcode = WebAssemblyISD::VEC_SHR_S;
2033 break;
2034 case ISD::SRL:
2035 Opcode = WebAssemblyISD::VEC_SHR_U;
2036 break;
2037 default:
2038 llvm_unreachable("unexpected opcode")::llvm::llvm_unreachable_internal("unexpected opcode", "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 2038)
;
2039 }
2040
2041 return DAG.getNode(Opcode, DL, Op.getValueType(), Op.getOperand(0), ShiftVal);
2042}
2043
2044SDValue WebAssemblyTargetLowering::LowerFP_TO_INT_SAT(SDValue Op,
2045 SelectionDAG &DAG) const {
2046 SDLoc DL(Op);
2047 EVT ResT = Op.getValueType();
2048 EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
2049
2050 if ((ResT == MVT::i32 || ResT == MVT::i64) &&
2051 (SatVT == MVT::i32 || SatVT == MVT::i64))
2052 return Op;
2053
2054 if (ResT == MVT::v4i32 && SatVT == MVT::i32)
2055 return Op;
2056
2057 return SDValue();
2058}
2059
2060//===----------------------------------------------------------------------===//
2061// Custom DAG combine hooks
2062//===----------------------------------------------------------------------===//
2063static SDValue
2064performVECTOR_SHUFFLECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
2065 auto &DAG = DCI.DAG;
2066 auto Shuffle = cast<ShuffleVectorSDNode>(N);
2067
2068 // Hoist vector bitcasts that don't change the number of lanes out of unary
2069 // shuffles, where they are less likely to get in the way of other combines.
2070 // (shuffle (vNxT1 (bitcast (vNxT0 x))), undef, mask) ->
2071 // (vNxT1 (bitcast (vNxT0 (shuffle x, undef, mask))))
2072 SDValue Bitcast = N->getOperand(0);
2073 if (Bitcast.getOpcode() != ISD::BITCAST)
2074 return SDValue();
2075 if (!N->getOperand(1).isUndef())
2076 return SDValue();
2077 SDValue CastOp = Bitcast.getOperand(0);
2078 MVT SrcType = CastOp.getSimpleValueType();
2079 MVT DstType = Bitcast.getSimpleValueType();
2080 if (!SrcType.is128BitVector() ||
2081 SrcType.getVectorNumElements() != DstType.getVectorNumElements())
2082 return SDValue();
2083 SDValue NewShuffle = DAG.getVectorShuffle(
2084 SrcType, SDLoc(N), CastOp, DAG.getUNDEF(SrcType), Shuffle->getMask());
2085 return DAG.getBitcast(DstType, NewShuffle);
2086}
2087
2088static SDValue
2089performVectorExtendCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
2090 auto &DAG = DCI.DAG;
2091 assert(N->getOpcode() == ISD::SIGN_EXTEND ||(static_cast <bool> (N->getOpcode() == ISD::SIGN_EXTEND
|| N->getOpcode() == ISD::ZERO_EXTEND) ? void (0) : __assert_fail
("N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND"
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 2092, __extension__ __PRETTY_FUNCTION__))
2092 N->getOpcode() == ISD::ZERO_EXTEND)(static_cast <bool> (N->getOpcode() == ISD::SIGN_EXTEND
|| N->getOpcode() == ISD::ZERO_EXTEND) ? void (0) : __assert_fail
("N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND"
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 2092, __extension__ __PRETTY_FUNCTION__))
;
2093
2094 // Combine ({s,z}ext (extract_subvector src, i)) into a widening operation if
2095 // possible before the extract_subvector can be expanded.
2096 auto Extract = N->getOperand(0);
2097 if (Extract.getOpcode() != ISD::EXTRACT_SUBVECTOR)
2098 return SDValue();
2099 auto Source = Extract.getOperand(0);
2100 auto *IndexNode = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
2101 if (IndexNode == nullptr)
2102 return SDValue();
2103 auto Index = IndexNode->getZExtValue();
2104
2105 // Only v8i8, v4i16, and v2i32 extracts can be widened, and only if the
2106 // extracted subvector is the low or high half of its source.
2107 EVT ResVT = N->getValueType(0);
2108 if (ResVT == MVT::v8i16) {
2109 if (Extract.getValueType() != MVT::v8i8 ||
2110 Source.getValueType() != MVT::v16i8 || (Index != 0 && Index != 8))
2111 return SDValue();
2112 } else if (ResVT == MVT::v4i32) {
2113 if (Extract.getValueType() != MVT::v4i16 ||
2114 Source.getValueType() != MVT::v8i16 || (Index != 0 && Index != 4))
2115 return SDValue();
2116 } else if (ResVT == MVT::v2i64) {
2117 if (Extract.getValueType() != MVT::v2i32 ||
2118 Source.getValueType() != MVT::v4i32 || (Index != 0 && Index != 2))
2119 return SDValue();
2120 } else {
2121 return SDValue();
2122 }
2123
2124 bool IsSext = N->getOpcode() == ISD::SIGN_EXTEND;
2125 bool IsLow = Index == 0;
2126
2127 unsigned Op = IsSext ? (IsLow ? WebAssemblyISD::EXTEND_LOW_S
2128 : WebAssemblyISD::EXTEND_HIGH_S)
2129 : (IsLow ? WebAssemblyISD::EXTEND_LOW_U
2130 : WebAssemblyISD::EXTEND_HIGH_U);
2131
2132 return DAG.getNode(Op, SDLoc(N), ResVT, Source);
2133}
2134
2135static SDValue
2136performVectorConvertLowCombine(SDNode *N,
2137 TargetLowering::DAGCombinerInfo &DCI) {
2138 auto &DAG = DCI.DAG;
2139
2140 EVT ResVT = N->getValueType(0);
2141 if (ResVT != MVT::v2f64)
2142 return SDValue();
2143
2144 auto GetWasmConversionOp = [](unsigned Op) {
2145 switch (Op) {
2146 case ISD::SINT_TO_FP:
2147 return WebAssemblyISD::CONVERT_LOW_S;
2148 case ISD::UINT_TO_FP:
2149 return WebAssemblyISD::CONVERT_LOW_U;
2150 case ISD::FP_EXTEND:
2151 return WebAssemblyISD::PROMOTE_LOW;
2152 }
2153 llvm_unreachable("unexpected op")::llvm::llvm_unreachable_internal("unexpected op", "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 2153)
;
2154 };
2155
2156 if (N->getOpcode() == ISD::EXTRACT_SUBVECTOR) {
2157 // Combine this:
2158 //
2159 // (v2f64 (extract_subvector
2160 // (v4f64 ({s,u}int_to_fp (v4i32 $x))), 0))
2161 //
2162 // into (f64x2.convert_low_i32x4_{s,u} $x).
2163 //
2164 // Or this:
2165 //
2166 // (v2f64 (extract_subvector
2167 // (v4f64 (fp_extend (v4f32 $x))), 0))
2168 //
2169 // into (f64x2.promote_low_f32x4 $x).
2170 auto Conversion = N->getOperand(0);
2171 auto ConversionOp = Conversion.getOpcode();
2172 MVT ExpectedSourceType;
2173 switch (ConversionOp) {
2174 case ISD::SINT_TO_FP:
2175 case ISD::UINT_TO_FP:
2176 ExpectedSourceType = MVT::v4i32;
2177 break;
2178 case ISD::FP_EXTEND:
2179 ExpectedSourceType = MVT::v4f32;
2180 break;
2181 default:
2182 return SDValue();
2183 }
2184
2185 if (Conversion.getValueType() != MVT::v4f64)
2186 return SDValue();
2187
2188 auto Source = Conversion.getOperand(0);
2189 if (Source.getValueType() != ExpectedSourceType)
2190 return SDValue();
2191
2192 auto IndexNode = dyn_cast<ConstantSDNode>(N->getOperand(1));
2193 if (IndexNode == nullptr || IndexNode->getZExtValue() != 0)
2194 return SDValue();
2195
2196 auto Op = GetWasmConversionOp(ConversionOp);
2197 return DAG.getNode(Op, SDLoc(N), ResVT, Source);
2198 }
2199
2200 // Combine this:
2201 //
2202 // (v2f64 ({s,u}int_to_fp
2203 // (v2i32 (extract_subvector (v4i32 $x), 0))))
2204 //
2205 // into (f64x2.convert_low_i32x4_{s,u} $x).
2206 //
2207 // Or this:
2208 //
2209 // (v2f64 (fp_extend
2210 // (v2f32 (extract_subvector (v4f32 $x), 0))))
2211 //
2212 // into (f64x2.promote_low_f32x4 $x).
2213 auto ConversionOp = N->getOpcode();
2214 MVT ExpectedExtractType;
2215 MVT ExpectedSourceType;
2216 switch (ConversionOp) {
2217 case ISD::SINT_TO_FP:
2218 case ISD::UINT_TO_FP:
2219 ExpectedExtractType = MVT::v2i32;
2220 ExpectedSourceType = MVT::v4i32;
2221 break;
2222 case ISD::FP_EXTEND:
2223 ExpectedExtractType = MVT::v2f32;
2224 ExpectedSourceType = MVT::v4f32;
2225 break;
2226 default:
2227 llvm_unreachable("unexpected opcode")::llvm::llvm_unreachable_internal("unexpected opcode", "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 2227)
;
2228 }
2229
2230 auto Extract = N->getOperand(0);
2231 if (Extract.getOpcode() != ISD::EXTRACT_SUBVECTOR)
2232 return SDValue();
2233
2234 if (Extract.getValueType() != ExpectedExtractType)
2235 return SDValue();
2236
2237 auto Source = Extract.getOperand(0);
2238 if (Source.getValueType() != ExpectedSourceType)
2239 return SDValue();
2240
2241 auto *IndexNode = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
2242 if (IndexNode == nullptr || IndexNode->getZExtValue() != 0)
2243 return SDValue();
2244
2245 unsigned Op = GetWasmConversionOp(ConversionOp);
2246 return DAG.getNode(Op, SDLoc(N), ResVT, Source);
2247}
2248
2249static SDValue
2250performVectorTruncZeroCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
2251 auto &DAG = DCI.DAG;
2252
2253 auto GetWasmConversionOp = [](unsigned Op) {
2254 switch (Op) {
2255 case ISD::FP_TO_SINT_SAT:
2256 return WebAssemblyISD::TRUNC_SAT_ZERO_S;
2257 case ISD::FP_TO_UINT_SAT:
2258 return WebAssemblyISD::TRUNC_SAT_ZERO_U;
2259 case ISD::FP_ROUND:
2260 return WebAssemblyISD::DEMOTE_ZERO;
2261 }
2262 llvm_unreachable("unexpected op")::llvm::llvm_unreachable_internal("unexpected op", "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 2262)
;
2263 };
2264
2265 auto IsZeroSplat = [](SDValue SplatVal) {
2266 auto *Splat = dyn_cast<BuildVectorSDNode>(SplatVal.getNode());
2267 APInt SplatValue, SplatUndef;
2268 unsigned SplatBitSize;
2269 bool HasAnyUndefs;
2270 return Splat &&
2271 Splat->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
2272 HasAnyUndefs) &&
2273 SplatValue == 0;
2274 };
2275
2276 if (N->getOpcode() == ISD::CONCAT_VECTORS) {
2277 // Combine this:
2278 //
2279 // (concat_vectors (v2i32 (fp_to_{s,u}int_sat $x, 32)), (v2i32 (splat 0)))
2280 //
2281 // into (i32x4.trunc_sat_f64x2_zero_{s,u} $x).
2282 //
2283 // Or this:
2284 //
2285 // (concat_vectors (v2f32 (fp_round (v2f64 $x))), (v2f32 (splat 0)))
2286 //
2287 // into (f32x4.demote_zero_f64x2 $x).
2288 EVT ResVT;
2289 EVT ExpectedConversionType;
2290 auto Conversion = N->getOperand(0);
2291 auto ConversionOp = Conversion.getOpcode();
2292 switch (ConversionOp) {
2293 case ISD::FP_TO_SINT_SAT:
2294 case ISD::FP_TO_UINT_SAT:
2295 ResVT = MVT::v4i32;
2296 ExpectedConversionType = MVT::v2i32;
2297 break;
2298 case ISD::FP_ROUND:
2299 ResVT = MVT::v4f32;
2300 ExpectedConversionType = MVT::v2f32;
2301 break;
2302 default:
2303 return SDValue();
2304 }
2305
2306 if (N->getValueType(0) != ResVT)
2307 return SDValue();
2308
2309 if (Conversion.getValueType() != ExpectedConversionType)
2310 return SDValue();
2311
2312 auto Source = Conversion.getOperand(0);
2313 if (Source.getValueType() != MVT::v2f64)
2314 return SDValue();
2315
2316 if (!IsZeroSplat(N->getOperand(1)) ||
2317 N->getOperand(1).getValueType() != ExpectedConversionType)
2318 return SDValue();
2319
2320 unsigned Op = GetWasmConversionOp(ConversionOp);
2321 return DAG.getNode(Op, SDLoc(N), ResVT, Source);
2322 }
2323
2324 // Combine this:
2325 //
2326 // (fp_to_{s,u}int_sat (concat_vectors $x, (v2f64 (splat 0))), 32)
2327 //
2328 // into (i32x4.trunc_sat_f64x2_zero_{s,u} $x).
2329 //
2330 // Or this:
2331 //
2332 // (v4f32 (fp_round (concat_vectors $x, (v2f64 (splat 0)))))
2333 //
2334 // into (f32x4.demote_zero_f64x2 $x).
2335 EVT ResVT;
2336 auto ConversionOp = N->getOpcode();
2337 switch (ConversionOp) {
2338 case ISD::FP_TO_SINT_SAT:
2339 case ISD::FP_TO_UINT_SAT:
2340 ResVT = MVT::v4i32;
2341 break;
2342 case ISD::FP_ROUND:
2343 ResVT = MVT::v4f32;
2344 break;
2345 default:
2346 llvm_unreachable("unexpected op")::llvm::llvm_unreachable_internal("unexpected op", "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 2346)
;
2347 }
2348
2349 if (N->getValueType(0) != ResVT)
2350 return SDValue();
2351
2352 auto Concat = N->getOperand(0);
2353 if (Concat.getValueType() != MVT::v4f64)
2354 return SDValue();
2355
2356 auto Source = Concat.getOperand(0);
2357 if (Source.getValueType() != MVT::v2f64)
2358 return SDValue();
2359
2360 if (!IsZeroSplat(Concat.getOperand(1)) ||
2361 Concat.getOperand(1).getValueType() != MVT::v2f64)
2362 return SDValue();
2363
2364 unsigned Op = GetWasmConversionOp(ConversionOp);
2365 return DAG.getNode(Op, SDLoc(N), ResVT, Source);
2366}
2367
2368SDValue
2369WebAssemblyTargetLowering::PerformDAGCombine(SDNode *N,
2370 DAGCombinerInfo &DCI) const {
2371 switch (N->getOpcode()) {
2372 default:
2373 return SDValue();
2374 case ISD::VECTOR_SHUFFLE:
2375 return performVECTOR_SHUFFLECombine(N, DCI);
2376 case ISD::SIGN_EXTEND:
2377 case ISD::ZERO_EXTEND:
2378 return performVectorExtendCombine(N, DCI);
2379 case ISD::SINT_TO_FP:
2380 case ISD::UINT_TO_FP:
2381 case ISD::FP_EXTEND:
2382 case ISD::EXTRACT_SUBVECTOR:
2383 return performVectorConvertLowCombine(N, DCI);
2384 case ISD::FP_TO_SINT_SAT:
2385 case ISD::FP_TO_UINT_SAT:
2386 case ISD::FP_ROUND:
2387 case ISD::CONCAT_VECTORS:
2388 return performVectorTruncZeroCombine(N, DCI);
2389 }
2390}

/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/include/llvm/CodeGen/SelectionDAG.h

1//===- llvm/CodeGen/SelectionDAG.h - InstSelection DAG ----------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file declares the SelectionDAG class, and transitively defines the
10// SDNode class and subclasses.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_CODEGEN_SELECTIONDAG_H
15#define LLVM_CODEGEN_SELECTIONDAG_H
16
17#include "llvm/ADT/APFloat.h"
18#include "llvm/ADT/APInt.h"
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/DenseMap.h"
21#include "llvm/ADT/DenseSet.h"
22#include "llvm/ADT/FoldingSet.h"
23#include "llvm/ADT/SetVector.h"
24#include "llvm/ADT/SmallVector.h"
25#include "llvm/ADT/StringMap.h"
26#include "llvm/ADT/ilist.h"
27#include "llvm/ADT/iterator.h"
28#include "llvm/ADT/iterator_range.h"
29#include "llvm/CodeGen/DAGCombine.h"
30#include "llvm/CodeGen/ISDOpcodes.h"
31#include "llvm/CodeGen/MachineFunction.h"
32#include "llvm/CodeGen/MachineMemOperand.h"
33#include "llvm/CodeGen/SelectionDAGNodes.h"
34#include "llvm/CodeGen/ValueTypes.h"
35#include "llvm/IR/DebugLoc.h"
36#include "llvm/IR/Instructions.h"
37#include "llvm/IR/Metadata.h"
38#include "llvm/Support/Allocator.h"
39#include "llvm/Support/ArrayRecycler.h"
40#include "llvm/Support/AtomicOrdering.h"
41#include "llvm/Support/Casting.h"
42#include "llvm/Support/CodeGen.h"
43#include "llvm/Support/ErrorHandling.h"
44#include "llvm/Support/MachineValueType.h"
45#include "llvm/Support/RecyclingAllocator.h"
46#include <algorithm>
47#include <cassert>
48#include <cstdint>
49#include <functional>
50#include <map>
51#include <string>
52#include <tuple>
53#include <utility>
54#include <vector>
55
56namespace llvm {
57
58class AAResults;
59class BlockAddress;
60class BlockFrequencyInfo;
61class Constant;
62class ConstantFP;
63class ConstantInt;
64class DataLayout;
65struct fltSemantics;
66class FunctionLoweringInfo;
67class GlobalValue;
68struct KnownBits;
69class LegacyDivergenceAnalysis;
70class LLVMContext;
71class MachineBasicBlock;
72class MachineConstantPoolValue;
73class MCSymbol;
74class OptimizationRemarkEmitter;
75class ProfileSummaryInfo;
76class SDDbgValue;
77class SDDbgOperand;
78class SDDbgLabel;
79class SelectionDAG;
80class SelectionDAGTargetInfo;
81class TargetLibraryInfo;
82class TargetLowering;
83class TargetMachine;
84class TargetSubtargetInfo;
85class Value;
86
/// FoldingSet node that interns an SDVTList (the list of value types an SDNode
/// produces) so identical lists are shared across a SelectionDAG.
class SDVTListNode : public FoldingSetNode {
  friend struct FoldingSetTrait<SDVTListNode>;

  /// A reference to an Interned FoldingSetNodeID for this node.
  /// The Allocator in SelectionDAG holds the data.
  /// SDVTList contains all types which are frequently accessed in SelectionDAG.
  /// The size of this list is not expected to be big so it won't introduce
  /// a memory penalty.
  FoldingSetNodeIDRef FastID;
  /// Pointer to the interned value-type array; the storage is owned elsewhere
  /// (by SelectionDAG's allocator), this node only references it.
  const EVT *VTs;
  unsigned int NumVTs;
  /// The hash value for SDVTList is fixed, so cache it to avoid
  /// hash calculation.
  unsigned HashValue;

public:
  SDVTListNode(const FoldingSetNodeIDRef ID, const EVT *VT, unsigned int Num) :
    FastID(ID), VTs(VT), NumVTs(Num) {
    HashValue = ID.ComputeHash();
  }

  /// Return a value-semantics view (pointer + count) of the interned list.
  SDVTList getSDVTList() {
    SDVTList result = {VTs, NumVTs};
    return result;
  }
};
113
114/// Specialize FoldingSetTrait for SDVTListNode
115/// to avoid computing temp FoldingSetNodeID and hash value.
template<> struct FoldingSetTrait<SDVTListNode> : DefaultFoldingSetTrait<SDVTListNode> {
  /// Profile by copying the node's pre-computed, interned FoldingSetNodeID
  /// instead of recomputing it field by field.
  static void Profile(const SDVTListNode &X, FoldingSetNodeID& ID) {
    ID = X.FastID;
  }

  /// Compare the cached hash first so most mismatches are rejected without a
  /// full ID comparison.
  static bool Equals(const SDVTListNode &X, const FoldingSetNodeID &ID,
                     unsigned IDHash, FoldingSetNodeID &TempID) {
    if (X.HashValue != IDHash)
      return false;
    return ID == X.FastID;
  }

  /// Return the hash cached at construction time; TempID is intentionally
  /// unused.
  static unsigned ComputeHash(const SDVTListNode &X, FoldingSetNodeID &TempID) {
    return X.HashValue;
  }
};
132
/// SDNodes are pool-allocated by SelectionDAG (see its NodeAllocator) and are
/// never freed individually through the intrusive list, so any deleteNode
/// call from the ilist machinery indicates a bug.
template <> struct ilist_alloc_traits<SDNode> {
  static void deleteNode(SDNode *) {
    llvm_unreachable("ilist_traits<SDNode> shouldn't see a deleteNode call!");
  }
};
138
139/// Keeps track of dbg_value information through SDISel. We do
140/// not build SDNodes for these so as not to perturb the generated code;
141/// instead the info is kept off to the side in this structure. Each SDNode may
142/// have one or more associated dbg_value entries. This information is kept in
143/// DbgValMap.
144/// Byval parameters are handled separately because they don't use alloca's,
145/// which busts the normal mechanism. There is good reason for handling all
146/// parameters separately: they may not have code generated for them, they
147/// should always go at the beginning of the function regardless of other code
148/// motion, and debug info for them is potentially useful even if the parameter
149/// is unused. Right now only byval parameters are handled separately.
class SDDbgInfo {
  // Arena owning the storage of all SDDbgValue/SDDbgLabel objects referenced
  // by the vectors below; released wholesale in clear().
  BumpPtrAllocator Alloc;
  SmallVector<SDDbgValue*, 32> DbgValues;
  // Byval parameters are tracked separately; see the class comment above the
  // definition for why.
  SmallVector<SDDbgValue*, 32> ByvalParmDbgValues;
  SmallVector<SDDbgLabel*, 4> DbgLabels;
  using DbgValMapType = DenseMap<const SDNode *, SmallVector<SDDbgValue *, 2>>;
  // Maps an SDNode to the dbg_values attached to it.
  DbgValMapType DbgValMap;

public:
  SDDbgInfo() = default;
  SDDbgInfo(const SDDbgInfo &) = delete;
  SDDbgInfo &operator=(const SDDbgInfo &) = delete;

  // Defined out of line; isParameter selects the ByvalParmDbgValues list
  // (presumably — confirm against the .cpp definition).
  void add(SDDbgValue *V, bool isParameter);

  void add(SDDbgLabel *L) { DbgLabels.push_back(L); }

  /// Invalidate all DbgValues attached to the node and remove
  /// it from the Node-to-DbgValues map.
  void erase(const SDNode *Node);

  /// Drop all recorded debug info and release the arena. Any pointers handed
  /// out earlier (e.g. via getSDDbgValues) are invalidated.
  void clear() {
    DbgValMap.clear();
    DbgValues.clear();
    ByvalParmDbgValues.clear();
    DbgLabels.clear();
    Alloc.Reset();
  }

  BumpPtrAllocator &getAlloc() { return Alloc; }

  bool empty() const {
    return DbgValues.empty() && ByvalParmDbgValues.empty() && DbgLabels.empty();
  }

  /// Return the dbg_values attached to Node, or an empty ArrayRef if none.
  ArrayRef<SDDbgValue*> getSDDbgValues(const SDNode *Node) const {
    auto I = DbgValMap.find(Node);
    if (I != DbgValMap.end())
      return I->second;
    return ArrayRef<SDDbgValue*>();
  }

  using DbgIterator = SmallVectorImpl<SDDbgValue*>::iterator;
  using DbgLabelIterator = SmallVectorImpl<SDDbgLabel*>::iterator;

  DbgIterator DbgBegin() { return DbgValues.begin(); }
  DbgIterator DbgEnd() { return DbgValues.end(); }
  DbgIterator ByvalParmDbgBegin() { return ByvalParmDbgValues.begin(); }
  DbgIterator ByvalParmDbgEnd() { return ByvalParmDbgValues.end(); }
  DbgLabelIterator DbgLabelBegin() { return DbgLabels.begin(); }
  DbgLabelIterator DbgLabelEnd() { return DbgLabels.end(); }
};
202
203void checkForCycles(const SelectionDAG *DAG, bool force = false);
204
205/// This is used to represent a portion of an LLVM function in a low-level
206/// Data Dependence DAG representation suitable for instruction selection.
207/// This DAG is constructed as the first step of instruction selection in order
208/// to allow implementation of machine specific optimizations
209/// and code simplifications.
210///
211/// The representation used by the SelectionDAG is a target-independent
212/// representation, which has some similarities to the GCC RTL representation,
213/// but is significantly more simple, powerful, and is a graph form instead of a
214/// linear form.
215///
216class SelectionDAG {
217 const TargetMachine &TM;
218 const SelectionDAGTargetInfo *TSI = nullptr;
219 const TargetLowering *TLI = nullptr;
220 const TargetLibraryInfo *LibInfo = nullptr;
221 MachineFunction *MF;
222 Pass *SDAGISelPass = nullptr;
223 LLVMContext *Context;
224 CodeGenOpt::Level OptLevel;
225
226 LegacyDivergenceAnalysis * DA = nullptr;
227 FunctionLoweringInfo * FLI = nullptr;
228
229 /// The function-level optimization remark emitter. Used to emit remarks
230 /// whenever manipulating the DAG.
231 OptimizationRemarkEmitter *ORE;
232
233 ProfileSummaryInfo *PSI = nullptr;
234 BlockFrequencyInfo *BFI = nullptr;
235
236 /// The starting token.
237 SDNode EntryNode;
238
239 /// The root of the entire DAG.
240 SDValue Root;
241
242 /// A linked list of nodes in the current DAG.
243 ilist<SDNode> AllNodes;
244
245 /// The AllocatorType for allocating SDNodes. We use
246 /// pool allocation with recycling.
247 using NodeAllocatorType = RecyclingAllocator<BumpPtrAllocator, SDNode,
248 sizeof(LargestSDNode),
249 alignof(MostAlignedSDNode)>;
250
251 /// Pool allocation for nodes.
252 NodeAllocatorType NodeAllocator;
253
254 /// This structure is used to memoize nodes, automatically performing
255 /// CSE with existing nodes when a duplicate is requested.
256 FoldingSet<SDNode> CSEMap;
257
258 /// Pool allocation for machine-opcode SDNode operands.
259 BumpPtrAllocator OperandAllocator;
260 ArrayRecycler<SDUse> OperandRecycler;
261
262 /// Pool allocation for misc. objects that are created once per SelectionDAG.
263 BumpPtrAllocator Allocator;
264
265 /// Tracks dbg_value and dbg_label information through SDISel.
266 SDDbgInfo *DbgInfo;
267
268 using CallSiteInfo = MachineFunction::CallSiteInfo;
269 using CallSiteInfoImpl = MachineFunction::CallSiteInfoImpl;
270
271 struct CallSiteDbgInfo {
272 CallSiteInfo CSInfo;
273 MDNode *HeapAllocSite = nullptr;
274 bool NoMerge = false;
275 };
276
277 DenseMap<const SDNode *, CallSiteDbgInfo> SDCallSiteDbgInfo;
278
279 uint16_t NextPersistentId = 0;
280
281public:
282 /// Clients of various APIs that cause global effects on
283 /// the DAG can optionally implement this interface. This allows the clients
284 /// to handle the various sorts of updates that happen.
285 ///
286 /// A DAGUpdateListener automatically registers itself with DAG when it is
287 /// constructed, and removes itself when destroyed in RAII fashion.
288 struct DAGUpdateListener {
289 DAGUpdateListener *const Next;
290 SelectionDAG &DAG;
291
292 explicit DAGUpdateListener(SelectionDAG &D)
293 : Next(D.UpdateListeners), DAG(D) {
294 DAG.UpdateListeners = this;
295 }
296
297 virtual ~DAGUpdateListener() {
298 assert(DAG.UpdateListeners == this &&(static_cast <bool> (DAG.UpdateListeners == this &&
"DAGUpdateListeners must be destroyed in LIFO order") ? void
(0) : __assert_fail ("DAG.UpdateListeners == this && \"DAGUpdateListeners must be destroyed in LIFO order\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 299, __extension__ __PRETTY_FUNCTION__))
299 "DAGUpdateListeners must be destroyed in LIFO order")(static_cast <bool> (DAG.UpdateListeners == this &&
"DAGUpdateListeners must be destroyed in LIFO order") ? void
(0) : __assert_fail ("DAG.UpdateListeners == this && \"DAGUpdateListeners must be destroyed in LIFO order\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 299, __extension__ __PRETTY_FUNCTION__))
;
300 DAG.UpdateListeners = Next;
301 }
302
303 /// The node N that was deleted and, if E is not null, an
304 /// equivalent node E that replaced it.
305 virtual void NodeDeleted(SDNode *N, SDNode *E);
306
307 /// The node N that was updated.
308 virtual void NodeUpdated(SDNode *N);
309
310 /// The node N that was inserted.
311 virtual void NodeInserted(SDNode *N);
312 };
313
314 struct DAGNodeDeletedListener : public DAGUpdateListener {
315 std::function<void(SDNode *, SDNode *)> Callback;
316
317 DAGNodeDeletedListener(SelectionDAG &DAG,
318 std::function<void(SDNode *, SDNode *)> Callback)
319 : DAGUpdateListener(DAG), Callback(std::move(Callback)) {}
320
321 void NodeDeleted(SDNode *N, SDNode *E) override { Callback(N, E); }
322
323 private:
324 virtual void anchor();
325 };
326
327 /// Help to insert SDNodeFlags automatically in transforming. Use
328 /// RAII to save and resume flags in current scope.
329 class FlagInserter {
330 SelectionDAG &DAG;
331 SDNodeFlags Flags;
332 FlagInserter *LastInserter;
333
334 public:
335 FlagInserter(SelectionDAG &SDAG, SDNodeFlags Flags)
336 : DAG(SDAG), Flags(Flags),
337 LastInserter(SDAG.getFlagInserter()) {
338 SDAG.setFlagInserter(this);
339 }
340 FlagInserter(SelectionDAG &SDAG, SDNode *N)
341 : FlagInserter(SDAG, N->getFlags()) {}
342
343 FlagInserter(const FlagInserter &) = delete;
344 FlagInserter &operator=(const FlagInserter &) = delete;
345 ~FlagInserter() { DAG.setFlagInserter(LastInserter); }
346
347 SDNodeFlags getFlags() const { return Flags; }
348 };
349
350 /// When true, additional steps are taken to
351 /// ensure that getConstant() and similar functions return DAG nodes that
352 /// have legal types. This is important after type legalization since
353 /// any illegally typed nodes generated after this point will not experience
354 /// type legalization.
355 bool NewNodesMustHaveLegalTypes = false;
356
357private:
358 /// DAGUpdateListener is a friend so it can manipulate the listener stack.
359 friend struct DAGUpdateListener;
360
361 /// Linked list of registered DAGUpdateListener instances.
362 /// This stack is maintained by DAGUpdateListener RAII.
363 DAGUpdateListener *UpdateListeners = nullptr;
364
365 /// Implementation of setSubgraphColor.
366 /// Return whether we had to truncate the search.
367 bool setSubgraphColorHelper(SDNode *N, const char *Color,
368 DenseSet<SDNode *> &visited,
369 int level, bool &printed);
370
371 template <typename SDNodeT, typename... ArgTypes>
372 SDNodeT *newSDNode(ArgTypes &&... Args) {
373 return new (NodeAllocator.template Allocate<SDNodeT>())
374 SDNodeT(std::forward<ArgTypes>(Args)...);
375 }
376
377 /// Build a synthetic SDNodeT with the given args and extract its subclass
378 /// data as an integer (e.g. for use in a folding set).
379 ///
380 /// The args to this function are the same as the args to SDNodeT's
381 /// constructor, except the second arg (assumed to be a const DebugLoc&) is
382 /// omitted.
383 template <typename SDNodeT, typename... ArgTypes>
384 static uint16_t getSyntheticNodeSubclassData(unsigned IROrder,
385 ArgTypes &&... Args) {
386 // The compiler can reduce this expression to a constant iff we pass an
387 // empty DebugLoc. Thankfully, the debug location doesn't have any bearing
388 // on the subclass data.
389 return SDNodeT(IROrder, DebugLoc(), std::forward<ArgTypes>(Args)...)
390 .getRawSubclassData();
391 }
392
393 template <typename SDNodeTy>
394 static uint16_t getSyntheticNodeSubclassData(unsigned Opc, unsigned Order,
395 SDVTList VTs, EVT MemoryVT,
396 MachineMemOperand *MMO) {
397 return SDNodeTy(Opc, Order, DebugLoc(), VTs, MemoryVT, MMO)
398 .getRawSubclassData();
399 }
400
401 void createOperands(SDNode *Node, ArrayRef<SDValue> Vals);
402
403 void removeOperands(SDNode *Node) {
404 if (!Node->OperandList)
405 return;
406 OperandRecycler.deallocate(
407 ArrayRecycler<SDUse>::Capacity::get(Node->NumOperands),
408 Node->OperandList);
409 Node->NumOperands = 0;
410 Node->OperandList = nullptr;
411 }
412 void CreateTopologicalOrder(std::vector<SDNode*>& Order);
413
414public:
415 // Maximum depth for recursive analysis such as computeKnownBits, etc.
416 static constexpr unsigned MaxRecursionDepth = 6;
417
418 explicit SelectionDAG(const TargetMachine &TM, CodeGenOpt::Level);
419 SelectionDAG(const SelectionDAG &) = delete;
420 SelectionDAG &operator=(const SelectionDAG &) = delete;
421 ~SelectionDAG();
422
423 /// Prepare this SelectionDAG to process code in the given MachineFunction.
424 void init(MachineFunction &NewMF, OptimizationRemarkEmitter &NewORE,
425 Pass *PassPtr, const TargetLibraryInfo *LibraryInfo,
426 LegacyDivergenceAnalysis * Divergence,
427 ProfileSummaryInfo *PSIin, BlockFrequencyInfo *BFIin);
428
429 void setFunctionLoweringInfo(FunctionLoweringInfo * FuncInfo) {
430 FLI = FuncInfo;
431 }
432
433 /// Clear state and free memory necessary to make this
434 /// SelectionDAG ready to process a new block.
435 void clear();
436
437 MachineFunction &getMachineFunction() const { return *MF; }
438 const Pass *getPass() const { return SDAGISelPass; }
439
440 const DataLayout &getDataLayout() const { return MF->getDataLayout(); }
441 const TargetMachine &getTarget() const { return TM; }
442 const TargetSubtargetInfo &getSubtarget() const { return MF->getSubtarget(); }
443 const TargetLowering &getTargetLoweringInfo() const { return *TLI; }
444 const TargetLibraryInfo &getLibInfo() const { return *LibInfo; }
445 const SelectionDAGTargetInfo &getSelectionDAGInfo() const { return *TSI; }
446 const LegacyDivergenceAnalysis *getDivergenceAnalysis() const { return DA; }
447 LLVMContext *getContext() const { return Context; }
448 OptimizationRemarkEmitter &getORE() const { return *ORE; }
449 ProfileSummaryInfo *getPSI() const { return PSI; }
450 BlockFrequencyInfo *getBFI() const { return BFI; }
451
452 FlagInserter *getFlagInserter() { return Inserter; }
453 void setFlagInserter(FlagInserter *FI) { Inserter = FI; }
454
455 /// Just dump dot graph to a user-provided path and title.
456 /// This doesn't open the dot viewer program and
457 /// helps visualization when outside debugging session.
458 /// FileName expects absolute path. If provided
459 /// without any path separators then the file
460 /// will be created in the current directory.
461 /// Error will be emitted if the path is insane.
462#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
463 LLVM_DUMP_METHOD__attribute__((noinline)) __attribute__((__used__)) void dumpDotGraph(const Twine &FileName, const Twine &Title);
464#endif
465
466 /// Pop up a GraphViz/gv window with the DAG rendered using 'dot'.
467 void viewGraph(const std::string &Title);
468 void viewGraph();
469
470#ifndef NDEBUG
471 std::map<const SDNode *, std::string> NodeGraphAttrs;
472#endif
473
474 /// Clear all previously defined node graph attributes.
475 /// Intended to be used from a debugging tool (eg. gdb).
476 void clearGraphAttrs();
477
478 /// Set graph attributes for a node. (eg. "color=red".)
479 void setGraphAttrs(const SDNode *N, const char *Attrs);
480
481 /// Get graph attributes for a node. (eg. "color=red".)
482 /// Used from getNodeAttributes.
483 std::string getGraphAttrs(const SDNode *N) const;
484
485 /// Convenience for setting node color attribute.
486 void setGraphColor(const SDNode *N, const char *Color);
487
488 /// Convenience for setting subgraph color attribute.
489 void setSubgraphColor(SDNode *N, const char *Color);
490
491 using allnodes_const_iterator = ilist<SDNode>::const_iterator;
492
493 allnodes_const_iterator allnodes_begin() const { return AllNodes.begin(); }
494 allnodes_const_iterator allnodes_end() const { return AllNodes.end(); }
495
496 using allnodes_iterator = ilist<SDNode>::iterator;
497
498 allnodes_iterator allnodes_begin() { return AllNodes.begin(); }
499 allnodes_iterator allnodes_end() { return AllNodes.end(); }
500
501 ilist<SDNode>::size_type allnodes_size() const {
502 return AllNodes.size();
503 }
504
505 iterator_range<allnodes_iterator> allnodes() {
506 return make_range(allnodes_begin(), allnodes_end());
507 }
508 iterator_range<allnodes_const_iterator> allnodes() const {
509 return make_range(allnodes_begin(), allnodes_end());
510 }
511
512 /// Return the root tag of the SelectionDAG.
513 const SDValue &getRoot() const { return Root; }
514
515 /// Return the token chain corresponding to the entry of the function.
516 SDValue getEntryNode() const {
517 return SDValue(const_cast<SDNode *>(&EntryNode), 0);
518 }
519
520 /// Set the current root tag of the SelectionDAG.
521 ///
522 const SDValue &setRoot(SDValue N) {
523 assert((!N.getNode() || N.getValueType() == MVT::Other) &&(static_cast <bool> ((!N.getNode() || N.getValueType() ==
MVT::Other) && "DAG root value is not a chain!") ? void
(0) : __assert_fail ("(!N.getNode() || N.getValueType() == MVT::Other) && \"DAG root value is not a chain!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 524, __extension__ __PRETTY_FUNCTION__))
524 "DAG root value is not a chain!")(static_cast <bool> ((!N.getNode() || N.getValueType() ==
MVT::Other) && "DAG root value is not a chain!") ? void
(0) : __assert_fail ("(!N.getNode() || N.getValueType() == MVT::Other) && \"DAG root value is not a chain!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 524, __extension__ __PRETTY_FUNCTION__))
;
525 if (N.getNode())
526 checkForCycles(N.getNode(), this);
527 Root = N;
528 if (N.getNode())
529 checkForCycles(this);
530 return Root;
531 }
532
533#ifndef NDEBUG
534 void VerifyDAGDiverence();
535#endif
536
537 /// This iterates over the nodes in the SelectionDAG, folding
538 /// certain types of nodes together, or eliminating superfluous nodes. The
539 /// Level argument controls whether Combine is allowed to produce nodes and
540 /// types that are illegal on the target.
541 void Combine(CombineLevel Level, AAResults *AA,
542 CodeGenOpt::Level OptLevel);
543
544 /// This transforms the SelectionDAG into a SelectionDAG that
545 /// only uses types natively supported by the target.
546 /// Returns "true" if it made any changes.
547 ///
548 /// Note that this is an involved process that may invalidate pointers into
549 /// the graph.
550 bool LegalizeTypes();
551
552 /// This transforms the SelectionDAG into a SelectionDAG that is
553 /// compatible with the target instruction selector, as indicated by the
554 /// TargetLowering object.
555 ///
556 /// Note that this is an involved process that may invalidate pointers into
557 /// the graph.
558 void Legalize();
559
560 /// Transforms a SelectionDAG node and any operands to it into a node
561 /// that is compatible with the target instruction selector, as indicated by
562 /// the TargetLowering object.
563 ///
564 /// \returns true if \c N is a valid, legal node after calling this.
565 ///
566 /// This essentially runs a single recursive walk of the \c Legalize process
567 /// over the given node (and its operands). This can be used to incrementally
568 /// legalize the DAG. All of the nodes which are directly replaced,
569 /// potentially including N, are added to the output parameter \c
570 /// UpdatedNodes so that the delta to the DAG can be understood by the
571 /// caller.
572 ///
573 /// When this returns false, N has been legalized in a way that make the
574 /// pointer passed in no longer valid. It may have even been deleted from the
575 /// DAG, and so it shouldn't be used further. When this returns true, the
576 /// N passed in is a legal node, and can be immediately processed as such.
577 /// This may still have done some work on the DAG, and will still populate
578 /// UpdatedNodes with any new nodes replacing those originally in the DAG.
579 bool LegalizeOp(SDNode *N, SmallSetVector<SDNode *, 16> &UpdatedNodes);
580
581 /// This transforms the SelectionDAG into a SelectionDAG
582 /// that only uses vector math operations supported by the target. This is
583 /// necessary as a separate step from Legalize because unrolling a vector
584 /// operation can introduce illegal types, which requires running
585 /// LegalizeTypes again.
586 ///
587 /// This returns true if it made any changes; in that case, LegalizeTypes
588 /// is called again before Legalize.
589 ///
590 /// Note that this is an involved process that may invalidate pointers into
591 /// the graph.
592 bool LegalizeVectors();
593
594 /// This method deletes all unreachable nodes in the SelectionDAG.
595 void RemoveDeadNodes();
596
597 /// Remove the specified node from the system. This node must
598 /// have no referrers.
599 void DeleteNode(SDNode *N);
600
601 /// Return an SDVTList that represents the list of values specified.
602 SDVTList getVTList(EVT VT);
603 SDVTList getVTList(EVT VT1, EVT VT2);
604 SDVTList getVTList(EVT VT1, EVT VT2, EVT VT3);
605 SDVTList getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4);
606 SDVTList getVTList(ArrayRef<EVT> VTs);
607
608 //===--------------------------------------------------------------------===//
609 // Node creation methods.
610
611 /// Create a ConstantSDNode wrapping a constant value.
612 /// If VT is a vector type, the constant is splatted into a BUILD_VECTOR.
613 ///
614 /// If only legal types can be produced, this does the necessary
615 /// transformations (e.g., if the vector element type is illegal).
616 /// @{
617 SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT,
618 bool isTarget = false, bool isOpaque = false);
619 SDValue getConstant(const APInt &Val, const SDLoc &DL, EVT VT,
620 bool isTarget = false, bool isOpaque = false);
621
622 SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget = false,
623 bool IsOpaque = false) {
624 return getConstant(APInt::getAllOnesValue(VT.getScalarSizeInBits()), DL,
625 VT, IsTarget, IsOpaque);
626 }
627
628 SDValue getConstant(const ConstantInt &Val, const SDLoc &DL, EVT VT,
629 bool isTarget = false, bool isOpaque = false);
630 SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL,
631 bool isTarget = false);
632 SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL,
633 bool LegalTypes = true);
634 SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL,
635 bool isTarget = false);
636
637 SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT,
638 bool isOpaque = false) {
639 return getConstant(Val, DL, VT, true, isOpaque);
640 }
641 SDValue getTargetConstant(const APInt &Val, const SDLoc &DL, EVT VT,
642 bool isOpaque = false) {
643 return getConstant(Val, DL, VT, true, isOpaque);
644 }
645 SDValue getTargetConstant(const ConstantInt &Val, const SDLoc &DL, EVT VT,
646 bool isOpaque = false) {
647 return getConstant(Val, DL, VT, true, isOpaque);
648 }
649
650 /// Create a true or false constant of type \p VT using the target's
651 /// BooleanContent for type \p OpVT.
652 SDValue getBoolConstant(bool V, const SDLoc &DL, EVT VT, EVT OpVT);
653 /// @}
654
655 /// Create a ConstantFPSDNode wrapping a constant value.
656 /// If VT is a vector type, the constant is splatted into a BUILD_VECTOR.
657 ///
658 /// If only legal types can be produced, this does the necessary
659 /// transformations (e.g., if the vector element type is illegal).
660 /// The forms that take a double should only be used for simple constants
661 /// that can be exactly represented in VT. No checks are made.
662 /// @{
663 SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT,
664 bool isTarget = false);
665 SDValue getConstantFP(const APFloat &Val, const SDLoc &DL, EVT VT,
666 bool isTarget = false);
667 SDValue getConstantFP(const ConstantFP &V, const SDLoc &DL, EVT VT,
668 bool isTarget = false);
669 SDValue getTargetConstantFP(double Val, const SDLoc &DL, EVT VT) {
670 return getConstantFP(Val, DL, VT, true);
671 }
672 SDValue getTargetConstantFP(const APFloat &Val, const SDLoc &DL, EVT VT) {
673 return getConstantFP(Val, DL, VT, true);
674 }
675 SDValue getTargetConstantFP(const ConstantFP &Val, const SDLoc &DL, EVT VT) {
676 return getConstantFP(Val, DL, VT, true);
677 }
678 /// @}
679
680 SDValue getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT,
681 int64_t offset = 0, bool isTargetGA = false,
682 unsigned TargetFlags = 0);
683 SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT,
684 int64_t offset = 0, unsigned TargetFlags = 0) {
685 return getGlobalAddress(GV, DL, VT, offset, true, TargetFlags);
686 }
687 SDValue getFrameIndex(int FI, EVT VT, bool isTarget = false);
688 SDValue getTargetFrameIndex(int FI, EVT VT) {
689 return getFrameIndex(FI, VT, true);
690 }
691 SDValue getJumpTable(int JTI, EVT VT, bool isTarget = false,
692 unsigned TargetFlags = 0);
693 SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags = 0) {
694 return getJumpTable(JTI, VT, true, TargetFlags);
695 }
696 SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align = None,
697 int Offs = 0, bool isT = false,
698 unsigned TargetFlags = 0);
699 SDValue getTargetConstantPool(const Constant *C, EVT VT,
700 MaybeAlign Align = None, int Offset = 0,
701 unsigned TargetFlags = 0) {
702 return getConstantPool(C, VT, Align, Offset, true, TargetFlags);
703 }
704 SDValue getConstantPool(MachineConstantPoolValue *C, EVT VT,
705 MaybeAlign Align = None, int Offs = 0,
706 bool isT = false, unsigned TargetFlags = 0);
707 SDValue getTargetConstantPool(MachineConstantPoolValue *C, EVT VT,
708 MaybeAlign Align = None, int Offset = 0,
709 unsigned TargetFlags = 0) {
710 return getConstantPool(C, VT, Align, Offset, true, TargetFlags);
711 }
712 SDValue getTargetIndex(int Index, EVT VT, int64_t Offset = 0,
713 unsigned TargetFlags = 0);
714 // When generating a branch to a BB, we don't in general know enough
715 // to provide debug info for the BB at that time, so keep this one around.
716 SDValue getBasicBlock(MachineBasicBlock *MBB);
717 SDValue getExternalSymbol(const char *Sym, EVT VT);
718 SDValue getTargetExternalSymbol(const char *Sym, EVT VT,
719 unsigned TargetFlags = 0);
720 SDValue getMCSymbol(MCSymbol *Sym, EVT VT);
721
722 SDValue getValueType(EVT);
723 SDValue getRegister(unsigned Reg, EVT VT);
724 SDValue getRegisterMask(const uint32_t *RegMask);
725 SDValue getEHLabel(const SDLoc &dl, SDValue Root, MCSymbol *Label);
726 SDValue getLabelNode(unsigned Opcode, const SDLoc &dl, SDValue Root,
727 MCSymbol *Label);
728 SDValue getBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset = 0,
729 bool isTarget = false, unsigned TargetFlags = 0);
730 SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT,
731 int64_t Offset = 0, unsigned TargetFlags = 0) {
732 return getBlockAddress(BA, VT, Offset, true, TargetFlags);
733 }
734
735 SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg,
736 SDValue N) {
737 return getNode(ISD::CopyToReg, dl, MVT::Other, Chain,
738 getRegister(Reg, N.getValueType()), N);
739 }
740
741 // This version of the getCopyToReg method takes an extra operand, which
742 // indicates that there is potentially an incoming glue value (if Glue is not
743 // null) and that there should be a glue result.
744 SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N,
745 SDValue Glue) {
746 SDVTList VTs = getVTList(MVT::Other, MVT::Glue);
747 SDValue Ops[] = { Chain, getRegister(Reg, N.getValueType()), N, Glue };
748 return getNode(ISD::CopyToReg, dl, VTs,
749 makeArrayRef(Ops, Glue.getNode() ? 4 : 3));
750 }
751
752 // Similar to last getCopyToReg() except parameter Reg is a SDValue
753 SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, SDValue Reg, SDValue N,
754 SDValue Glue) {
755 SDVTList VTs = getVTList(MVT::Other, MVT::Glue);
756 SDValue Ops[] = { Chain, Reg, N, Glue };
757 return getNode(ISD::CopyToReg, dl, VTs,
758 makeArrayRef(Ops, Glue.getNode() ? 4 : 3));
759 }
760
761 SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT) {
762 SDVTList VTs = getVTList(VT, MVT::Other);
763 SDValue Ops[] = { Chain, getRegister(Reg, VT) };
764 return getNode(ISD::CopyFromReg, dl, VTs, Ops);
765 }
766
767 // This version of the getCopyFromReg method takes an extra operand, which
768 // indicates that there is potentially an incoming glue value (if Glue is not
769 // null) and that there should be a glue result.
770 SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT,
771 SDValue Glue) {
772 SDVTList VTs = getVTList(VT, MVT::Other, MVT::Glue);
773 SDValue Ops[] = { Chain, getRegister(Reg, VT), Glue };
774 return getNode(ISD::CopyFromReg, dl, VTs,
775 makeArrayRef(Ops, Glue.getNode() ? 3 : 2));
776 }
777
778 SDValue getCondCode(ISD::CondCode Cond);
779
780 /// Return an ISD::VECTOR_SHUFFLE node. The number of elements in VT,
781 /// which must be a vector type, must match the number of mask elements
782 /// NumElts. An integer mask element equal to -1 is treated as undefined.
783 SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2,
784 ArrayRef<int> Mask);
785
786 /// Return an ISD::BUILD_VECTOR node. The number of elements in VT,
787 /// which must be a vector type, must match the number of operands in Ops.
788 /// The operands must have the same type as (or, for integers, a type wider
789 /// than) VT's element type.
790 SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef<SDValue> Ops) {
791 // VerifySDNode (via InsertNode) checks BUILD_VECTOR later.
792 return getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
793 }
794
795 /// Return an ISD::BUILD_VECTOR node. The number of elements in VT,
796 /// which must be a vector type, must match the number of operands in Ops.
797 /// The operands must have the same type as (or, for integers, a type wider
798 /// than) VT's element type.
799 SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef<SDUse> Ops) {
800 // VerifySDNode (via InsertNode) checks BUILD_VECTOR later.
801 return getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
802 }
803
804 /// Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all
805 /// elements. VT must be a vector type. Op's type must be the same as (or,
806 /// for integers, a type wider than) VT's element type.
807 SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op) {
808 // VerifySDNode (via InsertNode) checks BUILD_VECTOR later.
809 if (Op.getOpcode() == ISD::UNDEF) {
25
Calling 'SDValue::getOpcode'
810 assert((VT.getVectorElementType() == Op.getValueType() ||(static_cast <bool> ((VT.getVectorElementType() == Op.getValueType
() || (VT.isInteger() && VT.getVectorElementType().bitsLE
(Op.getValueType()))) && "A splatted value must have a width equal or (for integers) "
"greater than the vector element type!") ? void (0) : __assert_fail
("(VT.getVectorElementType() == Op.getValueType() || (VT.isInteger() && VT.getVectorElementType().bitsLE(Op.getValueType()))) && \"A splatted value must have a width equal or (for integers) \" \"greater than the vector element type!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 814, __extension__ __PRETTY_FUNCTION__))
811 (VT.isInteger() &&(static_cast <bool> ((VT.getVectorElementType() == Op.getValueType
() || (VT.isInteger() && VT.getVectorElementType().bitsLE
(Op.getValueType()))) && "A splatted value must have a width equal or (for integers) "
"greater than the vector element type!") ? void (0) : __assert_fail
("(VT.getVectorElementType() == Op.getValueType() || (VT.isInteger() && VT.getVectorElementType().bitsLE(Op.getValueType()))) && \"A splatted value must have a width equal or (for integers) \" \"greater than the vector element type!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 814, __extension__ __PRETTY_FUNCTION__))
812 VT.getVectorElementType().bitsLE(Op.getValueType()))) &&(static_cast <bool> ((VT.getVectorElementType() == Op.getValueType
() || (VT.isInteger() && VT.getVectorElementType().bitsLE
(Op.getValueType()))) && "A splatted value must have a width equal or (for integers) "
"greater than the vector element type!") ? void (0) : __assert_fail
("(VT.getVectorElementType() == Op.getValueType() || (VT.isInteger() && VT.getVectorElementType().bitsLE(Op.getValueType()))) && \"A splatted value must have a width equal or (for integers) \" \"greater than the vector element type!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 814, __extension__ __PRETTY_FUNCTION__))
813 "A splatted value must have a width equal or (for integers) "(static_cast <bool> ((VT.getVectorElementType() == Op.getValueType
() || (VT.isInteger() && VT.getVectorElementType().bitsLE
(Op.getValueType()))) && "A splatted value must have a width equal or (for integers) "
"greater than the vector element type!") ? void (0) : __assert_fail
("(VT.getVectorElementType() == Op.getValueType() || (VT.isInteger() && VT.getVectorElementType().bitsLE(Op.getValueType()))) && \"A splatted value must have a width equal or (for integers) \" \"greater than the vector element type!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 814, __extension__ __PRETTY_FUNCTION__))
814 "greater than the vector element type!")(static_cast <bool> ((VT.getVectorElementType() == Op.getValueType
() || (VT.isInteger() && VT.getVectorElementType().bitsLE
(Op.getValueType()))) && "A splatted value must have a width equal or (for integers) "
"greater than the vector element type!") ? void (0) : __assert_fail
("(VT.getVectorElementType() == Op.getValueType() || (VT.isInteger() && VT.getVectorElementType().bitsLE(Op.getValueType()))) && \"A splatted value must have a width equal or (for integers) \" \"greater than the vector element type!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 814, __extension__ __PRETTY_FUNCTION__))
;
815 return getNode(ISD::UNDEF, SDLoc(), VT);
816 }
817
818 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Op);
819 return getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
820 }
821
822 // Return a splat ISD::SPLAT_VECTOR node, consisting of Op splatted to all
823 // elements.
824 SDValue getSplatVector(EVT VT, const SDLoc &DL, SDValue Op) {
825 if (Op.getOpcode() == ISD::UNDEF) {
826 assert((VT.getVectorElementType() == Op.getValueType() ||(static_cast <bool> ((VT.getVectorElementType() == Op.getValueType
() || (VT.isInteger() && VT.getVectorElementType().bitsLE
(Op.getValueType()))) && "A splatted value must have a width equal or (for integers) "
"greater than the vector element type!") ? void (0) : __assert_fail
("(VT.getVectorElementType() == Op.getValueType() || (VT.isInteger() && VT.getVectorElementType().bitsLE(Op.getValueType()))) && \"A splatted value must have a width equal or (for integers) \" \"greater than the vector element type!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 830, __extension__ __PRETTY_FUNCTION__))
827 (VT.isInteger() &&(static_cast <bool> ((VT.getVectorElementType() == Op.getValueType
() || (VT.isInteger() && VT.getVectorElementType().bitsLE
(Op.getValueType()))) && "A splatted value must have a width equal or (for integers) "
"greater than the vector element type!") ? void (0) : __assert_fail
("(VT.getVectorElementType() == Op.getValueType() || (VT.isInteger() && VT.getVectorElementType().bitsLE(Op.getValueType()))) && \"A splatted value must have a width equal or (for integers) \" \"greater than the vector element type!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 830, __extension__ __PRETTY_FUNCTION__))
828 VT.getVectorElementType().bitsLE(Op.getValueType()))) &&(static_cast <bool> ((VT.getVectorElementType() == Op.getValueType
() || (VT.isInteger() && VT.getVectorElementType().bitsLE
(Op.getValueType()))) && "A splatted value must have a width equal or (for integers) "
"greater than the vector element type!") ? void (0) : __assert_fail
("(VT.getVectorElementType() == Op.getValueType() || (VT.isInteger() && VT.getVectorElementType().bitsLE(Op.getValueType()))) && \"A splatted value must have a width equal or (for integers) \" \"greater than the vector element type!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 830, __extension__ __PRETTY_FUNCTION__))
829 "A splatted value must have a width equal or (for integers) "(static_cast <bool> ((VT.getVectorElementType() == Op.getValueType
() || (VT.isInteger() && VT.getVectorElementType().bitsLE
(Op.getValueType()))) && "A splatted value must have a width equal or (for integers) "
"greater than the vector element type!") ? void (0) : __assert_fail
("(VT.getVectorElementType() == Op.getValueType() || (VT.isInteger() && VT.getVectorElementType().bitsLE(Op.getValueType()))) && \"A splatted value must have a width equal or (for integers) \" \"greater than the vector element type!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 830, __extension__ __PRETTY_FUNCTION__))
830 "greater than the vector element type!")(static_cast <bool> ((VT.getVectorElementType() == Op.getValueType
() || (VT.isInteger() && VT.getVectorElementType().bitsLE
(Op.getValueType()))) && "A splatted value must have a width equal or (for integers) "
"greater than the vector element type!") ? void (0) : __assert_fail
("(VT.getVectorElementType() == Op.getValueType() || (VT.isInteger() && VT.getVectorElementType().bitsLE(Op.getValueType()))) && \"A splatted value must have a width equal or (for integers) \" \"greater than the vector element type!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 830, __extension__ __PRETTY_FUNCTION__))
;
831 return getNode(ISD::UNDEF, SDLoc(), VT);
832 }
833 return getNode(ISD::SPLAT_VECTOR, DL, VT, Op);
834 }
835
836 /// Returns a vector of type ResVT whose elements contain the linear sequence
837 /// <0, Step, Step * 2, Step * 3, ...>
838 SDValue getStepVector(const SDLoc &DL, EVT ResVT, APInt StepVal);
839
840 /// Returns a vector of type ResVT whose elements contain the linear sequence
841 /// <0, 1, 2, 3, ...>
842 SDValue getStepVector(const SDLoc &DL, EVT ResVT);
843
844 /// Returns an ISD::VECTOR_SHUFFLE node semantically equivalent to
845 /// the shuffle node in input but with swapped operands.
846 ///
847 /// Example: shuffle A, B, <0,5,2,7> -> shuffle B, A, <4,1,6,3>
848 SDValue getCommutedVectorShuffle(const ShuffleVectorSDNode &SV);
849
850 /// Convert Op, which must be of float type, to the
851 /// float type VT, by either extending or rounding (by truncation).
852 SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT);
853
854 /// Convert Op, which must be a STRICT operation of float type, to the
855 /// float type VT, by either extending or rounding (by truncation).
856 std::pair<SDValue, SDValue>
857 getStrictFPExtendOrRound(SDValue Op, SDValue Chain, const SDLoc &DL, EVT VT);
858
859 /// Convert Op, which must be of integer type, to the
860 /// integer type VT, by either any-extending or truncating it.
861 SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT);
862
863 /// Convert Op, which must be of integer type, to the
864 /// integer type VT, by either sign-extending or truncating it.
865 SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT);
866
867 /// Convert Op, which must be of integer type, to the
868 /// integer type VT, by either zero-extending or truncating it.
869 SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT);
870
871 /// Return the expression required to zero extend the Op
872 /// value assuming it was the smaller SrcTy value.
873 SDValue getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT);
874
875 /// Convert Op, which must be of integer type, to the integer type VT, by
876 /// either truncating it or performing either zero or sign extension as
877 /// appropriate extension for the pointer's semantics.
878 SDValue getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT);
879
880 /// Return the expression required to extend the Op as a pointer value
881 /// assuming it was the smaller SrcTy value. This may be either a zero extend
882 /// or a sign extend.
883 SDValue getPtrExtendInReg(SDValue Op, const SDLoc &DL, EVT VT);
884
885 /// Convert Op, which must be of integer type, to the integer type VT,
886 /// by using an extension appropriate for the target's
887 /// BooleanContent for type OpVT or truncating it.
888 SDValue getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT, EVT OpVT);
889
890 /// Create a bitwise NOT operation as (XOR Val, -1).
891 SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT);
892
893 /// Create a logical NOT operation as (XOR Val, BooleanOne).
894 SDValue getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT);
895
896 /// Returns sum of the base pointer and offset.
897 /// Unlike getObjectPtrOffset this does not set NoUnsignedWrap by default.
898 SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL,
899 const SDNodeFlags Flags = SDNodeFlags());
900 SDValue getMemBasePlusOffset(SDValue Base, SDValue Offset, const SDLoc &DL,
901 const SDNodeFlags Flags = SDNodeFlags());
902
903 /// Create an add instruction with appropriate flags when used for
904 /// addressing some offset of an object. i.e. if a load is split into multiple
905 /// components, create an add nuw from the base pointer to the offset.
906 SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset) {
907 SDNodeFlags Flags;
908 Flags.setNoUnsignedWrap(true);
909 return getMemBasePlusOffset(Ptr, Offset, SL, Flags);
910 }
911
912 SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, SDValue Offset) {
913 // The object itself can't wrap around the address space, so it shouldn't be
914 // possible for the adds of the offsets to the split parts to overflow.
915 SDNodeFlags Flags;
916 Flags.setNoUnsignedWrap(true);
917 return getMemBasePlusOffset(Ptr, Offset, SL, Flags);
918 }
919
920 /// Return a new CALLSEQ_START node, that starts new call frame, in which
921 /// InSize bytes are set up inside CALLSEQ_START..CALLSEQ_END sequence and
922 /// OutSize specifies part of the frame set up prior to the sequence.
923 SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize,
924 const SDLoc &DL) {
925 SDVTList VTs = getVTList(MVT::Other, MVT::Glue);
926 SDValue Ops[] = { Chain,
927 getIntPtrConstant(InSize, DL, true),
928 getIntPtrConstant(OutSize, DL, true) };
929 return getNode(ISD::CALLSEQ_START, DL, VTs, Ops);
930 }
931
932 /// Return a new CALLSEQ_END node, which always must have a
933 /// glue result (to ensure it's not CSE'd).
934 /// CALLSEQ_END does not have a useful SDLoc.
935 SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2,
936 SDValue InGlue, const SDLoc &DL) {
937 SDVTList NodeTys = getVTList(MVT::Other, MVT::Glue);
938 SmallVector<SDValue, 4> Ops;
939 Ops.push_back(Chain);
940 Ops.push_back(Op1);
941 Ops.push_back(Op2);
942 if (InGlue.getNode())
943 Ops.push_back(InGlue);
944 return getNode(ISD::CALLSEQ_END, DL, NodeTys, Ops);
945 }
946
947 /// Return true if the result of this operation is always undefined.
948 bool isUndef(unsigned Opcode, ArrayRef<SDValue> Ops);
949
950 /// Return an UNDEF node. UNDEF does not have a useful SDLoc.
951 SDValue getUNDEF(EVT VT) {
952 return getNode(ISD::UNDEF, SDLoc(), VT);
953 }
954
955 /// Return a node that represents the runtime scaling 'MulImm * RuntimeVL'.
956 SDValue getVScale(const SDLoc &DL, EVT VT, APInt MulImm) {
957 assert(MulImm.getMinSignedBits() <= VT.getSizeInBits() &&(static_cast <bool> (MulImm.getMinSignedBits() <= VT
.getSizeInBits() && "Immediate does not fit VT") ? void
(0) : __assert_fail ("MulImm.getMinSignedBits() <= VT.getSizeInBits() && \"Immediate does not fit VT\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 958, __extension__ __PRETTY_FUNCTION__))
958 "Immediate does not fit VT")(static_cast <bool> (MulImm.getMinSignedBits() <= VT
.getSizeInBits() && "Immediate does not fit VT") ? void
(0) : __assert_fail ("MulImm.getMinSignedBits() <= VT.getSizeInBits() && \"Immediate does not fit VT\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 958, __extension__ __PRETTY_FUNCTION__))
;
959 return getNode(ISD::VSCALE, DL, VT,
960 getConstant(MulImm.sextOrTrunc(VT.getSizeInBits()), DL, VT));
961 }
962
/// Return a GLOBAL_OFFSET_TABLE node. This does not have a useful SDLoc.
SDValue getGLOBAL_OFFSET_TABLE(EVT VT) {
  return getNode(ISD::GLOBAL_OFFSET_TABLE, SDLoc(), VT);
}
967
/// Gets or creates the specified node.
///
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                ArrayRef<SDUse> Ops);
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                ArrayRef<SDValue> Ops, const SDNodeFlags Flags);
SDValue getNode(unsigned Opcode, const SDLoc &DL, ArrayRef<EVT> ResultTys,
                ArrayRef<SDValue> Ops);
SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                ArrayRef<SDValue> Ops, const SDNodeFlags Flags);

// Use flags from current flag inserter.
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                ArrayRef<SDValue> Ops);
SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                ArrayRef<SDValue> Ops);
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue Operand);
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
                SDValue N2);
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
                SDValue N2, SDValue N3);

// Specialize based on number of operands.
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT);
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue Operand,
                const SDNodeFlags Flags);
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
                SDValue N2, const SDNodeFlags Flags);
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
                SDValue N2, SDValue N3, const SDNodeFlags Flags);
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
                SDValue N2, SDValue N3, SDValue N4);
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
                SDValue N2, SDValue N3, SDValue N4, SDValue N5);

// Specialize again based on number of operands for nodes with a VTList
// rather than a single VT.
SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList);
SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, SDValue N);
SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, SDValue N1,
                SDValue N2);
SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, SDValue N1,
                SDValue N2, SDValue N3);
SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, SDValue N1,
                SDValue N2, SDValue N3, SDValue N4);
SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, SDValue N1,
                SDValue N2, SDValue N3, SDValue N4, SDValue N5);

/// Compute a TokenFactor to force all the incoming stack arguments to be
/// loaded from the stack. This is used in tail call lowering to protect
/// stack arguments from being clobbered.
SDValue getStackArgumentTokenFactor(SDValue Chain);

// Helpers for building memcpy/memmove/memset lowering nodes. The returned
// value is the output chain of the resulting sequence (implementation lives
// in SelectionDAG.cpp).
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src,
                  SDValue Size, Align Alignment, bool isVol,
                  bool AlwaysInline, bool isTailCall,
                  MachinePointerInfo DstPtrInfo,
                  MachinePointerInfo SrcPtrInfo,
                  const AAMDNodes &AAInfo = AAMDNodes());

SDValue getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src,
                   SDValue Size, Align Alignment, bool isVol, bool isTailCall,
                   MachinePointerInfo DstPtrInfo,
                   MachinePointerInfo SrcPtrInfo,
                   const AAMDNodes &AAInfo = AAMDNodes());

SDValue getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src,
                  SDValue Size, Align Alignment, bool isVol, bool isTailCall,
                  MachinePointerInfo DstPtrInfo,
                  const AAMDNodes &AAInfo = AAMDNodes());

// Element-wise-atomic variants of the memory helpers above.
SDValue getAtomicMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst,
                        unsigned DstAlign, SDValue Src, unsigned SrcAlign,
                        SDValue Size, Type *SizeTy, unsigned ElemSz,
                        bool isTailCall, MachinePointerInfo DstPtrInfo,
                        MachinePointerInfo SrcPtrInfo);

SDValue getAtomicMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst,
                         unsigned DstAlign, SDValue Src, unsigned SrcAlign,
                         SDValue Size, Type *SizeTy, unsigned ElemSz,
                         bool isTailCall, MachinePointerInfo DstPtrInfo,
                         MachinePointerInfo SrcPtrInfo);

SDValue getAtomicMemset(SDValue Chain, const SDLoc &dl, SDValue Dst,
                        unsigned DstAlign, SDValue Value, SDValue Size,
                        Type *SizeTy, unsigned ElemSz, bool isTailCall,
                        MachinePointerInfo DstPtrInfo);
1055
1056 /// Helper function to make it easier to build SetCC's if you just have an
1057 /// ISD::CondCode instead of an SDValue.
1058 SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS,
1059 ISD::CondCode Cond, SDValue Chain = SDValue(),
1060 bool IsSignaling = false) {
1061 assert(LHS.getValueType().isVector() == RHS.getValueType().isVector() &&(static_cast <bool> (LHS.getValueType().isVector() == RHS
.getValueType().isVector() && "Cannot compare scalars to vectors"
) ? void (0) : __assert_fail ("LHS.getValueType().isVector() == RHS.getValueType().isVector() && \"Cannot compare scalars to vectors\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1062, __extension__ __PRETTY_FUNCTION__))
1062 "Cannot compare scalars to vectors")(static_cast <bool> (LHS.getValueType().isVector() == RHS
.getValueType().isVector() && "Cannot compare scalars to vectors"
) ? void (0) : __assert_fail ("LHS.getValueType().isVector() == RHS.getValueType().isVector() && \"Cannot compare scalars to vectors\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1062, __extension__ __PRETTY_FUNCTION__))
;
1063 assert(LHS.getValueType().isVector() == VT.isVector() &&(static_cast <bool> (LHS.getValueType().isVector() == VT
.isVector() && "Cannot compare scalars to vectors") ?
void (0) : __assert_fail ("LHS.getValueType().isVector() == VT.isVector() && \"Cannot compare scalars to vectors\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1064, __extension__ __PRETTY_FUNCTION__))
1064 "Cannot compare scalars to vectors")(static_cast <bool> (LHS.getValueType().isVector() == VT
.isVector() && "Cannot compare scalars to vectors") ?
void (0) : __assert_fail ("LHS.getValueType().isVector() == VT.isVector() && \"Cannot compare scalars to vectors\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1064, __extension__ __PRETTY_FUNCTION__))
;
1065 assert(Cond != ISD::SETCC_INVALID &&(static_cast <bool> (Cond != ISD::SETCC_INVALID &&
"Cannot create a setCC of an invalid node.") ? void (0) : __assert_fail
("Cond != ISD::SETCC_INVALID && \"Cannot create a setCC of an invalid node.\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1066, __extension__ __PRETTY_FUNCTION__))
1066 "Cannot create a setCC of an invalid node.")(static_cast <bool> (Cond != ISD::SETCC_INVALID &&
"Cannot create a setCC of an invalid node.") ? void (0) : __assert_fail
("Cond != ISD::SETCC_INVALID && \"Cannot create a setCC of an invalid node.\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1066, __extension__ __PRETTY_FUNCTION__))
;
1067 if (Chain)
1068 return getNode(IsSignaling ? ISD::STRICT_FSETCCS : ISD::STRICT_FSETCC, DL,
1069 {VT, MVT::Other}, {Chain, LHS, RHS, getCondCode(Cond)});
1070 return getNode(ISD::SETCC, DL, VT, LHS, RHS, getCondCode(Cond));
1071 }
1072
1073 /// Helper function to make it easier to build Select's if you just have
1074 /// operands and don't want to check for vector.
1075 SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS,
1076 SDValue RHS) {
1077 assert(LHS.getValueType() == RHS.getValueType() &&(static_cast <bool> (LHS.getValueType() == RHS.getValueType
() && "Cannot use select on differing types") ? void (
0) : __assert_fail ("LHS.getValueType() == RHS.getValueType() && \"Cannot use select on differing types\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1078, __extension__ __PRETTY_FUNCTION__))
1078 "Cannot use select on differing types")(static_cast <bool> (LHS.getValueType() == RHS.getValueType
() && "Cannot use select on differing types") ? void (
0) : __assert_fail ("LHS.getValueType() == RHS.getValueType() && \"Cannot use select on differing types\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1078, __extension__ __PRETTY_FUNCTION__))
;
1079 assert(VT.isVector() == LHS.getValueType().isVector() &&(static_cast <bool> (VT.isVector() == LHS.getValueType(
).isVector() && "Cannot mix vectors and scalars") ? void
(0) : __assert_fail ("VT.isVector() == LHS.getValueType().isVector() && \"Cannot mix vectors and scalars\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1080, __extension__ __PRETTY_FUNCTION__))
1080 "Cannot mix vectors and scalars")(static_cast <bool> (VT.isVector() == LHS.getValueType(
).isVector() && "Cannot mix vectors and scalars") ? void
(0) : __assert_fail ("VT.isVector() == LHS.getValueType().isVector() && \"Cannot mix vectors and scalars\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1080, __extension__ __PRETTY_FUNCTION__))
;
1081 auto Opcode = Cond.getValueType().isVector() ? ISD::VSELECT : ISD::SELECT;
1082 return getNode(Opcode, DL, VT, Cond, LHS, RHS);
1083 }
1084
/// Helper function to make it easier to build SelectCC's if you just have an
/// ISD::CondCode instead of an SDValue.
/// The result type is taken from the True operand.
SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True,
                    SDValue False, ISD::CondCode Cond) {
  return getNode(ISD::SELECT_CC, DL, True.getValueType(), LHS, RHS, True,
                 False, getCondCode(Cond));
}
1092
/// Try to simplify a select/vselect into 1 of its operands or a constant.
SDValue simplifySelect(SDValue Cond, SDValue TVal, SDValue FVal);

/// Try to simplify a shift into 1 of its operands or a constant.
SDValue simplifyShift(SDValue X, SDValue Y);

/// Try to simplify a floating-point binary operation into 1 of its operands
/// or a constant.
SDValue simplifyFPBinop(unsigned Opcode, SDValue X, SDValue Y,
                        SDNodeFlags Flags);

/// VAArg produces a result and token chain, and takes a pointer
/// and a source value as input.
SDValue getVAArg(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
                 SDValue SV, unsigned Align);

/// Gets a node for an atomic cmpxchg op. There are two
/// valid Opcodes. ISD::ATOMIC_CMP_SWAP produces the value loaded and a
/// chain result. ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS produces the value loaded,
/// a success flag (initially i1), and a chain.
SDValue getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, EVT MemVT,
                         SDVTList VTs, SDValue Chain, SDValue Ptr,
                         SDValue Cmp, SDValue Swp, MachineMemOperand *MMO);

/// Gets a node for an atomic op, produces result (if relevant)
/// and chain and takes 2 operands.
SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain,
                  SDValue Ptr, SDValue Val, MachineMemOperand *MMO);

/// Gets a node for an atomic op, produces result and chain and
/// takes 1 operand.
SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, EVT VT,
                  SDValue Chain, SDValue Ptr, MachineMemOperand *MMO);

/// Gets a node for an atomic op, produces result and chain and takes N
/// operands.
SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
                  SDVTList VTList, ArrayRef<SDValue> Ops,
                  MachineMemOperand *MMO);

/// Creates a MemIntrinsicNode that may produce a
/// result and takes a list of operands. Opcode may be INTRINSIC_VOID,
/// INTRINSIC_W_CHAIN, or a target-specific opcode with a value not
/// less than FIRST_TARGET_MEMORY_OPCODE.
SDValue getMemIntrinsicNode(
    unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
    EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment,
    MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad |
                                     MachineMemOperand::MOStore,
    uint64_t Size = 0, const AAMDNodes &AAInfo = AAMDNodes());
1143
1144 inline SDValue getMemIntrinsicNode(
1145 unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
1146 EVT MemVT, MachinePointerInfo PtrInfo, MaybeAlign Alignment = None,
1147 MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad |
1148 MachineMemOperand::MOStore,
1149 uint64_t Size = 0, const AAMDNodes &AAInfo = AAMDNodes()) {
1150 // Ensure that codegen never sees alignment 0
1151 return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, PtrInfo,
1152 Alignment.getValueOr(getEVTAlign(MemVT)), Flags,
1153 Size, AAInfo);
1154 }
1155
// MMO-taking variant of getMemIntrinsicNode; the MachineMemOperand fully
// describes the memory access.
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList,
                            ArrayRef<SDValue> Ops, EVT MemVT,
                            MachineMemOperand *MMO);

/// Creates a LifetimeSDNode that starts (`IsStart==true`) or ends
/// (`IsStart==false`) the lifetime of the portion of `FrameIndex` between
/// offsets `Offset` and `Offset + Size`.
SDValue getLifetimeNode(bool IsStart, const SDLoc &dl, SDValue Chain,
                        int FrameIndex, int64_t Size, int64_t Offset = -1);

/// Creates a PseudoProbeSDNode with function GUID `Guid` and
/// the index of the block `Index` it is probing, as well as the attributes
/// `attr` of the probe.
SDValue getPseudoProbeNode(const SDLoc &Dl, SDValue Chain, uint64_t Guid,
                           uint64_t Index, uint32_t Attr);

/// Create a MERGE_VALUES node from the given operands.
SDValue getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl);

/// Loads are not normal binary operators: their result type is not
/// determined by their operands, and they produce a value AND a token chain.
///
/// This function will set the MOLoad flag on MMOFlags, but you can set it if
/// you want. The MOStore flag must not be set.
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
                MachinePointerInfo PtrInfo,
                MaybeAlign Alignment = MaybeAlign(),
                MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
                const AAMDNodes &AAInfo = AAMDNodes(),
                const MDNode *Ranges = nullptr);
1186 /// FIXME: Remove once transition to Align is over.
1187 inline SDValue
1188 getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
1189 MachinePointerInfo PtrInfo, unsigned Alignment,
1190 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1191 const AAMDNodes &AAInfo = AAMDNodes(),
1192 const MDNode *Ranges = nullptr) {
1193 return getLoad(VT, dl, Chain, Ptr, PtrInfo, MaybeAlign(Alignment), MMOFlags,
1194 AAInfo, Ranges);
1195 }
// MMO-taking load variant; the MachineMemOperand describes the access.
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
                MachineMemOperand *MMO);
// Extending load (sext/zext/aext per ExtType) from memory type MemVT.
SDValue
getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain,
           SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT,
           MaybeAlign Alignment = MaybeAlign(),
           MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
           const AAMDNodes &AAInfo = AAMDNodes());
1204 /// FIXME: Remove once transition to Align is over.
1205 inline SDValue
1206 getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain,
1207 SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT,
1208 unsigned Alignment,
1209 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1210 const AAMDNodes &AAInfo = AAMDNodes()) {
1211 return getExtLoad(ExtType, dl, VT, Chain, Ptr, PtrInfo, MemVT,
1212 MaybeAlign(Alignment), MMOFlags, AAInfo);
1213 }
// MMO-taking extending-load variant.
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT,
                   SDValue Chain, SDValue Ptr, EVT MemVT,
                   MachineMemOperand *MMO);
// Build a pre/post-inc/dec load from an existing unindexed load.
SDValue getIndexedLoad(SDValue OrigLoad, const SDLoc &dl, SDValue Base,
                       SDValue Offset, ISD::MemIndexedMode AM);
// Fully general load: indexed mode, extension kind, and memory VT.
SDValue getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT,
                const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset,
                MachinePointerInfo PtrInfo, EVT MemVT, Align Alignment,
                MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
                const AAMDNodes &AAInfo = AAMDNodes(),
                const MDNode *Ranges = nullptr);
1225 inline SDValue getLoad(
1226 ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &dl,
1227 SDValue Chain, SDValue Ptr, SDValue Offset, MachinePointerInfo PtrInfo,
1228 EVT MemVT, MaybeAlign Alignment = MaybeAlign(),
1229 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1230 const AAMDNodes &AAInfo = AAMDNodes(), const MDNode *Ranges = nullptr) {
1231 // Ensures that codegen never sees a None Alignment.
1232 return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, PtrInfo, MemVT,
1233 Alignment.getValueOr(getEVTAlign(MemVT)), MMOFlags, AAInfo,
1234 Ranges);
1235 }
1236 /// FIXME: Remove once transition to Align is over.
1237 inline SDValue
1238 getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT,
1239 const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset,
1240 MachinePointerInfo PtrInfo, EVT MemVT, unsigned Alignment,
1241 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1242 const AAMDNodes &AAInfo = AAMDNodes(),
1243 const MDNode *Ranges = nullptr) {
1244 return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, PtrInfo, MemVT,
1245 MaybeAlign(Alignment), MMOFlags, AAInfo, Ranges);
1246 }
// MMO-taking variant of the fully general load.
SDValue getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT,
                const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset,
                EVT MemVT, MachineMemOperand *MMO);

/// Helper function to build ISD::STORE nodes.
///
/// This function will set the MOStore flag on MMOFlags, but you can set it if
/// you want. The MOLoad and MOInvariant flags must not be set.

SDValue
getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
         MachinePointerInfo PtrInfo, Align Alignment,
         MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
         const AAMDNodes &AAInfo = AAMDNodes());
1261 inline SDValue
1262 getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
1263 MachinePointerInfo PtrInfo, MaybeAlign Alignment = MaybeAlign(),
1264 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1265 const AAMDNodes &AAInfo = AAMDNodes()) {
1266 return getStore(Chain, dl, Val, Ptr, PtrInfo,
1267 Alignment.getValueOr(getEVTAlign(Val.getValueType())),
1268 MMOFlags, AAInfo);
1269 }
1270 /// FIXME: Remove once transition to Align is over.
1271 inline SDValue
1272 getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
1273 MachinePointerInfo PtrInfo, unsigned Alignment,
1274 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1275 const AAMDNodes &AAInfo = AAMDNodes()) {
1276 return getStore(Chain, dl, Val, Ptr, PtrInfo, MaybeAlign(Alignment),
1277 MMOFlags, AAInfo);
1278 }
// MMO-taking store variant.
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
                 MachineMemOperand *MMO);
// Truncating store: Val is truncated to memory type SVT before storing.
SDValue
getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
              MachinePointerInfo PtrInfo, EVT SVT, Align Alignment,
              MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
              const AAMDNodes &AAInfo = AAMDNodes());
1286 inline SDValue
1287 getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
1288 MachinePointerInfo PtrInfo, EVT SVT,
1289 MaybeAlign Alignment = MaybeAlign(),
1290 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1291 const AAMDNodes &AAInfo = AAMDNodes()) {
1292 return getTruncStore(Chain, dl, Val, Ptr, PtrInfo, SVT,
1293 Alignment.getValueOr(getEVTAlign(SVT)), MMOFlags,
1294 AAInfo);
1295 }
1296 /// FIXME: Remove once transition to Align is over.
1297 inline SDValue
1298 getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
1299 MachinePointerInfo PtrInfo, EVT SVT, unsigned Alignment,
1300 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1301 const AAMDNodes &AAInfo = AAMDNodes()) {
1302 return getTruncStore(Chain, dl, Val, Ptr, PtrInfo, SVT,
1303 MaybeAlign(Alignment), MMOFlags, AAInfo);
1304 }
// MMO-taking truncating-store variant.
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
                      SDValue Ptr, EVT SVT, MachineMemOperand *MMO);
// Build a pre/post-inc/dec store from an existing unindexed store.
SDValue getIndexedStore(SDValue OrigStore, const SDLoc &dl, SDValue Base,
                        SDValue Offset, ISD::MemIndexedMode AM);

// Masked (per-lane predicated) load/store and gather/scatter builders.
SDValue getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Base,
                      SDValue Offset, SDValue Mask, SDValue Src0, EVT MemVT,
                      MachineMemOperand *MMO, ISD::MemIndexedMode AM,
                      ISD::LoadExtType, bool IsExpanding = false);
SDValue getIndexedMaskedLoad(SDValue OrigLoad, const SDLoc &dl, SDValue Base,
                             SDValue Offset, ISD::MemIndexedMode AM);
SDValue getMaskedStore(SDValue Chain, const SDLoc &dl, SDValue Val,
                       SDValue Base, SDValue Offset, SDValue Mask, EVT MemVT,
                       MachineMemOperand *MMO, ISD::MemIndexedMode AM,
                       bool IsTruncating = false, bool IsCompressing = false);
SDValue getIndexedMaskedStore(SDValue OrigStore, const SDLoc &dl,
                              SDValue Base, SDValue Offset,
                              ISD::MemIndexedMode AM);
SDValue getMaskedGather(SDVTList VTs, EVT MemVT, const SDLoc &dl,
                        ArrayRef<SDValue> Ops, MachineMemOperand *MMO,
                        ISD::MemIndexType IndexType, ISD::LoadExtType ExtTy);
SDValue getMaskedScatter(SDVTList VTs, EVT MemVT, const SDLoc &dl,
                         ArrayRef<SDValue> Ops, MachineMemOperand *MMO,
                         ISD::MemIndexType IndexType,
                         bool IsTruncating = false);
1330
/// Construct a node to track a Value* through the backend.
SDValue getSrcValue(const Value *v);

/// Return an MDNodeSDNode which holds an MDNode.
SDValue getMDNode(const MDNode *MD);

/// Return a bitcast using the SDLoc of the value operand, and casting to the
/// provided type. Use getNode to set a custom SDLoc.
SDValue getBitcast(EVT VT, SDValue V);

/// Return an AddrSpaceCastSDNode.
SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS,
                         unsigned DestAS);

/// Return a freeze using the SDLoc of the value operand.
SDValue getFreeze(SDValue V);

/// Return an AssertAlignSDNode.
SDValue getAssertAlign(const SDLoc &DL, SDValue V, Align A);

/// Return the specified value casted to
/// the target's desired shift amount type.
SDValue getShiftAmountOperand(EVT LHSTy, SDValue Op);

/// Expand the specified \c ISD::VAARG node as the Legalize pass would.
SDValue expandVAArg(SDNode *Node);

/// Expand the specified \c ISD::VACOPY node as the Legalize pass would.
SDValue expandVACopy(SDNode *Node);

/// Returns a GlobalAddress of the function from the current module with
/// name matching the given ExternalSymbol. Additionally can provide the
/// matched function.
/// Panics if the function doesn't exist.
SDValue getSymbolFunctionGlobalAddress(SDValue Op,
                                       Function **TargetFunction = nullptr);

/// *Mutate* the specified node in-place to have the
/// specified operands. If the resultant node already exists in the DAG,
/// this does not modify the specified node, instead it returns the node that
/// already exists. If the resultant node does not exist in the DAG, the
/// input node is returned. As a degenerate case, if you specify the same
/// input operands as the node already has, the input node is returned.
SDNode *UpdateNodeOperands(SDNode *N, SDValue Op);
SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2);
SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
                           SDValue Op3);
SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
                           SDValue Op3, SDValue Op4);
SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
                           SDValue Op3, SDValue Op4, SDValue Op5);
SDNode *UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops);
1383
/// Creates a new TokenFactor containing \p Vals. If \p Vals contains 64k
/// values or more, move values into new TokenFactors in 64k-1 blocks, until
/// the final TokenFactor has less than 64k operands.
SDValue getTokenFactor(const SDLoc &DL, SmallVectorImpl<SDValue> &Vals);

/// *Mutate* the specified machine node's memory references to the provided
/// list.
void setNodeMemRefs(MachineSDNode *N,
                    ArrayRef<MachineMemOperand *> NewMemRefs);

// Calculate divergence of node \p N based on its operands.
bool calculateDivergence(SDNode *N);

// Propagates the change in divergence to users
void updateDivergence(SDNode * N);

/// These are used for target selectors to *mutate* the
/// specified node to have the specified return type, Target opcode, and
/// operands. Note that target opcodes are stored as
/// ~TargetOpcode in the node opcode field. The resultant node is returned.
SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT);
SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT, SDValue Op1);
SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT,
                     SDValue Op1, SDValue Op2);
SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT,
                     SDValue Op1, SDValue Op2, SDValue Op3);
SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT,
                     ArrayRef<SDValue> Ops);
SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT1, EVT VT2);
SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT1,
                     EVT VT2, ArrayRef<SDValue> Ops);
SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT1,
                     EVT VT2, EVT VT3, ArrayRef<SDValue> Ops);
SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT1,
                     EVT VT2, SDValue Op1, SDValue Op2);
SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, SDVTList VTs,
                     ArrayRef<SDValue> Ops);
1421
/// This *mutates* the specified node to have the specified
/// return type, opcode, and operands.
SDNode *MorphNodeTo(SDNode *N, unsigned Opc, SDVTList VTs,
                    ArrayRef<SDValue> Ops);

/// Mutate the specified strict FP node to its non-strict equivalent,
/// unlinking the node from its chain and dropping the metadata arguments.
/// The node must be a strict FP node.
SDNode *mutateStrictFPToFP(SDNode *Node);

/// These are used for target selectors to create a new node
/// with specified return type(s), MachineInstr opcode, and operands.
///
/// Note that getMachineNode returns the resultant node. If there is already
/// a node of the specified opcode and operands, it returns that node instead
/// of the current one.
MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT);
MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT,
                              SDValue Op1);
MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT,
                              SDValue Op1, SDValue Op2);
MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT,
                              SDValue Op1, SDValue Op2, SDValue Op3);
MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT,
                              ArrayRef<SDValue> Ops);
MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
                              EVT VT2, SDValue Op1, SDValue Op2);
MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
                              EVT VT2, SDValue Op1, SDValue Op2, SDValue Op3);
MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
                              EVT VT2, ArrayRef<SDValue> Ops);
MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
                              EVT VT2, EVT VT3, SDValue Op1, SDValue Op2);
MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
                              EVT VT2, EVT VT3, SDValue Op1, SDValue Op2,
                              SDValue Op3);
MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
                              EVT VT2, EVT VT3, ArrayRef<SDValue> Ops);
MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl,
                              ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops);
MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, SDVTList VTs,
                              ArrayRef<SDValue> Ops);
1464
/// A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT,
                               SDValue Operand);

/// A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT,
                              SDValue Operand, SDValue Subreg);

/// Get the specified node if it's already available, or else return NULL.
SDNode *getNodeIfExists(unsigned Opcode, SDVTList VTList,
                        ArrayRef<SDValue> Ops, const SDNodeFlags Flags);
SDNode *getNodeIfExists(unsigned Opcode, SDVTList VTList,
                        ArrayRef<SDValue> Ops);

/// Check if a node exists without modifying its flags.
bool doesNodeExist(unsigned Opcode, SDVTList VTList, ArrayRef<SDValue> Ops);
1481
/// Creates a SDDbgValue node.
SDDbgValue *getDbgValue(DIVariable *Var, DIExpression *Expr, SDNode *N,
                        unsigned R, bool IsIndirect, const DebugLoc &DL,
                        unsigned O);

/// Creates a constant SDDbgValue node.
SDDbgValue *getConstantDbgValue(DIVariable *Var, DIExpression *Expr,
                                const Value *C, const DebugLoc &DL,
                                unsigned O);

/// Creates a FrameIndex SDDbgValue node.
SDDbgValue *getFrameIndexDbgValue(DIVariable *Var, DIExpression *Expr,
                                  unsigned FI, bool IsIndirect,
                                  const DebugLoc &DL, unsigned O);

/// Creates a FrameIndex SDDbgValue node (variant carrying the SDNodes the
/// debug value depends on).
SDDbgValue *getFrameIndexDbgValue(DIVariable *Var, DIExpression *Expr,
                                  unsigned FI,
                                  ArrayRef<SDNode *> Dependencies,
                                  bool IsIndirect, const DebugLoc &DL,
                                  unsigned O);

/// Creates a VReg SDDbgValue node.
SDDbgValue *getVRegDbgValue(DIVariable *Var, DIExpression *Expr,
                            unsigned VReg, bool IsIndirect,
                            const DebugLoc &DL, unsigned O);

/// Creates a SDDbgValue node from a list of locations.
SDDbgValue *getDbgValueList(DIVariable *Var, DIExpression *Expr,
                            ArrayRef<SDDbgOperand> Locs,
                            ArrayRef<SDNode *> Dependencies, bool IsIndirect,
                            const DebugLoc &DL, unsigned O, bool IsVariadic);

/// Creates a SDDbgLabel node.
SDDbgLabel *getDbgLabel(DILabel *Label, const DebugLoc &DL, unsigned O);
1517
/// Transfer debug values from one node to another, while optionally
/// generating fragment expressions for split-up values. If \p InvalidateDbg
/// is set, debug values are invalidated after they are transferred.
void transferDbgValues(SDValue From, SDValue To, unsigned OffsetInBits = 0,
                       unsigned SizeInBits = 0, bool InvalidateDbg = true);

/// Remove the specified node from the system. If any of its
/// operands then becomes dead, remove them as well. Inform UpdateListener
/// for each node deleted.
void RemoveDeadNode(SDNode *N);

/// This method deletes the unreachable nodes in the
/// given list, and any nodes that become unreachable as a result.
void RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes);

/// Modify anything using 'From' to use 'To' instead.
/// This can cause recursive merging of nodes in the DAG. Use the first
/// version if 'From' is known to have a single result, use the second
/// if you have two nodes with identical results (or if 'To' has a superset
/// of the results of 'From'), use the third otherwise.
///
/// These methods all take an optional UpdateListener, which (if not null) is
/// informed about nodes that are deleted and modified due to recursive
/// changes in the dag.
///
/// These functions only replace all existing uses. It's possible that as
/// these replacements are being performed, CSE may cause the From node
/// to be given new uses. These new uses of From are left in place, and
/// not automatically transferred to To.
///
void ReplaceAllUsesWith(SDValue From, SDValue To);
void ReplaceAllUsesWith(SDNode *From, SDNode *To);
void ReplaceAllUsesWith(SDNode *From, const SDValue *To);

/// Replace any uses of From with To, leaving
/// uses of other values produced by From.getNode() alone.
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To);

/// Like ReplaceAllUsesOfValueWith, but for multiple values at once.
/// This correctly handles the case where
/// there is an overlap between the From values and the To values.
void ReplaceAllUsesOfValuesWith(const SDValue *From, const SDValue *To,
                                unsigned Num);
1561
1562 /// If an existing load has uses of its chain, create a token factor node with
1563 /// that chain and the new memory node's chain and update users of the old
1564 /// chain to the token factor. This ensures that the new memory node will have
1565 /// the same relative memory dependency position as the old load. Returns the
1566 /// new merged load chain.
1567 SDValue makeEquivalentMemoryOrdering(SDValue OldChain, SDValue NewMemOpChain);
1568
1569 /// If an existing load has uses of its chain, create a token factor node with
1570 /// that chain and the new memory node's chain and update users of the old
1571 /// chain to the token factor. This ensures that the new memory node will have
1572 /// the same relative memory dependency position as the old load. Returns the
1573 /// new merged load chain.
1574 SDValue makeEquivalentMemoryOrdering(LoadSDNode *OldLoad, SDValue NewMemOp);
1575
1576 /// Topological-sort the AllNodes list and a
1577 /// assign a unique node id for each node in the DAG based on their
1578 /// topological order. Returns the number of nodes.
1579 unsigned AssignTopologicalOrder();
1580
1581 /// Move node N in the AllNodes list to be immediately
1582 /// before the given iterator Position. This may be used to update the
1583 /// topological ordering when the list of nodes is modified.
1584 void RepositionNode(allnodes_iterator Position, SDNode *N) {
     // Unlink N from the AllNodes list and re-insert it immediately before
     // Position; only the list order changes, the node itself is untouched.
1585 AllNodes.insert(Position, AllNodes.remove(N));
1586 }
1587
1588 /// Returns an APFloat semantics tag appropriate for the given type. If VT is
1589 /// a vector type, the element semantics are returned.
1590 static const fltSemantics &EVTToAPFloatSemantics(EVT VT) {
1591 switch (VT.getScalarType().getSimpleVT().SimpleTy) {
1592 default: llvm_unreachable("Unknown FP format")::llvm::llvm_unreachable_internal("Unknown FP format", "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1592)
;
1593 case MVT::f16: return APFloat::IEEEhalf();
1594 case MVT::bf16: return APFloat::BFloat();
1595 case MVT::f32: return APFloat::IEEEsingle();
1596 case MVT::f64: return APFloat::IEEEdouble();
1597 case MVT::f80: return APFloat::x87DoubleExtended();
1598 case MVT::f128: return APFloat::IEEEquad();
1599 case MVT::ppcf128: return APFloat::PPCDoubleDouble();
1600 }
1601 }
1602
1603 /// Add a dbg_value SDNode. If SD is non-null that means the
1604 /// value is produced by SD.
1605 void AddDbgValue(SDDbgValue *DB, bool isParameter);
1606
1607 /// Add a dbg_label SDNode.
1608 void AddDbgLabel(SDDbgLabel *DB);
1609
1610 /// Get the debug values which reference the given SDNode.
1611 ArrayRef<SDDbgValue*> GetDbgValues(const SDNode* SD) const {
     // Thin accessor: delegates to the per-DAG SDDbgInfo side table.
1612 return DbgInfo->getSDDbgValues(SD);
1613 }
1614
1615public:
1616 /// Return true if there are any SDDbgValue nodes associated
1617 /// with this SelectionDAG.
1618 bool hasDebugValues() const { return !DbgInfo->empty(); }
1619
     // Iterator range over all dbg_value entries recorded for this DAG.
1620 SDDbgInfo::DbgIterator DbgBegin() const { return DbgInfo->DbgBegin(); }
1621 SDDbgInfo::DbgIterator DbgEnd() const { return DbgInfo->DbgEnd(); }
1622
     // Iterator range over the byval-parameter dbg_value entries (per the
     // accessor names; the subset is maintained inside SDDbgInfo).
1623 SDDbgInfo::DbgIterator ByvalParmDbgBegin() const {
1624 return DbgInfo->ByvalParmDbgBegin();
1625 }
1626 SDDbgInfo::DbgIterator ByvalParmDbgEnd() const {
1627 return DbgInfo->ByvalParmDbgEnd();
1628 }
1629
     // Iterator range over the dbg_label entries.
1630 SDDbgInfo::DbgLabelIterator DbgLabelBegin() const {
1631 return DbgInfo->DbgLabelBegin();
1632 }
1633 SDDbgInfo::DbgLabelIterator DbgLabelEnd() const {
1634 return DbgInfo->DbgLabelEnd();
1635 }
1636
1637 /// To be invoked on an SDNode that is slated to be erased. This
1638 /// function mirrors \c llvm::salvageDebugInfo.
1639 void salvageDebugInfo(SDNode &N);
1640
1641 void dump() const;
1642
1643 /// In most cases this function returns the ABI alignment for a given type,
1644 /// except for illegal vector types where the alignment exceeds that of the
1645 /// stack. In such cases we attempt to break the vector down to a legal type
1646 /// and return the ABI alignment for that instead.
1647 Align getReducedAlign(EVT VT, bool UseABI);
1648
1649 /// Create a stack temporary based on the size in bytes and the alignment
1650 SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment);
1651
1652 /// Create a stack temporary, suitable for holding the specified value type.
1653 /// If minAlign is specified, the slot size will have at least that alignment.
1654 SDValue CreateStackTemporary(EVT VT, unsigned minAlign = 1);
1655
1656 /// Create a stack temporary suitable for holding either of the specified
1657 /// value types.
1658 SDValue CreateStackTemporary(EVT VT1, EVT VT2);
1659
1660 SDValue FoldSymbolOffset(unsigned Opcode, EVT VT,
1661 const GlobalAddressSDNode *GA,
1662 const SDNode *N2);
1663
1664 SDValue FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, EVT VT,
1665 ArrayRef<SDValue> Ops);
1666
1667 SDValue FoldConstantVectorArithmetic(unsigned Opcode, const SDLoc &DL, EVT VT,
1668 ArrayRef<SDValue> Ops,
1669 const SDNodeFlags Flags = SDNodeFlags());
1670
1671 /// Fold floating-point operations with 2 operands when both operands are
1672 /// constants and/or undefined.
1673 SDValue foldConstantFPMath(unsigned Opcode, const SDLoc &DL, EVT VT,
1674 SDValue N1, SDValue N2);
1675
1676 /// Constant fold a setcc to true or false.
1677 SDValue FoldSetCC(EVT VT, SDValue N1, SDValue N2, ISD::CondCode Cond,
1678 const SDLoc &dl);
1679
1680 /// See if the specified operand can be simplified with the knowledge that
1681 /// only the bits specified by DemandedBits are used. If so, return the
1682 /// simpler operand, otherwise return a null SDValue.
1683 ///
1684 /// (This exists alongside SimplifyDemandedBits because GetDemandedBits can
1685 /// simplify nodes with multiple uses more aggressively.)
1686 SDValue GetDemandedBits(SDValue V, const APInt &DemandedBits);
1687
1688 /// See if the specified operand can be simplified with the knowledge that
1689 /// only the bits specified by DemandedBits are used in the elements specified
1690 /// by DemandedElts. If so, return the simpler operand, otherwise return a
1691 /// null SDValue.
1692 ///
1693 /// (This exists alongside SimplifyDemandedBits because GetDemandedBits can
1694 /// simplify nodes with multiple uses more aggressively.)
1695 SDValue GetDemandedBits(SDValue V, const APInt &DemandedBits,
1696 const APInt &DemandedElts);
1697
1698 /// Return true if the sign bit of Op is known to be zero.
1699 /// We use this predicate to simplify operations downstream.
1700 bool SignBitIsZero(SDValue Op, unsigned Depth = 0) const;
1701
1702 /// Return true if 'Op & Mask' is known to be zero. We
1703 /// use this predicate to simplify operations downstream. Op and Mask are
1704 /// known to be the same type.
1705 bool MaskedValueIsZero(SDValue Op, const APInt &Mask,
1706 unsigned Depth = 0) const;
1707
1708 /// Return true if 'Op & Mask' is known to be zero in DemandedElts. We
1709 /// use this predicate to simplify operations downstream. Op and Mask are
1710 /// known to be the same type.
1711 bool MaskedValueIsZero(SDValue Op, const APInt &Mask,
1712 const APInt &DemandedElts, unsigned Depth = 0) const;
1713
1714 /// Return true if '(Op & Mask) == Mask'.
1715 /// Op and Mask are known to be the same type.
1716 bool MaskedValueIsAllOnes(SDValue Op, const APInt &Mask,
1717 unsigned Depth = 0) const;
1718
1719 /// Determine which bits of Op are known to be either zero or one and return
1720 /// them in Known. For vectors, the known bits are those that are shared by
1721 /// every vector element.
1722 /// Targets can implement the computeKnownBitsForTargetNode method in the
1723 /// TargetLowering class to allow target nodes to be understood.
1724 KnownBits computeKnownBits(SDValue Op, unsigned Depth = 0) const;
1725
1726 /// Determine which bits of Op are known to be either zero or one and return
1727 /// them in Known. The DemandedElts argument allows us to only collect the
1728 /// known bits that are shared by the requested vector elements.
1729 /// Targets can implement the computeKnownBitsForTargetNode method in the
1730 /// TargetLowering class to allow target nodes to be understood.
1731 KnownBits computeKnownBits(SDValue Op, const APInt &DemandedElts,
1732 unsigned Depth = 0) const;
1733
1734 /// Used to represent the possible overflow behavior of an operation.
1735 /// Never: the operation cannot overflow.
1736 /// Always: the operation will always overflow.
1737 /// Sometime: the operation may or may not overflow.
1738 enum OverflowKind {
1739 OFK_Never,
1740 OFK_Sometime,
1741 OFK_Always,
1742 };
1743
1744 /// Determine if the result of the addition of 2 node can overflow.
1745 OverflowKind computeOverflowKind(SDValue N0, SDValue N1) const;
1746
1747 /// Test if the given value is known to have exactly one bit set. This differs
1748 /// from computeKnownBits in that it doesn't necessarily determine which bit
1749 /// is set.
1750 bool isKnownToBeAPowerOfTwo(SDValue Val) const;
1751
1752 /// Return the number of times the sign bit of the register is replicated into
1753 /// the other bits. We know that at least 1 bit is always equal to the sign
1754 /// bit (itself), but other cases can give us information. For example,
1755 /// immediately after an "SRA X, 2", we know that the top 3 bits are all equal
1756 /// to each other, so we return 3. Targets can implement the
1757 /// ComputeNumSignBitsForTarget method in the TargetLowering class to allow
1758 /// target nodes to be understood.
1759 unsigned ComputeNumSignBits(SDValue Op, unsigned Depth = 0) const;
1760
1761 /// Return the number of times the sign bit of the register is replicated into
1762 /// the other bits. We know that at least 1 bit is always equal to the sign
1763 /// bit (itself), but other cases can give us information. For example,
1764 /// immediately after an "SRA X, 2", we know that the top 3 bits are all equal
1765 /// to each other, so we return 3. The DemandedElts argument allows
1766 /// us to only collect the minimum sign bits of the requested vector elements.
1767 /// Targets can implement the ComputeNumSignBitsForTarget method in the
1768 /// TargetLowering class to allow target nodes to be understood.
1769 unsigned ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
1770 unsigned Depth = 0) const;
1771
1772 /// Return true if the specified operand is an ISD::ADD with a ConstantSDNode
1773 /// on the right-hand side, or if it is an ISD::OR with a ConstantSDNode that
1774 /// is guaranteed to have the same semantics as an ADD. This handles the
1775 /// equivalence:
1776 /// X|Cst == X+Cst iff X&Cst = 0.
1777 bool isBaseWithConstantOffset(SDValue Op) const;
1778
1779 /// Test whether the given SDValue is known to never be NaN. If \p SNaN is
1780 /// true, returns if \p Op is known to never be a signaling NaN (it may still
1781 /// be a qNaN).
1782 bool isKnownNeverNaN(SDValue Op, bool SNaN = false, unsigned Depth = 0) const;
1783
1784 /// \returns true if \p Op is known to never be a signaling NaN.
1785 bool isKnownNeverSNaN(SDValue Op, unsigned Depth = 0) const {
     // Convenience wrapper: SNaN=true only rules out signaling NaNs, so Op
     // may still be a quiet NaN (see isKnownNeverNaN above).
1786 return isKnownNeverNaN(Op, true, Depth);
1787 }
1788
1789 /// Test whether the given floating point SDValue is known to never be
1790 /// positive or negative zero.
1791 bool isKnownNeverZeroFloat(SDValue Op) const;
1792
1793 /// Test whether the given SDValue is known to contain non-zero value(s).
1794 bool isKnownNeverZero(SDValue Op) const;
1795
1796 /// Test whether two SDValues are known to compare equal. This
1797 /// is true if they are the same value, or if one is negative zero and the
1798 /// other positive zero.
1799 bool isEqualTo(SDValue A, SDValue B) const;
1800
1801 /// Return true if A and B have no common bits set. As an example, this can
1802 /// allow an 'add' to be transformed into an 'or'.
1803 bool haveNoCommonBitsSet(SDValue A, SDValue B) const;
1804
1805 /// Test whether \p V has a splatted value for all the demanded elements.
1806 ///
1807 /// On success \p UndefElts will indicate the elements that have UNDEF
1808 /// values instead of the splat value, this is only guaranteed to be correct
1809 /// for \p DemandedElts.
1810 ///
1811 /// NOTE: The function will return true for a demanded splat of UNDEF values.
1812 bool isSplatValue(SDValue V, const APInt &DemandedElts, APInt &UndefElts,
1813 unsigned Depth = 0);
1814
1815 /// Test whether \p V has a splatted value.
1816 bool isSplatValue(SDValue V, bool AllowUndefs = false);
1817
1818 /// If V is a splatted value, return the source vector and its splat index.
1819 SDValue getSplatSourceVector(SDValue V, int &SplatIndex);
1820
1821 /// If V is a splat vector, return its scalar source operand by extracting
1822 /// that element from the source vector. If LegalTypes is true, this method
1823 /// may only return a legally-typed splat value. If it cannot legalize the
1824 /// splatted value it will return SDValue().
1825 SDValue getSplatValue(SDValue V, bool LegalTypes = false);
1826
1827 /// If a SHL/SRA/SRL node \p V has a constant or splat constant shift amount
1828 /// that is less than the element bit-width of the shift node, return it.
1829 const APInt *getValidShiftAmountConstant(SDValue V,
1830 const APInt &DemandedElts) const;
1831
1832 /// If a SHL/SRA/SRL node \p V has constant shift amounts that are all less
1833 /// than the element bit-width of the shift node, return the minimum value.
1834 const APInt *
1835 getValidMinimumShiftAmountConstant(SDValue V,
1836 const APInt &DemandedElts) const;
1837
1838 /// If a SHL/SRA/SRL node \p V has constant shift amounts that are all less
1839 /// than the element bit-width of the shift node, return the maximum value.
1840 const APInt *
1841 getValidMaximumShiftAmountConstant(SDValue V,
1842 const APInt &DemandedElts) const;
1843
1844 /// Match a binop + shuffle pyramid that represents a horizontal reduction
1845 /// over the elements of a vector starting from the EXTRACT_VECTOR_ELT node /p
1846 /// Extract. The reduction must use one of the opcodes listed in /p
1847 /// CandidateBinOps and on success /p BinOp will contain the matching opcode.
1848 /// Returns the vector that is being reduced on, or SDValue() if a reduction
1849 /// was not matched. If \p AllowPartials is set then in the case of a
1850 /// reduction pattern that only matches the first few stages, the extracted
1851 /// subvector of the start of the reduction is returned.
1852 SDValue matchBinOpReduction(SDNode *Extract, ISD::NodeType &BinOp,
1853 ArrayRef<ISD::NodeType> CandidateBinOps,
1854 bool AllowPartials = false);
1855
1856 /// Utility function used by legalize and lowering to
1857 /// "unroll" a vector operation by splitting out the scalars and operating
1858 /// on each element individually. If the ResNE is 0, fully unroll the vector
1859 /// op. If ResNE is less than the width of the vector op, unroll up to ResNE.
1860 /// If the ResNE is greater than the width of the vector op, unroll the
1861 /// vector op and fill the end of the resulting vector with UNDEFS.
1862 SDValue UnrollVectorOp(SDNode *N, unsigned ResNE = 0);
1863
1864 /// Like UnrollVectorOp(), but for the [US](ADD|SUB|MUL)O family of opcodes.
1865 /// This is a separate function because those opcodes have two results.
1866 std::pair<SDValue, SDValue> UnrollVectorOverflowOp(SDNode *N,
1867 unsigned ResNE = 0);
1868
1869 /// Return true if loads are next to each other and can be
1870 /// merged. Check that both are nonvolatile and if LD is loading
1871 /// 'Bytes' bytes from a location that is 'Dist' units away from the
1872 /// location that the 'Base' load is loading from.
1873 bool areNonVolatileConsecutiveLoads(LoadSDNode *LD, LoadSDNode *Base,
1874 unsigned Bytes, int Dist) const;
1875
1876 /// Infer alignment of a load / store address. Return None if it cannot be
1877 /// inferred.
1878 MaybeAlign InferPtrAlign(SDValue Ptr) const;
1879
1880 /// Compute the VTs needed for the low/hi parts of a type
1881 /// which is split (or expanded) into two not necessarily identical pieces.
1882 std::pair<EVT, EVT> GetSplitDestVTs(const EVT &VT) const;
1883
1884 /// Compute the VTs needed for the low/hi parts of a type, dependent on an
1885 /// enveloping VT that has been split into two identical pieces. Sets the
1886 /// HisIsEmpty flag when hi type has zero storage size.
1887 std::pair<EVT, EVT> GetDependentSplitDestVTs(const EVT &VT, const EVT &EnvVT,
1888 bool *HiIsEmpty) const;
1889
1890 /// Split the vector with EXTRACT_SUBVECTOR using the provides
1891 /// VTs and return the low/high part.
1892 std::pair<SDValue, SDValue> SplitVector(const SDValue &N, const SDLoc &DL,
1893 const EVT &LoVT, const EVT &HiVT);
1894
1895 /// Split the vector with EXTRACT_SUBVECTOR and return the low/high part.
1896 std::pair<SDValue, SDValue> SplitVector(const SDValue &N, const SDLoc &DL) {
1897 EVT LoVT, HiVT;
1898 std::tie(LoVT, HiVT) = GetSplitDestVTs(N.getValueType());
1899 return SplitVector(N, DL, LoVT, HiVT);
1900 }
1901
1902 /// Split the node's operand with EXTRACT_SUBVECTOR and
1903 /// return the low/high part.
1904 std::pair<SDValue, SDValue> SplitVectorOperand(const SDNode *N, unsigned OpNo)
1905 {
     // Split operand OpNo of N; the debug location is taken from N itself.
1906 return SplitVector(N->getOperand(OpNo), SDLoc(N));
1907 }
1908
1909 /// Widen the vector up to the next power of two using INSERT_SUBVECTOR.
1910 SDValue WidenVector(const SDValue &N, const SDLoc &DL);
1911
1912 /// Append the extracted elements from Start to Count out of the vector Op in
1913 /// Args. If Count is 0, all of the elements will be extracted. The extracted
1914 /// elements will have type EVT if it is provided, and otherwise their type
1915 /// will be Op's element type.
1916 void ExtractVectorElements(SDValue Op, SmallVectorImpl<SDValue> &Args,
1917 unsigned Start = 0, unsigned Count = 0,
1918 EVT EltVT = EVT());
1919
1920 /// Compute the default alignment value for the given type.
1921 Align getEVTAlign(EVT MemoryVT) const;
1922 /// Compute the default alignment value for the given type.
1923 /// FIXME: Remove once transition to Align is over.
1924 inline unsigned getEVTAlignment(EVT MemoryVT) const {
     // Legacy unsigned interface over getEVTAlign(); see the FIXME above —
     // slated for removal once the transition to Align is complete.
1925 return getEVTAlign(MemoryVT).value();
1926 }
1927
1928 /// Test whether the given value is a constant int or similar node.
1929 SDNode *isConstantIntBuildVectorOrConstantInt(SDValue N) const;
1930
1931 /// Test whether the given value is a constant FP or similar node.
1932 SDNode *isConstantFPBuildVectorOrConstantFP(SDValue N) const ;
1933
1934 /// \returns true if \p N is any kind of constant or build_vector of
1935 /// constants, int or float. If a vector, it may not necessarily be a splat.
1936 inline bool isConstantValueOfAnyType(SDValue N) const {
     // True if N is an int-like OR FP-like constant/build_vector; the int
     // check runs first and short-circuits the FP check.
1937 return isConstantIntBuildVectorOrConstantInt(N) ||
1938 isConstantFPBuildVectorOrConstantFP(N);
1939 }
1940
1941 void addCallSiteInfo(const SDNode *CallNode, CallSiteInfoImpl &&CallInfo) {
     // Move CallInfo into the per-node side table; operator[] creates the
     // entry if CallNode has no debug info recorded yet.
1942 SDCallSiteDbgInfo[CallNode].CSInfo = std::move(CallInfo);
1943 }
1944
1945 CallSiteInfo getSDCallSiteInfo(const SDNode *CallNode) {
1946 auto I = SDCallSiteDbgInfo.find(CallNode);
1947 if (I != SDCallSiteDbgInfo.end())
     // NOTE(review): this moves the stored CSInfo out of the map entry, so
     // a second call for the same node returns a moved-from value. Looks
     // like an intentional one-shot consume — confirm at call sites before
     // relying on repeated lookups.
1948 return std::move(I->second).CSInfo;
1949 return CallSiteInfo();
1950 }
1951
1952 void addHeapAllocSite(const SDNode *Node, MDNode *MD) {
     // Record MD as the heap-allocation-site metadata for Node, creating the
     // side-table entry on first use.
1953 SDCallSiteDbgInfo[Node].HeapAllocSite = MD;
1954 }
1955
1956 /// Return the HeapAllocSite type associated with the SDNode, if it exists.
1957 MDNode *getHeapAllocSite(const SDNode *Node) {
1958 auto It = SDCallSiteDbgInfo.find(Node);
1959 if (It == SDCallSiteDbgInfo.end())
1960 return nullptr;
1961 return It->second.HeapAllocSite;
1962 }
1963
1964 void addNoMergeSiteInfo(const SDNode *Node, bool NoMerge) {
     // Only positive no-merge flags are recorded; storing a false value would
     // just create a redundant side-table entry.
1965 if (NoMerge)
1966 SDCallSiteDbgInfo[Node].NoMerge = NoMerge;
1967 }
1968
1969 bool getNoMergeSiteInfo(const SDNode *Node) {
1970 auto I = SDCallSiteDbgInfo.find(Node);
1971 if (I == SDCallSiteDbgInfo.end())
1972 return false;
1973 return I->second.NoMerge;
1974 }
1975
1976 /// Return the current function's default denormal handling kind for the given
1977 /// floating point type.
1978 DenormalMode getDenormalMode(EVT VT) const {
     // Map VT to its APFloat semantics, then ask the current MachineFunction
     // for the denormal handling configured for those semantics.
1979 return MF->getDenormalMode(EVTToAPFloatSemantics(VT));
1980 }
1981
1982 bool shouldOptForSize() const;
1983
1984 /// Get the (commutative) neutral element for the given opcode, if it exists.
1985 SDValue getNeutralElement(unsigned Opcode, const SDLoc &DL, EVT VT,
1986 SDNodeFlags Flags);
1987
1988private:
1989 void InsertNode(SDNode *N);
1990 bool RemoveNodeFromCSEMaps(SDNode *N);
1991 void AddModifiedNodeToCSEMaps(SDNode *N);
1992 SDNode *FindModifiedNodeSlot(SDNode *N, SDValue Op, void *&InsertPos);
1993 SDNode *FindModifiedNodeSlot(SDNode *N, SDValue Op1, SDValue Op2,
1994 void *&InsertPos);
1995 SDNode *FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
1996 void *&InsertPos);
1997 SDNode *UpdateSDLocOnMergeSDNode(SDNode *N, const SDLoc &loc);
1998
1999 void DeleteNodeNotInCSEMaps(SDNode *N);
2000 void DeallocateNode(SDNode *N);
2001
2002 void allnodes_clear();
2003
2004 /// Look up the node specified by ID in CSEMap. If it exists, return it. If
2005 /// not, return the insertion token that will make insertion faster. This
2006 /// overload is for nodes other than Constant or ConstantFP, use the other one
2007 /// for those.
2008 SDNode *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos);
2009
2010 /// Look up the node specified by ID in CSEMap. If it exists, return it. If
2011 /// not, return the insertion token that will make insertion faster. Performs
2012 /// additional processing for constant nodes.
2013 SDNode *FindNodeOrInsertPos(const FoldingSetNodeID &ID, const SDLoc &DL,
2014 void *&InsertPos);
2015
2016 /// List of non-single value types.
2017 FoldingSet<SDVTListNode> VTListMap;
2018
2019 /// Maps to auto-CSE operations.
2020 std::vector<CondCodeSDNode*> CondCodeNodes;
2021
2022 std::vector<SDNode*> ValueTypeNodes;
2023 std::map<EVT, SDNode*, EVT::compareRawBits> ExtendedValueTypeNodes;
2024 StringMap<SDNode*> ExternalSymbols;
2025
2026 std::map<std::pair<std::string, unsigned>, SDNode *> TargetExternalSymbols;
2027 DenseMap<MCSymbol *, SDNode *> MCSymbols;
2028
2029 FlagInserter *Inserter = nullptr;
2030};
2031
2032template <> struct GraphTraits<SelectionDAG*> : public GraphTraits<SDNode*> {
     // Lets generic graph algorithms iterate over every node of a
     // SelectionDAG (via its AllNodes list), inheriting edge/child traversal
     // from GraphTraits<SDNode*>.
2033 using nodes_iterator = pointer_iterator<SelectionDAG::allnodes_iterator>;
2034
2035 static nodes_iterator nodes_begin(SelectionDAG *G) {
2036 return nodes_iterator(G->allnodes_begin());
2037 }
2038
2039 static nodes_iterator nodes_end(SelectionDAG *G) {
2040 return nodes_iterator(G->allnodes_end());
2041 }
2042};
2043
2044} // end namespace llvm
2045
2046#endif // LLVM_CODEGEN_SELECTIONDAG_H

/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/include/llvm/CodeGen/SelectionDAGNodes.h

1//===- llvm/CodeGen/SelectionDAGNodes.h - SelectionDAG Nodes ----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file declares the SDNode class and derived classes, which are used to
10// represent the nodes and operations present in a SelectionDAG. These nodes
11// and operations are machine code level operations, with some similarities to
12// the GCC RTL representation.
13//
14// Clients should include the SelectionDAG.h file instead of this file directly.
15//
16//===----------------------------------------------------------------------===//
17
18#ifndef LLVM_CODEGEN_SELECTIONDAGNODES_H
19#define LLVM_CODEGEN_SELECTIONDAGNODES_H
20
21#include "llvm/ADT/APFloat.h"
22#include "llvm/ADT/ArrayRef.h"
23#include "llvm/ADT/BitVector.h"
24#include "llvm/ADT/FoldingSet.h"
25#include "llvm/ADT/GraphTraits.h"
26#include "llvm/ADT/SmallPtrSet.h"
27#include "llvm/ADT/SmallVector.h"
28#include "llvm/ADT/ilist_node.h"
29#include "llvm/ADT/iterator.h"
30#include "llvm/ADT/iterator_range.h"
31#include "llvm/CodeGen/ISDOpcodes.h"
32#include "llvm/CodeGen/MachineMemOperand.h"
33#include "llvm/CodeGen/Register.h"
34#include "llvm/CodeGen/ValueTypes.h"
35#include "llvm/IR/Constants.h"
36#include "llvm/IR/DebugLoc.h"
37#include "llvm/IR/Instruction.h"
38#include "llvm/IR/Instructions.h"
39#include "llvm/IR/Metadata.h"
40#include "llvm/IR/Operator.h"
41#include "llvm/Support/AlignOf.h"
42#include "llvm/Support/AtomicOrdering.h"
43#include "llvm/Support/Casting.h"
44#include "llvm/Support/ErrorHandling.h"
45#include "llvm/Support/MachineValueType.h"
46#include "llvm/Support/TypeSize.h"
47#include <algorithm>
48#include <cassert>
49#include <climits>
50#include <cstddef>
51#include <cstdint>
52#include <cstring>
53#include <iterator>
54#include <string>
55#include <tuple>
56
57namespace llvm {
58
59class APInt;
60class Constant;
61template <typename T> struct DenseMapInfo;
62class GlobalValue;
63class MachineBasicBlock;
64class MachineConstantPoolValue;
65class MCSymbol;
66class raw_ostream;
67class SDNode;
68class SelectionDAG;
69class Type;
70class Value;
71
72void checkForCycles(const SDNode *N, const SelectionDAG *DAG = nullptr,
73 bool force = false);
74
75/// This represents a list of ValueType's that has been intern'd by
76/// a SelectionDAG. Instances of this simple value class are returned by
77/// SelectionDAG::getVTList(...).
78///
79struct SDVTList {
     // Non-owning pointer into the SelectionDAG's interned VT storage (see
     // class comment above); SDVTList is a cheap value type.
80 const EVT *VTs;
81 unsigned int NumVTs;
82};
83
84namespace ISD {
85
86 /// Node predicates
87
88/// If N is a BUILD_VECTOR or SPLAT_VECTOR node whose elements are all the
89/// same constant or undefined, return true and return the constant value in
90/// \p SplatValue.
91bool isConstantSplatVector(const SDNode *N, APInt &SplatValue);
92
93/// Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where
94/// all of the elements are ~0 or undef. If \p BuildVectorOnly is set to
95/// true, it only checks BUILD_VECTOR.
96bool isConstantSplatVectorAllOnes(const SDNode *N,
97 bool BuildVectorOnly = false);
98
99/// Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where
100/// all of the elements are 0 or undef. If \p BuildVectorOnly is set to true, it
101/// only checks BUILD_VECTOR.
102bool isConstantSplatVectorAllZeros(const SDNode *N,
103 bool BuildVectorOnly = false);
104
105/// Return true if the specified node is a BUILD_VECTOR where all of the
106/// elements are ~0 or undef.
107bool isBuildVectorAllOnes(const SDNode *N);
108
109/// Return true if the specified node is a BUILD_VECTOR where all of the
110/// elements are 0 or undef.
111bool isBuildVectorAllZeros(const SDNode *N);
112
113/// Return true if the specified node is a BUILD_VECTOR node of all
114/// ConstantSDNode or undef.
115bool isBuildVectorOfConstantSDNodes(const SDNode *N);
116
117/// Return true if the specified node is a BUILD_VECTOR node of all
118/// ConstantFPSDNode or undef.
119bool isBuildVectorOfConstantFPSDNodes(const SDNode *N);
120
121/// Return true if the node has at least one operand and all operands of the
122/// specified node are ISD::UNDEF.
123bool allOperandsUndef(const SDNode *N);
124
125} // end namespace ISD
126
127//===----------------------------------------------------------------------===//
128/// Unlike LLVM values, Selection DAG nodes may return multiple
129/// values as the result of a computation. Many nodes return multiple values,
130/// from loads (which define a token and a return value) to ADDC (which returns
131/// a result and a carry value), to calls (which may return an arbitrary number
132/// of values).
133///
134/// As such, each use of a SelectionDAG computation must indicate the node that
135/// computes it as well as which return value to use from that node. This pair
136/// of information is represented with the SDValue value type.
137///
138class SDValue {
139 friend struct DenseMapInfo<SDValue>;
140
141 SDNode *Node = nullptr; // The node defining the value we are using.
142 unsigned ResNo = 0; // Which return value of the node we are using.
143
144public:
145 SDValue() = default;
146 SDValue(SDNode *node, unsigned resno);
147
148 /// get the index which selects a specific result in the SDNode
149 unsigned getResNo() const { return ResNo; }
150
151 /// get the SDNode which holds the desired result
152 SDNode *getNode() const { return Node; }
153
154 /// set the SDNode
155 void setNode(SDNode *N) { Node = N; }
156
     // NOTE(review): no null check — a default-constructed SDValue has
     // Node == nullptr, and dereferencing it through operator-> (or any of
     // the forwarding methods below, which go through Node) is undefined
     // behavior. This is the pattern the static analyzer reports as
     // "Called C++ object pointer is null"; callers must test (bool)V first.
157 inline SDNode *operator->() const { return Node; }
158
159 bool operator==(const SDValue &O) const {
160 return Node == O.Node && ResNo == O.ResNo;
161 }
162 bool operator!=(const SDValue &O) const {
163 return !operator==(O);
164 }
165 bool operator<(const SDValue &O) const {
166 return std::tie(Node, ResNo) < std::tie(O.Node, O.ResNo);
167 }
     // True iff this SDValue refers to an actual node.
168 explicit operator bool() const {
169 return Node != nullptr;
170 }
171
     // Re-target to result number R of the same node.
172 SDValue getValue(unsigned R) const {
173 return SDValue(Node, R);
174 }
175
176 /// Return true if this node is an operand of N.
177 bool isOperandOf(const SDNode *N) const;
178
179 /// Return the ValueType of the referenced return value.
180 inline EVT getValueType() const;
181
182 /// Return the simple ValueType of the referenced return value.
183 MVT getSimpleValueType() const {
184 return getValueType().getSimpleVT();
185 }
186
187 /// Returns the size of the value in bits.
188 ///
189 /// If the value type is a scalable vector type, the scalable property will
190 /// be set and the runtime size will be a positive integer multiple of the
191 /// base size.
192 TypeSize getValueSizeInBits() const {
193 return getValueType().getSizeInBits();
194 }
195
196 uint64_t getScalarValueSizeInBits() const {
197 return getValueType().getScalarType().getFixedSizeInBits();
198 }
199
200 // Forwarding methods - These forward to the corresponding methods in SDNode.
     // All of them go through Node and therefore require a non-null SDValue
     // (see the note at operator-> above).
201 inline unsigned getOpcode() const;
202 inline unsigned getNumOperands() const;
203 inline const SDValue &getOperand(unsigned i) const;
204 inline uint64_t getConstantOperandVal(unsigned i) const;
205 inline const APInt &getConstantOperandAPInt(unsigned i) const;
206 inline bool isTargetMemoryOpcode() const;
207 inline bool isTargetOpcode() const;
208 inline bool isMachineOpcode() const;
209 inline bool isUndef() const;
210 inline unsigned getMachineOpcode() const;
211 inline const DebugLoc &getDebugLoc() const;
212 inline void dump() const;
213 inline void dump(const SelectionDAG *G) const;
214 inline void dumpr() const;
215 inline void dumpr(const SelectionDAG *G) const;
216
217 /// Return true if this operand (which must be a chain) reaches the
218 /// specified operand without crossing any side-effecting instructions.
219 /// In practice, this looks through token factors and non-volatile loads.
220 /// In order to remain efficient, this only
221 /// looks a couple of nodes in, it does not do an exhaustive search.
222 bool reachesChainWithoutSideEffects(SDValue Dest,
223 unsigned Depth = 2) const;
224
225 /// Return true if there are no nodes using value ResNo of Node.
226 inline bool use_empty() const;
227
228 /// Return true if there is exactly one node using value ResNo of Node.
229 inline bool hasOneUse() const;
230};
231
232template<> struct DenseMapInfo<SDValue> {
233 static inline SDValue getEmptyKey() {
234 SDValue V;
235 V.ResNo = -1U;
236 return V;
237 }
238
239 static inline SDValue getTombstoneKey() {
240 SDValue V;
241 V.ResNo = -2U;
242 return V;
243 }
244
245 static unsigned getHashValue(const SDValue &Val) {
246 return ((unsigned)((uintptr_t)Val.getNode() >> 4) ^
247 (unsigned)((uintptr_t)Val.getNode() >> 9)) + Val.getResNo();
248 }
249
250 static bool isEqual(const SDValue &LHS, const SDValue &RHS) {
251 return LHS == RHS;
252 }
253};
254
255/// Allow casting operators to work directly on
256/// SDValues as if they were SDNode*'s.
257template<> struct simplify_type<SDValue> {
258 using SimpleType = SDNode *;
259
260 static SimpleType getSimplifiedValue(SDValue &Val) {
261 return Val.getNode();
262 }
263};
264template<> struct simplify_type<const SDValue> {
265 using SimpleType = /*const*/ SDNode *;
266
267 static SimpleType getSimplifiedValue(const SDValue &Val) {
268 return Val.getNode();
269 }
270};
271
272/// Represents a use of a SDNode. This class holds an SDValue,
273/// which records the SDNode being used and the result number, a
274/// pointer to the SDNode using the value, and Next and Prev pointers,
275/// which link together all the uses of an SDNode.
276///
277class SDUse {
278 /// Val - The value being used.
279 SDValue Val;
280 /// User - The user of this value.
281 SDNode *User = nullptr;
282 /// Prev, Next - Pointers to the uses list of the SDNode referred by
283 /// this operand.
284 SDUse **Prev = nullptr;
285 SDUse *Next = nullptr;
286
287public:
288 SDUse() = default;
289 SDUse(const SDUse &U) = delete;
290 SDUse &operator=(const SDUse &) = delete;
291
292 /// Normally SDUse will just implicitly convert to an SDValue that it holds.
293 operator const SDValue&() const { return Val; }
294
295 /// If implicit conversion to SDValue doesn't work, the get() method returns
296 /// the SDValue.
297 const SDValue &get() const { return Val; }
298
299 /// This returns the SDNode that contains this Use.
300 SDNode *getUser() { return User; }
301
302 /// Get the next SDUse in the use list.
303 SDUse *getNext() const { return Next; }
304
305 /// Convenience function for get().getNode().
306 SDNode *getNode() const { return Val.getNode(); }
307 /// Convenience function for get().getResNo().
308 unsigned getResNo() const { return Val.getResNo(); }
309 /// Convenience function for get().getValueType().
310 EVT getValueType() const { return Val.getValueType(); }
311
312 /// Convenience function for get().operator==
313 bool operator==(const SDValue &V) const {
314 return Val == V;
315 }
316
317 /// Convenience function for get().operator!=
318 bool operator!=(const SDValue &V) const {
319 return Val != V;
320 }
321
322 /// Convenience function for get().operator<
323 bool operator<(const SDValue &V) const {
324 return Val < V;
325 }
326
327private:
328 friend class SelectionDAG;
329 friend class SDNode;
330 // TODO: unfriend HandleSDNode once we fix its operand handling.
331 friend class HandleSDNode;
332
333 void setUser(SDNode *p) { User = p; }
334
335 /// Remove this use from its existing use list, assign it the
336 /// given value, and add it to the new value's node's use list.
337 inline void set(const SDValue &V);
338 /// Like set, but only supports initializing a newly-allocated
339 /// SDUse with a non-null value.
340 inline void setInitial(const SDValue &V);
341 /// Like set, but only sets the Node portion of the value,
342 /// leaving the ResNo portion unmodified.
343 inline void setNode(SDNode *N);
344
345 void addToList(SDUse **List) {
346 Next = *List;
347 if (Next) Next->Prev = &Next;
348 Prev = List;
349 *List = this;
350 }
351
352 void removeFromList() {
353 *Prev = Next;
354 if (Next) Next->Prev = Prev;
355 }
356};
357
358/// simplify_type specializations - Allow casting operators to work directly on
359/// SDValues as if they were SDNode*'s.
360template<> struct simplify_type<SDUse> {
361 using SimpleType = SDNode *;
362
363 static SimpleType getSimplifiedValue(SDUse &Val) {
364 return Val.getNode();
365 }
366};
367
368/// These are IR-level optimization flags that may be propagated to SDNodes.
369/// TODO: This data structure should be shared by the IR optimizer and the
370/// the backend.
371struct SDNodeFlags {
372private:
373 bool NoUnsignedWrap : 1;
374 bool NoSignedWrap : 1;
375 bool Exact : 1;
376 bool NoNaNs : 1;
377 bool NoInfs : 1;
378 bool NoSignedZeros : 1;
379 bool AllowReciprocal : 1;
380 bool AllowContract : 1;
381 bool ApproximateFuncs : 1;
382 bool AllowReassociation : 1;
383
384 // We assume instructions do not raise floating-point exceptions by default,
385 // and only those marked explicitly may do so. We could choose to represent
386 // this via a positive "FPExcept" flags like on the MI level, but having a
387 // negative "NoFPExcept" flag here (that defaults to true) makes the flag
388 // intersection logic more straightforward.
389 bool NoFPExcept : 1;
390
391public:
392 /// Default constructor turns off all optimization flags.
393 SDNodeFlags()
394 : NoUnsignedWrap(false), NoSignedWrap(false), Exact(false), NoNaNs(false),
395 NoInfs(false), NoSignedZeros(false), AllowReciprocal(false),
396 AllowContract(false), ApproximateFuncs(false),
397 AllowReassociation(false), NoFPExcept(false) {}
398
399 /// Propagate the fast-math-flags from an IR FPMathOperator.
400 void copyFMF(const FPMathOperator &FPMO) {
401 setNoNaNs(FPMO.hasNoNaNs());
402 setNoInfs(FPMO.hasNoInfs());
403 setNoSignedZeros(FPMO.hasNoSignedZeros());
404 setAllowReciprocal(FPMO.hasAllowReciprocal());
405 setAllowContract(FPMO.hasAllowContract());
406 setApproximateFuncs(FPMO.hasApproxFunc());
407 setAllowReassociation(FPMO.hasAllowReassoc());
408 }
409
410 // These are mutators for each flag.
411 void setNoUnsignedWrap(bool b) { NoUnsignedWrap = b; }
412 void setNoSignedWrap(bool b) { NoSignedWrap = b; }
413 void setExact(bool b) { Exact = b; }
414 void setNoNaNs(bool b) { NoNaNs = b; }
415 void setNoInfs(bool b) { NoInfs = b; }
416 void setNoSignedZeros(bool b) { NoSignedZeros = b; }
417 void setAllowReciprocal(bool b) { AllowReciprocal = b; }
418 void setAllowContract(bool b) { AllowContract = b; }
419 void setApproximateFuncs(bool b) { ApproximateFuncs = b; }
420 void setAllowReassociation(bool b) { AllowReassociation = b; }
421 void setNoFPExcept(bool b) { NoFPExcept = b; }
422
423 // These are accessors for each flag.
424 bool hasNoUnsignedWrap() const { return NoUnsignedWrap; }
425 bool hasNoSignedWrap() const { return NoSignedWrap; }
426 bool hasExact() const { return Exact; }
427 bool hasNoNaNs() const { return NoNaNs; }
428 bool hasNoInfs() const { return NoInfs; }
429 bool hasNoSignedZeros() const { return NoSignedZeros; }
430 bool hasAllowReciprocal() const { return AllowReciprocal; }
431 bool hasAllowContract() const { return AllowContract; }
432 bool hasApproximateFuncs() const { return ApproximateFuncs; }
433 bool hasAllowReassociation() const { return AllowReassociation; }
434 bool hasNoFPExcept() const { return NoFPExcept; }
435
436 /// Clear any flags in this flag set that aren't also set in Flags. All
437 /// flags will be cleared if Flags are undefined.
438 void intersectWith(const SDNodeFlags Flags) {
439 NoUnsignedWrap &= Flags.NoUnsignedWrap;
440 NoSignedWrap &= Flags.NoSignedWrap;
441 Exact &= Flags.Exact;
442 NoNaNs &= Flags.NoNaNs;
443 NoInfs &= Flags.NoInfs;
444 NoSignedZeros &= Flags.NoSignedZeros;
445 AllowReciprocal &= Flags.AllowReciprocal;
446 AllowContract &= Flags.AllowContract;
447 ApproximateFuncs &= Flags.ApproximateFuncs;
448 AllowReassociation &= Flags.AllowReassociation;
449 NoFPExcept &= Flags.NoFPExcept;
450 }
451};
452
453/// Represents one node in the SelectionDAG.
454///
455class SDNode : public FoldingSetNode, public ilist_node<SDNode> {
456private:
457 /// The operation that this node performs.
458 int16_t NodeType;
459
460protected:
461 // We define a set of mini-helper classes to help us interpret the bits in our
462 // SubclassData. These are designed to fit within a uint16_t so they pack
463 // with NodeType.
464
465#if defined(_AIX) && (!defined(__GNUC__4) || defined(__clang__1))
466// Except for GCC; by default, AIX compilers store bit-fields in 4-byte words
467// and give the `pack` pragma push semantics.
468#define BEGIN_TWO_BYTE_PACK() _Pragma("pack(2)")pack(2)
469#define END_TWO_BYTE_PACK() _Pragma("pack(pop)")pack(pop)
470#else
471#define BEGIN_TWO_BYTE_PACK()
472#define END_TWO_BYTE_PACK()
473#endif
474
475BEGIN_TWO_BYTE_PACK()
476 class SDNodeBitfields {
477 friend class SDNode;
478 friend class MemIntrinsicSDNode;
479 friend class MemSDNode;
480 friend class SelectionDAG;
481
482 uint16_t HasDebugValue : 1;
483 uint16_t IsMemIntrinsic : 1;
484 uint16_t IsDivergent : 1;
485 };
486 enum { NumSDNodeBits = 3 };
487
488 class ConstantSDNodeBitfields {
489 friend class ConstantSDNode;
490
491 uint16_t : NumSDNodeBits;
492
493 uint16_t IsOpaque : 1;
494 };
495
496 class MemSDNodeBitfields {
497 friend class MemSDNode;
498 friend class MemIntrinsicSDNode;
499 friend class AtomicSDNode;
500
501 uint16_t : NumSDNodeBits;
502
503 uint16_t IsVolatile : 1;
504 uint16_t IsNonTemporal : 1;
505 uint16_t IsDereferenceable : 1;
506 uint16_t IsInvariant : 1;
507 };
508 enum { NumMemSDNodeBits = NumSDNodeBits + 4 };
509
510 class LSBaseSDNodeBitfields {
511 friend class LSBaseSDNode;
512 friend class MaskedLoadStoreSDNode;
513 friend class MaskedGatherScatterSDNode;
514
515 uint16_t : NumMemSDNodeBits;
516
517 // This storage is shared between disparate class hierarchies to hold an
518 // enumeration specific to the class hierarchy in use.
519 // LSBaseSDNode => enum ISD::MemIndexedMode
520 // MaskedLoadStoreBaseSDNode => enum ISD::MemIndexedMode
521 // MaskedGatherScatterSDNode => enum ISD::MemIndexType
522 uint16_t AddressingMode : 3;
523 };
524 enum { NumLSBaseSDNodeBits = NumMemSDNodeBits + 3 };
525
526 class LoadSDNodeBitfields {
527 friend class LoadSDNode;
528 friend class MaskedLoadSDNode;
529 friend class MaskedGatherSDNode;
530
531 uint16_t : NumLSBaseSDNodeBits;
532
533 uint16_t ExtTy : 2; // enum ISD::LoadExtType
534 uint16_t IsExpanding : 1;
535 };
536
537 class StoreSDNodeBitfields {
538 friend class StoreSDNode;
539 friend class MaskedStoreSDNode;
540 friend class MaskedScatterSDNode;
541
542 uint16_t : NumLSBaseSDNodeBits;
543
544 uint16_t IsTruncating : 1;
545 uint16_t IsCompressing : 1;
546 };
547
548 union {
549 char RawSDNodeBits[sizeof(uint16_t)];
550 SDNodeBitfields SDNodeBits;
551 ConstantSDNodeBitfields ConstantSDNodeBits;
552 MemSDNodeBitfields MemSDNodeBits;
553 LSBaseSDNodeBitfields LSBaseSDNodeBits;
554 LoadSDNodeBitfields LoadSDNodeBits;
555 StoreSDNodeBitfields StoreSDNodeBits;
556 };
557END_TWO_BYTE_PACK()
558#undef BEGIN_TWO_BYTE_PACK
559#undef END_TWO_BYTE_PACK
560
561 // RawSDNodeBits must cover the entirety of the union. This means that all of
562 // the union's members must have size <= RawSDNodeBits. We write the RHS as
563 // "2" instead of sizeof(RawSDNodeBits) because MSVC can't handle the latter.
564 static_assert(sizeof(SDNodeBitfields) <= 2, "field too wide");
565 static_assert(sizeof(ConstantSDNodeBitfields) <= 2, "field too wide");
566 static_assert(sizeof(MemSDNodeBitfields) <= 2, "field too wide");
567 static_assert(sizeof(LSBaseSDNodeBitfields) <= 2, "field too wide");
568 static_assert(sizeof(LoadSDNodeBitfields) <= 2, "field too wide");
569 static_assert(sizeof(StoreSDNodeBitfields) <= 2, "field too wide");
570
571private:
572 friend class SelectionDAG;
573 // TODO: unfriend HandleSDNode once we fix its operand handling.
574 friend class HandleSDNode;
575
576 /// Unique id per SDNode in the DAG.
577 int NodeId = -1;
578
579 /// The values that are used by this operation.
580 SDUse *OperandList = nullptr;
581
582 /// The types of the values this node defines. SDNode's may
583 /// define multiple values simultaneously.
584 const EVT *ValueList;
585
586 /// List of uses for this SDNode.
587 SDUse *UseList = nullptr;
588
589 /// The number of entries in the Operand/Value list.
590 unsigned short NumOperands = 0;
591 unsigned short NumValues;
592
593 // The ordering of the SDNodes. It roughly corresponds to the ordering of the
594 // original LLVM instructions.
595 // This is used for turning off scheduling, because we'll forgo
596 // the normal scheduling algorithms and output the instructions according to
597 // this ordering.
598 unsigned IROrder;
599
600 /// Source line information.
601 DebugLoc debugLoc;
602
603 /// Return a pointer to the specified value type.
604 static const EVT *getValueTypeList(EVT VT);
605
606 SDNodeFlags Flags;
607
608public:
609 /// Unique and persistent id per SDNode in the DAG.
610 /// Used for debug printing.
611 uint16_t PersistentId;
612
613 //===--------------------------------------------------------------------===//
614 // Accessors
615 //
616
617 /// Return the SelectionDAG opcode value for this node. For
618 /// pre-isel nodes (those for which isMachineOpcode returns false), these
619 /// are the opcode values in the ISD and <target>ISD namespaces. For
620 /// post-isel opcodes, see getMachineOpcode.
621 unsigned getOpcode() const { return (unsigned short)NodeType; }
622
623 /// Test if this node has a target-specific opcode (in the
624 /// \<target\>ISD namespace).
625 bool isTargetOpcode() const { return NodeType >= ISD::BUILTIN_OP_END; }
626
627 /// Test if this node has a target-specific opcode that may raise
628 /// FP exceptions (in the \<target\>ISD namespace and greater than
629 /// FIRST_TARGET_STRICTFP_OPCODE). Note that all target memory
630 /// opcode are currently automatically considered to possibly raise
631 /// FP exceptions as well.
632 bool isTargetStrictFPOpcode() const {
633 return NodeType >= ISD::FIRST_TARGET_STRICTFP_OPCODE;
634 }
635
636 /// Test if this node has a target-specific
637 /// memory-referencing opcode (in the \<target\>ISD namespace and
638 /// greater than FIRST_TARGET_MEMORY_OPCODE).
639 bool isTargetMemoryOpcode() const {
640 return NodeType >= ISD::FIRST_TARGET_MEMORY_OPCODE;
641 }
642
643 /// Return true if the type of the node type undefined.
644 bool isUndef() const { return NodeType == ISD::UNDEF; }
645
646 /// Test if this node is a memory intrinsic (with valid pointer information).
647 /// INTRINSIC_W_CHAIN and INTRINSIC_VOID nodes are sometimes created for
648 /// non-memory intrinsics (with chains) that are not really instances of
649 /// MemSDNode. For such nodes, we need some extra state to determine the
650 /// proper classof relationship.
651 bool isMemIntrinsic() const {
652 return (NodeType == ISD::INTRINSIC_W_CHAIN ||
653 NodeType == ISD::INTRINSIC_VOID) &&
654 SDNodeBits.IsMemIntrinsic;
655 }
656
657 /// Test if this node is a strict floating point pseudo-op.
658 bool isStrictFPOpcode() {
659 switch (NodeType) {
660 default:
661 return false;
662 case ISD::STRICT_FP16_TO_FP:
663 case ISD::STRICT_FP_TO_FP16:
664#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
665 case ISD::STRICT_##DAGN:
666#include "llvm/IR/ConstrainedOps.def"
667 return true;
668 }
669 }
670
671 /// Test if this node has a post-isel opcode, directly
672 /// corresponding to a MachineInstr opcode.
673 bool isMachineOpcode() const { return NodeType < 0; }
674
675 /// This may only be called if isMachineOpcode returns
676 /// true. It returns the MachineInstr opcode value that the node's opcode
677 /// corresponds to.
678 unsigned getMachineOpcode() const {
679 assert(isMachineOpcode() && "Not a MachineInstr opcode!")(static_cast <bool> (isMachineOpcode() && "Not a MachineInstr opcode!"
) ? void (0) : __assert_fail ("isMachineOpcode() && \"Not a MachineInstr opcode!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 679, __extension__ __PRETTY_FUNCTION__))
;
680 return ~NodeType;
681 }
682
683 bool getHasDebugValue() const { return SDNodeBits.HasDebugValue; }
684 void setHasDebugValue(bool b) { SDNodeBits.HasDebugValue = b; }
685
686 bool isDivergent() const { return SDNodeBits.IsDivergent; }
687
688 /// Return true if there are no uses of this node.
689 bool use_empty() const { return UseList == nullptr; }
690
691 /// Return true if there is exactly one use of this node.
692 bool hasOneUse() const { return hasSingleElement(uses()); }
693
694 /// Return the number of uses of this node. This method takes
695 /// time proportional to the number of uses.
696 size_t use_size() const { return std::distance(use_begin(), use_end()); }
697
698 /// Return the unique node id.
699 int getNodeId() const { return NodeId; }
700
701 /// Set unique node id.
702 void setNodeId(int Id) { NodeId = Id; }
703
704 /// Return the node ordering.
705 unsigned getIROrder() const { return IROrder; }
706
707 /// Set the node ordering.
708 void setIROrder(unsigned Order) { IROrder = Order; }
709
710 /// Return the source location info.
711 const DebugLoc &getDebugLoc() const { return debugLoc; }
712
713 /// Set source location info. Try to avoid this, putting
714 /// it in the constructor is preferable.
715 void setDebugLoc(DebugLoc dl) { debugLoc = std::move(dl); }
716
717 /// This class provides iterator support for SDUse
718 /// operands that use a specific SDNode.
719 class use_iterator {
720 friend class SDNode;
721
722 SDUse *Op = nullptr;
723
724 explicit use_iterator(SDUse *op) : Op(op) {}
725
726 public:
727 using iterator_category = std::forward_iterator_tag;
728 using value_type = SDUse;
729 using difference_type = std::ptrdiff_t;
730 using pointer = value_type *;
731 using reference = value_type &;
732
733 use_iterator() = default;
734 use_iterator(const use_iterator &I) : Op(I.Op) {}
735
736 bool operator==(const use_iterator &x) const {
737 return Op == x.Op;
738 }
739 bool operator!=(const use_iterator &x) const {
740 return !operator==(x);
741 }
742
743 /// Return true if this iterator is at the end of uses list.
744 bool atEnd() const { return Op == nullptr; }
745
746 // Iterator traversal: forward iteration only.
747 use_iterator &operator++() { // Preincrement
748 assert(Op && "Cannot increment end iterator!")(static_cast <bool> (Op && "Cannot increment end iterator!"
) ? void (0) : __assert_fail ("Op && \"Cannot increment end iterator!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 748, __extension__ __PRETTY_FUNCTION__))
;
749 Op = Op->getNext();
750 return *this;
751 }
752
753 use_iterator operator++(int) { // Postincrement
754 use_iterator tmp = *this; ++*this; return tmp;
755 }
756
757 /// Retrieve a pointer to the current user node.
758 SDNode *operator*() const {
759 assert(Op && "Cannot dereference end iterator!")(static_cast <bool> (Op && "Cannot dereference end iterator!"
) ? void (0) : __assert_fail ("Op && \"Cannot dereference end iterator!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 759, __extension__ __PRETTY_FUNCTION__))
;
760 return Op->getUser();
761 }
762
763 SDNode *operator->() const { return operator*(); }
764
765 SDUse &getUse() const { return *Op; }
766
767 /// Retrieve the operand # of this use in its user.
768 unsigned getOperandNo() const {
769 assert(Op && "Cannot dereference end iterator!")(static_cast <bool> (Op && "Cannot dereference end iterator!"
) ? void (0) : __assert_fail ("Op && \"Cannot dereference end iterator!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 769, __extension__ __PRETTY_FUNCTION__))
;
770 return (unsigned)(Op - Op->getUser()->OperandList);
771 }
772 };
773
774 /// Provide iteration support to walk over all uses of an SDNode.
775 use_iterator use_begin() const {
776 return use_iterator(UseList);
777 }
778
779 static use_iterator use_end() { return use_iterator(nullptr); }
780
781 inline iterator_range<use_iterator> uses() {
782 return make_range(use_begin(), use_end());
783 }
784 inline iterator_range<use_iterator> uses() const {
785 return make_range(use_begin(), use_end());
786 }
787
788 /// Return true if there are exactly NUSES uses of the indicated value.
789 /// This method ignores uses of other values defined by this operation.
790 bool hasNUsesOfValue(unsigned NUses, unsigned Value) const;
791
792 /// Return true if there are any use of the indicated value.
793 /// This method ignores uses of other values defined by this operation.
794 bool hasAnyUseOfValue(unsigned Value) const;
795
796 /// Return true if this node is the only use of N.
797 bool isOnlyUserOf(const SDNode *N) const;
798
799 /// Return true if this node is an operand of N.
800 bool isOperandOf(const SDNode *N) const;
801
802 /// Return true if this node is a predecessor of N.
803 /// NOTE: Implemented on top of hasPredecessor and every bit as
804 /// expensive. Use carefully.
805 bool isPredecessorOf(const SDNode *N) const {
806 return N->hasPredecessor(this);
807 }
808
809 /// Return true if N is a predecessor of this node.
810 /// N is either an operand of this node, or can be reached by recursively
811 /// traversing up the operands.
812 /// NOTE: This is an expensive method. Use it carefully.
813 bool hasPredecessor(const SDNode *N) const;
814
815 /// Returns true if N is a predecessor of any node in Worklist. This
816 /// helper keeps Visited and Worklist sets externally to allow unions
817 /// searches to be performed in parallel, caching of results across
818 /// queries and incremental addition to Worklist. Stops early if N is
819 /// found but will resume. Remember to clear Visited and Worklists
820 /// if DAG changes. MaxSteps gives a maximum number of nodes to visit before
821 /// giving up. The TopologicalPrune flag signals that positive NodeIds are
822 /// topologically ordered (Operands have strictly smaller node id) and search
823 /// can be pruned leveraging this.
824 static bool hasPredecessorHelper(const SDNode *N,
825 SmallPtrSetImpl<const SDNode *> &Visited,
826 SmallVectorImpl<const SDNode *> &Worklist,
827 unsigned int MaxSteps = 0,
828 bool TopologicalPrune = false) {
829 SmallVector<const SDNode *, 8> DeferredNodes;
830 if (Visited.count(N))
831 return true;
832
833 // Node Id's are assigned in three places: As a topological
834 // ordering (> 0), during legalization (results in values set to
835 // 0), new nodes (set to -1). If N has a topolgical id then we
836 // know that all nodes with ids smaller than it cannot be
837 // successors and we need not check them. Filter out all node
838 // that can't be matches. We add them to the worklist before exit
839 // in case of multiple calls. Note that during selection the topological id
840 // may be violated if a node's predecessor is selected before it. We mark
841 // this at selection negating the id of unselected successors and
842 // restricting topological pruning to positive ids.
843
844 int NId = N->getNodeId();
845 // If we Invalidated the Id, reconstruct original NId.
846 if (NId < -1)
847 NId = -(NId + 1);
848
849 bool Found = false;
850 while (!Worklist.empty()) {
851 const SDNode *M = Worklist.pop_back_val();
852 int MId = M->getNodeId();
853 if (TopologicalPrune && M->getOpcode() != ISD::TokenFactor && (NId > 0) &&
854 (MId > 0) && (MId < NId)) {
855 DeferredNodes.push_back(M);
856 continue;
857 }
858 for (const SDValue &OpV : M->op_values()) {
859 SDNode *Op = OpV.getNode();
860 if (Visited.insert(Op).second)
861 Worklist.push_back(Op);
862 if (Op == N)
863 Found = true;
864 }
865 if (Found)
866 break;
867 if (MaxSteps != 0 && Visited.size() >= MaxSteps)
868 break;
869 }
870 // Push deferred nodes back on worklist.
871 Worklist.append(DeferredNodes.begin(), DeferredNodes.end());
872 // If we bailed early, conservatively return found.
873 if (MaxSteps != 0 && Visited.size() >= MaxSteps)
874 return true;
875 return Found;
876 }
877
878 /// Return true if all the users of N are contained in Nodes.
879 /// NOTE: Requires at least one match, but doesn't require them all.
880 static bool areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N);
881
882 /// Return the number of values used by this operation.
883 unsigned getNumOperands() const { return NumOperands; }
884
885 /// Return the maximum number of operands that a SDNode can hold.
886 static constexpr size_t getMaxNumOperands() {
887 return std::numeric_limits<decltype(SDNode::NumOperands)>::max();
888 }
889
890 /// Helper method returns the integer value of a ConstantSDNode operand.
891 inline uint64_t getConstantOperandVal(unsigned Num) const;
892
893 /// Helper method returns the APInt of a ConstantSDNode operand.
894 inline const APInt &getConstantOperandAPInt(unsigned Num) const;
895
896 const SDValue &getOperand(unsigned Num) const {
897 assert(Num < NumOperands && "Invalid child # of SDNode!")(static_cast <bool> (Num < NumOperands && "Invalid child # of SDNode!"
) ? void (0) : __assert_fail ("Num < NumOperands && \"Invalid child # of SDNode!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 897, __extension__ __PRETTY_FUNCTION__))
;
898 return OperandList[Num];
899 }
900
901 using op_iterator = SDUse *;
902
903 op_iterator op_begin() const { return OperandList; }
904 op_iterator op_end() const { return OperandList+NumOperands; }
905 ArrayRef<SDUse> ops() const { return makeArrayRef(op_begin(), op_end()); }
906
907 /// Iterator for directly iterating over the operand SDValue's.
908 struct value_op_iterator
909 : iterator_adaptor_base<value_op_iterator, op_iterator,
910 std::random_access_iterator_tag, SDValue,
911 ptrdiff_t, value_op_iterator *,
912 value_op_iterator *> {
913 explicit value_op_iterator(SDUse *U = nullptr)
914 : iterator_adaptor_base(U) {}
915
916 const SDValue &operator*() const { return I->get(); }
917 };
918
919 iterator_range<value_op_iterator> op_values() const {
920 return make_range(value_op_iterator(op_begin()),
921 value_op_iterator(op_end()));
922 }
923
924 SDVTList getVTList() const {
925 SDVTList X = { ValueList, NumValues };
926 return X;
927 }
928
929 /// If this node has a glue operand, return the node
930 /// to which the glue operand points. Otherwise return NULL.
931 SDNode *getGluedNode() const {
932 if (getNumOperands() != 0 &&
933 getOperand(getNumOperands()-1).getValueType() == MVT::Glue)
934 return getOperand(getNumOperands()-1).getNode();
935 return nullptr;
936 }
937
938 /// If this node has a glue value with a user, return
939 /// the user (there is at most one). Otherwise return NULL.
940 SDNode *getGluedUser() const {
941 for (use_iterator UI = use_begin(), UE = use_end(); UI != UE; ++UI)
942 if (UI.getUse().get().getValueType() == MVT::Glue)
943 return *UI;
944 return nullptr;
945 }
946
947 SDNodeFlags getFlags() const { return Flags; }
948 void setFlags(SDNodeFlags NewFlags) { Flags = NewFlags; }
949
950 /// Clear any flags in this node that aren't also set in Flags.
951 /// If Flags is not in a defined state then this has no effect.
952 void intersectFlagsWith(const SDNodeFlags Flags);
953
954 /// Return the number of values defined/returned by this operator.
955 unsigned getNumValues() const { return NumValues; }
956
957 /// Return the type of a specified result.
958 EVT getValueType(unsigned ResNo) const {
959 assert(ResNo < NumValues && "Illegal result number!")(static_cast <bool> (ResNo < NumValues && "Illegal result number!"
) ? void (0) : __assert_fail ("ResNo < NumValues && \"Illegal result number!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 959, __extension__ __PRETTY_FUNCTION__))
;
960 return ValueList[ResNo];
961 }
962
963 /// Return the type of a specified result as a simple type.
964 MVT getSimpleValueType(unsigned ResNo) const {
965 return getValueType(ResNo).getSimpleVT();
966 }
967
968 /// Returns MVT::getSizeInBits(getValueType(ResNo)).
969 ///
970 /// If the value type is a scalable vector type, the scalable property will
971 /// be set and the runtime size will be a positive integer multiple of the
972 /// base size.
973 TypeSize getValueSizeInBits(unsigned ResNo) const {
974 return getValueType(ResNo).getSizeInBits();
975 }
976
977 using value_iterator = const EVT *;
978
979 value_iterator value_begin() const { return ValueList; }
980 value_iterator value_end() const { return ValueList+NumValues; }
981 iterator_range<value_iterator> values() const {
982 return llvm::make_range(value_begin(), value_end());
983 }
984
985 /// Return the opcode of this operation for printing.
986 std::string getOperationName(const SelectionDAG *G = nullptr) const;
987 static const char* getIndexedModeName(ISD::MemIndexedMode AM);
988 void print_types(raw_ostream &OS, const SelectionDAG *G) const;
989 void print_details(raw_ostream &OS, const SelectionDAG *G) const;
990 void print(raw_ostream &OS, const SelectionDAG *G = nullptr) const;
991 void printr(raw_ostream &OS, const SelectionDAG *G = nullptr) const;
992
993 /// Print a SelectionDAG node and all children down to
994 /// the leaves. The given SelectionDAG allows target-specific nodes
995 /// to be printed in human-readable form. Unlike printr, this will
996 /// print the whole DAG, including children that appear multiple
997 /// times.
998 ///
999 void printrFull(raw_ostream &O, const SelectionDAG *G = nullptr) const;
1000
1001 /// Print a SelectionDAG node and children up to
1002 /// depth "depth." The given SelectionDAG allows target-specific
1003 /// nodes to be printed in human-readable form. Unlike printr, this
1004 /// will print children that appear multiple times wherever they are
1005 /// used.
1006 ///
1007 void printrWithDepth(raw_ostream &O, const SelectionDAG *G = nullptr,
1008 unsigned depth = 100) const;
1009
1010 /// Dump this node, for debugging.
1011 void dump() const;
1012
1013 /// Dump (recursively) this node and its use-def subgraph.
1014 void dumpr() const;
1015
1016 /// Dump this node, for debugging.
1017 /// The given SelectionDAG allows target-specific nodes to be printed
1018 /// in human-readable form.
1019 void dump(const SelectionDAG *G) const;
1020
1021 /// Dump (recursively) this node and its use-def subgraph.
1022 /// The given SelectionDAG allows target-specific nodes to be printed
1023 /// in human-readable form.
1024 void dumpr(const SelectionDAG *G) const;
1025
1026 /// printrFull to dbgs(). The given SelectionDAG allows
1027 /// target-specific nodes to be printed in human-readable form.
1028 /// Unlike dumpr, this will print the whole DAG, including children
1029 /// that appear multiple times.
1030 void dumprFull(const SelectionDAG *G = nullptr) const;
1031
1032 /// printrWithDepth to dbgs(). The given
1033 /// SelectionDAG allows target-specific nodes to be printed in
1034 /// human-readable form. Unlike dumpr, this will print children
1035 /// that appear multiple times wherever they are used.
1036 ///
1037 void dumprWithDepth(const SelectionDAG *G = nullptr,
1038 unsigned depth = 100) const;
1039
1040 /// Gather unique data for the node.
1041 void Profile(FoldingSetNodeID &ID) const;
1042
1043 /// This method should only be used by the SDUse class.
1044 void addUse(SDUse &U) { U.addToList(&UseList); }
1045
1046protected:
1047 static SDVTList getSDVTList(EVT VT) {
1048 SDVTList Ret = { getValueTypeList(VT), 1 };
1049 return Ret;
1050 }
1051
1052 /// Create an SDNode.
1053 ///
1054 /// SDNodes are created without any operands, and never own the operand
1055 /// storage. To add operands, see SelectionDAG::createOperands.
1056 SDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs)
1057 : NodeType(Opc), ValueList(VTs.VTs), NumValues(VTs.NumVTs),
1058 IROrder(Order), debugLoc(std::move(dl)) {
1059 memset(&RawSDNodeBits, 0, sizeof(RawSDNodeBits));
1060 assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor")(static_cast <bool> (debugLoc.hasTrivialDestructor() &&
"Expected trivial destructor") ? void (0) : __assert_fail ("debugLoc.hasTrivialDestructor() && \"Expected trivial destructor\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1060, __extension__ __PRETTY_FUNCTION__))
;
1061 assert(NumValues == VTs.NumVTs &&(static_cast <bool> (NumValues == VTs.NumVTs &&
"NumValues wasn't wide enough for its operands!") ? void (0)
: __assert_fail ("NumValues == VTs.NumVTs && \"NumValues wasn't wide enough for its operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1062, __extension__ __PRETTY_FUNCTION__))
1062 "NumValues wasn't wide enough for its operands!")(static_cast <bool> (NumValues == VTs.NumVTs &&
"NumValues wasn't wide enough for its operands!") ? void (0)
: __assert_fail ("NumValues == VTs.NumVTs && \"NumValues wasn't wide enough for its operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1062, __extension__ __PRETTY_FUNCTION__))
;
1063 }
1064
1065 /// Release the operands and set this node to have zero operands.
1066 void DropOperands();
1067};
1068
1069/// Wrapper class for IR location info (IR ordering and DebugLoc) to be passed
1070/// into SDNode creation functions.
1071/// When an SDNode is created from the DAGBuilder, the DebugLoc is extracted
1072/// from the original Instruction, and IROrder is the ordinal position of
1073/// the instruction.
1074/// When an SDNode is created after the DAG is being built, both DebugLoc and
1075/// the IROrder are propagated from the original SDNode.
1076/// So SDLoc class provides two constructors besides the default one, one to
1077/// be used by the DAGBuilder, the other to be used by others.
1078class SDLoc {
1079private:
1080 DebugLoc DL;
1081 int IROrder = 0;
1082
1083public:
1084 SDLoc() = default;
1085 SDLoc(const SDNode *N) : DL(N->getDebugLoc()), IROrder(N->getIROrder()) {}
1086 SDLoc(const SDValue V) : SDLoc(V.getNode()) {}
1087 SDLoc(const Instruction *I, int Order) : IROrder(Order) {
1088 assert(Order >= 0 && "bad IROrder")(static_cast <bool> (Order >= 0 && "bad IROrder"
) ? void (0) : __assert_fail ("Order >= 0 && \"bad IROrder\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1088, __extension__ __PRETTY_FUNCTION__))
;
1089 if (I)
1090 DL = I->getDebugLoc();
1091 }
1092
1093 unsigned getIROrder() const { return IROrder; }
1094 const DebugLoc &getDebugLoc() const { return DL; }
1095};
1096
1097// Define inline functions from the SDValue class.
1098
1099inline SDValue::SDValue(SDNode *node, unsigned resno)
1100 : Node(node), ResNo(resno) {
1101 // Explicitly check for !ResNo to avoid use-after-free, because there are
1102 // callers that use SDValue(N, 0) with a deleted N to indicate successful
1103 // combines.
1104 assert((!Node || !ResNo || ResNo < Node->getNumValues()) &&(static_cast <bool> ((!Node || !ResNo || ResNo < Node
->getNumValues()) && "Invalid result number for the given node!"
) ? void (0) : __assert_fail ("(!Node || !ResNo || ResNo < Node->getNumValues()) && \"Invalid result number for the given node!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1105, __extension__ __PRETTY_FUNCTION__))
1105 "Invalid result number for the given node!")(static_cast <bool> ((!Node || !ResNo || ResNo < Node
->getNumValues()) && "Invalid result number for the given node!"
) ? void (0) : __assert_fail ("(!Node || !ResNo || ResNo < Node->getNumValues()) && \"Invalid result number for the given node!\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1105, __extension__ __PRETTY_FUNCTION__))
;
1106 assert(ResNo < -2U && "Cannot use result numbers reserved for DenseMaps.")(static_cast <bool> (ResNo < -2U && "Cannot use result numbers reserved for DenseMaps."
) ? void (0) : __assert_fail ("ResNo < -2U && \"Cannot use result numbers reserved for DenseMaps.\""
, "/build/llvm-toolchain-snapshot-13~++20210722111111+b115c038d2d4/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1106, __extension__ __PRETTY_FUNCTION__))
;
1107}
1108
1109inline unsigned SDValue::getOpcode() const {
1110 return Node->getOpcode();
26
Called C++ object pointer is null
1111}
1112
1113inline EVT SDValue::getValueType() const {
1114 return Node->getValueType(ResNo);
1115}
1116
1117inline unsigned SDValue::getNumOperands() const {
1118 return Node->getNumOperands();
1119}
1120
1121inline const SDValue &SDValue::getOperand(unsigned i) const {
1122 return Node->getOperand(i);
1123}
1124
1125inline uint64_t SDValue::getConstantOperandVal(unsigned i) const {
1126 return Node->getConstantOperandVal(i);
1127}
1128
1129inline const APInt &SDValue::getConstantOperandAPInt(unsigned i) const {
1130 return Node->getConstantOperandAPInt(i);
1131}
1132
1133inline bool SDValue::isTargetOpcode() const {
1134 return Node->isTargetOpcode();
1135}
1136
1137inline bool SDValue::isTargetMemoryOpcode() const {
1138 return Node->isTargetMemoryOpcode();
1139}
1140
1141inline bool SDValue::isMachineOpcode() const {
1142 return Node->isMachineOpcode();
1143}
1144
1145inline unsigned SDValue::getMachineOpcode() const {
1146 return Node->getMachineOpcode();
1147}
1148
1149inline bool SDValue::isUndef() const {
1150 return Node->isUndef();
1151}
1152
1153inline bool SDValue::use_empty() const {
1154 return !Node->hasAnyUseOfValue(ResNo);
1155}
1156
1157inline bool SDValue::hasOneUse() const {
1158 return Node->hasNUsesOfValue(1, ResNo);
1159}
1160
1161inline const DebugLoc &SDValue::getDebugLoc() const {
1162 return Node->getDebugLoc();
1163}
1164
1165inline void SDValue::dump() const {
1166 return Node->dump();
1167}
1168
1169inline void SDValue::dump(const SelectionDAG *G) const {
1170 return Node->dump(G);
1171}
1172
1173inline void SDValue::dumpr() const {
1174 return Node->dumpr();
1175}
1176
1177inline void SDValue::dumpr(const SelectionDAG *G) const {
1178 return Node->dumpr(G);
1179}
1180
1181// Define inline functions from the SDUse class.
1182
1183inline void SDUse::set(const SDValue &V) {
1184 if (Val.getNode()) removeFromList();
1185 Val = V;
1186 if (V.getNode()) V.getNode()->addUse(*this);
1187}
1188
1189inline void SDUse::setInitial(const SDValue &V) {
1190 Val = V;
1191 V.getNode()->addUse(*this);
1192}
1193
1194inline void SDUse::setNode(SDNode *N) {
1195 if (Val.getNode()) removeFromList();
1196 Val.setNode(N);
1197 if (N) N->addUse(*this);
1198}
1199
1200/// This class is used to form a handle around another node that
1201/// is persistent and is updated across invocations of replaceAllUsesWith on its
1202/// operand. This node should be directly created by end-users and not added to
1203/// the AllNodes list.
1204class HandleSDNode : public SDNode {
1205 SDUse Op;
1206
1207public:
1208 explicit HandleSDNode(SDValue X)
1209 : SDNode(ISD::HANDLENODE, 0, DebugLoc(), getSDVTList(MVT::Other)) {
1210 // HandleSDNodes are never inserted into the DAG, so they won't be
1211 // auto-numbered. Use ID 65535 as a sentinel.
1212 PersistentId = 0xffff;
1213
1214 // Manually set up the operand list. This node type is special in that it's
1215 // always stack allocated and SelectionDAG does not manage its operands.
1216 // TODO: This should either (a) not be in the SDNode hierarchy, or (b) not
1217 // be so special.
1218 Op.setUser(this);
1219 Op.setInitial(X);
1220 NumOperands = 1;
1221 OperandList = &Op;
1222 }
1223 ~HandleSDNode();
1224
1225 const SDValue &getValue() const { return Op; }
1226};
1227
1228class AddrSpaceCastSDNode : public SDNode {
1229private:
1230 unsigned SrcAddrSpace;
1231 unsigned DestAddrSpace;
1232
1233public:
1234 AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl, EVT VT,
1235 unsigned SrcAS, unsigned DestAS);
1236
1237 unsigned getSrcAddressSpace() const { return SrcAddrSpace; }
1238 unsigned getDestAddressSpace() const { return DestAddrSpace; }
1239
1240 static bool classof(const SDNode *N) {
1241 return N->getOpcode() == ISD::ADDRSPACECAST;
1242 }
1243};
1244
1245/// This is an abstract virtual class for memory operations.
1246class MemSDNode : public SDNode {
1247private:
1248 // VT of in-memory value.
1249 EVT MemoryVT;
1250
1251protected:
1252 /// Memory reference information.
1253 MachineMemOperand *MMO;
1254
1255public:
1256 MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTs,
1257 EVT memvt, MachineMemOperand *MMO);
1258
1259 bool readMem() const { return MMO->isLoad(); }
1260 bool writeMem() const { return MMO->isStore(); }
1261
1262 /// Returns alignment and volatility of the memory access
1263 Align getOriginalAlign() const { return MMO->getBaseAlign(); }
1264 Align getAlign() const { return MMO->getAlign(); }
1265 // FIXME: Remove once transition to getAlign is over.
1266 unsigned getAlignment() const { return MMO->getAlign().value(); }
1267
1268 /// Return the SubclassData value, without HasDebugValue. This contains an
1269 /// encoding of the volatile flag, as well as bits used by subclasses. This
1270 /// function should only be used to compute a FoldingSetNodeID value.
1271 /// The HasDebugValue bit is masked out because CSE map needs to match
1272 /// nodes with debug info with nodes without debug info. Same is about
1273 /// isDivergent bit.
1274 unsigned getRawSubclassData() const {
1275 uint16_t Data;
1276 union {
1277 char RawSDNodeBits[sizeof(uint16_t)];
1278 SDNodeBitfields SDNodeBits;
1279 };
1280 memcpy(&RawSDNodeBits, &this->RawSDNodeBits, sizeof(this->RawSDNodeBits));
1281 SDNodeBits.HasDebugValue = 0;
1282 SDNodeBits.IsDivergent = false;
1283 memcpy(&Data, &RawSDNodeBits, sizeof(RawSDNodeBits));
1284 return Data;
1285 }
1286
1287 bool isVolatile() const { return MemSDNodeBits.IsVolatile; }
1288 bool isNonTemporal() const { return MemSDNodeBits.IsNonTemporal; }
1289 bool isDereferenceable() const { return MemSDNodeBits.IsDereferenceable; }
1290 bool isInvariant() const { return MemSDNodeBits.IsInvariant; }
1291
1292 // Returns the offset from the location of the access.
1293 int64_t getSrcValueOffset() const { return MMO->getOffset(); }
1294
1295 /// Returns the AA info that describes the dereference.
1296 AAMDNodes getAAInfo() const { return MMO->getAAInfo(); }
1297
1298