Bug Summary

File: llvm/include/llvm/CodeGen/SelectionDAGNodes.h
Warning: line 1114, column 10
Called C++ object pointer is null

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name WebAssemblyISelLowering.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/build-llvm/lib/Target/WebAssembly -resource-dir /usr/lib/llvm-13/lib/clang/13.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/build-llvm/lib/Target/WebAssembly -I /build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Target/WebAssembly -I /build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/build-llvm/include -I /build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem 
/usr/lib/llvm-13/lib/clang/13.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/build-llvm/lib/Target/WebAssembly -fdebug-prefix-map=/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7=. -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-06-13-111025-38230-1 -x c++ /build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp

/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp

1//=- WebAssemblyISelLowering.cpp - WebAssembly DAG Lowering Implementation -==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file implements the WebAssemblyTargetLowering class.
11///
12//===----------------------------------------------------------------------===//
13
14#include "WebAssemblyISelLowering.h"
15#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
16#include "Utils/WebAssemblyUtilities.h"
17#include "WebAssemblyMachineFunctionInfo.h"
18#include "WebAssemblySubtarget.h"
19#include "WebAssemblyTargetMachine.h"
20#include "llvm/CodeGen/CallingConvLower.h"
21#include "llvm/CodeGen/MachineInstrBuilder.h"
22#include "llvm/CodeGen/MachineJumpTableInfo.h"
23#include "llvm/CodeGen/MachineModuleInfo.h"
24#include "llvm/CodeGen/MachineRegisterInfo.h"
25#include "llvm/CodeGen/SelectionDAG.h"
26#include "llvm/CodeGen/SelectionDAGNodes.h"
27#include "llvm/CodeGen/WasmEHFuncInfo.h"
28#include "llvm/IR/DiagnosticInfo.h"
29#include "llvm/IR/DiagnosticPrinter.h"
30#include "llvm/IR/Function.h"
31#include "llvm/IR/Intrinsics.h"
32#include "llvm/IR/IntrinsicsWebAssembly.h"
33#include "llvm/Support/Debug.h"
34#include "llvm/Support/ErrorHandling.h"
35#include "llvm/Support/MathExtras.h"
36#include "llvm/Support/raw_ostream.h"
37#include "llvm/Target/TargetOptions.h"
38using namespace llvm;
39
40#define DEBUG_TYPE"wasm-lower" "wasm-lower"
41
42WebAssemblyTargetLowering::WebAssemblyTargetLowering(
43 const TargetMachine &TM, const WebAssemblySubtarget &STI)
44 : TargetLowering(TM), Subtarget(&STI) {
45 auto MVTPtr = Subtarget->hasAddr64() ? MVT::i64 : MVT::i32;
46
47 // Booleans always contain 0 or 1.
48 setBooleanContents(ZeroOrOneBooleanContent);
49 // Except in SIMD vectors
50 setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
51 // We don't know the microarchitecture here, so just reduce register pressure.
52 setSchedulingPreference(Sched::RegPressure);
53 // Tell ISel that we have a stack pointer.
54 setStackPointerRegisterToSaveRestore(
55 Subtarget->hasAddr64() ? WebAssembly::SP64 : WebAssembly::SP32);
56 // Set up the register classes.
57 addRegisterClass(MVT::i32, &WebAssembly::I32RegClass);
58 addRegisterClass(MVT::i64, &WebAssembly::I64RegClass);
59 addRegisterClass(MVT::f32, &WebAssembly::F32RegClass);
60 addRegisterClass(MVT::f64, &WebAssembly::F64RegClass);
61 if (Subtarget->hasSIMD128()) {
62 addRegisterClass(MVT::v16i8, &WebAssembly::V128RegClass);
63 addRegisterClass(MVT::v8i16, &WebAssembly::V128RegClass);
64 addRegisterClass(MVT::v4i32, &WebAssembly::V128RegClass);
65 addRegisterClass(MVT::v4f32, &WebAssembly::V128RegClass);
66 addRegisterClass(MVT::v2i64, &WebAssembly::V128RegClass);
67 addRegisterClass(MVT::v2f64, &WebAssembly::V128RegClass);
68 }
69 // Compute derived properties from the register classes.
70 computeRegisterProperties(Subtarget->getRegisterInfo());
71
72 // Transform loads and stores to pointers in address space 1 to loads and
73 // stores to WebAssembly global variables, outside linear memory.
74 for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64}) {
75 setOperationAction(ISD::LOAD, T, Custom);
76 setOperationAction(ISD::STORE, T, Custom);
77 }
78 if (Subtarget->hasSIMD128()) {
79 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
80 MVT::v2f64}) {
81 setOperationAction(ISD::LOAD, T, Custom);
82 setOperationAction(ISD::STORE, T, Custom);
83 }
84 }
85
86 setOperationAction(ISD::GlobalAddress, MVTPtr, Custom);
87 setOperationAction(ISD::GlobalTLSAddress, MVTPtr, Custom);
88 setOperationAction(ISD::ExternalSymbol, MVTPtr, Custom);
89 setOperationAction(ISD::JumpTable, MVTPtr, Custom);
90 setOperationAction(ISD::BlockAddress, MVTPtr, Custom);
91 setOperationAction(ISD::BRIND, MVT::Other, Custom);
92
93 // Take the default expansion for va_arg, va_copy, and va_end. There is no
94 // default action for va_start, so we do that custom.
95 setOperationAction(ISD::VASTART, MVT::Other, Custom);
96 setOperationAction(ISD::VAARG, MVT::Other, Expand);
97 setOperationAction(ISD::VACOPY, MVT::Other, Expand);
98 setOperationAction(ISD::VAEND, MVT::Other, Expand);
99
100 for (auto T : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
101 // Don't expand the floating-point types to constant pools.
102 setOperationAction(ISD::ConstantFP, T, Legal);
103 // Expand floating-point comparisons.
104 for (auto CC : {ISD::SETO, ISD::SETUO, ISD::SETUEQ, ISD::SETONE,
105 ISD::SETULT, ISD::SETULE, ISD::SETUGT, ISD::SETUGE})
106 setCondCodeAction(CC, T, Expand);
107 // Expand floating-point library function operators.
108 for (auto Op :
109 {ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FMA})
110 setOperationAction(Op, T, Expand);
111 // Note supported floating-point library function operators that otherwise
112 // default to expand.
113 for (auto Op :
114 {ISD::FCEIL, ISD::FFLOOR, ISD::FTRUNC, ISD::FNEARBYINT, ISD::FRINT})
115 setOperationAction(Op, T, Legal);
116 // Support minimum and maximum, which otherwise default to expand.
117 setOperationAction(ISD::FMINIMUM, T, Legal);
118 setOperationAction(ISD::FMAXIMUM, T, Legal);
119 // WebAssembly currently has no builtin f16 support.
120 setOperationAction(ISD::FP16_TO_FP, T, Expand);
121 setOperationAction(ISD::FP_TO_FP16, T, Expand);
122 setLoadExtAction(ISD::EXTLOAD, T, MVT::f16, Expand);
123 setTruncStoreAction(T, MVT::f16, Expand);
124 }
125
126 // Expand unavailable integer operations.
127 for (auto Op :
128 {ISD::BSWAP, ISD::SMUL_LOHI, ISD::UMUL_LOHI, ISD::MULHS, ISD::MULHU,
129 ISD::SDIVREM, ISD::UDIVREM, ISD::SHL_PARTS, ISD::SRA_PARTS,
130 ISD::SRL_PARTS, ISD::ADDC, ISD::ADDE, ISD::SUBC, ISD::SUBE}) {
131 for (auto T : {MVT::i32, MVT::i64})
132 setOperationAction(Op, T, Expand);
133 if (Subtarget->hasSIMD128())
134 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
135 setOperationAction(Op, T, Expand);
136 }
137
138 if (Subtarget->hasNontrappingFPToInt())
139 for (auto Op : {ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT})
140 for (auto T : {MVT::i32, MVT::i64})
141 setOperationAction(Op, T, Custom);
142
143 // SIMD-specific configuration
144 if (Subtarget->hasSIMD128()) {
145 // Hoist bitcasts out of shuffles
146 setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
147
148 // Combine extends of extract_subvectors into widening ops
149 setTargetDAGCombine(ISD::SIGN_EXTEND);
150 setTargetDAGCombine(ISD::ZERO_EXTEND);
151
152 // Combine int_to_fp of extract_vectors and vice versa into conversions ops
153 setTargetDAGCombine(ISD::SINT_TO_FP);
154 setTargetDAGCombine(ISD::UINT_TO_FP);
155 setTargetDAGCombine(ISD::EXTRACT_SUBVECTOR);
156
157 // Combine concat of {s,u}int_to_fp_sat to i32x4.trunc_sat_f64x2_zero_{s,u}
158 setTargetDAGCombine(ISD::CONCAT_VECTORS);
159
160 // Support saturating add for i8x16 and i16x8
161 for (auto Op : {ISD::SADDSAT, ISD::UADDSAT})
162 for (auto T : {MVT::v16i8, MVT::v8i16})
163 setOperationAction(Op, T, Legal);
164
165 // Support integer abs
166 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
167 setOperationAction(ISD::ABS, T, Legal);
168
169 // Custom lower BUILD_VECTORs to minimize number of replace_lanes
170 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
171 MVT::v2f64})
172 setOperationAction(ISD::BUILD_VECTOR, T, Custom);
173
174 // We have custom shuffle lowering to expose the shuffle mask
175 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
176 MVT::v2f64})
177 setOperationAction(ISD::VECTOR_SHUFFLE, T, Custom);
178
179 // Custom lowering since wasm shifts must have a scalar shift amount
180 for (auto Op : {ISD::SHL, ISD::SRA, ISD::SRL})
181 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
182 setOperationAction(Op, T, Custom);
183
184 // Custom lower lane accesses to expand out variable indices
185 for (auto Op : {ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_VECTOR_ELT})
186 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
187 MVT::v2f64})
188 setOperationAction(Op, T, Custom);
189
190 // There is no i8x16.mul instruction
191 setOperationAction(ISD::MUL, MVT::v16i8, Expand);
192
193 // There is no vector conditional select instruction
194 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
195 MVT::v2f64})
196 setOperationAction(ISD::SELECT_CC, T, Expand);
197
198 // Expand integer operations supported for scalars but not SIMD
199 for (auto Op : {ISD::CTLZ, ISD::CTTZ, ISD::CTPOP, ISD::SDIV, ISD::UDIV,
200 ISD::SREM, ISD::UREM, ISD::ROTL, ISD::ROTR})
201 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
202 setOperationAction(Op, T, Expand);
203
204 // But we do have integer min and max operations
205 for (auto Op : {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
206 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
207 setOperationAction(Op, T, Legal);
208
209 // Expand float operations supported for scalars but not SIMD
210 for (auto Op : {ISD::FCOPYSIGN, ISD::FLOG, ISD::FLOG2, ISD::FLOG10,
211 ISD::FEXP, ISD::FEXP2, ISD::FRINT})
212 for (auto T : {MVT::v4f32, MVT::v2f64})
213 setOperationAction(Op, T, Expand);
214
215 // Unsigned comparison operations are unavailable for i64x2 vectors.
216 for (auto CC : {ISD::SETUGT, ISD::SETUGE, ISD::SETULT, ISD::SETULE})
217 setCondCodeAction(CC, MVT::v2i64, Custom);
218
219 // 64x2 conversions are not in the spec
220 for (auto Op :
221 {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT})
222 for (auto T : {MVT::v2i64, MVT::v2f64})
223 setOperationAction(Op, T, Expand);
224
225 // But saturating fp_to_int converstions are
226 for (auto Op : {ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT})
227 setOperationAction(Op, MVT::v4i32, Custom);
228 }
229
230 // As a special case, these operators use the type to mean the type to
231 // sign-extend from.
232 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
233 if (!Subtarget->hasSignExt()) {
234 // Sign extends are legal only when extending a vector extract
235 auto Action = Subtarget->hasSIMD128() ? Custom : Expand;
236 for (auto T : {MVT::i8, MVT::i16, MVT::i32})
237 setOperationAction(ISD::SIGN_EXTEND_INREG, T, Action);
238 }
239 for (auto T : MVT::integer_fixedlen_vector_valuetypes())
240 setOperationAction(ISD::SIGN_EXTEND_INREG, T, Expand);
241
242 // Dynamic stack allocation: use the default expansion.
243 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
244 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
245 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVTPtr, Expand);
246
247 setOperationAction(ISD::FrameIndex, MVT::i32, Custom);
248 setOperationAction(ISD::FrameIndex, MVT::i64, Custom);
249 setOperationAction(ISD::CopyToReg, MVT::Other, Custom);
250
251 // Expand these forms; we pattern-match the forms that we can handle in isel.
252 for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64})
253 for (auto Op : {ISD::BR_CC, ISD::SELECT_CC})
254 setOperationAction(Op, T, Expand);
255
256 // We have custom switch handling.
257 setOperationAction(ISD::BR_JT, MVT::Other, Custom);
258
259 // WebAssembly doesn't have:
260 // - Floating-point extending loads.
261 // - Floating-point truncating stores.
262 // - i1 extending loads.
263 // - truncating SIMD stores and most extending loads
264 setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
265 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
266 for (auto T : MVT::integer_valuetypes())
267 for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
268 setLoadExtAction(Ext, T, MVT::i1, Promote);
269 if (Subtarget->hasSIMD128()) {
270 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v4f32,
271 MVT::v2f64}) {
272 for (auto MemT : MVT::fixedlen_vector_valuetypes()) {
273 if (MVT(T) != MemT) {
274 setTruncStoreAction(T, MemT, Expand);
275 for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
276 setLoadExtAction(Ext, T, MemT, Expand);
277 }
278 }
279 }
280 // But some vector extending loads are legal
281 for (auto Ext : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}) {
282 setLoadExtAction(Ext, MVT::v8i16, MVT::v8i8, Legal);
283 setLoadExtAction(Ext, MVT::v4i32, MVT::v4i16, Legal);
284 setLoadExtAction(Ext, MVT::v2i64, MVT::v2i32, Legal);
285 }
286 // And some truncating stores are legal as well
287 setTruncStoreAction(MVT::v8i16, MVT::v8i8, Legal);
288 setTruncStoreAction(MVT::v4i32, MVT::v4i16, Legal);
289 }
290
291 // Don't do anything clever with build_pairs
292 setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
293
294 // Trap lowers to wasm unreachable
295 setOperationAction(ISD::TRAP, MVT::Other, Legal);
296 setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
297
298 // Exception handling intrinsics
299 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
300 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
301 setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
302
303 setMaxAtomicSizeInBitsSupported(64);
304
305 // Override the __gnu_f2h_ieee/__gnu_h2f_ieee names so that the f32 name is
306 // consistent with the f64 and f128 names.
307 setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
308 setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
309
310 // Define the emscripten name for return address helper.
311 // TODO: when implementing other Wasm backends, make this generic or only do
312 // this on emscripten depending on what they end up doing.
313 setLibcallName(RTLIB::RETURN_ADDRESS, "emscripten_return_address");
314
315 // Always convert switches to br_tables unless there is only one case, which
316 // is equivalent to a simple branch. This reduces code size for wasm, and we
317 // defer possible jump table optimizations to the VM.
318 setMinimumJumpTableEntries(2);
319}
320
321TargetLowering::AtomicExpansionKind
322WebAssemblyTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
323 // We have wasm instructions for these
324 switch (AI->getOperation()) {
325 case AtomicRMWInst::Add:
326 case AtomicRMWInst::Sub:
327 case AtomicRMWInst::And:
328 case AtomicRMWInst::Or:
329 case AtomicRMWInst::Xor:
330 case AtomicRMWInst::Xchg:
331 return AtomicExpansionKind::None;
332 default:
333 break;
334 }
335 return AtomicExpansionKind::CmpXChg;
336}
337
338FastISel *WebAssemblyTargetLowering::createFastISel(
339 FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo) const {
340 return WebAssembly::createFastISel(FuncInfo, LibInfo);
341}
342
343MVT WebAssemblyTargetLowering::getScalarShiftAmountTy(const DataLayout & /*DL*/,
344 EVT VT) const {
345 unsigned BitWidth = NextPowerOf2(VT.getSizeInBits() - 1);
346 if (BitWidth > 1 && BitWidth < 8)
347 BitWidth = 8;
348
349 if (BitWidth > 64) {
350 // The shift will be lowered to a libcall, and compiler-rt libcalls expect
351 // the count to be an i32.
352 BitWidth = 32;
353 assert(BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) &&(static_cast <bool> (BitWidth >= Log2_32_Ceil(VT.getSizeInBits
()) && "32-bit shift counts ought to be enough for anyone"
) ? void (0) : __assert_fail ("BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) && \"32-bit shift counts ought to be enough for anyone\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 354, __extension__ __PRETTY_FUNCTION__))
354 "32-bit shift counts ought to be enough for anyone")(static_cast <bool> (BitWidth >= Log2_32_Ceil(VT.getSizeInBits
()) && "32-bit shift counts ought to be enough for anyone"
) ? void (0) : __assert_fail ("BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) && \"32-bit shift counts ought to be enough for anyone\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 354, __extension__ __PRETTY_FUNCTION__))
;
355 }
356
357 MVT Result = MVT::getIntegerVT(BitWidth);
358 assert(Result != MVT::INVALID_SIMPLE_VALUE_TYPE &&(static_cast <bool> (Result != MVT::INVALID_SIMPLE_VALUE_TYPE
&& "Unable to represent scalar shift amount type") ?
void (0) : __assert_fail ("Result != MVT::INVALID_SIMPLE_VALUE_TYPE && \"Unable to represent scalar shift amount type\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 359, __extension__ __PRETTY_FUNCTION__))
359 "Unable to represent scalar shift amount type")(static_cast <bool> (Result != MVT::INVALID_SIMPLE_VALUE_TYPE
&& "Unable to represent scalar shift amount type") ?
void (0) : __assert_fail ("Result != MVT::INVALID_SIMPLE_VALUE_TYPE && \"Unable to represent scalar shift amount type\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 359, __extension__ __PRETTY_FUNCTION__))
;
360 return Result;
361}
362
363// Lower an fp-to-int conversion operator from the LLVM opcode, which has an
364// undefined result on invalid/overflow, to the WebAssembly opcode, which
365// traps on invalid/overflow.
366static MachineBasicBlock *LowerFPToInt(MachineInstr &MI, DebugLoc DL,
367 MachineBasicBlock *BB,
368 const TargetInstrInfo &TII,
369 bool IsUnsigned, bool Int64,
370 bool Float64, unsigned LoweredOpcode) {
371 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
372
373 Register OutReg = MI.getOperand(0).getReg();
374 Register InReg = MI.getOperand(1).getReg();
375
376 unsigned Abs = Float64 ? WebAssembly::ABS_F64 : WebAssembly::ABS_F32;
377 unsigned FConst = Float64 ? WebAssembly::CONST_F64 : WebAssembly::CONST_F32;
378 unsigned LT = Float64 ? WebAssembly::LT_F64 : WebAssembly::LT_F32;
379 unsigned GE = Float64 ? WebAssembly::GE_F64 : WebAssembly::GE_F32;
380 unsigned IConst = Int64 ? WebAssembly::CONST_I64 : WebAssembly::CONST_I32;
381 unsigned Eqz = WebAssembly::EQZ_I32;
382 unsigned And = WebAssembly::AND_I32;
383 int64_t Limit = Int64 ? INT64_MIN(-9223372036854775807L -1) : INT32_MIN(-2147483647-1);
384 int64_t Substitute = IsUnsigned ? 0 : Limit;
385 double CmpVal = IsUnsigned ? -(double)Limit * 2.0 : -(double)Limit;
386 auto &Context = BB->getParent()->getFunction().getContext();
387 Type *Ty = Float64 ? Type::getDoubleTy(Context) : Type::getFloatTy(Context);
388
389 const BasicBlock *LLVMBB = BB->getBasicBlock();
390 MachineFunction *F = BB->getParent();
391 MachineBasicBlock *TrueMBB = F->CreateMachineBasicBlock(LLVMBB);
392 MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVMBB);
393 MachineBasicBlock *DoneMBB = F->CreateMachineBasicBlock(LLVMBB);
394
395 MachineFunction::iterator It = ++BB->getIterator();
396 F->insert(It, FalseMBB);
397 F->insert(It, TrueMBB);
398 F->insert(It, DoneMBB);
399
400 // Transfer the remainder of BB and its successor edges to DoneMBB.
401 DoneMBB->splice(DoneMBB->begin(), BB, std::next(MI.getIterator()), BB->end());
402 DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
403
404 BB->addSuccessor(TrueMBB);
405 BB->addSuccessor(FalseMBB);
406 TrueMBB->addSuccessor(DoneMBB);
407 FalseMBB->addSuccessor(DoneMBB);
408
409 unsigned Tmp0, Tmp1, CmpReg, EqzReg, FalseReg, TrueReg;
410 Tmp0 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
411 Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
412 CmpReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
413 EqzReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
414 FalseReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
415 TrueReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
416
417 MI.eraseFromParent();
418 // For signed numbers, we can do a single comparison to determine whether
419 // fabs(x) is within range.
420 if (IsUnsigned) {
421 Tmp0 = InReg;
422 } else {
423 BuildMI(BB, DL, TII.get(Abs), Tmp0).addReg(InReg);
424 }
425 BuildMI(BB, DL, TII.get(FConst), Tmp1)
426 .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, CmpVal)));
427 BuildMI(BB, DL, TII.get(LT), CmpReg).addReg(Tmp0).addReg(Tmp1);
428
429 // For unsigned numbers, we have to do a separate comparison with zero.
430 if (IsUnsigned) {
431 Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
432 Register SecondCmpReg =
433 MRI.createVirtualRegister(&WebAssembly::I32RegClass);
434 Register AndReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
435 BuildMI(BB, DL, TII.get(FConst), Tmp1)
436 .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, 0.0)));
437 BuildMI(BB, DL, TII.get(GE), SecondCmpReg).addReg(Tmp0).addReg(Tmp1);
438 BuildMI(BB, DL, TII.get(And), AndReg).addReg(CmpReg).addReg(SecondCmpReg);
439 CmpReg = AndReg;
440 }
441
442 BuildMI(BB, DL, TII.get(Eqz), EqzReg).addReg(CmpReg);
443
444 // Create the CFG diamond to select between doing the conversion or using
445 // the substitute value.
446 BuildMI(BB, DL, TII.get(WebAssembly::BR_IF)).addMBB(TrueMBB).addReg(EqzReg);
447 BuildMI(FalseMBB, DL, TII.get(LoweredOpcode), FalseReg).addReg(InReg);
448 BuildMI(FalseMBB, DL, TII.get(WebAssembly::BR)).addMBB(DoneMBB);
449 BuildMI(TrueMBB, DL, TII.get(IConst), TrueReg).addImm(Substitute);
450 BuildMI(*DoneMBB, DoneMBB->begin(), DL, TII.get(TargetOpcode::PHI), OutReg)
451 .addReg(FalseReg)
452 .addMBB(FalseMBB)
453 .addReg(TrueReg)
454 .addMBB(TrueMBB);
455
456 return DoneMBB;
457}
458
459static MachineBasicBlock *
460LowerCallResults(MachineInstr &CallResults, DebugLoc DL, MachineBasicBlock *BB,
461 const WebAssemblySubtarget *Subtarget,
462 const TargetInstrInfo &TII) {
463 MachineInstr &CallParams = *CallResults.getPrevNode();
464 assert(CallParams.getOpcode() == WebAssembly::CALL_PARAMS)(static_cast <bool> (CallParams.getOpcode() == WebAssembly
::CALL_PARAMS) ? void (0) : __assert_fail ("CallParams.getOpcode() == WebAssembly::CALL_PARAMS"
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 464, __extension__ __PRETTY_FUNCTION__))
;
465 assert(CallResults.getOpcode() == WebAssembly::CALL_RESULTS ||(static_cast <bool> (CallResults.getOpcode() == WebAssembly
::CALL_RESULTS || CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS
) ? void (0) : __assert_fail ("CallResults.getOpcode() == WebAssembly::CALL_RESULTS || CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS"
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 466, __extension__ __PRETTY_FUNCTION__))
466 CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS)(static_cast <bool> (CallResults.getOpcode() == WebAssembly
::CALL_RESULTS || CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS
) ? void (0) : __assert_fail ("CallResults.getOpcode() == WebAssembly::CALL_RESULTS || CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS"
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 466, __extension__ __PRETTY_FUNCTION__))
;
467
468 bool IsIndirect = CallParams.getOperand(0).isReg();
469 bool IsRetCall = CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS;
470
471 unsigned CallOp;
472 if (IsIndirect && IsRetCall) {
473 CallOp = WebAssembly::RET_CALL_INDIRECT;
474 } else if (IsIndirect) {
475 CallOp = WebAssembly::CALL_INDIRECT;
476 } else if (IsRetCall) {
477 CallOp = WebAssembly::RET_CALL;
478 } else {
479 CallOp = WebAssembly::CALL;
480 }
481
482 MachineFunction &MF = *BB->getParent();
483 const MCInstrDesc &MCID = TII.get(CallOp);
484 MachineInstrBuilder MIB(MF, MF.CreateMachineInstr(MCID, DL));
485
486 // See if we must truncate the function pointer.
487 // CALL_INDIRECT takes an i32, but in wasm64 we represent function pointers
488 // as 64-bit for uniformity with other pointer types.
489 // See also: WebAssemblyFastISel::selectCall
490 if (IsIndirect && MF.getSubtarget<WebAssemblySubtarget>().hasAddr64()) {
491 Register Reg32 =
492 MF.getRegInfo().createVirtualRegister(&WebAssembly::I32RegClass);
493 auto &FnPtr = CallParams.getOperand(0);
494 BuildMI(*BB, CallResults.getIterator(), DL,
495 TII.get(WebAssembly::I32_WRAP_I64), Reg32)
496 .addReg(FnPtr.getReg());
497 FnPtr.setReg(Reg32);
498 }
499
500 // Move the function pointer to the end of the arguments for indirect calls
501 if (IsIndirect) {
502 auto FnPtr = CallParams.getOperand(0);
503 CallParams.RemoveOperand(0);
504 CallParams.addOperand(FnPtr);
505 }
506
507 for (auto Def : CallResults.defs())
508 MIB.add(Def);
509
510 if (IsIndirect) {
511 // Placeholder for the type index.
512 MIB.addImm(0);
513 // The table into which this call_indirect indexes.
514 MCSymbolWasm *Table =
515 WebAssembly::getOrCreateFunctionTableSymbol(MF.getContext(), Subtarget);
516 if (Subtarget->hasReferenceTypes()) {
517 MIB.addSym(Table);
518 } else {
519 // For the MVP there is at most one table whose number is 0, but we can't
520 // write a table symbol or issue relocations. Instead we just ensure the
521 // table is live and write a zero.
522 Table->setNoStrip();
523 MIB.addImm(0);
524 }
525 }
526
527 for (auto Use : CallParams.uses())
528 MIB.add(Use);
529
530 BB->insert(CallResults.getIterator(), MIB);
531 CallParams.eraseFromParent();
532 CallResults.eraseFromParent();
533
534 return BB;
535}
536
537MachineBasicBlock *WebAssemblyTargetLowering::EmitInstrWithCustomInserter(
538 MachineInstr &MI, MachineBasicBlock *BB) const {
539 const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
540 DebugLoc DL = MI.getDebugLoc();
541
542 switch (MI.getOpcode()) {
543 default:
544 llvm_unreachable("Unexpected instr type to insert")::llvm::llvm_unreachable_internal("Unexpected instr type to insert"
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 544)
;
545 case WebAssembly::FP_TO_SINT_I32_F32:
546 return LowerFPToInt(MI, DL, BB, TII, false, false, false,
547 WebAssembly::I32_TRUNC_S_F32);
548 case WebAssembly::FP_TO_UINT_I32_F32:
549 return LowerFPToInt(MI, DL, BB, TII, true, false, false,
550 WebAssembly::I32_TRUNC_U_F32);
551 case WebAssembly::FP_TO_SINT_I64_F32:
552 return LowerFPToInt(MI, DL, BB, TII, false, true, false,
553 WebAssembly::I64_TRUNC_S_F32);
554 case WebAssembly::FP_TO_UINT_I64_F32:
555 return LowerFPToInt(MI, DL, BB, TII, true, true, false,
556 WebAssembly::I64_TRUNC_U_F32);
557 case WebAssembly::FP_TO_SINT_I32_F64:
558 return LowerFPToInt(MI, DL, BB, TII, false, false, true,
559 WebAssembly::I32_TRUNC_S_F64);
560 case WebAssembly::FP_TO_UINT_I32_F64:
561 return LowerFPToInt(MI, DL, BB, TII, true, false, true,
562 WebAssembly::I32_TRUNC_U_F64);
563 case WebAssembly::FP_TO_SINT_I64_F64:
564 return LowerFPToInt(MI, DL, BB, TII, false, true, true,
565 WebAssembly::I64_TRUNC_S_F64);
566 case WebAssembly::FP_TO_UINT_I64_F64:
567 return LowerFPToInt(MI, DL, BB, TII, true, true, true,
568 WebAssembly::I64_TRUNC_U_F64);
569 case WebAssembly::CALL_RESULTS:
570 case WebAssembly::RET_CALL_RESULTS:
571 return LowerCallResults(MI, DL, BB, Subtarget, TII);
572 }
573}
574
575const char *
576WebAssemblyTargetLowering::getTargetNodeName(unsigned Opcode) const {
577 switch (static_cast<WebAssemblyISD::NodeType>(Opcode)) {
578 case WebAssemblyISD::FIRST_NUMBER:
579 case WebAssemblyISD::FIRST_MEM_OPCODE:
580 break;
581#define HANDLE_NODETYPE(NODE) \
582 case WebAssemblyISD::NODE: \
583 return "WebAssemblyISD::" #NODE;
584#define HANDLE_MEM_NODETYPE(NODE) HANDLE_NODETYPE(NODE)
585#include "WebAssemblyISD.def"
586#undef HANDLE_MEM_NODETYPE
587#undef HANDLE_NODETYPE
588 }
589 return nullptr;
590}
591
/// Map an inline-asm register constraint letter to a WebAssembly register
/// class. Only the generic 'r' constraint is handled specially; everything
/// else falls through to the target-independent implementation.
std::pair<unsigned, const TargetRegisterClass *>
WebAssemblyTargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
  // First, see if this is a constraint that directly corresponds to a
  // WebAssembly register class.
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      assert(VT != MVT::iPTR && "Pointer MVT not expected here");
      // 128-bit vectors get the SIMD register class when simd128 is enabled.
      if (Subtarget->hasSIMD128() && VT.isVector()) {
        if (VT.getSizeInBits() == 128)
          return std::make_pair(0U, &WebAssembly::V128RegClass);
      }
      // Scalar integers: anything up to 32 bits uses I32, up to 64 uses I64.
      if (VT.isInteger() && !VT.isVector()) {
        if (VT.getSizeInBits() <= 32)
          return std::make_pair(0U, &WebAssembly::I32RegClass);
        if (VT.getSizeInBits() <= 64)
          return std::make_pair(0U, &WebAssembly::I64RegClass);
      }
      // Scalar floats: only exact 32- and 64-bit widths have register classes.
      if (VT.isFloatingPoint() && !VT.isVector()) {
        switch (VT.getSizeInBits()) {
        case 32:
          return std::make_pair(0U, &WebAssembly::F32RegClass);
        case 64:
          return std::make_pair(0U, &WebAssembly::F64RegClass);
        default:
          break;
        }
      }
      break;
    default:
      break;
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}
629
630bool WebAssemblyTargetLowering::isCheapToSpeculateCttz() const {
631 // Assume ctz is a relatively cheap operation.
632 return true;
633}
634
635bool WebAssemblyTargetLowering::isCheapToSpeculateCtlz() const {
636 // Assume clz is a relatively cheap operation.
637 return true;
638}
639
640bool WebAssemblyTargetLowering::isLegalAddressingMode(const DataLayout &DL,
641 const AddrMode &AM,
642 Type *Ty, unsigned AS,
643 Instruction *I) const {
644 // WebAssembly offsets are added as unsigned without wrapping. The
645 // isLegalAddressingMode gives us no way to determine if wrapping could be
646 // happening, so we approximate this by accepting only non-negative offsets.
647 if (AM.BaseOffs < 0)
648 return false;
649
650 // WebAssembly has no scale register operands.
651 if (AM.Scale != 0)
652 return false;
653
654 // Everything else is legal.
655 return true;
656}
657
658bool WebAssemblyTargetLowering::allowsMisalignedMemoryAccesses(
659 EVT /*VT*/, unsigned /*AddrSpace*/, Align /*Align*/,
660 MachineMemOperand::Flags /*Flags*/, bool *Fast) const {
661 // WebAssembly supports unaligned accesses, though it should be declared
662 // with the p2align attribute on loads and stores which do so, and there
663 // may be a performance impact. We tell LLVM they're "fast" because
664 // for the kinds of things that LLVM uses this for (merging adjacent stores
665 // of constants, etc.), WebAssembly implementations will either want the
666 // unaligned access or they'll split anyway.
667 if (Fast)
668 *Fast = true;
669 return true;
670}
671
672bool WebAssemblyTargetLowering::isIntDivCheap(EVT VT,
673 AttributeList Attr) const {
674 // The current thinking is that wasm engines will perform this optimization,
675 // so we can save on code size.
676 return true;
677}
678
679bool WebAssemblyTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
680 EVT ExtT = ExtVal.getValueType();
681 EVT MemT = cast<LoadSDNode>(ExtVal->getOperand(0))->getValueType(0);
682 return (ExtT == MVT::v8i16 && MemT == MVT::v8i8) ||
683 (ExtT == MVT::v4i32 && MemT == MVT::v4i16) ||
684 (ExtT == MVT::v2i64 && MemT == MVT::v2i32);
685}
686
687EVT WebAssemblyTargetLowering::getSetCCResultType(const DataLayout &DL,
688 LLVMContext &C,
689 EVT VT) const {
690 if (VT.isVector())
691 return VT.changeVectorElementTypeToInteger();
692
693 // So far, all branch instructions in Wasm take an I32 condition.
694 // The default TargetLowering::getSetCCResultType returns the pointer size,
695 // which would be useful to reduce instruction counts when testing
696 // against 64-bit pointers/values if at some point Wasm supports that.
697 return EVT::getIntegerVT(C, 32);
698}
699
/// Describe the memory behavior of wasm target intrinsics so the backend can
/// build a correct MachineMemOperand for them. Returns true iff the intrinsic
/// accesses memory (and Info was filled in).
bool WebAssemblyTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                                   const CallInst &I,
                                                   MachineFunction &MF,
                                                   unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::wasm_memory_atomic_notify:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(4);
    // atomic.notify instruction does not really load the memory specified with
    // this argument, but MachineMemOperand should either be load or store, so
    // we set this to a load.
    // FIXME Volatile isn't really correct, but currently all LLVM atomic
    // instructions are treated as volatiles in the backend, so we should be
    // consistent. The same applies for wasm_atomic_wait intrinsics too.
    Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
    return true;
  case Intrinsic::wasm_memory_atomic_wait32:
    // 32-bit atomic wait: reads an i32 at a 4-byte-aligned address.
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(4);
    Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
    return true;
  case Intrinsic::wasm_memory_atomic_wait64:
    // 64-bit atomic wait: reads an i64 at an 8-byte-aligned address.
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i64;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(8);
    Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
    return true;
  case Intrinsic::wasm_load32_zero:
  case Intrinsic::wasm_load64_zero:
    // Zero-extending scalar-to-vector loads; byte-aligned (Align(1)).
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = Intrinsic == Intrinsic::wasm_load32_zero ? MVT::i32 : MVT::i64;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(1);
    Info.flags = MachineMemOperand::MOLoad;
    return true;
  case Intrinsic::wasm_load8_lane:
  case Intrinsic::wasm_load16_lane:
  case Intrinsic::wasm_load32_lane:
  case Intrinsic::wasm_load64_lane:
  case Intrinsic::wasm_store8_lane:
  case Intrinsic::wasm_store16_lane:
  case Intrinsic::wasm_store32_lane:
  case Intrinsic::wasm_store64_lane: {
    // Lane loads/stores access a single scalar lane of the given width.
    MVT MemVT;
    switch (Intrinsic) {
    case Intrinsic::wasm_load8_lane:
    case Intrinsic::wasm_store8_lane:
      MemVT = MVT::i8;
      break;
    case Intrinsic::wasm_load16_lane:
    case Intrinsic::wasm_store16_lane:
      MemVT = MVT::i16;
      break;
    case Intrinsic::wasm_load32_lane:
    case Intrinsic::wasm_store32_lane:
      MemVT = MVT::i32;
      break;
    case Intrinsic::wasm_load64_lane:
    case Intrinsic::wasm_store64_lane:
      MemVT = MVT::i64;
      break;
    default:
      llvm_unreachable("unexpected intrinsic");
    }
    // Loads chain (W_CHAIN); stores have no result (VOID).
    if (Intrinsic == Intrinsic::wasm_load8_lane ||
        Intrinsic == Intrinsic::wasm_load16_lane ||
        Intrinsic == Intrinsic::wasm_load32_lane ||
        Intrinsic == Intrinsic::wasm_load64_lane) {
      Info.opc = ISD::INTRINSIC_W_CHAIN;
      Info.flags = MachineMemOperand::MOLoad;
    } else {
      Info.opc = ISD::INTRINSIC_VOID;
      Info.flags = MachineMemOperand::MOStore;
    }
    Info.ptrVal = I.getArgOperand(0);
    Info.memVT = MemVT;
    Info.offset = 0;
    Info.align = Align(1);
    return true;
  }
  default:
    // Not a memory-accessing wasm intrinsic.
    return false;
  }
}
793
794//===----------------------------------------------------------------------===//
795// WebAssembly Lowering private implementation.
796//===----------------------------------------------------------------------===//
797
798//===----------------------------------------------------------------------===//
799// Lowering Code
800//===----------------------------------------------------------------------===//
801
802static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg) {
803 MachineFunction &MF = DAG.getMachineFunction();
804 DAG.getContext()->diagnose(
805 DiagnosticInfoUnsupported(MF.getFunction(), Msg, DL.getDebugLoc()));
806}
807
808// Test whether the given calling convention is supported.
809static bool callingConvSupported(CallingConv::ID CallConv) {
810 // We currently support the language-independent target-independent
811 // conventions. We don't yet have a way to annotate calls with properties like
812 // "cold", and we don't have any call-clobbered registers, so these are mostly
813 // all handled the same.
814 return CallConv == CallingConv::C || CallConv == CallingConv::Fast ||
815 CallConv == CallingConv::Cold ||
816 CallConv == CallingConv::PreserveMost ||
817 CallConv == CallingConv::PreserveAll ||
818 CallConv == CallingConv::CXX_FAST_TLS ||
819 CallConv == CallingConv::WASM_EmscriptenInvoke ||
820 CallConv == CallingConv::Swift;
821}
822
/// Lower an outgoing call to the WebAssemblyISD::CALL / RET_CALL node,
/// diagnosing unsupported argument kinds, copying byval arguments to the
/// stack, building the varargs buffer, and collecting the call results into
/// InVals.
SDValue
WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                     SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc DL = CLI.DL;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  MachineFunction &MF = DAG.getMachineFunction();
  auto Layout = MF.getDataLayout();

  CallingConv::ID CallConv = CLI.CallConv;
  if (!callingConvSupported(CallConv))
    fail(DL, DAG,
         "WebAssembly doesn't support language-specific or target-specific "
         "calling conventions yet");
  if (CLI.IsPatchPoint)
    fail(DL, DAG, "WebAssembly doesn't support patch point yet");

  if (CLI.IsTailCall) {
    // Demote the tail call to a normal call when a precondition fails; this
    // is only a hard error for calls explicitly marked 'musttail'.
    auto NoTail = [&](const char *Msg) {
      if (CLI.CB && CLI.CB->isMustTailCall())
        fail(DL, DAG, Msg);
      CLI.IsTailCall = false;
    };

    if (!Subtarget->hasTailCall())
      NoTail("WebAssembly 'tail-call' feature not enabled");

    // Varargs calls cannot be tail calls because the buffer is on the stack
    if (CLI.IsVarArg)
      NoTail("WebAssembly does not support varargs tail calls");

    // Do not tail call unless caller and callee return types match
    const Function &F = MF.getFunction();
    const TargetMachine &TM = getTargetMachine();
    Type *RetTy = F.getReturnType();
    SmallVector<MVT, 4> CallerRetTys;
    SmallVector<MVT, 4> CalleeRetTys;
    computeLegalValueVTs(F, TM, RetTy, CallerRetTys);
    computeLegalValueVTs(F, TM, CLI.RetTy, CalleeRetTys);
    bool TypesMatch = CallerRetTys.size() == CalleeRetTys.size() &&
                      std::equal(CallerRetTys.begin(), CallerRetTys.end(),
                                 CalleeRetTys.begin());
    if (!TypesMatch)
      NoTail("WebAssembly tail call requires caller and callee return types to "
             "match");

    // If pointers to local stack values are passed, we cannot tail call
    if (CLI.CB) {
      for (auto &Arg : CLI.CB->args()) {
        Value *Val = Arg.get();
        // Trace the value back through pointer operations
        while (true) {
          Value *Src = Val->stripPointerCastsAndAliases();
          if (auto *GEP = dyn_cast<GetElementPtrInst>(Src))
            Src = GEP->getPointerOperand();
          if (Val == Src)
            break;
          Val = Src;
        }
        if (isa<AllocaInst>(Val)) {
          NoTail(
              "WebAssembly does not support tail calling with stack arguments");
          break;
        }
      }
    }
  }

  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;

  // The generic code may have added an sret argument. If we're lowering an
  // invoke function, the ABI requires that the function pointer be the first
  // argument, so we may have to swap the arguments.
  if (CallConv == CallingConv::WASM_EmscriptenInvoke && Outs.size() >= 2 &&
      Outs[0].Flags.isSRet()) {
    std::swap(Outs[0], Outs[1]);
    std::swap(OutVals[0], OutVals[1]);
  }

  bool HasSwiftSelfArg = false;
  bool HasSwiftErrorArg = false;
  unsigned NumFixedArgs = 0;
  for (unsigned I = 0; I < Outs.size(); ++I) {
    const ISD::OutputArg &Out = Outs[I];
    SDValue &OutVal = OutVals[I];
    HasSwiftSelfArg |= Out.Flags.isSwiftSelf();
    HasSwiftErrorArg |= Out.Flags.isSwiftError();
    if (Out.Flags.isNest())
      fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
    if (Out.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
    if (Out.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
    if (Out.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
    // byval arguments are passed as a pointer to a caller-allocated copy;
    // materialize that copy on the stack and pass its frame index instead.
    if (Out.Flags.isByVal() && Out.Flags.getByValSize() != 0) {
      auto &MFI = MF.getFrameInfo();
      int FI = MFI.CreateStackObject(Out.Flags.getByValSize(),
                                     Out.Flags.getNonZeroByValAlign(),
                                     /*isSS=*/false);
      SDValue SizeNode =
          DAG.getConstant(Out.Flags.getByValSize(), DL, MVT::i32);
      SDValue FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
      Chain = DAG.getMemcpy(
          Chain, DL, FINode, OutVal, SizeNode, Out.Flags.getNonZeroByValAlign(),
          /*isVolatile*/ false, /*AlwaysInline=*/false,
          /*isTailCall*/ false, MachinePointerInfo(), MachinePointerInfo());
      OutVal = FINode;
    }
    // Count the number of fixed args *after* legalization.
    NumFixedArgs += Out.IsFixed;
  }

  bool IsVarArg = CLI.IsVarArg;
  auto PtrVT = getPointerTy(Layout);

  // For swiftcc, emit additional swiftself and swifterror arguments
  // if there aren't. These additional arguments are also added for callee
  // signature They are necessary to match callee and caller signature for
  // indirect call.
  if (CallConv == CallingConv::Swift) {
    if (!HasSwiftSelfArg) {
      NumFixedArgs++;
      ISD::OutputArg Arg;
      Arg.Flags.setSwiftSelf();
      CLI.Outs.push_back(Arg);
      SDValue ArgVal = DAG.getUNDEF(PtrVT);
      CLI.OutVals.push_back(ArgVal);
    }
    if (!HasSwiftErrorArg) {
      NumFixedArgs++;
      ISD::OutputArg Arg;
      Arg.Flags.setSwiftError();
      CLI.Outs.push_back(Arg);
      SDValue ArgVal = DAG.getUNDEF(PtrVT);
      CLI.OutVals.push_back(ArgVal);
    }
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());

  if (IsVarArg) {
    // Outgoing non-fixed arguments are placed in a buffer. First
    // compute their offsets and the total amount of buffer space needed.
    for (unsigned I = NumFixedArgs; I < Outs.size(); ++I) {
      const ISD::OutputArg &Out = Outs[I];
      SDValue &Arg = OutVals[I];
      EVT VT = Arg.getValueType();
      assert(VT != MVT::iPTR && "Legalized args should be concrete");
      Type *Ty = VT.getTypeForEVT(*DAG.getContext());
      Align Alignment =
          std::max(Out.Flags.getNonZeroOrigAlign(), Layout.getABITypeAlign(Ty));
      unsigned Offset =
          CCInfo.AllocateStack(Layout.getTypeAllocSize(Ty), Alignment);
      CCInfo.addLoc(CCValAssign::getMem(ArgLocs.size(), VT.getSimpleVT(),
                                        Offset, VT.getSimpleVT(),
                                        CCValAssign::Full));
    }
  }

  unsigned NumBytes = CCInfo.getAlignedCallFrameSize();

  SDValue FINode;
  if (IsVarArg && NumBytes) {
    // For non-fixed arguments, next emit stores to store the argument values
    // to the stack buffer at the offsets computed above.
    int FI = MF.getFrameInfo().CreateStackObject(NumBytes,
                                                 Layout.getStackAlignment(),
                                                 /*isSS=*/false);
    unsigned ValNo = 0;
    SmallVector<SDValue, 8> Chains;
    for (SDValue Arg : drop_begin(OutVals, NumFixedArgs)) {
      assert(ArgLocs[ValNo].getValNo() == ValNo &&
             "ArgLocs should remain in order and only hold varargs args");
      unsigned Offset = ArgLocs[ValNo++].getLocMemOffset();
      FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
      SDValue Add = DAG.getNode(ISD::ADD, DL, PtrVT, FINode,
                                DAG.getConstant(Offset, DL, PtrVT));
      Chains.push_back(
          DAG.getStore(Chain, DL, Arg, Add,
                       MachinePointerInfo::getFixedStack(MF, FI, Offset)));
    }
    if (!Chains.empty())
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
  } else if (IsVarArg) {
    // Varargs call with an empty buffer: pass a null buffer pointer.
    FINode = DAG.getIntPtrConstant(0, DL);
  }

  if (Callee->getOpcode() == ISD::GlobalAddress) {
    // If the callee is a GlobalAddress node (quite common, every direct call
    // is) turn it into a TargetGlobalAddress node so that LowerGlobalAddress
    // doesn't at MO_GOT which is not needed for direct calls.
    GlobalAddressSDNode* GA = cast<GlobalAddressSDNode>(Callee);
    Callee = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
                                        getPointerTy(DAG.getDataLayout()),
                                        GA->getOffset());
    Callee = DAG.getNode(WebAssemblyISD::Wrapper, DL,
                         getPointerTy(DAG.getDataLayout()), Callee);
  }

  // Compute the operands for the CALLn node.
  SmallVector<SDValue, 16> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add all fixed arguments. Note that for non-varargs calls, NumFixedArgs
  // isn't reliable.
  Ops.append(OutVals.begin(),
             IsVarArg ? OutVals.begin() + NumFixedArgs : OutVals.end());
  // Add a pointer to the vararg buffer.
  if (IsVarArg)
    Ops.push_back(FINode);

  SmallVector<EVT, 8> InTys;
  for (const auto &In : Ins) {
    assert(!In.Flags.isByVal() && "byval is not valid for return values");
    assert(!In.Flags.isNest() && "nest is not valid for return values");
    if (In.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca return values");
    if (In.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs return values");
    if (In.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG,
           "WebAssembly hasn't implemented cons regs last return values");
    // Ignore In.getNonZeroOrigAlign() because all our arguments are passed in
    // registers.
    InTys.push_back(In.VT);
  }

  if (CLI.IsTailCall) {
    // ret_calls do not return values to the current frame
    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
    return DAG.getNode(WebAssemblyISD::RET_CALL, DL, NodeTys, Ops);
  }

  InTys.push_back(MVT::Other);
  SDVTList InTyList = DAG.getVTList(InTys);
  SDValue Res = DAG.getNode(WebAssemblyISD::CALL, DL, InTyList, Ops);

  for (size_t I = 0; I < Ins.size(); ++I)
    InVals.push_back(Res.getValue(I));

  // Return the chain
  return Res.getValue(Ins.size());
}
1073
1074bool WebAssemblyTargetLowering::CanLowerReturn(
1075 CallingConv::ID /*CallConv*/, MachineFunction & /*MF*/, bool /*IsVarArg*/,
1076 const SmallVectorImpl<ISD::OutputArg> &Outs,
1077 LLVMContext & /*Context*/) const {
1078 // WebAssembly can only handle returning tuples with multivalue enabled
1079 return Subtarget->hasMultivalue() || Outs.size() <= 1;
1080}
1081
/// Lower a function return to a WebAssemblyISD::RETURN node carrying the
/// return values, diagnosing unsupported result flags.
SDValue WebAssemblyTargetLowering::LowerReturn(
    SDValue Chain, CallingConv::ID CallConv, bool /*IsVarArg*/,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
    SelectionDAG &DAG) const {
  assert((Subtarget->hasMultivalue() || Outs.size() <= 1) &&
         "MVP WebAssembly can only return up to one value");
  if (!callingConvSupported(CallConv))
    fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");

  // RETURN takes the chain followed by all return values as operands.
  SmallVector<SDValue, 4> RetOps(1, Chain);
  RetOps.append(OutVals.begin(), OutVals.end());
  Chain = DAG.getNode(WebAssemblyISD::RETURN, DL, MVT::Other, RetOps);

  // Record the number and types of the return values.
  for (const ISD::OutputArg &Out : Outs) {
    assert(!Out.Flags.isByVal() && "byval is not valid for return values");
    assert(!Out.Flags.isNest() && "nest is not valid for return values");
    assert(Out.IsFixed && "non-fixed return value is not valid");
    if (Out.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca results");
    if (Out.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs results");
    if (Out.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last results");
  }

  return Chain;
}
1111
/// Lower incoming formal arguments: each argument becomes a
/// WebAssemblyISD::ARGUMENT node (or UNDEF if unused), and the argument and
/// result types are recorded in the WebAssemblyFunctionInfo.
SDValue WebAssemblyTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  if (!callingConvSupported(CallConv))
    fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");

  MachineFunction &MF = DAG.getMachineFunction();
  auto *MFI = MF.getInfo<WebAssemblyFunctionInfo>();

  // Set up the incoming ARGUMENTS value, which serves to represent the liveness
  // of the incoming values before they're represented by virtual registers.
  MF.getRegInfo().addLiveIn(WebAssembly::ARGUMENTS);

  bool HasSwiftErrorArg = false;
  bool HasSwiftSelfArg = false;
  for (const ISD::InputArg &In : Ins) {
    HasSwiftSelfArg |= In.Flags.isSwiftSelf();
    HasSwiftErrorArg |= In.Flags.isSwiftError();
    if (In.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
    if (In.Flags.isNest())
      fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
    if (In.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
    if (In.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
    // Ignore In.getNonZeroOrigAlign() because all our arguments are passed in
    // registers.
    InVals.push_back(In.Used ? DAG.getNode(WebAssemblyISD::ARGUMENT, DL, In.VT,
                                           DAG.getTargetConstant(InVals.size(),
                                                                 DL, MVT::i32))
                             : DAG.getUNDEF(In.VT));

    // Record the number and types of arguments.
    MFI->addParam(In.VT);
  }

  // For swiftcc, emit additional swiftself and swifterror arguments
  // if there aren't. These additional arguments are also added for callee
  // signature They are necessary to match callee and caller signature for
  // indirect call.
  auto PtrVT = getPointerTy(MF.getDataLayout());
  if (CallConv == CallingConv::Swift) {
    if (!HasSwiftSelfArg) {
      MFI->addParam(PtrVT);
    }
    if (!HasSwiftErrorArg) {
      MFI->addParam(PtrVT);
    }
  }
  // Varargs are copied into a buffer allocated by the caller, and a pointer to
  // the buffer is passed as an argument.
  if (IsVarArg) {
    MVT PtrVT = getPointerTy(MF.getDataLayout());
    Register VarargVreg =
        MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrVT));
    MFI->setVarargBufferVreg(VarargVreg);
    Chain = DAG.getCopyToReg(
        Chain, DL, VarargVreg,
        DAG.getNode(WebAssemblyISD::ARGUMENT, DL, PtrVT,
                    DAG.getTargetConstant(Ins.size(), DL, MVT::i32)));
    MFI->addParam(PtrVT);
  }

  // Record the number and types of arguments and results.
  SmallVector<MVT, 4> Params;
  SmallVector<MVT, 4> Results;
  computeSignatureVTs(MF.getFunction().getFunctionType(), &MF.getFunction(),
                      MF.getFunction(), DAG.getTarget(), Params, Results);
  for (MVT VT : Results)
    MFI->addResult(VT);
  // TODO: Use signatures in WebAssemblyMachineFunctionInfo too and unify
  // the param logic here with ComputeSignatureVTs
  assert(MFI->getParams().size() == Params.size() &&
         std::equal(MFI->getParams().begin(), MFI->getParams().end(),
                    Params.begin()));

  return Chain;
}
1192
/// Replace the results of a custom-lowered node with illegal result types.
/// Leaving Results empty signals that the node should not be custom lowered.
void WebAssemblyTargetLowering::ReplaceNodeResults(
    SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  case ISD::SIGN_EXTEND_INREG:
    // Do not add any results, signifying that N should not be custom lowered
    // after all. This happens because simd128 turns on custom lowering for
    // SIGN_EXTEND_INREG, but for non-vector sign extends the result might be an
    // illegal type.
    break;
  default:
    llvm_unreachable(
        "ReplaceNodeResults not implemented for this op for WebAssembly!");
  }
}
1207
1208//===----------------------------------------------------------------------===//
1209// Custom lowering hooks.
1210//===----------------------------------------------------------------------===//
1211
/// Dispatch custom lowering of an operation to the per-opcode handler.
/// Any opcode not listed here was never marked Custom and is unreachable.
SDValue WebAssemblyTargetLowering::LowerOperation(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDLoc DL(Op);
  switch (Op.getOpcode()) {
  default:
    llvm_unreachable("unimplemented operation lowering");
    return SDValue();
  case ISD::FrameIndex:
    return LowerFrameIndex(Op, DAG);
  case ISD::GlobalAddress:
    return LowerGlobalAddress(Op, DAG);
  case ISD::GlobalTLSAddress:
    return LowerGlobalTLSAddress(Op, DAG);
  case ISD::ExternalSymbol:
    return LowerExternalSymbol(Op, DAG);
  case ISD::JumpTable:
    return LowerJumpTable(Op, DAG);
  case ISD::BR_JT:
    return LowerBR_JT(Op, DAG);
  case ISD::VASTART:
    return LowerVASTART(Op, DAG);
  case ISD::BlockAddress:
  case ISD::BRIND:
    // Indirect branches / block addresses are not supported; diagnose and
    // return no value.
    fail(DL, DAG, "WebAssembly hasn't implemented computed gotos");
    return SDValue();
  case ISD::RETURNADDR:
    return LowerRETURNADDR(Op, DAG);
  case ISD::FRAMEADDR:
    return LowerFRAMEADDR(Op, DAG);
  case ISD::CopyToReg:
    return LowerCopyToReg(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT:
  case ISD::INSERT_VECTOR_ELT:
    return LowerAccessVectorElement(Op, DAG);
  case ISD::INTRINSIC_VOID:
  case ISD::INTRINSIC_WO_CHAIN:
  case ISD::INTRINSIC_W_CHAIN:
    return LowerIntrinsic(Op, DAG);
  case ISD::SIGN_EXTEND_INREG:
    return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::BUILD_VECTOR:
    return LowerBUILD_VECTOR(Op, DAG);
  case ISD::VECTOR_SHUFFLE:
    return LowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::SETCC:
    return LowerSETCC(Op, DAG);
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
    return LowerShift(Op, DAG);
  case ISD::FP_TO_SINT_SAT:
  case ISD::FP_TO_UINT_SAT:
    return LowerFP_TO_INT_SAT(Op, DAG);
  case ISD::LOAD:
    return LowerLoad(Op, DAG);
  case ISD::STORE:
    return LowerStore(Op, DAG);
  }
}
1271
1272static bool IsWebAssemblyGlobal(SDValue Op) {
1273 if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op))
1274 return WebAssembly::isWasmVarAddressSpace(GA->getAddressSpace());
1275
1276 return false;
1277}
1278
1279static Optional<unsigned> IsWebAssemblyLocal(SDValue Op, SelectionDAG &DAG) {
1280 const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op);
1281 if (!FI)
1282 return None;
1283
1284 auto &MF = DAG.getMachineFunction();
1285 return WebAssemblyFrameLowering::getLocalForStackObject(MF, FI->getIndex());
1286}
1287
1288SDValue WebAssemblyTargetLowering::LowerStore(SDValue Op,
1289 SelectionDAG &DAG) const {
1290 SDLoc DL(Op);
1291 StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());
1292 const SDValue &Value = SN->getValue();
1293 const SDValue &Base = SN->getBasePtr();
1294 const SDValue &Offset = SN->getOffset();
1295
1296 if (IsWebAssemblyGlobal(Base)) {
1297 if (!Offset->isUndef())
1298 report_fatal_error("unexpected offset when storing to webassembly global",
1299 false);
1300
1301 SDVTList Tys = DAG.getVTList(MVT::Other);
1302 SDValue Ops[] = {SN->getChain(), Value, Base};
1303 return DAG.getMemIntrinsicNode(WebAssemblyISD::GLOBAL_SET, DL, Tys, Ops,
1304 SN->getMemoryVT(), SN->getMemOperand());
1305 }
1306
1307 if (Optional<unsigned> Local = IsWebAssemblyLocal(Base, DAG)) {
1308 if (!Offset->isUndef())
1309 report_fatal_error("unexpected offset when storing to webassembly local",
1310 false);
1311
1312 SDValue Idx = DAG.getTargetConstant(*Local, Base, MVT::i32);
1313 SDVTList Tys = DAG.getVTList(MVT::Other); // The chain.
1314 SDValue Ops[] = {SN->getChain(), Idx, Value};
1315 return DAG.getNode(WebAssemblyISD::LOCAL_SET, DL, Tys, Ops);
1316 }
1317
1318 return Op;
1319}
1320
1321SDValue WebAssemblyTargetLowering::LowerLoad(SDValue Op,
1322 SelectionDAG &DAG) const {
1323 SDLoc DL(Op);
1324 LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
1325 const SDValue &Base = LN->getBasePtr();
1326 const SDValue &Offset = LN->getOffset();
1327
1328 if (IsWebAssemblyGlobal(Base)) {
1329 if (!Offset->isUndef())
1330 report_fatal_error(
1331 "unexpected offset when loading from webassembly global", false);
1332
1333 SDVTList Tys = DAG.getVTList(LN->getValueType(0), MVT::Other);
1334 SDValue Ops[] = {LN->getChain(), Base};
1335 return DAG.getMemIntrinsicNode(WebAssemblyISD::GLOBAL_GET, DL, Tys, Ops,
1336 LN->getMemoryVT(), LN->getMemOperand());
1337 }
1338
1339 if (Optional<unsigned> Local = IsWebAssemblyLocal(Base, DAG)) {
1340 if (!Offset->isUndef())
1341 report_fatal_error(
1342 "unexpected offset when loading from webassembly local", false);
1343
1344 SDValue Idx = DAG.getTargetConstant(*Local, Base, MVT::i32);
1345 EVT LocalVT = LN->getValueType(0);
1346 SDValue LocalGet = DAG.getNode(WebAssemblyISD::LOCAL_GET, DL, LocalVT,
1347 {LN->getChain(), Idx});
1348 SDValue Result = DAG.getMergeValues({LocalGet, LN->getChain()}, DL);
1349 assert(Result->getNumValues() == 2 && "Loads must carry a chain!")(static_cast <bool> (Result->getNumValues() == 2 &&
"Loads must carry a chain!") ? void (0) : __assert_fail ("Result->getNumValues() == 2 && \"Loads must carry a chain!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1349, __extension__ __PRETTY_FUNCTION__))
;
1350 return Result;
1351 }
1352
1353 return Op;
1354}
1355
1356SDValue WebAssemblyTargetLowering::LowerCopyToReg(SDValue Op,
1357 SelectionDAG &DAG) const {
1358 SDValue Src = Op.getOperand(2);
1359 if (isa<FrameIndexSDNode>(Src.getNode())) {
1360 // CopyToReg nodes don't support FrameIndex operands. Other targets select
1361 // the FI to some LEA-like instruction, but since we don't have that, we
1362 // need to insert some kind of instruction that can take an FI operand and
1363 // produces a value usable by CopyToReg (i.e. in a vreg). So insert a dummy
1364 // local.copy between Op and its FI operand.
1365 SDValue Chain = Op.getOperand(0);
1366 SDLoc DL(Op);
1367 unsigned Reg = cast<RegisterSDNode>(Op.getOperand(1))->getReg();
1368 EVT VT = Src.getValueType();
1369 SDValue Copy(DAG.getMachineNode(VT == MVT::i32 ? WebAssembly::COPY_I32
1370 : WebAssembly::COPY_I64,
1371 DL, VT, Src),
1372 0);
1373 return Op.getNode()->getNumValues() == 1
1374 ? DAG.getCopyToReg(Chain, DL, Reg, Copy)
1375 : DAG.getCopyToReg(Chain, DL, Reg, Copy,
1376 Op.getNumOperands() == 4 ? Op.getOperand(3)
1377 : SDValue());
1378 }
1379 return SDValue();
1380}
1381
1382SDValue WebAssemblyTargetLowering::LowerFrameIndex(SDValue Op,
1383 SelectionDAG &DAG) const {
1384 int FI = cast<FrameIndexSDNode>(Op)->getIndex();
1385 return DAG.getTargetFrameIndex(FI, Op.getValueType());
1386}
1387
1388SDValue WebAssemblyTargetLowering::LowerRETURNADDR(SDValue Op,
1389 SelectionDAG &DAG) const {
1390 SDLoc DL(Op);
1391
1392 if (!Subtarget->getTargetTriple().isOSEmscripten()) {
1393 fail(DL, DAG,
1394 "Non-Emscripten WebAssembly hasn't implemented "
1395 "__builtin_return_address");
1396 return SDValue();
1397 }
1398
1399 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
1400 return SDValue();
1401
1402 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1403 MakeLibCallOptions CallOptions;
1404 return makeLibCall(DAG, RTLIB::RETURN_ADDRESS, Op.getValueType(),
1405 {DAG.getConstant(Depth, DL, MVT::i32)}, CallOptions, DL)
1406 .first;
1407}
1408
1409SDValue WebAssemblyTargetLowering::LowerFRAMEADDR(SDValue Op,
1410 SelectionDAG &DAG) const {
1411 // Non-zero depths are not supported by WebAssembly currently. Use the
1412 // legalizer's default expansion, which is to return 0 (what this function is
1413 // documented to do).
1414 if (Op.getConstantOperandVal(0) > 0)
1415 return SDValue();
1416
1417 DAG.getMachineFunction().getFrameInfo().setFrameAddressIsTaken(true);
1418 EVT VT = Op.getValueType();
1419 Register FP =
1420 Subtarget->getRegisterInfo()->getFrameRegister(DAG.getMachineFunction());
1421 return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), FP, VT);
1422}
1423
1424SDValue
1425WebAssemblyTargetLowering::LowerGlobalTLSAddress(SDValue Op,
1426 SelectionDAG &DAG) const {
1427 SDLoc DL(Op);
1428 const auto *GA = cast<GlobalAddressSDNode>(Op);
1429 MVT PtrVT = getPointerTy(DAG.getDataLayout());
1430
1431 MachineFunction &MF = DAG.getMachineFunction();
1432 if (!MF.getSubtarget<WebAssemblySubtarget>().hasBulkMemory())
1433 report_fatal_error("cannot use thread-local storage without bulk memory",
1434 false);
1435
1436 const GlobalValue *GV = GA->getGlobal();
1437
1438 // Currently Emscripten does not support dynamic linking with threads.
1439 // Therefore, if we have thread-local storage, only the local-exec model
1440 // is possible.
1441 // TODO: remove this and implement proper TLS models once Emscripten
1442 // supports dynamic linking with threads.
1443 if (GV->getThreadLocalMode() != GlobalValue::LocalExecTLSModel &&
1444 !Subtarget->getTargetTriple().isOSEmscripten()) {
1445 report_fatal_error("only -ftls-model=local-exec is supported for now on "
1446 "non-Emscripten OSes: variable " +
1447 GV->getName(),
1448 false);
1449 }
1450
1451 auto GlobalGet = PtrVT == MVT::i64 ? WebAssembly::GLOBAL_GET_I64
1452 : WebAssembly::GLOBAL_GET_I32;
1453 const char *BaseName = MF.createExternalSymbolName("__tls_base");
1454
1455 SDValue BaseAddr(
1456 DAG.getMachineNode(GlobalGet, DL, PtrVT,
1457 DAG.getTargetExternalSymbol(BaseName, PtrVT)),
1458 0);
1459
1460 SDValue TLSOffset = DAG.getTargetGlobalAddress(
1461 GV, DL, PtrVT, GA->getOffset(), WebAssemblyII::MO_TLS_BASE_REL);
1462 SDValue SymAddr = DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT, TLSOffset);
1463
1464 return DAG.getNode(ISD::ADD, DL, PtrVT, BaseAddr, SymAddr);
1465}
1466
1467SDValue WebAssemblyTargetLowering::LowerGlobalAddress(SDValue Op,
1468 SelectionDAG &DAG) const {
1469 SDLoc DL(Op);
1470 const auto *GA = cast<GlobalAddressSDNode>(Op);
1471 EVT VT = Op.getValueType();
1472 assert(GA->getTargetFlags() == 0 &&(static_cast <bool> (GA->getTargetFlags() == 0 &&
"Unexpected target flags on generic GlobalAddressSDNode") ? void
(0) : __assert_fail ("GA->getTargetFlags() == 0 && \"Unexpected target flags on generic GlobalAddressSDNode\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1473, __extension__ __PRETTY_FUNCTION__))
1473 "Unexpected target flags on generic GlobalAddressSDNode")(static_cast <bool> (GA->getTargetFlags() == 0 &&
"Unexpected target flags on generic GlobalAddressSDNode") ? void
(0) : __assert_fail ("GA->getTargetFlags() == 0 && \"Unexpected target flags on generic GlobalAddressSDNode\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1473, __extension__ __PRETTY_FUNCTION__))
;
1474 if (!WebAssembly::isValidAddressSpace(GA->getAddressSpace()))
1475 fail(DL, DAG, "Invalid address space for WebAssembly target");
1476
1477 unsigned OperandFlags = 0;
1478 if (isPositionIndependent()) {
1479 const GlobalValue *GV = GA->getGlobal();
1480 if (getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV)) {
1481 MachineFunction &MF = DAG.getMachineFunction();
1482 MVT PtrVT = getPointerTy(MF.getDataLayout());
1483 const char *BaseName;
1484 if (GV->getValueType()->isFunctionTy()) {
1485 BaseName = MF.createExternalSymbolName("__table_base");
1486 OperandFlags = WebAssemblyII::MO_TABLE_BASE_REL;
1487 }
1488 else {
1489 BaseName = MF.createExternalSymbolName("__memory_base");
1490 OperandFlags = WebAssemblyII::MO_MEMORY_BASE_REL;
1491 }
1492 SDValue BaseAddr =
1493 DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT,
1494 DAG.getTargetExternalSymbol(BaseName, PtrVT));
1495
1496 SDValue SymAddr = DAG.getNode(
1497 WebAssemblyISD::WrapperPIC, DL, VT,
1498 DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT, GA->getOffset(),
1499 OperandFlags));
1500
1501 return DAG.getNode(ISD::ADD, DL, VT, BaseAddr, SymAddr);
1502 } else {
1503 OperandFlags = WebAssemblyII::MO_GOT;
1504 }
1505 }
1506
1507 return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1508 DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT,
1509 GA->getOffset(), OperandFlags));
1510}
1511
1512SDValue
1513WebAssemblyTargetLowering::LowerExternalSymbol(SDValue Op,
1514 SelectionDAG &DAG) const {
1515 SDLoc DL(Op);
1516 const auto *ES = cast<ExternalSymbolSDNode>(Op);
1517 EVT VT = Op.getValueType();
1518 assert(ES->getTargetFlags() == 0 &&(static_cast <bool> (ES->getTargetFlags() == 0 &&
"Unexpected target flags on generic ExternalSymbolSDNode") ?
void (0) : __assert_fail ("ES->getTargetFlags() == 0 && \"Unexpected target flags on generic ExternalSymbolSDNode\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1519, __extension__ __PRETTY_FUNCTION__))
1519 "Unexpected target flags on generic ExternalSymbolSDNode")(static_cast <bool> (ES->getTargetFlags() == 0 &&
"Unexpected target flags on generic ExternalSymbolSDNode") ?
void (0) : __assert_fail ("ES->getTargetFlags() == 0 && \"Unexpected target flags on generic ExternalSymbolSDNode\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1519, __extension__ __PRETTY_FUNCTION__))
;
1520 return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1521 DAG.getTargetExternalSymbol(ES->getSymbol(), VT));
1522}
1523
1524SDValue WebAssemblyTargetLowering::LowerJumpTable(SDValue Op,
1525 SelectionDAG &DAG) const {
1526 // There's no need for a Wrapper node because we always incorporate a jump
1527 // table operand into a BR_TABLE instruction, rather than ever
1528 // materializing it in a register.
1529 const JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
1530 return DAG.getTargetJumpTable(JT->getIndex(), Op.getValueType(),
1531 JT->getTargetFlags());
1532}
1533
1534SDValue WebAssemblyTargetLowering::LowerBR_JT(SDValue Op,
1535 SelectionDAG &DAG) const {
1536 SDLoc DL(Op);
1537 SDValue Chain = Op.getOperand(0);
1538 const auto *JT = cast<JumpTableSDNode>(Op.getOperand(1));
1539 SDValue Index = Op.getOperand(2);
1540 assert(JT->getTargetFlags() == 0 && "WebAssembly doesn't set target flags")(static_cast <bool> (JT->getTargetFlags() == 0 &&
"WebAssembly doesn't set target flags") ? void (0) : __assert_fail
("JT->getTargetFlags() == 0 && \"WebAssembly doesn't set target flags\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1540, __extension__ __PRETTY_FUNCTION__))
;
1541
1542 SmallVector<SDValue, 8> Ops;
1543 Ops.push_back(Chain);
1544 Ops.push_back(Index);
1545
1546 MachineJumpTableInfo *MJTI = DAG.getMachineFunction().getJumpTableInfo();
1547 const auto &MBBs = MJTI->getJumpTables()[JT->getIndex()].MBBs;
1548
1549 // Add an operand for each case.
1550 for (auto MBB : MBBs)
1551 Ops.push_back(DAG.getBasicBlock(MBB));
1552
1553 // Add the first MBB as a dummy default target for now. This will be replaced
1554 // with the proper default target (and the preceding range check eliminated)
1555 // if possible by WebAssemblyFixBrTableDefaults.
1556 Ops.push_back(DAG.getBasicBlock(*MBBs.begin()));
1557 return DAG.getNode(WebAssemblyISD::BR_TABLE, DL, MVT::Other, Ops);
1558}
1559
1560SDValue WebAssemblyTargetLowering::LowerVASTART(SDValue Op,
1561 SelectionDAG &DAG) const {
1562 SDLoc DL(Op);
1563 EVT PtrVT = getPointerTy(DAG.getMachineFunction().getDataLayout());
1564
1565 auto *MFI = DAG.getMachineFunction().getInfo<WebAssemblyFunctionInfo>();
1566 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
1567
1568 SDValue ArgN = DAG.getCopyFromReg(DAG.getEntryNode(), DL,
1569 MFI->getVarargBufferVreg(), PtrVT);
1570 return DAG.getStore(Op.getOperand(0), DL, ArgN, Op.getOperand(1),
1571 MachinePointerInfo(SV));
1572}
1573
1574static SDValue getCppExceptionSymNode(SDValue Op, unsigned TagIndex,
1575 SelectionDAG &DAG) {
1576 // We only support C++ exceptions for now
1577 int Tag =
1578 cast<ConstantSDNode>(Op.getOperand(TagIndex).getNode())->getZExtValue();
1579 if (Tag != WebAssembly::CPP_EXCEPTION)
1580 llvm_unreachable("Invalid tag: We only support C++ exceptions for now")::llvm::llvm_unreachable_internal("Invalid tag: We only support C++ exceptions for now"
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1580)
;
1581 auto &MF = DAG.getMachineFunction();
1582 const auto &TLI = DAG.getTargetLoweringInfo();
1583 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
1584 const char *SymName = MF.createExternalSymbolName("__cpp_exception");
1585 return DAG.getNode(WebAssemblyISD::Wrapper, SDLoc(Op), PtrVT,
1586 DAG.getTargetExternalSymbol(SymName, PtrVT));
1587}
1588
1589SDValue WebAssemblyTargetLowering::LowerIntrinsic(SDValue Op,
1590 SelectionDAG &DAG) const {
1591 MachineFunction &MF = DAG.getMachineFunction();
1592 unsigned IntNo;
1593 switch (Op.getOpcode()) {
1594 case ISD::INTRINSIC_VOID:
1595 case ISD::INTRINSIC_W_CHAIN:
1596 IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
1597 break;
1598 case ISD::INTRINSIC_WO_CHAIN:
1599 IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1600 break;
1601 default:
1602 llvm_unreachable("Invalid intrinsic")::llvm::llvm_unreachable_internal("Invalid intrinsic", "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1602)
;
1603 }
1604 SDLoc DL(Op);
1605
1606 switch (IntNo) {
1607 default:
1608 return SDValue(); // Don't custom lower most intrinsics.
1609
1610 case Intrinsic::wasm_lsda: {
1611 EVT VT = Op.getValueType();
1612 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1613 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
1614 auto &Context = MF.getMMI().getContext();
1615 MCSymbol *S = Context.getOrCreateSymbol(Twine("GCC_except_table") +
1616 Twine(MF.getFunctionNumber()));
1617 return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1618 DAG.getMCSymbol(S, PtrVT));
1619 }
1620
1621 case Intrinsic::wasm_throw: {
1622 SDValue SymNode = getCppExceptionSymNode(Op, 2, DAG);
1623 return DAG.getNode(WebAssemblyISD::THROW, DL,
1624 MVT::Other, // outchain type
1625 {
1626 Op.getOperand(0), // inchain
1627 SymNode, // exception symbol
1628 Op.getOperand(3) // thrown value
1629 });
1630 }
1631
1632 case Intrinsic::wasm_catch: {
1633 SDValue SymNode = getCppExceptionSymNode(Op, 2, DAG);
1634 return DAG.getNode(WebAssemblyISD::CATCH, DL,
1635 {
1636 MVT::i32, // outchain type
1637 MVT::Other // return value
1638 },
1639 {
1640 Op.getOperand(0), // inchain
1641 SymNode // exception symbol
1642 });
1643 }
1644
1645 case Intrinsic::wasm_shuffle: {
1646 // Drop in-chain and replace undefs, but otherwise pass through unchanged
1647 SDValue Ops[18];
1648 size_t OpIdx = 0;
1649 Ops[OpIdx++] = Op.getOperand(1);
1650 Ops[OpIdx++] = Op.getOperand(2);
1651 while (OpIdx < 18) {
1652 const SDValue &MaskIdx = Op.getOperand(OpIdx + 1);
1653 if (MaskIdx.isUndef() ||
1654 cast<ConstantSDNode>(MaskIdx.getNode())->getZExtValue() >= 32) {
1655 Ops[OpIdx++] = DAG.getConstant(0, DL, MVT::i32);
1656 } else {
1657 Ops[OpIdx++] = MaskIdx;
1658 }
1659 }
1660 return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops);
1661 }
1662 }
1663}
1664
1665SDValue
1666WebAssemblyTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
1667 SelectionDAG &DAG) const {
1668 SDLoc DL(Op);
1669 // If sign extension operations are disabled, allow sext_inreg only if operand
1670 // is a vector extract of an i8 or i16 lane. SIMD does not depend on sign
1671 // extension operations, but allowing sext_inreg in this context lets us have
1672 // simple patterns to select extract_lane_s instructions. Expanding sext_inreg
1673 // everywhere would be simpler in this file, but would necessitate large and
1674 // brittle patterns to undo the expansion and select extract_lane_s
1675 // instructions.
1676 assert(!Subtarget->hasSignExt() && Subtarget->hasSIMD128())(static_cast <bool> (!Subtarget->hasSignExt() &&
Subtarget->hasSIMD128()) ? void (0) : __assert_fail ("!Subtarget->hasSignExt() && Subtarget->hasSIMD128()"
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1676, __extension__ __PRETTY_FUNCTION__))
;
1677 if (Op.getOperand(0).getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1678 return SDValue();
1679
1680 const SDValue &Extract = Op.getOperand(0);
1681 MVT VecT = Extract.getOperand(0).getSimpleValueType();
1682 if (VecT.getVectorElementType().getSizeInBits() > 32)
1683 return SDValue();
1684 MVT ExtractedLaneT =
1685 cast<VTSDNode>(Op.getOperand(1).getNode())->getVT().getSimpleVT();
1686 MVT ExtractedVecT =
1687 MVT::getVectorVT(ExtractedLaneT, 128 / ExtractedLaneT.getSizeInBits());
1688 if (ExtractedVecT == VecT)
1689 return Op;
1690
1691 // Bitcast vector to appropriate type to ensure ISel pattern coverage
1692 const SDNode *Index = Extract.getOperand(1).getNode();
1693 if (!isa<ConstantSDNode>(Index))
1694 return SDValue();
1695 unsigned IndexVal = cast<ConstantSDNode>(Index)->getZExtValue();
1696 unsigned Scale =
1697 ExtractedVecT.getVectorNumElements() / VecT.getVectorNumElements();
1698 assert(Scale > 1)(static_cast <bool> (Scale > 1) ? void (0) : __assert_fail
("Scale > 1", "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1698, __extension__ __PRETTY_FUNCTION__))
;
1699 SDValue NewIndex =
1700 DAG.getConstant(IndexVal * Scale, DL, Index->getValueType(0));
1701 SDValue NewExtract = DAG.getNode(
1702 ISD::EXTRACT_VECTOR_ELT, DL, Extract.getValueType(),
1703 DAG.getBitcast(ExtractedVecT, Extract.getOperand(0)), NewIndex);
1704 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, Op.getValueType(), NewExtract,
1705 Op.getOperand(1));
1706}
1707
1708SDValue WebAssemblyTargetLowering::LowerBUILD_VECTOR(SDValue Op,
1709 SelectionDAG &DAG) const {
1710 SDLoc DL(Op);
1711 const EVT VecT = Op.getValueType();
1712 const EVT LaneT = Op.getOperand(0).getValueType();
1713 const size_t Lanes = Op.getNumOperands();
1714 bool CanSwizzle = VecT == MVT::v16i8;
1715
1716 // BUILD_VECTORs are lowered to the instruction that initializes the highest
1717 // possible number of lanes at once followed by a sequence of replace_lane
1718 // instructions to individually initialize any remaining lanes.
1719
1720 // TODO: Tune this. For example, lanewise swizzling is very expensive, so
1721 // swizzled lanes should be given greater weight.
1722
1723 // TODO: Investigate looping rather than always extracting/replacing specific
1724 // lanes to fill gaps.
1725
1726 auto IsConstant = [](const SDValue &V) {
1727 return V.getOpcode() == ISD::Constant || V.getOpcode() == ISD::ConstantFP;
1728 };
1729
1730 // Returns the source vector and index vector pair if they exist. Checks for:
1731 // (extract_vector_elt
1732 // $src,
1733 // (sign_extend_inreg (extract_vector_elt $indices, $i))
1734 // )
1735 auto GetSwizzleSrcs = [](size_t I, const SDValue &Lane) {
1736 auto Bail = std::make_pair(SDValue(), SDValue());
1737 if (Lane->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1738 return Bail;
1739 const SDValue &SwizzleSrc = Lane->getOperand(0);
1740 const SDValue &IndexExt = Lane->getOperand(1);
1741 if (IndexExt->getOpcode() != ISD::SIGN_EXTEND_INREG)
1742 return Bail;
1743 const SDValue &Index = IndexExt->getOperand(0);
1744 if (Index->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1745 return Bail;
1746 const SDValue &SwizzleIndices = Index->getOperand(0);
1747 if (SwizzleSrc.getValueType() != MVT::v16i8 ||
1748 SwizzleIndices.getValueType() != MVT::v16i8 ||
1749 Index->getOperand(1)->getOpcode() != ISD::Constant ||
1750 Index->getConstantOperandVal(1) != I)
1751 return Bail;
1752 return std::make_pair(SwizzleSrc, SwizzleIndices);
1753 };
1754
1755 // If the lane is extracted from another vector at a constant index, return
1756 // that vector. The source vector must not have more lanes than the dest
1757 // because the shufflevector indices are in terms of the destination lanes and
1758 // would not be able to address the smaller individual source lanes.
1759 auto GetShuffleSrc = [&](const SDValue &Lane) {
1760 if (Lane->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1761 return SDValue();
1762 if (!isa<ConstantSDNode>(Lane->getOperand(1).getNode()))
1763 return SDValue();
1764 if (Lane->getOperand(0).getValueType().getVectorNumElements() >
1765 VecT.getVectorNumElements())
1766 return SDValue();
1767 return Lane->getOperand(0);
1768 };
1769
1770 using ValueEntry = std::pair<SDValue, size_t>;
1771 SmallVector<ValueEntry, 16> SplatValueCounts;
1772
1773 using SwizzleEntry = std::pair<std::pair<SDValue, SDValue>, size_t>;
1774 SmallVector<SwizzleEntry, 16> SwizzleCounts;
1775
1776 using ShuffleEntry = std::pair<SDValue, size_t>;
1777 SmallVector<ShuffleEntry, 16> ShuffleCounts;
1778
1779 auto AddCount = [](auto &Counts, const auto &Val) {
1780 auto CountIt =
1781 llvm::find_if(Counts, [&Val](auto E) { return E.first == Val; });
1782 if (CountIt == Counts.end()) {
1783 Counts.emplace_back(Val, 1);
1784 } else {
1785 CountIt->second++;
1786 }
1787 };
1788
1789 auto GetMostCommon = [](auto &Counts) {
1790 auto CommonIt =
1791 std::max_element(Counts.begin(), Counts.end(),
1792 [](auto A, auto B) { return A.second < B.second; });
1793 assert(CommonIt != Counts.end() && "Unexpected all-undef build_vector")(static_cast <bool> (CommonIt != Counts.end() &&
"Unexpected all-undef build_vector") ? void (0) : __assert_fail
("CommonIt != Counts.end() && \"Unexpected all-undef build_vector\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1793, __extension__ __PRETTY_FUNCTION__))
;
1794 return *CommonIt;
1795 };
1796
1797 size_t NumConstantLanes = 0;
1798
1799 // Count eligible lanes for each type of vector creation op
1800 for (size_t I = 0; I
2.1
'I' is < 'Lanes'
2.1
'I' is < 'Lanes'
< Lanes
; ++I) {
3
Loop condition is true. Entering loop body
9
Assuming 'I' is >= 'Lanes'
10
Loop condition is false. Execution continues on line 1818
1801 const SDValue &Lane = Op->getOperand(I);
1802 if (Lane.isUndef())
4
Taking false branch
1803 continue;
1804
1805 AddCount(SplatValueCounts, Lane);
1806
1807 if (IsConstant(Lane))
5
Taking false branch
1808 NumConstantLanes++;
1809 if (auto ShuffleSrc = GetShuffleSrc(Lane))
6
Taking false branch
1810 AddCount(ShuffleCounts, ShuffleSrc);
1811 if (CanSwizzle
6.1
'CanSwizzle' is true
6.1
'CanSwizzle' is true
) {
7
Taking true branch
1812 auto SwizzleSrcs = GetSwizzleSrcs(I, Lane);
1813 if (SwizzleSrcs.first)
8
Taking false branch
1814 AddCount(SwizzleCounts, SwizzleSrcs);
1815 }
1816 }
1817
1818 SDValue SplatValue;
1819 size_t NumSplatLanes;
1820 std::tie(SplatValue, NumSplatLanes) = GetMostCommon(SplatValueCounts);
1821
1822 SDValue SwizzleSrc;
1823 SDValue SwizzleIndices;
1824 size_t NumSwizzleLanes = 0;
1825 if (SwizzleCounts.size())
11
Assuming the condition is true
12
Taking true branch
1826 std::forward_as_tuple(std::tie(SwizzleSrc, SwizzleIndices),
1827 NumSwizzleLanes) = GetMostCommon(SwizzleCounts);
1828
1829 // Shuffles can draw from up to two vectors, so find the two most common
1830 // sources.
1831 SDValue ShuffleSrc1, ShuffleSrc2;
1832 size_t NumShuffleLanes = 0;
1833 if (ShuffleCounts.size()) {
13
Assuming the condition is false
14
Taking false branch
1834 std::tie(ShuffleSrc1, NumShuffleLanes) = GetMostCommon(ShuffleCounts);
1835 ShuffleCounts.erase(std::remove_if(ShuffleCounts.begin(),
1836 ShuffleCounts.end(),
1837 [&](const auto &Pair) {
1838 return Pair.first == ShuffleSrc1;
1839 }),
1840 ShuffleCounts.end());
1841 }
1842 if (ShuffleCounts.size()) {
15
Taking false branch
1843 size_t AdditionalShuffleLanes;
1844 std::tie(ShuffleSrc2, AdditionalShuffleLanes) =
1845 GetMostCommon(ShuffleCounts);
1846 NumShuffleLanes += AdditionalShuffleLanes;
1847 }
1848
1849 // Predicate returning true if the lane is properly initialized by the
1850 // original instruction
1851 std::function<bool(size_t, const SDValue &)> IsLaneConstructed;
1852 SDValue Result;
1853 // Prefer swizzles over shuffles over vector consts over splats
1854 if (NumSwizzleLanes
15.1
'NumSwizzleLanes' is >= 'NumShuffleLanes'
15.1
'NumSwizzleLanes' is >= 'NumShuffleLanes'
>= NumShuffleLanes &&
17
Taking false branch
1855 NumSwizzleLanes
15.2
'NumSwizzleLanes' is >= 'NumConstantLanes'
15.2
'NumSwizzleLanes' is >= 'NumConstantLanes'
>= NumConstantLanes && NumSwizzleLanes >= NumSplatLanes) {
16
Assuming 'NumSwizzleLanes' is < 'NumSplatLanes'
1856 Result = DAG.getNode(WebAssemblyISD::SWIZZLE, DL, VecT, SwizzleSrc,
1857 SwizzleIndices);
1858 auto Swizzled = std::make_pair(SwizzleSrc, SwizzleIndices);
1859 IsLaneConstructed = [&, Swizzled](size_t I, const SDValue &Lane) {
1860 return Swizzled == GetSwizzleSrcs(I, Lane);
1861 };
1862 } else if (NumShuffleLanes
17.1
'NumShuffleLanes' is >= 'NumConstantLanes'
17.1
'NumShuffleLanes' is >= 'NumConstantLanes'
>= NumConstantLanes &&
19
Taking true branch
1863 NumShuffleLanes >= NumSplatLanes) {
18
Assuming 'NumShuffleLanes' is >= 'NumSplatLanes'
1864 size_t DestLaneSize = VecT.getVectorElementType().getFixedSizeInBits() / 8;
1865 size_t DestLaneCount = VecT.getVectorNumElements();
1866 size_t Scale1 = 1;
1867 size_t Scale2 = 1;
1868 SDValue Src1 = ShuffleSrc1;
20
Null pointer value stored to 'Src1.Node'
1869 SDValue Src2 = ShuffleSrc2 ? ShuffleSrc2 : DAG.getUNDEF(VecT);
21
'?' condition is false
1870 if (Src1.getValueType() != VecT) {
22
Calling 'SDValue::getValueType'
1871 size_t LaneSize =
1872 Src1.getValueType().getVectorElementType().getFixedSizeInBits() / 8;
1873 assert(LaneSize > DestLaneSize)(static_cast <bool> (LaneSize > DestLaneSize) ? void
(0) : __assert_fail ("LaneSize > DestLaneSize", "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1873, __extension__ __PRETTY_FUNCTION__))
;
1874 Scale1 = LaneSize / DestLaneSize;
1875 Src1 = DAG.getBitcast(VecT, Src1);
1876 }
1877 if (Src2.getValueType() != VecT) {
1878 size_t LaneSize =
1879 Src2.getValueType().getVectorElementType().getFixedSizeInBits() / 8;
1880 assert(LaneSize > DestLaneSize)(static_cast <bool> (LaneSize > DestLaneSize) ? void
(0) : __assert_fail ("LaneSize > DestLaneSize", "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1880, __extension__ __PRETTY_FUNCTION__))
;
1881 Scale2 = LaneSize / DestLaneSize;
1882 Src2 = DAG.getBitcast(VecT, Src2);
1883 }
1884
1885 int Mask[16];
1886 assert(DestLaneCount <= 16)(static_cast <bool> (DestLaneCount <= 16) ? void (0)
: __assert_fail ("DestLaneCount <= 16", "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1886, __extension__ __PRETTY_FUNCTION__))
;
1887 for (size_t I = 0; I < DestLaneCount; ++I) {
1888 const SDValue &Lane = Op->getOperand(I);
1889 SDValue Src = GetShuffleSrc(Lane);
1890 if (Src == ShuffleSrc1) {
1891 Mask[I] = Lane->getConstantOperandVal(1) * Scale1;
1892 } else if (Src && Src == ShuffleSrc2) {
1893 Mask[I] = DestLaneCount + Lane->getConstantOperandVal(1) * Scale2;
1894 } else {
1895 Mask[I] = -1;
1896 }
1897 }
1898 ArrayRef<int> MaskRef(Mask, DestLaneCount);
1899 Result = DAG.getVectorShuffle(VecT, DL, Src1, Src2, MaskRef);
1900 IsLaneConstructed = [&](size_t, const SDValue &Lane) {
1901 auto Src = GetShuffleSrc(Lane);
1902 return Src == ShuffleSrc1 || (Src && Src == ShuffleSrc2);
1903 };
1904 } else if (NumConstantLanes >= NumSplatLanes) {
1905 SmallVector<SDValue, 16> ConstLanes;
1906 for (const SDValue &Lane : Op->op_values()) {
1907 if (IsConstant(Lane)) {
1908 ConstLanes.push_back(Lane);
1909 } else if (LaneT.isFloatingPoint()) {
1910 ConstLanes.push_back(DAG.getConstantFP(0, DL, LaneT));
1911 } else {
1912 ConstLanes.push_back(DAG.getConstant(0, DL, LaneT));
1913 }
1914 }
1915 Result = DAG.getBuildVector(VecT, DL, ConstLanes);
1916 IsLaneConstructed = [&IsConstant](size_t _, const SDValue &Lane) {
1917 return IsConstant(Lane);
1918 };
1919 } else {
1920 // Use a splat, but possibly a load_splat
1921 LoadSDNode *SplattedLoad;
1922 if ((SplattedLoad = dyn_cast<LoadSDNode>(SplatValue)) &&
1923 SplattedLoad->getMemoryVT() == VecT.getVectorElementType()) {
1924 Result = DAG.getMemIntrinsicNode(
1925 WebAssemblyISD::LOAD_SPLAT, DL, DAG.getVTList(VecT),
1926 {SplattedLoad->getChain(), SplattedLoad->getBasePtr(),
1927 SplattedLoad->getOffset()},
1928 SplattedLoad->getMemoryVT(), SplattedLoad->getMemOperand());
1929 } else {
1930 Result = DAG.getSplatBuildVector(VecT, DL, SplatValue);
1931 }
1932 IsLaneConstructed = [&SplatValue](size_t _, const SDValue &Lane) {
1933 return Lane == SplatValue;
1934 };
1935 }
1936
1937 assert(Result)(static_cast <bool> (Result) ? void (0) : __assert_fail
("Result", "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1937, __extension__ __PRETTY_FUNCTION__))
;
1938 assert(IsLaneConstructed)(static_cast <bool> (IsLaneConstructed) ? void (0) : __assert_fail
("IsLaneConstructed", "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1938, __extension__ __PRETTY_FUNCTION__))
;
1939
1940 // Add replace_lane instructions for any unhandled values
1941 for (size_t I = 0; I < Lanes; ++I) {
1942 const SDValue &Lane = Op->getOperand(I);
1943 if (!Lane.isUndef() && !IsLaneConstructed(I, Lane))
1944 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VecT, Result, Lane,
1945 DAG.getConstant(I, DL, MVT::i32));
1946 }
1947
1948 return Result;
1949}
1950
1951SDValue
1952WebAssemblyTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
1953 SelectionDAG &DAG) const {
1954 SDLoc DL(Op);
1955 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op.getNode())->getMask();
1956 MVT VecType = Op.getOperand(0).getSimpleValueType();
1957 assert(VecType.is128BitVector() && "Unexpected shuffle vector type")(static_cast <bool> (VecType.is128BitVector() &&
"Unexpected shuffle vector type") ? void (0) : __assert_fail
("VecType.is128BitVector() && \"Unexpected shuffle vector type\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1957, __extension__ __PRETTY_FUNCTION__))
;
1958 size_t LaneBytes = VecType.getVectorElementType().getSizeInBits() / 8;
1959
1960 // Space for two vector args and sixteen mask indices
1961 SDValue Ops[18];
1962 size_t OpIdx = 0;
1963 Ops[OpIdx++] = Op.getOperand(0);
1964 Ops[OpIdx++] = Op.getOperand(1);
1965
1966 // Expand mask indices to byte indices and materialize them as operands
1967 for (int M : Mask) {
1968 for (size_t J = 0; J < LaneBytes; ++J) {
1969 // Lower undefs (represented by -1 in mask) to zero
1970 uint64_t ByteIndex = M == -1 ? 0 : (uint64_t)M * LaneBytes + J;
1971 Ops[OpIdx++] = DAG.getConstant(ByteIndex, DL, MVT::i32);
1972 }
1973 }
1974
1975 return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops);
1976}
1977
1978SDValue WebAssemblyTargetLowering::LowerSETCC(SDValue Op,
1979 SelectionDAG &DAG) const {
1980 SDLoc DL(Op);
1981 // The legalizer does not know how to expand the unsupported comparison modes
1982 // of i64x2 vectors, so we manually unroll them here.
1983 assert(Op->getOperand(0)->getSimpleValueType(0) == MVT::v2i64)(static_cast <bool> (Op->getOperand(0)->getSimpleValueType
(0) == MVT::v2i64) ? void (0) : __assert_fail ("Op->getOperand(0)->getSimpleValueType(0) == MVT::v2i64"
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1983, __extension__ __PRETTY_FUNCTION__))
;
1984 SmallVector<SDValue, 2> LHS, RHS;
1985 DAG.ExtractVectorElements(Op->getOperand(0), LHS);
1986 DAG.ExtractVectorElements(Op->getOperand(1), RHS);
1987 const SDValue &CC = Op->getOperand(2);
1988 auto MakeLane = [&](unsigned I) {
1989 return DAG.getNode(ISD::SELECT_CC, DL, MVT::i64, LHS[I], RHS[I],
1990 DAG.getConstant(uint64_t(-1), DL, MVT::i64),
1991 DAG.getConstant(uint64_t(0), DL, MVT::i64), CC);
1992 };
1993 return DAG.getBuildVector(Op->getValueType(0), DL,
1994 {MakeLane(0), MakeLane(1)});
1995}
1996
1997SDValue
1998WebAssemblyTargetLowering::LowerAccessVectorElement(SDValue Op,
1999 SelectionDAG &DAG) const {
2000 // Allow constant lane indices, expand variable lane indices
2001 SDNode *IdxNode = Op.getOperand(Op.getNumOperands() - 1).getNode();
2002 if (isa<ConstantSDNode>(IdxNode) || IdxNode->isUndef())
2003 return Op;
2004 else
2005 // Perform default expansion
2006 return SDValue();
2007}
2008
2009static SDValue unrollVectorShift(SDValue Op, SelectionDAG &DAG) {
2010 EVT LaneT = Op.getSimpleValueType().getVectorElementType();
2011 // 32-bit and 64-bit unrolled shifts will have proper semantics
2012 if (LaneT.bitsGE(MVT::i32))
2013 return DAG.UnrollVectorOp(Op.getNode());
2014 // Otherwise mask the shift value to get proper semantics from 32-bit shift
2015 SDLoc DL(Op);
2016 size_t NumLanes = Op.getSimpleValueType().getVectorNumElements();
2017 SDValue Mask = DAG.getConstant(LaneT.getSizeInBits() - 1, DL, MVT::i32);
2018 unsigned ShiftOpcode = Op.getOpcode();
2019 SmallVector<SDValue, 16> ShiftedElements;
2020 DAG.ExtractVectorElements(Op.getOperand(0), ShiftedElements, 0, 0, MVT::i32);
2021 SmallVector<SDValue, 16> ShiftElements;
2022 DAG.ExtractVectorElements(Op.getOperand(1), ShiftElements, 0, 0, MVT::i32);
2023 SmallVector<SDValue, 16> UnrolledOps;
2024 for (size_t i = 0; i < NumLanes; ++i) {
2025 SDValue MaskedShiftValue =
2026 DAG.getNode(ISD::AND, DL, MVT::i32, ShiftElements[i], Mask);
2027 SDValue ShiftedValue = ShiftedElements[i];
2028 if (ShiftOpcode == ISD::SRA)
2029 ShiftedValue = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32,
2030 ShiftedValue, DAG.getValueType(LaneT));
2031 UnrolledOps.push_back(
2032 DAG.getNode(ShiftOpcode, DL, MVT::i32, ShiftedValue, MaskedShiftValue));
2033 }
2034 return DAG.getBuildVector(Op.getValueType(), DL, UnrolledOps);
2035}
2036
2037SDValue WebAssemblyTargetLowering::LowerShift(SDValue Op,
2038 SelectionDAG &DAG) const {
2039 SDLoc DL(Op);
2040
2041 // Only manually lower vector shifts
2042 assert(Op.getSimpleValueType().isVector())(static_cast <bool> (Op.getSimpleValueType().isVector()
) ? void (0) : __assert_fail ("Op.getSimpleValueType().isVector()"
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 2042, __extension__ __PRETTY_FUNCTION__))
;
2043
2044 auto ShiftVal = DAG.getSplatValue(Op.getOperand(1));
2045 if (!ShiftVal)
2046 return unrollVectorShift(Op, DAG);
2047
2048 // Use anyext because none of the high bits can affect the shift
2049 ShiftVal = DAG.getAnyExtOrTrunc(ShiftVal, DL, MVT::i32);
2050
2051 unsigned Opcode;
2052 switch (Op.getOpcode()) {
2053 case ISD::SHL:
2054 Opcode = WebAssemblyISD::VEC_SHL;
2055 break;
2056 case ISD::SRA:
2057 Opcode = WebAssemblyISD::VEC_SHR_S;
2058 break;
2059 case ISD::SRL:
2060 Opcode = WebAssemblyISD::VEC_SHR_U;
2061 break;
2062 default:
2063 llvm_unreachable("unexpected opcode")::llvm::llvm_unreachable_internal("unexpected opcode", "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 2063)
;
2064 }
2065
2066 return DAG.getNode(Opcode, DL, Op.getValueType(), Op.getOperand(0), ShiftVal);
2067}
2068
2069SDValue WebAssemblyTargetLowering::LowerFP_TO_INT_SAT(SDValue Op,
2070 SelectionDAG &DAG) const {
2071 SDLoc DL(Op);
2072 EVT ResT = Op.getValueType();
2073 EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
2074
2075 if ((ResT == MVT::i32 || ResT == MVT::i64) &&
2076 (SatVT == MVT::i32 || SatVT == MVT::i64))
2077 return Op;
2078
2079 if (ResT == MVT::v4i32 && SatVT == MVT::i32)
2080 return Op;
2081
2082 return SDValue();
2083}
2084
2085//===----------------------------------------------------------------------===//
2086// Custom DAG combine hooks
2087//===----------------------------------------------------------------------===//
2088static SDValue
2089performVECTOR_SHUFFLECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
2090 auto &DAG = DCI.DAG;
2091 auto Shuffle = cast<ShuffleVectorSDNode>(N);
2092
2093 // Hoist vector bitcasts that don't change the number of lanes out of unary
2094 // shuffles, where they are less likely to get in the way of other combines.
2095 // (shuffle (vNxT1 (bitcast (vNxT0 x))), undef, mask) ->
2096 // (vNxT1 (bitcast (vNxT0 (shuffle x, undef, mask))))
2097 SDValue Bitcast = N->getOperand(0);
2098 if (Bitcast.getOpcode() != ISD::BITCAST)
2099 return SDValue();
2100 if (!N->getOperand(1).isUndef())
2101 return SDValue();
2102 SDValue CastOp = Bitcast.getOperand(0);
2103 MVT SrcType = CastOp.getSimpleValueType();
2104 MVT DstType = Bitcast.getSimpleValueType();
2105 if (!SrcType.is128BitVector() ||
2106 SrcType.getVectorNumElements() != DstType.getVectorNumElements())
2107 return SDValue();
2108 SDValue NewShuffle = DAG.getVectorShuffle(
2109 SrcType, SDLoc(N), CastOp, DAG.getUNDEF(SrcType), Shuffle->getMask());
2110 return DAG.getBitcast(DstType, NewShuffle);
2111}
2112
2113static SDValue
2114performVectorExtendCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
2115 auto &DAG = DCI.DAG;
2116 assert(N->getOpcode() == ISD::SIGN_EXTEND ||(static_cast <bool> (N->getOpcode() == ISD::SIGN_EXTEND
|| N->getOpcode() == ISD::ZERO_EXTEND) ? void (0) : __assert_fail
("N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND"
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 2117, __extension__ __PRETTY_FUNCTION__))
2117 N->getOpcode() == ISD::ZERO_EXTEND)(static_cast <bool> (N->getOpcode() == ISD::SIGN_EXTEND
|| N->getOpcode() == ISD::ZERO_EXTEND) ? void (0) : __assert_fail
("N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND"
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 2117, __extension__ __PRETTY_FUNCTION__))
;
2118
2119 // Combine ({s,z}ext (extract_subvector src, i)) into a widening operation if
2120 // possible before the extract_subvector can be expanded.
2121 auto Extract = N->getOperand(0);
2122 if (Extract.getOpcode() != ISD::EXTRACT_SUBVECTOR)
2123 return SDValue();
2124 auto Source = Extract.getOperand(0);
2125 auto *IndexNode = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
2126 if (IndexNode == nullptr)
2127 return SDValue();
2128 auto Index = IndexNode->getZExtValue();
2129
2130 // Only v8i8, v4i16, and v2i32 extracts can be widened, and only if the
2131 // extracted subvector is the low or high half of its source.
2132 EVT ResVT = N->getValueType(0);
2133 if (ResVT == MVT::v8i16) {
2134 if (Extract.getValueType() != MVT::v8i8 ||
2135 Source.getValueType() != MVT::v16i8 || (Index != 0 && Index != 8))
2136 return SDValue();
2137 } else if (ResVT == MVT::v4i32) {
2138 if (Extract.getValueType() != MVT::v4i16 ||
2139 Source.getValueType() != MVT::v8i16 || (Index != 0 && Index != 4))
2140 return SDValue();
2141 } else if (ResVT == MVT::v2i64) {
2142 if (Extract.getValueType() != MVT::v2i32 ||
2143 Source.getValueType() != MVT::v4i32 || (Index != 0 && Index != 2))
2144 return SDValue();
2145 } else {
2146 return SDValue();
2147 }
2148
2149 bool IsSext = N->getOpcode() == ISD::SIGN_EXTEND;
2150 bool IsLow = Index == 0;
2151
2152 unsigned Op = IsSext ? (IsLow ? WebAssemblyISD::EXTEND_LOW_S
2153 : WebAssemblyISD::EXTEND_HIGH_S)
2154 : (IsLow ? WebAssemblyISD::EXTEND_LOW_U
2155 : WebAssemblyISD::EXTEND_HIGH_U);
2156
2157 return DAG.getNode(Op, SDLoc(N), ResVT, Source);
2158}
2159
2160static SDValue
2161performVectorConvertLowCombine(SDNode *N,
2162 TargetLowering::DAGCombinerInfo &DCI) {
2163 auto &DAG = DCI.DAG;
2164
2165 EVT ResVT = N->getValueType(0);
2166 if (ResVT != MVT::v2f64)
2167 return SDValue();
2168
2169 if (N->getOpcode() == ISD::SINT_TO_FP || N->getOpcode() == ISD::UINT_TO_FP) {
2170 // Combine this:
2171 //
2172 // (v2f64 ({s,u}int_to_fp
2173 // (v2i32 (extract_subvector (v4i32 $x), 0))))
2174 //
2175 // into (f64x2.convert_low_i32x4_{s,u} $x).
2176 auto Extract = N->getOperand(0);
2177 if (Extract.getOpcode() != ISD::EXTRACT_SUBVECTOR)
2178 return SDValue();
2179 if (Extract.getValueType() != MVT::v2i32)
2180 return SDValue();
2181 auto Source = Extract.getOperand(0);
2182 if (Source.getValueType() != MVT::v4i32)
2183 return SDValue();
2184 auto *IndexNode = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
2185 if (IndexNode == nullptr || IndexNode->getZExtValue() != 0)
2186 return SDValue();
2187
2188 unsigned Op = N->getOpcode() == ISD::SINT_TO_FP
2189 ? WebAssemblyISD::CONVERT_LOW_S
2190 : WebAssemblyISD::CONVERT_LOW_U;
2191
2192 return DAG.getNode(Op, SDLoc(N), ResVT, Source);
2193
2194 } else if (N->getOpcode() == ISD::EXTRACT_SUBVECTOR) {
2195 // Combine this:
2196 //
2197 // (v2f64 (extract_subvector
2198 // (v4f64 ({s,u}int_to_fp (v4i32 $x))), 0))
2199 //
2200 // into (f64x2.convert_low_i32x4_{s,u} $x).
2201 auto IntToFP = N->getOperand(0);
2202 if (IntToFP.getOpcode() != ISD::SINT_TO_FP &&
2203 IntToFP.getOpcode() != ISD::UINT_TO_FP)
2204 return SDValue();
2205 if (IntToFP.getValueType() != MVT::v4f64)
2206 return SDValue();
2207 auto Source = IntToFP.getOperand(0);
2208 if (Source.getValueType() != MVT::v4i32)
2209 return SDValue();
2210 auto IndexNode = dyn_cast<ConstantSDNode>(N->getOperand(1));
2211 if (IndexNode == nullptr || IndexNode->getZExtValue() != 0)
2212 return SDValue();
2213
2214 unsigned Op = IntToFP->getOpcode() == ISD::SINT_TO_FP
2215 ? WebAssemblyISD::CONVERT_LOW_S
2216 : WebAssemblyISD::CONVERT_LOW_U;
2217
2218 return DAG.getNode(Op, SDLoc(N), ResVT, Source);
2219
2220 } else {
2221 llvm_unreachable("unexpected opcode")::llvm::llvm_unreachable_internal("unexpected opcode", "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 2221)
;
2222 }
2223}
2224
2225static SDValue
2226performVectorTruncSatLowCombine(SDNode *N,
2227 TargetLowering::DAGCombinerInfo &DCI) {
2228 auto &DAG = DCI.DAG;
2229 assert(N->getOpcode() == ISD::CONCAT_VECTORS)(static_cast <bool> (N->getOpcode() == ISD::CONCAT_VECTORS
) ? void (0) : __assert_fail ("N->getOpcode() == ISD::CONCAT_VECTORS"
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 2229, __extension__ __PRETTY_FUNCTION__))
;
2230
2231 // Combine this:
2232 //
2233 // (concat_vectors (v2i32 (fp_to_{s,u}int_sat $x, 32)), (v2i32 (splat 0)))
2234 //
2235 // into (i32x4.trunc_sat_f64x2_zero_{s,u} $x).
2236 EVT ResVT = N->getValueType(0);
2237 if (ResVT != MVT::v4i32)
2238 return SDValue();
2239
2240 auto FPToInt = N->getOperand(0);
2241 auto FPToIntOp = FPToInt.getOpcode();
2242 if (FPToIntOp != ISD::FP_TO_SINT_SAT && FPToIntOp != ISD::FP_TO_UINT_SAT)
2243 return SDValue();
2244 if (cast<VTSDNode>(FPToInt.getOperand(1))->getVT() != MVT::i32)
2245 return SDValue();
2246
2247 auto Source = FPToInt.getOperand(0);
2248 if (Source.getValueType() != MVT::v2f64)
2249 return SDValue();
2250
2251 auto *Splat = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
2252 APInt SplatValue, SplatUndef;
2253 unsigned SplatBitSize;
2254 bool HasAnyUndefs;
2255 if (!Splat || !Splat->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
2256 HasAnyUndefs))
2257 return SDValue();
2258 if (SplatValue != 0)
2259 return SDValue();
2260
2261 unsigned Op = FPToIntOp == ISD::FP_TO_SINT_SAT
2262 ? WebAssemblyISD::TRUNC_SAT_ZERO_S
2263 : WebAssemblyISD::TRUNC_SAT_ZERO_U;
2264
2265 return DAG.getNode(Op, SDLoc(N), ResVT, Source);
2266}
2267
2268SDValue
2269WebAssemblyTargetLowering::PerformDAGCombine(SDNode *N,
2270 DAGCombinerInfo &DCI) const {
2271 switch (N->getOpcode()) {
2272 default:
2273 return SDValue();
2274 case ISD::VECTOR_SHUFFLE:
2275 return performVECTOR_SHUFFLECombine(N, DCI);
2276 case ISD::SIGN_EXTEND:
2277 case ISD::ZERO_EXTEND:
2278 return performVectorExtendCombine(N, DCI);
2279 case ISD::SINT_TO_FP:
2280 case ISD::UINT_TO_FP:
2281 case ISD::EXTRACT_SUBVECTOR:
2282 return performVectorConvertLowCombine(N, DCI);
2283 case ISD::CONCAT_VECTORS:
2284 return performVectorTruncSatLowCombine(N, DCI);
2285 }
2286}

/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/CodeGen/SelectionDAGNodes.h

1//===- llvm/CodeGen/SelectionDAGNodes.h - SelectionDAG Nodes ----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file declares the SDNode class and derived classes, which are used to
10// represent the nodes and operations present in a SelectionDAG. These nodes
11// and operations are machine code level operations, with some similarities to
12// the GCC RTL representation.
13//
14// Clients should include the SelectionDAG.h file instead of this file directly.
15//
16//===----------------------------------------------------------------------===//
17
18#ifndef LLVM_CODEGEN_SELECTIONDAGNODES_H
19#define LLVM_CODEGEN_SELECTIONDAGNODES_H
20
21#include "llvm/ADT/APFloat.h"
22#include "llvm/ADT/ArrayRef.h"
23#include "llvm/ADT/BitVector.h"
24#include "llvm/ADT/FoldingSet.h"
25#include "llvm/ADT/GraphTraits.h"
26#include "llvm/ADT/SmallPtrSet.h"
27#include "llvm/ADT/SmallVector.h"
28#include "llvm/ADT/ilist_node.h"
29#include "llvm/ADT/iterator.h"
30#include "llvm/ADT/iterator_range.h"
31#include "llvm/CodeGen/ISDOpcodes.h"
32#include "llvm/CodeGen/MachineMemOperand.h"
33#include "llvm/CodeGen/Register.h"
34#include "llvm/CodeGen/ValueTypes.h"
35#include "llvm/IR/Constants.h"
36#include "llvm/IR/DebugLoc.h"
37#include "llvm/IR/Instruction.h"
38#include "llvm/IR/Instructions.h"
39#include "llvm/IR/Metadata.h"
40#include "llvm/IR/Operator.h"
41#include "llvm/Support/AlignOf.h"
42#include "llvm/Support/AtomicOrdering.h"
43#include "llvm/Support/Casting.h"
44#include "llvm/Support/ErrorHandling.h"
45#include "llvm/Support/MachineValueType.h"
46#include "llvm/Support/TypeSize.h"
47#include <algorithm>
48#include <cassert>
49#include <climits>
50#include <cstddef>
51#include <cstdint>
52#include <cstring>
53#include <iterator>
54#include <string>
55#include <tuple>
56
57namespace llvm {
58
59class APInt;
60class Constant;
61template <typename T> struct DenseMapInfo;
62class GlobalValue;
63class MachineBasicBlock;
64class MachineConstantPoolValue;
65class MCSymbol;
66class raw_ostream;
67class SDNode;
68class SelectionDAG;
69class Type;
70class Value;
71
72void checkForCycles(const SDNode *N, const SelectionDAG *DAG = nullptr,
73 bool force = false);
74
/// This represents a list of ValueType's that has been intern'd by
/// a SelectionDAG. Instances of this simple value class are returned by
/// SelectionDAG::getVTList(...).
///
struct SDVTList {
  const EVT *VTs;      // Pointer to the interned array of value types.
  unsigned int NumVTs; // Number of entries in the VTs array.
};
83
namespace ISD {

  /// Node predicates

/// If N is a BUILD_VECTOR or SPLAT_VECTOR node whose elements are all the
/// same constant or undefined, return true and return the constant value in
/// \p SplatValue.
bool isConstantSplatVector(const SDNode *N, APInt &SplatValue);

/// Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where
/// all of the elements are ~0 or undef. If \p BuildVectorOnly is set to
/// true, it only checks BUILD_VECTOR.
bool isConstantSplatVectorAllOnes(const SDNode *N,
                                  bool BuildVectorOnly = false);

/// Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where
/// all of the elements are 0 or undef. If \p BuildVectorOnly is set to true, it
/// only checks BUILD_VECTOR.
bool isConstantSplatVectorAllZeros(const SDNode *N,
                                   bool BuildVectorOnly = false);

/// Return true if the specified node is a BUILD_VECTOR where all of the
/// elements are ~0 or undef.
bool isBuildVectorAllOnes(const SDNode *N);

/// Return true if the specified node is a BUILD_VECTOR where all of the
/// elements are 0 or undef.
bool isBuildVectorAllZeros(const SDNode *N);

/// Return true if the specified node is a BUILD_VECTOR node of all
/// ConstantSDNode or undef.
bool isBuildVectorOfConstantSDNodes(const SDNode *N);

/// Return true if the specified node is a BUILD_VECTOR node of all
/// ConstantFPSDNode or undef.
bool isBuildVectorOfConstantFPSDNodes(const SDNode *N);

/// Return true if the node has at least one operand and all operands of the
/// specified node are ISD::UNDEF.
bool allOperandsUndef(const SDNode *N);

} // end namespace ISD
126
127//===----------------------------------------------------------------------===//
128/// Unlike LLVM values, Selection DAG nodes may return multiple
129/// values as the result of a computation. Many nodes return multiple values,
130/// from loads (which define a token and a return value) to ADDC (which returns
131/// a result and a carry value), to calls (which may return an arbitrary number
132/// of values).
133///
134/// As such, each use of a SelectionDAG computation must indicate the node that
135/// computes it as well as which return value to use from that node. This pair
136/// of information is represented with the SDValue value type.
137///
class SDValue {
  friend struct DenseMapInfo<SDValue>;

  SDNode *Node = nullptr; // The node defining the value we are using.
  unsigned ResNo = 0;     // Which return value of the node we are using.

public:
  SDValue() = default;
  SDValue(SDNode *node, unsigned resno);

  /// get the index which selects a specific result in the SDNode
  unsigned getResNo() const { return ResNo; }

  /// get the SDNode which holds the desired result
  SDNode *getNode() const { return Node; }

  /// set the SDNode
  void setNode(SDNode *N) { Node = N; }

  // NOTE(review): Node may be null for a default-constructed SDValue;
  // dereferencing through operator-> or the forwarding methods below without
  // first checking operator bool() is undefined behavior (this is what the
  // static analyzer flags when a possibly-empty SDValue is used).
  inline SDNode *operator->() const { return Node; }

  bool operator==(const SDValue &O) const {
    return Node == O.Node && ResNo == O.ResNo;
  }
  bool operator!=(const SDValue &O) const {
    return !operator==(O);
  }
  bool operator<(const SDValue &O) const {
    return std::tie(Node, ResNo) < std::tie(O.Node, O.ResNo);
  }
  // True when this SDValue refers to an actual node (non-empty).
  explicit operator bool() const {
    return Node != nullptr;
  }

  // Return another result value of the same node.
  SDValue getValue(unsigned R) const {
    return SDValue(Node, R);
  }

  /// Return true if this node is an operand of N.
  bool isOperandOf(const SDNode *N) const;

  /// Return the ValueType of the referenced return value.
  inline EVT getValueType() const;

  /// Return the simple ValueType of the referenced return value.
  MVT getSimpleValueType() const {
    return getValueType().getSimpleVT();
  }

  /// Returns the size of the value in bits.
  ///
  /// If the value type is a scalable vector type, the scalable property will
  /// be set and the runtime size will be a positive integer multiple of the
  /// base size.
  TypeSize getValueSizeInBits() const {
    return getValueType().getSizeInBits();
  }

  uint64_t getScalarValueSizeInBits() const {
    return getValueType().getScalarType().getFixedSizeInBits();
  }

  // Forwarding methods - These forward to the corresponding methods in SDNode.
  inline unsigned getOpcode() const;
  inline unsigned getNumOperands() const;
  inline const SDValue &getOperand(unsigned i) const;
  inline uint64_t getConstantOperandVal(unsigned i) const;
  inline const APInt &getConstantOperandAPInt(unsigned i) const;
  inline bool isTargetMemoryOpcode() const;
  inline bool isTargetOpcode() const;
  inline bool isMachineOpcode() const;
  inline bool isUndef() const;
  inline unsigned getMachineOpcode() const;
  inline const DebugLoc &getDebugLoc() const;
  inline void dump() const;
  inline void dump(const SelectionDAG *G) const;
  inline void dumpr() const;
  inline void dumpr(const SelectionDAG *G) const;

  /// Return true if this operand (which must be a chain) reaches the
  /// specified operand without crossing any side-effecting instructions.
  /// In practice, this looks through token factors and non-volatile loads.
  /// In order to remain efficient, this only
  /// looks a couple of nodes in, it does not do an exhaustive search.
  bool reachesChainWithoutSideEffects(SDValue Dest,
                                      unsigned Depth = 2) const;

  /// Return true if there are no nodes using value ResNo of Node.
  inline bool use_empty() const;

  /// Return true if there is exactly one node using value ResNo of Node.
  inline bool hasOneUse() const;
};
231
// DenseMap support: empty/tombstone keys are encoded with a null Node and
// out-of-range ResNo values, so they never collide with real SDValues.
template<> struct DenseMapInfo<SDValue> {
  static inline SDValue getEmptyKey() {
    SDValue V;
    V.ResNo = -1U;
    return V;
  }

  static inline SDValue getTombstoneKey() {
    SDValue V;
    V.ResNo = -2U;
    return V;
  }

  static unsigned getHashValue(const SDValue &Val) {
    // Mix the node pointer at two shifts and fold in the result number.
    return ((unsigned)((uintptr_t)Val.getNode() >> 4) ^
           (unsigned)((uintptr_t)Val.getNode() >> 9)) + Val.getResNo();
  }

  static bool isEqual(const SDValue &LHS, const SDValue &RHS) {
    return LHS == RHS;
  }
};
254
/// Allow casting operators to work directly on
/// SDValues as if they were SDNode*'s.
template<> struct simplify_type<SDValue> {
  using SimpleType = SDNode *;

  static SimpleType getSimplifiedValue(SDValue &Val) {
    return Val.getNode();
  }
};
template<> struct simplify_type<const SDValue> {
  using SimpleType = /*const*/ SDNode *;

  static SimpleType getSimplifiedValue(const SDValue &Val) {
    return Val.getNode();
  }
};
271
/// Represents a use of a SDNode. This class holds an SDValue,
/// which records the SDNode being used and the result number, a
/// pointer to the SDNode using the value, and Next and Prev pointers,
/// which link together all the uses of an SDNode.
///
class SDUse {
  /// Val - The value being used.
  SDValue Val;
  /// User - The user of this value.
  SDNode *User = nullptr;
  /// Prev, Next - Pointers to the uses list of the SDNode referred by
  /// this operand.
  SDUse **Prev = nullptr;
  SDUse *Next = nullptr;

public:
  SDUse() = default;
  // SDUse is non-copyable: its list pointers must be managed by SDNode.
  SDUse(const SDUse &U) = delete;
  SDUse &operator=(const SDUse &) = delete;

  /// Normally SDUse will just implicitly convert to an SDValue that it holds.
  operator const SDValue&() const { return Val; }

  /// If implicit conversion to SDValue doesn't work, the get() method returns
  /// the SDValue.
  const SDValue &get() const { return Val; }

  /// This returns the SDNode that contains this Use.
  SDNode *getUser() { return User; }

  /// Get the next SDUse in the use list.
  SDUse *getNext() const { return Next; }

  /// Convenience function for get().getNode().
  SDNode *getNode() const { return Val.getNode(); }
  /// Convenience function for get().getResNo().
  unsigned getResNo() const { return Val.getResNo(); }
  /// Convenience function for get().getValueType().
  EVT getValueType() const { return Val.getValueType(); }

  /// Convenience function for get().operator==
  bool operator==(const SDValue &V) const {
    return Val == V;
  }

  /// Convenience function for get().operator!=
  bool operator!=(const SDValue &V) const {
    return Val != V;
  }

  /// Convenience function for get().operator<
  bool operator<(const SDValue &V) const {
    return Val < V;
  }

private:
  friend class SelectionDAG;
  friend class SDNode;
  // TODO: unfriend HandleSDNode once we fix its operand handling.
  friend class HandleSDNode;

  void setUser(SDNode *p) { User = p; }

  /// Remove this use from its existing use list, assign it the
  /// given value, and add it to the new value's node's use list.
  inline void set(const SDValue &V);
  /// Like set, but only supports initializing a newly-allocated
  /// SDUse with a non-null value.
  inline void setInitial(const SDValue &V);
  /// Like set, but only sets the Node portion of the value,
  /// leaving the ResNo portion unmodified.
  inline void setNode(SDNode *N);

  // Splice this use onto the front of the given doubly-linked use list.
  void addToList(SDUse **List) {
    Next = *List;
    if (Next) Next->Prev = &Next;
    Prev = List;
    *List = this;
  }

  // Unlink this use from whatever list it is currently on.
  void removeFromList() {
    *Prev = Next;
    if (Next) Next->Prev = Prev;
  }
};
357
/// simplify_type specializations - Allow casting operators to work directly on
/// SDValues as if they were SDNode*'s.
template<> struct simplify_type<SDUse> {
  using SimpleType = SDNode *;

  static SimpleType getSimplifiedValue(SDUse &Val) {
    return Val.getNode();
  }
};
367
/// These are IR-level optimization flags that may be propagated to SDNodes.
/// TODO: This data structure should be shared by the IR optimizer and the
/// the backend.
struct SDNodeFlags {
private:
  bool NoUnsignedWrap : 1;
  bool NoSignedWrap : 1;
  bool Exact : 1;
  bool NoNaNs : 1;
  bool NoInfs : 1;
  bool NoSignedZeros : 1;
  bool AllowReciprocal : 1;
  bool AllowContract : 1;
  bool ApproximateFuncs : 1;
  bool AllowReassociation : 1;

  // We assume instructions do not raise floating-point exceptions by default,
  // and only those marked explicitly may do so. We could choose to represent
  // this via a positive "FPExcept" flags like on the MI level, but having a
  // negative "NoFPExcept" flag here (that defaults to true) makes the flag
  // intersection logic more straightforward.
  bool NoFPExcept : 1;

public:
  /// Default constructor turns off all optimization flags.
  SDNodeFlags()
      : NoUnsignedWrap(false), NoSignedWrap(false), Exact(false), NoNaNs(false),
        NoInfs(false), NoSignedZeros(false), AllowReciprocal(false),
        AllowContract(false), ApproximateFuncs(false),
        AllowReassociation(false), NoFPExcept(false) {}

  /// Propagate the fast-math-flags from an IR FPMathOperator.
  void copyFMF(const FPMathOperator &FPMO) {
    setNoNaNs(FPMO.hasNoNaNs());
    setNoInfs(FPMO.hasNoInfs());
    setNoSignedZeros(FPMO.hasNoSignedZeros());
    setAllowReciprocal(FPMO.hasAllowReciprocal());
    setAllowContract(FPMO.hasAllowContract());
    setApproximateFuncs(FPMO.hasApproxFunc());
    setAllowReassociation(FPMO.hasAllowReassoc());
  }

  // These are mutators for each flag.
  void setNoUnsignedWrap(bool b) { NoUnsignedWrap = b; }
  void setNoSignedWrap(bool b) { NoSignedWrap = b; }
  void setExact(bool b) { Exact = b; }
  void setNoNaNs(bool b) { NoNaNs = b; }
  void setNoInfs(bool b) { NoInfs = b; }
  void setNoSignedZeros(bool b) { NoSignedZeros = b; }
  void setAllowReciprocal(bool b) { AllowReciprocal = b; }
  void setAllowContract(bool b) { AllowContract = b; }
  void setApproximateFuncs(bool b) { ApproximateFuncs = b; }
  void setAllowReassociation(bool b) { AllowReassociation = b; }
  void setNoFPExcept(bool b) { NoFPExcept = b; }

  // These are accessors for each flag.
  bool hasNoUnsignedWrap() const { return NoUnsignedWrap; }
  bool hasNoSignedWrap() const { return NoSignedWrap; }
  bool hasExact() const { return Exact; }
  bool hasNoNaNs() const { return NoNaNs; }
  bool hasNoInfs() const { return NoInfs; }
  bool hasNoSignedZeros() const { return NoSignedZeros; }
  bool hasAllowReciprocal() const { return AllowReciprocal; }
  bool hasAllowContract() const { return AllowContract; }
  bool hasApproximateFuncs() const { return ApproximateFuncs; }
  bool hasAllowReassociation() const { return AllowReassociation; }
  bool hasNoFPExcept() const { return NoFPExcept; }

  /// Clear any flags in this flag set that aren't also set in Flags. All
  /// flags will be cleared if Flags are undefined.
  void intersectWith(const SDNodeFlags Flags) {
    NoUnsignedWrap &= Flags.NoUnsignedWrap;
    NoSignedWrap &= Flags.NoSignedWrap;
    Exact &= Flags.Exact;
    NoNaNs &= Flags.NoNaNs;
    NoInfs &= Flags.NoInfs;
    NoSignedZeros &= Flags.NoSignedZeros;
    AllowReciprocal &= Flags.AllowReciprocal;
    AllowContract &= Flags.AllowContract;
    ApproximateFuncs &= Flags.ApproximateFuncs;
    AllowReassociation &= Flags.AllowReassociation;
    NoFPExcept &= Flags.NoFPExcept;
  }
};
452
453/// Represents one node in the SelectionDAG.
454///
455class SDNode : public FoldingSetNode, public ilist_node<SDNode> {
456private:
457 /// The operation that this node performs.
458 int16_t NodeType;
459
460protected:
461 // We define a set of mini-helper classes to help us interpret the bits in our
462 // SubclassData. These are designed to fit within a uint16_t so they pack
463 // with NodeType.
464
465#if defined(_AIX) && (!defined(__GNUC__4) || defined(__clang__1))
466// Except for GCC; by default, AIX compilers store bit-fields in 4-byte words
467// and give the `pack` pragma push semantics.
468#define BEGIN_TWO_BYTE_PACK() _Pragma("pack(2)")pack(2)
469#define END_TWO_BYTE_PACK() _Pragma("pack(pop)")pack(pop)
470#else
471#define BEGIN_TWO_BYTE_PACK()
472#define END_TWO_BYTE_PACK()
473#endif
474
BEGIN_TWO_BYTE_PACK()
  // Bit-field "views" of the node's 16-bit subclass data. Each *Bitfields
  // class begins with anonymous padding covering the bits owned by the level
  // above it, so fields of different levels never overlap within the union
  // below.
  class SDNodeBitfields {
    friend class SDNode;
    friend class MemIntrinsicSDNode;
    friend class MemSDNode;
    friend class SelectionDAG;

    uint16_t HasDebugValue : 1;
    uint16_t IsMemIntrinsic : 1;
    uint16_t IsDivergent : 1;
  };
  enum { NumSDNodeBits = 3 };

  class ConstantSDNodeBitfields {
    friend class ConstantSDNode;

    uint16_t : NumSDNodeBits; // Skip the bits owned by SDNodeBitfields.

    uint16_t IsOpaque : 1;
  };

  class MemSDNodeBitfields {
    friend class MemSDNode;
    friend class MemIntrinsicSDNode;
    friend class AtomicSDNode;

    uint16_t : NumSDNodeBits; // Skip the bits owned by SDNodeBitfields.

    uint16_t IsVolatile : 1;
    uint16_t IsNonTemporal : 1;
    uint16_t IsDereferenceable : 1;
    uint16_t IsInvariant : 1;
  };
  enum { NumMemSDNodeBits = NumSDNodeBits + 4 };

  class LSBaseSDNodeBitfields {
    friend class LSBaseSDNode;
    friend class MaskedLoadStoreSDNode;
    friend class MaskedGatherScatterSDNode;

    uint16_t : NumMemSDNodeBits; // Skip the bits owned by MemSDNodeBitfields.

    // This storage is shared between disparate class hierarchies to hold an
    // enumeration specific to the class hierarchy in use.
    // LSBaseSDNode => enum ISD::MemIndexedMode
    // MaskedLoadStoreBaseSDNode => enum ISD::MemIndexedMode
    // MaskedGatherScatterSDNode => enum ISD::MemIndexType
    uint16_t AddressingMode : 3;
  };
  enum { NumLSBaseSDNodeBits = NumMemSDNodeBits + 3 };

  class LoadSDNodeBitfields {
    friend class LoadSDNode;
    friend class MaskedLoadSDNode;
    friend class MaskedGatherSDNode;

    uint16_t : NumLSBaseSDNodeBits; // Skip bits owned by LSBaseSDNodeBitfields.

    uint16_t ExtTy : 2; // enum ISD::LoadExtType
    uint16_t IsExpanding : 1;
  };

  class StoreSDNodeBitfields {
    friend class StoreSDNode;
    friend class MaskedStoreSDNode;
    friend class MaskedScatterSDNode;

    uint16_t : NumLSBaseSDNodeBits; // Skip bits owned by LSBaseSDNodeBitfields.

    uint16_t IsTruncating : 1;
    uint16_t IsCompressing : 1;
  };

  // All of the views alias the same two bytes; RawSDNodeBits provides untyped
  // access to the whole subclass-data word (used e.g. for hashing in
  // MemSDNode::getRawSubclassData).
  union {
    char RawSDNodeBits[sizeof(uint16_t)];
    SDNodeBitfields SDNodeBits;
    ConstantSDNodeBitfields ConstantSDNodeBits;
    MemSDNodeBitfields MemSDNodeBits;
    LSBaseSDNodeBitfields LSBaseSDNodeBits;
    LoadSDNodeBitfields LoadSDNodeBits;
    StoreSDNodeBitfields StoreSDNodeBits;
  };
END_TWO_BYTE_PACK()
#undef BEGIN_TWO_BYTE_PACK
#undef END_TWO_BYTE_PACK

  // RawSDNodeBits must cover the entirety of the union. This means that all of
  // the union's members must have size <= RawSDNodeBits. We write the RHS as
  // "2" instead of sizeof(RawSDNodeBits) because MSVC can't handle the latter.
  static_assert(sizeof(SDNodeBitfields) <= 2, "field too wide");
  static_assert(sizeof(ConstantSDNodeBitfields) <= 2, "field too wide");
  static_assert(sizeof(MemSDNodeBitfields) <= 2, "field too wide");
  static_assert(sizeof(LSBaseSDNodeBitfields) <= 2, "field too wide");
  static_assert(sizeof(LoadSDNodeBitfields) <= 2, "field too wide");
  static_assert(sizeof(StoreSDNodeBitfields) <= 2, "field too wide");
570
private:
  friend class SelectionDAG;
  // TODO: unfriend HandleSDNode once we fix its operand handling.
  friend class HandleSDNode;

  /// Unique id per SDNode in the DAG.
  int NodeId = -1;

  /// The values that are used by this operation. The storage is not owned by
  /// the node (see the SDNode constructor / SelectionDAG::createOperands).
  SDUse *OperandList = nullptr;

  /// The types of the values this node defines. SDNode's may
  /// define multiple values simultaneously.
  const EVT *ValueList;

  /// List of uses for this SDNode (head of a linked list of SDUse records;
  /// null when there are no uses — see use_empty()).
  SDUse *UseList = nullptr;

  /// The number of entries in the Operand/Value list.
  unsigned short NumOperands = 0;
  unsigned short NumValues;

  // The ordering of the SDNodes. It roughly corresponds to the ordering of the
  // original LLVM instructions.
  // This is used for turning off scheduling, because we'll forgo
  // the normal scheduling algorithms and output the instructions according to
  // this ordering.
  unsigned IROrder;

  /// Source line information.
  DebugLoc debugLoc;

  /// Return a pointer to the specified value type.
  static const EVT *getValueTypeList(EVT VT);

  /// Per-node flags; read/written via getFlags()/setFlags() and narrowed via
  /// intersectFlagsWith().
  SDNodeFlags Flags;

public:
  /// Unique and persistent id per SDNode in the DAG.
  /// Used for debug printing.
  uint16_t PersistentId;
612
613 //===--------------------------------------------------------------------===//
614 // Accessors
615 //
616
617 /// Return the SelectionDAG opcode value for this node. For
618 /// pre-isel nodes (those for which isMachineOpcode returns false), these
619 /// are the opcode values in the ISD and <target>ISD namespaces. For
620 /// post-isel opcodes, see getMachineOpcode.
621 unsigned getOpcode() const { return (unsigned short)NodeType; }
622
623 /// Test if this node has a target-specific opcode (in the
624 /// \<target\>ISD namespace).
625 bool isTargetOpcode() const { return NodeType >= ISD::BUILTIN_OP_END; }
626
627 /// Test if this node has a target-specific opcode that may raise
628 /// FP exceptions (in the \<target\>ISD namespace and greater than
629 /// FIRST_TARGET_STRICTFP_OPCODE). Note that all target memory
630 /// opcode are currently automatically considered to possibly raise
631 /// FP exceptions as well.
632 bool isTargetStrictFPOpcode() const {
633 return NodeType >= ISD::FIRST_TARGET_STRICTFP_OPCODE;
634 }
635
636 /// Test if this node has a target-specific
637 /// memory-referencing opcode (in the \<target\>ISD namespace and
638 /// greater than FIRST_TARGET_MEMORY_OPCODE).
639 bool isTargetMemoryOpcode() const {
640 return NodeType >= ISD::FIRST_TARGET_MEMORY_OPCODE;
641 }
642
643 /// Return true if the type of the node type undefined.
644 bool isUndef() const { return NodeType == ISD::UNDEF; }
645
646 /// Test if this node is a memory intrinsic (with valid pointer information).
647 /// INTRINSIC_W_CHAIN and INTRINSIC_VOID nodes are sometimes created for
648 /// non-memory intrinsics (with chains) that are not really instances of
649 /// MemSDNode. For such nodes, we need some extra state to determine the
650 /// proper classof relationship.
651 bool isMemIntrinsic() const {
652 return (NodeType == ISD::INTRINSIC_W_CHAIN ||
653 NodeType == ISD::INTRINSIC_VOID) &&
654 SDNodeBits.IsMemIntrinsic;
655 }
656
657 /// Test if this node is a strict floating point pseudo-op.
658 bool isStrictFPOpcode() {
659 switch (NodeType) {
660 default:
661 return false;
662 case ISD::STRICT_FP16_TO_FP:
663 case ISD::STRICT_FP_TO_FP16:
664#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
665 case ISD::STRICT_##DAGN:
666#include "llvm/IR/ConstrainedOps.def"
667 return true;
668 }
669 }
670
671 /// Test if this node has a post-isel opcode, directly
672 /// corresponding to a MachineInstr opcode.
673 bool isMachineOpcode() const { return NodeType < 0; }
674
675 /// This may only be called if isMachineOpcode returns
676 /// true. It returns the MachineInstr opcode value that the node's opcode
677 /// corresponds to.
678 unsigned getMachineOpcode() const {
679 assert(isMachineOpcode() && "Not a MachineInstr opcode!")(static_cast <bool> (isMachineOpcode() && "Not a MachineInstr opcode!"
) ? void (0) : __assert_fail ("isMachineOpcode() && \"Not a MachineInstr opcode!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 679, __extension__ __PRETTY_FUNCTION__))
;
680 return ~NodeType;
681 }
682
  /// Query/set whether this node has an associated debug value
  /// (the HasDebugValue subclass-data bit).
  bool getHasDebugValue() const { return SDNodeBits.HasDebugValue; }
  void setHasDebugValue(bool b) { SDNodeBits.HasDebugValue = b; }

  /// Return this node's divergence bit (set externally; SelectionDAG is a
  /// friend of the bitfield class).
  bool isDivergent() const { return SDNodeBits.IsDivergent; }

  /// Return true if there are no uses of this node.
  bool use_empty() const { return UseList == nullptr; }

  /// Return true if there is exactly one use of this node.
  bool hasOneUse() const { return hasSingleElement(uses()); }

  /// Return the number of uses of this node. This method takes
  /// time proportional to the number of uses.
  size_t use_size() const { return std::distance(use_begin(), use_end()); }

  /// Return the unique node id.
  int getNodeId() const { return NodeId; }

  /// Set unique node id.
  void setNodeId(int Id) { NodeId = Id; }

  /// Return the node ordering.
  unsigned getIROrder() const { return IROrder; }

  /// Set the node ordering.
  void setIROrder(unsigned Order) { IROrder = Order; }

  /// Return the source location info.
  const DebugLoc &getDebugLoc() const { return debugLoc; }

  /// Set source location info. Try to avoid this, putting
  /// it in the constructor is preferable.
  void setDebugLoc(DebugLoc dl) { debugLoc = std::move(dl); }
716
717 /// This class provides iterator support for SDUse
718 /// operands that use a specific SDNode.
719 class use_iterator {
720 friend class SDNode;
721
722 SDUse *Op = nullptr;
723
724 explicit use_iterator(SDUse *op) : Op(op) {}
725
726 public:
727 using iterator_category = std::forward_iterator_tag;
728 using value_type = SDUse;
729 using difference_type = std::ptrdiff_t;
730 using pointer = value_type *;
731 using reference = value_type &;
732
733 use_iterator() = default;
734 use_iterator(const use_iterator &I) : Op(I.Op) {}
735
736 bool operator==(const use_iterator &x) const {
737 return Op == x.Op;
738 }
739 bool operator!=(const use_iterator &x) const {
740 return !operator==(x);
741 }
742
743 /// Return true if this iterator is at the end of uses list.
744 bool atEnd() const { return Op == nullptr; }
745
746 // Iterator traversal: forward iteration only.
747 use_iterator &operator++() { // Preincrement
748 assert(Op && "Cannot increment end iterator!")(static_cast <bool> (Op && "Cannot increment end iterator!"
) ? void (0) : __assert_fail ("Op && \"Cannot increment end iterator!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 748, __extension__ __PRETTY_FUNCTION__))
;
749 Op = Op->getNext();
750 return *this;
751 }
752
753 use_iterator operator++(int) { // Postincrement
754 use_iterator tmp = *this; ++*this; return tmp;
755 }
756
757 /// Retrieve a pointer to the current user node.
758 SDNode *operator*() const {
759 assert(Op && "Cannot dereference end iterator!")(static_cast <bool> (Op && "Cannot dereference end iterator!"
) ? void (0) : __assert_fail ("Op && \"Cannot dereference end iterator!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 759, __extension__ __PRETTY_FUNCTION__))
;
760 return Op->getUser();
761 }
762
763 SDNode *operator->() const { return operator*(); }
764
765 SDUse &getUse() const { return *Op; }
766
767 /// Retrieve the operand # of this use in its user.
768 unsigned getOperandNo() const {
769 assert(Op && "Cannot dereference end iterator!")(static_cast <bool> (Op && "Cannot dereference end iterator!"
) ? void (0) : __assert_fail ("Op && \"Cannot dereference end iterator!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 769, __extension__ __PRETTY_FUNCTION__))
;
770 return (unsigned)(Op - Op->getUser()->OperandList);
771 }
772 };
773
  /// Provide iteration support to walk over all uses of an SDNode.
  use_iterator use_begin() const {
    return use_iterator(UseList);
  }

  // End iterator is the null-positioned sentinel (see use_iterator::atEnd).
  static use_iterator use_end() { return use_iterator(nullptr); }

  /// Range over all uses of this node, for range-based for loops.
  inline iterator_range<use_iterator> uses() {
    return make_range(use_begin(), use_end());
  }
  inline iterator_range<use_iterator> uses() const {
    return make_range(use_begin(), use_end());
  }
787
  /// Return true if there are exactly NUSES uses of the indicated value.
  /// This method ignores uses of other values defined by this operation.
  bool hasNUsesOfValue(unsigned NUses, unsigned Value) const;

  /// Return true if there are any uses of the indicated value.
  /// This method ignores uses of other values defined by this operation.
  bool hasAnyUseOfValue(unsigned Value) const;

  /// Return true if this node is the only use of N.
  bool isOnlyUserOf(const SDNode *N) const;

  /// Return true if this node is an operand of N.
  bool isOperandOf(const SDNode *N) const;

  /// Return true if this node is a predecessor of N.
  /// NOTE: Implemented on top of hasPredecessor and every bit as
  /// expensive. Use carefully.
  bool isPredecessorOf(const SDNode *N) const {
    return N->hasPredecessor(this);
  }

  /// Return true if N is a predecessor of this node.
  /// N is either an operand of this node, or can be reached by recursively
  /// traversing up the operands.
  /// NOTE: This is an expensive method. Use it carefully.
  bool hasPredecessor(const SDNode *N) const;
814
  /// Returns true if N is a predecessor of any node in Worklist. This
  /// helper keeps Visited and Worklist sets externally to allow unions
  /// searches to be performed in parallel, caching of results across
  /// queries and incremental addition to Worklist. Stops early if N is
  /// found but will resume. Remember to clear Visited and Worklists
  /// if DAG changes. MaxSteps gives a maximum number of nodes to visit before
  /// giving up (0 means no limit). The TopologicalPrune flag signals that
  /// positive NodeIds are topologically ordered (Operands have strictly
  /// smaller node id) and search can be pruned leveraging this.
  static bool hasPredecessorHelper(const SDNode *N,
                                   SmallPtrSetImpl<const SDNode *> &Visited,
                                   SmallVectorImpl<const SDNode *> &Worklist,
                                   unsigned int MaxSteps = 0,
                                   bool TopologicalPrune = false) {
    SmallVector<const SDNode *, 8> DeferredNodes;
    if (Visited.count(N))
      return true;

    // Node Id's are assigned in three places: As a topological
    // ordering (> 0), during legalization (results in values set to
    // 0), new nodes (set to -1). If N has a topological id then we
    // know that all nodes with ids smaller than it cannot be
    // successors and we need not check them. Filter out all nodes
    // that can't be matches. We add them to the worklist before exit
    // in case of multiple calls. Note that during selection the topological id
    // may be violated if a node's predecessor is selected before it. We mark
    // this at selection negating the id of unselected successors and
    // restricting topological pruning to positive ids.

    int NId = N->getNodeId();
    // If we Invalidated the Id, reconstruct original NId.
    if (NId < -1)
      NId = -(NId + 1);

    bool Found = false;
    while (!Worklist.empty()) {
      const SDNode *M = Worklist.pop_back_val();
      int MId = M->getNodeId();
      // Defer (rather than visit) nodes the topological ordering proves
      // cannot reach N; TokenFactor is exempt from the pruning.
      if (TopologicalPrune && M->getOpcode() != ISD::TokenFactor && (NId > 0) &&
          (MId > 0) && (MId < NId)) {
        DeferredNodes.push_back(M);
        continue;
      }
      for (const SDValue &OpV : M->op_values()) {
        SDNode *Op = OpV.getNode();
        if (Visited.insert(Op).second)
          Worklist.push_back(Op);
        if (Op == N)
          Found = true;
      }
      if (Found)
        break;
      if (MaxSteps != 0 && Visited.size() >= MaxSteps)
        break;
    }
    // Push deferred nodes back on worklist.
    Worklist.append(DeferredNodes.begin(), DeferredNodes.end());
    // If we bailed early, conservatively return found.
    if (MaxSteps != 0 && Visited.size() >= MaxSteps)
      return true;
    return Found;
  }
877
  /// Return true if all the users of N are contained in Nodes.
  /// NOTE: Requires at least one match, but doesn't require them all.
  static bool areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N);

  /// Return the number of values used by this operation.
  unsigned getNumOperands() const { return NumOperands; }

  /// Return the maximum number of operands that a SDNode can hold
  /// (bounded by the width of the NumOperands field).
  static constexpr size_t getMaxNumOperands() {
    return std::numeric_limits<decltype(SDNode::NumOperands)>::max();
  }

  /// Helper method returns the integer value of a ConstantSDNode operand.
  inline uint64_t getConstantOperandVal(unsigned Num) const;

  /// Helper method returns the APInt of a ConstantSDNode operand.
  inline const APInt &getConstantOperandAPInt(unsigned Num) const;
895
896 const SDValue &getOperand(unsigned Num) const {
897 assert(Num < NumOperands && "Invalid child # of SDNode!")(static_cast <bool> (Num < NumOperands && "Invalid child # of SDNode!"
) ? void (0) : __assert_fail ("Num < NumOperands && \"Invalid child # of SDNode!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 897, __extension__ __PRETTY_FUNCTION__))
;
898 return OperandList[Num];
899 }
900
  // Operands are stored contiguously, so a raw SDUse pointer serves as the
  // operand iterator.
  using op_iterator = SDUse *;

  op_iterator op_begin() const { return OperandList; }
  op_iterator op_end() const { return OperandList+NumOperands; }
  ArrayRef<SDUse> ops() const { return makeArrayRef(op_begin(), op_end()); }
906
  /// Iterator for directly iterating over the operand SDValue's.
  struct value_op_iterator
      : iterator_adaptor_base<value_op_iterator, op_iterator,
                              std::random_access_iterator_tag, SDValue,
                              ptrdiff_t, value_op_iterator *,
                              value_op_iterator *> {
    explicit value_op_iterator(SDUse *U = nullptr)
      : iterator_adaptor_base(U) {}

    // Dereferencing yields the SDValue held by the underlying SDUse.
    const SDValue &operator*() const { return I->get(); }
  };

  /// Range over the operands viewed as SDValues rather than SDUse records.
  iterator_range<value_op_iterator> op_values() const {
    return make_range(value_op_iterator(op_begin()),
                      value_op_iterator(op_end()));
  }
923
924 SDVTList getVTList() const {
925 SDVTList X = { ValueList, NumValues };
926 return X;
927 }
928
929 /// If this node has a glue operand, return the node
930 /// to which the glue operand points. Otherwise return NULL.
931 SDNode *getGluedNode() const {
932 if (getNumOperands() != 0 &&
933 getOperand(getNumOperands()-1).getValueType() == MVT::Glue)
934 return getOperand(getNumOperands()-1).getNode();
935 return nullptr;
936 }
937
938 /// If this node has a glue value with a user, return
939 /// the user (there is at most one). Otherwise return NULL.
940 SDNode *getGluedUser() const {
941 for (use_iterator UI = use_begin(), UE = use_end(); UI != UE; ++UI)
942 if (UI.getUse().get().getValueType() == MVT::Glue)
943 return *UI;
944 return nullptr;
945 }
946
  /// Get/replace the flags attached to this node.
  SDNodeFlags getFlags() const { return Flags; }
  void setFlags(SDNodeFlags NewFlags) { Flags = NewFlags; }

  /// Clear any flags in this node that aren't also set in Flags.
  /// If Flags is not in a defined state then this has no effect.
  void intersectFlagsWith(const SDNodeFlags Flags);

  /// Return the number of values defined/returned by this operator.
  unsigned getNumValues() const { return NumValues; }
956
957 /// Return the type of a specified result.
958 EVT getValueType(unsigned ResNo) const {
959 assert(ResNo < NumValues && "Illegal result number!")(static_cast <bool> (ResNo < NumValues && "Illegal result number!"
) ? void (0) : __assert_fail ("ResNo < NumValues && \"Illegal result number!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 959, __extension__ __PRETTY_FUNCTION__))
;
960 return ValueList[ResNo];
961 }
962
  /// Return the type of a specified result as a simple type.
  MVT getSimpleValueType(unsigned ResNo) const {
    return getValueType(ResNo).getSimpleVT();
  }

  /// Returns MVT::getSizeInBits(getValueType(ResNo)).
  ///
  /// If the value type is a scalable vector type, the scalable property will
  /// be set and the runtime size will be a positive integer multiple of the
  /// base size.
  TypeSize getValueSizeInBits(unsigned ResNo) const {
    return getValueType(ResNo).getSizeInBits();
  }
976
  // Result types are stored contiguously in ValueList, so a raw EVT pointer
  // serves as the iterator.
  using value_iterator = const EVT *;

  value_iterator value_begin() const { return ValueList; }
  value_iterator value_end() const { return ValueList+NumValues; }
  /// Range over the EVTs of this node's results.
  iterator_range<value_iterator> values() const {
    return llvm::make_range(value_begin(), value_end());
  }
984
  /// Return the opcode of this operation for printing.
  std::string getOperationName(const SelectionDAG *G = nullptr) const;
  static const char* getIndexedModeName(ISD::MemIndexedMode AM);
  void print_types(raw_ostream &OS, const SelectionDAG *G) const;
  void print_details(raw_ostream &OS, const SelectionDAG *G) const;
  void print(raw_ostream &OS, const SelectionDAG *G = nullptr) const;
  void printr(raw_ostream &OS, const SelectionDAG *G = nullptr) const;

  /// Print a SelectionDAG node and all children down to
  /// the leaves. The given SelectionDAG allows target-specific nodes
  /// to be printed in human-readable form. Unlike printr, this will
  /// print the whole DAG, including children that appear multiple
  /// times.
  ///
  void printrFull(raw_ostream &O, const SelectionDAG *G = nullptr) const;

  /// Print a SelectionDAG node and children up to
  /// depth "depth." The given SelectionDAG allows target-specific
  /// nodes to be printed in human-readable form. Unlike printr, this
  /// will print children that appear multiple times wherever they are
  /// used.
  ///
  void printrWithDepth(raw_ostream &O, const SelectionDAG *G = nullptr,
                       unsigned depth = 100) const;

  /// Dump this node, for debugging.
  void dump() const;

  /// Dump (recursively) this node and its use-def subgraph.
  void dumpr() const;

  /// Dump this node, for debugging.
  /// The given SelectionDAG allows target-specific nodes to be printed
  /// in human-readable form.
  void dump(const SelectionDAG *G) const;

  /// Dump (recursively) this node and its use-def subgraph.
  /// The given SelectionDAG allows target-specific nodes to be printed
  /// in human-readable form.
  void dumpr(const SelectionDAG *G) const;

  /// printrFull to dbgs(). The given SelectionDAG allows
  /// target-specific nodes to be printed in human-readable form.
  /// Unlike dumpr, this will print the whole DAG, including children
  /// that appear multiple times.
  void dumprFull(const SelectionDAG *G = nullptr) const;

  /// printrWithDepth to dbgs(). The given
  /// SelectionDAG allows target-specific nodes to be printed in
  /// human-readable form. Unlike dumpr, this will print children
  /// that appear multiple times wherever they are used.
  ///
  void dumprWithDepth(const SelectionDAG *G = nullptr,
                      unsigned depth = 100) const;

  /// Gather unique data for the node.
  void Profile(FoldingSetNodeID &ID) const;

  /// This method should only be used by the SDUse class.
  /// Links U onto this node's use list.
  void addUse(SDUse &U) { U.addToList(&UseList); }
1045
1046protected:
1047 static SDVTList getSDVTList(EVT VT) {
1048 SDVTList Ret = { getValueTypeList(VT), 1 };
1049 return Ret;
1050 }
1051
1052 /// Create an SDNode.
1053 ///
1054 /// SDNodes are created without any operands, and never own the operand
1055 /// storage. To add operands, see SelectionDAG::createOperands.
1056 SDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs)
1057 : NodeType(Opc), ValueList(VTs.VTs), NumValues(VTs.NumVTs),
1058 IROrder(Order), debugLoc(std::move(dl)) {
1059 memset(&RawSDNodeBits, 0, sizeof(RawSDNodeBits));
1060 assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor")(static_cast <bool> (debugLoc.hasTrivialDestructor() &&
"Expected trivial destructor") ? void (0) : __assert_fail ("debugLoc.hasTrivialDestructor() && \"Expected trivial destructor\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1060, __extension__ __PRETTY_FUNCTION__))
;
1061 assert(NumValues == VTs.NumVTs &&(static_cast <bool> (NumValues == VTs.NumVTs &&
"NumValues wasn't wide enough for its operands!") ? void (0)
: __assert_fail ("NumValues == VTs.NumVTs && \"NumValues wasn't wide enough for its operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1062, __extension__ __PRETTY_FUNCTION__))
1062 "NumValues wasn't wide enough for its operands!")(static_cast <bool> (NumValues == VTs.NumVTs &&
"NumValues wasn't wide enough for its operands!") ? void (0)
: __assert_fail ("NumValues == VTs.NumVTs && \"NumValues wasn't wide enough for its operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1062, __extension__ __PRETTY_FUNCTION__))
;
1063 }
1064
1065 /// Release the operands and set this node to have zero operands.
1066 void DropOperands();
1067};
1068
1069/// Wrapper class for IR location info (IR ordering and DebugLoc) to be passed
1070/// into SDNode creation functions.
1071/// When an SDNode is created from the DAGBuilder, the DebugLoc is extracted
1072/// from the original Instruction, and IROrder is the ordinal position of
1073/// the instruction.
1074/// When an SDNode is created after the DAG is being built, both DebugLoc and
1075/// the IROrder are propagated from the original SDNode.
1076/// So SDLoc class provides two constructors besides the default one, one to
1077/// be used by the DAGBuilder, the other to be used by others.
1078class SDLoc {
1079private:
1080 DebugLoc DL;
1081 int IROrder = 0;
1082
1083public:
1084 SDLoc() = default;
1085 SDLoc(const SDNode *N) : DL(N->getDebugLoc()), IROrder(N->getIROrder()) {}
1086 SDLoc(const SDValue V) : SDLoc(V.getNode()) {}
1087 SDLoc(const Instruction *I, int Order) : IROrder(Order) {
1088 assert(Order >= 0 && "bad IROrder")(static_cast <bool> (Order >= 0 && "bad IROrder"
) ? void (0) : __assert_fail ("Order >= 0 && \"bad IROrder\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1088, __extension__ __PRETTY_FUNCTION__))
;
1089 if (I)
1090 DL = I->getDebugLoc();
1091 }
1092
1093 unsigned getIROrder() const { return IROrder; }
1094 const DebugLoc &getDebugLoc() const { return DL; }
1095};
1096
1097// Define inline functions from the SDValue class.
1098
1099inline SDValue::SDValue(SDNode *node, unsigned resno)
1100 : Node(node), ResNo(resno) {
1101 // Explicitly check for !ResNo to avoid use-after-free, because there are
1102 // callers that use SDValue(N, 0) with a deleted N to indicate successful
1103 // combines.
1104 assert((!Node || !ResNo || ResNo < Node->getNumValues()) &&(static_cast <bool> ((!Node || !ResNo || ResNo < Node
->getNumValues()) && "Invalid result number for the given node!"
) ? void (0) : __assert_fail ("(!Node || !ResNo || ResNo < Node->getNumValues()) && \"Invalid result number for the given node!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1105, __extension__ __PRETTY_FUNCTION__))
1105 "Invalid result number for the given node!")(static_cast <bool> ((!Node || !ResNo || ResNo < Node
->getNumValues()) && "Invalid result number for the given node!"
) ? void (0) : __assert_fail ("(!Node || !ResNo || ResNo < Node->getNumValues()) && \"Invalid result number for the given node!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1105, __extension__ __PRETTY_FUNCTION__))
;
1106 assert(ResNo < -2U && "Cannot use result numbers reserved for DenseMaps.")(static_cast <bool> (ResNo < -2U && "Cannot use result numbers reserved for DenseMaps."
) ? void (0) : __assert_fail ("ResNo < -2U && \"Cannot use result numbers reserved for DenseMaps.\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1106, __extension__ __PRETTY_FUNCTION__))
;
1107}
1108
// Forward to the node; Node must be non-null (dereferenced unconditionally).
inline unsigned SDValue::getOpcode() const {
  return Node->getOpcode();
}
1112
1113inline EVT SDValue::getValueType() const {
1114 return Node->getValueType(ResNo);
23
Called C++ object pointer is null
1115}
1116
// The forwarders below all dereference Node unconditionally; they must not
// be called on a null (default-constructed) SDValue.

inline unsigned SDValue::getNumOperands() const {
  return Node->getNumOperands();
}

inline const SDValue &SDValue::getOperand(unsigned i) const {
  return Node->getOperand(i);
}

inline uint64_t SDValue::getConstantOperandVal(unsigned i) const {
  return Node->getConstantOperandVal(i);
}

inline const APInt &SDValue::getConstantOperandAPInt(unsigned i) const {
  return Node->getConstantOperandAPInt(i);
}

inline bool SDValue::isTargetOpcode() const {
  return Node->isTargetOpcode();
}

inline bool SDValue::isTargetMemoryOpcode() const {
  return Node->isTargetMemoryOpcode();
}

inline bool SDValue::isMachineOpcode() const {
  return Node->isMachineOpcode();
}

inline unsigned SDValue::getMachineOpcode() const {
  return Node->getMachineOpcode();
}

inline bool SDValue::isUndef() const {
  return Node->isUndef();
}

// Use queries are per-result: they consider only uses of this SDValue's
// result number, not all uses of the node.
inline bool SDValue::use_empty() const {
  return !Node->hasAnyUseOfValue(ResNo);
}

inline bool SDValue::hasOneUse() const {
  return Node->hasNUsesOfValue(1, ResNo);
}

inline const DebugLoc &SDValue::getDebugLoc() const {
  return Node->getDebugLoc();
}

inline void SDValue::dump() const {
  return Node->dump();
}

inline void SDValue::dump(const SelectionDAG *G) const {
  return Node->dump(G);
}

inline void SDValue::dumpr() const {
  return Node->dumpr();
}

inline void SDValue::dumpr(const SelectionDAG *G) const {
  return Node->dumpr(G);
}
1180
// Define inline functions from the SDUse class.

// Point this use at V: unlink from the current value's use list (if any),
// then link onto V's node's use list (if V is non-null).
inline void SDUse::set(const SDValue &V) {
  if (Val.getNode()) removeFromList();
  Val = V;
  if (V.getNode()) V.getNode()->addUse(*this);
}

// Like set(), but for a freshly created use that is not yet on any list;
// V's node must be non-null.
inline void SDUse::setInitial(const SDValue &V) {
  Val = V;
  V.getNode()->addUse(*this);
}

// Replace only the node portion of the value (result number unchanged).
inline void SDUse::setNode(SDNode *N) {
  if (Val.getNode()) removeFromList();
  Val.setNode(N);
  if (N) N->addUse(*this);
}
1199
/// This class is used to form a handle around another node that
/// is persistent and is updated across invocations of replaceAllUsesWith on its
/// operand. This node should be directly created by end-users and not added to
/// the AllNodes list.
class HandleSDNode : public SDNode {
  /// The single, stack-resident operand slot this handle tracks.
  SDUse Op;

public:
  explicit HandleSDNode(SDValue X)
    : SDNode(ISD::HANDLENODE, 0, DebugLoc(), getSDVTList(MVT::Other)) {
    // HandleSDNodes are never inserted into the DAG, so they won't be
    // auto-numbered. Use ID 65535 as a sentinel.
    PersistentId = 0xffff;

    // Manually set up the operand list. This node type is special in that it's
    // always stack allocated and SelectionDAG does not manage its operands.
    // TODO: This should either (a) not be in the SDNode hierarchy, or (b) not
    // be so special.
    // Note: setUser must precede setInitial, which links Op onto X's node's
    // use list via its user.
    Op.setUser(this);
    Op.setInitial(X);
    NumOperands = 1;
    OperandList = &Op;
  }
  ~HandleSDNode();

  /// Return the tracked value (kept current across replaceAllUsesWith).
  const SDValue &getValue() const { return Op; }
};
1227
/// SDNode for ISD::ADDRSPACECAST: records the source and destination
/// address spaces of a pointer address-space cast.
class AddrSpaceCastSDNode : public SDNode {
private:
  unsigned SrcAddrSpace;
  unsigned DestAddrSpace;

public:
  AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl, EVT VT,
                      unsigned SrcAS, unsigned DestAS);

  unsigned getSrcAddressSpace() const { return SrcAddrSpace; }
  unsigned getDestAddressSpace() const { return DestAddrSpace; }

  // LLVM-style RTTI: a node is an AddrSpaceCastSDNode iff its opcode is
  // ISD::ADDRSPACECAST.
  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::ADDRSPACECAST;
  }
};
1244
1245/// This is an abstract virtual class for memory operations.
1246class MemSDNode : public SDNode {
1247private:
1248 // VT of in-memory value.
1249 EVT MemoryVT;
1250
1251protected:
1252 /// Memory reference information.
1253 MachineMemOperand *MMO;
1254
1255public:
1256 MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTs,
1257 EVT memvt, MachineMemOperand *MMO);
1258
  /// Return true if the memory operand describes a load.
  bool readMem() const { return MMO->isLoad(); }
  /// Return true if the memory operand describes a store.
  bool writeMem() const { return MMO->isStore(); }

  /// Returns alignment and volatility of the memory access
  Align getOriginalAlign() const { return MMO->getBaseAlign(); }
  Align getAlign() const { return MMO->getAlign(); }
  // FIXME: Remove once transition to getAlign is over.
  unsigned getAlignment() const { return MMO->getAlign().value(); }

  /// Return the SubclassData value, without HasDebugValue. This contains an
  /// encoding of the volatile flag, as well as bits used by subclasses. This
  /// function should only be used to compute a FoldingSetNodeID value.
  /// The HasDebugValue bit is masked out because CSE map needs to match
  /// nodes with debug info with nodes without debug info. Same is about
  /// isDivergent bit.
  unsigned getRawSubclassData() const {
    uint16_t Data;
    // Local mirror of the SDNode bit-field union; the memcpy round trips
    // let us clear individual bits without aliasing concerns.
    union {
      char RawSDNodeBits[sizeof(uint16_t)];
      SDNodeBitfields SDNodeBits;
    };
    memcpy(&RawSDNodeBits, &this->RawSDNodeBits, sizeof(this->RawSDNodeBits));
    SDNodeBits.HasDebugValue = 0;
    SDNodeBits.IsDivergent = false;
    memcpy(&Data, &RawSDNodeBits, sizeof(RawSDNodeBits));
    return Data;
  }
1286
1287 bool isVolatile() const { return MemSDNodeBits.IsVolatile; }
1288 bool isNonTemporal() const { return MemSDNodeBits.IsNonTemporal; }
1289 bool isDereferenceable() const { return MemSDNodeBits.IsDereferenceable; }
1290 bool isInvariant() const { return MemSDNodeBits.IsInvariant; }
1291
1292 // Returns the offset from the location of the access.
1293 int64_t getSrcValueOffset() const { return MMO->getOffset(); }
1294
1295 /// Returns the AA info that describes the dereference.
1296 AAMDNodes getAAInfo() const { return MMO->getAAInfo(); }
1297
1298 /// Returns the Ranges that describes the dereference.
1299 const MDNode *getRanges() const { return MMO->getRanges(); }
1300
1301 /// Returns the synchronization scope ID for this memory operation.
1302 SyncScope::ID getSyncScopeID() const { return MMO->getSyncScopeID(); }
1303
1304 /// Return the atomic ordering requirements for this memory operation. For
1305 /// cmpxchg atomic operations, return the atomic ordering requirements when
1306 /// store occurs.
1307 AtomicOrdering getOrdering() const { return MMO->getOrdering(); }
1308
1309 /// Return a single atomic ordering that is at least as strong as both the
1310 /// success and failure orderings for an atomic operation. (For operations
1311 /// other than cmpxchg, this is equivalent to getOrdering().)
1312 AtomicOrdering getMergedOrdering() const { return MMO->getMergedOrdering(); }
1313
1314 /// Return true if the memory operation ordering is Unordered or higher.
1315 bool isAtomic() const { return MMO->isAtomic(); }
1316
1317 /// Returns true if the memory operation doesn't imply any ordering
1318 /// constraints on surrounding memory operations beyond the normal memory
1319 /// aliasing rules.
1320 bool isUnordered() const { return MMO->isUnordered(); }
1321
1322 /// Returns true if the memory operation is neither atomic or volatile.
1323 bool isSimple() const { return !isAtomic() && !isVolatile(); }
1324
1325 /// Return the type of the in-memory value.
1326 EVT getMemoryVT() const { return MemoryVT; }
1327
1328 /// Return a MachineMemOperand object describing the memory
1329 /// reference performed by operation.
1330 MachineMemOperand *getMemOperand() const { return MMO; }
1331
1332 const MachinePointerInfo &getPointerInfo() const {
1333 return MMO->getPointerInfo();
1334 }
1335
1336 /// Return the address space for the associated pointer
1337 unsigned getAddressSpace() const {
1338 return getPointerInfo().getAddrSpace();
1339 }
1340
1341 /// Update this MemSDNode's MachineMemOperand information
1342 /// to reflect the alignment of NewMMO, if it has a greater alignment.
1343 /// This must only be used when the new alignment applies to all users of
1344 /// this MachineMemOperand.
1345 void refineAlignment(const MachineMemOperand *NewMMO) {
1346 MMO->refineAlignment(NewMMO);
1347 }
1348
1349 const SDValue &getChain() const { return getOperand(0); }
1350
1351 const SDValue &getBasePtr() const {
1352 switch (getOpcode()) {
1353 case ISD::STORE:
1354 case ISD::MSTORE:
1355 return getOperand(2);
1356 case ISD::MGATHER:
1357 case ISD::MSCATTER:
1358 return getOperand(3);
1359 default:
1360 return getOperand(1);
1361 }
1362 }
1363
1364 // Methods to support isa and dyn_cast
1365 static bool classof(const SDNode *N) {
1366 // For some targets, we lower some target intrinsics to a MemIntrinsicNode
1367 // with either an intrinsic or a target opcode.
1368 return N->getOpcode() == ISD::LOAD ||
1369 N->getOpcode() == ISD::STORE ||
1370 N->getOpcode() == ISD::PREFETCH ||
1371 N->getOpcode() == ISD::ATOMIC_CMP_SWAP ||
1372 N->getOpcode() == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS ||
1373 N->getOpcode() == ISD::ATOMIC_SWAP ||
1374 N->getOpcode() == ISD::ATOMIC_LOAD_ADD ||
1375 N->getOpcode() == ISD::ATOMIC_LOAD_SUB ||
1376 N->getOpcode() == ISD::ATOMIC_LOAD_AND ||
1377 N->getOpcode() == ISD::ATOMIC_LOAD_CLR ||
1378 N->getOpcode() == ISD::ATOMIC_LOAD_OR ||
1379 N->getOpcode() == ISD::ATOMIC_LOAD_XOR ||
1380 N->getOpcode() == ISD::ATOMIC_LOAD_NAND ||
1381 N->getOpcode() == ISD::ATOMIC_LOAD_MIN ||
1382 N->getOpcode() == ISD::ATOMIC_LOAD_MAX ||
1383 N->getOpcode() == ISD::ATOMIC_LOAD_UMIN ||
1384 N->getOpcode() == ISD::ATOMIC_LOAD_UMAX ||
1385 N->getOpcode() == ISD::ATOMIC_LOAD_FADD ||
1386 N->getOpcode() == ISD::ATOMIC_LOAD_FSUB ||
1387 N->getOpcode() == ISD::ATOMIC_LOAD ||
1388 N->getOpcode() == ISD::ATOMIC_STORE ||
1389 N->getOpcode() == ISD::MLOAD ||
1390 N->getOpcode() == ISD::MSTORE ||
1391 N->getOpcode() == ISD::MGATHER ||
1392 N->getOpcode() == ISD::MSCATTER ||
1393 N->isMemIntrinsic() ||
1394 N->isTargetMemoryOpcode();
1395 }
1396};
1397
1398/// This is an SDNode representing atomic operations.
1399class AtomicSDNode : public MemSDNode {
1400public:
1401 AtomicSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTL,
1402 EVT MemVT, MachineMemOperand *MMO)
1403 : MemSDNode(Opc, Order, dl, VTL, MemVT, MMO) {
1404 assert(((Opc != ISD::ATOMIC_LOAD && Opc != ISD::ATOMIC_STORE) ||(static_cast <bool> (((Opc != ISD::ATOMIC_LOAD &&
Opc != ISD::ATOMIC_STORE) || MMO->isAtomic()) && "then why are we using an AtomicSDNode?"
) ? void (0) : __assert_fail ("((Opc != ISD::ATOMIC_LOAD && Opc != ISD::ATOMIC_STORE) || MMO->isAtomic()) && \"then why are we using an AtomicSDNode?\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1405, __extension__ __PRETTY_FUNCTION__))
1405 MMO->isAtomic()) && "then why are we using an AtomicSDNode?")(static_cast <bool> (((Opc != ISD::ATOMIC_LOAD &&
Opc != ISD::ATOMIC_STORE) || MMO->isAtomic()) && "then why are we using an AtomicSDNode?"
) ? void (0) : __assert_fail ("((Opc != ISD::ATOMIC_LOAD && Opc != ISD::ATOMIC_STORE) || MMO->isAtomic()) && \"then why are we using an AtomicSDNode?\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1405, __extension__ __PRETTY_FUNCTION__))
;
1406 }
1407
1408 const SDValue &getBasePtr() const { return getOperand(1); }
1409 const SDValue &getVal() const { return getOperand(2); }
1410
1411 /// Returns true if this SDNode represents cmpxchg atomic operation, false
1412 /// otherwise.
1413 bool isCompareAndSwap() const {
1414 unsigned Op = getOpcode();
1415 return Op == ISD::ATOMIC_CMP_SWAP ||
1416 Op == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS;
1417 }
1418
1419 /// For cmpxchg atomic operations, return the atomic ordering requirements
1420 /// when store does not occur.
1421 AtomicOrdering getFailureOrdering() const {
1422 assert(isCompareAndSwap() && "Must be cmpxchg operation")(static_cast <bool> (isCompareAndSwap() && "Must be cmpxchg operation"
) ? void (0) : __assert_fail ("isCompareAndSwap() && \"Must be cmpxchg operation\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1422, __extension__ __PRETTY_FUNCTION__))
;
1423 return MMO->getFailureOrdering();
1424 }
1425
1426 // Methods to support isa and dyn_cast
1427 static bool classof(const SDNode *N) {
1428 return N->getOpcode() == ISD::ATOMIC_CMP_SWAP ||
1429 N->getOpcode() == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS ||
1430 N->getOpcode() == ISD::ATOMIC_SWAP ||
1431 N->getOpcode() == ISD::ATOMIC_LOAD_ADD ||
1432 N->getOpcode() == ISD::ATOMIC_LOAD_SUB ||
1433 N->getOpcode() == ISD::ATOMIC_LOAD_AND ||
1434 N->getOpcode() == ISD::ATOMIC_LOAD_CLR ||
1435 N->getOpcode() == ISD::ATOMIC_LOAD_OR ||
1436 N->getOpcode() == ISD::ATOMIC_LOAD_XOR ||
1437 N->getOpcode() == ISD::ATOMIC_LOAD_NAND ||
1438 N->getOpcode() == ISD::ATOMIC_LOAD_MIN ||
1439 N->getOpcode() == ISD::ATOMIC_LOAD_MAX ||
1440 N->getOpcode() == ISD::ATOMIC_LOAD_UMIN ||
1441 N->getOpcode() == ISD::ATOMIC_LOAD_UMAX ||
1442 N->getOpcode() == ISD::ATOMIC_LOAD_FADD ||
1443 N->getOpcode() == ISD::ATOMIC_LOAD_FSUB ||
1444 N->getOpcode() == ISD::ATOMIC_LOAD ||
1445 N->getOpcode() == ISD::ATOMIC_STORE;
1446 }
1447};
1448
1449/// This SDNode is used for target intrinsics that touch
1450/// memory and need an associated MachineMemOperand. Its opcode may be
1451/// INTRINSIC_VOID, INTRINSIC_W_CHAIN, PREFETCH, or a target-specific opcode
1452/// with a value not less than FIRST_TARGET_MEMORY_OPCODE.
1453class MemIntrinsicSDNode : public MemSDNode {
1454public:
1455 MemIntrinsicSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl,
1456 SDVTList VTs, EVT MemoryVT, MachineMemOperand *MMO)
1457 : MemSDNode(Opc, Order, dl, VTs, MemoryVT, MMO) {
1458 SDNodeBits.IsMemIntrinsic = true;
1459 }
1460
1461 // Methods to support isa and dyn_cast
1462 static bool classof(const SDNode *N) {
1463 // We lower some target intrinsics to their target opcode
1464 // early a node with a target opcode can be of this class
1465 return N->isMemIntrinsic() ||
1466 N->getOpcode() == ISD::PREFETCH ||
1467 N->isTargetMemoryOpcode();
1468 }
1469};
1470
1471/// This SDNode is used to implement the code generator
1472/// support for the llvm IR shufflevector instruction. It combines elements
1473/// from two input vectors into a new input vector, with the selection and
1474/// ordering of elements determined by an array of integers, referred to as
1475/// the shuffle mask. For input vectors of width N, mask indices of 0..N-1
1476/// refer to elements from the LHS input, and indices from N to 2N-1 the RHS.
1477/// An index of -1 is treated as undef, such that the code generator may put
1478/// any value in the corresponding element of the result.
1479class ShuffleVectorSDNode : public SDNode {
1480 // The memory for Mask is owned by the SelectionDAG's OperandAllocator, and
1481 // is freed when the SelectionDAG object is destroyed.
1482 const int *Mask;
1483
1484protected:
1485 friend class SelectionDAG;
1486
1487 ShuffleVectorSDNode(EVT VT, unsigned Order, const DebugLoc &dl, const int *M)
1488 : SDNode(ISD::VECTOR_SHUFFLE, Order, dl, getSDVTList(VT)), Mask(M) {}
1489
1490public:
1491 ArrayRef<int> getMask() const {
1492 EVT VT = getValueType(0);
1493 return makeArrayRef(Mask, VT.getVectorNumElements());
1494 }
1495
1496 int getMaskElt(unsigned Idx) const {
1497 assert(Idx < getValueType(0).getVectorNumElements() && "Idx out of range!")(static_cast <bool> (Idx < getValueType(0).getVectorNumElements
() && "Idx out of range!") ? void (0) : __assert_fail
("Idx < getValueType(0).getVectorNumElements() && \"Idx out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1497, __extension__ __PRETTY_FUNCTION__))
;
1498 return Mask[Idx];
1499 }
1500
1501 bool isSplat() const { return isSplatMask(Mask, getValueType(0)); }
1502
1503 int getSplatIndex() const {
1504 assert(isSplat() && "Cannot get splat index for non-splat!")(static_cast <bool> (isSplat() && "Cannot get splat index for non-splat!"
) ? void (0) : __assert_fail ("isSplat() && \"Cannot get splat index for non-splat!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1504, __extension__ __PRETTY_FUNCTION__))
;
1505 EVT VT = getValueType(0);
1506 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
1507 if (Mask[i] >= 0)
1508 return Mask[i];
1509
1510 // We can choose any index value here and be correct because all elements
1511 // are undefined. Return 0 for better potential for callers to simplify.
1512 return 0;
1513 }
1514
1515 static bool isSplatMask(const int *Mask, EVT VT);
1516
1517 /// Change values in a shuffle permute mask assuming
1518 /// the two vector operands have swapped position.
1519 static void commuteMask(MutableArrayRef<int> Mask) {
1520 unsigned NumElems = Mask.size();
1521 for (unsigned i = 0; i != NumElems; ++i) {
1522 int idx = Mask[i];
1523 if (idx < 0)
1524 continue;
1525 else if (idx < (int)NumElems)
1526 Mask[i] = idx + NumElems;
1527 else
1528 Mask[i] = idx - NumElems;
1529 }
1530 }
1531
1532 static bool classof(const SDNode *N) {
1533 return N->getOpcode() == ISD::VECTOR_SHUFFLE;
1534 }
1535};
1536
1537class ConstantSDNode : public SDNode {
1538 friend class SelectionDAG;
1539
1540 const ConstantInt *Value;
1541
1542 ConstantSDNode(bool isTarget, bool isOpaque, const ConstantInt *val, EVT VT)
1543 : SDNode(isTarget ? ISD::TargetConstant : ISD::Constant, 0, DebugLoc(),
1544 getSDVTList(VT)),
1545 Value(val) {
1546 ConstantSDNodeBits.IsOpaque = isOpaque;
1547 }
1548
1549public:
1550 const ConstantInt *getConstantIntValue() const { return Value; }
1551 const APInt &getAPIntValue() const { return Value->getValue(); }
1552 uint64_t getZExtValue() const { return Value->getZExtValue(); }
1553 int64_t getSExtValue() const { return Value->getSExtValue(); }
1554 uint64_t getLimitedValue(uint64_t Limit = UINT64_MAX(18446744073709551615UL)) {
1555 return Value->getLimitedValue(Limit);
1556 }
1557 MaybeAlign getMaybeAlignValue() const { return Value->getMaybeAlignValue(); }
1558 Align getAlignValue() const { return Value->getAlignValue(); }
1559
1560 bool isOne() const { return Value->isOne(); }
1561 bool isNullValue() const { return Value->isZero(); }
1562 bool isAllOnesValue() const { return Value->isMinusOne(); }
1563
1564 bool isOpaque() const { return ConstantSDNodeBits.IsOpaque; }
1565
1566 static bool classof(const SDNode *N) {
1567 return N->getOpcode() == ISD::Constant ||
1568 N->getOpcode() == ISD::TargetConstant;
1569 }
1570};
1571
1572uint64_t SDNode::getConstantOperandVal(unsigned Num) const {
1573 return cast<ConstantSDNode>(getOperand(Num))->getZExtValue();
1574}
1575
1576const APInt &SDNode::getConstantOperandAPInt(unsigned Num) const {
1577 return cast<ConstantSDNode>(getOperand(Num))->getAPIntValue();
1578}
1579
1580class ConstantFPSDNode : public SDNode {
1581 friend class SelectionDAG;
1582
1583 const ConstantFP *Value;
1584
1585 ConstantFPSDNode(bool isTarget, const ConstantFP *val, EVT VT)
1586 : SDNode(isTarget ? ISD::TargetConstantFP : ISD::ConstantFP, 0,
1587 DebugLoc(), getSDVTList(VT)),
1588 Value(val) {}
1589
1590public:
1591 const APFloat& getValueAPF() const { return Value->getValueAPF(); }
1592 const ConstantFP *getConstantFPValue() const { return Value; }
1593
1594 /// Return true if the value is positive or negative zero.
1595 bool isZero() const { return Value->isZero(); }
1596
1597 /// Return true if the value is a NaN.
1598 bool isNaN() const { return Value->isNaN(); }
1599
1600 /// Return true if the value is an infinity
1601 bool isInfinity() const { return Value->isInfinity(); }
1602
1603 /// Return true if the value is negative.
1604 bool isNegative() const { return Value->isNegative(); }
1605
1606 /// We don't rely on operator== working on double values, as
1607 /// it returns true for things that are clearly not equal, like -0.0 and 0.0.
1608 /// As such, this method can be used to do an exact bit-for-bit comparison of
1609 /// two floating point values.
1610
1611 /// We leave the version with the double argument here because it's just so
1612 /// convenient to write "2.0" and the like. Without this function we'd
1613 /// have to duplicate its logic everywhere it's called.
1614 bool isExactlyValue(double V) const {
1615 return Value->getValueAPF().isExactlyValue(V);
1616 }
1617 bool isExactlyValue(const APFloat& V) const;
1618
1619 static bool isValueValidForType(EVT VT, const APFloat& Val);
1620
1621 static bool classof(const SDNode *N) {
1622 return N->getOpcode() == ISD::ConstantFP ||
1623 N->getOpcode() == ISD::TargetConstantFP;
1624 }
1625};
1626
1627/// Returns true if \p V is a constant integer zero.
1628bool isNullConstant(SDValue V);
1629
1630/// Returns true if \p V is an FP constant with a value of positive zero.
1631bool isNullFPConstant(SDValue V);
1632
1633/// Returns true if \p V is an integer constant with all bits set.
1634bool isAllOnesConstant(SDValue V);
1635
1636/// Returns true if \p V is a constant integer one.
1637bool isOneConstant(SDValue V);
1638
1639/// Return the non-bitcasted source operand of \p V if it exists.
1640/// If \p V is not a bitcasted value, it is returned as-is.
1641SDValue peekThroughBitcasts(SDValue V);
1642
1643/// Return the non-bitcasted and one-use source operand of \p V if it exists.
1644/// If \p V is not a bitcasted one-use value, it is returned as-is.
1645SDValue peekThroughOneUseBitcasts(SDValue V);
1646
1647/// Return the non-extracted vector source operand of \p V if it exists.
1648/// If \p V is not an extracted subvector, it is returned as-is.
1649SDValue peekThroughExtractSubvectors(SDValue V);
1650
1651/// Returns true if \p V is a bitwise not operation. Assumes that an all ones
1652/// constant is canonicalized to be operand 1.
1653bool isBitwiseNot(SDValue V, bool AllowUndefs = false);
1654
1655/// Returns the SDNode if it is a constant splat BuildVector or constant int.
1656ConstantSDNode *isConstOrConstSplat(SDValue N, bool AllowUndefs = false,
1657 bool AllowTruncation = false);
1658
1659/// Returns the SDNode if it is a demanded constant splat BuildVector or
1660/// constant int.
1661ConstantSDNode *isConstOrConstSplat(SDValue N, const APInt &DemandedElts,
1662 bool AllowUndefs = false,
1663 bool AllowTruncation = false);
1664
1665/// Returns the SDNode if it is a constant splat BuildVector or constant float.
1666ConstantFPSDNode *isConstOrConstSplatFP(SDValue N, bool AllowUndefs = false);
1667
1668/// Returns the SDNode if it is a demanded constant splat BuildVector or
1669/// constant float.
1670ConstantFPSDNode *isConstOrConstSplatFP(SDValue N, const APInt &DemandedElts,
1671 bool AllowUndefs = false);
1672
1673/// Return true if the value is a constant 0 integer or a splatted vector of
1674/// a constant 0 integer (with no undefs by default).
1675/// Build vector implicit truncation is not an issue for null values.
1676bool isNullOrNullSplat(SDValue V, bool AllowUndefs = false);
1677
1678/// Return true if the value is a constant 1 integer or a splatted vector of a
1679/// constant 1 integer (with no undefs).
1680/// Does not permit build vector implicit truncation.
1681bool isOneOrOneSplat(SDValue V, bool AllowUndefs = false);
1682
1683/// Return true if the value is a constant -1 integer or a splatted vector of a
1684/// constant -1 integer (with no undefs).
1685/// Does not permit build vector implicit truncation.
1686bool isAllOnesOrAllOnesSplat(SDValue V, bool AllowUndefs = false);
1687
1688/// Return true if \p V is either a integer or FP constant.
1689inline bool isIntOrFPConstant(SDValue V) {
1690 return isa<ConstantSDNode>(V) || isa<ConstantFPSDNode>(V);
1691}
1692
1693class GlobalAddressSDNode : public SDNode {
1694 friend class SelectionDAG;
1695
1696 const GlobalValue *TheGlobal;
1697 int64_t Offset;
1698 unsigned TargetFlags;
1699
1700 GlobalAddressSDNode(unsigned Opc, unsigned Order, const DebugLoc &DL,
1701 const GlobalValue *GA, EVT VT, int64_t o,
1702 unsigned TF);
1703
1704public:
1705 const GlobalValue *getGlobal() const { return TheGlobal; }
1706 int64_t getOffset() const { return Offset; }
1707 unsigned getTargetFlags() const { return TargetFlags; }
1708 // Return the address space this GlobalAddress belongs to.
1709 unsigned getAddressSpace() const;
1710
1711 static bool classof(const SDNode *N) {
1712 return N->getOpcode() == ISD::GlobalAddress ||
1713 N->getOpcode() == ISD::TargetGlobalAddress ||
1714 N->getOpcode() == ISD::GlobalTLSAddress ||
1715 N->getOpcode() == ISD::TargetGlobalTLSAddress;
1716 }
1717};
1718
1719class FrameIndexSDNode : public SDNode {
1720 friend class SelectionDAG;
1721
1722 int FI;
1723
1724 FrameIndexSDNode(int fi, EVT VT, bool isTarg)
1725 : SDNode(isTarg ? ISD::TargetFrameIndex : ISD::FrameIndex,
1726 0, DebugLoc(), getSDVTList(VT)), FI(fi) {
1727 }
1728
1729public:
1730 int getIndex() const { return FI; }
1731
1732 static bool classof(const SDNode *N) {
1733 return N->getOpcode() == ISD::FrameIndex ||
1734 N->getOpcode() == ISD::TargetFrameIndex;
1735 }
1736};
1737
1738/// This SDNode is used for LIFETIME_START/LIFETIME_END values, which indicate
1739/// the offet and size that are started/ended in the underlying FrameIndex.
1740class LifetimeSDNode : public SDNode {
1741 friend class SelectionDAG;
1742 int64_t Size;
1743 int64_t Offset; // -1 if offset is unknown.
1744
1745 LifetimeSDNode(unsigned Opcode, unsigned Order, const DebugLoc &dl,
1746 SDVTList VTs, int64_t Size, int64_t Offset)
1747 : SDNode(Opcode, Order, dl, VTs), Size(Size), Offset(Offset) {}
1748public:
1749 int64_t getFrameIndex() const {
1750 return cast<FrameIndexSDNode>(getOperand(1))->getIndex();
1751 }
1752
1753 bool hasOffset() const { return Offset >= 0; }
1754 int64_t getOffset() const {
1755 assert(hasOffset() && "offset is unknown")(static_cast <bool> (hasOffset() && "offset is unknown"
) ? void (0) : __assert_fail ("hasOffset() && \"offset is unknown\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1755, __extension__ __PRETTY_FUNCTION__))
;
1756 return Offset;
1757 }
1758 int64_t getSize() const {
1759 assert(hasOffset() && "offset is unknown")(static_cast <bool> (hasOffset() && "offset is unknown"
) ? void (0) : __assert_fail ("hasOffset() && \"offset is unknown\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1759, __extension__ __PRETTY_FUNCTION__))
;
1760 return Size;
1761 }
1762
1763 // Methods to support isa and dyn_cast
1764 static bool classof(const SDNode *N) {
1765 return N->getOpcode() == ISD::LIFETIME_START ||
1766 N->getOpcode() == ISD::LIFETIME_END;
1767 }
1768};
1769
1770/// This SDNode is used for PSEUDO_PROBE values, which are the function guid and
1771/// the index of the basic block being probed. A pseudo probe serves as a place
1772/// holder and will be removed at the end of compilation. It does not have any
1773/// operand because we do not want the instruction selection to deal with any.
1774class PseudoProbeSDNode : public SDNode {
1775 friend class SelectionDAG;
1776 uint64_t Guid;
1777 uint64_t Index;
1778 uint32_t Attributes;
1779
1780 PseudoProbeSDNode(unsigned Opcode, unsigned Order, const DebugLoc &Dl,
1781 SDVTList VTs, uint64_t Guid, uint64_t Index, uint32_t Attr)
1782 : SDNode(Opcode, Order, Dl, VTs), Guid(Guid), Index(Index),
1783 Attributes(Attr) {}
1784
1785public:
1786 uint64_t getGuid() const { return Guid; }
1787 uint64_t getIndex() const { return Index; }
1788 uint32_t getAttributes() const { return Attributes; }
1789
1790 // Methods to support isa and dyn_cast
1791 static bool classof(const SDNode *N) {
1792 return N->getOpcode() == ISD::PSEUDO_PROBE;
1793 }
1794};
1795
1796class JumpTableSDNode : public SDNode {
1797 friend class SelectionDAG;
1798
1799 int JTI;
1800 unsigned TargetFlags;
1801
1802 JumpTableSDNode(int jti, EVT VT, bool isTarg, unsigned TF)
1803 : SDNode(isTarg ? ISD::TargetJumpTable : ISD::JumpTable,
1804 0, DebugLoc(), getSDVTList(VT)), JTI(jti), TargetFlags(TF) {
1805 }
1806
1807public:
1808 int getIndex() const { return JTI; }
1809 unsigned getTargetFlags() const { return TargetFlags; }
1810
1811 static bool classof(const SDNode *N) {
1812 return N->getOpcode() == ISD::JumpTable ||
1813 N->getOpcode() == ISD::TargetJumpTable;
1814 }
1815};
1816
1817class ConstantPoolSDNode : public SDNode {
1818 friend class SelectionDAG;
1819
1820 union {
1821 const Constant *ConstVal;
1822 MachineConstantPoolValue *MachineCPVal;
1823 } Val;
1824 int Offset; // It's a MachineConstantPoolValue if top bit is set.
1825 Align Alignment; // Minimum alignment requirement of CP.
1826 unsigned TargetFlags;
1827
1828 ConstantPoolSDNode(bool isTarget, const Constant *c, EVT VT, int o,
1829 Align Alignment, unsigned TF)
1830 : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, 0,
1831 DebugLoc(), getSDVTList(VT)),
1832 Offset(o), Alignment(Alignment), TargetFlags(TF) {
1833 assert(Offset >= 0 && "Offset is too large")(static_cast <bool> (Offset >= 0 && "Offset is too large"
) ? void (0) : __assert_fail ("Offset >= 0 && \"Offset is too large\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1833, __extension__ __PRETTY_FUNCTION__))
;
1834 Val.ConstVal = c;
1835 }
1836
1837 ConstantPoolSDNode(bool isTarget, MachineConstantPoolValue *v, EVT VT, int o,
1838 Align Alignment, unsigned TF)
1839 : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, 0,
1840 DebugLoc(), getSDVTList(VT)),
1841 Offset(o), Alignment(Alignment), TargetFlags(TF) {
1842 assert(Offset >= 0 && "Offset is too large")(static_cast <bool> (Offset >= 0 && "Offset is too large"
) ? void (0) : __assert_fail ("Offset >= 0 && \"Offset is too large\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1842, __extension__ __PRETTY_FUNCTION__))
;
1843 Val.MachineCPVal = v;
1844 Offset |= 1 << (sizeof(unsigned)*CHAR_BIT8-1);
1845 }
1846
1847public:
1848 bool isMachineConstantPoolEntry() const {
1849 return Offset < 0;
1850 }
1851
1852 const Constant *getConstVal() const {
1853 assert(!isMachineConstantPoolEntry() && "Wrong constantpool type")(static_cast <bool> (!isMachineConstantPoolEntry() &&
"Wrong constantpool type") ? void (0) : __assert_fail ("!isMachineConstantPoolEntry() && \"Wrong constantpool type\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1853, __extension__ __PRETTY_FUNCTION__))
;
1854 return Val.ConstVal;
1855 }
1856
1857 MachineConstantPoolValue *getMachineCPVal() const {
1858 assert(isMachineConstantPoolEntry() && "Wrong constantpool type")(static_cast <bool> (isMachineConstantPoolEntry() &&
"Wrong constantpool type") ? void (0) : __assert_fail ("isMachineConstantPoolEntry() && \"Wrong constantpool type\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1858, __extension__ __PRETTY_FUNCTION__))
;
1859 return Val.MachineCPVal;
1860 }
1861
1862 int getOffset() const {
1863 return Offset & ~(1 << (sizeof(unsigned)*CHAR_BIT8-1));
1864 }
1865
1866 // Return the alignment of this constant pool object, which is either 0 (for
1867 // default alignment) or the desired value.
1868 Align getAlign() const { return Alignment; }
1869 unsigned getTargetFlags() const { return TargetFlags; }
1870
1871 Type *getType() const;
1872
1873 static bool classof(const SDNode *N) {
1874 return N->getOpcode() == ISD::ConstantPool ||
1875 N->getOpcode() == ISD::TargetConstantPool;
1876 }
1877};
1878
1879/// Completely target-dependent object reference.
1880class TargetIndexSDNode : public SDNode {
1881 friend class SelectionDAG;
1882
1883 unsigned TargetFlags;
1884 int Index;
1885 int64_t Offset;
1886
1887public:
1888 TargetIndexSDNode(int Idx, EVT VT, int64_t Ofs, unsigned TF)
1889 : SDNode(ISD::TargetIndex, 0, DebugLoc(), getSDVTList(VT)),
1890 TargetFlags(TF), Index(Idx), Offset(Ofs) {}
1891
1892 unsigned getTargetFlags() const { return TargetFlags; }
1893 int getIndex() const { return Index; }
1894 int64_t getOffset() const { return Offset; }
1895
1896 static bool classof(const SDNode *N) {
1897 return N->getOpcode() == ISD::TargetIndex;
1898 }
1899};
1900
1901class BasicBlockSDNode : public SDNode {
1902 friend class SelectionDAG;
1903
1904 MachineBasicBlock *MBB;
1905
1906 /// Debug info is meaningful and potentially useful here, but we create
1907 /// blocks out of order when they're jumped to, which makes it a bit
1908 /// harder. Let's see if we need it first.
1909 explicit BasicBlockSDNode(MachineBasicBlock *mbb)
1910 : SDNode(ISD::BasicBlock, 0, DebugLoc(), getSDVTList(MVT::Other)), MBB(mbb)
1911 {}
1912
1913public:
1914 MachineBasicBlock *getBasicBlock() const { return MBB; }
1915
1916 static bool classof(const SDNode *N) {
1917 return N->getOpcode() == ISD::BasicBlock;
1918 }
1919};
1920
1921/// A "pseudo-class" with methods for operating on BUILD_VECTORs.
1922class BuildVectorSDNode : public SDNode {
1923public:
1924 // These are constructed as SDNodes and then cast to BuildVectorSDNodes.
1925 explicit BuildVectorSDNode() = delete;
1926
1927 /// Check if this is a constant splat, and if so, find the
1928 /// smallest element size that splats the vector. If MinSplatBits is
1929 /// nonzero, the element size must be at least that large. Note that the
1930 /// splat element may be the entire vector (i.e., a one element vector).
1931 /// Returns the splat element value in SplatValue. Any undefined bits in
1932 /// that value are zero, and the corresponding bits in the SplatUndef mask
1933 /// are set. The SplatBitSize value is set to the splat element size in
1934 /// bits. HasAnyUndefs is set to true if any bits in the vector are
1935 /// undefined. isBigEndian describes the endianness of the target.
1936 bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef,
1937 unsigned &SplatBitSize, bool &HasAnyUndefs,
1938 unsigned MinSplatBits = 0,
1939 bool isBigEndian = false) const;
1940
1941 /// Returns the demanded splatted value or a null value if this is not a
1942 /// splat.
1943 ///
1944 /// The DemandedElts mask indicates the elements that must be in the splat.
1945 /// If passed a non-null UndefElements bitvector, it will resize it to match
1946 /// the vector width and set the bits where elements are undef.
1947 SDValue getSplatValue(const APInt &DemandedElts,
1948 BitVector *UndefElements = nullptr) const;
1949
1950 /// Returns the splatted value or a null value if this is not a splat.
1951 ///
1952 /// If passed a non-null UndefElements bitvector, it will resize it to match
1953 /// the vector width and set the bits where elements are undef.
1954 SDValue getSplatValue(BitVector *UndefElements = nullptr) const;
1955
1956 /// Find the shortest repeating sequence of values in the build vector.
1957 ///
1958 /// e.g. { u, X, u, X, u, u, X, u } -> { X }
1959 /// { X, Y, u, Y, u, u, X, u } -> { X, Y }
1960 ///
1961 /// Currently this must be a power-of-2 build vector.
1962 /// The DemandedElts mask indicates the elements that must be present,
1963 /// undemanded elements in Sequence may be null (SDValue()). If passed a
1964 /// non-null UndefElements bitvector, it will resize it to match the original
1965 /// vector width and set the bits where elements are undef. If result is
1966 /// false, Sequence will be empty.
1967 bool getRepeatedSequence(const APInt &DemandedElts,
1968 SmallVectorImpl<SDValue> &Sequence,
1969 BitVector *UndefElements = nullptr) const;
1970
1971 /// Find the shortest repeating sequence of values in the build vector.
1972 ///
1973 /// e.g. { u, X, u, X, u, u, X, u } -> { X }
1974 /// { X, Y, u, Y, u, u, X, u } -> { X, Y }
1975 ///
1976 /// Currently this must be a power-of-2 build vector.
1977 /// If passed a non-null UndefElements bitvector, it will resize it to match
1978 /// the original vector width and set the bits where elements are undef.
1979 /// If result is false, Sequence will be empty.
1980 bool getRepeatedSequence(SmallVectorImpl<SDValue> &Sequence,
1981 BitVector *UndefElements = nullptr) const;
1982
1983 /// Returns the demanded splatted constant or null if this is not a constant
1984 /// splat.
1985 ///
1986 /// The DemandedElts mask indicates the elements that must be in the splat.
1987 /// If passed a non-null UndefElements bitvector, it will resize it to match
1988 /// the vector width and set the bits where elements are undef.
1989 ConstantSDNode *
1990 getConstantSplatNode(const APInt &DemandedElts,
1991 BitVector *UndefElements = nullptr) const;
1992
1993 /// Returns the splatted constant or null if this is not a constant
1994 /// splat.
1995 ///
1996 /// If passed a non-null UndefElements bitvector, it will resize it to match
1997 /// the vector width and set the bits where elements are undef.
1998 ConstantSDNode *
1999 getConstantSplatNode(BitVector *UndefElements = nullptr) const;
2000
2001 /// Returns the demanded splatted constant FP or null if this is not a
2002 /// constant FP splat.
2003 ///
2004 /// The DemandedElts mask indicates the elements that must be in the splat.
2005 /// If passed a non-null UndefElements bitvector, it will resize it to match
2006 /// the vector width and set the bits where elements are undef.
2007 ConstantFPSDNode *
2008 getConstantFPSplatNode(const APInt &DemandedElts,
2009 BitVector *UndefElements = nullptr) const;
2010
2011 /// Returns the splatted constant FP or null if this is not a constant
2012 /// FP splat.
2013 ///
2014 /// If passed a non-null UndefElements bitvector, it will resize it to match
2015 /// the vector width and set the bits where elements are undef.
2016 ConstantFPSDNode *
2017 getConstantFPSplatNode(BitVector *UndefElements = nullptr) const;
2018
2019 /// If this is a constant FP splat and the splatted constant FP is an
2020 /// exact power or 2, return the log base 2 integer value. Otherwise,
2021 /// return -1.
2022 ///
2023 /// The BitWidth specifies the necessary bit precision.
2024 int32_t getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements,
2025 uint32_t BitWidth) const;
2026
2027 bool isConstant() const;
2028
2029 static bool classof(const SDNode *N) {
2030 return N->getOpcode() == ISD::BUILD_VECTOR;
2031 }
2032};
2033
2034/// An SDNode that holds an arbitrary LLVM IR Value. This is
2035/// used when the SelectionDAG needs to make a simple reference to something
2036/// in the LLVM IR representation.
2037///
2038class SrcValueSDNode : public SDNode {
2039 friend class SelectionDAG;
2040
2041 const Value *V;
2042
2043 /// Create a SrcValue for a general value.
2044 explicit SrcValueSDNode(const Value *v)
2045 : SDNode(ISD::SRCVALUE, 0, DebugLoc(), getSDVTList(MVT::Other)), V(v) {}
2046
2047public:
2048 /// Return the contained Value.
2049 const Value *getValue() const { return V; }
2050
2051 static bool classof(const SDNode *N) {
2052 return N->getOpcode() == ISD::SRCVALUE;
2053 }
2054};
2055
2056class MDNodeSDNode : public SDNode {
2057 friend class SelectionDAG;
2058
2059 const MDNode *MD;
2060
2061 explicit MDNodeSDNode(const MDNode *md)
2062 : SDNode(ISD::MDNODE_SDNODE, 0, DebugLoc(), getSDVTList(MVT::Other)), MD(md)
2063 {}
2064
2065public:
2066 const MDNode *getMD() const { return MD; }
2067
2068 static bool classof(const SDNode *N) {
2069 return N->getOpcode() == ISD::MDNODE_SDNODE;
2070 }
2071};
2072
2073class RegisterSDNode : public SDNode {
2074 friend class SelectionDAG;
2075
2076 Register Reg;
2077
2078 RegisterSDNode(Register reg, EVT VT)
2079 : SDNode(ISD::Register, 0, DebugLoc(), getSDVTList(VT)), Reg(reg) {}
2080
2081public:
2082 Register getReg() const { return Reg; }
2083
2084 static bool classof(const SDNode *N) {
2085 return N->getOpcode() == ISD::Register;
2086 }
2087};
2088
2089class RegisterMaskSDNode : public SDNode {
2090 friend class SelectionDAG;
2091
2092 // The memory for RegMask is not owned by the node.
2093 const uint32_t *RegMask;
2094
2095 RegisterMaskSDNode(const uint32_t *mask)
2096 : SDNode(ISD::RegisterMask, 0, DebugLoc(), getSDVTList(MVT::Untyped)),
2097 RegMask(mask) {}
2098
2099public:
2100 const uint32_t *getRegMask() const { return RegMask; }
2101
2102 static bool classof(const SDNode *N) {
2103 return N->getOpcode() == ISD::RegisterMask;
2104 }
2105};
2106
2107class BlockAddressSDNode : public SDNode {
2108 friend class SelectionDAG;
2109
2110 const BlockAddress *BA;
2111 int64_t Offset;
2112 unsigned TargetFlags;
2113
2114 BlockAddressSDNode(unsigned NodeTy, EVT VT, const BlockAddress *ba,
2115 int64_t o, unsigned Flags)
2116 : SDNode(NodeTy, 0, DebugLoc(), getSDVTList(VT)),
2117 BA(ba), Offset(o), TargetFlags(Flags) {}
2118
2119public:
2120 const BlockAddress *getBlockAddress() const { return BA; }
2121 int64_t getOffset() const { return Offset; }
2122 unsigned getTargetFlags() const { return TargetFlags; }
2123
2124 static bool classof(const SDNode *N) {
2125 return N->getOpcode() == ISD::BlockAddress ||
2126 N->getOpcode() == ISD::TargetBlockAddress;
2127 }
2128};
2129
2130class LabelSDNode : public SDNode {
2131 friend class SelectionDAG;
2132
2133 MCSymbol *Label;
2134
2135 LabelSDNode(unsigned Opcode, unsigned Order, const DebugLoc &dl, MCSymbol *L)
2136 : SDNode(Opcode, Order, dl, getSDVTList(MVT::Other)), Label(L) {
2137 assert(LabelSDNode::classof(this) && "not a label opcode")(static_cast <bool> (LabelSDNode::classof(this) &&
"not a label opcode") ? void (0) : __assert_fail ("LabelSDNode::classof(this) && \"not a label opcode\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 2137, __extension__ __PRETTY_FUNCTION__))
;
2138 }
2139
2140public:
2141 MCSymbol *getLabel() const { return Label; }
2142
2143 static bool classof(const SDNode *N) {
2144 return N->getOpcode() == ISD::EH_LABEL ||
2145 N->getOpcode() == ISD::ANNOTATION_LABEL;
2146 }
2147};
2148
2149class ExternalSymbolSDNode : public SDNode {
2150 friend class SelectionDAG;
2151
2152 const char *Symbol;
2153 unsigned TargetFlags;
2154
2155 ExternalSymbolSDNode(bool isTarget, const char *Sym, unsigned TF, EVT VT)
2156 : SDNode(isTarget ? ISD::TargetExternalSymbol : ISD::ExternalSymbol, 0,
2157 DebugLoc(), getSDVTList(VT)),
2158 Symbol(Sym), TargetFlags(TF) {}
2159
2160public:
2161 const char *getSymbol() const { return Symbol; }
2162 unsigned getTargetFlags() const { return TargetFlags; }
2163
2164 static bool classof(const SDNode *N) {
2165 return N->getOpcode() == ISD::ExternalSymbol ||
2166 N->getOpcode() == ISD::TargetExternalSymbol;
2167 }
2168};
2169
2170class MCSymbolSDNode : public SDNode {
2171 friend class SelectionDAG;
2172
2173 MCSymbol *Symbol;
2174
2175 MCSymbolSDNode(MCSymbol *Symbol, EVT VT)
2176 : SDNode(ISD::MCSymbol, 0, DebugLoc(), getSDVTList(VT)), Symbol(Symbol) {}
2177
2178public:
2179 MCSymbol *getMCSymbol() const { return Symbol; }
2180
2181 static bool classof(const SDNode *N) {
2182 return N->getOpcode() == ISD::MCSymbol;
2183 }
2184};
2185
2186class CondCodeSDNode : public SDNode {
2187 friend class SelectionDAG;
2188
2189 ISD::CondCode Condition;
2190
2191 explicit CondCodeSDNode(ISD::CondCode Cond)
2192 : SDNode(ISD::CONDCODE, 0, DebugLoc(), getSDVTList(MVT::Other)),
2193 Condition(Cond) {}
2194
2195public:
2196 ISD::CondCode get() const { return Condition; }
2197
2198 static bool classof(const SDNode *N) {
2199 return N->getOpcode() == ISD::CONDCODE;
2200 }
2201};
2202
2203/// This class is used to represent EVT's, which are used
2204/// to parameterize some operations.
2205class VTSDNode : public SDNode {
2206 friend class SelectionDAG;
2207
2208 EVT ValueType;
2209
2210 explicit VTSDNode(EVT VT)
2211 : SDNode(ISD::VALUETYPE, 0, DebugLoc(), getSDVTList(MVT::Other)),
2212 ValueType(VT) {}
2213
2214public:
2215 EVT getVT() const { return ValueType; }
2216
2217 static bool classof(const SDNode *N) {
2218 return N->getOpcode() == ISD::VALUETYPE;
2219 }
2220};
2221
2222/// Base class for LoadSDNode and StoreSDNode
2223class LSBaseSDNode : public MemSDNode {
2224public:
2225 LSBaseSDNode(ISD::NodeType NodeTy, unsigned Order, const DebugLoc &dl,
2226 SDVTList VTs, ISD::MemIndexedMode AM, EVT MemVT,
2227 MachineMemOperand *MMO)
2228 : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
2229 LSBaseSDNodeBits.AddressingMode = AM;
2230 assert(getAddressingMode() == AM && "Value truncated")(static_cast <bool> (getAddressingMode() == AM &&
"Value truncated") ? void (0) : __assert_fail ("getAddressingMode() == AM && \"Value truncated\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 2230, __extension__ __PRETTY_FUNCTION__))
;
2231 }
2232
2233 const SDValue &getOffset() const {
2234 return getOperand(getOpcode() == ISD::LOAD ? 2 : 3);
2235 }
2236
2237 /// Return the addressing mode for this load or store:
2238 /// unindexed, pre-inc, pre-dec, post-inc, or post-dec.
2239 ISD::MemIndexedMode getAddressingMode() const {
2240 return static_cast<ISD::MemIndexedMode>(LSBaseSDNodeBits.AddressingMode);
2241 }
2242
2243 /// Return true if this is a pre/post inc/dec load/store.
2244 bool isIndexed() const { return getAddressingMode() != ISD::UNINDEXED; }
2245
2246 /// Return true if this is NOT a pre/post inc/dec load/store.
2247 bool isUnindexed() const { return getAddressingMode() == ISD::UNINDEXED; }
2248
2249 static bool classof(const SDNode *N) {
2250 return N->getOpcode() == ISD::LOAD ||
2251 N->getOpcode() == ISD::STORE;
2252 }
2253};
2254
2255/// This class is used to represent ISD::LOAD nodes.
2256class LoadSDNode : public LSBaseSDNode {
2257 friend class SelectionDAG;
2258
2259 LoadSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2260 ISD::MemIndexedMode AM, ISD::LoadExtType ETy, EVT MemVT,
2261 MachineMemOperand *MMO)
2262 : LSBaseSDNode(ISD::LOAD, Order, dl, VTs, AM, MemVT, MMO) {
2263 LoadSDNodeBits.ExtTy = ETy;
2264 assert(readMem() && "Load MachineMemOperand is not a load!")(static_cast <bool> (readMem() && "Load MachineMemOperand is not a load!"
) ? void (0) : __assert_fail ("readMem() && \"Load MachineMemOperand is not a load!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 2264, __extension__ __PRETTY_FUNCTION__))
;
2265 assert(!writeMem() && "Load MachineMemOperand is a store!")(static_cast <bool> (!writeMem() && "Load MachineMemOperand is a store!"
) ? void (0) : __assert_fail ("!writeMem() && \"Load MachineMemOperand is a store!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 2265, __extension__ __PRETTY_FUNCTION__))
;
2266 }
2267
2268public:
2269 /// Return whether this is a plain node,
2270 /// or one of the varieties of value-extending loads.
2271 ISD::LoadExtType getExtensionType() const {
2272 return static_cast<ISD::LoadExtType>(LoadSDNodeBits.ExtTy);
2273 }
2274
2275 const SDValue &getBasePtr() const { return getOperand(1); }
2276 const SDValue &getOffset() const { return getOperand(2); }
2277
2278 static bool classof(const SDNode *N) {
2279 return N->getOpcode() == ISD::LOAD;
2280 }
2281};
2282
2283/// This class is used to represent ISD::STORE nodes.
2284class StoreSDNode : public LSBaseSDNode {
2285 friend class SelectionDAG;
2286
2287 StoreSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2288 ISD::MemIndexedMode AM, bool isTrunc, EVT MemVT,
2289 MachineMemOperand *MMO)
2290 : LSBaseSDNode(ISD::STORE, Order, dl, VTs, AM, MemVT, MMO) {
2291 StoreSDNodeBits.IsTruncating = isTrunc;
2292 assert(!readMem() && "Store MachineMemOperand is a load!")(static_cast <bool> (!readMem() && "Store MachineMemOperand is a load!"
) ? void (0) : __assert_fail ("!readMem() && \"Store MachineMemOperand is a load!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 2292, __extension__ __PRETTY_FUNCTION__))
;
2293 assert(writeMem() && "Store MachineMemOperand is not a store!")(static_cast <bool> (writeMem() && "Store MachineMemOperand is not a store!"
) ? void (0) : __assert_fail ("writeMem() && \"Store MachineMemOperand is not a store!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 2293, __extension__ __PRETTY_FUNCTION__))
;
2294 }
2295
2296public:
2297 /// Return true if the op does a truncation before store.
2298 /// For integers this is the same as doing a TRUNCATE and storing the result.
2299 /// For floats, it is the same as doing an FP_ROUND and storing the result.
2300 bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; }
2301 void setTruncatingStore(bool Truncating) {
2302 StoreSDNodeBits.IsTruncating = Truncating;
2303 }
2304
2305 const SDValue &getValue() const { return getOperand(1); }
2306 const SDValue &getBasePtr() const { return getOperand(2); }
2307 const SDValue &getOffset() const { return getOperand(3); }
2308
2309 static bool classof(const SDNode *N) {
2310 return N->getOpcode() == ISD::STORE;
2311 }
2312};
2313
2314/// This base class is used to represent MLOAD and MSTORE nodes
2315class MaskedLoadStoreSDNode : public MemSDNode {
2316public:
2317 friend class SelectionDAG;
2318
2319 MaskedLoadStoreSDNode(ISD::NodeType NodeTy, unsigned Order,
2320 const DebugLoc &dl, SDVTList VTs,
2321 ISD::MemIndexedMode AM, EVT MemVT,
2322 MachineMemOperand *MMO)
2323 : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
2324 LSBaseSDNodeBits.AddressingMode = AM;
2325 assert(getAddressingMode() == AM && "Value truncated")(static_cast <bool> (getAddressingMode() == AM &&
"Value truncated") ? void (0) : __assert_fail ("getAddressingMode() == AM && \"Value truncated\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 2325, __extension__ __PRETTY_FUNCTION__))
;
2326 }
2327
2328 // MaskedLoadSDNode (Chain, ptr, offset, mask, passthru)
2329 // MaskedStoreSDNode (Chain, data, ptr, offset, mask)
2330 // Mask is a vector of i1 elements
2331 const SDValue &getOffset() const {
2332 return getOperand(getOpcode() == ISD::MLOAD ? 2 : 3);
2333 }
2334 const SDValue &getMask() const {
2335 return getOperand(getOpcode() == ISD::MLOAD ? 3 : 4);
2336 }
2337
2338 /// Return the addressing mode for this load or store:
2339 /// unindexed, pre-inc, pre-dec, post-inc, or post-dec.
2340 ISD::MemIndexedMode getAddressingMode() const {
2341 return static_cast<ISD::MemIndexedMode>(LSBaseSDNodeBits.AddressingMode);
2342 }
2343
2344 /// Return true if this is a pre/post inc/dec load/store.
2345 bool isIndexed() const { return getAddressingMode() != ISD::UNINDEXED; }
2346
2347 /// Return true if this is NOT a pre/post inc/dec load/store.
2348 bool isUnindexed() const { return getAddressingMode() == ISD::UNINDEXED; }
2349
2350 static bool classof(const SDNode *N) {
2351 return N->getOpcode() == ISD::MLOAD ||
2352 N->getOpcode() == ISD::MSTORE;
2353 }
2354};
2355
2356/// This class is used to represent an MLOAD node
2357class MaskedLoadSDNode : public MaskedLoadStoreSDNode {
2358public:
2359 friend class SelectionDAG;
2360
2361 MaskedLoadSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2362 ISD::MemIndexedMode AM, ISD::LoadExtType ETy,
2363 bool IsExpanding, EVT MemVT, MachineMemOperand *MMO)
2364 : MaskedLoadStoreSDNode(ISD::MLOAD, Order, dl, VTs, AM, MemVT, MMO) {
2365 LoadSDNodeBits.ExtTy = ETy;
2366 LoadSDNodeBits.IsExpanding = IsExpanding;
2367 }
2368
2369 ISD::LoadExtType getExtensionType() const {
2370 return static_cast<ISD::LoadExtType>(LoadSDNodeBits.ExtTy);
2371 }
2372
2373 const SDValue &getBasePtr() const { return getOperand(1); }
2374 const SDValue &getOffset() const { return getOperand(2); }
2375 const SDValue &getMask() const { return getOperand(3); }
2376 const SDValue &getPassThru() const { return getOperand(4); }
2377
2378 static bool classof(const SDNode *N) {
2379 return N->getOpcode() == ISD::MLOAD;
2380 }
2381
2382 bool isExpandingLoad() const { return LoadSDNodeBits.IsExpanding; }
2383};
2384
2385/// This class is used to represent an MSTORE node
2386class MaskedStoreSDNode : public MaskedLoadStoreSDNode {
2387public:
2388 friend class SelectionDAG;
2389
2390 MaskedStoreSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2391 ISD::MemIndexedMode AM, bool isTrunc, bool isCompressing,
2392 EVT MemVT, MachineMemOperand *MMO)
2393 : MaskedLoadStoreSDNode(ISD::MSTORE, Order, dl, VTs, AM, MemVT, MMO) {
2394 StoreSDNodeBits.IsTruncating = isTrunc;
2395 StoreSDNodeBits.IsCompressing = isCompressing;
2396 }
2397
2398 /// Return true if the op does a truncation before store.
2399 /// For integers this is the same as doing a TRUNCATE and storing the result.
2400 /// For floats, it is the same as doing an FP_ROUND and storing the result.
2401 bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; }
2402
2403 /// Returns true if the op does a compression to the vector before storing.
2404 /// The node contiguously stores the active elements (integers or floats)
2405 /// in src (those with their respective bit set in writemask k) to unaligned
2406 /// memory at base_addr.
2407 bool isCompressingStore() const { return StoreSDNodeBits.IsCompressing; }
2408
2409 const SDValue &getValue() const { return getOperand(1); }
2410 const SDValue &getBasePtr() const { return getOperand(2); }
2411 const SDValue &getOffset() const { return getOperand(3); }
2412 const SDValue &getMask() const { return getOperand(4); }
2413
2414 static bool classof(const SDNode *N) {
2415 return N->getOpcode() == ISD::MSTORE;
2416 }
2417};
2418
2419/// This is a base class used to represent
2420/// MGATHER and MSCATTER nodes
2421///
2422class MaskedGatherScatterSDNode : public MemSDNode {
2423public:
2424 friend class SelectionDAG;
2425
2426 MaskedGatherScatterSDNode(ISD::NodeType NodeTy, unsigned Order,
2427 const DebugLoc &dl, SDVTList VTs, EVT MemVT,
2428 MachineMemOperand *MMO, ISD::MemIndexType IndexType)
2429 : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
2430 LSBaseSDNodeBits.AddressingMode = IndexType;
2431 assert(getIndexType() == IndexType && "Value truncated")(static_cast <bool> (getIndexType() == IndexType &&
"Value truncated") ? void (0) : __assert_fail ("getIndexType() == IndexType && \"Value truncated\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 2431, __extension__ __PRETTY_FUNCTION__))
;
2432 }
2433
2434 /// How is Index applied to BasePtr when computing addresses.
2435 ISD::MemIndexType getIndexType() const {
2436 return static_cast<ISD::MemIndexType>(LSBaseSDNodeBits.AddressingMode);
2437 }
2438 void setIndexType(ISD::MemIndexType IndexType) {
2439 LSBaseSDNodeBits.AddressingMode = IndexType;
2440 }
2441 bool isIndexScaled() const {
2442 return (getIndexType() == ISD::SIGNED_SCALED) ||
2443 (getIndexType() == ISD::UNSIGNED_SCALED);
2444 }
2445 bool isIndexSigned() const {
2446 return (getIndexType() == ISD::SIGNED_SCALED) ||
2447 (getIndexType() == ISD::SIGNED_UNSCALED);
2448 }
2449
2450 // In the both nodes address is Op1, mask is Op2:
2451 // MaskedGatherSDNode (Chain, passthru, mask, base, index, scale)
2452 // MaskedScatterSDNode (Chain, value, mask, base, index, scale)
2453 // Mask is a vector of i1 elements
2454 const SDValue &getBasePtr() const { return getOperand(3); }
2455 const SDValue &getIndex() const { return getOperand(4); }
2456 const SDValue &getMask() const { return getOperand(2); }
2457 const SDValue &getScale() const { return getOperand(5); }
2458
2459 static bool classof(const SDNode *N) {
2460 return N->getOpcode() == ISD::MGATHER ||
2461 N->getOpcode() == ISD::MSCATTER;
2462 }
2463};
2464
2465/// This class is used to represent an MGATHER node
2466///
2467class MaskedGatherSDNode : public MaskedGatherScatterSDNode {
2468public:
2469 friend class SelectionDAG;
2470
2471 MaskedGatherSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2472 EVT MemVT, MachineMemOperand *MMO,
2473 ISD::MemIndexType IndexType, ISD::LoadExtType ETy)
2474 : MaskedGatherScatterSDNode(ISD::MGATHER, Order, dl, VTs, MemVT, MMO,
2475 IndexType) {
2476 LoadSDNodeBits.ExtTy = ETy;
2477 }
2478
2479 const SDValue &getPassThru() const { return getOperand(1); }
2480
2481 ISD::LoadExtType getExtensionType() const {
2482 return ISD::LoadExtType(LoadSDNodeBits.ExtTy);
2483 }
2484
2485 static bool classof(const SDNode *N) {
2486 return N->getOpcode() == ISD::MGATHER;
2487 }
2488};
2489
2490/// This class is used to represent an MSCATTER node
2491///
2492class MaskedScatterSDNode : public MaskedGatherScatterSDNode {
2493public:
2494 friend class SelectionDAG;
2495
2496 MaskedScatterSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2497 EVT MemVT, MachineMemOperand *MMO,
2498 ISD::MemIndexType IndexType, bool IsTrunc)
2499 : MaskedGatherScatterSDNode(ISD::MSCATTER, Order, dl, VTs, MemVT, MMO,
2500 IndexType) {
2501 StoreSDNodeBits.IsTruncating = IsTrunc;
2502 }
2503
2504 /// Return true if the op does a truncation before store.
2505 /// For integers this is the same as doing a TRUNCATE and storing the result.
2506 /// For floats, it is the same as doing an FP_ROUND and storing the result.
2507 bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; }
2508
2509 const SDValue &getValue() const { return getOperand(1); }
2510
2511 static bool classof(const SDNode *N) {
2512 return N->getOpcode() == ISD::MSCATTER;
2513 }
2514};
2515
2516/// An SDNode that represents everything that will be needed
2517/// to construct a MachineInstr. These nodes are created during the
2518/// instruction selection proper phase.
2519///
2520/// Note that the only supported way to set the `memoperands` is by calling the
2521/// `SelectionDAG::setNodeMemRefs` function as the memory management happens
2522/// inside the DAG rather than in the node.
2523class MachineSDNode : public SDNode {
2524private:
2525 friend class SelectionDAG;
2526
2527 MachineSDNode(unsigned Opc, unsigned Order, const DebugLoc &DL, SDVTList VTs)
2528 : SDNode(Opc, Order, DL, VTs) {}
2529
2530 // We use a pointer union between a single `MachineMemOperand` pointer and
2531 // a pointer to an array of `MachineMemOperand` pointers. This is null when
2532 // the number of these is zero, the single pointer variant used when the
2533 // number is one, and the array is used for larger numbers.
2534 //
2535 // The array is allocated via the `SelectionDAG`'s allocator and so will
2536 // always live until the DAG is cleaned up and doesn't require ownership here.
2537 //
2538 // We can't use something simpler like `TinyPtrVector` here because `SDNode`
2539 // subclasses aren't managed in a conforming C++ manner. See the comments on
2540 // `SelectionDAG::MorphNodeTo` which details what all goes on, but the
2541 // constraint here is that these don't manage memory with their constructor or
2542 // destructor and can be initialized to a good state even if they start off
2543 // uninitialized.
2544 PointerUnion<MachineMemOperand *, MachineMemOperand **> MemRefs = {};
2545
2546 // Note that this could be folded into the above `MemRefs` member if doing so
2547 // is advantageous at some point. We don't need to store this in most cases.
2548 // However, at the moment this doesn't appear to make the allocation any
2549 // smaller and makes the code somewhat simpler to read.
2550 int NumMemRefs = 0;
2551
2552public:
2553 using mmo_iterator = ArrayRef<MachineMemOperand *>::const_iterator;
2554
2555 ArrayRef<MachineMemOperand *> memoperands() const {
2556 // Special case the common cases.
2557 if (NumMemRefs == 0)
2558 return {};
2559 if (NumMemRefs == 1)
2560 return makeArrayRef(MemRefs.getAddrOfPtr1(), 1);
2561
2562 // Otherwise we have an actual array.
2563 return makeArrayRef(MemRefs.get<MachineMemOperand **>(), NumMemRefs);
2564 }
2565 mmo_iterator memoperands_begin() const { return memoperands().begin(); }
2566 mmo_iterator memoperands_end() const { return memoperands().end(); }
2567 bool memoperands_empty() const { return memoperands().empty(); }
2568
2569 /// Clear out the memory reference descriptor list.
2570 void clearMemRefs() {
2571 MemRefs = nullptr;
2572 NumMemRefs = 0;
2573 }
2574
2575 static bool classof(const SDNode *N) {
2576 return N->isMachineOpcode();
2577 }
2578};
2579
2580/// An SDNode that records if a register contains a value that is guaranteed to
2581/// be aligned accordingly.
2582class AssertAlignSDNode : public SDNode {
2583 Align Alignment;
2584
2585public:
2586 AssertAlignSDNode(unsigned Order, const DebugLoc &DL, EVT VT, Align A)
2587 : SDNode(ISD::AssertAlign, Order, DL, getSDVTList(VT)), Alignment(A) {}
2588
2589 Align getAlign() const { return Alignment; }
2590
2591 static bool classof(const SDNode *N) {
2592 return N->getOpcode() == ISD::AssertAlign;
2593 }
2594};
2595
2596class SDNodeIterator {
2597 const SDNode *Node;
2598 unsigned Operand;
2599
2600 SDNodeIterator(const SDNode *N, unsigned Op) : Node(N), Operand(Op) {}
2601
2602public:
2603 using iterator_category = std::forward_iterator_tag;
2604 using value_type = SDNode;
2605 using difference_type = std::ptrdiff_t;
2606 using pointer = value_type *;
2607 using reference = value_type &;
2608
2609 bool operator==(const SDNodeIterator& x) const {
2610 return Operand == x.Operand;
2611 }
2612 bool operator!=(const SDNodeIterator& x) const { return !operator==(x); }
2613
2614 pointer operator*() const {
2615 return Node->getOperand(Operand).getNode();
2616 }
2617 pointer operator->() const { return operator*(); }
2618
2619 SDNodeIterator& operator++() { // Preincrement
2620 ++Operand;
2621 return *this;
2622 }
2623 SDNodeIterator operator++(int) { // Postincrement
2624 SDNodeIterator tmp = *this; ++*this; return tmp;
2625 }
2626 size_t operator-(SDNodeIterator Other) const {
2627 assert(Node == Other.Node &&(static_cast <bool> (Node == Other.Node && "Cannot compare iterators of two different nodes!"
) ? void (0) : __assert_fail ("Node == Other.Node && \"Cannot compare iterators of two different nodes!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 2628, __extension__ __PRETTY_FUNCTION__))
2628 "Cannot compare iterators of two different nodes!")(static_cast <bool> (Node == Other.Node && "Cannot compare iterators of two different nodes!"
) ? void (0) : __assert_fail ("Node == Other.Node && \"Cannot compare iterators of two different nodes!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 2628, __extension__ __PRETTY_FUNCTION__))
;
2629 return Operand - Other.Operand;
2630 }
2631
2632 static SDNodeIterator begin(const SDNode *N) { return SDNodeIterator(N, 0); }
2633 static SDNodeIterator end (const SDNode *N) {
2634 return SDNodeIterator(N, N->getNumOperands());
2635 }
2636
2637 unsigned getOperand() const { return Operand; }
2638 const SDNode *getNode() const { return Node; }
2639};
2640
2641template <> struct GraphTraits<SDNode*> {
2642 using NodeRef = SDNode *;
2643 using ChildIteratorType = SDNodeIterator;
2644
2645 static NodeRef getEntryNode(SDNode *N) { return N; }
2646
2647 static ChildIteratorType child_begin(NodeRef N) {
2648 return SDNodeIterator::begin(N);
2649 }
2650
2651 static ChildIteratorType child_end(NodeRef N) {
2652 return SDNodeIterator::end(N);
2653 }
2654};
2655
2656/// A representation of the largest SDNode, for use in sizeof().
2657///
2658/// This needs to be a union because the largest node differs on 32 bit systems
2659/// with 4 and 8 byte pointer alignment, respectively.
2660using LargestSDNode = AlignedCharArrayUnion<AtomicSDNode, TargetIndexSDNode,
2661 BlockAddressSDNode,
2662 GlobalAddressSDNode,
2663 PseudoProbeSDNode>;
2664
2665/// The SDNode class with the greatest alignment requirement.
2666using MostAlignedSDNode = GlobalAddressSDNode;
2667
2668namespace ISD {
2669
2670 /// Returns true if the specified node is a non-extending and unindexed load.
2671 inline bool isNormalLoad(const SDNode *N) {
2672 const LoadSDNode *Ld = dyn_cast<LoadSDNode>(N);
2673 return Ld && Ld->getExtensionType() == ISD::NON_EXTLOAD &&
2674 Ld->getAddressingMode() == ISD::UNINDEXED;
2675 }
2676
2677 /// Returns true if the specified node is a non-extending load.
2678 inline bool isNON_EXTLoad(const SDNode *N) {
2679 return isa<LoadSDNode>(N) &&
2680 cast<LoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD;
2681 }
2682
2683 /// Returns true if the specified node is a EXTLOAD.
2684 inline bool isEXTLoad(const SDNode *N) {
2685 return isa<LoadSDNode>(N) &&
2686 cast<LoadSDNode>(N)->getExtensionType() == ISD::EXTLOAD;
2687 }
2688
2689 /// Returns true if the specified node is a SEXTLOAD.
2690 inline bool isSEXTLoad(const SDNode *N) {
2691 return isa<LoadSDNode>(N) &&
2692 cast<LoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD;
2693 }
2694
2695 /// Returns true if the specified node is a ZEXTLOAD.
2696 inline bool isZEXTLoad(const SDNode *N) {
2697 return isa<LoadSDNode>(N) &&
2698 cast<LoadSDNode>(N)->getExtensionType() == ISD::ZEXTLOAD;
2699 }
2700
2701 /// Returns true if the specified node is an unindexed load.
2702 inline bool isUNINDEXEDLoad(const SDNode *N) {
2703 return isa<LoadSDNode>(N) &&
2704 cast<LoadSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
2705 }
2706
2707 /// Returns true if the specified node is a non-truncating
2708 /// and unindexed store.
2709 inline bool isNormalStore(const SDNode *N) {
2710 const StoreSDNode *St = dyn_cast<StoreSDNode>(N);
2711 return St && !St->isTruncatingStore() &&
2712 St->getAddressingMode() == ISD::UNINDEXED;
2713 }
2714
2715 /// Returns true if the specified node is a non-truncating store.
2716 inline bool isNON_TRUNCStore(const SDNode *N) {
2717 return isa<StoreSDNode>(N) && !cast<StoreSDNode>(N)->isTruncatingStore();
2718 }
2719
2720 /// Returns true if the specified node is a truncating store.
2721 inline bool isTRUNCStore(const SDNode *N) {
2722 return isa<StoreSDNode>(N) && cast<StoreSDNode>(N)->isTruncatingStore();
2723 }
2724
2725 /// Returns true if the specified node is an unindexed store.
2726 inline bool isUNINDEXEDStore(const SDNode *N) {
2727 return isa<StoreSDNode>(N) &&
2728 cast<StoreSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
2729 }
2730
2731 /// Attempt to match a unary predicate against a scalar/splat constant or
2732 /// every element of a constant BUILD_VECTOR.
2733 /// If AllowUndef is true, then UNDEF elements will pass nullptr to Match.
2734 bool matchUnaryPredicate(SDValue Op,
2735 std::function<bool(ConstantSDNode *)> Match,
2736 bool AllowUndefs = false);
2737
2738 /// Attempt to match a binary predicate against a pair of scalar/splat
2739 /// constants or every element of a pair of constant BUILD_VECTORs.
2740 /// If AllowUndef is true, then UNDEF elements will pass nullptr to Match.
2741 /// If AllowTypeMismatch is true then RetType + ArgTypes don't need to match.
2742 bool matchBinaryPredicate(
2743 SDValue LHS, SDValue RHS,
2744 std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match,
2745 bool AllowUndefs = false, bool AllowTypeMismatch = false);
2746
2747 /// Returns true if the specified value is the overflow result from one
2748 /// of the overflow intrinsic nodes.
2749 inline bool isOverflowIntrOpRes(SDValue Op) {
2750 unsigned Opc = Op.getOpcode();
2751 return (Op.getResNo() == 1 &&
2752 (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO ||
2753 Opc == ISD::USUBO || Opc == ISD::SMULO || Opc == ISD::UMULO));
2754 }
2755
2756} // end namespace ISD
2757
2758} // end namespace llvm
2759
2760#endif // LLVM_CODEGEN_SELECTIONDAGNODES_H