Bug Summary

File: llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
Warning: line 2235, column 9
The result of the left shift is undefined because the shift amount '127' is greater than or equal to the width of type 'long long'

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name WebAssemblyISelLowering.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/build-llvm -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/Target/WebAssembly -I /build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/lib/Target/WebAssembly -I include -I /build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/include -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include 
-internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-command-line-argument -Wno-unknown-warning-option -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/build-llvm -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-11-10-160236-22541-1 -x c++ /build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp

/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp

1//=- WebAssemblyISelLowering.cpp - WebAssembly DAG Lowering Implementation -==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file implements the WebAssemblyTargetLowering class.
11///
12//===----------------------------------------------------------------------===//
13
14#include "WebAssemblyISelLowering.h"
15#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
16#include "Utils/WebAssemblyTypeUtilities.h"
17#include "Utils/WebAssemblyUtilities.h"
18#include "WebAssemblyMachineFunctionInfo.h"
19#include "WebAssemblySubtarget.h"
20#include "WebAssemblyTargetMachine.h"
21#include "llvm/CodeGen/CallingConvLower.h"
22#include "llvm/CodeGen/MachineInstrBuilder.h"
23#include "llvm/CodeGen/MachineJumpTableInfo.h"
24#include "llvm/CodeGen/MachineModuleInfo.h"
25#include "llvm/CodeGen/MachineRegisterInfo.h"
26#include "llvm/CodeGen/SelectionDAG.h"
27#include "llvm/CodeGen/SelectionDAGNodes.h"
28#include "llvm/IR/DiagnosticInfo.h"
29#include "llvm/IR/DiagnosticPrinter.h"
30#include "llvm/IR/Function.h"
31#include "llvm/IR/Intrinsics.h"
32#include "llvm/IR/IntrinsicsWebAssembly.h"
33#include "llvm/Support/Debug.h"
34#include "llvm/Support/ErrorHandling.h"
35#include "llvm/Support/KnownBits.h"
36#include "llvm/Support/MathExtras.h"
37#include "llvm/Support/raw_ostream.h"
38#include "llvm/Target/TargetOptions.h"
39using namespace llvm;
40
41#define DEBUG_TYPE"wasm-lower" "wasm-lower"
42
/// Construct the WebAssembly lowering configuration: registers classes,
/// legal/custom/expanded operations, DAG combines, libcall names, and
/// jump-table policy for this subtarget.
WebAssemblyTargetLowering::WebAssemblyTargetLowering(
    const TargetMachine &TM, const WebAssemblySubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  // Pointer MVT depends on the memory model: i64 under wasm64, i32 otherwise.
  auto MVTPtr = Subtarget->hasAddr64() ? MVT::i64 : MVT::i32;

  // Booleans always contain 0 or 1.
  setBooleanContents(ZeroOrOneBooleanContent);
  // Except in SIMD vectors, where a true lane is all-ones.
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  // We don't know the microarchitecture here, so just reduce register pressure.
  setSchedulingPreference(Sched::RegPressure);
  // Tell ISel that we have a stack pointer.
  setStackPointerRegisterToSaveRestore(
      Subtarget->hasAddr64() ? WebAssembly::SP64 : WebAssembly::SP32);
  // Set up the register classes.
  addRegisterClass(MVT::i32, &WebAssembly::I32RegClass);
  addRegisterClass(MVT::i64, &WebAssembly::I64RegClass);
  addRegisterClass(MVT::f32, &WebAssembly::F32RegClass);
  addRegisterClass(MVT::f64, &WebAssembly::F64RegClass);
  if (Subtarget->hasSIMD128()) {
    addRegisterClass(MVT::v16i8, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v8i16, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v4i32, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v4f32, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v2i64, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v2f64, &WebAssembly::V128RegClass);
  }
  if (Subtarget->hasReferenceTypes()) {
    addRegisterClass(MVT::externref, &WebAssembly::EXTERNREFRegClass);
    addRegisterClass(MVT::funcref, &WebAssembly::FUNCREFRegClass);
  }
  // Compute derived properties from the register classes.
  computeRegisterProperties(Subtarget->getRegisterInfo());

  // Transform loads and stores to pointers in address space 1 to loads and
  // stores to WebAssembly global variables, outside linear memory.
  for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64}) {
    setOperationAction(ISD::LOAD, T, Custom);
    setOperationAction(ISD::STORE, T, Custom);
  }
  if (Subtarget->hasSIMD128()) {
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
                   MVT::v2f64}) {
      setOperationAction(ISD::LOAD, T, Custom);
      setOperationAction(ISD::STORE, T, Custom);
    }
  }
  if (Subtarget->hasReferenceTypes()) {
    // We need custom load and store lowering for both externref, funcref and
    // Other. The MVT::Other here represents tables of reference types.
    for (auto T : {MVT::externref, MVT::funcref, MVT::Other}) {
      setOperationAction(ISD::LOAD, T, Custom);
      setOperationAction(ISD::STORE, T, Custom);
    }
  }

  // Symbol-like nodes are lowered to wasm-specific wrappers.
  setOperationAction(ISD::GlobalAddress, MVTPtr, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVTPtr, Custom);
  setOperationAction(ISD::ExternalSymbol, MVTPtr, Custom);
  setOperationAction(ISD::JumpTable, MVTPtr, Custom);
  setOperationAction(ISD::BlockAddress, MVTPtr, Custom);
  setOperationAction(ISD::BRIND, MVT::Other, Custom);

  // Take the default expansion for va_arg, va_copy, and va_end. There is no
  // default action for va_start, so we do that custom.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  for (auto T : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
    // Don't expand the floating-point types to constant pools.
    setOperationAction(ISD::ConstantFP, T, Legal);
    // Expand floating-point comparisons.
    for (auto CC : {ISD::SETO, ISD::SETUO, ISD::SETUEQ, ISD::SETONE,
                    ISD::SETULT, ISD::SETULE, ISD::SETUGT, ISD::SETUGE})
      setCondCodeAction(CC, T, Expand);
    // Expand floating-point library function operators.
    for (auto Op :
         {ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FMA})
      setOperationAction(Op, T, Expand);
    // Note supported floating-point library function operators that otherwise
    // default to expand.
    for (auto Op :
         {ISD::FCEIL, ISD::FFLOOR, ISD::FTRUNC, ISD::FNEARBYINT, ISD::FRINT})
      setOperationAction(Op, T, Legal);
    // Support minimum and maximum, which otherwise default to expand.
    setOperationAction(ISD::FMINIMUM, T, Legal);
    setOperationAction(ISD::FMAXIMUM, T, Legal);
    // WebAssembly currently has no builtin f16 support.
    setOperationAction(ISD::FP16_TO_FP, T, Expand);
    setOperationAction(ISD::FP_TO_FP16, T, Expand);
    setLoadExtAction(ISD::EXTLOAD, T, MVT::f16, Expand);
    setTruncStoreAction(T, MVT::f16, Expand);
  }

  // Expand unavailable integer operations.
  for (auto Op :
       {ISD::BSWAP, ISD::SMUL_LOHI, ISD::UMUL_LOHI, ISD::MULHS, ISD::MULHU,
        ISD::SDIVREM, ISD::UDIVREM, ISD::SHL_PARTS, ISD::SRA_PARTS,
        ISD::SRL_PARTS, ISD::ADDC, ISD::ADDE, ISD::SUBC, ISD::SUBE}) {
    for (auto T : {MVT::i32, MVT::i64})
      setOperationAction(Op, T, Expand);
    if (Subtarget->hasSIMD128())
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
        setOperationAction(Op, T, Expand);
  }

  // With the nontrapping-fptoint feature, saturating conversions get custom
  // (single-instruction) lowering instead of the generic expansion.
  if (Subtarget->hasNontrappingFPToInt())
    for (auto Op : {ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT})
      for (auto T : {MVT::i32, MVT::i64})
        setOperationAction(Op, T, Custom);

  // SIMD-specific configuration
  if (Subtarget->hasSIMD128()) {
    // Hoist bitcasts out of shuffles
    setTargetDAGCombine(ISD::VECTOR_SHUFFLE);

    // Combine extends of extract_subvectors into widening ops
    setTargetDAGCombine(ISD::SIGN_EXTEND);
    setTargetDAGCombine(ISD::ZERO_EXTEND);

    // Combine int_to_fp or fp_extend of extract_vectors and vice versa into
    // conversions ops
    setTargetDAGCombine(ISD::SINT_TO_FP);
    setTargetDAGCombine(ISD::UINT_TO_FP);
    setTargetDAGCombine(ISD::FP_EXTEND);
    setTargetDAGCombine(ISD::EXTRACT_SUBVECTOR);

    // Combine fp_to_{s,u}int_sat or fp_round of concat_vectors or vice versa
    // into conversion ops
    setTargetDAGCombine(ISD::FP_TO_SINT_SAT);
    setTargetDAGCombine(ISD::FP_TO_UINT_SAT);
    setTargetDAGCombine(ISD::FP_ROUND);
    setTargetDAGCombine(ISD::CONCAT_VECTORS);

    // Support saturating add for i8x16 and i16x8
    for (auto Op : {ISD::SADDSAT, ISD::UADDSAT})
      for (auto T : {MVT::v16i8, MVT::v8i16})
        setOperationAction(Op, T, Legal);

    // Support integer abs
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
      setOperationAction(ISD::ABS, T, Legal);

    // Custom lower BUILD_VECTORs to minimize number of replace_lanes
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
                   MVT::v2f64})
      setOperationAction(ISD::BUILD_VECTOR, T, Custom);

    // We have custom shuffle lowering to expose the shuffle mask
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
                   MVT::v2f64})
      setOperationAction(ISD::VECTOR_SHUFFLE, T, Custom);

    // Custom lowering since wasm shifts must have a scalar shift amount
    for (auto Op : {ISD::SHL, ISD::SRA, ISD::SRL})
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
        setOperationAction(Op, T, Custom);

    // Custom lower lane accesses to expand out variable indices
    for (auto Op : {ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_VECTOR_ELT})
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32,
                     MVT::v2i64, MVT::v2f64})
        setOperationAction(Op, T, Custom);

    // There is no i8x16.mul instruction
    setOperationAction(ISD::MUL, MVT::v16i8, Expand);

    // There is no vector conditional select instruction
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
                   MVT::v2f64})
      setOperationAction(ISD::SELECT_CC, T, Expand);

    // Expand integer operations supported for scalars but not SIMD
    for (auto Op :
         {ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM, ISD::ROTL, ISD::ROTR})
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
        setOperationAction(Op, T, Expand);

    // But we do have integer min and max operations
    for (auto Op : {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
        setOperationAction(Op, T, Legal);

    // And we have popcnt for i8x16. It can be used to expand ctlz/cttz.
    setOperationAction(ISD::CTPOP, MVT::v16i8, Legal);
    setOperationAction(ISD::CTLZ, MVT::v16i8, Expand);
    setOperationAction(ISD::CTTZ, MVT::v16i8, Expand);

    // Custom lower bit counting operations for other types to scalarize them.
    for (auto Op : {ISD::CTLZ, ISD::CTTZ, ISD::CTPOP})
      for (auto T : {MVT::v8i16, MVT::v4i32, MVT::v2i64})
        setOperationAction(Op, T, Custom);

    // Expand float operations supported for scalars but not SIMD
    for (auto Op : {ISD::FCOPYSIGN, ISD::FLOG, ISD::FLOG2, ISD::FLOG10,
                    ISD::FEXP, ISD::FEXP2, ISD::FRINT})
      for (auto T : {MVT::v4f32, MVT::v2f64})
        setOperationAction(Op, T, Expand);

    // Unsigned comparison operations are unavailable for i64x2 vectors.
    for (auto CC : {ISD::SETUGT, ISD::SETUGE, ISD::SETULT, ISD::SETULE})
      setCondCodeAction(CC, MVT::v2i64, Custom);

    // 64x2 conversions are not in the spec
    for (auto Op :
         {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT})
      for (auto T : {MVT::v2i64, MVT::v2f64})
        setOperationAction(Op, T, Expand);

    // But saturating fp_to_int conversions are
    for (auto Op : {ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT})
      setOperationAction(Op, MVT::v4i32, Custom);
  }

  // As a special case, these operators use the type to mean the type to
  // sign-extend from.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  if (!Subtarget->hasSignExt()) {
    // Sign extends are legal only when extending a vector extract
    auto Action = Subtarget->hasSIMD128() ? Custom : Expand;
    for (auto T : {MVT::i8, MVT::i16, MVT::i32})
      setOperationAction(ISD::SIGN_EXTEND_INREG, T, Action);
  }
  for (auto T : MVT::integer_fixedlen_vector_valuetypes())
    setOperationAction(ISD::SIGN_EXTEND_INREG, T, Expand);

  // Dynamic stack allocation: use the default expansion.
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVTPtr, Expand);

  setOperationAction(ISD::FrameIndex, MVT::i32, Custom);
  setOperationAction(ISD::FrameIndex, MVT::i64, Custom);
  setOperationAction(ISD::CopyToReg, MVT::Other, Custom);

  // Expand these forms; we pattern-match the forms that we can handle in isel.
  for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64})
    for (auto Op : {ISD::BR_CC, ISD::SELECT_CC})
      setOperationAction(Op, T, Expand);

  // We have custom switch handling.
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  // WebAssembly doesn't have:
  // - Floating-point extending loads.
  // - Floating-point truncating stores.
  // - i1 extending loads.
  // - truncating SIMD stores and most extending loads
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  for (auto T : MVT::integer_valuetypes())
    for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
      setLoadExtAction(Ext, T, MVT::i1, Promote);
  if (Subtarget->hasSIMD128()) {
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v4f32,
                   MVT::v2f64}) {
      for (auto MemT : MVT::fixedlen_vector_valuetypes()) {
        if (MVT(T) != MemT) {
          setTruncStoreAction(T, MemT, Expand);
          for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
            setLoadExtAction(Ext, T, MemT, Expand);
        }
      }
    }
    // But some vector extending loads are legal
    for (auto Ext : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}) {
      setLoadExtAction(Ext, MVT::v8i16, MVT::v8i8, Legal);
      setLoadExtAction(Ext, MVT::v4i32, MVT::v4i16, Legal);
      setLoadExtAction(Ext, MVT::v2i64, MVT::v2i32, Legal);
    }
    setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f32, Legal);
  }

  // Don't do anything clever with build_pairs
  setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);

  // Trap lowers to wasm unreachable
  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);

  // Exception handling intrinsics
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);

  setMaxAtomicSizeInBitsSupported(64);

  // Override the __gnu_f2h_ieee/__gnu_h2f_ieee names so that the f32 name is
  // consistent with the f64 and f128 names.
  setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
  setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");

  // Define the emscripten name for return address helper.
  // TODO: when implementing other Wasm backends, make this generic or only do
  // this on emscripten depending on what they end up doing.
  setLibcallName(RTLIB::RETURN_ADDRESS, "emscripten_return_address");

  // Always convert switches to br_tables unless there is only one case, which
  // is equivalent to a simple branch. This reduces code size for wasm, and we
  // defer possible jump table optimizations to the VM.
  setMinimumJumpTableEntries(2);
}
347
348MVT WebAssemblyTargetLowering::getPointerTy(const DataLayout &DL,
349 uint32_t AS) const {
350 if (AS == WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_EXTERNREF)
351 return MVT::externref;
352 if (AS == WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_FUNCREF)
353 return MVT::funcref;
354 return TargetLowering::getPointerTy(DL, AS);
355}
356
357MVT WebAssemblyTargetLowering::getPointerMemTy(const DataLayout &DL,
358 uint32_t AS) const {
359 if (AS == WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_EXTERNREF)
360 return MVT::externref;
361 if (AS == WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_FUNCREF)
362 return MVT::funcref;
363 return TargetLowering::getPointerMemTy(DL, AS);
364}
365
366TargetLowering::AtomicExpansionKind
367WebAssemblyTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
368 // We have wasm instructions for these
369 switch (AI->getOperation()) {
370 case AtomicRMWInst::Add:
371 case AtomicRMWInst::Sub:
372 case AtomicRMWInst::And:
373 case AtomicRMWInst::Or:
374 case AtomicRMWInst::Xor:
375 case AtomicRMWInst::Xchg:
376 return AtomicExpansionKind::None;
377 default:
378 break;
379 }
380 return AtomicExpansionKind::CmpXChg;
381}
382
383bool WebAssemblyTargetLowering::shouldScalarizeBinop(SDValue VecOp) const {
384 // Implementation copied from X86TargetLowering.
385 unsigned Opc = VecOp.getOpcode();
386
387 // Assume target opcodes can't be scalarized.
388 // TODO - do we have any exceptions?
389 if (Opc >= ISD::BUILTIN_OP_END)
390 return false;
391
392 // If the vector op is not supported, try to convert to scalar.
393 EVT VecVT = VecOp.getValueType();
394 if (!isOperationLegalOrCustomOrPromote(Opc, VecVT))
395 return true;
396
397 // If the vector op is supported, but the scalar op is not, the transform may
398 // not be worthwhile.
399 EVT ScalarVT = VecVT.getScalarType();
400 return isOperationLegalOrCustomOrPromote(Opc, ScalarVT);
401}
402
403FastISel *WebAssemblyTargetLowering::createFastISel(
404 FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo) const {
405 return WebAssembly::createFastISel(FuncInfo, LibInfo);
406}
407
MVT WebAssemblyTargetLowering::getScalarShiftAmountTy(const DataLayout & /*DL*/,
                                                      EVT VT) const {
  // Round the shifted type's width up to a power of two; the shift amount
  // uses an integer type of that width.
  unsigned BitWidth = NextPowerOf2(VT.getSizeInBits() - 1);
  // i8 is the narrowest integer MVT we hand out here.
  if (BitWidth > 1 && BitWidth < 8)
    BitWidth = 8;

  if (BitWidth > 64) {
    // The shift will be lowered to a libcall, and compiler-rt libcalls expect
    // the count to be an i32.
    BitWidth = 32;
    assert(BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) &&
           "32-bit shift counts ought to be enough for anyone");
  }

  MVT Result = MVT::getIntegerVT(BitWidth);
  assert(Result != MVT::INVALID_SIMPLE_VALUE_TYPE &&
         "Unable to represent scalar shift amount type");
  return Result;
}
427
// Lower an fp-to-int conversion operator from the LLVM opcode, which has an
// undefined result on invalid/overflow, to the WebAssembly opcode, which
// traps on invalid/overflow. Builds a CFG diamond: if the input is in range,
// perform the (trapping) conversion; otherwise produce a substitute value.
static MachineBasicBlock *LowerFPToInt(MachineInstr &MI, DebugLoc DL,
                                       MachineBasicBlock *BB,
                                       const TargetInstrInfo &TII,
                                       bool IsUnsigned, bool Int64,
                                       bool Float64, unsigned LoweredOpcode) {
  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();

  Register OutReg = MI.getOperand(0).getReg();
  Register InReg = MI.getOperand(1).getReg();

  // Pick the opcode variants matching the float/int operand widths.
  unsigned Abs = Float64 ? WebAssembly::ABS_F64 : WebAssembly::ABS_F32;
  unsigned FConst = Float64 ? WebAssembly::CONST_F64 : WebAssembly::CONST_F32;
  unsigned LT = Float64 ? WebAssembly::LT_F64 : WebAssembly::LT_F32;
  unsigned GE = Float64 ? WebAssembly::GE_F64 : WebAssembly::GE_F32;
  unsigned IConst = Int64 ? WebAssembly::CONST_I64 : WebAssembly::CONST_I32;
  unsigned Eqz = WebAssembly::EQZ_I32;
  unsigned And = WebAssembly::AND_I32;
  // Most-negative representable result; -(double)Limit is the exclusive upper
  // bound on |x| for the signed case, doubled for the unsigned case.
  int64_t Limit = Int64 ? INT64_MIN : INT32_MIN;
  // Value used when the input is out of range (the LLVM result is undefined
  // there, so any choice is valid).
  int64_t Substitute = IsUnsigned ? 0 : Limit;
  double CmpVal = IsUnsigned ? -(double)Limit * 2.0 : -(double)Limit;
  auto &Context = BB->getParent()->getFunction().getContext();
  Type *Ty = Float64 ? Type::getDoubleTy(Context) : Type::getFloatTy(Context);

  const BasicBlock *LLVMBB = BB->getBasicBlock();
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *TrueMBB = F->CreateMachineBasicBlock(LLVMBB);
  MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVMBB);
  MachineBasicBlock *DoneMBB = F->CreateMachineBasicBlock(LLVMBB);

  MachineFunction::iterator It = ++BB->getIterator();
  F->insert(It, FalseMBB);
  F->insert(It, TrueMBB);
  F->insert(It, DoneMBB);

  // Transfer the remainder of BB and its successor edges to DoneMBB.
  DoneMBB->splice(DoneMBB->begin(), BB, std::next(MI.getIterator()), BB->end());
  DoneMBB->transferSuccessorsAndUpdatePHIs(BB);

  BB->addSuccessor(TrueMBB);
  BB->addSuccessor(FalseMBB);
  TrueMBB->addSuccessor(DoneMBB);
  FalseMBB->addSuccessor(DoneMBB);

  unsigned Tmp0, Tmp1, CmpReg, EqzReg, FalseReg, TrueReg;
  Tmp0 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
  Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
  CmpReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
  EqzReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
  FalseReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
  TrueReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));

  MI.eraseFromParent();
  // For signed numbers, we can do a single comparison to determine whether
  // fabs(x) is within range.
  if (IsUnsigned) {
    Tmp0 = InReg;
  } else {
    BuildMI(BB, DL, TII.get(Abs), Tmp0).addReg(InReg);
  }
  BuildMI(BB, DL, TII.get(FConst), Tmp1)
      .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, CmpVal)));
  BuildMI(BB, DL, TII.get(LT), CmpReg).addReg(Tmp0).addReg(Tmp1);

  // For unsigned numbers, we have to do a separate comparison with zero.
  if (IsUnsigned) {
    Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
    Register SecondCmpReg =
        MRI.createVirtualRegister(&WebAssembly::I32RegClass);
    Register AndReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
    BuildMI(BB, DL, TII.get(FConst), Tmp1)
        .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, 0.0)));
    BuildMI(BB, DL, TII.get(GE), SecondCmpReg).addReg(Tmp0).addReg(Tmp1);
    BuildMI(BB, DL, TII.get(And), AndReg).addReg(CmpReg).addReg(SecondCmpReg);
    CmpReg = AndReg;
  }

  // EqzReg is nonzero when the input is OUT of range.
  BuildMI(BB, DL, TII.get(Eqz), EqzReg).addReg(CmpReg);

  // Create the CFG diamond to select between doing the conversion or using
  // the substitute value.
  BuildMI(BB, DL, TII.get(WebAssembly::BR_IF)).addMBB(TrueMBB).addReg(EqzReg);
  BuildMI(FalseMBB, DL, TII.get(LoweredOpcode), FalseReg).addReg(InReg);
  BuildMI(FalseMBB, DL, TII.get(WebAssembly::BR)).addMBB(DoneMBB);
  BuildMI(TrueMBB, DL, TII.get(IConst), TrueReg).addImm(Substitute);
  BuildMI(*DoneMBB, DoneMBB->begin(), DL, TII.get(TargetOpcode::PHI), OutReg)
      .addReg(FalseReg)
      .addMBB(FalseMBB)
      .addReg(TrueReg)
      .addMBB(TrueMBB);

  return DoneMBB;
}
523
// Lower a CALL_PARAMS/CALL_RESULTS pseudo-instruction pair into the final
// wasm call instruction (CALL, CALL_INDIRECT, RET_CALL, or
// RET_CALL_INDIRECT), merging defs from CallResults and uses from CallParams
// into a single MachineInstr and erasing the pseudos.
static MachineBasicBlock *
LowerCallResults(MachineInstr &CallResults, DebugLoc DL, MachineBasicBlock *BB,
                 const WebAssemblySubtarget *Subtarget,
                 const TargetInstrInfo &TII) {
  MachineInstr &CallParams = *CallResults.getPrevNode();
  assert(CallParams.getOpcode() == WebAssembly::CALL_PARAMS);
  assert(CallResults.getOpcode() == WebAssembly::CALL_RESULTS ||
         CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS);

  // A register callee means an indirect call.
  bool IsIndirect = CallParams.getOperand(0).isReg();
  bool IsRetCall = CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS;

  bool IsFuncrefCall = false;
  if (IsIndirect) {
    Register Reg = CallParams.getOperand(0).getReg();
    const MachineFunction *MF = BB->getParent();
    const MachineRegisterInfo &MRI = MF->getRegInfo();
    const TargetRegisterClass *TRC = MRI.getRegClass(Reg);
    IsFuncrefCall = (TRC == &WebAssembly::FUNCREFRegClass);
    assert(!IsFuncrefCall || Subtarget->hasReferenceTypes());
  }

  unsigned CallOp;
  if (IsIndirect && IsRetCall) {
    CallOp = WebAssembly::RET_CALL_INDIRECT;
  } else if (IsIndirect) {
    CallOp = WebAssembly::CALL_INDIRECT;
  } else if (IsRetCall) {
    CallOp = WebAssembly::RET_CALL;
  } else {
    CallOp = WebAssembly::CALL;
  }

  MachineFunction &MF = *BB->getParent();
  const MCInstrDesc &MCID = TII.get(CallOp);
  MachineInstrBuilder MIB(MF, MF.CreateMachineInstr(MCID, DL));

  // See if we must truncate the function pointer.
  // CALL_INDIRECT takes an i32, but in wasm64 we represent function pointers
  // as 64-bit for uniformity with other pointer types.
  // See also: WebAssemblyFastISel::selectCall
  if (IsIndirect && MF.getSubtarget<WebAssemblySubtarget>().hasAddr64()) {
    Register Reg32 =
        MF.getRegInfo().createVirtualRegister(&WebAssembly::I32RegClass);
    auto &FnPtr = CallParams.getOperand(0);
    BuildMI(*BB, CallResults.getIterator(), DL,
            TII.get(WebAssembly::I32_WRAP_I64), Reg32)
        .addReg(FnPtr.getReg());
    FnPtr.setReg(Reg32);
  }

  // Move the function pointer to the end of the arguments for indirect calls
  if (IsIndirect) {
    auto FnPtr = CallParams.getOperand(0);
    CallParams.RemoveOperand(0);

    // For funcrefs, call_indirect is done through __funcref_call_table and the
    // funcref is always installed in slot 0 of the table, therefore instead of
    // having the function pointer added at the end of the params list, a zero
    // (the index in __funcref_call_table) is added.
    if (IsFuncrefCall) {
      Register RegZero =
          MF.getRegInfo().createVirtualRegister(&WebAssembly::I32RegClass);
      MachineInstrBuilder MIBC0 =
          BuildMI(MF, DL, TII.get(WebAssembly::CONST_I32), RegZero).addImm(0);

      BB->insert(CallResults.getIterator(), MIBC0);
      MachineInstrBuilder(MF, CallParams).addReg(RegZero);
    } else
      CallParams.addOperand(FnPtr);
  }

  // Result registers come first on the new instruction.
  for (auto Def : CallResults.defs())
    MIB.add(Def);

  if (IsIndirect) {
    // Placeholder for the type index.
    MIB.addImm(0);
    // The table into which this call_indirect indexes.
    MCSymbolWasm *Table = IsFuncrefCall
                              ? WebAssembly::getOrCreateFuncrefCallTableSymbol(
                                    MF.getContext(), Subtarget)
                              : WebAssembly::getOrCreateFunctionTableSymbol(
                                    MF.getContext(), Subtarget);
    if (Subtarget->hasReferenceTypes()) {
      MIB.addSym(Table);
    } else {
      // For the MVP there is at most one table whose number is 0, but we can't
      // write a table symbol or issue relocations. Instead we just ensure the
      // table is live and write a zero.
      Table->setNoStrip();
      MIB.addImm(0);
    }
  }

  // Then the call arguments.
  for (auto Use : CallParams.uses())
    MIB.add(Use);

  BB->insert(CallResults.getIterator(), MIB);
  CallParams.eraseFromParent();
  CallResults.eraseFromParent();

  // If this is a funcref call, to avoid hidden GC roots, we need to clear the
  // table slot with ref.null upon call_indirect return.
  //
  // This generates the following code, which comes right after a call_indirect
  // of a funcref:
  //
  //   i32.const 0
  //   ref.null func
  //   table.set __funcref_call_table
  if (IsIndirect && IsFuncrefCall) {
    MCSymbolWasm *Table = WebAssembly::getOrCreateFuncrefCallTableSymbol(
        MF.getContext(), Subtarget);
    Register RegZero =
        MF.getRegInfo().createVirtualRegister(&WebAssembly::I32RegClass);
    MachineInstr *Const0 =
        BuildMI(MF, DL, TII.get(WebAssembly::CONST_I32), RegZero).addImm(0);
    BB->insertAfter(MIB.getInstr()->getIterator(), Const0);

    Register RegFuncref =
        MF.getRegInfo().createVirtualRegister(&WebAssembly::FUNCREFRegClass);
    MachineInstr *RefNull =
        BuildMI(MF, DL, TII.get(WebAssembly::REF_NULL_FUNCREF), RegFuncref)
            .addImm(static_cast<int32_t>(WebAssembly::HeapType::Funcref));
    BB->insertAfter(Const0->getIterator(), RefNull);

    MachineInstr *TableSet =
        BuildMI(MF, DL, TII.get(WebAssembly::TABLE_SET_FUNCREF))
            .addSym(Table)
            .addReg(RegZero)
            .addReg(RegFuncref);
    BB->insertAfter(RefNull->getIterator(), TableSet);
  }

  return BB;
}
661
662MachineBasicBlock *WebAssemblyTargetLowering::EmitInstrWithCustomInserter(
663 MachineInstr &MI, MachineBasicBlock *BB) const {
664 const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
665 DebugLoc DL = MI.getDebugLoc();
666
667 switch (MI.getOpcode()) {
668 default:
669 llvm_unreachable("Unexpected instr type to insert")::llvm::llvm_unreachable_internal("Unexpected instr type to insert"
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 669)
;
670 case WebAssembly::FP_TO_SINT_I32_F32:
671 return LowerFPToInt(MI, DL, BB, TII, false, false, false,
672 WebAssembly::I32_TRUNC_S_F32);
673 case WebAssembly::FP_TO_UINT_I32_F32:
674 return LowerFPToInt(MI, DL, BB, TII, true, false, false,
675 WebAssembly::I32_TRUNC_U_F32);
676 case WebAssembly::FP_TO_SINT_I64_F32:
677 return LowerFPToInt(MI, DL, BB, TII, false, true, false,
678 WebAssembly::I64_TRUNC_S_F32);
679 case WebAssembly::FP_TO_UINT_I64_F32:
680 return LowerFPToInt(MI, DL, BB, TII, true, true, false,
681 WebAssembly::I64_TRUNC_U_F32);
682 case WebAssembly::FP_TO_SINT_I32_F64:
683 return LowerFPToInt(MI, DL, BB, TII, false, false, true,
684 WebAssembly::I32_TRUNC_S_F64);
685 case WebAssembly::FP_TO_UINT_I32_F64:
686 return LowerFPToInt(MI, DL, BB, TII, true, false, true,
687 WebAssembly::I32_TRUNC_U_F64);
688 case WebAssembly::FP_TO_SINT_I64_F64:
689 return LowerFPToInt(MI, DL, BB, TII, false, true, true,
690 WebAssembly::I64_TRUNC_S_F64);
691 case WebAssembly::FP_TO_UINT_I64_F64:
692 return LowerFPToInt(MI, DL, BB, TII, true, true, true,
693 WebAssembly::I64_TRUNC_U_F64);
694 case WebAssembly::CALL_RESULTS:
695 case WebAssembly::RET_CALL_RESULTS:
696 return LowerCallResults(MI, DL, BB, Subtarget, TII);
697 }
698}
699
// Return a human-readable name for a WebAssemblyISD target node opcode, used
// by SelectionDAG debug dumps. The case labels for the real node types are
// generated by the X-macro include of WebAssemblyISD.def below.
const char *
WebAssemblyTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (static_cast<WebAssemblyISD::NodeType>(Opcode)) {
  // Sentinel values delimiting the opcode ranges; they are not real nodes.
  case WebAssemblyISD::FIRST_NUMBER:
  case WebAssemblyISD::FIRST_MEM_OPCODE:
    break;
#define HANDLE_NODETYPE(NODE)                                                  \
  case WebAssemblyISD::NODE:                                                   \
    return "WebAssemblyISD::" #NODE;
#define HANDLE_MEM_NODETYPE(NODE) HANDLE_NODETYPE(NODE)
#include "WebAssemblyISD.def"
#undef HANDLE_MEM_NODETYPE
#undef HANDLE_NODETYPE
  }
  // Unknown opcode: let the generic printer handle it.
  return nullptr;
}
716
717std::pair<unsigned, const TargetRegisterClass *>
718WebAssemblyTargetLowering::getRegForInlineAsmConstraint(
719 const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
720 // First, see if this is a constraint that directly corresponds to a
721 // WebAssembly register class.
722 if (Constraint.size() == 1) {
723 switch (Constraint[0]) {
724 case 'r':
725 assert(VT != MVT::iPTR && "Pointer MVT not expected here")(static_cast <bool> (VT != MVT::iPTR && "Pointer MVT not expected here"
) ? void (0) : __assert_fail ("VT != MVT::iPTR && \"Pointer MVT not expected here\""
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 725, __extension__ __PRETTY_FUNCTION__))
;
726 if (Subtarget->hasSIMD128() && VT.isVector()) {
727 if (VT.getSizeInBits() == 128)
728 return std::make_pair(0U, &WebAssembly::V128RegClass);
729 }
730 if (VT.isInteger() && !VT.isVector()) {
731 if (VT.getSizeInBits() <= 32)
732 return std::make_pair(0U, &WebAssembly::I32RegClass);
733 if (VT.getSizeInBits() <= 64)
734 return std::make_pair(0U, &WebAssembly::I64RegClass);
735 }
736 if (VT.isFloatingPoint() && !VT.isVector()) {
737 switch (VT.getSizeInBits()) {
738 case 32:
739 return std::make_pair(0U, &WebAssembly::F32RegClass);
740 case 64:
741 return std::make_pair(0U, &WebAssembly::F64RegClass);
742 default:
743 break;
744 }
745 }
746 break;
747 default:
748 break;
749 }
750 }
751
752 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
753}
754
755bool WebAssemblyTargetLowering::isCheapToSpeculateCttz() const {
756 // Assume ctz is a relatively cheap operation.
757 return true;
758}
759
760bool WebAssemblyTargetLowering::isCheapToSpeculateCtlz() const {
761 // Assume clz is a relatively cheap operation.
762 return true;
763}
764
765bool WebAssemblyTargetLowering::isLegalAddressingMode(const DataLayout &DL,
766 const AddrMode &AM,
767 Type *Ty, unsigned AS,
768 Instruction *I) const {
769 // WebAssembly offsets are added as unsigned without wrapping. The
770 // isLegalAddressingMode gives us no way to determine if wrapping could be
771 // happening, so we approximate this by accepting only non-negative offsets.
772 if (AM.BaseOffs < 0)
773 return false;
774
775 // WebAssembly has no scale register operands.
776 if (AM.Scale != 0)
777 return false;
778
779 // Everything else is legal.
780 return true;
781}
782
783bool WebAssemblyTargetLowering::allowsMisalignedMemoryAccesses(
784 EVT /*VT*/, unsigned /*AddrSpace*/, Align /*Align*/,
785 MachineMemOperand::Flags /*Flags*/, bool *Fast) const {
786 // WebAssembly supports unaligned accesses, though it should be declared
787 // with the p2align attribute on loads and stores which do so, and there
788 // may be a performance impact. We tell LLVM they're "fast" because
789 // for the kinds of things that LLVM uses this for (merging adjacent stores
790 // of constants, etc.), WebAssembly implementations will either want the
791 // unaligned access or they'll split anyway.
792 if (Fast)
793 *Fast = true;
794 return true;
795}
796
797bool WebAssemblyTargetLowering::isIntDivCheap(EVT VT,
798 AttributeList Attr) const {
799 // The current thinking is that wasm engines will perform this optimization,
800 // so we can save on code size.
801 return true;
802}
803
804bool WebAssemblyTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
805 EVT ExtT = ExtVal.getValueType();
806 EVT MemT = cast<LoadSDNode>(ExtVal->getOperand(0))->getValueType(0);
807 return (ExtT == MVT::v8i16 && MemT == MVT::v8i8) ||
808 (ExtT == MVT::v4i32 && MemT == MVT::v4i16) ||
809 (ExtT == MVT::v2i64 && MemT == MVT::v2i32);
810}
811
812bool WebAssemblyTargetLowering::isOffsetFoldingLegal(
813 const GlobalAddressSDNode *GA) const {
814 // Wasm doesn't support function addresses with offsets
815 const GlobalValue *GV = GA->getGlobal();
816 return isa<Function>(GV) ? false : TargetLowering::isOffsetFoldingLegal(GA);
817}
818
819EVT WebAssemblyTargetLowering::getSetCCResultType(const DataLayout &DL,
820 LLVMContext &C,
821 EVT VT) const {
822 if (VT.isVector())
823 return VT.changeVectorElementTypeToInteger();
824
825 // So far, all branch instructions in Wasm take an I32 condition.
826 // The default TargetLowering::getSetCCResultType returns the pointer size,
827 // which would be useful to reduce instruction counts when testing
828 // against 64-bit pointers/values if at some point Wasm supports that.
829 return EVT::getIntegerVT(C, 32);
830}
831
832bool WebAssemblyTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
833 const CallInst &I,
834 MachineFunction &MF,
835 unsigned Intrinsic) const {
836 switch (Intrinsic) {
837 case Intrinsic::wasm_memory_atomic_notify:
838 Info.opc = ISD::INTRINSIC_W_CHAIN;
839 Info.memVT = MVT::i32;
840 Info.ptrVal = I.getArgOperand(0);
841 Info.offset = 0;
842 Info.align = Align(4);
843 // atomic.notify instruction does not really load the memory specified with
844 // this argument, but MachineMemOperand should either be load or store, so
845 // we set this to a load.
846 // FIXME Volatile isn't really correct, but currently all LLVM atomic
847 // instructions are treated as volatiles in the backend, so we should be
848 // consistent. The same applies for wasm_atomic_wait intrinsics too.
849 Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
850 return true;
851 case Intrinsic::wasm_memory_atomic_wait32:
852 Info.opc = ISD::INTRINSIC_W_CHAIN;
853 Info.memVT = MVT::i32;
854 Info.ptrVal = I.getArgOperand(0);
855 Info.offset = 0;
856 Info.align = Align(4);
857 Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
858 return true;
859 case Intrinsic::wasm_memory_atomic_wait64:
860 Info.opc = ISD::INTRINSIC_W_CHAIN;
861 Info.memVT = MVT::i64;
862 Info.ptrVal = I.getArgOperand(0);
863 Info.offset = 0;
864 Info.align = Align(8);
865 Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
866 return true;
867 default:
868 return false;
869 }
870}
871
872void WebAssemblyTargetLowering::computeKnownBitsForTargetNode(
873 const SDValue Op, KnownBits &Known, const APInt &DemandedElts,
874 const SelectionDAG &DAG, unsigned Depth) const {
875 switch (Op.getOpcode()) {
876 default:
877 break;
878 case ISD::INTRINSIC_WO_CHAIN: {
879 unsigned IntNo = Op.getConstantOperandVal(0);
880 switch (IntNo) {
881 default:
882 break;
883 case Intrinsic::wasm_bitmask: {
884 unsigned BitWidth = Known.getBitWidth();
885 EVT VT = Op.getOperand(1).getSimpleValueType();
886 unsigned PossibleBits = VT.getVectorNumElements();
887 APInt ZeroMask = APInt::getHighBitsSet(BitWidth, BitWidth - PossibleBits);
888 Known.Zero |= ZeroMask;
889 break;
890 }
891 }
892 }
893 }
894}
895
896TargetLoweringBase::LegalizeTypeAction
897WebAssemblyTargetLowering::getPreferredVectorAction(MVT VT) const {
898 if (VT.isFixedLengthVector()) {
899 MVT EltVT = VT.getVectorElementType();
900 // We have legal vector types with these lane types, so widening the
901 // vector would let us use some of the lanes directly without having to
902 // extend or truncate values.
903 if (EltVT == MVT::i8 || EltVT == MVT::i16 || EltVT == MVT::i32 ||
904 EltVT == MVT::i64 || EltVT == MVT::f32 || EltVT == MVT::f64)
905 return TypeWidenVector;
906 }
907
908 return TargetLoweringBase::getPreferredVectorAction(VT);
909}
910
911//===----------------------------------------------------------------------===//
912// WebAssembly Lowering private implementation.
913//===----------------------------------------------------------------------===//
914
915//===----------------------------------------------------------------------===//
916// Lowering Code
917//===----------------------------------------------------------------------===//
918
919static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg) {
920 MachineFunction &MF = DAG.getMachineFunction();
921 DAG.getContext()->diagnose(
922 DiagnosticInfoUnsupported(MF.getFunction(), Msg, DL.getDebugLoc()));
923}
924
925// Test whether the given calling convention is supported.
926static bool callingConvSupported(CallingConv::ID CallConv) {
927 // We currently support the language-independent target-independent
928 // conventions. We don't yet have a way to annotate calls with properties like
929 // "cold", and we don't have any call-clobbered registers, so these are mostly
930 // all handled the same.
931 return CallConv == CallingConv::C || CallConv == CallingConv::Fast ||
932 CallConv == CallingConv::Cold ||
933 CallConv == CallingConv::PreserveMost ||
934 CallConv == CallingConv::PreserveAll ||
935 CallConv == CallingConv::CXX_FAST_TLS ||
936 CallConv == CallingConv::WASM_EmscriptenInvoke ||
937 CallConv == CallingConv::Swift;
938}
939
// Lower an outgoing call into the WebAssemblyISD::CALL / RET_CALL nodes.
// Diagnoses unsupported features via fail(), handles byval copies, swiftcc
// synthetic arguments, the varargs buffer, and funcref indirect calls.
SDValue
WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                     SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc DL = CLI.DL;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  MachineFunction &MF = DAG.getMachineFunction();
  auto Layout = MF.getDataLayout();

  CallingConv::ID CallConv = CLI.CallConv;
  if (!callingConvSupported(CallConv))
    fail(DL, DAG,
         "WebAssembly doesn't support language-specific or target-specific "
         "calling conventions yet");
  if (CLI.IsPatchPoint)
    fail(DL, DAG, "WebAssembly doesn't support patch point yet");

  if (CLI.IsTailCall) {
    // Demote the tail call to a regular call when it isn't lowerable; only
    // musttail calls (which may not be demoted) get a hard diagnostic.
    auto NoTail = [&](const char *Msg) {
      if (CLI.CB && CLI.CB->isMustTailCall())
        fail(DL, DAG, Msg);
      CLI.IsTailCall = false;
    };

    if (!Subtarget->hasTailCall())
      NoTail("WebAssembly 'tail-call' feature not enabled");

    // Varargs calls cannot be tail calls because the buffer is on the stack
    if (CLI.IsVarArg)
      NoTail("WebAssembly does not support varargs tail calls");

    // Do not tail call unless caller and callee return types match
    const Function &F = MF.getFunction();
    const TargetMachine &TM = getTargetMachine();
    Type *RetTy = F.getReturnType();
    SmallVector<MVT, 4> CallerRetTys;
    SmallVector<MVT, 4> CalleeRetTys;
    computeLegalValueVTs(F, TM, RetTy, CallerRetTys);
    computeLegalValueVTs(F, TM, CLI.RetTy, CalleeRetTys);
    bool TypesMatch = CallerRetTys.size() == CalleeRetTys.size() &&
                      std::equal(CallerRetTys.begin(), CallerRetTys.end(),
                                 CalleeRetTys.begin());
    if (!TypesMatch)
      NoTail("WebAssembly tail call requires caller and callee return types to "
             "match");

    // If pointers to local stack values are passed, we cannot tail call
    if (CLI.CB) {
      for (auto &Arg : CLI.CB->args()) {
        Value *Val = Arg.get();
        // Trace the value back through pointer operations (casts, aliases,
        // and GEPs) until it no longer changes.
        while (true) {
          Value *Src = Val->stripPointerCastsAndAliases();
          if (auto *GEP = dyn_cast<GetElementPtrInst>(Src))
            Src = GEP->getPointerOperand();
          if (Val == Src)
            break;
          Val = Src;
        }
        if (isa<AllocaInst>(Val)) {
          NoTail(
              "WebAssembly does not support tail calling with stack arguments");
          break;
        }
      }
    }
  }

  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;

  // The generic code may have added an sret argument. If we're lowering an
  // invoke function, the ABI requires that the function pointer be the first
  // argument, so we may have to swap the arguments.
  if (CallConv == CallingConv::WASM_EmscriptenInvoke && Outs.size() >= 2 &&
      Outs[0].Flags.isSRet()) {
    std::swap(Outs[0], Outs[1]);
    std::swap(OutVals[0], OutVals[1]);
  }

  bool HasSwiftSelfArg = false;
  bool HasSwiftErrorArg = false;
  unsigned NumFixedArgs = 0;
  // Scan the outgoing arguments: diagnose unimplemented flags, materialize
  // byval copies into stack objects, and count the fixed (non-varargs) args.
  for (unsigned I = 0; I < Outs.size(); ++I) {
    const ISD::OutputArg &Out = Outs[I];
    SDValue &OutVal = OutVals[I];
    HasSwiftSelfArg |= Out.Flags.isSwiftSelf();
    HasSwiftErrorArg |= Out.Flags.isSwiftError();
    if (Out.Flags.isNest())
      fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
    if (Out.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
    if (Out.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
    if (Out.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
    if (Out.Flags.isByVal() && Out.Flags.getByValSize() != 0) {
      // byval: copy the argument into a fresh stack object and pass the
      // frame index instead of the original pointer.
      auto &MFI = MF.getFrameInfo();
      int FI = MFI.CreateStackObject(Out.Flags.getByValSize(),
                                     Out.Flags.getNonZeroByValAlign(),
                                     /*isSS=*/false);
      SDValue SizeNode =
          DAG.getConstant(Out.Flags.getByValSize(), DL, MVT::i32);
      SDValue FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
      Chain = DAG.getMemcpy(
          Chain, DL, FINode, OutVal, SizeNode, Out.Flags.getNonZeroByValAlign(),
          /*isVolatile*/ false, /*AlwaysInline=*/false,
          /*isTailCall*/ false, MachinePointerInfo(), MachinePointerInfo());
      OutVal = FINode;
    }
    // Count the number of fixed args *after* legalization.
    NumFixedArgs += Out.IsFixed;
  }

  bool IsVarArg = CLI.IsVarArg;
  auto PtrVT = getPointerTy(Layout);

  // For swiftcc, emit additional swiftself and swifterror arguments
  // if there aren't. These additional arguments are also added for callee
  // signature They are necessary to match callee and caller signature for
  // indirect call.
  if (CallConv == CallingConv::Swift) {
    if (!HasSwiftSelfArg) {
      NumFixedArgs++;
      ISD::OutputArg Arg;
      Arg.Flags.setSwiftSelf();
      CLI.Outs.push_back(Arg);
      SDValue ArgVal = DAG.getUNDEF(PtrVT);
      CLI.OutVals.push_back(ArgVal);
    }
    if (!HasSwiftErrorArg) {
      NumFixedArgs++;
      ISD::OutputArg Arg;
      Arg.Flags.setSwiftError();
      CLI.Outs.push_back(Arg);
      SDValue ArgVal = DAG.getUNDEF(PtrVT);
      CLI.OutVals.push_back(ArgVal);
    }
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());

  if (IsVarArg) {
    // Outgoing non-fixed arguments are placed in a buffer. First
    // compute their offsets and the total amount of buffer space needed.
    for (unsigned I = NumFixedArgs; I < Outs.size(); ++I) {
      const ISD::OutputArg &Out = Outs[I];
      SDValue &Arg = OutVals[I];
      EVT VT = Arg.getValueType();
      assert(VT != MVT::iPTR && "Legalized args should be concrete");
      Type *Ty = VT.getTypeForEVT(*DAG.getContext());
      Align Alignment =
          std::max(Out.Flags.getNonZeroOrigAlign(), Layout.getABITypeAlign(Ty));
      unsigned Offset =
          CCInfo.AllocateStack(Layout.getTypeAllocSize(Ty), Alignment);
      CCInfo.addLoc(CCValAssign::getMem(ArgLocs.size(), VT.getSimpleVT(),
                                        Offset, VT.getSimpleVT(),
                                        CCValAssign::Full));
    }
  }

  unsigned NumBytes = CCInfo.getAlignedCallFrameSize();

  SDValue FINode;
  if (IsVarArg && NumBytes) {
    // For non-fixed arguments, next emit stores to store the argument values
    // to the stack buffer at the offsets computed above.
    int FI = MF.getFrameInfo().CreateStackObject(NumBytes,
                                                 Layout.getStackAlignment(),
                                                 /*isSS=*/false);
    unsigned ValNo = 0;
    SmallVector<SDValue, 8> Chains;
    for (SDValue Arg : drop_begin(OutVals, NumFixedArgs)) {
      assert(ArgLocs[ValNo].getValNo() == ValNo &&
             "ArgLocs should remain in order and only hold varargs args");
      unsigned Offset = ArgLocs[ValNo++].getLocMemOffset();
      FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
      SDValue Add = DAG.getNode(ISD::ADD, DL, PtrVT, FINode,
                                DAG.getConstant(Offset, DL, PtrVT));
      Chains.push_back(
          DAG.getStore(Chain, DL, Arg, Add,
                       MachinePointerInfo::getFixedStack(MF, FI, Offset)));
    }
    if (!Chains.empty())
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
  } else if (IsVarArg) {
    // Varargs call with an empty buffer: pass a null buffer pointer.
    FINode = DAG.getIntPtrConstant(0, DL);
  }

  if (Callee->getOpcode() == ISD::GlobalAddress) {
    // If the callee is a GlobalAddress node (quite common, every direct call
    // is) turn it into a TargetGlobalAddress node so that LowerGlobalAddress
    // doesn't add MO_GOT, which is not needed for direct calls.
    GlobalAddressSDNode* GA = cast<GlobalAddressSDNode>(Callee);
    Callee = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
                                        getPointerTy(DAG.getDataLayout()),
                                        GA->getOffset());
    Callee = DAG.getNode(WebAssemblyISD::Wrapper, DL,
                         getPointerTy(DAG.getDataLayout()), Callee);
  }

  // Compute the operands for the CALLn node.
  SmallVector<SDValue, 16> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add all fixed arguments. Note that for non-varargs calls, NumFixedArgs
  // isn't reliable.
  Ops.append(OutVals.begin(),
             IsVarArg ? OutVals.begin() + NumFixedArgs : OutVals.end());
  // Add a pointer to the vararg buffer.
  if (IsVarArg)
    Ops.push_back(FINode);

  // Collect the result types, diagnosing unimplemented return-value flags.
  SmallVector<EVT, 8> InTys;
  for (const auto &In : Ins) {
    assert(!In.Flags.isByVal() && "byval is not valid for return values");
    assert(!In.Flags.isNest() && "nest is not valid for return values");
    if (In.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca return values");
    if (In.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs return values");
    if (In.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG,
           "WebAssembly hasn't implemented cons regs last return values");
    // Ignore In.getNonZeroOrigAlign() because all our arguments are passed in
    // registers.
    InTys.push_back(In.VT);
  }

  // Lastly, if this is a call to a funcref we need to add an instruction
  // table.set to the chain and transform the call.
  if (CLI.CB &&
      WebAssembly::isFuncrefType(CLI.CB->getCalledOperand()->getType())) {
    // In the absence of function references proposal where a funcref call is
    // lowered to call_ref, using reference types we generate a table.set to set
    // the funcref to a special table used solely for this purpose, followed by
    // a call_indirect. Here we just generate the table set, and return the
    // SDValue of the table.set so that LowerCall can finalize the lowering by
    // generating the call_indirect.
    SDValue Chain = Ops[0];

    MCSymbolWasm *Table = WebAssembly::getOrCreateFuncrefCallTableSymbol(
        MF.getContext(), Subtarget);
    SDValue Sym = DAG.getMCSymbol(Table, PtrVT);
    SDValue TableSlot = DAG.getConstant(0, DL, MVT::i32);
    SDValue TableSetOps[] = {Chain, Sym, TableSlot, Callee};
    SDValue TableSet = DAG.getMemIntrinsicNode(
        WebAssemblyISD::TABLE_SET, DL, DAG.getVTList(MVT::Other), TableSetOps,
        MVT::funcref,
        // Machine Mem Operand args
        MachinePointerInfo(
            WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_FUNCREF),
        CLI.CB->getCalledOperand()->getPointerAlignment(DAG.getDataLayout()),
        MachineMemOperand::MOStore);

    Ops[0] = TableSet; // The new chain is the TableSet itself
  }

  if (CLI.IsTailCall) {
    // ret_calls do not return values to the current frame
    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
    return DAG.getNode(WebAssemblyISD::RET_CALL, DL, NodeTys, Ops);
  }

  InTys.push_back(MVT::Other);
  SDVTList InTyList = DAG.getVTList(InTys);
  SDValue Res = DAG.getNode(WebAssemblyISD::CALL, DL, InTyList, Ops);

  // The CALL node's results are the lowered return values followed by the
  // output chain.
  for (size_t I = 0; I < Ins.size(); ++I)
    InVals.push_back(Res.getValue(I));

  // Return the chain
  return Res.getValue(Ins.size());
}
1219
1220bool WebAssemblyTargetLowering::CanLowerReturn(
1221 CallingConv::ID /*CallConv*/, MachineFunction & /*MF*/, bool /*IsVarArg*/,
1222 const SmallVectorImpl<ISD::OutputArg> &Outs,
1223 LLVMContext & /*Context*/) const {
1224 // WebAssembly can only handle returning tuples with multivalue enabled
1225 return Subtarget->hasMultivalue() || Outs.size() <= 1;
1226}
1227
1228SDValue WebAssemblyTargetLowering::LowerReturn(
1229 SDValue Chain, CallingConv::ID CallConv, bool /*IsVarArg*/,
1230 const SmallVectorImpl<ISD::OutputArg> &Outs,
1231 const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
1232 SelectionDAG &DAG) const {
1233 assert((Subtarget->hasMultivalue() || Outs.size() <= 1) &&(static_cast <bool> ((Subtarget->hasMultivalue() || Outs
.size() <= 1) && "MVP WebAssembly can only return up to one value"
) ? void (0) : __assert_fail ("(Subtarget->hasMultivalue() || Outs.size() <= 1) && \"MVP WebAssembly can only return up to one value\""
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1234, __extension__ __PRETTY_FUNCTION__))
1234 "MVP WebAssembly can only return up to one value")(static_cast <bool> ((Subtarget->hasMultivalue() || Outs
.size() <= 1) && "MVP WebAssembly can only return up to one value"
) ? void (0) : __assert_fail ("(Subtarget->hasMultivalue() || Outs.size() <= 1) && \"MVP WebAssembly can only return up to one value\""
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1234, __extension__ __PRETTY_FUNCTION__))
;
1235 if (!callingConvSupported(CallConv))
1236 fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");
1237
1238 SmallVector<SDValue, 4> RetOps(1, Chain);
1239 RetOps.append(OutVals.begin(), OutVals.end());
1240 Chain = DAG.getNode(WebAssemblyISD::RETURN, DL, MVT::Other, RetOps);
1241
1242 // Record the number and types of the return values.
1243 for (const ISD::OutputArg &Out : Outs) {
1244 assert(!Out.Flags.isByVal() && "byval is not valid for return values")(static_cast <bool> (!Out.Flags.isByVal() && "byval is not valid for return values"
) ? void (0) : __assert_fail ("!Out.Flags.isByVal() && \"byval is not valid for return values\""
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1244, __extension__ __PRETTY_FUNCTION__))
;
1245 assert(!Out.Flags.isNest() && "nest is not valid for return values")(static_cast <bool> (!Out.Flags.isNest() && "nest is not valid for return values"
) ? void (0) : __assert_fail ("!Out.Flags.isNest() && \"nest is not valid for return values\""
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1245, __extension__ __PRETTY_FUNCTION__))
;
1246 assert(Out.IsFixed && "non-fixed return value is not valid")(static_cast <bool> (Out.IsFixed && "non-fixed return value is not valid"
) ? void (0) : __assert_fail ("Out.IsFixed && \"non-fixed return value is not valid\""
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1246, __extension__ __PRETTY_FUNCTION__))
;
1247 if (Out.Flags.isInAlloca())
1248 fail(DL, DAG, "WebAssembly hasn't implemented inalloca results");
1249 if (Out.Flags.isInConsecutiveRegs())
1250 fail(DL, DAG, "WebAssembly hasn't implemented cons regs results");
1251 if (Out.Flags.isInConsecutiveRegsLast())
1252 fail(DL, DAG, "WebAssembly hasn't implemented cons regs last results");
1253 }
1254
1255 return Chain;
1256}
1257
1258SDValue WebAssemblyTargetLowering::LowerFormalArguments(
1259 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
1260 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
1261 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1262 if (!callingConvSupported(CallConv))
1263 fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");
1264
1265 MachineFunction &MF = DAG.getMachineFunction();
1266 auto *MFI = MF.getInfo<WebAssemblyFunctionInfo>();
1267
1268 // Set up the incoming ARGUMENTS value, which serves to represent the liveness
1269 // of the incoming values before they're represented by virtual registers.
1270 MF.getRegInfo().addLiveIn(WebAssembly::ARGUMENTS);
1271
1272 bool HasSwiftErrorArg = false;
1273 bool HasSwiftSelfArg = false;
1274 for (const ISD::InputArg &In : Ins) {
1275 HasSwiftSelfArg |= In.Flags.isSwiftSelf();
1276 HasSwiftErrorArg |= In.Flags.isSwiftError();
1277 if (In.Flags.isInAlloca())
1278 fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
1279 if (In.Flags.isNest())
1280 fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
1281 if (In.Flags.isInConsecutiveRegs())
1282 fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
1283 if (In.Flags.isInConsecutiveRegsLast())
1284 fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
1285 // Ignore In.getNonZeroOrigAlign() because all our arguments are passed in
1286 // registers.
1287 InVals.push_back(In.Used ? DAG.getNode(WebAssemblyISD::ARGUMENT, DL, In.VT,
1288 DAG.getTargetConstant(InVals.size(),
1289 DL, MVT::i32))
1290 : DAG.getUNDEF(In.VT));
1291
1292 // Record the number and types of arguments.
1293 MFI->addParam(In.VT);
1294 }
1295
1296 // For swiftcc, emit additional swiftself and swifterror arguments
1297 // if there aren't. These additional arguments are also added for callee
1298 // signature They are necessary to match callee and caller signature for
1299 // indirect call.
1300 auto PtrVT = getPointerTy(MF.getDataLayout());
1301 if (CallConv == CallingConv::Swift) {
1302 if (!HasSwiftSelfArg) {
1303 MFI->addParam(PtrVT);
1304 }
1305 if (!HasSwiftErrorArg) {
1306 MFI->addParam(PtrVT);
1307 }
1308 }
1309 // Varargs are copied into a buffer allocated by the caller, and a pointer to
1310 // the buffer is passed as an argument.
1311 if (IsVarArg) {
1312 MVT PtrVT = getPointerTy(MF.getDataLayout());
1313 Register VarargVreg =
1314 MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrVT));
1315 MFI->setVarargBufferVreg(VarargVreg);
1316 Chain = DAG.getCopyToReg(
1317 Chain, DL, VarargVreg,
1318 DAG.getNode(WebAssemblyISD::ARGUMENT, DL, PtrVT,
1319 DAG.getTargetConstant(Ins.size(), DL, MVT::i32)));
1320 MFI->addParam(PtrVT);
1321 }
1322
1323 // Record the number and types of arguments and results.
1324 SmallVector<MVT, 4> Params;
1325 SmallVector<MVT, 4> Results;
1326 computeSignatureVTs(MF.getFunction().getFunctionType(), &MF.getFunction(),
1327 MF.getFunction(), DAG.getTarget(), Params, Results);
1328 for (MVT VT : Results)
1329 MFI->addResult(VT);
1330 // TODO: Use signatures in WebAssemblyMachineFunctionInfo too and unify
1331 // the param logic here with ComputeSignatureVTs
1332 assert(MFI->getParams().size() == Params.size() &&(static_cast <bool> (MFI->getParams().size() == Params
.size() && std::equal(MFI->getParams().begin(), MFI
->getParams().end(), Params.begin())) ? void (0) : __assert_fail
("MFI->getParams().size() == Params.size() && std::equal(MFI->getParams().begin(), MFI->getParams().end(), Params.begin())"
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1334, __extension__ __PRETTY_FUNCTION__))
1333 std::equal(MFI->getParams().begin(), MFI->getParams().end(),(static_cast <bool> (MFI->getParams().size() == Params
.size() && std::equal(MFI->getParams().begin(), MFI
->getParams().end(), Params.begin())) ? void (0) : __assert_fail
("MFI->getParams().size() == Params.size() && std::equal(MFI->getParams().begin(), MFI->getParams().end(), Params.begin())"
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1334, __extension__ __PRETTY_FUNCTION__))
1334 Params.begin()))(static_cast <bool> (MFI->getParams().size() == Params
.size() && std::equal(MFI->getParams().begin(), MFI
->getParams().end(), Params.begin())) ? void (0) : __assert_fail
("MFI->getParams().size() == Params.size() && std::equal(MFI->getParams().begin(), MFI->getParams().end(), Params.begin())"
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1334, __extension__ __PRETTY_FUNCTION__))
;
1335
1336 return Chain;
1337}
1338
1339void WebAssemblyTargetLowering::ReplaceNodeResults(
1340 SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
1341 switch (N->getOpcode()) {
1342 case ISD::SIGN_EXTEND_INREG:
1343 // Do not add any results, signifying that N should not be custom lowered
1344 // after all. This happens because simd128 turns on custom lowering for
1345 // SIGN_EXTEND_INREG, but for non-vector sign extends the result might be an
1346 // illegal type.
1347 break;
1348 default:
1349 llvm_unreachable(::llvm::llvm_unreachable_internal("ReplaceNodeResults not implemented for this op for WebAssembly!"
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1350)
1350 "ReplaceNodeResults not implemented for this op for WebAssembly!")::llvm::llvm_unreachable_internal("ReplaceNodeResults not implemented for this op for WebAssembly!"
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1350)
;
1351 }
1352}
1353
1354//===----------------------------------------------------------------------===//
1355// Custom lowering hooks.
1356//===----------------------------------------------------------------------===//
1357
1358SDValue WebAssemblyTargetLowering::LowerOperation(SDValue Op,
1359 SelectionDAG &DAG) const {
1360 SDLoc DL(Op);
1361 switch (Op.getOpcode()) {
1
Control jumps to 'case BUILD_VECTOR:' at line 1398
1362 default:
1363 llvm_unreachable("unimplemented operation lowering")::llvm::llvm_unreachable_internal("unimplemented operation lowering"
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1363)
;
1364 return SDValue();
1365 case ISD::FrameIndex:
1366 return LowerFrameIndex(Op, DAG);
1367 case ISD::GlobalAddress:
1368 return LowerGlobalAddress(Op, DAG);
1369 case ISD::GlobalTLSAddress:
1370 return LowerGlobalTLSAddress(Op, DAG);
1371 case ISD::ExternalSymbol:
1372 return LowerExternalSymbol(Op, DAG);
1373 case ISD::JumpTable:
1374 return LowerJumpTable(Op, DAG);
1375 case ISD::BR_JT:
1376 return LowerBR_JT(Op, DAG);
1377 case ISD::VASTART:
1378 return LowerVASTART(Op, DAG);
1379 case ISD::BlockAddress:
1380 case ISD::BRIND:
1381 fail(DL, DAG, "WebAssembly hasn't implemented computed gotos");
1382 return SDValue();
1383 case ISD::RETURNADDR:
1384 return LowerRETURNADDR(Op, DAG);
1385 case ISD::FRAMEADDR:
1386 return LowerFRAMEADDR(Op, DAG);
1387 case ISD::CopyToReg:
1388 return LowerCopyToReg(Op, DAG);
1389 case ISD::EXTRACT_VECTOR_ELT:
1390 case ISD::INSERT_VECTOR_ELT:
1391 return LowerAccessVectorElement(Op, DAG);
1392 case ISD::INTRINSIC_VOID:
1393 case ISD::INTRINSIC_WO_CHAIN:
1394 case ISD::INTRINSIC_W_CHAIN:
1395 return LowerIntrinsic(Op, DAG);
1396 case ISD::SIGN_EXTEND_INREG:
1397 return LowerSIGN_EXTEND_INREG(Op, DAG);
1398 case ISD::BUILD_VECTOR:
1399 return LowerBUILD_VECTOR(Op, DAG);
2
Calling 'WebAssemblyTargetLowering::LowerBUILD_VECTOR'
1400 case ISD::VECTOR_SHUFFLE:
1401 return LowerVECTOR_SHUFFLE(Op, DAG);
1402 case ISD::SETCC:
1403 return LowerSETCC(Op, DAG);
1404 case ISD::SHL:
1405 case ISD::SRA:
1406 case ISD::SRL:
1407 return LowerShift(Op, DAG);
1408 case ISD::FP_TO_SINT_SAT:
1409 case ISD::FP_TO_UINT_SAT:
1410 return LowerFP_TO_INT_SAT(Op, DAG);
1411 case ISD::LOAD:
1412 return LowerLoad(Op, DAG);
1413 case ISD::STORE:
1414 return LowerStore(Op, DAG);
1415 case ISD::CTPOP:
1416 case ISD::CTLZ:
1417 case ISD::CTTZ:
1418 return DAG.UnrollVectorOp(Op.getNode());
1419 }
1420}
1421
1422static bool IsWebAssemblyGlobal(SDValue Op) {
1423 if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op))
1424 return WebAssembly::isWasmVarAddressSpace(GA->getAddressSpace());
1425
1426 return false;
1427}
1428
1429static Optional<unsigned> IsWebAssemblyLocal(SDValue Op, SelectionDAG &DAG) {
1430 const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op);
1431 if (!FI)
1432 return None;
1433
1434 auto &MF = DAG.getMachineFunction();
1435 return WebAssemblyFrameLowering::getLocalForStackObject(MF, FI->getIndex());
1436}
1437
1438static bool IsWebAssemblyTable(SDValue Op) {
1439 const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op);
1440 if (GA && WebAssembly::isWasmVarAddressSpace(GA->getAddressSpace())) {
1441 const GlobalValue *Value = GA->getGlobal();
1442 const Type *Ty = Value->getValueType();
1443
1444 if (Ty->isArrayTy() && WebAssembly::isRefType(Ty->getArrayElementType()))
1445 return true;
1446 }
1447 return false;
1448}
1449
1450// This function will accept as Op any access to a table, so Op can
1451// be the actual table or an offset into the table.
1452static bool IsWebAssemblyTableWithOffset(SDValue Op) {
1453 if (Op->getOpcode() == ISD::ADD && Op->getNumOperands() == 2)
1454 return (Op->getOperand(1).getSimpleValueType() == MVT::i32 &&
1455 IsWebAssemblyTableWithOffset(Op->getOperand(0))) ||
1456 (Op->getOperand(0).getSimpleValueType() == MVT::i32 &&
1457 IsWebAssemblyTableWithOffset(Op->getOperand(1)));
1458
1459 return IsWebAssemblyTable(Op);
1460}
1461
1462// Helper for table pattern matching used in LowerStore and LowerLoad
1463bool WebAssemblyTargetLowering::MatchTableForLowering(SelectionDAG &DAG,
1464 const SDLoc &DL,
1465 const SDValue &Base,
1466 GlobalAddressSDNode *&GA,
1467 SDValue &Idx) const {
1468 // We expect the following graph for a load of the form:
1469 // table[<var> + <constant offset>]
1470 //
1471 // Case 1:
1472 // externref = load t1
1473 // t1: i32 = add t2, i32:<constant offset>
1474 // t2: i32 = add tX, table
1475 //
1476 // This is in some cases simplified to just:
1477 // Case 2:
1478 // externref = load t1
1479 // t1: i32 = add t2, i32:tX
1480 //
1481 // So, unfortunately we need to check for both cases and if we are in the
1482 // first case extract the table GlobalAddressNode and build a new node tY
1483 // that's tY: i32 = add i32:<constant offset>, i32:tX
1484 //
1485 if (IsWebAssemblyTable(Base)) {
1486 GA = cast<GlobalAddressSDNode>(Base);
1487 Idx = DAG.getConstant(0, DL, MVT::i32);
1488 } else {
1489 GA = dyn_cast<GlobalAddressSDNode>(Base->getOperand(0));
1490 if (GA) {
1491 // We are in Case 2 above.
1492 Idx = Base->getOperand(1);
1493 if (!Idx || GA->getNumValues() != 1 || Idx->getNumValues() != 1)
1494 return false;
1495 } else {
1496 // This might be Case 1 above (or an error)
1497 SDValue V = Base->getOperand(0);
1498 GA = dyn_cast<GlobalAddressSDNode>(V->getOperand(1));
1499
1500 if (V->getOpcode() != ISD::ADD || V->getNumOperands() != 2 || !GA)
1501 return false;
1502
1503 SDValue IdxV = DAG.getNode(ISD::ADD, DL, MVT::i32, Base->getOperand(1),
1504 V->getOperand(0));
1505 Idx = IdxV;
1506 }
1507 }
1508
1509 return true;
1510}
1511
1512SDValue WebAssemblyTargetLowering::LowerStore(SDValue Op,
1513 SelectionDAG &DAG) const {
1514 SDLoc DL(Op);
1515 StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());
1516 const SDValue &Value = SN->getValue();
1517 const SDValue &Base = SN->getBasePtr();
1518 const SDValue &Offset = SN->getOffset();
1519
1520 if (IsWebAssemblyTableWithOffset(Base)) {
1521 if (!Offset->isUndef())
1522 report_fatal_error(
1523 "unexpected offset when loading from webassembly table", false);
1524
1525 SDValue Idx;
1526 GlobalAddressSDNode *GA;
1527
1528 if (!MatchTableForLowering(DAG, DL, Base, GA, Idx))
1529 report_fatal_error("failed pattern matching for lowering table store",
1530 false);
1531
1532 SDVTList Tys = DAG.getVTList(MVT::Other);
1533 SDValue TableSetOps[] = {SN->getChain(), SDValue(GA, 0), Idx, Value};
1534 SDValue TableSet =
1535 DAG.getMemIntrinsicNode(WebAssemblyISD::TABLE_SET, DL, Tys, TableSetOps,
1536 SN->getMemoryVT(), SN->getMemOperand());
1537 return TableSet;
1538 }
1539
1540 if (IsWebAssemblyGlobal(Base)) {
1541 if (!Offset->isUndef())
1542 report_fatal_error("unexpected offset when storing to webassembly global",
1543 false);
1544
1545 SDVTList Tys = DAG.getVTList(MVT::Other);
1546 SDValue Ops[] = {SN->getChain(), Value, Base};
1547 return DAG.getMemIntrinsicNode(WebAssemblyISD::GLOBAL_SET, DL, Tys, Ops,
1548 SN->getMemoryVT(), SN->getMemOperand());
1549 }
1550
1551 if (Optional<unsigned> Local = IsWebAssemblyLocal(Base, DAG)) {
1552 if (!Offset->isUndef())
1553 report_fatal_error("unexpected offset when storing to webassembly local",
1554 false);
1555
1556 SDValue Idx = DAG.getTargetConstant(*Local, Base, MVT::i32);
1557 SDVTList Tys = DAG.getVTList(MVT::Other); // The chain.
1558 SDValue Ops[] = {SN->getChain(), Idx, Value};
1559 return DAG.getNode(WebAssemblyISD::LOCAL_SET, DL, Tys, Ops);
1560 }
1561
1562 return Op;
1563}
1564
1565SDValue WebAssemblyTargetLowering::LowerLoad(SDValue Op,
1566 SelectionDAG &DAG) const {
1567 SDLoc DL(Op);
1568 LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
1569 const SDValue &Base = LN->getBasePtr();
1570 const SDValue &Offset = LN->getOffset();
1571
1572 if (IsWebAssemblyTableWithOffset(Base)) {
1573 if (!Offset->isUndef())
1574 report_fatal_error(
1575 "unexpected offset when loading from webassembly table", false);
1576
1577 GlobalAddressSDNode *GA;
1578 SDValue Idx;
1579
1580 if (!MatchTableForLowering(DAG, DL, Base, GA, Idx))
1581 report_fatal_error("failed pattern matching for lowering table load",
1582 false);
1583
1584 SDVTList Tys = DAG.getVTList(LN->getValueType(0), MVT::Other);
1585 SDValue TableGetOps[] = {LN->getChain(), SDValue(GA, 0), Idx};
1586 SDValue TableGet =
1587 DAG.getMemIntrinsicNode(WebAssemblyISD::TABLE_GET, DL, Tys, TableGetOps,
1588 LN->getMemoryVT(), LN->getMemOperand());
1589 return TableGet;
1590 }
1591
1592 if (IsWebAssemblyGlobal(Base)) {
1593 if (!Offset->isUndef())
1594 report_fatal_error(
1595 "unexpected offset when loading from webassembly global", false);
1596
1597 SDVTList Tys = DAG.getVTList(LN->getValueType(0), MVT::Other);
1598 SDValue Ops[] = {LN->getChain(), Base};
1599 return DAG.getMemIntrinsicNode(WebAssemblyISD::GLOBAL_GET, DL, Tys, Ops,
1600 LN->getMemoryVT(), LN->getMemOperand());
1601 }
1602
1603 if (Optional<unsigned> Local = IsWebAssemblyLocal(Base, DAG)) {
1604 if (!Offset->isUndef())
1605 report_fatal_error(
1606 "unexpected offset when loading from webassembly local", false);
1607
1608 SDValue Idx = DAG.getTargetConstant(*Local, Base, MVT::i32);
1609 EVT LocalVT = LN->getValueType(0);
1610 SDValue LocalGet = DAG.getNode(WebAssemblyISD::LOCAL_GET, DL, LocalVT,
1611 {LN->getChain(), Idx});
1612 SDValue Result = DAG.getMergeValues({LocalGet, LN->getChain()}, DL);
1613 assert(Result->getNumValues() == 2 && "Loads must carry a chain!")(static_cast <bool> (Result->getNumValues() == 2 &&
"Loads must carry a chain!") ? void (0) : __assert_fail ("Result->getNumValues() == 2 && \"Loads must carry a chain!\""
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1613, __extension__ __PRETTY_FUNCTION__))
;
1614 return Result;
1615 }
1616
1617 return Op;
1618}
1619
1620SDValue WebAssemblyTargetLowering::LowerCopyToReg(SDValue Op,
1621 SelectionDAG &DAG) const {
1622 SDValue Src = Op.getOperand(2);
1623 if (isa<FrameIndexSDNode>(Src.getNode())) {
1624 // CopyToReg nodes don't support FrameIndex operands. Other targets select
1625 // the FI to some LEA-like instruction, but since we don't have that, we
1626 // need to insert some kind of instruction that can take an FI operand and
1627 // produces a value usable by CopyToReg (i.e. in a vreg). So insert a dummy
1628 // local.copy between Op and its FI operand.
1629 SDValue Chain = Op.getOperand(0);
1630 SDLoc DL(Op);
1631 unsigned Reg = cast<RegisterSDNode>(Op.getOperand(1))->getReg();
1632 EVT VT = Src.getValueType();
1633 SDValue Copy(DAG.getMachineNode(VT == MVT::i32 ? WebAssembly::COPY_I32
1634 : WebAssembly::COPY_I64,
1635 DL, VT, Src),
1636 0);
1637 return Op.getNode()->getNumValues() == 1
1638 ? DAG.getCopyToReg(Chain, DL, Reg, Copy)
1639 : DAG.getCopyToReg(Chain, DL, Reg, Copy,
1640 Op.getNumOperands() == 4 ? Op.getOperand(3)
1641 : SDValue());
1642 }
1643 return SDValue();
1644}
1645
1646SDValue WebAssemblyTargetLowering::LowerFrameIndex(SDValue Op,
1647 SelectionDAG &DAG) const {
1648 int FI = cast<FrameIndexSDNode>(Op)->getIndex();
1649 return DAG.getTargetFrameIndex(FI, Op.getValueType());
1650}
1651
1652SDValue WebAssemblyTargetLowering::LowerRETURNADDR(SDValue Op,
1653 SelectionDAG &DAG) const {
1654 SDLoc DL(Op);
1655
1656 if (!Subtarget->getTargetTriple().isOSEmscripten()) {
1657 fail(DL, DAG,
1658 "Non-Emscripten WebAssembly hasn't implemented "
1659 "__builtin_return_address");
1660 return SDValue();
1661 }
1662
1663 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
1664 return SDValue();
1665
1666 unsigned Depth = Op.getConstantOperandVal(0);
1667 MakeLibCallOptions CallOptions;
1668 return makeLibCall(DAG, RTLIB::RETURN_ADDRESS, Op.getValueType(),
1669 {DAG.getConstant(Depth, DL, MVT::i32)}, CallOptions, DL)
1670 .first;
1671}
1672
1673SDValue WebAssemblyTargetLowering::LowerFRAMEADDR(SDValue Op,
1674 SelectionDAG &DAG) const {
1675 // Non-zero depths are not supported by WebAssembly currently. Use the
1676 // legalizer's default expansion, which is to return 0 (what this function is
1677 // documented to do).
1678 if (Op.getConstantOperandVal(0) > 0)
1679 return SDValue();
1680
1681 DAG.getMachineFunction().getFrameInfo().setFrameAddressIsTaken(true);
1682 EVT VT = Op.getValueType();
1683 Register FP =
1684 Subtarget->getRegisterInfo()->getFrameRegister(DAG.getMachineFunction());
1685 return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), FP, VT);
1686}
1687
1688SDValue
1689WebAssemblyTargetLowering::LowerGlobalTLSAddress(SDValue Op,
1690 SelectionDAG &DAG) const {
1691 SDLoc DL(Op);
1692 const auto *GA = cast<GlobalAddressSDNode>(Op);
1693
1694 MachineFunction &MF = DAG.getMachineFunction();
1695 if (!MF.getSubtarget<WebAssemblySubtarget>().hasBulkMemory())
1696 report_fatal_error("cannot use thread-local storage without bulk memory",
1697 false);
1698
1699 const GlobalValue *GV = GA->getGlobal();
1700
1701 // Currently Emscripten does not support dynamic linking with threads.
1702 // Therefore, if we have thread-local storage, only the local-exec model
1703 // is possible.
1704 // TODO: remove this and implement proper TLS models once Emscripten
1705 // supports dynamic linking with threads.
1706 if (GV->getThreadLocalMode() != GlobalValue::LocalExecTLSModel &&
1707 !Subtarget->getTargetTriple().isOSEmscripten()) {
1708 report_fatal_error("only -ftls-model=local-exec is supported for now on "
1709 "non-Emscripten OSes: variable " +
1710 GV->getName(),
1711 false);
1712 }
1713
1714 auto model = GV->getThreadLocalMode();
1715
1716 // Unsupported TLS modes
1717 assert(model != GlobalValue::NotThreadLocal)(static_cast <bool> (model != GlobalValue::NotThreadLocal
) ? void (0) : __assert_fail ("model != GlobalValue::NotThreadLocal"
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1717, __extension__ __PRETTY_FUNCTION__))
;
1718 assert(model != GlobalValue::InitialExecTLSModel)(static_cast <bool> (model != GlobalValue::InitialExecTLSModel
) ? void (0) : __assert_fail ("model != GlobalValue::InitialExecTLSModel"
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1718, __extension__ __PRETTY_FUNCTION__))
;
1719
1720 if (model == GlobalValue::LocalExecTLSModel ||
1721 model == GlobalValue::LocalDynamicTLSModel ||
1722 (model == GlobalValue::GeneralDynamicTLSModel &&
1723 getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))) {
1724 // For DSO-local TLS variables we use offset from __tls_base
1725
1726 MVT PtrVT = getPointerTy(DAG.getDataLayout());
1727 auto GlobalGet = PtrVT == MVT::i64 ? WebAssembly::GLOBAL_GET_I64
1728 : WebAssembly::GLOBAL_GET_I32;
1729 const char *BaseName = MF.createExternalSymbolName("__tls_base");
1730
1731 SDValue BaseAddr(
1732 DAG.getMachineNode(GlobalGet, DL, PtrVT,
1733 DAG.getTargetExternalSymbol(BaseName, PtrVT)),
1734 0);
1735
1736 SDValue TLSOffset = DAG.getTargetGlobalAddress(
1737 GV, DL, PtrVT, GA->getOffset(), WebAssemblyII::MO_TLS_BASE_REL);
1738 SDValue SymOffset =
1739 DAG.getNode(WebAssemblyISD::WrapperREL, DL, PtrVT, TLSOffset);
1740
1741 return DAG.getNode(ISD::ADD, DL, PtrVT, BaseAddr, SymOffset);
1742 }
1743
1744 assert(model == GlobalValue::GeneralDynamicTLSModel)(static_cast <bool> (model == GlobalValue::GeneralDynamicTLSModel
) ? void (0) : __assert_fail ("model == GlobalValue::GeneralDynamicTLSModel"
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1744, __extension__ __PRETTY_FUNCTION__))
;
1745
1746 EVT VT = Op.getValueType();
1747 return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1748 DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT,
1749 GA->getOffset(),
1750 WebAssemblyII::MO_GOT_TLS));
1751}
1752
1753SDValue WebAssemblyTargetLowering::LowerGlobalAddress(SDValue Op,
1754 SelectionDAG &DAG) const {
1755 SDLoc DL(Op);
1756 const auto *GA = cast<GlobalAddressSDNode>(Op);
1757 EVT VT = Op.getValueType();
1758 assert(GA->getTargetFlags() == 0 &&(static_cast <bool> (GA->getTargetFlags() == 0 &&
"Unexpected target flags on generic GlobalAddressSDNode") ? void
(0) : __assert_fail ("GA->getTargetFlags() == 0 && \"Unexpected target flags on generic GlobalAddressSDNode\""
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1759, __extension__ __PRETTY_FUNCTION__))
1759 "Unexpected target flags on generic GlobalAddressSDNode")(static_cast <bool> (GA->getTargetFlags() == 0 &&
"Unexpected target flags on generic GlobalAddressSDNode") ? void
(0) : __assert_fail ("GA->getTargetFlags() == 0 && \"Unexpected target flags on generic GlobalAddressSDNode\""
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1759, __extension__ __PRETTY_FUNCTION__))
;
1760 if (!WebAssembly::isValidAddressSpace(GA->getAddressSpace()))
1761 fail(DL, DAG, "Invalid address space for WebAssembly target");
1762
1763 unsigned OperandFlags = 0;
1764 if (isPositionIndependent()) {
1765 const GlobalValue *GV = GA->getGlobal();
1766 if (getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV)) {
1767 MachineFunction &MF = DAG.getMachineFunction();
1768 MVT PtrVT = getPointerTy(MF.getDataLayout());
1769 const char *BaseName;
1770 if (GV->getValueType()->isFunctionTy()) {
1771 BaseName = MF.createExternalSymbolName("__table_base");
1772 OperandFlags = WebAssemblyII::MO_TABLE_BASE_REL;
1773 }
1774 else {
1775 BaseName = MF.createExternalSymbolName("__memory_base");
1776 OperandFlags = WebAssemblyII::MO_MEMORY_BASE_REL;
1777 }
1778 SDValue BaseAddr =
1779 DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT,
1780 DAG.getTargetExternalSymbol(BaseName, PtrVT));
1781
1782 SDValue SymAddr = DAG.getNode(
1783 WebAssemblyISD::WrapperREL, DL, VT,
1784 DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT, GA->getOffset(),
1785 OperandFlags));
1786
1787 return DAG.getNode(ISD::ADD, DL, VT, BaseAddr, SymAddr);
1788 }
1789 OperandFlags = WebAssemblyII::MO_GOT;
1790 }
1791
1792 return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1793 DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT,
1794 GA->getOffset(), OperandFlags));
1795}
1796
1797SDValue
1798WebAssemblyTargetLowering::LowerExternalSymbol(SDValue Op,
1799 SelectionDAG &DAG) const {
1800 SDLoc DL(Op);
1801 const auto *ES = cast<ExternalSymbolSDNode>(Op);
1802 EVT VT = Op.getValueType();
1803 assert(ES->getTargetFlags() == 0 &&(static_cast <bool> (ES->getTargetFlags() == 0 &&
"Unexpected target flags on generic ExternalSymbolSDNode") ?
void (0) : __assert_fail ("ES->getTargetFlags() == 0 && \"Unexpected target flags on generic ExternalSymbolSDNode\""
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1804, __extension__ __PRETTY_FUNCTION__))
1804 "Unexpected target flags on generic ExternalSymbolSDNode")(static_cast <bool> (ES->getTargetFlags() == 0 &&
"Unexpected target flags on generic ExternalSymbolSDNode") ?
void (0) : __assert_fail ("ES->getTargetFlags() == 0 && \"Unexpected target flags on generic ExternalSymbolSDNode\""
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1804, __extension__ __PRETTY_FUNCTION__))
;
1805 return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1806 DAG.getTargetExternalSymbol(ES->getSymbol(), VT));
1807}
1808
1809SDValue WebAssemblyTargetLowering::LowerJumpTable(SDValue Op,
1810 SelectionDAG &DAG) const {
1811 // There's no need for a Wrapper node because we always incorporate a jump
1812 // table operand into a BR_TABLE instruction, rather than ever
1813 // materializing it in a register.
1814 const JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
1815 return DAG.getTargetJumpTable(JT->getIndex(), Op.getValueType(),
1816 JT->getTargetFlags());
1817}
1818
1819SDValue WebAssemblyTargetLowering::LowerBR_JT(SDValue Op,
1820 SelectionDAG &DAG) const {
1821 SDLoc DL(Op);
1822 SDValue Chain = Op.getOperand(0);
1823 const auto *JT = cast<JumpTableSDNode>(Op.getOperand(1));
1824 SDValue Index = Op.getOperand(2);
1825 assert(JT->getTargetFlags() == 0 && "WebAssembly doesn't set target flags")(static_cast <bool> (JT->getTargetFlags() == 0 &&
"WebAssembly doesn't set target flags") ? void (0) : __assert_fail
("JT->getTargetFlags() == 0 && \"WebAssembly doesn't set target flags\""
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1825, __extension__ __PRETTY_FUNCTION__))
;
1826
1827 SmallVector<SDValue, 8> Ops;
1828 Ops.push_back(Chain);
1829 Ops.push_back(Index);
1830
1831 MachineJumpTableInfo *MJTI = DAG.getMachineFunction().getJumpTableInfo();
1832 const auto &MBBs = MJTI->getJumpTables()[JT->getIndex()].MBBs;
1833
1834 // Add an operand for each case.
1835 for (auto MBB : MBBs)
1836 Ops.push_back(DAG.getBasicBlock(MBB));
1837
1838 // Add the first MBB as a dummy default target for now. This will be replaced
1839 // with the proper default target (and the preceding range check eliminated)
1840 // if possible by WebAssemblyFixBrTableDefaults.
1841 Ops.push_back(DAG.getBasicBlock(*MBBs.begin()));
1842 return DAG.getNode(WebAssemblyISD::BR_TABLE, DL, MVT::Other, Ops);
1843}
1844
1845SDValue WebAssemblyTargetLowering::LowerVASTART(SDValue Op,
1846 SelectionDAG &DAG) const {
1847 SDLoc DL(Op);
1848 EVT PtrVT = getPointerTy(DAG.getMachineFunction().getDataLayout());
1849
1850 auto *MFI = DAG.getMachineFunction().getInfo<WebAssemblyFunctionInfo>();
1851 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
1852
1853 SDValue ArgN = DAG.getCopyFromReg(DAG.getEntryNode(), DL,
1854 MFI->getVarargBufferVreg(), PtrVT);
1855 return DAG.getStore(Op.getOperand(0), DL, ArgN, Op.getOperand(1),
1856 MachinePointerInfo(SV));
1857}
1858
1859SDValue WebAssemblyTargetLowering::LowerIntrinsic(SDValue Op,
1860 SelectionDAG &DAG) const {
1861 MachineFunction &MF = DAG.getMachineFunction();
1862 unsigned IntNo;
1863 switch (Op.getOpcode()) {
1864 case ISD::INTRINSIC_VOID:
1865 case ISD::INTRINSIC_W_CHAIN:
1866 IntNo = Op.getConstantOperandVal(1);
1867 break;
1868 case ISD::INTRINSIC_WO_CHAIN:
1869 IntNo = Op.getConstantOperandVal(0);
1870 break;
1871 default:
1872 llvm_unreachable("Invalid intrinsic")::llvm::llvm_unreachable_internal("Invalid intrinsic", "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1872)
;
1873 }
1874 SDLoc DL(Op);
1875
1876 switch (IntNo) {
1877 default:
1878 return SDValue(); // Don't custom lower most intrinsics.
1879
1880 case Intrinsic::wasm_lsda: {
1881 auto PtrVT = getPointerTy(MF.getDataLayout());
1882 const char *SymName = MF.createExternalSymbolName(
1883 "GCC_except_table" + std::to_string(MF.getFunctionNumber()));
1884 if (isPositionIndependent()) {
1885 SDValue Node = DAG.getTargetExternalSymbol(
1886 SymName, PtrVT, WebAssemblyII::MO_MEMORY_BASE_REL);
1887 const char *BaseName = MF.createExternalSymbolName("__memory_base");
1888 SDValue BaseAddr =
1889 DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT,
1890 DAG.getTargetExternalSymbol(BaseName, PtrVT));
1891 SDValue SymAddr =
1892 DAG.getNode(WebAssemblyISD::WrapperREL, DL, PtrVT, Node);
1893 return DAG.getNode(ISD::ADD, DL, PtrVT, BaseAddr, SymAddr);
1894 }
1895 SDValue Node = DAG.getTargetExternalSymbol(SymName, PtrVT);
1896 return DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT, Node);
1897 }
1898
1899 case Intrinsic::wasm_shuffle: {
1900 // Drop in-chain and replace undefs, but otherwise pass through unchanged
1901 SDValue Ops[18];
1902 size_t OpIdx = 0;
1903 Ops[OpIdx++] = Op.getOperand(1);
1904 Ops[OpIdx++] = Op.getOperand(2);
1905 while (OpIdx < 18) {
1906 const SDValue &MaskIdx = Op.getOperand(OpIdx + 1);
1907 if (MaskIdx.isUndef() ||
1908 cast<ConstantSDNode>(MaskIdx.getNode())->getZExtValue() >= 32) {
1909 Ops[OpIdx++] = DAG.getConstant(0, DL, MVT::i32);
1910 } else {
1911 Ops[OpIdx++] = MaskIdx;
1912 }
1913 }
1914 return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops);
1915 }
1916 }
1917}
1918
1919SDValue
1920WebAssemblyTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
1921 SelectionDAG &DAG) const {
1922 SDLoc DL(Op);
1923 // If sign extension operations are disabled, allow sext_inreg only if operand
1924 // is a vector extract of an i8 or i16 lane. SIMD does not depend on sign
1925 // extension operations, but allowing sext_inreg in this context lets us have
1926 // simple patterns to select extract_lane_s instructions. Expanding sext_inreg
1927 // everywhere would be simpler in this file, but would necessitate large and
1928 // brittle patterns to undo the expansion and select extract_lane_s
1929 // instructions.
1930 assert(!Subtarget->hasSignExt() && Subtarget->hasSIMD128())(static_cast <bool> (!Subtarget->hasSignExt() &&
Subtarget->hasSIMD128()) ? void (0) : __assert_fail ("!Subtarget->hasSignExt() && Subtarget->hasSIMD128()"
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1930, __extension__ __PRETTY_FUNCTION__))
;
1931 if (Op.getOperand(0).getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1932 return SDValue();
1933
1934 const SDValue &Extract = Op.getOperand(0);
1935 MVT VecT = Extract.getOperand(0).getSimpleValueType();
1936 if (VecT.getVectorElementType().getSizeInBits() > 32)
1937 return SDValue();
1938 MVT ExtractedLaneT =
1939 cast<VTSDNode>(Op.getOperand(1).getNode())->getVT().getSimpleVT();
1940 MVT ExtractedVecT =
1941 MVT::getVectorVT(ExtractedLaneT, 128 / ExtractedLaneT.getSizeInBits());
1942 if (ExtractedVecT == VecT)
1943 return Op;
1944
1945 // Bitcast vector to appropriate type to ensure ISel pattern coverage
1946 const SDNode *Index = Extract.getOperand(1).getNode();
1947 if (!isa<ConstantSDNode>(Index))
1948 return SDValue();
1949 unsigned IndexVal = cast<ConstantSDNode>(Index)->getZExtValue();
1950 unsigned Scale =
1951 ExtractedVecT.getVectorNumElements() / VecT.getVectorNumElements();
1952 assert(Scale > 1)(static_cast <bool> (Scale > 1) ? void (0) : __assert_fail
("Scale > 1", "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1952, __extension__ __PRETTY_FUNCTION__))
;
1953 SDValue NewIndex =
1954 DAG.getConstant(IndexVal * Scale, DL, Index->getValueType(0));
1955 SDValue NewExtract = DAG.getNode(
1956 ISD::EXTRACT_VECTOR_ELT, DL, Extract.getValueType(),
1957 DAG.getBitcast(ExtractedVecT, Extract.getOperand(0)), NewIndex);
1958 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, Op.getValueType(), NewExtract,
1959 Op.getOperand(1));
1960}
1961
1962static SDValue LowerConvertLow(SDValue Op, SelectionDAG &DAG) {
1963 SDLoc DL(Op);
1964 if (Op.getValueType() != MVT::v2f64)
1965 return SDValue();
1966
1967 auto GetConvertedLane = [](SDValue Op, unsigned &Opcode, SDValue &SrcVec,
1968 unsigned &Index) -> bool {
1969 switch (Op.getOpcode()) {
1970 case ISD::SINT_TO_FP:
1971 Opcode = WebAssemblyISD::CONVERT_LOW_S;
1972 break;
1973 case ISD::UINT_TO_FP:
1974 Opcode = WebAssemblyISD::CONVERT_LOW_U;
1975 break;
1976 case ISD::FP_EXTEND:
1977 Opcode = WebAssemblyISD::PROMOTE_LOW;
1978 break;
1979 default:
1980 return false;
1981 }
1982
1983 auto ExtractVector = Op.getOperand(0);
1984 if (ExtractVector.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1985 return false;
1986
1987 if (!isa<ConstantSDNode>(ExtractVector.getOperand(1).getNode()))
1988 return false;
1989
1990 SrcVec = ExtractVector.getOperand(0);
1991 Index = ExtractVector.getConstantOperandVal(1);
1992 return true;
1993 };
1994
1995 unsigned LHSOpcode, RHSOpcode, LHSIndex, RHSIndex;
1996 SDValue LHSSrcVec, RHSSrcVec;
1997 if (!GetConvertedLane(Op.getOperand(0), LHSOpcode, LHSSrcVec, LHSIndex) ||
1998 !GetConvertedLane(Op.getOperand(1), RHSOpcode, RHSSrcVec, RHSIndex))
1999 return SDValue();
2000
2001 if (LHSOpcode != RHSOpcode)
2002 return SDValue();
2003
2004 MVT ExpectedSrcVT;
2005 switch (LHSOpcode) {
2006 case WebAssemblyISD::CONVERT_LOW_S:
2007 case WebAssemblyISD::CONVERT_LOW_U:
2008 ExpectedSrcVT = MVT::v4i32;
2009 break;
2010 case WebAssemblyISD::PROMOTE_LOW:
2011 ExpectedSrcVT = MVT::v4f32;
2012 break;
2013 }
2014 if (LHSSrcVec.getValueType() != ExpectedSrcVT)
2015 return SDValue();
2016
2017 auto Src = LHSSrcVec;
2018 if (LHSIndex != 0 || RHSIndex != 1 || LHSSrcVec != RHSSrcVec) {
2019 // Shuffle the source vector so that the converted lanes are the low lanes.
2020 Src = DAG.getVectorShuffle(
2021 ExpectedSrcVT, DL, LHSSrcVec, RHSSrcVec,
2022 {static_cast<int>(LHSIndex), static_cast<int>(RHSIndex) + 4, -1, -1});
2023 }
2024 return DAG.getNode(LHSOpcode, DL, MVT::v2f64, Src);
2025}
2026
2027SDValue WebAssemblyTargetLowering::LowerBUILD_VECTOR(SDValue Op,
2028 SelectionDAG &DAG) const {
2029 if (auto ConvertLow = LowerConvertLow(Op, DAG))
3
Calling 'SDValue::operator bool'
5
Returning from 'SDValue::operator bool'
6
Taking false branch
2030 return ConvertLow;
2031
2032 SDLoc DL(Op);
2033 const EVT VecT = Op.getValueType();
2034 const EVT LaneT = Op.getOperand(0).getValueType();
2035 const size_t Lanes = Op.getNumOperands();
2036 bool CanSwizzle = VecT == MVT::v16i8;
2037
2038 // BUILD_VECTORs are lowered to the instruction that initializes the highest
2039 // possible number of lanes at once followed by a sequence of replace_lane
2040 // instructions to individually initialize any remaining lanes.
2041
2042 // TODO: Tune this. For example, lanewise swizzling is very expensive, so
2043 // swizzled lanes should be given greater weight.
2044
2045 // TODO: Investigate looping rather than always extracting/replacing specific
2046 // lanes to fill gaps.
2047
2048 auto IsConstant = [](const SDValue &V) {
2049 return V.getOpcode() == ISD::Constant || V.getOpcode() == ISD::ConstantFP;
27
Assuming the condition is true
28
Returning the value 1, which participates in a condition later
2050 };
2051
2052 // Returns the source vector and index vector pair if they exist. Checks for:
2053 // (extract_vector_elt
2054 // $src,
2055 // (sign_extend_inreg (extract_vector_elt $indices, $i))
2056 // )
2057 auto GetSwizzleSrcs = [](size_t I, const SDValue &Lane) {
2058 auto Bail = std::make_pair(SDValue(), SDValue());
2059 if (Lane->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
2060 return Bail;
2061 const SDValue &SwizzleSrc = Lane->getOperand(0);
2062 const SDValue &IndexExt = Lane->getOperand(1);
2063 if (IndexExt->getOpcode() != ISD::SIGN_EXTEND_INREG)
2064 return Bail;
2065 const SDValue &Index = IndexExt->getOperand(0);
2066 if (Index->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
2067 return Bail;
2068 const SDValue &SwizzleIndices = Index->getOperand(0);
2069 if (SwizzleSrc.getValueType() != MVT::v16i8 ||
2070 SwizzleIndices.getValueType() != MVT::v16i8 ||
2071 Index->getOperand(1)->getOpcode() != ISD::Constant ||
2072 Index->getConstantOperandVal(1) != I)
2073 return Bail;
2074 return std::make_pair(SwizzleSrc, SwizzleIndices);
2075 };
2076
2077 // If the lane is extracted from another vector at a constant index, return
2078 // that vector. The source vector must not have more lanes than the dest
2079 // because the shufflevector indices are in terms of the destination lanes and
2080 // would not be able to address the smaller individual source lanes.
2081 auto GetShuffleSrc = [&](const SDValue &Lane) {
2082 if (Lane->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
2083 return SDValue();
2084 if (!isa<ConstantSDNode>(Lane->getOperand(1).getNode()))
2085 return SDValue();
2086 if (Lane->getOperand(0).getValueType().getVectorNumElements() >
2087 VecT.getVectorNumElements())
2088 return SDValue();
2089 return Lane->getOperand(0);
2090 };
2091
2092 using ValueEntry = std::pair<SDValue, size_t>;
2093 SmallVector<ValueEntry, 16> SplatValueCounts;
2094
2095 using SwizzleEntry = std::pair<std::pair<SDValue, SDValue>, size_t>;
2096 SmallVector<SwizzleEntry, 16> SwizzleCounts;
2097
2098 using ShuffleEntry = std::pair<SDValue, size_t>;
2099 SmallVector<ShuffleEntry, 16> ShuffleCounts;
2100
2101 auto AddCount = [](auto &Counts, const auto &Val) {
2102 auto CountIt =
2103 llvm::find_if(Counts, [&Val](auto E) { return E.first == Val; });
2104 if (CountIt == Counts.end()) {
2105 Counts.emplace_back(Val, 1);
2106 } else {
2107 CountIt->second++;
2108 }
2109 };
2110
2111 auto GetMostCommon = [](auto &Counts) {
2112 auto CommonIt =
2113 std::max_element(Counts.begin(), Counts.end(),
2114 [](auto A, auto B) { return A.second < B.second; });
2115 assert(CommonIt != Counts.end() && "Unexpected all-undef build_vector")(static_cast <bool> (CommonIt != Counts.end() &&
"Unexpected all-undef build_vector") ? void (0) : __assert_fail
("CommonIt != Counts.end() && \"Unexpected all-undef build_vector\""
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 2115, __extension__ __PRETTY_FUNCTION__))
;
2116 return *CommonIt;
2117 };
2118
2119 size_t NumConstantLanes = 0;
2120
2121 // Count eligible lanes for each type of vector creation op
2122 for (size_t I = 0; I
6.1
'I' is < 'Lanes'
6.1
'I' is < 'Lanes'
< Lanes
; ++I) {
7
Loop condition is true. Entering loop body
13
Assuming 'I' is >= 'Lanes'
14
Loop condition is false. Execution continues on line 2140
2123 const SDValue &Lane = Op->getOperand(I);
2124 if (Lane.isUndef())
8
Taking false branch
2125 continue;
2126
2127 AddCount(SplatValueCounts, Lane);
2128
2129 if (IsConstant(Lane))
9
Taking false branch
2130 NumConstantLanes++;
2131 if (auto ShuffleSrc = GetShuffleSrc(Lane))
10
Taking false branch
2132 AddCount(ShuffleCounts, ShuffleSrc);
2133 if (CanSwizzle
10.1
'CanSwizzle' is true
10.1
'CanSwizzle' is true
) {
11
Taking true branch
2134 auto SwizzleSrcs = GetSwizzleSrcs(I, Lane);
2135 if (SwizzleSrcs.first)
12
Taking false branch
2136 AddCount(SwizzleCounts, SwizzleSrcs);
2137 }
2138 }
2139
2140 SDValue SplatValue;
2141 size_t NumSplatLanes;
2142 std::tie(SplatValue, NumSplatLanes) = GetMostCommon(SplatValueCounts);
2143
2144 SDValue SwizzleSrc;
2145 SDValue SwizzleIndices;
2146 size_t NumSwizzleLanes = 0;
2147 if (SwizzleCounts.size())
15
Assuming the condition is false
16
Taking false branch
2148 std::forward_as_tuple(std::tie(SwizzleSrc, SwizzleIndices),
2149 NumSwizzleLanes) = GetMostCommon(SwizzleCounts);
2150
2151 // Shuffles can draw from up to two vectors, so find the two most common
2152 // sources.
2153 SDValue ShuffleSrc1, ShuffleSrc2;
2154 size_t NumShuffleLanes = 0;
2155 if (ShuffleCounts.size()) {
17
Assuming the condition is true
18
Taking true branch
2156 std::tie(ShuffleSrc1, NumShuffleLanes) = GetMostCommon(ShuffleCounts);
2157 llvm::erase_if(ShuffleCounts,
2158 [&](const auto &Pair) { return Pair.first == ShuffleSrc1; });
2159 }
2160 if (ShuffleCounts.size()) {
19
Assuming the condition is false
20
Taking false branch
2161 size_t AdditionalShuffleLanes;
2162 std::tie(ShuffleSrc2, AdditionalShuffleLanes) =
2163 GetMostCommon(ShuffleCounts);
2164 NumShuffleLanes += AdditionalShuffleLanes;
2165 }
2166
2167 // Predicate returning true if the lane is properly initialized by the
2168 // original instruction
2169 std::function<bool(size_t, const SDValue &)> IsLaneConstructed;
2170 SDValue Result;
2171 // Prefer swizzles over shuffles over vector consts over splats
2172 if (NumSwizzleLanes >= NumShuffleLanes &&
21
Assuming 'NumSwizzleLanes' is < 'NumShuffleLanes'
2173 NumSwizzleLanes >= NumConstantLanes && NumSwizzleLanes >= NumSplatLanes) {
2174 Result = DAG.getNode(WebAssemblyISD::SWIZZLE, DL, VecT, SwizzleSrc,
2175 SwizzleIndices);
2176 auto Swizzled = std::make_pair(SwizzleSrc, SwizzleIndices);
2177 IsLaneConstructed = [&, Swizzled](size_t I, const SDValue &Lane) {
2178 return Swizzled == GetSwizzleSrcs(I, Lane);
2179 };
2180 } else if (NumShuffleLanes
21.1
'NumShuffleLanes' is >= 'NumConstantLanes'
21.1
'NumShuffleLanes' is >= 'NumConstantLanes'
>= NumConstantLanes &&
23
Taking false branch
2181 NumShuffleLanes >= NumSplatLanes) {
22
Assuming 'NumShuffleLanes' is < 'NumSplatLanes'
2182 size_t DestLaneSize = VecT.getVectorElementType().getFixedSizeInBits() / 8;
2183 size_t DestLaneCount = VecT.getVectorNumElements();
2184 size_t Scale1 = 1;
2185 size_t Scale2 = 1;
2186 SDValue Src1 = ShuffleSrc1;
2187 SDValue Src2 = ShuffleSrc2 ? ShuffleSrc2 : DAG.getUNDEF(VecT);
2188 if (Src1.getValueType() != VecT) {
2189 size_t LaneSize =
2190 Src1.getValueType().getVectorElementType().getFixedSizeInBits() / 8;
2191 assert(LaneSize > DestLaneSize)(static_cast <bool> (LaneSize > DestLaneSize) ? void
(0) : __assert_fail ("LaneSize > DestLaneSize", "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 2191, __extension__ __PRETTY_FUNCTION__))
;
2192 Scale1 = LaneSize / DestLaneSize;
2193 Src1 = DAG.getBitcast(VecT, Src1);
2194 }
2195 if (Src2.getValueType() != VecT) {
2196 size_t LaneSize =
2197 Src2.getValueType().getVectorElementType().getFixedSizeInBits() / 8;
2198 assert(LaneSize > DestLaneSize)(static_cast <bool> (LaneSize > DestLaneSize) ? void
(0) : __assert_fail ("LaneSize > DestLaneSize", "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 2198, __extension__ __PRETTY_FUNCTION__))
;
2199 Scale2 = LaneSize / DestLaneSize;
2200 Src2 = DAG.getBitcast(VecT, Src2);
2201 }
2202
2203 int Mask[16];
2204 assert(DestLaneCount <= 16)(static_cast <bool> (DestLaneCount <= 16) ? void (0)
: __assert_fail ("DestLaneCount <= 16", "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 2204, __extension__ __PRETTY_FUNCTION__))
;
2205 for (size_t I = 0; I < DestLaneCount; ++I) {
2206 const SDValue &Lane = Op->getOperand(I);
2207 SDValue Src = GetShuffleSrc(Lane);
2208 if (Src == ShuffleSrc1) {
2209 Mask[I] = Lane->getConstantOperandVal(1) * Scale1;
2210 } else if (Src && Src == ShuffleSrc2) {
2211 Mask[I] = DestLaneCount + Lane->getConstantOperandVal(1) * Scale2;
2212 } else {
2213 Mask[I] = -1;
2214 }
2215 }
2216 ArrayRef<int> MaskRef(Mask, DestLaneCount);
2217 Result = DAG.getVectorShuffle(VecT, DL, Src1, Src2, MaskRef);
2218 IsLaneConstructed = [&](size_t, const SDValue &Lane) {
2219 auto Src = GetShuffleSrc(Lane);
2220 return Src == ShuffleSrc1 || (Src && Src == ShuffleSrc2);
2221 };
2222 } else if (NumConstantLanes >= NumSplatLanes) {
24
Assuming 'NumConstantLanes' is >= 'NumSplatLanes'
25
Taking true branch
2223 SmallVector<SDValue, 16> ConstLanes;
2224 for (const SDValue &Lane : Op->op_values()) {
2225 if (IsConstant(Lane)) {
26
Calling 'operator()'
29
Returning from 'operator()'
30
Taking true branch
2226 // Values may need to be fixed so that they will sign extend to be
2227 // within the expected range during ISel. Check whether the value is in
2228 // bounds based on the lane bit width and if it is out of bounds, lop
2229 // off the extra bits and subtract 2^n to reflect giving the high bit
2230 // value -2^(n-1) rather than +2^(n-1). Skip the i64 case because it
2231 // cannot possibly be out of range.
2232 auto *Const = dyn_cast<ConstantSDNode>(Lane.getNode());
31
Assuming the object is not a 'ConstantSDNode'
2233 int64_t Val = Const
31.1
'Const' is null
31.1
'Const' is null
? Const->getSExtValue() : 0;
32
'?' condition is false
2234 uint64_t LaneBits = 128 / Lanes;
2235 assert((LaneBits == 64 || Val >= -(1ll << (LaneBits - 1))) &&(static_cast <bool> ((LaneBits == 64 || Val >= -(1ll
<< (LaneBits - 1))) && "Unexpected out of bounds negative value"
) ? void (0) : __assert_fail ("(LaneBits == 64 || Val >= -(1ll << (LaneBits - 1))) && \"Unexpected out of bounds negative value\""
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 2236, __extension__ __PRETTY_FUNCTION__))
33
The result of the left shift is undefined due to shifting by '127', which is greater or equal to the width of type 'long long'
2236 "Unexpected out of bounds negative value")(static_cast <bool> ((LaneBits == 64 || Val >= -(1ll
<< (LaneBits - 1))) && "Unexpected out of bounds negative value"
) ? void (0) : __assert_fail ("(LaneBits == 64 || Val >= -(1ll << (LaneBits - 1))) && \"Unexpected out of bounds negative value\""
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 2236, __extension__ __PRETTY_FUNCTION__))
;
2237 if (Const && LaneBits != 64 && Val > (1ll << (LaneBits - 1)) - 1) {
2238 auto NewVal = ((uint64_t)Val % (1ll << LaneBits)) - (1ll << LaneBits);
2239 ConstLanes.push_back(DAG.getConstant(NewVal, SDLoc(Lane), LaneT));
2240 } else {
2241 ConstLanes.push_back(Lane);
2242 }
2243 } else if (LaneT.isFloatingPoint()) {
2244 ConstLanes.push_back(DAG.getConstantFP(0, DL, LaneT));
2245 } else {
2246 ConstLanes.push_back(DAG.getConstant(0, DL, LaneT));
2247 }
2248 }
2249 Result = DAG.getBuildVector(VecT, DL, ConstLanes);
2250 IsLaneConstructed = [&IsConstant](size_t _, const SDValue &Lane) {
2251 return IsConstant(Lane);
2252 };
2253 } else {
2254 // Use a splat, but possibly a load_splat
2255 LoadSDNode *SplattedLoad;
2256 if ((SplattedLoad = dyn_cast<LoadSDNode>(SplatValue)) &&
2257 SplattedLoad->getMemoryVT() == VecT.getVectorElementType()) {
2258 Result = DAG.getMemIntrinsicNode(
2259 WebAssemblyISD::LOAD_SPLAT, DL, DAG.getVTList(VecT),
2260 {SplattedLoad->getChain(), SplattedLoad->getBasePtr(),
2261 SplattedLoad->getOffset()},
2262 SplattedLoad->getMemoryVT(), SplattedLoad->getMemOperand());
2263 } else {
2264 Result = DAG.getSplatBuildVector(VecT, DL, SplatValue);
2265 }
2266 IsLaneConstructed = [&SplatValue](size_t _, const SDValue &Lane) {
2267 return Lane == SplatValue;
2268 };
2269 }
2270
2271 assert(Result)(static_cast <bool> (Result) ? void (0) : __assert_fail
("Result", "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 2271, __extension__ __PRETTY_FUNCTION__))
;
2272 assert(IsLaneConstructed)(static_cast <bool> (IsLaneConstructed) ? void (0) : __assert_fail
("IsLaneConstructed", "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 2272, __extension__ __PRETTY_FUNCTION__))
;
2273
2274 // Add replace_lane instructions for any unhandled values
2275 for (size_t I = 0; I < Lanes; ++I) {
2276 const SDValue &Lane = Op->getOperand(I);
2277 if (!Lane.isUndef() && !IsLaneConstructed(I, Lane))
2278 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VecT, Result, Lane,
2279 DAG.getConstant(I, DL, MVT::i32));
2280 }
2281
2282 return Result;
2283}
2284
2285SDValue
2286WebAssemblyTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
2287 SelectionDAG &DAG) const {
2288 SDLoc DL(Op);
2289 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op.getNode())->getMask();
2290 MVT VecType = Op.getOperand(0).getSimpleValueType();
2291 assert(VecType.is128BitVector() && "Unexpected shuffle vector type")(static_cast <bool> (VecType.is128BitVector() &&
"Unexpected shuffle vector type") ? void (0) : __assert_fail
("VecType.is128BitVector() && \"Unexpected shuffle vector type\""
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 2291, __extension__ __PRETTY_FUNCTION__))
;
2292 size_t LaneBytes = VecType.getVectorElementType().getSizeInBits() / 8;
2293
2294 // Space for two vector args and sixteen mask indices
2295 SDValue Ops[18];
2296 size_t OpIdx = 0;
2297 Ops[OpIdx++] = Op.getOperand(0);
2298 Ops[OpIdx++] = Op.getOperand(1);
2299
2300 // Expand mask indices to byte indices and materialize them as operands
2301 for (int M : Mask) {
2302 for (size_t J = 0; J < LaneBytes; ++J) {
2303 // Lower undefs (represented by -1 in mask) to zero
2304 uint64_t ByteIndex = M == -1 ? 0 : (uint64_t)M * LaneBytes + J;
2305 Ops[OpIdx++] = DAG.getConstant(ByteIndex, DL, MVT::i32);
2306 }
2307 }
2308
2309 return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops);
2310}
2311
2312SDValue WebAssemblyTargetLowering::LowerSETCC(SDValue Op,
2313 SelectionDAG &DAG) const {
2314 SDLoc DL(Op);
2315 // The legalizer does not know how to expand the unsupported comparison modes
2316 // of i64x2 vectors, so we manually unroll them here.
2317 assert(Op->getOperand(0)->getSimpleValueType(0) == MVT::v2i64)(static_cast <bool> (Op->getOperand(0)->getSimpleValueType
(0) == MVT::v2i64) ? void (0) : __assert_fail ("Op->getOperand(0)->getSimpleValueType(0) == MVT::v2i64"
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 2317, __extension__ __PRETTY_FUNCTION__))
;
2318 SmallVector<SDValue, 2> LHS, RHS;
2319 DAG.ExtractVectorElements(Op->getOperand(0), LHS);
2320 DAG.ExtractVectorElements(Op->getOperand(1), RHS);
2321 const SDValue &CC = Op->getOperand(2);
2322 auto MakeLane = [&](unsigned I) {
2323 return DAG.getNode(ISD::SELECT_CC, DL, MVT::i64, LHS[I], RHS[I],
2324 DAG.getConstant(uint64_t(-1), DL, MVT::i64),
2325 DAG.getConstant(uint64_t(0), DL, MVT::i64), CC);
2326 };
2327 return DAG.getBuildVector(Op->getValueType(0), DL,
2328 {MakeLane(0), MakeLane(1)});
2329}
2330
2331SDValue
2332WebAssemblyTargetLowering::LowerAccessVectorElement(SDValue Op,
2333 SelectionDAG &DAG) const {
2334 // Allow constant lane indices, expand variable lane indices
2335 SDNode *IdxNode = Op.getOperand(Op.getNumOperands() - 1).getNode();
2336 if (isa<ConstantSDNode>(IdxNode) || IdxNode->isUndef())
2337 return Op;
2338 else
2339 // Perform default expansion
2340 return SDValue();
2341}
2342
2343static SDValue unrollVectorShift(SDValue Op, SelectionDAG &DAG) {
2344 EVT LaneT = Op.getSimpleValueType().getVectorElementType();
2345 // 32-bit and 64-bit unrolled shifts will have proper semantics
2346 if (LaneT.bitsGE(MVT::i32))
2347 return DAG.UnrollVectorOp(Op.getNode());
2348 // Otherwise mask the shift value to get proper semantics from 32-bit shift
2349 SDLoc DL(Op);
2350 size_t NumLanes = Op.getSimpleValueType().getVectorNumElements();
2351 SDValue Mask = DAG.getConstant(LaneT.getSizeInBits() - 1, DL, MVT::i32);
2352 unsigned ShiftOpcode = Op.getOpcode();
2353 SmallVector<SDValue, 16> ShiftedElements;
2354 DAG.ExtractVectorElements(Op.getOperand(0), ShiftedElements, 0, 0, MVT::i32);
2355 SmallVector<SDValue, 16> ShiftElements;
2356 DAG.ExtractVectorElements(Op.getOperand(1), ShiftElements, 0, 0, MVT::i32);
2357 SmallVector<SDValue, 16> UnrolledOps;
2358 for (size_t i = 0; i < NumLanes; ++i) {
2359 SDValue MaskedShiftValue =
2360 DAG.getNode(ISD::AND, DL, MVT::i32, ShiftElements[i], Mask);
2361 SDValue ShiftedValue = ShiftedElements[i];
2362 if (ShiftOpcode == ISD::SRA)
2363 ShiftedValue = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32,
2364 ShiftedValue, DAG.getValueType(LaneT));
2365 UnrolledOps.push_back(
2366 DAG.getNode(ShiftOpcode, DL, MVT::i32, ShiftedValue, MaskedShiftValue));
2367 }
2368 return DAG.getBuildVector(Op.getValueType(), DL, UnrolledOps);
2369}
2370
2371SDValue WebAssemblyTargetLowering::LowerShift(SDValue Op,
2372 SelectionDAG &DAG) const {
2373 SDLoc DL(Op);
2374
2375 // Only manually lower vector shifts
2376 assert(Op.getSimpleValueType().isVector())(static_cast <bool> (Op.getSimpleValueType().isVector()
) ? void (0) : __assert_fail ("Op.getSimpleValueType().isVector()"
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 2376, __extension__ __PRETTY_FUNCTION__))
;
2377
2378 auto ShiftVal = DAG.getSplatValue(Op.getOperand(1));
2379 if (!ShiftVal)
2380 return unrollVectorShift(Op, DAG);
2381
2382 // Use anyext because none of the high bits can affect the shift
2383 ShiftVal = DAG.getAnyExtOrTrunc(ShiftVal, DL, MVT::i32);
2384
2385 unsigned Opcode;
2386 switch (Op.getOpcode()) {
2387 case ISD::SHL:
2388 Opcode = WebAssemblyISD::VEC_SHL;
2389 break;
2390 case ISD::SRA:
2391 Opcode = WebAssemblyISD::VEC_SHR_S;
2392 break;
2393 case ISD::SRL:
2394 Opcode = WebAssemblyISD::VEC_SHR_U;
2395 break;
2396 default:
2397 llvm_unreachable("unexpected opcode")::llvm::llvm_unreachable_internal("unexpected opcode", "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 2397)
;
2398 }
2399
2400 return DAG.getNode(Opcode, DL, Op.getValueType(), Op.getOperand(0), ShiftVal);
2401}
2402
2403SDValue WebAssemblyTargetLowering::LowerFP_TO_INT_SAT(SDValue Op,
2404 SelectionDAG &DAG) const {
2405 SDLoc DL(Op);
2406 EVT ResT = Op.getValueType();
2407 EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
2408
2409 if ((ResT == MVT::i32 || ResT == MVT::i64) &&
2410 (SatVT == MVT::i32 || SatVT == MVT::i64))
2411 return Op;
2412
2413 if (ResT == MVT::v4i32 && SatVT == MVT::i32)
2414 return Op;
2415
2416 return SDValue();
2417}
2418
2419//===----------------------------------------------------------------------===//
2420// Custom DAG combine hooks
2421//===----------------------------------------------------------------------===//
2422static SDValue
2423performVECTOR_SHUFFLECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
2424 auto &DAG = DCI.DAG;
2425 auto Shuffle = cast<ShuffleVectorSDNode>(N);
2426
2427 // Hoist vector bitcasts that don't change the number of lanes out of unary
2428 // shuffles, where they are less likely to get in the way of other combines.
2429 // (shuffle (vNxT1 (bitcast (vNxT0 x))), undef, mask) ->
2430 // (vNxT1 (bitcast (vNxT0 (shuffle x, undef, mask))))
2431 SDValue Bitcast = N->getOperand(0);
2432 if (Bitcast.getOpcode() != ISD::BITCAST)
2433 return SDValue();
2434 if (!N->getOperand(1).isUndef())
2435 return SDValue();
2436 SDValue CastOp = Bitcast.getOperand(0);
2437 MVT SrcType = CastOp.getSimpleValueType();
2438 MVT DstType = Bitcast.getSimpleValueType();
2439 if (!SrcType.is128BitVector() ||
2440 SrcType.getVectorNumElements() != DstType.getVectorNumElements())
2441 return SDValue();
2442 SDValue NewShuffle = DAG.getVectorShuffle(
2443 SrcType, SDLoc(N), CastOp, DAG.getUNDEF(SrcType), Shuffle->getMask());
2444 return DAG.getBitcast(DstType, NewShuffle);
2445}
2446
2447static SDValue
2448performVectorExtendCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
2449 auto &DAG = DCI.DAG;
2450 assert(N->getOpcode() == ISD::SIGN_EXTEND ||(static_cast <bool> (N->getOpcode() == ISD::SIGN_EXTEND
|| N->getOpcode() == ISD::ZERO_EXTEND) ? void (0) : __assert_fail
("N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND"
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 2451, __extension__ __PRETTY_FUNCTION__))
2451 N->getOpcode() == ISD::ZERO_EXTEND)(static_cast <bool> (N->getOpcode() == ISD::SIGN_EXTEND
|| N->getOpcode() == ISD::ZERO_EXTEND) ? void (0) : __assert_fail
("N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND"
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 2451, __extension__ __PRETTY_FUNCTION__))
;
2452
2453 // Combine ({s,z}ext (extract_subvector src, i)) into a widening operation if
2454 // possible before the extract_subvector can be expanded.
2455 auto Extract = N->getOperand(0);
2456 if (Extract.getOpcode() != ISD::EXTRACT_SUBVECTOR)
2457 return SDValue();
2458 auto Source = Extract.getOperand(0);
2459 auto *IndexNode = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
2460 if (IndexNode == nullptr)
2461 return SDValue();
2462 auto Index = IndexNode->getZExtValue();
2463
2464 // Only v8i8, v4i16, and v2i32 extracts can be widened, and only if the
2465 // extracted subvector is the low or high half of its source.
2466 EVT ResVT = N->getValueType(0);
2467 if (ResVT == MVT::v8i16) {
2468 if (Extract.getValueType() != MVT::v8i8 ||
2469 Source.getValueType() != MVT::v16i8 || (Index != 0 && Index != 8))
2470 return SDValue();
2471 } else if (ResVT == MVT::v4i32) {
2472 if (Extract.getValueType() != MVT::v4i16 ||
2473 Source.getValueType() != MVT::v8i16 || (Index != 0 && Index != 4))
2474 return SDValue();
2475 } else if (ResVT == MVT::v2i64) {
2476 if (Extract.getValueType() != MVT::v2i32 ||
2477 Source.getValueType() != MVT::v4i32 || (Index != 0 && Index != 2))
2478 return SDValue();
2479 } else {
2480 return SDValue();
2481 }
2482
2483 bool IsSext = N->getOpcode() == ISD::SIGN_EXTEND;
2484 bool IsLow = Index == 0;
2485
2486 unsigned Op = IsSext ? (IsLow ? WebAssemblyISD::EXTEND_LOW_S
2487 : WebAssemblyISD::EXTEND_HIGH_S)
2488 : (IsLow ? WebAssemblyISD::EXTEND_LOW_U
2489 : WebAssemblyISD::EXTEND_HIGH_U);
2490
2491 return DAG.getNode(Op, SDLoc(N), ResVT, Source);
2492}
2493
2494static SDValue
2495performVectorTruncZeroCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
2496 auto &DAG = DCI.DAG;
2497
2498 auto GetWasmConversionOp = [](unsigned Op) {
2499 switch (Op) {
2500 case ISD::FP_TO_SINT_SAT:
2501 return WebAssemblyISD::TRUNC_SAT_ZERO_S;
2502 case ISD::FP_TO_UINT_SAT:
2503 return WebAssemblyISD::TRUNC_SAT_ZERO_U;
2504 case ISD::FP_ROUND:
2505 return WebAssemblyISD::DEMOTE_ZERO;
2506 }
2507 llvm_unreachable("unexpected op")::llvm::llvm_unreachable_internal("unexpected op", "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 2507)
;
2508 };
2509
2510 auto IsZeroSplat = [](SDValue SplatVal) {
2511 auto *Splat = dyn_cast<BuildVectorSDNode>(SplatVal.getNode());
2512 APInt SplatValue, SplatUndef;
2513 unsigned SplatBitSize;
2514 bool HasAnyUndefs;
2515 return Splat &&
2516 Splat->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
2517 HasAnyUndefs) &&
2518 SplatValue == 0;
2519 };
2520
2521 if (N->getOpcode() == ISD::CONCAT_VECTORS) {
2522 // Combine this:
2523 //
2524 // (concat_vectors (v2i32 (fp_to_{s,u}int_sat $x, 32)), (v2i32 (splat 0)))
2525 //
2526 // into (i32x4.trunc_sat_f64x2_zero_{s,u} $x).
2527 //
2528 // Or this:
2529 //
2530 // (concat_vectors (v2f32 (fp_round (v2f64 $x))), (v2f32 (splat 0)))
2531 //
2532 // into (f32x4.demote_zero_f64x2 $x).
2533 EVT ResVT;
2534 EVT ExpectedConversionType;
2535 auto Conversion = N->getOperand(0);
2536 auto ConversionOp = Conversion.getOpcode();
2537 switch (ConversionOp) {
2538 case ISD::FP_TO_SINT_SAT:
2539 case ISD::FP_TO_UINT_SAT:
2540 ResVT = MVT::v4i32;
2541 ExpectedConversionType = MVT::v2i32;
2542 break;
2543 case ISD::FP_ROUND:
2544 ResVT = MVT::v4f32;
2545 ExpectedConversionType = MVT::v2f32;
2546 break;
2547 default:
2548 return SDValue();
2549 }
2550
2551 if (N->getValueType(0) != ResVT)
2552 return SDValue();
2553
2554 if (Conversion.getValueType() != ExpectedConversionType)
2555 return SDValue();
2556
2557 auto Source = Conversion.getOperand(0);
2558 if (Source.getValueType() != MVT::v2f64)
2559 return SDValue();
2560
2561 if (!IsZeroSplat(N->getOperand(1)) ||
2562 N->getOperand(1).getValueType() != ExpectedConversionType)
2563 return SDValue();
2564
2565 unsigned Op = GetWasmConversionOp(ConversionOp);
2566 return DAG.getNode(Op, SDLoc(N), ResVT, Source);
2567 }
2568
2569 // Combine this:
2570 //
2571 // (fp_to_{s,u}int_sat (concat_vectors $x, (v2f64 (splat 0))), 32)
2572 //
2573 // into (i32x4.trunc_sat_f64x2_zero_{s,u} $x).
2574 //
2575 // Or this:
2576 //
2577 // (v4f32 (fp_round (concat_vectors $x, (v2f64 (splat 0)))))
2578 //
2579 // into (f32x4.demote_zero_f64x2 $x).
2580 EVT ResVT;
2581 auto ConversionOp = N->getOpcode();
2582 switch (ConversionOp) {
2583 case ISD::FP_TO_SINT_SAT:
2584 case ISD::FP_TO_UINT_SAT:
2585 ResVT = MVT::v4i32;
2586 break;
2587 case ISD::FP_ROUND:
2588 ResVT = MVT::v4f32;
2589 break;
2590 default:
2591 llvm_unreachable("unexpected op")::llvm::llvm_unreachable_internal("unexpected op", "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 2591)
;
2592 }
2593
2594 if (N->getValueType(0) != ResVT)
2595 return SDValue();
2596
2597 auto Concat = N->getOperand(0);
2598 if (Concat.getValueType() != MVT::v4f64)
2599 return SDValue();
2600
2601 auto Source = Concat.getOperand(0);
2602 if (Source.getValueType() != MVT::v2f64)
2603 return SDValue();
2604
2605 if (!IsZeroSplat(Concat.getOperand(1)) ||
2606 Concat.getOperand(1).getValueType() != MVT::v2f64)
2607 return SDValue();
2608
2609 unsigned Op = GetWasmConversionOp(ConversionOp);
2610 return DAG.getNode(Op, SDLoc(N), ResVT, Source);
2611}
2612
2613SDValue
2614WebAssemblyTargetLowering::PerformDAGCombine(SDNode *N,
2615 DAGCombinerInfo &DCI) const {
2616 switch (N->getOpcode()) {
2617 default:
2618 return SDValue();
2619 case ISD::VECTOR_SHUFFLE:
2620 return performVECTOR_SHUFFLECombine(N, DCI);
2621 case ISD::SIGN_EXTEND:
2622 case ISD::ZERO_EXTEND:
2623 return performVectorExtendCombine(N, DCI);
2624 case ISD::FP_TO_SINT_SAT:
2625 case ISD::FP_TO_UINT_SAT:
2626 case ISD::FP_ROUND:
2627 case ISD::CONCAT_VECTORS:
2628 return performVectorTruncZeroCombine(N, DCI);
2629 }
2630}

/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h

1//===- llvm/CodeGen/SelectionDAGNodes.h - SelectionDAG Nodes ----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file declares the SDNode class and derived classes, which are used to
10// represent the nodes and operations present in a SelectionDAG. These nodes
11// and operations are machine code level operations, with some similarities to
12// the GCC RTL representation.
13//
14// Clients should include the SelectionDAG.h file instead of this file directly.
15//
16//===----------------------------------------------------------------------===//
17
18#ifndef LLVM_CODEGEN_SELECTIONDAGNODES_H
19#define LLVM_CODEGEN_SELECTIONDAGNODES_H
20
21#include "llvm/ADT/APFloat.h"
22#include "llvm/ADT/ArrayRef.h"
23#include "llvm/ADT/BitVector.h"
24#include "llvm/ADT/FoldingSet.h"
25#include "llvm/ADT/GraphTraits.h"
26#include "llvm/ADT/SmallPtrSet.h"
27#include "llvm/ADT/SmallVector.h"
28#include "llvm/ADT/ilist_node.h"
29#include "llvm/ADT/iterator.h"
30#include "llvm/ADT/iterator_range.h"
31#include "llvm/CodeGen/ISDOpcodes.h"
32#include "llvm/CodeGen/MachineMemOperand.h"
33#include "llvm/CodeGen/Register.h"
34#include "llvm/CodeGen/ValueTypes.h"
35#include "llvm/IR/Constants.h"
36#include "llvm/IR/DebugLoc.h"
37#include "llvm/IR/Instruction.h"
38#include "llvm/IR/Instructions.h"
39#include "llvm/IR/Metadata.h"
40#include "llvm/IR/Operator.h"
41#include "llvm/Support/AlignOf.h"
42#include "llvm/Support/AtomicOrdering.h"
43#include "llvm/Support/Casting.h"
44#include "llvm/Support/ErrorHandling.h"
45#include "llvm/Support/MachineValueType.h"
46#include "llvm/Support/TypeSize.h"
47#include <algorithm>
48#include <cassert>
49#include <climits>
50#include <cstddef>
51#include <cstdint>
52#include <cstring>
53#include <iterator>
54#include <string>
55#include <tuple>
56
57namespace llvm {
58
59class APInt;
60class Constant;
61template <typename T> struct DenseMapInfo;
62class GlobalValue;
63class MachineBasicBlock;
64class MachineConstantPoolValue;
65class MCSymbol;
66class raw_ostream;
67class SDNode;
68class SelectionDAG;
69class Type;
70class Value;
71
72void checkForCycles(const SDNode *N, const SelectionDAG *DAG = nullptr,
73 bool force = false);
74
75/// This represents a list of ValueType's that has been intern'd by
76/// a SelectionDAG. Instances of this simple value class are returned by
77/// SelectionDAG::getVTList(...).
78///
79struct SDVTList {
80 const EVT *VTs;
81 unsigned int NumVTs;
82};
83
namespace ISD {

  /// Node predicates — free functions that classify BUILD_VECTOR /
  /// SPLAT_VECTOR style nodes.

/// If N is a BUILD_VECTOR or SPLAT_VECTOR node whose elements are all the
/// same constant or undefined, return true and return the constant value in
/// \p SplatValue.
bool isConstantSplatVector(const SDNode *N, APInt &SplatValue);

/// Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where
/// all of the elements are ~0 or undef. If \p BuildVectorOnly is set to
/// true, it only checks BUILD_VECTOR.
bool isConstantSplatVectorAllOnes(const SDNode *N,
                                  bool BuildVectorOnly = false);

/// Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where
/// all of the elements are 0 or undef. If \p BuildVectorOnly is set to true, it
/// only checks BUILD_VECTOR.
bool isConstantSplatVectorAllZeros(const SDNode *N,
                                   bool BuildVectorOnly = false);

/// Return true if the specified node is a BUILD_VECTOR where all of the
/// elements are ~0 or undef.
bool isBuildVectorAllOnes(const SDNode *N);

/// Return true if the specified node is a BUILD_VECTOR where all of the
/// elements are 0 or undef.
bool isBuildVectorAllZeros(const SDNode *N);

/// Return true if the specified node is a BUILD_VECTOR node of all
/// ConstantSDNode or undef.
bool isBuildVectorOfConstantSDNodes(const SDNode *N);

/// Return true if the specified node is a BUILD_VECTOR node of all
/// ConstantFPSDNode or undef.
bool isBuildVectorOfConstantFPSDNodes(const SDNode *N);

/// Return true if the node has at least one operand and all operands of the
/// specified node are ISD::UNDEF.
bool allOperandsUndef(const SDNode *N);

} // end namespace ISD
126
127//===----------------------------------------------------------------------===//
128/// Unlike LLVM values, Selection DAG nodes may return multiple
129/// values as the result of a computation. Many nodes return multiple values,
130/// from loads (which define a token and a return value) to ADDC (which returns
131/// a result and a carry value), to calls (which may return an arbitrary number
132/// of values).
133///
134/// As such, each use of a SelectionDAG computation must indicate the node that
135/// computes it as well as which return value to use from that node. This pair
136/// of information is represented with the SDValue value type.
137///
class SDValue {
  // DenseMapInfo<SDValue> builds empty/tombstone keys by writing ResNo
  // directly, hence the friendship.
  friend struct DenseMapInfo<SDValue>;

  SDNode *Node = nullptr; // The node defining the value we are using.
  unsigned ResNo = 0;     // Which return value of the node we are using.

public:
  SDValue() = default;
  SDValue(SDNode *node, unsigned resno);

  /// get the index which selects a specific result in the SDNode
  unsigned getResNo() const { return ResNo; }

  /// get the SDNode which holds the desired result
  SDNode *getNode() const { return Node; }

  /// set the SDNode
  void setNode(SDNode *N) { Node = N; }

  // Arrow forwards to the defining node, so `V->getOpcode()` etc. work.
  inline SDNode *operator->() const { return Node; }

  // Two SDValues are equal only if they name the same result of the same
  // node.
  bool operator==(const SDValue &O) const {
    return Node == O.Node && ResNo == O.ResNo;
  }
  bool operator!=(const SDValue &O) const {
    return !operator==(O);
  }
  // Lexicographic (Node, ResNo) order; used for sorted containers.
  bool operator<(const SDValue &O) const {
    return std::tie(Node, ResNo) < std::tie(O.Node, O.ResNo);
  }
  // True when this SDValue refers to some node (a default-constructed
  // SDValue is "null").
  explicit operator bool() const {
    return Node != nullptr;
  }

  /// Return an SDValue naming result \p R of the same node.
  SDValue getValue(unsigned R) const {
    return SDValue(Node, R);
  }

  /// Return true if this node is an operand of N.
  bool isOperandOf(const SDNode *N) const;

  /// Return the ValueType of the referenced return value.
  inline EVT getValueType() const;

  /// Return the simple ValueType of the referenced return value.
  MVT getSimpleValueType() const {
    return getValueType().getSimpleVT();
  }

  /// Returns the size of the value in bits.
  ///
  /// If the value type is a scalable vector type, the scalable property will
  /// be set and the runtime size will be a positive integer multiple of the
  /// base size.
  TypeSize getValueSizeInBits() const {
    return getValueType().getSizeInBits();
  }

  uint64_t getScalarValueSizeInBits() const {
    return getValueType().getScalarType().getFixedSizeInBits();
  }

  // Forwarding methods - These forward to the corresponding methods in SDNode.
  inline unsigned getOpcode() const;
  inline unsigned getNumOperands() const;
  inline const SDValue &getOperand(unsigned i) const;
  inline uint64_t getConstantOperandVal(unsigned i) const;
  inline const APInt &getConstantOperandAPInt(unsigned i) const;
  inline bool isTargetMemoryOpcode() const;
  inline bool isTargetOpcode() const;
  inline bool isMachineOpcode() const;
  inline bool isUndef() const;
  inline unsigned getMachineOpcode() const;
  inline const DebugLoc &getDebugLoc() const;
  inline void dump() const;
  inline void dump(const SelectionDAG *G) const;
  inline void dumpr() const;
  inline void dumpr(const SelectionDAG *G) const;

  /// Return true if this operand (which must be a chain) reaches the
  /// specified operand without crossing any side-effecting instructions.
  /// In practice, this looks through token factors and non-volatile loads.
  /// In order to remain efficient, this only
  /// looks a couple of nodes in, it does not do an exhaustive search.
  bool reachesChainWithoutSideEffects(SDValue Dest,
                                      unsigned Depth = 2) const;

  /// Return true if there are no nodes using value ResNo of Node.
  inline bool use_empty() const;

  /// Return true if there is exactly one node using value ResNo of Node.
  inline bool hasOneUse() const;
};
231
232template<> struct DenseMapInfo<SDValue> {
233 static inline SDValue getEmptyKey() {
234 SDValue V;
235 V.ResNo = -1U;
236 return V;
237 }
238
239 static inline SDValue getTombstoneKey() {
240 SDValue V;
241 V.ResNo = -2U;
242 return V;
243 }
244
245 static unsigned getHashValue(const SDValue &Val) {
246 return ((unsigned)((uintptr_t)Val.getNode() >> 4) ^
247 (unsigned)((uintptr_t)Val.getNode() >> 9)) + Val.getResNo();
248 }
249
250 static bool isEqual(const SDValue &LHS, const SDValue &RHS) {
251 return LHS == RHS;
252 }
253};
254
/// Allow casting operators to work directly on
/// SDValues as if they were SDNode*'s.
template<> struct simplify_type<SDValue> {
  using SimpleType = SDNode *;

  // isa<>/cast<>/dyn_cast<> on an SDValue operate on its defining node.
  static SimpleType getSimplifiedValue(SDValue &Val) {
    return Val.getNode();
  }
};
// Const variant: note the SimpleType is still a non-const SDNode* by design
// (see the commented-out /*const*/ below).
template<> struct simplify_type<const SDValue> {
  using SimpleType = /*const*/ SDNode *;

  static SimpleType getSimplifiedValue(const SDValue &Val) {
    return Val.getNode();
  }
};
271
/// Represents a use of a SDNode. This class holds an SDValue,
/// which records the SDNode being used and the result number, a
/// pointer to the SDNode using the value, and Next and Prev pointers,
/// which link together all the uses of an SDNode.
///
class SDUse {
  /// Val - The value being used.
  SDValue Val;
  /// User - The user of this value.
  SDNode *User = nullptr;
  /// Prev, Next - Pointers to the uses list of the SDNode referred by
  /// this operand.  Prev points at the forward link that points at this
  /// use (intrusive doubly-linked list), so unlinking needs no list head.
  SDUse **Prev = nullptr;
  SDUse *Next = nullptr;

public:
  SDUse() = default;
  // Non-copyable: a copy would corrupt the intrusive use lists.
  SDUse(const SDUse &U) = delete;
  SDUse &operator=(const SDUse &) = delete;

  /// Normally SDUse will just implicitly convert to an SDValue that it holds.
  operator const SDValue&() const { return Val; }

  /// If implicit conversion to SDValue doesn't work, the get() method returns
  /// the SDValue.
  const SDValue &get() const { return Val; }

  /// This returns the SDNode that contains this Use.
  SDNode *getUser() { return User; }

  /// Get the next SDUse in the use list.
  SDUse *getNext() const { return Next; }

  /// Convenience function for get().getNode().
  SDNode *getNode() const { return Val.getNode(); }
  /// Convenience function for get().getResNo().
  unsigned getResNo() const { return Val.getResNo(); }
  /// Convenience function for get().getValueType().
  EVT getValueType() const { return Val.getValueType(); }

  /// Convenience function for get().operator==
  bool operator==(const SDValue &V) const {
    return Val == V;
  }

  /// Convenience function for get().operator!=
  bool operator!=(const SDValue &V) const {
    return Val != V;
  }

  /// Convenience function for get().operator<
  bool operator<(const SDValue &V) const {
    return Val < V;
  }

private:
  friend class SelectionDAG;
  friend class SDNode;
  // TODO: unfriend HandleSDNode once we fix its operand handling.
  friend class HandleSDNode;

  void setUser(SDNode *p) { User = p; }

  /// Remove this use from its existing use list, assign it the
  /// given value, and add it to the new value's node's use list.
  inline void set(const SDValue &V);
  /// Like set, but only supports initializing a newly-allocated
  /// SDUse with a non-null value.
  inline void setInitial(const SDValue &V);
  /// Like set, but only sets the Node portion of the value,
  /// leaving the ResNo portion unmodified.
  inline void setNode(SDNode *N);

  // Splice this use onto the front of *List.
  void addToList(SDUse **List) {
    Next = *List;
    if (Next) Next->Prev = &Next;  // back-link the old head to our Next slot
    Prev = List;
    *List = this;
  }

  // Unlink this use; works without knowing the list head because Prev
  // points at whatever forward link currently targets this use.
  void removeFromList() {
    *Prev = Next;
    if (Next) Next->Prev = Prev;
  }
};
357
/// simplify_type specializations - Allow casting operators to work directly on
/// SDValues as if they were SDNode*'s.
template<> struct simplify_type<SDUse> {
  using SimpleType = SDNode *;

  // isa<>/cast<>/dyn_cast<> on an SDUse operate on the used node.
  static SimpleType getSimplifiedValue(SDUse &Val) {
    return Val.getNode();
  }
};
367
/// These are IR-level optimization flags that may be propagated to SDNodes.
/// TODO: This data structure should be shared by the IR optimizer and the
/// the backend.
struct SDNodeFlags {
private:
  // Wrap/exactness flags for integer arithmetic.
  bool NoUnsignedWrap : 1;
  bool NoSignedWrap : 1;
  bool Exact : 1;
  // Fast-math flags, mirroring the IR FastMathFlags set.
  bool NoNaNs : 1;
  bool NoInfs : 1;
  bool NoSignedZeros : 1;
  bool AllowReciprocal : 1;
  bool AllowContract : 1;
  bool ApproximateFuncs : 1;
  bool AllowReassociation : 1;

  // We assume instructions do not raise floating-point exceptions by default,
  // and only those marked explicitly may do so.  We could choose to represent
  // this via a positive "FPExcept" flags like on the MI level, but having a
  // negative "NoFPExcept" flag here (that defaults to true) makes the flag
  // intersection logic more straightforward.
  bool NoFPExcept : 1;

public:
  /// Default constructor turns off all optimization flags.
  SDNodeFlags()
      : NoUnsignedWrap(false), NoSignedWrap(false), Exact(false), NoNaNs(false),
        NoInfs(false), NoSignedZeros(false), AllowReciprocal(false),
        AllowContract(false), ApproximateFuncs(false),
        AllowReassociation(false), NoFPExcept(false) {}

  /// Propagate the fast-math-flags from an IR FPMathOperator.
  void copyFMF(const FPMathOperator &FPMO) {
    setNoNaNs(FPMO.hasNoNaNs());
    setNoInfs(FPMO.hasNoInfs());
    setNoSignedZeros(FPMO.hasNoSignedZeros());
    setAllowReciprocal(FPMO.hasAllowReciprocal());
    setAllowContract(FPMO.hasAllowContract());
    setApproximateFuncs(FPMO.hasApproxFunc());
    setAllowReassociation(FPMO.hasAllowReassoc());
  }

  // These are mutators for each flag.
  void setNoUnsignedWrap(bool b) { NoUnsignedWrap = b; }
  void setNoSignedWrap(bool b) { NoSignedWrap = b; }
  void setExact(bool b) { Exact = b; }
  void setNoNaNs(bool b) { NoNaNs = b; }
  void setNoInfs(bool b) { NoInfs = b; }
  void setNoSignedZeros(bool b) { NoSignedZeros = b; }
  void setAllowReciprocal(bool b) { AllowReciprocal = b; }
  void setAllowContract(bool b) { AllowContract = b; }
  void setApproximateFuncs(bool b) { ApproximateFuncs = b; }
  void setAllowReassociation(bool b) { AllowReassociation = b; }
  void setNoFPExcept(bool b) { NoFPExcept = b; }

  // These are accessors for each flag.
  bool hasNoUnsignedWrap() const { return NoUnsignedWrap; }
  bool hasNoSignedWrap() const { return NoSignedWrap; }
  bool hasExact() const { return Exact; }
  bool hasNoNaNs() const { return NoNaNs; }
  bool hasNoInfs() const { return NoInfs; }
  bool hasNoSignedZeros() const { return NoSignedZeros; }
  bool hasAllowReciprocal() const { return AllowReciprocal; }
  bool hasAllowContract() const { return AllowContract; }
  bool hasApproximateFuncs() const { return ApproximateFuncs; }
  bool hasAllowReassociation() const { return AllowReassociation; }
  bool hasNoFPExcept() const { return NoFPExcept; }

  /// Clear any flags in this flag set that aren't also set in Flags. All
  /// flags will be cleared if Flags are undefined.
  void intersectWith(const SDNodeFlags Flags) {
    NoUnsignedWrap &= Flags.NoUnsignedWrap;
    NoSignedWrap &= Flags.NoSignedWrap;
    Exact &= Flags.Exact;
    NoNaNs &= Flags.NoNaNs;
    NoInfs &= Flags.NoInfs;
    NoSignedZeros &= Flags.NoSignedZeros;
    AllowReciprocal &= Flags.AllowReciprocal;
    AllowContract &= Flags.AllowContract;
    ApproximateFuncs &= Flags.ApproximateFuncs;
    AllowReassociation &= Flags.AllowReassociation;
    NoFPExcept &= Flags.NoFPExcept;
  }
};
452
453/// Represents one node in the SelectionDAG.
454///
455class SDNode : public FoldingSetNode, public ilist_node<SDNode> {
private:
  /// The operation that this node performs.  Non-negative values are ISD /
  /// target opcodes; negative values encode a MachineInstr opcode as its
  /// bitwise complement (see isMachineOpcode/getMachineOpcode below).
  int16_t NodeType;
459
protected:
  // We define a set of mini-helper classes to help us interpret the bits in our
  // SubclassData.  These are designed to fit within a uint16_t so they pack
  // with NodeType.

#if defined(_AIX) && (!defined(__GNUC__) || defined(__clang__))
// Except for GCC; by default, AIX compilers store bit-fields in 4-byte words
// and give the `pack` pragma push semantics.
#define BEGIN_TWO_BYTE_PACK() _Pragma("pack(2)")
#define END_TWO_BYTE_PACK() _Pragma("pack(pop)")
#else
#define BEGIN_TWO_BYTE_PACK()
#define END_TWO_BYTE_PACK()
#endif

BEGIN_TWO_BYTE_PACK()
  // Bits common to every SDNode; subclass bitfield classes below reserve
  // these low NumSDNodeBits bits before adding their own.
  class SDNodeBitfields {
    friend class SDNode;
    friend class MemIntrinsicSDNode;
    friend class MemSDNode;
    friend class SelectionDAG;

    uint16_t HasDebugValue : 1;
    uint16_t IsMemIntrinsic : 1;
    uint16_t IsDivergent : 1;
  };
  enum { NumSDNodeBits = 3 };
487
  // Bits specific to ConstantSDNode, layered after the common SDNode bits.
  class ConstantSDNodeBitfields {
    friend class ConstantSDNode;

    uint16_t : NumSDNodeBits;  // padding over the shared SDNode bits

    uint16_t IsOpaque : 1;
  };
495
  // Bits shared by all memory-accessing nodes (loads, stores, atomics,
  // memory intrinsics), layered after the common SDNode bits.
  class MemSDNodeBitfields {
    friend class MemSDNode;
    friend class MemIntrinsicSDNode;
    friend class AtomicSDNode;

    uint16_t : NumSDNodeBits;  // padding over the shared SDNode bits

    uint16_t IsVolatile : 1;
    uint16_t IsNonTemporal : 1;
    uint16_t IsDereferenceable : 1;
    uint16_t IsInvariant : 1;
  };
  enum { NumMemSDNodeBits = NumSDNodeBits + 4 };
509
  class LSBaseSDNodeBitfields {
    friend class LSBaseSDNode;
    friend class VPLoadStoreSDNode;
    friend class MaskedLoadStoreSDNode;
    friend class MaskedGatherScatterSDNode;
    friend class VPGatherScatterSDNode;

    uint16_t : NumMemSDNodeBits;  // padding over the MemSDNode bits

    // This storage is shared between disparate class hierarchies to hold an
    // enumeration specific to the class hierarchy in use.
    //   LSBaseSDNode => enum ISD::MemIndexedMode
    //   VPLoadStoreBaseSDNode => enum ISD::MemIndexedMode
    //   MaskedLoadStoreBaseSDNode => enum ISD::MemIndexedMode
    //   VPGatherScatterSDNode => enum ISD::MemIndexType
    //   MaskedGatherScatterSDNode => enum ISD::MemIndexType
    uint16_t AddressingMode : 3;
  };
  enum { NumLSBaseSDNodeBits = NumMemSDNodeBits + 3 };
529
  // Bits specific to load-like nodes, layered after the LSBase bits.
  class LoadSDNodeBitfields {
    friend class LoadSDNode;
    friend class VPLoadSDNode;
    friend class MaskedLoadSDNode;
    friend class MaskedGatherSDNode;
    friend class VPGatherSDNode;

    uint16_t : NumLSBaseSDNodeBits;  // padding over the LSBase bits

    uint16_t ExtTy : 2; // enum ISD::LoadExtType
    uint16_t IsExpanding : 1;
  };
542
  // Bits specific to store-like nodes, layered after the LSBase bits.
  class StoreSDNodeBitfields {
    friend class StoreSDNode;
    friend class VPStoreSDNode;
    friend class MaskedStoreSDNode;
    friend class MaskedScatterSDNode;
    friend class VPScatterSDNode;

    uint16_t : NumLSBaseSDNodeBits;  // padding over the LSBase bits

    uint16_t IsTruncating : 1;
    uint16_t IsCompressing : 1;
  };
555
  // All the bitfield views above alias the same two bytes of storage;
  // RawSDNodeBits gives byte-wise access for bulk copy/clear.
  union {
    char RawSDNodeBits[sizeof(uint16_t)];
    SDNodeBitfields SDNodeBits;
    ConstantSDNodeBitfields ConstantSDNodeBits;
    MemSDNodeBitfields MemSDNodeBits;
    LSBaseSDNodeBitfields LSBaseSDNodeBits;
    LoadSDNodeBitfields LoadSDNodeBits;
    StoreSDNodeBitfields StoreSDNodeBits;
  };
END_TWO_BYTE_PACK()
#undef BEGIN_TWO_BYTE_PACK
#undef END_TWO_BYTE_PACK

  // RawSDNodeBits must cover the entirety of the union.  This means that all of
  // the union's members must have size <= RawSDNodeBits.  We write the RHS as
  // "2" instead of sizeof(RawSDNodeBits) because MSVC can't handle the latter.
  static_assert(sizeof(SDNodeBitfields) <= 2, "field too wide");
  static_assert(sizeof(ConstantSDNodeBitfields) <= 2, "field too wide");
  static_assert(sizeof(MemSDNodeBitfields) <= 2, "field too wide");
  static_assert(sizeof(LSBaseSDNodeBitfields) <= 2, "field too wide");
  static_assert(sizeof(LoadSDNodeBitfields) <= 2, "field too wide");
  static_assert(sizeof(StoreSDNodeBitfields) <= 2, "field too wide");
578
private:
  friend class SelectionDAG;
  // TODO: unfriend HandleSDNode once we fix its operand handling.
  friend class HandleSDNode;

  /// Unique id per SDNode in the DAG.
  int NodeId = -1;

  /// The values that are used by this operation.
  SDUse *OperandList = nullptr;

  /// The types of the values this node defines.  SDNode's may
  /// define multiple values simultaneously.
  const EVT *ValueList;

  /// List of uses for this SDNode.
  SDUse *UseList = nullptr;

  /// The number of entries in the Operand/Value list.
  unsigned short NumOperands = 0;
  unsigned short NumValues;

  // The ordering of the SDNodes. It roughly corresponds to the ordering of the
  // original LLVM instructions.
  // This is used for turning off scheduling, because we'll forgo
  // the normal scheduling algorithms and output the instructions according to
  // this ordering.
  unsigned IROrder;

  /// Source line information.
  DebugLoc debugLoc;

  /// Return a pointer to the specified value type.
  static const EVT *getValueTypeList(EVT VT);

  SDNodeFlags Flags;

public:
  /// Unique and persistent id per SDNode in the DAG.
  /// Used for debug printing.
  uint16_t PersistentId;
620
621 //===--------------------------------------------------------------------===//
622 // Accessors
623 //
624
625 /// Return the SelectionDAG opcode value for this node. For
626 /// pre-isel nodes (those for which isMachineOpcode returns false), these
627 /// are the opcode values in the ISD and <target>ISD namespaces. For
628 /// post-isel opcodes, see getMachineOpcode.
629 unsigned getOpcode() const { return (unsigned short)NodeType; }
630
631 /// Test if this node has a target-specific opcode (in the
632 /// \<target\>ISD namespace).
633 bool isTargetOpcode() const { return NodeType >= ISD::BUILTIN_OP_END; }
634
635 /// Test if this node has a target-specific opcode that may raise
636 /// FP exceptions (in the \<target\>ISD namespace and greater than
637 /// FIRST_TARGET_STRICTFP_OPCODE). Note that all target memory
638 /// opcode are currently automatically considered to possibly raise
639 /// FP exceptions as well.
640 bool isTargetStrictFPOpcode() const {
641 return NodeType >= ISD::FIRST_TARGET_STRICTFP_OPCODE;
642 }
643
644 /// Test if this node has a target-specific
645 /// memory-referencing opcode (in the \<target\>ISD namespace and
646 /// greater than FIRST_TARGET_MEMORY_OPCODE).
647 bool isTargetMemoryOpcode() const {
648 return NodeType >= ISD::FIRST_TARGET_MEMORY_OPCODE;
649 }
650
651 /// Return true if the type of the node type undefined.
652 bool isUndef() const { return NodeType == ISD::UNDEF; }
653
  /// Test if this node is a memory intrinsic (with valid pointer information).
  /// INTRINSIC_W_CHAIN and INTRINSIC_VOID nodes are sometimes created for
  /// non-memory intrinsics (with chains) that are not really instances of
  /// MemSDNode. For such nodes, we need some extra state to determine the
  /// proper classof relationship.
  bool isMemIntrinsic() const {
    return (NodeType == ISD::INTRINSIC_W_CHAIN ||
            NodeType == ISD::INTRINSIC_VOID) &&
           SDNodeBits.IsMemIntrinsic;
  }
664
  /// Test if this node is a strict floating point pseudo-op.
  bool isStrictFPOpcode() {
    switch (NodeType) {
      default:
        return false;
      case ISD::STRICT_FP16_TO_FP:
      case ISD::STRICT_FP_TO_FP16:
      // Expand one case label per constrained op via the X-macro below.
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
      case ISD::STRICT_##DAGN:
#include "llvm/IR/ConstrainedOps.def"
        return true;
    }
  }
678
  /// Test if this node has a post-isel opcode, directly
  /// corresponding to a MachineInstr opcode.
  bool isMachineOpcode() const { return NodeType < 0; }

  /// This may only be called if isMachineOpcode returns
  /// true. It returns the MachineInstr opcode value that the node's opcode
  /// corresponds to.
  unsigned getMachineOpcode() const {
    assert(isMachineOpcode() && "Not a MachineInstr opcode!");
    // Machine opcodes are stored as their bitwise complement (hence < 0).
    return ~NodeType;
  }
690
  // True if a DBG_VALUE/debug value is attached to this node.
  bool getHasDebugValue() const { return SDNodeBits.HasDebugValue; }
  void setHasDebugValue(bool b) { SDNodeBits.HasDebugValue = b; }

  bool isDivergent() const { return SDNodeBits.IsDivergent; }

  /// Return true if there are no uses of this node.
  bool use_empty() const { return UseList == nullptr; }

  /// Return true if there is exactly one use of this node.
  bool hasOneUse() const { return hasSingleElement(uses()); }

  /// Return the number of uses of this node. This method takes
  /// time proportional to the number of uses.
  size_t use_size() const { return std::distance(use_begin(), use_end()); }

  /// Return the unique node id.
  int getNodeId() const { return NodeId; }

  /// Set unique node id.
  void setNodeId(int Id) { NodeId = Id; }

  /// Return the node ordering.
  unsigned getIROrder() const { return IROrder; }

  /// Set the node ordering.
  void setIROrder(unsigned Order) { IROrder = Order; }

  /// Return the source location info.
  const DebugLoc &getDebugLoc() const { return debugLoc; }

  /// Set source location info.  Try to avoid this, putting
  /// it in the constructor is preferable.
  void setDebugLoc(DebugLoc dl) { debugLoc = std::move(dl); }
724
  /// This class provides iterator support for SDUse
  /// operands that use a specific SDNode.  Forward-only; walks the node's
  /// intrusive use list, with nullptr acting as the end sentinel.
  class use_iterator {
    friend class SDNode;

    SDUse *Op = nullptr;

    explicit use_iterator(SDUse *op) : Op(op) {}

  public:
    using iterator_category = std::forward_iterator_tag;
    using value_type = SDUse;
    using difference_type = std::ptrdiff_t;
    using pointer = value_type *;
    using reference = value_type &;

    use_iterator() = default;
    use_iterator(const use_iterator &I) : Op(I.Op) {}

    bool operator==(const use_iterator &x) const {
      return Op == x.Op;
    }
    bool operator!=(const use_iterator &x) const {
      return !operator==(x);
    }

    /// Return true if this iterator is at the end of uses list.
    bool atEnd() const { return Op == nullptr; }

    // Iterator traversal: forward iteration only.
    use_iterator &operator++() {          // Preincrement
      assert(Op && "Cannot increment end iterator!");
      Op = Op->getNext();
      return *this;
    }

    use_iterator operator++(int) {        // Postincrement
      use_iterator tmp = *this; ++*this; return tmp;
    }

    /// Retrieve a pointer to the current user node.
    SDNode *operator*() const {
      assert(Op && "Cannot dereference end iterator!");
      return Op->getUser();
    }

    SDNode *operator->() const { return operator*(); }

    SDUse &getUse() const { return *Op; }

    /// Retrieve the operand # of this use in its user.
    unsigned getOperandNo() const {
      assert(Op && "Cannot dereference end iterator!");
      // SDUse objects live in the user's contiguous OperandList, so the
      // operand number is just pointer distance.
      return (unsigned)(Op - Op->getUser()->OperandList);
    }
  };
781
  /// Provide iteration support to walk over all uses of an SDNode.
  use_iterator use_begin() const {
    return use_iterator(UseList);
  }

  static use_iterator use_end() { return use_iterator(nullptr); }

  inline iterator_range<use_iterator> uses() {
    return make_range(use_begin(), use_end());
  }
  inline iterator_range<use_iterator> uses() const {
    return make_range(use_begin(), use_end());
  }
795
  /// Return true if there are exactly NUSES uses of the indicated value.
  /// This method ignores uses of other values defined by this operation.
  bool hasNUsesOfValue(unsigned NUses, unsigned Value) const;

  /// Return true if there are any use of the indicated value.
  /// This method ignores uses of other values defined by this operation.
  bool hasAnyUseOfValue(unsigned Value) const;

  /// Return true if this node is the only use of N.
  bool isOnlyUserOf(const SDNode *N) const;

  /// Return true if this node is an operand of N.
  bool isOperandOf(const SDNode *N) const;

  /// Return true if this node is a predecessor of N.
  /// NOTE: Implemented on top of hasPredecessor and every bit as
  /// expensive. Use carefully.
  bool isPredecessorOf(const SDNode *N) const {
    return N->hasPredecessor(this);
  }

  /// Return true if N is a predecessor of this node.
  /// N is either an operand of this node, or can be reached by recursively
  /// traversing up the operands.
  /// NOTE: This is an expensive method. Use it carefully.
  bool hasPredecessor(const SDNode *N) const;
822
823 /// Returns true if N is a predecessor of any node in Worklist. This
824 /// helper keeps Visited and Worklist sets externally to allow unions
825 /// searches to be performed in parallel, caching of results across
826 /// queries and incremental addition to Worklist. Stops early if N is
827 /// found but will resume. Remember to clear Visited and Worklists
828 /// if DAG changes. MaxSteps gives a maximum number of nodes to visit before
829 /// giving up. The TopologicalPrune flag signals that positive NodeIds are
830 /// topologically ordered (Operands have strictly smaller node id) and search
831 /// can be pruned leveraging this.
832 static bool hasPredecessorHelper(const SDNode *N,
833 SmallPtrSetImpl<const SDNode *> &Visited,
834 SmallVectorImpl<const SDNode *> &Worklist,
835 unsigned int MaxSteps = 0,
836 bool TopologicalPrune = false) {
837 SmallVector<const SDNode *, 8> DeferredNodes;
838 if (Visited.count(N))
839 return true;
840
841 // Node Id's are assigned in three places: As a topological
842 // ordering (> 0), during legalization (results in values set to
843 // 0), new nodes (set to -1). If N has a topolgical id then we
844 // know that all nodes with ids smaller than it cannot be
845 // successors and we need not check them. Filter out all node
846 // that can't be matches. We add them to the worklist before exit
847 // in case of multiple calls. Note that during selection the topological id
848 // may be violated if a node's predecessor is selected before it. We mark
849 // this at selection negating the id of unselected successors and
850 // restricting topological pruning to positive ids.
851
852 int NId = N->getNodeId();
853 // If we Invalidated the Id, reconstruct original NId.
854 if (NId < -1)
855 NId = -(NId + 1);
856
857 bool Found = false;
858 while (!Worklist.empty()) {
859 const SDNode *M = Worklist.pop_back_val();
860 int MId = M->getNodeId();
861 if (TopologicalPrune && M->getOpcode() != ISD::TokenFactor && (NId > 0) &&
862 (MId > 0) && (MId < NId)) {
863 DeferredNodes.push_back(M);
864 continue;
865 }
866 for (const SDValue &OpV : M->op_values()) {
867 SDNode *Op = OpV.getNode();
868 if (Visited.insert(Op).second)
869 Worklist.push_back(Op);
870 if (Op == N)
871 Found = true;
872 }
873 if (Found)
874 break;
875 if (MaxSteps != 0 && Visited.size() >= MaxSteps)
876 break;
877 }
878 // Push deferred nodes back on worklist.
879 Worklist.append(DeferredNodes.begin(), DeferredNodes.end());
880 // If we bailed early, conservatively return found.
881 if (MaxSteps != 0 && Visited.size() >= MaxSteps)
882 return true;
883 return Found;
884 }
885
  /// Return true if all the users of N are contained in Nodes.
  /// NOTE: Requires at least one match, but doesn't require them all.
  static bool areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N);

  /// Return the number of values used by this operation.
  unsigned getNumOperands() const { return NumOperands; }

  /// Return the maximum number of operands that a SDNode can hold
  /// (bounded by the width of the NumOperands field).
  static constexpr size_t getMaxNumOperands() {
    return std::numeric_limits<decltype(SDNode::NumOperands)>::max();
  }
897
  /// Helper method returns the integer value of a ConstantSDNode operand.
  inline uint64_t getConstantOperandVal(unsigned Num) const;

  /// Helper method returns the APInt of a ConstantSDNode operand.
  inline const APInt &getConstantOperandAPInt(unsigned Num) const;

  /// Return operand \p Num; \p Num must be in range.
  const SDValue &getOperand(unsigned Num) const {
    assert(Num < NumOperands && "Invalid child # of SDNode!");
    return OperandList[Num];
  }
908
  // Operand iteration: operands are stored contiguously as SDUse objects.
  using op_iterator = SDUse *;

  op_iterator op_begin() const { return OperandList; }
  op_iterator op_end() const { return OperandList+NumOperands; }
  ArrayRef<SDUse> ops() const { return makeArrayRef(op_begin(), op_end()); }

  /// Iterator for directly iterating over the operand SDValue's.
  struct value_op_iterator
      : iterator_adaptor_base<value_op_iterator, op_iterator,
                              std::random_access_iterator_tag, SDValue,
                              ptrdiff_t, value_op_iterator *,
                              value_op_iterator *> {
    explicit value_op_iterator(SDUse *U = nullptr)
      : iterator_adaptor_base(U) {}

    // Dereference yields the SDValue held by the underlying SDUse.
    const SDValue &operator*() const { return I->get(); }
  };

  iterator_range<value_op_iterator> op_values() const {
    return make_range(value_op_iterator(op_begin()),
                      value_op_iterator(op_end()));
  }
931
  /// Return this node's interned result-type list as an SDVTList.
  SDVTList getVTList() const {
    SDVTList X = { ValueList, NumValues };
    return X;
  }

  /// If this node has a glue operand, return the node
  /// to which the glue operand points. Otherwise return NULL.
  /// (A glue operand, when present, is always the last operand.)
  SDNode *getGluedNode() const {
    if (getNumOperands() != 0 &&
        getOperand(getNumOperands()-1).getValueType() == MVT::Glue)
      return getOperand(getNumOperands()-1).getNode();
    return nullptr;
  }

  /// If this node has a glue value with a user, return
  /// the user (there is at most one). Otherwise return NULL.
  SDNode *getGluedUser() const {
    for (use_iterator UI = use_begin(), UE = use_end(); UI != UE; ++UI)
      if (UI.getUse().get().getValueType() == MVT::Glue)
        return *UI;
    return nullptr;
  }
954
  // Node-level flags (e.g. fast-math / wrap flags) attached to this node.
  SDNodeFlags getFlags() const { return Flags; }
  void setFlags(SDNodeFlags NewFlags) { Flags = NewFlags; }

  /// Clear any flags in this node that aren't also set in Flags.
  /// If Flags is not in a defined state then this has no effect.
  void intersectFlagsWith(const SDNodeFlags Flags);

  /// Return the number of values defined/returned by this operator.
  unsigned getNumValues() const { return NumValues; }
964
965 /// Return the type of a specified result.
966 EVT getValueType(unsigned ResNo) const {
967 assert(ResNo < NumValues && "Illegal result number!")(static_cast <bool> (ResNo < NumValues && "Illegal result number!"
) ? void (0) : __assert_fail ("ResNo < NumValues && \"Illegal result number!\""
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 967, __extension__ __PRETTY_FUNCTION__))
;
968 return ValueList[ResNo];
969 }
970
  /// Return the type of a specified result as a simple type.
  MVT getSimpleValueType(unsigned ResNo) const {
    return getValueType(ResNo).getSimpleVT();
  }

  /// Returns MVT::getSizeInBits(getValueType(ResNo)).
  ///
  /// If the value type is a scalable vector type, the scalable property will
  /// be set and the runtime size will be a positive integer multiple of the
  /// base size.
  TypeSize getValueSizeInBits(unsigned ResNo) const {
    return getValueType(ResNo).getSizeInBits();
  }

  // Iteration over this node's result value types.
  using value_iterator = const EVT *;

  value_iterator value_begin() const { return ValueList; }
  value_iterator value_end() const { return ValueList+NumValues; }
  iterator_range<value_iterator> values() const {
    return llvm::make_range(value_begin(), value_end());
  }
992
  /// Return the opcode of this operation for printing.
  std::string getOperationName(const SelectionDAG *G = nullptr) const;
  static const char* getIndexedModeName(ISD::MemIndexedMode AM);
  void print_types(raw_ostream &OS, const SelectionDAG *G) const;
  void print_details(raw_ostream &OS, const SelectionDAG *G) const;
  void print(raw_ostream &OS, const SelectionDAG *G = nullptr) const;
  void printr(raw_ostream &OS, const SelectionDAG *G = nullptr) const;

  /// Print a SelectionDAG node and all children down to
  /// the leaves. The given SelectionDAG allows target-specific nodes
  /// to be printed in human-readable form. Unlike printr, this will
  /// print the whole DAG, including children that appear multiple
  /// times.
  ///
  void printrFull(raw_ostream &O, const SelectionDAG *G = nullptr) const;

  /// Print a SelectionDAG node and children up to
  /// depth "depth." The given SelectionDAG allows target-specific
  /// nodes to be printed in human-readable form. Unlike printr, this
  /// will print children that appear multiple times wherever they are
  /// used.
  ///
  void printrWithDepth(raw_ostream &O, const SelectionDAG *G = nullptr,
                       unsigned depth = 100) const;

  /// Dump this node, for debugging.
  void dump() const;

  /// Dump (recursively) this node and its use-def subgraph.
  void dumpr() const;

  /// Dump this node, for debugging.
  /// The given SelectionDAG allows target-specific nodes to be printed
  /// in human-readable form.
  void dump(const SelectionDAG *G) const;

  /// Dump (recursively) this node and its use-def subgraph.
  /// The given SelectionDAG allows target-specific nodes to be printed
  /// in human-readable form.
  void dumpr(const SelectionDAG *G) const;

  /// printrFull to dbgs(). The given SelectionDAG allows
  /// target-specific nodes to be printed in human-readable form.
  /// Unlike dumpr, this will print the whole DAG, including children
  /// that appear multiple times.
  void dumprFull(const SelectionDAG *G = nullptr) const;

  /// printrWithDepth to dbgs(). The given
  /// SelectionDAG allows target-specific nodes to be printed in
  /// human-readable form. Unlike dumpr, this will print children
  /// that appear multiple times wherever they are used.
  ///
  void dumprWithDepth(const SelectionDAG *G = nullptr,
                      unsigned depth = 100) const;

  /// Gather unique data for the node. Used by the FoldingSet-based CSE map.
  void Profile(FoldingSetNodeID &ID) const;

  /// This method should only be used by the SDUse class.
  void addUse(SDUse &U) { U.addToList(&UseList); }
1053
1054protected:
1055 static SDVTList getSDVTList(EVT VT) {
1056 SDVTList Ret = { getValueTypeList(VT), 1 };
1057 return Ret;
1058 }
1059
1060 /// Create an SDNode.
1061 ///
1062 /// SDNodes are created without any operands, and never own the operand
1063 /// storage. To add operands, see SelectionDAG::createOperands.
1064 SDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs)
1065 : NodeType(Opc), ValueList(VTs.VTs), NumValues(VTs.NumVTs),
1066 IROrder(Order), debugLoc(std::move(dl)) {
1067 memset(&RawSDNodeBits, 0, sizeof(RawSDNodeBits));
1068 assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor")(static_cast <bool> (debugLoc.hasTrivialDestructor() &&
"Expected trivial destructor") ? void (0) : __assert_fail ("debugLoc.hasTrivialDestructor() && \"Expected trivial destructor\""
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1068, __extension__ __PRETTY_FUNCTION__))
;
1069 assert(NumValues == VTs.NumVTs &&(static_cast <bool> (NumValues == VTs.NumVTs &&
"NumValues wasn't wide enough for its operands!") ? void (0)
: __assert_fail ("NumValues == VTs.NumVTs && \"NumValues wasn't wide enough for its operands!\""
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1070, __extension__ __PRETTY_FUNCTION__))
1070 "NumValues wasn't wide enough for its operands!")(static_cast <bool> (NumValues == VTs.NumVTs &&
"NumValues wasn't wide enough for its operands!") ? void (0)
: __assert_fail ("NumValues == VTs.NumVTs && \"NumValues wasn't wide enough for its operands!\""
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1070, __extension__ __PRETTY_FUNCTION__))
;
1071 }
1072
1073 /// Release the operands and set this node to have zero operands.
1074 void DropOperands();
1075};
1076
1077/// Wrapper class for IR location info (IR ordering and DebugLoc) to be passed
1078/// into SDNode creation functions.
1079/// When an SDNode is created from the DAGBuilder, the DebugLoc is extracted
1080/// from the original Instruction, and IROrder is the ordinal position of
1081/// the instruction.
1082/// When an SDNode is created after the DAG is being built, both DebugLoc and
1083/// the IROrder are propagated from the original SDNode.
1084/// So SDLoc class provides two constructors besides the default one, one to
1085/// be used by the DAGBuilder, the other to be used by others.
1086class SDLoc {
1087private:
1088 DebugLoc DL;
1089 int IROrder = 0;
1090
1091public:
1092 SDLoc() = default;
1093 SDLoc(const SDNode *N) : DL(N->getDebugLoc()), IROrder(N->getIROrder()) {}
1094 SDLoc(const SDValue V) : SDLoc(V.getNode()) {}
1095 SDLoc(const Instruction *I, int Order) : IROrder(Order) {
1096 assert(Order >= 0 && "bad IROrder")(static_cast <bool> (Order >= 0 && "bad IROrder"
) ? void (0) : __assert_fail ("Order >= 0 && \"bad IROrder\""
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1096, __extension__ __PRETTY_FUNCTION__))
;
1097 if (I)
1098 DL = I->getDebugLoc();
1099 }
1100
1101 unsigned getIROrder() const { return IROrder; }
1102 const DebugLoc &getDebugLoc() const { return DL; }
1103};
1104
1105// Define inline functions from the SDValue class.
1106
1107inline SDValue::SDValue(SDNode *node, unsigned resno)
1108 : Node(node), ResNo(resno) {
1109 // Explicitly check for !ResNo to avoid use-after-free, because there are
1110 // callers that use SDValue(N, 0) with a deleted N to indicate successful
1111 // combines.
1112 assert((!Node || !ResNo || ResNo < Node->getNumValues()) &&(static_cast <bool> ((!Node || !ResNo || ResNo < Node
->getNumValues()) && "Invalid result number for the given node!"
) ? void (0) : __assert_fail ("(!Node || !ResNo || ResNo < Node->getNumValues()) && \"Invalid result number for the given node!\""
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1113, __extension__ __PRETTY_FUNCTION__))
1113 "Invalid result number for the given node!")(static_cast <bool> ((!Node || !ResNo || ResNo < Node
->getNumValues()) && "Invalid result number for the given node!"
) ? void (0) : __assert_fail ("(!Node || !ResNo || ResNo < Node->getNumValues()) && \"Invalid result number for the given node!\""
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1113, __extension__ __PRETTY_FUNCTION__))
;
1114 assert(ResNo < -2U && "Cannot use result numbers reserved for DenseMaps.")(static_cast <bool> (ResNo < -2U && "Cannot use result numbers reserved for DenseMaps."
) ? void (0) : __assert_fail ("ResNo < -2U && \"Cannot use result numbers reserved for DenseMaps.\""
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1114, __extension__ __PRETTY_FUNCTION__))
;
1115}
1116
// The following SDValue members simply forward to the underlying SDNode,
// passing ResNo where the query is per-result.

inline unsigned SDValue::getOpcode() const {
  return Node->getOpcode();
}

inline EVT SDValue::getValueType() const {
  return Node->getValueType(ResNo);
}

inline unsigned SDValue::getNumOperands() const {
  return Node->getNumOperands();
}

inline const SDValue &SDValue::getOperand(unsigned i) const {
  return Node->getOperand(i);
}

inline uint64_t SDValue::getConstantOperandVal(unsigned i) const {
  return Node->getConstantOperandVal(i);
}

inline const APInt &SDValue::getConstantOperandAPInt(unsigned i) const {
  return Node->getConstantOperandAPInt(i);
}

inline bool SDValue::isTargetOpcode() const {
  return Node->isTargetOpcode();
}

inline bool SDValue::isTargetMemoryOpcode() const {
  return Node->isTargetMemoryOpcode();
}

inline bool SDValue::isMachineOpcode() const {
  return Node->isMachineOpcode();
}

inline unsigned SDValue::getMachineOpcode() const {
  return Node->getMachineOpcode();
}

inline bool SDValue::isUndef() const {
  return Node->isUndef();
}

inline bool SDValue::use_empty() const {
  return !Node->hasAnyUseOfValue(ResNo);
}

inline bool SDValue::hasOneUse() const {
  return Node->hasNUsesOfValue(1, ResNo);
}

inline const DebugLoc &SDValue::getDebugLoc() const {
  return Node->getDebugLoc();
}

inline void SDValue::dump() const {
  return Node->dump();
}

inline void SDValue::dump(const SelectionDAG *G) const {
  return Node->dump(G);
}

inline void SDValue::dumpr() const {
  return Node->dumpr();
}

inline void SDValue::dumpr(const SelectionDAG *G) const {
  return Node->dumpr(G);
}
1188
1189// Define inline functions from the SDUse class.
1190
1191inline void SDUse::set(const SDValue &V) {
1192 if (Val.getNode()) removeFromList();
1193 Val = V;
1194 if (V.getNode()) V.getNode()->addUse(*this);
1195}
1196
1197inline void SDUse::setInitial(const SDValue &V) {
1198 Val = V;
1199 V.getNode()->addUse(*this);
1200}
1201
1202inline void SDUse::setNode(SDNode *N) {
1203 if (Val.getNode()) removeFromList();
1204 Val.setNode(N);
1205 if (N) N->addUse(*this);
1206}
1207
1208/// This class is used to form a handle around another node that
1209/// is persistent and is updated across invocations of replaceAllUsesWith on its
1210/// operand. This node should be directly created by end-users and not added to
1211/// the AllNodes list.
1212class HandleSDNode : public SDNode {
1213 SDUse Op;
1214
1215public:
1216 explicit HandleSDNode(SDValue X)
1217 : SDNode(ISD::HANDLENODE, 0, DebugLoc(), getSDVTList(MVT::Other)) {
1218 // HandleSDNodes are never inserted into the DAG, so they won't be
1219 // auto-numbered. Use ID 65535 as a sentinel.
1220 PersistentId = 0xffff;
1221
1222 // Manually set up the operand list. This node type is special in that it's
1223 // always stack allocated and SelectionDAG does not manage its operands.
1224 // TODO: This should either (a) not be in the SDNode hierarchy, or (b) not
1225 // be so special.
1226 Op.setUser(this);
1227 Op.setInitial(X);
1228 NumOperands = 1;
1229 OperandList = &Op;
1230 }
1231 ~HandleSDNode();
1232
1233 const SDValue &getValue() const { return Op; }
1234};
1235
// Node carrying the source/destination address spaces of an ADDRSPACECAST.
class AddrSpaceCastSDNode : public SDNode {
private:
  unsigned SrcAddrSpace;  // Address space of the input pointer.
  unsigned DestAddrSpace; // Address space of the result pointer.

public:
  AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl, EVT VT,
                      unsigned SrcAS, unsigned DestAS);

  unsigned getSrcAddressSpace() const { return SrcAddrSpace; }
  unsigned getDestAddressSpace() const { return DestAddrSpace; }

  // Methods to support isa and dyn_cast
  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::ADDRSPACECAST;
  }
};
1252
/// This is an abstract virtual class for memory operations.
class MemSDNode : public SDNode {
private:
  // VT of in-memory value.
  EVT MemoryVT;

protected:
  /// Memory reference information.
  MachineMemOperand *MMO;

public:
  MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTs,
            EVT memvt, MachineMemOperand *MMO);

  bool readMem() const { return MMO->isLoad(); }
  bool writeMem() const { return MMO->isStore(); }

  /// Returns alignment and volatility of the memory access
  Align getOriginalAlign() const { return MMO->getBaseAlign(); }
  Align getAlign() const { return MMO->getAlign(); }
  // FIXME: Remove once transition to getAlign is over.
  unsigned getAlignment() const { return MMO->getAlign().value(); }

  /// Return the SubclassData value, without HasDebugValue. This contains an
  /// encoding of the volatile flag, as well as bits used by subclasses. This
  /// function should only be used to compute a FoldingSetNodeID value.
  /// The HasDebugValue bit is masked out because CSE map needs to match
  /// nodes with debug info with nodes without debug info. Same is about
  /// isDivergent bit.
  unsigned getRawSubclassData() const {
    uint16_t Data;
    // Local union mirrors the node's raw bit storage so individual flags can
    // be cleared without aliasing the real object.
    union {
      char RawSDNodeBits[sizeof(uint16_t)];
      SDNodeBitfields SDNodeBits;
    };
    memcpy(&RawSDNodeBits, &this->RawSDNodeBits, sizeof(this->RawSDNodeBits));
    SDNodeBits.HasDebugValue = 0;
    SDNodeBits.IsDivergent = false;
    memcpy(&Data, &RawSDNodeBits, sizeof(RawSDNodeBits));
    return Data;
  }

  bool isVolatile() const { return MemSDNodeBits.IsVolatile; }
  bool isNonTemporal() const { return MemSDNodeBits.IsNonTemporal; }
  bool isDereferenceable() const { return MemSDNodeBits.IsDereferenceable; }
  bool isInvariant() const { return MemSDNodeBits.IsInvariant; }

  // Returns the offset from the location of the access.
  int64_t getSrcValueOffset() const { return MMO->getOffset(); }

  /// Returns the AA info that describes the dereference.
  AAMDNodes getAAInfo() const { return MMO->getAAInfo(); }

  /// Returns the Ranges that describes the dereference.
  const MDNode *getRanges() const { return MMO->getRanges(); }

  /// Returns the synchronization scope ID for this memory operation.
  SyncScope::ID getSyncScopeID() const { return MMO->getSyncScopeID(); }

  /// Return the atomic ordering requirements for this memory operation. For
  /// cmpxchg atomic operations, return the atomic ordering requirements when
  /// store occurs.
  AtomicOrdering getSuccessOrdering() const {
    return MMO->getSuccessOrdering();
  }

  /// Return a single atomic ordering that is at least as strong as both the
  /// success and failure orderings for an atomic operation. (For operations
  /// other than cmpxchg, this is equivalent to getSuccessOrdering().)
  AtomicOrdering getMergedOrdering() const { return MMO->getMergedOrdering(); }

  /// Return true if the memory operation ordering is Unordered or higher.
  bool isAtomic() const { return MMO->isAtomic(); }

  /// Returns true if the memory operation doesn't imply any ordering
  /// constraints on surrounding memory operations beyond the normal memory
  /// aliasing rules.
  bool isUnordered() const { return MMO->isUnordered(); }

  /// Returns true if the memory operation is neither atomic or volatile.
  bool isSimple() const { return !isAtomic() && !isVolatile(); }

  /// Return the type of the in-memory value.
  EVT getMemoryVT() const { return MemoryVT; }

  /// Return a MachineMemOperand object describing the memory
  /// reference performed by operation.
  MachineMemOperand *getMemOperand() const { return MMO; }

  const MachinePointerInfo &getPointerInfo() const {
    return MMO->getPointerInfo();
  }

  /// Return the address space for the associated pointer
  unsigned getAddressSpace() const {
    return getPointerInfo().getAddrSpace();
  }

  /// Update this MemSDNode's MachineMemOperand information
  /// to reflect the alignment of NewMMO, if it has a greater alignment.
  /// This must only be used when the new alignment applies to all users of
  /// this MachineMemOperand.
  void refineAlignment(const MachineMemOperand *NewMMO) {
    MMO->refineAlignment(NewMMO);
  }

  // Operand 0 is always the chain for memory nodes.
  const SDValue &getChain() const { return getOperand(0); }

  // The base-pointer operand index varies by opcode.
  const SDValue &getBasePtr() const {
    switch (getOpcode()) {
    case ISD::STORE:
    case ISD::VP_STORE:
    case ISD::MSTORE:
    case ISD::VP_SCATTER:
      return getOperand(2);
    case ISD::MGATHER:
    case ISD::MSCATTER:
      return getOperand(3);
    default:
      return getOperand(1);
    }
  }

  // Methods to support isa and dyn_cast
  static bool classof(const SDNode *N) {
    // For some targets, we lower some target intrinsics to a MemIntrinsicNode
    // with either an intrinsic or a target opcode.
    switch (N->getOpcode()) {
    case ISD::LOAD:
    case ISD::STORE:
    case ISD::PREFETCH:
    case ISD::ATOMIC_CMP_SWAP:
    case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
    case ISD::ATOMIC_SWAP:
    case ISD::ATOMIC_LOAD_ADD:
    case ISD::ATOMIC_LOAD_SUB:
    case ISD::ATOMIC_LOAD_AND:
    case ISD::ATOMIC_LOAD_CLR:
    case ISD::ATOMIC_LOAD_OR:
    case ISD::ATOMIC_LOAD_XOR:
    case ISD::ATOMIC_LOAD_NAND:
    case ISD::ATOMIC_LOAD_MIN:
    case ISD::ATOMIC_LOAD_MAX:
    case ISD::ATOMIC_LOAD_UMIN:
    case ISD::ATOMIC_LOAD_UMAX:
    case ISD::ATOMIC_LOAD_FADD:
    case ISD::ATOMIC_LOAD_FSUB:
    case ISD::ATOMIC_LOAD:
    case ISD::ATOMIC_STORE:
    case ISD::MLOAD:
    case ISD::MSTORE:
    case ISD::MGATHER:
    case ISD::MSCATTER:
    case ISD::VP_LOAD:
    case ISD::VP_STORE:
    case ISD::VP_GATHER:
    case ISD::VP_SCATTER:
      return true;
    default:
      return N->isMemIntrinsic() || N->isTargetMemoryOpcode();
    }
  }
};
1416
1417/// This is an SDNode representing atomic operations.
1418class AtomicSDNode : public MemSDNode {
1419public:
1420 AtomicSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTL,
1421 EVT MemVT, MachineMemOperand *MMO)
1422 : MemSDNode(Opc, Order, dl, VTL, MemVT, MMO) {
1423 assert(((Opc != ISD::ATOMIC_LOAD && Opc != ISD::ATOMIC_STORE) ||(static_cast <bool> (((Opc != ISD::ATOMIC_LOAD &&
Opc != ISD::ATOMIC_STORE) || MMO->isAtomic()) && "then why are we using an AtomicSDNode?"
) ? void (0) : __assert_fail ("((Opc != ISD::ATOMIC_LOAD && Opc != ISD::ATOMIC_STORE) || MMO->isAtomic()) && \"then why are we using an AtomicSDNode?\""
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1424, __extension__ __PRETTY_FUNCTION__))
1424 MMO->isAtomic()) && "then why are we using an AtomicSDNode?")(static_cast <bool> (((Opc != ISD::ATOMIC_LOAD &&
Opc != ISD::ATOMIC_STORE) || MMO->isAtomic()) && "then why are we using an AtomicSDNode?"
) ? void (0) : __assert_fail ("((Opc != ISD::ATOMIC_LOAD && Opc != ISD::ATOMIC_STORE) || MMO->isAtomic()) && \"then why are we using an AtomicSDNode?\""
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1424, __extension__ __PRETTY_FUNCTION__))
;
1425 }
1426
1427 const SDValue &getBasePtr() const { return getOperand(1); }
1428 const SDValue &getVal() const { return getOperand(2); }
1429
1430 /// Returns true if this SDNode represents cmpxchg atomic operation, false
1431 /// otherwise.
1432 bool isCompareAndSwap() const {
1433 unsigned Op = getOpcode();
1434 return Op == ISD::ATOMIC_CMP_SWAP ||
1435 Op == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS;
1436 }
1437
1438 /// For cmpxchg atomic operations, return the atomic ordering requirements
1439 /// when store does not occur.
1440 AtomicOrdering getFailureOrdering() const {
1441 assert(isCompareAndSwap() && "Must be cmpxchg operation")(static_cast <bool> (isCompareAndSwap() && "Must be cmpxchg operation"
) ? void (0) : __assert_fail ("isCompareAndSwap() && \"Must be cmpxchg operation\""
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1441, __extension__ __PRETTY_FUNCTION__))
;
1442 return MMO->getFailureOrdering();
1443 }
1444
1445 // Methods to support isa and dyn_cast
1446 static bool classof(const SDNode *N) {
1447 return N->getOpcode() == ISD::ATOMIC_CMP_SWAP ||
1448 N->getOpcode() == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS ||
1449 N->getOpcode() == ISD::ATOMIC_SWAP ||
1450 N->getOpcode() == ISD::ATOMIC_LOAD_ADD ||
1451 N->getOpcode() == ISD::ATOMIC_LOAD_SUB ||
1452 N->getOpcode() == ISD::ATOMIC_LOAD_AND ||
1453 N->getOpcode() == ISD::ATOMIC_LOAD_CLR ||
1454 N->getOpcode() == ISD::ATOMIC_LOAD_OR ||
1455 N->getOpcode() == ISD::ATOMIC_LOAD_XOR ||
1456 N->getOpcode() == ISD::ATOMIC_LOAD_NAND ||
1457 N->getOpcode() == ISD::ATOMIC_LOAD_MIN ||
1458 N->getOpcode() == ISD::ATOMIC_LOAD_MAX ||
1459 N->getOpcode() == ISD::ATOMIC_LOAD_UMIN ||
1460 N->getOpcode() == ISD::ATOMIC_LOAD_UMAX ||
1461 N->getOpcode() == ISD::ATOMIC_LOAD_FADD ||
1462 N->getOpcode() == ISD::ATOMIC_LOAD_FSUB ||
1463 N->getOpcode() == ISD::ATOMIC_LOAD ||
1464 N->getOpcode() == ISD::ATOMIC_STORE;
1465 }
1466};
1467
1468/// This SDNode is used for target intrinsics that touch
1469/// memory and need an associated MachineMemOperand. Its opcode may be
1470/// INTRINSIC_VOID, INTRINSIC_W_CHAIN, PREFETCH, or a target-specific opcode
1471/// with a value not less than FIRST_TARGET_MEMORY_OPCODE.
1472class MemIntrinsicSDNode : public MemSDNode {
1473public:
1474 MemIntrinsicSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl,
1475 SDVTList VTs, EVT MemoryVT, MachineMemOperand *MMO)
1476 : MemSDNode(Opc, Order, dl, VTs, MemoryVT, MMO) {
1477 SDNodeBits.IsMemIntrinsic = true;
1478 }
1479
1480 // Methods to support isa and dyn_cast
1481 static bool classof(const SDNode *N) {
1482 // We lower some target intrinsics to their target opcode
1483 // early a node with a target opcode can be of this class
1484 return N->isMemIntrinsic() ||
1485 N->getOpcode() == ISD::PREFETCH ||
1486 N->isTargetMemoryOpcode();
1487 }
1488};
1489
1490/// This SDNode is used to implement the code generator
1491/// support for the llvm IR shufflevector instruction. It combines elements
1492/// from two input vectors into a new input vector, with the selection and
1493/// ordering of elements determined by an array of integers, referred to as
1494/// the shuffle mask. For input vectors of width N, mask indices of 0..N-1
1495/// refer to elements from the LHS input, and indices from N to 2N-1 the RHS.
1496/// An index of -1 is treated as undef, such that the code generator may put
1497/// any value in the corresponding element of the result.
1498class ShuffleVectorSDNode : public SDNode {
1499 // The memory for Mask is owned by the SelectionDAG's OperandAllocator, and
1500 // is freed when the SelectionDAG object is destroyed.
1501 const int *Mask;
1502
1503protected:
1504 friend class SelectionDAG;
1505
1506 ShuffleVectorSDNode(EVT VT, unsigned Order, const DebugLoc &dl, const int *M)
1507 : SDNode(ISD::VECTOR_SHUFFLE, Order, dl, getSDVTList(VT)), Mask(M) {}
1508
1509public:
1510 ArrayRef<int> getMask() const {
1511 EVT VT = getValueType(0);
1512 return makeArrayRef(Mask, VT.getVectorNumElements());
1513 }
1514
1515 int getMaskElt(unsigned Idx) const {
1516 assert(Idx < getValueType(0).getVectorNumElements() && "Idx out of range!")(static_cast <bool> (Idx < getValueType(0).getVectorNumElements
() && "Idx out of range!") ? void (0) : __assert_fail
("Idx < getValueType(0).getVectorNumElements() && \"Idx out of range!\""
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1516, __extension__ __PRETTY_FUNCTION__))
;
1517 return Mask[Idx];
1518 }
1519
1520 bool isSplat() const { return isSplatMask(Mask, getValueType(0)); }
1521
1522 int getSplatIndex() const {
1523 assert(isSplat() && "Cannot get splat index for non-splat!")(static_cast <bool> (isSplat() && "Cannot get splat index for non-splat!"
) ? void (0) : __assert_fail ("isSplat() && \"Cannot get splat index for non-splat!\""
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1523, __extension__ __PRETTY_FUNCTION__))
;
1524 EVT VT = getValueType(0);
1525 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
1526 if (Mask[i] >= 0)
1527 return Mask[i];
1528
1529 // We can choose any index value here and be correct because all elements
1530 // are undefined. Return 0 for better potential for callers to simplify.
1531 return 0;
1532 }
1533
1534 static bool isSplatMask(const int *Mask, EVT VT);
1535
1536 /// Change values in a shuffle permute mask assuming
1537 /// the two vector operands have swapped position.
1538 static void commuteMask(MutableArrayRef<int> Mask) {
1539 unsigned NumElems = Mask.size();
1540 for (unsigned i = 0; i != NumElems; ++i) {
1541 int idx = Mask[i];
1542 if (idx < 0)
1543 continue;
1544 else if (idx < (int)NumElems)
1545 Mask[i] = idx + NumElems;
1546 else
1547 Mask[i] = idx - NumElems;
1548 }
1549 }
1550
1551 static bool classof(const SDNode *N) {
1552 return N->getOpcode() == ISD::VECTOR_SHUFFLE;
1553 }
1554};
1555
1556class ConstantSDNode : public SDNode {
1557 friend class SelectionDAG;
1558
1559 const ConstantInt *Value;
1560
1561 ConstantSDNode(bool isTarget, bool isOpaque, const ConstantInt *val, EVT VT)
1562 : SDNode(isTarget ? ISD::TargetConstant : ISD::Constant, 0, DebugLoc(),
1563 getSDVTList(VT)),
1564 Value(val) {
1565 ConstantSDNodeBits.IsOpaque = isOpaque;
1566 }
1567
1568public:
1569 const ConstantInt *getConstantIntValue() const { return Value; }
1570 const APInt &getAPIntValue() const { return Value->getValue(); }
1571 uint64_t getZExtValue() const { return Value->getZExtValue(); }
1572 int64_t getSExtValue() const { return Value->getSExtValue(); }
1573 uint64_t getLimitedValue(uint64_t Limit = UINT64_MAX(18446744073709551615UL)) {
1574 return Value->getLimitedValue(Limit);
1575 }
1576 MaybeAlign getMaybeAlignValue() const { return Value->getMaybeAlignValue(); }
1577 Align getAlignValue() const { return Value->getAlignValue(); }
1578
1579 bool isOne() const { return Value->isOne(); }
1580 bool isZero() const { return Value->isZero(); }
1581 // NOTE: This is soft-deprecated. Please use `isZero()` instead.
1582 bool isNullValue() const { return isZero(); }
1583 bool isAllOnes() const { return Value->isMinusOne(); }
1584 // NOTE: This is soft-deprecated. Please use `isAllOnes()` instead.
1585 bool isAllOnesValue() const { return isAllOnes(); }
1586 bool isMaxSignedValue() const { return Value->isMaxValue(true); }
1587 bool isMinSignedValue() const { return Value->isMinValue(true); }
1588
1589 bool isOpaque() const { return ConstantSDNodeBits.IsOpaque; }
1590
1591 static bool classof(const SDNode *N) {
1592 return N->getOpcode() == ISD::Constant ||
1593 N->getOpcode() == ISD::TargetConstant;
1594 }
1595};
1596
// Out-of-line definitions deferred until ConstantSDNode is complete.
// Both cast<> calls assert if the operand is not a ConstantSDNode.
uint64_t SDNode::getConstantOperandVal(unsigned Num) const {
  return cast<ConstantSDNode>(getOperand(Num))->getZExtValue();
}

const APInt &SDNode::getConstantOperandAPInt(unsigned Num) const {
  return cast<ConstantSDNode>(getOperand(Num))->getAPIntValue();
}
1604
// Node wrapping a ConstantFP; opcode is ConstantFP or TargetConstantFP.
class ConstantFPSDNode : public SDNode {
  friend class SelectionDAG;

  const ConstantFP *Value;

  ConstantFPSDNode(bool isTarget, const ConstantFP *val, EVT VT)
      : SDNode(isTarget ? ISD::TargetConstantFP : ISD::ConstantFP, 0,
               DebugLoc(), getSDVTList(VT)),
        Value(val) {}

public:
  const APFloat& getValueAPF() const { return Value->getValueAPF(); }
  const ConstantFP *getConstantFPValue() const { return Value; }

  /// Return true if the value is positive or negative zero.
  bool isZero() const { return Value->isZero(); }

  /// Return true if the value is a NaN.
  bool isNaN() const { return Value->isNaN(); }

  /// Return true if the value is an infinity
  bool isInfinity() const { return Value->isInfinity(); }

  /// Return true if the value is negative.
  bool isNegative() const { return Value->isNegative(); }

  /// We don't rely on operator== working on double values, as
  /// it returns true for things that are clearly not equal, like -0.0 and 0.0.
  /// As such, this method can be used to do an exact bit-for-bit comparison of
  /// two floating point values.

  /// We leave the version with the double argument here because it's just so
  /// convenient to write "2.0" and the like. Without this function we'd
  /// have to duplicate its logic everywhere it's called.
  bool isExactlyValue(double V) const {
    return Value->getValueAPF().isExactlyValue(V);
  }
  bool isExactlyValue(const APFloat& V) const;

  // Return true if Val can be represented exactly in VT's FP format.
  static bool isValueValidForType(EVT VT, const APFloat& Val);

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::ConstantFP ||
           N->getOpcode() == ISD::TargetConstantFP;
  }
};
1651
/// Returns true if \p V is a constant integer zero.
bool isNullConstant(SDValue V);

/// Returns true if \p V is an FP constant with a value of positive zero.
bool isNullFPConstant(SDValue V);

/// Returns true if \p V is an integer constant with all bits set.
bool isAllOnesConstant(SDValue V);

/// Returns true if \p V is a constant integer one.
bool isOneConstant(SDValue V);

/// Return the non-bitcasted source operand of \p V if it exists.
/// If \p V is not a bitcasted value, it is returned as-is.
SDValue peekThroughBitcasts(SDValue V);

/// Return the non-bitcasted and one-use source operand of \p V if it exists.
/// If \p V is not a bitcasted one-use value, it is returned as-is.
SDValue peekThroughOneUseBitcasts(SDValue V);

/// Return the non-extracted vector source operand of \p V if it exists.
/// If \p V is not an extracted subvector, it is returned as-is.
SDValue peekThroughExtractSubvectors(SDValue V);

/// Returns true if \p V is a bitwise not operation. Assumes that an all ones
/// constant is canonicalized to be operand 1.
bool isBitwiseNot(SDValue V, bool AllowUndefs = false);

/// Returns the SDNode if it is a constant splat BuildVector or constant int.
ConstantSDNode *isConstOrConstSplat(SDValue N, bool AllowUndefs = false,
                                    bool AllowTruncation = false);

/// Returns the SDNode if it is a demanded constant splat BuildVector or
/// constant int.
ConstantSDNode *isConstOrConstSplat(SDValue N, const APInt &DemandedElts,
                                    bool AllowUndefs = false,
                                    bool AllowTruncation = false);

/// Returns the SDNode if it is a constant splat BuildVector or constant float.
ConstantFPSDNode *isConstOrConstSplatFP(SDValue N, bool AllowUndefs = false);

/// Returns the SDNode if it is a demanded constant splat BuildVector or
/// constant float.
ConstantFPSDNode *isConstOrConstSplatFP(SDValue N, const APInt &DemandedElts,
                                        bool AllowUndefs = false);

/// Return true if the value is a constant 0 integer or a splatted vector of
/// a constant 0 integer (with no undefs by default).
/// Build vector implicit truncation is not an issue for null values.
bool isNullOrNullSplat(SDValue V, bool AllowUndefs = false);

/// Return true if the value is a constant 1 integer or a splatted vector of a
/// constant 1 integer (with no undefs).
/// Does not permit build vector implicit truncation.
bool isOneOrOneSplat(SDValue V, bool AllowUndefs = false);

/// Return true if the value is a constant -1 integer or a splatted vector of a
/// constant -1 integer (with no undefs).
/// Does not permit build vector implicit truncation.
bool isAllOnesOrAllOnesSplat(SDValue V, bool AllowUndefs = false);
1712
/// Return true if \p V is either an integer or FP constant.
1714inline bool isIntOrFPConstant(SDValue V) {
1715 return isa<ConstantSDNode>(V) || isa<ConstantFPSDNode>(V);
1716}
1717
1718class GlobalAddressSDNode : public SDNode {
1719 friend class SelectionDAG;
1720
1721 const GlobalValue *TheGlobal;
1722 int64_t Offset;
1723 unsigned TargetFlags;
1724
1725 GlobalAddressSDNode(unsigned Opc, unsigned Order, const DebugLoc &DL,
1726 const GlobalValue *GA, EVT VT, int64_t o,
1727 unsigned TF);
1728
1729public:
1730 const GlobalValue *getGlobal() const { return TheGlobal; }
1731 int64_t getOffset() const { return Offset; }
1732 unsigned getTargetFlags() const { return TargetFlags; }
1733 // Return the address space this GlobalAddress belongs to.
1734 unsigned getAddressSpace() const;
1735
1736 static bool classof(const SDNode *N) {
1737 return N->getOpcode() == ISD::GlobalAddress ||
1738 N->getOpcode() == ISD::TargetGlobalAddress ||
1739 N->getOpcode() == ISD::GlobalTLSAddress ||
1740 N->getOpcode() == ISD::TargetGlobalTLSAddress;
1741 }
1742};
1743
1744class FrameIndexSDNode : public SDNode {
1745 friend class SelectionDAG;
1746
1747 int FI;
1748
1749 FrameIndexSDNode(int fi, EVT VT, bool isTarg)
1750 : SDNode(isTarg ? ISD::TargetFrameIndex : ISD::FrameIndex,
1751 0, DebugLoc(), getSDVTList(VT)), FI(fi) {
1752 }
1753
1754public:
1755 int getIndex() const { return FI; }
1756
1757 static bool classof(const SDNode *N) {
1758 return N->getOpcode() == ISD::FrameIndex ||
1759 N->getOpcode() == ISD::TargetFrameIndex;
1760 }
1761};
1762
1763/// This SDNode is used for LIFETIME_START/LIFETIME_END values, which indicate
/// the offset and size that are started/ended in the underlying FrameIndex.
class LifetimeSDNode : public SDNode {
  friend class SelectionDAG;
  int64_t Size;
  int64_t Offset; // -1 if offset is unknown.

  LifetimeSDNode(unsigned Opcode, unsigned Order, const DebugLoc &dl,
                 SDVTList VTs, int64_t Size, int64_t Offset)
      : SDNode(Opcode, Order, dl, VTs), Size(Size), Offset(Offset) {}
public:
  /// Frame index of the object whose lifetime is delimited; operand 1 is
  /// always a FrameIndexSDNode.
  int64_t getFrameIndex() const {
    return cast<FrameIndexSDNode>(getOperand(1))->getIndex();
  }

  /// True when the byte range within the frame object is known
  /// (Offset is kept as -1 when it is not).
  bool hasOffset() const { return Offset >= 0; }
  /// Byte offset of the live range; only valid when hasOffset().
  int64_t getOffset() const {
    assert(hasOffset() && "offset is unknown");
    return Offset;
  }
  /// Byte size of the live range; only valid when hasOffset().
  int64_t getSize() const {
    assert(hasOffset() && "offset is unknown");
    return Size;
  }

  // Methods to support isa and dyn_cast
  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::LIFETIME_START ||
           N->getOpcode() == ISD::LIFETIME_END;
  }
};
1794
1795/// This SDNode is used for PSEUDO_PROBE values, which are the function guid and
1796/// the index of the basic block being probed. A pseudo probe serves as a place
1797/// holder and will be removed at the end of compilation. It does not have any
1798/// operand because we do not want the instruction selection to deal with any.
1799class PseudoProbeSDNode : public SDNode {
1800 friend class SelectionDAG;
1801 uint64_t Guid;
1802 uint64_t Index;
1803 uint32_t Attributes;
1804
1805 PseudoProbeSDNode(unsigned Opcode, unsigned Order, const DebugLoc &Dl,
1806 SDVTList VTs, uint64_t Guid, uint64_t Index, uint32_t Attr)
1807 : SDNode(Opcode, Order, Dl, VTs), Guid(Guid), Index(Index),
1808 Attributes(Attr) {}
1809
1810public:
1811 uint64_t getGuid() const { return Guid; }
1812 uint64_t getIndex() const { return Index; }
1813 uint32_t getAttributes() const { return Attributes; }
1814
1815 // Methods to support isa and dyn_cast
1816 static bool classof(const SDNode *N) {
1817 return N->getOpcode() == ISD::PSEUDO_PROBE;
1818 }
1819};
1820
1821class JumpTableSDNode : public SDNode {
1822 friend class SelectionDAG;
1823
1824 int JTI;
1825 unsigned TargetFlags;
1826
1827 JumpTableSDNode(int jti, EVT VT, bool isTarg, unsigned TF)
1828 : SDNode(isTarg ? ISD::TargetJumpTable : ISD::JumpTable,
1829 0, DebugLoc(), getSDVTList(VT)), JTI(jti), TargetFlags(TF) {
1830 }
1831
1832public:
1833 int getIndex() const { return JTI; }
1834 unsigned getTargetFlags() const { return TargetFlags; }
1835
1836 static bool classof(const SDNode *N) {
1837 return N->getOpcode() == ISD::JumpTable ||
1838 N->getOpcode() == ISD::TargetJumpTable;
1839 }
1840};
1841
1842class ConstantPoolSDNode : public SDNode {
1843 friend class SelectionDAG;
1844
1845 union {
1846 const Constant *ConstVal;
1847 MachineConstantPoolValue *MachineCPVal;
1848 } Val;
1849 int Offset; // It's a MachineConstantPoolValue if top bit is set.
1850 Align Alignment; // Minimum alignment requirement of CP.
1851 unsigned TargetFlags;
1852
1853 ConstantPoolSDNode(bool isTarget, const Constant *c, EVT VT, int o,
1854 Align Alignment, unsigned TF)
1855 : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, 0,
1856 DebugLoc(), getSDVTList(VT)),
1857 Offset(o), Alignment(Alignment), TargetFlags(TF) {
1858 assert(Offset >= 0 && "Offset is too large")(static_cast <bool> (Offset >= 0 && "Offset is too large"
) ? void (0) : __assert_fail ("Offset >= 0 && \"Offset is too large\""
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1858, __extension__ __PRETTY_FUNCTION__))
;
1859 Val.ConstVal = c;
1860 }
1861
1862 ConstantPoolSDNode(bool isTarget, MachineConstantPoolValue *v, EVT VT, int o,
1863 Align Alignment, unsigned TF)
1864 : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, 0,
1865 DebugLoc(), getSDVTList(VT)),
1866 Offset(o), Alignment(Alignment), TargetFlags(TF) {
1867 assert(Offset >= 0 && "Offset is too large")(static_cast <bool> (Offset >= 0 && "Offset is too large"
) ? void (0) : __assert_fail ("Offset >= 0 && \"Offset is too large\""
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1867, __extension__ __PRETTY_FUNCTION__))
;
1868 Val.MachineCPVal = v;
1869 Offset |= 1 << (sizeof(unsigned)*CHAR_BIT8-1);
1870 }
1871
1872public:
1873 bool isMachineConstantPoolEntry() const {
1874 return Offset < 0;
1875 }
1876
1877 const Constant *getConstVal() const {
1878 assert(!isMachineConstantPoolEntry() && "Wrong constantpool type")(static_cast <bool> (!isMachineConstantPoolEntry() &&
"Wrong constantpool type") ? void (0) : __assert_fail ("!isMachineConstantPoolEntry() && \"Wrong constantpool type\""
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1878, __extension__ __PRETTY_FUNCTION__))
;
1879 return Val.ConstVal;
1880 }
1881
1882 MachineConstantPoolValue *getMachineCPVal() const {
1883 assert(isMachineConstantPoolEntry() && "Wrong constantpool type")(static_cast <bool> (isMachineConstantPoolEntry() &&
"Wrong constantpool type") ? void (0) : __assert_fail ("isMachineConstantPoolEntry() && \"Wrong constantpool type\""
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1883, __extension__ __PRETTY_FUNCTION__))
;
1884 return Val.MachineCPVal;
1885 }
1886
1887 int getOffset() const {
1888 return Offset & ~(1 << (sizeof(unsigned)*CHAR_BIT8-1));
1889 }
1890
1891 // Return the alignment of this constant pool object, which is either 0 (for
1892 // default alignment) or the desired value.
1893 Align getAlign() const { return Alignment; }
1894 unsigned getTargetFlags() const { return TargetFlags; }
1895
1896 Type *getType() const;
1897
1898 static bool classof(const SDNode *N) {
1899 return N->getOpcode() == ISD::ConstantPool ||
1900 N->getOpcode() == ISD::TargetConstantPool;
1901 }
1902};
1903
1904/// Completely target-dependent object reference.
1905class TargetIndexSDNode : public SDNode {
1906 friend class SelectionDAG;
1907
1908 unsigned TargetFlags;
1909 int Index;
1910 int64_t Offset;
1911
1912public:
1913 TargetIndexSDNode(int Idx, EVT VT, int64_t Ofs, unsigned TF)
1914 : SDNode(ISD::TargetIndex, 0, DebugLoc(), getSDVTList(VT)),
1915 TargetFlags(TF), Index(Idx), Offset(Ofs) {}
1916
1917 unsigned getTargetFlags() const { return TargetFlags; }
1918 int getIndex() const { return Index; }
1919 int64_t getOffset() const { return Offset; }
1920
1921 static bool classof(const SDNode *N) {
1922 return N->getOpcode() == ISD::TargetIndex;
1923 }
1924};
1925
1926class BasicBlockSDNode : public SDNode {
1927 friend class SelectionDAG;
1928
1929 MachineBasicBlock *MBB;
1930
1931 /// Debug info is meaningful and potentially useful here, but we create
1932 /// blocks out of order when they're jumped to, which makes it a bit
1933 /// harder. Let's see if we need it first.
1934 explicit BasicBlockSDNode(MachineBasicBlock *mbb)
1935 : SDNode(ISD::BasicBlock, 0, DebugLoc(), getSDVTList(MVT::Other)), MBB(mbb)
1936 {}
1937
1938public:
1939 MachineBasicBlock *getBasicBlock() const { return MBB; }
1940
1941 static bool classof(const SDNode *N) {
1942 return N->getOpcode() == ISD::BasicBlock;
1943 }
1944};
1945
1946/// A "pseudo-class" with methods for operating on BUILD_VECTORs.
class BuildVectorSDNode : public SDNode {
public:
  // These are constructed as SDNodes and then cast to BuildVectorSDNodes.
  explicit BuildVectorSDNode() = delete;

  /// Check if this is a constant splat, and if so, find the
  /// smallest element size that splats the vector. If MinSplatBits is
  /// nonzero, the element size must be at least that large. Note that the
  /// splat element may be the entire vector (i.e., a one element vector).
  /// Returns the splat element value in SplatValue. Any undefined bits in
  /// that value are zero, and the corresponding bits in the SplatUndef mask
  /// are set. The SplatBitSize value is set to the splat element size in
  /// bits. HasAnyUndefs is set to true if any bits in the vector are
  /// undefined. isBigEndian describes the endianness of the target.
  bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef,
                       unsigned &SplatBitSize, bool &HasAnyUndefs,
                       unsigned MinSplatBits = 0,
                       bool isBigEndian = false) const;

  /// Returns the demanded splatted value or a null value if this is not a
  /// splat.
  ///
  /// The DemandedElts mask indicates the elements that must be in the splat.
  /// If passed a non-null UndefElements bitvector, it will resize it to match
  /// the vector width and set the bits where elements are undef.
  SDValue getSplatValue(const APInt &DemandedElts,
                        BitVector *UndefElements = nullptr) const;

  /// Returns the splatted value or a null value if this is not a splat.
  ///
  /// If passed a non-null UndefElements bitvector, it will resize it to match
  /// the vector width and set the bits where elements are undef.
  SDValue getSplatValue(BitVector *UndefElements = nullptr) const;

  /// Find the shortest repeating sequence of values in the build vector.
  ///
  /// e.g. { u, X, u, X, u, u, X, u } -> { X }
  ///      { X, Y, u, Y, u, u, X, u } -> { X, Y }
  ///
  /// Currently this must be a power-of-2 build vector.
  /// The DemandedElts mask indicates the elements that must be present,
  /// undemanded elements in Sequence may be null (SDValue()). If passed a
  /// non-null UndefElements bitvector, it will resize it to match the original
  /// vector width and set the bits where elements are undef. If result is
  /// false, Sequence will be empty.
  bool getRepeatedSequence(const APInt &DemandedElts,
                           SmallVectorImpl<SDValue> &Sequence,
                           BitVector *UndefElements = nullptr) const;

  /// Find the shortest repeating sequence of values in the build vector.
  ///
  /// e.g. { u, X, u, X, u, u, X, u } -> { X }
  ///      { X, Y, u, Y, u, u, X, u } -> { X, Y }
  ///
  /// Currently this must be a power-of-2 build vector.
  /// If passed a non-null UndefElements bitvector, it will resize it to match
  /// the original vector width and set the bits where elements are undef.
  /// If result is false, Sequence will be empty.
  bool getRepeatedSequence(SmallVectorImpl<SDValue> &Sequence,
                           BitVector *UndefElements = nullptr) const;

  /// Returns the demanded splatted constant or null if this is not a constant
  /// splat.
  ///
  /// The DemandedElts mask indicates the elements that must be in the splat.
  /// If passed a non-null UndefElements bitvector, it will resize it to match
  /// the vector width and set the bits where elements are undef.
  ConstantSDNode *
  getConstantSplatNode(const APInt &DemandedElts,
                       BitVector *UndefElements = nullptr) const;

  /// Returns the splatted constant or null if this is not a constant
  /// splat.
  ///
  /// If passed a non-null UndefElements bitvector, it will resize it to match
  /// the vector width and set the bits where elements are undef.
  ConstantSDNode *
  getConstantSplatNode(BitVector *UndefElements = nullptr) const;

  /// Returns the demanded splatted constant FP or null if this is not a
  /// constant FP splat.
  ///
  /// The DemandedElts mask indicates the elements that must be in the splat.
  /// If passed a non-null UndefElements bitvector, it will resize it to match
  /// the vector width and set the bits where elements are undef.
  ConstantFPSDNode *
  getConstantFPSplatNode(const APInt &DemandedElts,
                         BitVector *UndefElements = nullptr) const;

  /// Returns the splatted constant FP or null if this is not a constant
  /// FP splat.
  ///
  /// If passed a non-null UndefElements bitvector, it will resize it to match
  /// the vector width and set the bits where elements are undef.
  ConstantFPSDNode *
  getConstantFPSplatNode(BitVector *UndefElements = nullptr) const;

  /// If this is a constant FP splat and the splatted constant FP is an
  /// exact power or 2, return the log base 2 integer value.  Otherwise,
  /// return -1.
  ///
  /// The BitWidth specifies the necessary bit precision.
  int32_t getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements,
                                          uint32_t BitWidth) const;

  /// Extract the raw bit data from a build vector of Undef, Constant or
  /// ConstantFP node elements. Each raw bit element will be \p
  /// DstEltSizeInBits wide, undef elements are treated as zero, and entirely
  /// undefined elements are flagged in \p UndefElements.
  bool getConstantRawBits(bool IsLittleEndian, unsigned DstEltSizeInBits,
                          SmallVectorImpl<APInt> &RawBitElements,
                          BitVector &UndefElements) const;

  /// Returns true if every operand is either a Constant, ConstantFP or undef.
  bool isConstant() const;

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::BUILD_VECTOR;
  }
};
2066
2067/// An SDNode that holds an arbitrary LLVM IR Value. This is
2068/// used when the SelectionDAG needs to make a simple reference to something
2069/// in the LLVM IR representation.
2070///
2071class SrcValueSDNode : public SDNode {
2072 friend class SelectionDAG;
2073
2074 const Value *V;
2075
2076 /// Create a SrcValue for a general value.
2077 explicit SrcValueSDNode(const Value *v)
2078 : SDNode(ISD::SRCVALUE, 0, DebugLoc(), getSDVTList(MVT::Other)), V(v) {}
2079
2080public:
2081 /// Return the contained Value.
2082 const Value *getValue() const { return V; }
2083
2084 static bool classof(const SDNode *N) {
2085 return N->getOpcode() == ISD::SRCVALUE;
2086 }
2087};
2088
2089class MDNodeSDNode : public SDNode {
2090 friend class SelectionDAG;
2091
2092 const MDNode *MD;
2093
2094 explicit MDNodeSDNode(const MDNode *md)
2095 : SDNode(ISD::MDNODE_SDNODE, 0, DebugLoc(), getSDVTList(MVT::Other)), MD(md)
2096 {}
2097
2098public:
2099 const MDNode *getMD() const { return MD; }
2100
2101 static bool classof(const SDNode *N) {
2102 return N->getOpcode() == ISD::MDNODE_SDNODE;
2103 }
2104};
2105
2106class RegisterSDNode : public SDNode {
2107 friend class SelectionDAG;
2108
2109 Register Reg;
2110
2111 RegisterSDNode(Register reg, EVT VT)
2112 : SDNode(ISD::Register, 0, DebugLoc(), getSDVTList(VT)), Reg(reg) {}
2113
2114public:
2115 Register getReg() const { return Reg; }
2116
2117 static bool classof(const SDNode *N) {
2118 return N->getOpcode() == ISD::Register;
2119 }
2120};
2121
2122class RegisterMaskSDNode : public SDNode {
2123 friend class SelectionDAG;
2124
2125 // The memory for RegMask is not owned by the node.
2126 const uint32_t *RegMask;
2127
2128 RegisterMaskSDNode(const uint32_t *mask)
2129 : SDNode(ISD::RegisterMask, 0, DebugLoc(), getSDVTList(MVT::Untyped)),
2130 RegMask(mask) {}
2131
2132public:
2133 const uint32_t *getRegMask() const { return RegMask; }
2134
2135 static bool classof(const SDNode *N) {
2136 return N->getOpcode() == ISD::RegisterMask;
2137 }
2138};
2139
2140class BlockAddressSDNode : public SDNode {
2141 friend class SelectionDAG;
2142
2143 const BlockAddress *BA;
2144 int64_t Offset;
2145 unsigned TargetFlags;
2146
2147 BlockAddressSDNode(unsigned NodeTy, EVT VT, const BlockAddress *ba,
2148 int64_t o, unsigned Flags)
2149 : SDNode(NodeTy, 0, DebugLoc(), getSDVTList(VT)),
2150 BA(ba), Offset(o), TargetFlags(Flags) {}
2151
2152public:
2153 const BlockAddress *getBlockAddress() const { return BA; }
2154 int64_t getOffset() const { return Offset; }
2155 unsigned getTargetFlags() const { return TargetFlags; }
2156
2157 static bool classof(const SDNode *N) {
2158 return N->getOpcode() == ISD::BlockAddress ||
2159 N->getOpcode() == ISD::TargetBlockAddress;
2160 }
2161};
2162
class LabelSDNode : public SDNode {
  friend class SelectionDAG;

  MCSymbol *Label;

  LabelSDNode(unsigned Opcode, unsigned Order, const DebugLoc &dl, MCSymbol *L)
      : SDNode(Opcode, Order, dl, getSDVTList(MVT::Other)), Label(L) {
    // Only the opcodes accepted by classof (EH_LABEL / ANNOTATION_LABEL)
    // may be used to construct a LabelSDNode.
    assert(LabelSDNode::classof(this) && "not a label opcode");
  }

public:
  /// The MC symbol emitted at this label's position.
  MCSymbol *getLabel() const { return Label; }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::EH_LABEL ||
           N->getOpcode() == ISD::ANNOTATION_LABEL;
  }
};
2181
2182class ExternalSymbolSDNode : public SDNode {
2183 friend class SelectionDAG;
2184
2185 const char *Symbol;
2186 unsigned TargetFlags;
2187
2188 ExternalSymbolSDNode(bool isTarget, const char *Sym, unsigned TF, EVT VT)
2189 : SDNode(isTarget ? ISD::TargetExternalSymbol : ISD::ExternalSymbol, 0,
2190 DebugLoc(), getSDVTList(VT)),
2191 Symbol(Sym), TargetFlags(TF) {}
2192
2193public:
2194 const char *getSymbol() const { return Symbol; }
2195 unsigned getTargetFlags() const { return TargetFlags; }
2196
2197 static bool classof(const SDNode *N) {
2198 return N->getOpcode() == ISD::ExternalSymbol ||
2199 N->getOpcode() == ISD::TargetExternalSymbol;
2200 }
2201};
2202
2203class MCSymbolSDNode : public SDNode {
2204 friend class SelectionDAG;
2205
2206 MCSymbol *Symbol;
2207
2208 MCSymbolSDNode(MCSymbol *Symbol, EVT VT)
2209 : SDNode(ISD::MCSymbol, 0, DebugLoc(), getSDVTList(VT)), Symbol(Symbol) {}
2210
2211public:
2212 MCSymbol *getMCSymbol() const { return Symbol; }
2213
2214 static bool classof(const SDNode *N) {
2215 return N->getOpcode() == ISD::MCSymbol;
2216 }
2217};
2218
2219class CondCodeSDNode : public SDNode {
2220 friend class SelectionDAG;
2221
2222 ISD::CondCode Condition;
2223
2224 explicit CondCodeSDNode(ISD::CondCode Cond)
2225 : SDNode(ISD::CONDCODE, 0, DebugLoc(), getSDVTList(MVT::Other)),
2226 Condition(Cond) {}
2227
2228public:
2229 ISD::CondCode get() const { return Condition; }
2230
2231 static bool classof(const SDNode *N) {
2232 return N->getOpcode() == ISD::CONDCODE;
2233 }
2234};
2235
2236/// This class is used to represent EVT's, which are used
2237/// to parameterize some operations.
2238class VTSDNode : public SDNode {
2239 friend class SelectionDAG;
2240
2241 EVT ValueType;
2242
2243 explicit VTSDNode(EVT VT)
2244 : SDNode(ISD::VALUETYPE, 0, DebugLoc(), getSDVTList(MVT::Other)),
2245 ValueType(VT) {}
2246
2247public:
2248 EVT getVT() const { return ValueType; }
2249
2250 static bool classof(const SDNode *N) {
2251 return N->getOpcode() == ISD::VALUETYPE;
2252 }
2253};
2254
2255/// Base class for LoadSDNode and StoreSDNode
class LSBaseSDNode : public MemSDNode {
public:
  LSBaseSDNode(ISD::NodeType NodeTy, unsigned Order, const DebugLoc &dl,
               SDVTList VTs, ISD::MemIndexedMode AM, EVT MemVT,
               MachineMemOperand *MMO)
      : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
    LSBaseSDNodeBits.AddressingMode = AM;
    // AM is stored in a narrow bitfield; verify it round-trips.
    assert(getAddressingMode() == AM && "Value truncated");
  }

  // Loads keep the offset at operand 2, stores at operand 3.
  const SDValue &getOffset() const {
    return getOperand(getOpcode() == ISD::LOAD ? 2 : 3);
  }

  /// Return the addressing mode for this load or store:
  /// unindexed, pre-inc, pre-dec, post-inc, or post-dec.
  ISD::MemIndexedMode getAddressingMode() const {
    return static_cast<ISD::MemIndexedMode>(LSBaseSDNodeBits.AddressingMode);
  }

  /// Return true if this is a pre/post inc/dec load/store.
  bool isIndexed() const { return getAddressingMode() != ISD::UNINDEXED; }

  /// Return true if this is NOT a pre/post inc/dec load/store.
  bool isUnindexed() const { return getAddressingMode() == ISD::UNINDEXED; }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::LOAD ||
           N->getOpcode() == ISD::STORE;
  }
};
2287
2288/// This class is used to represent ISD::LOAD nodes.
class LoadSDNode : public LSBaseSDNode {
  friend class SelectionDAG;

  LoadSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
             ISD::MemIndexedMode AM, ISD::LoadExtType ETy, EVT MemVT,
             MachineMemOperand *MMO)
      : LSBaseSDNode(ISD::LOAD, Order, dl, VTs, AM, MemVT, MMO) {
    LoadSDNodeBits.ExtTy = ETy;
    // The memory operand must describe a load, never a store.
    assert(readMem() && "Load MachineMemOperand is not a load!");
    assert(!writeMem() && "Load MachineMemOperand is a store!");
  }

public:
  /// Return whether this is a plain node,
  /// or one of the varieties of value-extending loads.
  ISD::LoadExtType getExtensionType() const {
    return static_cast<ISD::LoadExtType>(LoadSDNodeBits.ExtTy);
  }

  // Operand layout: (Chain, Ptr, Offset).
  const SDValue &getBasePtr() const { return getOperand(1); }
  const SDValue &getOffset() const { return getOperand(2); }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::LOAD;
  }
};
2315
2316/// This class is used to represent ISD::STORE nodes.
class StoreSDNode : public LSBaseSDNode {
  friend class SelectionDAG;

  StoreSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
              ISD::MemIndexedMode AM, bool isTrunc, EVT MemVT,
              MachineMemOperand *MMO)
      : LSBaseSDNode(ISD::STORE, Order, dl, VTs, AM, MemVT, MMO) {
    StoreSDNodeBits.IsTruncating = isTrunc;
    // The memory operand must describe a store, never a load.
    assert(!readMem() && "Store MachineMemOperand is a load!");
    assert(writeMem() && "Store MachineMemOperand is not a store!");
  }

public:
  /// Return true if the op does a truncation before store.
  /// For integers this is the same as doing a TRUNCATE and storing the result.
  /// For floats, it is the same as doing an FP_ROUND and storing the result.
  bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; }
  void setTruncatingStore(bool Truncating) {
    StoreSDNodeBits.IsTruncating = Truncating;
  }

  // Operand layout: (Chain, Value, Ptr, Offset).
  const SDValue &getValue() const { return getOperand(1); }
  const SDValue &getBasePtr() const { return getOperand(2); }
  const SDValue &getOffset() const { return getOperand(3); }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::STORE;
  }
};
2346
2347/// This base class is used to represent VP_LOAD and VP_STORE nodes
class VPLoadStoreSDNode : public MemSDNode {
public:
  friend class SelectionDAG;

  VPLoadStoreSDNode(ISD::NodeType NodeTy, unsigned Order, const DebugLoc &dl,
                    SDVTList VTs, ISD::MemIndexedMode AM, EVT MemVT,
                    MachineMemOperand *MMO)
      : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
    LSBaseSDNodeBits.AddressingMode = AM;
    // AM is stored in a narrow bitfield; verify it round-trips.
    assert(getAddressingMode() == AM && "Value truncated");
  }

  // VPLoadSDNode (Chain, Ptr, Offset, Mask, EVL)
  // VPStoreSDNode (Chain, Data, Ptr, Offset, Mask, EVL)
  // Mask is a vector of i1 elements;
  // the type of EVL is TLI.getVPExplicitVectorLengthTy().
  const SDValue &getOffset() const {
    return getOperand(getOpcode() == ISD::VP_LOAD ? 2 : 3);
  }
  const SDValue &getBasePtr() const {
    return getOperand(getOpcode() == ISD::VP_LOAD ? 1 : 2);
  }
  const SDValue &getMask() const {
    return getOperand(getOpcode() == ISD::VP_LOAD ? 3 : 4);
  }
  const SDValue &getVectorLength() const {
    return getOperand(getOpcode() == ISD::VP_LOAD ? 4 : 5);
  }

  /// Return the addressing mode for this load or store:
  /// unindexed, pre-inc, pre-dec, post-inc, or post-dec.
  ISD::MemIndexedMode getAddressingMode() const {
    return static_cast<ISD::MemIndexedMode>(LSBaseSDNodeBits.AddressingMode);
  }

  /// Return true if this is a pre/post inc/dec load/store.
  bool isIndexed() const { return getAddressingMode() != ISD::UNINDEXED; }

  /// Return true if this is NOT a pre/post inc/dec load/store.
  bool isUnindexed() const { return getAddressingMode() == ISD::UNINDEXED; }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::VP_LOAD || N->getOpcode() == ISD::VP_STORE;
  }
};
2393
2394/// This class is used to represent a VP_LOAD node
2395class VPLoadSDNode : public VPLoadStoreSDNode {
2396public:
2397 friend class SelectionDAG;
2398
2399 VPLoadSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2400 ISD::MemIndexedMode AM, ISD::LoadExtType ETy, bool isExpanding,
2401 EVT MemVT, MachineMemOperand *MMO)
2402 : VPLoadStoreSDNode(ISD::VP_LOAD, Order, dl, VTs, AM, MemVT, MMO) {
2403 LoadSDNodeBits.ExtTy = ETy;
2404 LoadSDNodeBits.IsExpanding = isExpanding;
2405 }
2406
2407 ISD::LoadExtType getExtensionType() const {
2408 return static_cast<ISD::LoadExtType>(LoadSDNodeBits.ExtTy);
2409 }
2410
2411 const SDValue &getBasePtr() const { return getOperand(1); }
2412 const SDValue &getOffset() const { return getOperand(2); }
2413 const SDValue &getMask() const { return getOperand(3); }
2414 const SDValue &getVectorLength() const { return getOperand(4); }
2415
2416 static bool classof(const SDNode *N) {
2417 return N->getOpcode() == ISD::VP_LOAD;
2418 }
2419 bool isExpandingLoad() const { return LoadSDNodeBits.IsExpanding; }
2420};
2421
2422/// This class is used to represent a VP_STORE node
2423class VPStoreSDNode : public VPLoadStoreSDNode {
2424public:
2425 friend class SelectionDAG;
2426
2427 VPStoreSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2428 ISD::MemIndexedMode AM, bool isTrunc, bool isCompressing,
2429 EVT MemVT, MachineMemOperand *MMO)
2430 : VPLoadStoreSDNode(ISD::VP_STORE, Order, dl, VTs, AM, MemVT, MMO) {
2431 StoreSDNodeBits.IsTruncating = isTrunc;
2432 StoreSDNodeBits.IsCompressing = isCompressing;
2433 }
2434
2435 /// Return true if this is a truncating store.
2436 /// For integers this is the same as doing a TRUNCATE and storing the result.
2437 /// For floats, it is the same as doing an FP_ROUND and storing the result.
2438 bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; }
2439
2440 /// Returns true if the op does a compression to the vector before storing.
2441 /// The node contiguously stores the active elements (integers or floats)
2442 /// in src (those with their respective bit set in writemask k) to unaligned
2443 /// memory at base_addr.
2444 bool isCompressingStore() const { return StoreSDNodeBits.IsCompressing; }
2445
2446 const SDValue &getValue() const { return getOperand(1); }
2447 const SDValue &getBasePtr() const { return getOperand(2); }
2448 const SDValue &getOffset() const { return getOperand(3); }
2449 const SDValue &getMask() const { return getOperand(4); }
2450 const SDValue &getVectorLength() const { return getOperand(5); }
2451
2452 static bool classof(const SDNode *N) {
2453 return N->getOpcode() == ISD::VP_STORE;
2454 }
2455};
2456
2457/// This base class is used to represent MLOAD and MSTORE nodes
2458class MaskedLoadStoreSDNode : public MemSDNode {
2459public:
2460 friend class SelectionDAG;
2461
2462 MaskedLoadStoreSDNode(ISD::NodeType NodeTy, unsigned Order,
2463 const DebugLoc &dl, SDVTList VTs,
2464 ISD::MemIndexedMode AM, EVT MemVT,
2465 MachineMemOperand *MMO)
2466 : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
2467 LSBaseSDNodeBits.AddressingMode = AM;
2468 assert(getAddressingMode() == AM && "Value truncated")(static_cast <bool> (getAddressingMode() == AM &&
"Value truncated") ? void (0) : __assert_fail ("getAddressingMode() == AM && \"Value truncated\""
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 2468, __extension__ __PRETTY_FUNCTION__))
;
2469 }
2470
2471 // MaskedLoadSDNode (Chain, ptr, offset, mask, passthru)
2472 // MaskedStoreSDNode (Chain, data, ptr, offset, mask)
2473 // Mask is a vector of i1 elements
2474 const SDValue &getOffset() const {
2475 return getOperand(getOpcode() == ISD::MLOAD ? 2 : 3);
2476 }
2477 const SDValue &getMask() const {
2478 return getOperand(getOpcode() == ISD::MLOAD ? 3 : 4);
2479 }
2480
2481 /// Return the addressing mode for this load or store:
2482 /// unindexed, pre-inc, pre-dec, post-inc, or post-dec.
2483 ISD::MemIndexedMode getAddressingMode() const {
2484 return static_cast<ISD::MemIndexedMode>(LSBaseSDNodeBits.AddressingMode);
2485 }
2486
2487 /// Return true if this is a pre/post inc/dec load/store.
2488 bool isIndexed() const { return getAddressingMode() != ISD::UNINDEXED; }
2489
2490 /// Return true if this is NOT a pre/post inc/dec load/store.
2491 bool isUnindexed() const { return getAddressingMode() == ISD::UNINDEXED; }
2492
2493 static bool classof(const SDNode *N) {
2494 return N->getOpcode() == ISD::MLOAD ||
2495 N->getOpcode() == ISD::MSTORE;
2496 }
2497};
2498
2499/// This class is used to represent an MLOAD node
2500class MaskedLoadSDNode : public MaskedLoadStoreSDNode {
2501public:
2502 friend class SelectionDAG;
2503
2504 MaskedLoadSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2505 ISD::MemIndexedMode AM, ISD::LoadExtType ETy,
2506 bool IsExpanding, EVT MemVT, MachineMemOperand *MMO)
2507 : MaskedLoadStoreSDNode(ISD::MLOAD, Order, dl, VTs, AM, MemVT, MMO) {
2508 LoadSDNodeBits.ExtTy = ETy;
2509 LoadSDNodeBits.IsExpanding = IsExpanding;
2510 }
2511
2512 ISD::LoadExtType getExtensionType() const {
2513 return static_cast<ISD::LoadExtType>(LoadSDNodeBits.ExtTy);
2514 }
2515
2516 const SDValue &getBasePtr() const { return getOperand(1); }
2517 const SDValue &getOffset() const { return getOperand(2); }
2518 const SDValue &getMask() const { return getOperand(3); }
2519 const SDValue &getPassThru() const { return getOperand(4); }
2520
2521 static bool classof(const SDNode *N) {
2522 return N->getOpcode() == ISD::MLOAD;
2523 }
2524
2525 bool isExpandingLoad() const { return LoadSDNodeBits.IsExpanding; }
2526};
2527
2528/// This class is used to represent an MSTORE node
2529class MaskedStoreSDNode : public MaskedLoadStoreSDNode {
2530public:
2531 friend class SelectionDAG;
2532
2533 MaskedStoreSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2534 ISD::MemIndexedMode AM, bool isTrunc, bool isCompressing,
2535 EVT MemVT, MachineMemOperand *MMO)
2536 : MaskedLoadStoreSDNode(ISD::MSTORE, Order, dl, VTs, AM, MemVT, MMO) {
2537 StoreSDNodeBits.IsTruncating = isTrunc;
2538 StoreSDNodeBits.IsCompressing = isCompressing;
2539 }
2540
2541 /// Return true if the op does a truncation before store.
2542 /// For integers this is the same as doing a TRUNCATE and storing the result.
2543 /// For floats, it is the same as doing an FP_ROUND and storing the result.
2544 bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; }
2545
2546 /// Returns true if the op does a compression to the vector before storing.
2547 /// The node contiguously stores the active elements (integers or floats)
2548 /// in src (those with their respective bit set in writemask k) to unaligned
2549 /// memory at base_addr.
2550 bool isCompressingStore() const { return StoreSDNodeBits.IsCompressing; }
2551
2552 const SDValue &getValue() const { return getOperand(1); }
2553 const SDValue &getBasePtr() const { return getOperand(2); }
2554 const SDValue &getOffset() const { return getOperand(3); }
2555 const SDValue &getMask() const { return getOperand(4); }
2556
2557 static bool classof(const SDNode *N) {
2558 return N->getOpcode() == ISD::MSTORE;
2559 }
2560};
2561
2562/// This is a base class used to represent
2563/// VP_GATHER and VP_SCATTER nodes
2564///
2565class VPGatherScatterSDNode : public MemSDNode {
2566public:
2567 friend class SelectionDAG;
2568
2569 VPGatherScatterSDNode(ISD::NodeType NodeTy, unsigned Order,
2570 const DebugLoc &dl, SDVTList VTs, EVT MemVT,
2571 MachineMemOperand *MMO, ISD::MemIndexType IndexType)
2572 : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
2573 LSBaseSDNodeBits.AddressingMode = IndexType;
2574 assert(getIndexType() == IndexType && "Value truncated")(static_cast <bool> (getIndexType() == IndexType &&
"Value truncated") ? void (0) : __assert_fail ("getIndexType() == IndexType && \"Value truncated\""
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 2574, __extension__ __PRETTY_FUNCTION__))
;
2575 }
2576
2577 /// How is Index applied to BasePtr when computing addresses.
2578 ISD::MemIndexType getIndexType() const {
2579 return static_cast<ISD::MemIndexType>(LSBaseSDNodeBits.AddressingMode);
2580 }
2581 bool isIndexScaled() const {
2582 return (getIndexType() == ISD::SIGNED_SCALED) ||
2583 (getIndexType() == ISD::UNSIGNED_SCALED);
2584 }
2585 bool isIndexSigned() const {
2586 return (getIndexType() == ISD::SIGNED_SCALED) ||
2587 (getIndexType() == ISD::SIGNED_UNSCALED);
2588 }
2589
2590 // In the both nodes address is Op1, mask is Op2:
2591 // VPGatherSDNode (Chain, base, index, scale, mask, vlen)
2592 // VPScatterSDNode (Chain, value, base, index, scale, mask, vlen)
2593 // Mask is a vector of i1 elements
2594 const SDValue &getBasePtr() const {
2595 return getOperand((getOpcode() == ISD::VP_GATHER) ? 1 : 2);
2596 }
2597 const SDValue &getIndex() const {
2598 return getOperand((getOpcode() == ISD::VP_GATHER) ? 2 : 3);
2599 }
2600 const SDValue &getScale() const {
2601 return getOperand((getOpcode() == ISD::VP_GATHER) ? 3 : 4);
2602 }
2603 const SDValue &getMask() const {
2604 return getOperand((getOpcode() == ISD::VP_GATHER) ? 4 : 5);
2605 }
2606 const SDValue &getVectorLength() const {
2607 return getOperand((getOpcode() == ISD::VP_GATHER) ? 5 : 6);
2608 }
2609
2610 static bool classof(const SDNode *N) {
2611 return N->getOpcode() == ISD::VP_GATHER ||
2612 N->getOpcode() == ISD::VP_SCATTER;
2613 }
2614};
2615
2616/// This class is used to represent an VP_GATHER node
2617///
2618class VPGatherSDNode : public VPGatherScatterSDNode {
2619public:
2620 friend class SelectionDAG;
2621
2622 VPGatherSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs, EVT MemVT,
2623 MachineMemOperand *MMO, ISD::MemIndexType IndexType)
2624 : VPGatherScatterSDNode(ISD::VP_GATHER, Order, dl, VTs, MemVT, MMO,
2625 IndexType) {}
2626
2627 static bool classof(const SDNode *N) {
2628 return N->getOpcode() == ISD::VP_GATHER;
2629 }
2630};
2631
2632/// This class is used to represent an VP_SCATTER node
2633///
2634class VPScatterSDNode : public VPGatherScatterSDNode {
2635public:
2636 friend class SelectionDAG;
2637
2638 VPScatterSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs, EVT MemVT,
2639 MachineMemOperand *MMO, ISD::MemIndexType IndexType)
2640 : VPGatherScatterSDNode(ISD::VP_SCATTER, Order, dl, VTs, MemVT, MMO,
2641 IndexType) {}
2642
2643 const SDValue &getValue() const { return getOperand(1); }
2644
2645 static bool classof(const SDNode *N) {
2646 return N->getOpcode() == ISD::VP_SCATTER;
2647 }
2648};
2649
2650/// This is a base class used to represent
2651/// MGATHER and MSCATTER nodes
2652///
2653class MaskedGatherScatterSDNode : public MemSDNode {
2654public:
2655 friend class SelectionDAG;
2656
2657 MaskedGatherScatterSDNode(ISD::NodeType NodeTy, unsigned Order,
2658 const DebugLoc &dl, SDVTList VTs, EVT MemVT,
2659 MachineMemOperand *MMO, ISD::MemIndexType IndexType)
2660 : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
2661 LSBaseSDNodeBits.AddressingMode = IndexType;
2662 assert(getIndexType() == IndexType && "Value truncated")(static_cast <bool> (getIndexType() == IndexType &&
"Value truncated") ? void (0) : __assert_fail ("getIndexType() == IndexType && \"Value truncated\""
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 2662, __extension__ __PRETTY_FUNCTION__))
;
2663 }
2664
2665 /// How is Index applied to BasePtr when computing addresses.
2666 ISD::MemIndexType getIndexType() const {
2667 return static_cast<ISD::MemIndexType>(LSBaseSDNodeBits.AddressingMode);
2668 }
2669 void setIndexType(ISD::MemIndexType IndexType) {
2670 LSBaseSDNodeBits.AddressingMode = IndexType;
2671 }
2672 bool isIndexScaled() const {
2673 return (getIndexType() == ISD::SIGNED_SCALED) ||
2674 (getIndexType() == ISD::UNSIGNED_SCALED);
2675 }
2676 bool isIndexSigned() const {
2677 return (getIndexType() == ISD::SIGNED_SCALED) ||
2678 (getIndexType() == ISD::SIGNED_UNSCALED);
2679 }
2680
2681 // In the both nodes address is Op1, mask is Op2:
2682 // MaskedGatherSDNode (Chain, passthru, mask, base, index, scale)
2683 // MaskedScatterSDNode (Chain, value, mask, base, index, scale)
2684 // Mask is a vector of i1 elements
2685 const SDValue &getBasePtr() const { return getOperand(3); }
2686 const SDValue &getIndex() const { return getOperand(4); }
2687 const SDValue &getMask() const { return getOperand(2); }
2688 const SDValue &getScale() const { return getOperand(5); }
2689
2690 static bool classof(const SDNode *N) {
2691 return N->getOpcode() == ISD::MGATHER ||
2692 N->getOpcode() == ISD::MSCATTER;
2693 }
2694};
2695
2696/// This class is used to represent an MGATHER node
2697///
2698class MaskedGatherSDNode : public MaskedGatherScatterSDNode {
2699public:
2700 friend class SelectionDAG;
2701
2702 MaskedGatherSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2703 EVT MemVT, MachineMemOperand *MMO,
2704 ISD::MemIndexType IndexType, ISD::LoadExtType ETy)
2705 : MaskedGatherScatterSDNode(ISD::MGATHER, Order, dl, VTs, MemVT, MMO,
2706 IndexType) {
2707 LoadSDNodeBits.ExtTy = ETy;
2708 }
2709
2710 const SDValue &getPassThru() const { return getOperand(1); }
2711
2712 ISD::LoadExtType getExtensionType() const {
2713 return ISD::LoadExtType(LoadSDNodeBits.ExtTy);
2714 }
2715
2716 static bool classof(const SDNode *N) {
2717 return N->getOpcode() == ISD::MGATHER;
2718 }
2719};
2720
2721/// This class is used to represent an MSCATTER node
2722///
2723class MaskedScatterSDNode : public MaskedGatherScatterSDNode {
2724public:
2725 friend class SelectionDAG;
2726
2727 MaskedScatterSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2728 EVT MemVT, MachineMemOperand *MMO,
2729 ISD::MemIndexType IndexType, bool IsTrunc)
2730 : MaskedGatherScatterSDNode(ISD::MSCATTER, Order, dl, VTs, MemVT, MMO,
2731 IndexType) {
2732 StoreSDNodeBits.IsTruncating = IsTrunc;
2733 }
2734
2735 /// Return true if the op does a truncation before store.
2736 /// For integers this is the same as doing a TRUNCATE and storing the result.
2737 /// For floats, it is the same as doing an FP_ROUND and storing the result.
2738 bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; }
2739
2740 const SDValue &getValue() const { return getOperand(1); }
2741
2742 static bool classof(const SDNode *N) {
2743 return N->getOpcode() == ISD::MSCATTER;
2744 }
2745};
2746
2747/// An SDNode that represents everything that will be needed
2748/// to construct a MachineInstr. These nodes are created during the
2749/// instruction selection proper phase.
2750///
2751/// Note that the only supported way to set the `memoperands` is by calling the
2752/// `SelectionDAG::setNodeMemRefs` function as the memory management happens
2753/// inside the DAG rather than in the node.
2754class MachineSDNode : public SDNode {
2755private:
2756 friend class SelectionDAG;
2757
2758 MachineSDNode(unsigned Opc, unsigned Order, const DebugLoc &DL, SDVTList VTs)
2759 : SDNode(Opc, Order, DL, VTs) {}
2760
2761 // We use a pointer union between a single `MachineMemOperand` pointer and
2762 // a pointer to an array of `MachineMemOperand` pointers. This is null when
2763 // the number of these is zero, the single pointer variant used when the
2764 // number is one, and the array is used for larger numbers.
2765 //
2766 // The array is allocated via the `SelectionDAG`'s allocator and so will
2767 // always live until the DAG is cleaned up and doesn't require ownership here.
2768 //
2769 // We can't use something simpler like `TinyPtrVector` here because `SDNode`
2770 // subclasses aren't managed in a conforming C++ manner. See the comments on
2771 // `SelectionDAG::MorphNodeTo` which details what all goes on, but the
2772 // constraint here is that these don't manage memory with their constructor or
2773 // destructor and can be initialized to a good state even if they start off
2774 // uninitialized.
2775 PointerUnion<MachineMemOperand *, MachineMemOperand **> MemRefs = {};
2776
2777 // Note that this could be folded into the above `MemRefs` member if doing so
2778 // is advantageous at some point. We don't need to store this in most cases.
2779 // However, at the moment this doesn't appear to make the allocation any
2780 // smaller and makes the code somewhat simpler to read.
2781 int NumMemRefs = 0;
2782
2783public:
2784 using mmo_iterator = ArrayRef<MachineMemOperand *>::const_iterator;
2785
2786 ArrayRef<MachineMemOperand *> memoperands() const {
2787 // Special case the common cases.
2788 if (NumMemRefs == 0)
2789 return {};
2790 if (NumMemRefs == 1)
2791 return makeArrayRef(MemRefs.getAddrOfPtr1(), 1);
2792
2793 // Otherwise we have an actual array.
2794 return makeArrayRef(MemRefs.get<MachineMemOperand **>(), NumMemRefs);
2795 }
2796 mmo_iterator memoperands_begin() const { return memoperands().begin(); }
2797 mmo_iterator memoperands_end() const { return memoperands().end(); }
2798 bool memoperands_empty() const { return memoperands().empty(); }
2799
2800 /// Clear out the memory reference descriptor list.
2801 void clearMemRefs() {
2802 MemRefs = nullptr;
2803 NumMemRefs = 0;
2804 }
2805
2806 static bool classof(const SDNode *N) {
2807 return N->isMachineOpcode();
2808 }
2809};
2810
2811/// An SDNode that records if a register contains a value that is guaranteed to
2812/// be aligned accordingly.
2813class AssertAlignSDNode : public SDNode {
2814 Align Alignment;
2815
2816public:
2817 AssertAlignSDNode(unsigned Order, const DebugLoc &DL, EVT VT, Align A)
2818 : SDNode(ISD::AssertAlign, Order, DL, getSDVTList(VT)), Alignment(A) {}
2819
2820 Align getAlign() const { return Alignment; }
2821
2822 static bool classof(const SDNode *N) {
2823 return N->getOpcode() == ISD::AssertAlign;
2824 }
2825};
2826
2827class SDNodeIterator {
2828 const SDNode *Node;
2829 unsigned Operand;
2830
2831 SDNodeIterator(const SDNode *N, unsigned Op) : Node(N), Operand(Op) {}
2832
2833public:
2834 using iterator_category = std::forward_iterator_tag;
2835 using value_type = SDNode;
2836 using difference_type = std::ptrdiff_t;
2837 using pointer = value_type *;
2838 using reference = value_type &;
2839
2840 bool operator==(const SDNodeIterator& x) const {
2841 return Operand == x.Operand;
2842 }
2843 bool operator!=(const SDNodeIterator& x) const { return !operator==(x); }
2844
2845 pointer operator*() const {
2846 return Node->getOperand(Operand).getNode();
2847 }
2848 pointer operator->() const { return operator*(); }
2849
2850 SDNodeIterator& operator++() { // Preincrement
2851 ++Operand;
2852 return *this;
2853 }
2854 SDNodeIterator operator++(int) { // Postincrement
2855 SDNodeIterator tmp = *this; ++*this; return tmp;
2856 }
2857 size_t operator-(SDNodeIterator Other) const {
2858 assert(Node == Other.Node &&(static_cast <bool> (Node == Other.Node && "Cannot compare iterators of two different nodes!"
) ? void (0) : __assert_fail ("Node == Other.Node && \"Cannot compare iterators of two different nodes!\""
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 2859, __extension__ __PRETTY_FUNCTION__))
2859 "Cannot compare iterators of two different nodes!")(static_cast <bool> (Node == Other.Node && "Cannot compare iterators of two different nodes!"
) ? void (0) : __assert_fail ("Node == Other.Node && \"Cannot compare iterators of two different nodes!\""
, "/build/llvm-toolchain-snapshot-14~++20211110111138+cffbfd01e37b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 2859, __extension__ __PRETTY_FUNCTION__))
;
2860 return Operand - Other.Operand;
2861 }
2862
2863 static SDNodeIterator begin(const SDNode *N) { return SDNodeIterator(N, 0); }
2864 static SDNodeIterator end (const SDNode *N) {
2865 return SDNodeIterator(N, N->getNumOperands());
2866 }
2867
2868 unsigned getOperand() const { return Operand; }
2869 const SDNode *getNode() const { return Node; }
2870};
2871
2872template <> struct GraphTraits<SDNode*> {
2873 using NodeRef = SDNode *;
2874 using ChildIteratorType = SDNodeIterator;
2875
2876 static NodeRef getEntryNode(SDNode *N) { return N; }
2877
2878 static ChildIteratorType child_begin(NodeRef N) {
2879 return SDNodeIterator::begin(N);
2880 }
2881
2882 static ChildIteratorType child_end(NodeRef N) {
2883 return SDNodeIterator::end(N);
2884 }
2885};
2886
2887/// A representation of the largest SDNode, for use in sizeof().
2888///
2889/// This needs to be a union because the largest node differs on 32 bit systems
2890/// with 4 and 8 byte pointer alignment, respectively.
2891using LargestSDNode = AlignedCharArrayUnion<AtomicSDNode, TargetIndexSDNode,
2892 BlockAddressSDNode,
2893 GlobalAddressSDNode,
2894 PseudoProbeSDNode>;
2895
2896/// The SDNode class with the greatest alignment requirement.
2897using MostAlignedSDNode = GlobalAddressSDNode;
2898
2899namespace ISD {
2900
2901 /// Returns true if the specified node is a non-extending and unindexed load.
2902 inline bool isNormalLoad(const SDNode *N) {
2903 const LoadSDNode *Ld = dyn_cast<LoadSDNode>(N);
2904 return Ld && Ld->getExtensionType() == ISD::NON_EXTLOAD &&
2905 Ld->getAddressingMode() == ISD::UNINDEXED;
2906 }
2907
2908 /// Returns true if the specified node is a non-extending load.
2909 inline bool isNON_EXTLoad(const SDNode *N) {
2910 return isa<LoadSDNode>(N) &&
2911 cast<LoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD;
2912 }
2913
2914 /// Returns true if the specified node is a EXTLOAD.
2915 inline bool isEXTLoad(const SDNode *N) {
2916 return isa<LoadSDNode>(N) &&
2917 cast<LoadSDNode>(N)->getExtensionType() == ISD::EXTLOAD;
2918 }
2919
2920 /// Returns true if the specified node is a SEXTLOAD.
2921 inline bool isSEXTLoad(const SDNode *N) {
2922 return isa<LoadSDNode>(N) &&
2923 cast<LoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD;
2924 }
2925
2926 /// Returns true if the specified node is a ZEXTLOAD.
2927 inline bool isZEXTLoad(const SDNode *N) {
2928 return isa<LoadSDNode>(N) &&
2929 cast<LoadSDNode>(N)->getExtensionType() == ISD::ZEXTLOAD;
2930 }
2931
2932 /// Returns true if the specified node is an unindexed load.
2933 inline bool isUNINDEXEDLoad(const SDNode *N) {
2934 return isa<LoadSDNode>(N) &&
2935 cast<LoadSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
2936 }
2937
2938 /// Returns true if the specified node is a non-truncating
2939 /// and unindexed store.
2940 inline bool isNormalStore(const SDNode *N) {
2941 const StoreSDNode *St = dyn_cast<StoreSDNode>(N);
2942 return St && !St->isTruncatingStore() &&
2943 St->getAddressingMode() == ISD::UNINDEXED;
2944 }
2945
2946 /// Returns true if the specified node is an unindexed store.
2947 inline bool isUNINDEXEDStore(const SDNode *N) {
2948 return isa<StoreSDNode>(N) &&
2949 cast<StoreSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
2950 }
2951
2952 /// Attempt to match a unary predicate against a scalar/splat constant or
2953 /// every element of a constant BUILD_VECTOR.
2954 /// If AllowUndef is true, then UNDEF elements will pass nullptr to Match.
2955 bool matchUnaryPredicate(SDValue Op,
2956 std::function<bool(ConstantSDNode *)> Match,
2957 bool AllowUndefs = false);
2958
2959 /// Attempt to match a binary predicate against a pair of scalar/splat
2960 /// constants or every element of a pair of constant BUILD_VECTORs.
2961 /// If AllowUndef is true, then UNDEF elements will pass nullptr to Match.
2962 /// If AllowTypeMismatch is true then RetType + ArgTypes don't need to match.
2963 bool matchBinaryPredicate(
2964 SDValue LHS, SDValue RHS,
2965 std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match,
2966 bool AllowUndefs = false, bool AllowTypeMismatch = false);
2967
2968 /// Returns true if the specified value is the overflow result from one
2969 /// of the overflow intrinsic nodes.
2970 inline bool isOverflowIntrOpRes(SDValue Op) {
2971 unsigned Opc = Op.getOpcode();
2972 return (Op.getResNo() == 1 &&
2973 (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO ||
2974 Opc == ISD::USUBO || Opc == ISD::SMULO || Opc == ISD::UMULO));
2975 }
2976
2977} // end namespace ISD
2978
2979} // end namespace llvm
2980
2981#endif // LLVM_CODEGEN_SELECTIONDAGNODES_H