Bug Summary

File: llvm/include/llvm/CodeGen/SelectionDAGNodes.h
Warning: line 1110, column 10
Called C++ object pointer is null

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name WebAssemblyISelLowering.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/build-llvm/lib/Target/WebAssembly -resource-dir /usr/lib/llvm-13/lib/clang/13.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/build-llvm/lib/Target/WebAssembly -I /build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/WebAssembly -I /build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/build-llvm/include -I /build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem 
/usr/lib/llvm-13/lib/clang/13.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/build-llvm/lib/Target/WebAssembly -fdebug-prefix-map=/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c=. -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-07-26-235520-9401-1 -x c++ /build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp

/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp

1//=- WebAssemblyISelLowering.cpp - WebAssembly DAG Lowering Implementation -==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file implements the WebAssemblyTargetLowering class.
11///
12//===----------------------------------------------------------------------===//
13
14#include "WebAssemblyISelLowering.h"
15#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
16#include "Utils/WebAssemblyTypeUtilities.h"
17#include "Utils/WebAssemblyUtilities.h"
18#include "WebAssemblyMachineFunctionInfo.h"
19#include "WebAssemblySubtarget.h"
20#include "WebAssemblyTargetMachine.h"
21#include "llvm/CodeGen/CallingConvLower.h"
22#include "llvm/CodeGen/MachineInstrBuilder.h"
23#include "llvm/CodeGen/MachineJumpTableInfo.h"
24#include "llvm/CodeGen/MachineModuleInfo.h"
25#include "llvm/CodeGen/MachineRegisterInfo.h"
26#include "llvm/CodeGen/SelectionDAG.h"
27#include "llvm/CodeGen/SelectionDAGNodes.h"
28#include "llvm/CodeGen/WasmEHFuncInfo.h"
29#include "llvm/IR/DiagnosticInfo.h"
30#include "llvm/IR/DiagnosticPrinter.h"
31#include "llvm/IR/Function.h"
32#include "llvm/IR/Intrinsics.h"
33#include "llvm/IR/IntrinsicsWebAssembly.h"
34#include "llvm/Support/Debug.h"
35#include "llvm/Support/ErrorHandling.h"
36#include "llvm/Support/MathExtras.h"
37#include "llvm/Support/raw_ostream.h"
38#include "llvm/Target/TargetOptions.h"
39using namespace llvm;
40
// Debug-output category used by LLVM_DEBUG/-debug-only for this pass.
#define DEBUG_TYPE "wasm-lower"
42
43WebAssemblyTargetLowering::WebAssemblyTargetLowering(
44 const TargetMachine &TM, const WebAssemblySubtarget &STI)
45 : TargetLowering(TM), Subtarget(&STI) {
46 auto MVTPtr = Subtarget->hasAddr64() ? MVT::i64 : MVT::i32;
47
48 // Booleans always contain 0 or 1.
49 setBooleanContents(ZeroOrOneBooleanContent);
50 // Except in SIMD vectors
51 setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
52 // We don't know the microarchitecture here, so just reduce register pressure.
53 setSchedulingPreference(Sched::RegPressure);
54 // Tell ISel that we have a stack pointer.
55 setStackPointerRegisterToSaveRestore(
56 Subtarget->hasAddr64() ? WebAssembly::SP64 : WebAssembly::SP32);
57 // Set up the register classes.
58 addRegisterClass(MVT::i32, &WebAssembly::I32RegClass);
59 addRegisterClass(MVT::i64, &WebAssembly::I64RegClass);
60 addRegisterClass(MVT::f32, &WebAssembly::F32RegClass);
61 addRegisterClass(MVT::f64, &WebAssembly::F64RegClass);
62 if (Subtarget->hasSIMD128()) {
63 addRegisterClass(MVT::v16i8, &WebAssembly::V128RegClass);
64 addRegisterClass(MVT::v8i16, &WebAssembly::V128RegClass);
65 addRegisterClass(MVT::v4i32, &WebAssembly::V128RegClass);
66 addRegisterClass(MVT::v4f32, &WebAssembly::V128RegClass);
67 addRegisterClass(MVT::v2i64, &WebAssembly::V128RegClass);
68 addRegisterClass(MVT::v2f64, &WebAssembly::V128RegClass);
69 }
70 if (Subtarget->hasReferenceTypes()) {
71 addRegisterClass(MVT::externref, &WebAssembly::EXTERNREFRegClass);
72 addRegisterClass(MVT::funcref, &WebAssembly::FUNCREFRegClass);
73 }
74 // Compute derived properties from the register classes.
75 computeRegisterProperties(Subtarget->getRegisterInfo());
76
77 // Transform loads and stores to pointers in address space 1 to loads and
78 // stores to WebAssembly global variables, outside linear memory.
79 for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64}) {
80 setOperationAction(ISD::LOAD, T, Custom);
81 setOperationAction(ISD::STORE, T, Custom);
82 }
83 if (Subtarget->hasSIMD128()) {
84 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
85 MVT::v2f64}) {
86 setOperationAction(ISD::LOAD, T, Custom);
87 setOperationAction(ISD::STORE, T, Custom);
88 }
89 }
90 if (Subtarget->hasReferenceTypes()) {
91 for (auto T : {MVT::externref, MVT::funcref}) {
92 setOperationAction(ISD::LOAD, T, Custom);
93 setOperationAction(ISD::STORE, T, Custom);
94 }
95 }
96
97 setOperationAction(ISD::GlobalAddress, MVTPtr, Custom);
98 setOperationAction(ISD::GlobalTLSAddress, MVTPtr, Custom);
99 setOperationAction(ISD::ExternalSymbol, MVTPtr, Custom);
100 setOperationAction(ISD::JumpTable, MVTPtr, Custom);
101 setOperationAction(ISD::BlockAddress, MVTPtr, Custom);
102 setOperationAction(ISD::BRIND, MVT::Other, Custom);
103
104 // Take the default expansion for va_arg, va_copy, and va_end. There is no
105 // default action for va_start, so we do that custom.
106 setOperationAction(ISD::VASTART, MVT::Other, Custom);
107 setOperationAction(ISD::VAARG, MVT::Other, Expand);
108 setOperationAction(ISD::VACOPY, MVT::Other, Expand);
109 setOperationAction(ISD::VAEND, MVT::Other, Expand);
110
111 for (auto T : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
112 // Don't expand the floating-point types to constant pools.
113 setOperationAction(ISD::ConstantFP, T, Legal);
114 // Expand floating-point comparisons.
115 for (auto CC : {ISD::SETO, ISD::SETUO, ISD::SETUEQ, ISD::SETONE,
116 ISD::SETULT, ISD::SETULE, ISD::SETUGT, ISD::SETUGE})
117 setCondCodeAction(CC, T, Expand);
118 // Expand floating-point library function operators.
119 for (auto Op :
120 {ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FMA})
121 setOperationAction(Op, T, Expand);
122 // Note supported floating-point library function operators that otherwise
123 // default to expand.
124 for (auto Op :
125 {ISD::FCEIL, ISD::FFLOOR, ISD::FTRUNC, ISD::FNEARBYINT, ISD::FRINT})
126 setOperationAction(Op, T, Legal);
127 // Support minimum and maximum, which otherwise default to expand.
128 setOperationAction(ISD::FMINIMUM, T, Legal);
129 setOperationAction(ISD::FMAXIMUM, T, Legal);
130 // WebAssembly currently has no builtin f16 support.
131 setOperationAction(ISD::FP16_TO_FP, T, Expand);
132 setOperationAction(ISD::FP_TO_FP16, T, Expand);
133 setLoadExtAction(ISD::EXTLOAD, T, MVT::f16, Expand);
134 setTruncStoreAction(T, MVT::f16, Expand);
135 }
136
137 // Expand unavailable integer operations.
138 for (auto Op :
139 {ISD::BSWAP, ISD::SMUL_LOHI, ISD::UMUL_LOHI, ISD::MULHS, ISD::MULHU,
140 ISD::SDIVREM, ISD::UDIVREM, ISD::SHL_PARTS, ISD::SRA_PARTS,
141 ISD::SRL_PARTS, ISD::ADDC, ISD::ADDE, ISD::SUBC, ISD::SUBE}) {
142 for (auto T : {MVT::i32, MVT::i64})
143 setOperationAction(Op, T, Expand);
144 if (Subtarget->hasSIMD128())
145 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
146 setOperationAction(Op, T, Expand);
147 }
148
149 if (Subtarget->hasNontrappingFPToInt())
150 for (auto Op : {ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT})
151 for (auto T : {MVT::i32, MVT::i64})
152 setOperationAction(Op, T, Custom);
153
154 // SIMD-specific configuration
155 if (Subtarget->hasSIMD128()) {
156 // Hoist bitcasts out of shuffles
157 setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
158
159 // Combine extends of extract_subvectors into widening ops
160 setTargetDAGCombine(ISD::SIGN_EXTEND);
161 setTargetDAGCombine(ISD::ZERO_EXTEND);
162
163 // Combine int_to_fp or fp_extend of extract_vectors and vice versa into
164 // conversions ops
165 setTargetDAGCombine(ISD::SINT_TO_FP);
166 setTargetDAGCombine(ISD::UINT_TO_FP);
167 setTargetDAGCombine(ISD::FP_EXTEND);
168 setTargetDAGCombine(ISD::EXTRACT_SUBVECTOR);
169
170 // Combine fp_to_{s,u}int_sat or fp_round of concat_vectors or vice versa
171 // into conversion ops
172 setTargetDAGCombine(ISD::FP_TO_SINT_SAT);
173 setTargetDAGCombine(ISD::FP_TO_UINT_SAT);
174 setTargetDAGCombine(ISD::FP_ROUND);
175 setTargetDAGCombine(ISD::CONCAT_VECTORS);
176
177 // Support saturating add for i8x16 and i16x8
178 for (auto Op : {ISD::SADDSAT, ISD::UADDSAT})
179 for (auto T : {MVT::v16i8, MVT::v8i16})
180 setOperationAction(Op, T, Legal);
181
182 // Support integer abs
183 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
184 setOperationAction(ISD::ABS, T, Legal);
185
186 // Custom lower BUILD_VECTORs to minimize number of replace_lanes
187 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
188 MVT::v2f64})
189 setOperationAction(ISD::BUILD_VECTOR, T, Custom);
190
191 // We have custom shuffle lowering to expose the shuffle mask
192 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
193 MVT::v2f64})
194 setOperationAction(ISD::VECTOR_SHUFFLE, T, Custom);
195
196 // Custom lowering since wasm shifts must have a scalar shift amount
197 for (auto Op : {ISD::SHL, ISD::SRA, ISD::SRL})
198 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
199 setOperationAction(Op, T, Custom);
200
201 // Custom lower lane accesses to expand out variable indices
202 for (auto Op : {ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_VECTOR_ELT})
203 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
204 MVT::v2f64})
205 setOperationAction(Op, T, Custom);
206
207 // There is no i8x16.mul instruction
208 setOperationAction(ISD::MUL, MVT::v16i8, Expand);
209
210 // There is no vector conditional select instruction
211 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
212 MVT::v2f64})
213 setOperationAction(ISD::SELECT_CC, T, Expand);
214
215 // Expand integer operations supported for scalars but not SIMD
216 for (auto Op : {ISD::CTLZ, ISD::CTTZ, ISD::CTPOP, ISD::SDIV, ISD::UDIV,
217 ISD::SREM, ISD::UREM, ISD::ROTL, ISD::ROTR})
218 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
219 setOperationAction(Op, T, Expand);
220
221 // But we do have integer min and max operations
222 for (auto Op : {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
223 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
224 setOperationAction(Op, T, Legal);
225
226 // And we have popcnt for i8x16
227 setOperationAction(ISD::CTPOP, MVT::v16i8, Legal);
228
229 // Expand float operations supported for scalars but not SIMD
230 for (auto Op : {ISD::FCOPYSIGN, ISD::FLOG, ISD::FLOG2, ISD::FLOG10,
231 ISD::FEXP, ISD::FEXP2, ISD::FRINT})
232 for (auto T : {MVT::v4f32, MVT::v2f64})
233 setOperationAction(Op, T, Expand);
234
235 // Unsigned comparison operations are unavailable for i64x2 vectors.
236 for (auto CC : {ISD::SETUGT, ISD::SETUGE, ISD::SETULT, ISD::SETULE})
237 setCondCodeAction(CC, MVT::v2i64, Custom);
238
239 // 64x2 conversions are not in the spec
240 for (auto Op :
241 {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT})
242 for (auto T : {MVT::v2i64, MVT::v2f64})
243 setOperationAction(Op, T, Expand);
244
245 // But saturating fp_to_int converstions are
246 for (auto Op : {ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT})
247 setOperationAction(Op, MVT::v4i32, Custom);
248 }
249
250 // As a special case, these operators use the type to mean the type to
251 // sign-extend from.
252 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
253 if (!Subtarget->hasSignExt()) {
254 // Sign extends are legal only when extending a vector extract
255 auto Action = Subtarget->hasSIMD128() ? Custom : Expand;
256 for (auto T : {MVT::i8, MVT::i16, MVT::i32})
257 setOperationAction(ISD::SIGN_EXTEND_INREG, T, Action);
258 }
259 for (auto T : MVT::integer_fixedlen_vector_valuetypes())
260 setOperationAction(ISD::SIGN_EXTEND_INREG, T, Expand);
261
262 // Dynamic stack allocation: use the default expansion.
263 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
264 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
265 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVTPtr, Expand);
266
267 setOperationAction(ISD::FrameIndex, MVT::i32, Custom);
268 setOperationAction(ISD::FrameIndex, MVT::i64, Custom);
269 setOperationAction(ISD::CopyToReg, MVT::Other, Custom);
270
271 // Expand these forms; we pattern-match the forms that we can handle in isel.
272 for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64})
273 for (auto Op : {ISD::BR_CC, ISD::SELECT_CC})
274 setOperationAction(Op, T, Expand);
275
276 // We have custom switch handling.
277 setOperationAction(ISD::BR_JT, MVT::Other, Custom);
278
279 // WebAssembly doesn't have:
280 // - Floating-point extending loads.
281 // - Floating-point truncating stores.
282 // - i1 extending loads.
283 // - truncating SIMD stores and most extending loads
284 setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
285 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
286 for (auto T : MVT::integer_valuetypes())
287 for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
288 setLoadExtAction(Ext, T, MVT::i1, Promote);
289 if (Subtarget->hasSIMD128()) {
290 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v4f32,
291 MVT::v2f64}) {
292 for (auto MemT : MVT::fixedlen_vector_valuetypes()) {
293 if (MVT(T) != MemT) {
294 setTruncStoreAction(T, MemT, Expand);
295 for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
296 setLoadExtAction(Ext, T, MemT, Expand);
297 }
298 }
299 }
300 // But some vector extending loads are legal
301 for (auto Ext : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}) {
302 setLoadExtAction(Ext, MVT::v8i16, MVT::v8i8, Legal);
303 setLoadExtAction(Ext, MVT::v4i32, MVT::v4i16, Legal);
304 setLoadExtAction(Ext, MVT::v2i64, MVT::v2i32, Legal);
305 }
306 // And some truncating stores are legal as well
307 setTruncStoreAction(MVT::v8i16, MVT::v8i8, Legal);
308 setTruncStoreAction(MVT::v4i32, MVT::v4i16, Legal);
309 }
310
311 // Don't do anything clever with build_pairs
312 setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
313
314 // Trap lowers to wasm unreachable
315 setOperationAction(ISD::TRAP, MVT::Other, Legal);
316 setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
317
318 // Exception handling intrinsics
319 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
320 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
321 setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
322
323 setMaxAtomicSizeInBitsSupported(64);
324
325 // Override the __gnu_f2h_ieee/__gnu_h2f_ieee names so that the f32 name is
326 // consistent with the f64 and f128 names.
327 setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
328 setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
329
330 // Define the emscripten name for return address helper.
331 // TODO: when implementing other Wasm backends, make this generic or only do
332 // this on emscripten depending on what they end up doing.
333 setLibcallName(RTLIB::RETURN_ADDRESS, "emscripten_return_address");
334
335 // Always convert switches to br_tables unless there is only one case, which
336 // is equivalent to a simple branch. This reduces code size for wasm, and we
337 // defer possible jump table optimizations to the VM.
338 setMinimumJumpTableEntries(2);
339}
340
341TargetLowering::AtomicExpansionKind
342WebAssemblyTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
343 // We have wasm instructions for these
344 switch (AI->getOperation()) {
345 case AtomicRMWInst::Add:
346 case AtomicRMWInst::Sub:
347 case AtomicRMWInst::And:
348 case AtomicRMWInst::Or:
349 case AtomicRMWInst::Xor:
350 case AtomicRMWInst::Xchg:
351 return AtomicExpansionKind::None;
352 default:
353 break;
354 }
355 return AtomicExpansionKind::CmpXChg;
356}
357
358bool WebAssemblyTargetLowering::shouldScalarizeBinop(SDValue VecOp) const {
359 // Implementation copied from X86TargetLowering.
360 unsigned Opc = VecOp.getOpcode();
361
362 // Assume target opcodes can't be scalarized.
363 // TODO - do we have any exceptions?
364 if (Opc >= ISD::BUILTIN_OP_END)
365 return false;
366
367 // If the vector op is not supported, try to convert to scalar.
368 EVT VecVT = VecOp.getValueType();
369 if (!isOperationLegalOrCustomOrPromote(Opc, VecVT))
370 return true;
371
372 // If the vector op is supported, but the scalar op is not, the transform may
373 // not be worthwhile.
374 EVT ScalarVT = VecVT.getScalarType();
375 return isOperationLegalOrCustomOrPromote(Opc, ScalarVT);
376}
377
378FastISel *WebAssemblyTargetLowering::createFastISel(
379 FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo) const {
380 return WebAssembly::createFastISel(FuncInfo, LibInfo);
381}
382
383MVT WebAssemblyTargetLowering::getScalarShiftAmountTy(const DataLayout & /*DL*/,
384 EVT VT) const {
385 unsigned BitWidth = NextPowerOf2(VT.getSizeInBits() - 1);
386 if (BitWidth > 1 && BitWidth < 8)
387 BitWidth = 8;
388
389 if (BitWidth > 64) {
390 // The shift will be lowered to a libcall, and compiler-rt libcalls expect
391 // the count to be an i32.
392 BitWidth = 32;
393 assert(BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) &&(static_cast <bool> (BitWidth >= Log2_32_Ceil(VT.getSizeInBits
()) && "32-bit shift counts ought to be enough for anyone"
) ? void (0) : __assert_fail ("BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) && \"32-bit shift counts ought to be enough for anyone\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 394, __extension__ __PRETTY_FUNCTION__))
394 "32-bit shift counts ought to be enough for anyone")(static_cast <bool> (BitWidth >= Log2_32_Ceil(VT.getSizeInBits
()) && "32-bit shift counts ought to be enough for anyone"
) ? void (0) : __assert_fail ("BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) && \"32-bit shift counts ought to be enough for anyone\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 394, __extension__ __PRETTY_FUNCTION__))
;
395 }
396
397 MVT Result = MVT::getIntegerVT(BitWidth);
398 assert(Result != MVT::INVALID_SIMPLE_VALUE_TYPE &&(static_cast <bool> (Result != MVT::INVALID_SIMPLE_VALUE_TYPE
&& "Unable to represent scalar shift amount type") ?
void (0) : __assert_fail ("Result != MVT::INVALID_SIMPLE_VALUE_TYPE && \"Unable to represent scalar shift amount type\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 399, __extension__ __PRETTY_FUNCTION__))
399 "Unable to represent scalar shift amount type")(static_cast <bool> (Result != MVT::INVALID_SIMPLE_VALUE_TYPE
&& "Unable to represent scalar shift amount type") ?
void (0) : __assert_fail ("Result != MVT::INVALID_SIMPLE_VALUE_TYPE && \"Unable to represent scalar shift amount type\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 399, __extension__ __PRETTY_FUNCTION__))
;
400 return Result;
401}
402
403// Lower an fp-to-int conversion operator from the LLVM opcode, which has an
404// undefined result on invalid/overflow, to the WebAssembly opcode, which
405// traps on invalid/overflow.
406static MachineBasicBlock *LowerFPToInt(MachineInstr &MI, DebugLoc DL,
407 MachineBasicBlock *BB,
408 const TargetInstrInfo &TII,
409 bool IsUnsigned, bool Int64,
410 bool Float64, unsigned LoweredOpcode) {
411 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
412
413 Register OutReg = MI.getOperand(0).getReg();
414 Register InReg = MI.getOperand(1).getReg();
415
416 unsigned Abs = Float64 ? WebAssembly::ABS_F64 : WebAssembly::ABS_F32;
417 unsigned FConst = Float64 ? WebAssembly::CONST_F64 : WebAssembly::CONST_F32;
418 unsigned LT = Float64 ? WebAssembly::LT_F64 : WebAssembly::LT_F32;
419 unsigned GE = Float64 ? WebAssembly::GE_F64 : WebAssembly::GE_F32;
420 unsigned IConst = Int64 ? WebAssembly::CONST_I64 : WebAssembly::CONST_I32;
421 unsigned Eqz = WebAssembly::EQZ_I32;
422 unsigned And = WebAssembly::AND_I32;
423 int64_t Limit = Int64 ? INT64_MIN(-9223372036854775807L -1) : INT32_MIN(-2147483647-1);
424 int64_t Substitute = IsUnsigned ? 0 : Limit;
425 double CmpVal = IsUnsigned ? -(double)Limit * 2.0 : -(double)Limit;
426 auto &Context = BB->getParent()->getFunction().getContext();
427 Type *Ty = Float64 ? Type::getDoubleTy(Context) : Type::getFloatTy(Context);
428
429 const BasicBlock *LLVMBB = BB->getBasicBlock();
430 MachineFunction *F = BB->getParent();
431 MachineBasicBlock *TrueMBB = F->CreateMachineBasicBlock(LLVMBB);
432 MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVMBB);
433 MachineBasicBlock *DoneMBB = F->CreateMachineBasicBlock(LLVMBB);
434
435 MachineFunction::iterator It = ++BB->getIterator();
436 F->insert(It, FalseMBB);
437 F->insert(It, TrueMBB);
438 F->insert(It, DoneMBB);
439
440 // Transfer the remainder of BB and its successor edges to DoneMBB.
441 DoneMBB->splice(DoneMBB->begin(), BB, std::next(MI.getIterator()), BB->end());
442 DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
443
444 BB->addSuccessor(TrueMBB);
445 BB->addSuccessor(FalseMBB);
446 TrueMBB->addSuccessor(DoneMBB);
447 FalseMBB->addSuccessor(DoneMBB);
448
449 unsigned Tmp0, Tmp1, CmpReg, EqzReg, FalseReg, TrueReg;
450 Tmp0 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
451 Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
452 CmpReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
453 EqzReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
454 FalseReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
455 TrueReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
456
457 MI.eraseFromParent();
458 // For signed numbers, we can do a single comparison to determine whether
459 // fabs(x) is within range.
460 if (IsUnsigned) {
461 Tmp0 = InReg;
462 } else {
463 BuildMI(BB, DL, TII.get(Abs), Tmp0).addReg(InReg);
464 }
465 BuildMI(BB, DL, TII.get(FConst), Tmp1)
466 .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, CmpVal)));
467 BuildMI(BB, DL, TII.get(LT), CmpReg).addReg(Tmp0).addReg(Tmp1);
468
469 // For unsigned numbers, we have to do a separate comparison with zero.
470 if (IsUnsigned) {
471 Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
472 Register SecondCmpReg =
473 MRI.createVirtualRegister(&WebAssembly::I32RegClass);
474 Register AndReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
475 BuildMI(BB, DL, TII.get(FConst), Tmp1)
476 .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, 0.0)));
477 BuildMI(BB, DL, TII.get(GE), SecondCmpReg).addReg(Tmp0).addReg(Tmp1);
478 BuildMI(BB, DL, TII.get(And), AndReg).addReg(CmpReg).addReg(SecondCmpReg);
479 CmpReg = AndReg;
480 }
481
482 BuildMI(BB, DL, TII.get(Eqz), EqzReg).addReg(CmpReg);
483
484 // Create the CFG diamond to select between doing the conversion or using
485 // the substitute value.
486 BuildMI(BB, DL, TII.get(WebAssembly::BR_IF)).addMBB(TrueMBB).addReg(EqzReg);
487 BuildMI(FalseMBB, DL, TII.get(LoweredOpcode), FalseReg).addReg(InReg);
488 BuildMI(FalseMBB, DL, TII.get(WebAssembly::BR)).addMBB(DoneMBB);
489 BuildMI(TrueMBB, DL, TII.get(IConst), TrueReg).addImm(Substitute);
490 BuildMI(*DoneMBB, DoneMBB->begin(), DL, TII.get(TargetOpcode::PHI), OutReg)
491 .addReg(FalseReg)
492 .addMBB(FalseMBB)
493 .addReg(TrueReg)
494 .addMBB(TrueMBB);
495
496 return DoneMBB;
497}
498
499static MachineBasicBlock *
500LowerCallResults(MachineInstr &CallResults, DebugLoc DL, MachineBasicBlock *BB,
501 const WebAssemblySubtarget *Subtarget,
502 const TargetInstrInfo &TII) {
503 MachineInstr &CallParams = *CallResults.getPrevNode();
504 assert(CallParams.getOpcode() == WebAssembly::CALL_PARAMS)(static_cast <bool> (CallParams.getOpcode() == WebAssembly
::CALL_PARAMS) ? void (0) : __assert_fail ("CallParams.getOpcode() == WebAssembly::CALL_PARAMS"
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 504, __extension__ __PRETTY_FUNCTION__))
;
505 assert(CallResults.getOpcode() == WebAssembly::CALL_RESULTS ||(static_cast <bool> (CallResults.getOpcode() == WebAssembly
::CALL_RESULTS || CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS
) ? void (0) : __assert_fail ("CallResults.getOpcode() == WebAssembly::CALL_RESULTS || CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS"
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 506, __extension__ __PRETTY_FUNCTION__))
506 CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS)(static_cast <bool> (CallResults.getOpcode() == WebAssembly
::CALL_RESULTS || CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS
) ? void (0) : __assert_fail ("CallResults.getOpcode() == WebAssembly::CALL_RESULTS || CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS"
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 506, __extension__ __PRETTY_FUNCTION__))
;
507
508 bool IsIndirect = CallParams.getOperand(0).isReg();
509 bool IsRetCall = CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS;
510
511 bool IsFuncrefCall = false;
512 if (IsIndirect) {
513 Register Reg = CallParams.getOperand(0).getReg();
514 const MachineFunction *MF = BB->getParent();
515 const MachineRegisterInfo &MRI = MF->getRegInfo();
516 const TargetRegisterClass *TRC = MRI.getRegClass(Reg);
517 IsFuncrefCall = (TRC == &WebAssembly::FUNCREFRegClass);
518 assert(!IsFuncrefCall || Subtarget->hasReferenceTypes())(static_cast <bool> (!IsFuncrefCall || Subtarget->hasReferenceTypes
()) ? void (0) : __assert_fail ("!IsFuncrefCall || Subtarget->hasReferenceTypes()"
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 518, __extension__ __PRETTY_FUNCTION__))
;
519 }
520
521 unsigned CallOp;
522 if (IsIndirect && IsRetCall) {
523 CallOp = WebAssembly::RET_CALL_INDIRECT;
524 } else if (IsIndirect) {
525 CallOp = WebAssembly::CALL_INDIRECT;
526 } else if (IsRetCall) {
527 CallOp = WebAssembly::RET_CALL;
528 } else {
529 CallOp = WebAssembly::CALL;
530 }
531
532 MachineFunction &MF = *BB->getParent();
533 const MCInstrDesc &MCID = TII.get(CallOp);
534 MachineInstrBuilder MIB(MF, MF.CreateMachineInstr(MCID, DL));
535
536 // See if we must truncate the function pointer.
537 // CALL_INDIRECT takes an i32, but in wasm64 we represent function pointers
538 // as 64-bit for uniformity with other pointer types.
539 // See also: WebAssemblyFastISel::selectCall
540 if (IsIndirect && MF.getSubtarget<WebAssemblySubtarget>().hasAddr64()) {
541 Register Reg32 =
542 MF.getRegInfo().createVirtualRegister(&WebAssembly::I32RegClass);
543 auto &FnPtr = CallParams.getOperand(0);
544 BuildMI(*BB, CallResults.getIterator(), DL,
545 TII.get(WebAssembly::I32_WRAP_I64), Reg32)
546 .addReg(FnPtr.getReg());
547 FnPtr.setReg(Reg32);
548 }
549
550 // Move the function pointer to the end of the arguments for indirect calls
551 if (IsIndirect) {
552 auto FnPtr = CallParams.getOperand(0);
553 CallParams.RemoveOperand(0);
554 CallParams.addOperand(FnPtr);
555 }
556
557 for (auto Def : CallResults.defs())
558 MIB.add(Def);
559
560 if (IsIndirect) {
561 // Placeholder for the type index.
562 MIB.addImm(0);
563 // The table into which this call_indirect indexes.
564 MCSymbolWasm *Table = IsFuncrefCall
565 ? WebAssembly::getOrCreateFuncrefCallTableSymbol(
566 MF.getContext(), Subtarget)
567 : WebAssembly::getOrCreateFunctionTableSymbol(
568 MF.getContext(), Subtarget);
569 if (Subtarget->hasReferenceTypes()) {
570 MIB.addSym(Table);
571 } else {
572 // For the MVP there is at most one table whose number is 0, but we can't
573 // write a table symbol or issue relocations. Instead we just ensure the
574 // table is live and write a zero.
575 Table->setNoStrip();
576 MIB.addImm(0);
577 }
578 }
579
580 for (auto Use : CallParams.uses())
581 MIB.add(Use);
582
583 BB->insert(CallResults.getIterator(), MIB);
584 CallParams.eraseFromParent();
585 CallResults.eraseFromParent();
586
587 // If this is a funcref call, to avoid hidden GC roots, we need to clear the
588 // table slot with ref.null upon call_indirect return.
589 //
590 // This generates the following code, which comes right after a call_indirect
591 // of a funcref:
592 //
593 // i32.const 0
594 // ref.null func
595 // table.set __funcref_call_table
596 if (IsIndirect && IsFuncrefCall) {
597 MCSymbolWasm *Table = WebAssembly::getOrCreateFuncrefCallTableSymbol(
598 MF.getContext(), Subtarget);
599 Register RegZero =
600 MF.getRegInfo().createVirtualRegister(&WebAssembly::I32RegClass);
601 MachineInstr *Const0 =
602 BuildMI(MF, DL, TII.get(WebAssembly::CONST_I32), RegZero).addImm(0);
603 BB->insertAfter(MIB.getInstr()->getIterator(), Const0);
604
605 Register RegFuncref =
606 MF.getRegInfo().createVirtualRegister(&WebAssembly::FUNCREFRegClass);
607 MachineInstr *RefNull =
608 BuildMI(MF, DL, TII.get(WebAssembly::REF_NULL_FUNCREF), RegFuncref)
609 .addImm(static_cast<int32_t>(WebAssembly::HeapType::Funcref));
610 BB->insertAfter(Const0->getIterator(), RefNull);
611
612 MachineInstr *TableSet =
613 BuildMI(MF, DL, TII.get(WebAssembly::TABLE_SET_FUNCREF))
614 .addSym(Table)
615 .addReg(RegZero)
616 .addReg(RegFuncref);
617 BB->insertAfter(RefNull->getIterator(), TableSet);
618 }
619
620 return BB;
621}
622
623MachineBasicBlock *WebAssemblyTargetLowering::EmitInstrWithCustomInserter(
624 MachineInstr &MI, MachineBasicBlock *BB) const {
625 const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
626 DebugLoc DL = MI.getDebugLoc();
627
628 switch (MI.getOpcode()) {
629 default:
630 llvm_unreachable("Unexpected instr type to insert")::llvm::llvm_unreachable_internal("Unexpected instr type to insert"
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 630)
;
631 case WebAssembly::FP_TO_SINT_I32_F32:
632 return LowerFPToInt(MI, DL, BB, TII, false, false, false,
633 WebAssembly::I32_TRUNC_S_F32);
634 case WebAssembly::FP_TO_UINT_I32_F32:
635 return LowerFPToInt(MI, DL, BB, TII, true, false, false,
636 WebAssembly::I32_TRUNC_U_F32);
637 case WebAssembly::FP_TO_SINT_I64_F32:
638 return LowerFPToInt(MI, DL, BB, TII, false, true, false,
639 WebAssembly::I64_TRUNC_S_F32);
640 case WebAssembly::FP_TO_UINT_I64_F32:
641 return LowerFPToInt(MI, DL, BB, TII, true, true, false,
642 WebAssembly::I64_TRUNC_U_F32);
643 case WebAssembly::FP_TO_SINT_I32_F64:
644 return LowerFPToInt(MI, DL, BB, TII, false, false, true,
645 WebAssembly::I32_TRUNC_S_F64);
646 case WebAssembly::FP_TO_UINT_I32_F64:
647 return LowerFPToInt(MI, DL, BB, TII, true, false, true,
648 WebAssembly::I32_TRUNC_U_F64);
649 case WebAssembly::FP_TO_SINT_I64_F64:
650 return LowerFPToInt(MI, DL, BB, TII, false, true, true,
651 WebAssembly::I64_TRUNC_S_F64);
652 case WebAssembly::FP_TO_UINT_I64_F64:
653 return LowerFPToInt(MI, DL, BB, TII, true, true, true,
654 WebAssembly::I64_TRUNC_U_F64);
655 case WebAssembly::CALL_RESULTS:
656 case WebAssembly::RET_CALL_RESULTS:
657 return LowerCallResults(MI, DL, BB, Subtarget, TII);
658 }
659}
660
661const char *
662WebAssemblyTargetLowering::getTargetNodeName(unsigned Opcode) const {
663 switch (static_cast<WebAssemblyISD::NodeType>(Opcode)) {
664 case WebAssemblyISD::FIRST_NUMBER:
665 case WebAssemblyISD::FIRST_MEM_OPCODE:
666 break;
667#define HANDLE_NODETYPE(NODE) \
668 case WebAssemblyISD::NODE: \
669 return "WebAssemblyISD::" #NODE;
670#define HANDLE_MEM_NODETYPE(NODE) HANDLE_NODETYPE(NODE)
671#include "WebAssemblyISD.def"
672#undef HANDLE_MEM_NODETYPE
673#undef HANDLE_NODETYPE
674 }
675 return nullptr;
676}
677
678std::pair<unsigned, const TargetRegisterClass *>
679WebAssemblyTargetLowering::getRegForInlineAsmConstraint(
680 const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
681 // First, see if this is a constraint that directly corresponds to a
682 // WebAssembly register class.
683 if (Constraint.size() == 1) {
684 switch (Constraint[0]) {
685 case 'r':
686 assert(VT != MVT::iPTR && "Pointer MVT not expected here")(static_cast <bool> (VT != MVT::iPTR && "Pointer MVT not expected here"
) ? void (0) : __assert_fail ("VT != MVT::iPTR && \"Pointer MVT not expected here\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 686, __extension__ __PRETTY_FUNCTION__))
;
687 if (Subtarget->hasSIMD128() && VT.isVector()) {
688 if (VT.getSizeInBits() == 128)
689 return std::make_pair(0U, &WebAssembly::V128RegClass);
690 }
691 if (VT.isInteger() && !VT.isVector()) {
692 if (VT.getSizeInBits() <= 32)
693 return std::make_pair(0U, &WebAssembly::I32RegClass);
694 if (VT.getSizeInBits() <= 64)
695 return std::make_pair(0U, &WebAssembly::I64RegClass);
696 }
697 if (VT.isFloatingPoint() && !VT.isVector()) {
698 switch (VT.getSizeInBits()) {
699 case 32:
700 return std::make_pair(0U, &WebAssembly::F32RegClass);
701 case 64:
702 return std::make_pair(0U, &WebAssembly::F64RegClass);
703 default:
704 break;
705 }
706 }
707 break;
708 default:
709 break;
710 }
711 }
712
713 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
714}
715
716bool WebAssemblyTargetLowering::isCheapToSpeculateCttz() const {
717 // Assume ctz is a relatively cheap operation.
718 return true;
719}
720
721bool WebAssemblyTargetLowering::isCheapToSpeculateCtlz() const {
722 // Assume clz is a relatively cheap operation.
723 return true;
724}
725
726bool WebAssemblyTargetLowering::isLegalAddressingMode(const DataLayout &DL,
727 const AddrMode &AM,
728 Type *Ty, unsigned AS,
729 Instruction *I) const {
730 // WebAssembly offsets are added as unsigned without wrapping. The
731 // isLegalAddressingMode gives us no way to determine if wrapping could be
732 // happening, so we approximate this by accepting only non-negative offsets.
733 if (AM.BaseOffs < 0)
734 return false;
735
736 // WebAssembly has no scale register operands.
737 if (AM.Scale != 0)
738 return false;
739
740 // Everything else is legal.
741 return true;
742}
743
744bool WebAssemblyTargetLowering::allowsMisalignedMemoryAccesses(
745 EVT /*VT*/, unsigned /*AddrSpace*/, Align /*Align*/,
746 MachineMemOperand::Flags /*Flags*/, bool *Fast) const {
747 // WebAssembly supports unaligned accesses, though it should be declared
748 // with the p2align attribute on loads and stores which do so, and there
749 // may be a performance impact. We tell LLVM they're "fast" because
750 // for the kinds of things that LLVM uses this for (merging adjacent stores
751 // of constants, etc.), WebAssembly implementations will either want the
752 // unaligned access or they'll split anyway.
753 if (Fast)
754 *Fast = true;
755 return true;
756}
757
758bool WebAssemblyTargetLowering::isIntDivCheap(EVT VT,
759 AttributeList Attr) const {
760 // The current thinking is that wasm engines will perform this optimization,
761 // so we can save on code size.
762 return true;
763}
764
765bool WebAssemblyTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
766 EVT ExtT = ExtVal.getValueType();
767 EVT MemT = cast<LoadSDNode>(ExtVal->getOperand(0))->getValueType(0);
768 return (ExtT == MVT::v8i16 && MemT == MVT::v8i8) ||
769 (ExtT == MVT::v4i32 && MemT == MVT::v4i16) ||
770 (ExtT == MVT::v2i64 && MemT == MVT::v2i32);
771}
772
773EVT WebAssemblyTargetLowering::getSetCCResultType(const DataLayout &DL,
774 LLVMContext &C,
775 EVT VT) const {
776 if (VT.isVector())
777 return VT.changeVectorElementTypeToInteger();
778
779 // So far, all branch instructions in Wasm take an I32 condition.
780 // The default TargetLowering::getSetCCResultType returns the pointer size,
781 // which would be useful to reduce instruction counts when testing
782 // against 64-bit pointers/values if at some point Wasm supports that.
783 return EVT::getIntegerVT(C, 32);
784}
785
786bool WebAssemblyTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
787 const CallInst &I,
788 MachineFunction &MF,
789 unsigned Intrinsic) const {
790 switch (Intrinsic) {
791 case Intrinsic::wasm_memory_atomic_notify:
792 Info.opc = ISD::INTRINSIC_W_CHAIN;
793 Info.memVT = MVT::i32;
794 Info.ptrVal = I.getArgOperand(0);
795 Info.offset = 0;
796 Info.align = Align(4);
797 // atomic.notify instruction does not really load the memory specified with
798 // this argument, but MachineMemOperand should either be load or store, so
799 // we set this to a load.
800 // FIXME Volatile isn't really correct, but currently all LLVM atomic
801 // instructions are treated as volatiles in the backend, so we should be
802 // consistent. The same applies for wasm_atomic_wait intrinsics too.
803 Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
804 return true;
805 case Intrinsic::wasm_memory_atomic_wait32:
806 Info.opc = ISD::INTRINSIC_W_CHAIN;
807 Info.memVT = MVT::i32;
808 Info.ptrVal = I.getArgOperand(0);
809 Info.offset = 0;
810 Info.align = Align(4);
811 Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
812 return true;
813 case Intrinsic::wasm_memory_atomic_wait64:
814 Info.opc = ISD::INTRINSIC_W_CHAIN;
815 Info.memVT = MVT::i64;
816 Info.ptrVal = I.getArgOperand(0);
817 Info.offset = 0;
818 Info.align = Align(8);
819 Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
820 return true;
821 default:
822 return false;
823 }
824}
825
826//===----------------------------------------------------------------------===//
827// WebAssembly Lowering private implementation.
828//===----------------------------------------------------------------------===//
829
830//===----------------------------------------------------------------------===//
831// Lowering Code
832//===----------------------------------------------------------------------===//
833
834static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg) {
835 MachineFunction &MF = DAG.getMachineFunction();
836 DAG.getContext()->diagnose(
837 DiagnosticInfoUnsupported(MF.getFunction(), Msg, DL.getDebugLoc()));
838}
839
840// Test whether the given calling convention is supported.
841static bool callingConvSupported(CallingConv::ID CallConv) {
842 // We currently support the language-independent target-independent
843 // conventions. We don't yet have a way to annotate calls with properties like
844 // "cold", and we don't have any call-clobbered registers, so these are mostly
845 // all handled the same.
846 return CallConv == CallingConv::C || CallConv == CallingConv::Fast ||
847 CallConv == CallingConv::Cold ||
848 CallConv == CallingConv::PreserveMost ||
849 CallConv == CallingConv::PreserveAll ||
850 CallConv == CallingConv::CXX_FAST_TLS ||
851 CallConv == CallingConv::WASM_EmscriptenInvoke ||
852 CallConv == CallingConv::Swift;
853}
854
// Lower an outgoing call into a WebAssemblyISD::CALL (or RET_CALL) node:
// diagnose unsupported features, decide whether a requested tail call is
// actually possible, copy byval arguments, place varargs in a stack buffer,
// and (for funcref callees) emit the table.set needed before call_indirect.
SDValue
WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                     SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc DL = CLI.DL;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  MachineFunction &MF = DAG.getMachineFunction();
  auto Layout = MF.getDataLayout();

  CallingConv::ID CallConv = CLI.CallConv;
  if (!callingConvSupported(CallConv))
    fail(DL, DAG,
         "WebAssembly doesn't support language-specific or target-specific "
         "calling conventions yet");
  if (CLI.IsPatchPoint)
    fail(DL, DAG, "WebAssembly doesn't support patch point yet");

  if (CLI.IsTailCall) {
    // Downgrade the tail call to a regular call; only diagnose when the
    // source explicitly demanded a tail call via musttail.
    auto NoTail = [&](const char *Msg) {
      if (CLI.CB && CLI.CB->isMustTailCall())
        fail(DL, DAG, Msg);
      CLI.IsTailCall = false;
    };

    if (!Subtarget->hasTailCall())
      NoTail("WebAssembly 'tail-call' feature not enabled");

    // Varargs calls cannot be tail calls because the buffer is on the stack
    if (CLI.IsVarArg)
      NoTail("WebAssembly does not support varargs tail calls");

    // Do not tail call unless caller and callee return types match
    const Function &F = MF.getFunction();
    const TargetMachine &TM = getTargetMachine();
    Type *RetTy = F.getReturnType();
    SmallVector<MVT, 4> CallerRetTys;
    SmallVector<MVT, 4> CalleeRetTys;
    computeLegalValueVTs(F, TM, RetTy, CallerRetTys);
    computeLegalValueVTs(F, TM, CLI.RetTy, CalleeRetTys);
    bool TypesMatch = CallerRetTys.size() == CalleeRetTys.size() &&
                      std::equal(CallerRetTys.begin(), CallerRetTys.end(),
                                 CalleeRetTys.begin());
    if (!TypesMatch)
      NoTail("WebAssembly tail call requires caller and callee return types to "
             "match");

    // If pointers to local stack values are passed, we cannot tail call
    if (CLI.CB) {
      for (auto &Arg : CLI.CB->args()) {
        Value *Val = Arg.get();
        // Trace the value back through pointer operations
        while (true) {
          Value *Src = Val->stripPointerCastsAndAliases();
          if (auto *GEP = dyn_cast<GetElementPtrInst>(Src))
            Src = GEP->getPointerOperand();
          if (Val == Src)
            break;
          Val = Src;
        }
        if (isa<AllocaInst>(Val)) {
          NoTail(
              "WebAssembly does not support tail calling with stack arguments");
          break;
        }
      }
    }
  }

  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;

  // The generic code may have added an sret argument. If we're lowering an
  // invoke function, the ABI requires that the function pointer be the first
  // argument, so we may have to swap the arguments.
  if (CallConv == CallingConv::WASM_EmscriptenInvoke && Outs.size() >= 2 &&
      Outs[0].Flags.isSRet()) {
    std::swap(Outs[0], Outs[1]);
    std::swap(OutVals[0], OutVals[1]);
  }

  bool HasSwiftSelfArg = false;
  bool HasSwiftErrorArg = false;
  unsigned NumFixedArgs = 0;
  for (unsigned I = 0; I < Outs.size(); ++I) {
    const ISD::OutputArg &Out = Outs[I];
    SDValue &OutVal = OutVals[I];
    HasSwiftSelfArg |= Out.Flags.isSwiftSelf();
    HasSwiftErrorArg |= Out.Flags.isSwiftError();
    if (Out.Flags.isNest())
      fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
    if (Out.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
    if (Out.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
    if (Out.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
    // byval arguments get their own caller-local copy on the stack; the
    // callee receives a pointer to that copy instead of the original.
    if (Out.Flags.isByVal() && Out.Flags.getByValSize() != 0) {
      auto &MFI = MF.getFrameInfo();
      int FI = MFI.CreateStackObject(Out.Flags.getByValSize(),
                                     Out.Flags.getNonZeroByValAlign(),
                                     /*isSS=*/false);
      SDValue SizeNode =
          DAG.getConstant(Out.Flags.getByValSize(), DL, MVT::i32);
      SDValue FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
      Chain = DAG.getMemcpy(
          Chain, DL, FINode, OutVal, SizeNode, Out.Flags.getNonZeroByValAlign(),
          /*isVolatile*/ false, /*AlwaysInline=*/false,
          /*isTailCall*/ false, MachinePointerInfo(), MachinePointerInfo());
      OutVal = FINode;
    }
    // Count the number of fixed args *after* legalization.
    NumFixedArgs += Out.IsFixed;
  }

  bool IsVarArg = CLI.IsVarArg;
  auto PtrVT = getPointerTy(Layout);

  // For swiftcc, emit additional swiftself and swifterror arguments
  // if there aren't. These additional arguments are also added for callee
  // signature. They are necessary to match callee and caller signature for
  // indirect call.
  if (CallConv == CallingConv::Swift) {
    if (!HasSwiftSelfArg) {
      NumFixedArgs++;
      ISD::OutputArg Arg;
      Arg.Flags.setSwiftSelf();
      CLI.Outs.push_back(Arg);
      SDValue ArgVal = DAG.getUNDEF(PtrVT);
      CLI.OutVals.push_back(ArgVal);
    }
    if (!HasSwiftErrorArg) {
      NumFixedArgs++;
      ISD::OutputArg Arg;
      Arg.Flags.setSwiftError();
      CLI.Outs.push_back(Arg);
      SDValue ArgVal = DAG.getUNDEF(PtrVT);
      CLI.OutVals.push_back(ArgVal);
    }
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());

  if (IsVarArg) {
    // Outgoing non-fixed arguments are placed in a buffer. First
    // compute their offsets and the total amount of buffer space needed.
    for (unsigned I = NumFixedArgs; I < Outs.size(); ++I) {
      const ISD::OutputArg &Out = Outs[I];
      SDValue &Arg = OutVals[I];
      EVT VT = Arg.getValueType();
      assert(VT != MVT::iPTR && "Legalized args should be concrete");
      Type *Ty = VT.getTypeForEVT(*DAG.getContext());
      Align Alignment =
          std::max(Out.Flags.getNonZeroOrigAlign(), Layout.getABITypeAlign(Ty));
      unsigned Offset =
          CCInfo.AllocateStack(Layout.getTypeAllocSize(Ty), Alignment);
      CCInfo.addLoc(CCValAssign::getMem(ArgLocs.size(), VT.getSimpleVT(),
                                        Offset, VT.getSimpleVT(),
                                        CCValAssign::Full));
    }
  }

  unsigned NumBytes = CCInfo.getAlignedCallFrameSize();

  SDValue FINode;
  if (IsVarArg && NumBytes) {
    // For non-fixed arguments, next emit stores to store the argument values
    // to the stack buffer at the offsets computed above.
    int FI = MF.getFrameInfo().CreateStackObject(NumBytes,
                                                 Layout.getStackAlignment(),
                                                 /*isSS=*/false);
    unsigned ValNo = 0;
    SmallVector<SDValue, 8> Chains;
    for (SDValue Arg : drop_begin(OutVals, NumFixedArgs)) {
      assert(ArgLocs[ValNo].getValNo() == ValNo &&
             "ArgLocs should remain in order and only hold varargs args");
      unsigned Offset = ArgLocs[ValNo++].getLocMemOffset();
      FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
      SDValue Add = DAG.getNode(ISD::ADD, DL, PtrVT, FINode,
                                DAG.getConstant(Offset, DL, PtrVT));
      Chains.push_back(
          DAG.getStore(Chain, DL, Arg, Add,
                       MachinePointerInfo::getFixedStack(MF, FI, Offset)));
    }
    if (!Chains.empty())
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
  } else if (IsVarArg) {
    // Varargs call with an empty buffer: still pass a (null) buffer pointer.
    FINode = DAG.getIntPtrConstant(0, DL);
  }

  if (Callee->getOpcode() == ISD::GlobalAddress) {
    // If the callee is a GlobalAddress node (quite common, every direct call
    // is) turn it into a TargetGlobalAddress node so that LowerGlobalAddress
    // doesn't add MO_GOT, which is not needed for direct calls.
    GlobalAddressSDNode* GA = cast<GlobalAddressSDNode>(Callee);
    Callee = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
                                        getPointerTy(DAG.getDataLayout()),
                                        GA->getOffset());
    Callee = DAG.getNode(WebAssemblyISD::Wrapper, DL,
                         getPointerTy(DAG.getDataLayout()), Callee);
  }

  // Compute the operands for the CALLn node.
  SmallVector<SDValue, 16> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add all fixed arguments. Note that for non-varargs calls, NumFixedArgs
  // isn't reliable.
  Ops.append(OutVals.begin(),
             IsVarArg ? OutVals.begin() + NumFixedArgs : OutVals.end());
  // Add a pointer to the vararg buffer.
  if (IsVarArg)
    Ops.push_back(FINode);

  SmallVector<EVT, 8> InTys;
  for (const auto &In : Ins) {
    assert(!In.Flags.isByVal() && "byval is not valid for return values");
    assert(!In.Flags.isNest() && "nest is not valid for return values");
    if (In.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca return values");
    if (In.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs return values");
    if (In.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG,
           "WebAssembly hasn't implemented cons regs last return values");
    // Ignore In.getNonZeroOrigAlign() because all our arguments are passed in
    // registers.
    InTys.push_back(In.VT);
  }

  // Lastly, if this is a call to a funcref we need to add an instruction
  // table.set to the chain and transform the call.
  if (CLI.CB && isFuncrefType(CLI.CB->getCalledOperand()->getType())) {
    // In the absence of function references proposal where a funcref call is
    // lowered to call_ref, using reference types we generate a table.set to set
    // the funcref to a special table used solely for this purpose, followed by
    // a call_indirect. Here we just generate the table set, and return the
    // SDValue of the table.set so that LowerCall can finalize the lowering by
    // generating the call_indirect.
    SDValue Chain = Ops[0];

    MCSymbolWasm *Table = WebAssembly::getOrCreateFuncrefCallTableSymbol(
        MF.getContext(), Subtarget);
    SDValue Sym = DAG.getMCSymbol(Table, PtrVT);
    SDValue TableSlot = DAG.getConstant(0, DL, MVT::i32);
    SDValue TableSetOps[] = {Chain, Sym, TableSlot, Callee};
    SDValue TableSet = DAG.getMemIntrinsicNode(
        WebAssemblyISD::TABLE_SET, DL, DAG.getVTList(MVT::Other), TableSetOps,
        MVT::funcref,
        // Machine Mem Operand args
        MachinePointerInfo(WasmAddressSpace::FUNCREF),
        CLI.CB->getCalledOperand()->getPointerAlignment(DAG.getDataLayout()),
        MachineMemOperand::MOStore);

    Ops[0] = TableSet; // The new chain is the TableSet itself
  }

  if (CLI.IsTailCall) {
    // ret_calls do not return values to the current frame
    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
    return DAG.getNode(WebAssemblyISD::RET_CALL, DL, NodeTys, Ops);
  }

  InTys.push_back(MVT::Other);
  SDVTList InTyList = DAG.getVTList(InTys);
  SDValue Res = DAG.getNode(WebAssemblyISD::CALL, DL, InTyList, Ops);

  // The CALL node's results are the formal return values followed by the
  // output chain.
  for (size_t I = 0; I < Ins.size(); ++I)
    InVals.push_back(Res.getValue(I));

  // Return the chain
  return Res.getValue(Ins.size());
}
1132
1133bool WebAssemblyTargetLowering::CanLowerReturn(
1134 CallingConv::ID /*CallConv*/, MachineFunction & /*MF*/, bool /*IsVarArg*/,
1135 const SmallVectorImpl<ISD::OutputArg> &Outs,
1136 LLVMContext & /*Context*/) const {
1137 // WebAssembly can only handle returning tuples with multivalue enabled
1138 return Subtarget->hasMultivalue() || Outs.size() <= 1;
1139}
1140
// Lower a function return into a WebAssemblyISD::RETURN node carrying all of
// the return values, diagnosing unsupported features along the way.
SDValue WebAssemblyTargetLowering::LowerReturn(
    SDValue Chain, CallingConv::ID CallConv, bool /*IsVarArg*/,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
    SelectionDAG &DAG) const {
  assert((Subtarget->hasMultivalue() || Outs.size() <= 1) &&
         "MVP WebAssembly can only return up to one value");
  if (!callingConvSupported(CallConv))
    fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");

  // RETURN takes the chain followed by every return value.
  SmallVector<SDValue, 4> RetOps(1, Chain);
  RetOps.append(OutVals.begin(), OutVals.end());
  Chain = DAG.getNode(WebAssemblyISD::RETURN, DL, MVT::Other, RetOps);

  // Validate the return-value flags; these features aren't implemented.
  for (const ISD::OutputArg &Out : Outs) {
    assert(!Out.Flags.isByVal() && "byval is not valid for return values");
    assert(!Out.Flags.isNest() && "nest is not valid for return values");
    assert(Out.IsFixed && "non-fixed return value is not valid");
    if (Out.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca results");
    if (Out.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs results");
    if (Out.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last results");
  }

  return Chain;
}
1170
// Lower the incoming formal arguments: each used argument becomes a
// WebAssemblyISD::ARGUMENT node indexed by position, and the function's
// parameter/result types are recorded in WebAssemblyFunctionInfo.
SDValue WebAssemblyTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  if (!callingConvSupported(CallConv))
    fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");

  MachineFunction &MF = DAG.getMachineFunction();
  auto *MFI = MF.getInfo<WebAssemblyFunctionInfo>();

  // Set up the incoming ARGUMENTS value, which serves to represent the liveness
  // of the incoming values before they're represented by virtual registers.
  MF.getRegInfo().addLiveIn(WebAssembly::ARGUMENTS);

  bool HasSwiftErrorArg = false;
  bool HasSwiftSelfArg = false;
  for (const ISD::InputArg &In : Ins) {
    HasSwiftSelfArg |= In.Flags.isSwiftSelf();
    HasSwiftErrorArg |= In.Flags.isSwiftError();
    if (In.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
    if (In.Flags.isNest())
      fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
    if (In.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
    if (In.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
    // Ignore In.getNonZeroOrigAlign() because all our arguments are passed in
    // registers. Unused arguments become UNDEF rather than ARGUMENT nodes.
    InVals.push_back(In.Used ? DAG.getNode(WebAssemblyISD::ARGUMENT, DL, In.VT,
                                           DAG.getTargetConstant(InVals.size(),
                                                                 DL, MVT::i32))
                             : DAG.getUNDEF(In.VT));

    // Record the number and types of arguments.
    MFI->addParam(In.VT);
  }

  // For swiftcc, emit additional swiftself and swifterror arguments
  // if there aren't. These additional arguments are also added for callee
  // signature. They are necessary to match callee and caller signature for
  // indirect call.
  auto PtrVT = getPointerTy(MF.getDataLayout());
  if (CallConv == CallingConv::Swift) {
    if (!HasSwiftSelfArg) {
      MFI->addParam(PtrVT);
    }
    if (!HasSwiftErrorArg) {
      MFI->addParam(PtrVT);
    }
  }
  // Varargs are copied into a buffer allocated by the caller, and a pointer to
  // the buffer is passed as an argument.
  if (IsVarArg) {
    MVT PtrVT = getPointerTy(MF.getDataLayout());
    Register VarargVreg =
        MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrVT));
    MFI->setVarargBufferVreg(VarargVreg);
    Chain = DAG.getCopyToReg(
        Chain, DL, VarargVreg,
        DAG.getNode(WebAssemblyISD::ARGUMENT, DL, PtrVT,
                    DAG.getTargetConstant(Ins.size(), DL, MVT::i32)));
    MFI->addParam(PtrVT);
  }

  // Record the number and types of arguments and results.
  SmallVector<MVT, 4> Params;
  SmallVector<MVT, 4> Results;
  computeSignatureVTs(MF.getFunction().getFunctionType(), &MF.getFunction(),
                      MF.getFunction(), DAG.getTarget(), Params, Results);
  for (MVT VT : Results)
    MFI->addResult(VT);
  // TODO: Use signatures in WebAssemblyMachineFunctionInfo too and unify
  // the param logic here with ComputeSignatureVTs
  assert(MFI->getParams().size() == Params.size() &&
         std::equal(MFI->getParams().begin(), MFI->getParams().end(),
                    Params.begin()));

  return Chain;
}
1251
1252void WebAssemblyTargetLowering::ReplaceNodeResults(
1253 SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
1254 switch (N->getOpcode()) {
1255 case ISD::SIGN_EXTEND_INREG:
1256 // Do not add any results, signifying that N should not be custom lowered
1257 // after all. This happens because simd128 turns on custom lowering for
1258 // SIGN_EXTEND_INREG, but for non-vector sign extends the result might be an
1259 // illegal type.
1260 break;
1261 default:
1262 llvm_unreachable(::llvm::llvm_unreachable_internal("ReplaceNodeResults not implemented for this op for WebAssembly!"
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1263)
1263 "ReplaceNodeResults not implemented for this op for WebAssembly!")::llvm::llvm_unreachable_internal("ReplaceNodeResults not implemented for this op for WebAssembly!"
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1263)
;
1264 }
1265}
1266
1267//===----------------------------------------------------------------------===//
1268// Custom lowering hooks.
1269//===----------------------------------------------------------------------===//
1270
1271SDValue WebAssemblyTargetLowering::LowerOperation(SDValue Op,
1272 SelectionDAG &DAG) const {
1273 SDLoc DL(Op);
1274 switch (Op.getOpcode()) {
1
Control jumps to 'case BUILD_VECTOR:' at line 1311
1275 default:
1276 llvm_unreachable("unimplemented operation lowering")::llvm::llvm_unreachable_internal("unimplemented operation lowering"
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1276)
;
1277 return SDValue();
1278 case ISD::FrameIndex:
1279 return LowerFrameIndex(Op, DAG);
1280 case ISD::GlobalAddress:
1281 return LowerGlobalAddress(Op, DAG);
1282 case ISD::GlobalTLSAddress:
1283 return LowerGlobalTLSAddress(Op, DAG);
1284 case ISD::ExternalSymbol:
1285 return LowerExternalSymbol(Op, DAG);
1286 case ISD::JumpTable:
1287 return LowerJumpTable(Op, DAG);
1288 case ISD::BR_JT:
1289 return LowerBR_JT(Op, DAG);
1290 case ISD::VASTART:
1291 return LowerVASTART(Op, DAG);
1292 case ISD::BlockAddress:
1293 case ISD::BRIND:
1294 fail(DL, DAG, "WebAssembly hasn't implemented computed gotos");
1295 return SDValue();
1296 case ISD::RETURNADDR:
1297 return LowerRETURNADDR(Op, DAG);
1298 case ISD::FRAMEADDR:
1299 return LowerFRAMEADDR(Op, DAG);
1300 case ISD::CopyToReg:
1301 return LowerCopyToReg(Op, DAG);
1302 case ISD::EXTRACT_VECTOR_ELT:
1303 case ISD::INSERT_VECTOR_ELT:
1304 return LowerAccessVectorElement(Op, DAG);
1305 case ISD::INTRINSIC_VOID:
1306 case ISD::INTRINSIC_WO_CHAIN:
1307 case ISD::INTRINSIC_W_CHAIN:
1308 return LowerIntrinsic(Op, DAG);
1309 case ISD::SIGN_EXTEND_INREG:
1310 return LowerSIGN_EXTEND_INREG(Op, DAG);
1311 case ISD::BUILD_VECTOR:
1312 return LowerBUILD_VECTOR(Op, DAG);
2
Calling 'WebAssemblyTargetLowering::LowerBUILD_VECTOR'
1313 case ISD::VECTOR_SHUFFLE:
1314 return LowerVECTOR_SHUFFLE(Op, DAG);
1315 case ISD::SETCC:
1316 return LowerSETCC(Op, DAG);
1317 case ISD::SHL:
1318 case ISD::SRA:
1319 case ISD::SRL:
1320 return LowerShift(Op, DAG);
1321 case ISD::FP_TO_SINT_SAT:
1322 case ISD::FP_TO_UINT_SAT:
1323 return LowerFP_TO_INT_SAT(Op, DAG);
1324 case ISD::LOAD:
1325 return LowerLoad(Op, DAG);
1326 case ISD::STORE:
1327 return LowerStore(Op, DAG);
1328 }
1329}
1330
1331static bool IsWebAssemblyGlobal(SDValue Op) {
1332 if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op))
1333 return WebAssembly::isWasmVarAddressSpace(GA->getAddressSpace());
1334
1335 return false;
1336}
1337
1338static Optional<unsigned> IsWebAssemblyLocal(SDValue Op, SelectionDAG &DAG) {
1339 const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op);
1340 if (!FI)
1341 return None;
1342
1343 auto &MF = DAG.getMachineFunction();
1344 return WebAssemblyFrameLowering::getLocalForStackObject(MF, FI->getIndex());
1345}
1346
1347bool WebAssemblyTargetLowering::isFuncrefType(const Type *Ty) {
1348 return isa<PointerType>(Ty) &&
1349 Ty->getPointerAddressSpace() == WasmAddressSpace::FUNCREF;
1350}
1351
1352bool WebAssemblyTargetLowering::isExternrefType(const Type *Ty) {
1353 return isa<PointerType>(Ty) &&
1354 Ty->getPointerAddressSpace() == WasmAddressSpace::EXTERNREF;
1355}
1356
1357SDValue WebAssemblyTargetLowering::LowerStore(SDValue Op,
1358 SelectionDAG &DAG) const {
1359 SDLoc DL(Op);
1360 StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());
1361 const SDValue &Value = SN->getValue();
1362 const SDValue &Base = SN->getBasePtr();
1363 const SDValue &Offset = SN->getOffset();
1364
1365 if (IsWebAssemblyGlobal(Base)) {
1366 if (!Offset->isUndef())
1367 report_fatal_error("unexpected offset when storing to webassembly global",
1368 false);
1369
1370 SDVTList Tys = DAG.getVTList(MVT::Other);
1371 SDValue Ops[] = {SN->getChain(), Value, Base};
1372 return DAG.getMemIntrinsicNode(WebAssemblyISD::GLOBAL_SET, DL, Tys, Ops,
1373 SN->getMemoryVT(), SN->getMemOperand());
1374 }
1375
1376 if (Optional<unsigned> Local = IsWebAssemblyLocal(Base, DAG)) {
1377 if (!Offset->isUndef())
1378 report_fatal_error("unexpected offset when storing to webassembly local",
1379 false);
1380
1381 SDValue Idx = DAG.getTargetConstant(*Local, Base, MVT::i32);
1382 SDVTList Tys = DAG.getVTList(MVT::Other); // The chain.
1383 SDValue Ops[] = {SN->getChain(), Idx, Value};
1384 return DAG.getNode(WebAssemblyISD::LOCAL_SET, DL, Tys, Ops);
1385 }
1386
1387 return Op;
1388}
1389
1390SDValue WebAssemblyTargetLowering::LowerLoad(SDValue Op,
1391 SelectionDAG &DAG) const {
1392 SDLoc DL(Op);
1393 LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
1394 const SDValue &Base = LN->getBasePtr();
1395 const SDValue &Offset = LN->getOffset();
1396
1397 if (IsWebAssemblyGlobal(Base)) {
1398 if (!Offset->isUndef())
1399 report_fatal_error(
1400 "unexpected offset when loading from webassembly global", false);
1401
1402 SDVTList Tys = DAG.getVTList(LN->getValueType(0), MVT::Other);
1403 SDValue Ops[] = {LN->getChain(), Base};
1404 return DAG.getMemIntrinsicNode(WebAssemblyISD::GLOBAL_GET, DL, Tys, Ops,
1405 LN->getMemoryVT(), LN->getMemOperand());
1406 }
1407
1408 if (Optional<unsigned> Local = IsWebAssemblyLocal(Base, DAG)) {
1409 if (!Offset->isUndef())
1410 report_fatal_error(
1411 "unexpected offset when loading from webassembly local", false);
1412
1413 SDValue Idx = DAG.getTargetConstant(*Local, Base, MVT::i32);
1414 EVT LocalVT = LN->getValueType(0);
1415 SDValue LocalGet = DAG.getNode(WebAssemblyISD::LOCAL_GET, DL, LocalVT,
1416 {LN->getChain(), Idx});
1417 SDValue Result = DAG.getMergeValues({LocalGet, LN->getChain()}, DL);
1418 assert(Result->getNumValues() == 2 && "Loads must carry a chain!")(static_cast <bool> (Result->getNumValues() == 2 &&
"Loads must carry a chain!") ? void (0) : __assert_fail ("Result->getNumValues() == 2 && \"Loads must carry a chain!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1418, __extension__ __PRETTY_FUNCTION__))
;
1419 return Result;
1420 }
1421
1422 return Op;
1423}
1424
1425SDValue WebAssemblyTargetLowering::LowerCopyToReg(SDValue Op,
1426 SelectionDAG &DAG) const {
1427 SDValue Src = Op.getOperand(2);
1428 if (isa<FrameIndexSDNode>(Src.getNode())) {
1429 // CopyToReg nodes don't support FrameIndex operands. Other targets select
1430 // the FI to some LEA-like instruction, but since we don't have that, we
1431 // need to insert some kind of instruction that can take an FI operand and
1432 // produces a value usable by CopyToReg (i.e. in a vreg). So insert a dummy
1433 // local.copy between Op and its FI operand.
1434 SDValue Chain = Op.getOperand(0);
1435 SDLoc DL(Op);
1436 unsigned Reg = cast<RegisterSDNode>(Op.getOperand(1))->getReg();
1437 EVT VT = Src.getValueType();
1438 SDValue Copy(DAG.getMachineNode(VT == MVT::i32 ? WebAssembly::COPY_I32
1439 : WebAssembly::COPY_I64,
1440 DL, VT, Src),
1441 0);
1442 return Op.getNode()->getNumValues() == 1
1443 ? DAG.getCopyToReg(Chain, DL, Reg, Copy)
1444 : DAG.getCopyToReg(Chain, DL, Reg, Copy,
1445 Op.getNumOperands() == 4 ? Op.getOperand(3)
1446 : SDValue());
1447 }
1448 return SDValue();
1449}
1450
1451SDValue WebAssemblyTargetLowering::LowerFrameIndex(SDValue Op,
1452 SelectionDAG &DAG) const {
1453 int FI = cast<FrameIndexSDNode>(Op)->getIndex();
1454 return DAG.getTargetFrameIndex(FI, Op.getValueType());
1455}
1456
1457SDValue WebAssemblyTargetLowering::LowerRETURNADDR(SDValue Op,
1458 SelectionDAG &DAG) const {
1459 SDLoc DL(Op);
1460
1461 if (!Subtarget->getTargetTriple().isOSEmscripten()) {
1462 fail(DL, DAG,
1463 "Non-Emscripten WebAssembly hasn't implemented "
1464 "__builtin_return_address");
1465 return SDValue();
1466 }
1467
1468 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
1469 return SDValue();
1470
1471 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1472 MakeLibCallOptions CallOptions;
1473 return makeLibCall(DAG, RTLIB::RETURN_ADDRESS, Op.getValueType(),
1474 {DAG.getConstant(Depth, DL, MVT::i32)}, CallOptions, DL)
1475 .first;
1476}
1477
1478SDValue WebAssemblyTargetLowering::LowerFRAMEADDR(SDValue Op,
1479 SelectionDAG &DAG) const {
1480 // Non-zero depths are not supported by WebAssembly currently. Use the
1481 // legalizer's default expansion, which is to return 0 (what this function is
1482 // documented to do).
1483 if (Op.getConstantOperandVal(0) > 0)
1484 return SDValue();
1485
1486 DAG.getMachineFunction().getFrameInfo().setFrameAddressIsTaken(true);
1487 EVT VT = Op.getValueType();
1488 Register FP =
1489 Subtarget->getRegisterInfo()->getFrameRegister(DAG.getMachineFunction());
1490 return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), FP, VT);
1491}
1492
1493SDValue
1494WebAssemblyTargetLowering::LowerGlobalTLSAddress(SDValue Op,
1495 SelectionDAG &DAG) const {
1496 SDLoc DL(Op);
1497 const auto *GA = cast<GlobalAddressSDNode>(Op);
1498 MVT PtrVT = getPointerTy(DAG.getDataLayout());
1499
1500 MachineFunction &MF = DAG.getMachineFunction();
1501 if (!MF.getSubtarget<WebAssemblySubtarget>().hasBulkMemory())
1502 report_fatal_error("cannot use thread-local storage without bulk memory",
1503 false);
1504
1505 const GlobalValue *GV = GA->getGlobal();
1506
1507 // Currently Emscripten does not support dynamic linking with threads.
1508 // Therefore, if we have thread-local storage, only the local-exec model
1509 // is possible.
1510 // TODO: remove this and implement proper TLS models once Emscripten
1511 // supports dynamic linking with threads.
1512 if (GV->getThreadLocalMode() != GlobalValue::LocalExecTLSModel &&
1513 !Subtarget->getTargetTriple().isOSEmscripten()) {
1514 report_fatal_error("only -ftls-model=local-exec is supported for now on "
1515 "non-Emscripten OSes: variable " +
1516 GV->getName(),
1517 false);
1518 }
1519
1520 auto GlobalGet = PtrVT == MVT::i64 ? WebAssembly::GLOBAL_GET_I64
1521 : WebAssembly::GLOBAL_GET_I32;
1522 const char *BaseName = MF.createExternalSymbolName("__tls_base");
1523
1524 SDValue BaseAddr(
1525 DAG.getMachineNode(GlobalGet, DL, PtrVT,
1526 DAG.getTargetExternalSymbol(BaseName, PtrVT)),
1527 0);
1528
1529 SDValue TLSOffset = DAG.getTargetGlobalAddress(
1530 GV, DL, PtrVT, GA->getOffset(), WebAssemblyII::MO_TLS_BASE_REL);
1531 SDValue SymAddr = DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT, TLSOffset);
1532
1533 return DAG.getNode(ISD::ADD, DL, PtrVT, BaseAddr, SymAddr);
1534}
1535
1536SDValue WebAssemblyTargetLowering::LowerGlobalAddress(SDValue Op,
1537 SelectionDAG &DAG) const {
1538 SDLoc DL(Op);
1539 const auto *GA = cast<GlobalAddressSDNode>(Op);
1540 EVT VT = Op.getValueType();
1541 assert(GA->getTargetFlags() == 0 &&(static_cast <bool> (GA->getTargetFlags() == 0 &&
"Unexpected target flags on generic GlobalAddressSDNode") ? void
(0) : __assert_fail ("GA->getTargetFlags() == 0 && \"Unexpected target flags on generic GlobalAddressSDNode\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1542, __extension__ __PRETTY_FUNCTION__))
1542 "Unexpected target flags on generic GlobalAddressSDNode")(static_cast <bool> (GA->getTargetFlags() == 0 &&
"Unexpected target flags on generic GlobalAddressSDNode") ? void
(0) : __assert_fail ("GA->getTargetFlags() == 0 && \"Unexpected target flags on generic GlobalAddressSDNode\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1542, __extension__ __PRETTY_FUNCTION__))
;
1543 if (!WebAssembly::isValidAddressSpace(GA->getAddressSpace()))
1544 fail(DL, DAG, "Invalid address space for WebAssembly target");
1545
1546 unsigned OperandFlags = 0;
1547 if (isPositionIndependent()) {
1548 const GlobalValue *GV = GA->getGlobal();
1549 if (getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV)) {
1550 MachineFunction &MF = DAG.getMachineFunction();
1551 MVT PtrVT = getPointerTy(MF.getDataLayout());
1552 const char *BaseName;
1553 if (GV->getValueType()->isFunctionTy()) {
1554 BaseName = MF.createExternalSymbolName("__table_base");
1555 OperandFlags = WebAssemblyII::MO_TABLE_BASE_REL;
1556 }
1557 else {
1558 BaseName = MF.createExternalSymbolName("__memory_base");
1559 OperandFlags = WebAssemblyII::MO_MEMORY_BASE_REL;
1560 }
1561 SDValue BaseAddr =
1562 DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT,
1563 DAG.getTargetExternalSymbol(BaseName, PtrVT));
1564
1565 SDValue SymAddr = DAG.getNode(
1566 WebAssemblyISD::WrapperPIC, DL, VT,
1567 DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT, GA->getOffset(),
1568 OperandFlags));
1569
1570 return DAG.getNode(ISD::ADD, DL, VT, BaseAddr, SymAddr);
1571 } else {
1572 OperandFlags = WebAssemblyII::MO_GOT;
1573 }
1574 }
1575
1576 return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1577 DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT,
1578 GA->getOffset(), OperandFlags));
1579}
1580
1581SDValue
1582WebAssemblyTargetLowering::LowerExternalSymbol(SDValue Op,
1583 SelectionDAG &DAG) const {
1584 SDLoc DL(Op);
1585 const auto *ES = cast<ExternalSymbolSDNode>(Op);
1586 EVT VT = Op.getValueType();
1587 assert(ES->getTargetFlags() == 0 &&(static_cast <bool> (ES->getTargetFlags() == 0 &&
"Unexpected target flags on generic ExternalSymbolSDNode") ?
void (0) : __assert_fail ("ES->getTargetFlags() == 0 && \"Unexpected target flags on generic ExternalSymbolSDNode\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1588, __extension__ __PRETTY_FUNCTION__))
1588 "Unexpected target flags on generic ExternalSymbolSDNode")(static_cast <bool> (ES->getTargetFlags() == 0 &&
"Unexpected target flags on generic ExternalSymbolSDNode") ?
void (0) : __assert_fail ("ES->getTargetFlags() == 0 && \"Unexpected target flags on generic ExternalSymbolSDNode\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1588, __extension__ __PRETTY_FUNCTION__))
;
1589 return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1590 DAG.getTargetExternalSymbol(ES->getSymbol(), VT));
1591}
1592
1593SDValue WebAssemblyTargetLowering::LowerJumpTable(SDValue Op,
1594 SelectionDAG &DAG) const {
1595 // There's no need for a Wrapper node because we always incorporate a jump
1596 // table operand into a BR_TABLE instruction, rather than ever
1597 // materializing it in a register.
1598 const JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
1599 return DAG.getTargetJumpTable(JT->getIndex(), Op.getValueType(),
1600 JT->getTargetFlags());
1601}
1602
1603SDValue WebAssemblyTargetLowering::LowerBR_JT(SDValue Op,
1604 SelectionDAG &DAG) const {
1605 SDLoc DL(Op);
1606 SDValue Chain = Op.getOperand(0);
1607 const auto *JT = cast<JumpTableSDNode>(Op.getOperand(1));
1608 SDValue Index = Op.getOperand(2);
1609 assert(JT->getTargetFlags() == 0 && "WebAssembly doesn't set target flags")(static_cast <bool> (JT->getTargetFlags() == 0 &&
"WebAssembly doesn't set target flags") ? void (0) : __assert_fail
("JT->getTargetFlags() == 0 && \"WebAssembly doesn't set target flags\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1609, __extension__ __PRETTY_FUNCTION__))
;
1610
1611 SmallVector<SDValue, 8> Ops;
1612 Ops.push_back(Chain);
1613 Ops.push_back(Index);
1614
1615 MachineJumpTableInfo *MJTI = DAG.getMachineFunction().getJumpTableInfo();
1616 const auto &MBBs = MJTI->getJumpTables()[JT->getIndex()].MBBs;
1617
1618 // Add an operand for each case.
1619 for (auto MBB : MBBs)
1620 Ops.push_back(DAG.getBasicBlock(MBB));
1621
1622 // Add the first MBB as a dummy default target for now. This will be replaced
1623 // with the proper default target (and the preceding range check eliminated)
1624 // if possible by WebAssemblyFixBrTableDefaults.
1625 Ops.push_back(DAG.getBasicBlock(*MBBs.begin()));
1626 return DAG.getNode(WebAssemblyISD::BR_TABLE, DL, MVT::Other, Ops);
1627}
1628
1629SDValue WebAssemblyTargetLowering::LowerVASTART(SDValue Op,
1630 SelectionDAG &DAG) const {
1631 SDLoc DL(Op);
1632 EVT PtrVT = getPointerTy(DAG.getMachineFunction().getDataLayout());
1633
1634 auto *MFI = DAG.getMachineFunction().getInfo<WebAssemblyFunctionInfo>();
1635 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
1636
1637 SDValue ArgN = DAG.getCopyFromReg(DAG.getEntryNode(), DL,
1638 MFI->getVarargBufferVreg(), PtrVT);
1639 return DAG.getStore(Op.getOperand(0), DL, ArgN, Op.getOperand(1),
1640 MachinePointerInfo(SV));
1641}
1642
1643static SDValue getCppExceptionSymNode(SDValue Op, unsigned TagIndex,
1644 SelectionDAG &DAG) {
1645 // We only support C++ exceptions for now
1646 int Tag =
1647 cast<ConstantSDNode>(Op.getOperand(TagIndex).getNode())->getZExtValue();
1648 if (Tag != WebAssembly::CPP_EXCEPTION)
1649 llvm_unreachable("Invalid tag: We only support C++ exceptions for now")::llvm::llvm_unreachable_internal("Invalid tag: We only support C++ exceptions for now"
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1649)
;
1650 auto &MF = DAG.getMachineFunction();
1651 const auto &TLI = DAG.getTargetLoweringInfo();
1652 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
1653 const char *SymName = MF.createExternalSymbolName("__cpp_exception");
1654 return DAG.getNode(WebAssemblyISD::Wrapper, SDLoc(Op), PtrVT,
1655 DAG.getTargetExternalSymbol(SymName, PtrVT));
1656}
1657
1658SDValue WebAssemblyTargetLowering::LowerIntrinsic(SDValue Op,
1659 SelectionDAG &DAG) const {
1660 MachineFunction &MF = DAG.getMachineFunction();
1661 unsigned IntNo;
1662 switch (Op.getOpcode()) {
1663 case ISD::INTRINSIC_VOID:
1664 case ISD::INTRINSIC_W_CHAIN:
1665 IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
1666 break;
1667 case ISD::INTRINSIC_WO_CHAIN:
1668 IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1669 break;
1670 default:
1671 llvm_unreachable("Invalid intrinsic")::llvm::llvm_unreachable_internal("Invalid intrinsic", "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1671)
;
1672 }
1673 SDLoc DL(Op);
1674
1675 switch (IntNo) {
1676 default:
1677 return SDValue(); // Don't custom lower most intrinsics.
1678
1679 case Intrinsic::wasm_lsda: {
1680 EVT VT = Op.getValueType();
1681 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1682 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
1683 auto &Context = MF.getMMI().getContext();
1684 MCSymbol *S = Context.getOrCreateSymbol(Twine("GCC_except_table") +
1685 Twine(MF.getFunctionNumber()));
1686 return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1687 DAG.getMCSymbol(S, PtrVT));
1688 }
1689
1690 case Intrinsic::wasm_throw: {
1691 SDValue SymNode = getCppExceptionSymNode(Op, 2, DAG);
1692 return DAG.getNode(WebAssemblyISD::THROW, DL,
1693 MVT::Other, // outchain type
1694 {
1695 Op.getOperand(0), // inchain
1696 SymNode, // exception symbol
1697 Op.getOperand(3) // thrown value
1698 });
1699 }
1700
1701 case Intrinsic::wasm_catch: {
1702 SDValue SymNode = getCppExceptionSymNode(Op, 2, DAG);
1703 return DAG.getNode(WebAssemblyISD::CATCH, DL,
1704 {
1705 MVT::i32, // outchain type
1706 MVT::Other // return value
1707 },
1708 {
1709 Op.getOperand(0), // inchain
1710 SymNode // exception symbol
1711 });
1712 }
1713
1714 case Intrinsic::wasm_shuffle: {
1715 // Drop in-chain and replace undefs, but otherwise pass through unchanged
1716 SDValue Ops[18];
1717 size_t OpIdx = 0;
1718 Ops[OpIdx++] = Op.getOperand(1);
1719 Ops[OpIdx++] = Op.getOperand(2);
1720 while (OpIdx < 18) {
1721 const SDValue &MaskIdx = Op.getOperand(OpIdx + 1);
1722 if (MaskIdx.isUndef() ||
1723 cast<ConstantSDNode>(MaskIdx.getNode())->getZExtValue() >= 32) {
1724 Ops[OpIdx++] = DAG.getConstant(0, DL, MVT::i32);
1725 } else {
1726 Ops[OpIdx++] = MaskIdx;
1727 }
1728 }
1729 return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops);
1730 }
1731 }
1732}
1733
1734SDValue
1735WebAssemblyTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
1736 SelectionDAG &DAG) const {
1737 SDLoc DL(Op);
1738 // If sign extension operations are disabled, allow sext_inreg only if operand
1739 // is a vector extract of an i8 or i16 lane. SIMD does not depend on sign
1740 // extension operations, but allowing sext_inreg in this context lets us have
1741 // simple patterns to select extract_lane_s instructions. Expanding sext_inreg
1742 // everywhere would be simpler in this file, but would necessitate large and
1743 // brittle patterns to undo the expansion and select extract_lane_s
1744 // instructions.
1745 assert(!Subtarget->hasSignExt() && Subtarget->hasSIMD128())(static_cast <bool> (!Subtarget->hasSignExt() &&
Subtarget->hasSIMD128()) ? void (0) : __assert_fail ("!Subtarget->hasSignExt() && Subtarget->hasSIMD128()"
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1745, __extension__ __PRETTY_FUNCTION__))
;
1746 if (Op.getOperand(0).getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1747 return SDValue();
1748
1749 const SDValue &Extract = Op.getOperand(0);
1750 MVT VecT = Extract.getOperand(0).getSimpleValueType();
1751 if (VecT.getVectorElementType().getSizeInBits() > 32)
1752 return SDValue();
1753 MVT ExtractedLaneT =
1754 cast<VTSDNode>(Op.getOperand(1).getNode())->getVT().getSimpleVT();
1755 MVT ExtractedVecT =
1756 MVT::getVectorVT(ExtractedLaneT, 128 / ExtractedLaneT.getSizeInBits());
1757 if (ExtractedVecT == VecT)
1758 return Op;
1759
1760 // Bitcast vector to appropriate type to ensure ISel pattern coverage
1761 const SDNode *Index = Extract.getOperand(1).getNode();
1762 if (!isa<ConstantSDNode>(Index))
1763 return SDValue();
1764 unsigned IndexVal = cast<ConstantSDNode>(Index)->getZExtValue();
1765 unsigned Scale =
1766 ExtractedVecT.getVectorNumElements() / VecT.getVectorNumElements();
1767 assert(Scale > 1)(static_cast <bool> (Scale > 1) ? void (0) : __assert_fail
("Scale > 1", "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1767, __extension__ __PRETTY_FUNCTION__))
;
1768 SDValue NewIndex =
1769 DAG.getConstant(IndexVal * Scale, DL, Index->getValueType(0));
1770 SDValue NewExtract = DAG.getNode(
1771 ISD::EXTRACT_VECTOR_ELT, DL, Extract.getValueType(),
1772 DAG.getBitcast(ExtractedVecT, Extract.getOperand(0)), NewIndex);
1773 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, Op.getValueType(), NewExtract,
1774 Op.getOperand(1));
1775}
1776
1777SDValue WebAssemblyTargetLowering::LowerBUILD_VECTOR(SDValue Op,
1778 SelectionDAG &DAG) const {
1779 SDLoc DL(Op);
1780 const EVT VecT = Op.getValueType();
1781 const EVT LaneT = Op.getOperand(0).getValueType();
1782 const size_t Lanes = Op.getNumOperands();
1783 bool CanSwizzle = VecT == MVT::v16i8;
1784
1785 // BUILD_VECTORs are lowered to the instruction that initializes the highest
1786 // possible number of lanes at once followed by a sequence of replace_lane
1787 // instructions to individually initialize any remaining lanes.
1788
1789 // TODO: Tune this. For example, lanewise swizzling is very expensive, so
1790 // swizzled lanes should be given greater weight.
1791
1792 // TODO: Investigate looping rather than always extracting/replacing specific
1793 // lanes to fill gaps.
1794
1795 auto IsConstant = [](const SDValue &V) {
1796 return V.getOpcode() == ISD::Constant || V.getOpcode() == ISD::ConstantFP;
1797 };
1798
1799 // Returns the source vector and index vector pair if they exist. Checks for:
1800 // (extract_vector_elt
1801 // $src,
1802 // (sign_extend_inreg (extract_vector_elt $indices, $i))
1803 // )
1804 auto GetSwizzleSrcs = [](size_t I, const SDValue &Lane) {
1805 auto Bail = std::make_pair(SDValue(), SDValue());
1806 if (Lane->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1807 return Bail;
1808 const SDValue &SwizzleSrc = Lane->getOperand(0);
1809 const SDValue &IndexExt = Lane->getOperand(1);
1810 if (IndexExt->getOpcode() != ISD::SIGN_EXTEND_INREG)
1811 return Bail;
1812 const SDValue &Index = IndexExt->getOperand(0);
1813 if (Index->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1814 return Bail;
1815 const SDValue &SwizzleIndices = Index->getOperand(0);
1816 if (SwizzleSrc.getValueType() != MVT::v16i8 ||
1817 SwizzleIndices.getValueType() != MVT::v16i8 ||
1818 Index->getOperand(1)->getOpcode() != ISD::Constant ||
1819 Index->getConstantOperandVal(1) != I)
1820 return Bail;
1821 return std::make_pair(SwizzleSrc, SwizzleIndices);
1822 };
1823
1824 // If the lane is extracted from another vector at a constant index, return
1825 // that vector. The source vector must not have more lanes than the dest
1826 // because the shufflevector indices are in terms of the destination lanes and
1827 // would not be able to address the smaller individual source lanes.
1828 auto GetShuffleSrc = [&](const SDValue &Lane) {
1829 if (Lane->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1830 return SDValue();
1831 if (!isa<ConstantSDNode>(Lane->getOperand(1).getNode()))
1832 return SDValue();
1833 if (Lane->getOperand(0).getValueType().getVectorNumElements() >
1834 VecT.getVectorNumElements())
1835 return SDValue();
1836 return Lane->getOperand(0);
1837 };
1838
1839 using ValueEntry = std::pair<SDValue, size_t>;
1840 SmallVector<ValueEntry, 16> SplatValueCounts;
1841
1842 using SwizzleEntry = std::pair<std::pair<SDValue, SDValue>, size_t>;
1843 SmallVector<SwizzleEntry, 16> SwizzleCounts;
1844
1845 using ShuffleEntry = std::pair<SDValue, size_t>;
1846 SmallVector<ShuffleEntry, 16> ShuffleCounts;
1847
1848 auto AddCount = [](auto &Counts, const auto &Val) {
1849 auto CountIt =
1850 llvm::find_if(Counts, [&Val](auto E) { return E.first == Val; });
1851 if (CountIt == Counts.end()) {
1852 Counts.emplace_back(Val, 1);
1853 } else {
1854 CountIt->second++;
1855 }
1856 };
1857
1858 auto GetMostCommon = [](auto &Counts) {
1859 auto CommonIt =
1860 std::max_element(Counts.begin(), Counts.end(),
1861 [](auto A, auto B) { return A.second < B.second; });
1862 assert(CommonIt != Counts.end() && "Unexpected all-undef build_vector")(static_cast <bool> (CommonIt != Counts.end() &&
"Unexpected all-undef build_vector") ? void (0) : __assert_fail
("CommonIt != Counts.end() && \"Unexpected all-undef build_vector\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1862, __extension__ __PRETTY_FUNCTION__))
;
1863 return *CommonIt;
1864 };
1865
1866 size_t NumConstantLanes = 0;
1867
1868 // Count eligible lanes for each type of vector creation op
1869 for (size_t I = 0; I
2.1
'I' is < 'Lanes'
2.1
'I' is < 'Lanes'
2.1
'I' is < 'Lanes'
< Lanes
; ++I) {
3
Loop condition is true. Entering loop body
9
Assuming 'I' is >= 'Lanes'
10
Loop condition is false. Execution continues on line 1887
1870 const SDValue &Lane = Op->getOperand(I);
1871 if (Lane.isUndef())
4
Taking false branch
1872 continue;
1873
1874 AddCount(SplatValueCounts, Lane);
1875
1876 if (IsConstant(Lane))
5
Taking false branch
1877 NumConstantLanes++;
1878 if (auto ShuffleSrc = GetShuffleSrc(Lane))
6
Taking false branch
1879 AddCount(ShuffleCounts, ShuffleSrc);
1880 if (CanSwizzle
6.1
'CanSwizzle' is true
6.1
'CanSwizzle' is true
6.1
'CanSwizzle' is true
) {
7
Taking true branch
1881 auto SwizzleSrcs = GetSwizzleSrcs(I, Lane);
1882 if (SwizzleSrcs.first)
8
Taking false branch
1883 AddCount(SwizzleCounts, SwizzleSrcs);
1884 }
1885 }
1886
1887 SDValue SplatValue;
1888 size_t NumSplatLanes;
1889 std::tie(SplatValue, NumSplatLanes) = GetMostCommon(SplatValueCounts);
1890
1891 SDValue SwizzleSrc;
1892 SDValue SwizzleIndices;
1893 size_t NumSwizzleLanes = 0;
1894 if (SwizzleCounts.size())
11
Assuming the condition is false
12
Taking false branch
1895 std::forward_as_tuple(std::tie(SwizzleSrc, SwizzleIndices),
1896 NumSwizzleLanes) = GetMostCommon(SwizzleCounts);
1897
1898 // Shuffles can draw from up to two vectors, so find the two most common
1899 // sources.
1900 SDValue ShuffleSrc1, ShuffleSrc2;
1901 size_t NumShuffleLanes = 0;
1902 if (ShuffleCounts.size()) {
13
Assuming the condition is false
14
Taking false branch
1903 std::tie(ShuffleSrc1, NumShuffleLanes) = GetMostCommon(ShuffleCounts);
1904 ShuffleCounts.erase(std::remove_if(ShuffleCounts.begin(),
1905 ShuffleCounts.end(),
1906 [&](const auto &Pair) {
1907 return Pair.first == ShuffleSrc1;
1908 }),
1909 ShuffleCounts.end());
1910 }
1911 if (ShuffleCounts.size()) {
15
Taking false branch
1912 size_t AdditionalShuffleLanes;
1913 std::tie(ShuffleSrc2, AdditionalShuffleLanes) =
1914 GetMostCommon(ShuffleCounts);
1915 NumShuffleLanes += AdditionalShuffleLanes;
1916 }
1917
1918 // Predicate returning true if the lane is properly initialized by the
1919 // original instruction
1920 std::function<bool(size_t, const SDValue &)> IsLaneConstructed;
1921 SDValue Result;
1922 // Prefer swizzles over shuffles over vector consts over splats
1923 if (NumSwizzleLanes
15.1
'NumSwizzleLanes' is >= 'NumShuffleLanes'
15.1
'NumSwizzleLanes' is >= 'NumShuffleLanes'
15.1
'NumSwizzleLanes' is >= 'NumShuffleLanes'
>= NumShuffleLanes &&
17
Taking false branch
1924 NumSwizzleLanes
15.2
'NumSwizzleLanes' is >= 'NumConstantLanes'
15.2
'NumSwizzleLanes' is >= 'NumConstantLanes'
15.2
'NumSwizzleLanes' is >= 'NumConstantLanes'
>= NumConstantLanes && NumSwizzleLanes >= NumSplatLanes) {
16
Assuming 'NumSwizzleLanes' is < 'NumSplatLanes'
1925 Result = DAG.getNode(WebAssemblyISD::SWIZZLE, DL, VecT, SwizzleSrc,
1926 SwizzleIndices);
1927 auto Swizzled = std::make_pair(SwizzleSrc, SwizzleIndices);
1928 IsLaneConstructed = [&, Swizzled](size_t I, const SDValue &Lane) {
1929 return Swizzled == GetSwizzleSrcs(I, Lane);
1930 };
1931 } else if (NumShuffleLanes
17.1
'NumShuffleLanes' is >= 'NumConstantLanes'
17.1
'NumShuffleLanes' is >= 'NumConstantLanes'
17.1
'NumShuffleLanes' is >= 'NumConstantLanes'
>= NumConstantLanes &&
18
Taking false branch
1932 NumShuffleLanes
17.2
'NumShuffleLanes' is < 'NumSplatLanes'
17.2
'NumShuffleLanes' is < 'NumSplatLanes'
17.2
'NumShuffleLanes' is < 'NumSplatLanes'
>= NumSplatLanes) {
1933 size_t DestLaneSize = VecT.getVectorElementType().getFixedSizeInBits() / 8;
1934 size_t DestLaneCount = VecT.getVectorNumElements();
1935 size_t Scale1 = 1;
1936 size_t Scale2 = 1;
1937 SDValue Src1 = ShuffleSrc1;
1938 SDValue Src2 = ShuffleSrc2 ? ShuffleSrc2 : DAG.getUNDEF(VecT);
1939 if (Src1.getValueType() != VecT) {
1940 size_t LaneSize =
1941 Src1.getValueType().getVectorElementType().getFixedSizeInBits() / 8;
1942 assert(LaneSize > DestLaneSize)(static_cast <bool> (LaneSize > DestLaneSize) ? void
(0) : __assert_fail ("LaneSize > DestLaneSize", "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1942, __extension__ __PRETTY_FUNCTION__))
;
1943 Scale1 = LaneSize / DestLaneSize;
1944 Src1 = DAG.getBitcast(VecT, Src1);
1945 }
1946 if (Src2.getValueType() != VecT) {
1947 size_t LaneSize =
1948 Src2.getValueType().getVectorElementType().getFixedSizeInBits() / 8;
1949 assert(LaneSize > DestLaneSize)(static_cast <bool> (LaneSize > DestLaneSize) ? void
(0) : __assert_fail ("LaneSize > DestLaneSize", "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1949, __extension__ __PRETTY_FUNCTION__))
;
1950 Scale2 = LaneSize / DestLaneSize;
1951 Src2 = DAG.getBitcast(VecT, Src2);
1952 }
1953
1954 int Mask[16];
1955 assert(DestLaneCount <= 16)(static_cast <bool> (DestLaneCount <= 16) ? void (0)
: __assert_fail ("DestLaneCount <= 16", "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1955, __extension__ __PRETTY_FUNCTION__))
;
1956 for (size_t I = 0; I < DestLaneCount; ++I) {
1957 const SDValue &Lane = Op->getOperand(I);
1958 SDValue Src = GetShuffleSrc(Lane);
1959 if (Src == ShuffleSrc1) {
1960 Mask[I] = Lane->getConstantOperandVal(1) * Scale1;
1961 } else if (Src && Src == ShuffleSrc2) {
1962 Mask[I] = DestLaneCount + Lane->getConstantOperandVal(1) * Scale2;
1963 } else {
1964 Mask[I] = -1;
1965 }
1966 }
1967 ArrayRef<int> MaskRef(Mask, DestLaneCount);
1968 Result = DAG.getVectorShuffle(VecT, DL, Src1, Src2, MaskRef);
1969 IsLaneConstructed = [&](size_t, const SDValue &Lane) {
1970 auto Src = GetShuffleSrc(Lane);
1971 return Src == ShuffleSrc1 || (Src && Src == ShuffleSrc2);
1972 };
1973 } else if (NumConstantLanes
18.1
'NumConstantLanes' is < 'NumSplatLanes'
18.1
'NumConstantLanes' is < 'NumSplatLanes'
18.1
'NumConstantLanes' is < 'NumSplatLanes'
>= NumSplatLanes) {
19
Taking false branch
1974 SmallVector<SDValue, 16> ConstLanes;
1975 for (const SDValue &Lane : Op->op_values()) {
1976 if (IsConstant(Lane)) {
1977 ConstLanes.push_back(Lane);
1978 } else if (LaneT.isFloatingPoint()) {
1979 ConstLanes.push_back(DAG.getConstantFP(0, DL, LaneT));
1980 } else {
1981 ConstLanes.push_back(DAG.getConstant(0, DL, LaneT));
1982 }
1983 }
1984 Result = DAG.getBuildVector(VecT, DL, ConstLanes);
1985 IsLaneConstructed = [&IsConstant](size_t _, const SDValue &Lane) {
1986 return IsConstant(Lane);
1987 };
1988 } else {
1989 // Use a splat, but possibly a load_splat
1990 LoadSDNode *SplattedLoad;
1991 if ((SplattedLoad = dyn_cast<LoadSDNode>(SplatValue)) &&
20
Assuming 'SplattedLoad' is null
21
Assuming pointer value is null
22
Taking false branch
1992 SplattedLoad->getMemoryVT() == VecT.getVectorElementType()) {
1993 Result = DAG.getMemIntrinsicNode(
1994 WebAssemblyISD::LOAD_SPLAT, DL, DAG.getVTList(VecT),
1995 {SplattedLoad->getChain(), SplattedLoad->getBasePtr(),
1996 SplattedLoad->getOffset()},
1997 SplattedLoad->getMemoryVT(), SplattedLoad->getMemOperand());
1998 } else {
1999 Result = DAG.getSplatBuildVector(VecT, DL, SplatValue);
23
The value of 'SplatValue' is assigned to 'Op.Node'
24
Calling 'SelectionDAG::getSplatBuildVector'
2000 }
2001 IsLaneConstructed = [&SplatValue](size_t _, const SDValue &Lane) {
2002 return Lane == SplatValue;
2003 };
2004 }
2005
2006 assert(Result)(static_cast <bool> (Result) ? void (0) : __assert_fail
("Result", "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 2006, __extension__ __PRETTY_FUNCTION__))
;
2007 assert(IsLaneConstructed)(static_cast <bool> (IsLaneConstructed) ? void (0) : __assert_fail
("IsLaneConstructed", "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 2007, __extension__ __PRETTY_FUNCTION__))
;
2008
2009 // Add replace_lane instructions for any unhandled values
2010 for (size_t I = 0; I < Lanes; ++I) {
2011 const SDValue &Lane = Op->getOperand(I);
2012 if (!Lane.isUndef() && !IsLaneConstructed(I, Lane))
2013 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VecT, Result, Lane,
2014 DAG.getConstant(I, DL, MVT::i32));
2015 }
2016
2017 return Result;
2018}
2019
2020SDValue
2021WebAssemblyTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
2022 SelectionDAG &DAG) const {
2023 SDLoc DL(Op);
2024 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op.getNode())->getMask();
2025 MVT VecType = Op.getOperand(0).getSimpleValueType();
2026 assert(VecType.is128BitVector() && "Unexpected shuffle vector type")(static_cast <bool> (VecType.is128BitVector() &&
"Unexpected shuffle vector type") ? void (0) : __assert_fail
("VecType.is128BitVector() && \"Unexpected shuffle vector type\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 2026, __extension__ __PRETTY_FUNCTION__))
;
2027 size_t LaneBytes = VecType.getVectorElementType().getSizeInBits() / 8;
2028
2029 // Space for two vector args and sixteen mask indices
2030 SDValue Ops[18];
2031 size_t OpIdx = 0;
2032 Ops[OpIdx++] = Op.getOperand(0);
2033 Ops[OpIdx++] = Op.getOperand(1);
2034
2035 // Expand mask indices to byte indices and materialize them as operands
2036 for (int M : Mask) {
2037 for (size_t J = 0; J < LaneBytes; ++J) {
2038 // Lower undefs (represented by -1 in mask) to zero
2039 uint64_t ByteIndex = M == -1 ? 0 : (uint64_t)M * LaneBytes + J;
2040 Ops[OpIdx++] = DAG.getConstant(ByteIndex, DL, MVT::i32);
2041 }
2042 }
2043
2044 return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops);
2045}
2046
2047SDValue WebAssemblyTargetLowering::LowerSETCC(SDValue Op,
2048 SelectionDAG &DAG) const {
2049 SDLoc DL(Op);
2050 // The legalizer does not know how to expand the unsupported comparison modes
2051 // of i64x2 vectors, so we manually unroll them here.
2052 assert(Op->getOperand(0)->getSimpleValueType(0) == MVT::v2i64)(static_cast <bool> (Op->getOperand(0)->getSimpleValueType
(0) == MVT::v2i64) ? void (0) : __assert_fail ("Op->getOperand(0)->getSimpleValueType(0) == MVT::v2i64"
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 2052, __extension__ __PRETTY_FUNCTION__))
;
2053 SmallVector<SDValue, 2> LHS, RHS;
2054 DAG.ExtractVectorElements(Op->getOperand(0), LHS);
2055 DAG.ExtractVectorElements(Op->getOperand(1), RHS);
2056 const SDValue &CC = Op->getOperand(2);
2057 auto MakeLane = [&](unsigned I) {
2058 return DAG.getNode(ISD::SELECT_CC, DL, MVT::i64, LHS[I], RHS[I],
2059 DAG.getConstant(uint64_t(-1), DL, MVT::i64),
2060 DAG.getConstant(uint64_t(0), DL, MVT::i64), CC);
2061 };
2062 return DAG.getBuildVector(Op->getValueType(0), DL,
2063 {MakeLane(0), MakeLane(1)});
2064}
2065
2066SDValue
2067WebAssemblyTargetLowering::LowerAccessVectorElement(SDValue Op,
2068 SelectionDAG &DAG) const {
2069 // Allow constant lane indices, expand variable lane indices
2070 SDNode *IdxNode = Op.getOperand(Op.getNumOperands() - 1).getNode();
2071 if (isa<ConstantSDNode>(IdxNode) || IdxNode->isUndef())
2072 return Op;
2073 else
2074 // Perform default expansion
2075 return SDValue();
2076}
2077
2078static SDValue unrollVectorShift(SDValue Op, SelectionDAG &DAG) {
2079 EVT LaneT = Op.getSimpleValueType().getVectorElementType();
2080 // 32-bit and 64-bit unrolled shifts will have proper semantics
2081 if (LaneT.bitsGE(MVT::i32))
2082 return DAG.UnrollVectorOp(Op.getNode());
2083 // Otherwise mask the shift value to get proper semantics from 32-bit shift
2084 SDLoc DL(Op);
2085 size_t NumLanes = Op.getSimpleValueType().getVectorNumElements();
2086 SDValue Mask = DAG.getConstant(LaneT.getSizeInBits() - 1, DL, MVT::i32);
2087 unsigned ShiftOpcode = Op.getOpcode();
2088 SmallVector<SDValue, 16> ShiftedElements;
2089 DAG.ExtractVectorElements(Op.getOperand(0), ShiftedElements, 0, 0, MVT::i32);
2090 SmallVector<SDValue, 16> ShiftElements;
2091 DAG.ExtractVectorElements(Op.getOperand(1), ShiftElements, 0, 0, MVT::i32);
2092 SmallVector<SDValue, 16> UnrolledOps;
2093 for (size_t i = 0; i < NumLanes; ++i) {
2094 SDValue MaskedShiftValue =
2095 DAG.getNode(ISD::AND, DL, MVT::i32, ShiftElements[i], Mask);
2096 SDValue ShiftedValue = ShiftedElements[i];
2097 if (ShiftOpcode == ISD::SRA)
2098 ShiftedValue = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32,
2099 ShiftedValue, DAG.getValueType(LaneT));
2100 UnrolledOps.push_back(
2101 DAG.getNode(ShiftOpcode, DL, MVT::i32, ShiftedValue, MaskedShiftValue));
2102 }
2103 return DAG.getBuildVector(Op.getValueType(), DL, UnrolledOps);
2104}
2105
2106SDValue WebAssemblyTargetLowering::LowerShift(SDValue Op,
2107 SelectionDAG &DAG) const {
2108 SDLoc DL(Op);
2109
2110 // Only manually lower vector shifts
2111 assert(Op.getSimpleValueType().isVector())(static_cast <bool> (Op.getSimpleValueType().isVector()
) ? void (0) : __assert_fail ("Op.getSimpleValueType().isVector()"
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 2111, __extension__ __PRETTY_FUNCTION__))
;
2112
2113 auto ShiftVal = DAG.getSplatValue(Op.getOperand(1));
2114 if (!ShiftVal)
2115 return unrollVectorShift(Op, DAG);
2116
2117 // Use anyext because none of the high bits can affect the shift
2118 ShiftVal = DAG.getAnyExtOrTrunc(ShiftVal, DL, MVT::i32);
2119
2120 unsigned Opcode;
2121 switch (Op.getOpcode()) {
2122 case ISD::SHL:
2123 Opcode = WebAssemblyISD::VEC_SHL;
2124 break;
2125 case ISD::SRA:
2126 Opcode = WebAssemblyISD::VEC_SHR_S;
2127 break;
2128 case ISD::SRL:
2129 Opcode = WebAssemblyISD::VEC_SHR_U;
2130 break;
2131 default:
2132 llvm_unreachable("unexpected opcode")::llvm::llvm_unreachable_internal("unexpected opcode", "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 2132)
;
2133 }
2134
2135 return DAG.getNode(Opcode, DL, Op.getValueType(), Op.getOperand(0), ShiftVal);
2136}
2137
2138SDValue WebAssemblyTargetLowering::LowerFP_TO_INT_SAT(SDValue Op,
2139 SelectionDAG &DAG) const {
2140 SDLoc DL(Op);
2141 EVT ResT = Op.getValueType();
2142 EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
2143
2144 if ((ResT == MVT::i32 || ResT == MVT::i64) &&
2145 (SatVT == MVT::i32 || SatVT == MVT::i64))
2146 return Op;
2147
2148 if (ResT == MVT::v4i32 && SatVT == MVT::i32)
2149 return Op;
2150
2151 return SDValue();
2152}
2153
2154//===----------------------------------------------------------------------===//
2155// Custom DAG combine hooks
2156//===----------------------------------------------------------------------===//
2157static SDValue
2158performVECTOR_SHUFFLECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
2159 auto &DAG = DCI.DAG;
2160 auto Shuffle = cast<ShuffleVectorSDNode>(N);
2161
2162 // Hoist vector bitcasts that don't change the number of lanes out of unary
2163 // shuffles, where they are less likely to get in the way of other combines.
2164 // (shuffle (vNxT1 (bitcast (vNxT0 x))), undef, mask) ->
2165 // (vNxT1 (bitcast (vNxT0 (shuffle x, undef, mask))))
2166 SDValue Bitcast = N->getOperand(0);
2167 if (Bitcast.getOpcode() != ISD::BITCAST)
2168 return SDValue();
2169 if (!N->getOperand(1).isUndef())
2170 return SDValue();
2171 SDValue CastOp = Bitcast.getOperand(0);
2172 MVT SrcType = CastOp.getSimpleValueType();
2173 MVT DstType = Bitcast.getSimpleValueType();
2174 if (!SrcType.is128BitVector() ||
2175 SrcType.getVectorNumElements() != DstType.getVectorNumElements())
2176 return SDValue();
2177 SDValue NewShuffle = DAG.getVectorShuffle(
2178 SrcType, SDLoc(N), CastOp, DAG.getUNDEF(SrcType), Shuffle->getMask());
2179 return DAG.getBitcast(DstType, NewShuffle);
2180}
2181
2182static SDValue
2183performVectorExtendCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
2184 auto &DAG = DCI.DAG;
2185 assert(N->getOpcode() == ISD::SIGN_EXTEND ||(static_cast <bool> (N->getOpcode() == ISD::SIGN_EXTEND
|| N->getOpcode() == ISD::ZERO_EXTEND) ? void (0) : __assert_fail
("N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND"
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 2186, __extension__ __PRETTY_FUNCTION__))
2186 N->getOpcode() == ISD::ZERO_EXTEND)(static_cast <bool> (N->getOpcode() == ISD::SIGN_EXTEND
|| N->getOpcode() == ISD::ZERO_EXTEND) ? void (0) : __assert_fail
("N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND"
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 2186, __extension__ __PRETTY_FUNCTION__))
;
2187
2188 // Combine ({s,z}ext (extract_subvector src, i)) into a widening operation if
2189 // possible before the extract_subvector can be expanded.
2190 auto Extract = N->getOperand(0);
2191 if (Extract.getOpcode() != ISD::EXTRACT_SUBVECTOR)
2192 return SDValue();
2193 auto Source = Extract.getOperand(0);
2194 auto *IndexNode = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
2195 if (IndexNode == nullptr)
2196 return SDValue();
2197 auto Index = IndexNode->getZExtValue();
2198
2199 // Only v8i8, v4i16, and v2i32 extracts can be widened, and only if the
2200 // extracted subvector is the low or high half of its source.
2201 EVT ResVT = N->getValueType(0);
2202 if (ResVT == MVT::v8i16) {
2203 if (Extract.getValueType() != MVT::v8i8 ||
2204 Source.getValueType() != MVT::v16i8 || (Index != 0 && Index != 8))
2205 return SDValue();
2206 } else if (ResVT == MVT::v4i32) {
2207 if (Extract.getValueType() != MVT::v4i16 ||
2208 Source.getValueType() != MVT::v8i16 || (Index != 0 && Index != 4))
2209 return SDValue();
2210 } else if (ResVT == MVT::v2i64) {
2211 if (Extract.getValueType() != MVT::v2i32 ||
2212 Source.getValueType() != MVT::v4i32 || (Index != 0 && Index != 2))
2213 return SDValue();
2214 } else {
2215 return SDValue();
2216 }
2217
2218 bool IsSext = N->getOpcode() == ISD::SIGN_EXTEND;
2219 bool IsLow = Index == 0;
2220
2221 unsigned Op = IsSext ? (IsLow ? WebAssemblyISD::EXTEND_LOW_S
2222 : WebAssemblyISD::EXTEND_HIGH_S)
2223 : (IsLow ? WebAssemblyISD::EXTEND_LOW_U
2224 : WebAssemblyISD::EXTEND_HIGH_U);
2225
2226 return DAG.getNode(Op, SDLoc(N), ResVT, Source);
2227}
2228
2229static SDValue
2230performVectorConvertLowCombine(SDNode *N,
2231 TargetLowering::DAGCombinerInfo &DCI) {
2232 auto &DAG = DCI.DAG;
2233
2234 EVT ResVT = N->getValueType(0);
2235 if (ResVT != MVT::v2f64)
2236 return SDValue();
2237
2238 auto GetWasmConversionOp = [](unsigned Op) {
2239 switch (Op) {
2240 case ISD::SINT_TO_FP:
2241 return WebAssemblyISD::CONVERT_LOW_S;
2242 case ISD::UINT_TO_FP:
2243 return WebAssemblyISD::CONVERT_LOW_U;
2244 case ISD::FP_EXTEND:
2245 return WebAssemblyISD::PROMOTE_LOW;
2246 }
2247 llvm_unreachable("unexpected op")::llvm::llvm_unreachable_internal("unexpected op", "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 2247)
;
2248 };
2249
2250 if (N->getOpcode() == ISD::EXTRACT_SUBVECTOR) {
2251 // Combine this:
2252 //
2253 // (v2f64 (extract_subvector
2254 // (v4f64 ({s,u}int_to_fp (v4i32 $x))), 0))
2255 //
2256 // into (f64x2.convert_low_i32x4_{s,u} $x).
2257 //
2258 // Or this:
2259 //
2260 // (v2f64 (extract_subvector
2261 // (v4f64 (fp_extend (v4f32 $x))), 0))
2262 //
2263 // into (f64x2.promote_low_f32x4 $x).
2264 auto Conversion = N->getOperand(0);
2265 auto ConversionOp = Conversion.getOpcode();
2266 MVT ExpectedSourceType;
2267 switch (ConversionOp) {
2268 case ISD::SINT_TO_FP:
2269 case ISD::UINT_TO_FP:
2270 ExpectedSourceType = MVT::v4i32;
2271 break;
2272 case ISD::FP_EXTEND:
2273 ExpectedSourceType = MVT::v4f32;
2274 break;
2275 default:
2276 return SDValue();
2277 }
2278
2279 if (Conversion.getValueType() != MVT::v4f64)
2280 return SDValue();
2281
2282 auto Source = Conversion.getOperand(0);
2283 if (Source.getValueType() != ExpectedSourceType)
2284 return SDValue();
2285
2286 auto IndexNode = dyn_cast<ConstantSDNode>(N->getOperand(1));
2287 if (IndexNode == nullptr || IndexNode->getZExtValue() != 0)
2288 return SDValue();
2289
2290 auto Op = GetWasmConversionOp(ConversionOp);
2291 return DAG.getNode(Op, SDLoc(N), ResVT, Source);
2292 }
2293
2294 // Combine this:
2295 //
2296 // (v2f64 ({s,u}int_to_fp
2297 // (v2i32 (extract_subvector (v4i32 $x), 0))))
2298 //
2299 // into (f64x2.convert_low_i32x4_{s,u} $x).
2300 //
2301 // Or this:
2302 //
2303 // (v2f64 (fp_extend
2304 // (v2f32 (extract_subvector (v4f32 $x), 0))))
2305 //
2306 // into (f64x2.promote_low_f32x4 $x).
2307 auto ConversionOp = N->getOpcode();
2308 MVT ExpectedExtractType;
2309 MVT ExpectedSourceType;
2310 switch (ConversionOp) {
2311 case ISD::SINT_TO_FP:
2312 case ISD::UINT_TO_FP:
2313 ExpectedExtractType = MVT::v2i32;
2314 ExpectedSourceType = MVT::v4i32;
2315 break;
2316 case ISD::FP_EXTEND:
2317 ExpectedExtractType = MVT::v2f32;
2318 ExpectedSourceType = MVT::v4f32;
2319 break;
2320 default:
2321 llvm_unreachable("unexpected opcode")::llvm::llvm_unreachable_internal("unexpected opcode", "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 2321)
;
2322 }
2323
2324 auto Extract = N->getOperand(0);
2325 if (Extract.getOpcode() != ISD::EXTRACT_SUBVECTOR)
2326 return SDValue();
2327
2328 if (Extract.getValueType() != ExpectedExtractType)
2329 return SDValue();
2330
2331 auto Source = Extract.getOperand(0);
2332 if (Source.getValueType() != ExpectedSourceType)
2333 return SDValue();
2334
2335 auto *IndexNode = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
2336 if (IndexNode == nullptr || IndexNode->getZExtValue() != 0)
2337 return SDValue();
2338
2339 unsigned Op = GetWasmConversionOp(ConversionOp);
2340 return DAG.getNode(Op, SDLoc(N), ResVT, Source);
2341}
2342
2343static SDValue
2344performVectorTruncZeroCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
2345 auto &DAG = DCI.DAG;
2346
2347 auto GetWasmConversionOp = [](unsigned Op) {
2348 switch (Op) {
2349 case ISD::FP_TO_SINT_SAT:
2350 return WebAssemblyISD::TRUNC_SAT_ZERO_S;
2351 case ISD::FP_TO_UINT_SAT:
2352 return WebAssemblyISD::TRUNC_SAT_ZERO_U;
2353 case ISD::FP_ROUND:
2354 return WebAssemblyISD::DEMOTE_ZERO;
2355 }
2356 llvm_unreachable("unexpected op")::llvm::llvm_unreachable_internal("unexpected op", "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 2356)
;
2357 };
2358
2359 auto IsZeroSplat = [](SDValue SplatVal) {
2360 auto *Splat = dyn_cast<BuildVectorSDNode>(SplatVal.getNode());
2361 APInt SplatValue, SplatUndef;
2362 unsigned SplatBitSize;
2363 bool HasAnyUndefs;
2364 return Splat &&
2365 Splat->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
2366 HasAnyUndefs) &&
2367 SplatValue == 0;
2368 };
2369
2370 if (N->getOpcode() == ISD::CONCAT_VECTORS) {
2371 // Combine this:
2372 //
2373 // (concat_vectors (v2i32 (fp_to_{s,u}int_sat $x, 32)), (v2i32 (splat 0)))
2374 //
2375 // into (i32x4.trunc_sat_f64x2_zero_{s,u} $x).
2376 //
2377 // Or this:
2378 //
2379 // (concat_vectors (v2f32 (fp_round (v2f64 $x))), (v2f32 (splat 0)))
2380 //
2381 // into (f32x4.demote_zero_f64x2 $x).
2382 EVT ResVT;
2383 EVT ExpectedConversionType;
2384 auto Conversion = N->getOperand(0);
2385 auto ConversionOp = Conversion.getOpcode();
2386 switch (ConversionOp) {
2387 case ISD::FP_TO_SINT_SAT:
2388 case ISD::FP_TO_UINT_SAT:
2389 ResVT = MVT::v4i32;
2390 ExpectedConversionType = MVT::v2i32;
2391 break;
2392 case ISD::FP_ROUND:
2393 ResVT = MVT::v4f32;
2394 ExpectedConversionType = MVT::v2f32;
2395 break;
2396 default:
2397 return SDValue();
2398 }
2399
2400 if (N->getValueType(0) != ResVT)
2401 return SDValue();
2402
2403 if (Conversion.getValueType() != ExpectedConversionType)
2404 return SDValue();
2405
2406 auto Source = Conversion.getOperand(0);
2407 if (Source.getValueType() != MVT::v2f64)
2408 return SDValue();
2409
2410 if (!IsZeroSplat(N->getOperand(1)) ||
2411 N->getOperand(1).getValueType() != ExpectedConversionType)
2412 return SDValue();
2413
2414 unsigned Op = GetWasmConversionOp(ConversionOp);
2415 return DAG.getNode(Op, SDLoc(N), ResVT, Source);
2416 }
2417
2418 // Combine this:
2419 //
2420 // (fp_to_{s,u}int_sat (concat_vectors $x, (v2f64 (splat 0))), 32)
2421 //
2422 // into (i32x4.trunc_sat_f64x2_zero_{s,u} $x).
2423 //
2424 // Or this:
2425 //
2426 // (v4f32 (fp_round (concat_vectors $x, (v2f64 (splat 0)))))
2427 //
2428 // into (f32x4.demote_zero_f64x2 $x).
2429 EVT ResVT;
2430 auto ConversionOp = N->getOpcode();
2431 switch (ConversionOp) {
2432 case ISD::FP_TO_SINT_SAT:
2433 case ISD::FP_TO_UINT_SAT:
2434 ResVT = MVT::v4i32;
2435 break;
2436 case ISD::FP_ROUND:
2437 ResVT = MVT::v4f32;
2438 break;
2439 default:
2440 llvm_unreachable("unexpected op")::llvm::llvm_unreachable_internal("unexpected op", "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 2440)
;
2441 }
2442
2443 if (N->getValueType(0) != ResVT)
2444 return SDValue();
2445
2446 auto Concat = N->getOperand(0);
2447 if (Concat.getValueType() != MVT::v4f64)
2448 return SDValue();
2449
2450 auto Source = Concat.getOperand(0);
2451 if (Source.getValueType() != MVT::v2f64)
2452 return SDValue();
2453
2454 if (!IsZeroSplat(Concat.getOperand(1)) ||
2455 Concat.getOperand(1).getValueType() != MVT::v2f64)
2456 return SDValue();
2457
2458 unsigned Op = GetWasmConversionOp(ConversionOp);
2459 return DAG.getNode(Op, SDLoc(N), ResVT, Source);
2460}
2461
2462SDValue
2463WebAssemblyTargetLowering::PerformDAGCombine(SDNode *N,
2464 DAGCombinerInfo &DCI) const {
2465 switch (N->getOpcode()) {
2466 default:
2467 return SDValue();
2468 case ISD::VECTOR_SHUFFLE:
2469 return performVECTOR_SHUFFLECombine(N, DCI);
2470 case ISD::SIGN_EXTEND:
2471 case ISD::ZERO_EXTEND:
2472 return performVectorExtendCombine(N, DCI);
2473 case ISD::SINT_TO_FP:
2474 case ISD::UINT_TO_FP:
2475 case ISD::FP_EXTEND:
2476 case ISD::EXTRACT_SUBVECTOR:
2477 return performVectorConvertLowCombine(N, DCI);
2478 case ISD::FP_TO_SINT_SAT:
2479 case ISD::FP_TO_UINT_SAT:
2480 case ISD::FP_ROUND:
2481 case ISD::CONCAT_VECTORS:
2482 return performVectorTruncZeroCombine(N, DCI);
2483 }
2484}

/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/SelectionDAG.h

1//===- llvm/CodeGen/SelectionDAG.h - InstSelection DAG ----------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file declares the SelectionDAG class, and transitively defines the
10// SDNode class and subclasses.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_CODEGEN_SELECTIONDAG_H
15#define LLVM_CODEGEN_SELECTIONDAG_H
16
17#include "llvm/ADT/APFloat.h"
18#include "llvm/ADT/APInt.h"
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/DenseMap.h"
21#include "llvm/ADT/DenseSet.h"
22#include "llvm/ADT/FoldingSet.h"
23#include "llvm/ADT/SetVector.h"
24#include "llvm/ADT/SmallVector.h"
25#include "llvm/ADT/StringMap.h"
26#include "llvm/ADT/ilist.h"
27#include "llvm/ADT/iterator.h"
28#include "llvm/ADT/iterator_range.h"
29#include "llvm/CodeGen/DAGCombine.h"
30#include "llvm/CodeGen/ISDOpcodes.h"
31#include "llvm/CodeGen/MachineFunction.h"
32#include "llvm/CodeGen/MachineMemOperand.h"
33#include "llvm/CodeGen/SelectionDAGNodes.h"
34#include "llvm/CodeGen/ValueTypes.h"
35#include "llvm/IR/DebugLoc.h"
36#include "llvm/IR/Instructions.h"
37#include "llvm/IR/Metadata.h"
38#include "llvm/Support/Allocator.h"
39#include "llvm/Support/ArrayRecycler.h"
40#include "llvm/Support/AtomicOrdering.h"
41#include "llvm/Support/Casting.h"
42#include "llvm/Support/CodeGen.h"
43#include "llvm/Support/ErrorHandling.h"
44#include "llvm/Support/MachineValueType.h"
45#include "llvm/Support/RecyclingAllocator.h"
46#include <algorithm>
47#include <cassert>
48#include <cstdint>
49#include <functional>
50#include <map>
51#include <string>
52#include <tuple>
53#include <utility>
54#include <vector>
55
56namespace llvm {
57
58class AAResults;
59class BlockAddress;
60class BlockFrequencyInfo;
61class Constant;
62class ConstantFP;
63class ConstantInt;
64class DataLayout;
65struct fltSemantics;
66class FunctionLoweringInfo;
67class GlobalValue;
68struct KnownBits;
69class LegacyDivergenceAnalysis;
70class LLVMContext;
71class MachineBasicBlock;
72class MachineConstantPoolValue;
73class MCSymbol;
74class OptimizationRemarkEmitter;
75class ProfileSummaryInfo;
76class SDDbgValue;
77class SDDbgOperand;
78class SDDbgLabel;
79class SelectionDAG;
80class SelectionDAGTargetInfo;
81class TargetLibraryInfo;
82class TargetLowering;
83class TargetMachine;
84class TargetSubtargetInfo;
85class Value;
86
87class SDVTListNode : public FoldingSetNode {
88 friend struct FoldingSetTrait<SDVTListNode>;
89
90 /// A reference to an Interned FoldingSetNodeID for this node.
91 /// The Allocator in SelectionDAG holds the data.
92 /// SDVTList contains all types which are frequently accessed in SelectionDAG.
93 /// The size of this list is not expected to be big so it won't introduce
94 /// a memory penalty.
95 FoldingSetNodeIDRef FastID;
96 const EVT *VTs;
97 unsigned int NumVTs;
98 /// The hash value for SDVTList is fixed, so cache it to avoid
99 /// hash calculation.
100 unsigned HashValue;
101
102public:
103 SDVTListNode(const FoldingSetNodeIDRef ID, const EVT *VT, unsigned int Num) :
104 FastID(ID), VTs(VT), NumVTs(Num) {
105 HashValue = ID.ComputeHash();
106 }
107
108 SDVTList getSDVTList() {
109 SDVTList result = {VTs, NumVTs};
110 return result;
111 }
112};
113
114/// Specialize FoldingSetTrait for SDVTListNode
115/// to avoid computing temp FoldingSetNodeID and hash value.
116template<> struct FoldingSetTrait<SDVTListNode> : DefaultFoldingSetTrait<SDVTListNode> {
117 static void Profile(const SDVTListNode &X, FoldingSetNodeID& ID) {
118 ID = X.FastID;
119 }
120
121 static bool Equals(const SDVTListNode &X, const FoldingSetNodeID &ID,
122 unsigned IDHash, FoldingSetNodeID &TempID) {
123 if (X.HashValue != IDHash)
124 return false;
125 return ID == X.FastID;
126 }
127
128 static unsigned ComputeHash(const SDVTListNode &X, FoldingSetNodeID &TempID) {
129 return X.HashValue;
130 }
131};
132
133template <> struct ilist_alloc_traits<SDNode> {
134 static void deleteNode(SDNode *) {
135 llvm_unreachable("ilist_traits<SDNode> shouldn't see a deleteNode call!")::llvm::llvm_unreachable_internal("ilist_traits<SDNode> shouldn't see a deleteNode call!"
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 135)
;
136 }
137};
138
139/// Keeps track of dbg_value information through SDISel. We do
140/// not build SDNodes for these so as not to perturb the generated code;
141/// instead the info is kept off to the side in this structure. Each SDNode may
142/// have one or more associated dbg_value entries. This information is kept in
143/// DbgValMap.
144/// Byval parameters are handled separately because they don't use alloca's,
145/// which busts the normal mechanism. There is good reason for handling all
146/// parameters separately: they may not have code generated for them, they
147/// should always go at the beginning of the function regardless of other code
148/// motion, and debug info for them is potentially useful even if the parameter
149/// is unused. Right now only byval parameters are handled separately.
150class SDDbgInfo {
151 BumpPtrAllocator Alloc;
152 SmallVector<SDDbgValue*, 32> DbgValues;
153 SmallVector<SDDbgValue*, 32> ByvalParmDbgValues;
154 SmallVector<SDDbgLabel*, 4> DbgLabels;
155 using DbgValMapType = DenseMap<const SDNode *, SmallVector<SDDbgValue *, 2>>;
156 DbgValMapType DbgValMap;
157
158public:
159 SDDbgInfo() = default;
160 SDDbgInfo(const SDDbgInfo &) = delete;
161 SDDbgInfo &operator=(const SDDbgInfo &) = delete;
162
163 void add(SDDbgValue *V, bool isParameter);
164
165 void add(SDDbgLabel *L) { DbgLabels.push_back(L); }
166
167 /// Invalidate all DbgValues attached to the node and remove
168 /// it from the Node-to-DbgValues map.
169 void erase(const SDNode *Node);
170
171 void clear() {
172 DbgValMap.clear();
173 DbgValues.clear();
174 ByvalParmDbgValues.clear();
175 DbgLabels.clear();
176 Alloc.Reset();
177 }
178
179 BumpPtrAllocator &getAlloc() { return Alloc; }
180
181 bool empty() const {
182 return DbgValues.empty() && ByvalParmDbgValues.empty() && DbgLabels.empty();
183 }
184
185 ArrayRef<SDDbgValue*> getSDDbgValues(const SDNode *Node) const {
186 auto I = DbgValMap.find(Node);
187 if (I != DbgValMap.end())
188 return I->second;
189 return ArrayRef<SDDbgValue*>();
190 }
191
192 using DbgIterator = SmallVectorImpl<SDDbgValue*>::iterator;
193 using DbgLabelIterator = SmallVectorImpl<SDDbgLabel*>::iterator;
194
195 DbgIterator DbgBegin() { return DbgValues.begin(); }
196 DbgIterator DbgEnd() { return DbgValues.end(); }
197 DbgIterator ByvalParmDbgBegin() { return ByvalParmDbgValues.begin(); }
198 DbgIterator ByvalParmDbgEnd() { return ByvalParmDbgValues.end(); }
199 DbgLabelIterator DbgLabelBegin() { return DbgLabels.begin(); }
200 DbgLabelIterator DbgLabelEnd() { return DbgLabels.end(); }
201};
202
203void checkForCycles(const SelectionDAG *DAG, bool force = false);
204
205/// This is used to represent a portion of an LLVM function in a low-level
206/// Data Dependence DAG representation suitable for instruction selection.
207/// This DAG is constructed as the first step of instruction selection in order
208/// to allow implementation of machine specific optimizations
209/// and code simplifications.
210///
211/// The representation used by the SelectionDAG is a target-independent
212/// representation, which has some similarities to the GCC RTL representation,
213/// but is significantly more simple, powerful, and is a graph form instead of a
214/// linear form.
215///
216class SelectionDAG {
217 const TargetMachine &TM;
218 const SelectionDAGTargetInfo *TSI = nullptr;
219 const TargetLowering *TLI = nullptr;
220 const TargetLibraryInfo *LibInfo = nullptr;
221 MachineFunction *MF;
222 Pass *SDAGISelPass = nullptr;
223 LLVMContext *Context;
224 CodeGenOpt::Level OptLevel;
225
226 LegacyDivergenceAnalysis * DA = nullptr;
227 FunctionLoweringInfo * FLI = nullptr;
228
229 /// The function-level optimization remark emitter. Used to emit remarks
230 /// whenever manipulating the DAG.
231 OptimizationRemarkEmitter *ORE;
232
233 ProfileSummaryInfo *PSI = nullptr;
234 BlockFrequencyInfo *BFI = nullptr;
235
236 /// The starting token.
237 SDNode EntryNode;
238
239 /// The root of the entire DAG.
240 SDValue Root;
241
242 /// A linked list of nodes in the current DAG.
243 ilist<SDNode> AllNodes;
244
245 /// The AllocatorType for allocating SDNodes. We use
246 /// pool allocation with recycling.
247 using NodeAllocatorType = RecyclingAllocator<BumpPtrAllocator, SDNode,
248 sizeof(LargestSDNode),
249 alignof(MostAlignedSDNode)>;
250
251 /// Pool allocation for nodes.
252 NodeAllocatorType NodeAllocator;
253
254 /// This structure is used to memoize nodes, automatically performing
255 /// CSE with existing nodes when a duplicate is requested.
256 FoldingSet<SDNode> CSEMap;
257
258 /// Pool allocation for machine-opcode SDNode operands.
259 BumpPtrAllocator OperandAllocator;
260 ArrayRecycler<SDUse> OperandRecycler;
261
262 /// Pool allocation for misc. objects that are created once per SelectionDAG.
263 BumpPtrAllocator Allocator;
264
265 /// Tracks dbg_value and dbg_label information through SDISel.
266 SDDbgInfo *DbgInfo;
267
268 using CallSiteInfo = MachineFunction::CallSiteInfo;
269 using CallSiteInfoImpl = MachineFunction::CallSiteInfoImpl;
270
271 struct CallSiteDbgInfo {
272 CallSiteInfo CSInfo;
273 MDNode *HeapAllocSite = nullptr;
274 bool NoMerge = false;
275 };
276
277 DenseMap<const SDNode *, CallSiteDbgInfo> SDCallSiteDbgInfo;
278
279 uint16_t NextPersistentId = 0;
280
public:
  /// Clients of various APIs that cause global effects on
  /// the DAG can optionally implement this interface. This allows the clients
  /// to handle the various sorts of updates that happen.
  ///
  /// A DAGUpdateListener automatically registers itself with DAG when it is
  /// constructed, and removes itself when destroyed in RAII fashion.
  struct DAGUpdateListener {
    /// Link to the previously registered listener; listeners form a LIFO
    /// stack rooted at SelectionDAG::UpdateListeners.
    DAGUpdateListener *const Next;
    SelectionDAG &DAG;

    explicit DAGUpdateListener(SelectionDAG &D)
        : Next(D.UpdateListeners), DAG(D) {
      // Push this listener onto the DAG's listener stack.
      DAG.UpdateListeners = this;
    }

    virtual ~DAGUpdateListener() {
      assert(DAG.UpdateListeners == this &&
             "DAGUpdateListeners must be destroyed in LIFO order");
      // Pop this listener, restoring the previous top of the stack.
      DAG.UpdateListeners = Next;
    }

    /// The node N that was deleted and, if E is not null, an
    /// equivalent node E that replaced it.
    virtual void NodeDeleted(SDNode *N, SDNode *E);

    /// The node N that was updated.
    virtual void NodeUpdated(SDNode *N);

    /// The node N that was inserted.
    virtual void NodeInserted(SDNode *N);
  };
313
  /// Listener that forwards node-deletion events to a user-supplied callback.
  struct DAGNodeDeletedListener : public DAGUpdateListener {
    std::function<void(SDNode *, SDNode *)> Callback;

    DAGNodeDeletedListener(SelectionDAG &DAG,
                           std::function<void(SDNode *, SDNode *)> Callback)
        : DAGUpdateListener(DAG), Callback(std::move(Callback)) {}

    void NodeDeleted(SDNode *N, SDNode *E) override { Callback(N, E); }

  private:
    // Out-of-line virtual method to anchor the vtable (LLVM convention).
    virtual void anchor();
  };
326
  /// Help to insert SDNodeFlags automatically in transforming. Use
  /// RAII to save and resume flags in current scope.
  class FlagInserter {
    SelectionDAG &DAG;
    SDNodeFlags Flags;
    /// The inserter that was active before this one; restored on destruction.
    FlagInserter *LastInserter;

  public:
    FlagInserter(SelectionDAG &SDAG, SDNodeFlags Flags)
        : DAG(SDAG), Flags(Flags),
          LastInserter(SDAG.getFlagInserter()) {
      SDAG.setFlagInserter(this);
    }
    /// Convenience overload: capture the flags of an existing node.
    FlagInserter(SelectionDAG &SDAG, SDNode *N)
        : FlagInserter(SDAG, N->getFlags()) {}

    FlagInserter(const FlagInserter &) = delete;
    FlagInserter &operator=(const FlagInserter &) = delete;
    ~FlagInserter() { DAG.setFlagInserter(LastInserter); }

    SDNodeFlags getFlags() const { return Flags; }
  };
349
  /// When true, additional steps are taken to
  /// ensure that getConstant() and similar functions return DAG nodes that
  /// have legal types. This is important after type legalization since
  /// any illegally typed nodes generated after this point will not experience
  /// type legalization.
  bool NewNodesMustHaveLegalTypes = false;

private:
  /// DAGUpdateListener is a friend so it can manipulate the listener stack.
  friend struct DAGUpdateListener;

  /// Linked list of registered DAGUpdateListener instances.
  /// This stack is maintained by DAGUpdateListener RAII.
  DAGUpdateListener *UpdateListeners = nullptr;

  /// Implementation of setSubgraphColor.
  /// Return whether we had to truncate the search.
  bool setSubgraphColorHelper(SDNode *N, const char *Color,
                              DenseSet<SDNode *> &visited,
                              int level, bool &printed);
370
371 template <typename SDNodeT, typename... ArgTypes>
372 SDNodeT *newSDNode(ArgTypes &&... Args) {
373 return new (NodeAllocator.template Allocate<SDNodeT>())
374 SDNodeT(std::forward<ArgTypes>(Args)...);
375 }
376
  /// Build a synthetic SDNodeT with the given args and extract its subclass
  /// data as an integer (e.g. for use in a folding set).
  ///
  /// The args to this function are the same as the args to SDNodeT's
  /// constructor, except the second arg (assumed to be a const DebugLoc&) is
  /// omitted.
  template <typename SDNodeT, typename... ArgTypes>
  static uint16_t getSyntheticNodeSubclassData(unsigned IROrder,
                                               ArgTypes &&... Args) {
    // The compiler can reduce this expression to a constant iff we pass an
    // empty DebugLoc. Thankfully, the debug location doesn't have any bearing
    // on the subclass data.
    return SDNodeT(IROrder, DebugLoc(), std::forward<ArgTypes>(Args)...)
        .getRawSubclassData();
  }

  /// Overload for memory nodes: construct a temporary SDNodeTy from opcode,
  /// value types, memory VT and memory operand, and return its raw subclass
  /// data (same folding-set use as above).
  template <typename SDNodeTy>
  static uint16_t getSyntheticNodeSubclassData(unsigned Opc, unsigned Order,
                                               SDVTList VTs, EVT MemoryVT,
                                               MachineMemOperand *MMO) {
    return SDNodeTy(Opc, Order, DebugLoc(), VTs, MemoryVT, MMO)
        .getRawSubclassData();
  }

  /// Allocate and attach the given operand values to Node.
  void createOperands(SDNode *Node, ArrayRef<SDValue> Vals);
402
403 void removeOperands(SDNode *Node) {
404 if (!Node->OperandList)
405 return;
406 OperandRecycler.deallocate(
407 ArrayRecycler<SDUse>::Capacity::get(Node->NumOperands),
408 Node->OperandList);
409 Node->NumOperands = 0;
410 Node->OperandList = nullptr;
411 }
  /// Compute a topological ordering of the nodes into Order.
  void CreateTopologicalOrder(std::vector<SDNode*>& Order);

public:
  // Maximum depth for recursive analysis such as computeKnownBits, etc.
  static constexpr unsigned MaxRecursionDepth = 6;

  explicit SelectionDAG(const TargetMachine &TM, CodeGenOpt::Level);
  SelectionDAG(const SelectionDAG &) = delete;
  SelectionDAG &operator=(const SelectionDAG &) = delete;
  ~SelectionDAG();

  /// Prepare this SelectionDAG to process code in the given MachineFunction.
  void init(MachineFunction &NewMF, OptimizationRemarkEmitter &NewORE,
            Pass *PassPtr, const TargetLibraryInfo *LibraryInfo,
            LegacyDivergenceAnalysis * Divergence,
            ProfileSummaryInfo *PSIin, BlockFrequencyInfo *BFIin);

  void setFunctionLoweringInfo(FunctionLoweringInfo * FuncInfo) {
    FLI = FuncInfo;
  }

  /// Clear state and free memory necessary to make this
  /// SelectionDAG ready to process a new block.
  void clear();

  // Accessors for the function, target objects and analyses this DAG was
  // initialized with.
  MachineFunction &getMachineFunction() const { return *MF; }
  const Pass *getPass() const { return SDAGISelPass; }

  const DataLayout &getDataLayout() const { return MF->getDataLayout(); }
  const TargetMachine &getTarget() const { return TM; }
  const TargetSubtargetInfo &getSubtarget() const { return MF->getSubtarget(); }
  const TargetLowering &getTargetLoweringInfo() const { return *TLI; }
  const TargetLibraryInfo &getLibInfo() const { return *LibInfo; }
  const SelectionDAGTargetInfo &getSelectionDAGInfo() const { return *TSI; }
  const LegacyDivergenceAnalysis *getDivergenceAnalysis() const { return DA; }
  LLVMContext *getContext() const { return Context; }
  OptimizationRemarkEmitter &getORE() const { return *ORE; }
  ProfileSummaryInfo *getPSI() const { return PSI; }
  BlockFrequencyInfo *getBFI() const { return BFI; }

  /// Current RAII FlagInserter, or null when none is active.
  FlagInserter *getFlagInserter() { return Inserter; }
  void setFlagInserter(FlagInserter *FI) { Inserter = FI; }
454
  /// Just dump dot graph to a user-provided path and title.
  /// This doesn't open the dot viewer program and
  /// helps visualization when outside debugging session.
  /// FileName expects absolute path. If provided
  /// without any path separators then the file
  /// will be created in the current directory.
  /// Error will be emitted if the path is insane.
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  LLVM_DUMP_METHOD void dumpDotGraph(const Twine &FileName, const Twine &Title);
#endif

  /// Pop up a GraphViz/gv window with the DAG rendered using 'dot'.
  void viewGraph(const std::string &Title);
  void viewGraph();

#ifndef NDEBUG
  /// Per-node attribute strings used when rendering the DAG with GraphViz.
  std::map<const SDNode *, std::string> NodeGraphAttrs;
#endif

  /// Clear all previously defined node graph attributes.
  /// Intended to be used from a debugging tool (eg. gdb).
  void clearGraphAttrs();

  /// Set graph attributes for a node. (eg. "color=red".)
  void setGraphAttrs(const SDNode *N, const char *Attrs);

  /// Get graph attributes for a node. (eg. "color=red".)
  /// Used from getNodeAttributes.
  std::string getGraphAttrs(const SDNode *N) const;

  /// Convenience for setting node color attribute.
  void setGraphColor(const SDNode *N, const char *Color);

  /// Convenience for setting subgraph color attribute.
  void setSubgraphColor(SDNode *N, const char *Color);
490
  using allnodes_const_iterator = ilist<SDNode>::const_iterator;

  allnodes_const_iterator allnodes_begin() const { return AllNodes.begin(); }
  allnodes_const_iterator allnodes_end() const { return AllNodes.end(); }

  using allnodes_iterator = ilist<SDNode>::iterator;

  allnodes_iterator allnodes_begin() { return AllNodes.begin(); }
  allnodes_iterator allnodes_end() { return AllNodes.end(); }

  /// Number of nodes currently in the DAG.
  ilist<SDNode>::size_type allnodes_size() const {
    return AllNodes.size();
  }

  /// Range over every node in the DAG (mutable and const forms).
  iterator_range<allnodes_iterator> allnodes() {
    return make_range(allnodes_begin(), allnodes_end());
  }
  iterator_range<allnodes_const_iterator> allnodes() const {
    return make_range(allnodes_begin(), allnodes_end());
  }

  /// Return the root tag of the SelectionDAG.
  const SDValue &getRoot() const { return Root; }

  /// Return the token chain corresponding to the entry of the function.
  SDValue getEntryNode() const {
    return SDValue(const_cast<SDNode *>(&EntryNode), 0);
  }
519
  /// Set the current root tag of the SelectionDAG.
  ///
  const SDValue &setRoot(SDValue N) {
    // The root, when present, must be a token chain.
    assert((!N.getNode() || N.getValueType() == MVT::Other) &&
           "DAG root value is not a chain!");
    if (N.getNode())
      checkForCycles(N.getNode(), this);
    Root = N;
    // Re-verify the whole DAG once the new root is installed.
    if (N.getNode())
      checkForCycles(this);
    return Root;
  }
532
#ifndef NDEBUG
  void VerifyDAGDiverence();
#endif

  /// This iterates over the nodes in the SelectionDAG, folding
  /// certain types of nodes together, or eliminating superfluous nodes. The
  /// Level argument controls whether Combine is allowed to produce nodes and
  /// types that are illegal on the target.
  void Combine(CombineLevel Level, AAResults *AA,
               CodeGenOpt::Level OptLevel);

  /// This transforms the SelectionDAG into a SelectionDAG that
  /// only uses types natively supported by the target.
  /// Returns "true" if it made any changes.
  ///
  /// Note that this is an involved process that may invalidate pointers into
  /// the graph.
  bool LegalizeTypes();

  /// This transforms the SelectionDAG into a SelectionDAG that is
  /// compatible with the target instruction selector, as indicated by the
  /// TargetLowering object.
  ///
  /// Note that this is an involved process that may invalidate pointers into
  /// the graph.
  void Legalize();

  /// Transforms a SelectionDAG node and any operands to it into a node
  /// that is compatible with the target instruction selector, as indicated by
  /// the TargetLowering object.
  ///
  /// \returns true if \c N is a valid, legal node after calling this.
  ///
  /// This essentially runs a single recursive walk of the \c Legalize process
  /// over the given node (and its operands). This can be used to incrementally
  /// legalize the DAG. All of the nodes which are directly replaced,
  /// potentially including N, are added to the output parameter \c
  /// UpdatedNodes so that the delta to the DAG can be understood by the
  /// caller.
  ///
  /// When this returns false, N has been legalized in a way that make the
  /// pointer passed in no longer valid. It may have even been deleted from the
  /// DAG, and so it shouldn't be used further. When this returns true, the
  /// N passed in is a legal node, and can be immediately processed as such.
  /// This may still have done some work on the DAG, and will still populate
  /// UpdatedNodes with any new nodes replacing those originally in the DAG.
  bool LegalizeOp(SDNode *N, SmallSetVector<SDNode *, 16> &UpdatedNodes);

  /// This transforms the SelectionDAG into a SelectionDAG
  /// that only uses vector math operations supported by the target. This is
  /// necessary as a separate step from Legalize because unrolling a vector
  /// operation can introduce illegal types, which requires running
  /// LegalizeTypes again.
  ///
  /// This returns true if it made any changes; in that case, LegalizeTypes
  /// is called again before Legalize.
  ///
  /// Note that this is an involved process that may invalidate pointers into
  /// the graph.
  bool LegalizeVectors();

  /// This method deletes all unreachable nodes in the SelectionDAG.
  void RemoveDeadNodes();

  /// Remove the specified node from the system. This node must
  /// have no referrers.
  void DeleteNode(SDNode *N);

  /// Return an SDVTList that represents the list of values specified.
  SDVTList getVTList(EVT VT);
  SDVTList getVTList(EVT VT1, EVT VT2);
  SDVTList getVTList(EVT VT1, EVT VT2, EVT VT3);
  SDVTList getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4);
  SDVTList getVTList(ArrayRef<EVT> VTs);
607
  //===--------------------------------------------------------------------===//
  // Node creation methods.

  /// Create a ConstantSDNode wrapping a constant value.
  /// If VT is a vector type, the constant is splatted into a BUILD_VECTOR.
  ///
  /// If only legal types can be produced, this does the necessary
  /// transformations (e.g., if the vector element type is illegal).
  /// @{
  SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT,
                      bool isTarget = false, bool isOpaque = false);
  SDValue getConstant(const APInt &Val, const SDLoc &DL, EVT VT,
                      bool isTarget = false, bool isOpaque = false);
621
622 SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget = false,
623 bool IsOpaque = false) {
624 return getConstant(APInt::getAllOnesValue(VT.getScalarSizeInBits()), DL,
625 VT, IsTarget, IsOpaque);
626 }
627
  SDValue getConstant(const ConstantInt &Val, const SDLoc &DL, EVT VT,
                      bool isTarget = false, bool isOpaque = false);
  SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL,
                            bool isTarget = false);
  SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL,
                                 bool LegalTypes = true);
  SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL,
                               bool isTarget = false);

  /// Target-constant variants: identical to getConstant with isTarget = true.
  SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT,
                            bool isOpaque = false) {
    return getConstant(Val, DL, VT, true, isOpaque);
  }
  SDValue getTargetConstant(const APInt &Val, const SDLoc &DL, EVT VT,
                            bool isOpaque = false) {
    return getConstant(Val, DL, VT, true, isOpaque);
  }
  SDValue getTargetConstant(const ConstantInt &Val, const SDLoc &DL, EVT VT,
                            bool isOpaque = false) {
    return getConstant(Val, DL, VT, true, isOpaque);
  }

  /// Create a true or false constant of type \p VT using the target's
  /// BooleanContent for type \p OpVT.
  SDValue getBoolConstant(bool V, const SDLoc &DL, EVT VT, EVT OpVT);
  /// @}
654
  /// Create a ConstantFPSDNode wrapping a constant value.
  /// If VT is a vector type, the constant is splatted into a BUILD_VECTOR.
  ///
  /// If only legal types can be produced, this does the necessary
  /// transformations (e.g., if the vector element type is illegal).
  /// The forms that take a double should only be used for simple constants
  /// that can be exactly represented in VT. No checks are made.
  /// @{
  SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT,
                        bool isTarget = false);
  SDValue getConstantFP(const APFloat &Val, const SDLoc &DL, EVT VT,
                        bool isTarget = false);
  SDValue getConstantFP(const ConstantFP &V, const SDLoc &DL, EVT VT,
                        bool isTarget = false);
  /// Target-constant variants: identical to getConstantFP with
  /// isTarget = true.
  SDValue getTargetConstantFP(double Val, const SDLoc &DL, EVT VT) {
    return getConstantFP(Val, DL, VT, true);
  }
  SDValue getTargetConstantFP(const APFloat &Val, const SDLoc &DL, EVT VT) {
    return getConstantFP(Val, DL, VT, true);
  }
  SDValue getTargetConstantFP(const ConstantFP &Val, const SDLoc &DL, EVT VT) {
    return getConstantFP(Val, DL, VT, true);
  }
  /// @}
679
  // Address-like leaf nodes (global addresses, frame indices, jump tables,
  // constant pools, symbols, block addresses). Each has a getTarget* wrapper
  // that forwards with the isTarget flag set.
  SDValue getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT,
                           int64_t offset = 0, bool isTargetGA = false,
                           unsigned TargetFlags = 0);
  SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT,
                                 int64_t offset = 0, unsigned TargetFlags = 0) {
    return getGlobalAddress(GV, DL, VT, offset, true, TargetFlags);
  }
  SDValue getFrameIndex(int FI, EVT VT, bool isTarget = false);
  SDValue getTargetFrameIndex(int FI, EVT VT) {
    return getFrameIndex(FI, VT, true);
  }
  SDValue getJumpTable(int JTI, EVT VT, bool isTarget = false,
                       unsigned TargetFlags = 0);
  SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags = 0) {
    return getJumpTable(JTI, VT, true, TargetFlags);
  }
  SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align = None,
                          int Offs = 0, bool isT = false,
                          unsigned TargetFlags = 0);
  SDValue getTargetConstantPool(const Constant *C, EVT VT,
                                MaybeAlign Align = None, int Offset = 0,
                                unsigned TargetFlags = 0) {
    return getConstantPool(C, VT, Align, Offset, true, TargetFlags);
  }
  SDValue getConstantPool(MachineConstantPoolValue *C, EVT VT,
                          MaybeAlign Align = None, int Offs = 0,
                          bool isT = false, unsigned TargetFlags = 0);
  SDValue getTargetConstantPool(MachineConstantPoolValue *C, EVT VT,
                                MaybeAlign Align = None, int Offset = 0,
                                unsigned TargetFlags = 0) {
    return getConstantPool(C, VT, Align, Offset, true, TargetFlags);
  }
  SDValue getTargetIndex(int Index, EVT VT, int64_t Offset = 0,
                         unsigned TargetFlags = 0);
  // When generating a branch to a BB, we don't in general know enough
  // to provide debug info for the BB at that time, so keep this one around.
  SDValue getBasicBlock(MachineBasicBlock *MBB);
  SDValue getExternalSymbol(const char *Sym, EVT VT);
  SDValue getTargetExternalSymbol(const char *Sym, EVT VT,
                                  unsigned TargetFlags = 0);
  SDValue getMCSymbol(MCSymbol *Sym, EVT VT);

  SDValue getValueType(EVT);
  SDValue getRegister(unsigned Reg, EVT VT);
  SDValue getRegisterMask(const uint32_t *RegMask);
  SDValue getEHLabel(const SDLoc &dl, SDValue Root, MCSymbol *Label);
  SDValue getLabelNode(unsigned Opcode, const SDLoc &dl, SDValue Root,
                       MCSymbol *Label);
  SDValue getBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset = 0,
                          bool isTarget = false, unsigned TargetFlags = 0);
  SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT,
                                int64_t Offset = 0, unsigned TargetFlags = 0) {
    return getBlockAddress(BA, VT, Offset, true, TargetFlags);
  }
734
735 SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg,
736 SDValue N) {
737 return getNode(ISD::CopyToReg, dl, MVT::Other, Chain,
738 getRegister(Reg, N.getValueType()), N);
739 }
740
  // This version of the getCopyToReg method takes an extra operand, which
  // indicates that there is potentially an incoming glue value (if Glue is not
  // null) and that there should be a glue result.
  SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N,
                       SDValue Glue) {
    SDVTList VTs = getVTList(MVT::Other, MVT::Glue);
    SDValue Ops[] = { Chain, getRegister(Reg, N.getValueType()), N, Glue };
    // Drop the trailing glue operand when no glue value was supplied.
    return getNode(ISD::CopyToReg, dl, VTs,
                   makeArrayRef(Ops, Glue.getNode() ? 4 : 3));
  }

  // Similar to last getCopyToReg() except parameter Reg is a SDValue
  SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, SDValue Reg, SDValue N,
                       SDValue Glue) {
    SDVTList VTs = getVTList(MVT::Other, MVT::Glue);
    SDValue Ops[] = { Chain, Reg, N, Glue };
    // Drop the trailing glue operand when no glue value was supplied.
    return getNode(ISD::CopyToReg, dl, VTs,
                   makeArrayRef(Ops, Glue.getNode() ? 4 : 3));
  }
760
  /// Build a CopyFromReg node reading register Reg as type VT, chained on
  /// Chain. Results are the value read and the output chain.
  SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT) {
    SDVTList VTs = getVTList(VT, MVT::Other);
    SDValue Ops[] = { Chain, getRegister(Reg, VT) };
    return getNode(ISD::CopyFromReg, dl, VTs, Ops);
  }

  // This version of the getCopyFromReg method takes an extra operand, which
  // indicates that there is potentially an incoming glue value (if Glue is not
  // null) and that there should be a glue result.
  SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT,
                         SDValue Glue) {
    SDVTList VTs = getVTList(VT, MVT::Other, MVT::Glue);
    SDValue Ops[] = { Chain, getRegister(Reg, VT), Glue };
    // Drop the trailing glue operand when no glue value was supplied.
    return getNode(ISD::CopyFromReg, dl, VTs,
                   makeArrayRef(Ops, Glue.getNode() ? 3 : 2));
  }
777
  SDValue getCondCode(ISD::CondCode Cond);

  /// Return an ISD::VECTOR_SHUFFLE node. The number of elements in VT,
  /// which must be a vector type, must match the number of mask elements
  /// NumElts. An integer mask element equal to -1 is treated as undefined.
  SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2,
                           ArrayRef<int> Mask);

  /// Return an ISD::BUILD_VECTOR node. The number of elements in VT,
  /// which must be a vector type, must match the number of operands in Ops.
  /// The operands must have the same type as (or, for integers, a type wider
  /// than) VT's element type.
  SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef<SDValue> Ops) {
    // VerifySDNode (via InsertNode) checks BUILD_VECTOR later.
    return getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
  }

  /// Return an ISD::BUILD_VECTOR node. The number of elements in VT,
  /// which must be a vector type, must match the number of operands in Ops.
  /// The operands must have the same type as (or, for integers, a type wider
  /// than) VT's element type.
  SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef<SDUse> Ops) {
    // VerifySDNode (via InsertNode) checks BUILD_VECTOR later.
    return getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
  }
803
804 /// Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all
805 /// elements. VT must be a vector type. Op's type must be the same as (or,
806 /// for integers, a type wider than) VT's element type.
807 SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op) {
808 // VerifySDNode (via InsertNode) checks BUILD_VECTOR later.
809 if (Op.getOpcode() == ISD::UNDEF) {
25
Calling 'SDValue::getOpcode'
810 assert((VT.getVectorElementType() == Op.getValueType() ||(static_cast <bool> ((VT.getVectorElementType() == Op.getValueType
() || (VT.isInteger() && VT.getVectorElementType().bitsLE
(Op.getValueType()))) && "A splatted value must have a width equal or (for integers) "
"greater than the vector element type!") ? void (0) : __assert_fail
("(VT.getVectorElementType() == Op.getValueType() || (VT.isInteger() && VT.getVectorElementType().bitsLE(Op.getValueType()))) && \"A splatted value must have a width equal or (for integers) \" \"greater than the vector element type!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 814, __extension__ __PRETTY_FUNCTION__))
811 (VT.isInteger() &&(static_cast <bool> ((VT.getVectorElementType() == Op.getValueType
() || (VT.isInteger() && VT.getVectorElementType().bitsLE
(Op.getValueType()))) && "A splatted value must have a width equal or (for integers) "
"greater than the vector element type!") ? void (0) : __assert_fail
("(VT.getVectorElementType() == Op.getValueType() || (VT.isInteger() && VT.getVectorElementType().bitsLE(Op.getValueType()))) && \"A splatted value must have a width equal or (for integers) \" \"greater than the vector element type!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 814, __extension__ __PRETTY_FUNCTION__))
812 VT.getVectorElementType().bitsLE(Op.getValueType()))) &&(static_cast <bool> ((VT.getVectorElementType() == Op.getValueType
() || (VT.isInteger() && VT.getVectorElementType().bitsLE
(Op.getValueType()))) && "A splatted value must have a width equal or (for integers) "
"greater than the vector element type!") ? void (0) : __assert_fail
("(VT.getVectorElementType() == Op.getValueType() || (VT.isInteger() && VT.getVectorElementType().bitsLE(Op.getValueType()))) && \"A splatted value must have a width equal or (for integers) \" \"greater than the vector element type!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 814, __extension__ __PRETTY_FUNCTION__))
813 "A splatted value must have a width equal or (for integers) "(static_cast <bool> ((VT.getVectorElementType() == Op.getValueType
() || (VT.isInteger() && VT.getVectorElementType().bitsLE
(Op.getValueType()))) && "A splatted value must have a width equal or (for integers) "
"greater than the vector element type!") ? void (0) : __assert_fail
("(VT.getVectorElementType() == Op.getValueType() || (VT.isInteger() && VT.getVectorElementType().bitsLE(Op.getValueType()))) && \"A splatted value must have a width equal or (for integers) \" \"greater than the vector element type!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 814, __extension__ __PRETTY_FUNCTION__))
814 "greater than the vector element type!")(static_cast <bool> ((VT.getVectorElementType() == Op.getValueType
() || (VT.isInteger() && VT.getVectorElementType().bitsLE
(Op.getValueType()))) && "A splatted value must have a width equal or (for integers) "
"greater than the vector element type!") ? void (0) : __assert_fail
("(VT.getVectorElementType() == Op.getValueType() || (VT.isInteger() && VT.getVectorElementType().bitsLE(Op.getValueType()))) && \"A splatted value must have a width equal or (for integers) \" \"greater than the vector element type!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 814, __extension__ __PRETTY_FUNCTION__))
;
815 return getNode(ISD::UNDEF, SDLoc(), VT);
816 }
817
818 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Op);
819 return getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
820 }
821
  // Return a splat ISD::SPLAT_VECTOR node, consisting of Op splatted to all
  // elements.
  SDValue getSplatVector(EVT VT, const SDLoc &DL, SDValue Op) {
    // Splatting undef yields undef of the vector type.
    if (Op.getOpcode() == ISD::UNDEF) {
      assert((VT.getVectorElementType() == Op.getValueType() ||
              (VT.isInteger() &&
               VT.getVectorElementType().bitsLE(Op.getValueType()))) &&
             "A splatted value must have a width equal or (for integers) "
             "greater than the vector element type!");
      return getNode(ISD::UNDEF, SDLoc(), VT);
    }
    return getNode(ISD::SPLAT_VECTOR, DL, VT, Op);
  }
835
  /// Returns a vector of type ResVT whose elements contain the linear sequence
  /// <0, Step, Step * 2, Step * 3, ...>
  SDValue getStepVector(const SDLoc &DL, EVT ResVT, APInt StepVal);

  /// Returns a vector of type ResVT whose elements contain the linear sequence
  /// <0, 1, 2, 3, ...>
  SDValue getStepVector(const SDLoc &DL, EVT ResVT);

  /// Returns an ISD::VECTOR_SHUFFLE node semantically equivalent to
  /// the shuffle node in input but with swapped operands.
  ///
  /// Example: shuffle A, B, <0,5,2,7> -> shuffle B, A, <4,1,6,3>
  SDValue getCommutedVectorShuffle(const ShuffleVectorSDNode &SV);

  /// Convert Op, which must be of float type, to the
  /// float type VT, by either extending or rounding (by truncation).
  SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT);

  /// Convert Op, which must be a STRICT operation of float type, to the
  /// float type VT, by either extending or rounding (by truncation).
  std::pair<SDValue, SDValue>
  getStrictFPExtendOrRound(SDValue Op, SDValue Chain, const SDLoc &DL, EVT VT);

  /// Convert Op, which must be of integer type, to the
  /// integer type VT, by either any-extending or truncating it.
  SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT);

  /// Convert Op, which must be of integer type, to the
  /// integer type VT, by either sign-extending or truncating it.
  SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT);

  /// Convert Op, which must be of integer type, to the
  /// integer type VT, by either zero-extending or truncating it.
  SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT);

  /// Return the expression required to zero extend the Op
  /// value assuming it was the smaller SrcTy value.
  SDValue getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT);

  /// Convert Op, which must be of integer type, to the integer type VT, by
  /// either truncating it or performing either zero or sign extension as
  /// appropriate extension for the pointer's semantics.
  SDValue getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT);

  /// Return the expression required to extend the Op as a pointer value
  /// assuming it was the smaller SrcTy value. This may be either a zero extend
  /// or a sign extend.
  SDValue getPtrExtendInReg(SDValue Op, const SDLoc &DL, EVT VT);

  /// Convert Op, which must be of integer type, to the integer type VT,
  /// by using an extension appropriate for the target's
  /// BooleanContent for type OpVT or truncating it.
  SDValue getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT, EVT OpVT);

  /// Create a bitwise NOT operation as (XOR Val, -1).
  SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT);

  /// Create a logical NOT operation as (XOR Val, BooleanOne).
  SDValue getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT);

  /// Returns sum of the base pointer and offset.
  /// Unlike getObjectPtrOffset this does not set NoUnsignedWrap by default.
  SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL,
                               const SDNodeFlags Flags = SDNodeFlags());
  SDValue getMemBasePlusOffset(SDValue Base, SDValue Offset, const SDLoc &DL,
                               const SDNodeFlags Flags = SDNodeFlags());
902
903 /// Create an add instruction with appropriate flags when used for
904 /// addressing some offset of an object. i.e. if a load is split into multiple
905 /// components, create an add nuw from the base pointer to the offset.
906 SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset) {
907 SDNodeFlags Flags;
908 Flags.setNoUnsignedWrap(true);
909 return getMemBasePlusOffset(Ptr, Offset, SL, Flags);
910 }
911
912 SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, SDValue Offset) {
913 // The object itself can't wrap around the address space, so it shouldn't be
914 // possible for the adds of the offsets to the split parts to overflow.
915 SDNodeFlags Flags;
916 Flags.setNoUnsignedWrap(true);
917 return getMemBasePlusOffset(Ptr, Offset, SL, Flags);
918 }
919
920 /// Return a new CALLSEQ_START node, that starts new call frame, in which
921 /// InSize bytes are set up inside CALLSEQ_START..CALLSEQ_END sequence and
922 /// OutSize specifies part of the frame set up prior to the sequence.
923 SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize,
924 const SDLoc &DL) {
925 SDVTList VTs = getVTList(MVT::Other, MVT::Glue);
926 SDValue Ops[] = { Chain,
927 getIntPtrConstant(InSize, DL, true),
928 getIntPtrConstant(OutSize, DL, true) };
929 return getNode(ISD::CALLSEQ_START, DL, VTs, Ops);
930 }
931
932 /// Return a new CALLSEQ_END node, which always must have a
933 /// glue result (to ensure it's not CSE'd).
934 /// CALLSEQ_END does not have a useful SDLoc.
935 SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2,
936 SDValue InGlue, const SDLoc &DL) {
937 SDVTList NodeTys = getVTList(MVT::Other, MVT::Glue);
938 SmallVector<SDValue, 4> Ops;
939 Ops.push_back(Chain);
940 Ops.push_back(Op1);
941 Ops.push_back(Op2);
942 if (InGlue.getNode())
943 Ops.push_back(InGlue);
944 return getNode(ISD::CALLSEQ_END, DL, NodeTys, Ops);
945 }
946
947 /// Return true if the result of this operation is always undefined.
948 bool isUndef(unsigned Opcode, ArrayRef<SDValue> Ops);
949
950 /// Return an UNDEF node. UNDEF does not have a useful SDLoc.
951 SDValue getUNDEF(EVT VT) {
952 return getNode(ISD::UNDEF, SDLoc(), VT);
953 }
954
955 /// Return a node that represents the runtime scaling 'MulImm * RuntimeVL'.
956 SDValue getVScale(const SDLoc &DL, EVT VT, APInt MulImm) {
957 assert(MulImm.getMinSignedBits() <= VT.getSizeInBits() &&(static_cast <bool> (MulImm.getMinSignedBits() <= VT
.getSizeInBits() && "Immediate does not fit VT") ? void
(0) : __assert_fail ("MulImm.getMinSignedBits() <= VT.getSizeInBits() && \"Immediate does not fit VT\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 958, __extension__ __PRETTY_FUNCTION__))
958 "Immediate does not fit VT")(static_cast <bool> (MulImm.getMinSignedBits() <= VT
.getSizeInBits() && "Immediate does not fit VT") ? void
(0) : __assert_fail ("MulImm.getMinSignedBits() <= VT.getSizeInBits() && \"Immediate does not fit VT\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 958, __extension__ __PRETTY_FUNCTION__))
;
959 return getNode(ISD::VSCALE, DL, VT,
960 getConstant(MulImm.sextOrTrunc(VT.getSizeInBits()), DL, VT));
961 }
962
963 /// Return a GLOBAL_OFFSET_TABLE node. This does not have a useful SDLoc.
964 SDValue getGLOBAL_OFFSET_TABLE(EVT VT) {
965 return getNode(ISD::GLOBAL_OFFSET_TABLE, SDLoc(), VT);
966 }
967
968 /// Gets or creates the specified node.
969 ///
970 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
971 ArrayRef<SDUse> Ops);
972 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
973 ArrayRef<SDValue> Ops, const SDNodeFlags Flags);
974 SDValue getNode(unsigned Opcode, const SDLoc &DL, ArrayRef<EVT> ResultTys,
975 ArrayRef<SDValue> Ops);
976 SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
977 ArrayRef<SDValue> Ops, const SDNodeFlags Flags);
978
979 // Use flags from current flag inserter.
980 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
981 ArrayRef<SDValue> Ops);
982 SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
983 ArrayRef<SDValue> Ops);
984 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue Operand);
985 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
986 SDValue N2);
987 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
988 SDValue N2, SDValue N3);
989
990 // Specialize based on number of operands.
991 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT);
992 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue Operand,
993 const SDNodeFlags Flags);
994 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
995 SDValue N2, const SDNodeFlags Flags);
996 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
997 SDValue N2, SDValue N3, const SDNodeFlags Flags);
998 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
999 SDValue N2, SDValue N3, SDValue N4);
1000 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
1001 SDValue N2, SDValue N3, SDValue N4, SDValue N5);
1002
1003 // Specialize again based on number of operands for nodes with a VTList
1004 // rather than a single VT.
1005 SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList);
1006 SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, SDValue N);
1007 SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, SDValue N1,
1008 SDValue N2);
1009 SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, SDValue N1,
1010 SDValue N2, SDValue N3);
1011 SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, SDValue N1,
1012 SDValue N2, SDValue N3, SDValue N4);
1013 SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, SDValue N1,
1014 SDValue N2, SDValue N3, SDValue N4, SDValue N5);
1015
1016 /// Compute a TokenFactor to force all the incoming stack arguments to be
1017 /// loaded from the stack. This is used in tail call lowering to protect
1018 /// stack arguments from being clobbered.
1019 SDValue getStackArgumentTokenFactor(SDValue Chain);
1020
1021 SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src,
1022 SDValue Size, Align Alignment, bool isVol,
1023 bool AlwaysInline, bool isTailCall,
1024 MachinePointerInfo DstPtrInfo,
1025 MachinePointerInfo SrcPtrInfo,
1026 const AAMDNodes &AAInfo = AAMDNodes());
1027
1028 SDValue getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src,
1029 SDValue Size, Align Alignment, bool isVol, bool isTailCall,
1030 MachinePointerInfo DstPtrInfo,
1031 MachinePointerInfo SrcPtrInfo,
1032 const AAMDNodes &AAInfo = AAMDNodes());
1033
1034 SDValue getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src,
1035 SDValue Size, Align Alignment, bool isVol, bool isTailCall,
1036 MachinePointerInfo DstPtrInfo,
1037 const AAMDNodes &AAInfo = AAMDNodes());
1038
1039 SDValue getAtomicMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst,
1040 unsigned DstAlign, SDValue Src, unsigned SrcAlign,
1041 SDValue Size, Type *SizeTy, unsigned ElemSz,
1042 bool isTailCall, MachinePointerInfo DstPtrInfo,
1043 MachinePointerInfo SrcPtrInfo);
1044
1045 SDValue getAtomicMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst,
1046 unsigned DstAlign, SDValue Src, unsigned SrcAlign,
1047 SDValue Size, Type *SizeTy, unsigned ElemSz,
1048 bool isTailCall, MachinePointerInfo DstPtrInfo,
1049 MachinePointerInfo SrcPtrInfo);
1050
1051 SDValue getAtomicMemset(SDValue Chain, const SDLoc &dl, SDValue Dst,
1052 unsigned DstAlign, SDValue Value, SDValue Size,
1053 Type *SizeTy, unsigned ElemSz, bool isTailCall,
1054 MachinePointerInfo DstPtrInfo);
1055
1056 /// Helper function to make it easier to build SetCC's if you just have an
1057 /// ISD::CondCode instead of an SDValue.
1058 SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS,
1059 ISD::CondCode Cond, SDValue Chain = SDValue(),
1060 bool IsSignaling = false) {
1061 assert(LHS.getValueType().isVector() == RHS.getValueType().isVector() &&(static_cast <bool> (LHS.getValueType().isVector() == RHS
.getValueType().isVector() && "Cannot compare scalars to vectors"
) ? void (0) : __assert_fail ("LHS.getValueType().isVector() == RHS.getValueType().isVector() && \"Cannot compare scalars to vectors\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1062, __extension__ __PRETTY_FUNCTION__))
1062 "Cannot compare scalars to vectors")(static_cast <bool> (LHS.getValueType().isVector() == RHS
.getValueType().isVector() && "Cannot compare scalars to vectors"
) ? void (0) : __assert_fail ("LHS.getValueType().isVector() == RHS.getValueType().isVector() && \"Cannot compare scalars to vectors\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1062, __extension__ __PRETTY_FUNCTION__))
;
1063 assert(LHS.getValueType().isVector() == VT.isVector() &&(static_cast <bool> (LHS.getValueType().isVector() == VT
.isVector() && "Cannot compare scalars to vectors") ?
void (0) : __assert_fail ("LHS.getValueType().isVector() == VT.isVector() && \"Cannot compare scalars to vectors\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1064, __extension__ __PRETTY_FUNCTION__))
1064 "Cannot compare scalars to vectors")(static_cast <bool> (LHS.getValueType().isVector() == VT
.isVector() && "Cannot compare scalars to vectors") ?
void (0) : __assert_fail ("LHS.getValueType().isVector() == VT.isVector() && \"Cannot compare scalars to vectors\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1064, __extension__ __PRETTY_FUNCTION__))
;
1065 assert(Cond != ISD::SETCC_INVALID &&(static_cast <bool> (Cond != ISD::SETCC_INVALID &&
"Cannot create a setCC of an invalid node.") ? void (0) : __assert_fail
("Cond != ISD::SETCC_INVALID && \"Cannot create a setCC of an invalid node.\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1066, __extension__ __PRETTY_FUNCTION__))
1066 "Cannot create a setCC of an invalid node.")(static_cast <bool> (Cond != ISD::SETCC_INVALID &&
"Cannot create a setCC of an invalid node.") ? void (0) : __assert_fail
("Cond != ISD::SETCC_INVALID && \"Cannot create a setCC of an invalid node.\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1066, __extension__ __PRETTY_FUNCTION__))
;
1067 if (Chain)
1068 return getNode(IsSignaling ? ISD::STRICT_FSETCCS : ISD::STRICT_FSETCC, DL,
1069 {VT, MVT::Other}, {Chain, LHS, RHS, getCondCode(Cond)});
1070 return getNode(ISD::SETCC, DL, VT, LHS, RHS, getCondCode(Cond));
1071 }
1072
1073 /// Helper function to make it easier to build Select's if you just have
1074 /// operands and don't want to check for vector.
1075 SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS,
1076 SDValue RHS) {
1077 assert(LHS.getValueType() == RHS.getValueType() &&(static_cast <bool> (LHS.getValueType() == RHS.getValueType
() && "Cannot use select on differing types") ? void (
0) : __assert_fail ("LHS.getValueType() == RHS.getValueType() && \"Cannot use select on differing types\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1078, __extension__ __PRETTY_FUNCTION__))
1078 "Cannot use select on differing types")(static_cast <bool> (LHS.getValueType() == RHS.getValueType
() && "Cannot use select on differing types") ? void (
0) : __assert_fail ("LHS.getValueType() == RHS.getValueType() && \"Cannot use select on differing types\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1078, __extension__ __PRETTY_FUNCTION__))
;
1079 assert(VT.isVector() == LHS.getValueType().isVector() &&(static_cast <bool> (VT.isVector() == LHS.getValueType(
).isVector() && "Cannot mix vectors and scalars") ? void
(0) : __assert_fail ("VT.isVector() == LHS.getValueType().isVector() && \"Cannot mix vectors and scalars\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1080, __extension__ __PRETTY_FUNCTION__))
1080 "Cannot mix vectors and scalars")(static_cast <bool> (VT.isVector() == LHS.getValueType(
).isVector() && "Cannot mix vectors and scalars") ? void
(0) : __assert_fail ("VT.isVector() == LHS.getValueType().isVector() && \"Cannot mix vectors and scalars\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1080, __extension__ __PRETTY_FUNCTION__))
;
1081 auto Opcode = Cond.getValueType().isVector() ? ISD::VSELECT : ISD::SELECT;
1082 return getNode(Opcode, DL, VT, Cond, LHS, RHS);
1083 }
1084
1085 /// Helper function to make it easier to build SelectCC's if you just have an
1086 /// ISD::CondCode instead of an SDValue.
1087 SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True,
1088 SDValue False, ISD::CondCode Cond) {
1089 return getNode(ISD::SELECT_CC, DL, True.getValueType(), LHS, RHS, True,
1090 False, getCondCode(Cond));
1091 }
1092
1093 /// Try to simplify a select/vselect into 1 of its operands or a constant.
1094 SDValue simplifySelect(SDValue Cond, SDValue TVal, SDValue FVal);
1095
1096 /// Try to simplify a shift into 1 of its operands or a constant.
1097 SDValue simplifyShift(SDValue X, SDValue Y);
1098
1099 /// Try to simplify a floating-point binary operation into 1 of its operands
1100 /// or a constant.
1101 SDValue simplifyFPBinop(unsigned Opcode, SDValue X, SDValue Y,
1102 SDNodeFlags Flags);
1103
1104 /// VAArg produces a result and token chain, and takes a pointer
1105 /// and a source value as input.
1106 SDValue getVAArg(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
1107 SDValue SV, unsigned Align);
1108
1109 /// Gets a node for an atomic cmpxchg op. There are two
1110 /// valid Opcodes. ISD::ATOMIC_CMP_SWAP produces the value loaded and a
1111 /// chain result. ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS produces the value loaded,
1112 /// a success flag (initially i1), and a chain.
1113 SDValue getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, EVT MemVT,
1114 SDVTList VTs, SDValue Chain, SDValue Ptr,
1115 SDValue Cmp, SDValue Swp, MachineMemOperand *MMO);
1116
1117 /// Gets a node for an atomic op, produces result (if relevant)
1118 /// and chain and takes 2 operands.
1119 SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain,
1120 SDValue Ptr, SDValue Val, MachineMemOperand *MMO);
1121
1122 /// Gets a node for an atomic op, produces result and chain and
1123 /// takes 1 operand.
1124 SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, EVT VT,
1125 SDValue Chain, SDValue Ptr, MachineMemOperand *MMO);
1126
1127 /// Gets a node for an atomic op, produces result and chain and takes N
1128 /// operands.
1129 SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
1130 SDVTList VTList, ArrayRef<SDValue> Ops,
1131 MachineMemOperand *MMO);
1132
1133 /// Creates a MemIntrinsicNode that may produce a
1134 /// result and takes a list of operands. Opcode may be INTRINSIC_VOID,
1135 /// INTRINSIC_W_CHAIN, or a target-specific opcode with a value not
1136 /// less than FIRST_TARGET_MEMORY_OPCODE.
1137 SDValue getMemIntrinsicNode(
1138 unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
1139 EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment,
1140 MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad |
1141 MachineMemOperand::MOStore,
1142 uint64_t Size = 0, const AAMDNodes &AAInfo = AAMDNodes());
1143
1144 inline SDValue getMemIntrinsicNode(
1145 unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
1146 EVT MemVT, MachinePointerInfo PtrInfo, MaybeAlign Alignment = None,
1147 MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad |
1148 MachineMemOperand::MOStore,
1149 uint64_t Size = 0, const AAMDNodes &AAInfo = AAMDNodes()) {
1150 // Ensure that codegen never sees alignment 0
1151 return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, PtrInfo,
1152 Alignment.getValueOr(getEVTAlign(MemVT)), Flags,
1153 Size, AAInfo);
1154 }
1155
1156 SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList,
1157 ArrayRef<SDValue> Ops, EVT MemVT,
1158 MachineMemOperand *MMO);
1159
1160 /// Creates a LifetimeSDNode that starts (`IsStart==true`) or ends
1161 /// (`IsStart==false`) the lifetime of the portion of `FrameIndex` between
1162 /// offsets `Offset` and `Offset + Size`.
1163 SDValue getLifetimeNode(bool IsStart, const SDLoc &dl, SDValue Chain,
1164 int FrameIndex, int64_t Size, int64_t Offset = -1);
1165
1166 /// Creates a PseudoProbeSDNode with function GUID `Guid` and
1167 /// the index of the block `Index` it is probing, as well as the attributes
1168 /// `attr` of the probe.
1169 SDValue getPseudoProbeNode(const SDLoc &Dl, SDValue Chain, uint64_t Guid,
1170 uint64_t Index, uint32_t Attr);
1171
1172 /// Create a MERGE_VALUES node from the given operands.
1173 SDValue getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl);
1174
1175 /// Loads are not normal binary operators: their result type is not
1176 /// determined by their operands, and they produce a value AND a token chain.
1177 ///
1178 /// This function will set the MOLoad flag on MMOFlags, but you can set it if
1179 /// you want. The MOStore flag must not be set.
1180 SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
1181 MachinePointerInfo PtrInfo,
1182 MaybeAlign Alignment = MaybeAlign(),
1183 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1184 const AAMDNodes &AAInfo = AAMDNodes(),
1185 const MDNode *Ranges = nullptr);
1186 /// FIXME: Remove once transition to Align is over.
1187 inline SDValue
1188 getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
1189 MachinePointerInfo PtrInfo, unsigned Alignment,
1190 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1191 const AAMDNodes &AAInfo = AAMDNodes(),
1192 const MDNode *Ranges = nullptr) {
1193 return getLoad(VT, dl, Chain, Ptr, PtrInfo, MaybeAlign(Alignment), MMOFlags,
1194 AAInfo, Ranges);
1195 }
1196 SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
1197 MachineMemOperand *MMO);
1198 SDValue
1199 getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain,
1200 SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT,
1201 MaybeAlign Alignment = MaybeAlign(),
1202 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1203 const AAMDNodes &AAInfo = AAMDNodes());
1204 /// FIXME: Remove once transition to Align is over.
1205 inline SDValue
1206 getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain,
1207 SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT,
1208 unsigned Alignment,
1209 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1210 const AAMDNodes &AAInfo = AAMDNodes()) {
1211 return getExtLoad(ExtType, dl, VT, Chain, Ptr, PtrInfo, MemVT,
1212 MaybeAlign(Alignment), MMOFlags, AAInfo);
1213 }
1214 SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT,
1215 SDValue Chain, SDValue Ptr, EVT MemVT,
1216 MachineMemOperand *MMO);
1217 SDValue getIndexedLoad(SDValue OrigLoad, const SDLoc &dl, SDValue Base,
1218 SDValue Offset, ISD::MemIndexedMode AM);
1219 SDValue getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT,
1220 const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset,
1221 MachinePointerInfo PtrInfo, EVT MemVT, Align Alignment,
1222 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1223 const AAMDNodes &AAInfo = AAMDNodes(),
1224 const MDNode *Ranges = nullptr);
1225 inline SDValue getLoad(
1226 ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &dl,
1227 SDValue Chain, SDValue Ptr, SDValue Offset, MachinePointerInfo PtrInfo,
1228 EVT MemVT, MaybeAlign Alignment = MaybeAlign(),
1229 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1230 const AAMDNodes &AAInfo = AAMDNodes(), const MDNode *Ranges = nullptr) {
1231 // Ensures that codegen never sees a None Alignment.
1232 return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, PtrInfo, MemVT,
1233 Alignment.getValueOr(getEVTAlign(MemVT)), MMOFlags, AAInfo,
1234 Ranges);
1235 }
1236 /// FIXME: Remove once transition to Align is over.
1237 inline SDValue
1238 getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT,
1239 const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset,
1240 MachinePointerInfo PtrInfo, EVT MemVT, unsigned Alignment,
1241 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1242 const AAMDNodes &AAInfo = AAMDNodes(),
1243 const MDNode *Ranges = nullptr) {
1244 return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, PtrInfo, MemVT,
1245 MaybeAlign(Alignment), MMOFlags, AAInfo, Ranges);
1246 }
1247 SDValue getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT,
1248 const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset,
1249 EVT MemVT, MachineMemOperand *MMO);
1250
1251 /// Helper function to build ISD::STORE nodes.
1252 ///
1253 /// This function will set the MOStore flag on MMOFlags, but you can set it if
1254 /// you want. The MOLoad and MOInvariant flags must not be set.
1255
1256 SDValue
1257 getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
1258 MachinePointerInfo PtrInfo, Align Alignment,
1259 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1260 const AAMDNodes &AAInfo = AAMDNodes());
1261 inline SDValue
1262 getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
1263 MachinePointerInfo PtrInfo, MaybeAlign Alignment = MaybeAlign(),
1264 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1265 const AAMDNodes &AAInfo = AAMDNodes()) {
1266 return getStore(Chain, dl, Val, Ptr, PtrInfo,
1267 Alignment.getValueOr(getEVTAlign(Val.getValueType())),
1268 MMOFlags, AAInfo);
1269 }
1270 /// FIXME: Remove once transition to Align is over.
1271 inline SDValue
1272 getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
1273 MachinePointerInfo PtrInfo, unsigned Alignment,
1274 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1275 const AAMDNodes &AAInfo = AAMDNodes()) {
1276 return getStore(Chain, dl, Val, Ptr, PtrInfo, MaybeAlign(Alignment),
1277 MMOFlags, AAInfo);
1278 }
1279 SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
1280 MachineMemOperand *MMO);
1281 SDValue
1282 getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
1283 MachinePointerInfo PtrInfo, EVT SVT, Align Alignment,
1284 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1285 const AAMDNodes &AAInfo = AAMDNodes());
1286 inline SDValue
1287 getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
1288 MachinePointerInfo PtrInfo, EVT SVT,
1289 MaybeAlign Alignment = MaybeAlign(),
1290 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1291 const AAMDNodes &AAInfo = AAMDNodes()) {
1292 return getTruncStore(Chain, dl, Val, Ptr, PtrInfo, SVT,
1293 Alignment.getValueOr(getEVTAlign(SVT)), MMOFlags,
1294 AAInfo);
1295 }
1296 /// FIXME: Remove once transition to Align is over.
1297 inline SDValue
1298 getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
1299 MachinePointerInfo PtrInfo, EVT SVT, unsigned Alignment,
1300 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1301 const AAMDNodes &AAInfo = AAMDNodes()) {
1302 return getTruncStore(Chain, dl, Val, Ptr, PtrInfo, SVT,
1303 MaybeAlign(Alignment), MMOFlags, AAInfo);
1304 }
1305 SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
1306 SDValue Ptr, EVT SVT, MachineMemOperand *MMO);
1307 SDValue getIndexedStore(SDValue OrigStore, const SDLoc &dl, SDValue Base,
1308 SDValue Offset, ISD::MemIndexedMode AM);
1309
1310 SDValue getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Base,
1311 SDValue Offset, SDValue Mask, SDValue Src0, EVT MemVT,
1312 MachineMemOperand *MMO, ISD::MemIndexedMode AM,
1313 ISD::LoadExtType, bool IsExpanding = false);
1314 SDValue getIndexedMaskedLoad(SDValue OrigLoad, const SDLoc &dl, SDValue Base,
1315 SDValue Offset, ISD::MemIndexedMode AM);
1316 SDValue getMaskedStore(SDValue Chain, const SDLoc &dl, SDValue Val,
1317 SDValue Base, SDValue Offset, SDValue Mask, EVT MemVT,
1318 MachineMemOperand *MMO, ISD::MemIndexedMode AM,
1319 bool IsTruncating = false, bool IsCompressing = false);
1320 SDValue getIndexedMaskedStore(SDValue OrigStore, const SDLoc &dl,
1321 SDValue Base, SDValue Offset,
1322 ISD::MemIndexedMode AM);
1323 SDValue getMaskedGather(SDVTList VTs, EVT MemVT, const SDLoc &dl,
1324 ArrayRef<SDValue> Ops, MachineMemOperand *MMO,
1325 ISD::MemIndexType IndexType, ISD::LoadExtType ExtTy);
1326 SDValue getMaskedScatter(SDVTList VTs, EVT MemVT, const SDLoc &dl,
1327 ArrayRef<SDValue> Ops, MachineMemOperand *MMO,
1328 ISD::MemIndexType IndexType,
1329 bool IsTruncating = false);
1330
1331 /// Construct a node to track a Value* through the backend.
1332 SDValue getSrcValue(const Value *v);
1333
1334 /// Return an MDNodeSDNode which holds an MDNode.
1335 SDValue getMDNode(const MDNode *MD);
1336
1337 /// Return a bitcast using the SDLoc of the value operand, and casting to the
1338 /// provided type. Use getNode to set a custom SDLoc.
1339 SDValue getBitcast(EVT VT, SDValue V);
1340
1341 /// Return an AddrSpaceCastSDNode.
1342 SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS,
1343 unsigned DestAS);
1344
1345 /// Return a freeze using the SDLoc of the value operand.
1346 SDValue getFreeze(SDValue V);
1347
1348 /// Return an AssertAlignSDNode.
1349 SDValue getAssertAlign(const SDLoc &DL, SDValue V, Align A);
1350
1351 /// Return the specified value casted to
1352 /// the target's desired shift amount type.
1353 SDValue getShiftAmountOperand(EVT LHSTy, SDValue Op);
1354
1355 /// Expand the specified \c ISD::VAARG node as the Legalize pass would.
1356 SDValue expandVAArg(SDNode *Node);
1357
1358 /// Expand the specified \c ISD::VACOPY node as the Legalize pass would.
1359 SDValue expandVACopy(SDNode *Node);
1360
1361 /// Returns a GlobalAddress of the function from the current module with
1362 /// name matching the given ExternalSymbol. Additionally can provide the
1363 /// matched function.
1364 /// Panics if the function doesn't exist.
1365 SDValue getSymbolFunctionGlobalAddress(SDValue Op,
1366 Function **TargetFunction = nullptr);
1367
1368 /// *Mutate* the specified node in-place to have the
1369 /// specified operands. If the resultant node already exists in the DAG,
1370 /// this does not modify the specified node, instead it returns the node that
1371 /// already exists. If the resultant node does not exist in the DAG, the
1372 /// input node is returned. As a degenerate case, if you specify the same
1373 /// input operands as the node already has, the input node is returned.
1374 SDNode *UpdateNodeOperands(SDNode *N, SDValue Op);
1375 SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2);
1376 SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
1377 SDValue Op3);
1378 SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
1379 SDValue Op3, SDValue Op4);
1380 SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
1381 SDValue Op3, SDValue Op4, SDValue Op5);
1382 SDNode *UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops);
1383
1384 /// Creates a new TokenFactor containing \p Vals. If \p Vals contains 64k
1385 /// values or more, move values into new TokenFactors in 64k-1 blocks, until
1386 /// the final TokenFactor has less than 64k operands.
1387 SDValue getTokenFactor(const SDLoc &DL, SmallVectorImpl<SDValue> &Vals);
1388
1389 /// *Mutate* the specified machine node's memory references to the provided
1390 /// list.
1391 void setNodeMemRefs(MachineSDNode *N,
1392 ArrayRef<MachineMemOperand *> NewMemRefs);
1393
1394 // Calculate divergence of node \p N based on its operands.
1395 bool calculateDivergence(SDNode *N);
1396
1397 // Propagates the change in divergence to users
1398 void updateDivergence(SDNode * N);
1399
1400 /// These are used for target selectors to *mutate* the
1401 /// specified node to have the specified return type, Target opcode, and
1402 /// operands. Note that target opcodes are stored as
1403 /// ~TargetOpcode in the node opcode field. The resultant node is returned.
1404 SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT);
1405 SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT, SDValue Op1);
1406 SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT,
1407 SDValue Op1, SDValue Op2);
1408 SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT,
1409 SDValue Op1, SDValue Op2, SDValue Op3);
1410 SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT,
1411 ArrayRef<SDValue> Ops);
1412 SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT1, EVT VT2);
1413 SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT1,
1414 EVT VT2, ArrayRef<SDValue> Ops);
1415 SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT1,
1416 EVT VT2, EVT VT3, ArrayRef<SDValue> Ops);
1417 SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT1,
1418 EVT VT2, SDValue Op1, SDValue Op2);
1419 SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, SDVTList VTs,
1420 ArrayRef<SDValue> Ops);
1421
1422 /// This *mutates* the specified node to have the specified
1423 /// return type, opcode, and operands.
1424 SDNode *MorphNodeTo(SDNode *N, unsigned Opc, SDVTList VTs,
1425 ArrayRef<SDValue> Ops);
1426
1427 /// Mutate the specified strict FP node to its non-strict equivalent,
1428 /// unlinking the node from its chain and dropping the metadata arguments.
1429 /// The node must be a strict FP node.
1430 SDNode *mutateStrictFPToFP(SDNode *Node);
1431
1432 /// These are used for target selectors to create a new node
1433 /// with specified return type(s), MachineInstr opcode, and operands.
1434 ///
1435 /// Note that getMachineNode returns the resultant node. If there is already
1436 /// a node of the specified opcode and operands, it returns that node instead
1437 /// of the current one.
1438 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT);
1439 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT,
1440 SDValue Op1);
1441 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT,
1442 SDValue Op1, SDValue Op2);
1443 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT,
1444 SDValue Op1, SDValue Op2, SDValue Op3);
1445 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT,
1446 ArrayRef<SDValue> Ops);
1447 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
1448 EVT VT2, SDValue Op1, SDValue Op2);
1449 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
1450 EVT VT2, SDValue Op1, SDValue Op2, SDValue Op3);
1451 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
1452 EVT VT2, ArrayRef<SDValue> Ops);
1453 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
1454 EVT VT2, EVT VT3, SDValue Op1, SDValue Op2);
1455 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
1456 EVT VT2, EVT VT3, SDValue Op1, SDValue Op2,
1457 SDValue Op3);
1458 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
1459 EVT VT2, EVT VT3, ArrayRef<SDValue> Ops);
1460 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl,
1461 ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops);
1462 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, SDVTList VTs,
1463 ArrayRef<SDValue> Ops);
1464
1465 /// A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
1466 SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT,
1467 SDValue Operand);
1468
1469 /// A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
1470 SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT,
1471 SDValue Operand, SDValue Subreg);
1472
1473 /// Get the specified node if it's already available, or else return NULL.
1474 SDNode *getNodeIfExists(unsigned Opcode, SDVTList VTList,
1475 ArrayRef<SDValue> Ops, const SDNodeFlags Flags);
1476 SDNode *getNodeIfExists(unsigned Opcode, SDVTList VTList,
1477 ArrayRef<SDValue> Ops);
1478
1479 /// Check if a node exists without modifying its flags.
1480 bool doesNodeExist(unsigned Opcode, SDVTList VTList, ArrayRef<SDValue> Ops);
1481
1482 /// Creates a SDDbgValue node.
1483 SDDbgValue *getDbgValue(DIVariable *Var, DIExpression *Expr, SDNode *N,
1484 unsigned R, bool IsIndirect, const DebugLoc &DL,
1485 unsigned O);
1486
1487 /// Creates a constant SDDbgValue node.
1488 SDDbgValue *getConstantDbgValue(DIVariable *Var, DIExpression *Expr,
1489 const Value *C, const DebugLoc &DL,
1490 unsigned O);
1491
1492 /// Creates a FrameIndex SDDbgValue node.
1493 SDDbgValue *getFrameIndexDbgValue(DIVariable *Var, DIExpression *Expr,
1494 unsigned FI, bool IsIndirect,
1495 const DebugLoc &DL, unsigned O);
1496
1497 /// Creates a FrameIndex SDDbgValue node.
1498 SDDbgValue *getFrameIndexDbgValue(DIVariable *Var, DIExpression *Expr,
1499 unsigned FI,
1500 ArrayRef<SDNode *> Dependencies,
1501 bool IsIndirect, const DebugLoc &DL,
1502 unsigned O);
1503
1504 /// Creates a VReg SDDbgValue node.
1505 SDDbgValue *getVRegDbgValue(DIVariable *Var, DIExpression *Expr,
1506 unsigned VReg, bool IsIndirect,
1507 const DebugLoc &DL, unsigned O);
1508
1509 /// Creates a SDDbgValue node from a list of locations.
1510 SDDbgValue *getDbgValueList(DIVariable *Var, DIExpression *Expr,
1511 ArrayRef<SDDbgOperand> Locs,
1512 ArrayRef<SDNode *> Dependencies, bool IsIndirect,
1513 const DebugLoc &DL, unsigned O, bool IsVariadic);
1514
1515 /// Creates a SDDbgLabel node.
1516 SDDbgLabel *getDbgLabel(DILabel *Label, const DebugLoc &DL, unsigned O);
1517
1518 /// Transfer debug values from one node to another, while optionally
1519 /// generating fragment expressions for split-up values. If \p InvalidateDbg
1520 /// is set, debug values are invalidated after they are transferred.
1521 void transferDbgValues(SDValue From, SDValue To, unsigned OffsetInBits = 0,
1522 unsigned SizeInBits = 0, bool InvalidateDbg = true);
1523
1524 /// Remove the specified node from the system. If any of its
1525 /// operands then becomes dead, remove them as well. Inform UpdateListener
1526 /// for each node deleted.
1527 void RemoveDeadNode(SDNode *N);
1528
1529 /// This method deletes the unreachable nodes in the
1530 /// given list, and any nodes that become unreachable as a result.
1531 void RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes);
1532
1533 /// Modify anything using 'From' to use 'To' instead.
1534 /// This can cause recursive merging of nodes in the DAG. Use the first
1535 /// version if 'From' is known to have a single result, use the second
1536 /// if you have two nodes with identical results (or if 'To' has a superset
1537 /// of the results of 'From'), use the third otherwise.
1538 ///
1539 /// These methods all take an optional UpdateListener, which (if not null) is
1540 /// informed about nodes that are deleted and modified due to recursive
1541 /// changes in the dag.
1542 ///
1543 /// These functions only replace all existing uses. It's possible that as
1544 /// these replacements are being performed, CSE may cause the From node
1545 /// to be given new uses. These new uses of From are left in place, and
1546 /// not automatically transferred to To.
1547 ///
1548 void ReplaceAllUsesWith(SDValue From, SDValue To);
1549 void ReplaceAllUsesWith(SDNode *From, SDNode *To);
1550 void ReplaceAllUsesWith(SDNode *From, const SDValue *To);
1551
1552 /// Replace any uses of From with To, leaving
1553 /// uses of other values produced by From.getNode() alone.
1554 void ReplaceAllUsesOfValueWith(SDValue From, SDValue To);
1555
1556 /// Like ReplaceAllUsesOfValueWith, but for multiple values at once.
1557 /// This correctly handles the case where
1558 /// there is an overlap between the From values and the To values.
1559 void ReplaceAllUsesOfValuesWith(const SDValue *From, const SDValue *To,
1560 unsigned Num);
1561
1562 /// If an existing load has uses of its chain, create a token factor node with
1563 /// that chain and the new memory node's chain and update users of the old
1564 /// chain to the token factor. This ensures that the new memory node will have
1565 /// the same relative memory dependency position as the old load. Returns the
1566 /// new merged load chain.
1567 SDValue makeEquivalentMemoryOrdering(SDValue OldChain, SDValue NewMemOpChain);
1568
1569 /// If an existing load has uses of its chain, create a token factor node with
1570 /// that chain and the new memory node's chain and update users of the old
1571 /// chain to the token factor. This ensures that the new memory node will have
1572 /// the same relative memory dependency position as the old load. Returns the
1573 /// new merged load chain.
1574 SDValue makeEquivalentMemoryOrdering(LoadSDNode *OldLoad, SDValue NewMemOp);
1575
1576 /// Topological-sort the AllNodes list and a
1577 /// assign a unique node id for each node in the DAG based on their
1578 /// topological order. Returns the number of nodes.
1579 unsigned AssignTopologicalOrder();
1580
1581 /// Move node N in the AllNodes list to be immediately
1582 /// before the given iterator Position. This may be used to update the
1583 /// topological ordering when the list of nodes is modified.
1584 void RepositionNode(allnodes_iterator Position, SDNode *N) {
1585 AllNodes.insert(Position, AllNodes.remove(N));
1586 }
1587
1588 /// Returns an APFloat semantics tag appropriate for the given type. If VT is
1589 /// a vector type, the element semantics are returned.
1590 static const fltSemantics &EVTToAPFloatSemantics(EVT VT) {
1591 switch (VT.getScalarType().getSimpleVT().SimpleTy) {
1592 default: llvm_unreachable("Unknown FP format")::llvm::llvm_unreachable_internal("Unknown FP format", "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1592)
;
1593 case MVT::f16: return APFloat::IEEEhalf();
1594 case MVT::bf16: return APFloat::BFloat();
1595 case MVT::f32: return APFloat::IEEEsingle();
1596 case MVT::f64: return APFloat::IEEEdouble();
1597 case MVT::f80: return APFloat::x87DoubleExtended();
1598 case MVT::f128: return APFloat::IEEEquad();
1599 case MVT::ppcf128: return APFloat::PPCDoubleDouble();
1600 }
1601 }
1602
1603 /// Add a dbg_value SDNode. If SD is non-null that means the
1604 /// value is produced by SD.
1605 void AddDbgValue(SDDbgValue *DB, bool isParameter);
1606
1607 /// Add a dbg_label SDNode.
1608 void AddDbgLabel(SDDbgLabel *DB);
1609
1610 /// Get the debug values which reference the given SDNode.
1611 ArrayRef<SDDbgValue*> GetDbgValues(const SDNode* SD) const {
1612 return DbgInfo->getSDDbgValues(SD);
1613 }
1614
1615public:
1616 /// Return true if there are any SDDbgValue nodes associated
1617 /// with this SelectionDAG.
1618 bool hasDebugValues() const { return !DbgInfo->empty(); }
1619
1620 SDDbgInfo::DbgIterator DbgBegin() const { return DbgInfo->DbgBegin(); }
1621 SDDbgInfo::DbgIterator DbgEnd() const { return DbgInfo->DbgEnd(); }
1622
1623 SDDbgInfo::DbgIterator ByvalParmDbgBegin() const {
1624 return DbgInfo->ByvalParmDbgBegin();
1625 }
1626 SDDbgInfo::DbgIterator ByvalParmDbgEnd() const {
1627 return DbgInfo->ByvalParmDbgEnd();
1628 }
1629
1630 SDDbgInfo::DbgLabelIterator DbgLabelBegin() const {
1631 return DbgInfo->DbgLabelBegin();
1632 }
1633 SDDbgInfo::DbgLabelIterator DbgLabelEnd() const {
1634 return DbgInfo->DbgLabelEnd();
1635 }
1636
1637 /// To be invoked on an SDNode that is slated to be erased. This
1638 /// function mirrors \c llvm::salvageDebugInfo.
1639 void salvageDebugInfo(SDNode &N);
1640
1641 void dump() const;
1642
1643 /// In most cases this function returns the ABI alignment for a given type,
1644 /// except for illegal vector types where the alignment exceeds that of the
1645 /// stack. In such cases we attempt to break the vector down to a legal type
1646 /// and return the ABI alignment for that instead.
1647 Align getReducedAlign(EVT VT, bool UseABI);
1648
1649 /// Create a stack temporary based on the size in bytes and the alignment
1650 SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment);
1651
1652 /// Create a stack temporary, suitable for holding the specified value type.
1653 /// If minAlign is specified, the slot size will have at least that alignment.
1654 SDValue CreateStackTemporary(EVT VT, unsigned minAlign = 1);
1655
1656 /// Create a stack temporary suitable for holding either of the specified
1657 /// value types.
1658 SDValue CreateStackTemporary(EVT VT1, EVT VT2);
1659
1660 SDValue FoldSymbolOffset(unsigned Opcode, EVT VT,
1661 const GlobalAddressSDNode *GA,
1662 const SDNode *N2);
1663
1664 SDValue FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, EVT VT,
1665 ArrayRef<SDValue> Ops);
1666
1667 SDValue FoldConstantVectorArithmetic(unsigned Opcode, const SDLoc &DL, EVT VT,
1668 ArrayRef<SDValue> Ops,
1669 const SDNodeFlags Flags = SDNodeFlags());
1670
1671 /// Fold floating-point operations with 2 operands when both operands are
1672 /// constants and/or undefined.
1673 SDValue foldConstantFPMath(unsigned Opcode, const SDLoc &DL, EVT VT,
1674 SDValue N1, SDValue N2);
1675
1676 /// Constant fold a setcc to true or false.
1677 SDValue FoldSetCC(EVT VT, SDValue N1, SDValue N2, ISD::CondCode Cond,
1678 const SDLoc &dl);
1679
1680 /// See if the specified operand can be simplified with the knowledge that
1681 /// only the bits specified by DemandedBits are used. If so, return the
1682 /// simpler operand, otherwise return a null SDValue.
1683 ///
1684 /// (This exists alongside SimplifyDemandedBits because GetDemandedBits can
1685 /// simplify nodes with multiple uses more aggressively.)
1686 SDValue GetDemandedBits(SDValue V, const APInt &DemandedBits);
1687
1688 /// See if the specified operand can be simplified with the knowledge that
1689 /// only the bits specified by DemandedBits are used in the elements specified
1690 /// by DemandedElts. If so, return the simpler operand, otherwise return a
1691 /// null SDValue.
1692 ///
1693 /// (This exists alongside SimplifyDemandedBits because GetDemandedBits can
1694 /// simplify nodes with multiple uses more aggressively.)
1695 SDValue GetDemandedBits(SDValue V, const APInt &DemandedBits,
1696 const APInt &DemandedElts);
1697
1698 /// Return true if the sign bit of Op is known to be zero.
1699 /// We use this predicate to simplify operations downstream.
1700 bool SignBitIsZero(SDValue Op, unsigned Depth = 0) const;
1701
1702 /// Return true if 'Op & Mask' is known to be zero. We
1703 /// use this predicate to simplify operations downstream. Op and Mask are
1704 /// known to be the same type.
1705 bool MaskedValueIsZero(SDValue Op, const APInt &Mask,
1706 unsigned Depth = 0) const;
1707
1708 /// Return true if 'Op & Mask' is known to be zero in DemandedElts. We
1709 /// use this predicate to simplify operations downstream. Op and Mask are
1710 /// known to be the same type.
1711 bool MaskedValueIsZero(SDValue Op, const APInt &Mask,
1712 const APInt &DemandedElts, unsigned Depth = 0) const;
1713
1714 /// Return true if '(Op & Mask) == Mask'.
1715 /// Op and Mask are known to be the same type.
1716 bool MaskedValueIsAllOnes(SDValue Op, const APInt &Mask,
1717 unsigned Depth = 0) const;
1718
1719 /// Determine which bits of Op are known to be either zero or one and return
1720 /// them in Known. For vectors, the known bits are those that are shared by
1721 /// every vector element.
1722 /// Targets can implement the computeKnownBitsForTargetNode method in the
1723 /// TargetLowering class to allow target nodes to be understood.
1724 KnownBits computeKnownBits(SDValue Op, unsigned Depth = 0) const;
1725
1726 /// Determine which bits of Op are known to be either zero or one and return
1727 /// them in Known. The DemandedElts argument allows us to only collect the
1728 /// known bits that are shared by the requested vector elements.
1729 /// Targets can implement the computeKnownBitsForTargetNode method in the
1730 /// TargetLowering class to allow target nodes to be understood.
1731 KnownBits computeKnownBits(SDValue Op, const APInt &DemandedElts,
1732 unsigned Depth = 0) const;
1733
1734 /// Used to represent the possible overflow behavior of an operation.
1735 /// Never: the operation cannot overflow.
1736 /// Always: the operation will always overflow.
1737 /// Sometime: the operation may or may not overflow.
1738 enum OverflowKind {
1739 OFK_Never,
1740 OFK_Sometime,
1741 OFK_Always,
1742 };
1743
1744 /// Determine if the result of the addition of 2 node can overflow.
1745 OverflowKind computeOverflowKind(SDValue N0, SDValue N1) const;
1746
1747 /// Test if the given value is known to have exactly one bit set. This differs
1748 /// from computeKnownBits in that it doesn't necessarily determine which bit
1749 /// is set.
1750 bool isKnownToBeAPowerOfTwo(SDValue Val) const;
1751
1752 /// Return the number of times the sign bit of the register is replicated into
1753 /// the other bits. We know that at least 1 bit is always equal to the sign
1754 /// bit (itself), but other cases can give us information. For example,
1755 /// immediately after an "SRA X, 2", we know that the top 3 bits are all equal
1756 /// to each other, so we return 3. Targets can implement the
1757 /// ComputeNumSignBitsForTarget method in the TargetLowering class to allow
1758 /// target nodes to be understood.
1759 unsigned ComputeNumSignBits(SDValue Op, unsigned Depth = 0) const;
1760
1761 /// Return the number of times the sign bit of the register is replicated into
1762 /// the other bits. We know that at least 1 bit is always equal to the sign
1763 /// bit (itself), but other cases can give us information. For example,
1764 /// immediately after an "SRA X, 2", we know that the top 3 bits are all equal
1765 /// to each other, so we return 3. The DemandedElts argument allows
1766 /// us to only collect the minimum sign bits of the requested vector elements.
1767 /// Targets can implement the ComputeNumSignBitsForTarget method in the
1768 /// TargetLowering class to allow target nodes to be understood.
1769 unsigned ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
1770 unsigned Depth = 0) const;
1771
1772 /// Return true if this function can prove that \p Op is never poison
1773 /// and, if \p PoisonOnly is false, does not have undef bits.
1774 bool isGuaranteedNotToBeUndefOrPoison(SDValue Op, bool PoisonOnly = false,
1775 unsigned Depth = 0) const;
1776
1777 /// Return true if this function can prove that \p Op is never poison
1778 /// and, if \p PoisonOnly is false, does not have undef bits. The DemandedElts
1779 /// argument limits the check to the requested vector elements.
1780 bool isGuaranteedNotToBeUndefOrPoison(SDValue Op, const APInt &DemandedElts,
1781 bool PoisonOnly = false,
1782 unsigned Depth = 0) const;
1783
1784 /// Return true if this function can prove that \p Op is never poison.
1785 bool isGuaranteedNotToBePoison(SDValue Op, unsigned Depth = 0) const {
1786 return isGuaranteedNotToBeUndefOrPoison(Op, /*PoisonOnly*/ true, Depth);
1787 }
1788
1789 /// Return true if this function can prove that \p Op is never poison. The
1790 /// DemandedElts argument limits the check to the requested vector elements.
1791 bool isGuaranteedNotToBePoison(SDValue Op, const APInt &DemandedElts,
1792 unsigned Depth = 0) const {
1793 return isGuaranteedNotToBeUndefOrPoison(Op, DemandedElts,
1794 /*PoisonOnly*/ true, Depth);
1795 }
1796
1797 /// Return true if the specified operand is an ISD::ADD with a ConstantSDNode
1798 /// on the right-hand side, or if it is an ISD::OR with a ConstantSDNode that
1799 /// is guaranteed to have the same semantics as an ADD. This handles the
1800 /// equivalence:
1801 /// X|Cst == X+Cst iff X&Cst = 0.
1802 bool isBaseWithConstantOffset(SDValue Op) const;
1803
1804 /// Test whether the given SDValue is known to never be NaN. If \p SNaN is
1805 /// true, returns if \p Op is known to never be a signaling NaN (it may still
1806 /// be a qNaN).
1807 bool isKnownNeverNaN(SDValue Op, bool SNaN = false, unsigned Depth = 0) const;
1808
1809 /// \returns true if \p Op is known to never be a signaling NaN.
1810 bool isKnownNeverSNaN(SDValue Op, unsigned Depth = 0) const {
1811 return isKnownNeverNaN(Op, true, Depth);
1812 }
1813
1814 /// Test whether the given floating point SDValue is known to never be
1815 /// positive or negative zero.
1816 bool isKnownNeverZeroFloat(SDValue Op) const;
1817
1818 /// Test whether the given SDValue is known to contain non-zero value(s).
1819 bool isKnownNeverZero(SDValue Op) const;
1820
1821 /// Test whether two SDValues are known to compare equal. This
1822 /// is true if they are the same value, or if one is negative zero and the
1823 /// other positive zero.
1824 bool isEqualTo(SDValue A, SDValue B) const;
1825
1826 /// Return true if A and B have no common bits set. As an example, this can
1827 /// allow an 'add' to be transformed into an 'or'.
1828 bool haveNoCommonBitsSet(SDValue A, SDValue B) const;
1829
1830 /// Test whether \p V has a splatted value for all the demanded elements.
1831 ///
1832 /// On success \p UndefElts will indicate the elements that have UNDEF
1833 /// values instead of the splat value, this is only guaranteed to be correct
1834 /// for \p DemandedElts.
1835 ///
1836 /// NOTE: The function will return true for a demanded splat of UNDEF values.
1837 bool isSplatValue(SDValue V, const APInt &DemandedElts, APInt &UndefElts,
1838 unsigned Depth = 0);
1839
1840 /// Test whether \p V has a splatted value.
1841 bool isSplatValue(SDValue V, bool AllowUndefs = false);
1842
1843 /// If V is a splatted value, return the source vector and its splat index.
1844 SDValue getSplatSourceVector(SDValue V, int &SplatIndex);
1845
1846 /// If V is a splat vector, return its scalar source operand by extracting
1847 /// that element from the source vector. If LegalTypes is true, this method
1848 /// may only return a legally-typed splat value. If it cannot legalize the
1849 /// splatted value it will return SDValue().
1850 SDValue getSplatValue(SDValue V, bool LegalTypes = false);
1851
1852 /// If a SHL/SRA/SRL node \p V has a constant or splat constant shift amount
1853 /// that is less than the element bit-width of the shift node, return it.
1854 const APInt *getValidShiftAmountConstant(SDValue V,
1855 const APInt &DemandedElts) const;
1856
1857 /// If a SHL/SRA/SRL node \p V has constant shift amounts that are all less
1858 /// than the element bit-width of the shift node, return the minimum value.
1859 const APInt *
1860 getValidMinimumShiftAmountConstant(SDValue V,
1861 const APInt &DemandedElts) const;
1862
1863 /// If a SHL/SRA/SRL node \p V has constant shift amounts that are all less
1864 /// than the element bit-width of the shift node, return the maximum value.
1865 const APInt *
1866 getValidMaximumShiftAmountConstant(SDValue V,
1867 const APInt &DemandedElts) const;
1868
1869 /// Match a binop + shuffle pyramid that represents a horizontal reduction
1870 /// over the elements of a vector starting from the EXTRACT_VECTOR_ELT node /p
1871 /// Extract. The reduction must use one of the opcodes listed in /p
1872 /// CandidateBinOps and on success /p BinOp will contain the matching opcode.
1873 /// Returns the vector that is being reduced on, or SDValue() if a reduction
1874 /// was not matched. If \p AllowPartials is set then in the case of a
1875 /// reduction pattern that only matches the first few stages, the extracted
1876 /// subvector of the start of the reduction is returned.
1877 SDValue matchBinOpReduction(SDNode *Extract, ISD::NodeType &BinOp,
1878 ArrayRef<ISD::NodeType> CandidateBinOps,
1879 bool AllowPartials = false);
1880
1881 /// Utility function used by legalize and lowering to
1882 /// "unroll" a vector operation by splitting out the scalars and operating
1883 /// on each element individually. If the ResNE is 0, fully unroll the vector
1884 /// op. If ResNE is less than the width of the vector op, unroll up to ResNE.
1885 /// If the ResNE is greater than the width of the vector op, unroll the
1886 /// vector op and fill the end of the resulting vector with UNDEFS.
1887 SDValue UnrollVectorOp(SDNode *N, unsigned ResNE = 0);
1888
1889 /// Like UnrollVectorOp(), but for the [US](ADD|SUB|MUL)O family of opcodes.
1890 /// This is a separate function because those opcodes have two results.
1891 std::pair<SDValue, SDValue> UnrollVectorOverflowOp(SDNode *N,
1892 unsigned ResNE = 0);
1893
1894 /// Return true if loads are next to each other and can be
1895 /// merged. Check that both are nonvolatile and if LD is loading
1896 /// 'Bytes' bytes from a location that is 'Dist' units away from the
1897 /// location that the 'Base' load is loading from.
1898 bool areNonVolatileConsecutiveLoads(LoadSDNode *LD, LoadSDNode *Base,
1899 unsigned Bytes, int Dist) const;
1900
1901 /// Infer alignment of a load / store address. Return None if it cannot be
1902 /// inferred.
1903 MaybeAlign InferPtrAlign(SDValue Ptr) const;
1904
1905 /// Compute the VTs needed for the low/hi parts of a type
1906 /// which is split (or expanded) into two not necessarily identical pieces.
1907 std::pair<EVT, EVT> GetSplitDestVTs(const EVT &VT) const;
1908
1909 /// Compute the VTs needed for the low/hi parts of a type, dependent on an
1910 /// enveloping VT that has been split into two identical pieces. Sets the
1911 /// HisIsEmpty flag when hi type has zero storage size.
1912 std::pair<EVT, EVT> GetDependentSplitDestVTs(const EVT &VT, const EVT &EnvVT,
1913 bool *HiIsEmpty) const;
1914
1915 /// Split the vector with EXTRACT_SUBVECTOR using the provides
1916 /// VTs and return the low/high part.
1917 std::pair<SDValue, SDValue> SplitVector(const SDValue &N, const SDLoc &DL,
1918 const EVT &LoVT, const EVT &HiVT);
1919
1920 /// Split the vector with EXTRACT_SUBVECTOR and return the low/high part.
1921 std::pair<SDValue, SDValue> SplitVector(const SDValue &N, const SDLoc &DL) {
1922 EVT LoVT, HiVT;
1923 std::tie(LoVT, HiVT) = GetSplitDestVTs(N.getValueType());
1924 return SplitVector(N, DL, LoVT, HiVT);
1925 }
1926
1927 /// Split the node's operand with EXTRACT_SUBVECTOR and
1928 /// return the low/high part.
1929 std::pair<SDValue, SDValue> SplitVectorOperand(const SDNode *N, unsigned OpNo)
1930 {
1931 return SplitVector(N->getOperand(OpNo), SDLoc(N));
1932 }
1933
1934 /// Widen the vector up to the next power of two using INSERT_SUBVECTOR.
1935 SDValue WidenVector(const SDValue &N, const SDLoc &DL);
1936
1937 /// Append the extracted elements from Start to Count out of the vector Op in
1938 /// Args. If Count is 0, all of the elements will be extracted. The extracted
1939 /// elements will have type EVT if it is provided, and otherwise their type
1940 /// will be Op's element type.
1941 void ExtractVectorElements(SDValue Op, SmallVectorImpl<SDValue> &Args,
1942 unsigned Start = 0, unsigned Count = 0,
1943 EVT EltVT = EVT());
1944
1945 /// Compute the default alignment value for the given type.
1946 Align getEVTAlign(EVT MemoryVT) const;
1947 /// Compute the default alignment value for the given type.
1948 /// FIXME: Remove once transition to Align is over.
1949 inline unsigned getEVTAlignment(EVT MemoryVT) const {
1950 return getEVTAlign(MemoryVT).value();
1951 }
1952
1953 /// Test whether the given value is a constant int or similar node.
1954 SDNode *isConstantIntBuildVectorOrConstantInt(SDValue N) const;
1955
1956 /// Test whether the given value is a constant FP or similar node.
1957 SDNode *isConstantFPBuildVectorOrConstantFP(SDValue N) const ;
1958
1959 /// \returns true if \p N is any kind of constant or build_vector of
1960 /// constants, int or float. If a vector, it may not necessarily be a splat.
1961 inline bool isConstantValueOfAnyType(SDValue N) const {
1962 return isConstantIntBuildVectorOrConstantInt(N) ||
1963 isConstantFPBuildVectorOrConstantFP(N);
1964 }
1965
1966 void addCallSiteInfo(const SDNode *CallNode, CallSiteInfoImpl &&CallInfo) {
1967 SDCallSiteDbgInfo[CallNode].CSInfo = std::move(CallInfo);
1968 }
1969
1970 CallSiteInfo getSDCallSiteInfo(const SDNode *CallNode) {
1971 auto I = SDCallSiteDbgInfo.find(CallNode);
1972 if (I != SDCallSiteDbgInfo.end())
1973 return std::move(I->second).CSInfo;
1974 return CallSiteInfo();
1975 }
1976
1977 void addHeapAllocSite(const SDNode *Node, MDNode *MD) {
1978 SDCallSiteDbgInfo[Node].HeapAllocSite = MD;
1979 }
1980
1981 /// Return the HeapAllocSite type associated with the SDNode, if it exists.
1982 MDNode *getHeapAllocSite(const SDNode *Node) {
1983 auto It = SDCallSiteDbgInfo.find(Node);
1984 if (It == SDCallSiteDbgInfo.end())
1985 return nullptr;
1986 return It->second.HeapAllocSite;
1987 }
1988
1989 void addNoMergeSiteInfo(const SDNode *Node, bool NoMerge) {
1990 if (NoMerge)
1991 SDCallSiteDbgInfo[Node].NoMerge = NoMerge;
1992 }
1993
1994 bool getNoMergeSiteInfo(const SDNode *Node) {
1995 auto I = SDCallSiteDbgInfo.find(Node);
1996 if (I == SDCallSiteDbgInfo.end())
1997 return false;
1998 return I->second.NoMerge;
1999 }
2000
2001 /// Return the current function's default denormal handling kind for the given
2002 /// floating point type.
2003 DenormalMode getDenormalMode(EVT VT) const {
2004 return MF->getDenormalMode(EVTToAPFloatSemantics(VT));
2005 }
2006
2007 bool shouldOptForSize() const;
2008
2009 /// Get the (commutative) neutral element for the given opcode, if it exists.
2010 SDValue getNeutralElement(unsigned Opcode, const SDLoc &DL, EVT VT,
2011 SDNodeFlags Flags);
2012
2013private:
2014 void InsertNode(SDNode *N);
2015 bool RemoveNodeFromCSEMaps(SDNode *N);
2016 void AddModifiedNodeToCSEMaps(SDNode *N);
2017 SDNode *FindModifiedNodeSlot(SDNode *N, SDValue Op, void *&InsertPos);
2018 SDNode *FindModifiedNodeSlot(SDNode *N, SDValue Op1, SDValue Op2,
2019 void *&InsertPos);
2020 SDNode *FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
2021 void *&InsertPos);
2022 SDNode *UpdateSDLocOnMergeSDNode(SDNode *N, const SDLoc &loc);
2023
2024 void DeleteNodeNotInCSEMaps(SDNode *N);
2025 void DeallocateNode(SDNode *N);
2026
2027 void allnodes_clear();
2028
2029 /// Look up the node specified by ID in CSEMap. If it exists, return it. If
2030 /// not, return the insertion token that will make insertion faster. This
2031 /// overload is for nodes other than Constant or ConstantFP, use the other one
2032 /// for those.
2033 SDNode *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos);
2034
2035 /// Look up the node specified by ID in CSEMap. If it exists, return it. If
2036 /// not, return the insertion token that will make insertion faster. Performs
2037 /// additional processing for constant nodes.
2038 SDNode *FindNodeOrInsertPos(const FoldingSetNodeID &ID, const SDLoc &DL,
2039 void *&InsertPos);
2040
2041 /// List of non-single value types.
2042 FoldingSet<SDVTListNode> VTListMap;
2043
2044 /// Maps to auto-CSE operations.
2045 std::vector<CondCodeSDNode*> CondCodeNodes;
2046
2047 std::vector<SDNode*> ValueTypeNodes;
2048 std::map<EVT, SDNode*, EVT::compareRawBits> ExtendedValueTypeNodes;
2049 StringMap<SDNode*> ExternalSymbols;
2050
2051 std::map<std::pair<std::string, unsigned>, SDNode *> TargetExternalSymbols;
2052 DenseMap<MCSymbol *, SDNode *> MCSymbols;
2053
2054 FlagInserter *Inserter = nullptr;
2055};
2056
2057template <> struct GraphTraits<SelectionDAG*> : public GraphTraits<SDNode*> {
2058 using nodes_iterator = pointer_iterator<SelectionDAG::allnodes_iterator>;
2059
2060 static nodes_iterator nodes_begin(SelectionDAG *G) {
2061 return nodes_iterator(G->allnodes_begin());
2062 }
2063
2064 static nodes_iterator nodes_end(SelectionDAG *G) {
2065 return nodes_iterator(G->allnodes_end());
2066 }
2067};
2068
2069} // end namespace llvm
2070
2071#endif // LLVM_CODEGEN_SELECTIONDAG_H

/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/SelectionDAGNodes.h

1//===- llvm/CodeGen/SelectionDAGNodes.h - SelectionDAG Nodes ----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file declares the SDNode class and derived classes, which are used to
10// represent the nodes and operations present in a SelectionDAG. These nodes
11// and operations are machine code level operations, with some similarities to
12// the GCC RTL representation.
13//
14// Clients should include the SelectionDAG.h file instead of this file directly.
15//
16//===----------------------------------------------------------------------===//
17
18#ifndef LLVM_CODEGEN_SELECTIONDAGNODES_H
19#define LLVM_CODEGEN_SELECTIONDAGNODES_H
20
21#include "llvm/ADT/APFloat.h"
22#include "llvm/ADT/ArrayRef.h"
23#include "llvm/ADT/BitVector.h"
24#include "llvm/ADT/FoldingSet.h"
25#include "llvm/ADT/GraphTraits.h"
26#include "llvm/ADT/SmallPtrSet.h"
27#include "llvm/ADT/SmallVector.h"
28#include "llvm/ADT/ilist_node.h"
29#include "llvm/ADT/iterator.h"
30#include "llvm/ADT/iterator_range.h"
31#include "llvm/CodeGen/ISDOpcodes.h"
32#include "llvm/CodeGen/MachineMemOperand.h"
33#include "llvm/CodeGen/Register.h"
34#include "llvm/CodeGen/ValueTypes.h"
35#include "llvm/IR/Constants.h"
36#include "llvm/IR/DebugLoc.h"
37#include "llvm/IR/Instruction.h"
38#include "llvm/IR/Instructions.h"
39#include "llvm/IR/Metadata.h"
40#include "llvm/IR/Operator.h"
41#include "llvm/Support/AlignOf.h"
42#include "llvm/Support/AtomicOrdering.h"
43#include "llvm/Support/Casting.h"
44#include "llvm/Support/ErrorHandling.h"
45#include "llvm/Support/MachineValueType.h"
46#include "llvm/Support/TypeSize.h"
47#include <algorithm>
48#include <cassert>
49#include <climits>
50#include <cstddef>
51#include <cstdint>
52#include <cstring>
53#include <iterator>
54#include <string>
55#include <tuple>
56
57namespace llvm {
58
59class APInt;
60class Constant;
61template <typename T> struct DenseMapInfo;
62class GlobalValue;
63class MachineBasicBlock;
64class MachineConstantPoolValue;
65class MCSymbol;
66class raw_ostream;
67class SDNode;
68class SelectionDAG;
69class Type;
70class Value;
71
72void checkForCycles(const SDNode *N, const SelectionDAG *DAG = nullptr,
73 bool force = false);
74
75/// This represents a list of ValueType's that has been intern'd by
76/// a SelectionDAG. Instances of this simple value class are returned by
77/// SelectionDAG::getVTList(...).
78///
79struct SDVTList {
80 const EVT *VTs;
81 unsigned int NumVTs;
82};
83
84namespace ISD {
85
86 /// Node predicates
87
88/// If N is a BUILD_VECTOR or SPLAT_VECTOR node whose elements are all the
89/// same constant or undefined, return true and return the constant value in
90/// \p SplatValue.
91bool isConstantSplatVector(const SDNode *N, APInt &SplatValue);
92
93/// Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where
94/// all of the elements are ~0 or undef. If \p BuildVectorOnly is set to
95/// true, it only checks BUILD_VECTOR.
96bool isConstantSplatVectorAllOnes(const SDNode *N,
97 bool BuildVectorOnly = false);
98
99/// Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where
100/// all of the elements are 0 or undef. If \p BuildVectorOnly is set to true, it
101/// only checks BUILD_VECTOR.
102bool isConstantSplatVectorAllZeros(const SDNode *N,
103 bool BuildVectorOnly = false);
104
105/// Return true if the specified node is a BUILD_VECTOR where all of the
106/// elements are ~0 or undef.
107bool isBuildVectorAllOnes(const SDNode *N);
108
109/// Return true if the specified node is a BUILD_VECTOR where all of the
110/// elements are 0 or undef.
111bool isBuildVectorAllZeros(const SDNode *N);
112
113/// Return true if the specified node is a BUILD_VECTOR node of all
114/// ConstantSDNode or undef.
115bool isBuildVectorOfConstantSDNodes(const SDNode *N);
116
117/// Return true if the specified node is a BUILD_VECTOR node of all
118/// ConstantFPSDNode or undef.
119bool isBuildVectorOfConstantFPSDNodes(const SDNode *N);
120
121/// Return true if the node has at least one operand and all operands of the
122/// specified node are ISD::UNDEF.
123bool allOperandsUndef(const SDNode *N);
124
125} // end namespace ISD
126
127//===----------------------------------------------------------------------===//
128/// Unlike LLVM values, Selection DAG nodes may return multiple
129/// values as the result of a computation. Many nodes return multiple values,
130/// from loads (which define a token and a return value) to ADDC (which returns
131/// a result and a carry value), to calls (which may return an arbitrary number
132/// of values).
133///
134/// As such, each use of a SelectionDAG computation must indicate the node that
135/// computes it as well as which return value to use from that node. This pair
136/// of information is represented with the SDValue value type.
137///
138class SDValue {
139 friend struct DenseMapInfo<SDValue>;
140
141 SDNode *Node = nullptr; // The node defining the value we are using.
142 unsigned ResNo = 0; // Which return value of the node we are using.
143
144public:
145 SDValue() = default;
146 SDValue(SDNode *node, unsigned resno);
147
148 /// get the index which selects a specific result in the SDNode
149 unsigned getResNo() const { return ResNo; }
150
151 /// get the SDNode which holds the desired result
152 SDNode *getNode() const { return Node; }
153
154 /// set the SDNode
155 void setNode(SDNode *N) { Node = N; }
156
157 inline SDNode *operator->() const { return Node; }
158
159 bool operator==(const SDValue &O) const {
160 return Node == O.Node && ResNo == O.ResNo;
161 }
162 bool operator!=(const SDValue &O) const {
163 return !operator==(O);
164 }
165 bool operator<(const SDValue &O) const {
166 return std::tie(Node, ResNo) < std::tie(O.Node, O.ResNo);
167 }
168 explicit operator bool() const {
169 return Node != nullptr;
170 }
171
172 SDValue getValue(unsigned R) const {
173 return SDValue(Node, R);
174 }
175
176 /// Return true if this node is an operand of N.
177 bool isOperandOf(const SDNode *N) const;
178
179 /// Return the ValueType of the referenced return value.
180 inline EVT getValueType() const;
181
182 /// Return the simple ValueType of the referenced return value.
183 MVT getSimpleValueType() const {
184 return getValueType().getSimpleVT();
185 }
186
187 /// Returns the size of the value in bits.
188 ///
189 /// If the value type is a scalable vector type, the scalable property will
190 /// be set and the runtime size will be a positive integer multiple of the
191 /// base size.
192 TypeSize getValueSizeInBits() const {
193 return getValueType().getSizeInBits();
194 }
195
196 uint64_t getScalarValueSizeInBits() const {
197 return getValueType().getScalarType().getFixedSizeInBits();
198 }
199
200 // Forwarding methods - These forward to the corresponding methods in SDNode.
201 inline unsigned getOpcode() const;
202 inline unsigned getNumOperands() const;
203 inline const SDValue &getOperand(unsigned i) const;
204 inline uint64_t getConstantOperandVal(unsigned i) const;
205 inline const APInt &getConstantOperandAPInt(unsigned i) const;
206 inline bool isTargetMemoryOpcode() const;
207 inline bool isTargetOpcode() const;
208 inline bool isMachineOpcode() const;
209 inline bool isUndef() const;
210 inline unsigned getMachineOpcode() const;
211 inline const DebugLoc &getDebugLoc() const;
212 inline void dump() const;
213 inline void dump(const SelectionDAG *G) const;
214 inline void dumpr() const;
215 inline void dumpr(const SelectionDAG *G) const;
216
217 /// Return true if this operand (which must be a chain) reaches the
218 /// specified operand without crossing any side-effecting instructions.
219 /// In practice, this looks through token factors and non-volatile loads.
220 /// In order to remain efficient, this only
221 /// looks a couple of nodes in, it does not do an exhaustive search.
222 bool reachesChainWithoutSideEffects(SDValue Dest,
223 unsigned Depth = 2) const;
224
225 /// Return true if there are no nodes using value ResNo of Node.
226 inline bool use_empty() const;
227
228 /// Return true if there is exactly one node using value ResNo of Node.
229 inline bool hasOneUse() const;
230};
231
232template<> struct DenseMapInfo<SDValue> {
233 static inline SDValue getEmptyKey() {
234 SDValue V;
235 V.ResNo = -1U;
236 return V;
237 }
238
239 static inline SDValue getTombstoneKey() {
240 SDValue V;
241 V.ResNo = -2U;
242 return V;
243 }
244
245 static unsigned getHashValue(const SDValue &Val) {
246 return ((unsigned)((uintptr_t)Val.getNode() >> 4) ^
247 (unsigned)((uintptr_t)Val.getNode() >> 9)) + Val.getResNo();
248 }
249
250 static bool isEqual(const SDValue &LHS, const SDValue &RHS) {
251 return LHS == RHS;
252 }
253};
254
255/// Allow casting operators to work directly on
256/// SDValues as if they were SDNode*'s.
257template<> struct simplify_type<SDValue> {
258 using SimpleType = SDNode *;
259
260 static SimpleType getSimplifiedValue(SDValue &Val) {
261 return Val.getNode();
262 }
263};
264template<> struct simplify_type<const SDValue> {
265 using SimpleType = /*const*/ SDNode *;
266
267 static SimpleType getSimplifiedValue(const SDValue &Val) {
268 return Val.getNode();
269 }
270};
271
272/// Represents a use of a SDNode. This class holds an SDValue,
273/// which records the SDNode being used and the result number, a
274/// pointer to the SDNode using the value, and Next and Prev pointers,
275/// which link together all the uses of an SDNode.
276///
277class SDUse {
278 /// Val - The value being used.
279 SDValue Val;
280 /// User - The user of this value.
281 SDNode *User = nullptr;
282 /// Prev, Next - Pointers to the uses list of the SDNode referred by
283 /// this operand.
284 SDUse **Prev = nullptr;
285 SDUse *Next = nullptr;
286
287public:
288 SDUse() = default;
289 SDUse(const SDUse &U) = delete;
290 SDUse &operator=(const SDUse &) = delete;
291
292 /// Normally SDUse will just implicitly convert to an SDValue that it holds.
293 operator const SDValue&() const { return Val; }
294
295 /// If implicit conversion to SDValue doesn't work, the get() method returns
296 /// the SDValue.
297 const SDValue &get() const { return Val; }
298
299 /// This returns the SDNode that contains this Use.
300 SDNode *getUser() { return User; }
301
302 /// Get the next SDUse in the use list.
303 SDUse *getNext() const { return Next; }
304
305 /// Convenience function for get().getNode().
306 SDNode *getNode() const { return Val.getNode(); }
307 /// Convenience function for get().getResNo().
308 unsigned getResNo() const { return Val.getResNo(); }
309 /// Convenience function for get().getValueType().
310 EVT getValueType() const { return Val.getValueType(); }
311
312 /// Convenience function for get().operator==
313 bool operator==(const SDValue &V) const {
314 return Val == V;
315 }
316
317 /// Convenience function for get().operator!=
318 bool operator!=(const SDValue &V) const {
319 return Val != V;
320 }
321
322 /// Convenience function for get().operator<
323 bool operator<(const SDValue &V) const {
324 return Val < V;
325 }
326
327private:
328 friend class SelectionDAG;
329 friend class SDNode;
330 // TODO: unfriend HandleSDNode once we fix its operand handling.
331 friend class HandleSDNode;
332
333 void setUser(SDNode *p) { User = p; }
334
335 /// Remove this use from its existing use list, assign it the
336 /// given value, and add it to the new value's node's use list.
337 inline void set(const SDValue &V);
338 /// Like set, but only supports initializing a newly-allocated
339 /// SDUse with a non-null value.
340 inline void setInitial(const SDValue &V);
341 /// Like set, but only sets the Node portion of the value,
342 /// leaving the ResNo portion unmodified.
343 inline void setNode(SDNode *N);
344
345 void addToList(SDUse **List) {
346 Next = *List;
347 if (Next) Next->Prev = &Next;
348 Prev = List;
349 *List = this;
350 }
351
352 void removeFromList() {
353 *Prev = Next;
354 if (Next) Next->Prev = Prev;
355 }
356};
357
358/// simplify_type specializations - Allow casting operators to work directly on
359/// SDValues as if they were SDNode*'s.
360template<> struct simplify_type<SDUse> {
361 using SimpleType = SDNode *;
362
363 static SimpleType getSimplifiedValue(SDUse &Val) {
364 return Val.getNode();
365 }
366};
367
368/// These are IR-level optimization flags that may be propagated to SDNodes.
369/// TODO: This data structure should be shared by the IR optimizer and the
370/// the backend.
371struct SDNodeFlags {
372private:
373 bool NoUnsignedWrap : 1;
374 bool NoSignedWrap : 1;
375 bool Exact : 1;
376 bool NoNaNs : 1;
377 bool NoInfs : 1;
378 bool NoSignedZeros : 1;
379 bool AllowReciprocal : 1;
380 bool AllowContract : 1;
381 bool ApproximateFuncs : 1;
382 bool AllowReassociation : 1;
383
384 // We assume instructions do not raise floating-point exceptions by default,
385 // and only those marked explicitly may do so. We could choose to represent
386 // this via a positive "FPExcept" flags like on the MI level, but having a
387 // negative "NoFPExcept" flag here (that defaults to true) makes the flag
388 // intersection logic more straightforward.
389 bool NoFPExcept : 1;
390
391public:
392 /// Default constructor turns off all optimization flags.
393 SDNodeFlags()
394 : NoUnsignedWrap(false), NoSignedWrap(false), Exact(false), NoNaNs(false),
395 NoInfs(false), NoSignedZeros(false), AllowReciprocal(false),
396 AllowContract(false), ApproximateFuncs(false),
397 AllowReassociation(false), NoFPExcept(false) {}
398
399 /// Propagate the fast-math-flags from an IR FPMathOperator.
400 void copyFMF(const FPMathOperator &FPMO) {
401 setNoNaNs(FPMO.hasNoNaNs());
402 setNoInfs(FPMO.hasNoInfs());
403 setNoSignedZeros(FPMO.hasNoSignedZeros());
404 setAllowReciprocal(FPMO.hasAllowReciprocal());
405 setAllowContract(FPMO.hasAllowContract());
406 setApproximateFuncs(FPMO.hasApproxFunc());
407 setAllowReassociation(FPMO.hasAllowReassoc());
408 }
409
410 // These are mutators for each flag.
411 void setNoUnsignedWrap(bool b) { NoUnsignedWrap = b; }
412 void setNoSignedWrap(bool b) { NoSignedWrap = b; }
413 void setExact(bool b) { Exact = b; }
414 void setNoNaNs(bool b) { NoNaNs = b; }
415 void setNoInfs(bool b) { NoInfs = b; }
416 void setNoSignedZeros(bool b) { NoSignedZeros = b; }
417 void setAllowReciprocal(bool b) { AllowReciprocal = b; }
418 void setAllowContract(bool b) { AllowContract = b; }
419 void setApproximateFuncs(bool b) { ApproximateFuncs = b; }
420 void setAllowReassociation(bool b) { AllowReassociation = b; }
421 void setNoFPExcept(bool b) { NoFPExcept = b; }
422
423 // These are accessors for each flag.
424 bool hasNoUnsignedWrap() const { return NoUnsignedWrap; }
425 bool hasNoSignedWrap() const { return NoSignedWrap; }
426 bool hasExact() const { return Exact; }
427 bool hasNoNaNs() const { return NoNaNs; }
428 bool hasNoInfs() const { return NoInfs; }
429 bool hasNoSignedZeros() const { return NoSignedZeros; }
430 bool hasAllowReciprocal() const { return AllowReciprocal; }
431 bool hasAllowContract() const { return AllowContract; }
432 bool hasApproximateFuncs() const { return ApproximateFuncs; }
433 bool hasAllowReassociation() const { return AllowReassociation; }
434 bool hasNoFPExcept() const { return NoFPExcept; }
435
436 /// Clear any flags in this flag set that aren't also set in Flags. All
437 /// flags will be cleared if Flags are undefined.
438 void intersectWith(const SDNodeFlags Flags) {
439 NoUnsignedWrap &= Flags.NoUnsignedWrap;
440 NoSignedWrap &= Flags.NoSignedWrap;
441 Exact &= Flags.Exact;
442 NoNaNs &= Flags.NoNaNs;
443 NoInfs &= Flags.NoInfs;
444 NoSignedZeros &= Flags.NoSignedZeros;
445 AllowReciprocal &= Flags.AllowReciprocal;
446 AllowContract &= Flags.AllowContract;
447 ApproximateFuncs &= Flags.ApproximateFuncs;
448 AllowReassociation &= Flags.AllowReassociation;
449 NoFPExcept &= Flags.NoFPExcept;
450 }
451};
452
453/// Represents one node in the SelectionDAG.
454///
455class SDNode : public FoldingSetNode, public ilist_node<SDNode> {
456private:
457 /// The operation that this node performs.
458 int16_t NodeType;
459
460protected:
461 // We define a set of mini-helper classes to help us interpret the bits in our
462 // SubclassData. These are designed to fit within a uint16_t so they pack
463 // with NodeType.
464
465#if defined(_AIX) && (!defined(__GNUC__4) || defined(__clang__1))
466// Except for GCC; by default, AIX compilers store bit-fields in 4-byte words
467// and give the `pack` pragma push semantics.
468#define BEGIN_TWO_BYTE_PACK() _Pragma("pack(2)")pack(2)
469#define END_TWO_BYTE_PACK() _Pragma("pack(pop)")pack(pop)
470#else
471#define BEGIN_TWO_BYTE_PACK()
472#define END_TWO_BYTE_PACK()
473#endif
474
475BEGIN_TWO_BYTE_PACK()
476 class SDNodeBitfields {
477 friend class SDNode;
478 friend class MemIntrinsicSDNode;
479 friend class MemSDNode;
480 friend class SelectionDAG;
481
482 uint16_t HasDebugValue : 1;
483 uint16_t IsMemIntrinsic : 1;
484 uint16_t IsDivergent : 1;
485 };
486 enum { NumSDNodeBits = 3 };
487
488 class ConstantSDNodeBitfields {
489 friend class ConstantSDNode;
490
491 uint16_t : NumSDNodeBits;
492
493 uint16_t IsOpaque : 1;
494 };
495
496 class MemSDNodeBitfields {
497 friend class MemSDNode;
498 friend class MemIntrinsicSDNode;
499 friend class AtomicSDNode;
500
501 uint16_t : NumSDNodeBits;
502
503 uint16_t IsVolatile : 1;
504 uint16_t IsNonTemporal : 1;
505 uint16_t IsDereferenceable : 1;
506 uint16_t IsInvariant : 1;
507 };
508 enum { NumMemSDNodeBits = NumSDNodeBits + 4 };
509
510 class LSBaseSDNodeBitfields {
511 friend class LSBaseSDNode;
512 friend class MaskedLoadStoreSDNode;
513 friend class MaskedGatherScatterSDNode;
514
515 uint16_t : NumMemSDNodeBits;
516
517 // This storage is shared between disparate class hierarchies to hold an
518 // enumeration specific to the class hierarchy in use.
519 // LSBaseSDNode => enum ISD::MemIndexedMode
520 // MaskedLoadStoreBaseSDNode => enum ISD::MemIndexedMode
521 // MaskedGatherScatterSDNode => enum ISD::MemIndexType
522 uint16_t AddressingMode : 3;
523 };
524 enum { NumLSBaseSDNodeBits = NumMemSDNodeBits + 3 };
525
526 class LoadSDNodeBitfields {
527 friend class LoadSDNode;
528 friend class MaskedLoadSDNode;
529 friend class MaskedGatherSDNode;
530
531 uint16_t : NumLSBaseSDNodeBits;
532
533 uint16_t ExtTy : 2; // enum ISD::LoadExtType
534 uint16_t IsExpanding : 1;
535 };
536
537 class StoreSDNodeBitfields {
538 friend class StoreSDNode;
539 friend class MaskedStoreSDNode;
540 friend class MaskedScatterSDNode;
541
542 uint16_t : NumLSBaseSDNodeBits;
543
544 uint16_t IsTruncating : 1;
545 uint16_t IsCompressing : 1;
546 };
547
548 union {
549 char RawSDNodeBits[sizeof(uint16_t)];
550 SDNodeBitfields SDNodeBits;
551 ConstantSDNodeBitfields ConstantSDNodeBits;
552 MemSDNodeBitfields MemSDNodeBits;
553 LSBaseSDNodeBitfields LSBaseSDNodeBits;
554 LoadSDNodeBitfields LoadSDNodeBits;
555 StoreSDNodeBitfields StoreSDNodeBits;
556 };
557END_TWO_BYTE_PACK()
558#undef BEGIN_TWO_BYTE_PACK
559#undef END_TWO_BYTE_PACK
560
561 // RawSDNodeBits must cover the entirety of the union. This means that all of
562 // the union's members must have size <= RawSDNodeBits. We write the RHS as
563 // "2" instead of sizeof(RawSDNodeBits) because MSVC can't handle the latter.
564 static_assert(sizeof(SDNodeBitfields) <= 2, "field too wide");
565 static_assert(sizeof(ConstantSDNodeBitfields) <= 2, "field too wide");
566 static_assert(sizeof(MemSDNodeBitfields) <= 2, "field too wide");
567 static_assert(sizeof(LSBaseSDNodeBitfields) <= 2, "field too wide");
568 static_assert(sizeof(LoadSDNodeBitfields) <= 2, "field too wide");
569 static_assert(sizeof(StoreSDNodeBitfields) <= 2, "field too wide");
570
571private:
572 friend class SelectionDAG;
573 // TODO: unfriend HandleSDNode once we fix its operand handling.
574 friend class HandleSDNode;
575
576 /// Unique id per SDNode in the DAG.
577 int NodeId = -1;
578
579 /// The values that are used by this operation.
580 SDUse *OperandList = nullptr;
581
582 /// The types of the values this node defines. SDNode's may
583 /// define multiple values simultaneously.
584 const EVT *ValueList;
585
586 /// List of uses for this SDNode.
587 SDUse *UseList = nullptr;
588
589 /// The number of entries in the Operand/Value list.
590 unsigned short NumOperands = 0;
591 unsigned short NumValues;
592
593 // The ordering of the SDNodes. It roughly corresponds to the ordering of the
594 // original LLVM instructions.
595 // This is used for turning off scheduling, because we'll forgo
596 // the normal scheduling algorithms and output the instructions according to
597 // this ordering.
598 unsigned IROrder;
599
600 /// Source line information.
601 DebugLoc debugLoc;
602
603 /// Return a pointer to the specified value type.
604 static const EVT *getValueTypeList(EVT VT);
605
606 SDNodeFlags Flags;
607
608public:
609 /// Unique and persistent id per SDNode in the DAG.
610 /// Used for debug printing.
611 uint16_t PersistentId;
612
613 //===--------------------------------------------------------------------===//
614 // Accessors
615 //
616
617 /// Return the SelectionDAG opcode value for this node. For
618 /// pre-isel nodes (those for which isMachineOpcode returns false), these
619 /// are the opcode values in the ISD and <target>ISD namespaces. For
620 /// post-isel opcodes, see getMachineOpcode.
621 unsigned getOpcode() const { return (unsigned short)NodeType; }
622
623 /// Test if this node has a target-specific opcode (in the
624 /// \<target\>ISD namespace).
625 bool isTargetOpcode() const { return NodeType >= ISD::BUILTIN_OP_END; }
626
627 /// Test if this node has a target-specific opcode that may raise
628 /// FP exceptions (in the \<target\>ISD namespace and greater than
629 /// FIRST_TARGET_STRICTFP_OPCODE). Note that all target memory
630 /// opcode are currently automatically considered to possibly raise
631 /// FP exceptions as well.
632 bool isTargetStrictFPOpcode() const {
633 return NodeType >= ISD::FIRST_TARGET_STRICTFP_OPCODE;
634 }
635
636 /// Test if this node has a target-specific
637 /// memory-referencing opcode (in the \<target\>ISD namespace and
638 /// greater than FIRST_TARGET_MEMORY_OPCODE).
639 bool isTargetMemoryOpcode() const {
640 return NodeType >= ISD::FIRST_TARGET_MEMORY_OPCODE;
641 }
642
643 /// Return true if the type of the node type undefined.
644 bool isUndef() const { return NodeType == ISD::UNDEF; }
645
646 /// Test if this node is a memory intrinsic (with valid pointer information).
647 /// INTRINSIC_W_CHAIN and INTRINSIC_VOID nodes are sometimes created for
648 /// non-memory intrinsics (with chains) that are not really instances of
649 /// MemSDNode. For such nodes, we need some extra state to determine the
650 /// proper classof relationship.
651 bool isMemIntrinsic() const {
652 return (NodeType == ISD::INTRINSIC_W_CHAIN ||
653 NodeType == ISD::INTRINSIC_VOID) &&
654 SDNodeBits.IsMemIntrinsic;
655 }
656
657 /// Test if this node is a strict floating point pseudo-op.
658 bool isStrictFPOpcode() {
659 switch (NodeType) {
660 default:
661 return false;
662 case ISD::STRICT_FP16_TO_FP:
663 case ISD::STRICT_FP_TO_FP16:
664#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
665 case ISD::STRICT_##DAGN:
666#include "llvm/IR/ConstrainedOps.def"
667 return true;
668 }
669 }
670
671 /// Test if this node has a post-isel opcode, directly
672 /// corresponding to a MachineInstr opcode.
673 bool isMachineOpcode() const { return NodeType < 0; }
674
675 /// This may only be called if isMachineOpcode returns
676 /// true. It returns the MachineInstr opcode value that the node's opcode
677 /// corresponds to.
678 unsigned getMachineOpcode() const {
679 assert(isMachineOpcode() && "Not a MachineInstr opcode!")(static_cast <bool> (isMachineOpcode() && "Not a MachineInstr opcode!"
) ? void (0) : __assert_fail ("isMachineOpcode() && \"Not a MachineInstr opcode!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 679, __extension__ __PRETTY_FUNCTION__))
;
680 return ~NodeType;
681 }
682
683 bool getHasDebugValue() const { return SDNodeBits.HasDebugValue; }
684 void setHasDebugValue(bool b) { SDNodeBits.HasDebugValue = b; }
685
686 bool isDivergent() const { return SDNodeBits.IsDivergent; }
687
688 /// Return true if there are no uses of this node.
689 bool use_empty() const { return UseList == nullptr; }
690
691 /// Return true if there is exactly one use of this node.
692 bool hasOneUse() const { return hasSingleElement(uses()); }
693
694 /// Return the number of uses of this node. This method takes
695 /// time proportional to the number of uses.
696 size_t use_size() const { return std::distance(use_begin(), use_end()); }
697
698 /// Return the unique node id.
699 int getNodeId() const { return NodeId; }
700
701 /// Set unique node id.
702 void setNodeId(int Id) { NodeId = Id; }
703
704 /// Return the node ordering.
705 unsigned getIROrder() const { return IROrder; }
706
707 /// Set the node ordering.
708 void setIROrder(unsigned Order) { IROrder = Order; }
709
710 /// Return the source location info.
711 const DebugLoc &getDebugLoc() const { return debugLoc; }
712
713 /// Set source location info. Try to avoid this, putting
714 /// it in the constructor is preferable.
715 void setDebugLoc(DebugLoc dl) { debugLoc = std::move(dl); }
716
717 /// This class provides iterator support for SDUse
718 /// operands that use a specific SDNode.
719 class use_iterator {
720 friend class SDNode;
721
722 SDUse *Op = nullptr;
723
724 explicit use_iterator(SDUse *op) : Op(op) {}
725
726 public:
727 using iterator_category = std::forward_iterator_tag;
728 using value_type = SDUse;
729 using difference_type = std::ptrdiff_t;
730 using pointer = value_type *;
731 using reference = value_type &;
732
733 use_iterator() = default;
734 use_iterator(const use_iterator &I) : Op(I.Op) {}
735
736 bool operator==(const use_iterator &x) const {
737 return Op == x.Op;
738 }
739 bool operator!=(const use_iterator &x) const {
740 return !operator==(x);
741 }
742
743 /// Return true if this iterator is at the end of uses list.
744 bool atEnd() const { return Op == nullptr; }
745
746 // Iterator traversal: forward iteration only.
747 use_iterator &operator++() { // Preincrement
748 assert(Op && "Cannot increment end iterator!")(static_cast <bool> (Op && "Cannot increment end iterator!"
) ? void (0) : __assert_fail ("Op && \"Cannot increment end iterator!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 748, __extension__ __PRETTY_FUNCTION__))
;
749 Op = Op->getNext();
750 return *this;
751 }
752
753 use_iterator operator++(int) { // Postincrement
754 use_iterator tmp = *this; ++*this; return tmp;
755 }
756
757 /// Retrieve a pointer to the current user node.
758 SDNode *operator*() const {
759 assert(Op && "Cannot dereference end iterator!")(static_cast <bool> (Op && "Cannot dereference end iterator!"
) ? void (0) : __assert_fail ("Op && \"Cannot dereference end iterator!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 759, __extension__ __PRETTY_FUNCTION__))
;
760 return Op->getUser();
761 }
762
763 SDNode *operator->() const { return operator*(); }
764
765 SDUse &getUse() const { return *Op; }
766
767 /// Retrieve the operand # of this use in its user.
768 unsigned getOperandNo() const {
769 assert(Op && "Cannot dereference end iterator!")(static_cast <bool> (Op && "Cannot dereference end iterator!"
) ? void (0) : __assert_fail ("Op && \"Cannot dereference end iterator!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 769, __extension__ __PRETTY_FUNCTION__))
;
770 return (unsigned)(Op - Op->getUser()->OperandList);
771 }
772 };
773
774 /// Provide iteration support to walk over all uses of an SDNode.
775 use_iterator use_begin() const {
776 return use_iterator(UseList);
777 }
778
779 static use_iterator use_end() { return use_iterator(nullptr); }
780
781 inline iterator_range<use_iterator> uses() {
782 return make_range(use_begin(), use_end());
783 }
784 inline iterator_range<use_iterator> uses() const {
785 return make_range(use_begin(), use_end());
786 }
787
788 /// Return true if there are exactly NUSES uses of the indicated value.
789 /// This method ignores uses of other values defined by this operation.
790 bool hasNUsesOfValue(unsigned NUses, unsigned Value) const;
791
792 /// Return true if there are any use of the indicated value.
793 /// This method ignores uses of other values defined by this operation.
794 bool hasAnyUseOfValue(unsigned Value) const;
795
796 /// Return true if this node is the only use of N.
797 bool isOnlyUserOf(const SDNode *N) const;
798
799 /// Return true if this node is an operand of N.
800 bool isOperandOf(const SDNode *N) const;
801
802 /// Return true if this node is a predecessor of N.
803 /// NOTE: Implemented on top of hasPredecessor and every bit as
804 /// expensive. Use carefully.
805 bool isPredecessorOf(const SDNode *N) const {
806 return N->hasPredecessor(this);
807 }
808
809 /// Return true if N is a predecessor of this node.
810 /// N is either an operand of this node, or can be reached by recursively
811 /// traversing up the operands.
812 /// NOTE: This is an expensive method. Use it carefully.
813 bool hasPredecessor(const SDNode *N) const;
814
815 /// Returns true if N is a predecessor of any node in Worklist. This
816 /// helper keeps Visited and Worklist sets externally to allow unions
817 /// searches to be performed in parallel, caching of results across
818 /// queries and incremental addition to Worklist. Stops early if N is
819 /// found but will resume. Remember to clear Visited and Worklists
820 /// if DAG changes. MaxSteps gives a maximum number of nodes to visit before
821 /// giving up. The TopologicalPrune flag signals that positive NodeIds are
822 /// topologically ordered (Operands have strictly smaller node id) and search
823 /// can be pruned leveraging this.
824 static bool hasPredecessorHelper(const SDNode *N,
825 SmallPtrSetImpl<const SDNode *> &Visited,
826 SmallVectorImpl<const SDNode *> &Worklist,
827 unsigned int MaxSteps = 0,
828 bool TopologicalPrune = false) {
829 SmallVector<const SDNode *, 8> DeferredNodes;
830 if (Visited.count(N))
831 return true;
832
833 // Node Id's are assigned in three places: As a topological
834 // ordering (> 0), during legalization (results in values set to
835 // 0), new nodes (set to -1). If N has a topolgical id then we
836 // know that all nodes with ids smaller than it cannot be
837 // successors and we need not check them. Filter out all node
838 // that can't be matches. We add them to the worklist before exit
839 // in case of multiple calls. Note that during selection the topological id
840 // may be violated if a node's predecessor is selected before it. We mark
841 // this at selection negating the id of unselected successors and
842 // restricting topological pruning to positive ids.
843
844 int NId = N->getNodeId();
845 // If we Invalidated the Id, reconstruct original NId.
846 if (NId < -1)
847 NId = -(NId + 1);
848
849 bool Found = false;
850 while (!Worklist.empty()) {
851 const SDNode *M = Worklist.pop_back_val();
852 int MId = M->getNodeId();
853 if (TopologicalPrune && M->getOpcode() != ISD::TokenFactor && (NId > 0) &&
854 (MId > 0) && (MId < NId)) {
855 DeferredNodes.push_back(M);
856 continue;
857 }
858 for (const SDValue &OpV : M->op_values()) {
859 SDNode *Op = OpV.getNode();
860 if (Visited.insert(Op).second)
861 Worklist.push_back(Op);
862 if (Op == N)
863 Found = true;
864 }
865 if (Found)
866 break;
867 if (MaxSteps != 0 && Visited.size() >= MaxSteps)
868 break;
869 }
870 // Push deferred nodes back on worklist.
871 Worklist.append(DeferredNodes.begin(), DeferredNodes.end());
872 // If we bailed early, conservatively return found.
873 if (MaxSteps != 0 && Visited.size() >= MaxSteps)
874 return true;
875 return Found;
876 }
877
878 /// Return true if all the users of N are contained in Nodes.
879 /// NOTE: Requires at least one match, but doesn't require them all.
880 static bool areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N);
881
882 /// Return the number of values used by this operation.
883 unsigned getNumOperands() const { return NumOperands; }
884
885 /// Return the maximum number of operands that a SDNode can hold.
886 static constexpr size_t getMaxNumOperands() {
887 return std::numeric_limits<decltype(SDNode::NumOperands)>::max();
888 }
889
890 /// Helper method returns the integer value of a ConstantSDNode operand.
891 inline uint64_t getConstantOperandVal(unsigned Num) const;
892
893 /// Helper method returns the APInt of a ConstantSDNode operand.
894 inline const APInt &getConstantOperandAPInt(unsigned Num) const;
895
896 const SDValue &getOperand(unsigned Num) const {
897 assert(Num < NumOperands && "Invalid child # of SDNode!")(static_cast <bool> (Num < NumOperands && "Invalid child # of SDNode!"
) ? void (0) : __assert_fail ("Num < NumOperands && \"Invalid child # of SDNode!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 897, __extension__ __PRETTY_FUNCTION__))
;
898 return OperandList[Num];
899 }
900
// Operands are stored as a contiguous array of SDUse records, so a raw
// pointer serves as a random-access operand iterator.
901 using op_iterator = SDUse *;
902
903 op_iterator op_begin() const { return OperandList; }
904 op_iterator op_end() const { return OperandList+NumOperands; }
905 ArrayRef<SDUse> ops() const { return makeArrayRef(op_begin(), op_end()); }
906
907 /// Iterator for directly iterating over the operand SDValue's.
// Adapts op_iterator (SDUse*) so that dereferencing yields the SDValue held
// by the use (via SDUse::get) instead of the SDUse record itself.
908 struct value_op_iterator
909 : iterator_adaptor_base<value_op_iterator, op_iterator,
910 std::random_access_iterator_tag, SDValue,
911 ptrdiff_t, value_op_iterator *,
912 value_op_iterator *> {
913 explicit value_op_iterator(SDUse *U = nullptr)
914 : iterator_adaptor_base(U) {}
915
// I is the wrapped SDUse* supplied by iterator_adaptor_base.
916 const SDValue &operator*() const { return I->get(); }
917 };
918
// Range helper for `for (SDValue Op : N->op_values())`-style iteration.
919 iterator_range<value_op_iterator> op_values() const {
920 return make_range(value_op_iterator(op_begin()),
921 value_op_iterator(op_end()));
922 }
923
924 SDVTList getVTList() const {
925 SDVTList X = { ValueList, NumValues };
926 return X;
927 }
928
929 /// If this node has a glue operand, return the node
930 /// to which the glue operand points. Otherwise return NULL.
931 SDNode *getGluedNode() const {
932 if (getNumOperands() != 0 &&
933 getOperand(getNumOperands()-1).getValueType() == MVT::Glue)
934 return getOperand(getNumOperands()-1).getNode();
935 return nullptr;
936 }
937
938 /// If this node has a glue value with a user, return
939 /// the user (there is at most one). Otherwise return NULL.
940 SDNode *getGluedUser() const {
941 for (use_iterator UI = use_begin(), UE = use_end(); UI != UE; ++UI)
942 if (UI.getUse().get().getValueType() == MVT::Glue)
943 return *UI;
944 return nullptr;
945 }
946
// Accessors for the node's fast-math/wrap/etc. flag set. setFlags replaces
// the whole set wholesale; use intersectFlagsWith to only narrow it.
947 SDNodeFlags getFlags() const { return Flags; }
948 void setFlags(SDNodeFlags NewFlags) { Flags = NewFlags; }
949
950 /// Clear any flags in this node that aren't also set in Flags.
951 /// If Flags is not in a defined state then this has no effect.
952 void intersectFlagsWith(const SDNodeFlags Flags);
953
954 /// Return the number of values defined/returned by this operator.
955 unsigned getNumValues() const { return NumValues; }
956
957 /// Return the type of a specified result.
958 EVT getValueType(unsigned ResNo) const {
959 assert(ResNo < NumValues && "Illegal result number!")(static_cast <bool> (ResNo < NumValues && "Illegal result number!"
) ? void (0) : __assert_fail ("ResNo < NumValues && \"Illegal result number!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 959, __extension__ __PRETTY_FUNCTION__))
;
960 return ValueList[ResNo];
961 }
962
963 /// Return the type of a specified result as a simple type.
// NOTE(review): EVT::getSimpleVT presumably asserts the type is a simple
// (MVT-representable) type -- confirm before calling on extended types.
964 MVT getSimpleValueType(unsigned ResNo) const {
965 return getValueType(ResNo).getSimpleVT();
966 }
967
968 /// Returns MVT::getSizeInBits(getValueType(ResNo)).
969 ///
970 /// If the value type is a scalable vector type, the scalable property will
971 /// be set and the runtime size will be a positive integer multiple of the
972 /// base size.
973 TypeSize getValueSizeInBits(unsigned ResNo) const {
974 return getValueType(ResNo).getSizeInBits();
975 }
976
// Result types are stored contiguously, so a raw const EVT* iterates them.
977 using value_iterator = const EVT *;
978
979 value_iterator value_begin() const { return ValueList; }
980 value_iterator value_end() const { return ValueList+NumValues; }
981 iterator_range<value_iterator> values() const {
982 return llvm::make_range(value_begin(), value_end());
983 }
984
985 /// Return the opcode of this operation for printing.
// The optional SelectionDAG argument on the printing/dumping entry points
// below lets target-specific nodes be rendered in human-readable form;
// passing nullptr falls back to generic printing.
986 std::string getOperationName(const SelectionDAG *G = nullptr) const;
987 static const char* getIndexedModeName(ISD::MemIndexedMode AM);
988 void print_types(raw_ostream &OS, const SelectionDAG *G) const;
989 void print_details(raw_ostream &OS, const SelectionDAG *G) const;
990 void print(raw_ostream &OS, const SelectionDAG *G = nullptr) const;
991 void printr(raw_ostream &OS, const SelectionDAG *G = nullptr) const;
992
993 /// Print a SelectionDAG node and all children down to
994 /// the leaves. The given SelectionDAG allows target-specific nodes
995 /// to be printed in human-readable form. Unlike printr, this will
996 /// print the whole DAG, including children that appear multiple
997 /// times.
998 ///
999 void printrFull(raw_ostream &O, const SelectionDAG *G = nullptr) const;
1000
1001 /// Print a SelectionDAG node and children up to
1002 /// depth "depth." The given SelectionDAG allows target-specific
1003 /// nodes to be printed in human-readable form. Unlike printr, this
1004 /// will print children that appear multiple times wherever they are
1005 /// used.
1006 ///
1007 void printrWithDepth(raw_ostream &O, const SelectionDAG *G = nullptr,
1008 unsigned depth = 100) const;
1009
1010 /// Dump this node, for debugging.
1011 void dump() const;
1012
1013 /// Dump (recursively) this node and its use-def subgraph.
1014 void dumpr() const;
1015
1016 /// Dump this node, for debugging.
1017 /// The given SelectionDAG allows target-specific nodes to be printed
1018 /// in human-readable form.
1019 void dump(const SelectionDAG *G) const;
1020
1021 /// Dump (recursively) this node and its use-def subgraph.
1022 /// The given SelectionDAG allows target-specific nodes to be printed
1023 /// in human-readable form.
1024 void dumpr(const SelectionDAG *G) const;
1025
1026 /// printrFull to dbgs(). The given SelectionDAG allows
1027 /// target-specific nodes to be printed in human-readable form.
1028 /// Unlike dumpr, this will print the whole DAG, including children
1029 /// that appear multiple times.
1030 void dumprFull(const SelectionDAG *G = nullptr) const;
1031
1032 /// printrWithDepth to dbgs(). The given
1033 /// SelectionDAG allows target-specific nodes to be printed in
1034 /// human-readable form. Unlike dumpr, this will print children
1035 /// that appear multiple times wherever they are used.
1036 ///
1037 void dumprWithDepth(const SelectionDAG *G = nullptr,
1038 unsigned depth = 100) const;
1039
1040 /// Gather unique data for the node.
1041 void Profile(FoldingSetNodeID &ID) const;
1042
1043 /// This method should only be used by the SDUse class.
// Links the use record U into this node's use list (UseList head).
1044 void addUse(SDUse &U) { U.addToList(&UseList); }
1045
1046protected:
// Build a single-result SDVTList for VT, backed by the uniqued storage
// returned by getValueTypeList.
1047 static SDVTList getSDVTList(EVT VT) {
1048 SDVTList Ret = { getValueTypeList(VT), 1 };
1049 return Ret;
1050 }
1051
1052 /// Create an SDNode.
1053 ///
1054 /// SDNodes are created without any operands, and never own the operand
1055 /// storage. To add operands, see SelectionDAG::createOperands.
1056 SDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs)
1057 : NodeType(Opc), ValueList(VTs.VTs), NumValues(VTs.NumVTs),
1058 IROrder(Order), debugLoc(std::move(dl)) {
1059 memset(&RawSDNodeBits, 0, sizeof(RawSDNodeBits));
1060 assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor")(static_cast <bool> (debugLoc.hasTrivialDestructor() &&
"Expected trivial destructor") ? void (0) : __assert_fail ("debugLoc.hasTrivialDestructor() && \"Expected trivial destructor\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1060, __extension__ __PRETTY_FUNCTION__))
;
1061 assert(NumValues == VTs.NumVTs &&(static_cast <bool> (NumValues == VTs.NumVTs &&
"NumValues wasn't wide enough for its operands!") ? void (0)
: __assert_fail ("NumValues == VTs.NumVTs && \"NumValues wasn't wide enough for its operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1062, __extension__ __PRETTY_FUNCTION__))
1062 "NumValues wasn't wide enough for its operands!")(static_cast <bool> (NumValues == VTs.NumVTs &&
"NumValues wasn't wide enough for its operands!") ? void (0)
: __assert_fail ("NumValues == VTs.NumVTs && \"NumValues wasn't wide enough for its operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1062, __extension__ __PRETTY_FUNCTION__))
;
1063 }
1064
1065 /// Release the operands and set this node to have zero operands.
// NOTE(review): defined out-of-line; presumably unlinks each SDUse from its
// operand's use list -- confirm in SelectionDAG.cpp.
1066 void DropOperands();
1067};
1068
1069/// Wrapper class for IR location info (IR ordering and DebugLoc) to be passed
1070/// into SDNode creation functions.
1071/// When an SDNode is created from the DAGBuilder, the DebugLoc is extracted
1072/// from the original Instruction, and IROrder is the ordinal position of
1073/// the instruction.
1074/// When an SDNode is created after the DAG is being built, both DebugLoc and
1075/// the IROrder are propagated from the original SDNode.
1076/// So SDLoc class provides two constructors besides the default one, one to
1077/// be used by the DAGBuilder, the other to be used by others.
1078class SDLoc {
1079private:
1080 DebugLoc DL;
1081 int IROrder = 0;
1082
1083public:
1084 SDLoc() = default;
1085 SDLoc(const SDNode *N) : DL(N->getDebugLoc()), IROrder(N->getIROrder()) {}
1086 SDLoc(const SDValue V) : SDLoc(V.getNode()) {}
1087 SDLoc(const Instruction *I, int Order) : IROrder(Order) {
1088 assert(Order >= 0 && "bad IROrder")(static_cast <bool> (Order >= 0 && "bad IROrder"
) ? void (0) : __assert_fail ("Order >= 0 && \"bad IROrder\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1088, __extension__ __PRETTY_FUNCTION__))
;
1089 if (I)
1090 DL = I->getDebugLoc();
1091 }
1092
1093 unsigned getIROrder() const { return IROrder; }
1094 const DebugLoc &getDebugLoc() const { return DL; }
1095};
1096
1097// Define inline functions from the SDValue class.
1098
1099inline SDValue::SDValue(SDNode *node, unsigned resno)
1100 : Node(node), ResNo(resno) {
1101 // Explicitly check for !ResNo to avoid use-after-free, because there are
1102 // callers that use SDValue(N, 0) with a deleted N to indicate successful
1103 // combines.
1104 assert((!Node || !ResNo || ResNo < Node->getNumValues()) &&(static_cast <bool> ((!Node || !ResNo || ResNo < Node
->getNumValues()) && "Invalid result number for the given node!"
) ? void (0) : __assert_fail ("(!Node || !ResNo || ResNo < Node->getNumValues()) && \"Invalid result number for the given node!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1105, __extension__ __PRETTY_FUNCTION__))
1105 "Invalid result number for the given node!")(static_cast <bool> ((!Node || !ResNo || ResNo < Node
->getNumValues()) && "Invalid result number for the given node!"
) ? void (0) : __assert_fail ("(!Node || !ResNo || ResNo < Node->getNumValues()) && \"Invalid result number for the given node!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1105, __extension__ __PRETTY_FUNCTION__))
;
1106 assert(ResNo < -2U && "Cannot use result numbers reserved for DenseMaps.")(static_cast <bool> (ResNo < -2U && "Cannot use result numbers reserved for DenseMaps."
) ? void (0) : __assert_fail ("ResNo < -2U && \"Cannot use result numbers reserved for DenseMaps.\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1106, __extension__ __PRETTY_FUNCTION__))
;
1107}
1108
1109inline unsigned SDValue::getOpcode() const {
1110 return Node->getOpcode();
26
Called C++ object pointer is null
1111}
1112
// Each of the inline forwarders below dereferences Node without a null
// check; callers must hold a non-empty SDValue before invoking them.
1113 inline EVT SDValue::getValueType() const {
1114 return Node->getValueType(ResNo);
1115 }
1116
1117 inline unsigned SDValue::getNumOperands() const {
1118 return Node->getNumOperands();
1119 }
1120
1121 inline const SDValue &SDValue::getOperand(unsigned i) const {
1122 return Node->getOperand(i);
1123 }
1124
1125 inline uint64_t SDValue::getConstantOperandVal(unsigned i) const {
1126 return Node->getConstantOperandVal(i);
1127 }
1128
1129 inline const APInt &SDValue::getConstantOperandAPInt(unsigned i) const {
1130 return Node->getConstantOperandAPInt(i);
1131 }
1132
1133 inline bool SDValue::isTargetOpcode() const {
1134 return Node->isTargetOpcode();
1135 }
1136
1137 inline bool SDValue::isTargetMemoryOpcode() const {
1138 return Node->isTargetMemoryOpcode();
1139 }
1140
1141 inline bool SDValue::isMachineOpcode() const {
1142 return Node->isMachineOpcode();
1143 }
1144
1145 inline unsigned SDValue::getMachineOpcode() const {
1146 return Node->getMachineOpcode();
1147 }
1148
1149 inline bool SDValue::isUndef() const {
1150 return Node->isUndef();
1151 }
1152
// Use queries are per-result: they consult only uses of this ResNo.
1153 inline bool SDValue::use_empty() const {
1154 return !Node->hasAnyUseOfValue(ResNo);
1155 }
1156
1157 inline bool SDValue::hasOneUse() const {
1158 return Node->hasNUsesOfValue(1, ResNo);
1159 }
1160
1161 inline const DebugLoc &SDValue::getDebugLoc() const {
1162 return Node->getDebugLoc();
1163 }
1164
1165 inline void SDValue::dump() const {
1166 return Node->dump();
1167 }
1168
1169 inline void SDValue::dump(const SelectionDAG *G) const {
1170 return Node->dump(G);
1171 }
1172
1173 inline void SDValue::dumpr() const {
1174 return Node->dumpr();
1175 }