Bug Summary

File: llvm/include/llvm/CodeGen/SelectionDAGNodes.h
Warning: line 1110, column 10
Called C++ object pointer is null

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name WebAssemblyISelLowering.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -fhalf-no-semantic-interposition -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/build-llvm/lib/Target/WebAssembly -resource-dir /usr/lib/llvm-13/lib/clang/13.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/build-llvm/lib/Target/WebAssembly -I /build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/WebAssembly -I /build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/build-llvm/include -I /build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward 
-internal-isystem /usr/lib/llvm-13/lib/clang/13.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/build-llvm/lib/Target/WebAssembly -fdebug-prefix-map=/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f=. -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-04-14-063029-18377-1 -x c++ /build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp

/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp

1//=- WebAssemblyISelLowering.cpp - WebAssembly DAG Lowering Implementation -==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file implements the WebAssemblyTargetLowering class.
11///
12//===----------------------------------------------------------------------===//
13
14#include "WebAssemblyISelLowering.h"
15#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
16#include "WebAssemblyMachineFunctionInfo.h"
17#include "WebAssemblySubtarget.h"
18#include "WebAssemblyTargetMachine.h"
19#include "WebAssemblyUtilities.h"
20#include "llvm/CodeGen/Analysis.h"
21#include "llvm/CodeGen/CallingConvLower.h"
22#include "llvm/CodeGen/MachineInstrBuilder.h"
23#include "llvm/CodeGen/MachineJumpTableInfo.h"
24#include "llvm/CodeGen/MachineModuleInfo.h"
25#include "llvm/CodeGen/MachineRegisterInfo.h"
26#include "llvm/CodeGen/SelectionDAG.h"
27#include "llvm/CodeGen/WasmEHFuncInfo.h"
28#include "llvm/IR/DiagnosticInfo.h"
29#include "llvm/IR/DiagnosticPrinter.h"
30#include "llvm/IR/Function.h"
31#include "llvm/IR/Intrinsics.h"
32#include "llvm/IR/IntrinsicsWebAssembly.h"
33#include "llvm/Support/Debug.h"
34#include "llvm/Support/ErrorHandling.h"
35#include "llvm/Support/MathExtras.h"
36#include "llvm/Support/raw_ostream.h"
37#include "llvm/Target/TargetOptions.h"
38using namespace llvm;
39
40#define DEBUG_TYPE"wasm-lower" "wasm-lower"
41
42WebAssemblyTargetLowering::WebAssemblyTargetLowering(
43 const TargetMachine &TM, const WebAssemblySubtarget &STI)
44 : TargetLowering(TM), Subtarget(&STI) {
45 auto MVTPtr = Subtarget->hasAddr64() ? MVT::i64 : MVT::i32;
46
47 // Booleans always contain 0 or 1.
48 setBooleanContents(ZeroOrOneBooleanContent);
49 // Except in SIMD vectors
50 setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
51 // We don't know the microarchitecture here, so just reduce register pressure.
52 setSchedulingPreference(Sched::RegPressure);
53 // Tell ISel that we have a stack pointer.
54 setStackPointerRegisterToSaveRestore(
55 Subtarget->hasAddr64() ? WebAssembly::SP64 : WebAssembly::SP32);
56 // Set up the register classes.
57 addRegisterClass(MVT::i32, &WebAssembly::I32RegClass);
58 addRegisterClass(MVT::i64, &WebAssembly::I64RegClass);
59 addRegisterClass(MVT::f32, &WebAssembly::F32RegClass);
60 addRegisterClass(MVT::f64, &WebAssembly::F64RegClass);
61 if (Subtarget->hasSIMD128()) {
62 addRegisterClass(MVT::v16i8, &WebAssembly::V128RegClass);
63 addRegisterClass(MVT::v8i16, &WebAssembly::V128RegClass);
64 addRegisterClass(MVT::v4i32, &WebAssembly::V128RegClass);
65 addRegisterClass(MVT::v4f32, &WebAssembly::V128RegClass);
66 addRegisterClass(MVT::v2i64, &WebAssembly::V128RegClass);
67 addRegisterClass(MVT::v2f64, &WebAssembly::V128RegClass);
68 }
69 // Compute derived properties from the register classes.
70 computeRegisterProperties(Subtarget->getRegisterInfo());
71
72 setOperationAction(ISD::GlobalAddress, MVTPtr, Custom);
73 setOperationAction(ISD::GlobalTLSAddress, MVTPtr, Custom);
74 setOperationAction(ISD::ExternalSymbol, MVTPtr, Custom);
75 setOperationAction(ISD::JumpTable, MVTPtr, Custom);
76 setOperationAction(ISD::BlockAddress, MVTPtr, Custom);
77 setOperationAction(ISD::BRIND, MVT::Other, Custom);
78
79 // Take the default expansion for va_arg, va_copy, and va_end. There is no
80 // default action for va_start, so we do that custom.
81 setOperationAction(ISD::VASTART, MVT::Other, Custom);
82 setOperationAction(ISD::VAARG, MVT::Other, Expand);
83 setOperationAction(ISD::VACOPY, MVT::Other, Expand);
84 setOperationAction(ISD::VAEND, MVT::Other, Expand);
85
86 for (auto T : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
87 // Don't expand the floating-point types to constant pools.
88 setOperationAction(ISD::ConstantFP, T, Legal);
89 // Expand floating-point comparisons.
90 for (auto CC : {ISD::SETO, ISD::SETUO, ISD::SETUEQ, ISD::SETONE,
91 ISD::SETULT, ISD::SETULE, ISD::SETUGT, ISD::SETUGE})
92 setCondCodeAction(CC, T, Expand);
93 // Expand floating-point library function operators.
94 for (auto Op :
95 {ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FMA})
96 setOperationAction(Op, T, Expand);
97 // Note supported floating-point library function operators that otherwise
98 // default to expand.
99 for (auto Op :
100 {ISD::FCEIL, ISD::FFLOOR, ISD::FTRUNC, ISD::FNEARBYINT, ISD::FRINT})
101 setOperationAction(Op, T, Legal);
102 // Support minimum and maximum, which otherwise default to expand.
103 setOperationAction(ISD::FMINIMUM, T, Legal);
104 setOperationAction(ISD::FMAXIMUM, T, Legal);
105 // WebAssembly currently has no builtin f16 support.
106 setOperationAction(ISD::FP16_TO_FP, T, Expand);
107 setOperationAction(ISD::FP_TO_FP16, T, Expand);
108 setLoadExtAction(ISD::EXTLOAD, T, MVT::f16, Expand);
109 setTruncStoreAction(T, MVT::f16, Expand);
110 }
111
112 // Expand unavailable integer operations.
113 for (auto Op :
114 {ISD::BSWAP, ISD::SMUL_LOHI, ISD::UMUL_LOHI, ISD::MULHS, ISD::MULHU,
115 ISD::SDIVREM, ISD::UDIVREM, ISD::SHL_PARTS, ISD::SRA_PARTS,
116 ISD::SRL_PARTS, ISD::ADDC, ISD::ADDE, ISD::SUBC, ISD::SUBE}) {
117 for (auto T : {MVT::i32, MVT::i64})
118 setOperationAction(Op, T, Expand);
119 if (Subtarget->hasSIMD128())
120 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
121 setOperationAction(Op, T, Expand);
122 }
123
124 // SIMD-specific configuration
125 if (Subtarget->hasSIMD128()) {
126 // Hoist bitcasts out of shuffles
127 setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
128
129 // Combine extends of extract_subvectors into widening ops
130 setTargetDAGCombine(ISD::SIGN_EXTEND);
131 setTargetDAGCombine(ISD::ZERO_EXTEND);
132
133 // Support saturating add for i8x16 and i16x8
134 for (auto Op : {ISD::SADDSAT, ISD::UADDSAT})
135 for (auto T : {MVT::v16i8, MVT::v8i16})
136 setOperationAction(Op, T, Legal);
137
138 // Support integer abs
139 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
140 setOperationAction(ISD::ABS, T, Legal);
141
142 // Custom lower BUILD_VECTORs to minimize number of replace_lanes
143 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
144 MVT::v2f64})
145 setOperationAction(ISD::BUILD_VECTOR, T, Custom);
146
147 // We have custom shuffle lowering to expose the shuffle mask
148 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
149 MVT::v2f64})
150 setOperationAction(ISD::VECTOR_SHUFFLE, T, Custom);
151
152 // Custom lowering since wasm shifts must have a scalar shift amount
153 for (auto Op : {ISD::SHL, ISD::SRA, ISD::SRL})
154 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
155 setOperationAction(Op, T, Custom);
156
157 // Custom lower lane accesses to expand out variable indices
158 for (auto Op : {ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_VECTOR_ELT})
159 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
160 MVT::v2f64})
161 setOperationAction(Op, T, Custom);
162
163 // There is no i8x16.mul instruction
164 setOperationAction(ISD::MUL, MVT::v16i8, Expand);
165
166 // There is no vector conditional select instruction
167 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
168 MVT::v2f64})
169 setOperationAction(ISD::SELECT_CC, T, Expand);
170
171 // Expand integer operations supported for scalars but not SIMD
172 for (auto Op : {ISD::CTLZ, ISD::CTTZ, ISD::CTPOP, ISD::SDIV, ISD::UDIV,
173 ISD::SREM, ISD::UREM, ISD::ROTL, ISD::ROTR})
174 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
175 setOperationAction(Op, T, Expand);
176
177 // But we do have integer min and max operations
178 for (auto Op : {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
179 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
180 setOperationAction(Op, T, Legal);
181
182 // Expand float operations supported for scalars but not SIMD
183 for (auto Op : {ISD::FCEIL, ISD::FFLOOR, ISD::FTRUNC, ISD::FNEARBYINT,
184 ISD::FCOPYSIGN, ISD::FLOG, ISD::FLOG2, ISD::FLOG10,
185 ISD::FEXP, ISD::FEXP2, ISD::FRINT})
186 for (auto T : {MVT::v4f32, MVT::v2f64})
187 setOperationAction(Op, T, Expand);
188
189 // Unsigned comparison operations are unavailable for i64x2 vectors.
190 for (auto CC : {ISD::SETUGT, ISD::SETUGE, ISD::SETULT, ISD::SETULE})
191 setCondCodeAction(CC, MVT::v2i64, Custom);
192
193 // 64x2 conversions are not in the spec
194 for (auto Op :
195 {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT})
196 for (auto T : {MVT::v2i64, MVT::v2f64})
197 setOperationAction(Op, T, Expand);
198 }
199
200 // As a special case, these operators use the type to mean the type to
201 // sign-extend from.
202 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
203 if (!Subtarget->hasSignExt()) {
204 // Sign extends are legal only when extending a vector extract
205 auto Action = Subtarget->hasSIMD128() ? Custom : Expand;
206 for (auto T : {MVT::i8, MVT::i16, MVT::i32})
207 setOperationAction(ISD::SIGN_EXTEND_INREG, T, Action);
208 }
209 for (auto T : MVT::integer_fixedlen_vector_valuetypes())
210 setOperationAction(ISD::SIGN_EXTEND_INREG, T, Expand);
211
212 // Dynamic stack allocation: use the default expansion.
213 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
214 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
215 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVTPtr, Expand);
216
217 setOperationAction(ISD::FrameIndex, MVT::i32, Custom);
218 setOperationAction(ISD::FrameIndex, MVT::i64, Custom);
219 setOperationAction(ISD::CopyToReg, MVT::Other, Custom);
220
221 // Expand these forms; we pattern-match the forms that we can handle in isel.
222 for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64})
223 for (auto Op : {ISD::BR_CC, ISD::SELECT_CC})
224 setOperationAction(Op, T, Expand);
225
226 // We have custom switch handling.
227 setOperationAction(ISD::BR_JT, MVT::Other, Custom);
228
229 // WebAssembly doesn't have:
230 // - Floating-point extending loads.
231 // - Floating-point truncating stores.
232 // - i1 extending loads.
233 // - truncating SIMD stores and most extending loads
234 setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
235 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
236 for (auto T : MVT::integer_valuetypes())
237 for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
238 setLoadExtAction(Ext, T, MVT::i1, Promote);
239 if (Subtarget->hasSIMD128()) {
240 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v4f32,
241 MVT::v2f64}) {
242 for (auto MemT : MVT::fixedlen_vector_valuetypes()) {
243 if (MVT(T) != MemT) {
244 setTruncStoreAction(T, MemT, Expand);
245 for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
246 setLoadExtAction(Ext, T, MemT, Expand);
247 }
248 }
249 }
250 // But some vector extending loads are legal
251 for (auto Ext : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}) {
252 setLoadExtAction(Ext, MVT::v8i16, MVT::v8i8, Legal);
253 setLoadExtAction(Ext, MVT::v4i32, MVT::v4i16, Legal);
254 setLoadExtAction(Ext, MVT::v2i64, MVT::v2i32, Legal);
255 }
256 // And some truncating stores are legal as well
257 setTruncStoreAction(MVT::v8i16, MVT::v8i8, Legal);
258 setTruncStoreAction(MVT::v4i32, MVT::v4i16, Legal);
259 }
260
261 // Don't do anything clever with build_pairs
262 setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
263
264 // Trap lowers to wasm unreachable
265 setOperationAction(ISD::TRAP, MVT::Other, Legal);
266 setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
267
268 // Exception handling intrinsics
269 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
270 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
271 setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
272
273 setMaxAtomicSizeInBitsSupported(64);
274
275 // Override the __gnu_f2h_ieee/__gnu_h2f_ieee names so that the f32 name is
276 // consistent with the f64 and f128 names.
277 setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
278 setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
279
280 // Define the emscripten name for return address helper.
281 // TODO: when implementing other Wasm backends, make this generic or only do
282 // this on emscripten depending on what they end up doing.
283 setLibcallName(RTLIB::RETURN_ADDRESS, "emscripten_return_address");
284
285 // Always convert switches to br_tables unless there is only one case, which
286 // is equivalent to a simple branch. This reduces code size for wasm, and we
287 // defer possible jump table optimizations to the VM.
288 setMinimumJumpTableEntries(2);
289}
290
291TargetLowering::AtomicExpansionKind
292WebAssemblyTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
293 // We have wasm instructions for these
294 switch (AI->getOperation()) {
295 case AtomicRMWInst::Add:
296 case AtomicRMWInst::Sub:
297 case AtomicRMWInst::And:
298 case AtomicRMWInst::Or:
299 case AtomicRMWInst::Xor:
300 case AtomicRMWInst::Xchg:
301 return AtomicExpansionKind::None;
302 default:
303 break;
304 }
305 return AtomicExpansionKind::CmpXChg;
306}
307
308FastISel *WebAssemblyTargetLowering::createFastISel(
309 FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo) const {
310 return WebAssembly::createFastISel(FuncInfo, LibInfo);
311}
312
313MVT WebAssemblyTargetLowering::getScalarShiftAmountTy(const DataLayout & /*DL*/,
314 EVT VT) const {
315 unsigned BitWidth = NextPowerOf2(VT.getSizeInBits() - 1);
316 if (BitWidth > 1 && BitWidth < 8)
317 BitWidth = 8;
318
319 if (BitWidth > 64) {
320 // The shift will be lowered to a libcall, and compiler-rt libcalls expect
321 // the count to be an i32.
322 BitWidth = 32;
323 assert(BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) &&((BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) && "32-bit shift counts ought to be enough for anyone"
) ? static_cast<void> (0) : __assert_fail ("BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) && \"32-bit shift counts ought to be enough for anyone\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 324, __PRETTY_FUNCTION__))
324 "32-bit shift counts ought to be enough for anyone")((BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) && "32-bit shift counts ought to be enough for anyone"
) ? static_cast<void> (0) : __assert_fail ("BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) && \"32-bit shift counts ought to be enough for anyone\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 324, __PRETTY_FUNCTION__))
;
325 }
326
327 MVT Result = MVT::getIntegerVT(BitWidth);
328 assert(Result != MVT::INVALID_SIMPLE_VALUE_TYPE &&((Result != MVT::INVALID_SIMPLE_VALUE_TYPE && "Unable to represent scalar shift amount type"
) ? static_cast<void> (0) : __assert_fail ("Result != MVT::INVALID_SIMPLE_VALUE_TYPE && \"Unable to represent scalar shift amount type\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 329, __PRETTY_FUNCTION__))
329 "Unable to represent scalar shift amount type")((Result != MVT::INVALID_SIMPLE_VALUE_TYPE && "Unable to represent scalar shift amount type"
) ? static_cast<void> (0) : __assert_fail ("Result != MVT::INVALID_SIMPLE_VALUE_TYPE && \"Unable to represent scalar shift amount type\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 329, __PRETTY_FUNCTION__))
;
330 return Result;
331}
332
333// Lower an fp-to-int conversion operator from the LLVM opcode, which has an
334// undefined result on invalid/overflow, to the WebAssembly opcode, which
335// traps on invalid/overflow.
336static MachineBasicBlock *LowerFPToInt(MachineInstr &MI, DebugLoc DL,
337 MachineBasicBlock *BB,
338 const TargetInstrInfo &TII,
339 bool IsUnsigned, bool Int64,
340 bool Float64, unsigned LoweredOpcode) {
341 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
342
343 Register OutReg = MI.getOperand(0).getReg();
344 Register InReg = MI.getOperand(1).getReg();
345
346 unsigned Abs = Float64 ? WebAssembly::ABS_F64 : WebAssembly::ABS_F32;
347 unsigned FConst = Float64 ? WebAssembly::CONST_F64 : WebAssembly::CONST_F32;
348 unsigned LT = Float64 ? WebAssembly::LT_F64 : WebAssembly::LT_F32;
349 unsigned GE = Float64 ? WebAssembly::GE_F64 : WebAssembly::GE_F32;
350 unsigned IConst = Int64 ? WebAssembly::CONST_I64 : WebAssembly::CONST_I32;
351 unsigned Eqz = WebAssembly::EQZ_I32;
352 unsigned And = WebAssembly::AND_I32;
353 int64_t Limit = Int64 ? INT64_MIN(-9223372036854775807L -1) : INT32_MIN(-2147483647-1);
354 int64_t Substitute = IsUnsigned ? 0 : Limit;
355 double CmpVal = IsUnsigned ? -(double)Limit * 2.0 : -(double)Limit;
356 auto &Context = BB->getParent()->getFunction().getContext();
357 Type *Ty = Float64 ? Type::getDoubleTy(Context) : Type::getFloatTy(Context);
358
359 const BasicBlock *LLVMBB = BB->getBasicBlock();
360 MachineFunction *F = BB->getParent();
361 MachineBasicBlock *TrueMBB = F->CreateMachineBasicBlock(LLVMBB);
362 MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVMBB);
363 MachineBasicBlock *DoneMBB = F->CreateMachineBasicBlock(LLVMBB);
364
365 MachineFunction::iterator It = ++BB->getIterator();
366 F->insert(It, FalseMBB);
367 F->insert(It, TrueMBB);
368 F->insert(It, DoneMBB);
369
370 // Transfer the remainder of BB and its successor edges to DoneMBB.
371 DoneMBB->splice(DoneMBB->begin(), BB, std::next(MI.getIterator()), BB->end());
372 DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
373
374 BB->addSuccessor(TrueMBB);
375 BB->addSuccessor(FalseMBB);
376 TrueMBB->addSuccessor(DoneMBB);
377 FalseMBB->addSuccessor(DoneMBB);
378
379 unsigned Tmp0, Tmp1, CmpReg, EqzReg, FalseReg, TrueReg;
380 Tmp0 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
381 Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
382 CmpReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
383 EqzReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
384 FalseReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
385 TrueReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
386
387 MI.eraseFromParent();
388 // For signed numbers, we can do a single comparison to determine whether
389 // fabs(x) is within range.
390 if (IsUnsigned) {
391 Tmp0 = InReg;
392 } else {
393 BuildMI(BB, DL, TII.get(Abs), Tmp0).addReg(InReg);
394 }
395 BuildMI(BB, DL, TII.get(FConst), Tmp1)
396 .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, CmpVal)));
397 BuildMI(BB, DL, TII.get(LT), CmpReg).addReg(Tmp0).addReg(Tmp1);
398
399 // For unsigned numbers, we have to do a separate comparison with zero.
400 if (IsUnsigned) {
401 Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
402 Register SecondCmpReg =
403 MRI.createVirtualRegister(&WebAssembly::I32RegClass);
404 Register AndReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
405 BuildMI(BB, DL, TII.get(FConst), Tmp1)
406 .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, 0.0)));
407 BuildMI(BB, DL, TII.get(GE), SecondCmpReg).addReg(Tmp0).addReg(Tmp1);
408 BuildMI(BB, DL, TII.get(And), AndReg).addReg(CmpReg).addReg(SecondCmpReg);
409 CmpReg = AndReg;
410 }
411
412 BuildMI(BB, DL, TII.get(Eqz), EqzReg).addReg(CmpReg);
413
414 // Create the CFG diamond to select between doing the conversion or using
415 // the substitute value.
416 BuildMI(BB, DL, TII.get(WebAssembly::BR_IF)).addMBB(TrueMBB).addReg(EqzReg);
417 BuildMI(FalseMBB, DL, TII.get(LoweredOpcode), FalseReg).addReg(InReg);
418 BuildMI(FalseMBB, DL, TII.get(WebAssembly::BR)).addMBB(DoneMBB);
419 BuildMI(TrueMBB, DL, TII.get(IConst), TrueReg).addImm(Substitute);
420 BuildMI(*DoneMBB, DoneMBB->begin(), DL, TII.get(TargetOpcode::PHI), OutReg)
421 .addReg(FalseReg)
422 .addMBB(FalseMBB)
423 .addReg(TrueReg)
424 .addMBB(TrueMBB);
425
426 return DoneMBB;
427}
428
429static MachineBasicBlock *
430LowerCallResults(MachineInstr &CallResults, DebugLoc DL, MachineBasicBlock *BB,
431 const WebAssemblySubtarget *Subtarget,
432 const TargetInstrInfo &TII) {
433 MachineInstr &CallParams = *CallResults.getPrevNode();
434 assert(CallParams.getOpcode() == WebAssembly::CALL_PARAMS)((CallParams.getOpcode() == WebAssembly::CALL_PARAMS) ? static_cast
<void> (0) : __assert_fail ("CallParams.getOpcode() == WebAssembly::CALL_PARAMS"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 434, __PRETTY_FUNCTION__))
;
435 assert(CallResults.getOpcode() == WebAssembly::CALL_RESULTS ||((CallResults.getOpcode() == WebAssembly::CALL_RESULTS || CallResults
.getOpcode() == WebAssembly::RET_CALL_RESULTS) ? static_cast<
void> (0) : __assert_fail ("CallResults.getOpcode() == WebAssembly::CALL_RESULTS || CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 436, __PRETTY_FUNCTION__))
436 CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS)((CallResults.getOpcode() == WebAssembly::CALL_RESULTS || CallResults
.getOpcode() == WebAssembly::RET_CALL_RESULTS) ? static_cast<
void> (0) : __assert_fail ("CallResults.getOpcode() == WebAssembly::CALL_RESULTS || CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 436, __PRETTY_FUNCTION__))
;
437
438 bool IsIndirect = CallParams.getOperand(0).isReg();
439 bool IsRetCall = CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS;
440
441 unsigned CallOp;
442 if (IsIndirect && IsRetCall) {
443 CallOp = WebAssembly::RET_CALL_INDIRECT;
444 } else if (IsIndirect) {
445 CallOp = WebAssembly::CALL_INDIRECT;
446 } else if (IsRetCall) {
447 CallOp = WebAssembly::RET_CALL;
448 } else {
449 CallOp = WebAssembly::CALL;
450 }
451
452 MachineFunction &MF = *BB->getParent();
453 const MCInstrDesc &MCID = TII.get(CallOp);
454 MachineInstrBuilder MIB(MF, MF.CreateMachineInstr(MCID, DL));
455
456 // See if we must truncate the function pointer.
457 // CALL_INDIRECT takes an i32, but in wasm64 we represent function pointers
458 // as 64-bit for uniformity with other pointer types.
459 // See also: WebAssemblyFastISel::selectCall
460 if (IsIndirect && MF.getSubtarget<WebAssemblySubtarget>().hasAddr64()) {
461 Register Reg32 =
462 MF.getRegInfo().createVirtualRegister(&WebAssembly::I32RegClass);
463 auto &FnPtr = CallParams.getOperand(0);
464 BuildMI(*BB, CallResults.getIterator(), DL,
465 TII.get(WebAssembly::I32_WRAP_I64), Reg32)
466 .addReg(FnPtr.getReg());
467 FnPtr.setReg(Reg32);
468 }
469
470 // Move the function pointer to the end of the arguments for indirect calls
471 if (IsIndirect) {
472 auto FnPtr = CallParams.getOperand(0);
473 CallParams.RemoveOperand(0);
474 CallParams.addOperand(FnPtr);
475 }
476
477 for (auto Def : CallResults.defs())
478 MIB.add(Def);
479
480 if (IsIndirect) {
481 // Placeholder for the type index.
482 MIB.addImm(0);
483 // The table into which this call_indirect indexes.
484 MCSymbolWasm *Table =
485 WebAssembly::getOrCreateFunctionTableSymbol(MF.getContext(), Subtarget);
486 if (Subtarget->hasReferenceTypes()) {
487 MIB.addSym(Table);
488 } else {
489 // For the MVP there is at most one table whose number is 0, but we can't
490 // write a table symbol or issue relocations. Instead we just ensure the
491 // table is live and write a zero.
492 Table->setNoStrip();
493 MIB.addImm(0);
494 }
495 }
496
497 for (auto Use : CallParams.uses())
498 MIB.add(Use);
499
500 BB->insert(CallResults.getIterator(), MIB);
501 CallParams.eraseFromParent();
502 CallResults.eraseFromParent();
503
504 return BB;
505}
506
507MachineBasicBlock *WebAssemblyTargetLowering::EmitInstrWithCustomInserter(
508 MachineInstr &MI, MachineBasicBlock *BB) const {
509 const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
510 DebugLoc DL = MI.getDebugLoc();
511
512 switch (MI.getOpcode()) {
513 default:
514 llvm_unreachable("Unexpected instr type to insert")::llvm::llvm_unreachable_internal("Unexpected instr type to insert"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 514)
;
515 case WebAssembly::FP_TO_SINT_I32_F32:
516 return LowerFPToInt(MI, DL, BB, TII, false, false, false,
517 WebAssembly::I32_TRUNC_S_F32);
518 case WebAssembly::FP_TO_UINT_I32_F32:
519 return LowerFPToInt(MI, DL, BB, TII, true, false, false,
520 WebAssembly::I32_TRUNC_U_F32);
521 case WebAssembly::FP_TO_SINT_I64_F32:
522 return LowerFPToInt(MI, DL, BB, TII, false, true, false,
523 WebAssembly::I64_TRUNC_S_F32);
524 case WebAssembly::FP_TO_UINT_I64_F32:
525 return LowerFPToInt(MI, DL, BB, TII, true, true, false,
526 WebAssembly::I64_TRUNC_U_F32);
527 case WebAssembly::FP_TO_SINT_I32_F64:
528 return LowerFPToInt(MI, DL, BB, TII, false, false, true,
529 WebAssembly::I32_TRUNC_S_F64);
530 case WebAssembly::FP_TO_UINT_I32_F64:
531 return LowerFPToInt(MI, DL, BB, TII, true, false, true,
532 WebAssembly::I32_TRUNC_U_F64);
533 case WebAssembly::FP_TO_SINT_I64_F64:
534 return LowerFPToInt(MI, DL, BB, TII, false, true, true,
535 WebAssembly::I64_TRUNC_S_F64);
536 case WebAssembly::FP_TO_UINT_I64_F64:
537 return LowerFPToInt(MI, DL, BB, TII, true, true, true,
538 WebAssembly::I64_TRUNC_U_F64);
539 case WebAssembly::CALL_RESULTS:
540 case WebAssembly::RET_CALL_RESULTS:
541 return LowerCallResults(MI, DL, BB, Subtarget, TII);
542 }
543}
544
545const char *
546WebAssemblyTargetLowering::getTargetNodeName(unsigned Opcode) const {
547 switch (static_cast<WebAssemblyISD::NodeType>(Opcode)) {
548 case WebAssemblyISD::FIRST_NUMBER:
549 case WebAssemblyISD::FIRST_MEM_OPCODE:
550 break;
551#define HANDLE_NODETYPE(NODE) \
552 case WebAssemblyISD::NODE: \
553 return "WebAssemblyISD::" #NODE;
554#define HANDLE_MEM_NODETYPE(NODE) HANDLE_NODETYPE(NODE)
555#include "WebAssemblyISD.def"
556#undef HANDLE_MEM_NODETYPE
557#undef HANDLE_NODETYPE
558 }
559 return nullptr;
560}
561
562std::pair<unsigned, const TargetRegisterClass *>
563WebAssemblyTargetLowering::getRegForInlineAsmConstraint(
564 const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
565 // First, see if this is a constraint that directly corresponds to a
566 // WebAssembly register class.
567 if (Constraint.size() == 1) {
568 switch (Constraint[0]) {
569 case 'r':
570 assert(VT != MVT::iPTR && "Pointer MVT not expected here")((VT != MVT::iPTR && "Pointer MVT not expected here")
? static_cast<void> (0) : __assert_fail ("VT != MVT::iPTR && \"Pointer MVT not expected here\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 570, __PRETTY_FUNCTION__))
;
571 if (Subtarget->hasSIMD128() && VT.isVector()) {
572 if (VT.getSizeInBits() == 128)
573 return std::make_pair(0U, &WebAssembly::V128RegClass);
574 }
575 if (VT.isInteger() && !VT.isVector()) {
576 if (VT.getSizeInBits() <= 32)
577 return std::make_pair(0U, &WebAssembly::I32RegClass);
578 if (VT.getSizeInBits() <= 64)
579 return std::make_pair(0U, &WebAssembly::I64RegClass);
580 }
581 if (VT.isFloatingPoint() && !VT.isVector()) {
582 switch (VT.getSizeInBits()) {
583 case 32:
584 return std::make_pair(0U, &WebAssembly::F32RegClass);
585 case 64:
586 return std::make_pair(0U, &WebAssembly::F64RegClass);
587 default:
588 break;
589 }
590 }
591 break;
592 default:
593 break;
594 }
595 }
596
597 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
598}
599
600bool WebAssemblyTargetLowering::isCheapToSpeculateCttz() const {
601 // Assume ctz is a relatively cheap operation.
602 return true;
603}
604
605bool WebAssemblyTargetLowering::isCheapToSpeculateCtlz() const {
606 // Assume clz is a relatively cheap operation.
607 return true;
608}
609
610bool WebAssemblyTargetLowering::isLegalAddressingMode(const DataLayout &DL,
611 const AddrMode &AM,
612 Type *Ty, unsigned AS,
613 Instruction *I) const {
614 // WebAssembly offsets are added as unsigned without wrapping. The
615 // isLegalAddressingMode gives us no way to determine if wrapping could be
616 // happening, so we approximate this by accepting only non-negative offsets.
617 if (AM.BaseOffs < 0)
618 return false;
619
620 // WebAssembly has no scale register operands.
621 if (AM.Scale != 0)
622 return false;
623
624 // Everything else is legal.
625 return true;
626}
627
bool WebAssemblyTargetLowering::allowsMisalignedMemoryAccesses(
    EVT /*VT*/, unsigned /*AddrSpace*/, Align /*Align*/,
    MachineMemOperand::Flags /*Flags*/, bool *Fast) const {
  // WebAssembly supports unaligned accesses, though it should be declared
  // with the p2align attribute on loads and stores which do so, and there
  // may be a performance impact. We tell LLVM they're "fast" because
  // for the kinds of things that LLVM uses this for (merging adjacent stores
  // of constants, etc.), WebAssembly implementations will either want the
  // unaligned access or they'll split anyway.
  if (Fast)
    *Fast = true;
  return true;
}
641
bool WebAssemblyTargetLowering::isIntDivCheap(EVT VT,
                                              AttributeList Attr) const {
  // The current thinking is that wasm engines will perform this optimization
  // (e.g. strength-reducing division by a constant), so we can save on code
  // size by not expanding divisions ourselves.
  return true;
}
648
649bool WebAssemblyTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
650 EVT ExtT = ExtVal.getValueType();
651 EVT MemT = cast<LoadSDNode>(ExtVal->getOperand(0))->getValueType(0);
652 return (ExtT == MVT::v8i16 && MemT == MVT::v8i8) ||
653 (ExtT == MVT::v4i32 && MemT == MVT::v4i16) ||
654 (ExtT == MVT::v2i64 && MemT == MVT::v2i32);
655}
656
EVT WebAssemblyTargetLowering::getSetCCResultType(const DataLayout &DL,
                                                  LLVMContext &C,
                                                  EVT VT) const {
  // Vector comparisons produce a mask vector whose elements are integers of
  // the same width as the compared elements.
  if (VT.isVector())
    return VT.changeVectorElementTypeToInteger();

  // So far, all branch instructions in Wasm take an I32 condition.
  // The default TargetLowering::getSetCCResultType returns the pointer size,
  // which would be useful to reduce instruction counts when testing
  // against 64-bit pointers/values if at some point Wasm supports that.
  return EVT::getIntegerVT(C, 32);
}
669
bool WebAssemblyTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                                   const CallInst &I,
                                                   MachineFunction &MF,
                                                   unsigned Intrinsic) const {
  // Describe the memory access performed by each wasm memory-touching
  // intrinsic (type, pointer, alignment, load/store flags) so the DAG
  // builder can attach a MachineMemOperand. Returns false for intrinsics
  // that do not access memory.
  switch (Intrinsic) {
  case Intrinsic::wasm_memory_atomic_notify:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(4);
    // atomic.notify instruction does not really load the memory specified with
    // this argument, but MachineMemOperand should either be load or store, so
    // we set this to a load.
    // FIXME Volatile isn't really correct, but currently all LLVM atomic
    // instructions are treated as volatiles in the backend, so we should be
    // consistent. The same applies for wasm_atomic_wait intrinsics too.
    Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
    return true;
  case Intrinsic::wasm_memory_atomic_wait32:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(4);
    Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
    return true;
  case Intrinsic::wasm_memory_atomic_wait64:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i64;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(8);
    Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
    return true;
  case Intrinsic::wasm_load32_zero:
  case Intrinsic::wasm_load64_zero:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = Intrinsic == Intrinsic::wasm_load32_zero ? MVT::i32 : MVT::i64;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Info.memVT == MVT::i32 ? Align(4) : Align(8);
    Info.flags = MachineMemOperand::MOLoad;
    return true;
  case Intrinsic::wasm_load8_lane:
  case Intrinsic::wasm_load16_lane:
  case Intrinsic::wasm_load32_lane:
  case Intrinsic::wasm_load64_lane:
  case Intrinsic::wasm_store8_lane:
  case Intrinsic::wasm_store16_lane:
  case Intrinsic::wasm_store32_lane:
  case Intrinsic::wasm_store64_lane: {
    // First derive the lane width (memVT/alignment), then the direction
    // (load vs. store) — the two axes are independent.
    MVT MemVT;
    Align MemAlign;
    switch (Intrinsic) {
    case Intrinsic::wasm_load8_lane:
    case Intrinsic::wasm_store8_lane:
      MemVT = MVT::i8;
      MemAlign = Align(1);
      break;
    case Intrinsic::wasm_load16_lane:
    case Intrinsic::wasm_store16_lane:
      MemVT = MVT::i16;
      MemAlign = Align(2);
      break;
    case Intrinsic::wasm_load32_lane:
    case Intrinsic::wasm_store32_lane:
      MemVT = MVT::i32;
      MemAlign = Align(4);
      break;
    case Intrinsic::wasm_load64_lane:
    case Intrinsic::wasm_store64_lane:
      MemVT = MVT::i64;
      MemAlign = Align(8);
      break;
    default:
      llvm_unreachable("unexpected intrinsic");
    }
    if (Intrinsic == Intrinsic::wasm_load8_lane ||
        Intrinsic == Intrinsic::wasm_load16_lane ||
        Intrinsic == Intrinsic::wasm_load32_lane ||
        Intrinsic == Intrinsic::wasm_load64_lane) {
      Info.opc = ISD::INTRINSIC_W_CHAIN;
      Info.flags = MachineMemOperand::MOLoad;
    } else {
      Info.opc = ISD::INTRINSIC_VOID;
      Info.flags = MachineMemOperand::MOStore;
    }
    Info.ptrVal = I.getArgOperand(0);
    Info.memVT = MemVT;
    Info.offset = 0;
    Info.align = MemAlign;
    return true;
  }
  default:
    return false;
  }
}
768
769//===----------------------------------------------------------------------===//
770// WebAssembly Lowering private implementation.
771//===----------------------------------------------------------------------===//
772
773//===----------------------------------------------------------------------===//
774// Lowering Code
775//===----------------------------------------------------------------------===//
776
777static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg) {
778 MachineFunction &MF = DAG.getMachineFunction();
779 DAG.getContext()->diagnose(
780 DiagnosticInfoUnsupported(MF.getFunction(), Msg, DL.getDebugLoc()));
781}
782
783// Test whether the given calling convention is supported.
784static bool callingConvSupported(CallingConv::ID CallConv) {
785 // We currently support the language-independent target-independent
786 // conventions. We don't yet have a way to annotate calls with properties like
787 // "cold", and we don't have any call-clobbered registers, so these are mostly
788 // all handled the same.
789 return CallConv == CallingConv::C || CallConv == CallingConv::Fast ||
790 CallConv == CallingConv::Cold ||
791 CallConv == CallingConv::PreserveMost ||
792 CallConv == CallingConv::PreserveAll ||
793 CallConv == CallingConv::CXX_FAST_TLS ||
794 CallConv == CallingConv::WASM_EmscriptenInvoke ||
795 CallConv == CallingConv::Swift;
796}
797
/// Lower an outgoing call to the wasm CALL / RET_CALL DAG nodes.
/// Handles tail-call eligibility checks, byval copies, the varargs buffer,
/// and the extra swiftself/swifterror arguments required by swiftcc.
SDValue
WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                     SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc DL = CLI.DL;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  MachineFunction &MF = DAG.getMachineFunction();
  auto Layout = MF.getDataLayout();

  CallingConv::ID CallConv = CLI.CallConv;
  if (!callingConvSupported(CallConv))
    fail(DL, DAG,
         "WebAssembly doesn't support language-specific or target-specific "
         "calling conventions yet");
  if (CLI.IsPatchPoint)
    fail(DL, DAG, "WebAssembly doesn't support patch point yet");

  if (CLI.IsTailCall) {
    // Only diagnose when the source *required* a tail call (musttail);
    // otherwise silently fall back to a regular call.
    auto NoTail = [&](const char *Msg) {
      if (CLI.CB && CLI.CB->isMustTailCall())
        fail(DL, DAG, Msg);
      CLI.IsTailCall = false;
    };

    if (!Subtarget->hasTailCall())
      NoTail("WebAssembly 'tail-call' feature not enabled");

    // Varargs calls cannot be tail calls because the buffer is on the stack
    if (CLI.IsVarArg)
      NoTail("WebAssembly does not support varargs tail calls");

    // Do not tail call unless caller and callee return types match
    const Function &F = MF.getFunction();
    const TargetMachine &TM = getTargetMachine();
    Type *RetTy = F.getReturnType();
    SmallVector<MVT, 4> CallerRetTys;
    SmallVector<MVT, 4> CalleeRetTys;
    computeLegalValueVTs(F, TM, RetTy, CallerRetTys);
    computeLegalValueVTs(F, TM, CLI.RetTy, CalleeRetTys);
    bool TypesMatch = CallerRetTys.size() == CalleeRetTys.size() &&
                      std::equal(CallerRetTys.begin(), CallerRetTys.end(),
                                 CalleeRetTys.begin());
    if (!TypesMatch)
      NoTail("WebAssembly tail call requires caller and callee return types to "
             "match");

    // If pointers to local stack values are passed, we cannot tail call
    if (CLI.CB) {
      for (auto &Arg : CLI.CB->args()) {
        Value *Val = Arg.get();
        // Trace the value back through pointer operations
        while (true) {
          Value *Src = Val->stripPointerCastsAndAliases();
          if (auto *GEP = dyn_cast<GetElementPtrInst>(Src))
            Src = GEP->getPointerOperand();
          if (Val == Src)
            break;
          Val = Src;
        }
        if (isa<AllocaInst>(Val)) {
          NoTail(
              "WebAssembly does not support tail calling with stack arguments");
          break;
        }
      }
    }
  }

  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;

  // The generic code may have added an sret argument. If we're lowering an
  // invoke function, the ABI requires that the function pointer be the first
  // argument, so we may have to swap the arguments.
  if (CallConv == CallingConv::WASM_EmscriptenInvoke && Outs.size() >= 2 &&
      Outs[0].Flags.isSRet()) {
    std::swap(Outs[0], Outs[1]);
    std::swap(OutVals[0], OutVals[1]);
  }

  bool HasSwiftSelfArg = false;
  bool HasSwiftErrorArg = false;
  unsigned NumFixedArgs = 0;
  for (unsigned I = 0; I < Outs.size(); ++I) {
    const ISD::OutputArg &Out = Outs[I];
    SDValue &OutVal = OutVals[I];
    HasSwiftSelfArg |= Out.Flags.isSwiftSelf();
    HasSwiftErrorArg |= Out.Flags.isSwiftError();
    if (Out.Flags.isNest())
      fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
    if (Out.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
    if (Out.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
    if (Out.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
    if (Out.Flags.isByVal() && Out.Flags.getByValSize() != 0) {
      // byval arguments get a caller-side copy in a fresh stack object; the
      // callee receives a pointer to the copy instead of the original.
      auto &MFI = MF.getFrameInfo();
      int FI = MFI.CreateStackObject(Out.Flags.getByValSize(),
                                     Out.Flags.getNonZeroByValAlign(),
                                     /*isSS=*/false);
      SDValue SizeNode =
          DAG.getConstant(Out.Flags.getByValSize(), DL, MVT::i32);
      SDValue FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
      Chain = DAG.getMemcpy(
          Chain, DL, FINode, OutVal, SizeNode, Out.Flags.getNonZeroByValAlign(),
          /*isVolatile*/ false, /*AlwaysInline=*/false,
          /*isTailCall*/ false, MachinePointerInfo(), MachinePointerInfo());
      OutVal = FINode;
    }
    // Count the number of fixed args *after* legalization.
    NumFixedArgs += Out.IsFixed;
  }

  bool IsVarArg = CLI.IsVarArg;
  auto PtrVT = getPointerTy(Layout);

  // For swiftcc, emit additional swiftself and swifterror arguments
  // if there aren't. These additional arguments are also added for callee
  // signature They are necessary to match callee and caller signature for
  // indirect call.
  if (CallConv == CallingConv::Swift) {
    if (!HasSwiftSelfArg) {
      NumFixedArgs++;
      ISD::OutputArg Arg;
      Arg.Flags.setSwiftSelf();
      CLI.Outs.push_back(Arg);
      SDValue ArgVal = DAG.getUNDEF(PtrVT);
      CLI.OutVals.push_back(ArgVal);
    }
    if (!HasSwiftErrorArg) {
      NumFixedArgs++;
      ISD::OutputArg Arg;
      Arg.Flags.setSwiftError();
      CLI.Outs.push_back(Arg);
      SDValue ArgVal = DAG.getUNDEF(PtrVT);
      CLI.OutVals.push_back(ArgVal);
    }
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());

  if (IsVarArg) {
    // Outgoing non-fixed arguments are placed in a buffer. First
    // compute their offsets and the total amount of buffer space needed.
    for (unsigned I = NumFixedArgs; I < Outs.size(); ++I) {
      const ISD::OutputArg &Out = Outs[I];
      SDValue &Arg = OutVals[I];
      EVT VT = Arg.getValueType();
      assert(VT != MVT::iPTR && "Legalized args should be concrete");
      Type *Ty = VT.getTypeForEVT(*DAG.getContext());
      Align Alignment =
          std::max(Out.Flags.getNonZeroOrigAlign(), Layout.getABITypeAlign(Ty));
      unsigned Offset =
          CCInfo.AllocateStack(Layout.getTypeAllocSize(Ty), Alignment);
      CCInfo.addLoc(CCValAssign::getMem(ArgLocs.size(), VT.getSimpleVT(),
                                        Offset, VT.getSimpleVT(),
                                        CCValAssign::Full));
    }
  }

  unsigned NumBytes = CCInfo.getAlignedCallFrameSize();

  SDValue FINode;
  if (IsVarArg && NumBytes) {
    // For non-fixed arguments, next emit stores to store the argument values
    // to the stack buffer at the offsets computed above.
    int FI = MF.getFrameInfo().CreateStackObject(NumBytes,
                                                 Layout.getStackAlignment(),
                                                 /*isSS=*/false);
    unsigned ValNo = 0;
    SmallVector<SDValue, 8> Chains;
    for (SDValue Arg : drop_begin(OutVals, NumFixedArgs)) {
      assert(ArgLocs[ValNo].getValNo() == ValNo &&
             "ArgLocs should remain in order and only hold varargs args");
      unsigned Offset = ArgLocs[ValNo++].getLocMemOffset();
      FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
      SDValue Add = DAG.getNode(ISD::ADD, DL, PtrVT, FINode,
                                DAG.getConstant(Offset, DL, PtrVT));
      Chains.push_back(
          DAG.getStore(Chain, DL, Arg, Add,
                       MachinePointerInfo::getFixedStack(MF, FI, Offset)));
    }
    if (!Chains.empty())
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
  } else if (IsVarArg) {
    // Varargs call with an empty buffer: still pass a (null) buffer pointer.
    FINode = DAG.getIntPtrConstant(0, DL);
  }

  if (Callee->getOpcode() == ISD::GlobalAddress) {
    // If the callee is a GlobalAddress node (quite common, every direct call
    // is) turn it into a TargetGlobalAddress node so that LowerGlobalAddress
    // doesn't add MO_GOT which is not needed for direct calls.
    GlobalAddressSDNode* GA = cast<GlobalAddressSDNode>(Callee);
    Callee = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
                                        getPointerTy(DAG.getDataLayout()),
                                        GA->getOffset());
    Callee = DAG.getNode(WebAssemblyISD::Wrapper, DL,
                         getPointerTy(DAG.getDataLayout()), Callee);
  }

  // Compute the operands for the CALLn node.
  SmallVector<SDValue, 16> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add all fixed arguments. Note that for non-varargs calls, NumFixedArgs
  // isn't reliable.
  Ops.append(OutVals.begin(),
             IsVarArg ? OutVals.begin() + NumFixedArgs : OutVals.end());
  // Add a pointer to the vararg buffer.
  if (IsVarArg)
    Ops.push_back(FINode);

  SmallVector<EVT, 8> InTys;
  for (const auto &In : Ins) {
    assert(!In.Flags.isByVal() && "byval is not valid for return values");
    assert(!In.Flags.isNest() && "nest is not valid for return values");
    if (In.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca return values");
    if (In.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs return values");
    if (In.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG,
           "WebAssembly hasn't implemented cons regs last return values");
    // Ignore In.getNonZeroOrigAlign() because all our arguments are passed in
    // registers.
    InTys.push_back(In.VT);
  }

  if (CLI.IsTailCall) {
    // ret_calls do not return values to the current frame
    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
    return DAG.getNode(WebAssemblyISD::RET_CALL, DL, NodeTys, Ops);
  }

  InTys.push_back(MVT::Other);
  SDVTList InTyList = DAG.getVTList(InTys);
  SDValue Res = DAG.getNode(WebAssemblyISD::CALL, DL, InTyList, Ops);

  for (size_t I = 0; I < Ins.size(); ++I)
    InVals.push_back(Res.getValue(I));

  // Return the chain
  return Res.getValue(Ins.size());
}
1048
1049bool WebAssemblyTargetLowering::CanLowerReturn(
1050 CallingConv::ID /*CallConv*/, MachineFunction & /*MF*/, bool /*IsVarArg*/,
1051 const SmallVectorImpl<ISD::OutputArg> &Outs,
1052 LLVMContext & /*Context*/) const {
1053 // WebAssembly can only handle returning tuples with multivalue enabled
1054 return Subtarget->hasMultivalue() || Outs.size() <= 1;
1055}
1056
/// Lower a function return to the wasm RETURN DAG node, diagnosing argument
/// attributes that WebAssembly does not (yet) support on return values.
SDValue WebAssemblyTargetLowering::LowerReturn(
    SDValue Chain, CallingConv::ID CallConv, bool /*IsVarArg*/,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
    SelectionDAG &DAG) const {
  // CanLowerReturn already enforced this constraint.
  assert((Subtarget->hasMultivalue() || Outs.size() <= 1) &&
         "MVP WebAssembly can only return up to one value");
  if (!callingConvSupported(CallConv))
    fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");

  // RETURN operands: chain first, then every returned value.
  SmallVector<SDValue, 4> RetOps(1, Chain);
  RetOps.append(OutVals.begin(), OutVals.end());
  Chain = DAG.getNode(WebAssemblyISD::RETURN, DL, MVT::Other, RetOps);

  // Record the number and types of the return values.
  for (const ISD::OutputArg &Out : Outs) {
    assert(!Out.Flags.isByVal() && "byval is not valid for return values");
    assert(!Out.Flags.isNest() && "nest is not valid for return values");
    assert(Out.IsFixed && "non-fixed return value is not valid");
    if (Out.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca results");
    if (Out.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs results");
    if (Out.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last results");
  }

  return Chain;
}
1086
/// Lower incoming arguments as WebAssemblyISD::ARGUMENT nodes and record the
/// function's wasm signature (params/results) in WebAssemblyFunctionInfo.
SDValue WebAssemblyTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  if (!callingConvSupported(CallConv))
    fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");

  MachineFunction &MF = DAG.getMachineFunction();
  auto *MFI = MF.getInfo<WebAssemblyFunctionInfo>();

  // Set up the incoming ARGUMENTS value, which serves to represent the liveness
  // of the incoming values before they're represented by virtual registers.
  MF.getRegInfo().addLiveIn(WebAssembly::ARGUMENTS);

  bool HasSwiftErrorArg = false;
  bool HasSwiftSelfArg = false;
  for (const ISD::InputArg &In : Ins) {
    HasSwiftSelfArg |= In.Flags.isSwiftSelf();
    HasSwiftErrorArg |= In.Flags.isSwiftError();
    if (In.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
    if (In.Flags.isNest())
      fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
    if (In.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
    if (In.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
    // Ignore In.getNonZeroOrigAlign() because all our arguments are passed in
    // registers. Unused arguments become UNDEF rather than ARGUMENT nodes.
    InVals.push_back(In.Used ? DAG.getNode(WebAssemblyISD::ARGUMENT, DL, In.VT,
                                           DAG.getTargetConstant(InVals.size(),
                                                                 DL, MVT::i32))
                             : DAG.getUNDEF(In.VT));

    // Record the number and types of arguments.
    MFI->addParam(In.VT);
  }

  // For swiftcc, emit additional swiftself and swifterror arguments
  // if there aren't. These additional arguments are also added for callee
  // signature They are necessary to match callee and caller signature for
  // indirect call.
  auto PtrVT = getPointerTy(MF.getDataLayout());
  if (CallConv == CallingConv::Swift) {
    if (!HasSwiftSelfArg) {
      MFI->addParam(PtrVT);
    }
    if (!HasSwiftErrorArg) {
      MFI->addParam(PtrVT);
    }
  }
  // Varargs are copied into a buffer allocated by the caller, and a pointer to
  // the buffer is passed as an argument.
  if (IsVarArg) {
    MVT PtrVT = getPointerTy(MF.getDataLayout());
    Register VarargVreg =
        MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrVT));
    MFI->setVarargBufferVreg(VarargVreg);
    Chain = DAG.getCopyToReg(
        Chain, DL, VarargVreg,
        DAG.getNode(WebAssemblyISD::ARGUMENT, DL, PtrVT,
                    DAG.getTargetConstant(Ins.size(), DL, MVT::i32)));
    MFI->addParam(PtrVT);
  }

  // Record the number and types of arguments and results.
  SmallVector<MVT, 4> Params;
  SmallVector<MVT, 4> Results;
  computeSignatureVTs(MF.getFunction().getFunctionType(), &MF.getFunction(),
                      MF.getFunction(), DAG.getTarget(), Params, Results);
  for (MVT VT : Results)
    MFI->addResult(VT);
  // TODO: Use signatures in WebAssemblyMachineFunctionInfo too and unify
  // the param logic here with ComputeSignatureVTs
  assert(MFI->getParams().size() == Params.size() &&
         std::equal(MFI->getParams().begin(), MFI->getParams().end(),
                    Params.begin()));

  return Chain;
}
1167
void WebAssemblyTargetLowering::ReplaceNodeResults(
    SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  case ISD::SIGN_EXTEND_INREG:
    // Do not add any results, signifying that N should not be custom lowered
    // after all. This happens because simd128 turns on custom lowering for
    // SIGN_EXTEND_INREG, but for non-vector sign extends the result might be an
    // illegal type.
    break;
  default:
    llvm_unreachable(
        "ReplaceNodeResults not implemented for this op for WebAssembly!");
  }
}
1182
1183//===----------------------------------------------------------------------===//
1184// Custom lowering hooks.
1185//===----------------------------------------------------------------------===//
1186
/// Dispatch custom lowering for the opcodes this target marked Custom.
SDValue WebAssemblyTargetLowering::LowerOperation(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDLoc DL(Op);
  switch (Op.getOpcode()) {
  default:
    llvm_unreachable("unimplemented operation lowering");
    return SDValue();
  case ISD::FrameIndex:
    return LowerFrameIndex(Op, DAG);
  case ISD::GlobalAddress:
    return LowerGlobalAddress(Op, DAG);
  case ISD::GlobalTLSAddress:
    return LowerGlobalTLSAddress(Op, DAG);
  case ISD::ExternalSymbol:
    return LowerExternalSymbol(Op, DAG);
  case ISD::JumpTable:
    return LowerJumpTable(Op, DAG);
  case ISD::BR_JT:
    return LowerBR_JT(Op, DAG);
  case ISD::VASTART:
    return LowerVASTART(Op, DAG);
  case ISD::BlockAddress:
  case ISD::BRIND:
    fail(DL, DAG, "WebAssembly hasn't implemented computed gotos");
    return SDValue();
  case ISD::RETURNADDR:
    return LowerRETURNADDR(Op, DAG);
  case ISD::FRAMEADDR:
    return LowerFRAMEADDR(Op, DAG);
  case ISD::CopyToReg:
    return LowerCopyToReg(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT:
  case ISD::INSERT_VECTOR_ELT:
    return LowerAccessVectorElement(Op, DAG);
  case ISD::INTRINSIC_VOID:
  case ISD::INTRINSIC_WO_CHAIN:
  case ISD::INTRINSIC_W_CHAIN:
    return LowerIntrinsic(Op, DAG);
  case ISD::SIGN_EXTEND_INREG:
    return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::BUILD_VECTOR:
    return LowerBUILD_VECTOR(Op, DAG);
  case ISD::VECTOR_SHUFFLE:
    return LowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::SETCC:
    return LowerSETCC(Op, DAG);
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
    return LowerShift(Op, DAG);
  }
}
1239
1240SDValue WebAssemblyTargetLowering::LowerCopyToReg(SDValue Op,
1241 SelectionDAG &DAG) const {
1242 SDValue Src = Op.getOperand(2);
1243 if (isa<FrameIndexSDNode>(Src.getNode())) {
1244 // CopyToReg nodes don't support FrameIndex operands. Other targets select
1245 // the FI to some LEA-like instruction, but since we don't have that, we
1246 // need to insert some kind of instruction that can take an FI operand and
1247 // produces a value usable by CopyToReg (i.e. in a vreg). So insert a dummy
1248 // local.copy between Op and its FI operand.
1249 SDValue Chain = Op.getOperand(0);
1250 SDLoc DL(Op);
1251 unsigned Reg = cast<RegisterSDNode>(Op.getOperand(1))->getReg();
1252 EVT VT = Src.getValueType();
1253 SDValue Copy(DAG.getMachineNode(VT == MVT::i32 ? WebAssembly::COPY_I32
1254 : WebAssembly::COPY_I64,
1255 DL, VT, Src),
1256 0);
1257 return Op.getNode()->getNumValues() == 1
1258 ? DAG.getCopyToReg(Chain, DL, Reg, Copy)
1259 : DAG.getCopyToReg(Chain, DL, Reg, Copy,
1260 Op.getNumOperands() == 4 ? Op.getOperand(3)
1261 : SDValue());
1262 }
1263 return SDValue();
1264}
1265
1266SDValue WebAssemblyTargetLowering::LowerFrameIndex(SDValue Op,
1267 SelectionDAG &DAG) const {
1268 int FI = cast<FrameIndexSDNode>(Op)->getIndex();
1269 return DAG.getTargetFrameIndex(FI, Op.getValueType());
1270}
1271
1272SDValue WebAssemblyTargetLowering::LowerRETURNADDR(SDValue Op,
1273 SelectionDAG &DAG) const {
1274 SDLoc DL(Op);
1275
1276 if (!Subtarget->getTargetTriple().isOSEmscripten()) {
1277 fail(DL, DAG,
1278 "Non-Emscripten WebAssembly hasn't implemented "
1279 "__builtin_return_address");
1280 return SDValue();
1281 }
1282
1283 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
1284 return SDValue();
1285
1286 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1287 MakeLibCallOptions CallOptions;
1288 return makeLibCall(DAG, RTLIB::RETURN_ADDRESS, Op.getValueType(),
1289 {DAG.getConstant(Depth, DL, MVT::i32)}, CallOptions, DL)
1290 .first;
1291}
1292
1293SDValue WebAssemblyTargetLowering::LowerFRAMEADDR(SDValue Op,
1294 SelectionDAG &DAG) const {
1295 // Non-zero depths are not supported by WebAssembly currently. Use the
1296 // legalizer's default expansion, which is to return 0 (what this function is
1297 // documented to do).
1298 if (Op.getConstantOperandVal(0) > 0)
1299 return SDValue();
1300
1301 DAG.getMachineFunction().getFrameInfo().setFrameAddressIsTaken(true);
1302 EVT VT = Op.getValueType();
1303 Register FP =
1304 Subtarget->getRegisterInfo()->getFrameRegister(DAG.getMachineFunction());
1305 return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), FP, VT);
1306}
1307
1308SDValue
1309WebAssemblyTargetLowering::LowerGlobalTLSAddress(SDValue Op,
1310 SelectionDAG &DAG) const {
1311 SDLoc DL(Op);
1312 const auto *GA = cast<GlobalAddressSDNode>(Op);
1313 MVT PtrVT = getPointerTy(DAG.getDataLayout());
1314
1315 MachineFunction &MF = DAG.getMachineFunction();
1316 if (!MF.getSubtarget<WebAssemblySubtarget>().hasBulkMemory())
1317 report_fatal_error("cannot use thread-local storage without bulk memory",
1318 false);
1319
1320 const GlobalValue *GV = GA->getGlobal();
1321
1322 // Currently Emscripten does not support dynamic linking with threads.
1323 // Therefore, if we have thread-local storage, only the local-exec model
1324 // is possible.
1325 // TODO: remove this and implement proper TLS models once Emscripten
1326 // supports dynamic linking with threads.
1327 if (GV->getThreadLocalMode() != GlobalValue::LocalExecTLSModel &&
1328 !Subtarget->getTargetTriple().isOSEmscripten()) {
1329 report_fatal_error("only -ftls-model=local-exec is supported for now on "
1330 "non-Emscripten OSes: variable " +
1331 GV->getName(),
1332 false);
1333 }
1334
1335 auto GlobalGet = PtrVT == MVT::i64 ? WebAssembly::GLOBAL_GET_I64
1336 : WebAssembly::GLOBAL_GET_I32;
1337 const char *BaseName = MF.createExternalSymbolName("__tls_base");
1338
1339 SDValue BaseAddr(
1340 DAG.getMachineNode(GlobalGet, DL, PtrVT,
1341 DAG.getTargetExternalSymbol(BaseName, PtrVT)),
1342 0);
1343
1344 SDValue TLSOffset = DAG.getTargetGlobalAddress(
1345 GV, DL, PtrVT, GA->getOffset(), WebAssemblyII::MO_TLS_BASE_REL);
1346 SDValue SymAddr = DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT, TLSOffset);
1347
1348 return DAG.getNode(ISD::ADD, DL, PtrVT, BaseAddr, SymAddr);
1349}
1350
1351SDValue WebAssemblyTargetLowering::LowerGlobalAddress(SDValue Op,
1352 SelectionDAG &DAG) const {
1353 SDLoc DL(Op);
1354 const auto *GA = cast<GlobalAddressSDNode>(Op);
1355 EVT VT = Op.getValueType();
1356 assert(GA->getTargetFlags() == 0 &&((GA->getTargetFlags() == 0 && "Unexpected target flags on generic GlobalAddressSDNode"
) ? static_cast<void> (0) : __assert_fail ("GA->getTargetFlags() == 0 && \"Unexpected target flags on generic GlobalAddressSDNode\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1357, __PRETTY_FUNCTION__))
1357 "Unexpected target flags on generic GlobalAddressSDNode")((GA->getTargetFlags() == 0 && "Unexpected target flags on generic GlobalAddressSDNode"
) ? static_cast<void> (0) : __assert_fail ("GA->getTargetFlags() == 0 && \"Unexpected target flags on generic GlobalAddressSDNode\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1357, __PRETTY_FUNCTION__))
;
1358 if (GA->getAddressSpace() != 0)
1359 fail(DL, DAG, "WebAssembly only expects the 0 address space");
1360
1361 unsigned OperandFlags = 0;
1362 if (isPositionIndependent()) {
1363 const GlobalValue *GV = GA->getGlobal();
1364 if (getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV)) {
1365 MachineFunction &MF = DAG.getMachineFunction();
1366 MVT PtrVT = getPointerTy(MF.getDataLayout());
1367 const char *BaseName;
1368 if (GV->getValueType()->isFunctionTy()) {
1369 BaseName = MF.createExternalSymbolName("__table_base");
1370 OperandFlags = WebAssemblyII::MO_TABLE_BASE_REL;
1371 }
1372 else {
1373 BaseName = MF.createExternalSymbolName("__memory_base");
1374 OperandFlags = WebAssemblyII::MO_MEMORY_BASE_REL;
1375 }
1376 SDValue BaseAddr =
1377 DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT,
1378 DAG.getTargetExternalSymbol(BaseName, PtrVT));
1379
1380 SDValue SymAddr = DAG.getNode(
1381 WebAssemblyISD::WrapperPIC, DL, VT,
1382 DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT, GA->getOffset(),
1383 OperandFlags));
1384
1385 return DAG.getNode(ISD::ADD, DL, VT, BaseAddr, SymAddr);
1386 } else {
1387 OperandFlags = WebAssemblyII::MO_GOT;
1388 }
1389 }
1390
1391 return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1392 DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT,
1393 GA->getOffset(), OperandFlags));
1394}
1395
1396SDValue
1397WebAssemblyTargetLowering::LowerExternalSymbol(SDValue Op,
1398 SelectionDAG &DAG) const {
1399 SDLoc DL(Op);
1400 const auto *ES = cast<ExternalSymbolSDNode>(Op);
1401 EVT VT = Op.getValueType();
1402 assert(ES->getTargetFlags() == 0 &&((ES->getTargetFlags() == 0 && "Unexpected target flags on generic ExternalSymbolSDNode"
) ? static_cast<void> (0) : __assert_fail ("ES->getTargetFlags() == 0 && \"Unexpected target flags on generic ExternalSymbolSDNode\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1403, __PRETTY_FUNCTION__))
1403 "Unexpected target flags on generic ExternalSymbolSDNode")((ES->getTargetFlags() == 0 && "Unexpected target flags on generic ExternalSymbolSDNode"
) ? static_cast<void> (0) : __assert_fail ("ES->getTargetFlags() == 0 && \"Unexpected target flags on generic ExternalSymbolSDNode\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1403, __PRETTY_FUNCTION__))
;
1404 return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1405 DAG.getTargetExternalSymbol(ES->getSymbol(), VT));
1406}
1407
1408SDValue WebAssemblyTargetLowering::LowerJumpTable(SDValue Op,
1409 SelectionDAG &DAG) const {
1410 // There's no need for a Wrapper node because we always incorporate a jump
1411 // table operand into a BR_TABLE instruction, rather than ever
1412 // materializing it in a register.
1413 const JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
1414 return DAG.getTargetJumpTable(JT->getIndex(), Op.getValueType(),
1415 JT->getTargetFlags());
1416}
1417
1418SDValue WebAssemblyTargetLowering::LowerBR_JT(SDValue Op,
1419 SelectionDAG &DAG) const {
1420 SDLoc DL(Op);
1421 SDValue Chain = Op.getOperand(0);
1422 const auto *JT = cast<JumpTableSDNode>(Op.getOperand(1));
1423 SDValue Index = Op.getOperand(2);
1424 assert(JT->getTargetFlags() == 0 && "WebAssembly doesn't set target flags")((JT->getTargetFlags() == 0 && "WebAssembly doesn't set target flags"
) ? static_cast<void> (0) : __assert_fail ("JT->getTargetFlags() == 0 && \"WebAssembly doesn't set target flags\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1424, __PRETTY_FUNCTION__))
;
1425
1426 SmallVector<SDValue, 8> Ops;
1427 Ops.push_back(Chain);
1428 Ops.push_back(Index);
1429
1430 MachineJumpTableInfo *MJTI = DAG.getMachineFunction().getJumpTableInfo();
1431 const auto &MBBs = MJTI->getJumpTables()[JT->getIndex()].MBBs;
1432
1433 // Add an operand for each case.
1434 for (auto MBB : MBBs)
1435 Ops.push_back(DAG.getBasicBlock(MBB));
1436
1437 // Add the first MBB as a dummy default target for now. This will be replaced
1438 // with the proper default target (and the preceding range check eliminated)
1439 // if possible by WebAssemblyFixBrTableDefaults.
1440 Ops.push_back(DAG.getBasicBlock(*MBBs.begin()));
1441 return DAG.getNode(WebAssemblyISD::BR_TABLE, DL, MVT::Other, Ops);
1442}
1443
1444SDValue WebAssemblyTargetLowering::LowerVASTART(SDValue Op,
1445 SelectionDAG &DAG) const {
1446 SDLoc DL(Op);
1447 EVT PtrVT = getPointerTy(DAG.getMachineFunction().getDataLayout());
1448
1449 auto *MFI = DAG.getMachineFunction().getInfo<WebAssemblyFunctionInfo>();
1450 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
1451
1452 SDValue ArgN = DAG.getCopyFromReg(DAG.getEntryNode(), DL,
1453 MFI->getVarargBufferVreg(), PtrVT);
1454 return DAG.getStore(Op.getOperand(0), DL, ArgN, Op.getOperand(1),
1455 MachinePointerInfo(SV));
1456}
1457
1458static SDValue getCppExceptionSymNode(SDValue Op, unsigned TagIndex,
1459 SelectionDAG &DAG) {
1460 // We only support C++ exceptions for now
1461 int Tag =
1462 cast<ConstantSDNode>(Op.getOperand(TagIndex).getNode())->getZExtValue();
1463 if (Tag != WebAssembly::CPP_EXCEPTION)
1464 llvm_unreachable("Invalid tag: We only support C++ exceptions for now")::llvm::llvm_unreachable_internal("Invalid tag: We only support C++ exceptions for now"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1464)
;
1465 auto &MF = DAG.getMachineFunction();
1466 const auto &TLI = DAG.getTargetLoweringInfo();
1467 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
1468 const char *SymName = MF.createExternalSymbolName("__cpp_exception");
1469 return DAG.getNode(WebAssemblyISD::Wrapper, SDLoc(Op), PtrVT,
1470 DAG.getTargetExternalSymbol(SymName, PtrVT));
1471}
1472
1473SDValue WebAssemblyTargetLowering::LowerIntrinsic(SDValue Op,
1474 SelectionDAG &DAG) const {
1475 MachineFunction &MF = DAG.getMachineFunction();
1476 unsigned IntNo;
1477 switch (Op.getOpcode()) {
1478 case ISD::INTRINSIC_VOID:
1479 case ISD::INTRINSIC_W_CHAIN:
1480 IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
1481 break;
1482 case ISD::INTRINSIC_WO_CHAIN:
1483 IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1484 break;
1485 default:
1486 llvm_unreachable("Invalid intrinsic")::llvm::llvm_unreachable_internal("Invalid intrinsic", "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1486)
;
1487 }
1488 SDLoc DL(Op);
1489
1490 switch (IntNo) {
1491 default:
1492 return SDValue(); // Don't custom lower most intrinsics.
1493
1494 case Intrinsic::wasm_lsda: {
1495 EVT VT = Op.getValueType();
1496 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1497 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
1498 auto &Context = MF.getMMI().getContext();
1499 MCSymbol *S = Context.getOrCreateSymbol(Twine("GCC_except_table") +
1500 Twine(MF.getFunctionNumber()));
1501 return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1502 DAG.getMCSymbol(S, PtrVT));
1503 }
1504
1505 case Intrinsic::wasm_throw: {
1506 SDValue SymNode = getCppExceptionSymNode(Op, 2, DAG);
1507 return DAG.getNode(WebAssemblyISD::THROW, DL,
1508 MVT::Other, // outchain type
1509 {
1510 Op.getOperand(0), // inchain
1511 SymNode, // exception symbol
1512 Op.getOperand(3) // thrown value
1513 });
1514 }
1515
1516 case Intrinsic::wasm_catch: {
1517 SDValue SymNode = getCppExceptionSymNode(Op, 2, DAG);
1518 return DAG.getNode(WebAssemblyISD::CATCH, DL,
1519 {
1520 MVT::i32, // outchain type
1521 MVT::Other // return value
1522 },
1523 {
1524 Op.getOperand(0), // inchain
1525 SymNode // exception symbol
1526 });
1527 }
1528
1529 case Intrinsic::wasm_shuffle: {
1530 // Drop in-chain and replace undefs, but otherwise pass through unchanged
1531 SDValue Ops[18];
1532 size_t OpIdx = 0;
1533 Ops[OpIdx++] = Op.getOperand(1);
1534 Ops[OpIdx++] = Op.getOperand(2);
1535 while (OpIdx < 18) {
1536 const SDValue &MaskIdx = Op.getOperand(OpIdx + 1);
1537 if (MaskIdx.isUndef() ||
1538 cast<ConstantSDNode>(MaskIdx.getNode())->getZExtValue() >= 32) {
1539 Ops[OpIdx++] = DAG.getConstant(0, DL, MVT::i32);
1540 } else {
1541 Ops[OpIdx++] = MaskIdx;
1542 }
1543 }
1544 return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops);
1545 }
1546 }
1547}
1548
1549SDValue
1550WebAssemblyTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
1551 SelectionDAG &DAG) const {
1552 SDLoc DL(Op);
1553 // If sign extension operations are disabled, allow sext_inreg only if operand
1554 // is a vector extract of an i8 or i16 lane. SIMD does not depend on sign
1555 // extension operations, but allowing sext_inreg in this context lets us have
1556 // simple patterns to select extract_lane_s instructions. Expanding sext_inreg
1557 // everywhere would be simpler in this file, but would necessitate large and
1558 // brittle patterns to undo the expansion and select extract_lane_s
1559 // instructions.
1560 assert(!Subtarget->hasSignExt() && Subtarget->hasSIMD128())((!Subtarget->hasSignExt() && Subtarget->hasSIMD128
()) ? static_cast<void> (0) : __assert_fail ("!Subtarget->hasSignExt() && Subtarget->hasSIMD128()"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1560, __PRETTY_FUNCTION__))
;
1561 if (Op.getOperand(0).getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1562 return SDValue();
1563
1564 const SDValue &Extract = Op.getOperand(0);
1565 MVT VecT = Extract.getOperand(0).getSimpleValueType();
1566 if (VecT.getVectorElementType().getSizeInBits() > 32)
1567 return SDValue();
1568 MVT ExtractedLaneT =
1569 cast<VTSDNode>(Op.getOperand(1).getNode())->getVT().getSimpleVT();
1570 MVT ExtractedVecT =
1571 MVT::getVectorVT(ExtractedLaneT, 128 / ExtractedLaneT.getSizeInBits());
1572 if (ExtractedVecT == VecT)
1573 return Op;
1574
1575 // Bitcast vector to appropriate type to ensure ISel pattern coverage
1576 const SDNode *Index = Extract.getOperand(1).getNode();
1577 if (!isa<ConstantSDNode>(Index))
1578 return SDValue();
1579 unsigned IndexVal = cast<ConstantSDNode>(Index)->getZExtValue();
1580 unsigned Scale =
1581 ExtractedVecT.getVectorNumElements() / VecT.getVectorNumElements();
1582 assert(Scale > 1)((Scale > 1) ? static_cast<void> (0) : __assert_fail
("Scale > 1", "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1582, __PRETTY_FUNCTION__))
;
1583 SDValue NewIndex =
1584 DAG.getConstant(IndexVal * Scale, DL, Index->getValueType(0));
1585 SDValue NewExtract = DAG.getNode(
1586 ISD::EXTRACT_VECTOR_ELT, DL, Extract.getValueType(),
1587 DAG.getBitcast(ExtractedVecT, Extract.getOperand(0)), NewIndex);
1588 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, Op.getValueType(), NewExtract,
1589 Op.getOperand(1));
1590}
1591
1592SDValue WebAssemblyTargetLowering::LowerBUILD_VECTOR(SDValue Op,
1593 SelectionDAG &DAG) const {
1594 SDLoc DL(Op);
1595 const EVT VecT = Op.getValueType();
1596 const EVT LaneT = Op.getOperand(0).getValueType();
1597 const size_t Lanes = Op.getNumOperands();
1598 bool CanSwizzle = VecT == MVT::v16i8;
1599
1600 // BUILD_VECTORs are lowered to the instruction that initializes the highest
1601 // possible number of lanes at once followed by a sequence of replace_lane
1602 // instructions to individually initialize any remaining lanes.
1603
1604 // TODO: Tune this. For example, lanewise swizzling is very expensive, so
1605 // swizzled lanes should be given greater weight.
1606
1607 // TODO: Investigate looping rather than always extracting/replacing specific
1608 // lanes to fill gaps.
1609
1610 auto IsConstant = [](const SDValue &V) {
1611 return V.getOpcode() == ISD::Constant || V.getOpcode() == ISD::ConstantFP;
1612 };
1613
1614 // Returns the source vector and index vector pair if they exist. Checks for:
1615 // (extract_vector_elt
1616 // $src,
1617 // (sign_extend_inreg (extract_vector_elt $indices, $i))
1618 // )
1619 auto GetSwizzleSrcs = [](size_t I, const SDValue &Lane) {
1620 auto Bail = std::make_pair(SDValue(), SDValue());
1621 if (Lane->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1622 return Bail;
1623 const SDValue &SwizzleSrc = Lane->getOperand(0);
1624 const SDValue &IndexExt = Lane->getOperand(1);
1625 if (IndexExt->getOpcode() != ISD::SIGN_EXTEND_INREG)
1626 return Bail;
1627 const SDValue &Index = IndexExt->getOperand(0);
1628 if (Index->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1629 return Bail;
1630 const SDValue &SwizzleIndices = Index->getOperand(0);
1631 if (SwizzleSrc.getValueType() != MVT::v16i8 ||
1632 SwizzleIndices.getValueType() != MVT::v16i8 ||
1633 Index->getOperand(1)->getOpcode() != ISD::Constant ||
1634 Index->getConstantOperandVal(1) != I)
1635 return Bail;
1636 return std::make_pair(SwizzleSrc, SwizzleIndices);
1637 };
1638
1639 // If the lane is extracted from another vector at a constant index, return
1640 // that vector. The source vector must not have more lanes than the dest
1641 // because the shufflevector indices are in terms of the destination lanes and
1642 // would not be able to address the smaller individual source lanes.
1643 auto GetShuffleSrc = [&](const SDValue &Lane) {
1644 if (Lane->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1645 return SDValue();
1646 if (!isa<ConstantSDNode>(Lane->getOperand(1).getNode()))
1647 return SDValue();
1648 if (Lane->getOperand(0).getValueType().getVectorNumElements() >
1649 VecT.getVectorNumElements())
1650 return SDValue();
1651 return Lane->getOperand(0);
1652 };
1653
1654 using ValueEntry = std::pair<SDValue, size_t>;
1655 SmallVector<ValueEntry, 16> SplatValueCounts;
1656
1657 using SwizzleEntry = std::pair<std::pair<SDValue, SDValue>, size_t>;
1658 SmallVector<SwizzleEntry, 16> SwizzleCounts;
1659
1660 using ShuffleEntry = std::pair<SDValue, size_t>;
1661 SmallVector<ShuffleEntry, 16> ShuffleCounts;
1662
1663 auto AddCount = [](auto &Counts, const auto &Val) {
1664 auto CountIt =
1665 llvm::find_if(Counts, [&Val](auto E) { return E.first == Val; });
1666 if (CountIt == Counts.end()) {
1667 Counts.emplace_back(Val, 1);
1668 } else {
1669 CountIt->second++;
1670 }
1671 };
1672
1673 auto GetMostCommon = [](auto &Counts) {
1674 auto CommonIt =
1675 std::max_element(Counts.begin(), Counts.end(),
1676 [](auto A, auto B) { return A.second < B.second; });
1677 assert(CommonIt != Counts.end() && "Unexpected all-undef build_vector")((CommonIt != Counts.end() && "Unexpected all-undef build_vector"
) ? static_cast<void> (0) : __assert_fail ("CommonIt != Counts.end() && \"Unexpected all-undef build_vector\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1677, __PRETTY_FUNCTION__))
;
1678 return *CommonIt;
1679 };
1680
1681 size_t NumConstantLanes = 0;
1682
1683 // Count eligible lanes for each type of vector creation op
1684 for (size_t I = 0; I
2.1
'I' is < 'Lanes'
2.1
'I' is < 'Lanes'
2.1
'I' is < 'Lanes'
< Lanes
; ++I) {
3
Loop condition is true. Entering loop body
9
Assuming 'I' is >= 'Lanes'
10
Loop condition is false. Execution continues on line 1702
1685 const SDValue &Lane = Op->getOperand(I);
1686 if (Lane.isUndef())
4
Taking false branch
1687 continue;
1688
1689 AddCount(SplatValueCounts, Lane);
1690
1691 if (IsConstant(Lane))
5
Taking false branch
1692 NumConstantLanes++;
1693 if (auto ShuffleSrc = GetShuffleSrc(Lane))
6
Taking false branch
1694 AddCount(ShuffleCounts, ShuffleSrc);
1695 if (CanSwizzle
6.1
'CanSwizzle' is true
6.1
'CanSwizzle' is true
6.1
'CanSwizzle' is true
) {
7
Taking true branch
1696 auto SwizzleSrcs = GetSwizzleSrcs(I, Lane);
1697 if (SwizzleSrcs.first)
8
Taking true branch
1698 AddCount(SwizzleCounts, SwizzleSrcs);
1699 }
1700 }
1701
1702 SDValue SplatValue;
1703 size_t NumSplatLanes;
1704 std::tie(SplatValue, NumSplatLanes) = GetMostCommon(SplatValueCounts);
1705
1706 SDValue SwizzleSrc;
1707 SDValue SwizzleIndices;
1708 size_t NumSwizzleLanes = 0;
1709 if (SwizzleCounts.size())
11
Assuming the condition is false
12
Taking false branch
1710 std::forward_as_tuple(std::tie(SwizzleSrc, SwizzleIndices),
1711 NumSwizzleLanes) = GetMostCommon(SwizzleCounts);
1712
1713 // Shuffles can draw from up to two vectors, so find the two most common
1714 // sources.
1715 SDValue ShuffleSrc1, ShuffleSrc2;
1716 size_t NumShuffleLanes = 0;
1717 if (ShuffleCounts.size()) {
13
Assuming the condition is false
14
Taking false branch
1718 std::tie(ShuffleSrc1, NumShuffleLanes) = GetMostCommon(ShuffleCounts);
1719 ShuffleCounts.erase(std::remove_if(ShuffleCounts.begin(),
1720 ShuffleCounts.end(),
1721 [&](const auto &Pair) {
1722 return Pair.first == ShuffleSrc1;
1723 }),
1724 ShuffleCounts.end());
1725 }
1726 if (ShuffleCounts.size()) {
15
Taking false branch
1727 size_t AdditionalShuffleLanes;
1728 std::tie(ShuffleSrc2, AdditionalShuffleLanes) =
1729 GetMostCommon(ShuffleCounts);
1730 NumShuffleLanes += AdditionalShuffleLanes;
1731 }
1732
1733 // Predicate returning true if the lane is properly initialized by the
1734 // original instruction
1735 std::function<bool(size_t, const SDValue &)> IsLaneConstructed;
1736 SDValue Result;
1737 // Prefer swizzles over shuffles over vector consts over splats
1738 if (NumSwizzleLanes
15.1
'NumSwizzleLanes' is >= 'NumShuffleLanes'
15.1
'NumSwizzleLanes' is >= 'NumShuffleLanes'
15.1
'NumSwizzleLanes' is >= 'NumShuffleLanes'
>= NumShuffleLanes &&
17
Taking false branch
1739 NumSwizzleLanes
15.2
'NumSwizzleLanes' is >= 'NumConstantLanes'
15.2
'NumSwizzleLanes' is >= 'NumConstantLanes'
15.2
'NumSwizzleLanes' is >= 'NumConstantLanes'
>= NumConstantLanes && NumSwizzleLanes >= NumSplatLanes) {
16
Assuming 'NumSwizzleLanes' is < 'NumSplatLanes'
1740 Result = DAG.getNode(WebAssemblyISD::SWIZZLE, DL, VecT, SwizzleSrc,
1741 SwizzleIndices);
1742 auto Swizzled = std::make_pair(SwizzleSrc, SwizzleIndices);
1743 IsLaneConstructed = [&, Swizzled](size_t I, const SDValue &Lane) {
1744 return Swizzled == GetSwizzleSrcs(I, Lane);
1745 };
1746 } else if (NumShuffleLanes
17.1
'NumShuffleLanes' is >= 'NumConstantLanes'
17.1
'NumShuffleLanes' is >= 'NumConstantLanes'
17.1
'NumShuffleLanes' is >= 'NumConstantLanes'
>= NumConstantLanes &&
18
Taking false branch
1747 NumShuffleLanes
17.2
'NumShuffleLanes' is < 'NumSplatLanes'
17.2
'NumShuffleLanes' is < 'NumSplatLanes'
17.2
'NumShuffleLanes' is < 'NumSplatLanes'
>= NumSplatLanes) {
1748 size_t DestLaneSize = VecT.getVectorElementType().getFixedSizeInBits() / 8;
1749 size_t DestLaneCount = VecT.getVectorNumElements();
1750 size_t Scale1 = 1;
1751 size_t Scale2 = 1;
1752 SDValue Src1 = ShuffleSrc1;
1753 SDValue Src2 = ShuffleSrc2 ? ShuffleSrc2 : DAG.getUNDEF(VecT);
1754 if (Src1.getValueType() != VecT) {
1755 size_t LaneSize =
1756 Src1.getValueType().getVectorElementType().getFixedSizeInBits() / 8;
1757 assert(LaneSize > DestLaneSize)((LaneSize > DestLaneSize) ? static_cast<void> (0) :
__assert_fail ("LaneSize > DestLaneSize", "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1757, __PRETTY_FUNCTION__))
;
1758 Scale1 = LaneSize / DestLaneSize;
1759 Src1 = DAG.getBitcast(VecT, Src1);
1760 }
1761 if (Src2.getValueType() != VecT) {
1762 size_t LaneSize =
1763 Src2.getValueType().getVectorElementType().getFixedSizeInBits() / 8;
1764 assert(LaneSize > DestLaneSize)((LaneSize > DestLaneSize) ? static_cast<void> (0) :
__assert_fail ("LaneSize > DestLaneSize", "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1764, __PRETTY_FUNCTION__))
;
1765 Scale2 = LaneSize / DestLaneSize;
1766 Src2 = DAG.getBitcast(VecT, Src2);
1767 }
1768
1769 int Mask[16];
1770 assert(DestLaneCount <= 16)((DestLaneCount <= 16) ? static_cast<void> (0) : __assert_fail
("DestLaneCount <= 16", "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1770, __PRETTY_FUNCTION__))
;
1771 for (size_t I = 0; I < DestLaneCount; ++I) {
1772 const SDValue &Lane = Op->getOperand(I);
1773 SDValue Src = GetShuffleSrc(Lane);
1774 if (Src == ShuffleSrc1) {
1775 Mask[I] = Lane->getConstantOperandVal(1) * Scale1;
1776 } else if (Src && Src == ShuffleSrc2) {
1777 Mask[I] = DestLaneCount + Lane->getConstantOperandVal(1) * Scale2;
1778 } else {
1779 Mask[I] = -1;
1780 }
1781 }
1782 ArrayRef<int> MaskRef(Mask, DestLaneCount);
1783 Result = DAG.getVectorShuffle(VecT, DL, Src1, Src2, MaskRef);
1784 IsLaneConstructed = [&](size_t, const SDValue &Lane) {
1785 auto Src = GetShuffleSrc(Lane);
1786 return Src == ShuffleSrc1 || (Src && Src == ShuffleSrc2);
1787 };
1788 } else if (NumConstantLanes
18.1
'NumConstantLanes' is < 'NumSplatLanes'
18.1
'NumConstantLanes' is < 'NumSplatLanes'
18.1
'NumConstantLanes' is < 'NumSplatLanes'
>= NumSplatLanes) {
19
Taking false branch
1789 SmallVector<SDValue, 16> ConstLanes;
1790 for (const SDValue &Lane : Op->op_values()) {
1791 if (IsConstant(Lane)) {
1792 ConstLanes.push_back(Lane);
1793 } else if (LaneT.isFloatingPoint()) {
1794 ConstLanes.push_back(DAG.getConstantFP(0, DL, LaneT));
1795 } else {
1796 ConstLanes.push_back(DAG.getConstant(0, DL, LaneT));
1797 }
1798 }
1799 Result = DAG.getBuildVector(VecT, DL, ConstLanes);
1800 IsLaneConstructed = [&IsConstant](size_t _, const SDValue &Lane) {
1801 return IsConstant(Lane);
1802 };
1803 } else {
1804 // Use a splat, but possibly a load_splat
1805 LoadSDNode *SplattedLoad;
1806 if ((SplattedLoad = dyn_cast<LoadSDNode>(SplatValue)) &&
20
Assuming 'SplattedLoad' is null
21
Assuming pointer value is null
22
Taking false branch
1807 SplattedLoad->getMemoryVT() == VecT.getVectorElementType()) {
1808 Result = DAG.getMemIntrinsicNode(
1809 WebAssemblyISD::LOAD_SPLAT, DL, DAG.getVTList(VecT),
1810 {SplattedLoad->getChain(), SplattedLoad->getBasePtr(),
1811 SplattedLoad->getOffset()},
1812 SplattedLoad->getMemoryVT(), SplattedLoad->getMemOperand());
1813 } else {
1814 Result = DAG.getSplatBuildVector(VecT, DL, SplatValue);
23
Value assigned to 'Op.Node'
24
Calling 'SelectionDAG::getSplatBuildVector'
1815 }
1816 IsLaneConstructed = [&SplatValue](size_t _, const SDValue &Lane) {
1817 return Lane == SplatValue;
1818 };
1819 }
1820
1821 assert(Result)((Result) ? static_cast<void> (0) : __assert_fail ("Result"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1821, __PRETTY_FUNCTION__))
;
1822 assert(IsLaneConstructed)((IsLaneConstructed) ? static_cast<void> (0) : __assert_fail
("IsLaneConstructed", "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1822, __PRETTY_FUNCTION__))
;
1823
1824 // Add replace_lane instructions for any unhandled values
1825 for (size_t I = 0; I < Lanes; ++I) {
1826 const SDValue &Lane = Op->getOperand(I);
1827 if (!Lane.isUndef() && !IsLaneConstructed(I, Lane))
1828 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VecT, Result, Lane,
1829 DAG.getConstant(I, DL, MVT::i32));
1830 }
1831
1832 return Result;
1833}
1834
1835SDValue
1836WebAssemblyTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
1837 SelectionDAG &DAG) const {
1838 SDLoc DL(Op);
1839 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op.getNode())->getMask();
1840 MVT VecType = Op.getOperand(0).getSimpleValueType();
1841 assert(VecType.is128BitVector() && "Unexpected shuffle vector type")((VecType.is128BitVector() && "Unexpected shuffle vector type"
) ? static_cast<void> (0) : __assert_fail ("VecType.is128BitVector() && \"Unexpected shuffle vector type\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1841, __PRETTY_FUNCTION__))
;
1842 size_t LaneBytes = VecType.getVectorElementType().getSizeInBits() / 8;
1843
1844 // Space for two vector args and sixteen mask indices
1845 SDValue Ops[18];
1846 size_t OpIdx = 0;
1847 Ops[OpIdx++] = Op.getOperand(0);
1848 Ops[OpIdx++] = Op.getOperand(1);
1849
1850 // Expand mask indices to byte indices and materialize them as operands
1851 for (int M : Mask) {
1852 for (size_t J = 0; J < LaneBytes; ++J) {
1853 // Lower undefs (represented by -1 in mask) to zero
1854 uint64_t ByteIndex = M == -1 ? 0 : (uint64_t)M * LaneBytes + J;
1855 Ops[OpIdx++] = DAG.getConstant(ByteIndex, DL, MVT::i32);
1856 }
1857 }
1858
1859 return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops);
1860}
1861
1862SDValue WebAssemblyTargetLowering::LowerSETCC(SDValue Op,
1863 SelectionDAG &DAG) const {
1864 SDLoc DL(Op);
1865 // The legalizer does not know how to expand the unsupported comparison modes
1866 // of i64x2 vectors, so we manually unroll them here.
1867 assert(Op->getOperand(0)->getSimpleValueType(0) == MVT::v2i64)((Op->getOperand(0)->getSimpleValueType(0) == MVT::v2i64
) ? static_cast<void> (0) : __assert_fail ("Op->getOperand(0)->getSimpleValueType(0) == MVT::v2i64"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1867, __PRETTY_FUNCTION__))
;
1868 SmallVector<SDValue, 2> LHS, RHS;
1869 DAG.ExtractVectorElements(Op->getOperand(0), LHS);
1870 DAG.ExtractVectorElements(Op->getOperand(1), RHS);
1871 const SDValue &CC = Op->getOperand(2);
1872 auto MakeLane = [&](unsigned I) {
1873 return DAG.getNode(ISD::SELECT_CC, DL, MVT::i64, LHS[I], RHS[I],
1874 DAG.getConstant(uint64_t(-1), DL, MVT::i64),
1875 DAG.getConstant(uint64_t(0), DL, MVT::i64), CC);
1876 };
1877 return DAG.getBuildVector(Op->getValueType(0), DL,
1878 {MakeLane(0), MakeLane(1)});
1879}
1880
1881SDValue
1882WebAssemblyTargetLowering::LowerAccessVectorElement(SDValue Op,
1883 SelectionDAG &DAG) const {
1884 // Allow constant lane indices, expand variable lane indices
1885 SDNode *IdxNode = Op.getOperand(Op.getNumOperands() - 1).getNode();
1886 if (isa<ConstantSDNode>(IdxNode) || IdxNode->isUndef())
1887 return Op;
1888 else
1889 // Perform default expansion
1890 return SDValue();
1891}
1892
1893static SDValue unrollVectorShift(SDValue Op, SelectionDAG &DAG) {
1894 EVT LaneT = Op.getSimpleValueType().getVectorElementType();
1895 // 32-bit and 64-bit unrolled shifts will have proper semantics
1896 if (LaneT.bitsGE(MVT::i32))
1897 return DAG.UnrollVectorOp(Op.getNode());
1898 // Otherwise mask the shift value to get proper semantics from 32-bit shift
1899 SDLoc DL(Op);
1900 size_t NumLanes = Op.getSimpleValueType().getVectorNumElements();
1901 SDValue Mask = DAG.getConstant(LaneT.getSizeInBits() - 1, DL, MVT::i32);
1902 unsigned ShiftOpcode = Op.getOpcode();
1903 SmallVector<SDValue, 16> ShiftedElements;
1904 DAG.ExtractVectorElements(Op.getOperand(0), ShiftedElements, 0, 0, MVT::i32);
1905 SmallVector<SDValue, 16> ShiftElements;
1906 DAG.ExtractVectorElements(Op.getOperand(1), ShiftElements, 0, 0, MVT::i32);
1907 SmallVector<SDValue, 16> UnrolledOps;
1908 for (size_t i = 0; i < NumLanes; ++i) {
1909 SDValue MaskedShiftValue =
1910 DAG.getNode(ISD::AND, DL, MVT::i32, ShiftElements[i], Mask);
1911 SDValue ShiftedValue = ShiftedElements[i];
1912 if (ShiftOpcode == ISD::SRA)
1913 ShiftedValue = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32,
1914 ShiftedValue, DAG.getValueType(LaneT));
1915 UnrolledOps.push_back(
1916 DAG.getNode(ShiftOpcode, DL, MVT::i32, ShiftedValue, MaskedShiftValue));
1917 }
1918 return DAG.getBuildVector(Op.getValueType(), DL, UnrolledOps);
1919}
1920
1921SDValue WebAssemblyTargetLowering::LowerShift(SDValue Op,
1922 SelectionDAG &DAG) const {
1923 SDLoc DL(Op);
1924
1925 // Only manually lower vector shifts
1926 assert(Op.getSimpleValueType().isVector())((Op.getSimpleValueType().isVector()) ? static_cast<void>
(0) : __assert_fail ("Op.getSimpleValueType().isVector()", "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1926, __PRETTY_FUNCTION__))
;
1927
1928 auto ShiftVal = DAG.getSplatValue(Op.getOperand(1));
1929 if (!ShiftVal)
1930 return unrollVectorShift(Op, DAG);
1931
1932 // Use anyext because none of the high bits can affect the shift
1933 ShiftVal = DAG.getAnyExtOrTrunc(ShiftVal, DL, MVT::i32);
1934
1935 unsigned Opcode;
1936 switch (Op.getOpcode()) {
1937 case ISD::SHL:
1938 Opcode = WebAssemblyISD::VEC_SHL;
1939 break;
1940 case ISD::SRA:
1941 Opcode = WebAssemblyISD::VEC_SHR_S;
1942 break;
1943 case ISD::SRL:
1944 Opcode = WebAssemblyISD::VEC_SHR_U;
1945 break;
1946 default:
1947 llvm_unreachable("unexpected opcode")::llvm::llvm_unreachable_internal("unexpected opcode", "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1947)
;
1948 }
1949
1950 return DAG.getNode(Opcode, DL, Op.getValueType(), Op.getOperand(0), ShiftVal);
1951}
1952
1953//===----------------------------------------------------------------------===//
1954// Custom DAG combine hooks
1955//===----------------------------------------------------------------------===//
1956static SDValue
1957performVECTOR_SHUFFLECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
1958 auto &DAG = DCI.DAG;
1959 auto Shuffle = cast<ShuffleVectorSDNode>(N);
1960
1961 // Hoist vector bitcasts that don't change the number of lanes out of unary
1962 // shuffles, where they are less likely to get in the way of other combines.
1963 // (shuffle (vNxT1 (bitcast (vNxT0 x))), undef, mask) ->
1964 // (vNxT1 (bitcast (vNxT0 (shuffle x, undef, mask))))
1965 SDValue Bitcast = N->getOperand(0);
1966 if (Bitcast.getOpcode() != ISD::BITCAST)
1967 return SDValue();
1968 if (!N->getOperand(1).isUndef())
1969 return SDValue();
1970 SDValue CastOp = Bitcast.getOperand(0);
1971 MVT SrcType = CastOp.getSimpleValueType();
1972 MVT DstType = Bitcast.getSimpleValueType();
1973 if (!SrcType.is128BitVector() ||
1974 SrcType.getVectorNumElements() != DstType.getVectorNumElements())
1975 return SDValue();
1976 SDValue NewShuffle = DAG.getVectorShuffle(
1977 SrcType, SDLoc(N), CastOp, DAG.getUNDEF(SrcType), Shuffle->getMask());
1978 return DAG.getBitcast(DstType, NewShuffle);
1979}
1980
1981static SDValue
1982performVectorExtendCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
1983 auto &DAG = DCI.DAG;
1984 assert(N->getOpcode() == ISD::SIGN_EXTEND ||((N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() ==
ISD::ZERO_EXTEND) ? static_cast<void> (0) : __assert_fail
("N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1985, __PRETTY_FUNCTION__))
1985 N->getOpcode() == ISD::ZERO_EXTEND)((N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() ==
ISD::ZERO_EXTEND) ? static_cast<void> (0) : __assert_fail
("N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1985, __PRETTY_FUNCTION__))
;
1986
1987 // Combine ({s,z}ext (extract_subvector src, i)) into a widening operation if
1988 // possible before the extract_subvector can be expanded.
1989 auto Extract = N->getOperand(0);
1990 if (Extract.getOpcode() != ISD::EXTRACT_SUBVECTOR)
1991 return SDValue();
1992 auto Source = Extract.getOperand(0);
1993 auto *IndexNode = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
1994 if (IndexNode == nullptr)
1995 return SDValue();
1996 auto Index = IndexNode->getZExtValue();
1997
1998 // Only v8i8 and v4i16 extracts can be widened, and only if the extracted
1999 // subvector is the low or high half of its source.
2000 EVT ResVT = N->getValueType(0);
2001 if (ResVT == MVT::v8i16) {
2002 if (Extract.getValueType() != MVT::v8i8 ||
2003 Source.getValueType() != MVT::v16i8 || (Index != 0 && Index != 8))
2004 return SDValue();
2005 } else if (ResVT == MVT::v4i32) {
2006 if (Extract.getValueType() != MVT::v4i16 ||
2007 Source.getValueType() != MVT::v8i16 || (Index != 0 && Index != 4))
2008 return SDValue();
2009 } else {
2010 return SDValue();
2011 }
2012
2013 bool IsSext = N->getOpcode() == ISD::SIGN_EXTEND;
2014 bool IsLow = Index == 0;
2015
2016 unsigned Op = IsSext ? (IsLow ? WebAssemblyISD::EXTEND_LOW_S
2017 : WebAssemblyISD::EXTEND_HIGH_S)
2018 : (IsLow ? WebAssemblyISD::EXTEND_LOW_U
2019 : WebAssemblyISD::EXTEND_HIGH_U);
2020
2021 return DAG.getNode(Op, SDLoc(N), ResVT, Source);
2022}
2023
2024SDValue
2025WebAssemblyTargetLowering::PerformDAGCombine(SDNode *N,
2026 DAGCombinerInfo &DCI) const {
2027 switch (N->getOpcode()) {
2028 default:
2029 return SDValue();
2030 case ISD::VECTOR_SHUFFLE:
2031 return performVECTOR_SHUFFLECombine(N, DCI);
2032 case ISD::SIGN_EXTEND:
2033 case ISD::ZERO_EXTEND:
2034 return performVectorExtendCombine(N, DCI);
2035 }
2036}

/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAG.h

1//===- llvm/CodeGen/SelectionDAG.h - InstSelection DAG ----------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file declares the SelectionDAG class, and transitively defines the
10// SDNode class and subclasses.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_CODEGEN_SELECTIONDAG_H
15#define LLVM_CODEGEN_SELECTIONDAG_H
16
17#include "llvm/ADT/APFloat.h"
18#include "llvm/ADT/APInt.h"
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/DenseMap.h"
21#include "llvm/ADT/DenseSet.h"
22#include "llvm/ADT/FoldingSet.h"
23#include "llvm/ADT/SetVector.h"
24#include "llvm/ADT/SmallVector.h"
25#include "llvm/ADT/StringMap.h"
26#include "llvm/ADT/ilist.h"
27#include "llvm/ADT/iterator.h"
28#include "llvm/ADT/iterator_range.h"
29#include "llvm/CodeGen/DAGCombine.h"
30#include "llvm/CodeGen/ISDOpcodes.h"
31#include "llvm/CodeGen/MachineFunction.h"
32#include "llvm/CodeGen/MachineMemOperand.h"
33#include "llvm/CodeGen/SelectionDAGNodes.h"
34#include "llvm/CodeGen/ValueTypes.h"
35#include "llvm/IR/DebugLoc.h"
36#include "llvm/IR/Instructions.h"
37#include "llvm/IR/Metadata.h"
38#include "llvm/Support/Allocator.h"
39#include "llvm/Support/ArrayRecycler.h"
40#include "llvm/Support/AtomicOrdering.h"
41#include "llvm/Support/Casting.h"
42#include "llvm/Support/CodeGen.h"
43#include "llvm/Support/ErrorHandling.h"
44#include "llvm/Support/MachineValueType.h"
45#include "llvm/Support/RecyclingAllocator.h"
46#include <algorithm>
47#include <cassert>
48#include <cstdint>
49#include <functional>
50#include <map>
51#include <string>
52#include <tuple>
53#include <utility>
54#include <vector>
55
56namespace llvm {
57
58class AAResults;
59class BlockAddress;
60class BlockFrequencyInfo;
61class Constant;
62class ConstantFP;
63class ConstantInt;
64class DataLayout;
65struct fltSemantics;
66class FunctionLoweringInfo;
67class GlobalValue;
68struct KnownBits;
69class LegacyDivergenceAnalysis;
70class LLVMContext;
71class MachineBasicBlock;
72class MachineConstantPoolValue;
73class MCSymbol;
74class OptimizationRemarkEmitter;
75class ProfileSummaryInfo;
76class SDDbgValue;
77class SDDbgOperand;
78class SDDbgLabel;
79class SelectionDAG;
80class SelectionDAGTargetInfo;
81class TargetLibraryInfo;
82class TargetLowering;
83class TargetMachine;
84class TargetSubtargetInfo;
85class Value;
86
87class SDVTListNode : public FoldingSetNode {
88 friend struct FoldingSetTrait<SDVTListNode>;
89
90 /// A reference to an Interned FoldingSetNodeID for this node.
91 /// The Allocator in SelectionDAG holds the data.
92 /// SDVTList contains all types which are frequently accessed in SelectionDAG.
93 /// The size of this list is not expected to be big so it won't introduce
94 /// a memory penalty.
95 FoldingSetNodeIDRef FastID;
96 const EVT *VTs;
97 unsigned int NumVTs;
98 /// The hash value for SDVTList is fixed, so cache it to avoid
99 /// hash calculation.
100 unsigned HashValue;
101
102public:
103 SDVTListNode(const FoldingSetNodeIDRef ID, const EVT *VT, unsigned int Num) :
104 FastID(ID), VTs(VT), NumVTs(Num) {
105 HashValue = ID.ComputeHash();
106 }
107
108 SDVTList getSDVTList() {
109 SDVTList result = {VTs, NumVTs};
110 return result;
111 }
112};
113
114/// Specialize FoldingSetTrait for SDVTListNode
115/// to avoid computing temp FoldingSetNodeID and hash value.
116template<> struct FoldingSetTrait<SDVTListNode> : DefaultFoldingSetTrait<SDVTListNode> {
117 static void Profile(const SDVTListNode &X, FoldingSetNodeID& ID) {
118 ID = X.FastID;
119 }
120
121 static bool Equals(const SDVTListNode &X, const FoldingSetNodeID &ID,
122 unsigned IDHash, FoldingSetNodeID &TempID) {
123 if (X.HashValue != IDHash)
124 return false;
125 return ID == X.FastID;
126 }
127
128 static unsigned ComputeHash(const SDVTListNode &X, FoldingSetNodeID &TempID) {
129 return X.HashValue;
130 }
131};
132
133template <> struct ilist_alloc_traits<SDNode> {
134 static void deleteNode(SDNode *) {
135 llvm_unreachable("ilist_traits<SDNode> shouldn't see a deleteNode call!")::llvm::llvm_unreachable_internal("ilist_traits<SDNode> shouldn't see a deleteNode call!"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 135)
;
136 }
137};
138
139/// Keeps track of dbg_value information through SDISel. We do
140/// not build SDNodes for these so as not to perturb the generated code;
141/// instead the info is kept off to the side in this structure. Each SDNode may
142/// have one or more associated dbg_value entries. This information is kept in
143/// DbgValMap.
144/// Byval parameters are handled separately because they don't use alloca's,
145/// which busts the normal mechanism. There is good reason for handling all
146/// parameters separately: they may not have code generated for them, they
147/// should always go at the beginning of the function regardless of other code
148/// motion, and debug info for them is potentially useful even if the parameter
149/// is unused. Right now only byval parameters are handled separately.
150class SDDbgInfo {
151 BumpPtrAllocator Alloc;
152 SmallVector<SDDbgValue*, 32> DbgValues;
153 SmallVector<SDDbgValue*, 32> ByvalParmDbgValues;
154 SmallVector<SDDbgLabel*, 4> DbgLabels;
155 using DbgValMapType = DenseMap<const SDNode *, SmallVector<SDDbgValue *, 2>>;
156 DbgValMapType DbgValMap;
157
158public:
159 SDDbgInfo() = default;
160 SDDbgInfo(const SDDbgInfo &) = delete;
161 SDDbgInfo &operator=(const SDDbgInfo &) = delete;
162
163 void add(SDDbgValue *V, bool isParameter);
164
165 void add(SDDbgLabel *L) { DbgLabels.push_back(L); }
166
167 /// Invalidate all DbgValues attached to the node and remove
168 /// it from the Node-to-DbgValues map.
169 void erase(const SDNode *Node);
170
171 void clear() {
172 DbgValMap.clear();
173 DbgValues.clear();
174 ByvalParmDbgValues.clear();
175 DbgLabels.clear();
176 Alloc.Reset();
177 }
178
179 BumpPtrAllocator &getAlloc() { return Alloc; }
180
181 bool empty() const {
182 return DbgValues.empty() && ByvalParmDbgValues.empty() && DbgLabels.empty();
183 }
184
185 ArrayRef<SDDbgValue*> getSDDbgValues(const SDNode *Node) const {
186 auto I = DbgValMap.find(Node);
187 if (I != DbgValMap.end())
188 return I->second;
189 return ArrayRef<SDDbgValue*>();
190 }
191
192 using DbgIterator = SmallVectorImpl<SDDbgValue*>::iterator;
193 using DbgLabelIterator = SmallVectorImpl<SDDbgLabel*>::iterator;
194
195 DbgIterator DbgBegin() { return DbgValues.begin(); }
196 DbgIterator DbgEnd() { return DbgValues.end(); }
197 DbgIterator ByvalParmDbgBegin() { return ByvalParmDbgValues.begin(); }
198 DbgIterator ByvalParmDbgEnd() { return ByvalParmDbgValues.end(); }
199 DbgLabelIterator DbgLabelBegin() { return DbgLabels.begin(); }
200 DbgLabelIterator DbgLabelEnd() { return DbgLabels.end(); }
201};
202
203void checkForCycles(const SelectionDAG *DAG, bool force = false);
204
205/// This is used to represent a portion of an LLVM function in a low-level
206/// Data Dependence DAG representation suitable for instruction selection.
207/// This DAG is constructed as the first step of instruction selection in order
208/// to allow implementation of machine specific optimizations
209/// and code simplifications.
210///
211/// The representation used by the SelectionDAG is a target-independent
212/// representation, which has some similarities to the GCC RTL representation,
213/// but is significantly more simple, powerful, and is a graph form instead of a
214/// linear form.
215///
216class SelectionDAG {
217 const TargetMachine &TM;
218 const SelectionDAGTargetInfo *TSI = nullptr;
219 const TargetLowering *TLI = nullptr;
220 const TargetLibraryInfo *LibInfo = nullptr;
221 MachineFunction *MF;
222 Pass *SDAGISelPass = nullptr;
223 LLVMContext *Context;
224 CodeGenOpt::Level OptLevel;
225
226 LegacyDivergenceAnalysis * DA = nullptr;
227 FunctionLoweringInfo * FLI = nullptr;
228
229 /// The function-level optimization remark emitter. Used to emit remarks
230 /// whenever manipulating the DAG.
231 OptimizationRemarkEmitter *ORE;
232
233 ProfileSummaryInfo *PSI = nullptr;
234 BlockFrequencyInfo *BFI = nullptr;
235
236 /// The starting token.
237 SDNode EntryNode;
238
239 /// The root of the entire DAG.
240 SDValue Root;
241
242 /// A linked list of nodes in the current DAG.
243 ilist<SDNode> AllNodes;
244
245 /// The AllocatorType for allocating SDNodes. We use
246 /// pool allocation with recycling.
247 using NodeAllocatorType = RecyclingAllocator<BumpPtrAllocator, SDNode,
248 sizeof(LargestSDNode),
249 alignof(MostAlignedSDNode)>;
250
251 /// Pool allocation for nodes.
252 NodeAllocatorType NodeAllocator;
253
254 /// This structure is used to memoize nodes, automatically performing
255 /// CSE with existing nodes when a duplicate is requested.
256 FoldingSet<SDNode> CSEMap;
257
258 /// Pool allocation for machine-opcode SDNode operands.
259 BumpPtrAllocator OperandAllocator;
260 ArrayRecycler<SDUse> OperandRecycler;
261
262 /// Pool allocation for misc. objects that are created once per SelectionDAG.
263 BumpPtrAllocator Allocator;
264
265 /// Tracks dbg_value and dbg_label information through SDISel.
266 SDDbgInfo *DbgInfo;
267
268 using CallSiteInfo = MachineFunction::CallSiteInfo;
269 using CallSiteInfoImpl = MachineFunction::CallSiteInfoImpl;
270
271 struct CallSiteDbgInfo {
272 CallSiteInfo CSInfo;
273 MDNode *HeapAllocSite = nullptr;
274 bool NoMerge = false;
275 };
276
277 DenseMap<const SDNode *, CallSiteDbgInfo> SDCallSiteDbgInfo;
278
279 uint16_t NextPersistentId = 0;
280
281public:
282 /// Clients of various APIs that cause global effects on
283 /// the DAG can optionally implement this interface. This allows the clients
284 /// to handle the various sorts of updates that happen.
285 ///
286 /// A DAGUpdateListener automatically registers itself with DAG when it is
287 /// constructed, and removes itself when destroyed in RAII fashion.
288 struct DAGUpdateListener {
289 DAGUpdateListener *const Next;
290 SelectionDAG &DAG;
291
292 explicit DAGUpdateListener(SelectionDAG &D)
293 : Next(D.UpdateListeners), DAG(D) {
294 DAG.UpdateListeners = this;
295 }
296
297 virtual ~DAGUpdateListener() {
298 assert(DAG.UpdateListeners == this &&((DAG.UpdateListeners == this && "DAGUpdateListeners must be destroyed in LIFO order"
) ? static_cast<void> (0) : __assert_fail ("DAG.UpdateListeners == this && \"DAGUpdateListeners must be destroyed in LIFO order\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 299, __PRETTY_FUNCTION__))
299 "DAGUpdateListeners must be destroyed in LIFO order")((DAG.UpdateListeners == this && "DAGUpdateListeners must be destroyed in LIFO order"
) ? static_cast<void> (0) : __assert_fail ("DAG.UpdateListeners == this && \"DAGUpdateListeners must be destroyed in LIFO order\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 299, __PRETTY_FUNCTION__))
;
300 DAG.UpdateListeners = Next;
301 }
302
303 /// The node N that was deleted and, if E is not null, an
304 /// equivalent node E that replaced it.
305 virtual void NodeDeleted(SDNode *N, SDNode *E);
306
307 /// The node N that was updated.
308 virtual void NodeUpdated(SDNode *N);
309
310 /// The node N that was inserted.
311 virtual void NodeInserted(SDNode *N);
312 };
313
314 struct DAGNodeDeletedListener : public DAGUpdateListener {
315 std::function<void(SDNode *, SDNode *)> Callback;
316
317 DAGNodeDeletedListener(SelectionDAG &DAG,
318 std::function<void(SDNode *, SDNode *)> Callback)
319 : DAGUpdateListener(DAG), Callback(std::move(Callback)) {}
320
321 void NodeDeleted(SDNode *N, SDNode *E) override { Callback(N, E); }
322
323 private:
324 virtual void anchor();
325 };
326
327 /// Help to insert SDNodeFlags automatically in transforming. Use
328 /// RAII to save and resume flags in current scope.
329 class FlagInserter {
330 SelectionDAG &DAG;
331 SDNodeFlags Flags;
332 FlagInserter *LastInserter;
333
334 public:
335 FlagInserter(SelectionDAG &SDAG, SDNodeFlags Flags)
336 : DAG(SDAG), Flags(Flags),
337 LastInserter(SDAG.getFlagInserter()) {
338 SDAG.setFlagInserter(this);
339 }
340 FlagInserter(SelectionDAG &SDAG, SDNode *N)
341 : FlagInserter(SDAG, N->getFlags()) {}
342
343 FlagInserter(const FlagInserter &) = delete;
344 FlagInserter &operator=(const FlagInserter &) = delete;
345 ~FlagInserter() { DAG.setFlagInserter(LastInserter); }
346
347 SDNodeFlags getFlags() const { return Flags; }
348 };
349
350 /// When true, additional steps are taken to
351 /// ensure that getConstant() and similar functions return DAG nodes that
352 /// have legal types. This is important after type legalization since
353 /// any illegally typed nodes generated after this point will not experience
354 /// type legalization.
355 bool NewNodesMustHaveLegalTypes = false;
356
357private:
358 /// DAGUpdateListener is a friend so it can manipulate the listener stack.
359 friend struct DAGUpdateListener;
360
361 /// Linked list of registered DAGUpdateListener instances.
362 /// This stack is maintained by DAGUpdateListener RAII.
363 DAGUpdateListener *UpdateListeners = nullptr;
364
365 /// Implementation of setSubgraphColor.
366 /// Return whether we had to truncate the search.
367 bool setSubgraphColorHelper(SDNode *N, const char *Color,
368 DenseSet<SDNode *> &visited,
369 int level, bool &printed);
370
371 template <typename SDNodeT, typename... ArgTypes>
372 SDNodeT *newSDNode(ArgTypes &&... Args) {
373 return new (NodeAllocator.template Allocate<SDNodeT>())
374 SDNodeT(std::forward<ArgTypes>(Args)...);
375 }
376
377 /// Build a synthetic SDNodeT with the given args and extract its subclass
378 /// data as an integer (e.g. for use in a folding set).
379 ///
380 /// The args to this function are the same as the args to SDNodeT's
381 /// constructor, except the second arg (assumed to be a const DebugLoc&) is
382 /// omitted.
383 template <typename SDNodeT, typename... ArgTypes>
384 static uint16_t getSyntheticNodeSubclassData(unsigned IROrder,
385 ArgTypes &&... Args) {
386 // The compiler can reduce this expression to a constant iff we pass an
387 // empty DebugLoc. Thankfully, the debug location doesn't have any bearing
388 // on the subclass data.
389 return SDNodeT(IROrder, DebugLoc(), std::forward<ArgTypes>(Args)...)
390 .getRawSubclassData();
391 }
392
393 template <typename SDNodeTy>
394 static uint16_t getSyntheticNodeSubclassData(unsigned Opc, unsigned Order,
395 SDVTList VTs, EVT MemoryVT,
396 MachineMemOperand *MMO) {
397 return SDNodeTy(Opc, Order, DebugLoc(), VTs, MemoryVT, MMO)
398 .getRawSubclassData();
399 }
400
401 void createOperands(SDNode *Node, ArrayRef<SDValue> Vals);
402
403 void removeOperands(SDNode *Node) {
404 if (!Node->OperandList)
405 return;
406 OperandRecycler.deallocate(
407 ArrayRecycler<SDUse>::Capacity::get(Node->NumOperands),
408 Node->OperandList);
409 Node->NumOperands = 0;
410 Node->OperandList = nullptr;
411 }
412 void CreateTopologicalOrder(std::vector<SDNode*>& Order);
413
414public:
415 // Maximum depth for recursive analysis such as computeKnownBits, etc.
416 static constexpr unsigned MaxRecursionDepth = 6;
417
418 explicit SelectionDAG(const TargetMachine &TM, CodeGenOpt::Level);
419 SelectionDAG(const SelectionDAG &) = delete;
420 SelectionDAG &operator=(const SelectionDAG &) = delete;
421 ~SelectionDAG();
422
423 /// Prepare this SelectionDAG to process code in the given MachineFunction.
424 void init(MachineFunction &NewMF, OptimizationRemarkEmitter &NewORE,
425 Pass *PassPtr, const TargetLibraryInfo *LibraryInfo,
426 LegacyDivergenceAnalysis * Divergence,
427 ProfileSummaryInfo *PSIin, BlockFrequencyInfo *BFIin);
428
429 void setFunctionLoweringInfo(FunctionLoweringInfo * FuncInfo) {
430 FLI = FuncInfo;
431 }
432
433 /// Clear state and free memory necessary to make this
434 /// SelectionDAG ready to process a new block.
435 void clear();
436
437 MachineFunction &getMachineFunction() const { return *MF; }
438 const Pass *getPass() const { return SDAGISelPass; }
439
440 const DataLayout &getDataLayout() const { return MF->getDataLayout(); }
441 const TargetMachine &getTarget() const { return TM; }
442 const TargetSubtargetInfo &getSubtarget() const { return MF->getSubtarget(); }
443 const TargetLowering &getTargetLoweringInfo() const { return *TLI; }
444 const TargetLibraryInfo &getLibInfo() const { return *LibInfo; }
445 const SelectionDAGTargetInfo &getSelectionDAGInfo() const { return *TSI; }
446 const LegacyDivergenceAnalysis *getDivergenceAnalysis() const { return DA; }
447 LLVMContext *getContext() const { return Context; }
448 OptimizationRemarkEmitter &getORE() const { return *ORE; }
449 ProfileSummaryInfo *getPSI() const { return PSI; }
450 BlockFrequencyInfo *getBFI() const { return BFI; }
451
452 FlagInserter *getFlagInserter() { return Inserter; }
453 void setFlagInserter(FlagInserter *FI) { Inserter = FI; }
454
455 /// Just dump dot graph to a user-provided path and title.
456 /// This doesn't open the dot viewer program and
457 /// helps visualization when outside debugging session.
458 /// FileName expects absolute path. If provided
459 /// without any path separators then the file
460 /// will be created in the current directory.
461 /// Error will be emitted if the path is insane.
462#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
463 LLVM_DUMP_METHOD__attribute__((noinline)) __attribute__((__used__)) void dumpDotGraph(const Twine &FileName, const Twine &Title);
464#endif
465
466 /// Pop up a GraphViz/gv window with the DAG rendered using 'dot'.
467 void viewGraph(const std::string &Title);
468 void viewGraph();
469
470#ifndef NDEBUG
471 std::map<const SDNode *, std::string> NodeGraphAttrs;
472#endif
473
474 /// Clear all previously defined node graph attributes.
475 /// Intended to be used from a debugging tool (eg. gdb).
476 void clearGraphAttrs();
477
478 /// Set graph attributes for a node. (eg. "color=red".)
479 void setGraphAttrs(const SDNode *N, const char *Attrs);
480
481 /// Get graph attributes for a node. (eg. "color=red".)
482 /// Used from getNodeAttributes.
483 std::string getGraphAttrs(const SDNode *N) const;
484
485 /// Convenience for setting node color attribute.
486 void setGraphColor(const SDNode *N, const char *Color);
487
488 /// Convenience for setting subgraph color attribute.
489 void setSubgraphColor(SDNode *N, const char *Color);
490
491 using allnodes_const_iterator = ilist<SDNode>::const_iterator;
492
493 allnodes_const_iterator allnodes_begin() const { return AllNodes.begin(); }
494 allnodes_const_iterator allnodes_end() const { return AllNodes.end(); }
495
496 using allnodes_iterator = ilist<SDNode>::iterator;
497
498 allnodes_iterator allnodes_begin() { return AllNodes.begin(); }
499 allnodes_iterator allnodes_end() { return AllNodes.end(); }
500
501 ilist<SDNode>::size_type allnodes_size() const {
502 return AllNodes.size();
503 }
504
505 iterator_range<allnodes_iterator> allnodes() {
506 return make_range(allnodes_begin(), allnodes_end());
507 }
508 iterator_range<allnodes_const_iterator> allnodes() const {
509 return make_range(allnodes_begin(), allnodes_end());
510 }
511
512 /// Return the root tag of the SelectionDAG.
513 const SDValue &getRoot() const { return Root; }
514
515 /// Return the token chain corresponding to the entry of the function.
516 SDValue getEntryNode() const {
517 return SDValue(const_cast<SDNode *>(&EntryNode), 0);
518 }
519
520 /// Set the current root tag of the SelectionDAG.
521 ///
522 const SDValue &setRoot(SDValue N) {
523 assert((!N.getNode() || N.getValueType() == MVT::Other) &&(((!N.getNode() || N.getValueType() == MVT::Other) &&
"DAG root value is not a chain!") ? static_cast<void> (
0) : __assert_fail ("(!N.getNode() || N.getValueType() == MVT::Other) && \"DAG root value is not a chain!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 524, __PRETTY_FUNCTION__))
524 "DAG root value is not a chain!")(((!N.getNode() || N.getValueType() == MVT::Other) &&
"DAG root value is not a chain!") ? static_cast<void> (
0) : __assert_fail ("(!N.getNode() || N.getValueType() == MVT::Other) && \"DAG root value is not a chain!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 524, __PRETTY_FUNCTION__))
;
525 if (N.getNode())
526 checkForCycles(N.getNode(), this);
527 Root = N;
528 if (N.getNode())
529 checkForCycles(this);
530 return Root;
531 }
532
533#ifndef NDEBUG
534 void VerifyDAGDiverence();
535#endif
536
537 /// This iterates over the nodes in the SelectionDAG, folding
538 /// certain types of nodes together, or eliminating superfluous nodes. The
539 /// Level argument controls whether Combine is allowed to produce nodes and
540 /// types that are illegal on the target.
541 void Combine(CombineLevel Level, AAResults *AA,
542 CodeGenOpt::Level OptLevel);
543
544 /// This transforms the SelectionDAG into a SelectionDAG that
545 /// only uses types natively supported by the target.
546 /// Returns "true" if it made any changes.
547 ///
548 /// Note that this is an involved process that may invalidate pointers into
549 /// the graph.
550 bool LegalizeTypes();
551
552 /// This transforms the SelectionDAG into a SelectionDAG that is
553 /// compatible with the target instruction selector, as indicated by the
554 /// TargetLowering object.
555 ///
556 /// Note that this is an involved process that may invalidate pointers into
557 /// the graph.
558 void Legalize();
559
560 /// Transforms a SelectionDAG node and any operands to it into a node
561 /// that is compatible with the target instruction selector, as indicated by
562 /// the TargetLowering object.
563 ///
564 /// \returns true if \c N is a valid, legal node after calling this.
565 ///
566 /// This essentially runs a single recursive walk of the \c Legalize process
567 /// over the given node (and its operands). This can be used to incrementally
568 /// legalize the DAG. All of the nodes which are directly replaced,
569 /// potentially including N, are added to the output parameter \c
570 /// UpdatedNodes so that the delta to the DAG can be understood by the
571 /// caller.
572 ///
573 /// When this returns false, N has been legalized in a way that make the
574 /// pointer passed in no longer valid. It may have even been deleted from the
575 /// DAG, and so it shouldn't be used further. When this returns true, the
576 /// N passed in is a legal node, and can be immediately processed as such.
577 /// This may still have done some work on the DAG, and will still populate
578 /// UpdatedNodes with any new nodes replacing those originally in the DAG.
579 bool LegalizeOp(SDNode *N, SmallSetVector<SDNode *, 16> &UpdatedNodes);
580
581 /// This transforms the SelectionDAG into a SelectionDAG
582 /// that only uses vector math operations supported by the target. This is
583 /// necessary as a separate step from Legalize because unrolling a vector
584 /// operation can introduce illegal types, which requires running
585 /// LegalizeTypes again.
586 ///
587 /// This returns true if it made any changes; in that case, LegalizeTypes
588 /// is called again before Legalize.
589 ///
590 /// Note that this is an involved process that may invalidate pointers into
591 /// the graph.
592 bool LegalizeVectors();
593
594 /// This method deletes all unreachable nodes in the SelectionDAG.
595 void RemoveDeadNodes();
596
597 /// Remove the specified node from the system. This node must
598 /// have no referrers.
599 void DeleteNode(SDNode *N);
600
601 /// Return an SDVTList that represents the list of values specified.
602 SDVTList getVTList(EVT VT);
603 SDVTList getVTList(EVT VT1, EVT VT2);
604 SDVTList getVTList(EVT VT1, EVT VT2, EVT VT3);
605 SDVTList getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4);
606 SDVTList getVTList(ArrayRef<EVT> VTs);
607
608 //===--------------------------------------------------------------------===//
609 // Node creation methods.
610
611 /// Create a ConstantSDNode wrapping a constant value.
612 /// If VT is a vector type, the constant is splatted into a BUILD_VECTOR.
613 ///
614 /// If only legal types can be produced, this does the necessary
615 /// transformations (e.g., if the vector element type is illegal).
616 /// @{
617 SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT,
618 bool isTarget = false, bool isOpaque = false);
619 SDValue getConstant(const APInt &Val, const SDLoc &DL, EVT VT,
620 bool isTarget = false, bool isOpaque = false);
621
622 SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget = false,
623 bool IsOpaque = false) {
624 return getConstant(APInt::getAllOnesValue(VT.getScalarSizeInBits()), DL,
625 VT, IsTarget, IsOpaque);
626 }
627
628 SDValue getConstant(const ConstantInt &Val, const SDLoc &DL, EVT VT,
629 bool isTarget = false, bool isOpaque = false);
630 SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL,
631 bool isTarget = false);
632 SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL,
633 bool LegalTypes = true);
634 SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL,
635 bool isTarget = false);
636
637 SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT,
638 bool isOpaque = false) {
639 return getConstant(Val, DL, VT, true, isOpaque);
640 }
641 SDValue getTargetConstant(const APInt &Val, const SDLoc &DL, EVT VT,
642 bool isOpaque = false) {
643 return getConstant(Val, DL, VT, true, isOpaque);
644 }
645 SDValue getTargetConstant(const ConstantInt &Val, const SDLoc &DL, EVT VT,
646 bool isOpaque = false) {
647 return getConstant(Val, DL, VT, true, isOpaque);
648 }
649
650 /// Create a true or false constant of type \p VT using the target's
651 /// BooleanContent for type \p OpVT.
652 SDValue getBoolConstant(bool V, const SDLoc &DL, EVT VT, EVT OpVT);
653 /// @}
654
655 /// Create a ConstantFPSDNode wrapping a constant value.
656 /// If VT is a vector type, the constant is splatted into a BUILD_VECTOR.
657 ///
658 /// If only legal types can be produced, this does the necessary
659 /// transformations (e.g., if the vector element type is illegal).
660 /// The forms that take a double should only be used for simple constants
661 /// that can be exactly represented in VT. No checks are made.
662 /// @{
663 SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT,
664 bool isTarget = false);
665 SDValue getConstantFP(const APFloat &Val, const SDLoc &DL, EVT VT,
666 bool isTarget = false);
667 SDValue getConstantFP(const ConstantFP &V, const SDLoc &DL, EVT VT,
668 bool isTarget = false);
669 SDValue getTargetConstantFP(double Val, const SDLoc &DL, EVT VT) {
670 return getConstantFP(Val, DL, VT, true);
671 }
672 SDValue getTargetConstantFP(const APFloat &Val, const SDLoc &DL, EVT VT) {
673 return getConstantFP(Val, DL, VT, true);
674 }
675 SDValue getTargetConstantFP(const ConstantFP &Val, const SDLoc &DL, EVT VT) {
676 return getConstantFP(Val, DL, VT, true);
677 }
678 /// @}
679
680 SDValue getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT,
681 int64_t offset = 0, bool isTargetGA = false,
682 unsigned TargetFlags = 0);
683 SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT,
684 int64_t offset = 0, unsigned TargetFlags = 0) {
685 return getGlobalAddress(GV, DL, VT, offset, true, TargetFlags);
686 }
687 SDValue getFrameIndex(int FI, EVT VT, bool isTarget = false);
688 SDValue getTargetFrameIndex(int FI, EVT VT) {
689 return getFrameIndex(FI, VT, true);
690 }
691 SDValue getJumpTable(int JTI, EVT VT, bool isTarget = false,
692 unsigned TargetFlags = 0);
693 SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags = 0) {
694 return getJumpTable(JTI, VT, true, TargetFlags);
695 }
696 SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align = None,
697 int Offs = 0, bool isT = false,
698 unsigned TargetFlags = 0);
699 SDValue getTargetConstantPool(const Constant *C, EVT VT,
700 MaybeAlign Align = None, int Offset = 0,
701 unsigned TargetFlags = 0) {
702 return getConstantPool(C, VT, Align, Offset, true, TargetFlags);
703 }
704 SDValue getConstantPool(MachineConstantPoolValue *C, EVT VT,
705 MaybeAlign Align = None, int Offs = 0,
706 bool isT = false, unsigned TargetFlags = 0);
707 SDValue getTargetConstantPool(MachineConstantPoolValue *C, EVT VT,
708 MaybeAlign Align = None, int Offset = 0,
709 unsigned TargetFlags = 0) {
710 return getConstantPool(C, VT, Align, Offset, true, TargetFlags);
711 }
712 SDValue getTargetIndex(int Index, EVT VT, int64_t Offset = 0,
713 unsigned TargetFlags = 0);
714 // When generating a branch to a BB, we don't in general know enough
715 // to provide debug info for the BB at that time, so keep this one around.
716 SDValue getBasicBlock(MachineBasicBlock *MBB);
717 SDValue getExternalSymbol(const char *Sym, EVT VT);
718 SDValue getTargetExternalSymbol(const char *Sym, EVT VT,
719 unsigned TargetFlags = 0);
720 SDValue getMCSymbol(MCSymbol *Sym, EVT VT);
721
722 SDValue getValueType(EVT);
723 SDValue getRegister(unsigned Reg, EVT VT);
724 SDValue getRegisterMask(const uint32_t *RegMask);
725 SDValue getEHLabel(const SDLoc &dl, SDValue Root, MCSymbol *Label);
726 SDValue getLabelNode(unsigned Opcode, const SDLoc &dl, SDValue Root,
727 MCSymbol *Label);
728 SDValue getBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset = 0,
729 bool isTarget = false, unsigned TargetFlags = 0);
730 SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT,
731 int64_t Offset = 0, unsigned TargetFlags = 0) {
732 return getBlockAddress(BA, VT, Offset, true, TargetFlags);
733 }
734
735 SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg,
736 SDValue N) {
737 return getNode(ISD::CopyToReg, dl, MVT::Other, Chain,
738 getRegister(Reg, N.getValueType()), N);
739 }
740
741 // This version of the getCopyToReg method takes an extra operand, which
742 // indicates that there is potentially an incoming glue value (if Glue is not
743 // null) and that there should be a glue result.
744 SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N,
745 SDValue Glue) {
746 SDVTList VTs = getVTList(MVT::Other, MVT::Glue);
747 SDValue Ops[] = { Chain, getRegister(Reg, N.getValueType()), N, Glue };
748 return getNode(ISD::CopyToReg, dl, VTs,
749 makeArrayRef(Ops, Glue.getNode() ? 4 : 3));
750 }
751
752 // Similar to last getCopyToReg() except parameter Reg is a SDValue
753 SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, SDValue Reg, SDValue N,
754 SDValue Glue) {
755 SDVTList VTs = getVTList(MVT::Other, MVT::Glue);
756 SDValue Ops[] = { Chain, Reg, N, Glue };
757 return getNode(ISD::CopyToReg, dl, VTs,
758 makeArrayRef(Ops, Glue.getNode() ? 4 : 3));
759 }
760
761 SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT) {
762 SDVTList VTs = getVTList(VT, MVT::Other);
763 SDValue Ops[] = { Chain, getRegister(Reg, VT) };
764 return getNode(ISD::CopyFromReg, dl, VTs, Ops);
765 }
766
767 // This version of the getCopyFromReg method takes an extra operand, which
768 // indicates that there is potentially an incoming glue value (if Glue is not
769 // null) and that there should be a glue result.
770 SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT,
771 SDValue Glue) {
772 SDVTList VTs = getVTList(VT, MVT::Other, MVT::Glue);
773 SDValue Ops[] = { Chain, getRegister(Reg, VT), Glue };
774 return getNode(ISD::CopyFromReg, dl, VTs,
775 makeArrayRef(Ops, Glue.getNode() ? 3 : 2));
776 }
777
778 SDValue getCondCode(ISD::CondCode Cond);
779
780 /// Return an ISD::VECTOR_SHUFFLE node. The number of elements in VT,
781 /// which must be a vector type, must match the number of mask elements
782 /// NumElts. An integer mask element equal to -1 is treated as undefined.
783 SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2,
784 ArrayRef<int> Mask);
785
786 /// Return an ISD::BUILD_VECTOR node. The number of elements in VT,
787 /// which must be a vector type, must match the number of operands in Ops.
788 /// The operands must have the same type as (or, for integers, a type wider
789 /// than) VT's element type.
790 SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef<SDValue> Ops) {
791 // VerifySDNode (via InsertNode) checks BUILD_VECTOR later.
792 return getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
793 }
794
795 /// Return an ISD::BUILD_VECTOR node. The number of elements in VT,
796 /// which must be a vector type, must match the number of operands in Ops.
797 /// The operands must have the same type as (or, for integers, a type wider
798 /// than) VT's element type.
799 SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef<SDUse> Ops) {
800 // VerifySDNode (via InsertNode) checks BUILD_VECTOR later.
801 return getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
802 }
803
804 /// Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all
805 /// elements. VT must be a vector type. Op's type must be the same as (or,
806 /// for integers, a type wider than) VT's element type.
807 SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op) {
808 // VerifySDNode (via InsertNode) checks BUILD_VECTOR later.
809 if (Op.getOpcode() == ISD::UNDEF) {
25
Calling 'SDValue::getOpcode'
810 assert((VT.getVectorElementType() == Op.getValueType() ||(((VT.getVectorElementType() == Op.getValueType() || (VT.isInteger
() && VT.getVectorElementType().bitsLE(Op.getValueType
()))) && "A splatted value must have a width equal or (for integers) "
"greater than the vector element type!") ? static_cast<void
> (0) : __assert_fail ("(VT.getVectorElementType() == Op.getValueType() || (VT.isInteger() && VT.getVectorElementType().bitsLE(Op.getValueType()))) && \"A splatted value must have a width equal or (for integers) \" \"greater than the vector element type!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 814, __PRETTY_FUNCTION__))
811 (VT.isInteger() &&(((VT.getVectorElementType() == Op.getValueType() || (VT.isInteger
() && VT.getVectorElementType().bitsLE(Op.getValueType
()))) && "A splatted value must have a width equal or (for integers) "
"greater than the vector element type!") ? static_cast<void
> (0) : __assert_fail ("(VT.getVectorElementType() == Op.getValueType() || (VT.isInteger() && VT.getVectorElementType().bitsLE(Op.getValueType()))) && \"A splatted value must have a width equal or (for integers) \" \"greater than the vector element type!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 814, __PRETTY_FUNCTION__))
812 VT.getVectorElementType().bitsLE(Op.getValueType()))) &&(((VT.getVectorElementType() == Op.getValueType() || (VT.isInteger
() && VT.getVectorElementType().bitsLE(Op.getValueType
()))) && "A splatted value must have a width equal or (for integers) "
"greater than the vector element type!") ? static_cast<void
> (0) : __assert_fail ("(VT.getVectorElementType() == Op.getValueType() || (VT.isInteger() && VT.getVectorElementType().bitsLE(Op.getValueType()))) && \"A splatted value must have a width equal or (for integers) \" \"greater than the vector element type!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 814, __PRETTY_FUNCTION__))
813 "A splatted value must have a width equal or (for integers) "(((VT.getVectorElementType() == Op.getValueType() || (VT.isInteger
() && VT.getVectorElementType().bitsLE(Op.getValueType
()))) && "A splatted value must have a width equal or (for integers) "
"greater than the vector element type!") ? static_cast<void
> (0) : __assert_fail ("(VT.getVectorElementType() == Op.getValueType() || (VT.isInteger() && VT.getVectorElementType().bitsLE(Op.getValueType()))) && \"A splatted value must have a width equal or (for integers) \" \"greater than the vector element type!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 814, __PRETTY_FUNCTION__))
814 "greater than the vector element type!")(((VT.getVectorElementType() == Op.getValueType() || (VT.isInteger
() && VT.getVectorElementType().bitsLE(Op.getValueType
()))) && "A splatted value must have a width equal or (for integers) "
"greater than the vector element type!") ? static_cast<void
> (0) : __assert_fail ("(VT.getVectorElementType() == Op.getValueType() || (VT.isInteger() && VT.getVectorElementType().bitsLE(Op.getValueType()))) && \"A splatted value must have a width equal or (for integers) \" \"greater than the vector element type!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 814, __PRETTY_FUNCTION__))
;
815 return getNode(ISD::UNDEF, SDLoc(), VT);
816 }
817
818 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Op);
819 return getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
820 }
821
822 // Return a splat ISD::SPLAT_VECTOR node, consisting of Op splatted to all
823 // elements.
824 SDValue getSplatVector(EVT VT, const SDLoc &DL, SDValue Op) {
825 if (Op.getOpcode() == ISD::UNDEF) {
826 assert((VT.getVectorElementType() == Op.getValueType() ||(((VT.getVectorElementType() == Op.getValueType() || (VT.isInteger
() && VT.getVectorElementType().bitsLE(Op.getValueType
()))) && "A splatted value must have a width equal or (for integers) "
"greater than the vector element type!") ? static_cast<void
> (0) : __assert_fail ("(VT.getVectorElementType() == Op.getValueType() || (VT.isInteger() && VT.getVectorElementType().bitsLE(Op.getValueType()))) && \"A splatted value must have a width equal or (for integers) \" \"greater than the vector element type!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 830, __PRETTY_FUNCTION__))
827 (VT.isInteger() &&(((VT.getVectorElementType() == Op.getValueType() || (VT.isInteger
() && VT.getVectorElementType().bitsLE(Op.getValueType
()))) && "A splatted value must have a width equal or (for integers) "
"greater than the vector element type!") ? static_cast<void
> (0) : __assert_fail ("(VT.getVectorElementType() == Op.getValueType() || (VT.isInteger() && VT.getVectorElementType().bitsLE(Op.getValueType()))) && \"A splatted value must have a width equal or (for integers) \" \"greater than the vector element type!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 830, __PRETTY_FUNCTION__))
828 VT.getVectorElementType().bitsLE(Op.getValueType()))) &&(((VT.getVectorElementType() == Op.getValueType() || (VT.isInteger
() && VT.getVectorElementType().bitsLE(Op.getValueType
()))) && "A splatted value must have a width equal or (for integers) "
"greater than the vector element type!") ? static_cast<void
> (0) : __assert_fail ("(VT.getVectorElementType() == Op.getValueType() || (VT.isInteger() && VT.getVectorElementType().bitsLE(Op.getValueType()))) && \"A splatted value must have a width equal or (for integers) \" \"greater than the vector element type!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 830, __PRETTY_FUNCTION__))
829 "A splatted value must have a width equal or (for integers) "(((VT.getVectorElementType() == Op.getValueType() || (VT.isInteger
() && VT.getVectorElementType().bitsLE(Op.getValueType
()))) && "A splatted value must have a width equal or (for integers) "
"greater than the vector element type!") ? static_cast<void
> (0) : __assert_fail ("(VT.getVectorElementType() == Op.getValueType() || (VT.isInteger() && VT.getVectorElementType().bitsLE(Op.getValueType()))) && \"A splatted value must have a width equal or (for integers) \" \"greater than the vector element type!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 830, __PRETTY_FUNCTION__))
830 "greater than the vector element type!")(((VT.getVectorElementType() == Op.getValueType() || (VT.isInteger
() && VT.getVectorElementType().bitsLE(Op.getValueType
()))) && "A splatted value must have a width equal or (for integers) "
"greater than the vector element type!") ? static_cast<void
> (0) : __assert_fail ("(VT.getVectorElementType() == Op.getValueType() || (VT.isInteger() && VT.getVectorElementType().bitsLE(Op.getValueType()))) && \"A splatted value must have a width equal or (for integers) \" \"greater than the vector element type!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 830, __PRETTY_FUNCTION__))
;
831 return getNode(ISD::UNDEF, SDLoc(), VT);
832 }
833 return getNode(ISD::SPLAT_VECTOR, DL, VT, Op);
834 }
835
836 /// Returns a vector of type ResVT whose elements contain the linear sequence
837 /// <0, Step, Step * 2, Step * 3, ...>
838 SDValue getStepVector(const SDLoc &DL, EVT ResVT, SDValue Step);
839
840 /// Returns an ISD::VECTOR_SHUFFLE node semantically equivalent to
841 /// the shuffle node in input but with swapped operands.
842 ///
843 /// Example: shuffle A, B, <0,5,2,7> -> shuffle B, A, <4,1,6,3>
844 SDValue getCommutedVectorShuffle(const ShuffleVectorSDNode &SV);
845
846 /// Convert Op, which must be of float type, to the
847 /// float type VT, by either extending or rounding (by truncation).
848 SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT);
849
850 /// Convert Op, which must be a STRICT operation of float type, to the
851 /// float type VT, by either extending or rounding (by truncation).
852 std::pair<SDValue, SDValue>
853 getStrictFPExtendOrRound(SDValue Op, SDValue Chain, const SDLoc &DL, EVT VT);
854
855 /// Convert Op, which must be of integer type, to the
856 /// integer type VT, by either any-extending or truncating it.
857 SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT);
858
859 /// Convert Op, which must be of integer type, to the
860 /// integer type VT, by either sign-extending or truncating it.
861 SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT);
862
863 /// Convert Op, which must be of integer type, to the
864 /// integer type VT, by either zero-extending or truncating it.
865 SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT);
866
867 /// Return the expression required to zero extend the Op
868 /// value assuming it was the smaller SrcTy value.
869 SDValue getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT);
870
871 /// Convert Op, which must be of integer type, to the integer type VT, by
872 /// either truncating it or performing either zero or sign extension as
873 /// appropriate extension for the pointer's semantics.
874 SDValue getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT);
875
876 /// Return the expression required to extend the Op as a pointer value
877 /// assuming it was the smaller SrcTy value. This may be either a zero extend
878 /// or a sign extend.
879 SDValue getPtrExtendInReg(SDValue Op, const SDLoc &DL, EVT VT);
880
881 /// Convert Op, which must be of integer type, to the integer type VT,
882 /// by using an extension appropriate for the target's
883 /// BooleanContent for type OpVT or truncating it.
884 SDValue getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT, EVT OpVT);
885
886 /// Create a bitwise NOT operation as (XOR Val, -1).
887 SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT);
888
889 /// Create a logical NOT operation as (XOR Val, BooleanOne).
890 SDValue getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT);
891
892 /// Returns sum of the base pointer and offset.
893 /// Unlike getObjectPtrOffset this does not set NoUnsignedWrap by default.
894 SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL,
895 const SDNodeFlags Flags = SDNodeFlags());
896 SDValue getMemBasePlusOffset(SDValue Base, SDValue Offset, const SDLoc &DL,
897 const SDNodeFlags Flags = SDNodeFlags());
898
899 /// Create an add instruction with appropriate flags when used for
900 /// addressing some offset of an object. i.e. if a load is split into multiple
901 /// components, create an add nuw from the base pointer to the offset.
902 SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset) {
903 SDNodeFlags Flags;
904 Flags.setNoUnsignedWrap(true);
905 return getMemBasePlusOffset(Ptr, Offset, SL, Flags);
906 }
907
908 SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, SDValue Offset) {
909 // The object itself can't wrap around the address space, so it shouldn't be
910 // possible for the adds of the offsets to the split parts to overflow.
911 SDNodeFlags Flags;
912 Flags.setNoUnsignedWrap(true);
913 return getMemBasePlusOffset(Ptr, Offset, SL, Flags);
914 }
915
916 /// Return a new CALLSEQ_START node, that starts new call frame, in which
917 /// InSize bytes are set up inside CALLSEQ_START..CALLSEQ_END sequence and
918 /// OutSize specifies part of the frame set up prior to the sequence.
919 SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize,
920 const SDLoc &DL) {
921 SDVTList VTs = getVTList(MVT::Other, MVT::Glue);
922 SDValue Ops[] = { Chain,
923 getIntPtrConstant(InSize, DL, true),
924 getIntPtrConstant(OutSize, DL, true) };
925 return getNode(ISD::CALLSEQ_START, DL, VTs, Ops);
926 }
927
928 /// Return a new CALLSEQ_END node, which always must have a
929 /// glue result (to ensure it's not CSE'd).
930 /// CALLSEQ_END does not have a useful SDLoc.
931 SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2,
932 SDValue InGlue, const SDLoc &DL) {
933 SDVTList NodeTys = getVTList(MVT::Other, MVT::Glue);
934 SmallVector<SDValue, 4> Ops;
935 Ops.push_back(Chain);
936 Ops.push_back(Op1);
937 Ops.push_back(Op2);
938 if (InGlue.getNode())
939 Ops.push_back(InGlue);
940 return getNode(ISD::CALLSEQ_END, DL, NodeTys, Ops);
941 }
942
943 /// Return true if the result of this operation is always undefined.
944 bool isUndef(unsigned Opcode, ArrayRef<SDValue> Ops);
945
946 /// Return an UNDEF node. UNDEF does not have a useful SDLoc.
947 SDValue getUNDEF(EVT VT) {
948 return getNode(ISD::UNDEF, SDLoc(), VT);
949 }
950
951 /// Return a node that represents the runtime scaling 'MulImm * RuntimeVL'.
952 SDValue getVScale(const SDLoc &DL, EVT VT, APInt MulImm) {
953 assert(MulImm.getMinSignedBits() <= VT.getSizeInBits() &&((MulImm.getMinSignedBits() <= VT.getSizeInBits() &&
"Immediate does not fit VT") ? static_cast<void> (0) :
__assert_fail ("MulImm.getMinSignedBits() <= VT.getSizeInBits() && \"Immediate does not fit VT\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 954, __PRETTY_FUNCTION__))
954 "Immediate does not fit VT")((MulImm.getMinSignedBits() <= VT.getSizeInBits() &&
"Immediate does not fit VT") ? static_cast<void> (0) :
__assert_fail ("MulImm.getMinSignedBits() <= VT.getSizeInBits() && \"Immediate does not fit VT\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 954, __PRETTY_FUNCTION__))
;
955 return getNode(ISD::VSCALE, DL, VT,
956 getConstant(MulImm.sextOrTrunc(VT.getSizeInBits()), DL, VT));
957 }
958
959 /// Return a GLOBAL_OFFSET_TABLE node. This does not have a useful SDLoc.
960 SDValue getGLOBAL_OFFSET_TABLE(EVT VT) {
961 return getNode(ISD::GLOBAL_OFFSET_TABLE, SDLoc(), VT);
962 }
963
964 /// Gets or creates the specified node.
965 ///
966 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
967 ArrayRef<SDUse> Ops);
968 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
969 ArrayRef<SDValue> Ops, const SDNodeFlags Flags);
970 SDValue getNode(unsigned Opcode, const SDLoc &DL, ArrayRef<EVT> ResultTys,
971 ArrayRef<SDValue> Ops);
972 SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
973 ArrayRef<SDValue> Ops, const SDNodeFlags Flags);
974
975 // Use flags from current flag inserter.
976 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
977 ArrayRef<SDValue> Ops);
978 SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
979 ArrayRef<SDValue> Ops);
980 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue Operand);
981 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
982 SDValue N2);
983 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
984 SDValue N2, SDValue N3);
985
986 // Specialize based on number of operands.
987 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT);
988 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue Operand,
989 const SDNodeFlags Flags);
990 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
991 SDValue N2, const SDNodeFlags Flags);
992 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
993 SDValue N2, SDValue N3, const SDNodeFlags Flags);
994 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
995 SDValue N2, SDValue N3, SDValue N4);
996 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
997 SDValue N2, SDValue N3, SDValue N4, SDValue N5);
998
999 // Specialize again based on number of operands for nodes with a VTList
1000 // rather than a single VT.
1001 SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList);
1002 SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, SDValue N);
1003 SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, SDValue N1,
1004 SDValue N2);
1005 SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, SDValue N1,
1006 SDValue N2, SDValue N3);
1007 SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, SDValue N1,
1008 SDValue N2, SDValue N3, SDValue N4);
1009 SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, SDValue N1,
1010 SDValue N2, SDValue N3, SDValue N4, SDValue N5);
1011
1012 /// Compute a TokenFactor to force all the incoming stack arguments to be
1013 /// loaded from the stack. This is used in tail call lowering to protect
1014 /// stack arguments from being clobbered.
1015 SDValue getStackArgumentTokenFactor(SDValue Chain);
1016
1017 LLVM_ATTRIBUTE_DEPRECATED(SDValue getMemcpy(SDValue Chain, const SDLoc &dl,[[deprecated("Use the version that takes Align instead")]] SDValue
getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue
Src, SDValue Size, unsigned Align, bool isVol, bool AlwaysInline
, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo
SrcPtrInfo)
1018 SDValue Dst, SDValue Src,[[deprecated("Use the version that takes Align instead")]] SDValue
getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue
Src, SDValue Size, unsigned Align, bool isVol, bool AlwaysInline
, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo
SrcPtrInfo)
1019 SDValue Size, unsigned Align,[[deprecated("Use the version that takes Align instead")]] SDValue
getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue
Src, SDValue Size, unsigned Align, bool isVol, bool AlwaysInline
, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo
SrcPtrInfo)
1020 bool isVol, bool AlwaysInline,[[deprecated("Use the version that takes Align instead")]] SDValue
getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue
Src, SDValue Size, unsigned Align, bool isVol, bool AlwaysInline
, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo
SrcPtrInfo)
1021 bool isTailCall,[[deprecated("Use the version that takes Align instead")]] SDValue
getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue
Src, SDValue Size, unsigned Align, bool isVol, bool AlwaysInline
, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo
SrcPtrInfo)
1022 MachinePointerInfo DstPtrInfo,[[deprecated("Use the version that takes Align instead")]] SDValue
getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue
Src, SDValue Size, unsigned Align, bool isVol, bool AlwaysInline
, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo
SrcPtrInfo)
1023 MachinePointerInfo SrcPtrInfo),[[deprecated("Use the version that takes Align instead")]] SDValue
getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue
Src, SDValue Size, unsigned Align, bool isVol, bool AlwaysInline
, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo
SrcPtrInfo)
1024 "Use the version that takes Align instead")[[deprecated("Use the version that takes Align instead")]] SDValue
getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue
Src, SDValue Size, unsigned Align, bool isVol, bool AlwaysInline
, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo
SrcPtrInfo)
{
1025 return getMemcpy(Chain, dl, Dst, Src, Size, llvm::Align(Align), isVol,
1026 AlwaysInline, isTailCall, DstPtrInfo, SrcPtrInfo);
1027 }
1028
1029 SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src,
1030 SDValue Size, Align Alignment, bool isVol,
1031 bool AlwaysInline, bool isTailCall,
1032 MachinePointerInfo DstPtrInfo,
1033 MachinePointerInfo SrcPtrInfo);
1034
1035 LLVM_ATTRIBUTE_DEPRECATED(SDValue getMemmove(SDValue Chain, const SDLoc &dl,[[deprecated("Use the version that takes Align instead")]] SDValue
getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue
Src, SDValue Size, unsigned Align, bool isVol, bool isTailCall
, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo
)
1036 SDValue Dst, SDValue Src,[[deprecated("Use the version that takes Align instead")]] SDValue
getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue
Src, SDValue Size, unsigned Align, bool isVol, bool isTailCall
, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo
)
1037 SDValue Size, unsigned Align,[[deprecated("Use the version that takes Align instead")]] SDValue
getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue
Src, SDValue Size, unsigned Align, bool isVol, bool isTailCall
, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo
)
1038 bool isVol, bool isTailCall,[[deprecated("Use the version that takes Align instead")]] SDValue
getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue
Src, SDValue Size, unsigned Align, bool isVol, bool isTailCall
, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo
)
1039 MachinePointerInfo DstPtrInfo,[[deprecated("Use the version that takes Align instead")]] SDValue
getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue
Src, SDValue Size, unsigned Align, bool isVol, bool isTailCall
, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo
)
1040 MachinePointerInfo SrcPtrInfo),[[deprecated("Use the version that takes Align instead")]] SDValue
getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue
Src, SDValue Size, unsigned Align, bool isVol, bool isTailCall
, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo
)
1041 "Use the version that takes Align instead")[[deprecated("Use the version that takes Align instead")]] SDValue
getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue
Src, SDValue Size, unsigned Align, bool isVol, bool isTailCall
, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo
)
{
1042 return getMemmove(Chain, dl, Dst, Src, Size, llvm::Align(Align), isVol,
1043 isTailCall, DstPtrInfo, SrcPtrInfo);
1044 }
1045 SDValue getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src,
1046 SDValue Size, Align Alignment, bool isVol, bool isTailCall,
1047 MachinePointerInfo DstPtrInfo,
1048 MachinePointerInfo SrcPtrInfo);
1049
1050 LLVM_ATTRIBUTE_DEPRECATED(SDValue getMemset(SDValue Chain, const SDLoc &dl,[[deprecated("Use the version that takes Align instead")]] SDValue
getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue
Src, SDValue Size, unsigned Align, bool isVol, bool isTailCall
, MachinePointerInfo DstPtrInfo)
1051 SDValue Dst, SDValue Src,[[deprecated("Use the version that takes Align instead")]] SDValue
getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue
Src, SDValue Size, unsigned Align, bool isVol, bool isTailCall
, MachinePointerInfo DstPtrInfo)
1052 SDValue Size, unsigned Align,[[deprecated("Use the version that takes Align instead")]] SDValue
getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue
Src, SDValue Size, unsigned Align, bool isVol, bool isTailCall
, MachinePointerInfo DstPtrInfo)
1053 bool isVol, bool isTailCall,[[deprecated("Use the version that takes Align instead")]] SDValue
getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue
Src, SDValue Size, unsigned Align, bool isVol, bool isTailCall
, MachinePointerInfo DstPtrInfo)
1054 MachinePointerInfo DstPtrInfo),[[deprecated("Use the version that takes Align instead")]] SDValue
getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue
Src, SDValue Size, unsigned Align, bool isVol, bool isTailCall
, MachinePointerInfo DstPtrInfo)
1055 "Use the version that takes Align instead")[[deprecated("Use the version that takes Align instead")]] SDValue
getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue
Src, SDValue Size, unsigned Align, bool isVol, bool isTailCall
, MachinePointerInfo DstPtrInfo)
{
1056 return getMemset(Chain, dl, Dst, Src, Size, llvm::Align(Align), isVol,
1057 isTailCall, DstPtrInfo);
1058 }
1059 SDValue getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src,
1060 SDValue Size, Align Alignment, bool isVol, bool isTailCall,
1061 MachinePointerInfo DstPtrInfo);
1062
1063 SDValue getAtomicMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst,
1064 unsigned DstAlign, SDValue Src, unsigned SrcAlign,
1065 SDValue Size, Type *SizeTy, unsigned ElemSz,
1066 bool isTailCall, MachinePointerInfo DstPtrInfo,
1067 MachinePointerInfo SrcPtrInfo);
1068
1069 SDValue getAtomicMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst,
1070 unsigned DstAlign, SDValue Src, unsigned SrcAlign,
1071 SDValue Size, Type *SizeTy, unsigned ElemSz,
1072 bool isTailCall, MachinePointerInfo DstPtrInfo,
1073 MachinePointerInfo SrcPtrInfo);
1074
1075 SDValue getAtomicMemset(SDValue Chain, const SDLoc &dl, SDValue Dst,
1076 unsigned DstAlign, SDValue Value, SDValue Size,
1077 Type *SizeTy, unsigned ElemSz, bool isTailCall,
1078 MachinePointerInfo DstPtrInfo);
1079
1080 /// Helper function to make it easier to build SetCC's if you just have an
1081 /// ISD::CondCode instead of an SDValue.
1082 SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS,
1083 ISD::CondCode Cond, SDValue Chain = SDValue(),
1084 bool IsSignaling = false) {
1085 assert(LHS.getValueType().isVector() == RHS.getValueType().isVector() &&((LHS.getValueType().isVector() == RHS.getValueType().isVector
() && "Cannot compare scalars to vectors") ? static_cast
<void> (0) : __assert_fail ("LHS.getValueType().isVector() == RHS.getValueType().isVector() && \"Cannot compare scalars to vectors\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1086, __PRETTY_FUNCTION__))
1086 "Cannot compare scalars to vectors")((LHS.getValueType().isVector() == RHS.getValueType().isVector
() && "Cannot compare scalars to vectors") ? static_cast
<void> (0) : __assert_fail ("LHS.getValueType().isVector() == RHS.getValueType().isVector() && \"Cannot compare scalars to vectors\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1086, __PRETTY_FUNCTION__))
;
1087 assert(LHS.getValueType().isVector() == VT.isVector() &&((LHS.getValueType().isVector() == VT.isVector() && "Cannot compare scalars to vectors"
) ? static_cast<void> (0) : __assert_fail ("LHS.getValueType().isVector() == VT.isVector() && \"Cannot compare scalars to vectors\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1088, __PRETTY_FUNCTION__))
1088 "Cannot compare scalars to vectors")((LHS.getValueType().isVector() == VT.isVector() && "Cannot compare scalars to vectors"
) ? static_cast<void> (0) : __assert_fail ("LHS.getValueType().isVector() == VT.isVector() && \"Cannot compare scalars to vectors\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1088, __PRETTY_FUNCTION__))
;
1089 assert(Cond != ISD::SETCC_INVALID &&((Cond != ISD::SETCC_INVALID && "Cannot create a setCC of an invalid node."
) ? static_cast<void> (0) : __assert_fail ("Cond != ISD::SETCC_INVALID && \"Cannot create a setCC of an invalid node.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1090, __PRETTY_FUNCTION__))
1090 "Cannot create a setCC of an invalid node.")((Cond != ISD::SETCC_INVALID && "Cannot create a setCC of an invalid node."
) ? static_cast<void> (0) : __assert_fail ("Cond != ISD::SETCC_INVALID && \"Cannot create a setCC of an invalid node.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1090, __PRETTY_FUNCTION__))
;
1091 if (Chain)
1092 return getNode(IsSignaling ? ISD::STRICT_FSETCCS : ISD::STRICT_FSETCC, DL,
1093 {VT, MVT::Other}, {Chain, LHS, RHS, getCondCode(Cond)});
1094 return getNode(ISD::SETCC, DL, VT, LHS, RHS, getCondCode(Cond));
1095 }
1096
1097 /// Helper function to make it easier to build Select's if you just have
1098 /// operands and don't want to check for vector.
1099 SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS,
1100 SDValue RHS) {
1101 assert(LHS.getValueType() == RHS.getValueType() &&((LHS.getValueType() == RHS.getValueType() && "Cannot use select on differing types"
) ? static_cast<void> (0) : __assert_fail ("LHS.getValueType() == RHS.getValueType() && \"Cannot use select on differing types\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1102, __PRETTY_FUNCTION__))
1102 "Cannot use select on differing types")((LHS.getValueType() == RHS.getValueType() && "Cannot use select on differing types"
) ? static_cast<void> (0) : __assert_fail ("LHS.getValueType() == RHS.getValueType() && \"Cannot use select on differing types\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1102, __PRETTY_FUNCTION__))
;
1103 assert(VT.isVector() == LHS.getValueType().isVector() &&((VT.isVector() == LHS.getValueType().isVector() && "Cannot mix vectors and scalars"
) ? static_cast<void> (0) : __assert_fail ("VT.isVector() == LHS.getValueType().isVector() && \"Cannot mix vectors and scalars\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1104, __PRETTY_FUNCTION__))
1104 "Cannot mix vectors and scalars")((VT.isVector() == LHS.getValueType().isVector() && "Cannot mix vectors and scalars"
) ? static_cast<void> (0) : __assert_fail ("VT.isVector() == LHS.getValueType().isVector() && \"Cannot mix vectors and scalars\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1104, __PRETTY_FUNCTION__))
;
1105 auto Opcode = Cond.getValueType().isVector() ? ISD::VSELECT : ISD::SELECT;
1106 return getNode(Opcode, DL, VT, Cond, LHS, RHS);
1107 }
1108
1109 /// Helper function to make it easier to build SelectCC's if you just have an
1110 /// ISD::CondCode instead of an SDValue.
1111 SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True,
1112 SDValue False, ISD::CondCode Cond) {
1113 return getNode(ISD::SELECT_CC, DL, True.getValueType(), LHS, RHS, True,
1114 False, getCondCode(Cond));
1115 }
1116
1117 /// Try to simplify a select/vselect into 1 of its operands or a constant.
1118 SDValue simplifySelect(SDValue Cond, SDValue TVal, SDValue FVal);
1119
1120 /// Try to simplify a shift into 1 of its operands or a constant.
1121 SDValue simplifyShift(SDValue X, SDValue Y);
1122
1123 /// Try to simplify a floating-point binary operation into 1 of its operands
1124 /// or a constant.
1125 SDValue simplifyFPBinop(unsigned Opcode, SDValue X, SDValue Y,
1126 SDNodeFlags Flags);
1127
1128 /// VAArg produces a result and token chain, and takes a pointer
1129 /// and a source value as input.
1130 SDValue getVAArg(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
1131 SDValue SV, unsigned Align);
1132
1133 /// Gets a node for an atomic cmpxchg op. There are two
1134 /// valid Opcodes. ISD::ATOMIC_CMP_SWAP produces the value loaded and a
1135 /// chain result. ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS produces the value loaded,
1136 /// a success flag (initially i1), and a chain.
1137 SDValue getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, EVT MemVT,
1138 SDVTList VTs, SDValue Chain, SDValue Ptr,
1139 SDValue Cmp, SDValue Swp, MachineMemOperand *MMO);
1140
1141 /// Gets a node for an atomic op, produces result (if relevant)
1142 /// and chain and takes 2 operands.
1143 SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain,
1144 SDValue Ptr, SDValue Val, MachineMemOperand *MMO);
1145
1146 /// Gets a node for an atomic op, produces result and chain and
1147 /// takes 1 operand.
1148 SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, EVT VT,
1149 SDValue Chain, SDValue Ptr, MachineMemOperand *MMO);
1150
1151 /// Gets a node for an atomic op, produces result and chain and takes N
1152 /// operands.
1153 SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
1154 SDVTList VTList, ArrayRef<SDValue> Ops,
1155 MachineMemOperand *MMO);
1156
1157 /// Creates a MemIntrinsicNode that may produce a
1158 /// result and takes a list of operands. Opcode may be INTRINSIC_VOID,
1159 /// INTRINSIC_W_CHAIN, or a target-specific opcode with a value not
1160 /// less than FIRST_TARGET_MEMORY_OPCODE.
1161 SDValue getMemIntrinsicNode(
1162 unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
1163 EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment,
1164 MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad |
1165 MachineMemOperand::MOStore,
1166 uint64_t Size = 0, const AAMDNodes &AAInfo = AAMDNodes());
1167
1168 inline SDValue getMemIntrinsicNode(
1169 unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
1170 EVT MemVT, MachinePointerInfo PtrInfo, MaybeAlign Alignment = None,
1171 MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad |
1172 MachineMemOperand::MOStore,
1173 uint64_t Size = 0, const AAMDNodes &AAInfo = AAMDNodes()) {
1174 // Ensure that codegen never sees alignment 0
1175 return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, PtrInfo,
1176 Alignment.getValueOr(getEVTAlign(MemVT)), Flags,
1177 Size, AAInfo);
1178 }
1179
1180 LLVM_ATTRIBUTE_DEPRECATED([[deprecated("")]] inline SDValue getMemIntrinsicNode( unsigned
Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue
> Ops, EVT MemVT, MachinePointerInfo PtrInfo, unsigned Alignment
, MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad |
MachineMemOperand::MOStore, uint64_t Size = 0, const AAMDNodes
&AAInfo = AAMDNodes())
1181 inline SDValue getMemIntrinsicNode([[deprecated("")]] inline SDValue getMemIntrinsicNode( unsigned
Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue
> Ops, EVT MemVT, MachinePointerInfo PtrInfo, unsigned Alignment
, MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad |
MachineMemOperand::MOStore, uint64_t Size = 0, const AAMDNodes
&AAInfo = AAMDNodes())
1182 unsigned Opcode, const SDLoc &dl, SDVTList VTList,[[deprecated("")]] inline SDValue getMemIntrinsicNode( unsigned
Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue
> Ops, EVT MemVT, MachinePointerInfo PtrInfo, unsigned Alignment
, MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad |
MachineMemOperand::MOStore, uint64_t Size = 0, const AAMDNodes
&AAInfo = AAMDNodes())
1183 ArrayRef<SDValue> Ops, EVT MemVT, MachinePointerInfo PtrInfo,[[deprecated("")]] inline SDValue getMemIntrinsicNode( unsigned
Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue
> Ops, EVT MemVT, MachinePointerInfo PtrInfo, unsigned Alignment
, MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad |
MachineMemOperand::MOStore, uint64_t Size = 0, const AAMDNodes
&AAInfo = AAMDNodes())
1184 unsigned Alignment,[[deprecated("")]] inline SDValue getMemIntrinsicNode( unsigned
Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue
> Ops, EVT MemVT, MachinePointerInfo PtrInfo, unsigned Alignment
, MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad |
MachineMemOperand::MOStore, uint64_t Size = 0, const AAMDNodes
&AAInfo = AAMDNodes())
1185 MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad |[[deprecated("")]] inline SDValue getMemIntrinsicNode( unsigned
Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue
> Ops, EVT MemVT, MachinePointerInfo PtrInfo, unsigned Alignment
, MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad |
MachineMemOperand::MOStore, uint64_t Size = 0, const AAMDNodes
&AAInfo = AAMDNodes())
1186 MachineMemOperand::MOStore,[[deprecated("")]] inline SDValue getMemIntrinsicNode( unsigned
Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue
> Ops, EVT MemVT, MachinePointerInfo PtrInfo, unsigned Alignment
, MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad |
MachineMemOperand::MOStore, uint64_t Size = 0, const AAMDNodes
&AAInfo = AAMDNodes())
1187 uint64_t Size = 0, const AAMDNodes &AAInfo = AAMDNodes()),[[deprecated("")]] inline SDValue getMemIntrinsicNode( unsigned
Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue
> Ops, EVT MemVT, MachinePointerInfo PtrInfo, unsigned Alignment
, MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad |
MachineMemOperand::MOStore, uint64_t Size = 0, const AAMDNodes
&AAInfo = AAMDNodes())
1188 "")[[deprecated("")]] inline SDValue getMemIntrinsicNode( unsigned
Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue
> Ops, EVT MemVT, MachinePointerInfo PtrInfo, unsigned Alignment
, MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad |
MachineMemOperand::MOStore, uint64_t Size = 0, const AAMDNodes
&AAInfo = AAMDNodes())
{
1189 return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, PtrInfo,
1190 MaybeAlign(Alignment), Flags, Size, AAInfo);
1191 }
1192
1193 SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList,
1194 ArrayRef<SDValue> Ops, EVT MemVT,
1195 MachineMemOperand *MMO);
1196
1197 /// Creates a LifetimeSDNode that starts (`IsStart==true`) or ends
1198 /// (`IsStart==false`) the lifetime of the portion of `FrameIndex` between
1199 /// offsets `Offset` and `Offset + Size`.
1200 SDValue getLifetimeNode(bool IsStart, const SDLoc &dl, SDValue Chain,
1201 int FrameIndex, int64_t Size, int64_t Offset = -1);
1202
1203 /// Creates a PseudoProbeSDNode with function GUID `Guid` and
1204 /// the index of the block `Index` it is probing, as well as the attributes
1205 /// `attr` of the probe.
1206 SDValue getPseudoProbeNode(const SDLoc &Dl, SDValue Chain, uint64_t Guid,
1207 uint64_t Index, uint32_t Attr);
1208
1209 /// Create a MERGE_VALUES node from the given operands.
1210 SDValue getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl);
1211
1212 /// Loads are not normal binary operators: their result type is not
1213 /// determined by their operands, and they produce a value AND a token chain.
1214 ///
1215 /// This function will set the MOLoad flag on MMOFlags, but you can set it if
1216 /// you want. The MOStore flag must not be set.
1217 SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
1218 MachinePointerInfo PtrInfo,
1219 MaybeAlign Alignment = MaybeAlign(),
1220 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1221 const AAMDNodes &AAInfo = AAMDNodes(),
1222 const MDNode *Ranges = nullptr);
1223 /// FIXME: Remove once transition to Align is over.
1224 inline SDValue
1225 getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
1226 MachinePointerInfo PtrInfo, unsigned Alignment,
1227 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1228 const AAMDNodes &AAInfo = AAMDNodes(),
1229 const MDNode *Ranges = nullptr) {
1230 return getLoad(VT, dl, Chain, Ptr, PtrInfo, MaybeAlign(Alignment), MMOFlags,
1231 AAInfo, Ranges);
1232 }
1233 SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
1234 MachineMemOperand *MMO);
1235 SDValue
1236 getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain,
1237 SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT,
1238 MaybeAlign Alignment = MaybeAlign(),
1239 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1240 const AAMDNodes &AAInfo = AAMDNodes());
1241 /// FIXME: Remove once transition to Align is over.
1242 inline SDValue
1243 getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain,
1244 SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT,
1245 unsigned Alignment,
1246 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1247 const AAMDNodes &AAInfo = AAMDNodes()) {
1248 return getExtLoad(ExtType, dl, VT, Chain, Ptr, PtrInfo, MemVT,
1249 MaybeAlign(Alignment), MMOFlags, AAInfo);
1250 }
1251 SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT,
1252 SDValue Chain, SDValue Ptr, EVT MemVT,
1253 MachineMemOperand *MMO);
1254 SDValue getIndexedLoad(SDValue OrigLoad, const SDLoc &dl, SDValue Base,
1255 SDValue Offset, ISD::MemIndexedMode AM);
1256 SDValue getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT,
1257 const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset,
1258 MachinePointerInfo PtrInfo, EVT MemVT, Align Alignment,
1259 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1260 const AAMDNodes &AAInfo = AAMDNodes(),
1261 const MDNode *Ranges = nullptr);
1262 inline SDValue getLoad(
1263 ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &dl,
1264 SDValue Chain, SDValue Ptr, SDValue Offset, MachinePointerInfo PtrInfo,
1265 EVT MemVT, MaybeAlign Alignment = MaybeAlign(),
1266 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1267 const AAMDNodes &AAInfo = AAMDNodes(), const MDNode *Ranges = nullptr) {
1268 // Ensures that codegen never sees a None Alignment.
1269 return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, PtrInfo, MemVT,
1270 Alignment.getValueOr(getEVTAlign(MemVT)), MMOFlags, AAInfo,
1271 Ranges);
1272 }
1273 /// FIXME: Remove once transition to Align is over.
1274 inline SDValue
1275 getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT,
1276 const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset,
1277 MachinePointerInfo PtrInfo, EVT MemVT, unsigned Alignment,
1278 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1279 const AAMDNodes &AAInfo = AAMDNodes(),
1280 const MDNode *Ranges = nullptr) {
1281 return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, PtrInfo, MemVT,
1282 MaybeAlign(Alignment), MMOFlags, AAInfo, Ranges);
1283 }
1284 SDValue getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT,
1285 const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset,
1286 EVT MemVT, MachineMemOperand *MMO);
1287
1288 /// Helper function to build ISD::STORE nodes.
1289 ///
1290 /// This function will set the MOStore flag on MMOFlags, but you can set it if
1291 /// you want. The MOLoad and MOInvariant flags must not be set.
1292
1293 SDValue
1294 getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
1295 MachinePointerInfo PtrInfo, Align Alignment,
1296 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1297 const AAMDNodes &AAInfo = AAMDNodes());
1298 inline SDValue
1299 getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
1300 MachinePointerInfo PtrInfo, MaybeAlign Alignment = MaybeAlign(),
1301 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1302 const AAMDNodes &AAInfo = AAMDNodes()) {
1303 return getStore(Chain, dl, Val, Ptr, PtrInfo,
1304 Alignment.getValueOr(getEVTAlign(Val.getValueType())),
1305 MMOFlags, AAInfo);
1306 }
1307 /// FIXME: Remove once transition to Align is over.
1308 inline SDValue
1309 getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
1310 MachinePointerInfo PtrInfo, unsigned Alignment,
1311 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1312 const AAMDNodes &AAInfo = AAMDNodes()) {
1313 return getStore(Chain, dl, Val, Ptr, PtrInfo, MaybeAlign(Alignment),
1314 MMOFlags, AAInfo);
1315 }
1316 SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
1317 MachineMemOperand *MMO);
1318 SDValue
1319 getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
1320 MachinePointerInfo PtrInfo, EVT SVT, Align Alignment,
1321 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1322 const AAMDNodes &AAInfo = AAMDNodes());
1323 inline SDValue
1324 getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
1325 MachinePointerInfo PtrInfo, EVT SVT,
1326 MaybeAlign Alignment = MaybeAlign(),
1327 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1328 const AAMDNodes &AAInfo = AAMDNodes()) {
1329 return getTruncStore(Chain, dl, Val, Ptr, PtrInfo, SVT,
1330 Alignment.getValueOr(getEVTAlign(SVT)), MMOFlags,
1331 AAInfo);
1332 }
1333 /// FIXME: Remove once transition to Align is over.
1334 inline SDValue
1335 getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
1336 MachinePointerInfo PtrInfo, EVT SVT, unsigned Alignment,
1337 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1338 const AAMDNodes &AAInfo = AAMDNodes()) {
1339 return getTruncStore(Chain, dl, Val, Ptr, PtrInfo, SVT,
1340 MaybeAlign(Alignment), MMOFlags, AAInfo);
1341 }
1342 SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
1343 SDValue Ptr, EVT SVT, MachineMemOperand *MMO);
1344 SDValue getIndexedStore(SDValue OrigStore, const SDLoc &dl, SDValue Base,
1345 SDValue Offset, ISD::MemIndexedMode AM);
1346
1347 SDValue getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Base,
1348 SDValue Offset, SDValue Mask, SDValue Src0, EVT MemVT,
1349 MachineMemOperand *MMO, ISD::MemIndexedMode AM,
1350 ISD::LoadExtType, bool IsExpanding = false);
1351 SDValue getIndexedMaskedLoad(SDValue OrigLoad, const SDLoc &dl, SDValue Base,
1352 SDValue Offset, ISD::MemIndexedMode AM);
1353 SDValue getMaskedStore(SDValue Chain, const SDLoc &dl, SDValue Val,
1354 SDValue Base, SDValue Offset, SDValue Mask, EVT MemVT,
1355 MachineMemOperand *MMO, ISD::MemIndexedMode AM,
1356 bool IsTruncating = false, bool IsCompressing = false);
1357 SDValue getIndexedMaskedStore(SDValue OrigStore, const SDLoc &dl,
1358 SDValue Base, SDValue Offset,
1359 ISD::MemIndexedMode AM);
1360 SDValue getMaskedGather(SDVTList VTs, EVT VT, const SDLoc &dl,
1361 ArrayRef<SDValue> Ops, MachineMemOperand *MMO,
1362 ISD::MemIndexType IndexType, ISD::LoadExtType ExtTy);
1363 SDValue getMaskedScatter(SDVTList VTs, EVT VT, const SDLoc &dl,
1364 ArrayRef<SDValue> Ops, MachineMemOperand *MMO,
1365 ISD::MemIndexType IndexType,
1366 bool IsTruncating = false);
1367
1368 /// Construct a node to track a Value* through the backend.
1369 SDValue getSrcValue(const Value *v);
1370
1371 /// Return an MDNodeSDNode which holds an MDNode.
1372 SDValue getMDNode(const MDNode *MD);
1373
1374 /// Return a bitcast using the SDLoc of the value operand, and casting to the
1375 /// provided type. Use getNode to set a custom SDLoc.
1376 SDValue getBitcast(EVT VT, SDValue V);
1377
1378 /// Return an AddrSpaceCastSDNode.
1379 SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS,
1380 unsigned DestAS);
1381
1382 /// Return a freeze using the SDLoc of the value operand.
1383 SDValue getFreeze(SDValue V);
1384
1385 /// Return an AssertAlignSDNode.
1386 SDValue getAssertAlign(const SDLoc &DL, SDValue V, Align A);
1387
1388 /// Return the specified value casted to
1389 /// the target's desired shift amount type.
1390 SDValue getShiftAmountOperand(EVT LHSTy, SDValue Op);
1391
1392 /// Expand the specified \c ISD::VAARG node as the Legalize pass would.
1393 SDValue expandVAArg(SDNode *Node);
1394
1395 /// Expand the specified \c ISD::VACOPY node as the Legalize pass would.
1396 SDValue expandVACopy(SDNode *Node);
1397
1398 /// Returns a GlobalAddress of the function from the current module with
1399 /// name matching the given ExternalSymbol. Additionally can provide the
1400 /// matched function.
1401 /// Panics if the function doesn't exist.
1402 SDValue getSymbolFunctionGlobalAddress(SDValue Op,
1403 Function **TargetFunction = nullptr);
1404
1405 /// *Mutate* the specified node in-place to have the
1406 /// specified operands. If the resultant node already exists in the DAG,
1407 /// this does not modify the specified node, instead it returns the node that
1408 /// already exists. If the resultant node does not exist in the DAG, the
1409 /// input node is returned. As a degenerate case, if you specify the same
1410 /// input operands as the node already has, the input node is returned.
1411 SDNode *UpdateNodeOperands(SDNode *N, SDValue Op);
1412 SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2);
1413 SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
1414 SDValue Op3);
1415 SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
1416 SDValue Op3, SDValue Op4);
1417 SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
1418 SDValue Op3, SDValue Op4, SDValue Op5);
1419 SDNode *UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops);
1420
1421 /// Creates a new TokenFactor containing \p Vals. If \p Vals contains 64k
1422 /// values or more, move values into new TokenFactors in 64k-1 blocks, until
1423 /// the final TokenFactor has less than 64k operands.
1424 SDValue getTokenFactor(const SDLoc &DL, SmallVectorImpl<SDValue> &Vals);
1425
1426 /// *Mutate* the specified machine node's memory references to the provided
1427 /// list.
1428 void setNodeMemRefs(MachineSDNode *N,
1429 ArrayRef<MachineMemOperand *> NewMemRefs);
1430
1431 // Calculate divergence of node \p N based on its operands.
1432 bool calculateDivergence(SDNode *N);
1433
1434 // Propagates the change in divergence to users
1435 void updateDivergence(SDNode * N);
1436
1437 /// These are used for target selectors to *mutate* the
1438 /// specified node to have the specified return type, Target opcode, and
1439 /// operands. Note that target opcodes are stored as
1440 /// ~TargetOpcode in the node opcode field. The resultant node is returned.
1441 SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT);
1442 SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT, SDValue Op1);
1443 SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT,
1444 SDValue Op1, SDValue Op2);
1445 SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT,
1446 SDValue Op1, SDValue Op2, SDValue Op3);
1447 SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT,
1448 ArrayRef<SDValue> Ops);
1449 SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT1, EVT VT2);
1450 SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT1,
1451 EVT VT2, ArrayRef<SDValue> Ops);
1452 SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT1,
1453 EVT VT2, EVT VT3, ArrayRef<SDValue> Ops);
1454 SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT1,
1455 EVT VT2, SDValue Op1, SDValue Op2);
1456 SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, SDVTList VTs,
1457 ArrayRef<SDValue> Ops);
1458
1459 /// This *mutates* the specified node to have the specified
1460 /// return type, opcode, and operands.
1461 SDNode *MorphNodeTo(SDNode *N, unsigned Opc, SDVTList VTs,
1462 ArrayRef<SDValue> Ops);
1463
1464 /// Mutate the specified strict FP node to its non-strict equivalent,
1465 /// unlinking the node from its chain and dropping the metadata arguments.
1466 /// The node must be a strict FP node.
1467 SDNode *mutateStrictFPToFP(SDNode *Node);
1468
1469 /// These are used for target selectors to create a new node
1470 /// with specified return type(s), MachineInstr opcode, and operands.
1471 ///
1472 /// Note that getMachineNode returns the resultant node. If there is already
1473 /// a node of the specified opcode and operands, it returns that node instead
1474 /// of the current one.
1475 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT);
1476 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT,
1477 SDValue Op1);
1478 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT,
1479 SDValue Op1, SDValue Op2);
1480 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT,
1481 SDValue Op1, SDValue Op2, SDValue Op3);
1482 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT,
1483 ArrayRef<SDValue> Ops);
1484 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
1485 EVT VT2, SDValue Op1, SDValue Op2);
1486 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
1487 EVT VT2, SDValue Op1, SDValue Op2, SDValue Op3);
1488 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
1489 EVT VT2, ArrayRef<SDValue> Ops);
1490 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
1491 EVT VT2, EVT VT3, SDValue Op1, SDValue Op2);
1492 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
1493 EVT VT2, EVT VT3, SDValue Op1, SDValue Op2,
1494 SDValue Op3);
1495 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
1496 EVT VT2, EVT VT3, ArrayRef<SDValue> Ops);
1497 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl,
1498 ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops);
1499 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, SDVTList VTs,
1500 ArrayRef<SDValue> Ops);
1501
1502 /// A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
1503 SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT,
1504 SDValue Operand);
1505
1506 /// A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
1507 SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT,
1508 SDValue Operand, SDValue Subreg);
1509
1510 /// Get the specified node if it's already available, or else return NULL.
1511 SDNode *getNodeIfExists(unsigned Opcode, SDVTList VTList,
1512 ArrayRef<SDValue> Ops, const SDNodeFlags Flags);
1513 SDNode *getNodeIfExists(unsigned Opcode, SDVTList VTList,
1514 ArrayRef<SDValue> Ops);
1515
1516 /// Check if a node exists without modifying its flags.
1517 bool doesNodeExist(unsigned Opcode, SDVTList VTList, ArrayRef<SDValue> Ops);
1518
1519 /// Creates a SDDbgValue node.
1520 SDDbgValue *getDbgValue(DIVariable *Var, DIExpression *Expr, SDNode *N,
1521 unsigned R, bool IsIndirect, const DebugLoc &DL,
1522 unsigned O);
1523
1524 /// Creates a constant SDDbgValue node.
1525 SDDbgValue *getConstantDbgValue(DIVariable *Var, DIExpression *Expr,
1526 const Value *C, const DebugLoc &DL,
1527 unsigned O);
1528
1529 /// Creates a FrameIndex SDDbgValue node.
1530 SDDbgValue *getFrameIndexDbgValue(DIVariable *Var, DIExpression *Expr,
1531 unsigned FI, bool IsIndirect,
1532 const DebugLoc &DL, unsigned O);
1533
1534 /// Creates a FrameIndex SDDbgValue node.
1535 SDDbgValue *getFrameIndexDbgValue(DIVariable *Var, DIExpression *Expr,
1536 unsigned FI,
1537 ArrayRef<SDNode *> Dependencies,
1538 bool IsIndirect, const DebugLoc &DL,
1539 unsigned O);
1540
1541 /// Creates a VReg SDDbgValue node.
1542 SDDbgValue *getVRegDbgValue(DIVariable *Var, DIExpression *Expr,
1543 unsigned VReg, bool IsIndirect,
1544 const DebugLoc &DL, unsigned O);
1545
1546 /// Creates a SDDbgValue node from a list of locations.
1547 SDDbgValue *getDbgValueList(DIVariable *Var, DIExpression *Expr,
1548 ArrayRef<SDDbgOperand> Locs,
1549 ArrayRef<SDNode *> Dependencies, bool IsIndirect,
1550 const DebugLoc &DL, unsigned O, bool IsVariadic);
1551
1552 /// Creates a SDDbgLabel node.
1553 SDDbgLabel *getDbgLabel(DILabel *Label, const DebugLoc &DL, unsigned O);
1554
1555 /// Transfer debug values from one node to another, while optionally
1556 /// generating fragment expressions for split-up values. If \p InvalidateDbg
1557 /// is set, debug values are invalidated after they are transferred.
1558 void transferDbgValues(SDValue From, SDValue To, unsigned OffsetInBits = 0,
1559 unsigned SizeInBits = 0, bool InvalidateDbg = true);
1560
1561 /// Remove the specified node from the system. If any of its
1562 /// operands then becomes dead, remove them as well. Inform UpdateListener
1563 /// for each node deleted.
1564 void RemoveDeadNode(SDNode *N);
1565
1566 /// This method deletes the unreachable nodes in the
1567 /// given list, and any nodes that become unreachable as a result.
1568 void RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes);
1569
1570 /// Modify anything using 'From' to use 'To' instead.
1571 /// This can cause recursive merging of nodes in the DAG. Use the first
1572 /// version if 'From' is known to have a single result, use the second
1573 /// if you have two nodes with identical results (or if 'To' has a superset
1574 /// of the results of 'From'), use the third otherwise.
1575 ///
1576 /// These methods all take an optional UpdateListener, which (if not null) is
1577 /// informed about nodes that are deleted and modified due to recursive
1578 /// changes in the dag.
1579 ///
1580 /// These functions only replace all existing uses. It's possible that as
1581 /// these replacements are being performed, CSE may cause the From node
1582 /// to be given new uses. These new uses of From are left in place, and
1583 /// not automatically transferred to To.
1584 ///
1585 void ReplaceAllUsesWith(SDValue From, SDValue To);
1586 void ReplaceAllUsesWith(SDNode *From, SDNode *To);
1587 void ReplaceAllUsesWith(SDNode *From, const SDValue *To);
1588
1589 /// Replace any uses of From with To, leaving
1590 /// uses of other values produced by From.getNode() alone.
1591 void ReplaceAllUsesOfValueWith(SDValue From, SDValue To);
1592
1593 /// Like ReplaceAllUsesOfValueWith, but for multiple values at once.
1594 /// This correctly handles the case where
1595 /// there is an overlap between the From values and the To values.
1596 void ReplaceAllUsesOfValuesWith(const SDValue *From, const SDValue *To,
1597 unsigned Num);
1598
1599 /// If an existing load has uses of its chain, create a token factor node with
1600 /// that chain and the new memory node's chain and update users of the old
1601 /// chain to the token factor. This ensures that the new memory node will have
1602 /// the same relative memory dependency position as the old load. Returns the
1603 /// new merged load chain.
1604 SDValue makeEquivalentMemoryOrdering(SDValue OldChain, SDValue NewMemOpChain);
1605
1606 /// If an existing load has uses of its chain, create a token factor node with
1607 /// that chain and the new memory node's chain and update users of the old
1608 /// chain to the token factor. This ensures that the new memory node will have
1609 /// the same relative memory dependency position as the old load. Returns the
1610 /// new merged load chain.
1611 SDValue makeEquivalentMemoryOrdering(LoadSDNode *OldLoad, SDValue NewMemOp);
1612
1613 /// Topologically sort the AllNodes list and
1614 /// assign a unique node id to each node in the DAG based on its
1615 /// topological order. Returns the number of nodes.
1616 unsigned AssignTopologicalOrder();
1617
1618 /// Move node N in the AllNodes list to be immediately
1619 /// before the given iterator Position. This may be used to update the
1620 /// topological ordering when the list of nodes is modified.
1621 void RepositionNode(allnodes_iterator Position, SDNode *N) {
1622 AllNodes.insert(Position, AllNodes.remove(N));
1623 }
1624
1625 /// Returns an APFloat semantics tag appropriate for the given type. If VT is
1626 /// a vector type, the element semantics are returned.
1627 static const fltSemantics &EVTToAPFloatSemantics(EVT VT) {
1628 switch (VT.getScalarType().getSimpleVT().SimpleTy) {
1629 default: llvm_unreachable("Unknown FP format")::llvm::llvm_unreachable_internal("Unknown FP format", "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1629)
;
1630 case MVT::f16: return APFloat::IEEEhalf();
1631 case MVT::bf16: return APFloat::BFloat();
1632 case MVT::f32: return APFloat::IEEEsingle();
1633 case MVT::f64: return APFloat::IEEEdouble();
1634 case MVT::f80: return APFloat::x87DoubleExtended();
1635 case MVT::f128: return APFloat::IEEEquad();
1636 case MVT::ppcf128: return APFloat::PPCDoubleDouble();
1637 }
1638 }
1639
1640 /// Add a dbg_value SDNode. If SD is non-null that means the
1641 /// value is produced by SD.
1642 void AddDbgValue(SDDbgValue *DB, bool isParameter);
1643
1644 /// Add a dbg_label SDNode.
1645 void AddDbgLabel(SDDbgLabel *DB);
1646
1647 /// Get the debug values which reference the given SDNode.
1648 ArrayRef<SDDbgValue*> GetDbgValues(const SDNode* SD) const {
1649 return DbgInfo->getSDDbgValues(SD);
1650 }
1651
1652public:
1653 /// Return true if there are any SDDbgValue nodes associated
1654 /// with this SelectionDAG.
1655 bool hasDebugValues() const { return !DbgInfo->empty(); }
1656
1657 SDDbgInfo::DbgIterator DbgBegin() const { return DbgInfo->DbgBegin(); }
1658 SDDbgInfo::DbgIterator DbgEnd() const { return DbgInfo->DbgEnd(); }
1659
1660 SDDbgInfo::DbgIterator ByvalParmDbgBegin() const {
1661 return DbgInfo->ByvalParmDbgBegin();
1662 }
1663 SDDbgInfo::DbgIterator ByvalParmDbgEnd() const {
1664 return DbgInfo->ByvalParmDbgEnd();
1665 }
1666
1667 SDDbgInfo::DbgLabelIterator DbgLabelBegin() const {
1668 return DbgInfo->DbgLabelBegin();
1669 }
1670 SDDbgInfo::DbgLabelIterator DbgLabelEnd() const {
1671 return DbgInfo->DbgLabelEnd();
1672 }
1673
1674 /// To be invoked on an SDNode that is slated to be erased. This
1675 /// function mirrors \c llvm::salvageDebugInfo.
1676 void salvageDebugInfo(SDNode &N);
1677
1678 void dump() const;
1679
1680 /// In most cases this function returns the ABI alignment for a given type,
1681 /// except for illegal vector types where the alignment exceeds that of the
1682 /// stack. In such cases we attempt to break the vector down to a legal type
1683 /// and return the ABI alignment for that instead.
1684 Align getReducedAlign(EVT VT, bool UseABI);
1685
1686 /// Create a stack temporary based on the size in bytes and the alignment
1687 SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment);
1688
1689 /// Create a stack temporary, suitable for holding the specified value type.
1690 /// If minAlign is specified, the slot size will have at least that alignment.
1691 SDValue CreateStackTemporary(EVT VT, unsigned minAlign = 1);
1692
1693 /// Create a stack temporary suitable for holding either of the specified
1694 /// value types.
1695 SDValue CreateStackTemporary(EVT VT1, EVT VT2);
1696
1697 SDValue FoldSymbolOffset(unsigned Opcode, EVT VT,
1698 const GlobalAddressSDNode *GA,
1699 const SDNode *N2);
1700
1701 SDValue FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, EVT VT,
1702 ArrayRef<SDValue> Ops);
1703
1704 SDValue FoldConstantVectorArithmetic(unsigned Opcode, const SDLoc &DL, EVT VT,
1705 ArrayRef<SDValue> Ops,
1706 const SDNodeFlags Flags = SDNodeFlags());
1707
1708 /// Fold floating-point operations with 2 operands when both operands are
1709 /// constants and/or undefined.
1710 SDValue foldConstantFPMath(unsigned Opcode, const SDLoc &DL, EVT VT,
1711 SDValue N1, SDValue N2);
1712
1713 /// Constant fold a setcc to true or false.
1714 SDValue FoldSetCC(EVT VT, SDValue N1, SDValue N2, ISD::CondCode Cond,
1715 const SDLoc &dl);
1716
1717 /// See if the specified operand can be simplified with the knowledge that
1718 /// only the bits specified by DemandedBits are used. If so, return the
1719 /// simpler operand, otherwise return a null SDValue.
1720 ///
1721 /// (This exists alongside SimplifyDemandedBits because GetDemandedBits can
1722 /// simplify nodes with multiple uses more aggressively.)
1723 SDValue GetDemandedBits(SDValue V, const APInt &DemandedBits);
1724
1725 /// See if the specified operand can be simplified with the knowledge that
1726 /// only the bits specified by DemandedBits are used in the elements specified
1727 /// by DemandedElts. If so, return the simpler operand, otherwise return a
1728 /// null SDValue.
1729 ///
1730 /// (This exists alongside SimplifyDemandedBits because GetDemandedBits can
1731 /// simplify nodes with multiple uses more aggressively.)
1732 SDValue GetDemandedBits(SDValue V, const APInt &DemandedBits,
1733 const APInt &DemandedElts);
1734
1735 /// Return true if the sign bit of Op is known to be zero.
1736 /// We use this predicate to simplify operations downstream.
1737 bool SignBitIsZero(SDValue Op, unsigned Depth = 0) const;
1738
1739 /// Return true if 'Op & Mask' is known to be zero. We
1740 /// use this predicate to simplify operations downstream. Op and Mask are
1741 /// known to be the same type.
1742 bool MaskedValueIsZero(SDValue Op, const APInt &Mask,
1743 unsigned Depth = 0) const;
1744
1745 /// Return true if 'Op & Mask' is known to be zero in DemandedElts. We
1746 /// use this predicate to simplify operations downstream. Op and Mask are
1747 /// known to be the same type.
1748 bool MaskedValueIsZero(SDValue Op, const APInt &Mask,
1749 const APInt &DemandedElts, unsigned Depth = 0) const;
1750
1751 /// Return true if '(Op & Mask) == Mask'.
1752 /// Op and Mask are known to be the same type.
1753 bool MaskedValueIsAllOnes(SDValue Op, const APInt &Mask,
1754 unsigned Depth = 0) const;
1755
1756 /// Determine which bits of Op are known to be either zero or one and return
1757 /// them in Known. For vectors, the known bits are those that are shared by
1758 /// every vector element.
1759 /// Targets can implement the computeKnownBitsForTargetNode method in the
1760 /// TargetLowering class to allow target nodes to be understood.
1761 KnownBits computeKnownBits(SDValue Op, unsigned Depth = 0) const;
1762
1763 /// Determine which bits of Op are known to be either zero or one and return
1764 /// them in Known. The DemandedElts argument allows us to only collect the
1765 /// known bits that are shared by the requested vector elements.
1766 /// Targets can implement the computeKnownBitsForTargetNode method in the
1767 /// TargetLowering class to allow target nodes to be understood.
1768 KnownBits computeKnownBits(SDValue Op, const APInt &DemandedElts,
1769 unsigned Depth = 0) const;
1770
1771 /// Used to represent the possible overflow behavior of an operation.
1772 /// Never: the operation cannot overflow.
1773 /// Always: the operation will always overflow.
1774 /// Sometime: the operation may or may not overflow.
1775 enum OverflowKind {
1776 OFK_Never,
1777 OFK_Sometime,
1778 OFK_Always,
1779 };
1780
1781 /// Determine if the result of the addition of 2 node can overflow.
1782 OverflowKind computeOverflowKind(SDValue N0, SDValue N1) const;
1783
1784 /// Test if the given value is known to have exactly one bit set. This differs
1785 /// from computeKnownBits in that it doesn't necessarily determine which bit
1786 /// is set.
1787 bool isKnownToBeAPowerOfTwo(SDValue Val) const;
1788
1789 /// Return the number of times the sign bit of the register is replicated into
1790 /// the other bits. We know that at least 1 bit is always equal to the sign
1791 /// bit (itself), but other cases can give us information. For example,
1792 /// immediately after an "SRA X, 2", we know that the top 3 bits are all equal
1793 /// to each other, so we return 3. Targets can implement the
1794 /// ComputeNumSignBitsForTarget method in the TargetLowering class to allow
1795 /// target nodes to be understood.
1796 unsigned ComputeNumSignBits(SDValue Op, unsigned Depth = 0) const;
1797
1798 /// Return the number of times the sign bit of the register is replicated into
1799 /// the other bits. We know that at least 1 bit is always equal to the sign
1800 /// bit (itself), but other cases can give us information. For example,
1801 /// immediately after an "SRA X, 2", we know that the top 3 bits are all equal
1802 /// to each other, so we return 3. The DemandedElts argument allows
1803 /// us to only collect the minimum sign bits of the requested vector elements.
1804 /// Targets can implement the ComputeNumSignBitsForTarget method in the
1805 /// TargetLowering class to allow target nodes to be understood.
1806 unsigned ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
1807 unsigned Depth = 0) const;
1808
1809 /// Return true if the specified operand is an ISD::ADD with a ConstantSDNode
1810 /// on the right-hand side, or if it is an ISD::OR with a ConstantSDNode that
1811 /// is guaranteed to have the same semantics as an ADD. This handles the
1812 /// equivalence:
1813 /// X|Cst == X+Cst iff X&Cst = 0.
1814 bool isBaseWithConstantOffset(SDValue Op) const;
1815
1816 /// Test whether the given SDValue is known to never be NaN. If \p SNaN is
1817 /// true, returns if \p Op is known to never be a signaling NaN (it may still
1818 /// be a qNaN).
1819 bool isKnownNeverNaN(SDValue Op, bool SNaN = false, unsigned Depth = 0) const;
1820
1821 /// \returns true if \p Op is known to never be a signaling NaN.
1822 bool isKnownNeverSNaN(SDValue Op, unsigned Depth = 0) const {
1823 return isKnownNeverNaN(Op, true, Depth);
1824 }
1825
1826 /// Test whether the given floating point SDValue is known to never be
1827 /// positive or negative zero.
1828 bool isKnownNeverZeroFloat(SDValue Op) const;
1829
1830 /// Test whether the given SDValue is known to contain non-zero value(s).
1831 bool isKnownNeverZero(SDValue Op) const;
1832
1833 /// Test whether two SDValues are known to compare equal. This
1834 /// is true if they are the same value, or if one is negative zero and the
1835 /// other positive zero.
1836 bool isEqualTo(SDValue A, SDValue B) const;
1837
1838 /// Return true if A and B have no common bits set. As an example, this can
1839 /// allow an 'add' to be transformed into an 'or'.
1840 bool haveNoCommonBitsSet(SDValue A, SDValue B) const;
1841
1842 /// Test whether \p V has a splatted value for all the demanded elements.
1843 ///
1844 /// On success \p UndefElts will indicate the elements that have UNDEF
1845 /// values instead of the splat value, this is only guaranteed to be correct
1846 /// for \p DemandedElts.
1847 ///
1848 /// NOTE: The function will return true for a demanded splat of UNDEF values.
1849 bool isSplatValue(SDValue V, const APInt &DemandedElts, APInt &UndefElts,
1850 unsigned Depth = 0);
1851
1852 /// Test whether \p V has a splatted value.
1853 bool isSplatValue(SDValue V, bool AllowUndefs = false);
1854
1855 /// If V is a splatted value, return the source vector and its splat index.
1856 SDValue getSplatSourceVector(SDValue V, int &SplatIndex);
1857
1858 /// If V is a splat vector, return its scalar source operand by extracting
1859 /// that element from the source vector.
1860 SDValue getSplatValue(SDValue V);
1861
1862 /// If a SHL/SRA/SRL node \p V has a constant or splat constant shift amount
1863 /// that is less than the element bit-width of the shift node, return it.
1864 const APInt *getValidShiftAmountConstant(SDValue V,
1865 const APInt &DemandedElts) const;
1866
1867 /// If a SHL/SRA/SRL node \p V has constant shift amounts that are all less
1868 /// than the element bit-width of the shift node, return the minimum value.
1869 const APInt *
1870 getValidMinimumShiftAmountConstant(SDValue V,
1871 const APInt &DemandedElts) const;
1872
1873 /// If a SHL/SRA/SRL node \p V has constant shift amounts that are all less
1874 /// than the element bit-width of the shift node, return the maximum value.
1875 const APInt *
1876 getValidMaximumShiftAmountConstant(SDValue V,
1877 const APInt &DemandedElts) const;
1878
1879 /// Match a binop + shuffle pyramid that represents a horizontal reduction
1880 /// over the elements of a vector starting from the EXTRACT_VECTOR_ELT node /p
1881 /// Extract. The reduction must use one of the opcodes listed in /p
1882 /// CandidateBinOps and on success /p BinOp will contain the matching opcode.
1883 /// Returns the vector that is being reduced on, or SDValue() if a reduction
1884 /// was not matched. If \p AllowPartials is set then in the case of a
1885 /// reduction pattern that only matches the first few stages, the extracted
1886 /// subvector of the start of the reduction is returned.
1887 SDValue matchBinOpReduction(SDNode *Extract, ISD::NodeType &BinOp,
1888 ArrayRef<ISD::NodeType> CandidateBinOps,
1889 bool AllowPartials = false);
1890
1891 /// Utility function used by legalize and lowering to
1892 /// "unroll" a vector operation by splitting out the scalars and operating
1893 /// on each element individually. If the ResNE is 0, fully unroll the vector
1894 /// op. If ResNE is less than the width of the vector op, unroll up to ResNE.
1895 /// If the ResNE is greater than the width of the vector op, unroll the
1896 /// vector op and fill the end of the resulting vector with UNDEFS.
1897 SDValue UnrollVectorOp(SDNode *N, unsigned ResNE = 0);
1898
1899 /// Like UnrollVectorOp(), but for the [US](ADD|SUB|MUL)O family of opcodes.
1900 /// This is a separate function because those opcodes have two results.
1901 std::pair<SDValue, SDValue> UnrollVectorOverflowOp(SDNode *N,
1902 unsigned ResNE = 0);
1903
1904 /// Return true if loads are next to each other and can be
1905 /// merged. Check that both are nonvolatile and if LD is loading
1906 /// 'Bytes' bytes from a location that is 'Dist' units away from the
1907 /// location that the 'Base' load is loading from.
1908 bool areNonVolatileConsecutiveLoads(LoadSDNode *LD, LoadSDNode *Base,
1909 unsigned Bytes, int Dist) const;
1910
1911 /// Infer alignment of a load / store address. Return None if it cannot be
1912 /// inferred.
1913 MaybeAlign InferPtrAlign(SDValue Ptr) const;
1914
1915 LLVM_ATTRIBUTE_DEPRECATED(inline unsigned InferPtrAlignment(SDValue Ptr)[[deprecated("Use InferPtrAlign instead")]] inline unsigned InferPtrAlignment
(SDValue Ptr) const
1916 const,[[deprecated("Use InferPtrAlign instead")]] inline unsigned InferPtrAlignment
(SDValue Ptr) const
1917 "Use InferPtrAlign instead")[[deprecated("Use InferPtrAlign instead")]] inline unsigned InferPtrAlignment
(SDValue Ptr) const
{
1918 if (auto A = InferPtrAlign(Ptr))
1919 return A->value();
1920 return 0;
1921 }
1922
1923 /// Compute the VTs needed for the low/hi parts of a type
1924 /// which is split (or expanded) into two not necessarily identical pieces.
1925 std::pair<EVT, EVT> GetSplitDestVTs(const EVT &VT) const;
1926
1927 /// Compute the VTs needed for the low/hi parts of a type, dependent on an
1928 /// enveloping VT that has been split into two identical pieces. Sets the
1929 /// HisIsEmpty flag when hi type has zero storage size.
1930 std::pair<EVT, EVT> GetDependentSplitDestVTs(const EVT &VT, const EVT &EnvVT,
1931 bool *HiIsEmpty) const;
1932
1933 /// Split the vector with EXTRACT_SUBVECTOR using the provides
1934 /// VTs and return the low/high part.
1935 std::pair<SDValue, SDValue> SplitVector(const SDValue &N, const SDLoc &DL,
1936 const EVT &LoVT, const EVT &HiVT);
1937
1938 /// Split the vector with EXTRACT_SUBVECTOR and return the low/high part.
1939 std::pair<SDValue, SDValue> SplitVector(const SDValue &N, const SDLoc &DL) {
1940 EVT LoVT, HiVT;
1941 std::tie(LoVT, HiVT) = GetSplitDestVTs(N.getValueType());
1942 return SplitVector(N, DL, LoVT, HiVT);
1943 }
1944
1945 /// Split the node's operand with EXTRACT_SUBVECTOR and
1946 /// return the low/high part.
1947 std::pair<SDValue, SDValue> SplitVectorOperand(const SDNode *N, unsigned OpNo)
1948 {
1949 return SplitVector(N->getOperand(OpNo), SDLoc(N));
1950 }
1951
1952 /// Widen the vector up to the next power of two using INSERT_SUBVECTOR.
1953 SDValue WidenVector(const SDValue &N, const SDLoc &DL);
1954
1955 /// Append the extracted elements from Start to Count out of the vector Op in
1956 /// Args. If Count is 0, all of the elements will be extracted. The extracted
1957 /// elements will have type EVT if it is provided, and otherwise their type
1958 /// will be Op's element type.
1959 void ExtractVectorElements(SDValue Op, SmallVectorImpl<SDValue> &Args,
1960 unsigned Start = 0, unsigned Count = 0,
1961 EVT EltVT = EVT());
1962
1963 /// Compute the default alignment value for the given type.
1964 Align getEVTAlign(EVT MemoryVT) const;
1965 /// Compute the default alignment value for the given type.
1966 /// FIXME: Remove once transition to Align is over.
1967 inline unsigned getEVTAlignment(EVT MemoryVT) const {
1968 return getEVTAlign(MemoryVT).value();
1969 }
1970
1971 /// Test whether the given value is a constant int or similar node.
1972 SDNode *isConstantIntBuildVectorOrConstantInt(SDValue N) const;
1973
1974 /// Test whether the given value is a constant FP or similar node.
1975 SDNode *isConstantFPBuildVectorOrConstantFP(SDValue N) const ;
1976
1977 /// \returns true if \p N is any kind of constant or build_vector of
1978 /// constants, int or float. If a vector, it may not necessarily be a splat.
1979 inline bool isConstantValueOfAnyType(SDValue N) const {
1980 return isConstantIntBuildVectorOrConstantInt(N) ||
1981 isConstantFPBuildVectorOrConstantFP(N);
1982 }
1983
1984 void addCallSiteInfo(const SDNode *CallNode, CallSiteInfoImpl &&CallInfo) {
1985 SDCallSiteDbgInfo[CallNode].CSInfo = std::move(CallInfo);
1986 }
1987
1988 CallSiteInfo getSDCallSiteInfo(const SDNode *CallNode) {
1989 auto I = SDCallSiteDbgInfo.find(CallNode);
1990 if (I != SDCallSiteDbgInfo.end())
1991 return std::move(I->second).CSInfo;
1992 return CallSiteInfo();
1993 }
1994
1995 void addHeapAllocSite(const SDNode *Node, MDNode *MD) {
1996 SDCallSiteDbgInfo[Node].HeapAllocSite = MD;
1997 }
1998
1999 /// Return the HeapAllocSite type associated with the SDNode, if it exists.
2000 MDNode *getHeapAllocSite(const SDNode *Node) {
2001 auto It = SDCallSiteDbgInfo.find(Node);
2002 if (It == SDCallSiteDbgInfo.end())
2003 return nullptr;
2004 return It->second.HeapAllocSite;
2005 }
2006
2007 void addNoMergeSiteInfo(const SDNode *Node, bool NoMerge) {
2008 if (NoMerge)
2009 SDCallSiteDbgInfo[Node].NoMerge = NoMerge;
2010 }
2011
2012 bool getNoMergeSiteInfo(const SDNode *Node) {
2013 auto I = SDCallSiteDbgInfo.find(Node);
2014 if (I == SDCallSiteDbgInfo.end())
2015 return false;
2016 return I->second.NoMerge;
2017 }
2018
2019 /// Return the current function's default denormal handling kind for the given
2020 /// floating point type.
2021 DenormalMode getDenormalMode(EVT VT) const {
2022 return MF->getDenormalMode(EVTToAPFloatSemantics(VT));
2023 }
2024
2025 bool shouldOptForSize() const;
2026
2027 /// Get the (commutative) neutral element for the given opcode, if it exists.
2028 SDValue getNeutralElement(unsigned Opcode, const SDLoc &DL, EVT VT,
2029 SDNodeFlags Flags);
2030
2031private:
2032 void InsertNode(SDNode *N);
2033 bool RemoveNodeFromCSEMaps(SDNode *N);
2034 void AddModifiedNodeToCSEMaps(SDNode *N);
2035 SDNode *FindModifiedNodeSlot(SDNode *N, SDValue Op, void *&InsertPos);
2036 SDNode *FindModifiedNodeSlot(SDNode *N, SDValue Op1, SDValue Op2,
2037 void *&InsertPos);
2038 SDNode *FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
2039 void *&InsertPos);
2040 SDNode *UpdateSDLocOnMergeSDNode(SDNode *N, const SDLoc &loc);
2041
2042 void DeleteNodeNotInCSEMaps(SDNode *N);
2043 void DeallocateNode(SDNode *N);
2044
2045 void allnodes_clear();
2046
2047 /// Look up the node specified by ID in CSEMap. If it exists, return it. If
2048 /// not, return the insertion token that will make insertion faster. This
2049 /// overload is for nodes other than Constant or ConstantFP, use the other one
2050 /// for those.
2051 SDNode *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos);
2052
2053 /// Look up the node specified by ID in CSEMap. If it exists, return it. If
2054 /// not, return the insertion token that will make insertion faster. Performs
2055 /// additional processing for constant nodes.
2056 SDNode *FindNodeOrInsertPos(const FoldingSetNodeID &ID, const SDLoc &DL,
2057 void *&InsertPos);
2058
2059 /// List of non-single value types.
2060 FoldingSet<SDVTListNode> VTListMap;
2061
2062 /// Maps to auto-CSE operations.
2063 std::vector<CondCodeSDNode*> CondCodeNodes;
2064
2065 std::vector<SDNode*> ValueTypeNodes;
2066 std::map<EVT, SDNode*, EVT::compareRawBits> ExtendedValueTypeNodes;
2067 StringMap<SDNode*> ExternalSymbols;
2068
2069 std::map<std::pair<std::string, unsigned>, SDNode *> TargetExternalSymbols;
2070 DenseMap<MCSymbol *, SDNode *> MCSymbols;
2071
2072 FlagInserter *Inserter = nullptr;
2073};
2074
2075template <> struct GraphTraits<SelectionDAG*> : public GraphTraits<SDNode*> {
2076 using nodes_iterator = pointer_iterator<SelectionDAG::allnodes_iterator>;
2077
2078 static nodes_iterator nodes_begin(SelectionDAG *G) {
2079 return nodes_iterator(G->allnodes_begin());
2080 }
2081
2082 static nodes_iterator nodes_end(SelectionDAG *G) {
2083 return nodes_iterator(G->allnodes_end());
2084 }
2085};
2086
2087} // end namespace llvm
2088
2089#endif // LLVM_CODEGEN_SELECTIONDAG_H

/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAGNodes.h

1//===- llvm/CodeGen/SelectionDAGNodes.h - SelectionDAG Nodes ----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file declares the SDNode class and derived classes, which are used to
10// represent the nodes and operations present in a SelectionDAG. These nodes
11// and operations are machine code level operations, with some similarities to
12// the GCC RTL representation.
13//
14// Clients should include the SelectionDAG.h file instead of this file directly.
15//
16//===----------------------------------------------------------------------===//
17
18#ifndef LLVM_CODEGEN_SELECTIONDAGNODES_H
19#define LLVM_CODEGEN_SELECTIONDAGNODES_H
20
21#include "llvm/ADT/APFloat.h"
22#include "llvm/ADT/ArrayRef.h"
23#include "llvm/ADT/BitVector.h"
24#include "llvm/ADT/FoldingSet.h"
25#include "llvm/ADT/GraphTraits.h"
26#include "llvm/ADT/SmallPtrSet.h"
27#include "llvm/ADT/SmallVector.h"
28#include "llvm/ADT/ilist_node.h"
29#include "llvm/ADT/iterator.h"
30#include "llvm/ADT/iterator_range.h"
31#include "llvm/CodeGen/ISDOpcodes.h"
32#include "llvm/CodeGen/MachineMemOperand.h"
33#include "llvm/CodeGen/Register.h"
34#include "llvm/CodeGen/ValueTypes.h"
35#include "llvm/IR/Constants.h"
36#include "llvm/IR/DebugLoc.h"
37#include "llvm/IR/Instruction.h"
38#include "llvm/IR/Instructions.h"
39#include "llvm/IR/Metadata.h"
40#include "llvm/IR/Operator.h"
41#include "llvm/Support/AlignOf.h"
42#include "llvm/Support/AtomicOrdering.h"
43#include "llvm/Support/Casting.h"
44#include "llvm/Support/ErrorHandling.h"
45#include "llvm/Support/MachineValueType.h"
46#include "llvm/Support/TypeSize.h"
47#include <algorithm>
48#include <cassert>
49#include <climits>
50#include <cstddef>
51#include <cstdint>
52#include <cstring>
53#include <iterator>
54#include <string>
55#include <tuple>
56
57namespace llvm {
58
59class APInt;
60class Constant;
61template <typename T> struct DenseMapInfo;
62class GlobalValue;
63class MachineBasicBlock;
64class MachineConstantPoolValue;
65class MCSymbol;
66class raw_ostream;
67class SDNode;
68class SelectionDAG;
69class Type;
70class Value;
71
72void checkForCycles(const SDNode *N, const SelectionDAG *DAG = nullptr,
73 bool force = false);
74
75/// This represents a list of ValueType's that has been intern'd by
76/// a SelectionDAG. Instances of this simple value class are returned by
77/// SelectionDAG::getVTList(...).
78///
79struct SDVTList {
80 const EVT *VTs;
81 unsigned int NumVTs;
82};
83
84namespace ISD {
85
86 /// Node predicates
87
88/// If N is a BUILD_VECTOR or SPLAT_VECTOR node whose elements are all the
89/// same constant or undefined, return true and return the constant value in
90/// \p SplatValue.
91bool isConstantSplatVector(const SDNode *N, APInt &SplatValue);
92
93/// Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where
94/// all of the elements are ~0 or undef. If \p BuildVectorOnly is set to
95/// true, it only checks BUILD_VECTOR.
96bool isConstantSplatVectorAllOnes(const SDNode *N,
97 bool BuildVectorOnly = false);
98
99/// Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where
100/// all of the elements are 0 or undef. If \p BuildVectorOnly is set to true, it
101/// only checks BUILD_VECTOR.
102bool isConstantSplatVectorAllZeros(const SDNode *N,
103 bool BuildVectorOnly = false);
104
105/// Return true if the specified node is a BUILD_VECTOR where all of the
106/// elements are ~0 or undef.
107bool isBuildVectorAllOnes(const SDNode *N);
108
109/// Return true if the specified node is a BUILD_VECTOR where all of the
110/// elements are 0 or undef.
111bool isBuildVectorAllZeros(const SDNode *N);
112
113/// Return true if the specified node is a BUILD_VECTOR node of all
114/// ConstantSDNode or undef.
115bool isBuildVectorOfConstantSDNodes(const SDNode *N);
116
117/// Return true if the specified node is a BUILD_VECTOR node of all
118/// ConstantFPSDNode or undef.
119bool isBuildVectorOfConstantFPSDNodes(const SDNode *N);
120
121/// Return true if the node has at least one operand and all operands of the
122/// specified node are ISD::UNDEF.
123bool allOperandsUndef(const SDNode *N);
124
125} // end namespace ISD
126
127//===----------------------------------------------------------------------===//
128/// Unlike LLVM values, Selection DAG nodes may return multiple
129/// values as the result of a computation. Many nodes return multiple values,
130/// from loads (which define a token and a return value) to ADDC (which returns
131/// a result and a carry value), to calls (which may return an arbitrary number
132/// of values).
133///
134/// As such, each use of a SelectionDAG computation must indicate the node that
135/// computes it as well as which return value to use from that node. This pair
136/// of information is represented with the SDValue value type.
137///
138class SDValue {
139 friend struct DenseMapInfo<SDValue>;
140
141 SDNode *Node = nullptr; // The node defining the value we are using.
142 unsigned ResNo = 0; // Which return value of the node we are using.
143
144public:
145 SDValue() = default;
146 SDValue(SDNode *node, unsigned resno);
147
148 /// get the index which selects a specific result in the SDNode
149 unsigned getResNo() const { return ResNo; }
150
151 /// get the SDNode which holds the desired result
152 SDNode *getNode() const { return Node; }
153
154 /// set the SDNode
155 void setNode(SDNode *N) { Node = N; }
156
157 inline SDNode *operator->() const { return Node; }
158
159 bool operator==(const SDValue &O) const {
160 return Node == O.Node && ResNo == O.ResNo;
161 }
162 bool operator!=(const SDValue &O) const {
163 return !operator==(O);
164 }
165 bool operator<(const SDValue &O) const {
166 return std::tie(Node, ResNo) < std::tie(O.Node, O.ResNo);
167 }
168 explicit operator bool() const {
169 return Node != nullptr;
170 }
171
172 SDValue getValue(unsigned R) const {
173 return SDValue(Node, R);
174 }
175
176 /// Return true if this node is an operand of N.
177 bool isOperandOf(const SDNode *N) const;
178
179 /// Return the ValueType of the referenced return value.
180 inline EVT getValueType() const;
181
182 /// Return the simple ValueType of the referenced return value.
183 MVT getSimpleValueType() const {
184 return getValueType().getSimpleVT();
185 }
186
187 /// Returns the size of the value in bits.
188 ///
189 /// If the value type is a scalable vector type, the scalable property will
190 /// be set and the runtime size will be a positive integer multiple of the
191 /// base size.
192 TypeSize getValueSizeInBits() const {
193 return getValueType().getSizeInBits();
194 }
195
196 uint64_t getScalarValueSizeInBits() const {
197 return getValueType().getScalarType().getFixedSizeInBits();
198 }
199
200 // Forwarding methods - These forward to the corresponding methods in SDNode.
201 inline unsigned getOpcode() const;
202 inline unsigned getNumOperands() const;
203 inline const SDValue &getOperand(unsigned i) const;
204 inline uint64_t getConstantOperandVal(unsigned i) const;
205 inline const APInt &getConstantOperandAPInt(unsigned i) const;
206 inline bool isTargetMemoryOpcode() const;
207 inline bool isTargetOpcode() const;
208 inline bool isMachineOpcode() const;
209 inline bool isUndef() const;
210 inline unsigned getMachineOpcode() const;
211 inline const DebugLoc &getDebugLoc() const;
212 inline void dump() const;
213 inline void dump(const SelectionDAG *G) const;
214 inline void dumpr() const;
215 inline void dumpr(const SelectionDAG *G) const;
216
217 /// Return true if this operand (which must be a chain) reaches the
218 /// specified operand without crossing any side-effecting instructions.
219 /// In practice, this looks through token factors and non-volatile loads.
220 /// In order to remain efficient, this only
221 /// looks a couple of nodes in, it does not do an exhaustive search.
222 bool reachesChainWithoutSideEffects(SDValue Dest,
223 unsigned Depth = 2) const;
224
225 /// Return true if there are no nodes using value ResNo of Node.
226 inline bool use_empty() const;
227
228 /// Return true if there is exactly one node using value ResNo of Node.
229 inline bool hasOneUse() const;
230};
231
232template<> struct DenseMapInfo<SDValue> {
233 static inline SDValue getEmptyKey() {
234 SDValue V;
235 V.ResNo = -1U;
236 return V;
237 }
238
239 static inline SDValue getTombstoneKey() {
240 SDValue V;
241 V.ResNo = -2U;
242 return V;
243 }
244
245 static unsigned getHashValue(const SDValue &Val) {
246 return ((unsigned)((uintptr_t)Val.getNode() >> 4) ^
247 (unsigned)((uintptr_t)Val.getNode() >> 9)) + Val.getResNo();
248 }
249
250 static bool isEqual(const SDValue &LHS, const SDValue &RHS) {
251 return LHS == RHS;
252 }
253};
254
255/// Allow casting operators to work directly on
256/// SDValues as if they were SDNode*'s.
257template<> struct simplify_type<SDValue> {
258 using SimpleType = SDNode *;
259
260 static SimpleType getSimplifiedValue(SDValue &Val) {
261 return Val.getNode();
262 }
263};
264template<> struct simplify_type<const SDValue> {
265 using SimpleType = /*const*/ SDNode *;
266
267 static SimpleType getSimplifiedValue(const SDValue &Val) {
268 return Val.getNode();
269 }
270};
271
272/// Represents a use of a SDNode. This class holds an SDValue,
273/// which records the SDNode being used and the result number, a
274/// pointer to the SDNode using the value, and Next and Prev pointers,
275/// which link together all the uses of an SDNode.
276///
class SDUse {
  /// Val - The value being used.
  SDValue Val;
  /// User - The user of this value.
  SDNode *User = nullptr;
  /// Prev, Next - Pointers to the uses list of the SDNode referred by
  /// this operand.  Prev points at the forward link that targets this use
  /// (either the list head or the previous use's Next field), which lets
  /// removal work without a head pointer or null checks on Prev.
  SDUse **Prev = nullptr;
  SDUse *Next = nullptr;

public:
  SDUse() = default;
  // SDUse objects live inside intrusive lists; copying one would corrupt
  // the links, so copy operations are deleted.
  SDUse(const SDUse &U) = delete;
  SDUse &operator=(const SDUse &) = delete;

  /// Normally SDUse will just implicitly convert to an SDValue that it holds.
  operator const SDValue&() const { return Val; }

  /// If implicit conversion to SDValue doesn't work, the get() method returns
  /// the SDValue.
  const SDValue &get() const { return Val; }

  /// This returns the SDNode that contains this Use.
  SDNode *getUser() { return User; }

  /// Get the next SDUse in the use list.
  SDUse *getNext() const { return Next; }

  /// Convenience function for get().getNode().
  SDNode *getNode() const { return Val.getNode(); }
  /// Convenience function for get().getResNo().
  unsigned getResNo() const { return Val.getResNo(); }
  /// Convenience function for get().getValueType().
  EVT getValueType() const { return Val.getValueType(); }

  /// Convenience function for get().operator==
  bool operator==(const SDValue &V) const {
    return Val == V;
  }

  /// Convenience function for get().operator!=
  bool operator!=(const SDValue &V) const {
    return Val != V;
  }

  /// Convenience function for get().operator<
  bool operator<(const SDValue &V) const {
    return Val < V;
  }

private:
  friend class SelectionDAG;
  friend class SDNode;
  // TODO: unfriend HandleSDNode once we fix its operand handling.
  friend class HandleSDNode;

  void setUser(SDNode *p) { User = p; }

  /// Remove this use from its existing use list, assign it the
  /// given value, and add it to the new value's node's use list.
  inline void set(const SDValue &V);
  /// Like set, but only supports initializing a newly-allocated
  /// SDUse with a non-null value.
  inline void setInitial(const SDValue &V);
  /// Like set, but only sets the Node portion of the value,
  /// leaving the ResNo portion unmodified.
  inline void setNode(SDNode *N);

  /// Splice this use onto the front of the intrusive list rooted at *List.
  void addToList(SDUse **List) {
    Next = *List;
    if (Next) Next->Prev = &Next;
    Prev = List;
    *List = this;
  }

  /// Unlink this use from its list.  Prev always targets the forward link
  /// pointing at this use, so *Prev can be overwritten unconditionally.
  void removeFromList() {
    *Prev = Next;
    if (Next) Next->Prev = Prev;
  }
};
357
358/// simplify_type specializations - Allow casting operators to work directly on
359/// SDValues as if they were SDNode*'s.
template<> struct simplify_type<SDUse> {
  using SimpleType = SDNode *;

  // Expose the used node so isa<>/cast<>/dyn_cast<> work on an SDUse
  // exactly as they do on the SDValue it wraps.
  static SimpleType getSimplifiedValue(SDUse &Val) {
    return Val.getNode();
  }
};
367
368/// These are IR-level optimization flags that may be propagated to SDNodes.
369/// TODO: This data structure should be shared by the IR optimizer and the
370/// the backend.
struct SDNodeFlags {
private:
  bool NoUnsignedWrap : 1;
  bool NoSignedWrap : 1;
  bool Exact : 1;
  bool NoNaNs : 1;
  bool NoInfs : 1;
  bool NoSignedZeros : 1;
  bool AllowReciprocal : 1;
  bool AllowContract : 1;
  bool ApproximateFuncs : 1;
  bool AllowReassociation : 1;

  // We assume instructions do not raise floating-point exceptions by default,
  // and only those marked explicitly may do so.  We could choose to represent
  // this via a positive "FPExcept" flags like on the MI level, but having a
  // negative "NoFPExcept" flag here (that defaults to true) makes the flag
  // intersection logic more straightforward.
  bool NoFPExcept : 1;

public:
  /// Default constructor turns off all optimization flags.
  SDNodeFlags()
      : NoUnsignedWrap(false), NoSignedWrap(false), Exact(false), NoNaNs(false),
        NoInfs(false), NoSignedZeros(false), AllowReciprocal(false),
        AllowContract(false), ApproximateFuncs(false),
        AllowReassociation(false), NoFPExcept(false) {}

  /// Propagate the fast-math-flags from an IR FPMathOperator.
  /// Only the FP-related flags are transferred; the integer wrap/exact
  /// flags are left untouched.
  void copyFMF(const FPMathOperator &FPMO) {
    setNoNaNs(FPMO.hasNoNaNs());
    setNoInfs(FPMO.hasNoInfs());
    setNoSignedZeros(FPMO.hasNoSignedZeros());
    setAllowReciprocal(FPMO.hasAllowReciprocal());
    setAllowContract(FPMO.hasAllowContract());
    setApproximateFuncs(FPMO.hasApproxFunc());
    setAllowReassociation(FPMO.hasAllowReassoc());
  }

  // These are mutators for each flag.
  void setNoUnsignedWrap(bool b) { NoUnsignedWrap = b; }
  void setNoSignedWrap(bool b) { NoSignedWrap = b; }
  void setExact(bool b) { Exact = b; }
  void setNoNaNs(bool b) { NoNaNs = b; }
  void setNoInfs(bool b) { NoInfs = b; }
  void setNoSignedZeros(bool b) { NoSignedZeros = b; }
  void setAllowReciprocal(bool b) { AllowReciprocal = b; }
  void setAllowContract(bool b) { AllowContract = b; }
  void setApproximateFuncs(bool b) { ApproximateFuncs = b; }
  void setAllowReassociation(bool b) { AllowReassociation = b; }
  void setNoFPExcept(bool b) { NoFPExcept = b; }

  // These are accessors for each flag.
  bool hasNoUnsignedWrap() const { return NoUnsignedWrap; }
  bool hasNoSignedWrap() const { return NoSignedWrap; }
  bool hasExact() const { return Exact; }
  bool hasNoNaNs() const { return NoNaNs; }
  bool hasNoInfs() const { return NoInfs; }
  bool hasNoSignedZeros() const { return NoSignedZeros; }
  bool hasAllowReciprocal() const { return AllowReciprocal; }
  bool hasAllowContract() const { return AllowContract; }
  bool hasApproximateFuncs() const { return ApproximateFuncs; }
  bool hasAllowReassociation() const { return AllowReassociation; }
  bool hasNoFPExcept() const { return NoFPExcept; }

  /// Clear any flags in this flag set that aren't also set in Flags. All
  /// flags will be cleared if Flags are undefined.
  /// Implemented as a bitwise AND of every flag pair (this is why NoFPExcept
  /// is stored negatively: intersection then composes uniformly).
  void intersectWith(const SDNodeFlags Flags) {
    NoUnsignedWrap &= Flags.NoUnsignedWrap;
    NoSignedWrap &= Flags.NoSignedWrap;
    Exact &= Flags.Exact;
    NoNaNs &= Flags.NoNaNs;
    NoInfs &= Flags.NoInfs;
    NoSignedZeros &= Flags.NoSignedZeros;
    AllowReciprocal &= Flags.AllowReciprocal;
    AllowContract &= Flags.AllowContract;
    ApproximateFuncs &= Flags.ApproximateFuncs;
    AllowReassociation &= Flags.AllowReassociation;
    NoFPExcept &= Flags.NoFPExcept;
  }
};
452
453/// Represents one node in the SelectionDAG.
454///
/// Represents one node in the SelectionDAG.
///
class SDNode : public FoldingSetNode, public ilist_node<SDNode> {
private:
  /// The operation that this node performs.  Negative values denote
  /// post-isel (machine) opcodes; see isMachineOpcode()/getMachineOpcode().
  int16_t NodeType;

protected:
  // We define a set of mini-helper classes to help us interpret the bits in
  // our SubclassData.  These are designed to fit within a uint16_t so they
  // pack with NodeType.

#if defined(_AIX) && (!defined(__GNUC__) || defined(__ibmxl__))
// Except for GCC; by default, AIX compilers store bit-fields in 4-byte words
// and give the `pack` pragma push semantics.
#define BEGIN_TWO_BYTE_PACK() _Pragma("pack(2)")
#define END_TWO_BYTE_PACK() _Pragma("pack(pop)")
#else
#define BEGIN_TWO_BYTE_PACK()
#define END_TWO_BYTE_PACK()
#endif

BEGIN_TWO_BYTE_PACK()
  class SDNodeBitfields {
    friend class SDNode;
    friend class MemIntrinsicSDNode;
    friend class MemSDNode;
    friend class SelectionDAG;

    uint16_t HasDebugValue : 1;
    uint16_t IsMemIntrinsic : 1;
    uint16_t IsDivergent : 1;
  };
  enum { NumSDNodeBits = 3 };

  class ConstantSDNodeBitfields {
    friend class ConstantSDNode;

    uint16_t : NumSDNodeBits;

    uint16_t IsOpaque : 1;
  };

  class MemSDNodeBitfields {
    friend class MemSDNode;
    friend class MemIntrinsicSDNode;
    friend class AtomicSDNode;

    uint16_t : NumSDNodeBits;

    uint16_t IsVolatile : 1;
    uint16_t IsNonTemporal : 1;
    uint16_t IsDereferenceable : 1;
    uint16_t IsInvariant : 1;
  };
  enum { NumMemSDNodeBits = NumSDNodeBits + 4 };

  class LSBaseSDNodeBitfields {
    friend class LSBaseSDNode;
    friend class MaskedLoadStoreSDNode;
    friend class MaskedGatherScatterSDNode;

    uint16_t : NumMemSDNodeBits;

    // This storage is shared between disparate class hierarchies to hold an
    // enumeration specific to the class hierarchy in use.
    //   LSBaseSDNode => enum ISD::MemIndexedMode
    //   MaskedLoadStoreBaseSDNode => enum ISD::MemIndexedMode
    //   MaskedGatherScatterSDNode => enum ISD::MemIndexType
    uint16_t AddressingMode : 3;
  };
  enum { NumLSBaseSDNodeBits = NumMemSDNodeBits + 3 };

  class LoadSDNodeBitfields {
    friend class LoadSDNode;
    friend class MaskedLoadSDNode;
    friend class MaskedGatherSDNode;

    uint16_t : NumLSBaseSDNodeBits;

    uint16_t ExtTy : 2; // enum ISD::LoadExtType
    uint16_t IsExpanding : 1;
  };

  class StoreSDNodeBitfields {
    friend class StoreSDNode;
    friend class MaskedStoreSDNode;
    friend class MaskedScatterSDNode;

    uint16_t : NumLSBaseSDNodeBits;

    uint16_t IsTruncating : 1;
    uint16_t IsCompressing : 1;
  };

  // All members alias the same two bytes; RawSDNodeBits provides raw access
  // (e.g. for zero-initialization in the constructor below).
  union {
    char RawSDNodeBits[sizeof(uint16_t)];
    SDNodeBitfields SDNodeBits;
    ConstantSDNodeBitfields ConstantSDNodeBits;
    MemSDNodeBitfields MemSDNodeBits;
    LSBaseSDNodeBitfields LSBaseSDNodeBits;
    LoadSDNodeBitfields LoadSDNodeBits;
    StoreSDNodeBitfields StoreSDNodeBits;
  };
END_TWO_BYTE_PACK()
#undef BEGIN_TWO_BYTE_PACK
#undef END_TWO_BYTE_PACK

  // RawSDNodeBits must cover the entirety of the union.  This means that all
  // of the union's members must have size <= RawSDNodeBits.  We write the RHS
  // as "2" instead of sizeof(RawSDNodeBits) because MSVC can't handle the
  // latter.
  static_assert(sizeof(SDNodeBitfields) <= 2, "field too wide");
  static_assert(sizeof(ConstantSDNodeBitfields) <= 2, "field too wide");
  static_assert(sizeof(MemSDNodeBitfields) <= 2, "field too wide");
  static_assert(sizeof(LSBaseSDNodeBitfields) <= 2, "field too wide");
  static_assert(sizeof(LoadSDNodeBitfields) <= 2, "field too wide");
  static_assert(sizeof(StoreSDNodeBitfields) <= 2, "field too wide");

private:
  friend class SelectionDAG;
  // TODO: unfriend HandleSDNode once we fix its operand handling.
  friend class HandleSDNode;

  /// Unique id per SDNode in the DAG.
  int NodeId = -1;

  /// The values that are used by this operation.
  SDUse *OperandList = nullptr;

  /// The types of the values this node defines.  SDNode's may
  /// define multiple values simultaneously.
  const EVT *ValueList;

  /// List of uses for this SDNode.
  SDUse *UseList = nullptr;

  /// The number of entries in the Operand/Value list.
  unsigned short NumOperands = 0;
  unsigned short NumValues;

  // The ordering of the SDNodes. It roughly corresponds to the ordering of the
  // original LLVM instructions.
  // This is used for turning off scheduling, because we'll forgo
  // the normal scheduling algorithms and output the instructions according to
  // this ordering.
  unsigned IROrder;

  /// Source line information.
  DebugLoc debugLoc;

  /// Return a pointer to the specified value type.
  static const EVT *getValueTypeList(EVT VT);

  /// Optimization flags (wrap behaviour, fast-math) attached to this node.
  SDNodeFlags Flags;

public:
  /// Unique and persistent id per SDNode in the DAG.
  /// Used for debug printing.
  uint16_t PersistentId;

  //===--------------------------------------------------------------------===//
  //  Accessors
  //

  /// Return the SelectionDAG opcode value for this node. For
  /// pre-isel nodes (those for which isMachineOpcode returns false), these
  /// are the opcode values in the ISD and <target>ISD namespaces. For
  /// post-isel opcodes, see getMachineOpcode.
  unsigned getOpcode()  const { return (unsigned short)NodeType; }

  /// Test if this node has a target-specific opcode (in the
  /// \<target\>ISD namespace).
  bool isTargetOpcode() const { return NodeType >= ISD::BUILTIN_OP_END; }

  /// Test if this node has a target-specific opcode that may raise
  /// FP exceptions (in the \<target\>ISD namespace and greater than
  /// FIRST_TARGET_STRICTFP_OPCODE).  Note that all target memory
  /// opcode are currently automatically considered to possibly raise
  /// FP exceptions as well.
  bool isTargetStrictFPOpcode() const {
    return NodeType >= ISD::FIRST_TARGET_STRICTFP_OPCODE;
  }

  /// Test if this node has a target-specific
  /// memory-referencing opcode (in the \<target\>ISD namespace and
  /// greater than FIRST_TARGET_MEMORY_OPCODE).
  bool isTargetMemoryOpcode() const {
    return NodeType >= ISD::FIRST_TARGET_MEMORY_OPCODE;
  }

  /// Return true if the type of the node type undefined.
  bool isUndef() const { return NodeType == ISD::UNDEF; }

  /// Test if this node is a memory intrinsic (with valid pointer information).
  /// INTRINSIC_W_CHAIN and INTRINSIC_VOID nodes are sometimes created for
  /// non-memory intrinsics (with chains) that are not really instances of
  /// MemSDNode. For such nodes, we need some extra state to determine the
  /// proper classof relationship.
  bool isMemIntrinsic() const {
    return (NodeType == ISD::INTRINSIC_W_CHAIN ||
            NodeType == ISD::INTRINSIC_VOID) &&
           SDNodeBits.IsMemIntrinsic;
  }

  /// Test if this node is a strict floating point pseudo-op.
  /// The case list for the constrained ops is generated from
  /// ConstrainedOps.def via the DAG_INSTRUCTION macro.
  bool isStrictFPOpcode() {
    switch (NodeType) {
      default:
        return false;
      case ISD::STRICT_FP16_TO_FP:
      case ISD::STRICT_FP_TO_FP16:
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
      case ISD::STRICT_##DAGN:
#include "llvm/IR/ConstrainedOps.def"
        return true;
    }
  }

  /// Test if this node has a post-isel opcode, directly
  /// corresponding to a MachineInstr opcode.
  bool isMachineOpcode() const { return NodeType < 0; }

  /// This may only be called if isMachineOpcode returns
  /// true. It returns the MachineInstr opcode value that the node's opcode
  /// corresponds to.
  unsigned getMachineOpcode() const {
    assert(isMachineOpcode() && "Not a MachineInstr opcode!");
    // Machine opcodes are stored bitwise-complemented in NodeType.
    return ~NodeType;
  }

  bool getHasDebugValue() const { return SDNodeBits.HasDebugValue; }
  void setHasDebugValue(bool b) { SDNodeBits.HasDebugValue = b; }

  bool isDivergent() const { return SDNodeBits.IsDivergent; }

  /// Return true if there are no uses of this node.
  bool use_empty() const { return UseList == nullptr; }

  /// Return true if there is exactly one use of this node.
  bool hasOneUse() const { return hasSingleElement(uses()); }

  /// Return the number of uses of this node. This method takes
  /// time proportional to the number of uses.
  size_t use_size() const { return std::distance(use_begin(), use_end()); }

  /// Return the unique node id.
  int getNodeId() const { return NodeId; }

  /// Set unique node id.
  void setNodeId(int Id) { NodeId = Id; }

  /// Return the node ordering.
  unsigned getIROrder() const { return IROrder; }

  /// Set the node ordering.
  void setIROrder(unsigned Order) { IROrder = Order; }

  /// Return the source location info.
  const DebugLoc &getDebugLoc() const { return debugLoc; }

  /// Set source location info.  Try to avoid this, putting
  /// it in the constructor is preferable.
  void setDebugLoc(DebugLoc dl) { debugLoc = std::move(dl); }

  /// This class provides iterator support for SDUse
  /// operands that use a specific SDNode.
  class use_iterator {
    friend class SDNode;

    /// Current position in the use list; nullptr is the end iterator.
    SDUse *Op = nullptr;

    explicit use_iterator(SDUse *op) : Op(op) {}

  public:
    using iterator_category = std::forward_iterator_tag;
    using value_type = SDUse;
    using difference_type = std::ptrdiff_t;
    using pointer = value_type *;
    using reference = value_type &;

    use_iterator() = default;
    use_iterator(const use_iterator &I) : Op(I.Op) {}

    bool operator==(const use_iterator &x) const {
      return Op == x.Op;
    }
    bool operator!=(const use_iterator &x) const {
      return !operator==(x);
    }

    /// Return true if this iterator is at the end of uses list.
    bool atEnd() const { return Op == nullptr; }

    // Iterator traversal: forward iteration only.
    use_iterator &operator++() {          // Preincrement
      assert(Op && "Cannot increment end iterator!");
      Op = Op->getNext();
      return *this;
    }

    use_iterator operator++(int) {        // Postincrement
      use_iterator tmp = *this; ++*this; return tmp;
    }

    /// Retrieve a pointer to the current user node.
    SDNode *operator*() const {
      assert(Op && "Cannot dereference end iterator!");
      return Op->getUser();
    }

    SDNode *operator->() const { return operator*(); }

    SDUse &getUse() const { return *Op; }

    /// Retrieve the operand # of this use in its user.
    unsigned getOperandNo() const {
      assert(Op && "Cannot dereference end iterator!");
      // SDUse objects for a user are contiguous, so pointer arithmetic
      // against the user's OperandList yields the operand index.
      return (unsigned)(Op - Op->getUser()->OperandList);
    }
  };

  /// Provide iteration support to walk over all uses of an SDNode.
  use_iterator use_begin() const {
    return use_iterator(UseList);
  }

  static use_iterator use_end() { return use_iterator(nullptr); }

  inline iterator_range<use_iterator> uses() {
    return make_range(use_begin(), use_end());
  }
  inline iterator_range<use_iterator> uses() const {
    return make_range(use_begin(), use_end());
  }

  /// Return true if there are exactly NUSES uses of the indicated value.
  /// This method ignores uses of other values defined by this operation.
  bool hasNUsesOfValue(unsigned NUses, unsigned Value) const;

  /// Return true if there are any use of the indicated value.
  /// This method ignores uses of other values defined by this operation.
  bool hasAnyUseOfValue(unsigned Value) const;

  /// Return true if this node is the only use of N.
  bool isOnlyUserOf(const SDNode *N) const;

  /// Return true if this node is an operand of N.
  bool isOperandOf(const SDNode *N) const;

  /// Return true if this node is a predecessor of N.
  /// NOTE: Implemented on top of hasPredecessor and every bit as
  /// expensive. Use carefully.
  bool isPredecessorOf(const SDNode *N) const {
    return N->hasPredecessor(this);
  }

  /// Return true if N is a predecessor of this node.
  /// N is either an operand of this node, or can be reached by recursively
  /// traversing up the operands.
  /// NOTE: This is an expensive method. Use it carefully.
  bool hasPredecessor(const SDNode *N) const;

  /// Returns true if N is a predecessor of any node in Worklist. This
  /// helper keeps Visited and Worklist sets externally to allow unions
  /// searches to be performed in parallel, caching of results across
  /// queries and incremental addition to Worklist. Stops early if N is
  /// found but will resume. Remember to clear Visited and Worklists
  /// if DAG changes. MaxSteps gives a maximum number of nodes to visit before
  /// giving up. The TopologicalPrune flag signals that positive NodeIds are
  /// topologically ordered (Operands have strictly smaller node id) and search
  /// can be pruned leveraging this.
  static bool hasPredecessorHelper(const SDNode *N,
                                   SmallPtrSetImpl<const SDNode *> &Visited,
                                   SmallVectorImpl<const SDNode *> &Worklist,
                                   unsigned int MaxSteps = 0,
                                   bool TopologicalPrune = false) {
    SmallVector<const SDNode *, 8> DeferredNodes;
    if (Visited.count(N))
      return true;

    // Node Id's are assigned in three places: As a topological
    // ordering (> 0), during legalization (results in values set to
    // 0), new nodes (set to -1). If N has a topolgical id then we
    // know that all nodes with ids smaller than it cannot be
    // successors and we need not check them. Filter out all node
    // that can't be matches. We add them to the worklist before exit
    // in case of multiple calls. Note that during selection the topological id
    // may be violated if a node's predecessor is selected before it. We mark
    // this at selection negating the id of unselected successors and
    // restricting topological pruning to positive ids.

    int NId = N->getNodeId();
    // If we Invalidated the Id, reconstruct original NId.
    if (NId < -1)
      NId = -(NId + 1);

    bool Found = false;
    while (!Worklist.empty()) {
      const SDNode *M = Worklist.pop_back_val();
      int MId = M->getNodeId();
      if (TopologicalPrune && M->getOpcode() != ISD::TokenFactor && (NId > 0) &&
          (MId > 0) && (MId < NId)) {
        DeferredNodes.push_back(M);
        continue;
      }
      for (const SDValue &OpV : M->op_values()) {
        SDNode *Op = OpV.getNode();
        if (Visited.insert(Op).second)
          Worklist.push_back(Op);
        if (Op == N)
          Found = true;
      }
      if (Found)
        break;
      if (MaxSteps != 0 && Visited.size() >= MaxSteps)
        break;
    }
    // Push deferred nodes back on worklist.
    Worklist.append(DeferredNodes.begin(), DeferredNodes.end());
    // If we bailed early, conservatively return found.
    if (MaxSteps != 0 && Visited.size() >= MaxSteps)
      return true;
    return Found;
  }

  /// Return true if all the users of N are contained in Nodes.
  /// NOTE: Requires at least one match, but doesn't require them all.
  static bool areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N);

  /// Return the number of values used by this operation.
  unsigned getNumOperands() const { return NumOperands; }

  /// Return the maximum number of operands that a SDNode can hold.
  static constexpr size_t getMaxNumOperands() {
    return std::numeric_limits<decltype(SDNode::NumOperands)>::max();
  }

  /// Helper method returns the integer value of a ConstantSDNode operand.
  inline uint64_t getConstantOperandVal(unsigned Num) const;

  /// Helper method returns the APInt of a ConstantSDNode operand.
  inline const APInt &getConstantOperandAPInt(unsigned Num) const;

  const SDValue &getOperand(unsigned Num) const {
    assert(Num < NumOperands && "Invalid child # of SDNode!");
    return OperandList[Num];
  }

  using op_iterator = SDUse *;

  op_iterator op_begin() const { return OperandList; }
  op_iterator op_end() const { return OperandList+NumOperands; }
  ArrayRef<SDUse> ops() const { return makeArrayRef(op_begin(), op_end()); }

  /// Iterator for directly iterating over the operand SDValue's.
  struct value_op_iterator
      : iterator_adaptor_base<value_op_iterator, op_iterator,
                              std::random_access_iterator_tag, SDValue,
                              ptrdiff_t, value_op_iterator *,
                              value_op_iterator *> {
    explicit value_op_iterator(SDUse *U = nullptr)
      : iterator_adaptor_base(U) {}

    /// Yield the SDValue stored in the current use.
    const SDValue &operator*() const { return I->get(); }
  };

  iterator_range<value_op_iterator> op_values() const {
    return make_range(value_op_iterator(op_begin()),
                      value_op_iterator(op_end()));
  }

  SDVTList getVTList() const {
    SDVTList X = { ValueList, NumValues };
    return X;
  }

  /// If this node has a glue operand, return the node
  /// to which the glue operand points. Otherwise return NULL.
  SDNode *getGluedNode() const {
    // A glue operand, if present, is always the last operand.
    if (getNumOperands() != 0 &&
        getOperand(getNumOperands()-1).getValueType() == MVT::Glue)
      return getOperand(getNumOperands()-1).getNode();
    return nullptr;
  }

  /// If this node has a glue value with a user, return
  /// the user (there is at most one). Otherwise return NULL.
  SDNode *getGluedUser() const {
    for (use_iterator UI = use_begin(), UE = use_end(); UI != UE; ++UI)
      if (UI.getUse().get().getValueType() == MVT::Glue)
        return *UI;
    return nullptr;
  }

  SDNodeFlags getFlags() const { return Flags; }
  void setFlags(SDNodeFlags NewFlags) { Flags = NewFlags; }

  /// Clear any flags in this node that aren't also set in Flags.
  /// If Flags is not in a defined state then this has no effect.
  void intersectFlagsWith(const SDNodeFlags Flags);

  /// Return the number of values defined/returned by this operator.
  unsigned getNumValues() const { return NumValues; }

  /// Return the type of a specified result.
  EVT getValueType(unsigned ResNo) const {
    assert(ResNo < NumValues && "Illegal result number!");
    return ValueList[ResNo];
  }

  /// Return the type of a specified result as a simple type.
  MVT getSimpleValueType(unsigned ResNo) const {
    return getValueType(ResNo).getSimpleVT();
  }

  /// Returns MVT::getSizeInBits(getValueType(ResNo)).
  ///
  /// If the value type is a scalable vector type, the scalable property will
  /// be set and the runtime size will be a positive integer multiple of the
  /// base size.
  TypeSize getValueSizeInBits(unsigned ResNo) const {
    return getValueType(ResNo).getSizeInBits();
  }

  using value_iterator = const EVT *;

  value_iterator value_begin() const { return ValueList; }
  value_iterator value_end() const { return ValueList+NumValues; }
  iterator_range<value_iterator> values() const {
    return llvm::make_range(value_begin(), value_end());
  }

  /// Return the opcode of this operation for printing.
  std::string getOperationName(const SelectionDAG *G = nullptr) const;
  static const char* getIndexedModeName(ISD::MemIndexedMode AM);
  void print_types(raw_ostream &OS, const SelectionDAG *G) const;
  void print_details(raw_ostream &OS, const SelectionDAG *G) const;
  void print(raw_ostream &OS, const SelectionDAG *G = nullptr) const;
  void printr(raw_ostream &OS, const SelectionDAG *G = nullptr) const;

  /// Print a SelectionDAG node and all children down to
  /// the leaves.  The given SelectionDAG allows target-specific nodes
  /// to be printed in human-readable form.  Unlike printr, this will
  /// print the whole DAG, including children that appear multiple
  /// times.
  ///
  void printrFull(raw_ostream &O, const SelectionDAG *G = nullptr) const;

  /// Print a SelectionDAG node and children up to
  /// depth "depth."  The given SelectionDAG allows target-specific
  /// nodes to be printed in human-readable form.  Unlike printr, this
  /// will print children that appear multiple times wherever they are
  /// used.
  ///
  void printrWithDepth(raw_ostream &O, const SelectionDAG *G = nullptr,
                       unsigned depth = 100) const;

  /// Dump this node, for debugging.
  void dump() const;

  /// Dump (recursively) this node and its use-def subgraph.
  void dumpr() const;

  /// Dump this node, for debugging.
  /// The given SelectionDAG allows target-specific nodes to be printed
  /// in human-readable form.
  void dump(const SelectionDAG *G) const;

  /// Dump (recursively) this node and its use-def subgraph.
  /// The given SelectionDAG allows target-specific nodes to be printed
  /// in human-readable form.
  void dumpr(const SelectionDAG *G) const;

  /// printrFull to dbgs().  The given SelectionDAG allows
  /// target-specific nodes to be printed in human-readable form.
  /// Unlike dumpr, this will print the whole DAG, including children
  /// that appear multiple times.
  void dumprFull(const SelectionDAG *G = nullptr) const;

  /// printrWithDepth to dbgs().  The given
  /// SelectionDAG allows target-specific nodes to be printed in
  /// human-readable form.  Unlike dumpr, this will print children
  /// that appear multiple times wherever they are used.
  ///
  void dumprWithDepth(const SelectionDAG *G = nullptr,
                      unsigned depth = 100) const;

  /// Gather unique data for the node.
  void Profile(FoldingSetNodeID &ID) const;

  /// This method should only be used by the SDUse class.
  void addUse(SDUse &U) { U.addToList(&UseList); }

protected:
  static SDVTList getSDVTList(EVT VT) {
    SDVTList Ret = { getValueTypeList(VT), 1 };
    return Ret;
  }

  /// Create an SDNode.
  ///
  /// SDNodes are created without any operands, and never own the operand
  /// storage. To add operands, see SelectionDAG::createOperands.
  SDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs)
      : NodeType(Opc), ValueList(VTs.VTs), NumValues(VTs.NumVTs),
        IROrder(Order), debugLoc(std::move(dl)) {
    // Zero all subclass bitfield views at once via the raw byte view.
    memset(&RawSDNodeBits, 0, sizeof(RawSDNodeBits));
    assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");
    assert(NumValues == VTs.NumVTs &&
           "NumValues wasn't wide enough for its operands!");
  }

  /// Release the operands and set this node to have zero operands.
  void DropOperands();
};
1068
1069/// Wrapper class for IR location info (IR ordering and DebugLoc) to be passed
1070/// into SDNode creation functions.
1071/// When an SDNode is created from the DAGBuilder, the DebugLoc is extracted
1072/// from the original Instruction, and IROrder is the ordinal position of
1073/// the instruction.
1074/// When an SDNode is created after the DAG is being built, both DebugLoc and
1075/// the IROrder are propagated from the original SDNode.
1076/// So SDLoc class provides two constructors besides the default one, one to
1077/// be used by the DAGBuilder, the other to be used by others.
class SDLoc {
private:
  DebugLoc DL;
  int IROrder = 0;

public:
  SDLoc() = default;
  /// Propagate both DebugLoc and IROrder from an existing node.
  SDLoc(const SDNode *N) : DL(N->getDebugLoc()), IROrder(N->getIROrder()) {}
  SDLoc(const SDValue V) : SDLoc(V.getNode()) {}
  /// DAGBuilder form: the DebugLoc comes from the IR instruction and Order
  /// is its ordinal position.  I may be null, in which case DL stays empty.
  SDLoc(const Instruction *I, int Order) : IROrder(Order) {
    assert(Order >= 0 && "bad IROrder");
    if (I)
      DL = I->getDebugLoc();
  }

  unsigned getIROrder() const { return IROrder; }
  const DebugLoc &getDebugLoc() const { return DL; }
};
1096
1097// Define inline functions from the SDValue class.
1098
1099inline SDValue::SDValue(SDNode *node, unsigned resno)
1100 : Node(node), ResNo(resno) {
1101 // Explicitly check for !ResNo to avoid use-after-free, because there are
1102 // callers that use SDValue(N, 0) with a deleted N to indicate successful
1103 // combines.
1104 assert((!Node || !ResNo || ResNo < Node->getNumValues()) &&(((!Node || !ResNo || ResNo < Node->getNumValues()) &&
"Invalid result number for the given node!") ? static_cast<
void> (0) : __assert_fail ("(!Node || !ResNo || ResNo < Node->getNumValues()) && \"Invalid result number for the given node!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1105, __PRETTY_FUNCTION__))
1105 "Invalid result number for the given node!")(((!Node || !ResNo || ResNo < Node->getNumValues()) &&
"Invalid result number for the given node!") ? static_cast<
void> (0) : __assert_fail ("(!Node || !ResNo || ResNo < Node->getNumValues()) && \"Invalid result number for the given node!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1105, __PRETTY_FUNCTION__))
;
1106 assert(ResNo < -2U && "Cannot use result numbers reserved for DenseMaps.")((ResNo < -2U && "Cannot use result numbers reserved for DenseMaps."
) ? static_cast<void> (0) : __assert_fail ("ResNo < -2U && \"Cannot use result numbers reserved for DenseMaps.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1106, __PRETTY_FUNCTION__))
;
1107}
1108
1109inline unsigned SDValue::getOpcode() const {
1110 return Node->getOpcode();
26
Called C++ object pointer is null
1111}
1112
1113inline EVT SDValue::getValueType() const {
1114 return Node->getValueType(ResNo);
1115}
1116
1117inline unsigned SDValue::getNumOperands() const {
1118 return Node->getNumOperands();
1119}
1120
1121inline const SDValue &SDValue::getOperand(unsigned i) const {
1122 return Node->getOperand(i);
1123}
1124
1125inline uint64_t SDValue::getConstantOperandVal(unsigned i) const {
1126 return Node->getConstantOperandVal(i);
1127}
1128
1129inline const APInt &SDValue::getConstantOperandAPInt(unsigned i) const {
1130 return Node->getConstantOperandAPInt(i);
1131}
1132
1133inline bool SDValue::isTargetOpcode() const {
1134 return Node->isTargetOpcode();
1135}
1136
1137inline bool SDValue::isTargetMemoryOpcode() const {
1138 return Node->isTargetMemoryOpcode();
1139}
1140
1141inline bool SDValue::isMachineOpcode() const {
1142 return Node->isMachineOpcode();
1143}
1144
1145inline unsigned SDValue::getMachineOpcode() const {
1146 return Node->getMachineOpcode();
1147}
1148
1149inline bool SDValue::isUndef() const {
1150 return Node->isUndef();
1151}
1152
1153inline bool SDValue::use_empty() const {
1154 return !Node->hasAnyUseOfValue(ResNo);
1155}
1156
1157inline bool SDValue::hasOneUse() const {
1158 return Node->hasNUsesOfValue(1, ResNo);
1159}
1160
1161inline const DebugLoc &SDValue::getDebugLoc() const {
1162 return Node->getDebugLoc();
1163}
1164
1165inline void SDValue::dump() const {
1166 return Node->dump();
1167}
1168
1169inline void SDValue::dump(const SelectionDAG *G) const {
1170 return Node->dump(G);
1171}
1172
1173inline void SDValue::dumpr() const {
1174 return Node->dumpr();
1175}
1176
1177inline void SDValue::dumpr(const SelectionDAG *G) const {
1178 return Node->dumpr(G);
1179}
1180
1181// Define inline functions from the SDUse class.
1182
1183inline void SDUse::set(const SDValue &V) {
1184 if (Val.getNode()) removeFromList();
1185 Val = V;
1186 if (V.getNode()) V.getNode()->addUse(*this);
1187}
1188
1189inline void SDUse::setInitial(const SDValue &V) {
1190 Val = V;
1191 V.getNode()->addUse(*this);
1192}
1193
1194inline void SDUse::setNode(SDNode *N) {
1195 if (Val.getNode()) removeFromList();
1196 Val.setNode(N);
1197 if (N) N->addUse(*this);
1198}
1199
1200/// This class is used to form a handle around another node that
1201/// is persistent and is updated across invocations of replaceAllUsesWith on its
1202/// operand. This node should be directly created by end-users and not added to
1203/// the AllNodes list.
1204class HandleSDNode : public SDNode {
1205 SDUse Op;
1206
1207public:
1208 explicit HandleSDNode(SDValue X)
1209 : SDNode(ISD::HANDLENODE, 0, DebugLoc(), getSDVTList(MVT::Other)) {
1210 // HandleSDNodes are never inserted into the DAG, so they won't be
1211 // auto-numbered. Use ID 65535 as a sentinel.
1212 PersistentId = 0xffff;
1213
1214 // Manually set up the operand list. This node type is special in that it's
1215 // always stack allocated and SelectionDAG does not manage its operands.
1216 // TODO: This should either (a) not be in the SDNode hierarchy, or (b) not
1217 // be so special.
1218 Op.setUser(this);
1219 Op.setInitial(X);
1220 NumOperands = 1;
1221 OperandList = &Op;
1222 }
1223 ~HandleSDNode();
1224
1225 const SDValue &getValue() const { return Op; }
1226};
1227
1228class AddrSpaceCastSDNode : public SDNode {
1229private:
1230 unsigned SrcAddrSpace;
1231 unsigned DestAddrSpace;
1232
1233public:
1234 AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl, EVT VT,
1235 unsigned SrcAS, unsigned DestAS);
1236
1237 unsigned getSrcAddressSpace() const { return SrcAddrSpace; }
1238 unsigned getDestAddressSpace() const { return DestAddrSpace; }
1239
1240 static bool classof(const SDNode *N) {
1241 return N->getOpcode() == ISD::ADDRSPACECAST;
1242 }
1243};
1244
1245/// This is an abstract virtual class for memory operations.
1246class MemSDNode : public SDNode {
1247private:
1248 // VT of in-memory value.
1249 EVT MemoryVT;
1250
1251protected:
1252 /// Memory reference information.
1253 MachineMemOperand *MMO;
1254
1255public:
1256 MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTs,
1257 EVT memvt, MachineMemOperand *MMO);
1258
1259 bool readMem() const { return MMO->isLoad(); }
1260 bool writeMem() const { return MMO->isStore(); }
1261
1262 /// Returns alignment and volatility of the memory access
1263 Align getOriginalAlign() const { return MMO->getBaseAlign(); }
1264 Align getAlign() const { return MMO->getAlign(); }
1265 LLVM_ATTRIBUTE_DEPRECATED(unsigned getOriginalAlignment() const,[[deprecated("Use getOriginalAlign() instead")]] unsigned getOriginalAlignment
() const
1266 "Use getOriginalAlign() instead")[[deprecated("Use getOriginalAlign() instead")]] unsigned getOriginalAlignment
() const
{
1267 return MMO->getBaseAlign().value();
1268 }
1269 // FIXME: Remove once transition to getAlign is over.
1270 unsigned getAlignment() const { return MMO->getAlign().value(); }
1271
1272 /// Return the SubclassData value, without HasDebugValue. This contains an
1273 /// encoding of the volatile flag, as well as bits used by subclasses. This
1274 /// function should only be used to compute a FoldingSetNodeID value.
1275 /// The HasDebugValue bit is masked out because CSE map needs to match
1276 /// nodes with debug info with nodes without debug info. Same is about
1277 /// isDivergent bit.
1278 unsigned getRawSubclassData() const {
1279 uint16_t Data;
1280 union {
1281 char RawSDNodeBits[sizeof(uint16_t)];
1282 SDNodeBitfields SDNodeBits;
1283 };
1284 memcpy(&RawSDNodeBits, &this->RawSDNodeBits, sizeof(this->RawSDNodeBits));
1285 SDNodeBits.HasDebugValue = 0;
1286 SDNodeBits.IsDivergent = false;
1287 memcpy(&Data, &RawSDNodeBits, sizeof(RawSDNodeBits));
1288 return Data;
1289 }
1290
1291 bool isVolatile() const { return MemSDNodeBits.IsVolatile; }
1292 bool isNonTemporal() const { return MemSDNodeBits.IsNonTemporal; }
1293 bool isDereferenceable() const { return MemSDNodeBits.IsDereferenceable; }
1294 bool isInvariant() const { return MemSDNodeBits.IsInvariant; }
1295
1296 // Returns the offset from the location of the access.
1297 int64_t getSrcValueOffset() const { return MMO->getOffset(); }
1298
1299 /// Returns the AA info that describes the dereference.
1300 AAMDNodes getAAInfo() const { return MMO->getAAInfo(); }
1301
1302 /// Returns the Ranges that describes the dereference.
1303 const MDNode *getRanges() const { return MMO->getRanges(); }
1304
1305 /// Returns the synchronization scope ID for this memory operation.
1306 SyncScope::ID getSyncScopeID() const { return MMO->getSyncScopeID(); }
1307
1308 /// Return the atomic ordering requirements for this memory operation. For
1309 /// cmpxchg atomic operations, return the atomic ordering requirements when
1310 /// store occurs.
1311 AtomicOrdering getOrdering() const { return MMO->getOrdering(); }
1312
1313 /// Return true if the memory operation ordering is Unordered or higher.
1314 bool isAtomic() const { return MMO->isAtomic(); }
1315
1316 /// Returns true if the memory operation doesn't imply any ordering
1317 /// constraints on surrounding memory operations beyond the normal memory
1318 /// aliasing rules.
1319 bool isUnordered() const { return MMO->isUnordered(); }
1320
1321 /// Returns true if the memory operation is neither atomic or volatile.
1322 bool isSimple() const { return !isAtomic() && !isVolatile(); }
1323
1324 /// Return the type of the in-memory value.
1325 EVT getMemoryVT() const { return MemoryVT; }
1326
1327 /// Return a MachineMemOperand object describing the memory
1328 /// reference performed by operation.
1329 MachineMemOperand *getMemOperand() const { return MMO; }
1330
1331 const MachinePointerInfo &getPointerInfo() const {
1332 return MMO->getPointerInfo();
1333 }
1334
1335 /// Return the address space for the associated pointer
1336 unsigned getAddressSpace() const {
1337 return getPointerInfo().getAddrSpace();
1338 }
1339
1340 /// Update this MemSDNode's MachineMemOperand information
1341 /// to reflect the alignment of NewMMO, if it has a greater alignment.
1342 /// This must only be used when the new alignment applies to all users of
1343 /// this MachineMemOperand.
1344 void refineAlignment(const MachineMemOperand *NewMMO) {
1345 MMO->refineAlignment(NewMMO);
1346 }
1347
1348 const SDValue &getChain() const { return getOperand(0); }
1349
1350 const SDValue &getBasePtr() const {
1351 switch (getOpcode()) {
1352 case ISD::STORE:
1353 case ISD::MSTORE:
1354 return getOperand(2);
1355 case ISD::MGATHER:
1356 case ISD::MSCATTER:
1357 return getOperand(3);
1358 default:
1359 return getOperand(1);
1360 }
1361 }
1362
1363 // Methods to support isa and dyn_cast
1364 static bool classof(const SDNode *N) {
1365 // For some targets, we lower some target intrinsics to a MemIntrinsicNode
1366 // with either an intrinsic or a target opcode.
1367 return N->getOpcode() == ISD::LOAD ||
1368 N->getOpcode() == ISD::STORE ||
1369 N->getOpcode() == ISD::PREFETCH ||
1370 N->getOpcode() == ISD::ATOMIC_CMP_SWAP ||
1371 N->getOpcode() == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS ||
1372 N->getOpcode() == ISD::ATOMIC_SWAP ||
1373 N->getOpcode() == ISD::ATOMIC_LOAD_ADD ||
1374 N->getOpcode() == ISD::ATOMIC_LOAD_SUB ||
1375 N->getOpcode() == ISD::ATOMIC_LOAD_AND ||
1376 N->getOpcode() == ISD::ATOMIC_LOAD_CLR ||
1377 N->getOpcode() == ISD::ATOMIC_LOAD_OR ||
1378 N->getOpcode() == ISD::ATOMIC_LOAD_XOR ||
1379 N->getOpcode() == ISD::ATOMIC_LOAD_NAND ||
1380 N->getOpcode() == ISD::ATOMIC_LOAD_MIN ||
1381 N->getOpcode() == ISD::ATOMIC_LOAD_MAX ||
1382 N->getOpcode() == ISD::ATOMIC_LOAD_UMIN ||
1383 N->getOpcode() == ISD::ATOMIC_LOAD_UMAX ||
1384 N->getOpcode() == ISD::ATOMIC_LOAD_FADD ||
1385 N->getOpcode() == ISD::ATOMIC_LOAD_FSUB ||
1386 N->getOpcode() == ISD::ATOMIC_LOAD ||
1387 N->getOpcode() == ISD::ATOMIC_STORE ||
1388 N->getOpcode() == ISD::MLOAD ||
1389 N->getOpcode() == ISD::MSTORE ||
1390 N->getOpcode() == ISD::MGATHER ||
1391 N->getOpcode() == ISD::MSCATTER ||
1392 N->isMemIntrinsic() ||
1393 N->isTargetMemoryOpcode();
1394 }
1395};
1396
1397/// This is an SDNode representing atomic operations.
1398class AtomicSDNode : public MemSDNode {
1399public:
1400 AtomicSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTL,
1401 EVT MemVT, MachineMemOperand *MMO)
1402 : MemSDNode(Opc, Order, dl, VTL, MemVT, MMO) {
1403 assert(((Opc != ISD::ATOMIC_LOAD && Opc != ISD::ATOMIC_STORE) ||((((Opc != ISD::ATOMIC_LOAD && Opc != ISD::ATOMIC_STORE
) || MMO->isAtomic()) && "then why are we using an AtomicSDNode?"
) ? static_cast<void> (0) : __assert_fail ("((Opc != ISD::ATOMIC_LOAD && Opc != ISD::ATOMIC_STORE) || MMO->isAtomic()) && \"then why are we using an AtomicSDNode?\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1404, __PRETTY_FUNCTION__))
1404 MMO->isAtomic()) && "then why are we using an AtomicSDNode?")((((Opc != ISD::ATOMIC_LOAD && Opc != ISD::ATOMIC_STORE
) || MMO->isAtomic()) && "then why are we using an AtomicSDNode?"
) ? static_cast<void> (0) : __assert_fail ("((Opc != ISD::ATOMIC_LOAD && Opc != ISD::ATOMIC_STORE) || MMO->isAtomic()) && \"then why are we using an AtomicSDNode?\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1404, __PRETTY_FUNCTION__))
;
1405 }
1406
1407 const SDValue &getBasePtr() const { return getOperand(1); }
1408 const SDValue &getVal() const { return getOperand(2); }
1409
1410 /// Returns true if this SDNode represents cmpxchg atomic operation, false
1411 /// otherwise.
1412 bool isCompareAndSwap() const {
1413 unsigned Op = getOpcode();
1414 return Op == ISD::ATOMIC_CMP_SWAP ||
1415 Op == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS;
1416 }
1417
1418 /// For cmpxchg atomic operations, return the atomic ordering requirements
1419 /// when store does not occur.
1420 AtomicOrdering getFailureOrdering() const {
1421 assert(isCompareAndSwap() && "Must be cmpxchg operation")((isCompareAndSwap() && "Must be cmpxchg operation") ?
static_cast<void> (0) : __assert_fail ("isCompareAndSwap() && \"Must be cmpxchg operation\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1421, __PRETTY_FUNCTION__))
;
1422 return MMO->getFailureOrdering();
1423 }
1424
1425 // Methods to support isa and dyn_cast
1426 static bool classof(const SDNode *N) {
1427 return N->getOpcode() == ISD::ATOMIC_CMP_SWAP ||
1428 N->getOpcode() == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS ||
1429 N->getOpcode() == ISD::ATOMIC_SWAP ||
1430 N->getOpcode() == ISD::ATOMIC_LOAD_ADD ||
1431 N->getOpcode() == ISD::ATOMIC_LOAD_SUB ||
1432 N->getOpcode() == ISD::ATOMIC_LOAD_AND ||
1433 N->getOpcode() == ISD::ATOMIC_LOAD_CLR ||
1434 N->getOpcode() == ISD::ATOMIC_LOAD_OR ||
1435 N->getOpcode() == ISD::ATOMIC_LOAD_XOR ||
1436 N->getOpcode() == ISD::ATOMIC_LOAD_NAND ||
1437 N->getOpcode() == ISD::ATOMIC_LOAD_MIN ||
1438 N->getOpcode() == ISD::ATOMIC_LOAD_MAX ||
1439 N->getOpcode() == ISD::ATOMIC_LOAD_UMIN ||
1440 N->getOpcode() == ISD::ATOMIC_LOAD_UMAX ||
1441 N->getOpcode() == ISD::ATOMIC_LOAD_FADD ||
1442 N->getOpcode() == ISD::ATOMIC_LOAD_FSUB ||
1443 N->getOpcode() == ISD::ATOMIC_LOAD ||
1444 N->getOpcode() == ISD::ATOMIC_STORE;
1445 }
1446};
1447
1448/// This SDNode is used for target intrinsics that touch
1449/// memory and need an associated MachineMemOperand. Its opcode may be
1450/// INTRINSIC_VOID, INTRINSIC_W_CHAIN, PREFETCH, or a target-specific opcode
1451/// with a value not less than FIRST_TARGET_MEMORY_OPCODE.
1452class MemIntrinsicSDNode : public MemSDNode {
1453public:
1454 MemIntrinsicSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl,
1455 SDVTList VTs, EVT MemoryVT, MachineMemOperand *MMO)
1456 : MemSDNode(Opc, Order, dl, VTs, MemoryVT, MMO) {
1457 SDNodeBits.IsMemIntrinsic = true;
1458 }
1459
1460 // Methods to support isa and dyn_cast
1461 static bool classof(const SDNode *N) {
1462 // We lower some target intrinsics to their target opcode
1463 // early a node with a target opcode can be of this class
1464 return N->isMemIntrinsic() ||
1465 N->getOpcode() == ISD::PREFETCH ||
1466 N->isTargetMemoryOpcode();
1467 }
1468};
1469
1470/// This SDNode is used to implement the code generator
1471/// support for the llvm IR shufflevector instruction. It combines elements
1472/// from two input vectors into a new input vector, with the selection and
1473/// ordering of elements determined by an array of integers, referred to as
1474/// the shuffle mask. For input vectors of width N, mask indices of 0..N-1
1475/// refer to elements from the LHS input, and indices from N to 2N-1 the RHS.
1476/// An index of -1 is treated as undef, such that the code generator may put
1477/// any value in the corresponding element of the result.
1478class ShuffleVectorSDNode : public SDNode {
1479 // The memory for Mask is owned by the SelectionDAG's OperandAllocator, and
1480 // is freed when the SelectionDAG object is destroyed.
1481 const int *Mask;
1482
1483protected:
1484 friend class SelectionDAG;
1485
1486 ShuffleVectorSDNode(EVT VT, unsigned Order, const DebugLoc &dl, const int *M)
1487 : SDNode(ISD::VECTOR_SHUFFLE, Order, dl, getSDVTList(VT)), Mask(M) {}
1488
1489public:
1490 ArrayRef<int> getMask() const {
1491 EVT VT = getValueType(0);
1492 return makeArrayRef(Mask, VT.getVectorNumElements());
1493 }
1494
1495 int getMaskElt(unsigned Idx) const {
1496 assert(Idx < getValueType(0).getVectorNumElements() && "Idx out of range!")((Idx < getValueType(0).getVectorNumElements() && "Idx out of range!"
) ? static_cast<void> (0) : __assert_fail ("Idx < getValueType(0).getVectorNumElements() && \"Idx out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1496, __PRETTY_FUNCTION__))
;
1497 return Mask[Idx];
1498 }
1499
1500 bool isSplat() const { return isSplatMask(Mask, getValueType(0)); }
1501
1502 int getSplatIndex() const {
1503 assert(isSplat() && "Cannot get splat index for non-splat!")((isSplat() && "Cannot get splat index for non-splat!"
) ? static_cast<void> (0) : __assert_fail ("isSplat() && \"Cannot get splat index for non-splat!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1503, __PRETTY_FUNCTION__))
;
1504 EVT VT = getValueType(0);
1505 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
1506 if (Mask[i] >= 0)
1507 return Mask[i];
1508
1509 // We can choose any index value here and be correct because all elements
1510 // are undefined. Return 0 for better potential for callers to simplify.
1511 return 0;
1512 }
1513
1514 static bool isSplatMask(const int *Mask, EVT VT);
1515
1516