Bug Summary

File: llvm/include/llvm/CodeGen/SelectionDAGNodes.h
Warning: line 1110, column 10
Called C++ object pointer is null

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name WebAssemblyISelLowering.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -fhalf-no-semantic-interposition -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -fno-split-dwarf-inlining -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-12/lib/clang/12.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/build-llvm/lib/Target/WebAssembly -I /build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/WebAssembly -I /build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/build-llvm/include -I /build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward 
-internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-12/lib/clang/12.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/build-llvm/lib/Target/WebAssembly -fdebug-prefix-map=/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1=. -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2021-01-24-223304-31662-1 -x c++ /build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp

/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp

//=- WebAssemblyISelLowering.cpp - WebAssembly DAG Lowering Implementation -==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the WebAssemblyTargetLowering class.
///
//===----------------------------------------------------------------------===//
13
14#include "WebAssemblyISelLowering.h"
15#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
16#include "WebAssemblyMachineFunctionInfo.h"
17#include "WebAssemblySubtarget.h"
18#include "WebAssemblyTargetMachine.h"
19#include "WebAssemblyUtilities.h"
20#include "llvm/CodeGen/Analysis.h"
21#include "llvm/CodeGen/CallingConvLower.h"
22#include "llvm/CodeGen/MachineInstrBuilder.h"
23#include "llvm/CodeGen/MachineJumpTableInfo.h"
24#include "llvm/CodeGen/MachineModuleInfo.h"
25#include "llvm/CodeGen/MachineRegisterInfo.h"
26#include "llvm/CodeGen/SelectionDAG.h"
27#include "llvm/CodeGen/WasmEHFuncInfo.h"
28#include "llvm/IR/DiagnosticInfo.h"
29#include "llvm/IR/DiagnosticPrinter.h"
30#include "llvm/IR/Function.h"
31#include "llvm/IR/Intrinsics.h"
32#include "llvm/IR/IntrinsicsWebAssembly.h"
33#include "llvm/Support/Debug.h"
34#include "llvm/Support/ErrorHandling.h"
35#include "llvm/Support/MathExtras.h"
36#include "llvm/Support/raw_ostream.h"
37#include "llvm/Target/TargetOptions.h"
38using namespace llvm;
39
// Debug category used by LLVM_DEBUG / -debug-only=wasm-lower.
#define DEBUG_TYPE "wasm-lower"
41
42WebAssemblyTargetLowering::WebAssemblyTargetLowering(
43 const TargetMachine &TM, const WebAssemblySubtarget &STI)
44 : TargetLowering(TM), Subtarget(&STI) {
45 auto MVTPtr = Subtarget->hasAddr64() ? MVT::i64 : MVT::i32;
46
47 // Booleans always contain 0 or 1.
48 setBooleanContents(ZeroOrOneBooleanContent);
49 // Except in SIMD vectors
50 setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
51 // We don't know the microarchitecture here, so just reduce register pressure.
52 setSchedulingPreference(Sched::RegPressure);
53 // Tell ISel that we have a stack pointer.
54 setStackPointerRegisterToSaveRestore(
55 Subtarget->hasAddr64() ? WebAssembly::SP64 : WebAssembly::SP32);
56 // Set up the register classes.
57 addRegisterClass(MVT::i32, &WebAssembly::I32RegClass);
58 addRegisterClass(MVT::i64, &WebAssembly::I64RegClass);
59 addRegisterClass(MVT::f32, &WebAssembly::F32RegClass);
60 addRegisterClass(MVT::f64, &WebAssembly::F64RegClass);
61 if (Subtarget->hasSIMD128()) {
62 addRegisterClass(MVT::v16i8, &WebAssembly::V128RegClass);
63 addRegisterClass(MVT::v8i16, &WebAssembly::V128RegClass);
64 addRegisterClass(MVT::v4i32, &WebAssembly::V128RegClass);
65 addRegisterClass(MVT::v4f32, &WebAssembly::V128RegClass);
66 addRegisterClass(MVT::v2i64, &WebAssembly::V128RegClass);
67 addRegisterClass(MVT::v2f64, &WebAssembly::V128RegClass);
68 }
69 // Compute derived properties from the register classes.
70 computeRegisterProperties(Subtarget->getRegisterInfo());
71
72 setOperationAction(ISD::GlobalAddress, MVTPtr, Custom);
73 setOperationAction(ISD::GlobalTLSAddress, MVTPtr, Custom);
74 setOperationAction(ISD::ExternalSymbol, MVTPtr, Custom);
75 setOperationAction(ISD::JumpTable, MVTPtr, Custom);
76 setOperationAction(ISD::BlockAddress, MVTPtr, Custom);
77 setOperationAction(ISD::BRIND, MVT::Other, Custom);
78
79 // Take the default expansion for va_arg, va_copy, and va_end. There is no
80 // default action for va_start, so we do that custom.
81 setOperationAction(ISD::VASTART, MVT::Other, Custom);
82 setOperationAction(ISD::VAARG, MVT::Other, Expand);
83 setOperationAction(ISD::VACOPY, MVT::Other, Expand);
84 setOperationAction(ISD::VAEND, MVT::Other, Expand);
85
86 for (auto T : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
87 // Don't expand the floating-point types to constant pools.
88 setOperationAction(ISD::ConstantFP, T, Legal);
89 // Expand floating-point comparisons.
90 for (auto CC : {ISD::SETO, ISD::SETUO, ISD::SETUEQ, ISD::SETONE,
91 ISD::SETULT, ISD::SETULE, ISD::SETUGT, ISD::SETUGE})
92 setCondCodeAction(CC, T, Expand);
93 // Expand floating-point library function operators.
94 for (auto Op :
95 {ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FMA})
96 setOperationAction(Op, T, Expand);
97 // Note supported floating-point library function operators that otherwise
98 // default to expand.
99 for (auto Op :
100 {ISD::FCEIL, ISD::FFLOOR, ISD::FTRUNC, ISD::FNEARBYINT, ISD::FRINT})
101 setOperationAction(Op, T, Legal);
102 // Support minimum and maximum, which otherwise default to expand.
103 setOperationAction(ISD::FMINIMUM, T, Legal);
104 setOperationAction(ISD::FMAXIMUM, T, Legal);
105 // WebAssembly currently has no builtin f16 support.
106 setOperationAction(ISD::FP16_TO_FP, T, Expand);
107 setOperationAction(ISD::FP_TO_FP16, T, Expand);
108 setLoadExtAction(ISD::EXTLOAD, T, MVT::f16, Expand);
109 setTruncStoreAction(T, MVT::f16, Expand);
110 }
111
112 // Expand unavailable integer operations.
113 for (auto Op :
114 {ISD::BSWAP, ISD::SMUL_LOHI, ISD::UMUL_LOHI, ISD::MULHS, ISD::MULHU,
115 ISD::SDIVREM, ISD::UDIVREM, ISD::SHL_PARTS, ISD::SRA_PARTS,
116 ISD::SRL_PARTS, ISD::ADDC, ISD::ADDE, ISD::SUBC, ISD::SUBE}) {
117 for (auto T : {MVT::i32, MVT::i64})
118 setOperationAction(Op, T, Expand);
119 if (Subtarget->hasSIMD128())
120 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
121 setOperationAction(Op, T, Expand);
122 }
123
124 // SIMD-specific configuration
125 if (Subtarget->hasSIMD128()) {
126 // Hoist bitcasts out of shuffles
127 setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
128
129 // Combine extends of extract_subvectors into widening ops
130 setTargetDAGCombine(ISD::SIGN_EXTEND);
131 setTargetDAGCombine(ISD::ZERO_EXTEND);
132
133 // Support saturating add for i8x16 and i16x8
134 for (auto Op : {ISD::SADDSAT, ISD::UADDSAT})
135 for (auto T : {MVT::v16i8, MVT::v8i16})
136 setOperationAction(Op, T, Legal);
137
138 // Support integer abs
139 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
140 setOperationAction(ISD::ABS, T, Legal);
141
142 // Custom lower BUILD_VECTORs to minimize number of replace_lanes
143 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
144 MVT::v2f64})
145 setOperationAction(ISD::BUILD_VECTOR, T, Custom);
146
147 // We have custom shuffle lowering to expose the shuffle mask
148 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
149 MVT::v2f64})
150 setOperationAction(ISD::VECTOR_SHUFFLE, T, Custom);
151
152 // Custom lowering since wasm shifts must have a scalar shift amount
153 for (auto Op : {ISD::SHL, ISD::SRA, ISD::SRL})
154 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
155 setOperationAction(Op, T, Custom);
156
157 // Custom lower lane accesses to expand out variable indices
158 for (auto Op : {ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_VECTOR_ELT})
159 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
160 MVT::v2f64})
161 setOperationAction(Op, T, Custom);
162
163 // There is no i8x16.mul instruction
164 setOperationAction(ISD::MUL, MVT::v16i8, Expand);
165
166 // There is no vector conditional select instruction
167 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
168 MVT::v2f64})
169 setOperationAction(ISD::SELECT_CC, T, Expand);
170
171 // Expand integer operations supported for scalars but not SIMD
172 for (auto Op : {ISD::CTLZ, ISD::CTTZ, ISD::CTPOP, ISD::SDIV, ISD::UDIV,
173 ISD::SREM, ISD::UREM, ISD::ROTL, ISD::ROTR})
174 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
175 setOperationAction(Op, T, Expand);
176
177 // But we do have integer min and max operations
178 for (auto Op : {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
179 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
180 setOperationAction(Op, T, Legal);
181
182 // Expand float operations supported for scalars but not SIMD
183 for (auto Op : {ISD::FCEIL, ISD::FFLOOR, ISD::FTRUNC, ISD::FNEARBYINT,
184 ISD::FCOPYSIGN, ISD::FLOG, ISD::FLOG2, ISD::FLOG10,
185 ISD::FEXP, ISD::FEXP2, ISD::FRINT})
186 for (auto T : {MVT::v4f32, MVT::v2f64})
187 setOperationAction(Op, T, Expand);
188
189 // Expand operations not supported for i64x2 vectors
190 for (unsigned CC = 0; CC < ISD::SETCC_INVALID; ++CC)
191 setCondCodeAction(static_cast<ISD::CondCode>(CC), MVT::v2i64, Custom);
192
193 // 64x2 conversions are not in the spec
194 for (auto Op :
195 {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT})
196 for (auto T : {MVT::v2i64, MVT::v2f64})
197 setOperationAction(Op, T, Expand);
198 }
199
200 // As a special case, these operators use the type to mean the type to
201 // sign-extend from.
202 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
203 if (!Subtarget->hasSignExt()) {
204 // Sign extends are legal only when extending a vector extract
205 auto Action = Subtarget->hasSIMD128() ? Custom : Expand;
206 for (auto T : {MVT::i8, MVT::i16, MVT::i32})
207 setOperationAction(ISD::SIGN_EXTEND_INREG, T, Action);
208 }
209 for (auto T : MVT::integer_fixedlen_vector_valuetypes())
210 setOperationAction(ISD::SIGN_EXTEND_INREG, T, Expand);
211
212 // Dynamic stack allocation: use the default expansion.
213 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
214 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
215 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVTPtr, Expand);
216
217 setOperationAction(ISD::FrameIndex, MVT::i32, Custom);
218 setOperationAction(ISD::FrameIndex, MVT::i64, Custom);
219 setOperationAction(ISD::CopyToReg, MVT::Other, Custom);
220
221 // Expand these forms; we pattern-match the forms that we can handle in isel.
222 for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64})
223 for (auto Op : {ISD::BR_CC, ISD::SELECT_CC})
224 setOperationAction(Op, T, Expand);
225
226 // We have custom switch handling.
227 setOperationAction(ISD::BR_JT, MVT::Other, Custom);
228
229 // WebAssembly doesn't have:
230 // - Floating-point extending loads.
231 // - Floating-point truncating stores.
232 // - i1 extending loads.
233 // - truncating SIMD stores and most extending loads
234 setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
235 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
236 for (auto T : MVT::integer_valuetypes())
237 for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
238 setLoadExtAction(Ext, T, MVT::i1, Promote);
239 if (Subtarget->hasSIMD128()) {
240 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v4f32,
241 MVT::v2f64}) {
242 for (auto MemT : MVT::fixedlen_vector_valuetypes()) {
243 if (MVT(T) != MemT) {
244 setTruncStoreAction(T, MemT, Expand);
245 for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
246 setLoadExtAction(Ext, T, MemT, Expand);
247 }
248 }
249 }
250 // But some vector extending loads are legal
251 for (auto Ext : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}) {
252 setLoadExtAction(Ext, MVT::v8i16, MVT::v8i8, Legal);
253 setLoadExtAction(Ext, MVT::v4i32, MVT::v4i16, Legal);
254 setLoadExtAction(Ext, MVT::v2i64, MVT::v2i32, Legal);
255 }
256 // And some truncating stores are legal as well
257 setTruncStoreAction(MVT::v8i16, MVT::v8i8, Legal);
258 setTruncStoreAction(MVT::v4i32, MVT::v4i16, Legal);
259 }
260
261 // Don't do anything clever with build_pairs
262 setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
263
264 // Trap lowers to wasm unreachable
265 setOperationAction(ISD::TRAP, MVT::Other, Legal);
266 setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
267
268 // Exception handling intrinsics
269 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
270 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
271 setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
272
273 setMaxAtomicSizeInBitsSupported(64);
274
275 // Override the __gnu_f2h_ieee/__gnu_h2f_ieee names so that the f32 name is
276 // consistent with the f64 and f128 names.
277 setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
278 setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
279
280 // Define the emscripten name for return address helper.
281 // TODO: when implementing other Wasm backends, make this generic or only do
282 // this on emscripten depending on what they end up doing.
283 setLibcallName(RTLIB::RETURN_ADDRESS, "emscripten_return_address");
284
285 // Always convert switches to br_tables unless there is only one case, which
286 // is equivalent to a simple branch. This reduces code size for wasm, and we
287 // defer possible jump table optimizations to the VM.
288 setMinimumJumpTableEntries(2);
289}
290
291TargetLowering::AtomicExpansionKind
292WebAssemblyTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
293 // We have wasm instructions for these
294 switch (AI->getOperation()) {
295 case AtomicRMWInst::Add:
296 case AtomicRMWInst::Sub:
297 case AtomicRMWInst::And:
298 case AtomicRMWInst::Or:
299 case AtomicRMWInst::Xor:
300 case AtomicRMWInst::Xchg:
301 return AtomicExpansionKind::None;
302 default:
303 break;
304 }
305 return AtomicExpansionKind::CmpXChg;
306}
307
308FastISel *WebAssemblyTargetLowering::createFastISel(
309 FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo) const {
310 return WebAssembly::createFastISel(FuncInfo, LibInfo);
311}
312
313MVT WebAssemblyTargetLowering::getScalarShiftAmountTy(const DataLayout & /*DL*/,
314 EVT VT) const {
315 unsigned BitWidth = NextPowerOf2(VT.getSizeInBits() - 1);
316 if (BitWidth > 1 && BitWidth < 8)
317 BitWidth = 8;
318
319 if (BitWidth > 64) {
320 // The shift will be lowered to a libcall, and compiler-rt libcalls expect
321 // the count to be an i32.
322 BitWidth = 32;
323 assert(BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) &&((BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) && "32-bit shift counts ought to be enough for anyone"
) ? static_cast<void> (0) : __assert_fail ("BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) && \"32-bit shift counts ought to be enough for anyone\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 324, __PRETTY_FUNCTION__))
324 "32-bit shift counts ought to be enough for anyone")((BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) && "32-bit shift counts ought to be enough for anyone"
) ? static_cast<void> (0) : __assert_fail ("BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) && \"32-bit shift counts ought to be enough for anyone\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 324, __PRETTY_FUNCTION__))
;
325 }
326
327 MVT Result = MVT::getIntegerVT(BitWidth);
328 assert(Result != MVT::INVALID_SIMPLE_VALUE_TYPE &&((Result != MVT::INVALID_SIMPLE_VALUE_TYPE && "Unable to represent scalar shift amount type"
) ? static_cast<void> (0) : __assert_fail ("Result != MVT::INVALID_SIMPLE_VALUE_TYPE && \"Unable to represent scalar shift amount type\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 329, __PRETTY_FUNCTION__))
329 "Unable to represent scalar shift amount type")((Result != MVT::INVALID_SIMPLE_VALUE_TYPE && "Unable to represent scalar shift amount type"
) ? static_cast<void> (0) : __assert_fail ("Result != MVT::INVALID_SIMPLE_VALUE_TYPE && \"Unable to represent scalar shift amount type\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 329, __PRETTY_FUNCTION__))
;
330 return Result;
331}
332
333// Lower an fp-to-int conversion operator from the LLVM opcode, which has an
334// undefined result on invalid/overflow, to the WebAssembly opcode, which
335// traps on invalid/overflow.
336static MachineBasicBlock *LowerFPToInt(MachineInstr &MI, DebugLoc DL,
337 MachineBasicBlock *BB,
338 const TargetInstrInfo &TII,
339 bool IsUnsigned, bool Int64,
340 bool Float64, unsigned LoweredOpcode) {
341 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
342
343 Register OutReg = MI.getOperand(0).getReg();
344 Register InReg = MI.getOperand(1).getReg();
345
346 unsigned Abs = Float64 ? WebAssembly::ABS_F64 : WebAssembly::ABS_F32;
347 unsigned FConst = Float64 ? WebAssembly::CONST_F64 : WebAssembly::CONST_F32;
348 unsigned LT = Float64 ? WebAssembly::LT_F64 : WebAssembly::LT_F32;
349 unsigned GE = Float64 ? WebAssembly::GE_F64 : WebAssembly::GE_F32;
350 unsigned IConst = Int64 ? WebAssembly::CONST_I64 : WebAssembly::CONST_I32;
351 unsigned Eqz = WebAssembly::EQZ_I32;
352 unsigned And = WebAssembly::AND_I32;
353 int64_t Limit = Int64 ? INT64_MIN(-9223372036854775807L -1) : INT32_MIN(-2147483647-1);
354 int64_t Substitute = IsUnsigned ? 0 : Limit;
355 double CmpVal = IsUnsigned ? -(double)Limit * 2.0 : -(double)Limit;
356 auto &Context = BB->getParent()->getFunction().getContext();
357 Type *Ty = Float64 ? Type::getDoubleTy(Context) : Type::getFloatTy(Context);
358
359 const BasicBlock *LLVMBB = BB->getBasicBlock();
360 MachineFunction *F = BB->getParent();
361 MachineBasicBlock *TrueMBB = F->CreateMachineBasicBlock(LLVMBB);
362 MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVMBB);
363 MachineBasicBlock *DoneMBB = F->CreateMachineBasicBlock(LLVMBB);
364
365 MachineFunction::iterator It = ++BB->getIterator();
366 F->insert(It, FalseMBB);
367 F->insert(It, TrueMBB);
368 F->insert(It, DoneMBB);
369
370 // Transfer the remainder of BB and its successor edges to DoneMBB.
371 DoneMBB->splice(DoneMBB->begin(), BB, std::next(MI.getIterator()), BB->end());
372 DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
373
374 BB->addSuccessor(TrueMBB);
375 BB->addSuccessor(FalseMBB);
376 TrueMBB->addSuccessor(DoneMBB);
377 FalseMBB->addSuccessor(DoneMBB);
378
379 unsigned Tmp0, Tmp1, CmpReg, EqzReg, FalseReg, TrueReg;
380 Tmp0 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
381 Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
382 CmpReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
383 EqzReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
384 FalseReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
385 TrueReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
386
387 MI.eraseFromParent();
388 // For signed numbers, we can do a single comparison to determine whether
389 // fabs(x) is within range.
390 if (IsUnsigned) {
391 Tmp0 = InReg;
392 } else {
393 BuildMI(BB, DL, TII.get(Abs), Tmp0).addReg(InReg);
394 }
395 BuildMI(BB, DL, TII.get(FConst), Tmp1)
396 .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, CmpVal)));
397 BuildMI(BB, DL, TII.get(LT), CmpReg).addReg(Tmp0).addReg(Tmp1);
398
399 // For unsigned numbers, we have to do a separate comparison with zero.
400 if (IsUnsigned) {
401 Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
402 Register SecondCmpReg =
403 MRI.createVirtualRegister(&WebAssembly::I32RegClass);
404 Register AndReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
405 BuildMI(BB, DL, TII.get(FConst), Tmp1)
406 .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, 0.0)));
407 BuildMI(BB, DL, TII.get(GE), SecondCmpReg).addReg(Tmp0).addReg(Tmp1);
408 BuildMI(BB, DL, TII.get(And), AndReg).addReg(CmpReg).addReg(SecondCmpReg);
409 CmpReg = AndReg;
410 }
411
412 BuildMI(BB, DL, TII.get(Eqz), EqzReg).addReg(CmpReg);
413
414 // Create the CFG diamond to select between doing the conversion or using
415 // the substitute value.
416 BuildMI(BB, DL, TII.get(WebAssembly::BR_IF)).addMBB(TrueMBB).addReg(EqzReg);
417 BuildMI(FalseMBB, DL, TII.get(LoweredOpcode), FalseReg).addReg(InReg);
418 BuildMI(FalseMBB, DL, TII.get(WebAssembly::BR)).addMBB(DoneMBB);
419 BuildMI(TrueMBB, DL, TII.get(IConst), TrueReg).addImm(Substitute);
420 BuildMI(*DoneMBB, DoneMBB->begin(), DL, TII.get(TargetOpcode::PHI), OutReg)
421 .addReg(FalseReg)
422 .addMBB(FalseMBB)
423 .addReg(TrueReg)
424 .addMBB(TrueMBB);
425
426 return DoneMBB;
427}
428
429static MachineBasicBlock *LowerCallResults(MachineInstr &CallResults,
430 DebugLoc DL, MachineBasicBlock *BB,
431 const TargetInstrInfo &TII) {
432 MachineInstr &CallParams = *CallResults.getPrevNode();
433 assert(CallParams.getOpcode() == WebAssembly::CALL_PARAMS)((CallParams.getOpcode() == WebAssembly::CALL_PARAMS) ? static_cast
<void> (0) : __assert_fail ("CallParams.getOpcode() == WebAssembly::CALL_PARAMS"
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 433, __PRETTY_FUNCTION__))
;
434 assert(CallResults.getOpcode() == WebAssembly::CALL_RESULTS ||((CallResults.getOpcode() == WebAssembly::CALL_RESULTS || CallResults
.getOpcode() == WebAssembly::RET_CALL_RESULTS) ? static_cast<
void> (0) : __assert_fail ("CallResults.getOpcode() == WebAssembly::CALL_RESULTS || CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS"
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 435, __PRETTY_FUNCTION__))
435 CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS)((CallResults.getOpcode() == WebAssembly::CALL_RESULTS || CallResults
.getOpcode() == WebAssembly::RET_CALL_RESULTS) ? static_cast<
void> (0) : __assert_fail ("CallResults.getOpcode() == WebAssembly::CALL_RESULTS || CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS"
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 435, __PRETTY_FUNCTION__))
;
436
437 bool IsIndirect = CallParams.getOperand(0).isReg();
438 bool IsRetCall = CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS;
439
440 unsigned CallOp;
441 if (IsIndirect && IsRetCall) {
442 CallOp = WebAssembly::RET_CALL_INDIRECT;
443 } else if (IsIndirect) {
444 CallOp = WebAssembly::CALL_INDIRECT;
445 } else if (IsRetCall) {
446 CallOp = WebAssembly::RET_CALL;
447 } else {
448 CallOp = WebAssembly::CALL;
449 }
450
451 MachineFunction &MF = *BB->getParent();
452 const MCInstrDesc &MCID = TII.get(CallOp);
453 MachineInstrBuilder MIB(MF, MF.CreateMachineInstr(MCID, DL));
454
455 // See if we must truncate the function pointer.
456 // CALL_INDIRECT takes an i32, but in wasm64 we represent function pointers
457 // as 64-bit for uniformity with other pointer types.
458 if (IsIndirect && MF.getSubtarget<WebAssemblySubtarget>().hasAddr64()) {
459 Register Reg32 =
460 MF.getRegInfo().createVirtualRegister(&WebAssembly::I32RegClass);
461 auto &FnPtr = CallParams.getOperand(0);
462 BuildMI(*BB, CallResults.getIterator(), DL,
463 TII.get(WebAssembly::I32_WRAP_I64), Reg32)
464 .addReg(FnPtr.getReg());
465 FnPtr.setReg(Reg32);
466 }
467
468 // Move the function pointer to the end of the arguments for indirect calls
469 if (IsIndirect) {
470 auto FnPtr = CallParams.getOperand(0);
471 CallParams.RemoveOperand(0);
472 CallParams.addOperand(FnPtr);
473 }
474
475 for (auto Def : CallResults.defs())
476 MIB.add(Def);
477
478 // Add placeholders for the type index and immediate flags
479 if (IsIndirect) {
480 MIB.addImm(0);
481 MIB.addImm(0);
482
483 // Ensure that the object file has a __indirect_function_table import, as we
484 // call_indirect against it.
485 MCSymbolWasm *Sym = WebAssembly::getOrCreateFunctionTableSymbol(
486 MF.getContext(), "__indirect_function_table");
487 // Until call_indirect emits TABLE_NUMBER relocs against this symbol, mark
488 // it as NO_STRIP so as to ensure that the indirect function table makes it
489 // to linked output.
490 Sym->setNoStrip();
491 }
492
493 for (auto Use : CallParams.uses())
494 MIB.add(Use);
495
496 BB->insert(CallResults.getIterator(), MIB);
497 CallParams.eraseFromParent();
498 CallResults.eraseFromParent();
499
500 return BB;
501}
502
503MachineBasicBlock *WebAssemblyTargetLowering::EmitInstrWithCustomInserter(
504 MachineInstr &MI, MachineBasicBlock *BB) const {
505 const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
506 DebugLoc DL = MI.getDebugLoc();
507
508 switch (MI.getOpcode()) {
509 default:
510 llvm_unreachable("Unexpected instr type to insert")::llvm::llvm_unreachable_internal("Unexpected instr type to insert"
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 510)
;
511 case WebAssembly::FP_TO_SINT_I32_F32:
512 return LowerFPToInt(MI, DL, BB, TII, false, false, false,
513 WebAssembly::I32_TRUNC_S_F32);
514 case WebAssembly::FP_TO_UINT_I32_F32:
515 return LowerFPToInt(MI, DL, BB, TII, true, false, false,
516 WebAssembly::I32_TRUNC_U_F32);
517 case WebAssembly::FP_TO_SINT_I64_F32:
518 return LowerFPToInt(MI, DL, BB, TII, false, true, false,
519 WebAssembly::I64_TRUNC_S_F32);
520 case WebAssembly::FP_TO_UINT_I64_F32:
521 return LowerFPToInt(MI, DL, BB, TII, true, true, false,
522 WebAssembly::I64_TRUNC_U_F32);
523 case WebAssembly::FP_TO_SINT_I32_F64:
524 return LowerFPToInt(MI, DL, BB, TII, false, false, true,
525 WebAssembly::I32_TRUNC_S_F64);
526 case WebAssembly::FP_TO_UINT_I32_F64:
527 return LowerFPToInt(MI, DL, BB, TII, true, false, true,
528 WebAssembly::I32_TRUNC_U_F64);
529 case WebAssembly::FP_TO_SINT_I64_F64:
530 return LowerFPToInt(MI, DL, BB, TII, false, true, true,
531 WebAssembly::I64_TRUNC_S_F64);
532 case WebAssembly::FP_TO_UINT_I64_F64:
533 return LowerFPToInt(MI, DL, BB, TII, true, true, true,
534 WebAssembly::I64_TRUNC_U_F64);
535 case WebAssembly::CALL_RESULTS:
536 case WebAssembly::RET_CALL_RESULTS:
537 return LowerCallResults(MI, DL, BB, TII);
538 }
539}
540
541const char *
542WebAssemblyTargetLowering::getTargetNodeName(unsigned Opcode) const {
543 switch (static_cast<WebAssemblyISD::NodeType>(Opcode)) {
544 case WebAssemblyISD::FIRST_NUMBER:
545 case WebAssemblyISD::FIRST_MEM_OPCODE:
546 break;
547#define HANDLE_NODETYPE(NODE) \
548 case WebAssemblyISD::NODE: \
549 return "WebAssemblyISD::" #NODE;
550#define HANDLE_MEM_NODETYPE(NODE) HANDLE_NODETYPE(NODE)
551#include "WebAssemblyISD.def"
552#undef HANDLE_MEM_NODETYPE
553#undef HANDLE_NODETYPE
554 }
555 return nullptr;
556}
557
558std::pair<unsigned, const TargetRegisterClass *>
559WebAssemblyTargetLowering::getRegForInlineAsmConstraint(
560 const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
561 // First, see if this is a constraint that directly corresponds to a
562 // WebAssembly register class.
563 if (Constraint.size() == 1) {
564 switch (Constraint[0]) {
565 case 'r':
566 assert(VT != MVT::iPTR && "Pointer MVT not expected here")((VT != MVT::iPTR && "Pointer MVT not expected here")
? static_cast<void> (0) : __assert_fail ("VT != MVT::iPTR && \"Pointer MVT not expected here\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 566, __PRETTY_FUNCTION__))
;
567 if (Subtarget->hasSIMD128() && VT.isVector()) {
568 if (VT.getSizeInBits() == 128)
569 return std::make_pair(0U, &WebAssembly::V128RegClass);
570 }
571 if (VT.isInteger() && !VT.isVector()) {
572 if (VT.getSizeInBits() <= 32)
573 return std::make_pair(0U, &WebAssembly::I32RegClass);
574 if (VT.getSizeInBits() <= 64)
575 return std::make_pair(0U, &WebAssembly::I64RegClass);
576 }
577 if (VT.isFloatingPoint() && !VT.isVector()) {
578 switch (VT.getSizeInBits()) {
579 case 32:
580 return std::make_pair(0U, &WebAssembly::F32RegClass);
581 case 64:
582 return std::make_pair(0U, &WebAssembly::F64RegClass);
583 default:
584 break;
585 }
586 }
587 break;
588 default:
589 break;
590 }
591 }
592
593 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
594}
595
596bool WebAssemblyTargetLowering::isCheapToSpeculateCttz() const {
597 // Assume ctz is a relatively cheap operation.
598 return true;
599}
600
601bool WebAssemblyTargetLowering::isCheapToSpeculateCtlz() const {
602 // Assume clz is a relatively cheap operation.
603 return true;
604}
605
606bool WebAssemblyTargetLowering::isLegalAddressingMode(const DataLayout &DL,
607 const AddrMode &AM,
608 Type *Ty, unsigned AS,
609 Instruction *I) const {
610 // WebAssembly offsets are added as unsigned without wrapping. The
611 // isLegalAddressingMode gives us no way to determine if wrapping could be
612 // happening, so we approximate this by accepting only non-negative offsets.
613 if (AM.BaseOffs < 0)
614 return false;
615
616 // WebAssembly has no scale register operands.
617 if (AM.Scale != 0)
618 return false;
619
620 // Everything else is legal.
621 return true;
622}
623
624bool WebAssemblyTargetLowering::allowsMisalignedMemoryAccesses(
625 EVT /*VT*/, unsigned /*AddrSpace*/, unsigned /*Align*/,
626 MachineMemOperand::Flags /*Flags*/, bool *Fast) const {
627 // WebAssembly supports unaligned accesses, though it should be declared
628 // with the p2align attribute on loads and stores which do so, and there
629 // may be a performance impact. We tell LLVM they're "fast" because
630 // for the kinds of things that LLVM uses this for (merging adjacent stores
631 // of constants, etc.), WebAssembly implementations will either want the
632 // unaligned access or they'll split anyway.
633 if (Fast)
634 *Fast = true;
635 return true;
636}
637
638bool WebAssemblyTargetLowering::isIntDivCheap(EVT VT,
639 AttributeList Attr) const {
640 // The current thinking is that wasm engines will perform this optimization,
641 // so we can save on code size.
642 return true;
643}
644
645bool WebAssemblyTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
646 EVT ExtT = ExtVal.getValueType();
647 EVT MemT = cast<LoadSDNode>(ExtVal->getOperand(0))->getValueType(0);
648 return (ExtT == MVT::v8i16 && MemT == MVT::v8i8) ||
649 (ExtT == MVT::v4i32 && MemT == MVT::v4i16) ||
650 (ExtT == MVT::v2i64 && MemT == MVT::v2i32);
651}
652
653EVT WebAssemblyTargetLowering::getSetCCResultType(const DataLayout &DL,
654 LLVMContext &C,
655 EVT VT) const {
656 if (VT.isVector())
657 return VT.changeVectorElementTypeToInteger();
658
659 // So far, all branch instructions in Wasm take an I32 condition.
660 // The default TargetLowering::getSetCCResultType returns the pointer size,
661 // which would be useful to reduce instruction counts when testing
662 // against 64-bit pointers/values if at some point Wasm supports that.
663 return EVT::getIntegerVT(C, 32);
664}
665
666bool WebAssemblyTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
667 const CallInst &I,
668 MachineFunction &MF,
669 unsigned Intrinsic) const {
670 switch (Intrinsic) {
671 case Intrinsic::wasm_memory_atomic_notify:
672 Info.opc = ISD::INTRINSIC_W_CHAIN;
673 Info.memVT = MVT::i32;
674 Info.ptrVal = I.getArgOperand(0);
675 Info.offset = 0;
676 Info.align = Align(4);
677 // atomic.notify instruction does not really load the memory specified with
678 // this argument, but MachineMemOperand should either be load or store, so
679 // we set this to a load.
680 // FIXME Volatile isn't really correct, but currently all LLVM atomic
681 // instructions are treated as volatiles in the backend, so we should be
682 // consistent. The same applies for wasm_atomic_wait intrinsics too.
683 Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
684 return true;
685 case Intrinsic::wasm_memory_atomic_wait32:
686 Info.opc = ISD::INTRINSIC_W_CHAIN;
687 Info.memVT = MVT::i32;
688 Info.ptrVal = I.getArgOperand(0);
689 Info.offset = 0;
690 Info.align = Align(4);
691 Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
692 return true;
693 case Intrinsic::wasm_memory_atomic_wait64:
694 Info.opc = ISD::INTRINSIC_W_CHAIN;
695 Info.memVT = MVT::i64;
696 Info.ptrVal = I.getArgOperand(0);
697 Info.offset = 0;
698 Info.align = Align(8);
699 Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
700 return true;
701 case Intrinsic::wasm_load32_zero:
702 case Intrinsic::wasm_load64_zero:
703 Info.opc = ISD::INTRINSIC_W_CHAIN;
704 Info.memVT = Intrinsic == Intrinsic::wasm_load32_zero ? MVT::i32 : MVT::i64;
705 Info.ptrVal = I.getArgOperand(0);
706 Info.offset = 0;
707 Info.align = Info.memVT == MVT::i32 ? Align(4) : Align(8);
708 Info.flags = MachineMemOperand::MOLoad;
709 return true;
710 case Intrinsic::wasm_load8_lane:
711 case Intrinsic::wasm_load16_lane:
712 case Intrinsic::wasm_load32_lane:
713 case Intrinsic::wasm_load64_lane:
714 case Intrinsic::wasm_store8_lane:
715 case Intrinsic::wasm_store16_lane:
716 case Intrinsic::wasm_store32_lane:
717 case Intrinsic::wasm_store64_lane: {
718 MVT MemVT;
719 Align MemAlign;
720 switch (Intrinsic) {
721 case Intrinsic::wasm_load8_lane:
722 case Intrinsic::wasm_store8_lane:
723 MemVT = MVT::i8;
724 MemAlign = Align(1);
725 break;
726 case Intrinsic::wasm_load16_lane:
727 case Intrinsic::wasm_store16_lane:
728 MemVT = MVT::i16;
729 MemAlign = Align(2);
730 break;
731 case Intrinsic::wasm_load32_lane:
732 case Intrinsic::wasm_store32_lane:
733 MemVT = MVT::i32;
734 MemAlign = Align(4);
735 break;
736 case Intrinsic::wasm_load64_lane:
737 case Intrinsic::wasm_store64_lane:
738 MemVT = MVT::i64;
739 MemAlign = Align(8);
740 break;
741 default:
742 llvm_unreachable("unexpected intrinsic")::llvm::llvm_unreachable_internal("unexpected intrinsic", "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 742)
;
743 }
744 if (Intrinsic == Intrinsic::wasm_load8_lane ||
745 Intrinsic == Intrinsic::wasm_load16_lane ||
746 Intrinsic == Intrinsic::wasm_load32_lane ||
747 Intrinsic == Intrinsic::wasm_load64_lane) {
748 Info.opc = ISD::INTRINSIC_W_CHAIN;
749 Info.flags = MachineMemOperand::MOLoad;
750 } else {
751 Info.opc = ISD::INTRINSIC_VOID;
752 Info.flags = MachineMemOperand::MOStore;
753 }
754 Info.ptrVal = I.getArgOperand(0);
755 Info.memVT = MemVT;
756 Info.offset = 0;
757 Info.align = MemAlign;
758 return true;
759 }
760 case Intrinsic::wasm_prefetch_t:
761 case Intrinsic::wasm_prefetch_nt: {
762 Info.opc = ISD::INTRINSIC_VOID;
763 Info.memVT = MVT::i8;
764 Info.ptrVal = I.getArgOperand(0);
765 Info.offset = 0;
766 Info.align = Align(1);
767 Info.flags = MachineMemOperand::MOLoad;
768 return true;
769 }
770 default:
771 return false;
772 }
773}
774
775//===----------------------------------------------------------------------===//
776// WebAssembly Lowering private implementation.
777//===----------------------------------------------------------------------===//
778
779//===----------------------------------------------------------------------===//
780// Lowering Code
781//===----------------------------------------------------------------------===//
782
783static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg) {
784 MachineFunction &MF = DAG.getMachineFunction();
785 DAG.getContext()->diagnose(
786 DiagnosticInfoUnsupported(MF.getFunction(), Msg, DL.getDebugLoc()));
787}
788
789// Test whether the given calling convention is supported.
790static bool callingConvSupported(CallingConv::ID CallConv) {
791 // We currently support the language-independent target-independent
792 // conventions. We don't yet have a way to annotate calls with properties like
793 // "cold", and we don't have any call-clobbered registers, so these are mostly
794 // all handled the same.
795 return CallConv == CallingConv::C || CallConv == CallingConv::Fast ||
796 CallConv == CallingConv::Cold ||
797 CallConv == CallingConv::PreserveMost ||
798 CallConv == CallingConv::PreserveAll ||
799 CallConv == CallingConv::CXX_FAST_TLS ||
800 CallConv == CallingConv::WASM_EmscriptenInvoke ||
801 CallConv == CallingConv::Swift;
802}
803
804SDValue
805WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
806 SmallVectorImpl<SDValue> &InVals) const {
807 SelectionDAG &DAG = CLI.DAG;
808 SDLoc DL = CLI.DL;
809 SDValue Chain = CLI.Chain;
810 SDValue Callee = CLI.Callee;
811 MachineFunction &MF = DAG.getMachineFunction();
812 auto Layout = MF.getDataLayout();
813
814 CallingConv::ID CallConv = CLI.CallConv;
815 if (!callingConvSupported(CallConv))
816 fail(DL, DAG,
817 "WebAssembly doesn't support language-specific or target-specific "
818 "calling conventions yet");
819 if (CLI.IsPatchPoint)
820 fail(DL, DAG, "WebAssembly doesn't support patch point yet");
821
822 if (CLI.IsTailCall) {
823 auto NoTail = [&](const char *Msg) {
824 if (CLI.CB && CLI.CB->isMustTailCall())
825 fail(DL, DAG, Msg);
826 CLI.IsTailCall = false;
827 };
828
829 if (!Subtarget->hasTailCall())
830 NoTail("WebAssembly 'tail-call' feature not enabled");
831
832 // Varargs calls cannot be tail calls because the buffer is on the stack
833 if (CLI.IsVarArg)
834 NoTail("WebAssembly does not support varargs tail calls");
835
836 // Do not tail call unless caller and callee return types match
837 const Function &F = MF.getFunction();
838 const TargetMachine &TM = getTargetMachine();
839 Type *RetTy = F.getReturnType();
840 SmallVector<MVT, 4> CallerRetTys;
841 SmallVector<MVT, 4> CalleeRetTys;
842 computeLegalValueVTs(F, TM, RetTy, CallerRetTys);
843 computeLegalValueVTs(F, TM, CLI.RetTy, CalleeRetTys);
844 bool TypesMatch = CallerRetTys.size() == CalleeRetTys.size() &&
845 std::equal(CallerRetTys.begin(), CallerRetTys.end(),
846 CalleeRetTys.begin());
847 if (!TypesMatch)
848 NoTail("WebAssembly tail call requires caller and callee return types to "
849 "match");
850
851 // If pointers to local stack values are passed, we cannot tail call
852 if (CLI.CB) {
853 for (auto &Arg : CLI.CB->args()) {
854 Value *Val = Arg.get();
855 // Trace the value back through pointer operations
856 while (true) {
857 Value *Src = Val->stripPointerCastsAndAliases();
858 if (auto *GEP = dyn_cast<GetElementPtrInst>(Src))
859 Src = GEP->getPointerOperand();
860 if (Val == Src)
861 break;
862 Val = Src;
863 }
864 if (isa<AllocaInst>(Val)) {
865 NoTail(
866 "WebAssembly does not support tail calling with stack arguments");
867 break;
868 }
869 }
870 }
871 }
872
873 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
874 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
875 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
876
877 // The generic code may have added an sret argument. If we're lowering an
878 // invoke function, the ABI requires that the function pointer be the first
879 // argument, so we may have to swap the arguments.
880 if (CallConv == CallingConv::WASM_EmscriptenInvoke && Outs.size() >= 2 &&
881 Outs[0].Flags.isSRet()) {
882 std::swap(Outs[0], Outs[1]);
883 std::swap(OutVals[0], OutVals[1]);
884 }
885
886 bool HasSwiftSelfArg = false;
887 bool HasSwiftErrorArg = false;
888 unsigned NumFixedArgs = 0;
889 for (unsigned I = 0; I < Outs.size(); ++I) {
890 const ISD::OutputArg &Out = Outs[I];
891 SDValue &OutVal = OutVals[I];
892 HasSwiftSelfArg |= Out.Flags.isSwiftSelf();
893 HasSwiftErrorArg |= Out.Flags.isSwiftError();
894 if (Out.Flags.isNest())
895 fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
896 if (Out.Flags.isInAlloca())
897 fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
898 if (Out.Flags.isInConsecutiveRegs())
899 fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
900 if (Out.Flags.isInConsecutiveRegsLast())
901 fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
902 if (Out.Flags.isByVal() && Out.Flags.getByValSize() != 0) {
903 auto &MFI = MF.getFrameInfo();
904 int FI = MFI.CreateStackObject(Out.Flags.getByValSize(),
905 Out.Flags.getNonZeroByValAlign(),
906 /*isSS=*/false);
907 SDValue SizeNode =
908 DAG.getConstant(Out.Flags.getByValSize(), DL, MVT::i32);
909 SDValue FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
910 Chain = DAG.getMemcpy(
911 Chain, DL, FINode, OutVal, SizeNode, Out.Flags.getNonZeroByValAlign(),
912 /*isVolatile*/ false, /*AlwaysInline=*/false,
913 /*isTailCall*/ false, MachinePointerInfo(), MachinePointerInfo());
914 OutVal = FINode;
915 }
916 // Count the number of fixed args *after* legalization.
917 NumFixedArgs += Out.IsFixed;
918 }
919
920 bool IsVarArg = CLI.IsVarArg;
921 auto PtrVT = getPointerTy(Layout);
922
923 // For swiftcc, emit additional swiftself and swifterror arguments
924 // if there aren't. These additional arguments are also added for callee
925 // signature They are necessary to match callee and caller signature for
926 // indirect call.
927 if (CallConv == CallingConv::Swift) {
928 if (!HasSwiftSelfArg) {
929 NumFixedArgs++;
930 ISD::OutputArg Arg;
931 Arg.Flags.setSwiftSelf();
932 CLI.Outs.push_back(Arg);
933 SDValue ArgVal = DAG.getUNDEF(PtrVT);
934 CLI.OutVals.push_back(ArgVal);
935 }
936 if (!HasSwiftErrorArg) {
937 NumFixedArgs++;
938 ISD::OutputArg Arg;
939 Arg.Flags.setSwiftError();
940 CLI.Outs.push_back(Arg);
941 SDValue ArgVal = DAG.getUNDEF(PtrVT);
942 CLI.OutVals.push_back(ArgVal);
943 }
944 }
945
946 // Analyze operands of the call, assigning locations to each operand.
947 SmallVector<CCValAssign, 16> ArgLocs;
948 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
949
950 if (IsVarArg) {
951 // Outgoing non-fixed arguments are placed in a buffer. First
952 // compute their offsets and the total amount of buffer space needed.
953 for (unsigned I = NumFixedArgs; I < Outs.size(); ++I) {
954 const ISD::OutputArg &Out = Outs[I];
955 SDValue &Arg = OutVals[I];
956 EVT VT = Arg.getValueType();
957 assert(VT != MVT::iPTR && "Legalized args should be concrete")((VT != MVT::iPTR && "Legalized args should be concrete"
) ? static_cast<void> (0) : __assert_fail ("VT != MVT::iPTR && \"Legalized args should be concrete\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 957, __PRETTY_FUNCTION__))
;
958 Type *Ty = VT.getTypeForEVT(*DAG.getContext());
959 Align Alignment =
960 std::max(Out.Flags.getNonZeroOrigAlign(), Layout.getABITypeAlign(Ty));
961 unsigned Offset =
962 CCInfo.AllocateStack(Layout.getTypeAllocSize(Ty), Alignment);
963 CCInfo.addLoc(CCValAssign::getMem(ArgLocs.size(), VT.getSimpleVT(),
964 Offset, VT.getSimpleVT(),
965 CCValAssign::Full));
966 }
967 }
968
969 unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
970
971 SDValue FINode;
972 if (IsVarArg && NumBytes) {
973 // For non-fixed arguments, next emit stores to store the argument values
974 // to the stack buffer at the offsets computed above.
975 int FI = MF.getFrameInfo().CreateStackObject(NumBytes,
976 Layout.getStackAlignment(),
977 /*isSS=*/false);
978 unsigned ValNo = 0;
979 SmallVector<SDValue, 8> Chains;
980 for (SDValue Arg : drop_begin(OutVals, NumFixedArgs)) {
981 assert(ArgLocs[ValNo].getValNo() == ValNo &&((ArgLocs[ValNo].getValNo() == ValNo && "ArgLocs should remain in order and only hold varargs args"
) ? static_cast<void> (0) : __assert_fail ("ArgLocs[ValNo].getValNo() == ValNo && \"ArgLocs should remain in order and only hold varargs args\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 982, __PRETTY_FUNCTION__))
982 "ArgLocs should remain in order and only hold varargs args")((ArgLocs[ValNo].getValNo() == ValNo && "ArgLocs should remain in order and only hold varargs args"
) ? static_cast<void> (0) : __assert_fail ("ArgLocs[ValNo].getValNo() == ValNo && \"ArgLocs should remain in order and only hold varargs args\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 982, __PRETTY_FUNCTION__))
;
983 unsigned Offset = ArgLocs[ValNo++].getLocMemOffset();
984 FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
985 SDValue Add = DAG.getNode(ISD::ADD, DL, PtrVT, FINode,
986 DAG.getConstant(Offset, DL, PtrVT));
987 Chains.push_back(
988 DAG.getStore(Chain, DL, Arg, Add,
989 MachinePointerInfo::getFixedStack(MF, FI, Offset)));
990 }
991 if (!Chains.empty())
992 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
993 } else if (IsVarArg) {
994 FINode = DAG.getIntPtrConstant(0, DL);
995 }
996
997 if (Callee->getOpcode() == ISD::GlobalAddress) {
998 // If the callee is a GlobalAddress node (quite common, every direct call
999 // is) turn it into a TargetGlobalAddress node so that LowerGlobalAddress
1000 // doesn't at MO_GOT which is not needed for direct calls.
1001 GlobalAddressSDNode* GA = cast<GlobalAddressSDNode>(Callee);
1002 Callee = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
1003 getPointerTy(DAG.getDataLayout()),
1004 GA->getOffset());
1005 Callee = DAG.getNode(WebAssemblyISD::Wrapper, DL,
1006 getPointerTy(DAG.getDataLayout()), Callee);
1007 }
1008
1009 // Compute the operands for the CALLn node.
1010 SmallVector<SDValue, 16> Ops;
1011 Ops.push_back(Chain);
1012 Ops.push_back(Callee);
1013
1014 // Add all fixed arguments. Note that for non-varargs calls, NumFixedArgs
1015 // isn't reliable.
1016 Ops.append(OutVals.begin(),
1017 IsVarArg ? OutVals.begin() + NumFixedArgs : OutVals.end());
1018 // Add a pointer to the vararg buffer.
1019 if (IsVarArg)
1020 Ops.push_back(FINode);
1021
1022 SmallVector<EVT, 8> InTys;
1023 for (const auto &In : Ins) {
1024 assert(!In.Flags.isByVal() && "byval is not valid for return values")((!In.Flags.isByVal() && "byval is not valid for return values"
) ? static_cast<void> (0) : __assert_fail ("!In.Flags.isByVal() && \"byval is not valid for return values\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1024, __PRETTY_FUNCTION__))
;
1025 assert(!In.Flags.isNest() && "nest is not valid for return values")((!In.Flags.isNest() && "nest is not valid for return values"
) ? static_cast<void> (0) : __assert_fail ("!In.Flags.isNest() && \"nest is not valid for return values\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1025, __PRETTY_FUNCTION__))
;
1026 if (In.Flags.isInAlloca())
1027 fail(DL, DAG, "WebAssembly hasn't implemented inalloca return values");
1028 if (In.Flags.isInConsecutiveRegs())
1029 fail(DL, DAG, "WebAssembly hasn't implemented cons regs return values");
1030 if (In.Flags.isInConsecutiveRegsLast())
1031 fail(DL, DAG,
1032 "WebAssembly hasn't implemented cons regs last return values");
1033 // Ignore In.getNonZeroOrigAlign() because all our arguments are passed in
1034 // registers.
1035 InTys.push_back(In.VT);
1036 }
1037
1038 if (CLI.IsTailCall) {
1039 // ret_calls do not return values to the current frame
1040 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1041 return DAG.getNode(WebAssemblyISD::RET_CALL, DL, NodeTys, Ops);
1042 }
1043
1044 InTys.push_back(MVT::Other);
1045 SDVTList InTyList = DAG.getVTList(InTys);
1046 SDValue Res = DAG.getNode(WebAssemblyISD::CALL, DL, InTyList, Ops);
1047
1048 for (size_t I = 0; I < Ins.size(); ++I)
1049 InVals.push_back(Res.getValue(I));
1050
1051 // Return the chain
1052 return Res.getValue(Ins.size());
1053}
1054
1055bool WebAssemblyTargetLowering::CanLowerReturn(
1056 CallingConv::ID /*CallConv*/, MachineFunction & /*MF*/, bool /*IsVarArg*/,
1057 const SmallVectorImpl<ISD::OutputArg> &Outs,
1058 LLVMContext & /*Context*/) const {
1059 // WebAssembly can only handle returning tuples with multivalue enabled
1060 return Subtarget->hasMultivalue() || Outs.size() <= 1;
1061}
1062
1063SDValue WebAssemblyTargetLowering::LowerReturn(
1064 SDValue Chain, CallingConv::ID CallConv, bool /*IsVarArg*/,
1065 const SmallVectorImpl<ISD::OutputArg> &Outs,
1066 const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
1067 SelectionDAG &DAG) const {
1068 assert((Subtarget->hasMultivalue() || Outs.size() <= 1) &&(((Subtarget->hasMultivalue() || Outs.size() <= 1) &&
"MVP WebAssembly can only return up to one value") ? static_cast
<void> (0) : __assert_fail ("(Subtarget->hasMultivalue() || Outs.size() <= 1) && \"MVP WebAssembly can only return up to one value\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1069, __PRETTY_FUNCTION__))
1069 "MVP WebAssembly can only return up to one value")(((Subtarget->hasMultivalue() || Outs.size() <= 1) &&
"MVP WebAssembly can only return up to one value") ? static_cast
<void> (0) : __assert_fail ("(Subtarget->hasMultivalue() || Outs.size() <= 1) && \"MVP WebAssembly can only return up to one value\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1069, __PRETTY_FUNCTION__))
;
1070 if (!callingConvSupported(CallConv))
1071 fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");
1072
1073 SmallVector<SDValue, 4> RetOps(1, Chain);
1074 RetOps.append(OutVals.begin(), OutVals.end());
1075 Chain = DAG.getNode(WebAssemblyISD::RETURN, DL, MVT::Other, RetOps);
1076
1077 // Record the number and types of the return values.
1078 for (const ISD::OutputArg &Out : Outs) {
1079 assert(!Out.Flags.isByVal() && "byval is not valid for return values")((!Out.Flags.isByVal() && "byval is not valid for return values"
) ? static_cast<void> (0) : __assert_fail ("!Out.Flags.isByVal() && \"byval is not valid for return values\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1079, __PRETTY_FUNCTION__))
;
1080 assert(!Out.Flags.isNest() && "nest is not valid for return values")((!Out.Flags.isNest() && "nest is not valid for return values"
) ? static_cast<void> (0) : __assert_fail ("!Out.Flags.isNest() && \"nest is not valid for return values\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1080, __PRETTY_FUNCTION__))
;
1081 assert(Out.IsFixed && "non-fixed return value is not valid")((Out.IsFixed && "non-fixed return value is not valid"
) ? static_cast<void> (0) : __assert_fail ("Out.IsFixed && \"non-fixed return value is not valid\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1081, __PRETTY_FUNCTION__))
;
1082 if (Out.Flags.isInAlloca())
1083 fail(DL, DAG, "WebAssembly hasn't implemented inalloca results");
1084 if (Out.Flags.isInConsecutiveRegs())
1085 fail(DL, DAG, "WebAssembly hasn't implemented cons regs results");
1086 if (Out.Flags.isInConsecutiveRegsLast())
1087 fail(DL, DAG, "WebAssembly hasn't implemented cons regs last results");
1088 }
1089
1090 return Chain;
1091}
1092
1093SDValue WebAssemblyTargetLowering::LowerFormalArguments(
1094 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
1095 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
1096 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1097 if (!callingConvSupported(CallConv))
1098 fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");
1099
1100 MachineFunction &MF = DAG.getMachineFunction();
1101 auto *MFI = MF.getInfo<WebAssemblyFunctionInfo>();
1102
1103 // Set up the incoming ARGUMENTS value, which serves to represent the liveness
1104 // of the incoming values before they're represented by virtual registers.
1105 MF.getRegInfo().addLiveIn(WebAssembly::ARGUMENTS);
1106
1107 bool HasSwiftErrorArg = false;
1108 bool HasSwiftSelfArg = false;
1109 for (const ISD::InputArg &In : Ins) {
1110 HasSwiftSelfArg |= In.Flags.isSwiftSelf();
1111 HasSwiftErrorArg |= In.Flags.isSwiftError();
1112 if (In.Flags.isInAlloca())
1113 fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
1114 if (In.Flags.isNest())
1115 fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
1116 if (In.Flags.isInConsecutiveRegs())
1117 fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
1118 if (In.Flags.isInConsecutiveRegsLast())
1119 fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
1120 // Ignore In.getNonZeroOrigAlign() because all our arguments are passed in
1121 // registers.
1122 InVals.push_back(In.Used ? DAG.getNode(WebAssemblyISD::ARGUMENT, DL, In.VT,
1123 DAG.getTargetConstant(InVals.size(),
1124 DL, MVT::i32))
1125 : DAG.getUNDEF(In.VT));
1126
1127 // Record the number and types of arguments.
1128 MFI->addParam(In.VT);
1129 }
1130
1131 // For swiftcc, emit additional swiftself and swifterror arguments
1132 // if there aren't. These additional arguments are also added for callee
1133 // signature They are necessary to match callee and caller signature for
1134 // indirect call.
1135 auto PtrVT = getPointerTy(MF.getDataLayout());
1136 if (CallConv == CallingConv::Swift) {
1137 if (!HasSwiftSelfArg) {
1138 MFI->addParam(PtrVT);
1139 }
1140 if (!HasSwiftErrorArg) {
1141 MFI->addParam(PtrVT);
1142 }
1143 }
1144 // Varargs are copied into a buffer allocated by the caller, and a pointer to
1145 // the buffer is passed as an argument.
1146 if (IsVarArg) {
1147 MVT PtrVT = getPointerTy(MF.getDataLayout());
1148 Register VarargVreg =
1149 MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrVT));
1150 MFI->setVarargBufferVreg(VarargVreg);
1151 Chain = DAG.getCopyToReg(
1152 Chain, DL, VarargVreg,
1153 DAG.getNode(WebAssemblyISD::ARGUMENT, DL, PtrVT,
1154 DAG.getTargetConstant(Ins.size(), DL, MVT::i32)));
1155 MFI->addParam(PtrVT);
1156 }
1157
1158 // Record the number and types of arguments and results.
1159 SmallVector<MVT, 4> Params;
1160 SmallVector<MVT, 4> Results;
1161 computeSignatureVTs(MF.getFunction().getFunctionType(), &MF.getFunction(),
1162 MF.getFunction(), DAG.getTarget(), Params, Results);
1163 for (MVT VT : Results)
1164 MFI->addResult(VT);
1165 // TODO: Use signatures in WebAssemblyMachineFunctionInfo too and unify
1166 // the param logic here with ComputeSignatureVTs
1167 assert(MFI->getParams().size() == Params.size() &&((MFI->getParams().size() == Params.size() && std::
equal(MFI->getParams().begin(), MFI->getParams().end(),
Params.begin())) ? static_cast<void> (0) : __assert_fail
("MFI->getParams().size() == Params.size() && std::equal(MFI->getParams().begin(), MFI->getParams().end(), Params.begin())"
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1169, __PRETTY_FUNCTION__))
1168 std::equal(MFI->getParams().begin(), MFI->getParams().end(),((MFI->getParams().size() == Params.size() && std::
equal(MFI->getParams().begin(), MFI->getParams().end(),
Params.begin())) ? static_cast<void> (0) : __assert_fail
("MFI->getParams().size() == Params.size() && std::equal(MFI->getParams().begin(), MFI->getParams().end(), Params.begin())"
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1169, __PRETTY_FUNCTION__))
1169 Params.begin()))((MFI->getParams().size() == Params.size() && std::
equal(MFI->getParams().begin(), MFI->getParams().end(),
Params.begin())) ? static_cast<void> (0) : __assert_fail
("MFI->getParams().size() == Params.size() && std::equal(MFI->getParams().begin(), MFI->getParams().end(), Params.begin())"
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1169, __PRETTY_FUNCTION__))
;
1170
1171 return Chain;
1172}
1173
1174void WebAssemblyTargetLowering::ReplaceNodeResults(
1175 SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
1176 switch (N->getOpcode()) {
1177 case ISD::SIGN_EXTEND_INREG:
1178 // Do not add any results, signifying that N should not be custom lowered
1179 // after all. This happens because simd128 turns on custom lowering for
1180 // SIGN_EXTEND_INREG, but for non-vector sign extends the result might be an
1181 // illegal type.
1182 break;
1183 default:
1184 llvm_unreachable(::llvm::llvm_unreachable_internal("ReplaceNodeResults not implemented for this op for WebAssembly!"
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1185)
1185 "ReplaceNodeResults not implemented for this op for WebAssembly!")::llvm::llvm_unreachable_internal("ReplaceNodeResults not implemented for this op for WebAssembly!"
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1185)
;
1186 }
1187}
1188
1189//===----------------------------------------------------------------------===//
1190// Custom lowering hooks.
1191//===----------------------------------------------------------------------===//
1192
1193SDValue WebAssemblyTargetLowering::LowerOperation(SDValue Op,
1194 SelectionDAG &DAG) const {
1195 SDLoc DL(Op);
1196 switch (Op.getOpcode()) {
1
Control jumps to 'case BUILD_VECTOR:' at line 1233
1197 default:
1198 llvm_unreachable("unimplemented operation lowering")::llvm::llvm_unreachable_internal("unimplemented operation lowering"
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1198)
;
1199 return SDValue();
1200 case ISD::FrameIndex:
1201 return LowerFrameIndex(Op, DAG);
1202 case ISD::GlobalAddress:
1203 return LowerGlobalAddress(Op, DAG);
1204 case ISD::GlobalTLSAddress:
1205 return LowerGlobalTLSAddress(Op, DAG);
1206 case ISD::ExternalSymbol:
1207 return LowerExternalSymbol(Op, DAG);
1208 case ISD::JumpTable:
1209 return LowerJumpTable(Op, DAG);
1210 case ISD::BR_JT:
1211 return LowerBR_JT(Op, DAG);
1212 case ISD::VASTART:
1213 return LowerVASTART(Op, DAG);
1214 case ISD::BlockAddress:
1215 case ISD::BRIND:
1216 fail(DL, DAG, "WebAssembly hasn't implemented computed gotos");
1217 return SDValue();
1218 case ISD::RETURNADDR:
1219 return LowerRETURNADDR(Op, DAG);
1220 case ISD::FRAMEADDR:
1221 return LowerFRAMEADDR(Op, DAG);
1222 case ISD::CopyToReg:
1223 return LowerCopyToReg(Op, DAG);
1224 case ISD::EXTRACT_VECTOR_ELT:
1225 case ISD::INSERT_VECTOR_ELT:
1226 return LowerAccessVectorElement(Op, DAG);
1227 case ISD::INTRINSIC_VOID:
1228 case ISD::INTRINSIC_WO_CHAIN:
1229 case ISD::INTRINSIC_W_CHAIN:
1230 return LowerIntrinsic(Op, DAG);
1231 case ISD::SIGN_EXTEND_INREG:
1232 return LowerSIGN_EXTEND_INREG(Op, DAG);
1233 case ISD::BUILD_VECTOR:
1234 return LowerBUILD_VECTOR(Op, DAG);
2
Calling 'WebAssemblyTargetLowering::LowerBUILD_VECTOR'
1235 case ISD::VECTOR_SHUFFLE:
1236 return LowerVECTOR_SHUFFLE(Op, DAG);
1237 case ISD::SETCC:
1238 return LowerSETCC(Op, DAG);
1239 case ISD::SHL:
1240 case ISD::SRA:
1241 case ISD::SRL:
1242 return LowerShift(Op, DAG);
1243 }
1244}
1245
1246SDValue WebAssemblyTargetLowering::LowerCopyToReg(SDValue Op,
1247 SelectionDAG &DAG) const {
1248 SDValue Src = Op.getOperand(2);
1249 if (isa<FrameIndexSDNode>(Src.getNode())) {
1250 // CopyToReg nodes don't support FrameIndex operands. Other targets select
1251 // the FI to some LEA-like instruction, but since we don't have that, we
1252 // need to insert some kind of instruction that can take an FI operand and
1253 // produces a value usable by CopyToReg (i.e. in a vreg). So insert a dummy
1254 // local.copy between Op and its FI operand.
1255 SDValue Chain = Op.getOperand(0);
1256 SDLoc DL(Op);
1257 unsigned Reg = cast<RegisterSDNode>(Op.getOperand(1))->getReg();
1258 EVT VT = Src.getValueType();
1259 SDValue Copy(DAG.getMachineNode(VT == MVT::i32 ? WebAssembly::COPY_I32
1260 : WebAssembly::COPY_I64,
1261 DL, VT, Src),
1262 0);
1263 return Op.getNode()->getNumValues() == 1
1264 ? DAG.getCopyToReg(Chain, DL, Reg, Copy)
1265 : DAG.getCopyToReg(Chain, DL, Reg, Copy,
1266 Op.getNumOperands() == 4 ? Op.getOperand(3)
1267 : SDValue());
1268 }
1269 return SDValue();
1270}
1271
1272SDValue WebAssemblyTargetLowering::LowerFrameIndex(SDValue Op,
1273 SelectionDAG &DAG) const {
1274 int FI = cast<FrameIndexSDNode>(Op)->getIndex();
1275 return DAG.getTargetFrameIndex(FI, Op.getValueType());
1276}
1277
1278SDValue WebAssemblyTargetLowering::LowerRETURNADDR(SDValue Op,
1279 SelectionDAG &DAG) const {
1280 SDLoc DL(Op);
1281
1282 if (!Subtarget->getTargetTriple().isOSEmscripten()) {
1283 fail(DL, DAG,
1284 "Non-Emscripten WebAssembly hasn't implemented "
1285 "__builtin_return_address");
1286 return SDValue();
1287 }
1288
1289 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
1290 return SDValue();
1291
1292 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1293 MakeLibCallOptions CallOptions;
1294 return makeLibCall(DAG, RTLIB::RETURN_ADDRESS, Op.getValueType(),
1295 {DAG.getConstant(Depth, DL, MVT::i32)}, CallOptions, DL)
1296 .first;
1297}
1298
1299SDValue WebAssemblyTargetLowering::LowerFRAMEADDR(SDValue Op,
1300 SelectionDAG &DAG) const {
1301 // Non-zero depths are not supported by WebAssembly currently. Use the
1302 // legalizer's default expansion, which is to return 0 (what this function is
1303 // documented to do).
1304 if (Op.getConstantOperandVal(0) > 0)
1305 return SDValue();
1306
1307 DAG.getMachineFunction().getFrameInfo().setFrameAddressIsTaken(true);
1308 EVT VT = Op.getValueType();
1309 Register FP =
1310 Subtarget->getRegisterInfo()->getFrameRegister(DAG.getMachineFunction());
1311 return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), FP, VT);
1312}
1313
1314SDValue
1315WebAssemblyTargetLowering::LowerGlobalTLSAddress(SDValue Op,
1316 SelectionDAG &DAG) const {
1317 SDLoc DL(Op);
1318 const auto *GA = cast<GlobalAddressSDNode>(Op);
1319 MVT PtrVT = getPointerTy(DAG.getDataLayout());
1320
1321 MachineFunction &MF = DAG.getMachineFunction();
1322 if (!MF.getSubtarget<WebAssemblySubtarget>().hasBulkMemory())
1323 report_fatal_error("cannot use thread-local storage without bulk memory",
1324 false);
1325
1326 const GlobalValue *GV = GA->getGlobal();
1327
1328 // Currently Emscripten does not support dynamic linking with threads.
1329 // Therefore, if we have thread-local storage, only the local-exec model
1330 // is possible.
1331 // TODO: remove this and implement proper TLS models once Emscripten
1332 // supports dynamic linking with threads.
1333 if (GV->getThreadLocalMode() != GlobalValue::LocalExecTLSModel &&
1334 !Subtarget->getTargetTriple().isOSEmscripten()) {
1335 report_fatal_error("only -ftls-model=local-exec is supported for now on "
1336 "non-Emscripten OSes: variable " +
1337 GV->getName(),
1338 false);
1339 }
1340
1341 auto GlobalGet = PtrVT == MVT::i64 ? WebAssembly::GLOBAL_GET_I64
1342 : WebAssembly::GLOBAL_GET_I32;
1343 const char *BaseName = MF.createExternalSymbolName("__tls_base");
1344
1345 SDValue BaseAddr(
1346 DAG.getMachineNode(GlobalGet, DL, PtrVT,
1347 DAG.getTargetExternalSymbol(BaseName, PtrVT)),
1348 0);
1349
1350 SDValue TLSOffset = DAG.getTargetGlobalAddress(
1351 GV, DL, PtrVT, GA->getOffset(), WebAssemblyII::MO_TLS_BASE_REL);
1352 SDValue SymAddr = DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT, TLSOffset);
1353
1354 return DAG.getNode(ISD::ADD, DL, PtrVT, BaseAddr, SymAddr);
1355}
1356
1357SDValue WebAssemblyTargetLowering::LowerGlobalAddress(SDValue Op,
1358 SelectionDAG &DAG) const {
1359 SDLoc DL(Op);
1360 const auto *GA = cast<GlobalAddressSDNode>(Op);
1361 EVT VT = Op.getValueType();
1362 assert(GA->getTargetFlags() == 0 &&((GA->getTargetFlags() == 0 && "Unexpected target flags on generic GlobalAddressSDNode"
) ? static_cast<void> (0) : __assert_fail ("GA->getTargetFlags() == 0 && \"Unexpected target flags on generic GlobalAddressSDNode\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1363, __PRETTY_FUNCTION__))
1363 "Unexpected target flags on generic GlobalAddressSDNode")((GA->getTargetFlags() == 0 && "Unexpected target flags on generic GlobalAddressSDNode"
) ? static_cast<void> (0) : __assert_fail ("GA->getTargetFlags() == 0 && \"Unexpected target flags on generic GlobalAddressSDNode\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1363, __PRETTY_FUNCTION__))
;
1364 if (GA->getAddressSpace() != 0)
1365 fail(DL, DAG, "WebAssembly only expects the 0 address space");
1366
1367 unsigned OperandFlags = 0;
1368 if (isPositionIndependent()) {
1369 const GlobalValue *GV = GA->getGlobal();
1370 if (getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV)) {
1371 MachineFunction &MF = DAG.getMachineFunction();
1372 MVT PtrVT = getPointerTy(MF.getDataLayout());
1373 const char *BaseName;
1374 if (GV->getValueType()->isFunctionTy()) {
1375 BaseName = MF.createExternalSymbolName("__table_base");
1376 OperandFlags = WebAssemblyII::MO_TABLE_BASE_REL;
1377 }
1378 else {
1379 BaseName = MF.createExternalSymbolName("__memory_base");
1380 OperandFlags = WebAssemblyII::MO_MEMORY_BASE_REL;
1381 }
1382 SDValue BaseAddr =
1383 DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT,
1384 DAG.getTargetExternalSymbol(BaseName, PtrVT));
1385
1386 SDValue SymAddr = DAG.getNode(
1387 WebAssemblyISD::WrapperPIC, DL, VT,
1388 DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT, GA->getOffset(),
1389 OperandFlags));
1390
1391 return DAG.getNode(ISD::ADD, DL, VT, BaseAddr, SymAddr);
1392 } else {
1393 OperandFlags = WebAssemblyII::MO_GOT;
1394 }
1395 }
1396
1397 return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1398 DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT,
1399 GA->getOffset(), OperandFlags));
1400}
1401
1402SDValue
1403WebAssemblyTargetLowering::LowerExternalSymbol(SDValue Op,
1404 SelectionDAG &DAG) const {
1405 SDLoc DL(Op);
1406 const auto *ES = cast<ExternalSymbolSDNode>(Op);
1407 EVT VT = Op.getValueType();
1408 assert(ES->getTargetFlags() == 0 &&((ES->getTargetFlags() == 0 && "Unexpected target flags on generic ExternalSymbolSDNode"
) ? static_cast<void> (0) : __assert_fail ("ES->getTargetFlags() == 0 && \"Unexpected target flags on generic ExternalSymbolSDNode\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1409, __PRETTY_FUNCTION__))
1409 "Unexpected target flags on generic ExternalSymbolSDNode")((ES->getTargetFlags() == 0 && "Unexpected target flags on generic ExternalSymbolSDNode"
) ? static_cast<void> (0) : __assert_fail ("ES->getTargetFlags() == 0 && \"Unexpected target flags on generic ExternalSymbolSDNode\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1409, __PRETTY_FUNCTION__))
;
1410 return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1411 DAG.getTargetExternalSymbol(ES->getSymbol(), VT));
1412}
1413
1414SDValue WebAssemblyTargetLowering::LowerJumpTable(SDValue Op,
1415 SelectionDAG &DAG) const {
1416 // There's no need for a Wrapper node because we always incorporate a jump
1417 // table operand into a BR_TABLE instruction, rather than ever
1418 // materializing it in a register.
1419 const JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
1420 return DAG.getTargetJumpTable(JT->getIndex(), Op.getValueType(),
1421 JT->getTargetFlags());
1422}
1423
1424SDValue WebAssemblyTargetLowering::LowerBR_JT(SDValue Op,
1425 SelectionDAG &DAG) const {
1426 SDLoc DL(Op);
1427 SDValue Chain = Op.getOperand(0);
1428 const auto *JT = cast<JumpTableSDNode>(Op.getOperand(1));
1429 SDValue Index = Op.getOperand(2);
1430 assert(JT->getTargetFlags() == 0 && "WebAssembly doesn't set target flags")((JT->getTargetFlags() == 0 && "WebAssembly doesn't set target flags"
) ? static_cast<void> (0) : __assert_fail ("JT->getTargetFlags() == 0 && \"WebAssembly doesn't set target flags\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1430, __PRETTY_FUNCTION__))
;
1431
1432 SmallVector<SDValue, 8> Ops;
1433 Ops.push_back(Chain);
1434 Ops.push_back(Index);
1435
1436 MachineJumpTableInfo *MJTI = DAG.getMachineFunction().getJumpTableInfo();
1437 const auto &MBBs = MJTI->getJumpTables()[JT->getIndex()].MBBs;
1438
1439 // Add an operand for each case.
1440 for (auto MBB : MBBs)
1441 Ops.push_back(DAG.getBasicBlock(MBB));
1442
1443 // Add the first MBB as a dummy default target for now. This will be replaced
1444 // with the proper default target (and the preceding range check eliminated)
1445 // if possible by WebAssemblyFixBrTableDefaults.
1446 Ops.push_back(DAG.getBasicBlock(*MBBs.begin()));
1447 return DAG.getNode(WebAssemblyISD::BR_TABLE, DL, MVT::Other, Ops);
1448}
1449
1450SDValue WebAssemblyTargetLowering::LowerVASTART(SDValue Op,
1451 SelectionDAG &DAG) const {
1452 SDLoc DL(Op);
1453 EVT PtrVT = getPointerTy(DAG.getMachineFunction().getDataLayout());
1454
1455 auto *MFI = DAG.getMachineFunction().getInfo<WebAssemblyFunctionInfo>();
1456 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
1457
1458 SDValue ArgN = DAG.getCopyFromReg(DAG.getEntryNode(), DL,
1459 MFI->getVarargBufferVreg(), PtrVT);
1460 return DAG.getStore(Op.getOperand(0), DL, ArgN, Op.getOperand(1),
1461 MachinePointerInfo(SV));
1462}
1463
1464static SDValue getCppExceptionSymNode(SDValue Op, unsigned TagIndex,
1465 SelectionDAG &DAG) {
1466 // We only support C++ exceptions for now
1467 int Tag =
1468 cast<ConstantSDNode>(Op.getOperand(TagIndex).getNode())->getZExtValue();
1469 if (Tag != WebAssembly::CPP_EXCEPTION)
1470 llvm_unreachable("Invalid tag: We only support C++ exceptions for now")::llvm::llvm_unreachable_internal("Invalid tag: We only support C++ exceptions for now"
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1470)
;
1471 auto &MF = DAG.getMachineFunction();
1472 const auto &TLI = DAG.getTargetLoweringInfo();
1473 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
1474 const char *SymName = MF.createExternalSymbolName("__cpp_exception");
1475 return DAG.getNode(WebAssemblyISD::Wrapper, SDLoc(Op), PtrVT,
1476 DAG.getTargetExternalSymbol(SymName, PtrVT));
1477}
1478
1479SDValue WebAssemblyTargetLowering::LowerIntrinsic(SDValue Op,
1480 SelectionDAG &DAG) const {
1481 MachineFunction &MF = DAG.getMachineFunction();
1482 unsigned IntNo;
1483 switch (Op.getOpcode()) {
1484 case ISD::INTRINSIC_VOID:
1485 case ISD::INTRINSIC_W_CHAIN:
1486 IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
1487 break;
1488 case ISD::INTRINSIC_WO_CHAIN:
1489 IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1490 break;
1491 default:
1492 llvm_unreachable("Invalid intrinsic")::llvm::llvm_unreachable_internal("Invalid intrinsic", "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1492)
;
1493 }
1494 SDLoc DL(Op);
1495
1496 switch (IntNo) {
1497 default:
1498 return SDValue(); // Don't custom lower most intrinsics.
1499
1500 case Intrinsic::wasm_lsda: {
1501 EVT VT = Op.getValueType();
1502 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1503 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
1504 auto &Context = MF.getMMI().getContext();
1505 MCSymbol *S = Context.getOrCreateSymbol(Twine("GCC_except_table") +
1506 Twine(MF.getFunctionNumber()));
1507 return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1508 DAG.getMCSymbol(S, PtrVT));
1509 }
1510
1511 case Intrinsic::wasm_throw: {
1512 SDValue SymNode = getCppExceptionSymNode(Op, 2, DAG);
1513 return DAG.getNode(WebAssemblyISD::THROW, DL,
1514 MVT::Other, // outchain type
1515 {
1516 Op.getOperand(0), // inchain
1517 SymNode, // exception symbol
1518 Op.getOperand(3) // thrown value
1519 });
1520 }
1521
1522 case Intrinsic::wasm_catch: {
1523 SDValue SymNode = getCppExceptionSymNode(Op, 2, DAG);
1524 return DAG.getNode(WebAssemblyISD::CATCH, DL,
1525 {
1526 MVT::i32, // outchain type
1527 MVT::Other // return value
1528 },
1529 {
1530 Op.getOperand(0), // inchain
1531 SymNode // exception symbol
1532 });
1533 }
1534
1535 case Intrinsic::wasm_shuffle: {
1536 // Drop in-chain and replace undefs, but otherwise pass through unchanged
1537 SDValue Ops[18];
1538 size_t OpIdx = 0;
1539 Ops[OpIdx++] = Op.getOperand(1);
1540 Ops[OpIdx++] = Op.getOperand(2);
1541 while (OpIdx < 18) {
1542 const SDValue &MaskIdx = Op.getOperand(OpIdx + 1);
1543 if (MaskIdx.isUndef() ||
1544 cast<ConstantSDNode>(MaskIdx.getNode())->getZExtValue() >= 32) {
1545 Ops[OpIdx++] = DAG.getConstant(0, DL, MVT::i32);
1546 } else {
1547 Ops[OpIdx++] = MaskIdx;
1548 }
1549 }
1550 return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops);
1551 }
1552 }
1553}
1554
1555SDValue
1556WebAssemblyTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
1557 SelectionDAG &DAG) const {
1558 SDLoc DL(Op);
1559 // If sign extension operations are disabled, allow sext_inreg only if operand
1560 // is a vector extract of an i8 or i16 lane. SIMD does not depend on sign
1561 // extension operations, but allowing sext_inreg in this context lets us have
1562 // simple patterns to select extract_lane_s instructions. Expanding sext_inreg
1563 // everywhere would be simpler in this file, but would necessitate large and
1564 // brittle patterns to undo the expansion and select extract_lane_s
1565 // instructions.
1566 assert(!Subtarget->hasSignExt() && Subtarget->hasSIMD128())((!Subtarget->hasSignExt() && Subtarget->hasSIMD128
()) ? static_cast<void> (0) : __assert_fail ("!Subtarget->hasSignExt() && Subtarget->hasSIMD128()"
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1566, __PRETTY_FUNCTION__))
;
1567 if (Op.getOperand(0).getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1568 return SDValue();
1569
1570 const SDValue &Extract = Op.getOperand(0);
1571 MVT VecT = Extract.getOperand(0).getSimpleValueType();
1572 if (VecT.getVectorElementType().getSizeInBits() > 32)
1573 return SDValue();
1574 MVT ExtractedLaneT =
1575 cast<VTSDNode>(Op.getOperand(1).getNode())->getVT().getSimpleVT();
1576 MVT ExtractedVecT =
1577 MVT::getVectorVT(ExtractedLaneT, 128 / ExtractedLaneT.getSizeInBits());
1578 if (ExtractedVecT == VecT)
1579 return Op;
1580
1581 // Bitcast vector to appropriate type to ensure ISel pattern coverage
1582 const SDNode *Index = Extract.getOperand(1).getNode();
1583 if (!isa<ConstantSDNode>(Index))
1584 return SDValue();
1585 unsigned IndexVal = cast<ConstantSDNode>(Index)->getZExtValue();
1586 unsigned Scale =
1587 ExtractedVecT.getVectorNumElements() / VecT.getVectorNumElements();
1588 assert(Scale > 1)((Scale > 1) ? static_cast<void> (0) : __assert_fail
("Scale > 1", "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1588, __PRETTY_FUNCTION__))
;
1589 SDValue NewIndex =
1590 DAG.getConstant(IndexVal * Scale, DL, Index->getValueType(0));
1591 SDValue NewExtract = DAG.getNode(
1592 ISD::EXTRACT_VECTOR_ELT, DL, Extract.getValueType(),
1593 DAG.getBitcast(ExtractedVecT, Extract.getOperand(0)), NewIndex);
1594 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, Op.getValueType(), NewExtract,
1595 Op.getOperand(1));
1596}
1597
1598SDValue WebAssemblyTargetLowering::LowerBUILD_VECTOR(SDValue Op,
1599 SelectionDAG &DAG) const {
1600 SDLoc DL(Op);
1601 const EVT VecT = Op.getValueType();
1602 const EVT LaneT = Op.getOperand(0).getValueType();
1603 const size_t Lanes = Op.getNumOperands();
1604 bool CanSwizzle = VecT == MVT::v16i8;
1605
1606 // BUILD_VECTORs are lowered to the instruction that initializes the highest
1607 // possible number of lanes at once followed by a sequence of replace_lane
1608 // instructions to individually initialize any remaining lanes.
1609
1610 // TODO: Tune this. For example, lanewise swizzling is very expensive, so
1611 // swizzled lanes should be given greater weight.
1612
1613 // TODO: Investigate building vectors by shuffling together vectors built by
1614 // separately specialized means.
1615
1616 auto IsConstant = [](const SDValue &V) {
1617 return V.getOpcode() == ISD::Constant || V.getOpcode() == ISD::ConstantFP;
1618 };
1619
1620 // Returns the source vector and index vector pair if they exist. Checks for:
1621 // (extract_vector_elt
1622 // $src,
1623 // (sign_extend_inreg (extract_vector_elt $indices, $i))
1624 // )
1625 auto GetSwizzleSrcs = [](size_t I, const SDValue &Lane) {
1626 auto Bail = std::make_pair(SDValue(), SDValue());
1627 if (Lane->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1628 return Bail;
1629 const SDValue &SwizzleSrc = Lane->getOperand(0);
1630 const SDValue &IndexExt = Lane->getOperand(1);
1631 if (IndexExt->getOpcode() != ISD::SIGN_EXTEND_INREG)
1632 return Bail;
1633 const SDValue &Index = IndexExt->getOperand(0);
1634 if (Index->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1635 return Bail;
1636 const SDValue &SwizzleIndices = Index->getOperand(0);
1637 if (SwizzleSrc.getValueType() != MVT::v16i8 ||
1638 SwizzleIndices.getValueType() != MVT::v16i8 ||
1639 Index->getOperand(1)->getOpcode() != ISD::Constant ||
1640 Index->getConstantOperandVal(1) != I)
1641 return Bail;
1642 return std::make_pair(SwizzleSrc, SwizzleIndices);
1643 };
1644
1645 using ValueEntry = std::pair<SDValue, size_t>;
1646 SmallVector<ValueEntry, 16> SplatValueCounts;
1647
1648 using SwizzleEntry = std::pair<std::pair<SDValue, SDValue>, size_t>;
1649 SmallVector<SwizzleEntry, 16> SwizzleCounts;
1650
1651 auto AddCount = [](auto &Counts, const auto &Val) {
1652 auto CountIt =
1653 llvm::find_if(Counts, [&Val](auto E) { return E.first == Val; });
1654 if (CountIt == Counts.end()) {
1655 Counts.emplace_back(Val, 1);
1656 } else {
1657 CountIt->second++;
1658 }
1659 };
1660
1661 auto GetMostCommon = [](auto &Counts) {
1662 auto CommonIt =
1663 std::max_element(Counts.begin(), Counts.end(),
1664 [](auto A, auto B) { return A.second < B.second; });
1665 assert(CommonIt != Counts.end() && "Unexpected all-undef build_vector")((CommonIt != Counts.end() && "Unexpected all-undef build_vector"
) ? static_cast<void> (0) : __assert_fail ("CommonIt != Counts.end() && \"Unexpected all-undef build_vector\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1665, __PRETTY_FUNCTION__))
;
1666 return *CommonIt;
1667 };
1668
1669 size_t NumConstantLanes = 0;
1670
1671 // Count eligible lanes for each type of vector creation op
1672 for (size_t I = 0; I
2.1
'I' is < 'Lanes'
2.1
'I' is < 'Lanes'
2.1
'I' is < 'Lanes'
< Lanes
; ++I) {
3
Loop condition is true. Entering loop body
8
Assuming 'I' is >= 'Lanes'
9
Loop condition is false. Execution continues on line 1688
1673 const SDValue &Lane = Op->getOperand(I);
1674 if (Lane.isUndef())
4
Taking false branch
1675 continue;
1676
1677 AddCount(SplatValueCounts, Lane);
1678
1679 if (IsConstant(Lane)) {
5
Taking false branch
1680 NumConstantLanes++;
1681 } else if (CanSwizzle
5.1
'CanSwizzle' is true
5.1
'CanSwizzle' is true
5.1
'CanSwizzle' is true
) {
6
Taking true branch
1682 auto SwizzleSrcs = GetSwizzleSrcs(I, Lane);
1683 if (SwizzleSrcs.first)
7
Taking true branch
1684 AddCount(SwizzleCounts, SwizzleSrcs);
1685 }
1686 }
1687
1688 SDValue SplatValue;
1689 size_t NumSplatLanes;
1690 std::tie(SplatValue, NumSplatLanes) = GetMostCommon(SplatValueCounts);
1691
1692 SDValue SwizzleSrc;
1693 SDValue SwizzleIndices;
1694 size_t NumSwizzleLanes = 0;
1695 if (SwizzleCounts.size())
10
Assuming the condition is false
11
Taking false branch
1696 std::forward_as_tuple(std::tie(SwizzleSrc, SwizzleIndices),
1697 NumSwizzleLanes) = GetMostCommon(SwizzleCounts);
1698
1699 // Predicate returning true if the lane is properly initialized by the
1700 // original instruction
1701 std::function<bool(size_t, const SDValue &)> IsLaneConstructed;
1702 SDValue Result;
1703 // Prefer swizzles over vector consts over splats
1704 if (NumSwizzleLanes >= NumSplatLanes &&
12
Assuming 'NumSwizzleLanes' is < 'NumSplatLanes'
1705 (!Subtarget->hasUnimplementedSIMD128() ||
1706 NumSwizzleLanes >= NumConstantLanes)) {
1707 Result = DAG.getNode(WebAssemblyISD::SWIZZLE, DL, VecT, SwizzleSrc,
1708 SwizzleIndices);
1709 auto Swizzled = std::make_pair(SwizzleSrc, SwizzleIndices);
1710 IsLaneConstructed = [&, Swizzled](size_t I, const SDValue &Lane) {
1711 return Swizzled == GetSwizzleSrcs(I, Lane);
1712 };
1713 } else if (NumConstantLanes
12.1
'NumConstantLanes' is < 'NumSplatLanes'
12.1
'NumConstantLanes' is < 'NumSplatLanes'
12.1
'NumConstantLanes' is < 'NumSplatLanes'
>= NumSplatLanes &&
1714 Subtarget->hasUnimplementedSIMD128()) {
1715 // If we support v128.const, emit it directly
1716 SmallVector<SDValue, 16> ConstLanes;
1717 for (const SDValue &Lane : Op->op_values()) {
1718 if (IsConstant(Lane)) {
1719 ConstLanes.push_back(Lane);
1720 } else if (LaneT.isFloatingPoint()) {
1721 ConstLanes.push_back(DAG.getConstantFP(0, DL, LaneT));
1722 } else {
1723 ConstLanes.push_back(DAG.getConstant(0, DL, LaneT));
1724 }
1725 }
1726 Result = DAG.getBuildVector(VecT, DL, ConstLanes);
1727 IsLaneConstructed = [&IsConstant](size_t _, const SDValue &Lane) {
1728 return IsConstant(Lane);
1729 };
1730 } else if (NumConstantLanes
12.2
'NumConstantLanes' is < 'NumSplatLanes'
12.2
'NumConstantLanes' is < 'NumSplatLanes'
12.2
'NumConstantLanes' is < 'NumSplatLanes'
>= NumSplatLanes && VecT.isInteger()) {
1731 // Otherwise, if this is an integer vector, pack the lane values together so
1732 // we can construct the 128-bit constant from a pair of i64s using a splat
1733 // followed by at most one i64x2.replace_lane. Also keep track of the lanes
1734 // that actually matter so we can avoid the replace_lane in more cases.
1735 std::array<uint64_t, 2> I64s{{0, 0}};
1736 std::array<uint64_t, 2> ConstLaneMasks{{0, 0}};
1737 size_t LaneBits = 128 / Lanes;
1738 size_t HalfLanes = Lanes / 2;
1739 for (size_t I = 0; I < Lanes; ++I) {
1740 const SDValue &Lane = Op.getOperand(I);
1741 if (IsConstant(Lane)) {
1742 // How much we need to shift Val to position it in an i64
1743 auto Shift = LaneBits * (I % HalfLanes);
1744 auto Mask = maskTrailingOnes<uint64_t>(LaneBits);
1745 auto Val = cast<ConstantSDNode>(Lane.getNode())->getZExtValue() & Mask;
1746 I64s[I / HalfLanes] |= Val << Shift;
1747 ConstLaneMasks[I / HalfLanes] |= Mask << Shift;
1748 }
1749 }
1750 // Check whether all constant lanes in the second half of the vector are
1751 // equivalent in the first half or vice versa to determine whether splatting
1752 // either side will be sufficient to materialize the constant. As a special
1753 // case, if the first and second halves have no constant lanes in common, we
1754 // can just combine them.
1755 bool FirstHalfSufficient = (I64s[0] & ConstLaneMasks[1]) == I64s[1];
1756 bool SecondHalfSufficient = (I64s[1] & ConstLaneMasks[0]) == I64s[0];
1757 bool CombinedSufficient = (ConstLaneMasks[0] & ConstLaneMasks[1]) == 0;
1758
1759 uint64_t Splatted;
1760 if (SecondHalfSufficient) {
1761 Splatted = I64s[1];
1762 } else if (CombinedSufficient) {
1763 Splatted = I64s[0] | I64s[1];
1764 } else {
1765 Splatted = I64s[0];
1766 }
1767
1768 Result = DAG.getSplatBuildVector(MVT::v2i64, DL,
1769 DAG.getConstant(Splatted, DL, MVT::i64));
1770 if (!FirstHalfSufficient && !SecondHalfSufficient && !CombinedSufficient) {
1771 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, MVT::v2i64, Result,
1772 DAG.getConstant(I64s[1], DL, MVT::i64),
1773 DAG.getConstant(1, DL, MVT::i32));
1774 }
1775 Result = DAG.getBitcast(VecT, Result);
1776 IsLaneConstructed = [&IsConstant](size_t _, const SDValue &Lane) {
1777 return IsConstant(Lane);
1778 };
1779 } else {
1780 // Use a splat, but possibly a load_splat
1781 LoadSDNode *SplattedLoad;
1782 if ((SplattedLoad = dyn_cast<LoadSDNode>(SplatValue)) &&
13
Assuming 'SplattedLoad' is null
14
Assuming pointer value is null
15
Taking false branch
1783 SplattedLoad->getMemoryVT() == VecT.getVectorElementType()) {
1784 Result = DAG.getMemIntrinsicNode(
1785 WebAssemblyISD::LOAD_SPLAT, DL, DAG.getVTList(VecT),
1786 {SplattedLoad->getChain(), SplattedLoad->getBasePtr(),
1787 SplattedLoad->getOffset()},
1788 SplattedLoad->getMemoryVT(), SplattedLoad->getMemOperand());
1789 } else {
1790 Result = DAG.getSplatBuildVector(VecT, DL, SplatValue);
16
Value assigned to 'Op.Node'
17
Calling 'SelectionDAG::getSplatBuildVector'
1791 }
1792 IsLaneConstructed = [&SplatValue](size_t _, const SDValue &Lane) {
1793 return Lane == SplatValue;
1794 };
1795 }
1796
1797 assert(Result)((Result) ? static_cast<void> (0) : __assert_fail ("Result"
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1797, __PRETTY_FUNCTION__))
;
1798 assert(IsLaneConstructed)((IsLaneConstructed) ? static_cast<void> (0) : __assert_fail
("IsLaneConstructed", "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1798, __PRETTY_FUNCTION__))
;
1799
1800 // Add replace_lane instructions for any unhandled values
1801 for (size_t I = 0; I < Lanes; ++I) {
1802 const SDValue &Lane = Op->getOperand(I);
1803 if (!Lane.isUndef() && !IsLaneConstructed(I, Lane))
1804 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VecT, Result, Lane,
1805 DAG.getConstant(I, DL, MVT::i32));
1806 }
1807
1808 return Result;
1809}
1810
1811SDValue
1812WebAssemblyTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
1813 SelectionDAG &DAG) const {
1814 SDLoc DL(Op);
1815 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op.getNode())->getMask();
1816 MVT VecType = Op.getOperand(0).getSimpleValueType();
1817 assert(VecType.is128BitVector() && "Unexpected shuffle vector type")((VecType.is128BitVector() && "Unexpected shuffle vector type"
) ? static_cast<void> (0) : __assert_fail ("VecType.is128BitVector() && \"Unexpected shuffle vector type\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1817, __PRETTY_FUNCTION__))
;
1818 size_t LaneBytes = VecType.getVectorElementType().getSizeInBits() / 8;
1819
1820 // Space for two vector args and sixteen mask indices
1821 SDValue Ops[18];
1822 size_t OpIdx = 0;
1823 Ops[OpIdx++] = Op.getOperand(0);
1824 Ops[OpIdx++] = Op.getOperand(1);
1825
1826 // Expand mask indices to byte indices and materialize them as operands
1827 for (int M : Mask) {
1828 for (size_t J = 0; J < LaneBytes; ++J) {
1829 // Lower undefs (represented by -1 in mask) to zero
1830 uint64_t ByteIndex = M == -1 ? 0 : (uint64_t)M * LaneBytes + J;
1831 Ops[OpIdx++] = DAG.getConstant(ByteIndex, DL, MVT::i32);
1832 }
1833 }
1834
1835 return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops);
1836}
1837
1838SDValue WebAssemblyTargetLowering::LowerSETCC(SDValue Op,
1839 SelectionDAG &DAG) const {
1840 SDLoc DL(Op);
1841 // The legalizer does not know how to expand the comparison modes of i64x2
1842 // vectors because no comparison modes are supported. We could solve this by
1843 // expanding all i64x2 SETCC nodes, but that seems to expand f64x2 SETCC nodes
1844 // (which return i64x2 results) as well. So instead we manually unroll i64x2
1845 // comparisons here.
1846 assert(Op->getOperand(0)->getSimpleValueType(0) == MVT::v2i64)((Op->getOperand(0)->getSimpleValueType(0) == MVT::v2i64
) ? static_cast<void> (0) : __assert_fail ("Op->getOperand(0)->getSimpleValueType(0) == MVT::v2i64"
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1846, __PRETTY_FUNCTION__))
;
1847 SmallVector<SDValue, 2> LHS, RHS;
1848 DAG.ExtractVectorElements(Op->getOperand(0), LHS);
1849 DAG.ExtractVectorElements(Op->getOperand(1), RHS);
1850 const SDValue &CC = Op->getOperand(2);
1851 auto MakeLane = [&](unsigned I) {
1852 return DAG.getNode(ISD::SELECT_CC, DL, MVT::i64, LHS[I], RHS[I],
1853 DAG.getConstant(uint64_t(-1), DL, MVT::i64),
1854 DAG.getConstant(uint64_t(0), DL, MVT::i64), CC);
1855 };
1856 return DAG.getBuildVector(Op->getValueType(0), DL,
1857 {MakeLane(0), MakeLane(1)});
1858}
1859
1860SDValue
1861WebAssemblyTargetLowering::LowerAccessVectorElement(SDValue Op,
1862 SelectionDAG &DAG) const {
1863 // Allow constant lane indices, expand variable lane indices
1864 SDNode *IdxNode = Op.getOperand(Op.getNumOperands() - 1).getNode();
1865 if (isa<ConstantSDNode>(IdxNode) || IdxNode->isUndef())
1866 return Op;
1867 else
1868 // Perform default expansion
1869 return SDValue();
1870}
1871
1872static SDValue unrollVectorShift(SDValue Op, SelectionDAG &DAG) {
1873 EVT LaneT = Op.getSimpleValueType().getVectorElementType();
1874 // 32-bit and 64-bit unrolled shifts will have proper semantics
1875 if (LaneT.bitsGE(MVT::i32))
1876 return DAG.UnrollVectorOp(Op.getNode());
1877 // Otherwise mask the shift value to get proper semantics from 32-bit shift
1878 SDLoc DL(Op);
1879 size_t NumLanes = Op.getSimpleValueType().getVectorNumElements();
1880 SDValue Mask = DAG.getConstant(LaneT.getSizeInBits() - 1, DL, MVT::i32);
1881 unsigned ShiftOpcode = Op.getOpcode();
1882 SmallVector<SDValue, 16> ShiftedElements;
1883 DAG.ExtractVectorElements(Op.getOperand(0), ShiftedElements, 0, 0, MVT::i32);
1884 SmallVector<SDValue, 16> ShiftElements;
1885 DAG.ExtractVectorElements(Op.getOperand(1), ShiftElements, 0, 0, MVT::i32);
1886 SmallVector<SDValue, 16> UnrolledOps;
1887 for (size_t i = 0; i < NumLanes; ++i) {
1888 SDValue MaskedShiftValue =
1889 DAG.getNode(ISD::AND, DL, MVT::i32, ShiftElements[i], Mask);
1890 SDValue ShiftedValue = ShiftedElements[i];
1891 if (ShiftOpcode == ISD::SRA)
1892 ShiftedValue = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32,
1893 ShiftedValue, DAG.getValueType(LaneT));
1894 UnrolledOps.push_back(
1895 DAG.getNode(ShiftOpcode, DL, MVT::i32, ShiftedValue, MaskedShiftValue));
1896 }
1897 return DAG.getBuildVector(Op.getValueType(), DL, UnrolledOps);
1898}
1899
1900SDValue WebAssemblyTargetLowering::LowerShift(SDValue Op,
1901 SelectionDAG &DAG) const {
1902 SDLoc DL(Op);
1903
1904 // Only manually lower vector shifts
1905 assert(Op.getSimpleValueType().isVector())((Op.getSimpleValueType().isVector()) ? static_cast<void>
(0) : __assert_fail ("Op.getSimpleValueType().isVector()", "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1905, __PRETTY_FUNCTION__))
;
1906
1907 auto ShiftVal = DAG.getSplatValue(Op.getOperand(1));
1908 if (!ShiftVal)
1909 return unrollVectorShift(Op, DAG);
1910
1911 // Use anyext because none of the high bits can affect the shift
1912 ShiftVal = DAG.getAnyExtOrTrunc(ShiftVal, DL, MVT::i32);
1913
1914 unsigned Opcode;
1915 switch (Op.getOpcode()) {
1916 case ISD::SHL:
1917 Opcode = WebAssemblyISD::VEC_SHL;
1918 break;
1919 case ISD::SRA:
1920 Opcode = WebAssemblyISD::VEC_SHR_S;
1921 break;
1922 case ISD::SRL:
1923 Opcode = WebAssemblyISD::VEC_SHR_U;
1924 break;
1925 default:
1926 llvm_unreachable("unexpected opcode")::llvm::llvm_unreachable_internal("unexpected opcode", "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1926)
;
1927 }
1928
1929 return DAG.getNode(Opcode, DL, Op.getValueType(), Op.getOperand(0), ShiftVal);
1930}
1931
1932//===----------------------------------------------------------------------===//
1933// Custom DAG combine hooks
1934//===----------------------------------------------------------------------===//
1935static SDValue
1936performVECTOR_SHUFFLECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
1937 auto &DAG = DCI.DAG;
1938 auto Shuffle = cast<ShuffleVectorSDNode>(N);
1939
1940 // Hoist vector bitcasts that don't change the number of lanes out of unary
1941 // shuffles, where they are less likely to get in the way of other combines.
1942 // (shuffle (vNxT1 (bitcast (vNxT0 x))), undef, mask) ->
1943 // (vNxT1 (bitcast (vNxT0 (shuffle x, undef, mask))))
1944 SDValue Bitcast = N->getOperand(0);
1945 if (Bitcast.getOpcode() != ISD::BITCAST)
1946 return SDValue();
1947 if (!N->getOperand(1).isUndef())
1948 return SDValue();
1949 SDValue CastOp = Bitcast.getOperand(0);
1950 MVT SrcType = CastOp.getSimpleValueType();
1951 MVT DstType = Bitcast.getSimpleValueType();
1952 if (!SrcType.is128BitVector() ||
1953 SrcType.getVectorNumElements() != DstType.getVectorNumElements())
1954 return SDValue();
1955 SDValue NewShuffle = DAG.getVectorShuffle(
1956 SrcType, SDLoc(N), CastOp, DAG.getUNDEF(SrcType), Shuffle->getMask());
1957 return DAG.getBitcast(DstType, NewShuffle);
1958}
1959
1960static SDValue performVectorWidenCombine(SDNode *N,
1961 TargetLowering::DAGCombinerInfo &DCI) {
1962 auto &DAG = DCI.DAG;
1963 assert(N->getOpcode() == ISD::SIGN_EXTEND ||((N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() ==
ISD::ZERO_EXTEND) ? static_cast<void> (0) : __assert_fail
("N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND"
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1964, __PRETTY_FUNCTION__))
1964 N->getOpcode() == ISD::ZERO_EXTEND)((N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() ==
ISD::ZERO_EXTEND) ? static_cast<void> (0) : __assert_fail
("N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND"
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1964, __PRETTY_FUNCTION__))
;
1965
1966 // Combine ({s,z}ext (extract_subvector src, i)) into a widening operation if
1967 // possible before the extract_subvector can be expanded.
1968 auto Extract = N->getOperand(0);
1969 if (Extract.getOpcode() != ISD::EXTRACT_SUBVECTOR)
1970 return SDValue();
1971 auto Source = Extract.getOperand(0);
1972 auto *IndexNode = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
1973 if (IndexNode == nullptr)
1974 return SDValue();
1975 auto Index = IndexNode->getZExtValue();
1976
1977 // Only v8i8 and v4i16 extracts can be widened, and only if the extracted
1978 // subvector is the low or high half of its source.
1979 EVT ResVT = N->getValueType(0);
1980 if (ResVT == MVT::v8i16) {
1981 if (Extract.getValueType() != MVT::v8i8 ||
1982 Source.getValueType() != MVT::v16i8 || (Index != 0 && Index != 8))
1983 return SDValue();
1984 } else if (ResVT == MVT::v4i32) {
1985 if (Extract.getValueType() != MVT::v4i16 ||
1986 Source.getValueType() != MVT::v8i16 || (Index != 0 && Index != 4))
1987 return SDValue();
1988 } else {
1989 return SDValue();
1990 }
1991
1992 bool IsSext = N->getOpcode() == ISD::SIGN_EXTEND;
1993 bool IsLow = Index == 0;
1994
1995 unsigned Op = IsSext ? (IsLow ? WebAssemblyISD::WIDEN_LOW_S
1996 : WebAssemblyISD::WIDEN_HIGH_S)
1997 : (IsLow ? WebAssemblyISD::WIDEN_LOW_U
1998 : WebAssemblyISD::WIDEN_HIGH_U);
1999
2000 return DAG.getNode(Op, SDLoc(N), ResVT, Source);
2001}
2002
2003SDValue
2004WebAssemblyTargetLowering::PerformDAGCombine(SDNode *N,
2005 DAGCombinerInfo &DCI) const {
2006 switch (N->getOpcode()) {
2007 default:
2008 return SDValue();
2009 case ISD::VECTOR_SHUFFLE:
2010 return performVECTOR_SHUFFLECombine(N, DCI);
2011 case ISD::SIGN_EXTEND:
2012 case ISD::ZERO_EXTEND:
2013 return performVectorWidenCombine(N, DCI);
2014 }
2015}

/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/SelectionDAG.h

1//===- llvm/CodeGen/SelectionDAG.h - InstSelection DAG ----------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file declares the SelectionDAG class, and transitively defines the
10// SDNode class and subclasses.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_CODEGEN_SELECTIONDAG_H
15#define LLVM_CODEGEN_SELECTIONDAG_H
16
17#include "llvm/ADT/APFloat.h"
18#include "llvm/ADT/APInt.h"
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/DenseMap.h"
21#include "llvm/ADT/DenseSet.h"
22#include "llvm/ADT/FoldingSet.h"
23#include "llvm/ADT/SetVector.h"
24#include "llvm/ADT/SmallVector.h"
25#include "llvm/ADT/StringMap.h"
26#include "llvm/ADT/ilist.h"
27#include "llvm/ADT/iterator.h"
28#include "llvm/ADT/iterator_range.h"
29#include "llvm/CodeGen/DAGCombine.h"
30#include "llvm/CodeGen/ISDOpcodes.h"
31#include "llvm/CodeGen/MachineFunction.h"
32#include "llvm/CodeGen/MachineMemOperand.h"
33#include "llvm/CodeGen/SelectionDAGNodes.h"
34#include "llvm/CodeGen/ValueTypes.h"
35#include "llvm/IR/DebugLoc.h"
36#include "llvm/IR/Instructions.h"
37#include "llvm/IR/Metadata.h"
38#include "llvm/Support/Allocator.h"
39#include "llvm/Support/ArrayRecycler.h"
40#include "llvm/Support/AtomicOrdering.h"
41#include "llvm/Support/Casting.h"
42#include "llvm/Support/CodeGen.h"
43#include "llvm/Support/ErrorHandling.h"
44#include "llvm/Support/MachineValueType.h"
45#include "llvm/Support/RecyclingAllocator.h"
46#include <algorithm>
47#include <cassert>
48#include <cstdint>
49#include <functional>
50#include <map>
51#include <string>
52#include <tuple>
53#include <utility>
54#include <vector>
55
56namespace llvm {
57
58class AAResults;
59class BlockAddress;
60class BlockFrequencyInfo;
61class Constant;
62class ConstantFP;
63class ConstantInt;
64class DataLayout;
65struct fltSemantics;
66class FunctionLoweringInfo;
67class GlobalValue;
68struct KnownBits;
69class LegacyDivergenceAnalysis;
70class LLVMContext;
71class MachineBasicBlock;
72class MachineConstantPoolValue;
73class MCSymbol;
74class OptimizationRemarkEmitter;
75class ProfileSummaryInfo;
76class SDDbgValue;
77class SDDbgLabel;
78class SelectionDAG;
79class SelectionDAGTargetInfo;
80class TargetLibraryInfo;
81class TargetLowering;
82class TargetMachine;
83class TargetSubtargetInfo;
84class Value;
85
86class SDVTListNode : public FoldingSetNode {
87 friend struct FoldingSetTrait<SDVTListNode>;
88
89 /// A reference to an Interned FoldingSetNodeID for this node.
90 /// The Allocator in SelectionDAG holds the data.
91 /// SDVTList contains all types which are frequently accessed in SelectionDAG.
92 /// The size of this list is not expected to be big so it won't introduce
93 /// a memory penalty.
94 FoldingSetNodeIDRef FastID;
95 const EVT *VTs;
96 unsigned int NumVTs;
97 /// The hash value for SDVTList is fixed, so cache it to avoid
98 /// hash calculation.
99 unsigned HashValue;
100
101public:
102 SDVTListNode(const FoldingSetNodeIDRef ID, const EVT *VT, unsigned int Num) :
103 FastID(ID), VTs(VT), NumVTs(Num) {
104 HashValue = ID.ComputeHash();
105 }
106
107 SDVTList getSDVTList() {
108 SDVTList result = {VTs, NumVTs};
109 return result;
110 }
111};
112
113/// Specialize FoldingSetTrait for SDVTListNode
114/// to avoid computing temp FoldingSetNodeID and hash value.
115template<> struct FoldingSetTrait<SDVTListNode> : DefaultFoldingSetTrait<SDVTListNode> {
116 static void Profile(const SDVTListNode &X, FoldingSetNodeID& ID) {
117 ID = X.FastID;
118 }
119
120 static bool Equals(const SDVTListNode &X, const FoldingSetNodeID &ID,
121 unsigned IDHash, FoldingSetNodeID &TempID) {
122 if (X.HashValue != IDHash)
123 return false;
124 return ID == X.FastID;
125 }
126
127 static unsigned ComputeHash(const SDVTListNode &X, FoldingSetNodeID &TempID) {
128 return X.HashValue;
129 }
130};
131
132template <> struct ilist_alloc_traits<SDNode> {
133 static void deleteNode(SDNode *) {
134 llvm_unreachable("ilist_traits<SDNode> shouldn't see a deleteNode call!")::llvm::llvm_unreachable_internal("ilist_traits<SDNode> shouldn't see a deleteNode call!"
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 134)
;
135 }
136};
137
138/// Keeps track of dbg_value information through SDISel. We do
139/// not build SDNodes for these so as not to perturb the generated code;
140/// instead the info is kept off to the side in this structure. Each SDNode may
141/// have one or more associated dbg_value entries. This information is kept in
142/// DbgValMap.
143/// Byval parameters are handled separately because they don't use alloca's,
144/// which busts the normal mechanism. There is good reason for handling all
145/// parameters separately: they may not have code generated for them, they
146/// should always go at the beginning of the function regardless of other code
147/// motion, and debug info for them is potentially useful even if the parameter
148/// is unused. Right now only byval parameters are handled separately.
149class SDDbgInfo {
150 BumpPtrAllocator Alloc;
151 SmallVector<SDDbgValue*, 32> DbgValues;
152 SmallVector<SDDbgValue*, 32> ByvalParmDbgValues;
153 SmallVector<SDDbgLabel*, 4> DbgLabels;
154 using DbgValMapType = DenseMap<const SDNode *, SmallVector<SDDbgValue *, 2>>;
155 DbgValMapType DbgValMap;
156
157public:
158 SDDbgInfo() = default;
159 SDDbgInfo(const SDDbgInfo &) = delete;
160 SDDbgInfo &operator=(const SDDbgInfo &) = delete;
161
162 void add(SDDbgValue *V, const SDNode *Node, bool isParameter) {
163 if (isParameter) {
164 ByvalParmDbgValues.push_back(V);
165 } else DbgValues.push_back(V);
166 if (Node)
167 DbgValMap[Node].push_back(V);
168 }
169
170 void add(SDDbgLabel *L) {
171 DbgLabels.push_back(L);
172 }
173
174 /// Invalidate all DbgValues attached to the node and remove
175 /// it from the Node-to-DbgValues map.
176 void erase(const SDNode *Node);
177
178 void clear() {
179 DbgValMap.clear();
180 DbgValues.clear();
181 ByvalParmDbgValues.clear();
182 DbgLabels.clear();
183 Alloc.Reset();
184 }
185
186 BumpPtrAllocator &getAlloc() { return Alloc; }
187
188 bool empty() const {
189 return DbgValues.empty() && ByvalParmDbgValues.empty() && DbgLabels.empty();
190 }
191
192 ArrayRef<SDDbgValue*> getSDDbgValues(const SDNode *Node) const {
193 auto I = DbgValMap.find(Node);
194 if (I != DbgValMap.end())
195 return I->second;
196 return ArrayRef<SDDbgValue*>();
197 }
198
199 using DbgIterator = SmallVectorImpl<SDDbgValue*>::iterator;
200 using DbgLabelIterator = SmallVectorImpl<SDDbgLabel*>::iterator;
201
202 DbgIterator DbgBegin() { return DbgValues.begin(); }
203 DbgIterator DbgEnd() { return DbgValues.end(); }
204 DbgIterator ByvalParmDbgBegin() { return ByvalParmDbgValues.begin(); }
205 DbgIterator ByvalParmDbgEnd() { return ByvalParmDbgValues.end(); }
206 DbgLabelIterator DbgLabelBegin() { return DbgLabels.begin(); }
207 DbgLabelIterator DbgLabelEnd() { return DbgLabels.end(); }
208};
209
210void checkForCycles(const SelectionDAG *DAG, bool force = false);
211
212/// This is used to represent a portion of an LLVM function in a low-level
213/// Data Dependence DAG representation suitable for instruction selection.
214/// This DAG is constructed as the first step of instruction selection in order
215/// to allow implementation of machine specific optimizations
216/// and code simplifications.
217///
218/// The representation used by the SelectionDAG is a target-independent
219/// representation, which has some similarities to the GCC RTL representation,
220/// but is significantly more simple, powerful, and is a graph form instead of a
221/// linear form.
222///
223class SelectionDAG {
224 const TargetMachine &TM;
225 const SelectionDAGTargetInfo *TSI = nullptr;
226 const TargetLowering *TLI = nullptr;
227 const TargetLibraryInfo *LibInfo = nullptr;
228 MachineFunction *MF;
229 Pass *SDAGISelPass = nullptr;
230 LLVMContext *Context;
231 CodeGenOpt::Level OptLevel;
232
233 LegacyDivergenceAnalysis * DA = nullptr;
234 FunctionLoweringInfo * FLI = nullptr;
235
236 /// The function-level optimization remark emitter. Used to emit remarks
237 /// whenever manipulating the DAG.
238 OptimizationRemarkEmitter *ORE;
239
240 ProfileSummaryInfo *PSI = nullptr;
241 BlockFrequencyInfo *BFI = nullptr;
242
243 /// The starting token.
244 SDNode EntryNode;
245
246 /// The root of the entire DAG.
247 SDValue Root;
248
249 /// A linked list of nodes in the current DAG.
250 ilist<SDNode> AllNodes;
251
252 /// The AllocatorType for allocating SDNodes. We use
253 /// pool allocation with recycling.
254 using NodeAllocatorType = RecyclingAllocator<BumpPtrAllocator, SDNode,
255 sizeof(LargestSDNode),
256 alignof(MostAlignedSDNode)>;
257
258 /// Pool allocation for nodes.
259 NodeAllocatorType NodeAllocator;
260
261 /// This structure is used to memoize nodes, automatically performing
262 /// CSE with existing nodes when a duplicate is requested.
263 FoldingSet<SDNode> CSEMap;
264
265 /// Pool allocation for machine-opcode SDNode operands.
266 BumpPtrAllocator OperandAllocator;
267 ArrayRecycler<SDUse> OperandRecycler;
268
269 /// Pool allocation for misc. objects that are created once per SelectionDAG.
270 BumpPtrAllocator Allocator;
271
272 /// Tracks dbg_value and dbg_label information through SDISel.
273 SDDbgInfo *DbgInfo;
274
275 using CallSiteInfo = MachineFunction::CallSiteInfo;
276 using CallSiteInfoImpl = MachineFunction::CallSiteInfoImpl;
277
278 struct CallSiteDbgInfo {
279 CallSiteInfo CSInfo;
280 MDNode *HeapAllocSite = nullptr;
281 bool NoMerge = false;
282 };
283
284 DenseMap<const SDNode *, CallSiteDbgInfo> SDCallSiteDbgInfo;
285
286 uint16_t NextPersistentId = 0;
287
288public:
289 /// Clients of various APIs that cause global effects on
290 /// the DAG can optionally implement this interface. This allows the clients
291 /// to handle the various sorts of updates that happen.
292 ///
293 /// A DAGUpdateListener automatically registers itself with DAG when it is
294 /// constructed, and removes itself when destroyed in RAII fashion.
295 struct DAGUpdateListener {
296 DAGUpdateListener *const Next;
297 SelectionDAG &DAG;
298
299 explicit DAGUpdateListener(SelectionDAG &D)
300 : Next(D.UpdateListeners), DAG(D) {
301 DAG.UpdateListeners = this;
302 }
303
304 virtual ~DAGUpdateListener() {
305 assert(DAG.UpdateListeners == this &&((DAG.UpdateListeners == this && "DAGUpdateListeners must be destroyed in LIFO order"
) ? static_cast<void> (0) : __assert_fail ("DAG.UpdateListeners == this && \"DAGUpdateListeners must be destroyed in LIFO order\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 306, __PRETTY_FUNCTION__))
306 "DAGUpdateListeners must be destroyed in LIFO order")((DAG.UpdateListeners == this && "DAGUpdateListeners must be destroyed in LIFO order"
) ? static_cast<void> (0) : __assert_fail ("DAG.UpdateListeners == this && \"DAGUpdateListeners must be destroyed in LIFO order\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 306, __PRETTY_FUNCTION__))
;
307 DAG.UpdateListeners = Next;
308 }
309
310 /// The node N that was deleted and, if E is not null, an
311 /// equivalent node E that replaced it.
312 virtual void NodeDeleted(SDNode *N, SDNode *E);
313
314 /// The node N that was updated.
315 virtual void NodeUpdated(SDNode *N);
316
317 /// The node N that was inserted.
318 virtual void NodeInserted(SDNode *N);
319 };
320
321 struct DAGNodeDeletedListener : public DAGUpdateListener {
322 std::function<void(SDNode *, SDNode *)> Callback;
323
324 DAGNodeDeletedListener(SelectionDAG &DAG,
325 std::function<void(SDNode *, SDNode *)> Callback)
326 : DAGUpdateListener(DAG), Callback(std::move(Callback)) {}
327
328 void NodeDeleted(SDNode *N, SDNode *E) override { Callback(N, E); }
329
330 private:
331 virtual void anchor();
332 };
333
334 /// Help to insert SDNodeFlags automatically in transforming. Use
335 /// RAII to save and resume flags in current scope.
336 class FlagInserter {
337 SelectionDAG &DAG;
338 SDNodeFlags Flags;
339 FlagInserter *LastInserter;
340
341 public:
342 FlagInserter(SelectionDAG &SDAG, SDNodeFlags Flags)
343 : DAG(SDAG), Flags(Flags),
344 LastInserter(SDAG.getFlagInserter()) {
345 SDAG.setFlagInserter(this);
346 }
347 FlagInserter(SelectionDAG &SDAG, SDNode *N)
348 : FlagInserter(SDAG, N->getFlags()) {}
349
350 FlagInserter(const FlagInserter &) = delete;
351 FlagInserter &operator=(const FlagInserter &) = delete;
352 ~FlagInserter() { DAG.setFlagInserter(LastInserter); }
353
354 const SDNodeFlags getFlags() const { return Flags; }
355 };
356
357 /// When true, additional steps are taken to
358 /// ensure that getConstant() and similar functions return DAG nodes that
359 /// have legal types. This is important after type legalization since
360 /// any illegally typed nodes generated after this point will not experience
361 /// type legalization.
362 bool NewNodesMustHaveLegalTypes = false;
363
364private:
365 /// DAGUpdateListener is a friend so it can manipulate the listener stack.
366 friend struct DAGUpdateListener;
367
368 /// Linked list of registered DAGUpdateListener instances.
369 /// This stack is maintained by DAGUpdateListener RAII.
370 DAGUpdateListener *UpdateListeners = nullptr;
371
372 /// Implementation of setSubgraphColor.
373 /// Return whether we had to truncate the search.
374 bool setSubgraphColorHelper(SDNode *N, const char *Color,
375 DenseSet<SDNode *> &visited,
376 int level, bool &printed);
377
378 template <typename SDNodeT, typename... ArgTypes>
379 SDNodeT *newSDNode(ArgTypes &&... Args) {
380 return new (NodeAllocator.template Allocate<SDNodeT>())
381 SDNodeT(std::forward<ArgTypes>(Args)...);
382 }
383
384 /// Build a synthetic SDNodeT with the given args and extract its subclass
385 /// data as an integer (e.g. for use in a folding set).
386 ///
387 /// The args to this function are the same as the args to SDNodeT's
388 /// constructor, except the second arg (assumed to be a const DebugLoc&) is
389 /// omitted.
390 template <typename SDNodeT, typename... ArgTypes>
391 static uint16_t getSyntheticNodeSubclassData(unsigned IROrder,
392 ArgTypes &&... Args) {
393 // The compiler can reduce this expression to a constant iff we pass an
394 // empty DebugLoc. Thankfully, the debug location doesn't have any bearing
395 // on the subclass data.
396 return SDNodeT(IROrder, DebugLoc(), std::forward<ArgTypes>(Args)...)
397 .getRawSubclassData();
398 }
399
400 template <typename SDNodeTy>
401 static uint16_t getSyntheticNodeSubclassData(unsigned Opc, unsigned Order,
402 SDVTList VTs, EVT MemoryVT,
403 MachineMemOperand *MMO) {
404 return SDNodeTy(Opc, Order, DebugLoc(), VTs, MemoryVT, MMO)
405 .getRawSubclassData();
406 }
407
408 void createOperands(SDNode *Node, ArrayRef<SDValue> Vals);
409
410 void removeOperands(SDNode *Node) {
411 if (!Node->OperandList)
412 return;
413 OperandRecycler.deallocate(
414 ArrayRecycler<SDUse>::Capacity::get(Node->NumOperands),
415 Node->OperandList);
416 Node->NumOperands = 0;
417 Node->OperandList = nullptr;
418 }
419 void CreateTopologicalOrder(std::vector<SDNode*>& Order);
420
421public:
422 // Maximum depth for recursive analysis such as computeKnownBits, etc.
423 static constexpr unsigned MaxRecursionDepth = 6;
424
425 explicit SelectionDAG(const TargetMachine &TM, CodeGenOpt::Level);
426 SelectionDAG(const SelectionDAG &) = delete;
427 SelectionDAG &operator=(const SelectionDAG &) = delete;
428 ~SelectionDAG();
429
430 /// Prepare this SelectionDAG to process code in the given MachineFunction.
431 void init(MachineFunction &NewMF, OptimizationRemarkEmitter &NewORE,
432 Pass *PassPtr, const TargetLibraryInfo *LibraryInfo,
433 LegacyDivergenceAnalysis * Divergence,
434 ProfileSummaryInfo *PSIin, BlockFrequencyInfo *BFIin);
435
436 void setFunctionLoweringInfo(FunctionLoweringInfo * FuncInfo) {
437 FLI = FuncInfo;
438 }
439
440 /// Clear state and free memory necessary to make this
441 /// SelectionDAG ready to process a new block.
442 void clear();
443
444 MachineFunction &getMachineFunction() const { return *MF; }
445 const Pass *getPass() const { return SDAGISelPass; }
446
447 const DataLayout &getDataLayout() const { return MF->getDataLayout(); }
448 const TargetMachine &getTarget() const { return TM; }
449 const TargetSubtargetInfo &getSubtarget() const { return MF->getSubtarget(); }
450 const TargetLowering &getTargetLoweringInfo() const { return *TLI; }
451 const TargetLibraryInfo &getLibInfo() const { return *LibInfo; }
452 const SelectionDAGTargetInfo &getSelectionDAGInfo() const { return *TSI; }
453 const LegacyDivergenceAnalysis *getDivergenceAnalysis() const { return DA; }
454 LLVMContext *getContext() const { return Context; }
455 OptimizationRemarkEmitter &getORE() const { return *ORE; }
456 ProfileSummaryInfo *getPSI() const { return PSI; }
457 BlockFrequencyInfo *getBFI() const { return BFI; }
458
459 FlagInserter *getFlagInserter() { return Inserter; }
460 void setFlagInserter(FlagInserter *FI) { Inserter = FI; }
461
462 /// Just dump dot graph to a user-provided path and title.
463 /// This doesn't open the dot viewer program and
464 /// helps visualization when outside debugging session.
465 /// FileName expects absolute path. If provided
466 /// without any path separators then the file
467 /// will be created in the current directory.
468 /// Error will be emitted if the path is insane.
469#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
470 LLVM_DUMP_METHOD__attribute__((noinline)) __attribute__((__used__)) void dumpDotGraph(const Twine &FileName, const Twine &Title);
471#endif
472
473 /// Pop up a GraphViz/gv window with the DAG rendered using 'dot'.
474 void viewGraph(const std::string &Title);
475 void viewGraph();
476
477#ifndef NDEBUG
478 std::map<const SDNode *, std::string> NodeGraphAttrs;
479#endif
480
481 /// Clear all previously defined node graph attributes.
482 /// Intended to be used from a debugging tool (eg. gdb).
483 void clearGraphAttrs();
484
485 /// Set graph attributes for a node. (eg. "color=red".)
486 void setGraphAttrs(const SDNode *N, const char *Attrs);
487
488 /// Get graph attributes for a node. (eg. "color=red".)
489 /// Used from getNodeAttributes.
490 const std::string getGraphAttrs(const SDNode *N) const;
491
492 /// Convenience for setting node color attribute.
493 void setGraphColor(const SDNode *N, const char *Color);
494
495 /// Convenience for setting subgraph color attribute.
496 void setSubgraphColor(SDNode *N, const char *Color);
497
498 using allnodes_const_iterator = ilist<SDNode>::const_iterator;
499
500 allnodes_const_iterator allnodes_begin() const { return AllNodes.begin(); }
501 allnodes_const_iterator allnodes_end() const { return AllNodes.end(); }
502
503 using allnodes_iterator = ilist<SDNode>::iterator;
504
505 allnodes_iterator allnodes_begin() { return AllNodes.begin(); }
506 allnodes_iterator allnodes_end() { return AllNodes.end(); }
507
508 ilist<SDNode>::size_type allnodes_size() const {
509 return AllNodes.size();
510 }
511
512 iterator_range<allnodes_iterator> allnodes() {
513 return make_range(allnodes_begin(), allnodes_end());
514 }
515 iterator_range<allnodes_const_iterator> allnodes() const {
516 return make_range(allnodes_begin(), allnodes_end());
517 }
518
519 /// Return the root tag of the SelectionDAG.
520 const SDValue &getRoot() const { return Root; }
521
522 /// Return the token chain corresponding to the entry of the function.
523 SDValue getEntryNode() const {
524 return SDValue(const_cast<SDNode *>(&EntryNode), 0);
525 }
526
527 /// Set the current root tag of the SelectionDAG.
528 ///
529 const SDValue &setRoot(SDValue N) {
530 assert((!N.getNode() || N.getValueType() == MVT::Other) &&(((!N.getNode() || N.getValueType() == MVT::Other) &&
"DAG root value is not a chain!") ? static_cast<void> (
0) : __assert_fail ("(!N.getNode() || N.getValueType() == MVT::Other) && \"DAG root value is not a chain!\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 531, __PRETTY_FUNCTION__))
531 "DAG root value is not a chain!")(((!N.getNode() || N.getValueType() == MVT::Other) &&
"DAG root value is not a chain!") ? static_cast<void> (
0) : __assert_fail ("(!N.getNode() || N.getValueType() == MVT::Other) && \"DAG root value is not a chain!\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 531, __PRETTY_FUNCTION__))
;
532 if (N.getNode())
533 checkForCycles(N.getNode(), this);
534 Root = N;
535 if (N.getNode())
536 checkForCycles(this);
537 return Root;
538 }
539
540#ifndef NDEBUG
541 void VerifyDAGDiverence();
542#endif
543
544 /// This iterates over the nodes in the SelectionDAG, folding
545 /// certain types of nodes together, or eliminating superfluous nodes. The
546 /// Level argument controls whether Combine is allowed to produce nodes and
547 /// types that are illegal on the target.
548 void Combine(CombineLevel Level, AAResults *AA,
549 CodeGenOpt::Level OptLevel);
550
551 /// This transforms the SelectionDAG into a SelectionDAG that
552 /// only uses types natively supported by the target.
553 /// Returns "true" if it made any changes.
554 ///
555 /// Note that this is an involved process that may invalidate pointers into
556 /// the graph.
557 bool LegalizeTypes();
558
559 /// This transforms the SelectionDAG into a SelectionDAG that is
560 /// compatible with the target instruction selector, as indicated by the
561 /// TargetLowering object.
562 ///
563 /// Note that this is an involved process that may invalidate pointers into
564 /// the graph.
565 void Legalize();
566
567 /// Transforms a SelectionDAG node and any operands to it into a node
568 /// that is compatible with the target instruction selector, as indicated by
569 /// the TargetLowering object.
570 ///
571 /// \returns true if \c N is a valid, legal node after calling this.
572 ///
573 /// This essentially runs a single recursive walk of the \c Legalize process
574 /// over the given node (and its operands). This can be used to incrementally
575 /// legalize the DAG. All of the nodes which are directly replaced,
576 /// potentially including N, are added to the output parameter \c
577 /// UpdatedNodes so that the delta to the DAG can be understood by the
578 /// caller.
579 ///
580 /// When this returns false, N has been legalized in a way that make the
581 /// pointer passed in no longer valid. It may have even been deleted from the
582 /// DAG, and so it shouldn't be used further. When this returns true, the
583 /// N passed in is a legal node, and can be immediately processed as such.
584 /// This may still have done some work on the DAG, and will still populate
585 /// UpdatedNodes with any new nodes replacing those originally in the DAG.
586 bool LegalizeOp(SDNode *N, SmallSetVector<SDNode *, 16> &UpdatedNodes);
587
588 /// This transforms the SelectionDAG into a SelectionDAG
589 /// that only uses vector math operations supported by the target. This is
590 /// necessary as a separate step from Legalize because unrolling a vector
591 /// operation can introduce illegal types, which requires running
592 /// LegalizeTypes again.
593 ///
594 /// This returns true if it made any changes; in that case, LegalizeTypes
595 /// is called again before Legalize.
596 ///
597 /// Note that this is an involved process that may invalidate pointers into
598 /// the graph.
599 bool LegalizeVectors();
600
601 /// This method deletes all unreachable nodes in the SelectionDAG.
602 void RemoveDeadNodes();
603
604 /// Remove the specified node from the system. This node must
605 /// have no referrers.
606 void DeleteNode(SDNode *N);
607
608 /// Return an SDVTList that represents the list of values specified.
609 SDVTList getVTList(EVT VT);
610 SDVTList getVTList(EVT VT1, EVT VT2);
611 SDVTList getVTList(EVT VT1, EVT VT2, EVT VT3);
612 SDVTList getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4);
613 SDVTList getVTList(ArrayRef<EVT> VTs);
614
615 //===--------------------------------------------------------------------===//
616 // Node creation methods.
617
618 /// Create a ConstantSDNode wrapping a constant value.
619 /// If VT is a vector type, the constant is splatted into a BUILD_VECTOR.
620 ///
621 /// If only legal types can be produced, this does the necessary
622 /// transformations (e.g., if the vector element type is illegal).
623 /// @{
624 SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT,
625 bool isTarget = false, bool isOpaque = false);
626 SDValue getConstant(const APInt &Val, const SDLoc &DL, EVT VT,
627 bool isTarget = false, bool isOpaque = false);
628
629 SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget = false,
630 bool IsOpaque = false) {
631 return getConstant(APInt::getAllOnesValue(VT.getScalarSizeInBits()), DL,
632 VT, IsTarget, IsOpaque);
633 }
634
635 SDValue getConstant(const ConstantInt &Val, const SDLoc &DL, EVT VT,
636 bool isTarget = false, bool isOpaque = false);
637 SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL,
638 bool isTarget = false);
639 SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL,
640 bool LegalTypes = true);
641 SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL,
642 bool isTarget = false);
643
644 SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT,
645 bool isOpaque = false) {
646 return getConstant(Val, DL, VT, true, isOpaque);
647 }
648 SDValue getTargetConstant(const APInt &Val, const SDLoc &DL, EVT VT,
649 bool isOpaque = false) {
650 return getConstant(Val, DL, VT, true, isOpaque);
651 }
652 SDValue getTargetConstant(const ConstantInt &Val, const SDLoc &DL, EVT VT,
653 bool isOpaque = false) {
654 return getConstant(Val, DL, VT, true, isOpaque);
655 }
656
657 /// Create a true or false constant of type \p VT using the target's
658 /// BooleanContent for type \p OpVT.
659 SDValue getBoolConstant(bool V, const SDLoc &DL, EVT VT, EVT OpVT);
660 /// @}
661
662 /// Create a ConstantFPSDNode wrapping a constant value.
663 /// If VT is a vector type, the constant is splatted into a BUILD_VECTOR.
664 ///
665 /// If only legal types can be produced, this does the necessary
666 /// transformations (e.g., if the vector element type is illegal).
667 /// The forms that take a double should only be used for simple constants
668 /// that can be exactly represented in VT. No checks are made.
669 /// @{
670 SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT,
671 bool isTarget = false);
672 SDValue getConstantFP(const APFloat &Val, const SDLoc &DL, EVT VT,
673 bool isTarget = false);
674 SDValue getConstantFP(const ConstantFP &V, const SDLoc &DL, EVT VT,
675 bool isTarget = false);
676 SDValue getTargetConstantFP(double Val, const SDLoc &DL, EVT VT) {
677 return getConstantFP(Val, DL, VT, true);
678 }
679 SDValue getTargetConstantFP(const APFloat &Val, const SDLoc &DL, EVT VT) {
680 return getConstantFP(Val, DL, VT, true);
681 }
682 SDValue getTargetConstantFP(const ConstantFP &Val, const SDLoc &DL, EVT VT) {
683 return getConstantFP(Val, DL, VT, true);
684 }
685 /// @}
686
687 SDValue getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT,
688 int64_t offset = 0, bool isTargetGA = false,
689 unsigned TargetFlags = 0);
690 SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT,
691 int64_t offset = 0, unsigned TargetFlags = 0) {
692 return getGlobalAddress(GV, DL, VT, offset, true, TargetFlags);
693 }
694 SDValue getFrameIndex(int FI, EVT VT, bool isTarget = false);
695 SDValue getTargetFrameIndex(int FI, EVT VT) {
696 return getFrameIndex(FI, VT, true);
697 }
// --- Leaf-node constructors: jump tables, constant pools, target indices,
// --- basic blocks, symbols, registers, labels and block addresses. Each
// --- getTarget* wrapper forwards to its plain counterpart with the isTarget
// --- flag forced to true, producing a target-specific (post-legalization)
// --- node that the instruction selector will not touch again.
698 SDValue getJumpTable(int JTI, EVT VT, bool isTarget = false,
699 unsigned TargetFlags = 0);
700 SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags = 0) {
701 return getJumpTable(JTI, VT, true, TargetFlags);
702 }
703 SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align = None,
704 int Offs = 0, bool isT = false,
705 unsigned TargetFlags = 0);
706 SDValue getTargetConstantPool(const Constant *C, EVT VT,
707 MaybeAlign Align = None, int Offset = 0,
708 unsigned TargetFlags = 0) {
709 return getConstantPool(C, VT, Align, Offset, true, TargetFlags);
710 }
// Overload for machine-specific constant pool values (e.g. target-custom
// constants) rather than IR-level Constants.
711 SDValue getConstantPool(MachineConstantPoolValue *C, EVT VT,
712 MaybeAlign Align = None, int Offs = 0,
713 bool isT = false, unsigned TargetFlags = 0);
714 SDValue getTargetConstantPool(MachineConstantPoolValue *C, EVT VT,
715 MaybeAlign Align = None, int Offset = 0,
716 unsigned TargetFlags = 0) {
717 return getConstantPool(C, VT, Align, Offset, true, TargetFlags);
718 }
719 SDValue getTargetIndex(int Index, EVT VT, int64_t Offset = 0,
720 unsigned TargetFlags = 0);
721 // When generating a branch to a BB, we don't in general know enough
722 // to provide debug info for the BB at that time, so keep this one around.
723 SDValue getBasicBlock(MachineBasicBlock *MBB);
724 SDValue getExternalSymbol(const char *Sym, EVT VT);
725 SDValue getTargetExternalSymbol(const char *Sym, EVT VT,
726 unsigned TargetFlags = 0);
727 SDValue getMCSymbol(MCSymbol *Sym, EVT VT);
728
729 SDValue getValueType(EVT);
730 SDValue getRegister(unsigned Reg, EVT VT);
731 SDValue getRegisterMask(const uint32_t *RegMask);
732 SDValue getEHLabel(const SDLoc &dl, SDValue Root, MCSymbol *Label);
733 SDValue getLabelNode(unsigned Opcode, const SDLoc &dl, SDValue Root,
734 MCSymbol *Label);
735 SDValue getBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset = 0,
736 bool isTarget = false, unsigned TargetFlags = 0);
737 SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT,
738 int64_t Offset = 0, unsigned TargetFlags = 0) {
739 return getBlockAddress(BA, VT, Offset, true, TargetFlags);
740 }
741
742 SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg,
743 SDValue N) {
744 return getNode(ISD::CopyToReg, dl, MVT::Other, Chain,
745 getRegister(Reg, N.getValueType()), N);
746 }
747
748 // This version of the getCopyToReg method takes an extra operand, which
749 // indicates that there is potentially an incoming glue value (if Glue is not
750 // null) and that there should be a glue result.
751 SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N,
752 SDValue Glue) {
753 SDVTList VTs = getVTList(MVT::Other, MVT::Glue);
754 SDValue Ops[] = { Chain, getRegister(Reg, N.getValueType()), N, Glue };
755 return getNode(ISD::CopyToReg, dl, VTs,
756 makeArrayRef(Ops, Glue.getNode() ? 4 : 3));
757 }
758
759 // Similar to last getCopyToReg() except parameter Reg is a SDValue
760 SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, SDValue Reg, SDValue N,
761 SDValue Glue) {
762 SDVTList VTs = getVTList(MVT::Other, MVT::Glue);
763 SDValue Ops[] = { Chain, Reg, N, Glue };
764 return getNode(ISD::CopyToReg, dl, VTs,
765 makeArrayRef(Ops, Glue.getNode() ? 4 : 3));
766 }
767
768 SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT) {
769 SDVTList VTs = getVTList(VT, MVT::Other);
770 SDValue Ops[] = { Chain, getRegister(Reg, VT) };
771 return getNode(ISD::CopyFromReg, dl, VTs, Ops);
772 }
773
774 // This version of the getCopyFromReg method takes an extra operand, which
775 // indicates that there is potentially an incoming glue value (if Glue is not
776 // null) and that there should be a glue result.
777 SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT,
778 SDValue Glue) {
779 SDVTList VTs = getVTList(VT, MVT::Other, MVT::Glue);
780 SDValue Ops[] = { Chain, getRegister(Reg, VT), Glue };
781 return getNode(ISD::CopyFromReg, dl, VTs,
782 makeArrayRef(Ops, Glue.getNode() ? 3 : 2));
783 }
784
// Wrap an ISD::CondCode in a CONDCODE node (operand for SETCC/SELECT_CC).
785 SDValue getCondCode(ISD::CondCode Cond);
786
787 /// Return an ISD::VECTOR_SHUFFLE node. The number of elements in VT,
788 /// which must be a vector type, must match the number of mask elements
789 /// NumElts. An integer mask element equal to -1 is treated as undefined.
790 SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2,
791 ArrayRef<int> Mask);
792
793 /// Return an ISD::BUILD_VECTOR node. The number of elements in VT,
794 /// which must be a vector type, must match the number of operands in Ops.
795 /// The operands must have the same type as (or, for integers, a type wider
796 /// than) VT's element type.
797 SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef<SDValue> Ops) {
798 // VerifySDNode (via InsertNode) checks BUILD_VECTOR later.
799 return getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
800 }
801
802 /// Return an ISD::BUILD_VECTOR node. The number of elements in VT,
803 /// which must be a vector type, must match the number of operands in Ops.
804 /// The operands must have the same type as (or, for integers, a type wider
805 /// than) VT's element type.
806 SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef<SDUse> Ops) {
807 // VerifySDNode (via InsertNode) checks BUILD_VECTOR later.
808 return getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
809 }
810
811 /// Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all
812 /// elements. VT must be a vector type. Op's type must be the same as (or,
813 /// for integers, a type wider than) VT's element type.
814 SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op) {
815 // VerifySDNode (via InsertNode) checks BUILD_VECTOR later.
816 if (Op.getOpcode() == ISD::UNDEF) {
18
Calling 'SDValue::getOpcode'
817 assert((VT.getVectorElementType() == Op.getValueType() ||(((VT.getVectorElementType() == Op.getValueType() || (VT.isInteger
() && VT.getVectorElementType().bitsLE(Op.getValueType
()))) && "A splatted value must have a width equal or (for integers) "
"greater than the vector element type!") ? static_cast<void
> (0) : __assert_fail ("(VT.getVectorElementType() == Op.getValueType() || (VT.isInteger() && VT.getVectorElementType().bitsLE(Op.getValueType()))) && \"A splatted value must have a width equal or (for integers) \" \"greater than the vector element type!\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 821, __PRETTY_FUNCTION__))
818 (VT.isInteger() &&(((VT.getVectorElementType() == Op.getValueType() || (VT.isInteger
() && VT.getVectorElementType().bitsLE(Op.getValueType
()))) && "A splatted value must have a width equal or (for integers) "
"greater than the vector element type!") ? static_cast<void
> (0) : __assert_fail ("(VT.getVectorElementType() == Op.getValueType() || (VT.isInteger() && VT.getVectorElementType().bitsLE(Op.getValueType()))) && \"A splatted value must have a width equal or (for integers) \" \"greater than the vector element type!\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 821, __PRETTY_FUNCTION__))
819 VT.getVectorElementType().bitsLE(Op.getValueType()))) &&(((VT.getVectorElementType() == Op.getValueType() || (VT.isInteger
() && VT.getVectorElementType().bitsLE(Op.getValueType
()))) && "A splatted value must have a width equal or (for integers) "
"greater than the vector element type!") ? static_cast<void
> (0) : __assert_fail ("(VT.getVectorElementType() == Op.getValueType() || (VT.isInteger() && VT.getVectorElementType().bitsLE(Op.getValueType()))) && \"A splatted value must have a width equal or (for integers) \" \"greater than the vector element type!\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 821, __PRETTY_FUNCTION__))
820 "A splatted value must have a width equal or (for integers) "(((VT.getVectorElementType() == Op.getValueType() || (VT.isInteger
() && VT.getVectorElementType().bitsLE(Op.getValueType
()))) && "A splatted value must have a width equal or (for integers) "
"greater than the vector element type!") ? static_cast<void
> (0) : __assert_fail ("(VT.getVectorElementType() == Op.getValueType() || (VT.isInteger() && VT.getVectorElementType().bitsLE(Op.getValueType()))) && \"A splatted value must have a width equal or (for integers) \" \"greater than the vector element type!\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 821, __PRETTY_FUNCTION__))
821 "greater than the vector element type!")(((VT.getVectorElementType() == Op.getValueType() || (VT.isInteger
() && VT.getVectorElementType().bitsLE(Op.getValueType
()))) && "A splatted value must have a width equal or (for integers) "
"greater than the vector element type!") ? static_cast<void
> (0) : __assert_fail ("(VT.getVectorElementType() == Op.getValueType() || (VT.isInteger() && VT.getVectorElementType().bitsLE(Op.getValueType()))) && \"A splatted value must have a width equal or (for integers) \" \"greater than the vector element type!\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 821, __PRETTY_FUNCTION__))
;
822 return getNode(ISD::UNDEF, SDLoc(), VT);
823 }
824
825 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Op);
826 return getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
827 }
828
829 // Return a splat ISD::SPLAT_VECTOR node, consisting of Op splatted to all
830 // elements.
831 SDValue getSplatVector(EVT VT, const SDLoc &DL, SDValue Op) {
832 if (Op.getOpcode() == ISD::UNDEF) {
833 assert((VT.getVectorElementType() == Op.getValueType() ||(((VT.getVectorElementType() == Op.getValueType() || (VT.isInteger
() && VT.getVectorElementType().bitsLE(Op.getValueType
()))) && "A splatted value must have a width equal or (for integers) "
"greater than the vector element type!") ? static_cast<void
> (0) : __assert_fail ("(VT.getVectorElementType() == Op.getValueType() || (VT.isInteger() && VT.getVectorElementType().bitsLE(Op.getValueType()))) && \"A splatted value must have a width equal or (for integers) \" \"greater than the vector element type!\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 837, __PRETTY_FUNCTION__))
834 (VT.isInteger() &&(((VT.getVectorElementType() == Op.getValueType() || (VT.isInteger
() && VT.getVectorElementType().bitsLE(Op.getValueType
()))) && "A splatted value must have a width equal or (for integers) "
"greater than the vector element type!") ? static_cast<void
> (0) : __assert_fail ("(VT.getVectorElementType() == Op.getValueType() || (VT.isInteger() && VT.getVectorElementType().bitsLE(Op.getValueType()))) && \"A splatted value must have a width equal or (for integers) \" \"greater than the vector element type!\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 837, __PRETTY_FUNCTION__))
835 VT.getVectorElementType().bitsLE(Op.getValueType()))) &&(((VT.getVectorElementType() == Op.getValueType() || (VT.isInteger
() && VT.getVectorElementType().bitsLE(Op.getValueType
()))) && "A splatted value must have a width equal or (for integers) "
"greater than the vector element type!") ? static_cast<void
> (0) : __assert_fail ("(VT.getVectorElementType() == Op.getValueType() || (VT.isInteger() && VT.getVectorElementType().bitsLE(Op.getValueType()))) && \"A splatted value must have a width equal or (for integers) \" \"greater than the vector element type!\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 837, __PRETTY_FUNCTION__))
836 "A splatted value must have a width equal or (for integers) "(((VT.getVectorElementType() == Op.getValueType() || (VT.isInteger
() && VT.getVectorElementType().bitsLE(Op.getValueType
()))) && "A splatted value must have a width equal or (for integers) "
"greater than the vector element type!") ? static_cast<void
> (0) : __assert_fail ("(VT.getVectorElementType() == Op.getValueType() || (VT.isInteger() && VT.getVectorElementType().bitsLE(Op.getValueType()))) && \"A splatted value must have a width equal or (for integers) \" \"greater than the vector element type!\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 837, __PRETTY_FUNCTION__))
837 "greater than the vector element type!")(((VT.getVectorElementType() == Op.getValueType() || (VT.isInteger
() && VT.getVectorElementType().bitsLE(Op.getValueType
()))) && "A splatted value must have a width equal or (for integers) "
"greater than the vector element type!") ? static_cast<void
> (0) : __assert_fail ("(VT.getVectorElementType() == Op.getValueType() || (VT.isInteger() && VT.getVectorElementType().bitsLE(Op.getValueType()))) && \"A splatted value must have a width equal or (for integers) \" \"greater than the vector element type!\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 837, __PRETTY_FUNCTION__))
;
838 return getNode(ISD::UNDEF, SDLoc(), VT);
839 }
840 return getNode(ISD::SPLAT_VECTOR, DL, VT, Op);
841 }
842
// --- Declarations only (bodies in SelectionDAG.cpp): shuffle commutation,
// --- FP/integer extend-or-truncate conversions, NOT helpers, and base+offset
// --- pointer arithmetic.
843 /// Returns an ISD::VECTOR_SHUFFLE node semantically equivalent to
844 /// the shuffle node in input but with swapped operands.
845 ///
846 /// Example: shuffle A, B, <0,5,2,7> -> shuffle B, A, <4,1,6,3>
847 SDValue getCommutedVectorShuffle(const ShuffleVectorSDNode &SV);
848
849 /// Convert Op, which must be of float type, to the
850 /// float type VT, by either extending or rounding (by truncation).
851 SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT);
852
853 /// Convert Op, which must be a STRICT operation of float type, to the
854 /// float type VT, by either extending or rounding (by truncation).
855 std::pair<SDValue, SDValue>
856 getStrictFPExtendOrRound(SDValue Op, SDValue Chain, const SDLoc &DL, EVT VT);
857
858 /// Convert Op, which must be of integer type, to the
859 /// integer type VT, by either any-extending or truncating it.
860 SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT);
861
862 /// Convert Op, which must be of integer type, to the
863 /// integer type VT, by either sign-extending or truncating it.
864 SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT);
865
866 /// Convert Op, which must be of integer type, to the
867 /// integer type VT, by either zero-extending or truncating it.
868 SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT);
869
870 /// Return the expression required to zero extend the Op
871 /// value assuming it was the smaller SrcTy value.
872 SDValue getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT);
873
874 /// Convert Op, which must be of integer type, to the integer type VT, by
875 /// either truncating it or performing either zero or sign extension as
876 /// appropriate extension for the pointer's semantics.
877 SDValue getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT);
878
879 /// Return the expression required to extend the Op as a pointer value
880 /// assuming it was the smaller SrcTy value. This may be either a zero extend
881 /// or a sign extend.
882 SDValue getPtrExtendInReg(SDValue Op, const SDLoc &DL, EVT VT);
883
884 /// Convert Op, which must be of integer type, to the integer type VT,
885 /// by using an extension appropriate for the target's
886 /// BooleanContent for type OpVT or truncating it.
887 SDValue getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT, EVT OpVT);
888
889 /// Create a bitwise NOT operation as (XOR Val, -1).
890 SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT);
891
892 /// Create a logical NOT operation as (XOR Val, BooleanOne).
893 SDValue getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT);
894
895 /// Returns sum of the base pointer and offset.
896 /// Unlike getObjectPtrOffset this does not set NoUnsignedWrap by default.
897 SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL,
898 const SDNodeFlags Flags = SDNodeFlags());
899 SDValue getMemBasePlusOffset(SDValue Base, SDValue Offset, const SDLoc &DL,
900 const SDNodeFlags Flags = SDNodeFlags());
901
902 /// Create an add instruction with appropriate flags when used for
903 /// addressing some offset of an object. i.e. if a load is split into multiple
904 /// components, create an add nuw from the base pointer to the offset.
905 SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset) {
906 SDNodeFlags Flags;
907 Flags.setNoUnsignedWrap(true);
908 return getMemBasePlusOffset(Ptr, Offset, SL, Flags);
909 }
910
911 SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, SDValue Offset) {
912 // The object itself can't wrap around the address space, so it shouldn't be
913 // possible for the adds of the offsets to the split parts to overflow.
914 SDNodeFlags Flags;
915 Flags.setNoUnsignedWrap(true);
916 return getMemBasePlusOffset(Ptr, Offset, SL, Flags);
917 }
918
919 /// Return a new CALLSEQ_START node, that starts new call frame, in which
920 /// InSize bytes are set up inside CALLSEQ_START..CALLSEQ_END sequence and
921 /// OutSize specifies part of the frame set up prior to the sequence.
922 SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize,
923 const SDLoc &DL) {
924 SDVTList VTs = getVTList(MVT::Other, MVT::Glue);
925 SDValue Ops[] = { Chain,
926 getIntPtrConstant(InSize, DL, true),
927 getIntPtrConstant(OutSize, DL, true) };
928 return getNode(ISD::CALLSEQ_START, DL, VTs, Ops);
929 }
930
931 /// Return a new CALLSEQ_END node, which always must have a
932 /// glue result (to ensure it's not CSE'd).
933 /// CALLSEQ_END does not have a useful SDLoc.
934 SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2,
935 SDValue InGlue, const SDLoc &DL) {
936 SDVTList NodeTys = getVTList(MVT::Other, MVT::Glue);
937 SmallVector<SDValue, 4> Ops;
938 Ops.push_back(Chain);
939 Ops.push_back(Op1);
940 Ops.push_back(Op2);
941 if (InGlue.getNode())
942 Ops.push_back(InGlue);
943 return getNode(ISD::CALLSEQ_END, DL, NodeTys, Ops);
944 }
945
946 /// Return true if the result of this operation is always undefined.
// NOTE(review): declaration only — the definition is not visible in this
// chunk (presumably in SelectionDAG.cpp).
947 bool isUndef(unsigned Opcode, ArrayRef<SDValue> Ops);
948
949 /// Return an UNDEF node. UNDEF does not have a useful SDLoc.
950 SDValue getUNDEF(EVT VT) {
951 return getNode(ISD::UNDEF, SDLoc(), VT);
952 }
953
954 /// Return a node that represents the runtime scaling 'MulImm * RuntimeVL'.
955 SDValue getVScale(const SDLoc &DL, EVT VT, APInt MulImm) {
956 assert(MulImm.getMinSignedBits() <= VT.getSizeInBits() &&((MulImm.getMinSignedBits() <= VT.getSizeInBits() &&
"Immediate does not fit VT") ? static_cast<void> (0) :
__assert_fail ("MulImm.getMinSignedBits() <= VT.getSizeInBits() && \"Immediate does not fit VT\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 957, __PRETTY_FUNCTION__))
957 "Immediate does not fit VT")((MulImm.getMinSignedBits() <= VT.getSizeInBits() &&
"Immediate does not fit VT") ? static_cast<void> (0) :
__assert_fail ("MulImm.getMinSignedBits() <= VT.getSizeInBits() && \"Immediate does not fit VT\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 957, __PRETTY_FUNCTION__))
;
958 return getNode(ISD::VSCALE, DL, VT,
959 getConstant(MulImm.sextOrTrunc(VT.getSizeInBits()), DL, VT));
960 }
961
962 /// Return a GLOBAL_OFFSET_TABLE node. This does not have a useful SDLoc.
963 SDValue getGLOBAL_OFFSET_TABLE(EVT VT) {
964 return getNode(ISD::GLOBAL_OFFSET_TABLE, SDLoc(), VT);
965 }
966
// --- The core getNode() overload set. All node-construction helpers above
// --- ultimately funnel into these; overloads vary by result-type form
// --- (single EVT, EVT list, SDVTList), operand count, and whether explicit
// --- SDNodeFlags are supplied.
967 /// Gets or creates the specified node.
968 ///
969 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
970 ArrayRef<SDUse> Ops);
971 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
972 ArrayRef<SDValue> Ops, const SDNodeFlags Flags);
973 SDValue getNode(unsigned Opcode, const SDLoc &DL, ArrayRef<EVT> ResultTys,
974 ArrayRef<SDValue> Ops);
975 SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
976 ArrayRef<SDValue> Ops, const SDNodeFlags Flags);
977
978 // Use flags from current flag inserter.
979 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
980 ArrayRef<SDValue> Ops);
981 SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
982 ArrayRef<SDValue> Ops);
983 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue Operand);
984 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
985 SDValue N2);
986 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
987 SDValue N2, SDValue N3);
988
989 // Specialize based on number of operands.
990 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT);
991 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue Operand,
992 const SDNodeFlags Flags);
993 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
994 SDValue N2, const SDNodeFlags Flags);
995 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
996 SDValue N2, SDValue N3, const SDNodeFlags Flags);
997 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
998 SDValue N2, SDValue N3, SDValue N4);
999 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
1000 SDValue N2, SDValue N3, SDValue N4, SDValue N5);
1001
1002 // Specialize again based on number of operands for nodes with a VTList
1003 // rather than a single VT.
1004 SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList);
1005 SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, SDValue N);
1006 SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, SDValue N1,
1007 SDValue N2);
1008 SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, SDValue N1,
1009 SDValue N2, SDValue N3);
1010 SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, SDValue N1,
1011 SDValue N2, SDValue N3, SDValue N4);
1012 SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, SDValue N1,
1013 SDValue N2, SDValue N3, SDValue N4, SDValue N5);
1014
1015 /// Compute a TokenFactor to force all the incoming stack arguments to be
1016 /// loaded from the stack. This is used in tail call lowering to protect
1017 /// stack arguments from being clobbered.
1018 SDValue getStackArgumentTokenFactor(SDValue Chain);
1019
1020 LLVM_ATTRIBUTE_DEPRECATED(SDValue getMemcpy(SDValue Chain, const SDLoc &dl,[[deprecated("Use the version that takes Align instead")]] SDValue
getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue
Src, SDValue Size, unsigned Align, bool isVol, bool AlwaysInline
, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo
SrcPtrInfo)
1021 SDValue Dst, SDValue Src,[[deprecated("Use the version that takes Align instead")]] SDValue
getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue
Src, SDValue Size, unsigned Align, bool isVol, bool AlwaysInline
, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo
SrcPtrInfo)
1022 SDValue Size, unsigned Align,[[deprecated("Use the version that takes Align instead")]] SDValue
getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue
Src, SDValue Size, unsigned Align, bool isVol, bool AlwaysInline
, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo
SrcPtrInfo)
1023 bool isVol, bool AlwaysInline,[[deprecated("Use the version that takes Align instead")]] SDValue
getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue
Src, SDValue Size, unsigned Align, bool isVol, bool AlwaysInline
, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo
SrcPtrInfo)
1024 bool isTailCall,[[deprecated("Use the version that takes Align instead")]] SDValue
getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue
Src, SDValue Size, unsigned Align, bool isVol, bool AlwaysInline
, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo
SrcPtrInfo)
1025 MachinePointerInfo DstPtrInfo,[[deprecated("Use the version that takes Align instead")]] SDValue
getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue
Src, SDValue Size, unsigned Align, bool isVol, bool AlwaysInline
, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo
SrcPtrInfo)
1026 MachinePointerInfo SrcPtrInfo),[[deprecated("Use the version that takes Align instead")]] SDValue
getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue
Src, SDValue Size, unsigned Align, bool isVol, bool AlwaysInline
, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo
SrcPtrInfo)
1027 "Use the version that takes Align instead")[[deprecated("Use the version that takes Align instead")]] SDValue
getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue
Src, SDValue Size, unsigned Align, bool isVol, bool AlwaysInline
, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo
SrcPtrInfo)
{
1028 return getMemcpy(Chain, dl, Dst, Src, Size, llvm::Align(Align), isVol,
1029 AlwaysInline, isTailCall, DstPtrInfo, SrcPtrInfo);
1030 }
1031
1032 SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src,
1033 SDValue Size, Align Alignment, bool isVol,
1034 bool AlwaysInline, bool isTailCall,
1035 MachinePointerInfo DstPtrInfo,
1036 MachinePointerInfo SrcPtrInfo);
1037
1038 LLVM_ATTRIBUTE_DEPRECATED(SDValue getMemmove(SDValue Chain, const SDLoc &dl,[[deprecated("Use the version that takes Align instead")]] SDValue
getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue
Src, SDValue Size, unsigned Align, bool isVol, bool isTailCall
, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo
)
1039 SDValue Dst, SDValue Src,[[deprecated("Use the version that takes Align instead")]] SDValue
getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue
Src, SDValue Size, unsigned Align, bool isVol, bool isTailCall
, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo
)
1040 SDValue Size, unsigned Align,[[deprecated("Use the version that takes Align instead")]] SDValue
getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue
Src, SDValue Size, unsigned Align, bool isVol, bool isTailCall
, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo
)
1041 bool isVol, bool isTailCall,[[deprecated("Use the version that takes Align instead")]] SDValue
getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue
Src, SDValue Size, unsigned Align, bool isVol, bool isTailCall
, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo
)
1042 MachinePointerInfo DstPtrInfo,[[deprecated("Use the version that takes Align instead")]] SDValue
getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue
Src, SDValue Size, unsigned Align, bool isVol, bool isTailCall
, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo
)
1043 MachinePointerInfo SrcPtrInfo),[[deprecated("Use the version that takes Align instead")]] SDValue
getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue
Src, SDValue Size, unsigned Align, bool isVol, bool isTailCall
, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo
)
1044 "Use the version that takes Align instead")[[deprecated("Use the version that takes Align instead")]] SDValue
getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue
Src, SDValue Size, unsigned Align, bool isVol, bool isTailCall
, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo
)
{
1045 return getMemmove(Chain, dl, Dst, Src, Size, llvm::Align(Align), isVol,
1046 isTailCall, DstPtrInfo, SrcPtrInfo);
1047 }
1048 SDValue getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src,
1049 SDValue Size, Align Alignment, bool isVol, bool isTailCall,
1050 MachinePointerInfo DstPtrInfo,
1051 MachinePointerInfo SrcPtrInfo);
1052
1053 LLVM_ATTRIBUTE_DEPRECATED(SDValue getMemset(SDValue Chain, const SDLoc &dl,[[deprecated("Use the version that takes Align instead")]] SDValue
getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue
Src, SDValue Size, unsigned Align, bool isVol, bool isTailCall
, MachinePointerInfo DstPtrInfo)
1054 SDValue Dst, SDValue Src,[[deprecated("Use the version that takes Align instead")]] SDValue
getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue
Src, SDValue Size, unsigned Align, bool isVol, bool isTailCall
, MachinePointerInfo DstPtrInfo)
1055 SDValue Size, unsigned Align,[[deprecated("Use the version that takes Align instead")]] SDValue
getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue
Src, SDValue Size, unsigned Align, bool isVol, bool isTailCall
, MachinePointerInfo DstPtrInfo)
1056 bool isVol, bool isTailCall,[[deprecated("Use the version that takes Align instead")]] SDValue
getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue
Src, SDValue Size, unsigned Align, bool isVol, bool isTailCall
, MachinePointerInfo DstPtrInfo)
1057 MachinePointerInfo DstPtrInfo),[[deprecated("Use the version that takes Align instead")]] SDValue
getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue
Src, SDValue Size, unsigned Align, bool isVol, bool isTailCall
, MachinePointerInfo DstPtrInfo)
1058 "Use the version that takes Align instead")[[deprecated("Use the version that takes Align instead")]] SDValue
getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue
Src, SDValue Size, unsigned Align, bool isVol, bool isTailCall
, MachinePointerInfo DstPtrInfo)
{
1059 return getMemset(Chain, dl, Dst, Src, Size, llvm::Align(Align), isVol,
1060 isTailCall, DstPtrInfo);
1061 }
1062 SDValue getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src,
1063 SDValue Size, Align Alignment, bool isVol, bool isTailCall,
1064 MachinePointerInfo DstPtrInfo);
1065
// --- Element-wise-atomic memory intrinsic lowerings (declarations only;
// --- bodies not visible in this chunk).
1066 SDValue getAtomicMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst,
1067 unsigned DstAlign, SDValue Src, unsigned SrcAlign,
1068 SDValue Size, Type *SizeTy, unsigned ElemSz,
1069 bool isTailCall, MachinePointerInfo DstPtrInfo,
1070 MachinePointerInfo SrcPtrInfo);
1071
1072 SDValue getAtomicMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst,
1073 unsigned DstAlign, SDValue Src, unsigned SrcAlign,
1074 SDValue Size, Type *SizeTy, unsigned ElemSz,
1075 bool isTailCall, MachinePointerInfo DstPtrInfo,
1076 MachinePointerInfo SrcPtrInfo);
1077
1078 SDValue getAtomicMemset(SDValue Chain, const SDLoc &dl, SDValue Dst,
1079 unsigned DstAlign, SDValue Value, SDValue Size,
1080 Type *SizeTy, unsigned ElemSz, bool isTailCall,
1081 MachinePointerInfo DstPtrInfo);
1082
1083 /// Helper function to make it easier to build SetCC's if you just have an
1084 /// ISD::CondCode instead of an SDValue.
1085 SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS,
1086 ISD::CondCode Cond, SDValue Chain = SDValue(),
1087 bool IsSignaling = false) {
1088 assert(LHS.getValueType().isVector() == RHS.getValueType().isVector() &&((LHS.getValueType().isVector() == RHS.getValueType().isVector
() && "Cannot compare scalars to vectors") ? static_cast
<void> (0) : __assert_fail ("LHS.getValueType().isVector() == RHS.getValueType().isVector() && \"Cannot compare scalars to vectors\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1089, __PRETTY_FUNCTION__))
1089 "Cannot compare scalars to vectors")((LHS.getValueType().isVector() == RHS.getValueType().isVector
() && "Cannot compare scalars to vectors") ? static_cast
<void> (0) : __assert_fail ("LHS.getValueType().isVector() == RHS.getValueType().isVector() && \"Cannot compare scalars to vectors\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1089, __PRETTY_FUNCTION__))
;
1090 assert(LHS.getValueType().isVector() == VT.isVector() &&((LHS.getValueType().isVector() == VT.isVector() && "Cannot compare scalars to vectors"
) ? static_cast<void> (0) : __assert_fail ("LHS.getValueType().isVector() == VT.isVector() && \"Cannot compare scalars to vectors\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1091, __PRETTY_FUNCTION__))
1091 "Cannot compare scalars to vectors")((LHS.getValueType().isVector() == VT.isVector() && "Cannot compare scalars to vectors"
) ? static_cast<void> (0) : __assert_fail ("LHS.getValueType().isVector() == VT.isVector() && \"Cannot compare scalars to vectors\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1091, __PRETTY_FUNCTION__))
;
1092 assert(Cond != ISD::SETCC_INVALID &&((Cond != ISD::SETCC_INVALID && "Cannot create a setCC of an invalid node."
) ? static_cast<void> (0) : __assert_fail ("Cond != ISD::SETCC_INVALID && \"Cannot create a setCC of an invalid node.\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1093, __PRETTY_FUNCTION__))
1093 "Cannot create a setCC of an invalid node.")((Cond != ISD::SETCC_INVALID && "Cannot create a setCC of an invalid node."
) ? static_cast<void> (0) : __assert_fail ("Cond != ISD::SETCC_INVALID && \"Cannot create a setCC of an invalid node.\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1093, __PRETTY_FUNCTION__))
;
1094 if (Chain)
1095 return getNode(IsSignaling ? ISD::STRICT_FSETCCS : ISD::STRICT_FSETCC, DL,
1096 {VT, MVT::Other}, {Chain, LHS, RHS, getCondCode(Cond)});
1097 return getNode(ISD::SETCC, DL, VT, LHS, RHS, getCondCode(Cond));
1098 }
1099
1100 /// Helper function to make it easier to build Select's if you just have
1101 /// operands and don't want to check for vector.
1102 SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS,
1103 SDValue RHS) {
1104 assert(LHS.getValueType() == RHS.getValueType() &&((LHS.getValueType() == RHS.getValueType() && "Cannot use select on differing types"
) ? static_cast<void> (0) : __assert_fail ("LHS.getValueType() == RHS.getValueType() && \"Cannot use select on differing types\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1105, __PRETTY_FUNCTION__))
1105 "Cannot use select on differing types")((LHS.getValueType() == RHS.getValueType() && "Cannot use select on differing types"
) ? static_cast<void> (0) : __assert_fail ("LHS.getValueType() == RHS.getValueType() && \"Cannot use select on differing types\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1105, __PRETTY_FUNCTION__))
;
1106 assert(VT.isVector() == LHS.getValueType().isVector() &&((VT.isVector() == LHS.getValueType().isVector() && "Cannot mix vectors and scalars"
) ? static_cast<void> (0) : __assert_fail ("VT.isVector() == LHS.getValueType().isVector() && \"Cannot mix vectors and scalars\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1107, __PRETTY_FUNCTION__))
1107 "Cannot mix vectors and scalars")((VT.isVector() == LHS.getValueType().isVector() && "Cannot mix vectors and scalars"
) ? static_cast<void> (0) : __assert_fail ("VT.isVector() == LHS.getValueType().isVector() && \"Cannot mix vectors and scalars\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1107, __PRETTY_FUNCTION__))
;
1108 auto Opcode = Cond.getValueType().isVector() ? ISD::VSELECT : ISD::SELECT;
1109 return getNode(Opcode, DL, VT, Cond, LHS, RHS);
1110 }
1111
1112 /// Helper function to make it easier to build SelectCC's if you just have an
1113 /// ISD::CondCode instead of an SDValue.
1114 SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True,
1115 SDValue False, ISD::CondCode Cond) {
1116 return getNode(ISD::SELECT_CC, DL, True.getValueType(), LHS, RHS, True,
1117 False, getCondCode(Cond));
1118 }
1119
1120 /// Try to simplify a select/vselect into 1 of its operands or a constant.
1121 SDValue simplifySelect(SDValue Cond, SDValue TVal, SDValue FVal);
1122
1123 /// Try to simplify a shift into 1 of its operands or a constant.
1124 SDValue simplifyShift(SDValue X, SDValue Y);
1125
1126 /// Try to simplify a floating-point binary operation into 1 of its operands
1127 /// or a constant.
1128 SDValue simplifyFPBinop(unsigned Opcode, SDValue X, SDValue Y,
1129 SDNodeFlags Flags);
1130
1131 /// VAArg produces a result and token chain, and takes a pointer
1132 /// and a source value as input.
1133 SDValue getVAArg(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
1134 SDValue SV, unsigned Align);
1135
1136 /// Gets a node for an atomic cmpxchg op. There are two
1137 /// valid Opcodes. ISD::ATOMIC_CMO_SWAP produces the value loaded and a
1138 /// chain result. ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS produces the value loaded,
1139 /// a success flag (initially i1), and a chain.
1140 SDValue getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, EVT MemVT,
1141 SDVTList VTs, SDValue Chain, SDValue Ptr,
1142 SDValue Cmp, SDValue Swp, MachineMemOperand *MMO);
1143
1144 /// Gets a node for an atomic op, produces result (if relevant)
1145 /// and chain and takes 2 operands.
1146 SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain,
1147 SDValue Ptr, SDValue Val, MachineMemOperand *MMO);
1148
1149 /// Gets a node for an atomic op, produces result and chain and
1150 /// takes 1 operand.
1151 SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, EVT VT,
1152 SDValue Chain, SDValue Ptr, MachineMemOperand *MMO);
1153
1154 /// Gets a node for an atomic op, produces result and chain and takes N
1155 /// operands.
1156 SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
1157 SDVTList VTList, ArrayRef<SDValue> Ops,
1158 MachineMemOperand *MMO);
1159
1160 /// Creates a MemIntrinsicNode that may produce a
1161 /// result and takes a list of operands. Opcode may be INTRINSIC_VOID,
1162 /// INTRINSIC_W_CHAIN, or a target-specific opcode with a value not
1163 /// less than FIRST_TARGET_MEMORY_OPCODE.
1164 SDValue getMemIntrinsicNode(
1165 unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
1166 EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment,
1167 MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad |
1168 MachineMemOperand::MOStore,
1169 uint64_t Size = 0, const AAMDNodes &AAInfo = AAMDNodes());
1170
1171 inline SDValue getMemIntrinsicNode(
1172 unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
1173 EVT MemVT, MachinePointerInfo PtrInfo, MaybeAlign Alignment = None,
1174 MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad |
1175 MachineMemOperand::MOStore,
1176 uint64_t Size = 0, const AAMDNodes &AAInfo = AAMDNodes()) {
1177 // Ensure that codegen never sees alignment 0
1178 return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, PtrInfo,
1179 Alignment.getValueOr(getEVTAlign(MemVT)), Flags,
1180 Size, AAInfo);
1181 }
1182
1183 LLVM_ATTRIBUTE_DEPRECATED([[deprecated("")]] inline SDValue getMemIntrinsicNode( unsigned
Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue
> Ops, EVT MemVT, MachinePointerInfo PtrInfo, unsigned Alignment
, MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad |
MachineMemOperand::MOStore, uint64_t Size = 0, const AAMDNodes
&AAInfo = AAMDNodes())
1184 inline SDValue getMemIntrinsicNode([[deprecated("")]] inline SDValue getMemIntrinsicNode( unsigned
Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue
> Ops, EVT MemVT, MachinePointerInfo PtrInfo, unsigned Alignment
, MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad |
MachineMemOperand::MOStore, uint64_t Size = 0, const AAMDNodes
&AAInfo = AAMDNodes())
1185 unsigned Opcode, const SDLoc &dl, SDVTList VTList,[[deprecated("")]] inline SDValue getMemIntrinsicNode( unsigned
Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue
> Ops, EVT MemVT, MachinePointerInfo PtrInfo, unsigned Alignment
, MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad |
MachineMemOperand::MOStore, uint64_t Size = 0, const AAMDNodes
&AAInfo = AAMDNodes())
1186 ArrayRef<SDValue> Ops, EVT MemVT, MachinePointerInfo PtrInfo,[[deprecated("")]] inline SDValue getMemIntrinsicNode( unsigned
Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue
> Ops, EVT MemVT, MachinePointerInfo PtrInfo, unsigned Alignment
, MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad |
MachineMemOperand::MOStore, uint64_t Size = 0, const AAMDNodes
&AAInfo = AAMDNodes())
1187 unsigned Alignment,[[deprecated("")]] inline SDValue getMemIntrinsicNode( unsigned
Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue
> Ops, EVT MemVT, MachinePointerInfo PtrInfo, unsigned Alignment
, MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad |
MachineMemOperand::MOStore, uint64_t Size = 0, const AAMDNodes
&AAInfo = AAMDNodes())
1188 MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad |[[deprecated("")]] inline SDValue getMemIntrinsicNode( unsigned
Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue
> Ops, EVT MemVT, MachinePointerInfo PtrInfo, unsigned Alignment
, MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad |
MachineMemOperand::MOStore, uint64_t Size = 0, const AAMDNodes
&AAInfo = AAMDNodes())
1189 MachineMemOperand::MOStore,[[deprecated("")]] inline SDValue getMemIntrinsicNode( unsigned
Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue
> Ops, EVT MemVT, MachinePointerInfo PtrInfo, unsigned Alignment
, MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad |
MachineMemOperand::MOStore, uint64_t Size = 0, const AAMDNodes
&AAInfo = AAMDNodes())
1190 uint64_t Size = 0, const AAMDNodes &AAInfo = AAMDNodes()),[[deprecated("")]] inline SDValue getMemIntrinsicNode( unsigned
Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue
> Ops, EVT MemVT, MachinePointerInfo PtrInfo, unsigned Alignment
, MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad |
MachineMemOperand::MOStore, uint64_t Size = 0, const AAMDNodes
&AAInfo = AAMDNodes())
1191 "")[[deprecated("")]] inline SDValue getMemIntrinsicNode( unsigned
Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue
> Ops, EVT MemVT, MachinePointerInfo PtrInfo, unsigned Alignment
, MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad |
MachineMemOperand::MOStore, uint64_t Size = 0, const AAMDNodes
&AAInfo = AAMDNodes())
{
1192 return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, PtrInfo,
1193 MaybeAlign(Alignment), Flags, Size, AAInfo);
1194 }
1195
1196 SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList,
1197 ArrayRef<SDValue> Ops, EVT MemVT,
1198 MachineMemOperand *MMO);
1199
1200 /// Creates a LifetimeSDNode that starts (`IsStart==true`) or ends
1201 /// (`IsStart==false`) the lifetime of the portion of `FrameIndex` between
1202 /// offsets `Offset` and `Offset + Size`.
1203 SDValue getLifetimeNode(bool IsStart, const SDLoc &dl, SDValue Chain,
1204 int FrameIndex, int64_t Size, int64_t Offset = -1);
1205
1206 /// Creates a PseudoProbeSDNode with function GUID `Guid` and
1207 /// the index of the block `Index` it is probing, as well as the attributes
1208 /// `attr` of the probe.
1209 SDValue getPseudoProbeNode(const SDLoc &Dl, SDValue Chain, uint64_t Guid,
1210 uint64_t Index, uint32_t Attr);
1211
1212 /// Create a MERGE_VALUES node from the given operands.
1213 SDValue getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl);
1214
1215 /// Loads are not normal binary operators: their result type is not
1216 /// determined by their operands, and they produce a value AND a token chain.
1217 ///
1218 /// This function will set the MOLoad flag on MMOFlags, but you can set it if
1219 /// you want. The MOStore flag must not be set.
1220 SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
1221 MachinePointerInfo PtrInfo,
1222 MaybeAlign Alignment = MaybeAlign(),
1223 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1224 const AAMDNodes &AAInfo = AAMDNodes(),
1225 const MDNode *Ranges = nullptr);
1226 /// FIXME: Remove once transition to Align is over.
1227 inline SDValue
1228 getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
1229 MachinePointerInfo PtrInfo, unsigned Alignment,
1230 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1231 const AAMDNodes &AAInfo = AAMDNodes(),
1232 const MDNode *Ranges = nullptr) {
1233 return getLoad(VT, dl, Chain, Ptr, PtrInfo, MaybeAlign(Alignment), MMOFlags,
1234 AAInfo, Ranges);
1235 }
1236 SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
1237 MachineMemOperand *MMO);
1238 SDValue
1239 getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain,
1240 SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT,
1241 MaybeAlign Alignment = MaybeAlign(),
1242 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1243 const AAMDNodes &AAInfo = AAMDNodes());
1244 /// FIXME: Remove once transition to Align is over.
1245 inline SDValue
1246 getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain,
1247 SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT,
1248 unsigned Alignment,
1249 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1250 const AAMDNodes &AAInfo = AAMDNodes()) {
1251 return getExtLoad(ExtType, dl, VT, Chain, Ptr, PtrInfo, MemVT,
1252 MaybeAlign(Alignment), MMOFlags, AAInfo);
1253 }
1254 SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT,
1255 SDValue Chain, SDValue Ptr, EVT MemVT,
1256 MachineMemOperand *MMO);
1257 SDValue getIndexedLoad(SDValue OrigLoad, const SDLoc &dl, SDValue Base,
1258 SDValue Offset, ISD::MemIndexedMode AM);
1259 SDValue getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT,
1260 const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset,
1261 MachinePointerInfo PtrInfo, EVT MemVT, Align Alignment,
1262 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1263 const AAMDNodes &AAInfo = AAMDNodes(),
1264 const MDNode *Ranges = nullptr);
1265 inline SDValue getLoad(
1266 ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &dl,
1267 SDValue Chain, SDValue Ptr, SDValue Offset, MachinePointerInfo PtrInfo,
1268 EVT MemVT, MaybeAlign Alignment = MaybeAlign(),
1269 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1270 const AAMDNodes &AAInfo = AAMDNodes(), const MDNode *Ranges = nullptr) {
1271 // Ensures that codegen never sees a None Alignment.
1272 return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, PtrInfo, MemVT,
1273 Alignment.getValueOr(getEVTAlign(MemVT)), MMOFlags, AAInfo,
1274 Ranges);
1275 }
1276 /// FIXME: Remove once transition to Align is over.
1277 inline SDValue
1278 getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT,
1279 const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset,
1280 MachinePointerInfo PtrInfo, EVT MemVT, unsigned Alignment,
1281 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1282 const AAMDNodes &AAInfo = AAMDNodes(),
1283 const MDNode *Ranges = nullptr) {
1284 return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, PtrInfo, MemVT,
1285 MaybeAlign(Alignment), MMOFlags, AAInfo, Ranges);
1286 }
1287 SDValue getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT,
1288 const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset,
1289 EVT MemVT, MachineMemOperand *MMO);
1290
1291 /// Helper function to build ISD::STORE nodes.
1292 ///
1293 /// This function will set the MOStore flag on MMOFlags, but you can set it if
1294 /// you want. The MOLoad and MOInvariant flags must not be set.
1295
1296 SDValue
1297 getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
1298 MachinePointerInfo PtrInfo, Align Alignment,
1299 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1300 const AAMDNodes &AAInfo = AAMDNodes());
1301 inline SDValue
1302 getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
1303 MachinePointerInfo PtrInfo, MaybeAlign Alignment = MaybeAlign(),
1304 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1305 const AAMDNodes &AAInfo = AAMDNodes()) {
1306 return getStore(Chain, dl, Val, Ptr, PtrInfo,
1307 Alignment.getValueOr(getEVTAlign(Val.getValueType())),
1308 MMOFlags, AAInfo);
1309 }
1310 /// FIXME: Remove once transition to Align is over.
1311 inline SDValue
1312 getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
1313 MachinePointerInfo PtrInfo, unsigned Alignment,
1314 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1315 const AAMDNodes &AAInfo = AAMDNodes()) {
1316 return getStore(Chain, dl, Val, Ptr, PtrInfo, MaybeAlign(Alignment),
1317 MMOFlags, AAInfo);
1318 }
1319 SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
1320 MachineMemOperand *MMO);
1321 SDValue
1322 getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
1323 MachinePointerInfo PtrInfo, EVT SVT, Align Alignment,
1324 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1325 const AAMDNodes &AAInfo = AAMDNodes());
1326 inline SDValue
1327 getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
1328 MachinePointerInfo PtrInfo, EVT SVT,
1329 MaybeAlign Alignment = MaybeAlign(),
1330 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1331 const AAMDNodes &AAInfo = AAMDNodes()) {
1332 return getTruncStore(Chain, dl, Val, Ptr, PtrInfo, SVT,
1333 Alignment.getValueOr(getEVTAlign(SVT)), MMOFlags,
1334 AAInfo);
1335 }
1336 /// FIXME: Remove once transition to Align is over.
1337 inline SDValue
1338 getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
1339 MachinePointerInfo PtrInfo, EVT SVT, unsigned Alignment,
1340 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1341 const AAMDNodes &AAInfo = AAMDNodes()) {
1342 return getTruncStore(Chain, dl, Val, Ptr, PtrInfo, SVT,
1343 MaybeAlign(Alignment), MMOFlags, AAInfo);
1344 }
1345 SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
1346 SDValue Ptr, EVT SVT, MachineMemOperand *MMO);
1347 SDValue getIndexedStore(SDValue OrigStore, const SDLoc &dl, SDValue Base,
1348 SDValue Offset, ISD::MemIndexedMode AM);
1349
1350 SDValue getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Base,
1351 SDValue Offset, SDValue Mask, SDValue Src0, EVT MemVT,
1352 MachineMemOperand *MMO, ISD::MemIndexedMode AM,
1353 ISD::LoadExtType, bool IsExpanding = false);
1354 SDValue getIndexedMaskedLoad(SDValue OrigLoad, const SDLoc &dl, SDValue Base,
1355 SDValue Offset, ISD::MemIndexedMode AM);
1356 SDValue getMaskedStore(SDValue Chain, const SDLoc &dl, SDValue Val,
1357 SDValue Base, SDValue Offset, SDValue Mask, EVT MemVT,
1358 MachineMemOperand *MMO, ISD::MemIndexedMode AM,
1359 bool IsTruncating = false, bool IsCompressing = false);
1360 SDValue getIndexedMaskedStore(SDValue OrigStore, const SDLoc &dl,
1361 SDValue Base, SDValue Offset,
1362 ISD::MemIndexedMode AM);
1363 SDValue getMaskedGather(SDVTList VTs, EVT VT, const SDLoc &dl,
1364 ArrayRef<SDValue> Ops, MachineMemOperand *MMO,
1365 ISD::MemIndexType IndexType, ISD::LoadExtType ExtTy);
1366 SDValue getMaskedScatter(SDVTList VTs, EVT VT, const SDLoc &dl,
1367 ArrayRef<SDValue> Ops, MachineMemOperand *MMO,
1368 ISD::MemIndexType IndexType,
1369 bool IsTruncating = false);
1370
1371 /// Construct a node to track a Value* through the backend.
1372 SDValue getSrcValue(const Value *v);
1373
1374 /// Return an MDNodeSDNode which holds an MDNode.
1375 SDValue getMDNode(const MDNode *MD);
1376
1377 /// Return a bitcast using the SDLoc of the value operand, and casting to the
1378 /// provided type. Use getNode to set a custom SDLoc.
1379 SDValue getBitcast(EVT VT, SDValue V);
1380
1381 /// Return an AddrSpaceCastSDNode.
1382 SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS,
1383 unsigned DestAS);
1384
1385 /// Return a freeze using the SDLoc of the value operand.
1386 SDValue getFreeze(SDValue V);
1387
1388 /// Return an AssertAlignSDNode.
1389 SDValue getAssertAlign(const SDLoc &DL, SDValue V, Align A);
1390
1391 /// Return the specified value casted to
1392 /// the target's desired shift amount type.
1393 SDValue getShiftAmountOperand(EVT LHSTy, SDValue Op);
1394
1395 /// Expand the specified \c ISD::VAARG node as the Legalize pass would.
1396 SDValue expandVAArg(SDNode *Node);
1397
1398 /// Expand the specified \c ISD::VACOPY node as the Legalize pass would.
1399 SDValue expandVACopy(SDNode *Node);
1400
1401 /// Returs an GlobalAddress of the function from the current module with
1402 /// name matching the given ExternalSymbol. Additionally can provide the
1403 /// matched function.
1404 /// Panics the function doesn't exists.
1405 SDValue getSymbolFunctionGlobalAddress(SDValue Op,
1406 Function **TargetFunction = nullptr);
1407
1408 /// *Mutate* the specified node in-place to have the
1409 /// specified operands. If the resultant node already exists in the DAG,
1410 /// this does not modify the specified node, instead it returns the node that
1411 /// already exists. If the resultant node does not exist in the DAG, the
1412 /// input node is returned. As a degenerate case, if you specify the same
1413 /// input operands as the node already has, the input node is returned.
1414 SDNode *UpdateNodeOperands(SDNode *N, SDValue Op);
1415 SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2);
1416 SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
1417 SDValue Op3);
1418 SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
1419 SDValue Op3, SDValue Op4);
1420 SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
1421 SDValue Op3, SDValue Op4, SDValue Op5);
1422 SDNode *UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops);
1423
1424 /// Creates a new TokenFactor containing \p Vals. If \p Vals contains 64k
1425 /// values or more, move values into new TokenFactors in 64k-1 blocks, until
1426 /// the final TokenFactor has less than 64k operands.
1427 SDValue getTokenFactor(const SDLoc &DL, SmallVectorImpl<SDValue> &Vals);
1428
1429 /// *Mutate* the specified machine node's memory references to the provided
1430 /// list.
1431 void setNodeMemRefs(MachineSDNode *N,
1432 ArrayRef<MachineMemOperand *> NewMemRefs);
1433
1434 // Calculate divergence of node \p N based on its operands.
1435 bool calculateDivergence(SDNode *N);
1436
1437 // Propagates the change in divergence to users
1438 void updateDivergence(SDNode * N);
1439
1440 /// These are used for target selectors to *mutate* the
1441 /// specified node to have the specified return type, Target opcode, and
1442 /// operands. Note that target opcodes are stored as
1443 /// ~TargetOpcode in the node opcode field. The resultant node is returned.
1444 SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT);
1445 SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT, SDValue Op1);
1446 SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT,
1447 SDValue Op1, SDValue Op2);
1448 SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT,
1449 SDValue Op1, SDValue Op2, SDValue Op3);
1450 SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT,
1451 ArrayRef<SDValue> Ops);
1452 SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT1, EVT VT2);
1453 SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT1,
1454 EVT VT2, ArrayRef<SDValue> Ops);
1455 SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT1,
1456 EVT VT2, EVT VT3, ArrayRef<SDValue> Ops);
1457 SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT1,
1458 EVT VT2, SDValue Op1, SDValue Op2);
1459 SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, SDVTList VTs,
1460 ArrayRef<SDValue> Ops);
1461
1462 /// This *mutates* the specified node to have the specified
1463 /// return type, opcode, and operands.
1464 SDNode *MorphNodeTo(SDNode *N, unsigned Opc, SDVTList VTs,
1465 ArrayRef<SDValue> Ops);
1466
1467 /// Mutate the specified strict FP node to its non-strict equivalent,
1468 /// unlinking the node from its chain and dropping the metadata arguments.
1469 /// The node must be a strict FP node.
1470 SDNode *mutateStrictFPToFP(SDNode *Node);
1471
1472 /// These are used for target selectors to create a new node
1473 /// with specified return type(s), MachineInstr opcode, and operands.
1474 ///
1475 /// Note that getMachineNode returns the resultant node. If there is already
1476 /// a node of the specified opcode and operands, it returns that node instead
1477 /// of the current one.
1478 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT);
1479 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT,
1480 SDValue Op1);
1481 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT,
1482 SDValue Op1, SDValue Op2);
1483 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT,
1484 SDValue Op1, SDValue Op2, SDValue Op3);
1485 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT,
1486 ArrayRef<SDValue> Ops);
1487 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
1488 EVT VT2, SDValue Op1, SDValue Op2);
1489 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
1490 EVT VT2, SDValue Op1, SDValue Op2, SDValue Op3);
1491 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
1492 EVT VT2, ArrayRef<SDValue> Ops);
1493 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
1494 EVT VT2, EVT VT3, SDValue Op1, SDValue Op2);
1495 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
1496 EVT VT2, EVT VT3, SDValue Op1, SDValue Op2,
1497 SDValue Op3);
1498 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
1499 EVT VT2, EVT VT3, ArrayRef<SDValue> Ops);
1500 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl,
1501 ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops);
1502 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, SDVTList VTs,
1503 ArrayRef<SDValue> Ops);
1504
1505 /// A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
1506 SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT,
1507 SDValue Operand);
1508
1509 /// A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
1510 SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT,
1511 SDValue Operand, SDValue Subreg);
1512
1513 /// Get the specified node if it's already available, or else return NULL.
1514 SDNode *getNodeIfExists(unsigned Opcode, SDVTList VTList,
1515 ArrayRef<SDValue> Ops, const SDNodeFlags Flags);
1516 SDNode *getNodeIfExists(unsigned Opcode, SDVTList VTList,
1517 ArrayRef<SDValue> Ops);
1518
1519 /// Check if a node exists without modifying its flags.
1520 bool doesNodeExist(unsigned Opcode, SDVTList VTList, ArrayRef<SDValue> Ops);
1521
1522 /// Creates a SDDbgValue node.
1523 SDDbgValue *getDbgValue(DIVariable *Var, DIExpression *Expr, SDNode *N,
1524 unsigned R, bool IsIndirect, const DebugLoc &DL,
1525 unsigned O);
1526
1527 /// Creates a constant SDDbgValue node.
1528 SDDbgValue *getConstantDbgValue(DIVariable *Var, DIExpression *Expr,
1529 const Value *C, const DebugLoc &DL,
1530 unsigned O);
1531
1532 /// Creates a FrameIndex SDDbgValue node.
1533 SDDbgValue *getFrameIndexDbgValue(DIVariable *Var, DIExpression *Expr,
1534 unsigned FI, bool IsIndirect,
1535 const DebugLoc &DL, unsigned O);
1536
1537 /// Creates a VReg SDDbgValue node.
1538 SDDbgValue *getVRegDbgValue(DIVariable *Var, DIExpression *Expr,
1539 unsigned VReg, bool IsIndirect,
1540 const DebugLoc &DL, unsigned O);
1541
1542 /// Creates a SDDbgLabel node.
1543 SDDbgLabel *getDbgLabel(DILabel *Label, const DebugLoc &DL, unsigned O);
1544
1545 /// Transfer debug values from one node to another, while optionally
1546 /// generating fragment expressions for split-up values. If \p InvalidateDbg
1547 /// is set, debug values are invalidated after they are transferred.
1548 void transferDbgValues(SDValue From, SDValue To, unsigned OffsetInBits = 0,
1549 unsigned SizeInBits = 0, bool InvalidateDbg = true);
1550
1551 /// Remove the specified node from the system. If any of its
1552 /// operands then becomes dead, remove them as well. Inform UpdateListener
1553 /// for each node deleted.
1554 void RemoveDeadNode(SDNode *N);
1555
1556 /// This method deletes the unreachable nodes in the
1557 /// given list, and any nodes that become unreachable as a result.
1558 void RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes);
1559
1560 /// Modify anything using 'From' to use 'To' instead.
1561 /// This can cause recursive merging of nodes in the DAG. Use the first
1562 /// version if 'From' is known to have a single result, use the second
1563 /// if you have two nodes with identical results (or if 'To' has a superset
1564 /// of the results of 'From'), use the third otherwise.
1565 ///
1566 /// These methods all take an optional UpdateListener, which (if not null) is
1567 /// informed about nodes that are deleted and modified due to recursive
1568 /// changes in the dag.
1569 ///
1570 /// These functions only replace all existing uses. It's possible that as
1571 /// these replacements are being performed, CSE may cause the From node
1572 /// to be given new uses. These new uses of From are left in place, and
1573 /// not automatically transferred to To.
1574 ///
1575 void ReplaceAllUsesWith(SDValue From, SDValue To);
1576 void ReplaceAllUsesWith(SDNode *From, SDNode *To);
1577 void ReplaceAllUsesWith(SDNode *From, const SDValue *To);
1578
1579 /// Replace any uses of From with To, leaving
1580 /// uses of other values produced by From.getNode() alone.
1581 void ReplaceAllUsesOfValueWith(SDValue From, SDValue To);
1582
1583 /// Like ReplaceAllUsesOfValueWith, but for multiple values at once.
1584 /// This correctly handles the case where
1585 /// there is an overlap between the From values and the To values.
1586 void ReplaceAllUsesOfValuesWith(const SDValue *From, const SDValue *To,
1587 unsigned Num);
1588
1589 /// If an existing load has uses of its chain, create a token factor node with
1590 /// that chain and the new memory node's chain and update users of the old
1591 /// chain to the token factor. This ensures that the new memory node will have
1592 /// the same relative memory dependency position as the old load. Returns the
1593 /// new merged load chain.
1594 SDValue makeEquivalentMemoryOrdering(SDValue OldChain, SDValue NewMemOpChain);
1595
1596 /// If an existing load has uses of its chain, create a token factor node with
1597 /// that chain and the new memory node's chain and update users of the old
1598 /// chain to the token factor. This ensures that the new memory node will have
1599 /// the same relative memory dependency position as the old load. Returns the
1600 /// new merged load chain.
1601 SDValue makeEquivalentMemoryOrdering(LoadSDNode *OldLoad, SDValue NewMemOp);
1602
1603 /// Topological-sort the AllNodes list and a
1604 /// assign a unique node id for each node in the DAG based on their
1605 /// topological order. Returns the number of nodes.
1606 unsigned AssignTopologicalOrder();
1607
1608 /// Move node N in the AllNodes list to be immediately
1609 /// before the given iterator Position. This may be used to update the
1610 /// topological ordering when the list of nodes is modified.
1611 void RepositionNode(allnodes_iterator Position, SDNode *N) {
1612 AllNodes.insert(Position, AllNodes.remove(N));
1613 }
1614
1615 /// Returns an APFloat semantics tag appropriate for the given type. If VT is
1616 /// a vector type, the element semantics are returned.
1617 static const fltSemantics &EVTToAPFloatSemantics(EVT VT) {
1618 switch (VT.getScalarType().getSimpleVT().SimpleTy) {
1619 default: llvm_unreachable("Unknown FP format")::llvm::llvm_unreachable_internal("Unknown FP format", "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1619)
;
1620 case MVT::f16: return APFloat::IEEEhalf();
1621 case MVT::bf16: return APFloat::BFloat();
1622 case MVT::f32: return APFloat::IEEEsingle();
1623 case MVT::f64: return APFloat::IEEEdouble();
1624 case MVT::f80: return APFloat::x87DoubleExtended();
1625 case MVT::f128: return APFloat::IEEEquad();
1626 case MVT::ppcf128: return APFloat::PPCDoubleDouble();
1627 }
1628 }
1629
1630 /// Add a dbg_value SDNode. If SD is non-null that means the
1631 /// value is produced by SD.
1632 void AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter);
1633
1634 /// Add a dbg_label SDNode.
1635 void AddDbgLabel(SDDbgLabel *DB);
1636
1637 /// Get the debug values which reference the given SDNode.
1638 ArrayRef<SDDbgValue*> GetDbgValues(const SDNode* SD) const {
1639 return DbgInfo->getSDDbgValues(SD);
1640 }
1641
1642public:
1643 /// Return true if there are any SDDbgValue nodes associated
1644 /// with this SelectionDAG.
1645 bool hasDebugValues() const { return !DbgInfo->empty(); }
1646
1647 SDDbgInfo::DbgIterator DbgBegin() const { return DbgInfo->DbgBegin(); }
1648 SDDbgInfo::DbgIterator DbgEnd() const { return DbgInfo->DbgEnd(); }
1649
1650 SDDbgInfo::DbgIterator ByvalParmDbgBegin() const {
1651 return DbgInfo->ByvalParmDbgBegin();
1652 }
1653 SDDbgInfo::DbgIterator ByvalParmDbgEnd() const {
1654 return DbgInfo->ByvalParmDbgEnd();
1655 }
1656
1657 SDDbgInfo::DbgLabelIterator DbgLabelBegin() const {
1658 return DbgInfo->DbgLabelBegin();
1659 }
1660 SDDbgInfo::DbgLabelIterator DbgLabelEnd() const {
1661 return DbgInfo->DbgLabelEnd();
1662 }
1663
1664 /// To be invoked on an SDNode that is slated to be erased. This
1665 /// function mirrors \c llvm::salvageDebugInfo.
1666 void salvageDebugInfo(SDNode &N);
1667
1668 void dump() const;
1669
1670 /// In most cases this function returns the ABI alignment for a given type,
1671 /// except for illegal vector types where the alignment exceeds that of the
1672 /// stack. In such cases we attempt to break the vector down to a legal type
1673 /// and return the ABI alignment for that instead.
1674 Align getReducedAlign(EVT VT, bool UseABI);
1675
1676 /// Create a stack temporary based on the size in bytes and the alignment
1677 SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment);
1678
1679 /// Create a stack temporary, suitable for holding the specified value type.
1680 /// If minAlign is specified, the slot size will have at least that alignment.
1681 SDValue CreateStackTemporary(EVT VT, unsigned minAlign = 1);
1682
1683 /// Create a stack temporary suitable for holding either of the specified
1684 /// value types.
1685 SDValue CreateStackTemporary(EVT VT1, EVT VT2);
1686
1687 SDValue FoldSymbolOffset(unsigned Opcode, EVT VT,
1688 const GlobalAddressSDNode *GA,
1689 const SDNode *N2);
1690
1691 SDValue FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, EVT VT,
1692 ArrayRef<SDValue> Ops);
1693
1694 SDValue FoldConstantVectorArithmetic(unsigned Opcode, const SDLoc &DL, EVT VT,
1695 ArrayRef<SDValue> Ops,
1696 const SDNodeFlags Flags = SDNodeFlags());
1697
1698 /// Fold floating-point operations with 2 operands when both operands are
1699 /// constants and/or undefined.
1700 SDValue foldConstantFPMath(unsigned Opcode, const SDLoc &DL, EVT VT,
1701 SDValue N1, SDValue N2);
1702
1703 /// Constant fold a setcc to true or false.
1704 SDValue FoldSetCC(EVT VT, SDValue N1, SDValue N2, ISD::CondCode Cond,
1705 const SDLoc &dl);
1706
1707 /// See if the specified operand can be simplified with the knowledge that
1708 /// only the bits specified by DemandedBits are used. If so, return the
1709 /// simpler operand, otherwise return a null SDValue.
1710 ///
1711 /// (This exists alongside SimplifyDemandedBits because GetDemandedBits can
1712 /// simplify nodes with multiple uses more aggressively.)
1713 SDValue GetDemandedBits(SDValue V, const APInt &DemandedBits);
1714
1715 /// See if the specified operand can be simplified with the knowledge that
1716 /// only the bits specified by DemandedBits are used in the elements specified
1717 /// by DemandedElts. If so, return the simpler operand, otherwise return a
1718 /// null SDValue.
1719 ///
1720 /// (This exists alongside SimplifyDemandedBits because GetDemandedBits can
1721 /// simplify nodes with multiple uses more aggressively.)
1722 SDValue GetDemandedBits(SDValue V, const APInt &DemandedBits,
1723 const APInt &DemandedElts);
1724
1725 /// Return true if the sign bit of Op is known to be zero.
1726 /// We use this predicate to simplify operations downstream.
1727 bool SignBitIsZero(SDValue Op, unsigned Depth = 0) const;
1728
1729 /// Return true if 'Op & Mask' is known to be zero. We
1730 /// use this predicate to simplify operations downstream. Op and Mask are
1731 /// known to be the same type.
1732 bool MaskedValueIsZero(SDValue Op, const APInt &Mask,
1733 unsigned Depth = 0) const;
1734
1735 /// Return true if 'Op & Mask' is known to be zero in DemandedElts. We
1736 /// use this predicate to simplify operations downstream. Op and Mask are
1737 /// known to be the same type.
1738 bool MaskedValueIsZero(SDValue Op, const APInt &Mask,
1739 const APInt &DemandedElts, unsigned Depth = 0) const;
1740
1741 /// Return true if '(Op & Mask) == Mask'.
1742 /// Op and Mask are known to be the same type.
1743 bool MaskedValueIsAllOnes(SDValue Op, const APInt &Mask,
1744 unsigned Depth = 0) const;
1745
1746 /// Determine which bits of Op are known to be either zero or one and return
1747 /// them in Known. For vectors, the known bits are those that are shared by
1748 /// every vector element.
1749 /// Targets can implement the computeKnownBitsForTargetNode method in the
1750 /// TargetLowering class to allow target nodes to be understood.
1751 KnownBits computeKnownBits(SDValue Op, unsigned Depth = 0) const;
1752
1753 /// Determine which bits of Op are known to be either zero or one and return
1754 /// them in Known. The DemandedElts argument allows us to only collect the
1755 /// known bits that are shared by the requested vector elements.
1756 /// Targets can implement the computeKnownBitsForTargetNode method in the
1757 /// TargetLowering class to allow target nodes to be understood.
1758 KnownBits computeKnownBits(SDValue Op, const APInt &DemandedElts,
1759 unsigned Depth = 0) const;
1760
1761 /// Used to represent the possible overflow behavior of an operation.
1762 /// Never: the operation cannot overflow.
1763 /// Always: the operation will always overflow.
1764 /// Sometime: the operation may or may not overflow.
1765 enum OverflowKind {
1766 OFK_Never,
1767 OFK_Sometime,
1768 OFK_Always,
1769 };
1770
1771 /// Determine if the result of the addition of 2 node can overflow.
1772 OverflowKind computeOverflowKind(SDValue N0, SDValue N1) const;
1773
1774 /// Test if the given value is known to have exactly one bit set. This differs
1775 /// from computeKnownBits in that it doesn't necessarily determine which bit
1776 /// is set.
1777 bool isKnownToBeAPowerOfTwo(SDValue Val) const;
1778
1779 /// Return the number of times the sign bit of the register is replicated into
1780 /// the other bits. We know that at least 1 bit is always equal to the sign
1781 /// bit (itself), but other cases can give us information. For example,
1782 /// immediately after an "SRA X, 2", we know that the top 3 bits are all equal
1783 /// to each other, so we return 3. Targets can implement the
1784 /// ComputeNumSignBitsForTarget method in the TargetLowering class to allow
1785 /// target nodes to be understood.
1786 unsigned ComputeNumSignBits(SDValue Op, unsigned Depth = 0) const;
1787
1788 /// Return the number of times the sign bit of the register is replicated into
1789 /// the other bits. We know that at least 1 bit is always equal to the sign
1790 /// bit (itself), but other cases can give us information. For example,
1791 /// immediately after an "SRA X, 2", we know that the top 3 bits are all equal
1792 /// to each other, so we return 3. The DemandedElts argument allows
1793 /// us to only collect the minimum sign bits of the requested vector elements.
1794 /// Targets can implement the ComputeNumSignBitsForTarget method in the
1795 /// TargetLowering class to allow target nodes to be understood.
1796 unsigned ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
1797 unsigned Depth = 0) const;
1798
1799 /// Return true if the specified operand is an ISD::ADD with a ConstantSDNode
1800 /// on the right-hand side, or if it is an ISD::OR with a ConstantSDNode that
1801 /// is guaranteed to have the same semantics as an ADD. This handles the
1802 /// equivalence:
1803 /// X|Cst == X+Cst iff X&Cst = 0.
1804 bool isBaseWithConstantOffset(SDValue Op) const;
1805
1806 /// Test whether the given SDValue is known to never be NaN. If \p SNaN is
1807 /// true, returns if \p Op is known to never be a signaling NaN (it may still
1808 /// be a qNaN).
1809 bool isKnownNeverNaN(SDValue Op, bool SNaN = false, unsigned Depth = 0) const;
1810
1811 /// \returns true if \p Op is known to never be a signaling NaN.
// Convenience wrapper: queries isKnownNeverNaN with SNaN=true, i.e. asks
// only about signaling NaNs (a quiet NaN may still be possible).
1812 bool isKnownNeverSNaN(SDValue Op, unsigned Depth = 0) const {
1813 return isKnownNeverNaN(Op, true, Depth);
1814 }
1815
1816 /// Test whether the given floating point SDValue is known to never be
1817 /// positive or negative zero.
1818 bool isKnownNeverZeroFloat(SDValue Op) const;
1819
1820 /// Test whether the given SDValue is known to contain non-zero value(s).
1821 bool isKnownNeverZero(SDValue Op) const;
1822
1823 /// Test whether two SDValues are known to compare equal. This
1824 /// is true if they are the same value, or if one is negative zero and the
1825 /// other positive zero.
1826 bool isEqualTo(SDValue A, SDValue B) const;
1827
1828 /// Return true if A and B have no common bits set. As an example, this can
1829 /// allow an 'add' to be transformed into an 'or'.
1830 bool haveNoCommonBitsSet(SDValue A, SDValue B) const;
1831
1832 /// Test whether \p V has a splatted value for all the demanded elements.
1833 ///
1834 /// On success \p UndefElts will indicate the elements that have UNDEF
1835 /// values instead of the splat value, this is only guaranteed to be correct
1836 /// for \p DemandedElts.
1837 ///
1838 /// NOTE: The function will return true for a demanded splat of UNDEF values.
1839 bool isSplatValue(SDValue V, const APInt &DemandedElts, APInt &UndefElts,
1840 unsigned Depth = 0);
1841
1842 /// Test whether \p V has a splatted value.
1843 bool isSplatValue(SDValue V, bool AllowUndefs = false);
1844
1845 /// If V is a splatted value, return the source vector and its splat index.
1846 SDValue getSplatSourceVector(SDValue V, int &SplatIndex);
1847
1848 /// If V is a splat vector, return its scalar source operand by extracting
1849 /// that element from the source vector.
1850 SDValue getSplatValue(SDValue V);
1851
1852 /// If a SHL/SRA/SRL node \p V has a constant or splat constant shift amount
1853 /// that is less than the element bit-width of the shift node, return it.
1854 const APInt *getValidShiftAmountConstant(SDValue V,
1855 const APInt &DemandedElts) const;
1856
1857 /// If a SHL/SRA/SRL node \p V has constant shift amounts that are all less
1858 /// than the element bit-width of the shift node, return the minimum value.
1859 const APInt *
1860 getValidMinimumShiftAmountConstant(SDValue V,
1861 const APInt &DemandedElts) const;
1862
1863 /// If a SHL/SRA/SRL node \p V has constant shift amounts that are all less
1864 /// than the element bit-width of the shift node, return the maximum value.
1865 const APInt *
1866 getValidMaximumShiftAmountConstant(SDValue V,
1867 const APInt &DemandedElts) const;
1868
1869 /// Match a binop + shuffle pyramid that represents a horizontal reduction
1870 /// over the elements of a vector starting from the EXTRACT_VECTOR_ELT node /p
1871 /// Extract. The reduction must use one of the opcodes listed in /p
1872 /// CandidateBinOps and on success /p BinOp will contain the matching opcode.
1873 /// Returns the vector that is being reduced on, or SDValue() if a reduction
1874 /// was not matched. If \p AllowPartials is set then in the case of a
1875 /// reduction pattern that only matches the first few stages, the extracted
1876 /// subvector of the start of the reduction is returned.
1877 SDValue matchBinOpReduction(SDNode *Extract, ISD::NodeType &BinOp,
1878 ArrayRef<ISD::NodeType> CandidateBinOps,
1879 bool AllowPartials = false);
1880
1881 /// Utility function used by legalize and lowering to
1882 /// "unroll" a vector operation by splitting out the scalars and operating
1883 /// on each element individually. If the ResNE is 0, fully unroll the vector
1884 /// op. If ResNE is less than the width of the vector op, unroll up to ResNE.
1885 /// If the ResNE is greater than the width of the vector op, unroll the
1886 /// vector op and fill the end of the resulting vector with UNDEFS.
1887 SDValue UnrollVectorOp(SDNode *N, unsigned ResNE = 0);
1888
1889 /// Like UnrollVectorOp(), but for the [US](ADD|SUB|MUL)O family of opcodes.
1890 /// This is a separate function because those opcodes have two results.
1891 std::pair<SDValue, SDValue> UnrollVectorOverflowOp(SDNode *N,
1892 unsigned ResNE = 0);
1893
1894 /// Return true if loads are next to each other and can be
1895 /// merged. Check that both are nonvolatile and if LD is loading
1896 /// 'Bytes' bytes from a location that is 'Dist' units away from the
1897 /// location that the 'Base' load is loading from.
1898 bool areNonVolatileConsecutiveLoads(LoadSDNode *LD, LoadSDNode *Base,
1899 unsigned Bytes, int Dist) const;
1900
1901 /// Infer alignment of a load / store address. Return None if it cannot be
1902 /// inferred.
1903 MaybeAlign InferPtrAlign(SDValue Ptr) const;
1904
1905 LLVM_ATTRIBUTE_DEPRECATED(inline unsigned InferPtrAlignment(SDValue Ptr)[[deprecated("Use InferPtrAlign instead")]] inline unsigned InferPtrAlignment
(SDValue Ptr) const
1906 const,[[deprecated("Use InferPtrAlign instead")]] inline unsigned InferPtrAlignment
(SDValue Ptr) const
1907 "Use InferPtrAlign instead")[[deprecated("Use InferPtrAlign instead")]] inline unsigned InferPtrAlignment
(SDValue Ptr) const
{
1908 if (auto A = InferPtrAlign(Ptr))
1909 return A->value();
1910 return 0;
1911 }
1912
1913 /// Compute the VTs needed for the low/hi parts of a type
1914 /// which is split (or expanded) into two not necessarily identical pieces.
1915 std::pair<EVT, EVT> GetSplitDestVTs(const EVT &VT) const;
1916
1917 /// Compute the VTs needed for the low/hi parts of a type, dependent on an
1918 /// enveloping VT that has been split into two identical pieces. Sets the
1919 /// HisIsEmpty flag when hi type has zero storage size.
1920 std::pair<EVT, EVT> GetDependentSplitDestVTs(const EVT &VT, const EVT &EnvVT,
1921 bool *HiIsEmpty) const;
1922
1923 /// Split the vector with EXTRACT_SUBVECTOR using the provides
1924 /// VTs and return the low/high part.
1925 std::pair<SDValue, SDValue> SplitVector(const SDValue &N, const SDLoc &DL,
1926 const EVT &LoVT, const EVT &HiVT);
1927
1928 /// Split the vector with EXTRACT_SUBVECTOR and return the low/high part.
// Convenience overload: derives the lo/hi destination VTs from N's own
// value type via GetSplitDestVTs, then delegates to the 4-argument form.
1929 std::pair<SDValue, SDValue> SplitVector(const SDValue &N, const SDLoc &DL) {
1930 EVT LoVT, HiVT;
1931 std::tie(LoVT, HiVT) = GetSplitDestVTs(N.getValueType());
1932 return SplitVector(N, DL, LoVT, HiVT);
1933 }
1934
1935 /// Split the node's operand with EXTRACT_SUBVECTOR and
1936 /// return the low/high part.
// Splits operand OpNo of N rather than N itself; the split is anchored at
// N's debug location. No bounds check on OpNo — caller must pass a valid
// operand index.
1937 std::pair<SDValue, SDValue> SplitVectorOperand(const SDNode *N, unsigned OpNo)
1938 {
1939 return SplitVector(N->getOperand(OpNo), SDLoc(N));
1940 }
1941
1942 /// Widen the vector up to the next power of two using INSERT_SUBVECTOR.
1943 SDValue WidenVector(const SDValue &N, const SDLoc &DL);
1944
1945 /// Append the extracted elements from Start to Count out of the vector Op in
1946 /// Args. If Count is 0, all of the elements will be extracted. The extracted
1947 /// elements will have type EVT if it is provided, and otherwise their type
1948 /// will be Op's element type.
1949 void ExtractVectorElements(SDValue Op, SmallVectorImpl<SDValue> &Args,
1950 unsigned Start = 0, unsigned Count = 0,
1951 EVT EltVT = EVT());
1952
1953 /// Compute the default alignment value for the given type.
1954 Align getEVTAlign(EVT MemoryVT) const;
1955 /// Compute the default alignment value for the given type.
1956 /// FIXME: Remove once transition to Align is over.
// Legacy unsigned-returning shim over getEVTAlign (see FIXME above: to be
// removed once the Align transition is complete).
1957 inline unsigned getEVTAlignment(EVT MemoryVT) const {
1958 return getEVTAlign(MemoryVT).value();
1959 }
1960
1961 /// Test whether the given value is a constant int or similar node.
1962 SDNode *isConstantIntBuildVectorOrConstantInt(SDValue N) const;
1963
1964 /// Test whether the given value is a constant FP or similar node.
1965 SDNode *isConstantFPBuildVectorOrConstantFP(SDValue N) const ;
1966
1967 /// \returns true if \p N is any kind of constant or build_vector of
1968 /// constants, int or float. If a vector, it may not necessarily be a splat.
// Disjunction of the int and FP constant tests above; relies on both
// helpers returning nullptr (falsy) for non-constants.
1969 inline bool isConstantValueOfAnyType(SDValue N) const {
1970 return isConstantIntBuildVectorOrConstantInt(N) ||
1971 isConstantFPBuildVectorOrConstantFP(N);
1972 }
1973
// Records (by move) call-site debug info for CallNode, creating the
// SDCallSiteDbgInfo map entry if absent. Overwrites any previous CSInfo.
1974 void addCallSiteInfo(const SDNode *CallNode, CallSiteInfoImpl &&CallInfo) {
1975 SDCallSiteDbgInfo[CallNode].CSInfo = std::move(CallInfo);
1976 }
1977
// Retrieves the call-site info recorded for CallNode, or a
// default-constructed CallSiteInfo if none was recorded.
// NOTE(review): the hit path *moves* CSInfo out while leaving the map
// entry in place, so a second query for the same node sees a moved-from
// value. Looks like a deliberate one-shot read — confirm callers only
// query each node once.
1978 CallSiteInfo getSDCallSiteInfo(const SDNode *CallNode) {
1979 auto I = SDCallSiteDbgInfo.find(CallNode);
1980 if (I != SDCallSiteDbgInfo.end())
1981 return std::move(I->second).CSInfo;
1982 return CallSiteInfo();
1983 }
1984
// Tags Node with heap-allocation-site metadata MD (entry created on
// demand; any previous tag is replaced).
1985 void addHeapAllocSite(const SDNode *Node, MDNode *MD) {
1986 SDCallSiteDbgInfo[Node].HeapAllocSite = MD;
1987 }
1988
1989 /// Return the HeapAllocSite type associated with the SDNode, if it exists.
// Read-only lookup counterpart of addHeapAllocSite: uses find() rather
// than operator[] so a query never creates a map entry; nullptr means
// "no heap-alloc-site metadata recorded".
1990 MDNode *getHeapAllocSite(const SDNode *Node) {
1991 auto It = SDCallSiteDbgInfo.find(Node);
1992 if (It == SDCallSiteDbgInfo.end())
1993 return nullptr;
1994 return It->second.HeapAllocSite;
1995 }
1996
// Records the nomerge attribute for Node. Only stores on true — a false
// argument is a no-op, which also avoids creating a map entry needlessly.
1997 void addNoMergeSiteInfo(const SDNode *Node, bool NoMerge) {
1998 if (NoMerge)
1999 SDCallSiteDbgInfo[Node].NoMerge = NoMerge;
2000 }
2001
// Returns whether Node was marked nomerge; absence of an entry reads as
// false (mirrors addNoMergeSiteInfo, which never stores false).
2002 bool getNoMergeSiteInfo(const SDNode *Node) {
2003 auto I = SDCallSiteDbgInfo.find(Node);
2004 if (I == SDCallSiteDbgInfo.end())
2005 return false;
2006 return I->second.NoMerge;
2007 }
2008
2009 /// Return the current function's default denormal handling kind for the given
2010 /// floating point type.
// Maps VT to its APFloat semantics and asks the current MachineFunction
// for the denormal handling mode of that FP type.
2011 DenormalMode getDenormalMode(EVT VT) const {
2012 return MF->getDenormalMode(EVTToAPFloatSemantics(VT));
2013 }
2014
2015 bool shouldOptForSize() const;
2016
2017 /// Get the (commutative) neutral element for the given opcode, if it exists.
2018 SDValue getNeutralElement(unsigned Opcode, const SDLoc &DL, EVT VT,
2019 SDNodeFlags Flags);
2020
2021private:
2022 void InsertNode(SDNode *N);
2023 bool RemoveNodeFromCSEMaps(SDNode *N);
2024 void AddModifiedNodeToCSEMaps(SDNode *N);
2025 SDNode *FindModifiedNodeSlot(SDNode *N, SDValue Op, void *&InsertPos);
2026 SDNode *FindModifiedNodeSlot(SDNode *N, SDValue Op1, SDValue Op2,
2027 void *&InsertPos);
2028 SDNode *FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
2029 void *&InsertPos);
2030 SDNode *UpdateSDLocOnMergeSDNode(SDNode *N, const SDLoc &loc);
2031
2032 void DeleteNodeNotInCSEMaps(SDNode *N);
2033 void DeallocateNode(SDNode *N);
2034
2035 void allnodes_clear();
2036
2037 /// Look up the node specified by ID in CSEMap. If it exists, return it. If
2038 /// not, return the insertion token that will make insertion faster. This
2039 /// overload is for nodes other than Constant or ConstantFP, use the other one
2040 /// for those.
2041 SDNode *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos);
2042
2043 /// Look up the node specified by ID in CSEMap. If it exists, return it. If
2044 /// not, return the insertion token that will make insertion faster. Performs
2045 /// additional processing for constant nodes.
2046 SDNode *FindNodeOrInsertPos(const FoldingSetNodeID &ID, const SDLoc &DL,
2047 void *&InsertPos);
2048
2049 /// List of non-single value types.
2050 FoldingSet<SDVTListNode> VTListMap;
2051
2052 /// Maps to auto-CSE operations.
2053 std::vector<CondCodeSDNode*> CondCodeNodes;
2054
2055 std::vector<SDNode*> ValueTypeNodes;
2056 std::map<EVT, SDNode*, EVT::compareRawBits> ExtendedValueTypeNodes;
2057 StringMap<SDNode*> ExternalSymbols;
2058
2059 std::map<std::pair<std::string, unsigned>, SDNode *> TargetExternalSymbols;
2060 DenseMap<MCSymbol *, SDNode *> MCSymbols;
2061
2062 FlagInserter *Inserter = nullptr;
2063};
2064
// GraphTraits adaptor: lets generic graph algorithms (viewGraph, DFS/SCC
// iterators, etc.) walk a SelectionDAG. Edge traversal is inherited from
// GraphTraits<SDNode*>; this specialization only supplies node iteration
// over the DAG's allnodes list.
2065template <> struct GraphTraits<SelectionDAG*> : public GraphTraits<SDNode*> {
2066 using nodes_iterator = pointer_iterator<SelectionDAG::allnodes_iterator>;
2067
2068 static nodes_iterator nodes_begin(SelectionDAG *G) {
2069 return nodes_iterator(G->allnodes_begin());
2070 }
2071
2072 static nodes_iterator nodes_end(SelectionDAG *G) {
2073 return nodes_iterator(G->allnodes_end());
2074 }
2075};
2076
2077} // end namespace llvm
2078
2079#endif // LLVM_CODEGEN_SELECTIONDAG_H

/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/SelectionDAGNodes.h

1//===- llvm/CodeGen/SelectionDAGNodes.h - SelectionDAG Nodes ----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file declares the SDNode class and derived classes, which are used to
10// represent the nodes and operations present in a SelectionDAG. These nodes
11// and operations are machine code level operations, with some similarities to
12// the GCC RTL representation.
13//
14// Clients should include the SelectionDAG.h file instead of this file directly.
15//
16//===----------------------------------------------------------------------===//
17
18#ifndef LLVM_CODEGEN_SELECTIONDAGNODES_H
19#define LLVM_CODEGEN_SELECTIONDAGNODES_H
20
21#include "llvm/ADT/APFloat.h"
22#include "llvm/ADT/ArrayRef.h"
23#include "llvm/ADT/BitVector.h"
24#include "llvm/ADT/FoldingSet.h"
25#include "llvm/ADT/GraphTraits.h"
26#include "llvm/ADT/SmallPtrSet.h"
27#include "llvm/ADT/SmallVector.h"
28#include "llvm/ADT/ilist_node.h"
29#include "llvm/ADT/iterator.h"
30#include "llvm/ADT/iterator_range.h"
31#include "llvm/CodeGen/ISDOpcodes.h"
32#include "llvm/CodeGen/MachineMemOperand.h"
33#include "llvm/CodeGen/Register.h"
34#include "llvm/CodeGen/ValueTypes.h"
35#include "llvm/IR/Constants.h"
36#include "llvm/IR/DebugLoc.h"
37#include "llvm/IR/Instruction.h"
38#include "llvm/IR/Instructions.h"
39#include "llvm/IR/Metadata.h"
40#include "llvm/IR/Operator.h"
41#include "llvm/Support/AlignOf.h"
42#include "llvm/Support/AtomicOrdering.h"
43#include "llvm/Support/Casting.h"
44#include "llvm/Support/ErrorHandling.h"
45#include "llvm/Support/MachineValueType.h"
46#include "llvm/Support/TypeSize.h"
47#include <algorithm>
48#include <cassert>
49#include <climits>
50#include <cstddef>
51#include <cstdint>
52#include <cstring>
53#include <iterator>
54#include <string>
55#include <tuple>
56
57namespace llvm {
58
59class APInt;
60class Constant;
61template <typename T> struct DenseMapInfo;
62class GlobalValue;
63class MachineBasicBlock;
64class MachineConstantPoolValue;
65class MCSymbol;
66class raw_ostream;
67class SDNode;
68class SelectionDAG;
69class Type;
70class Value;
71
72void checkForCycles(const SDNode *N, const SelectionDAG *DAG = nullptr,
73 bool force = false);
74
75/// This represents a list of ValueType's that has been intern'd by
76/// a SelectionDAG. Instances of this simple value class are returned by
77/// SelectionDAG::getVTList(...).
78///
79struct SDVTList {
// Non-owning pointer into the interned VT array held by the SelectionDAG;
// valid for the lifetime of the DAG that returned this SDVTList.
80 const EVT *VTs;
81 unsigned int NumVTs;
82};
83
84namespace ISD {
85
86 /// Node predicates
87
88/// If N is a BUILD_VECTOR or SPLAT_VECTOR node whose elements are all the
89/// same constant or undefined, return true and return the constant value in
90/// \p SplatValue.
91bool isConstantSplatVector(const SDNode *N, APInt &SplatValue);
92
93/// Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where
94/// all of the elements are ~0 or undef. If \p BuildVectorOnly is set to
95/// true, it only checks BUILD_VECTOR.
96bool isConstantSplatVectorAllOnes(const SDNode *N,
97 bool BuildVectorOnly = false);
98
99/// Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where
100/// all of the elements are 0 or undef. If \p BuildVectorOnly is set to true, it
101/// only checks BUILD_VECTOR.
102bool isConstantSplatVectorAllZeros(const SDNode *N,
103 bool BuildVectorOnly = false);
104
105/// Return true if the specified node is a BUILD_VECTOR where all of the
106/// elements are ~0 or undef.
107bool isBuildVectorAllOnes(const SDNode *N);
108
109/// Return true if the specified node is a BUILD_VECTOR where all of the
110/// elements are 0 or undef.
111bool isBuildVectorAllZeros(const SDNode *N);
112
113/// Return true if the specified node is a BUILD_VECTOR node of all
114/// ConstantSDNode or undef.
115bool isBuildVectorOfConstantSDNodes(const SDNode *N);
116
117/// Return true if the specified node is a BUILD_VECTOR node of all
118/// ConstantFPSDNode or undef.
119bool isBuildVectorOfConstantFPSDNodes(const SDNode *N);
120
121/// Return true if the node has at least one operand and all operands of the
122/// specified node are ISD::UNDEF.
123bool allOperandsUndef(const SDNode *N);
124
125} // end namespace ISD
126
127//===----------------------------------------------------------------------===//
128/// Unlike LLVM values, Selection DAG nodes may return multiple
129/// values as the result of a computation. Many nodes return multiple values,
130/// from loads (which define a token and a return value) to ADDC (which returns
131/// a result and a carry value), to calls (which may return an arbitrary number
132/// of values).
133///
134/// As such, each use of a SelectionDAG computation must indicate the node that
135/// computes it as well as which return value to use from that node. This pair
136/// of information is represented with the SDValue value type.
137///
// A (node, result-number) pair identifying one result of an SDNode.
// Value-semantic and cheap to copy: one pointer plus one unsigned.
// Default-constructed SDValues are "null" (Node == nullptr) and are used
// pervasively as a not-found/failure sentinel.
138class SDValue {
139 friend struct DenseMapInfo<SDValue>;
140
141 SDNode *Node = nullptr; // The node defining the value we are using.
142 unsigned ResNo = 0; // Which return value of the node we are using.
143
144public:
145 SDValue() = default;
146 SDValue(SDNode *node, unsigned resno);
147
148 /// get the index which selects a specific result in the SDNode
149 unsigned getResNo() const { return ResNo; }
150
151 /// get the SDNode which holds the desired result
152 SDNode *getNode() const { return Node; }
153
154 /// set the SDNode
155 void setNode(SDNode *N) { Node = N; }
156
// NOTE(review): operator-> returns Node with no null check; dereferencing
// a null SDValue this way is UB and is exactly the pattern the static
// analyzer reports ("Called C++ object pointer is null"). Callers must
// test the SDValue (operator bool) before using ->.
157 inline SDNode *operator->() const { return Node; }
158
// Equality/ordering compare the (Node, ResNo) pair, not the value computed.
159 bool operator==(const SDValue &O) const {
160 return Node == O.Node && ResNo == O.ResNo;
161 }
162 bool operator!=(const SDValue &O) const {
163 return !operator==(O);
164 }
165 bool operator<(const SDValue &O) const {
166 return std::tie(Node, ResNo) < std::tie(O.Node, O.ResNo);
167 }
168 explicit operator bool() const {
169 return Node != nullptr;
170 }
171
// Rebinds to result R of the same node. R is not range-checked here.
172 SDValue getValue(unsigned R) const {
173 return SDValue(Node, R);
174 }
175
176 /// Return true if this node is an operand of N.
177 bool isOperandOf(const SDNode *N) const;
178
179 /// Return the ValueType of the referenced return value.
180 inline EVT getValueType() const;
181
182 /// Return the simple ValueType of the referenced return value.
183 MVT getSimpleValueType() const {
184 return getValueType().getSimpleVT();
185 }
186
187 /// Returns the size of the value in bits.
188 ///
189 /// If the value type is a scalable vector type, the scalable property will
190 /// be set and the runtime size will be a positive integer multiple of the
191 /// base size.
192 TypeSize getValueSizeInBits() const {
193 return getValueType().getSizeInBits();
194 }
195
// Fixed bit width of the scalar element type (asserts on scalable types
// inside getFixedSizeInBits).
196 uint64_t getScalarValueSizeInBits() const {
197 return getValueType().getScalarType().getFixedSizeInBits();
198 }
199
200 // Forwarding methods - These forward to the corresponding methods in SDNode.
201 inline unsigned getOpcode() const;
202 inline unsigned getNumOperands() const;
203 inline const SDValue &getOperand(unsigned i) const;
204 inline uint64_t getConstantOperandVal(unsigned i) const;
205 inline const APInt &getConstantOperandAPInt(unsigned i) const;
206 inline bool isTargetMemoryOpcode() const;
207 inline bool isTargetOpcode() const;
208 inline bool isMachineOpcode() const;
209 inline bool isUndef() const;
210 inline unsigned getMachineOpcode() const;
211 inline const DebugLoc &getDebugLoc() const;
212 inline void dump() const;
213 inline void dump(const SelectionDAG *G) const;
214 inline void dumpr() const;
215 inline void dumpr(const SelectionDAG *G) const;
216
217 /// Return true if this operand (which must be a chain) reaches the
218 /// specified operand without crossing any side-effecting instructions.
219 /// In practice, this looks through token factors and non-volatile loads.
220 /// In order to remain efficient, this only
221 /// looks a couple of nodes in, it does not do an exhaustive search.
222 bool reachesChainWithoutSideEffects(SDValue Dest,
223 unsigned Depth = 2) const;
224
225 /// Return true if there are no nodes using value ResNo of Node.
226 inline bool use_empty() const;
227
228 /// Return true if there is exactly one node using value ResNo of Node.
229 inline bool hasOneUse() const;
230};
231
// DenseMap key traits for SDValue. Empty/tombstone sentinels keep
// Node == nullptr and are distinguished purely by ResNo (-1U / -2U) —
// this relies on no real SDValue ever carrying those ResNo values.
232template<> struct DenseMapInfo<SDValue> {
233 static inline SDValue getEmptyKey() {
234 SDValue V;
235 V.ResNo = -1U;
236 return V;
237 }
238
239 static inline SDValue getTombstoneKey() {
240 SDValue V;
241 V.ResNo = -2U;
242 return V;
243 }
244
// Hash mixes two shifted views of the node pointer (pointers are
// aligned, so low bits carry no entropy) and adds the result number.
245 static unsigned getHashValue(const SDValue &Val) {
246 return ((unsigned)((uintptr_t)Val.getNode() >> 4) ^
247 (unsigned)((uintptr_t)Val.getNode() >> 9)) + Val.getResNo();
248 }
249
250 static bool isEqual(const SDValue &LHS, const SDValue &RHS) {
251 return LHS == RHS;
252 }
253};
254
255/// Allow casting operators to work directly on
256/// SDValues as if they were SDNode*'s.
// simplify_type hooks let llvm::cast<>/dyn_cast<> accept an SDValue
// directly by unwrapping it to its SDNode*. Note the cast result drops
// the ResNo — only the node identity survives.
257template<> struct simplify_type<SDValue> {
258 using SimpleType = SDNode *;
259
260 static SimpleType getSimplifiedValue(SDValue &Val) {
261 return Val.getNode();
262 }
263};
// Same unwrapping for const SDValue; the /*const*/ reflects that SDNode*
// (not const SDNode*) is deliberately returned.
264template<> struct simplify_type<const SDValue> {
265 using SimpleType = /*const*/ SDNode *;
266
267 static SimpleType getSimplifiedValue(const SDValue &Val) {
268 return Val.getNode();
269 }
270};
271
272/// Represents a use of a SDNode. This class holds an SDValue,
273/// which records the SDNode being used and the result number, a
274/// pointer to the SDNode using the value, and Next and Prev pointers,
275/// which link together all the uses of an SDNode.
276///
// One operand slot of an SDNode, and simultaneously a link in the
// intrusive, doubly-linked use list of the SDNode being used. Non-copyable
// because each SDUse's identity *is* its position in an operand array and
// a use list.
277class SDUse {
278 /// Val - The value being used.
279 SDValue Val;
280 /// User - The user of this value.
281 SDNode *User = nullptr;
282 /// Prev, Next - Pointers to the uses list of the SDNode referred by
283 /// this operand.
284 SDUse **Prev = nullptr;
285 SDUse *Next = nullptr;
286
287public:
288 SDUse() = default;
289 SDUse(const SDUse &U) = delete;
290 SDUse &operator=(const SDUse &) = delete;
291
292 /// Normally SDUse will just implicitly convert to an SDValue that it holds.
293 operator const SDValue&() const { return Val; }
294
295 /// If implicit conversion to SDValue doesn't work, the get() method returns
296 /// the SDValue.
297 const SDValue &get() const { return Val; }
298
299 /// This returns the SDNode that contains this Use.
300 SDNode *getUser() { return User; }
301
302 /// Get the next SDUse in the use list.
303 SDUse *getNext() const { return Next; }
304
305 /// Convenience function for get().getNode().
306 SDNode *getNode() const { return Val.getNode(); }
307 /// Convenience function for get().getResNo().
308 unsigned getResNo() const { return Val.getResNo(); }
309 /// Convenience function for get().getValueType().
310 EVT getValueType() const { return Val.getValueType(); }
311
312 /// Convenience function for get().operator==
313 bool operator==(const SDValue &V) const {
314 return Val == V;
315 }
316
317 /// Convenience function for get().operator!=
318 bool operator!=(const SDValue &V) const {
319 return Val != V;
320 }
321
322 /// Convenience function for get().operator<
323 bool operator<(const SDValue &V) const {
324 return Val < V;
325 }
326
327private:
328 friend class SelectionDAG;
329 friend class SDNode;
330 // TODO: unfriend HandleSDNode once we fix its operand handling.
331 friend class HandleSDNode;
332
333 void setUser(SDNode *p) { User = p; }
334
335 /// Remove this use from its existing use list, assign it the
336 /// given value, and add it to the new value's node's use list.
337 inline void set(const SDValue &V);
338 /// Like set, but only supports initializing a newly-allocated
339 /// SDUse with a non-null value.
340 inline void setInitial(const SDValue &V);
341 /// Like set, but only sets the Node portion of the value,
342 /// leaving the ResNo portion unmodified.
343 inline void setNode(SDNode *N);
344
// Pushes this use at the front of the list headed by *List. Prev is a
// pointer-to-pointer so head insertion and removal need no special case.
345 void addToList(SDUse **List) {
346 Next = *List;
347 if (Next) Next->Prev = &Next;
348 Prev = List;
349 *List = this;
350 }
351
// Unlinks this use. Precondition: the use is on a list (Prev non-null,
// established by addToList); Prev/Next are left dangling, so the use must
// be re-added before the list is walked through it again.
352 void removeFromList() {
353 *Prev = Next;
354 if (Next) Next->Prev = Prev;
355 }
356};
357
358/// simplify_type specializations - Allow casting operators to work directly on
359/// SDValues as if they were SDNode*'s.
// Allows cast<>/dyn_cast<> on an SDUse by unwrapping to the used SDNode*,
// mirroring the simplify_type<SDValue> specialization above.
360template<> struct simplify_type<SDUse> {
361 using SimpleType = SDNode *;
362
363 static SimpleType getSimplifiedValue(SDUse &Val) {
364 return Val.getNode();
365 }
366};
367
368/// These are IR-level optimization flags that may be propagated to SDNodes.
369/// TODO: This data structure should be shared by the IR optimizer and the
370/// the backend.
371struct SDNodeFlags {
372private:
373 bool NoUnsignedWrap : 1;
374 bool NoSignedWrap : 1;
375 bool Exact : 1;
376 bool NoNaNs : 1;
377 bool NoInfs : 1;
378 bool NoSignedZeros : 1;
379 bool AllowReciprocal : 1;
380 bool AllowContract : 1;
381 bool ApproximateFuncs : 1;
382 bool AllowReassociation : 1;
383
384 // We assume instructions do not raise floating-point exceptions by default,
385 // and only those marked explicitly may do so. We could choose to represent
386 // this via a positive "FPExcept" flags like on the MI level, but having a
387 // negative "NoFPExcept" flag here (that defaults to true) makes the flag
388 // intersection logic more straightforward.
389 bool NoFPExcept : 1;
390
391public:
392 /// Default constructor turns off all optimization flags.
393 SDNodeFlags()
394 : NoUnsignedWrap(false), NoSignedWrap(false), Exact(false), NoNaNs(false),
395 NoInfs(false), NoSignedZeros(false), AllowReciprocal(false),
396 AllowContract(false), ApproximateFuncs(false),
397 AllowReassociation(false), NoFPExcept(false) {}
398
399 /// Propagate the fast-math-flags from an IR FPMathOperator.
400 void copyFMF(const FPMathOperator &FPMO) {
401 setNoNaNs(FPMO.hasNoNaNs());
402 setNoInfs(FPMO.hasNoInfs());
403 setNoSignedZeros(FPMO.hasNoSignedZeros());
404 setAllowReciprocal(FPMO.hasAllowReciprocal());
405 setAllowContract(FPMO.hasAllowContract());
406 setApproximateFuncs(FPMO.hasApproxFunc());
407 setAllowReassociation(FPMO.hasAllowReassoc());
408 }
409
410 // These are mutators for each flag.
411 void setNoUnsignedWrap(bool b) { NoUnsignedWrap = b; }
412 void setNoSignedWrap(bool b) { NoSignedWrap = b; }
413 void setExact(bool b) { Exact = b; }
414 void setNoNaNs(bool b) { NoNaNs = b; }
415 void setNoInfs(bool b) { NoInfs = b; }
416 void setNoSignedZeros(bool b) { NoSignedZeros = b; }
417 void setAllowReciprocal(bool b) { AllowReciprocal = b; }
418 void setAllowContract(bool b) { AllowContract = b; }
419 void setApproximateFuncs(bool b) { ApproximateFuncs = b; }
420 void setAllowReassociation(bool b) { AllowReassociation = b; }
421 void setNoFPExcept(bool b) { NoFPExcept = b; }
422
423 // These are accessors for each flag.
424 bool hasNoUnsignedWrap() const { return NoUnsignedWrap; }
425 bool hasNoSignedWrap() const { return NoSignedWrap; }
426 bool hasExact() const { return Exact; }
427 bool hasNoNaNs() const { return NoNaNs; }
428 bool hasNoInfs() const { return NoInfs; }
429 bool hasNoSignedZeros() const { return NoSignedZeros; }
430 bool hasAllowReciprocal() const { return AllowReciprocal; }
431 bool hasAllowContract() const { return AllowContract; }
432 bool hasApproximateFuncs() const { return ApproximateFuncs; }
433 bool hasAllowReassociation() const { return AllowReassociation; }
434 bool hasNoFPExcept() const { return NoFPExcept; }
435
436 /// Clear any flags in this flag set that aren't also set in Flags. All
437 /// flags will be cleared if Flags are undefined.
438 void intersectWith(const SDNodeFlags Flags) {
439 NoUnsignedWrap &= Flags.NoUnsignedWrap;
440 NoSignedWrap &= Flags.NoSignedWrap;
441 Exact &= Flags.Exact;
442 NoNaNs &= Flags.NoNaNs;
443 NoInfs &= Flags.NoInfs;
444 NoSignedZeros &= Flags.NoSignedZeros;
445 AllowReciprocal &= Flags.AllowReciprocal;
446 AllowContract &= Flags.AllowContract;
447 ApproximateFuncs &= Flags.ApproximateFuncs;
448 AllowReassociation &= Flags.AllowReassociation;
449 NoFPExcept &= Flags.NoFPExcept;
450 }
451};
452
453/// Represents one node in the SelectionDAG.
454///
455class SDNode : public FoldingSetNode, public ilist_node<SDNode> {
456private:
457 /// The operation that this node performs.
458 int16_t NodeType;
459
460protected:
461 // We define a set of mini-helper classes to help us interpret the bits in our
462 // SubclassData. These are designed to fit within a uint16_t so they pack
463 // with NodeType.
464
465#if defined(_AIX) && (!defined(__GNUC__4) || defined(__ibmxl__))
466// Except for GCC; by default, AIX compilers store bit-fields in 4-byte words
467// and give the `pack` pragma push semantics.
468#define BEGIN_TWO_BYTE_PACK() _Pragma("pack(2)")pack(2)
469#define END_TWO_BYTE_PACK() _Pragma("pack(pop)")pack(pop)
470#else
471#define BEGIN_TWO_BYTE_PACK()
472#define END_TWO_BYTE_PACK()
473#endif
474
475BEGIN_TWO_BYTE_PACK()
476 class SDNodeBitfields {
477 friend class SDNode;
478 friend class MemIntrinsicSDNode;
479 friend class MemSDNode;
480 friend class SelectionDAG;
481
482 uint16_t HasDebugValue : 1;
483 uint16_t IsMemIntrinsic : 1;
484 uint16_t IsDivergent : 1;
485 };
486 enum { NumSDNodeBits = 3 };
487
488 class ConstantSDNodeBitfields {
489 friend class ConstantSDNode;
490
491 uint16_t : NumSDNodeBits;
492
493 uint16_t IsOpaque : 1;
494 };
495
496 class MemSDNodeBitfields {
497 friend class MemSDNode;
498 friend class MemIntrinsicSDNode;
499 friend class AtomicSDNode;
500
501 uint16_t : NumSDNodeBits;
502
503 uint16_t IsVolatile : 1;
504 uint16_t IsNonTemporal : 1;
505 uint16_t IsDereferenceable : 1;
506 uint16_t IsInvariant : 1;
507 };
508 enum { NumMemSDNodeBits = NumSDNodeBits + 4 };
509
510 class LSBaseSDNodeBitfields {
511 friend class LSBaseSDNode;
512 friend class MaskedLoadStoreSDNode;
513 friend class MaskedGatherScatterSDNode;
514
515 uint16_t : NumMemSDNodeBits;
516
517 // This storage is shared between disparate class hierarchies to hold an
518 // enumeration specific to the class hierarchy in use.
519 // LSBaseSDNode => enum ISD::MemIndexedMode
520 // MaskedLoadStoreBaseSDNode => enum ISD::MemIndexedMode
521 // MaskedGatherScatterSDNode => enum ISD::MemIndexType
522 uint16_t AddressingMode : 3;
523 };
524 enum { NumLSBaseSDNodeBits = NumMemSDNodeBits + 3 };
525
526 class LoadSDNodeBitfields {
527 friend class LoadSDNode;
528 friend class MaskedLoadSDNode;
529 friend class MaskedGatherSDNode;
530
531 uint16_t : NumLSBaseSDNodeBits;
532
533 uint16_t ExtTy : 2; // enum ISD::LoadExtType
534 uint16_t IsExpanding : 1;
535 };
536
537 class StoreSDNodeBitfields {
538 friend class StoreSDNode;
539 friend class MaskedStoreSDNode;
540 friend class MaskedScatterSDNode;
541
542 uint16_t : NumLSBaseSDNodeBits;
543
544 uint16_t IsTruncating : 1;
545 uint16_t IsCompressing : 1;
546 };
547
548 union {
549 char RawSDNodeBits[sizeof(uint16_t)];
550 SDNodeBitfields SDNodeBits;
551 ConstantSDNodeBitfields ConstantSDNodeBits;
552 MemSDNodeBitfields MemSDNodeBits;
553 LSBaseSDNodeBitfields LSBaseSDNodeBits;
554 LoadSDNodeBitfields LoadSDNodeBits;
555 StoreSDNodeBitfields StoreSDNodeBits;
556 };
557END_TWO_BYTE_PACK()
558#undef BEGIN_TWO_BYTE_PACK
559#undef END_TWO_BYTE_PACK
560
561 // RawSDNodeBits must cover the entirety of the union. This means that all of
562 // the union's members must have size <= RawSDNodeBits. We write the RHS as
563 // "2" instead of sizeof(RawSDNodeBits) because MSVC can't handle the latter.
564 static_assert(sizeof(SDNodeBitfields) <= 2, "field too wide");
565 static_assert(sizeof(ConstantSDNodeBitfields) <= 2, "field too wide");
566 static_assert(sizeof(MemSDNodeBitfields) <= 2, "field too wide");
567 static_assert(sizeof(LSBaseSDNodeBitfields) <= 2, "field too wide");
568 static_assert(sizeof(LoadSDNodeBitfields) <= 2, "field too wide");
569 static_assert(sizeof(StoreSDNodeBitfields) <= 2, "field too wide");
570
571private:
572 friend class SelectionDAG;
573 // TODO: unfriend HandleSDNode once we fix its operand handling.
574 friend class HandleSDNode;
575
576 /// Unique id per SDNode in the DAG.
577 int NodeId = -1;
578
579 /// The values that are used by this operation.
580 SDUse *OperandList = nullptr;
581
582 /// The types of the values this node defines. SDNode's may
583 /// define multiple values simultaneously.
584 const EVT *ValueList;
585
586 /// List of uses for this SDNode.
587 SDUse *UseList = nullptr;
588
589 /// The number of entries in the Operand/Value list.
590 unsigned short NumOperands = 0;
591 unsigned short NumValues;
592
593 // The ordering of the SDNodes. It roughly corresponds to the ordering of the
594 // original LLVM instructions.
595 // This is used for turning off scheduling, because we'll forgo
596 // the normal scheduling algorithms and output the instructions according to
597 // this ordering.
598 unsigned IROrder;
599
600 /// Source line information.
601 DebugLoc debugLoc;
602
603 /// Return a pointer to the specified value type.
604 static const EVT *getValueTypeList(EVT VT);
605
606 SDNodeFlags Flags;
607
608public:
609 /// Unique and persistent id per SDNode in the DAG.
610 /// Used for debug printing.
611 uint16_t PersistentId;
612
613 //===--------------------------------------------------------------------===//
614 // Accessors
615 //
616
617 /// Return the SelectionDAG opcode value for this node. For
618 /// pre-isel nodes (those for which isMachineOpcode returns false), these
619 /// are the opcode values in the ISD and <target>ISD namespaces. For
620 /// post-isel opcodes, see getMachineOpcode.
621 unsigned getOpcode() const { return (unsigned short)NodeType; }
622
623 /// Test if this node has a target-specific opcode (in the
624 /// \<target\>ISD namespace).
625 bool isTargetOpcode() const { return NodeType >= ISD::BUILTIN_OP_END; }
626
627 /// Test if this node has a target-specific opcode that may raise
628 /// FP exceptions (in the \<target\>ISD namespace and greater than
629 /// FIRST_TARGET_STRICTFP_OPCODE). Note that all target memory
630 /// opcode are currently automatically considered to possibly raise
631 /// FP exceptions as well.
632 bool isTargetStrictFPOpcode() const {
633 return NodeType >= ISD::FIRST_TARGET_STRICTFP_OPCODE;
634 }
635
636 /// Test if this node has a target-specific
637 /// memory-referencing opcode (in the \<target\>ISD namespace and
638 /// greater than FIRST_TARGET_MEMORY_OPCODE).
639 bool isTargetMemoryOpcode() const {
640 return NodeType >= ISD::FIRST_TARGET_MEMORY_OPCODE;
641 }
642
643 /// Return true if the type of the node type undefined.
644 bool isUndef() const { return NodeType == ISD::UNDEF; }
645
646 /// Test if this node is a memory intrinsic (with valid pointer information).
647 /// INTRINSIC_W_CHAIN and INTRINSIC_VOID nodes are sometimes created for
648 /// non-memory intrinsics (with chains) that are not really instances of
649 /// MemSDNode. For such nodes, we need some extra state to determine the
650 /// proper classof relationship.
651 bool isMemIntrinsic() const {
652 return (NodeType == ISD::INTRINSIC_W_CHAIN ||
653 NodeType == ISD::INTRINSIC_VOID) &&
654 SDNodeBits.IsMemIntrinsic;
655 }
656
657 /// Test if this node is a strict floating point pseudo-op.
658 bool isStrictFPOpcode() {
659 switch (NodeType) {
660 default:
661 return false;
662 case ISD::STRICT_FP16_TO_FP:
663 case ISD::STRICT_FP_TO_FP16:
664#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
665 case ISD::STRICT_##DAGN:
666#include "llvm/IR/ConstrainedOps.def"
667 return true;
668 }
669 }
670
671 /// Test if this node has a post-isel opcode, directly
672 /// corresponding to a MachineInstr opcode.
673 bool isMachineOpcode() const { return NodeType < 0; }
674
675 /// This may only be called if isMachineOpcode returns
676 /// true. It returns the MachineInstr opcode value that the node's opcode
677 /// corresponds to.
678 unsigned getMachineOpcode() const {
679 assert(isMachineOpcode() && "Not a MachineInstr opcode!")((isMachineOpcode() && "Not a MachineInstr opcode!") ?
static_cast<void> (0) : __assert_fail ("isMachineOpcode() && \"Not a MachineInstr opcode!\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 679, __PRETTY_FUNCTION__))
;
680 return ~NodeType;
681 }
682
683 bool getHasDebugValue() const { return SDNodeBits.HasDebugValue; }
684 void setHasDebugValue(bool b) { SDNodeBits.HasDebugValue = b; }
685
686 bool isDivergent() const { return SDNodeBits.IsDivergent; }
687
688 /// Return true if there are no uses of this node.
689 bool use_empty() const { return UseList == nullptr; }
690
691 /// Return true if there is exactly one use of this node.
692 bool hasOneUse() const { return hasSingleElement(uses()); }
693
694 /// Return the number of uses of this node. This method takes
695 /// time proportional to the number of uses.
696 size_t use_size() const { return std::distance(use_begin(), use_end()); }
697
698 /// Return the unique node id.
699 int getNodeId() const { return NodeId; }
700
701 /// Set unique node id.
702 void setNodeId(int Id) { NodeId = Id; }
703
704 /// Return the node ordering.
705 unsigned getIROrder() const { return IROrder; }
706
707 /// Set the node ordering.
708 void setIROrder(unsigned Order) { IROrder = Order; }
709
710 /// Return the source location info.
711 const DebugLoc &getDebugLoc() const { return debugLoc; }
712
713 /// Set source location info. Try to avoid this, putting
714 /// it in the constructor is preferable.
715 void setDebugLoc(DebugLoc dl) { debugLoc = std::move(dl); }
716
717 /// This class provides iterator support for SDUse
718 /// operands that use a specific SDNode.
719 class use_iterator
720 : public std::iterator<std::forward_iterator_tag, SDUse, ptrdiff_t> {
721 friend class SDNode;
722
723 SDUse *Op = nullptr;
724
725 explicit use_iterator(SDUse *op) : Op(op) {}
726
727 public:
728 using reference = std::iterator<std::forward_iterator_tag,
729 SDUse, ptrdiff_t>::reference;
730 using pointer = std::iterator<std::forward_iterator_tag,
731 SDUse, ptrdiff_t>::pointer;
732
733 use_iterator() = default;
734 use_iterator(const use_iterator &I) : Op(I.Op) {}
735
736 bool operator==(const use_iterator &x) const {
737 return Op == x.Op;
738 }
739 bool operator!=(const use_iterator &x) const {
740 return !operator==(x);
741 }
742
743 /// Return true if this iterator is at the end of uses list.
744 bool atEnd() const { return Op == nullptr; }
745
746 // Iterator traversal: forward iteration only.
747 use_iterator &operator++() { // Preincrement
748 assert(Op && "Cannot increment end iterator!")((Op && "Cannot increment end iterator!") ? static_cast
<void> (0) : __assert_fail ("Op && \"Cannot increment end iterator!\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 748, __PRETTY_FUNCTION__))
;
749 Op = Op->getNext();
750 return *this;
751 }
752
753 use_iterator operator++(int) { // Postincrement
754 use_iterator tmp = *this; ++*this; return tmp;
755 }
756
757 /// Retrieve a pointer to the current user node.
758 SDNode *operator*() const {
759 assert(Op && "Cannot dereference end iterator!")((Op && "Cannot dereference end iterator!") ? static_cast
<void> (0) : __assert_fail ("Op && \"Cannot dereference end iterator!\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 759, __PRETTY_FUNCTION__))
;
760 return Op->getUser();
761 }
762
763 SDNode *operator->() const { return operator*(); }
764
765 SDUse &getUse() const { return *Op; }
766
767 /// Retrieve the operand # of this use in its user.
768 unsigned getOperandNo() const {
769 assert(Op && "Cannot dereference end iterator!")((Op && "Cannot dereference end iterator!") ? static_cast
<void> (0) : __assert_fail ("Op && \"Cannot dereference end iterator!\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 769, __PRETTY_FUNCTION__))
;
770 return (unsigned)(Op - Op->getUser()->OperandList);
771 }
772 };
773
774 /// Provide iteration support to walk over all uses of an SDNode.
775 use_iterator use_begin() const {
776 return use_iterator(UseList);
777 }
778
779 static use_iterator use_end() { return use_iterator(nullptr); }
780
781 inline iterator_range<use_iterator> uses() {
782 return make_range(use_begin(), use_end());
783 }
784 inline iterator_range<use_iterator> uses() const {
785 return make_range(use_begin(), use_end());
786 }
787
788 /// Return true if there are exactly NUSES uses of the indicated value.
789 /// This method ignores uses of other values defined by this operation.
790 bool hasNUsesOfValue(unsigned NUses, unsigned Value) const;
791
792 /// Return true if there are any use of the indicated value.
793 /// This method ignores uses of other values defined by this operation.
794 bool hasAnyUseOfValue(unsigned Value) const;
795
796 /// Return true if this node is the only use of N.
797 bool isOnlyUserOf(const SDNode *N) const;
798
799 /// Return true if this node is an operand of N.
800 bool isOperandOf(const SDNode *N) const;
801
802 /// Return true if this node is a predecessor of N.
803 /// NOTE: Implemented on top of hasPredecessor and every bit as
804 /// expensive. Use carefully.
805 bool isPredecessorOf(const SDNode *N) const {
806 return N->hasPredecessor(this);
807 }
808
809 /// Return true if N is a predecessor of this node.
810 /// N is either an operand of this node, or can be reached by recursively
811 /// traversing up the operands.
812 /// NOTE: This is an expensive method. Use it carefully.
813 bool hasPredecessor(const SDNode *N) const;
814
815 /// Returns true if N is a predecessor of any node in Worklist. This
816 /// helper keeps Visited and Worklist sets externally to allow unions
817 /// searches to be performed in parallel, caching of results across
818 /// queries and incremental addition to Worklist. Stops early if N is
819 /// found but will resume. Remember to clear Visited and Worklists
820 /// if DAG changes. MaxSteps gives a maximum number of nodes to visit before
821 /// giving up. The TopologicalPrune flag signals that positive NodeIds are
822 /// topologically ordered (Operands have strictly smaller node id) and search
823 /// can be pruned leveraging this.
824 static bool hasPredecessorHelper(const SDNode *N,
825 SmallPtrSetImpl<const SDNode *> &Visited,
826 SmallVectorImpl<const SDNode *> &Worklist,
827 unsigned int MaxSteps = 0,
828 bool TopologicalPrune = false) {
829 SmallVector<const SDNode *, 8> DeferredNodes;
830 if (Visited.count(N))
831 return true;
832
833 // Node Id's are assigned in three places: As a topological
834 // ordering (> 0), during legalization (results in values set to
835 // 0), new nodes (set to -1). If N has a topolgical id then we
836 // know that all nodes with ids smaller than it cannot be
837 // successors and we need not check them. Filter out all node
838 // that can't be matches. We add them to the worklist before exit
839 // in case of multiple calls. Note that during selection the topological id
840 // may be violated if a node's predecessor is selected before it. We mark
841 // this at selection negating the id of unselected successors and
842 // restricting topological pruning to positive ids.
843
844 int NId = N->getNodeId();
845 // If we Invalidated the Id, reconstruct original NId.
846 if (NId < -1)
847 NId = -(NId + 1);
848
849 bool Found = false;
850 while (!Worklist.empty()) {
851 const SDNode *M = Worklist.pop_back_val();
852 int MId = M->getNodeId();
853 if (TopologicalPrune && M->getOpcode() != ISD::TokenFactor && (NId > 0) &&
854 (MId > 0) && (MId < NId)) {
855 DeferredNodes.push_back(M);
856 continue;
857 }
858 for (const SDValue &OpV : M->op_values()) {
859 SDNode *Op = OpV.getNode();
860 if (Visited.insert(Op).second)
861 Worklist.push_back(Op);
862 if (Op == N)
863 Found = true;
864 }
865 if (Found)
866 break;
867 if (MaxSteps != 0 && Visited.size() >= MaxSteps)
868 break;
869 }
870 // Push deferred nodes back on worklist.
871 Worklist.append(DeferredNodes.begin(), DeferredNodes.end());
872 // If we bailed early, conservatively return found.
873 if (MaxSteps != 0 && Visited.size() >= MaxSteps)
874 return true;
875 return Found;
876 }
877
878 /// Return true if all the users of N are contained in Nodes.
879 /// NOTE: Requires at least one match, but doesn't require them all.
880 static bool areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N);
881
882 /// Return the number of values used by this operation.
883 unsigned getNumOperands() const { return NumOperands; }
884
885 /// Return the maximum number of operands that a SDNode can hold.
886 static constexpr size_t getMaxNumOperands() {
887 return std::numeric_limits<decltype(SDNode::NumOperands)>::max();
888 }
889
890 /// Helper method returns the integer value of a ConstantSDNode operand.
891 inline uint64_t getConstantOperandVal(unsigned Num) const;
892
893 /// Helper method returns the APInt of a ConstantSDNode operand.
894 inline const APInt &getConstantOperandAPInt(unsigned Num) const;
895
896 const SDValue &getOperand(unsigned Num) const {
897 assert(Num < NumOperands && "Invalid child # of SDNode!")((Num < NumOperands && "Invalid child # of SDNode!"
) ? static_cast<void> (0) : __assert_fail ("Num < NumOperands && \"Invalid child # of SDNode!\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 897, __PRETTY_FUNCTION__))
;
898 return OperandList[Num];
899 }
900
901 using op_iterator = SDUse *;
902
903 op_iterator op_begin() const { return OperandList; }
904 op_iterator op_end() const { return OperandList+NumOperands; }
905 ArrayRef<SDUse> ops() const { return makeArrayRef(op_begin(), op_end()); }
906
907 /// Iterator for directly iterating over the operand SDValue's.
908 struct value_op_iterator
909 : iterator_adaptor_base<value_op_iterator, op_iterator,
910 std::random_access_iterator_tag, SDValue,
911 ptrdiff_t, value_op_iterator *,
912 value_op_iterator *> {
913 explicit value_op_iterator(SDUse *U = nullptr)
914 : iterator_adaptor_base(U) {}
915
916 const SDValue &operator*() const { return I->get(); }
917 };
918
919 iterator_range<value_op_iterator> op_values() const {
920 return make_range(value_op_iterator(op_begin()),
921 value_op_iterator(op_end()));
922 }
923
924 SDVTList getVTList() const {
925 SDVTList X = { ValueList, NumValues };
926 return X;
927 }
928
929 /// If this node has a glue operand, return the node
930 /// to which the glue operand points. Otherwise return NULL.
931 SDNode *getGluedNode() const {
932 if (getNumOperands() != 0 &&
933 getOperand(getNumOperands()-1).getValueType() == MVT::Glue)
934 return getOperand(getNumOperands()-1).getNode();
935 return nullptr;
936 }
937
938 /// If this node has a glue value with a user, return
939 /// the user (there is at most one). Otherwise return NULL.
940 SDNode *getGluedUser() const {
941 for (use_iterator UI = use_begin(), UE = use_end(); UI != UE; ++UI)
942 if (UI.getUse().get().getValueType() == MVT::Glue)
943 return *UI;
944 return nullptr;
945 }
946
947 const SDNodeFlags getFlags() const { return Flags; }
948 void setFlags(SDNodeFlags NewFlags) { Flags = NewFlags; }
949
950 /// Clear any flags in this node that aren't also set in Flags.
951 /// If Flags is not in a defined state then this has no effect.
952 void intersectFlagsWith(const SDNodeFlags Flags);
953
954 /// Return the number of values defined/returned by this operator.
955 unsigned getNumValues() const { return NumValues; }
956
957 /// Return the type of a specified result.
958 EVT getValueType(unsigned ResNo) const {
959 assert(ResNo < NumValues && "Illegal result number!")((ResNo < NumValues && "Illegal result number!") ?
static_cast<void> (0) : __assert_fail ("ResNo < NumValues && \"Illegal result number!\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 959, __PRETTY_FUNCTION__))
;
960 return ValueList[ResNo];
961 }
962
963 /// Return the type of a specified result as a simple type.
964 MVT getSimpleValueType(unsigned ResNo) const {
965 return getValueType(ResNo).getSimpleVT();
966 }
967
968 /// Returns MVT::getSizeInBits(getValueType(ResNo)).
969 ///
970 /// If the value type is a scalable vector type, the scalable property will
971 /// be set and the runtime size will be a positive integer multiple of the
972 /// base size.
973 TypeSize getValueSizeInBits(unsigned ResNo) const {
974 return getValueType(ResNo).getSizeInBits();
975 }
976
977 using value_iterator = const EVT *;
978
979 value_iterator value_begin() const { return ValueList; }
980 value_iterator value_end() const { return ValueList+NumValues; }
981 iterator_range<value_iterator> values() const {
982 return llvm::make_range(value_begin(), value_end());
983 }
984
985 /// Return the opcode of this operation for printing.
986 std::string getOperationName(const SelectionDAG *G = nullptr) const;
987 static const char* getIndexedModeName(ISD::MemIndexedMode AM);
988 void print_types(raw_ostream &OS, const SelectionDAG *G) const;
989 void print_details(raw_ostream &OS, const SelectionDAG *G) const;
990 void print(raw_ostream &OS, const SelectionDAG *G = nullptr) const;
991 void printr(raw_ostream &OS, const SelectionDAG *G = nullptr) const;
992
993 /// Print a SelectionDAG node and all children down to
994 /// the leaves. The given SelectionDAG allows target-specific nodes
995 /// to be printed in human-readable form. Unlike printr, this will
996 /// print the whole DAG, including children that appear multiple
997 /// times.
998 ///
999 void printrFull(raw_ostream &O, const SelectionDAG *G = nullptr) const;
1000
1001 /// Print a SelectionDAG node and children up to
1002 /// depth "depth." The given SelectionDAG allows target-specific
1003 /// nodes to be printed in human-readable form. Unlike printr, this
1004 /// will print children that appear multiple times wherever they are
1005 /// used.
1006 ///
1007 void printrWithDepth(raw_ostream &O, const SelectionDAG *G = nullptr,
1008 unsigned depth = 100) const;
1009
1010 /// Dump this node, for debugging.
1011 void dump() const;
1012
1013 /// Dump (recursively) this node and its use-def subgraph.
1014 void dumpr() const;
1015
1016 /// Dump this node, for debugging.
1017 /// The given SelectionDAG allows target-specific nodes to be printed
1018 /// in human-readable form.
1019 void dump(const SelectionDAG *G) const;
1020
1021 /// Dump (recursively) this node and its use-def subgraph.
1022 /// The given SelectionDAG allows target-specific nodes to be printed
1023 /// in human-readable form.
1024 void dumpr(const SelectionDAG *G) const;
1025
1026 /// printrFull to dbgs(). The given SelectionDAG allows
1027 /// target-specific nodes to be printed in human-readable form.
1028 /// Unlike dumpr, this will print the whole DAG, including children
1029 /// that appear multiple times.
1030 void dumprFull(const SelectionDAG *G = nullptr) const;
1031
1032 /// printrWithDepth to dbgs(). The given
1033 /// SelectionDAG allows target-specific nodes to be printed in
1034 /// human-readable form. Unlike dumpr, this will print children
1035 /// that appear multiple times wherever they are used.
1036 ///
1037 void dumprWithDepth(const SelectionDAG *G = nullptr,
1038 unsigned depth = 100) const;
1039
1040 /// Gather unique data for the node.
1041 void Profile(FoldingSetNodeID &ID) const;
1042
1043 /// This method should only be used by the SDUse class.
1044 void addUse(SDUse &U) { U.addToList(&UseList); }
1045
1046protected:
1047 static SDVTList getSDVTList(EVT VT) {
1048 SDVTList Ret = { getValueTypeList(VT), 1 };
1049 return Ret;
1050 }
1051
1052 /// Create an SDNode.
1053 ///
1054 /// SDNodes are created without any operands, and never own the operand
1055 /// storage. To add operands, see SelectionDAG::createOperands.
1056 SDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs)
1057 : NodeType(Opc), ValueList(VTs.VTs), NumValues(VTs.NumVTs),
1058 IROrder(Order), debugLoc(std::move(dl)) {
1059 memset(&RawSDNodeBits, 0, sizeof(RawSDNodeBits));
1060 assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor")((debugLoc.hasTrivialDestructor() && "Expected trivial destructor"
) ? static_cast<void> (0) : __assert_fail ("debugLoc.hasTrivialDestructor() && \"Expected trivial destructor\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1060, __PRETTY_FUNCTION__))
;
1061 assert(NumValues == VTs.NumVTs &&((NumValues == VTs.NumVTs && "NumValues wasn't wide enough for its operands!"
) ? static_cast<void> (0) : __assert_fail ("NumValues == VTs.NumVTs && \"NumValues wasn't wide enough for its operands!\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1062, __PRETTY_FUNCTION__))
1062 "NumValues wasn't wide enough for its operands!")((NumValues == VTs.NumVTs && "NumValues wasn't wide enough for its operands!"
) ? static_cast<void> (0) : __assert_fail ("NumValues == VTs.NumVTs && \"NumValues wasn't wide enough for its operands!\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1062, __PRETTY_FUNCTION__))
;
1063 }
1064
1065 /// Release the operands and set this node to have zero operands.
1066 void DropOperands();
1067};
1068
1069/// Wrapper class for IR location info (IR ordering and DebugLoc) to be passed
1070/// into SDNode creation functions.
1071/// When an SDNode is created from the DAGBuilder, the DebugLoc is extracted
1072/// from the original Instruction, and IROrder is the ordinal position of
1073/// the instruction.
1074/// When an SDNode is created after the DAG is being built, both DebugLoc and
1075/// the IROrder are propagated from the original SDNode.
1076/// So SDLoc class provides two constructors besides the default one, one to
1077/// be used by the DAGBuilder, the other to be used by others.
1078class SDLoc {
1079private:
1080 DebugLoc DL;
1081 int IROrder = 0;
1082
1083public:
1084 SDLoc() = default;
1085 SDLoc(const SDNode *N) : DL(N->getDebugLoc()), IROrder(N->getIROrder()) {}
1086 SDLoc(const SDValue V) : SDLoc(V.getNode()) {}
1087 SDLoc(const Instruction *I, int Order) : IROrder(Order) {
1088 assert(Order >= 0 && "bad IROrder")((Order >= 0 && "bad IROrder") ? static_cast<void
> (0) : __assert_fail ("Order >= 0 && \"bad IROrder\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1088, __PRETTY_FUNCTION__))
;
1089 if (I)
1090 DL = I->getDebugLoc();
1091 }
1092
1093 unsigned getIROrder() const { return IROrder; }
1094 const DebugLoc &getDebugLoc() const { return DL; }
1095};
1096
1097// Define inline functions from the SDValue class.
1098
1099inline SDValue::SDValue(SDNode *node, unsigned resno)
1100 : Node(node), ResNo(resno) {
1101 // Explicitly check for !ResNo to avoid use-after-free, because there are
1102 // callers that use SDValue(N, 0) with a deleted N to indicate successful
1103 // combines.
1104 assert((!Node || !ResNo || ResNo < Node->getNumValues()) &&(((!Node || !ResNo || ResNo < Node->getNumValues()) &&
"Invalid result number for the given node!") ? static_cast<
void> (0) : __assert_fail ("(!Node || !ResNo || ResNo < Node->getNumValues()) && \"Invalid result number for the given node!\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1105, __PRETTY_FUNCTION__))
1105 "Invalid result number for the given node!")(((!Node || !ResNo || ResNo < Node->getNumValues()) &&
"Invalid result number for the given node!") ? static_cast<
void> (0) : __assert_fail ("(!Node || !ResNo || ResNo < Node->getNumValues()) && \"Invalid result number for the given node!\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1105, __PRETTY_FUNCTION__))
;
1106 assert(ResNo < -2U && "Cannot use result numbers reserved for DenseMaps.")((ResNo < -2U && "Cannot use result numbers reserved for DenseMaps."
) ? static_cast<void> (0) : __assert_fail ("ResNo < -2U && \"Cannot use result numbers reserved for DenseMaps.\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1106, __PRETTY_FUNCTION__))
;
1107}
1108
1109inline unsigned SDValue::getOpcode() const {
1110 return Node->getOpcode();
19
Called C++ object pointer is null
1111}
1112
/// Return the type of the result (ResNo) this value refers to.
inline EVT SDValue::getValueType() const {
  return Node->getValueType(ResNo);
}

/// Return the number of operands of the underlying node.
inline unsigned SDValue::getNumOperands() const {
  return Node->getNumOperands();
}

/// Return operand i of the underlying node.
inline const SDValue &SDValue::getOperand(unsigned i) const {
  return Node->getOperand(i);
}

/// Return operand i of the underlying node as a zero-extended constant.
inline uint64_t SDValue::getConstantOperandVal(unsigned i) const {
  return Node->getConstantOperandVal(i);
}

/// Return operand i of the underlying node as a full-precision APInt.
inline const APInt &SDValue::getConstantOperandAPInt(unsigned i) const {
  return Node->getConstantOperandAPInt(i);
}

/// True if the underlying node carries a target-specific opcode.
inline bool SDValue::isTargetOpcode() const {
  return Node->isTargetOpcode();
}

/// True if the underlying node carries a target-specific memory opcode.
inline bool SDValue::isTargetMemoryOpcode() const {
  return Node->isTargetMemoryOpcode();
}

/// True if the underlying node has been assigned a machine opcode.
inline bool SDValue::isMachineOpcode() const {
  return Node->isMachineOpcode();
}

/// Return the machine opcode of the underlying node.
inline unsigned SDValue::getMachineOpcode() const {
  return Node->getMachineOpcode();
}

/// True if the underlying node is an UNDEF node.
inline bool SDValue::isUndef() const {
  return Node->isUndef();
}

/// True if no node uses this particular result value (ResNo).
inline bool SDValue::use_empty() const {
  return !Node->hasAnyUseOfValue(ResNo);
}

/// True if exactly one node uses this particular result value (ResNo).
inline bool SDValue::hasOneUse() const {
  return Node->hasNUsesOfValue(1, ResNo);
}

/// Return the debug location attached to the underlying node.
inline const DebugLoc &SDValue::getDebugLoc() const {
  return Node->getDebugLoc();
}

// The dump/dumpr family below are debugging aids that print the underlying
// node (optionally resolving target names via the given SelectionDAG).
inline void SDValue::dump() const {
  return Node->dump();
}

inline void SDValue::dump(const SelectionDAG *G) const {
  return Node->dump(G);
}

inline void SDValue::dumpr() const {
  return Node->dumpr();
}

inline void SDValue::dumpr(const SelectionDAG *G) const {
  return Node->dumpr(G);
}
1180
1181// Define inline functions from the SDUse class.
1182
/// Rebind this use to refer to V, keeping both nodes' use lists consistent.
/// Order matters: unlink from the old node's list before reassigning.
inline void SDUse::set(const SDValue &V) {
  if (Val.getNode()) removeFromList();
  Val = V;
  if (V.getNode()) V.getNode()->addUse(*this);
}

/// First-time initialization: Val is assumed unset, so no unlink happens.
/// Note V's node is dereferenced unconditionally here, unlike set().
inline void SDUse::setInitial(const SDValue &V) {
  Val = V;
  V.getNode()->addUse(*this);
}

/// Rebind only the node pointer (the result number is preserved),
/// fixing up use lists on both sides. N may be null.
inline void SDUse::setNode(SDNode *N) {
  if (Val.getNode()) removeFromList();
  Val.setNode(N);
  if (N) N->addUse(*this);
}
1199
1200/// This class is used to form a handle around another node that
1201/// is persistent and is updated across invocations of replaceAllUsesWith on its
1202/// operand. This node should be directly created by end-users and not added to
1203/// the AllNodes list.
class HandleSDNode : public SDNode {
  // The single operand being tracked; stored inline because this node is
  // stack-allocated and owns its own operand storage (see ctor comment).
  SDUse Op;

public:
  explicit HandleSDNode(SDValue X)
    : SDNode(ISD::HANDLENODE, 0, DebugLoc(), getSDVTList(MVT::Other)) {
    // HandleSDNodes are never inserted into the DAG, so they won't be
    // auto-numbered. Use ID 65535 as a sentinel.
    PersistentId = 0xffff;

    // Manually set up the operand list. This node type is special in that it's
    // always stack allocated and SelectionDAG does not manage its operands.
    // TODO: This should either (a) not be in the SDNode hierarchy, or (b) not
    // be so special.
    Op.setUser(this);
    Op.setInitial(X);
    NumOperands = 1;
    OperandList = &Op;
  }
  // Out-of-line destructor (declared only; defined elsewhere).
  ~HandleSDNode();

  /// Return the (possibly RAUW-updated) value this handle tracks.
  const SDValue &getValue() const { return Op; }
};
1227
/// Node for ISD::ADDRSPACECAST: records the source and destination
/// address-space numbers of a pointer cast between address spaces.
class AddrSpaceCastSDNode : public SDNode {
private:
  unsigned SrcAddrSpace;
  unsigned DestAddrSpace;

public:
  // Constructor is declared here and defined out of line.
  AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl, EVT VT,
                      unsigned SrcAS, unsigned DestAS);

  unsigned getSrcAddressSpace() const { return SrcAddrSpace; }
  unsigned getDestAddressSpace() const { return DestAddrSpace; }

  // Support for isa<>/dyn_cast<>.
  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::ADDRSPACECAST;
  }
};
1244
1245/// This is an abstract virtual class for memory operations.
class MemSDNode : public SDNode {
private:
  // VT of in-memory value.
  EVT MemoryVT;

protected:
  /// Memory reference information.
  MachineMemOperand *MMO;

public:
  MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTs,
            EVT memvt, MachineMemOperand *MMO);

  // Load/store nature is derived entirely from the MMO.
  bool readMem() const { return MMO->isLoad(); }
  bool writeMem() const { return MMO->isStore(); }

  /// Returns alignment and volatility of the memory access
  Align getOriginalAlign() const { return MMO->getBaseAlign(); }
  Align getAlign() const { return MMO->getAlign(); }
  LLVM_ATTRIBUTE_DEPRECATED(unsigned getOriginalAlignment() const,
                            "Use getOriginalAlign() instead") {
    return MMO->getBaseAlign().value();
  }
  // FIXME: Remove once transition to getAlign is over.
  unsigned getAlignment() const { return MMO->getAlign().value(); }

  /// Return the SubclassData value, without HasDebugValue. This contains an
  /// encoding of the volatile flag, as well as bits used by subclasses. This
  /// function should only be used to compute a FoldingSetNodeID value.
  /// The HasDebugValue bit is masked out because CSE map needs to match
  /// nodes with debug info with nodes without debug info. Same is about
  /// isDivergent bit.
  unsigned getRawSubclassData() const {
    uint16_t Data;
    union {
      char RawSDNodeBits[sizeof(uint16_t)];
      SDNodeBitfields SDNodeBits;
    };
    // Work on a memcpy'd local copy so the CSE-irrelevant bits can be
    // cleared without mutating this node's actual bitfield storage.
    memcpy(&RawSDNodeBits, &this->RawSDNodeBits, sizeof(this->RawSDNodeBits));
    SDNodeBits.HasDebugValue = 0;
    SDNodeBits.IsDivergent = false;
    memcpy(&Data, &RawSDNodeBits, sizeof(RawSDNodeBits));
    return Data;
  }

  bool isVolatile() const { return MemSDNodeBits.IsVolatile; }
  bool isNonTemporal() const { return MemSDNodeBits.IsNonTemporal; }
  bool isDereferenceable() const { return MemSDNodeBits.IsDereferenceable; }
  bool isInvariant() const { return MemSDNodeBits.IsInvariant; }

  // Returns the offset from the location of the access.
  int64_t getSrcValueOffset() const { return MMO->getOffset(); }

  /// Returns the AA info that describes the dereference.
  AAMDNodes getAAInfo() const { return MMO->getAAInfo(); }

  /// Returns the Ranges that describes the dereference.
  const MDNode *getRanges() const { return MMO->getRanges(); }

  /// Returns the synchronization scope ID for this memory operation.
  SyncScope::ID getSyncScopeID() const { return MMO->getSyncScopeID(); }

  /// Return the atomic ordering requirements for this memory operation. For
  /// cmpxchg atomic operations, return the atomic ordering requirements when
  /// store occurs.
  AtomicOrdering getOrdering() const { return MMO->getOrdering(); }

  /// Return true if the memory operation ordering is Unordered or higher.
  bool isAtomic() const { return MMO->isAtomic(); }

  /// Returns true if the memory operation doesn't imply any ordering
  /// constraints on surrounding memory operations beyond the normal memory
  /// aliasing rules.
  bool isUnordered() const { return MMO->isUnordered(); }

  /// Returns true if the memory operation is neither atomic or volatile.
  bool isSimple() const { return !isAtomic() && !isVolatile(); }

  /// Return the type of the in-memory value.
  EVT getMemoryVT() const { return MemoryVT; }

  /// Return a MachineMemOperand object describing the memory
  /// reference performed by operation.
  MachineMemOperand *getMemOperand() const { return MMO; }

  const MachinePointerInfo &getPointerInfo() const {
    return MMO->getPointerInfo();
  }

  /// Return the address space for the associated pointer
  unsigned getAddressSpace() const {
    return getPointerInfo().getAddrSpace();
  }

  /// Update this MemSDNode's MachineMemOperand information
  /// to reflect the alignment of NewMMO, if it has a greater alignment.
  /// This must only be used when the new alignment applies to all users of
  /// this MachineMemOperand.
  void refineAlignment(const MachineMemOperand *NewMMO) {
    MMO->refineAlignment(NewMMO);
  }

  const SDValue &getChain() const { return getOperand(0); }

  /// Return the base-pointer operand. Its position varies by opcode:
  /// stores put the stored value first, gathers/scatters also carry a
  /// mask/passthru before the pointer.
  const SDValue &getBasePtr() const {
    switch (getOpcode()) {
    case ISD::STORE:
    case ISD::MSTORE:
      return getOperand(2);
    case ISD::MGATHER:
    case ISD::MSCATTER:
      return getOperand(3);
    default:
      return getOperand(1);
    }
  }

  // Methods to support isa and dyn_cast
  static bool classof(const SDNode *N) {
    // For some targets, we lower some target intrinsics to a MemIntrinsicNode
    // with either an intrinsic or a target opcode.
    return N->getOpcode() == ISD::LOAD ||
           N->getOpcode() == ISD::STORE ||
           N->getOpcode() == ISD::PREFETCH ||
           N->getOpcode() == ISD::ATOMIC_CMP_SWAP ||
           N->getOpcode() == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS ||
           N->getOpcode() == ISD::ATOMIC_SWAP ||
           N->getOpcode() == ISD::ATOMIC_LOAD_ADD ||
           N->getOpcode() == ISD::ATOMIC_LOAD_SUB ||
           N->getOpcode() == ISD::ATOMIC_LOAD_AND ||
           N->getOpcode() == ISD::ATOMIC_LOAD_CLR ||
           N->getOpcode() == ISD::ATOMIC_LOAD_OR ||
           N->getOpcode() == ISD::ATOMIC_LOAD_XOR ||
           N->getOpcode() == ISD::ATOMIC_LOAD_NAND ||
           N->getOpcode() == ISD::ATOMIC_LOAD_MIN ||
           N->getOpcode() == ISD::ATOMIC_LOAD_MAX ||
           N->getOpcode() == ISD::ATOMIC_LOAD_UMIN ||
           N->getOpcode() == ISD::ATOMIC_LOAD_UMAX ||
           N->getOpcode() == ISD::ATOMIC_LOAD_FADD ||
           N->getOpcode() == ISD::ATOMIC_LOAD_FSUB ||
           N->getOpcode() == ISD::ATOMIC_LOAD ||
           N->getOpcode() == ISD::ATOMIC_STORE ||
           N->getOpcode() == ISD::MLOAD ||
           N->getOpcode() == ISD::MSTORE ||
           N->getOpcode() == ISD::MGATHER ||
           N->getOpcode() == ISD::MSCATTER ||
           N->isMemIntrinsic() ||
           N->isTargetMemoryOpcode();
  }
};
1396
1397/// This is an SDNode representing atomic operations.
class AtomicSDNode : public MemSDNode {
public:
  AtomicSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTL,
               EVT MemVT, MachineMemOperand *MMO)
      : MemSDNode(Opc, Order, dl, VTL, MemVT, MMO) {
    // ATOMIC_LOAD/ATOMIC_STORE must be backed by an atomic MMO; the other
    // opcodes accepted by classof() below are atomic by definition.
    assert(((Opc != ISD::ATOMIC_LOAD && Opc != ISD::ATOMIC_STORE) ||
            MMO->isAtomic()) && "then why are we using an AtomicSDNode?");
  }

  // Operand 0 is the chain (see MemSDNode::getChain).
  const SDValue &getBasePtr() const { return getOperand(1); }
  const SDValue &getVal() const { return getOperand(2); }

  /// Returns true if this SDNode represents cmpxchg atomic operation, false
  /// otherwise.
  bool isCompareAndSwap() const {
    unsigned Op = getOpcode();
    return Op == ISD::ATOMIC_CMP_SWAP ||
           Op == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS;
  }

  /// For cmpxchg atomic operations, return the atomic ordering requirements
  /// when store does not occur.
  AtomicOrdering getFailureOrdering() const {
    assert(isCompareAndSwap() && "Must be cmpxchg operation");
    return MMO->getFailureOrdering();
  }

  // Methods to support isa and dyn_cast
  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::ATOMIC_CMP_SWAP ||
           N->getOpcode() == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS ||
           N->getOpcode() == ISD::ATOMIC_SWAP ||
           N->getOpcode() == ISD::ATOMIC_LOAD_ADD ||
           N->getOpcode() == ISD::ATOMIC_LOAD_SUB ||
           N->getOpcode() == ISD::ATOMIC_LOAD_AND ||
           N->getOpcode() == ISD::ATOMIC_LOAD_CLR ||
           N->getOpcode() == ISD::ATOMIC_LOAD_OR ||
           N->getOpcode() == ISD::ATOMIC_LOAD_XOR ||
           N->getOpcode() == ISD::ATOMIC_LOAD_NAND ||
           N->getOpcode() == ISD::ATOMIC_LOAD_MIN ||
           N->getOpcode() == ISD::ATOMIC_LOAD_MAX ||
           N->getOpcode() == ISD::ATOMIC_LOAD_UMIN ||
           N->getOpcode() == ISD::ATOMIC_LOAD_UMAX ||
           N->getOpcode() == ISD::ATOMIC_LOAD_FADD ||
           N->getOpcode() == ISD::ATOMIC_LOAD_FSUB ||
           N->getOpcode() == ISD::ATOMIC_LOAD ||
           N->getOpcode() == ISD::ATOMIC_STORE;
  }
};
1447
1448/// This SDNode is used for target intrinsics that touch
1449/// memory and need an associated MachineMemOperand. Its opcode may be
1450/// INTRINSIC_VOID, INTRINSIC_W_CHAIN, PREFETCH, or a target-specific opcode
1451/// with a value not less than FIRST_TARGET_MEMORY_OPCODE.
class MemIntrinsicSDNode : public MemSDNode {
public:
  MemIntrinsicSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl,
                     SDVTList VTs, EVT MemoryVT, MachineMemOperand *MMO)
      : MemSDNode(Opc, Order, dl, VTs, MemoryVT, MMO) {
    // Tag the node so classof() can recognize it even under a generic or
    // target opcode.
    SDNodeBits.IsMemIntrinsic = true;
  }

  // Methods to support isa and dyn_cast
  static bool classof(const SDNode *N) {
    // We lower some target intrinsics to their target opcode
    // early a node with a target opcode can be of this class
    return N->isMemIntrinsic() ||
           N->getOpcode() == ISD::PREFETCH ||
           N->isTargetMemoryOpcode();
  }
};
1469
1470/// This SDNode is used to implement the code generator
1471/// support for the llvm IR shufflevector instruction. It combines elements
1472/// from two input vectors into a new input vector, with the selection and
1473/// ordering of elements determined by an array of integers, referred to as
1474/// the shuffle mask. For input vectors of width N, mask indices of 0..N-1
1475/// refer to elements from the LHS input, and indices from N to 2N-1 the RHS.
1476/// An index of -1 is treated as undef, such that the code generator may put
1477/// any value in the corresponding element of the result.
class ShuffleVectorSDNode : public SDNode {
  // The memory for Mask is owned by the SelectionDAG's OperandAllocator, and
  // is freed when the SelectionDAG object is destroyed.
  const int *Mask;

protected:
  friend class SelectionDAG;

  ShuffleVectorSDNode(EVT VT, unsigned Order, const DebugLoc &dl, const int *M)
      : SDNode(ISD::VECTOR_SHUFFLE, Order, dl, getSDVTList(VT)), Mask(M) {}

public:
  /// Return the shuffle mask; its length equals the result vector's
  /// element count.
  ArrayRef<int> getMask() const {
    EVT VT = getValueType(0);
    return makeArrayRef(Mask, VT.getVectorNumElements());
  }

  /// Return the mask element at Idx (-1 means undef).
  int getMaskElt(unsigned Idx) const {
    assert(Idx < getValueType(0).getVectorNumElements() && "Idx out of range!");
    return Mask[Idx];
  }

  bool isSplat() const { return isSplatMask(Mask, getValueType(0)); }

  int getSplatIndex() const {
    assert(isSplat() && "Cannot get splat index for non-splat!");
    EVT VT = getValueType(0);
    // Return the first non-undef mask element; for a splat mask all defined
    // elements are equal.
    for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
      if (Mask[i] >= 0)
        return Mask[i];

    // We can choose any index value here and be correct because all elements
    // are undefined. Return 0 for better potential for callers to simplify.
    return 0;
  }

  static bool isSplatMask(const int *Mask, EVT VT);

  /// Change values in a shuffle permute mask assuming
  /// the two vector operands have swapped position.
  static void commuteMask(MutableArrayRef<int> Mask) {
    unsigned NumElems = Mask.size();
    for (unsigned i = 0; i != NumElems; ++i) {
      int idx = Mask[i];
      if (idx < 0)
        continue;                  // undef elements stay undef
      else if (idx < (int)NumElems)
        Mask[i] = idx + NumElems;  // LHS element -> RHS
      else
        Mask[i] = idx - NumElems;  // RHS element -> LHS
    }
  }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::VECTOR_SHUFFLE;
  }
};
1535
/// Node wrapping an integer constant (ISD::Constant or ISD::TargetConstant);
/// the value itself lives in an IR ConstantInt.
class ConstantSDNode : public SDNode {
  friend class SelectionDAG;

  const ConstantInt *Value;

  // Private: only SelectionDAG creates these (friend above).
  ConstantSDNode(bool isTarget, bool isOpaque, const ConstantInt *val, EVT VT)
      : SDNode(isTarget ? ISD::TargetConstant : ISD::Constant, 0, DebugLoc(),
               getSDVTList(VT)),
        Value(val) {
    ConstantSDNodeBits.IsOpaque = isOpaque;
  }

public:
  const ConstantInt *getConstantIntValue() const { return Value; }
  const APInt &getAPIntValue() const { return Value->getValue(); }
  uint64_t getZExtValue() const { return Value->getZExtValue(); }
  int64_t getSExtValue() const { return Value->getSExtValue(); }
  uint64_t getLimitedValue(uint64_t Limit = UINT64_MAX) {
    return Value->getLimitedValue(Limit);
  }
  MaybeAlign getMaybeAlignValue() const { return Value->getMaybeAlignValue(); }
  Align getAlignValue() const { return Value->getAlignValue(); }

  bool isOne() const { return Value->isOne(); }
  bool isNullValue() const { return Value->isZero(); }
  bool isAllOnesValue() const { return Value->isMinusOne(); }

  /// Opaque constants are not folded by DAG combines (flag set at creation).
  bool isOpaque() const { return ConstantSDNodeBits.IsOpaque; }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::Constant ||
           N->getOpcode() == ISD::TargetConstant;
  }
};
1570
/// Fetch operand Num as a zero-extended 64-bit value; the operand must be a
/// ConstantSDNode.
uint64_t SDNode::getConstantOperandVal(unsigned Num) const {
  return cast<ConstantSDNode>(getOperand(Num))->getZExtValue();
}

/// Fetch operand Num's full-precision APInt; the operand must be a
/// ConstantSDNode.
const APInt &SDNode::getConstantOperandAPInt(unsigned Num) const {
  return cast<ConstantSDNode>(getOperand(Num))->getAPIntValue();
}
1578