Bug Summary

File: llvm/include/llvm/CodeGen/SelectionDAGNodes.h
Warning: line 1146, column 10
Called C++ object pointer is null
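
This diagnostic is produced by the analyzer's core checkers when a non-static member function is called through a pointer that is null on at least one feasible path. As a rough, hypothetical illustration of the pattern only (the names below are invented for this sketch and are not the LLVM code in SelectionDAGNodes.h), a minimal reproducer looks like this:

struct Node {
  int value() const { return 42; }
};

static Node TheNode;

// Returns null on one path; the analyzer tracks both possibilities.
static Node *maybeNull(bool Ok) { return Ok ? &TheNode : nullptr; }

int use(bool Ok) {
  Node *N = maybeNull(Ok);
  // When Ok is false, N is null here, and calling a member function
  // through it is reported as "Called C++ object pointer is null".
  return N->value();
}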

Annotated Source Code


clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name WebAssemblyISelLowering.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mthread-model posix -mframe-pointer=none -fmath-errno -fdenormal-fp-math=ieee,ieee -fdenormal-fp-math-f32=ieee,ieee -fno-rounding-math -masm-verbose -mconstructor-aliases -munwind-tables -target-cpu x86-64 -dwarf-column-info -fno-split-dwarf-inlining -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-11/lib/clang/11.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-11~++20200226111113+80d7e473e0b/build-llvm/lib/Target/WebAssembly -I /build/llvm-toolchain-snapshot-11~++20200226111113+80d7e473e0b/llvm/lib/Target/WebAssembly -I /build/llvm-toolchain-snapshot-11~++20200226111113+80d7e473e0b/build-llvm/include -I /build/llvm-toolchain-snapshot-11~++20200226111113+80d7e473e0b/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-11/lib/clang/11.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-11~++20200226111113+80d7e473e0b/build-llvm/lib/Target/WebAssembly -fdebug-prefix-map=/build/llvm-toolchain-snapshot-11~++20200226111113+80d7e473e0b=. -ferror-limit 19 -fmessage-length 0 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2020-02-26-193302-13812-1 -x c++ /build/llvm-toolchain-snapshot-11~++20200226111113+80d7e473e0b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp

/build/llvm-toolchain-snapshot-11~++20200226111113+80d7e473e0b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp

1//=- WebAssemblyISelLowering.cpp - WebAssembly DAG Lowering Implementation -==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file implements the WebAssemblyTargetLowering class.
11///
12//===----------------------------------------------------------------------===//
13
14#include "WebAssemblyISelLowering.h"
15#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
16#include "WebAssemblyMachineFunctionInfo.h"
17#include "WebAssemblySubtarget.h"
18#include "WebAssemblyTargetMachine.h"
19#include "llvm/CodeGen/Analysis.h"
20#include "llvm/CodeGen/CallingConvLower.h"
21#include "llvm/CodeGen/MachineInstrBuilder.h"
22#include "llvm/CodeGen/MachineJumpTableInfo.h"
23#include "llvm/CodeGen/MachineModuleInfo.h"
24#include "llvm/CodeGen/MachineRegisterInfo.h"
25#include "llvm/CodeGen/SelectionDAG.h"
26#include "llvm/CodeGen/WasmEHFuncInfo.h"
27#include "llvm/IR/DiagnosticInfo.h"
28#include "llvm/IR/DiagnosticPrinter.h"
29#include "llvm/IR/Function.h"
30#include "llvm/IR/Intrinsics.h"
31#include "llvm/IR/IntrinsicsWebAssembly.h"
32#include "llvm/Support/Debug.h"
33#include "llvm/Support/ErrorHandling.h"
34#include "llvm/Support/raw_ostream.h"
35#include "llvm/Target/TargetOptions.h"
36using namespace llvm;
37
38#define DEBUG_TYPE "wasm-lower"
39
40WebAssemblyTargetLowering::WebAssemblyTargetLowering(
41 const TargetMachine &TM, const WebAssemblySubtarget &STI)
42 : TargetLowering(TM), Subtarget(&STI) {
43 auto MVTPtr = Subtarget->hasAddr64() ? MVT::i64 : MVT::i32;
44
45 // Booleans always contain 0 or 1.
46 setBooleanContents(ZeroOrOneBooleanContent);
47 // Except in SIMD vectors
48 setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
49 // We don't know the microarchitecture here, so just reduce register pressure.
50 setSchedulingPreference(Sched::RegPressure);
51 // Tell ISel that we have a stack pointer.
52 setStackPointerRegisterToSaveRestore(
53 Subtarget->hasAddr64() ? WebAssembly::SP64 : WebAssembly::SP32);
54 // Set up the register classes.
55 addRegisterClass(MVT::i32, &WebAssembly::I32RegClass);
56 addRegisterClass(MVT::i64, &WebAssembly::I64RegClass);
57 addRegisterClass(MVT::f32, &WebAssembly::F32RegClass);
58 addRegisterClass(MVT::f64, &WebAssembly::F64RegClass);
59 if (Subtarget->hasSIMD128()) {
60 addRegisterClass(MVT::v16i8, &WebAssembly::V128RegClass);
61 addRegisterClass(MVT::v8i16, &WebAssembly::V128RegClass);
62 addRegisterClass(MVT::v4i32, &WebAssembly::V128RegClass);
63 addRegisterClass(MVT::v4f32, &WebAssembly::V128RegClass);
64 addRegisterClass(MVT::v2i64, &WebAssembly::V128RegClass);
65 addRegisterClass(MVT::v2f64, &WebAssembly::V128RegClass);
66 }
67 // Compute derived properties from the register classes.
68 computeRegisterProperties(Subtarget->getRegisterInfo());
69
70 setOperationAction(ISD::GlobalAddress, MVTPtr, Custom);
71 setOperationAction(ISD::ExternalSymbol, MVTPtr, Custom);
72 setOperationAction(ISD::JumpTable, MVTPtr, Custom);
73 setOperationAction(ISD::BlockAddress, MVTPtr, Custom);
74 setOperationAction(ISD::BRIND, MVT::Other, Custom);
75
76 // Take the default expansion for va_arg, va_copy, and va_end. There is no
77 // default action for va_start, so we do that custom.
78 setOperationAction(ISD::VASTART, MVT::Other, Custom);
79 setOperationAction(ISD::VAARG, MVT::Other, Expand);
80 setOperationAction(ISD::VACOPY, MVT::Other, Expand);
81 setOperationAction(ISD::VAEND, MVT::Other, Expand);
82
83 for (auto T : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
84 // Don't expand the floating-point types to constant pools.
85 setOperationAction(ISD::ConstantFP, T, Legal);
86 // Expand floating-point comparisons.
87 for (auto CC : {ISD::SETO, ISD::SETUO, ISD::SETUEQ, ISD::SETONE,
88 ISD::SETULT, ISD::SETULE, ISD::SETUGT, ISD::SETUGE})
89 setCondCodeAction(CC, T, Expand);
90 // Expand floating-point library function operators.
91 for (auto Op :
92 {ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FMA})
93 setOperationAction(Op, T, Expand);
94 // Note supported floating-point library function operators that otherwise
95 // default to expand.
96 for (auto Op :
97 {ISD::FCEIL, ISD::FFLOOR, ISD::FTRUNC, ISD::FNEARBYINT, ISD::FRINT})
98 setOperationAction(Op, T, Legal);
99 // Support minimum and maximum, which otherwise default to expand.
100 setOperationAction(ISD::FMINIMUM, T, Legal);
101 setOperationAction(ISD::FMAXIMUM, T, Legal);
102 // WebAssembly currently has no builtin f16 support.
103 setOperationAction(ISD::FP16_TO_FP, T, Expand);
104 setOperationAction(ISD::FP_TO_FP16, T, Expand);
105 setLoadExtAction(ISD::EXTLOAD, T, MVT::f16, Expand);
106 setTruncStoreAction(T, MVT::f16, Expand);
107 }
108
109 // Expand unavailable integer operations.
110 for (auto Op :
111 {ISD::BSWAP, ISD::SMUL_LOHI, ISD::UMUL_LOHI, ISD::MULHS, ISD::MULHU,
112 ISD::SDIVREM, ISD::UDIVREM, ISD::SHL_PARTS, ISD::SRA_PARTS,
113 ISD::SRL_PARTS, ISD::ADDC, ISD::ADDE, ISD::SUBC, ISD::SUBE}) {
114 for (auto T : {MVT::i32, MVT::i64})
115 setOperationAction(Op, T, Expand);
116 if (Subtarget->hasSIMD128())
117 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
118 setOperationAction(Op, T, Expand);
119 }
120
121 // SIMD-specific configuration
122 if (Subtarget->hasSIMD128()) {
123 // Support saturating add for i8x16 and i16x8
124 for (auto Op : {ISD::SADDSAT, ISD::UADDSAT})
125 for (auto T : {MVT::v16i8, MVT::v8i16})
126 setOperationAction(Op, T, Legal);
127
128 // Custom lower BUILD_VECTORs to minimize number of replace_lanes
129 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
130 MVT::v2f64})
131 setOperationAction(ISD::BUILD_VECTOR, T, Custom);
132
133 // We have custom shuffle lowering to expose the shuffle mask
134 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
135 MVT::v2f64})
136 setOperationAction(ISD::VECTOR_SHUFFLE, T, Custom);
137
138 // Custom lowering since wasm shifts must have a scalar shift amount
139 for (auto Op : {ISD::SHL, ISD::SRA, ISD::SRL})
140 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
141 setOperationAction(Op, T, Custom);
142
143 // Custom lower lane accesses to expand out variable indices
144 for (auto Op : {ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_VECTOR_ELT})
145 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
146 MVT::v2f64})
147 setOperationAction(Op, T, Custom);
148
149 // There is no i64x2.mul instruction
150 // TODO: Actually, there is now. Implement it.
151 setOperationAction(ISD::MUL, MVT::v2i64, Expand);
152
153 // There are no vector select instructions
154 for (auto Op : {ISD::VSELECT, ISD::SELECT_CC, ISD::SELECT})
155 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
156 MVT::v2f64})
157 setOperationAction(Op, T, Expand);
158
159 // Expand integer operations supported for scalars but not SIMD
160 for (auto Op : {ISD::CTLZ, ISD::CTTZ, ISD::CTPOP, ISD::SDIV, ISD::UDIV,
161 ISD::SREM, ISD::UREM, ISD::ROTL, ISD::ROTR})
162 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
163 setOperationAction(Op, T, Expand);
164
165 // But we do have integer min and max operations
166 for (auto Op : {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
167 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
168 setOperationAction(Op, T, Legal);
169
170 // Expand float operations supported for scalars but not SIMD
171 for (auto Op : {ISD::FCEIL, ISD::FFLOOR, ISD::FTRUNC, ISD::FNEARBYINT,
172 ISD::FCOPYSIGN, ISD::FLOG, ISD::FLOG2, ISD::FLOG10,
173 ISD::FEXP, ISD::FEXP2, ISD::FRINT})
174 for (auto T : {MVT::v4f32, MVT::v2f64})
175 setOperationAction(Op, T, Expand);
176
177 // Expand operations not supported for i64x2 vectors
178 for (unsigned CC = 0; CC < ISD::SETCC_INVALID; ++CC)
179 setCondCodeAction(static_cast<ISD::CondCode>(CC), MVT::v2i64, Custom);
180
181 // 64x2 conversions are not in the spec
182 if (!Subtarget->hasUnimplementedSIMD128())
183 for (auto Op :
184 {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT})
185 for (auto T : {MVT::v2i64, MVT::v2f64})
186 setOperationAction(Op, T, Expand);
187 }
188
189 // As a special case, these operators use the type to mean the type to
190 // sign-extend from.
191 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
192 if (!Subtarget->hasSignExt()) {
193 // Sign extends are legal only when extending a vector extract
194 auto Action = Subtarget->hasSIMD128() ? Custom : Expand;
195 for (auto T : {MVT::i8, MVT::i16, MVT::i32})
196 setOperationAction(ISD::SIGN_EXTEND_INREG, T, Action);
197 }
198 for (auto T : MVT::integer_fixedlen_vector_valuetypes())
199 setOperationAction(ISD::SIGN_EXTEND_INREG, T, Expand);
200
201 // Dynamic stack allocation: use the default expansion.
202 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
203 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
204 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVTPtr, Expand);
205
206 setOperationAction(ISD::FrameIndex, MVT::i32, Custom);
207 setOperationAction(ISD::CopyToReg, MVT::Other, Custom);
208
209 // Expand these forms; we pattern-match the forms that we can handle in isel.
210 for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64})
211 for (auto Op : {ISD::BR_CC, ISD::SELECT_CC})
212 setOperationAction(Op, T, Expand);
213
214 // We have custom switch handling.
215 setOperationAction(ISD::BR_JT, MVT::Other, Custom);
216
217 // WebAssembly doesn't have:
218 // - Floating-point extending loads.
219 // - Floating-point truncating stores.
220 // - i1 extending loads.
221 // - truncating SIMD stores and most extending loads
222 setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
223 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
224 for (auto T : MVT::integer_valuetypes())
225 for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
226 setLoadExtAction(Ext, T, MVT::i1, Promote);
227 if (Subtarget->hasSIMD128()) {
228 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v4f32,
229 MVT::v2f64}) {
230 for (auto MemT : MVT::fixedlen_vector_valuetypes()) {
231 if (MVT(T) != MemT) {
232 setTruncStoreAction(T, MemT, Expand);
233 for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
234 setLoadExtAction(Ext, T, MemT, Expand);
235 }
236 }
237 }
238 // But some vector extending loads are legal
239 if (Subtarget->hasUnimplementedSIMD128()) {
240 for (auto Ext : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}) {
241 setLoadExtAction(Ext, MVT::v8i16, MVT::v8i8, Legal);
242 setLoadExtAction(Ext, MVT::v4i32, MVT::v4i16, Legal);
243 setLoadExtAction(Ext, MVT::v2i64, MVT::v2i32, Legal);
244 }
245 }
246 }
247
248 // Don't do anything clever with build_pairs
249 setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
250
251 // Trap lowers to wasm unreachable
252 setOperationAction(ISD::TRAP, MVT::Other, Legal);
253
254 // Exception handling intrinsics
255 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
256 setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
257
258 setMaxAtomicSizeInBitsSupported(64);
259
260 // Override the __gnu_f2h_ieee/__gnu_h2f_ieee names so that the f32 name is
261 // consistent with the f64 and f128 names.
262 setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
263 setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
264
265 // Define the emscripten name for return address helper.
266 // TODO: when implementing other WASM backends, make this generic or only do
267 // this on emscripten depending on what they end up doing.
268 setLibcallName(RTLIB::RETURN_ADDRESS, "emscripten_return_address");
269
270 // Always convert switches to br_tables unless there is only one case, which
271 // is equivalent to a simple branch. This reduces code size for wasm, and we
272 // defer possible jump table optimizations to the VM.
273 setMinimumJumpTableEntries(2);
274}
275
276TargetLowering::AtomicExpansionKind
277WebAssemblyTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
278 // We have wasm instructions for these
279 switch (AI->getOperation()) {
280 case AtomicRMWInst::Add:
281 case AtomicRMWInst::Sub:
282 case AtomicRMWInst::And:
283 case AtomicRMWInst::Or:
284 case AtomicRMWInst::Xor:
285 case AtomicRMWInst::Xchg:
286 return AtomicExpansionKind::None;
287 default:
288 break;
289 }
290 return AtomicExpansionKind::CmpXChg;
291}
292
293FastISel *WebAssemblyTargetLowering::createFastISel(
294 FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo) const {
295 return WebAssembly::createFastISel(FuncInfo, LibInfo);
296}
297
298MVT WebAssemblyTargetLowering::getScalarShiftAmountTy(const DataLayout & /*DL*/,
299 EVT VT) const {
300 unsigned BitWidth = NextPowerOf2(VT.getSizeInBits() - 1);
301 if (BitWidth > 1 && BitWidth < 8)
302 BitWidth = 8;
303
304 if (BitWidth > 64) {
305 // The shift will be lowered to a libcall, and compiler-rt libcalls expect
306 // the count to be an i32.
307 BitWidth = 32;
308 assert(BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) &&
309 "32-bit shift counts ought to be enough for anyone");
310 }
311
312 MVT Result = MVT::getIntegerVT(BitWidth);
313 assert(Result != MVT::INVALID_SIMPLE_VALUE_TYPE &&
314 "Unable to represent scalar shift amount type");
315 return Result;
316}
317
318// Lower an fp-to-int conversion operator from the LLVM opcode, which has an
319// undefined result on invalid/overflow, to the WebAssembly opcode, which
320// traps on invalid/overflow.
321static MachineBasicBlock *LowerFPToInt(MachineInstr &MI, DebugLoc DL,
322 MachineBasicBlock *BB,
323 const TargetInstrInfo &TII,
324 bool IsUnsigned, bool Int64,
325 bool Float64, unsigned LoweredOpcode) {
326 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
327
328 Register OutReg = MI.getOperand(0).getReg();
329 Register InReg = MI.getOperand(1).getReg();
330
331 unsigned Abs = Float64 ? WebAssembly::ABS_F64 : WebAssembly::ABS_F32;
332 unsigned FConst = Float64 ? WebAssembly::CONST_F64 : WebAssembly::CONST_F32;
333 unsigned LT = Float64 ? WebAssembly::LT_F64 : WebAssembly::LT_F32;
334 unsigned GE = Float64 ? WebAssembly::GE_F64 : WebAssembly::GE_F32;
335 unsigned IConst = Int64 ? WebAssembly::CONST_I64 : WebAssembly::CONST_I32;
336 unsigned Eqz = WebAssembly::EQZ_I32;
337 unsigned And = WebAssembly::AND_I32;
338 int64_t Limit = Int64 ? INT64_MIN : INT32_MIN;
339 int64_t Substitute = IsUnsigned ? 0 : Limit;
340 double CmpVal = IsUnsigned ? -(double)Limit * 2.0 : -(double)Limit;
341 auto &Context = BB->getParent()->getFunction().getContext();
342 Type *Ty = Float64 ? Type::getDoubleTy(Context) : Type::getFloatTy(Context);
343
344 const BasicBlock *LLVMBB = BB->getBasicBlock();
345 MachineFunction *F = BB->getParent();
346 MachineBasicBlock *TrueMBB = F->CreateMachineBasicBlock(LLVMBB);
347 MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVMBB);
348 MachineBasicBlock *DoneMBB = F->CreateMachineBasicBlock(LLVMBB);
349
350 MachineFunction::iterator It = ++BB->getIterator();
351 F->insert(It, FalseMBB);
352 F->insert(It, TrueMBB);
353 F->insert(It, DoneMBB);
354
355 // Transfer the remainder of BB and its successor edges to DoneMBB.
356 DoneMBB->splice(DoneMBB->begin(), BB, std::next(MI.getIterator()), BB->end());
357 DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
358
359 BB->addSuccessor(TrueMBB);
360 BB->addSuccessor(FalseMBB);
361 TrueMBB->addSuccessor(DoneMBB);
362 FalseMBB->addSuccessor(DoneMBB);
363
364 unsigned Tmp0, Tmp1, CmpReg, EqzReg, FalseReg, TrueReg;
365 Tmp0 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
366 Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
367 CmpReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
368 EqzReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
369 FalseReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
370 TrueReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
371
372 MI.eraseFromParent();
373 // For signed numbers, we can do a single comparison to determine whether
374 // fabs(x) is within range.
375 if (IsUnsigned) {
376 Tmp0 = InReg;
377 } else {
378 BuildMI(BB, DL, TII.get(Abs), Tmp0).addReg(InReg);
379 }
380 BuildMI(BB, DL, TII.get(FConst), Tmp1)
381 .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, CmpVal)));
382 BuildMI(BB, DL, TII.get(LT), CmpReg).addReg(Tmp0).addReg(Tmp1);
383
384 // For unsigned numbers, we have to do a separate comparison with zero.
385 if (IsUnsigned) {
386 Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
387 Register SecondCmpReg =
388 MRI.createVirtualRegister(&WebAssembly::I32RegClass);
389 Register AndReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
390 BuildMI(BB, DL, TII.get(FConst), Tmp1)
391 .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, 0.0)));
392 BuildMI(BB, DL, TII.get(GE), SecondCmpReg).addReg(Tmp0).addReg(Tmp1);
393 BuildMI(BB, DL, TII.get(And), AndReg).addReg(CmpReg).addReg(SecondCmpReg);
394 CmpReg = AndReg;
395 }
396
397 BuildMI(BB, DL, TII.get(Eqz), EqzReg).addReg(CmpReg);
398
399 // Create the CFG diamond to select between doing the conversion or using
400 // the substitute value.
401 BuildMI(BB, DL, TII.get(WebAssembly::BR_IF)).addMBB(TrueMBB).addReg(EqzReg);
402 BuildMI(FalseMBB, DL, TII.get(LoweredOpcode), FalseReg).addReg(InReg);
403 BuildMI(FalseMBB, DL, TII.get(WebAssembly::BR)).addMBB(DoneMBB);
404 BuildMI(TrueMBB, DL, TII.get(IConst), TrueReg).addImm(Substitute);
405 BuildMI(*DoneMBB, DoneMBB->begin(), DL, TII.get(TargetOpcode::PHI), OutReg)
406 .addReg(FalseReg)
407 .addMBB(FalseMBB)
408 .addReg(TrueReg)
409 .addMBB(TrueMBB);
410
411 return DoneMBB;
412}
413
414static MachineBasicBlock *LowerCallResults(MachineInstr &CallResults,
415 DebugLoc DL, MachineBasicBlock *BB,
416 const TargetInstrInfo &TII) {
417 MachineInstr &CallParams = *CallResults.getPrevNode();
418 assert(CallParams.getOpcode() == WebAssembly::CALL_PARAMS);
419 assert(CallResults.getOpcode() == WebAssembly::CALL_RESULTS ||
420 CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS);
421
422 bool IsIndirect = CallParams.getOperand(0).isReg();
423 bool IsRetCall = CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS;
424
425 unsigned CallOp;
426 if (IsIndirect && IsRetCall) {
427 CallOp = WebAssembly::RET_CALL_INDIRECT;
428 } else if (IsIndirect) {
429 CallOp = WebAssembly::CALL_INDIRECT;
430 } else if (IsRetCall) {
431 CallOp = WebAssembly::RET_CALL;
432 } else {
433 CallOp = WebAssembly::CALL;
434 }
435
436 MachineFunction &MF = *BB->getParent();
437 const MCInstrDesc &MCID = TII.get(CallOp);
438 MachineInstrBuilder MIB(MF, MF.CreateMachineInstr(MCID, DL));
439
440 // Move the function pointer to the end of the arguments for indirect calls
441 if (IsIndirect) {
442 auto FnPtr = CallParams.getOperand(0);
443 CallParams.RemoveOperand(0);
444 CallParams.addOperand(FnPtr);
445 }
446
447 for (auto Def : CallResults.defs())
448 MIB.add(Def);
449
450 // Add placeholders for the type index and immediate flags
451 if (IsIndirect) {
452 MIB.addImm(0);
453 MIB.addImm(0);
454 }
455
456 for (auto Use : CallParams.uses())
457 MIB.add(Use);
458
459 BB->insert(CallResults.getIterator(), MIB);
460 CallParams.eraseFromParent();
461 CallResults.eraseFromParent();
462
463 return BB;
464}
465
466MachineBasicBlock *WebAssemblyTargetLowering::EmitInstrWithCustomInserter(
467 MachineInstr &MI, MachineBasicBlock *BB) const {
468 const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
469 DebugLoc DL = MI.getDebugLoc();
470
471 switch (MI.getOpcode()) {
472 default:
473 llvm_unreachable("Unexpected instr type to insert");
474 case WebAssembly::FP_TO_SINT_I32_F32:
475 return LowerFPToInt(MI, DL, BB, TII, false, false, false,
476 WebAssembly::I32_TRUNC_S_F32);
477 case WebAssembly::FP_TO_UINT_I32_F32:
478 return LowerFPToInt(MI, DL, BB, TII, true, false, false,
479 WebAssembly::I32_TRUNC_U_F32);
480 case WebAssembly::FP_TO_SINT_I64_F32:
481 return LowerFPToInt(MI, DL, BB, TII, false, true, false,
482 WebAssembly::I64_TRUNC_S_F32);
483 case WebAssembly::FP_TO_UINT_I64_F32:
484 return LowerFPToInt(MI, DL, BB, TII, true, true, false,
485 WebAssembly::I64_TRUNC_U_F32);
486 case WebAssembly::FP_TO_SINT_I32_F64:
487 return LowerFPToInt(MI, DL, BB, TII, false, false, true,
488 WebAssembly::I32_TRUNC_S_F64);
489 case WebAssembly::FP_TO_UINT_I32_F64:
490 return LowerFPToInt(MI, DL, BB, TII, true, false, true,
491 WebAssembly::I32_TRUNC_U_F64);
492 case WebAssembly::FP_TO_SINT_I64_F64:
493 return LowerFPToInt(MI, DL, BB, TII, false, true, true,
494 WebAssembly::I64_TRUNC_S_F64);
495 case WebAssembly::FP_TO_UINT_I64_F64:
496 return LowerFPToInt(MI, DL, BB, TII, true, true, true,
497 WebAssembly::I64_TRUNC_U_F64);
498 case WebAssembly::CALL_RESULTS:
499 case WebAssembly::RET_CALL_RESULTS:
500 return LowerCallResults(MI, DL, BB, TII);
501 }
502}
503
504const char *
505WebAssemblyTargetLowering::getTargetNodeName(unsigned Opcode) const {
506 switch (static_cast<WebAssemblyISD::NodeType>(Opcode)) {
507 case WebAssemblyISD::FIRST_NUMBER:
508 case WebAssemblyISD::FIRST_MEM_OPCODE:
509 break;
510#define HANDLE_NODETYPE(NODE) \
511 case WebAssemblyISD::NODE: \
512 return "WebAssemblyISD::" #NODE;
513#define HANDLE_MEM_NODETYPE(NODE) HANDLE_NODETYPE(NODE)
514#include "WebAssemblyISD.def"
515#undef HANDLE_MEM_NODETYPE
516#undef HANDLE_NODETYPE
517 }
518 return nullptr;
519}
520
521std::pair<unsigned, const TargetRegisterClass *>
522WebAssemblyTargetLowering::getRegForInlineAsmConstraint(
523 const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
524 // First, see if this is a constraint that directly corresponds to a
525 // WebAssembly register class.
526 if (Constraint.size() == 1) {
527 switch (Constraint[0]) {
528 case 'r':
529 assert(VT != MVT::iPTR && "Pointer MVT not expected here");
530 if (Subtarget->hasSIMD128() && VT.isVector()) {
531 if (VT.getSizeInBits() == 128)
532 return std::make_pair(0U, &WebAssembly::V128RegClass);
533 }
534 if (VT.isInteger() && !VT.isVector()) {
535 if (VT.getSizeInBits() <= 32)
536 return std::make_pair(0U, &WebAssembly::I32RegClass);
537 if (VT.getSizeInBits() <= 64)
538 return std::make_pair(0U, &WebAssembly::I64RegClass);
539 }
540 break;
541 default:
542 break;
543 }
544 }
545
546 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
547}
548
549bool WebAssemblyTargetLowering::isCheapToSpeculateCttz() const {
550 // Assume ctz is a relatively cheap operation.
551 return true;
552}
553
554bool WebAssemblyTargetLowering::isCheapToSpeculateCtlz() const {
555 // Assume clz is a relatively cheap operation.
556 return true;
557}
558
559bool WebAssemblyTargetLowering::isLegalAddressingMode(const DataLayout &DL,
560 const AddrMode &AM,
561 Type *Ty, unsigned AS,
562 Instruction *I) const {
563 // WebAssembly offsets are added as unsigned without wrapping. The
564 // isLegalAddressingMode gives us no way to determine if wrapping could be
565 // happening, so we approximate this by accepting only non-negative offsets.
566 if (AM.BaseOffs < 0)
567 return false;
568
569 // WebAssembly has no scale register operands.
570 if (AM.Scale != 0)
571 return false;
572
573 // Everything else is legal.
574 return true;
575}
576
577bool WebAssemblyTargetLowering::allowsMisalignedMemoryAccesses(
578 EVT /*VT*/, unsigned /*AddrSpace*/, unsigned /*Align*/,
579 MachineMemOperand::Flags /*Flags*/, bool *Fast) const {
580 // WebAssembly supports unaligned accesses, though it should be declared
581 // with the p2align attribute on loads and stores which do so, and there
582 // may be a performance impact. We tell LLVM they're "fast" because
583 // for the kinds of things that LLVM uses this for (merging adjacent stores
584 // of constants, etc.), WebAssembly implementations will either want the
585 // unaligned access or they'll split anyway.
586 if (Fast)
587 *Fast = true;
588 return true;
589}
590
591bool WebAssemblyTargetLowering::isIntDivCheap(EVT VT,
592 AttributeList Attr) const {
593 // The current thinking is that wasm engines will perform this optimization,
594 // so we can save on code size.
595 return true;
596}
597
598bool WebAssemblyTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
599 if (!Subtarget->hasUnimplementedSIMD128())
600 return false;
601 MVT ExtT = ExtVal.getSimpleValueType();
602 MVT MemT = cast<LoadSDNode>(ExtVal->getOperand(0))->getSimpleValueType(0);
603 return (ExtT == MVT::v8i16 && MemT == MVT::v8i8) ||
604 (ExtT == MVT::v4i32 && MemT == MVT::v4i16) ||
605 (ExtT == MVT::v2i64 && MemT == MVT::v2i32);
606}
607
608EVT WebAssemblyTargetLowering::getSetCCResultType(const DataLayout &DL,
609 LLVMContext &C,
610 EVT VT) const {
611 if (VT.isVector())
612 return VT.changeVectorElementTypeToInteger();
613
614 return TargetLowering::getSetCCResultType(DL, C, VT);
615}
616
617bool WebAssemblyTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
618 const CallInst &I,
619 MachineFunction &MF,
620 unsigned Intrinsic) const {
621 switch (Intrinsic) {
622 case Intrinsic::wasm_atomic_notify:
623 Info.opc = ISD::INTRINSIC_W_CHAIN;
624 Info.memVT = MVT::i32;
625 Info.ptrVal = I.getArgOperand(0);
626 Info.offset = 0;
627 Info.align = Align(4);
628 // atomic.notify instruction does not really load the memory specified with
629 // this argument, but MachineMemOperand should either be load or store, so
630 // we set this to a load.
631 // FIXME Volatile isn't really correct, but currently all LLVM atomic
632 // instructions are treated as volatiles in the backend, so we should be
633 // consistent. The same applies for wasm_atomic_wait intrinsics too.
634 Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
635 return true;
636 case Intrinsic::wasm_atomic_wait_i32:
637 Info.opc = ISD::INTRINSIC_W_CHAIN;
638 Info.memVT = MVT::i32;
639 Info.ptrVal = I.getArgOperand(0);
640 Info.offset = 0;
641 Info.align = Align(4);
642 Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
643 return true;
644 case Intrinsic::wasm_atomic_wait_i64:
645 Info.opc = ISD::INTRINSIC_W_CHAIN;
646 Info.memVT = MVT::i64;
647 Info.ptrVal = I.getArgOperand(0);
648 Info.offset = 0;
649 Info.align = Align(8);
650 Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
651 return true;
652 default:
653 return false;
654 }
655}
656
657//===----------------------------------------------------------------------===//
658// WebAssembly Lowering private implementation.
659//===----------------------------------------------------------------------===//
660
661//===----------------------------------------------------------------------===//
662// Lowering Code
663//===----------------------------------------------------------------------===//
664
665static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg) {
666 MachineFunction &MF = DAG.getMachineFunction();
667 DAG.getContext()->diagnose(
668 DiagnosticInfoUnsupported(MF.getFunction(), Msg, DL.getDebugLoc()));
669}
670
671// Test whether the given calling convention is supported.
672static bool callingConvSupported(CallingConv::ID CallConv) {
673 // We currently support the language-independent target-independent
674 // conventions. We don't yet have a way to annotate calls with properties like
675 // "cold", and we don't have any call-clobbered registers, so these are mostly
676 // all handled the same.
677 return CallConv == CallingConv::C || CallConv == CallingConv::Fast ||
678 CallConv == CallingConv::Cold ||
679 CallConv == CallingConv::PreserveMost ||
680 CallConv == CallingConv::PreserveAll ||
681 CallConv == CallingConv::CXX_FAST_TLS ||
682 CallConv == CallingConv::WASM_EmscriptenInvoke ||
683 CallConv == CallingConv::Swift;
684}
685
686SDValue
687WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
688 SmallVectorImpl<SDValue> &InVals) const {
689 SelectionDAG &DAG = CLI.DAG;
690 SDLoc DL = CLI.DL;
691 SDValue Chain = CLI.Chain;
692 SDValue Callee = CLI.Callee;
693 MachineFunction &MF = DAG.getMachineFunction();
694 auto Layout = MF.getDataLayout();
695
696 CallingConv::ID CallConv = CLI.CallConv;
697 if (!callingConvSupported(CallConv))
698 fail(DL, DAG,
699 "WebAssembly doesn't support language-specific or target-specific "
700 "calling conventions yet");
701 if (CLI.IsPatchPoint)
702 fail(DL, DAG, "WebAssembly doesn't support patch point yet");
703
704 if (CLI.IsTailCall) {
705 auto NoTail = [&](const char *Msg) {
706 if (CLI.CS && CLI.CS.isMustTailCall())
707 fail(DL, DAG, Msg);
708 CLI.IsTailCall = false;
709 };
710
711 if (!Subtarget->hasTailCall())
712 NoTail("WebAssembly 'tail-call' feature not enabled");
713
714 // Varargs calls cannot be tail calls because the buffer is on the stack
715 if (CLI.IsVarArg)
716 NoTail("WebAssembly does not support varargs tail calls");
717
718 // Do not tail call unless caller and callee return types match
719 const Function &F = MF.getFunction();
720 const TargetMachine &TM = getTargetMachine();
721 Type *RetTy = F.getReturnType();
722 SmallVector<MVT, 4> CallerRetTys;
723 SmallVector<MVT, 4> CalleeRetTys;
724 computeLegalValueVTs(F, TM, RetTy, CallerRetTys);
725 computeLegalValueVTs(F, TM, CLI.RetTy, CalleeRetTys);
726 bool TypesMatch = CallerRetTys.size() == CalleeRetTys.size() &&
727 std::equal(CallerRetTys.begin(), CallerRetTys.end(),
728 CalleeRetTys.begin());
729 if (!TypesMatch)
730 NoTail("WebAssembly tail call requires caller and callee return types to "
731 "match");
732
733 // If pointers to local stack values are passed, we cannot tail call
734 if (CLI.CS) {
735 for (auto &Arg : CLI.CS.args()) {
736 Value *Val = Arg.get();
737 // Trace the value back through pointer operations
738 while (true) {
739 Value *Src = Val->stripPointerCastsAndAliases();
740 if (auto *GEP = dyn_cast<GetElementPtrInst>(Src))
741 Src = GEP->getPointerOperand();
742 if (Val == Src)
743 break;
744 Val = Src;
745 }
746 if (isa<AllocaInst>(Val)) {
747 NoTail(
748 "WebAssembly does not support tail calling with stack arguments");
749 break;
750 }
751 }
752 }
753 }
754
755 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
756 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
757 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
758
759 // The generic code may have added an sret argument. If we're lowering an
760 // invoke function, the ABI requires that the function pointer be the first
761 // argument, so we may have to swap the arguments.
762 if (CallConv == CallingConv::WASM_EmscriptenInvoke && Outs.size() >= 2 &&
763 Outs[0].Flags.isSRet()) {
764 std::swap(Outs[0], Outs[1]);
765 std::swap(OutVals[0], OutVals[1]);
766 }
767
768 unsigned NumFixedArgs = 0;
769 for (unsigned I = 0; I < Outs.size(); ++I) {
770 const ISD::OutputArg &Out = Outs[I];
771 SDValue &OutVal = OutVals[I];
772 if (Out.Flags.isNest())
773 fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
774 if (Out.Flags.isInAlloca())
775 fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
776 if (Out.Flags.isInConsecutiveRegs())
777 fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
778 if (Out.Flags.isInConsecutiveRegsLast())
779 fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
780 if (Out.Flags.isByVal() && Out.Flags.getByValSize() != 0) {
781 auto &MFI = MF.getFrameInfo();
782 int FI = MFI.CreateStackObject(Out.Flags.getByValSize(),
783 Out.Flags.getNonZeroByValAlign(),
784 /*isSS=*/false);
785 SDValue SizeNode =
786 DAG.getConstant(Out.Flags.getByValSize(), DL, MVT::i32);
787 SDValue FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
788 Chain = DAG.getMemcpy(
789 Chain, DL, FINode, OutVal, SizeNode, Out.Flags.getNonZeroByValAlign(),
790 /*isVolatile*/ false, /*AlwaysInline=*/false,
791 /*isTailCall*/ false, MachinePointerInfo(), MachinePointerInfo());
792 OutVal = FINode;
793 }
794 // Count the number of fixed args *after* legalization.
795 NumFixedArgs += Out.IsFixed;
796 }
797
798 bool IsVarArg = CLI.IsVarArg;
799 auto PtrVT = getPointerTy(Layout);
800
801 // Analyze operands of the call, assigning locations to each operand.
802 SmallVector<CCValAssign, 16> ArgLocs;
803 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
804
805 if (IsVarArg) {
806 // Outgoing non-fixed arguments are placed in a buffer. First
807 // compute their offsets and the total amount of buffer space needed.
808 for (unsigned I = NumFixedArgs; I < Outs.size(); ++I) {
809 const ISD::OutputArg &Out = Outs[I];
810 SDValue &Arg = OutVals[I];
811 EVT VT = Arg.getValueType();
812 assert(VT != MVT::iPTR && "Legalized args should be concrete");
813 Type *Ty = VT.getTypeForEVT(*DAG.getContext());
814 unsigned Align = std::max(Out.Flags.getOrigAlign(),
815 Layout.getABITypeAlignment(Ty));
816 unsigned Offset = CCInfo.AllocateStack(Layout.getTypeAllocSize(Ty),
817 Align);
818 CCInfo.addLoc(CCValAssign::getMem(ArgLocs.size(), VT.getSimpleVT(),
819 Offset, VT.getSimpleVT(),
820 CCValAssign::Full));
821 }
822 }
823
824 unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
825
826 SDValue FINode;
827 if (IsVarArg && NumBytes) {
828 // For non-fixed arguments, next emit stores to store the argument values
829 // to the stack buffer at the offsets computed above.
830 int FI = MF.getFrameInfo().CreateStackObject(NumBytes,
831 Layout.getStackAlignment(),
832 /*isSS=*/false);
833 unsigned ValNo = 0;
834 SmallVector<SDValue, 8> Chains;
835 for (SDValue Arg :
836 make_range(OutVals.begin() + NumFixedArgs, OutVals.end())) {
837 assert(ArgLocs[ValNo].getValNo() == ValNo &&
838 "ArgLocs should remain in order and only hold varargs args");
839 unsigned Offset = ArgLocs[ValNo++].getLocMemOffset();
840 FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
841 SDValue Add = DAG.getNode(ISD::ADD, DL, PtrVT, FINode,
842 DAG.getConstant(Offset, DL, PtrVT));
843 Chains.push_back(
844 DAG.getStore(Chain, DL, Arg, Add,
845 MachinePointerInfo::getFixedStack(MF, FI, Offset), 0));
846 }
847 if (!Chains.empty())
848 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
849 } else if (IsVarArg) {
850 FINode = DAG.getIntPtrConstant(0, DL);
851 }
852
853 if (Callee->getOpcode() == ISD::GlobalAddress) {
854 // If the callee is a GlobalAddress node (quite common, every direct call
855 // is) turn it into a TargetGlobalAddress node so that LowerGlobalAddress
856 // doesn't add MO_GOT, which is not needed for direct calls.
857 GlobalAddressSDNode* GA = cast<GlobalAddressSDNode>(Callee);
858 Callee = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
859 getPointerTy(DAG.getDataLayout()),
860 GA->getOffset());
861 Callee = DAG.getNode(WebAssemblyISD::Wrapper, DL,
862 getPointerTy(DAG.getDataLayout()), Callee);
863 }
864
865 // Compute the operands for the CALLn node.
866 SmallVector<SDValue, 16> Ops;
867 Ops.push_back(Chain);
868 Ops.push_back(Callee);
869
870 // Add all fixed arguments. Note that for non-varargs calls, NumFixedArgs
871 // isn't reliable.
872 Ops.append(OutVals.begin(),
873 IsVarArg ? OutVals.begin() + NumFixedArgs : OutVals.end());
874 // Add a pointer to the vararg buffer.
875 if (IsVarArg)
876 Ops.push_back(FINode);
877
878 SmallVector<EVT, 8> InTys;
879 for (const auto &In : Ins) {
880 assert(!In.Flags.isByVal() && "byval is not valid for return values");
881 assert(!In.Flags.isNest() && "nest is not valid for return values");
882 if (In.Flags.isInAlloca())
883 fail(DL, DAG, "WebAssembly hasn't implemented inalloca return values");
884 if (In.Flags.isInConsecutiveRegs())
885 fail(DL, DAG, "WebAssembly hasn't implemented cons regs return values");
886 if (In.Flags.isInConsecutiveRegsLast())
887 fail(DL, DAG,
888 "WebAssembly hasn't implemented cons regs last return values");
889 // Ignore In.getOrigAlign() because all our arguments are passed in
890 // registers.
891 InTys.push_back(In.VT);
892 }
893
894 if (CLI.IsTailCall) {
895 // ret_calls do not return values to the current frame
896 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
897 return DAG.getNode(WebAssemblyISD::RET_CALL, DL, NodeTys, Ops);
898 }
899
900 InTys.push_back(MVT::Other);
901 SDVTList InTyList = DAG.getVTList(InTys);
902 SDValue Res = DAG.getNode(WebAssemblyISD::CALL, DL, InTyList, Ops);
903
904 for (size_t I = 0; I < Ins.size(); ++I)
905 InVals.push_back(Res.getValue(I));
906
907 // Return the chain
908 return Res.getValue(Ins.size());
909}
910
911bool WebAssemblyTargetLowering::CanLowerReturn(
912 CallingConv::ID /*CallConv*/, MachineFunction & /*MF*/, bool /*IsVarArg*/,
913 const SmallVectorImpl<ISD::OutputArg> &Outs,
914 LLVMContext & /*Context*/) const {
915 // WebAssembly can only handle returning tuples with multivalue enabled
916 return Subtarget->hasMultivalue() || Outs.size() <= 1;
917}
918
919SDValue WebAssemblyTargetLowering::LowerReturn(
920 SDValue Chain, CallingConv::ID CallConv, bool /*IsVarArg*/,
921 const SmallVectorImpl<ISD::OutputArg> &Outs,
922 const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
923 SelectionDAG &DAG) const {
924 assert((Subtarget->hasMultivalue() || Outs.size() <= 1) &&
925 "MVP WebAssembly can only return up to one value");
926 if (!callingConvSupported(CallConv))
927 fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");
928
929 SmallVector<SDValue, 4> RetOps(1, Chain);
930 RetOps.append(OutVals.begin(), OutVals.end());
931 Chain = DAG.getNode(WebAssemblyISD::RETURN, DL, MVT::Other, RetOps);
932
933 // Record the number and types of the return values.
934 for (const ISD::OutputArg &Out : Outs) {
935 assert(!Out.Flags.isByVal() && "byval is not valid for return values");
936 assert(!Out.Flags.isNest() && "nest is not valid for return values");
937 assert(Out.IsFixed && "non-fixed return value is not valid");
938 if (Out.Flags.isInAlloca())
939 fail(DL, DAG, "WebAssembly hasn't implemented inalloca results");
940 if (Out.Flags.isInConsecutiveRegs())
941 fail(DL, DAG, "WebAssembly hasn't implemented cons regs results");
942 if (Out.Flags.isInConsecutiveRegsLast())
943 fail(DL, DAG, "WebAssembly hasn't implemented cons regs last results");
944 }
945
946 return Chain;
947}
948
949SDValue WebAssemblyTargetLowering::LowerFormalArguments(
950 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
951 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
952 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
953 if (!callingConvSupported(CallConv))
954 fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");
955
956 MachineFunction &MF = DAG.getMachineFunction();
957 auto *MFI = MF.getInfo<WebAssemblyFunctionInfo>();
958
959 // Set up the incoming ARGUMENTS value, which serves to represent the liveness
960 // of the incoming values before they're represented by virtual registers.
961 MF.getRegInfo().addLiveIn(WebAssembly::ARGUMENTS);
962
963 for (const ISD::InputArg &In : Ins) {
964 if (In.Flags.isInAlloca())
965 fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
966 if (In.Flags.isNest())
967 fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
968 if (In.Flags.isInConsecutiveRegs())
969 fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
970 if (In.Flags.isInConsecutiveRegsLast())
971 fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
972 // Ignore In.getOrigAlign() because all our arguments are passed in
973 // registers.
974 InVals.push_back(In.Used ? DAG.getNode(WebAssemblyISD::ARGUMENT, DL, In.VT,
975 DAG.getTargetConstant(InVals.size(),
976 DL, MVT::i32))
977 : DAG.getUNDEF(In.VT));
978
979 // Record the number and types of arguments.
980 MFI->addParam(In.VT);
981 }
982
983 // Varargs are copied into a buffer allocated by the caller, and a pointer to
984 // the buffer is passed as an argument.
985 if (IsVarArg) {
986 MVT PtrVT = getPointerTy(MF.getDataLayout());
987 Register VarargVreg =
988 MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrVT));
989 MFI->setVarargBufferVreg(VarargVreg);
990 Chain = DAG.getCopyToReg(
991 Chain, DL, VarargVreg,
992 DAG.getNode(WebAssemblyISD::ARGUMENT, DL, PtrVT,
993 DAG.getTargetConstant(Ins.size(), DL, MVT::i32)));
994 MFI->addParam(PtrVT);
995 }
996
997 // Record the number and types of arguments and results.
998 SmallVector<MVT, 4> Params;
999 SmallVector<MVT, 4> Results;
1000 computeSignatureVTs(MF.getFunction().getFunctionType(), MF.getFunction(),
1001 DAG.getTarget(), Params, Results);
1002 for (MVT VT : Results)
1003 MFI->addResult(VT);
1004 // TODO: Use signatures in WebAssemblyMachineFunctionInfo too and unify
1005 // the param logic here with ComputeSignatureVTs
1006 assert(MFI->getParams().size() == Params.size() &&
1007 std::equal(MFI->getParams().begin(), MFI->getParams().end(),
1008 Params.begin()));
1009
1010 return Chain;
1011}
1012
1013void WebAssemblyTargetLowering::ReplaceNodeResults(
1014 SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
1015 switch (N->getOpcode()) {
1016 case ISD::SIGN_EXTEND_INREG:
1017 // Do not add any results, signifying that N should not be custom lowered
1018 // after all. This happens because simd128 turns on custom lowering for
1019 // SIGN_EXTEND_INREG, but for non-vector sign extends the result might be an
1020 // illegal type.
1021 break;
1022 default:
1023 llvm_unreachable(
1024 "ReplaceNodeResults not implemented for this op for WebAssembly!");
1025 }
1026}
1027
1028//===----------------------------------------------------------------------===//
1029// Custom lowering hooks.
1030//===----------------------------------------------------------------------===//
1031
1032SDValue WebAssemblyTargetLowering::LowerOperation(SDValue Op,
1033 SelectionDAG &DAG) const {
1034 SDLoc DL(Op);
1035 switch (Op.getOpcode()) {
1. Control jumps to 'case BUILD_VECTOR:' at line 1070
1036 default:
1037 llvm_unreachable("unimplemented operation lowering");
1038 return SDValue();
1039 case ISD::FrameIndex:
1040 return LowerFrameIndex(Op, DAG);
1041 case ISD::GlobalAddress:
1042 return LowerGlobalAddress(Op, DAG);
1043 case ISD::ExternalSymbol:
1044 return LowerExternalSymbol(Op, DAG);
1045 case ISD::JumpTable:
1046 return LowerJumpTable(Op, DAG);
1047 case ISD::BR_JT:
1048 return LowerBR_JT(Op, DAG);
1049 case ISD::VASTART:
1050 return LowerVASTART(Op, DAG);
1051 case ISD::BlockAddress:
1052 case ISD::BRIND:
1053 fail(DL, DAG, "WebAssembly hasn't implemented computed gotos");
1054 return SDValue();
1055 case ISD::RETURNADDR:
1056 return LowerRETURNADDR(Op, DAG);
1057 case ISD::FRAMEADDR:
1058 return LowerFRAMEADDR(Op, DAG);
1059 case ISD::CopyToReg:
1060 return LowerCopyToReg(Op, DAG);
1061 case ISD::EXTRACT_VECTOR_ELT:
1062 case ISD::INSERT_VECTOR_ELT:
1063 return LowerAccessVectorElement(Op, DAG);
1064 case ISD::INTRINSIC_VOID:
1065 case ISD::INTRINSIC_WO_CHAIN:
1066 case ISD::INTRINSIC_W_CHAIN:
1067 return LowerIntrinsic(Op, DAG);
1068 case ISD::SIGN_EXTEND_INREG:
1069 return LowerSIGN_EXTEND_INREG(Op, DAG);
1070 case ISD::BUILD_VECTOR:
1071 return LowerBUILD_VECTOR(Op, DAG);
2. Calling 'WebAssemblyTargetLowering::LowerBUILD_VECTOR'
1072 case ISD::VECTOR_SHUFFLE:
1073 return LowerVECTOR_SHUFFLE(Op, DAG);
1074 case ISD::SETCC:
1075 return LowerSETCC(Op, DAG);
1076 case ISD::SHL:
1077 case ISD::SRA:
1078 case ISD::SRL:
1079 return LowerShift(Op, DAG);
1080 }
1081}
1082
1083SDValue WebAssemblyTargetLowering::LowerCopyToReg(SDValue Op,
1084 SelectionDAG &DAG) const {
1085 SDValue Src = Op.getOperand(2);
1086 if (isa<FrameIndexSDNode>(Src.getNode())) {
1087 // CopyToReg nodes don't support FrameIndex operands. Other targets select
1088 // the FI to some LEA-like instruction, but since we don't have that, we
1089 // need to insert some kind of instruction that can take an FI operand and
1090 // produces a value usable by CopyToReg (i.e. in a vreg). So insert a dummy
1091 // local.copy between Op and its FI operand.
1092 SDValue Chain = Op.getOperand(0);
1093 SDLoc DL(Op);
1094 unsigned Reg = cast<RegisterSDNode>(Op.getOperand(1))->getReg();
1095 EVT VT = Src.getValueType();
1096 SDValue Copy(DAG.getMachineNode(VT == MVT::i32 ? WebAssembly::COPY_I32
1097 : WebAssembly::COPY_I64,
1098 DL, VT, Src),
1099 0);
1100 return Op.getNode()->getNumValues() == 1
1101 ? DAG.getCopyToReg(Chain, DL, Reg, Copy)
1102 : DAG.getCopyToReg(Chain, DL, Reg, Copy,
1103 Op.getNumOperands() == 4 ? Op.getOperand(3)
1104 : SDValue());
1105 }
1106 return SDValue();
1107}
1108
1109SDValue WebAssemblyTargetLowering::LowerFrameIndex(SDValue Op,
1110 SelectionDAG &DAG) const {
1111 int FI = cast<FrameIndexSDNode>(Op)->getIndex();
1112 return DAG.getTargetFrameIndex(FI, Op.getValueType());
1113}
1114
1115SDValue WebAssemblyTargetLowering::LowerRETURNADDR(SDValue Op,
1116 SelectionDAG &DAG) const {
1117 SDLoc DL(Op);
1118
1119 if (!Subtarget->getTargetTriple().isOSEmscripten()) {
1120 fail(DL, DAG,
1121 "Non-Emscripten WebAssembly hasn't implemented "
1122 "__builtin_return_address");
1123 return SDValue();
1124 }
1125
1126 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
1127 return SDValue();
1128
1129 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1130 MakeLibCallOptions CallOptions;
1131 return makeLibCall(DAG, RTLIB::RETURN_ADDRESS, Op.getValueType(),
1132 {DAG.getConstant(Depth, DL, MVT::i32)}, CallOptions, DL)
1133 .first;
1134}
1135
1136SDValue WebAssemblyTargetLowering::LowerFRAMEADDR(SDValue Op,
1137 SelectionDAG &DAG) const {
1138 // Non-zero depths are not supported by WebAssembly currently. Use the
1139 // legalizer's default expansion, which is to return 0 (what this function is
1140 // documented to do).
1141 if (Op.getConstantOperandVal(0) > 0)
1142 return SDValue();
1143
1144 DAG.getMachineFunction().getFrameInfo().setFrameAddressIsTaken(true);
1145 EVT VT = Op.getValueType();
1146 Register FP =
1147 Subtarget->getRegisterInfo()->getFrameRegister(DAG.getMachineFunction());
1148 return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), FP, VT);
1149}
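
For reference, these two hooks lower the values behind Clang's __builtin_return_address and __builtin_frame_address. A minimal caller-side sketch, separate from the analyzed sources and using only those standard builtins (the helper names here are invented):

// Compiled on its own, not as part of LLVM.
extern "C" void *current_frame() {
  // Depth 0 is the only depth LowerFRAMEADDR materializes (a copy from the
  // frame register); non-zero depths fall back to the default expansion.
  return __builtin_frame_address(0);
}

extern "C" void *caller_address() {
  // LowerRETURNADDR requires a constant depth and, on Emscripten, emits a
  // libcall with that depth as an i32 argument.
  return __builtin_return_address(0);
}
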
1150
1151SDValue WebAssemblyTargetLowering::LowerGlobalAddress(SDValue Op,
1152 SelectionDAG &DAG) const {
1153 SDLoc DL(Op);
1154 const auto *GA = cast<GlobalAddressSDNode>(Op);
1155 EVT VT = Op.getValueType();
1156   assert(GA->getTargetFlags() == 0 &&
1157          "Unexpected target flags on generic GlobalAddressSDNode");
1158 if (GA->getAddressSpace() != 0)
1159 fail(DL, DAG, "WebAssembly only expects the 0 address space");
1160
1161 unsigned OperandFlags = 0;
1162 if (isPositionIndependent()) {
1163 const GlobalValue *GV = GA->getGlobal();
1164 if (getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV)) {
1165 MachineFunction &MF = DAG.getMachineFunction();
1166 MVT PtrVT = getPointerTy(MF.getDataLayout());
1167 const char *BaseName;
1168 if (GV->getValueType()->isFunctionTy()) {
1169 BaseName = MF.createExternalSymbolName("__table_base");
1170 OperandFlags = WebAssemblyII::MO_TABLE_BASE_REL;
1171 }
1172 else {
1173 BaseName = MF.createExternalSymbolName("__memory_base");
1174 OperandFlags = WebAssemblyII::MO_MEMORY_BASE_REL;
1175 }
1176 SDValue BaseAddr =
1177 DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT,
1178 DAG.getTargetExternalSymbol(BaseName, PtrVT));
1179
1180 SDValue SymAddr = DAG.getNode(
1181 WebAssemblyISD::WrapperPIC, DL, VT,
1182 DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT, GA->getOffset(),
1183 OperandFlags));
1184
1185 return DAG.getNode(ISD::ADD, DL, VT, BaseAddr, SymAddr);
1186 } else {
1187 OperandFlags = WebAssemblyII::MO_GOT;
1188 }
1189 }
1190
1191 return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1192 DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT,
1193 GA->getOffset(), OperandFlags));
1194}
1195
1196SDValue
1197WebAssemblyTargetLowering::LowerExternalSymbol(SDValue Op,
1198 SelectionDAG &DAG) const {
1199 SDLoc DL(Op);
1200 const auto *ES = cast<ExternalSymbolSDNode>(Op);
1201 EVT VT = Op.getValueType();
1202   assert(ES->getTargetFlags() == 0 &&
1203          "Unexpected target flags on generic ExternalSymbolSDNode");
1204 return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1205 DAG.getTargetExternalSymbol(ES->getSymbol(), VT));
1206}
1207
1208SDValue WebAssemblyTargetLowering::LowerJumpTable(SDValue Op,
1209 SelectionDAG &DAG) const {
1210 // There's no need for a Wrapper node because we always incorporate a jump
1211 // table operand into a BR_TABLE instruction, rather than ever
1212 // materializing it in a register.
1213 const JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
1214 return DAG.getTargetJumpTable(JT->getIndex(), Op.getValueType(),
1215 JT->getTargetFlags());
1216}
1217
1218SDValue WebAssemblyTargetLowering::LowerBR_JT(SDValue Op,
1219 SelectionDAG &DAG) const {
1220 SDLoc DL(Op);
1221 SDValue Chain = Op.getOperand(0);
1222 const auto *JT = cast<JumpTableSDNode>(Op.getOperand(1));
1223 SDValue Index = Op.getOperand(2);
1224   assert(JT->getTargetFlags() == 0 && "WebAssembly doesn't set target flags");
1225
1226 SmallVector<SDValue, 8> Ops;
1227 Ops.push_back(Chain);
1228 Ops.push_back(Index);
1229
1230 MachineJumpTableInfo *MJTI = DAG.getMachineFunction().getJumpTableInfo();
1231 const auto &MBBs = MJTI->getJumpTables()[JT->getIndex()].MBBs;
1232
1233 // Add an operand for each case.
1234 for (auto MBB : MBBs)
1235 Ops.push_back(DAG.getBasicBlock(MBB));
1236
1237   // TODO: For now, we just pick something arbitrary for a default case.
1238 // We really want to sniff out the guard and put in the real default case (and
1239 // delete the guard).
1240 Ops.push_back(DAG.getBasicBlock(MBBs[0]));
1241
1242 return DAG.getNode(WebAssemblyISD::BR_TABLE, DL, MVT::Other, Ops);
1243}
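
As an aside, the operand list assembled above — chain, index, one destination per jump-table entry, then a default — can be modeled with plain containers; reusing the first destination as the default mirrors the TODO at line 1237. A sketch under those assumptions, with an invented name and ints standing in for basic blocks:

#include <vector>

// Toy model of the BR_TABLE operand list built in LowerBR_JT: push the chain,
// the index, each case's destination, then reuse the first destination as a
// placeholder default.
std::vector<int> buildBrTableOps(int Chain, int Index,
                                 const std::vector<int> &CaseDests) {
  std::vector<int> Ops{Chain, Index};
  Ops.insert(Ops.end(), CaseDests.begin(), CaseDests.end());
  Ops.push_back(CaseDests.front()); // arbitrary default, per the TODO
  return Ops;
}
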
1244
1245SDValue WebAssemblyTargetLowering::LowerVASTART(SDValue Op,
1246 SelectionDAG &DAG) const {
1247 SDLoc DL(Op);
1248 EVT PtrVT = getPointerTy(DAG.getMachineFunction().getDataLayout());
1249
1250 auto *MFI = DAG.getMachineFunction().getInfo<WebAssemblyFunctionInfo>();
1251 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
1252
1253 SDValue ArgN = DAG.getCopyFromReg(DAG.getEntryNode(), DL,
1254 MFI->getVarargBufferVreg(), PtrVT);
1255 return DAG.getStore(Op.getOperand(0), DL, ArgN, Op.getOperand(1),
1256 MachinePointerInfo(SV), 0);
1257}
1258
1259SDValue WebAssemblyTargetLowering::LowerIntrinsic(SDValue Op,
1260 SelectionDAG &DAG) const {
1261 MachineFunction &MF = DAG.getMachineFunction();
1262 unsigned IntNo;
1263 switch (Op.getOpcode()) {
1264 case ISD::INTRINSIC_VOID:
1265 case ISD::INTRINSIC_W_CHAIN:
1266 IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
1267 break;
1268 case ISD::INTRINSIC_WO_CHAIN:
1269 IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1270 break;
1271 default:
1272 llvm_unreachable("Invalid intrinsic")::llvm::llvm_unreachable_internal("Invalid intrinsic", "/build/llvm-toolchain-snapshot-11~++20200226111113+80d7e473e0b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1272)
;
1273 }
1274 SDLoc DL(Op);
1275
1276 switch (IntNo) {
1277 default:
1278 return SDValue(); // Don't custom lower most intrinsics.
1279
1280 case Intrinsic::wasm_lsda: {
1281 EVT VT = Op.getValueType();
1282 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1283 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
1284 auto &Context = MF.getMMI().getContext();
1285 MCSymbol *S = Context.getOrCreateSymbol(Twine("GCC_except_table") +
1286 Twine(MF.getFunctionNumber()));
1287 return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1288 DAG.getMCSymbol(S, PtrVT));
1289 }
1290
1291 case Intrinsic::wasm_throw: {
1292 // We only support C++ exceptions for now
1293 int Tag = cast<ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
1294 if (Tag != CPP_EXCEPTION)
1295 llvm_unreachable("Invalid tag!")::llvm::llvm_unreachable_internal("Invalid tag!", "/build/llvm-toolchain-snapshot-11~++20200226111113+80d7e473e0b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1295)
;
1296 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1297 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
1298 const char *SymName = MF.createExternalSymbolName("__cpp_exception");
1299 SDValue SymNode = DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT,
1300 DAG.getTargetExternalSymbol(SymName, PtrVT));
1301 return DAG.getNode(WebAssemblyISD::THROW, DL,
1302 MVT::Other, // outchain type
1303 {
1304 Op.getOperand(0), // inchain
1305 SymNode, // exception symbol
1306 Op.getOperand(3) // thrown value
1307 });
1308 }
1309 }
1310}
1311
1312SDValue
1313WebAssemblyTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
1314 SelectionDAG &DAG) const {
1315 SDLoc DL(Op);
1316 // If sign extension operations are disabled, allow sext_inreg only if operand
1317 // is a vector extract of an i8 or i16 lane. SIMD does not depend on sign
1318 // extension operations, but allowing sext_inreg in this context lets us have
1319 // simple patterns to select extract_lane_s instructions. Expanding sext_inreg
1320 // everywhere would be simpler in this file, but would necessitate large and
1321 // brittle patterns to undo the expansion and select extract_lane_s
1322 // instructions.
1323   assert(!Subtarget->hasSignExt() && Subtarget->hasSIMD128());
1324 if (Op.getOperand(0).getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1325 return SDValue();
1326
1327 const SDValue &Extract = Op.getOperand(0);
1328 MVT VecT = Extract.getOperand(0).getSimpleValueType();
1329 if (VecT.getVectorElementType().getSizeInBits() > 32)
1330 return SDValue();
1331 MVT ExtractedLaneT = static_cast<VTSDNode *>(Op.getOperand(1).getNode())
1332 ->getVT()
1333 .getSimpleVT();
1334 MVT ExtractedVecT =
1335 MVT::getVectorVT(ExtractedLaneT, 128 / ExtractedLaneT.getSizeInBits());
1336 if (ExtractedVecT == VecT)
1337 return Op;
1338
1339 // Bitcast vector to appropriate type to ensure ISel pattern coverage
1340 const SDValue &Index = Extract.getOperand(1);
1341 unsigned IndexVal =
1342 static_cast<ConstantSDNode *>(Index.getNode())->getZExtValue();
1343 unsigned Scale =
1344 ExtractedVecT.getVectorNumElements() / VecT.getVectorNumElements();
1345   assert(Scale > 1);
1346 SDValue NewIndex =
1347 DAG.getConstant(IndexVal * Scale, DL, Index.getValueType());
1348 SDValue NewExtract = DAG.getNode(
1349 ISD::EXTRACT_VECTOR_ELT, DL, Extract.getValueType(),
1350 DAG.getBitcast(ExtractedVecT, Extract.getOperand(0)), NewIndex);
1351 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, Op.getValueType(), NewExtract,
1352 Op.getOperand(1));
1353}
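
The index arithmetic above (NewIndex = IndexVal * Scale) is easy to check on concrete numbers. A small self-contained sketch, assuming WebAssembly's lane numbering, where lane I of the wider-element vector starts at lane I * Scale of the narrower-element view (the helper name rescaleLaneIndex is invented):

#include <cassert>
#include <cstddef>

// Rescale a lane index when a vector is reinterpreted with a narrower
// element type, mirroring the Scale computation in LowerSIGN_EXTEND_INREG.
static std::size_t rescaleLaneIndex(std::size_t IndexVal,
                                    std::size_t WideLanes,
                                    std::size_t NarrowLanes) {
  std::size_t Scale = NarrowLanes / WideLanes;
  assert(Scale > 1 && "only used when the extracted lane type is narrower");
  return IndexVal * Scale;
}

int main() {
  // Extracting an i16 lane from a v4i32 and sign-extending it: the bitcast
  // v8i16 view has twice as many lanes, so original lane 3 becomes lane 6.
  assert(rescaleLaneIndex(3, /*WideLanes=*/4, /*NarrowLanes=*/8) == 6);
  // An i8 lane out of a v4i32: in the v16i8 view, lane 2 becomes lane 8.
  assert(rescaleLaneIndex(2, /*WideLanes=*/4, /*NarrowLanes=*/16) == 8);
  return 0;
}
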
1354
1355SDValue WebAssemblyTargetLowering::LowerBUILD_VECTOR(SDValue Op,
1356 SelectionDAG &DAG) const {
1357 SDLoc DL(Op);
1358 const EVT VecT = Op.getValueType();
1359 const EVT LaneT = Op.getOperand(0).getValueType();
1360 const size_t Lanes = Op.getNumOperands();
1361 bool CanSwizzle = Subtarget->hasUnimplementedSIMD128() && VecT == MVT::v16i8;
1362
1363 // BUILD_VECTORs are lowered to the instruction that initializes the highest
1364 // possible number of lanes at once followed by a sequence of replace_lane
1365 // instructions to individually initialize any remaining lanes.
1366
1367 // TODO: Tune this. For example, lanewise swizzling is very expensive, so
1368 // swizzled lanes should be given greater weight.
1369
1370 // TODO: Investigate building vectors by shuffling together vectors built by
1371 // separately specialized means.
1372
1373 auto IsConstant = [](const SDValue &V) {
1374 return V.getOpcode() == ISD::Constant || V.getOpcode() == ISD::ConstantFP;
1375 };
1376
1377 // Returns the source vector and index vector pair if they exist. Checks for:
1378 // (extract_vector_elt
1379 // $src,
1380 // (sign_extend_inreg (extract_vector_elt $indices, $i))
1381 // )
1382 auto GetSwizzleSrcs = [](size_t I, const SDValue &Lane) {
1383 auto Bail = std::make_pair(SDValue(), SDValue());
1384 if (Lane->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1385 return Bail;
1386 const SDValue &SwizzleSrc = Lane->getOperand(0);
1387 const SDValue &IndexExt = Lane->getOperand(1);
1388 if (IndexExt->getOpcode() != ISD::SIGN_EXTEND_INREG)
1389 return Bail;
1390 const SDValue &Index = IndexExt->getOperand(0);
1391 if (Index->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1392 return Bail;
1393 const SDValue &SwizzleIndices = Index->getOperand(0);
1394 if (SwizzleSrc.getValueType() != MVT::v16i8 ||
1395 SwizzleIndices.getValueType() != MVT::v16i8 ||
1396 Index->getOperand(1)->getOpcode() != ISD::Constant ||
1397 Index->getConstantOperandVal(1) != I)
1398 return Bail;
1399 return std::make_pair(SwizzleSrc, SwizzleIndices);
1400 };
1401
1402 using ValueEntry = std::pair<SDValue, size_t>;
1403 SmallVector<ValueEntry, 16> SplatValueCounts;
1404
1405 using SwizzleEntry = std::pair<std::pair<SDValue, SDValue>, size_t>;
1406 SmallVector<SwizzleEntry, 16> SwizzleCounts;
1407
1408 auto AddCount = [](auto &Counts, const auto &Val) {
1409 auto CountIt = std::find_if(Counts.begin(), Counts.end(),
1410 [&Val](auto E) { return E.first == Val; });
1411 if (CountIt == Counts.end()) {
1412 Counts.emplace_back(Val, 1);
1413 } else {
1414 CountIt->second++;
1415 }
1416 };
1417
1418 auto GetMostCommon = [](auto &Counts) {
1419 auto CommonIt =
1420 std::max_element(Counts.begin(), Counts.end(),
1421 [](auto A, auto B) { return A.second < B.second; });
1422     assert(CommonIt != Counts.end() && "Unexpected all-undef build_vector");
1423 return *CommonIt;
1424 };
1425
1426 size_t NumConstantLanes = 0;
1427
1428 // Count eligible lanes for each type of vector creation op
1429   for (size_t I = 0; I < Lanes; ++I) {
2.1
'I' is < 'Lanes'
3
Loop condition is true. Entering loop body
6
Assuming 'I' is >= 'Lanes'
7
Loop condition is false. Execution continues on line 1445
1430 const SDValue &Lane = Op->getOperand(I);
1431 if (Lane.isUndef())
4
Taking true branch
1432 continue;
5
Execution continues on line 1429
1433
1434 AddCount(SplatValueCounts, Lane);
1435
1436 if (IsConstant(Lane)) {
1437 NumConstantLanes++;
1438 } else if (CanSwizzle) {
1439 auto SwizzleSrcs = GetSwizzleSrcs(I, Lane);
1440 if (SwizzleSrcs.first)
1441 AddCount(SwizzleCounts, SwizzleSrcs);
1442 }
1443 }
1444
1445 SDValue SplatValue;
1446 size_t NumSplatLanes;
1447 std::tie(SplatValue, NumSplatLanes) = GetMostCommon(SplatValueCounts);
1448
1449 SDValue SwizzleSrc;
1450 SDValue SwizzleIndices;
1451 size_t NumSwizzleLanes = 0;
1452 if (SwizzleCounts.size())
8
Assuming the condition is false
9
Taking false branch
1453 std::forward_as_tuple(std::tie(SwizzleSrc, SwizzleIndices),
1454 NumSwizzleLanes) = GetMostCommon(SwizzleCounts);
1455
1456 // Predicate returning true if the lane is properly initialized by the
1457 // original instruction
1458 std::function<bool(size_t, const SDValue &)> IsLaneConstructed;
1459 SDValue Result;
1460 if (Subtarget->hasUnimplementedSIMD128()) {
10
Taking true branch
1461 // Prefer swizzles over vector consts over splats
1462 if (NumSwizzleLanes >= NumSplatLanes &&
11
Assuming 'NumSwizzleLanes' is < 'NumSplatLanes'
1463 NumSwizzleLanes >= NumConstantLanes) {
1464 Result = DAG.getNode(WebAssemblyISD::SWIZZLE, DL, VecT, SwizzleSrc,
1465 SwizzleIndices);
1466 auto Swizzled = std::make_pair(SwizzleSrc, SwizzleIndices);
1467 IsLaneConstructed = [&, Swizzled](size_t I, const SDValue &Lane) {
1468 return Swizzled == GetSwizzleSrcs(I, Lane);
1469 };
1470     } else if (NumConstantLanes >= NumSplatLanes) {
11.1
'NumConstantLanes' is < 'NumSplatLanes'
12
Taking false branch
1471 SmallVector<SDValue, 16> ConstLanes;
1472 for (const SDValue &Lane : Op->op_values()) {
1473 if (IsConstant(Lane)) {
1474 ConstLanes.push_back(Lane);
1475 } else if (LaneT.isFloatingPoint()) {
1476 ConstLanes.push_back(DAG.getConstantFP(0, DL, LaneT));
1477 } else {
1478 ConstLanes.push_back(DAG.getConstant(0, DL, LaneT));
1479 }
1480 }
1481 Result = DAG.getBuildVector(VecT, DL, ConstLanes);
1482 IsLaneConstructed = [&](size_t _, const SDValue &Lane) {
1483 return IsConstant(Lane);
1484 };
1485 }
1486 }
1487 if (!Result) {
13
Taking true branch
1488 // Use a splat, but possibly a load_splat
1489 LoadSDNode *SplattedLoad;
1490 if (Subtarget->hasUnimplementedSIMD128() &&
15
Assuming pointer value is null
16
Taking false branch
1491 (SplattedLoad = dyn_cast<LoadSDNode>(SplatValue)) &&
14
Assuming 'SplattedLoad' is null
1492 SplattedLoad->getMemoryVT() == VecT.getVectorElementType()) {
1493 Result = DAG.getMemIntrinsicNode(
1494 WebAssemblyISD::LOAD_SPLAT, DL, DAG.getVTList(VecT),
1495 {SplattedLoad->getChain(), SplattedLoad->getBasePtr(),
1496 SplattedLoad->getOffset()},
1497 SplattedLoad->getMemoryVT(), SplattedLoad->getMemOperand());
1498 } else {
1499 Result = DAG.getSplatBuildVector(VecT, DL, SplatValue);
17
Value assigned to 'Op.Node'
18
Calling 'SelectionDAG::getSplatBuildVector'
1500 }
1501 IsLaneConstructed = [&](size_t _, const SDValue &Lane) {
1502 return Lane == SplatValue;
1503 };
1504 }
1505
1506 // Add replace_lane instructions for any unhandled values
1507 for (size_t I = 0; I < Lanes; ++I) {
1508 const SDValue &Lane = Op->getOperand(I);
1509 if (!Lane.isUndef() && !IsLaneConstructed(I, Lane))
1510 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VecT, Result, Lane,
1511 DAG.getConstant(I, DL, MVT::i32));
1512 }
1513
1514 return Result;
1515}
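
The path the report follows through this function depends on the AddCount/GetMostCommon scheme: counts are accumulated only for lanes that are not undef, and GetMostCommon assumes the count list is non-empty (the assert at line 1422 compiles away under NDEBUG). A self-contained model of that scheme, with ints standing in for SDValues and an invented main, shows the degenerate shape the analyzer appears to be tracking toward getSplatBuildVector:

#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <utility>
#include <vector>

// Minimal model of SplatValueCounts / GetMostCommon: count each defined lane,
// then pick the entry with the highest count. If every lane is "undef", the
// count list stays empty and max_element returns end().
int main() {
  std::vector<int> Lanes;                            // pretend every lane was undef
  std::vector<std::pair<int, std::size_t>> Counts;
  for (int Lane : Lanes) {
    auto It = std::find_if(Counts.begin(), Counts.end(),
                           [Lane](const auto &E) { return E.first == Lane; });
    if (It == Counts.end())
      Counts.emplace_back(Lane, 1);
    else
      ++It->second;
  }
  auto MostCommon = std::max_element(
      Counts.begin(), Counts.end(),
      [](const auto &A, const auto &B) { return A.second < B.second; });
  std::printf("have a most-common lane: %s\n",
              MostCommon == Counts.end() ? "no" : "yes");
  return 0;
}

One possible hardening, offered only as speculation here, would be to bail out of the splat path (for example to an undef vector) whenever SplatValue carries no node, so that getSplatBuildVector is never reached with a null pointer.
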
1516
1517SDValue
1518WebAssemblyTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
1519 SelectionDAG &DAG) const {
1520 SDLoc DL(Op);
1521 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op.getNode())->getMask();
1522 MVT VecType = Op.getOperand(0).getSimpleValueType();
1523   assert(VecType.is128BitVector() && "Unexpected shuffle vector type");
1524 size_t LaneBytes = VecType.getVectorElementType().getSizeInBits() / 8;
1525
1526 // Space for two vector args and sixteen mask indices
1527 SDValue Ops[18];
1528 size_t OpIdx = 0;
1529 Ops[OpIdx++] = Op.getOperand(0);
1530 Ops[OpIdx++] = Op.getOperand(1);
1531
1532 // Expand mask indices to byte indices and materialize them as operands
1533 for (int M : Mask) {
1534 for (size_t J = 0; J < LaneBytes; ++J) {
1535 // Lower undefs (represented by -1 in mask) to zero
1536 uint64_t ByteIndex = M == -1 ? 0 : (uint64_t)M * LaneBytes + J;
1537 Ops[OpIdx++] = DAG.getConstant(ByteIndex, DL, MVT::i32);
1538 }
1539 }
1540
1541 return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops);
1542}
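
The mask expansion above turns each lane index into LaneBytes consecutive byte indices, with -1 (undef) lanes lowered to byte 0. A standalone sketch of just that arithmetic, with plain integers in place of DAG constants (expandMask is an invented name):

#include <cstddef>
#include <cstdint>
#include <vector>

// Expand a lane-level shuffle mask into the byte-level indices that the
// WebAssembly SHUFFLE node consumes, as LowerVECTOR_SHUFFLE does.
std::vector<uint64_t> expandMask(const std::vector<int> &Mask,
                                 std::size_t LaneBytes) {
  std::vector<uint64_t> ByteIndices;
  for (int M : Mask)
    for (std::size_t J = 0; J < LaneBytes; ++J)
      ByteIndices.push_back(M == -1 ? 0 : uint64_t(M) * LaneBytes + J);
  return ByteIndices;
}

// For a v4i32 shuffle with mask {2, -1}, LaneBytes is 4, so the first lane
// expands to bytes 8, 9, 10, 11 and the undef lane to four zeros.
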
1543
1544SDValue WebAssemblyTargetLowering::LowerSETCC(SDValue Op,
1545 SelectionDAG &DAG) const {
1546 SDLoc DL(Op);
1547 // The legalizer does not know how to expand the comparison modes of i64x2
1548 // vectors because no comparison modes are supported. We could solve this by
1549 // expanding all i64x2 SETCC nodes, but that seems to expand f64x2 SETCC nodes
1550 // (which return i64x2 results) as well. So instead we manually unroll i64x2
1551 // comparisons here.
1552   assert(Op->getOperand(0)->getSimpleValueType(0) == MVT::v2i64);
1553 SmallVector<SDValue, 2> LHS, RHS;
1554 DAG.ExtractVectorElements(Op->getOperand(0), LHS);
1555 DAG.ExtractVectorElements(Op->getOperand(1), RHS);
1556 const SDValue &CC = Op->getOperand(2);
1557 auto MakeLane = [&](unsigned I) {
1558 return DAG.getNode(ISD::SELECT_CC, DL, MVT::i64, LHS[I], RHS[I],
1559 DAG.getConstant(uint64_t(-1), DL, MVT::i64),
1560 DAG.getConstant(uint64_t(0), DL, MVT::i64), CC);
1561 };
1562 return DAG.getBuildVector(Op->getValueType(0), DL,
1563 {MakeLane(0), MakeLane(1)});
1564}
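
Each unrolled lane above is a SELECT_CC producing all-ones for a true comparison and zero for a false one, which is how SIMD comparison results are represented. A scalar model of one lane, illustration only, using signed less-than as the condition code:

#include <cassert>
#include <cstdint>

// One lane of the manually unrolled i64x2 comparison: true lanes become
// all-ones (-1), false lanes become zero, matching the SELECT_CC operands
// built in LowerSETCC.
static uint64_t compareLane(int64_t Lhs, int64_t Rhs) {
  return Lhs < Rhs ? ~uint64_t(0) : uint64_t(0);
}

int main() {
  assert(compareLane(1, 2) == UINT64_MAX);
  assert(compareLane(2, 1) == 0);
  return 0;
}
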
1565
1566SDValue
1567WebAssemblyTargetLowering::LowerAccessVectorElement(SDValue Op,
1568 SelectionDAG &DAG) const {
1569 // Allow constant lane indices, expand variable lane indices
1570 SDNode *IdxNode = Op.getOperand(Op.getNumOperands() - 1).getNode();
1571 if (isa<ConstantSDNode>(IdxNode) || IdxNode->isUndef())
1572 return Op;
1573 else
1574 // Perform default expansion
1575 return SDValue();
1576}
1577
1578static SDValue unrollVectorShift(SDValue Op, SelectionDAG &DAG) {
1579 EVT LaneT = Op.getSimpleValueType().getVectorElementType();
1580 // 32-bit and 64-bit unrolled shifts will have proper semantics
1581 if (LaneT.bitsGE(MVT::i32))
1582 return DAG.UnrollVectorOp(Op.getNode());
1583 // Otherwise mask the shift value to get proper semantics from 32-bit shift
1584 SDLoc DL(Op);
1585 SDValue ShiftVal = Op.getOperand(1);
1586 uint64_t MaskVal = LaneT.getSizeInBits() - 1;
1587 SDValue MaskedShiftVal = DAG.getNode(
1588 ISD::AND, // mask opcode
1589 DL, ShiftVal.getValueType(), // masked value type
1590 ShiftVal, // original shift value operand
1591 DAG.getConstant(MaskVal, DL, ShiftVal.getValueType()) // mask operand
1592 );
1593
1594 return DAG.UnrollVectorOp(
1595 DAG.getNode(Op.getOpcode(), // original shift opcode
1596 DL, Op.getValueType(), // original return type
1597 Op.getOperand(0), // original vector operand,
1598 MaskedShiftVal // new masked shift value operand
1599 )
1600 .getNode());
1601}
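
The masking step above matters because, once an i8x16 or i16x8 shift is unrolled to 32-bit scalar shifts, an out-of-range count no longer wraps the way the SIMD instruction does (WebAssembly vector shifts take the count modulo the lane width). A small sketch of the difference, under that assumption:

#include <cstdint>
#include <cstdio>

int main() {
  uint8_t Lane = 0x81;
  unsigned Count = 9; // one more than the i8 lane width

  // Unrolled to a 32-bit shift without masking: the bits shift past the lane.
  uint8_t Unmasked = uint8_t(Lane << Count);       // 0x00

  // With the LaneBits - 1 mask applied first, the count wraps to 1, which is
  // what the 8-bit SIMD shift would have done with a count of 9.
  uint8_t Masked = uint8_t(Lane << (Count & 7));   // 0x02

  std::printf("unmasked=0x%02x masked=0x%02x\n", Unmasked, Masked);
  return 0;
}
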
1602
1603SDValue WebAssemblyTargetLowering::LowerShift(SDValue Op,
1604 SelectionDAG &DAG) const {
1605 SDLoc DL(Op);
1606
1607 // Only manually lower vector shifts
1608   assert(Op.getSimpleValueType().isVector());
1609
1610 // Unroll non-splat vector shifts
1611 BuildVectorSDNode *ShiftVec;
1612 SDValue SplatVal;
1613 if (!(ShiftVec = dyn_cast<BuildVectorSDNode>(Op.getOperand(1).getNode())) ||
1614 !(SplatVal = ShiftVec->getSplatValue()))
1615 return unrollVectorShift(Op, DAG);
1616
1617 // All splats except i64x2 const splats are handled by patterns
1618 auto *SplatConst = dyn_cast<ConstantSDNode>(SplatVal);
1619 if (!SplatConst || Op.getSimpleValueType() != MVT::v2i64)
1620 return Op;
1621
1622 // i64x2 const splats are custom lowered to avoid unnecessary wraps
1623 unsigned Opcode;
1624 switch (Op.getOpcode()) {
1625 case ISD::SHL:
1626 Opcode = WebAssemblyISD::VEC_SHL;
1627 break;
1628 case ISD::SRA:
1629 Opcode = WebAssemblyISD::VEC_SHR_S;
1630 break;
1631 case ISD::SRL:
1632 Opcode = WebAssemblyISD::VEC_SHR_U;
1633 break;
1634 default:
1635 llvm_unreachable("unexpected opcode")::llvm::llvm_unreachable_internal("unexpected opcode", "/build/llvm-toolchain-snapshot-11~++20200226111113+80d7e473e0b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1635)
;
1636 }
1637 APInt Shift = SplatConst->getAPIntValue().zextOrTrunc(32);
1638 return DAG.getNode(Opcode, DL, Op.getValueType(), Op.getOperand(0),
1639 DAG.getConstant(Shift, DL, MVT::i32));
1640}
1641
1642//===----------------------------------------------------------------------===//
1643// WebAssembly Optimization Hooks
1644//===----------------------------------------------------------------------===//

/build/llvm-toolchain-snapshot-11~++20200226111113+80d7e473e0b/llvm/include/llvm/CodeGen/SelectionDAG.h

1//===- llvm/CodeGen/SelectionDAG.h - InstSelection DAG ----------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file declares the SelectionDAG class, and transitively defines the
10// SDNode class and subclasses.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_CODEGEN_SELECTIONDAG_H
15#define LLVM_CODEGEN_SELECTIONDAG_H
16
17#include "llvm/ADT/APFloat.h"
18#include "llvm/ADT/APInt.h"
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/DenseMap.h"
21#include "llvm/ADT/DenseSet.h"
22#include "llvm/ADT/FoldingSet.h"
23#include "llvm/ADT/SetVector.h"
24#include "llvm/ADT/SmallVector.h"
25#include "llvm/ADT/StringMap.h"
26#include "llvm/ADT/ilist.h"
27#include "llvm/ADT/iterator.h"
28#include "llvm/ADT/iterator_range.h"
29#include "llvm/CodeGen/DAGCombine.h"
30#include "llvm/CodeGen/FunctionLoweringInfo.h"
31#include "llvm/CodeGen/ISDOpcodes.h"
32#include "llvm/CodeGen/MachineFunction.h"
33#include "llvm/CodeGen/MachineMemOperand.h"
34#include "llvm/CodeGen/SelectionDAGNodes.h"
35#include "llvm/CodeGen/ValueTypes.h"
36#include "llvm/IR/DebugLoc.h"
37#include "llvm/IR/Instructions.h"
38#include "llvm/IR/Metadata.h"
39#include "llvm/Support/Allocator.h"
40#include "llvm/Support/ArrayRecycler.h"
41#include "llvm/Support/AtomicOrdering.h"
42#include "llvm/Support/Casting.h"
43#include "llvm/Support/CodeGen.h"
44#include "llvm/Support/ErrorHandling.h"
45#include "llvm/Support/MachineValueType.h"
46#include "llvm/Support/RecyclingAllocator.h"
47#include <algorithm>
48#include <cassert>
49#include <cstdint>
50#include <functional>
51#include <map>
52#include <string>
53#include <tuple>
54#include <utility>
55#include <vector>
56
57namespace llvm {
58
59class AAResults;
60class BlockAddress;
61class BlockFrequencyInfo;
62class Constant;
63class ConstantFP;
64class ConstantInt;
65class DataLayout;
66struct fltSemantics;
67class GlobalValue;
68struct KnownBits;
69class LegacyDivergenceAnalysis;
70class LLVMContext;
71class MachineBasicBlock;
72class MachineConstantPoolValue;
73class MCSymbol;
74class OptimizationRemarkEmitter;
75class ProfileSummaryInfo;
76class SDDbgValue;
77class SDDbgLabel;
78class SelectionDAG;
79class SelectionDAGTargetInfo;
80class TargetLibraryInfo;
81class TargetLowering;
82class TargetMachine;
83class TargetSubtargetInfo;
84class Value;
85
86class SDVTListNode : public FoldingSetNode {
87 friend struct FoldingSetTrait<SDVTListNode>;
88
89 /// A reference to an Interned FoldingSetNodeID for this node.
90 /// The Allocator in SelectionDAG holds the data.
91 /// SDVTList contains all types which are frequently accessed in SelectionDAG.
92 /// The size of this list is not expected to be big so it won't introduce
93 /// a memory penalty.
94 FoldingSetNodeIDRef FastID;
95 const EVT *VTs;
96 unsigned int NumVTs;
97 /// The hash value for SDVTList is fixed, so cache it to avoid
98 /// hash calculation.
99 unsigned HashValue;
100
101public:
102 SDVTListNode(const FoldingSetNodeIDRef ID, const EVT *VT, unsigned int Num) :
103 FastID(ID), VTs(VT), NumVTs(Num) {
104 HashValue = ID.ComputeHash();
105 }
106
107 SDVTList getSDVTList() {
108 SDVTList result = {VTs, NumVTs};
109 return result;
110 }
111};
112
113/// Specialize FoldingSetTrait for SDVTListNode
114/// to avoid computing temp FoldingSetNodeID and hash value.
115template<> struct FoldingSetTrait<SDVTListNode> : DefaultFoldingSetTrait<SDVTListNode> {
116 static void Profile(const SDVTListNode &X, FoldingSetNodeID& ID) {
117 ID = X.FastID;
118 }
119
120 static bool Equals(const SDVTListNode &X, const FoldingSetNodeID &ID,
121 unsigned IDHash, FoldingSetNodeID &TempID) {
122 if (X.HashValue != IDHash)
123 return false;
124 return ID == X.FastID;
125 }
126
127 static unsigned ComputeHash(const SDVTListNode &X, FoldingSetNodeID &TempID) {
128 return X.HashValue;
129 }
130};
131
132template <> struct ilist_alloc_traits<SDNode> {
133 static void deleteNode(SDNode *) {
134 llvm_unreachable("ilist_traits<SDNode> shouldn't see a deleteNode call!")::llvm::llvm_unreachable_internal("ilist_traits<SDNode> shouldn't see a deleteNode call!"
, "/build/llvm-toolchain-snapshot-11~++20200226111113+80d7e473e0b/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 134)
;
135 }
136};
137
138/// Keeps track of dbg_value information through SDISel. We do
139/// not build SDNodes for these so as not to perturb the generated code;
140/// instead the info is kept off to the side in this structure. Each SDNode may
141/// have one or more associated dbg_value entries. This information is kept in
142/// DbgValMap.
143/// Byval parameters are handled separately because they don't use alloca's,
144/// which busts the normal mechanism. There is good reason for handling all
145/// parameters separately: they may not have code generated for them, they
146/// should always go at the beginning of the function regardless of other code
147/// motion, and debug info for them is potentially useful even if the parameter
148/// is unused. Right now only byval parameters are handled separately.
149class SDDbgInfo {
150 BumpPtrAllocator Alloc;
151 SmallVector<SDDbgValue*, 32> DbgValues;
152 SmallVector<SDDbgValue*, 32> ByvalParmDbgValues;
153 SmallVector<SDDbgLabel*, 4> DbgLabels;
154 using DbgValMapType = DenseMap<const SDNode *, SmallVector<SDDbgValue *, 2>>;
155 DbgValMapType DbgValMap;
156
157public:
158 SDDbgInfo() = default;
159 SDDbgInfo(const SDDbgInfo &) = delete;
160 SDDbgInfo &operator=(const SDDbgInfo &) = delete;
161
162 void add(SDDbgValue *V, const SDNode *Node, bool isParameter) {
163 if (isParameter) {
164 ByvalParmDbgValues.push_back(V);
165 } else DbgValues.push_back(V);
166 if (Node)
167 DbgValMap[Node].push_back(V);
168 }
169
170 void add(SDDbgLabel *L) {
171 DbgLabels.push_back(L);
172 }
173
174 /// Invalidate all DbgValues attached to the node and remove
175 /// it from the Node-to-DbgValues map.
176 void erase(const SDNode *Node);
177
178 void clear() {
179 DbgValMap.clear();
180 DbgValues.clear();
181 ByvalParmDbgValues.clear();
182 DbgLabels.clear();
183 Alloc.Reset();
184 }
185
186 BumpPtrAllocator &getAlloc() { return Alloc; }
187
188 bool empty() const {
189 return DbgValues.empty() && ByvalParmDbgValues.empty() && DbgLabels.empty();
190 }
191
192 ArrayRef<SDDbgValue*> getSDDbgValues(const SDNode *Node) const {
193 auto I = DbgValMap.find(Node);
194 if (I != DbgValMap.end())
195 return I->second;
196 return ArrayRef<SDDbgValue*>();
197 }
198
199 using DbgIterator = SmallVectorImpl<SDDbgValue*>::iterator;
200 using DbgLabelIterator = SmallVectorImpl<SDDbgLabel*>::iterator;
201
202 DbgIterator DbgBegin() { return DbgValues.begin(); }
203 DbgIterator DbgEnd() { return DbgValues.end(); }
204 DbgIterator ByvalParmDbgBegin() { return ByvalParmDbgValues.begin(); }
205 DbgIterator ByvalParmDbgEnd() { return ByvalParmDbgValues.end(); }
206 DbgLabelIterator DbgLabelBegin() { return DbgLabels.begin(); }
207 DbgLabelIterator DbgLabelEnd() { return DbgLabels.end(); }
208};
209
210void checkForCycles(const SelectionDAG *DAG, bool force = false);
211
212/// This is used to represent a portion of an LLVM function in a low-level
213/// Data Dependence DAG representation suitable for instruction selection.
214/// This DAG is constructed as the first step of instruction selection in order
215/// to allow implementation of machine specific optimizations
216/// and code simplifications.
217///
218/// The representation used by the SelectionDAG is a target-independent
219/// representation, which has some similarities to the GCC RTL representation,
220 /// but is significantly simpler and more powerful, and is a graph form instead of a
221/// linear form.
222///
223class SelectionDAG {
224 const TargetMachine &TM;
225 const SelectionDAGTargetInfo *TSI = nullptr;
226 const TargetLowering *TLI = nullptr;
227 const TargetLibraryInfo *LibInfo = nullptr;
228 MachineFunction *MF;
229 Pass *SDAGISelPass = nullptr;
230 LLVMContext *Context;
231 CodeGenOpt::Level OptLevel;
232
233 LegacyDivergenceAnalysis * DA = nullptr;
234 FunctionLoweringInfo * FLI = nullptr;
235
236 /// The function-level optimization remark emitter. Used to emit remarks
237 /// whenever manipulating the DAG.
238 OptimizationRemarkEmitter *ORE;
239
240 ProfileSummaryInfo *PSI = nullptr;
241 BlockFrequencyInfo *BFI = nullptr;
242
243 /// The starting token.
244 SDNode EntryNode;
245
246 /// The root of the entire DAG.
247 SDValue Root;
248
249 /// A linked list of nodes in the current DAG.
250 ilist<SDNode> AllNodes;
251
252 /// The AllocatorType for allocating SDNodes. We use
253 /// pool allocation with recycling.
254 using NodeAllocatorType = RecyclingAllocator<BumpPtrAllocator, SDNode,
255 sizeof(LargestSDNode),
256 alignof(MostAlignedSDNode)>;
257
258 /// Pool allocation for nodes.
259 NodeAllocatorType NodeAllocator;
260
261 /// This structure is used to memoize nodes, automatically performing
262 /// CSE with existing nodes when a duplicate is requested.
263 FoldingSet<SDNode> CSEMap;
264
265 /// Pool allocation for machine-opcode SDNode operands.
266 BumpPtrAllocator OperandAllocator;
267 ArrayRecycler<SDUse> OperandRecycler;
268
269 /// Pool allocation for misc. objects that are created once per SelectionDAG.
270 BumpPtrAllocator Allocator;
271
272 /// Tracks dbg_value and dbg_label information through SDISel.
273 SDDbgInfo *DbgInfo;
274
275 using CallSiteInfo = MachineFunction::CallSiteInfo;
276 using CallSiteInfoImpl = MachineFunction::CallSiteInfoImpl;
277
278 struct CallSiteDbgInfo {
279 CallSiteInfo CSInfo;
280 MDNode *HeapAllocSite = nullptr;
281 };
282
283 DenseMap<const SDNode *, CallSiteDbgInfo> SDCallSiteDbgInfo;
284
285 uint16_t NextPersistentId = 0;
286
287public:
288 /// Clients of various APIs that cause global effects on
289 /// the DAG can optionally implement this interface. This allows the clients
290 /// to handle the various sorts of updates that happen.
291 ///
292 /// A DAGUpdateListener automatically registers itself with DAG when it is
293 /// constructed, and removes itself when destroyed in RAII fashion.
294 struct DAGUpdateListener {
295 DAGUpdateListener *const Next;
296 SelectionDAG &DAG;
297
298 explicit DAGUpdateListener(SelectionDAG &D)
299 : Next(D.UpdateListeners), DAG(D) {
300 DAG.UpdateListeners = this;
301 }
302
303 virtual ~DAGUpdateListener() {
304       assert(DAG.UpdateListeners == this &&
305              "DAGUpdateListeners must be destroyed in LIFO order");
306 DAG.UpdateListeners = Next;
307 }
308
309 /// The node N that was deleted and, if E is not null, an
310 /// equivalent node E that replaced it.
311 virtual void NodeDeleted(SDNode *N, SDNode *E);
312
313 /// The node N that was updated.
314 virtual void NodeUpdated(SDNode *N);
315
316 /// The node N that was inserted.
317 virtual void NodeInserted(SDNode *N);
318 };
319
320 struct DAGNodeDeletedListener : public DAGUpdateListener {
321 std::function<void(SDNode *, SDNode *)> Callback;
322
323 DAGNodeDeletedListener(SelectionDAG &DAG,
324 std::function<void(SDNode *, SDNode *)> Callback)
325 : DAGUpdateListener(DAG), Callback(std::move(Callback)) {}
326
327 void NodeDeleted(SDNode *N, SDNode *E) override { Callback(N, E); }
328
329 private:
330 virtual void anchor();
331 };
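
As the comment at lines 292-293 notes, a listener registers itself in its constructor and unregisters in its destructor, so a scoped instance is enough. A hedged usage sketch that assumes a SelectionDAG reference obtained from the surrounding pass and uses only the API declared above (watchDeletions is an invented name):

#include "llvm/CodeGen/SelectionDAG.h"

// Watch node deletions for the duration of one scope. N is the deleted node;
// E, when non-null, is the equivalent node that replaced it.
void watchDeletions(llvm::SelectionDAG &DAG) {
  llvm::SelectionDAG::DAGNodeDeletedListener Listener(
      DAG, [](llvm::SDNode *N, llvm::SDNode *E) {
        (void)N;
        (void)E; // e.g. update a side table mapping N to E here
      });
  // ... mutate the DAG here; every deletion invokes the callback ...
} // ~DAGUpdateListener pops the listener (LIFO order is asserted above).
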
332
333 /// When true, additional steps are taken to
334 /// ensure that getConstant() and similar functions return DAG nodes that
335 /// have legal types. This is important after type legalization since
336 /// any illegally typed nodes generated after this point will not experience
337 /// type legalization.
338 bool NewNodesMustHaveLegalTypes = false;
339
340private:
341 /// DAGUpdateListener is a friend so it can manipulate the listener stack.
342 friend struct DAGUpdateListener;
343
344 /// Linked list of registered DAGUpdateListener instances.
345 /// This stack is maintained by DAGUpdateListener RAII.
346 DAGUpdateListener *UpdateListeners = nullptr;
347
348 /// Implementation of setSubgraphColor.
349 /// Return whether we had to truncate the search.
350 bool setSubgraphColorHelper(SDNode *N, const char *Color,
351 DenseSet<SDNode *> &visited,
352 int level, bool &printed);
353
354 template <typename SDNodeT, typename... ArgTypes>
355 SDNodeT *newSDNode(ArgTypes &&... Args) {
356 return new (NodeAllocator.template Allocate<SDNodeT>())
357 SDNodeT(std::forward<ArgTypes>(Args)...);
358 }
359
360 /// Build a synthetic SDNodeT with the given args and extract its subclass
361 /// data as an integer (e.g. for use in a folding set).
362 ///
363 /// The args to this function are the same as the args to SDNodeT's
364 /// constructor, except the second arg (assumed to be a const DebugLoc&) is
365 /// omitted.
366 template <typename SDNodeT, typename... ArgTypes>
367 static uint16_t getSyntheticNodeSubclassData(unsigned IROrder,
368 ArgTypes &&... Args) {
369 // The compiler can reduce this expression to a constant iff we pass an
370 // empty DebugLoc. Thankfully, the debug location doesn't have any bearing
371 // on the subclass data.
372 return SDNodeT(IROrder, DebugLoc(), std::forward<ArgTypes>(Args)...)
373 .getRawSubclassData();
374 }
375
376 template <typename SDNodeTy>
377 static uint16_t getSyntheticNodeSubclassData(unsigned Opc, unsigned Order,
378 SDVTList VTs, EVT MemoryVT,
379 MachineMemOperand *MMO) {
380 return SDNodeTy(Opc, Order, DebugLoc(), VTs, MemoryVT, MMO)
381 .getRawSubclassData();
382 }
383
384 void createOperands(SDNode *Node, ArrayRef<SDValue> Vals);
385
386 void removeOperands(SDNode *Node) {
387 if (!Node->OperandList)
388 return;
389 OperandRecycler.deallocate(
390 ArrayRecycler<SDUse>::Capacity::get(Node->NumOperands),
391 Node->OperandList);
392 Node->NumOperands = 0;
393 Node->OperandList = nullptr;
394 }
395 void CreateTopologicalOrder(std::vector<SDNode*>& Order);
396
397public:
398 // Maximum depth for recursive analysis such as computeKnownBits, etc.
399 static constexpr unsigned MaxRecursionDepth = 6;
400
401 explicit SelectionDAG(const TargetMachine &TM, CodeGenOpt::Level);
402 SelectionDAG(const SelectionDAG &) = delete;
403 SelectionDAG &operator=(const SelectionDAG &) = delete;
404 ~SelectionDAG();
405
406 /// Prepare this SelectionDAG to process code in the given MachineFunction.
407 void init(MachineFunction &NewMF, OptimizationRemarkEmitter &NewORE,
408 Pass *PassPtr, const TargetLibraryInfo *LibraryInfo,
409 LegacyDivergenceAnalysis * Divergence,
410 ProfileSummaryInfo *PSIin, BlockFrequencyInfo *BFIin);
411
412 void setFunctionLoweringInfo(FunctionLoweringInfo * FuncInfo) {
413 FLI = FuncInfo;
414 }
415
416 /// Clear state and free memory necessary to make this
417 /// SelectionDAG ready to process a new block.
418 void clear();
419
420 MachineFunction &getMachineFunction() const { return *MF; }
421 const Pass *getPass() const { return SDAGISelPass; }
422
423 const DataLayout &getDataLayout() const { return MF->getDataLayout(); }
424 const TargetMachine &getTarget() const { return TM; }
425 const TargetSubtargetInfo &getSubtarget() const { return MF->getSubtarget(); }
426 const TargetLowering &getTargetLoweringInfo() const { return *TLI; }
427 const TargetLibraryInfo &getLibInfo() const { return *LibInfo; }
428 const SelectionDAGTargetInfo &getSelectionDAGInfo() const { return *TSI; }
429 const LegacyDivergenceAnalysis *getDivergenceAnalysis() const { return DA; }
430 LLVMContext *getContext() const { return Context; }
431 OptimizationRemarkEmitter &getORE() const { return *ORE; }
432 ProfileSummaryInfo *getPSI() const { return PSI; }
433 BlockFrequencyInfo *getBFI() const { return BFI; }
434
435 /// Pop up a GraphViz/gv window with the DAG rendered using 'dot'.
436 void viewGraph(const std::string &Title);
437 void viewGraph();
438
439#ifndef NDEBUG
440 std::map<const SDNode *, std::string> NodeGraphAttrs;
441#endif
442
443 /// Clear all previously defined node graph attributes.
444 /// Intended to be used from a debugging tool (eg. gdb).
445 void clearGraphAttrs();
446
447 /// Set graph attributes for a node. (eg. "color=red".)
448 void setGraphAttrs(const SDNode *N, const char *Attrs);
449
450 /// Get graph attributes for a node. (eg. "color=red".)
451 /// Used from getNodeAttributes.
452 const std::string getGraphAttrs(const SDNode *N) const;
453
454 /// Convenience for setting node color attribute.
455 void setGraphColor(const SDNode *N, const char *Color);
456
457 /// Convenience for setting subgraph color attribute.
458 void setSubgraphColor(SDNode *N, const char *Color);
459
460 using allnodes_const_iterator = ilist<SDNode>::const_iterator;
461
462 allnodes_const_iterator allnodes_begin() const { return AllNodes.begin(); }
463 allnodes_const_iterator allnodes_end() const { return AllNodes.end(); }
464
465 using allnodes_iterator = ilist<SDNode>::iterator;
466
467 allnodes_iterator allnodes_begin() { return AllNodes.begin(); }
468 allnodes_iterator allnodes_end() { return AllNodes.end(); }
469
470 ilist<SDNode>::size_type allnodes_size() const {
471 return AllNodes.size();
472 }
473
474 iterator_range<allnodes_iterator> allnodes() {
475 return make_range(allnodes_begin(), allnodes_end());
476 }
477 iterator_range<allnodes_const_iterator> allnodes() const {
478 return make_range(allnodes_begin(), allnodes_end());
479 }
480
481 /// Return the root tag of the SelectionDAG.
482 const SDValue &getRoot() const { return Root; }
483
484 /// Return the token chain corresponding to the entry of the function.
485 SDValue getEntryNode() const {
486 return SDValue(const_cast<SDNode *>(&EntryNode), 0);
487 }
488
489 /// Set the current root tag of the SelectionDAG.
490 ///
491 const SDValue &setRoot(SDValue N) {
492     assert((!N.getNode() || N.getValueType() == MVT::Other) &&
493            "DAG root value is not a chain!");
494 if (N.getNode())
495 checkForCycles(N.getNode(), this);
496 Root = N;
497 if (N.getNode())
498 checkForCycles(this);
499 return Root;
500 }
501
502#ifndef NDEBUG
503 void VerifyDAGDiverence();
504#endif
505
506 /// This iterates over the nodes in the SelectionDAG, folding
507 /// certain types of nodes together, or eliminating superfluous nodes. The
508 /// Level argument controls whether Combine is allowed to produce nodes and
509 /// types that are illegal on the target.
510 void Combine(CombineLevel Level, AAResults *AA,
511 CodeGenOpt::Level OptLevel);
512
513 /// This transforms the SelectionDAG into a SelectionDAG that
514 /// only uses types natively supported by the target.
515 /// Returns "true" if it made any changes.
516 ///
517 /// Note that this is an involved process that may invalidate pointers into
518 /// the graph.
519 bool LegalizeTypes();
520
521 /// This transforms the SelectionDAG into a SelectionDAG that is
522 /// compatible with the target instruction selector, as indicated by the
523 /// TargetLowering object.
524 ///
525 /// Note that this is an involved process that may invalidate pointers into
526 /// the graph.
527 void Legalize();
528
529 /// Transforms a SelectionDAG node and any operands to it into a node
530 /// that is compatible with the target instruction selector, as indicated by
531 /// the TargetLowering object.
532 ///
533 /// \returns true if \c N is a valid, legal node after calling this.
534 ///
535 /// This essentially runs a single recursive walk of the \c Legalize process
536 /// over the given node (and its operands). This can be used to incrementally
537 /// legalize the DAG. All of the nodes which are directly replaced,
538 /// potentially including N, are added to the output parameter \c
539 /// UpdatedNodes so that the delta to the DAG can be understood by the
540 /// caller.
541 ///
542 /// When this returns false, N has been legalized in a way that make the
543 /// pointer passed in no longer valid. It may have even been deleted from the
544 /// DAG, and so it shouldn't be used further. When this returns true, the
545 /// N passed in is a legal node, and can be immediately processed as such.
546 /// This may still have done some work on the DAG, and will still populate
547 /// UpdatedNodes with any new nodes replacing those originally in the DAG.
548 bool LegalizeOp(SDNode *N, SmallSetVector<SDNode *, 16> &UpdatedNodes);
549
550 /// This transforms the SelectionDAG into a SelectionDAG
551 /// that only uses vector math operations supported by the target. This is
552 /// necessary as a separate step from Legalize because unrolling a vector
553 /// operation can introduce illegal types, which requires running
554 /// LegalizeTypes again.
555 ///
556 /// This returns true if it made any changes; in that case, LegalizeTypes
557 /// is called again before Legalize.
558 ///
559 /// Note that this is an involved process that may invalidate pointers into
560 /// the graph.
561 bool LegalizeVectors();
562
563 /// This method deletes all unreachable nodes in the SelectionDAG.
564 void RemoveDeadNodes();
565
566 /// Remove the specified node from the system. This node must
567 /// have no referrers.
568 void DeleteNode(SDNode *N);
569
570 /// Return an SDVTList that represents the list of values specified.
571 SDVTList getVTList(EVT VT);
572 SDVTList getVTList(EVT VT1, EVT VT2);
573 SDVTList getVTList(EVT VT1, EVT VT2, EVT VT3);
574 SDVTList getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4);
575 SDVTList getVTList(ArrayRef<EVT> VTs);
576
577 //===--------------------------------------------------------------------===//
578 // Node creation methods.
579
580 /// Create a ConstantSDNode wrapping a constant value.
581 /// If VT is a vector type, the constant is splatted into a BUILD_VECTOR.
582 ///
583 /// If only legal types can be produced, this does the necessary
584 /// transformations (e.g., if the vector element type is illegal).
585 /// @{
586 SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT,
587 bool isTarget = false, bool isOpaque = false);
588 SDValue getConstant(const APInt &Val, const SDLoc &DL, EVT VT,
589 bool isTarget = false, bool isOpaque = false);
590
591 SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget = false,
592 bool IsOpaque = false) {
593 return getConstant(APInt::getAllOnesValue(VT.getScalarSizeInBits()), DL,
594 VT, IsTarget, IsOpaque);
595 }
596
597 SDValue getConstant(const ConstantInt &Val, const SDLoc &DL, EVT VT,
598 bool isTarget = false, bool isOpaque = false);
599 SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL,
600 bool isTarget = false);
601 SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL,
602 bool LegalTypes = true);
603 SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL,
604 bool isTarget = false);
605
606 SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT,
607 bool isOpaque = false) {
608 return getConstant(Val, DL, VT, true, isOpaque);
609 }
610 SDValue getTargetConstant(const APInt &Val, const SDLoc &DL, EVT VT,
611 bool isOpaque = false) {
612 return getConstant(Val, DL, VT, true, isOpaque);
613 }
614 SDValue getTargetConstant(const ConstantInt &Val, const SDLoc &DL, EVT VT,
615 bool isOpaque = false) {
616 return getConstant(Val, DL, VT, true, isOpaque);
617 }
618
619 /// Create a true or false constant of type \p VT using the target's
620 /// BooleanContent for type \p OpVT.
621 SDValue getBoolConstant(bool V, const SDLoc &DL, EVT VT, EVT OpVT);
622 /// @}
623
624 /// Create a ConstantFPSDNode wrapping a constant value.
625 /// If VT is a vector type, the constant is splatted into a BUILD_VECTOR.
626 ///
627 /// If only legal types can be produced, this does the necessary
628 /// transformations (e.g., if the vector element type is illegal).
629 /// The forms that take a double should only be used for simple constants
630 /// that can be exactly represented in VT. No checks are made.
631 /// @{
632 SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT,
633 bool isTarget = false);
634 SDValue getConstantFP(const APFloat &Val, const SDLoc &DL, EVT VT,
635 bool isTarget = false);
636 SDValue getConstantFP(const ConstantFP &V, const SDLoc &DL, EVT VT,
637 bool isTarget = false);
638 SDValue getTargetConstantFP(double Val, const SDLoc &DL, EVT VT) {
639 return getConstantFP(Val, DL, VT, true);
640 }
641 SDValue getTargetConstantFP(const APFloat &Val, const SDLoc &DL, EVT VT) {
642 return getConstantFP(Val, DL, VT, true);
643 }
644 SDValue getTargetConstantFP(const ConstantFP &Val, const SDLoc &DL, EVT VT) {
645 return getConstantFP(Val, DL, VT, true);
646 }
647 /// @}
648
649 SDValue getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT,
650 int64_t offset = 0, bool isTargetGA = false,
651 unsigned TargetFlags = 0);
652 SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT,
653 int64_t offset = 0, unsigned TargetFlags = 0) {
654 return getGlobalAddress(GV, DL, VT, offset, true, TargetFlags);
655 }
656 SDValue getFrameIndex(int FI, EVT VT, bool isTarget = false);
657 SDValue getTargetFrameIndex(int FI, EVT VT) {
658 return getFrameIndex(FI, VT, true);
659 }
660 SDValue getJumpTable(int JTI, EVT VT, bool isTarget = false,
661 unsigned TargetFlags = 0);
662 SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags = 0) {
663 return getJumpTable(JTI, VT, true, TargetFlags);
664 }
665 SDValue getConstantPool(const Constant *C, EVT VT, unsigned Align = 0,
666 int Offs = 0, bool isT = false,
667 unsigned TargetFlags = 0);
668 SDValue getTargetConstantPool(const Constant *C, EVT VT, unsigned Align = 0,
669 int Offset = 0, unsigned TargetFlags = 0) {
670 return getConstantPool(C, VT, Align, Offset, true, TargetFlags);
671 }
672 SDValue getConstantPool(MachineConstantPoolValue *C, EVT VT,
673 unsigned Align = 0, int Offs = 0, bool isT=false,
674 unsigned TargetFlags = 0);
675 SDValue getTargetConstantPool(MachineConstantPoolValue *C, EVT VT,
676 unsigned Align = 0, int Offset = 0,
677 unsigned TargetFlags = 0) {
678 return getConstantPool(C, VT, Align, Offset, true, TargetFlags);
679 }
680 SDValue getTargetIndex(int Index, EVT VT, int64_t Offset = 0,
681 unsigned TargetFlags = 0);
682 // When generating a branch to a BB, we don't in general know enough
683 // to provide debug info for the BB at that time, so keep this one around.
684 SDValue getBasicBlock(MachineBasicBlock *MBB);
685 SDValue getBasicBlock(MachineBasicBlock *MBB, SDLoc dl);
686 SDValue getExternalSymbol(const char *Sym, EVT VT);
687 SDValue getExternalSymbol(const char *Sym, const SDLoc &dl, EVT VT);
688 SDValue getTargetExternalSymbol(const char *Sym, EVT VT,
689 unsigned TargetFlags = 0);
690 SDValue getMCSymbol(MCSymbol *Sym, EVT VT);
691
692 SDValue getValueType(EVT);
693 SDValue getRegister(unsigned Reg, EVT VT);
694 SDValue getRegisterMask(const uint32_t *RegMask);
695 SDValue getEHLabel(const SDLoc &dl, SDValue Root, MCSymbol *Label);
696 SDValue getLabelNode(unsigned Opcode, const SDLoc &dl, SDValue Root,
697 MCSymbol *Label);
698 SDValue getBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset = 0,
699 bool isTarget = false, unsigned TargetFlags = 0);
700 SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT,
701 int64_t Offset = 0, unsigned TargetFlags = 0) {
702 return getBlockAddress(BA, VT, Offset, true, TargetFlags);
703 }
704
705 SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg,
706 SDValue N) {
707 return getNode(ISD::CopyToReg, dl, MVT::Other, Chain,
708 getRegister(Reg, N.getValueType()), N);
709 }
710
711 // This version of the getCopyToReg method takes an extra operand, which
712 // indicates that there is potentially an incoming glue value (if Glue is not
713 // null) and that there should be a glue result.
714 SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N,
715 SDValue Glue) {
716 SDVTList VTs = getVTList(MVT::Other, MVT::Glue);
717 SDValue Ops[] = { Chain, getRegister(Reg, N.getValueType()), N, Glue };
718 return getNode(ISD::CopyToReg, dl, VTs,
719 makeArrayRef(Ops, Glue.getNode() ? 4 : 3));
720 }
721
722 // Similar to last getCopyToReg() except parameter Reg is a SDValue
723 SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, SDValue Reg, SDValue N,
724 SDValue Glue) {
725 SDVTList VTs = getVTList(MVT::Other, MVT::Glue);
726 SDValue Ops[] = { Chain, Reg, N, Glue };
727 return getNode(ISD::CopyToReg, dl, VTs,
728 makeArrayRef(Ops, Glue.getNode() ? 4 : 3));
729 }
730
731 SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT) {
732 SDVTList VTs = getVTList(VT, MVT::Other);
733 SDValue Ops[] = { Chain, getRegister(Reg, VT) };
734 return getNode(ISD::CopyFromReg, dl, VTs, Ops);
735 }
736
737 // This version of the getCopyFromReg method takes an extra operand, which
738 // indicates that there is potentially an incoming glue value (if Glue is not
739 // null) and that there should be a glue result.
740 SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT,
741 SDValue Glue) {
742 SDVTList VTs = getVTList(VT, MVT::Other, MVT::Glue);
743 SDValue Ops[] = { Chain, getRegister(Reg, VT), Glue };
744 return getNode(ISD::CopyFromReg, dl, VTs,
745 makeArrayRef(Ops, Glue.getNode() ? 3 : 2));
746 }
747
748 SDValue getCondCode(ISD::CondCode Cond);
749
750 /// Return an ISD::VECTOR_SHUFFLE node. The number of elements in VT,
751 /// which must be a vector type, must match the number of mask elements
752 /// NumElts. An integer mask element equal to -1 is treated as undefined.
753 SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2,
754 ArrayRef<int> Mask);
755
756 /// Return an ISD::BUILD_VECTOR node. The number of elements in VT,
757 /// which must be a vector type, must match the number of operands in Ops.
758 /// The operands must have the same type as (or, for integers, a type wider
759 /// than) VT's element type.
760 SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef<SDValue> Ops) {
761 // VerifySDNode (via InsertNode) checks BUILD_VECTOR later.
762 return getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
763 }
764
765 /// Return an ISD::BUILD_VECTOR node. The number of elements in VT,
766 /// which must be a vector type, must match the number of operands in Ops.
767 /// The operands must have the same type as (or, for integers, a type wider
768 /// than) VT's element type.
769 SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef<SDUse> Ops) {
770 // VerifySDNode (via InsertNode) checks BUILD_VECTOR later.
771 return getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
772 }
773
774 /// Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all
775 /// elements. VT must be a vector type. Op's type must be the same as (or,
776 /// for integers, a type wider than) VT's element type.
777 SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op) {
778 // VerifySDNode (via InsertNode) checks BUILD_VECTOR later.
779 if (Op.getOpcode() == ISD::UNDEF) {
        19. Calling 'SDValue::getOpcode'
780     assert((VT.getVectorElementType() == Op.getValueType() ||
781             (VT.isInteger() &&
782              VT.getVectorElementType().bitsLE(Op.getValueType()))) &&
783            "A splatted value must have a width equal or (for integers) "
784            "greater than the vector element type!");
785 return getNode(ISD::UNDEF, SDLoc(), VT);
786 }
787
788 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Op);
789 return getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
790 }
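
A minimal usage sketch, assuming DAG, DL and a scalar i32 SDValue Elt are in scope; it builds a v4i32 whose four lanes all equal Elt.

  SDValue Splat = DAG.getSplatBuildVector(MVT::v4i32, DL, Elt);
  // Equivalent to getBuildVector(MVT::v4i32, DL, {Elt, Elt, Elt, Elt}).
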
791
792 // Return a splat ISD::SPLAT_VECTOR node, consisting of Op splatted to all
793 // elements.
794 SDValue getSplatVector(EVT VT, const SDLoc &DL, SDValue Op) {
795 if (Op.getOpcode() == ISD::UNDEF) {
796     assert((VT.getVectorElementType() == Op.getValueType() ||
797             (VT.isInteger() &&
798              VT.getVectorElementType().bitsLE(Op.getValueType()))) &&
799            "A splatted value must have a width equal or (for integers) "
800            "greater than the vector element type!");
801 return getNode(ISD::UNDEF, SDLoc(), VT);
802 }
803 return getNode(ISD::SPLAT_VECTOR, DL, VT, Op);
804 }
805
806 /// Returns an ISD::VECTOR_SHUFFLE node semantically equivalent to
807 /// the input shuffle node, but with its operands swapped.
808 ///
809 /// Example: shuffle A, B, <0,5,2,7> -> shuffle B, A, <4,1,6,3>
810 SDValue getCommutedVectorShuffle(const ShuffleVectorSDNode &SV);
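
A sketch of the documented example, assuming DAG, DL and two v4i32 values A and B; it also assumes getVectorShuffle actually returns a VECTOR_SHUFFLE node here rather than folding it away.

  int Mask[] = {0, 5, 2, 7};
  SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, DL, A, B, Mask);
  // Swapping the operands remaps each index across the 4-lane boundary:
  // <0,5,2,7> becomes <4,1,6,3>, matching the comment above.
  SDValue Commuted =
      DAG.getCommutedVectorShuffle(*cast<ShuffleVectorSDNode>(Shuf.getNode()));
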
811
812 /// Convert Op, which must be of float type, to the
813 /// float type VT, by either extending or rounding (by truncation).
814 SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT);
815
816 /// Convert Op, which must be a STRICT operation of float type, to the
817 /// float type VT, by either extending or rounding (by truncation).
818 std::pair<SDValue, SDValue>
819 getStrictFPExtendOrRound(SDValue Op, SDValue Chain, const SDLoc &DL, EVT VT);
820
821 /// Convert Op, which must be of integer type, to the
822 /// integer type VT, by either any-extending or truncating it.
823 SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT);
824
825 /// Convert Op, which must be of integer type, to the
826 /// integer type VT, by either sign-extending or truncating it.
827 SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT);
828
829 /// Convert Op, which must be of integer type, to the
830 /// integer type VT, by either zero-extending or truncating it.
831 SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT);
832
833 /// Return the expression required to zero extend the Op
834 /// value assuming it was the smaller SrcTy value.
835 SDValue getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT);
836
837 /// Convert Op, which must be of integer type, to the integer type VT, by
838 /// either truncating it or performing either zero or sign extension as
839 /// appropriate extension for the pointer's semantics.
840 SDValue getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT);
841
842 /// Return the expression required to extend the Op as a pointer value
843 /// assuming it was the smaller SrcTy value. This may be either a zero extend
844 /// or a sign extend.
845 SDValue getPtrExtendInReg(SDValue Op, const SDLoc &DL, EVT VT);
846
847 /// Convert Op, which must be of integer type, to the integer type VT,
848 /// by using an extension appropriate for the target's
849 /// BooleanContent for type OpVT or truncating it.
850 SDValue getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT, EVT OpVT);
851
852 /// Create a bitwise NOT operation as (XOR Val, -1).
853 SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT);
854
855 /// Create a logical NOT operation as (XOR Val, BooleanOne).
856 SDValue getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT);
857
858 /// Returns sum of the base pointer and offset.
859 /// Unlike getObjectPtrOffset this does not set NoUnsignedWrap by default.
860 SDValue getMemBasePlusOffset(SDValue Base, int64_t Offset, const SDLoc &DL,
861 const SDNodeFlags Flags = SDNodeFlags());
862 SDValue getMemBasePlusOffset(SDValue Base, SDValue Offset, const SDLoc &DL,
863 const SDNodeFlags Flags = SDNodeFlags());
864
865 /// Create an add instruction with appropriate flags when used for
866 /// addressing some offset of an object. i.e. if a load is split into multiple
867 /// components, create an add nuw from the base pointer to the offset.
868 SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, int64_t Offset) {
869 SDNodeFlags Flags;
870 Flags.setNoUnsignedWrap(true);
871 return getMemBasePlusOffset(Ptr, Offset, SL, Flags);
872 }
873
874 SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, SDValue Offset) {
875 // The object itself can't wrap around the address space, so it shouldn't be
876 // possible for the adds of the offsets to the split parts to overflow.
877 SDNodeFlags Flags;
878 Flags.setNoUnsignedWrap(true);
879 return getMemBasePlusOffset(Ptr, Offset, SL, Flags);
880 }
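
A usage sketch, assuming DAG, DL and a pointer SDValue BasePtr: when a 64-bit load is split into two 32-bit halves, the high half's address is the base plus 4 bytes, and the nuw flag is attached automatically.

  SDValue HiPtr = DAG.getObjectPtrOffset(DL, BasePtr, 4);
  // Same as getMemBasePlusOffset(BasePtr, 4, DL, Flags) with
  // Flags.setNoUnsignedWrap(true), per the definition above.
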
881
882 /// Return a new CALLSEQ_START node, that starts new call frame, in which
883 /// InSize bytes are set up inside CALLSEQ_START..CALLSEQ_END sequence and
884 /// OutSize specifies part of the frame set up prior to the sequence.
885 SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize,
886 const SDLoc &DL) {
887 SDVTList VTs = getVTList(MVT::Other, MVT::Glue);
888 SDValue Ops[] = { Chain,
889 getIntPtrConstant(InSize, DL, true),
890 getIntPtrConstant(OutSize, DL, true) };
891 return getNode(ISD::CALLSEQ_START, DL, VTs, Ops);
892 }
893
894 /// Return a new CALLSEQ_END node, which always must have a
895 /// glue result (to ensure it's not CSE'd).
896 /// CALLSEQ_END does not have a useful SDLoc.
897 SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2,
898 SDValue InGlue, const SDLoc &DL) {
899 SDVTList NodeTys = getVTList(MVT::Other, MVT::Glue);
900 SmallVector<SDValue, 4> Ops;
901 Ops.push_back(Chain);
902 Ops.push_back(Op1);
903 Ops.push_back(Op2);
904 if (InGlue.getNode())
905 Ops.push_back(InGlue);
906 return getNode(ISD::CALLSEQ_END, DL, NodeTys, Ops);
907 }
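
A sketch of bracketing a call in CALLSEQ_START/CALLSEQ_END, assuming DAG, DL, Chain and a byte count NumBytes are in scope; the two CALLSEQ_END size operands mirror the constants built inside getCALLSEQ_START.

  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, /*OutSize=*/0, DL);
  // ... lower the call itself on Chain here ...
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getIntPtrConstant(NumBytes, DL, /*isTarget=*/true),
                             DAG.getIntPtrConstant(0, DL, /*isTarget=*/true),
                             /*InGlue=*/SDValue(), DL);
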
908
909 /// Return true if the result of this operation is always undefined.
910 bool isUndef(unsigned Opcode, ArrayRef<SDValue> Ops);
911
912 /// Return an UNDEF node. UNDEF does not have a useful SDLoc.
913 SDValue getUNDEF(EVT VT) {
914 return getNode(ISD::UNDEF, SDLoc(), VT);
915 }
916
917 /// Return a node that represents the runtime scaling 'MulImm * RuntimeVL'.
918 SDValue getVScale(const SDLoc &DL, EVT VT, APInt MulImm) {
919     assert(MulImm.getMinSignedBits() <= VT.getSizeInBits() &&
920            "Immediate does not fit VT");
921 return getNode(ISD::VSCALE, DL, VT, getConstant(MulImm, DL, VT));
922 }
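
A minimal sketch, assuming DAG and DL: materialize 'vscale * 4' as an i64, e.g. for a scalable-vector frame offset.

  SDValue ScaledVL = DAG.getVScale(DL, MVT::i64, APInt(64, 4));
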
923
924 /// Return a GLOBAL_OFFSET_TABLE node. This does not have a useful SDLoc.
925 SDValue getGLOBAL_OFFSET_TABLE(EVT VT) {
926 return getNode(ISD::GLOBAL_OFFSET_TABLE, SDLoc(), VT);
927 }
928
929 /// Gets or creates the specified node.
930 ///
931 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
932 ArrayRef<SDUse> Ops);
933 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
934 ArrayRef<SDValue> Ops, const SDNodeFlags Flags = SDNodeFlags());
935 SDValue getNode(unsigned Opcode, const SDLoc &DL, ArrayRef<EVT> ResultTys,
936 ArrayRef<SDValue> Ops);
937 SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
938 ArrayRef<SDValue> Ops);
939
940 // Specialize based on number of operands.
941 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT);
942 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue Operand,
943 const SDNodeFlags Flags = SDNodeFlags());
944 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
945 SDValue N2, const SDNodeFlags Flags = SDNodeFlags());
946 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
947 SDValue N2, SDValue N3,
948 const SDNodeFlags Flags = SDNodeFlags());
949 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
950 SDValue N2, SDValue N3, SDValue N4);
951 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
952 SDValue N2, SDValue N3, SDValue N4, SDValue N5);
953
954 // Specialize again based on number of operands for nodes with a VTList
955 // rather than a single VT.
956 SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList);
957 SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, SDValue N);
958 SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, SDValue N1,
959 SDValue N2);
960 SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, SDValue N1,
961 SDValue N2, SDValue N3);
962 SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, SDValue N1,
963 SDValue N2, SDValue N3, SDValue N4);
964 SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, SDValue N1,
965 SDValue N2, SDValue N3, SDValue N4, SDValue N5);
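
A sketch of the most common getNode forms, assuming DAG, DL and two i32 SDValues LHS and RHS are in scope.

  SDValue Sum = DAG.getNode(ISD::ADD, DL, MVT::i32, LHS, RHS);   // binary form
  SDValue Neg = DAG.getNode(ISD::SUB, DL, MVT::i32,
                            DAG.getConstant(0, DL, MVT::i32), LHS);
  SDValue Ops[] = {LHS, RHS};
  SDValue Sum2 = DAG.getNode(ISD::ADD, DL, MVT::i32, Ops);       // ArrayRef form
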
966
967 /// Compute a TokenFactor to force all the incoming stack arguments to be
968 /// loaded from the stack. This is used in tail call lowering to protect
969 /// stack arguments from being clobbered.
970 SDValue getStackArgumentTokenFactor(SDValue Chain);
971
972   LLVM_ATTRIBUTE_DEPRECATED(SDValue getMemcpy(SDValue Chain, const SDLoc &dl,
973                                               SDValue Dst, SDValue Src,
974                                               SDValue Size, unsigned Align,
975                                               bool isVol, bool AlwaysInline,
976                                               bool isTailCall,
977                                               MachinePointerInfo DstPtrInfo,
978                                               MachinePointerInfo SrcPtrInfo),
979                             "Use the version that takes Align instead") {
980 return getMemcpy(Chain, dl, Dst, Src, Size, llvm::Align(Align), isVol,
981 AlwaysInline, isTailCall, DstPtrInfo, SrcPtrInfo);
982 }
983
984 SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src,
985 SDValue Size, Align Alignment, bool isVol,
986 bool AlwaysInline, bool isTailCall,
987 MachinePointerInfo DstPtrInfo,
988 MachinePointerInfo SrcPtrInfo);
989
990   LLVM_ATTRIBUTE_DEPRECATED(SDValue getMemmove(SDValue Chain, const SDLoc &dl,
991                                                SDValue Dst, SDValue Src,
992                                                SDValue Size, unsigned Align,
993                                                bool isVol, bool isTailCall,
994                                                MachinePointerInfo DstPtrInfo,
995                                                MachinePointerInfo SrcPtrInfo),
996                             "Use the version that takes Align instead") {
997 return getMemmove(Chain, dl, Dst, Src, Size, llvm::Align(Align), isVol,
998 isTailCall, DstPtrInfo, SrcPtrInfo);
999 }
1000 SDValue getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src,
1001 SDValue Size, Align Alignment, bool isVol, bool isTailCall,
1002 MachinePointerInfo DstPtrInfo,
1003 MachinePointerInfo SrcPtrInfo);
1004
1005   LLVM_ATTRIBUTE_DEPRECATED(SDValue getMemset(SDValue Chain, const SDLoc &dl,
1006                                               SDValue Dst, SDValue Src,
1007                                               SDValue Size, unsigned Align,
1008                                               bool isVol, bool isTailCall,
1009                                               MachinePointerInfo DstPtrInfo),
1010                             "Use the version that takes Align instead") {
1011 return getMemset(Chain, dl, Dst, Src, Size, llvm::Align(Align), isVol,
1012 isTailCall, DstPtrInfo);
1013 }
1014 SDValue getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src,
1015 SDValue Size, Align Alignment, bool isVol, bool isTailCall,
1016 MachinePointerInfo DstPtrInfo);
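
A sketch of calling the non-deprecated overload directly, assuming DAG, DL, Chain and pointer SDValues Dst and Src; the copy is 16 bytes with 8-byte alignment, and the empty MachinePointerInfo arguments are placeholders.

  SDValue Size = DAG.getIntPtrConstant(16, DL);
  Chain = DAG.getMemcpy(Chain, DL, Dst, Src, Size, llvm::Align(8),
                        /*isVol=*/false, /*AlwaysInline=*/false,
                        /*isTailCall=*/false,
                        MachinePointerInfo(), MachinePointerInfo());
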
1017
1018 SDValue getAtomicMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst,
1019 unsigned DstAlign, SDValue Src, unsigned SrcAlign,
1020 SDValue Size, Type *SizeTy, unsigned ElemSz,
1021 bool isTailCall, MachinePointerInfo DstPtrInfo,
1022 MachinePointerInfo SrcPtrInfo);
1023
1024 SDValue getAtomicMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst,
1025 unsigned DstAlign, SDValue Src, unsigned SrcAlign,
1026 SDValue Size, Type *SizeTy, unsigned ElemSz,
1027 bool isTailCall, MachinePointerInfo DstPtrInfo,
1028 MachinePointerInfo SrcPtrInfo);
1029
1030 SDValue getAtomicMemset(SDValue Chain, const SDLoc &dl, SDValue Dst,
1031 unsigned DstAlign, SDValue Value, SDValue Size,
1032 Type *SizeTy, unsigned ElemSz, bool isTailCall,
1033 MachinePointerInfo DstPtrInfo);
1034
1035 /// Helper function to make it easier to build SetCC's if you just have an
1036 /// ISD::CondCode instead of an SDValue.
1037 SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS,
1038 ISD::CondCode Cond, SDValue Chain = SDValue(),
1039 bool IsSignaling = false) {
1040     assert(LHS.getValueType().isVector() == RHS.getValueType().isVector() &&
1041            "Cannot compare scalars to vectors");
1042     assert(LHS.getValueType().isVector() == VT.isVector() &&
1043            "Cannot compare scalars to vectors");
1044     assert(Cond != ISD::SETCC_INVALID &&
1045            "Cannot create a setCC of an invalid node.");
1046 if (Chain)
1047 return getNode(IsSignaling ? ISD::STRICT_FSETCCS : ISD::STRICT_FSETCC, DL,
1048 {VT, MVT::Other}, {Chain, LHS, RHS, getCondCode(Cond)});
1049 return getNode(ISD::SETCC, DL, VT, LHS, RHS, getCondCode(Cond));
1050 }
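
A minimal sketch, assuming DAG, DL and two i32 values A and B; with no Chain argument this produces a plain ISD::SETCC.

  SDValue IsLess = DAG.getSetCC(DL, MVT::i1, A, B, ISD::SETLT);
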
1051
1052 /// Helper function to make it easier to build Select's if you just have
1053 /// operands and don't want to check for vector.
1054 SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS,
1055 SDValue RHS) {
1056     assert(LHS.getValueType() == RHS.getValueType() &&
1057            "Cannot use select on differing types");
1058     assert(VT.isVector() == LHS.getValueType().isVector() &&
1059            "Cannot mix vectors and scalars");
1060 auto Opcode = Cond.getValueType().isVector() ? ISD::VSELECT : ISD::SELECT;
1061 return getNode(Opcode, DL, VT, Cond, LHS, RHS);
1062 }
1063
1064 /// Helper function to make it easier to build SelectCC's if you just have an
1065 /// ISD::CondCode instead of an SDValue.
1066 SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True,
1067 SDValue False, ISD::CondCode Cond) {
1068 return getNode(ISD::SELECT_CC, DL, True.getValueType(), LHS, RHS, True,
1069 False, getCondCode(Cond));
1070 }
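
A sketch of both helpers, assuming DAG, DL, an i1 condition Cond and two i32 values A and B.

  SDValue Picked = DAG.getSelect(DL, MVT::i32, Cond, A, B);
  // Folding the comparison in: select A if A > B, else B.
  SDValue Max = DAG.getSelectCC(DL, A, B, /*True=*/A, /*False=*/B, ISD::SETGT);
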
1071
1072 /// Try to simplify a select/vselect into 1 of its operands or a constant.
1073 SDValue simplifySelect(SDValue Cond, SDValue TVal, SDValue FVal);
1074
1075 /// Try to simplify a shift into 1 of its operands or a constant.
1076 SDValue simplifyShift(SDValue X, SDValue Y);
1077
1078 /// Try to simplify a floating-point binary operation into 1 of its operands
1079 /// or a constant.
1080 SDValue simplifyFPBinop(unsigned Opcode, SDValue X, SDValue Y);
1081
1082 /// VAArg produces a result and token chain, and takes a pointer
1083 /// and a source value as input.
1084 SDValue getVAArg(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
1085 SDValue SV, unsigned Align);
1086
1087 /// Gets a node for an atomic cmpxchg op. There are two
1088 /// valid Opcodes. ISD::ATOMIC_CMP_SWAP produces the value loaded and a
1089 /// chain result. ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS produces the value loaded,
1090 /// a success flag (initially i1), and a chain.
1091 SDValue getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, EVT MemVT,
1092 SDVTList VTs, SDValue Chain, SDValue Ptr,
1093 SDValue Cmp, SDValue Swp, MachineMemOperand *MMO);
1094
1095 /// Gets a node for an atomic op, produces result (if relevant)
1096 /// and chain and takes 2 operands.
1097 SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain,
1098 SDValue Ptr, SDValue Val, MachineMemOperand *MMO);
1099
1100 /// Gets a node for an atomic op, produces result and chain and
1101 /// takes 1 operand.
1102 SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, EVT VT,
1103 SDValue Chain, SDValue Ptr, MachineMemOperand *MMO);
1104
1105 /// Gets a node for an atomic op, produces result and chain and takes N
1106 /// operands.
1107 SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
1108 SDVTList VTList, ArrayRef<SDValue> Ops,
1109 MachineMemOperand *MMO);
1110
1111 /// Creates a MemIntrinsicNode that may produce a
1112 /// result and takes a list of operands. Opcode may be INTRINSIC_VOID,
1113 /// INTRINSIC_W_CHAIN, or a target-specific opcode with a value not
1114 /// less than FIRST_TARGET_MEMORY_OPCODE.
1115 SDValue getMemIntrinsicNode(
1116 unsigned Opcode, const SDLoc &dl, SDVTList VTList,
1117 ArrayRef<SDValue> Ops, EVT MemVT,
1118 MachinePointerInfo PtrInfo,
1119 unsigned Align = 0,
1120 MachineMemOperand::Flags Flags
1121 = MachineMemOperand::MOLoad | MachineMemOperand::MOStore,
1122 uint64_t Size = 0,
1123 const AAMDNodes &AAInfo = AAMDNodes());
1124
1125 SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList,
1126 ArrayRef<SDValue> Ops, EVT MemVT,
1127 MachineMemOperand *MMO);
1128
1129 /// Creates a LifetimeSDNode that starts (`IsStart==true`) or ends
1130 /// (`IsStart==false`) the lifetime of the portion of `FrameIndex` between
1131 /// offsets `Offset` and `Offset + Size`.
1132 SDValue getLifetimeNode(bool IsStart, const SDLoc &dl, SDValue Chain,
1133 int FrameIndex, int64_t Size, int64_t Offset = -1);
1134
1135 /// Create a MERGE_VALUES node from the given operands.
1136 SDValue getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl);
1137
1138 /// Loads are not normal binary operators: their result type is not
1139 /// determined by their operands, and they produce a value AND a token chain.
1140 ///
1141 /// This function will set the MOLoad flag on MMOFlags, but you can set it if
1142 /// you want. The MOStore flag must not be set.
1143 SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
1144 MachinePointerInfo PtrInfo, unsigned Alignment = 0,
1145 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1146 const AAMDNodes &AAInfo = AAMDNodes(),
1147 const MDNode *Ranges = nullptr);
1148 SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
1149 MachineMemOperand *MMO);
1150 SDValue
1151 getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain,
1152 SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT,
1153 unsigned Alignment = 0,
1154 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1155 const AAMDNodes &AAInfo = AAMDNodes());
1156 SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT,
1157 SDValue Chain, SDValue Ptr, EVT MemVT,
1158 MachineMemOperand *MMO);
1159 SDValue getIndexedLoad(SDValue OrigLoad, const SDLoc &dl, SDValue Base,
1160 SDValue Offset, ISD::MemIndexedMode AM);
1161 SDValue getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT,
1162 const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset,
1163 MachinePointerInfo PtrInfo, EVT MemVT, unsigned Alignment = 0,
1164 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1165 const AAMDNodes &AAInfo = AAMDNodes(),
1166 const MDNode *Ranges = nullptr);
1167 SDValue getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT,
1168 const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset,
1169 EVT MemVT, MachineMemOperand *MMO);
1170
1171 /// Helper function to build ISD::STORE nodes.
1172 ///
1173 /// This function will set the MOStore flag on MMOFlags, but you can set it if
1174 /// you want. The MOLoad and MOInvariant flags must not be set.
1175 SDValue
1176 getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
1177 MachinePointerInfo PtrInfo, unsigned Alignment = 0,
1178 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1179 const AAMDNodes &AAInfo = AAMDNodes());
1180 SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
1181 MachineMemOperand *MMO);
1182 SDValue
1183 getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
1184 MachinePointerInfo PtrInfo, EVT SVT, unsigned Alignment = 0,
1185 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1186 const AAMDNodes &AAInfo = AAMDNodes());
1187 SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
1188 SDValue Ptr, EVT SVT, MachineMemOperand *MMO);
1189 SDValue getIndexedStore(SDValue OrigStore, const SDLoc &dl, SDValue Base,
1190 SDValue Offset, ISD::MemIndexedMode AM);
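
A sketch of a chained load followed by a store of the loaded value, assuming DAG, DL, Chain and pointer SDValues SrcPtr and DstPtr; the MachinePointerInfo arguments are left empty as placeholders.

  SDValue Val = DAG.getLoad(MVT::i32, DL, Chain, SrcPtr, MachinePointerInfo());
  Chain = Val.getValue(1);                 // the load's token-chain result
  Chain = DAG.getStore(Chain, DL, Val, DstPtr, MachinePointerInfo());
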
1191
1192 SDValue getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Base,
1193 SDValue Offset, SDValue Mask, SDValue Src0, EVT MemVT,
1194 MachineMemOperand *MMO, ISD::MemIndexedMode AM,
1195 ISD::LoadExtType, bool IsExpanding = false);
1196 SDValue getIndexedMaskedLoad(SDValue OrigLoad, const SDLoc &dl, SDValue Base,
1197 SDValue Offset, ISD::MemIndexedMode AM);
1198 SDValue getMaskedStore(SDValue Chain, const SDLoc &dl, SDValue Val,
1199 SDValue Base, SDValue Offset, SDValue Mask, EVT MemVT,
1200 MachineMemOperand *MMO, ISD::MemIndexedMode AM,
1201 bool IsTruncating = false, bool IsCompressing = false);
1202 SDValue getIndexedMaskedStore(SDValue OrigStore, const SDLoc &dl,
1203 SDValue Base, SDValue Offset,
1204 ISD::MemIndexedMode AM);
1205 SDValue getMaskedGather(SDVTList VTs, EVT VT, const SDLoc &dl,
1206 ArrayRef<SDValue> Ops, MachineMemOperand *MMO,
1207 ISD::MemIndexType IndexType);
1208 SDValue getMaskedScatter(SDVTList VTs, EVT VT, const SDLoc &dl,
1209 ArrayRef<SDValue> Ops, MachineMemOperand *MMO,
1210 ISD::MemIndexType IndexType);
1211
1212 /// Construct a node to track a Value* through the backend.
1213 SDValue getSrcValue(const Value *v);
1214
1215 /// Return an MDNodeSDNode which holds an MDNode.
1216 SDValue getMDNode(const MDNode *MD);
1217
1218 /// Return a bitcast using the SDLoc of the value operand, and casting to the
1219 /// provided type. Use getNode to set a custom SDLoc.
1220 SDValue getBitcast(EVT VT, SDValue V);
1221
1222 /// Return an AddrSpaceCastSDNode.
1223 SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS,
1224 unsigned DestAS);
1225
1226 /// Return the specified value casted to
1227 /// the target's desired shift amount type.
1228 SDValue getShiftAmountOperand(EVT LHSTy, SDValue Op);
1229
1230 /// Expand the specified \c ISD::VAARG node as the Legalize pass would.
1231 SDValue expandVAArg(SDNode *Node);
1232
1233 /// Expand the specified \c ISD::VACOPY node as the Legalize pass would.
1234 SDValue expandVACopy(SDNode *Node);
1235
1236 /// Returns a GlobalAddress of the function from the current module with
1237 /// name matching the given ExternalSymbol. Additionally can provide the
1238 /// matched function.
1239 /// Panics if the function doesn't exist.
1240 SDValue getSymbolFunctionGlobalAddress(SDValue Op,
1241 Function **TargetFunction = nullptr);
1242
1243 /// *Mutate* the specified node in-place to have the
1244 /// specified operands. If the resultant node already exists in the DAG,
1245 /// this does not modify the specified node, instead it returns the node that
1246 /// already exists. If the resultant node does not exist in the DAG, the
1247 /// input node is returned. As a degenerate case, if you specify the same
1248 /// input operands as the node already has, the input node is returned.
1249 SDNode *UpdateNodeOperands(SDNode *N, SDValue Op);
1250 SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2);
1251 SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
1252 SDValue Op3);
1253 SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
1254 SDValue Op3, SDValue Op4);
1255 SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
1256 SDValue Op3, SDValue Op4, SDValue Op5);
1257 SDNode *UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops);
1258
1259 /// Creates a new TokenFactor containing \p Vals. If \p Vals contains 64k
1260 /// values or more, move values into new TokenFactors in 64k-1 blocks, until
1261 /// the final TokenFactor has less than 64k operands.
1262 SDValue getTokenFactor(const SDLoc &DL, SmallVectorImpl<SDValue> &Vals);
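
A minimal sketch, assuming DAG, DL and a SmallVector<SDValue, 8> Chains already filled with the chains of independent memory operations.

  SDValue MergedChain = DAG.getTokenFactor(DL, Chains);
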
1263
1264 /// *Mutate* the specified machine node's memory references to the provided
1265 /// list.
1266 void setNodeMemRefs(MachineSDNode *N,
1267 ArrayRef<MachineMemOperand *> NewMemRefs);
1268
1269 // Propagates the change in divergence to users
1270 void updateDivergence(SDNode * N);
1271
1272 /// These are used for target selectors to *mutate* the
1273 /// specified node to have the specified return type, Target opcode, and
1274 /// operands. Note that target opcodes are stored as
1275 /// ~TargetOpcode in the node opcode field. The resultant node is returned.
1276 SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT);
1277 SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT, SDValue Op1);
1278 SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT,
1279 SDValue Op1, SDValue Op2);
1280 SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT,
1281 SDValue Op1, SDValue Op2, SDValue Op3);
1282 SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT,
1283 ArrayRef<SDValue> Ops);
1284 SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT1, EVT VT2);
1285 SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT1,
1286 EVT VT2, ArrayRef<SDValue> Ops);
1287 SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT1,
1288 EVT VT2, EVT VT3, ArrayRef<SDValue> Ops);
1289 SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT1,
1290 EVT VT2, SDValue Op1);
1291 SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT1,
1292 EVT VT2, SDValue Op1, SDValue Op2);
1293 SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, SDVTList VTs,
1294 ArrayRef<SDValue> Ops);
1295
1296 /// This *mutates* the specified node to have the specified
1297 /// return type, opcode, and operands.
1298 SDNode *MorphNodeTo(SDNode *N, unsigned Opc, SDVTList VTs,
1299 ArrayRef<SDValue> Ops);
1300
1301 /// Mutate the specified strict FP node to its non-strict equivalent,
1302 /// unlinking the node from its chain and dropping the metadata arguments.
1303 /// The node must be a strict FP node.
1304 SDNode *mutateStrictFPToFP(SDNode *Node);
1305
1306 /// These are used for target selectors to create a new node
1307 /// with specified return type(s), MachineInstr opcode, and operands.
1308 ///
1309 /// Note that getMachineNode returns the resultant node. If there is already
1310 /// a node of the specified opcode and operands, it returns that node instead
1311 /// of the current one.
1312 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT);
1313 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT,
1314 SDValue Op1);
1315 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT,
1316 SDValue Op1, SDValue Op2);
1317 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT,
1318 SDValue Op1, SDValue Op2, SDValue Op3);
1319 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT,
1320 ArrayRef<SDValue> Ops);
1321 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
1322 EVT VT2, SDValue Op1, SDValue Op2);
1323 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
1324 EVT VT2, SDValue Op1, SDValue Op2, SDValue Op3);
1325 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
1326 EVT VT2, ArrayRef<SDValue> Ops);
1327 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
1328 EVT VT2, EVT VT3, SDValue Op1, SDValue Op2);
1329 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
1330 EVT VT2, EVT VT3, SDValue Op1, SDValue Op2,
1331 SDValue Op3);
1332 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
1333 EVT VT2, EVT VT3, ArrayRef<SDValue> Ops);
1334 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl,
1335 ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops);
1336 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, SDVTList VTs,
1337 ArrayRef<SDValue> Ops);
1338
1339 /// A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
1340 SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT,
1341 SDValue Operand);
1342
1343 /// A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
1344 SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT,
1345 SDValue Operand, SDValue Subreg);
1346
1347 /// Get the specified node if it's already available, or else return NULL.
1348 SDNode *getNodeIfExists(unsigned Opcode, SDVTList VTList, ArrayRef<SDValue> Ops,
1349 const SDNodeFlags Flags = SDNodeFlags());
1350
1351 /// Creates a SDDbgValue node.
1352 SDDbgValue *getDbgValue(DIVariable *Var, DIExpression *Expr, SDNode *N,
1353 unsigned R, bool IsIndirect, const DebugLoc &DL,
1354 unsigned O);
1355
1356 /// Creates a constant SDDbgValue node.
1357 SDDbgValue *getConstantDbgValue(DIVariable *Var, DIExpression *Expr,
1358 const Value *C, const DebugLoc &DL,
1359 unsigned O);
1360
1361 /// Creates a FrameIndex SDDbgValue node.
1362 SDDbgValue *getFrameIndexDbgValue(DIVariable *Var, DIExpression *Expr,
1363 unsigned FI, bool IsIndirect,
1364 const DebugLoc &DL, unsigned O);
1365
1366 /// Creates a VReg SDDbgValue node.
1367 SDDbgValue *getVRegDbgValue(DIVariable *Var, DIExpression *Expr,
1368 unsigned VReg, bool IsIndirect,
1369 const DebugLoc &DL, unsigned O);
1370
1371 /// Creates a SDDbgLabel node.
1372 SDDbgLabel *getDbgLabel(DILabel *Label, const DebugLoc &DL, unsigned O);
1373
1374 /// Transfer debug values from one node to another, while optionally
1375 /// generating fragment expressions for split-up values. If \p InvalidateDbg
1376 /// is set, debug values are invalidated after they are transferred.
1377 void transferDbgValues(SDValue From, SDValue To, unsigned OffsetInBits = 0,
1378 unsigned SizeInBits = 0, bool InvalidateDbg = true);
1379
1380 /// Remove the specified node from the system. If any of its
1381 /// operands then becomes dead, remove them as well. Inform UpdateListener
1382 /// for each node deleted.
1383 void RemoveDeadNode(SDNode *N);
1384
1385 /// This method deletes the unreachable nodes in the
1386 /// given list, and any nodes that become unreachable as a result.
1387 void RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes);
1388
1389 /// Modify anything using 'From' to use 'To' instead.
1390 /// This can cause recursive merging of nodes in the DAG. Use the first
1391 /// version if 'From' is known to have a single result, use the second
1392 /// if you have two nodes with identical results (or if 'To' has a superset
1393 /// of the results of 'From'), use the third otherwise.
1394 ///
1395 /// These methods all take an optional UpdateListener, which (if not null) is
1396 /// informed about nodes that are deleted and modified due to recursive
1397 /// changes in the dag.
1398 ///
1399 /// These functions only replace all existing uses. It's possible that as
1400 /// these replacements are being performed, CSE may cause the From node
1401 /// to be given new uses. These new uses of From are left in place, and
1402 /// not automatically transferred to To.
1403 ///
1404 void ReplaceAllUsesWith(SDValue From, SDValue To);
1405 void ReplaceAllUsesWith(SDNode *From, SDNode *To);
1406 void ReplaceAllUsesWith(SDNode *From, const SDValue *To);
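
A sketch of the single-result form, assuming DAG and two type-compatible SDValues OldVal and NewVal, where OldVal is the only result of its node.

  DAG.ReplaceAllUsesWith(OldVal, NewVal);
  // Once OldVal has no remaining uses, its node can be reclaimed:
  DAG.RemoveDeadNode(OldVal.getNode());
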
1407
1408 /// Replace any uses of From with To, leaving
1409 /// uses of other values produced by From.getNode() alone.
1410 void ReplaceAllUsesOfValueWith(SDValue From, SDValue To);
1411
1412 /// Like ReplaceAllUsesOfValueWith, but for multiple values at once.
1413 /// This correctly handles the case where
1414 /// there is an overlap between the From values and the To values.
1415 void ReplaceAllUsesOfValuesWith(const SDValue *From, const SDValue *To,
1416 unsigned Num);
1417
1418 /// If an existing load has uses of its chain, create a token factor node with
1419 /// that chain and the new memory node's chain and update users of the old
1420 /// chain to the token factor. This ensures that the new memory node will have
1421 /// the same relative memory dependency position as the old load. Returns the
1422 /// new merged load chain.
1423 SDValue makeEquivalentMemoryOrdering(LoadSDNode *Old, SDValue New);
1424
1425 /// Topological-sort the AllNodes list and
1426 /// assign a unique node id for each node in the DAG based on their
1427 /// topological order. Returns the number of nodes.
1428 unsigned AssignTopologicalOrder();
1429
1430 /// Move node N in the AllNodes list to be immediately
1431 /// before the given iterator Position. This may be used to update the
1432 /// topological ordering when the list of nodes is modified.
1433 void RepositionNode(allnodes_iterator Position, SDNode *N) {
1434 AllNodes.insert(Position, AllNodes.remove(N));
1435 }
1436
1437 /// Returns an APFloat semantics tag appropriate for the given type. If VT is
1438 /// a vector type, the element semantics are returned.
1439 static const fltSemantics &EVTToAPFloatSemantics(EVT VT) {
1440 switch (VT.getScalarType().getSimpleVT().SimpleTy) {
1441     default: llvm_unreachable("Unknown FP format");
1442 case MVT::f16: return APFloat::IEEEhalf();
1443 case MVT::f32: return APFloat::IEEEsingle();
1444 case MVT::f64: return APFloat::IEEEdouble();
1445 case MVT::f80: return APFloat::x87DoubleExtended();
1446 case MVT::f128: return APFloat::IEEEquad();
1447 case MVT::ppcf128: return APFloat::PPCDoubleDouble();
1448 }
1449 }
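
A usage sketch, assuming an in-scope floating-point SDValue FpVal: pick the matching APFloat semantics, e.g. to build a zero constant in the right format.

  const fltSemantics &Sem =
      SelectionDAG::EVTToAPFloatSemantics(FpVal.getValueType());
  APFloat Zero = APFloat::getZero(Sem);
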
1450
1451 /// Add a dbg_value SDNode. If SD is non-null that means the
1452 /// value is produced by SD.
1453 void AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter);
1454
1455 /// Add a dbg_label SDNode.
1456 void AddDbgLabel(SDDbgLabel *DB);
1457
1458 /// Get the debug values which reference the given SDNode.
1459 ArrayRef<SDDbgValue*> GetDbgValues(const SDNode* SD) const {
1460 return DbgInfo->getSDDbgValues(SD);
1461 }
1462
1463public:
1464 /// Return true if there are any SDDbgValue nodes associated
1465 /// with this SelectionDAG.
1466 bool hasDebugValues() const { return !DbgInfo->empty(); }
1467
1468 SDDbgInfo::DbgIterator DbgBegin() const { return DbgInfo->DbgBegin(); }
1469 SDDbgInfo::DbgIterator DbgEnd() const { return DbgInfo->DbgEnd(); }
1470
1471 SDDbgInfo::DbgIterator ByvalParmDbgBegin() const {
1472 return DbgInfo->ByvalParmDbgBegin();
1473 }
1474 SDDbgInfo::DbgIterator ByvalParmDbgEnd() const {
1475 return DbgInfo->ByvalParmDbgEnd();
1476 }
1477
1478 SDDbgInfo::DbgLabelIterator DbgLabelBegin() const {
1479 return DbgInfo->DbgLabelBegin();
1480 }
1481 SDDbgInfo::DbgLabelIterator DbgLabelEnd() const {
1482 return DbgInfo->DbgLabelEnd();
1483 }
1484
1485 /// To be invoked on an SDNode that is slated to be erased. This
1486 /// function mirrors \c llvm::salvageDebugInfo.
1487 void salvageDebugInfo(SDNode &N);
1488
1489 void dump() const;
1490
1491 /// Create a stack temporary, suitable for holding the specified value type.
1492 /// If minAlign is specified, the slot size will have at least that alignment.
1493 SDValue CreateStackTemporary(EVT VT, unsigned minAlign = 1);
1494
1495 /// Create a stack temporary suitable for holding either of the specified
1496 /// value types.
1497 SDValue CreateStackTemporary(EVT VT1, EVT VT2);
1498
1499 SDValue FoldSymbolOffset(unsigned Opcode, EVT VT,
1500 const GlobalAddressSDNode *GA,
1501 const SDNode *N2);
1502
1503 SDValue FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, EVT VT,
1504 ArrayRef<SDValue> Ops);
1505
1506 SDValue FoldConstantVectorArithmetic(unsigned Opcode, const SDLoc &DL, EVT VT,
1507 ArrayRef<SDValue> Ops,
1508 const SDNodeFlags Flags = SDNodeFlags());
1509
1510 /// Fold floating-point operations with 2 operands when both operands are
1511 /// constants and/or undefined.
1512 SDValue foldConstantFPMath(unsigned Opcode, const SDLoc &DL, EVT VT,
1513 SDValue N1, SDValue N2);
1514
1515 /// Constant fold a setcc to true or false.
1516 SDValue FoldSetCC(EVT VT, SDValue N1, SDValue N2, ISD::CondCode Cond,
1517 const SDLoc &dl);
1518
1519 /// See if the specified operand can be simplified with the knowledge that
1520 /// only the bits specified by DemandedBits are used. If so, return the
1521 /// simpler operand, otherwise return a null SDValue.
1522 ///
1523 /// (This exists alongside SimplifyDemandedBits because GetDemandedBits can
1524 /// simplify nodes with multiple uses more aggressively.)
1525 SDValue GetDemandedBits(SDValue V, const APInt &DemandedBits);
1526
1527 /// See if the specified operand can be simplified with the knowledge that
1528 /// only the bits specified by DemandedBits are used in the elements specified
1529 /// by DemandedElts. If so, return the simpler operand, otherwise return a
1530 /// null SDValue.
1531 ///
1532 /// (This exists alongside SimplifyDemandedBits because GetDemandedBits can
1533 /// simplify nodes with multiple uses more aggressively.)
1534 SDValue GetDemandedBits(SDValue V, const APInt &DemandedBits,
1535 const APInt &DemandedElts);
1536
1537 /// Return true if the sign bit of Op is known to be zero.
1538 /// We use this predicate to simplify operations downstream.
1539 bool SignBitIsZero(SDValue Op, unsigned Depth = 0) const;
1540
1541 /// Return true if 'Op & Mask' is known to be zero. We
1542 /// use this predicate to simplify operations downstream. Op and Mask are
1543 /// known to be the same type.
1544 bool MaskedValueIsZero(SDValue Op, const APInt &Mask,
1545 unsigned Depth = 0) const;
1546
1547 /// Return true if 'Op & Mask' is known to be zero in DemandedElts. We
1548 /// use this predicate to simplify operations downstream. Op and Mask are
1549 /// known to be the same type.
1550 bool MaskedValueIsZero(SDValue Op, const APInt &Mask,
1551 const APInt &DemandedElts, unsigned Depth = 0) const;
1552
1553 /// Return true if '(Op & Mask) == Mask'.
1554 /// Op and Mask are known to be the same type.
1555 bool MaskedValueIsAllOnes(SDValue Op, const APInt &Mask,
1556 unsigned Depth = 0) const;
1557
1558 /// Determine which bits of Op are known to be either zero or one and return
1559 /// them in Known. For vectors, the known bits are those that are shared by
1560 /// every vector element.
1561 /// Targets can implement the computeKnownBitsForTargetNode method in the
1562 /// TargetLowering class to allow target nodes to be understood.
1563 KnownBits computeKnownBits(SDValue Op, unsigned Depth = 0) const;
1564
1565 /// Determine which bits of Op are known to be either zero or one and return
1566 /// them in Known. The DemandedElts argument allows us to only collect the
1567 /// known bits that are shared by the requested vector elements.
1568 /// Targets can implement the computeKnownBitsForTargetNode method in the
1569 /// TargetLowering class to allow target nodes to be understood.
1570 KnownBits computeKnownBits(SDValue Op, const APInt &DemandedElts,
1571 unsigned Depth = 0) const;
1572
1573 /// Used to represent the possible overflow behavior of an operation.
1574 /// Never: the operation cannot overflow.
1575 /// Always: the operation will always overflow.
1576 /// Sometime: the operation may or may not overflow.
1577 enum OverflowKind {
1578 OFK_Never,
1579 OFK_Sometime,
1580 OFK_Always,
1581 };
1582
1583 /// Determine if the result of the addition of 2 nodes can overflow.
1584 OverflowKind computeOverflowKind(SDValue N0, SDValue N1) const;
1585
1586 /// Test if the given value is known to have exactly one bit set. This differs
1587 /// from computeKnownBits in that it doesn't necessarily determine which bit
1588 /// is set.
1589 bool isKnownToBeAPowerOfTwo(SDValue Val) const;
1590
1591 /// Return the number of times the sign bit of the register is replicated into
1592 /// the other bits. We know that at least 1 bit is always equal to the sign
1593 /// bit (itself), but other cases can give us information. For example,
1594 /// immediately after an "SRA X, 2", we know that the top 3 bits are all equal
1595 /// to each other, so we return 3. Targets can implement the
1596 /// ComputeNumSignBitsForTarget method in the TargetLowering class to allow
1597 /// target nodes to be understood.
1598 unsigned ComputeNumSignBits(SDValue Op, unsigned Depth = 0) const;
1599
1600 /// Return the number of times the sign bit of the register is replicated into
1601 /// the other bits. We know that at least 1 bit is always equal to the sign
1602 /// bit (itself), but other cases can give us information. For example,
1603 /// immediately after an "SRA X, 2", we know that the top 3 bits are all equal
1604 /// to each other, so we return 3. The DemandedElts argument allows
1605 /// us to only collect the minimum sign bits of the requested vector elements.
1606 /// Targets can implement the ComputeNumSignBitsForTarget method in the
1607 /// TargetLowering class to allow target nodes to be understood.
1608 unsigned ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
1609 unsigned Depth = 0) const;
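To make the SRA example above concrete (illustrative only; Y is an assumed name for the shift result):

  // Y = (SRA X, 2) on an i32 value: the top three bits of Y are all copies
  // of X's sign bit, so the query reports at least 3 sign bits.
  unsigned SignBits = DAG.ComputeNumSignBits(Y);  // >= 3 here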
1610
1611 /// Return true if the specified operand is an ISD::ADD with a ConstantSDNode
1612 /// on the right-hand side, or if it is an ISD::OR with a ConstantSDNode that
1613 /// is guaranteed to have the same semantics as an ADD. This handles the
1614 /// equivalence:
1615 /// X|Cst == X+Cst iff X&Cst == 0.
1616 bool isBaseWithConstantOffset(SDValue Op) const;
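A worked instance of the stated equivalence (numbers chosen purely for illustration):

  //   X   = 0b1000 (8), Cst = 0b0011 (3)
  //   X & Cst == 0, so X | Cst == X + Cst == 0b1011 (11);
  //   an OR of this shape can therefore be treated as base-plus-constant-offset.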
1617
1618 /// Test whether the given SDValue is known to never be NaN. If \p SNaN is
1619 /// true, returns if \p Op is known to never be a signaling NaN (it may still
1620 /// be a qNaN).
1621 bool isKnownNeverNaN(SDValue Op, bool SNaN = false, unsigned Depth = 0) const;
1622
1623 /// \returns true if \p Op is known to never be a signaling NaN.
1624 bool isKnownNeverSNaN(SDValue Op, unsigned Depth = 0) const {
1625 return isKnownNeverNaN(Op, true, Depth);
1626 }
1627
1628 /// Test whether the given floating point SDValue is known to never be
1629 /// positive or negative zero.
1630 bool isKnownNeverZeroFloat(SDValue Op) const;
1631
1632 /// Test whether the given SDValue is known to contain non-zero value(s).
1633 bool isKnownNeverZero(SDValue Op) const;
1634
1635 /// Test whether two SDValues are known to compare equal. This
1636 /// is true if they are the same value, or if one is negative zero and the
1637 /// other positive zero.
1638 bool isEqualTo(SDValue A, SDValue B) const;
1639
1640 /// Return true if A and B have no common bits set. As an example, this can
1641 /// allow an 'add' to be transformed into an 'or'.
1642 bool haveNoCommonBitsSet(SDValue A, SDValue B) const;
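Sketch of the add-to-or rewrite mentioned above (A, B, DL and VT are assumed to be in scope):

  if (DAG.haveNoCommonBitsSet(A, B)) {
    // With no overlapping bits, A + B and A | B compute the same value; the
    // OR form often enables further folds (e.g. into addressing modes).
    SDValue Or = DAG.getNode(ISD::OR, DL, VT, A, B);
    // ... use Or in place of the original ADD ...
  }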
1643
1644 /// Test whether \p V has a splatted value for all the demanded elements.
1645 ///
1646 /// On success \p UndefElts will indicate the elements that have UNDEF
1647 /// values instead of the splat value; this is only guaranteed to be correct
1648 /// for \p DemandedElts.
1649 ///
1650 /// NOTE: The function will return true for a demanded splat of UNDEF values.
1651 bool isSplatValue(SDValue V, const APInt &DemandedElts, APInt &UndefElts);
1652
1653 /// Test whether \p V has a splatted value.
1654 bool isSplatValue(SDValue V, bool AllowUndefs = false);
1655
1656 /// If V is a splatted value, return the source vector and its splat index.
1657 SDValue getSplatSourceVector(SDValue V, int &SplatIndex);
1658
1659 /// If V is a splat vector, return its scalar source operand by extracting
1660 /// that element from the source vector.
1661 SDValue getSplatValue(SDValue V);
1662
1663 /// If a SHL/SRA/SRL node \p V has a constant or splat constant shift amount
1664 /// that is less than the element bit-width of the shift node, return it.
1665 const APInt *getValidShiftAmountConstant(SDValue V,
1666 const APInt &DemandedElts) const;
1667
1668 /// If a SHL/SRA/SRL node \p V has constant shift amounts that are all less
1669 /// than the element bit-width of the shift node, return the minimum value.
1670 const APInt *
1671 getValidMinimumShiftAmountConstant(SDValue V,
1672 const APInt &DemandedElts) const;
1673
1674 /// If a SHL/SRA/SRL node \p V has constant shift amounts that are all less
1675 /// than the element bit-width of the shift node, return the maximum value.
1676 const APInt *
1677 getValidMaximumShiftAmountConstant(SDValue V,
1678 const APInt &DemandedElts) const;
1679
1680 /// Match a binop + shuffle pyramid that represents a horizontal reduction
1681 /// over the elements of a vector starting from the EXTRACT_VECTOR_ELT node \p
1682 /// Extract. The reduction must use one of the opcodes listed in \p
1683 /// CandidateBinOps and on success \p BinOp will contain the matching opcode.
1684 /// Returns the vector that is being reduced on, or SDValue() if a reduction
1685 /// was not matched. If \p AllowPartials is set then in the case of a
1686 /// reduction pattern that only matches the first few stages, the extracted
1687 /// subvector of the start of the reduction is returned.
1688 SDValue matchBinOpReduction(SDNode *Extract, ISD::NodeType &BinOp,
1689 ArrayRef<ISD::NodeType> CandidateBinOps,
1690 bool AllowPartials = false);
1691
1692 /// Utility function used by legalize and lowering to
1693 /// "unroll" a vector operation by splitting out the scalars and operating
1694 /// on each element individually. If the ResNE is 0, fully unroll the vector
1695 /// op. If ResNE is less than the width of the vector op, unroll up to ResNE.
1696 /// If the ResNE is greater than the width of the vector op, unroll the
1697 /// vector op and fill the end of the resulting vector with UNDEFS.
1698 SDValue UnrollVectorOp(SDNode *N, unsigned ResNE = 0);
1699
1700 /// Like UnrollVectorOp(), but for the [US](ADD|SUB|MUL)O family of opcodes.
1701 /// This is a separate function because those opcodes have two results.
1702 std::pair<SDValue, SDValue> UnrollVectorOverflowOp(SDNode *N,
1703 unsigned ResNE = 0);
1704
1705 /// Return true if loads are next to each other and can be
1706 /// merged. Check that both are nonvolatile and if LD is loading
1707 /// 'Bytes' bytes from a location that is 'Dist' units away from the
1708 /// location that the 'Base' load is loading from.
1709 bool areNonVolatileConsecutiveLoads(LoadSDNode *LD, LoadSDNode *Base,
1710 unsigned Bytes, int Dist) const;
1711
1712 /// Infer alignment of a load / store address. Return 0 if
1713 /// it cannot be inferred.
1714 unsigned InferPtrAlignment(SDValue Ptr) const;
1715
1716 /// Compute the VTs needed for the low/hi parts of a type
1717 /// which is split (or expanded) into two not necessarily identical pieces.
1718 std::pair<EVT, EVT> GetSplitDestVTs(const EVT &VT) const;
1719
1720 /// Split the vector with EXTRACT_SUBVECTOR using the provided
1721 /// VTs and return the low/high part.
1722 std::pair<SDValue, SDValue> SplitVector(const SDValue &N, const SDLoc &DL,
1723 const EVT &LoVT, const EVT &HiVT);
1724
1725 /// Split the vector with EXTRACT_SUBVECTOR and return the low/high part.
1726 std::pair<SDValue, SDValue> SplitVector(const SDValue &N, const SDLoc &DL) {
1727 EVT LoVT, HiVT;
1728 std::tie(LoVT, HiVT) = GetSplitDestVTs(N.getValueType());
1729 return SplitVector(N, DL, LoVT, HiVT);
1730 }
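Usage sketch (assuming N is, say, a v8i32 value): GetSplitDestVTs would report two v4i32 halves, and SplitVector materializes them with EXTRACT_SUBVECTOR:

  SDValue Lo, Hi;
  std::tie(Lo, Hi) = DAG.SplitVector(N, SDLoc(N));
  // Lo holds elements [0..3] and Hi holds elements [4..7] of the original N.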
1731
1732 /// Split the node's operand with EXTRACT_SUBVECTOR and
1733 /// return the low/high part.
1734 std::pair<SDValue, SDValue> SplitVectorOperand(const SDNode *N, unsigned OpNo)
1735 {
1736 return SplitVector(N->getOperand(OpNo), SDLoc(N));
1737 }
1738
1739 /// Widen the vector up to the next power of two using INSERT_SUBVECTOR.
1740 SDValue WidenVector(const SDValue &N, const SDLoc &DL);
1741
1742 /// Append the extracted elements from Start to Count out of the vector Op
1743 /// in Args. If Count is 0, all of the elements will be extracted.
1744 void ExtractVectorElements(SDValue Op, SmallVectorImpl<SDValue> &Args,
1745 unsigned Start = 0, unsigned Count = 0);
1746
1747 /// Compute the default alignment value for the given type.
1748 unsigned getEVTAlignment(EVT MemoryVT) const;
1749
1750 /// Test whether the given value is a constant int or similar node.
1751 SDNode *isConstantIntBuildVectorOrConstantInt(SDValue N);
1752
1753 /// Test whether the given value is a constant FP or similar node.
1754 SDNode *isConstantFPBuildVectorOrConstantFP(SDValue N);
1755
1756 /// \returns true if \p N is any kind of constant or build_vector of
1757 /// constants, int or float. If a vector, it may not necessarily be a splat.
1758 inline bool isConstantValueOfAnyType(SDValue N) {
1759 return isConstantIntBuildVectorOrConstantInt(N) ||
1760 isConstantFPBuildVectorOrConstantFP(N);
1761 }
1762
1763 void addCallSiteInfo(const SDNode *CallNode, CallSiteInfoImpl &&CallInfo) {
1764 SDCallSiteDbgInfo[CallNode].CSInfo = std::move(CallInfo);
1765 }
1766
1767 CallSiteInfo getSDCallSiteInfo(const SDNode *CallNode) {
1768 auto I = SDCallSiteDbgInfo.find(CallNode);
1769 if (I != SDCallSiteDbgInfo.end())
1770 return std::move(I->second).CSInfo;
1771 return CallSiteInfo();
1772 }
1773
1774 void addHeapAllocSite(const SDNode *Node, MDNode *MD) {
1775 SDCallSiteDbgInfo[Node].HeapAllocSite = MD;
1776 }
1777
1778 /// Return the HeapAllocSite type associated with the SDNode, if it exists.
1779 MDNode *getHeapAllocSite(const SDNode *Node) {
1780 auto It = SDCallSiteDbgInfo.find(Node);
1781 if (It == SDCallSiteDbgInfo.end())
1782 return nullptr;
1783 return It->second.HeapAllocSite;
1784 }
1785
1786 /// Return the current function's default denormal handling kind for the given
1787 /// floating point type.
1788 DenormalMode getDenormalMode(EVT VT) const {
1789 return MF->getDenormalMode(EVTToAPFloatSemantics(VT));
1790 }
1791
1792 bool shouldOptForSize() const;
1793
1794private:
1795 void InsertNode(SDNode *N);
1796 bool RemoveNodeFromCSEMaps(SDNode *N);
1797 void AddModifiedNodeToCSEMaps(SDNode *N);
1798 SDNode *FindModifiedNodeSlot(SDNode *N, SDValue Op, void *&InsertPos);
1799 SDNode *FindModifiedNodeSlot(SDNode *N, SDValue Op1, SDValue Op2,
1800 void *&InsertPos);
1801 SDNode *FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
1802 void *&InsertPos);
1803 SDNode *UpdateSDLocOnMergeSDNode(SDNode *N, const SDLoc &loc);
1804
1805 void DeleteNodeNotInCSEMaps(SDNode *N);
1806 void DeallocateNode(SDNode *N);
1807
1808 void allnodes_clear();
1809
1810 /// Look up the node specified by ID in CSEMap. If it exists, return it. If
1811 /// not, return the insertion token that will make insertion faster. This
1812 /// overload is for nodes other than Constant or ConstantFP, use the other one
1813 /// for those.
1814 SDNode *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos);
1815
1816 /// Look up the node specified by ID in CSEMap. If it exists, return it. If
1817 /// not, return the insertion token that will make insertion faster. Performs
1818 /// additional processing for constant nodes.
1819 SDNode *FindNodeOrInsertPos(const FoldingSetNodeID &ID, const SDLoc &DL,
1820 void *&InsertPos);
1821
1822 /// List of non-single value types.
1823 FoldingSet<SDVTListNode> VTListMap;
1824
1825 /// Maps to auto-CSE operations.
1826 std::vector<CondCodeSDNode*> CondCodeNodes;
1827
1828 std::vector<SDNode*> ValueTypeNodes;
1829 std::map<EVT, SDNode*, EVT::compareRawBits> ExtendedValueTypeNodes;
1830 StringMap<SDNode*> ExternalSymbols;
1831
1832 std::map<std::pair<std::string, unsigned>, SDNode *> TargetExternalSymbols;
1833 DenseMap<MCSymbol *, SDNode *> MCSymbols;
1834};
1835
1836template <> struct GraphTraits<SelectionDAG*> : public GraphTraits<SDNode*> {
1837 using nodes_iterator = pointer_iterator<SelectionDAG::allnodes_iterator>;
1838
1839 static nodes_iterator nodes_begin(SelectionDAG *G) {
1840 return nodes_iterator(G->allnodes_begin());
1841 }
1842
1843 static nodes_iterator nodes_end(SelectionDAG *G) {
1844 return nodes_iterator(G->allnodes_end());
1845 }
1846};
1847
1848} // end namespace llvm
1849
1850#endif // LLVM_CODEGEN_SELECTIONDAG_H

/build/llvm-toolchain-snapshot-11~++20200226111113+80d7e473e0b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h

1//===- llvm/CodeGen/SelectionDAGNodes.h - SelectionDAG Nodes ----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file declares the SDNode class and derived classes, which are used to
10// represent the nodes and operations present in a SelectionDAG. These nodes
11// and operations are machine code level operations, with some similarities to
12// the GCC RTL representation.
13//
14// Clients should include the SelectionDAG.h file instead of this file directly.
15//
16//===----------------------------------------------------------------------===//
17
18#ifndef LLVM_CODEGEN_SELECTIONDAGNODES_H
19#define LLVM_CODEGEN_SELECTIONDAGNODES_H
20
21#include "llvm/ADT/APFloat.h"
22#include "llvm/ADT/ArrayRef.h"
23#include "llvm/ADT/BitVector.h"
24#include "llvm/ADT/FoldingSet.h"
25#include "llvm/ADT/GraphTraits.h"
26#include "llvm/ADT/SmallPtrSet.h"
27#include "llvm/ADT/SmallVector.h"
28#include "llvm/ADT/ilist_node.h"
29#include "llvm/ADT/iterator.h"
30#include "llvm/ADT/iterator_range.h"
31#include "llvm/CodeGen/ISDOpcodes.h"
32#include "llvm/CodeGen/MachineMemOperand.h"
33#include "llvm/CodeGen/ValueTypes.h"
34#include "llvm/IR/Constants.h"
35#include "llvm/IR/DebugLoc.h"
36#include "llvm/IR/Instruction.h"
37#include "llvm/IR/Instructions.h"
38#include "llvm/IR/Metadata.h"
39#include "llvm/IR/Operator.h"
40#include "llvm/Support/AlignOf.h"
41#include "llvm/Support/AtomicOrdering.h"
42#include "llvm/Support/Casting.h"
43#include "llvm/Support/ErrorHandling.h"
44#include "llvm/Support/MachineValueType.h"
45#include "llvm/Support/TypeSize.h"
46#include <algorithm>
47#include <cassert>
48#include <climits>
49#include <cstddef>
50#include <cstdint>
51#include <cstring>
52#include <iterator>
53#include <string>
54#include <tuple>
55
56namespace llvm {
57
58class APInt;
59class Constant;
60template <typename T> struct DenseMapInfo;
61class GlobalValue;
62class MachineBasicBlock;
63class MachineConstantPoolValue;
64class MCSymbol;
65class raw_ostream;
66class SDNode;
67class SelectionDAG;
68class Type;
69class Value;
70
71void checkForCycles(const SDNode *N, const SelectionDAG *DAG = nullptr,
72 bool force = false);
73
74/// This represents a list of ValueType's that has been intern'd by
75/// a SelectionDAG. Instances of this simple value class are returned by
76/// SelectionDAG::getVTList(...).
77///
78struct SDVTList {
79 const EVT *VTs;
80 unsigned int NumVTs;
81};
82
83namespace ISD {
84
85 /// Node predicates
86
87 /// If N is a BUILD_VECTOR node whose elements are all the same constant or
88 /// undefined, return true and return the constant value in \p SplatValue.
89 bool isConstantSplatVector(const SDNode *N, APInt &SplatValue);
90
91 /// Return true if the specified node is a BUILD_VECTOR where all of the
92 /// elements are ~0 or undef.
93 bool isBuildVectorAllOnes(const SDNode *N);
94
95 /// Return true if the specified node is a BUILD_VECTOR where all of the
96 /// elements are 0 or undef.
97 bool isBuildVectorAllZeros(const SDNode *N);
98
99 /// Return true if the specified node is a BUILD_VECTOR node of all
100 /// ConstantSDNode or undef.
101 bool isBuildVectorOfConstantSDNodes(const SDNode *N);
102
103 /// Return true if the specified node is a BUILD_VECTOR node of all
104 /// ConstantFPSDNode or undef.
105 bool isBuildVectorOfConstantFPSDNodes(const SDNode *N);
106
107 /// Return true if the node has at least one operand and all operands of the
108 /// specified node are ISD::UNDEF.
109 bool allOperandsUndef(const SDNode *N);
110
111} // end namespace ISD
112
113//===----------------------------------------------------------------------===//
114/// Unlike LLVM values, Selection DAG nodes may return multiple
115/// values as the result of a computation. Many nodes return multiple values,
116/// from loads (which define a token and a return value) to ADDC (which returns
117/// a result and a carry value), to calls (which may return an arbitrary number
118/// of values).
119///
120/// As such, each use of a SelectionDAG computation must indicate the node that
121/// computes it as well as which return value to use from that node. This pair
122/// of information is represented with the SDValue value type.
123///
124class SDValue {
125 friend struct DenseMapInfo<SDValue>;
126
127 SDNode *Node = nullptr; // The node defining the value we are using.
128 unsigned ResNo = 0; // Which return value of the node we are using.
129
130public:
131 SDValue() = default;
132 SDValue(SDNode *node, unsigned resno);
133
134 /// get the index which selects a specific result in the SDNode
135 unsigned getResNo() const { return ResNo; }
136
137 /// get the SDNode which holds the desired result
138 SDNode *getNode() const { return Node; }
139
140 /// set the SDNode
141 void setNode(SDNode *N) { Node = N; }
142
143 inline SDNode *operator->() const { return Node; }
144
145 bool operator==(const SDValue &O) const {
146 return Node == O.Node && ResNo == O.ResNo;
147 }
148 bool operator!=(const SDValue &O) const {
149 return !operator==(O);
150 }
151 bool operator<(const SDValue &O) const {
152 return std::tie(Node, ResNo) < std::tie(O.Node, O.ResNo);
153 }
154 explicit operator bool() const {
155 return Node != nullptr;
156 }
157
158 SDValue getValue(unsigned R) const {
159 return SDValue(Node, R);
160 }
161
162 /// Return true if this node is an operand of N.
163 bool isOperandOf(const SDNode *N) const;
164
165 /// Return the ValueType of the referenced return value.
166 inline EVT getValueType() const;
167
168 /// Return the simple ValueType of the referenced return value.
169 MVT getSimpleValueType() const {
170 return getValueType().getSimpleVT();
171 }
172
173 /// Returns the size of the value in bits.
174 ///
175 /// If the value type is a scalable vector type, the scalable property will
176 /// be set and the runtime size will be a positive integer multiple of the
177 /// base size.
178 TypeSize getValueSizeInBits() const {
179 return getValueType().getSizeInBits();
180 }
181
182 TypeSize getScalarValueSizeInBits() const {
183 return getValueType().getScalarType().getSizeInBits();
184 }
185
186 // Forwarding methods - These forward to the corresponding methods in SDNode.
187 inline unsigned getOpcode() const;
188 inline unsigned getNumOperands() const;
189 inline const SDValue &getOperand(unsigned i) const;
190 inline uint64_t getConstantOperandVal(unsigned i) const;
191 inline const APInt &getConstantOperandAPInt(unsigned i) const;
192 inline bool isTargetMemoryOpcode() const;
193 inline bool isTargetOpcode() const;
194 inline bool isMachineOpcode() const;
195 inline bool isUndef() const;
196 inline unsigned getMachineOpcode() const;
197 inline const DebugLoc &getDebugLoc() const;
198 inline void dump() const;
199 inline void dump(const SelectionDAG *G) const;
200 inline void dumpr() const;
201 inline void dumpr(const SelectionDAG *G) const;
202
203 /// Return true if this operand (which must be a chain) reaches the
204 /// specified operand without crossing any side-effecting instructions.
205 /// In practice, this looks through token factors and non-volatile loads.
206 /// In order to remain efficient, this only
207 /// looks a couple of nodes in; it does not do an exhaustive search.
208 bool reachesChainWithoutSideEffects(SDValue Dest,
209 unsigned Depth = 2) const;
210
211 /// Return true if there are no nodes using value ResNo of Node.
212 inline bool use_empty() const;
213
214 /// Return true if there is exactly one node using value ResNo of Node.
215 inline bool hasOneUse() const;
216};
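Illustrative aside (not part of the header): an SDValue is simply the (node, result number) pair that client code passes around. For a load node, result 0 is the loaded data and result 1 is the output chain:

  // L is assumed to be an SDNode* produced for a load.
  SDValue Loaded(L, 0);   // the data value defined by the load
  SDValue Chain(L, 1);    // the chain result of the same node
  if (Loaded.hasOneUse()) {
    // exactly one user of the loaded data; the chain may still have others
  }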
217
218template<> struct DenseMapInfo<SDValue> {
219 static inline SDValue getEmptyKey() {
220 SDValue V;
221 V.ResNo = -1U;
222 return V;
223 }
224
225 static inline SDValue getTombstoneKey() {
226 SDValue V;
227 V.ResNo = -2U;
228 return V;
229 }
230
231 static unsigned getHashValue(const SDValue &Val) {
232 return ((unsigned)((uintptr_t)Val.getNode() >> 4) ^
233 (unsigned)((uintptr_t)Val.getNode() >> 9)) + Val.getResNo();
234 }
235
236 static bool isEqual(const SDValue &LHS, const SDValue &RHS) {
237 return LHS == RHS;
238 }
239};
240
241/// Allow casting operators to work directly on
242/// SDValues as if they were SDNode*'s.
243template<> struct simplify_type<SDValue> {
244 using SimpleType = SDNode *;
245
246 static SimpleType getSimplifiedValue(SDValue &Val) {
247 return Val.getNode();
248 }
249};
250template<> struct simplify_type<const SDValue> {
251 using SimpleType = /*const*/ SDNode *;
252
253 static SimpleType getSimplifiedValue(const SDValue &Val) {
254 return Val.getNode();
255 }
256};
257
258/// Represents a use of a SDNode. This class holds an SDValue,
259/// which records the SDNode being used and the result number, a
260/// pointer to the SDNode using the value, and Next and Prev pointers,
261/// which link together all the uses of an SDNode.
262///
263class SDUse {
264 /// Val - The value being used.
265 SDValue Val;
266 /// User - The user of this value.
267 SDNode *User = nullptr;
268 /// Prev, Next - Pointers to the uses list of the SDNode referred by
269 /// this operand.
270 SDUse **Prev = nullptr;
271 SDUse *Next = nullptr;
272
273public:
274 SDUse() = default;
275 SDUse(const SDUse &U) = delete;
276 SDUse &operator=(const SDUse &) = delete;
277
278 /// Normally SDUse will just implicitly convert to an SDValue that it holds.
279 operator const SDValue&() const { return Val; }
280
281 /// If implicit conversion to SDValue doesn't work, the get() method returns
282 /// the SDValue.
283 const SDValue &get() const { return Val; }
284
285 /// This returns the SDNode that contains this Use.
286 SDNode *getUser() { return User; }
287
288 /// Get the next SDUse in the use list.
289 SDUse *getNext() const { return Next; }
290
291 /// Convenience function for get().getNode().
292 SDNode *getNode() const { return Val.getNode(); }
293 /// Convenience function for get().getResNo().
294 unsigned getResNo() const { return Val.getResNo(); }
295 /// Convenience function for get().getValueType().
296 EVT getValueType() const { return Val.getValueType(); }
297
298 /// Convenience function for get().operator==
299 bool operator==(const SDValue &V) const {
300 return Val == V;
301 }
302
303 /// Convenience function for get().operator!=
304 bool operator!=(const SDValue &V) const {
305 return Val != V;
306 }
307
308 /// Convenience function for get().operator<
309 bool operator<(const SDValue &V) const {
310 return Val < V;
311 }
312
313private:
314 friend class SelectionDAG;
315 friend class SDNode;
316 // TODO: unfriend HandleSDNode once we fix its operand handling.
317 friend class HandleSDNode;
318
319 void setUser(SDNode *p) { User = p; }
320
321 /// Remove this use from its existing use list, assign it the
322 /// given value, and add it to the new value's node's use list.
323 inline void set(const SDValue &V);
324 /// Like set, but only supports initializing a newly-allocated
325 /// SDUse with a non-null value.
326 inline void setInitial(const SDValue &V);
327 /// Like set, but only sets the Node portion of the value,
328 /// leaving the ResNo portion unmodified.
329 inline void setNode(SDNode *N);
330
331 void addToList(SDUse **List) {
332 Next = *List;
333 if (Next) Next->Prev = &Next;
334 Prev = List;
335 *List = this;
336 }
337
338 void removeFromList() {
339 *Prev = Next;
340 if (Next) Next->Prev = Prev;
341 }
342};
343
344/// simplify_type specializations - Allow casting operators to work directly on
345/// SDValues as if they were SDNode*'s.
346template<> struct simplify_type<SDUse> {
347 using SimpleType = SDNode *;
348
349 static SimpleType getSimplifiedValue(SDUse &Val) {
350 return Val.getNode();
351 }
352};
353
354/// These are IR-level optimization flags that may be propagated to SDNodes.
355/// TODO: This data structure should be shared by the IR optimizer and
356/// the backend.
357struct SDNodeFlags {
358private:
359 // This bit is used to determine if the flags are in a defined state.
360 // Flag bits can only be masked out during intersection if the masking flags
361 // are defined.
362 bool AnyDefined : 1;
363
364 bool NoUnsignedWrap : 1;
365 bool NoSignedWrap : 1;
366 bool Exact : 1;
367 bool NoNaNs : 1;
368 bool NoInfs : 1;
369 bool NoSignedZeros : 1;
370 bool AllowReciprocal : 1;
371 bool VectorReduction : 1;
372 bool AllowContract : 1;
373 bool ApproximateFuncs : 1;
374 bool AllowReassociation : 1;
375
376 // We assume instructions do not raise floating-point exceptions by default,
377 // and only those marked explicitly may do so. We could choose to represent
378// this via a positive "FPExcept" flag like on the MI level, but having a
379 // negative "NoFPExcept" flag here (that defaults to true) makes the flag
380 // intersection logic more straightforward.
381 bool NoFPExcept : 1;
382
383public:
384 /// Default constructor turns off all optimization flags.
385 SDNodeFlags()
386 : AnyDefined(false), NoUnsignedWrap(false), NoSignedWrap(false),
387 Exact(false), NoNaNs(false), NoInfs(false),
388 NoSignedZeros(false), AllowReciprocal(false), VectorReduction(false),
389 AllowContract(false), ApproximateFuncs(false),
390 AllowReassociation(false), NoFPExcept(false) {}
391
392 /// Propagate the fast-math-flags from an IR FPMathOperator.
393 void copyFMF(const FPMathOperator &FPMO) {
394 setNoNaNs(FPMO.hasNoNaNs());
395 setNoInfs(FPMO.hasNoInfs());
396 setNoSignedZeros(FPMO.hasNoSignedZeros());
397 setAllowReciprocal(FPMO.hasAllowReciprocal());
398 setAllowContract(FPMO.hasAllowContract());
399 setApproximateFuncs(FPMO.hasApproxFunc());
400 setAllowReassociation(FPMO.hasAllowReassoc());
401 }
402
403 /// Sets the state of the flags to the defined state.
404 void setDefined() { AnyDefined = true; }
405 /// Returns true if the flags are in a defined state.
406 bool isDefined() const { return AnyDefined; }
407
408 // These are mutators for each flag.
409 void setNoUnsignedWrap(bool b) {
410 setDefined();
411 NoUnsignedWrap = b;
412 }
413 void setNoSignedWrap(bool b) {
414 setDefined();
415 NoSignedWrap = b;
416 }
417 void setExact(bool b) {
418 setDefined();
419 Exact = b;
420 }
421 void setNoNaNs(bool b) {
422 setDefined();
423 NoNaNs = b;
424 }
425 void setNoInfs(bool b) {
426 setDefined();
427 NoInfs = b;
428 }
429 void setNoSignedZeros(bool b) {
430 setDefined();
431 NoSignedZeros = b;
432 }
433 void setAllowReciprocal(bool b) {
434 setDefined();
435 AllowReciprocal = b;
436 }
437 void setVectorReduction(bool b) {
438 setDefined();
439 VectorReduction = b;
440 }
441 void setAllowContract(bool b) {
442 setDefined();
443 AllowContract = b;
444 }
445 void setApproximateFuncs(bool b) {
446 setDefined();
447 ApproximateFuncs = b;
448 }
449 void setAllowReassociation(bool b) {
450 setDefined();
451 AllowReassociation = b;
452 }
453 void setNoFPExcept(bool b) {
454 setDefined();
455 NoFPExcept = b;
456 }
457
458 // These are accessors for each flag.
459 bool hasNoUnsignedWrap() const { return NoUnsignedWrap; }
460 bool hasNoSignedWrap() const { return NoSignedWrap; }
461 bool hasExact() const { return Exact; }
462 bool hasNoNaNs() const { return NoNaNs; }
463 bool hasNoInfs() const { return NoInfs; }
464 bool hasNoSignedZeros() const { return NoSignedZeros; }
465 bool hasAllowReciprocal() const { return AllowReciprocal; }
466 bool hasVectorReduction() const { return VectorReduction; }
467 bool hasAllowContract() const { return AllowContract; }
468 bool hasApproximateFuncs() const { return ApproximateFuncs; }
469 bool hasAllowReassociation() const { return AllowReassociation; }
470 bool hasNoFPExcept() const { return NoFPExcept; }
471
472 /// Clear any flags in this flag set that aren't also set in Flags.
473 /// If the given Flags are undefined then don't do anything.
474 void intersectWith(const SDNodeFlags Flags) {
475 if (!Flags.isDefined())
476 return;
477 NoUnsignedWrap &= Flags.NoUnsignedWrap;
478 NoSignedWrap &= Flags.NoSignedWrap;
479 Exact &= Flags.Exact;
480 NoNaNs &= Flags.NoNaNs;
481 NoInfs &= Flags.NoInfs;
482 NoSignedZeros &= Flags.NoSignedZeros;
483 AllowReciprocal &= Flags.AllowReciprocal;
484 VectorReduction &= Flags.VectorReduction;
485 AllowContract &= Flags.AllowContract;
486 ApproximateFuncs &= Flags.ApproximateFuncs;
487 AllowReassociation &= Flags.AllowReassociation;
488 NoFPExcept &= Flags.NoFPExcept;
489 }
490};
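A short usage sketch (I is assumed to be an IR instruction known to be an FPMathOperator, and N an existing SDNode*): flags are typically seeded from the IR and then narrowed when equivalent nodes are merged:

  SDNodeFlags Flags;
  Flags.copyFMF(cast<FPMathOperator>(*I));  // propagate IR fast-math flags
  Flags.setNoFPExcept(true);                // mark the op as non-trapping
  // Keep only the flags that both the new and the existing node agree on:
  N->intersectFlagsWith(Flags);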
491
492/// Represents one node in the SelectionDAG.
493///
494class SDNode : public FoldingSetNode, public ilist_node<SDNode> {
495private:
496 /// The operation that this node performs.
497 int16_t NodeType;
498
499protected:
500 // We define a set of mini-helper classes to help us interpret the bits in our
501 // SubclassData. These are designed to fit within a uint16_t so they pack
502 // with NodeType.
503
504#if defined(_AIX) && (!defined(__GNUC__) || defined(__ibmxl__))
505// Except for GCC; by default, AIX compilers store bit-fields in 4-byte words
506// and give the `pack` pragma push semantics.
507#define BEGIN_TWO_BYTE_PACK() _Pragma("pack(2)")
508#define END_TWO_BYTE_PACK() _Pragma("pack(pop)")
509#else
510#define BEGIN_TWO_BYTE_PACK()
511#define END_TWO_BYTE_PACK()
512#endif
513
514BEGIN_TWO_BYTE_PACK()
515 class SDNodeBitfields {
516 friend class SDNode;
517 friend class MemIntrinsicSDNode;
518 friend class MemSDNode;
519 friend class SelectionDAG;
520
521 uint16_t HasDebugValue : 1;
522 uint16_t IsMemIntrinsic : 1;
523 uint16_t IsDivergent : 1;
524 };
525 enum { NumSDNodeBits = 3 };
526
527 class ConstantSDNodeBitfields {
528 friend class ConstantSDNode;
529
530 uint16_t : NumSDNodeBits;
531
532 uint16_t IsOpaque : 1;
533 };
534
535 class MemSDNodeBitfields {
536 friend class MemSDNode;
537 friend class MemIntrinsicSDNode;
538 friend class AtomicSDNode;
539
540 uint16_t : NumSDNodeBits;
541
542 uint16_t IsVolatile : 1;
543 uint16_t IsNonTemporal : 1;
544 uint16_t IsDereferenceable : 1;
545 uint16_t IsInvariant : 1;
546 };
547 enum { NumMemSDNodeBits = NumSDNodeBits + 4 };
548
549 class LSBaseSDNodeBitfields {
550 friend class LSBaseSDNode;
551 friend class MaskedLoadStoreSDNode;
552 friend class MaskedGatherScatterSDNode;
553
554 uint16_t : NumMemSDNodeBits;
555
556 // This storage is shared between disparate class hierarchies to hold an
557 // enumeration specific to the class hierarchy in use.
558 // LSBaseSDNode => enum ISD::MemIndexedMode
559 // MaskedLoadStoreBaseSDNode => enum ISD::MemIndexedMode
560 // MaskedGatherScatterSDNode => enum ISD::MemIndexType
561 uint16_t AddressingMode : 3;
562 };
563 enum { NumLSBaseSDNodeBits = NumMemSDNodeBits + 3 };
564
565 class LoadSDNodeBitfields {
566 friend class LoadSDNode;
567 friend class MaskedLoadSDNode;
568
569 uint16_t : NumLSBaseSDNodeBits;
570
571 uint16_t ExtTy : 2; // enum ISD::LoadExtType
572 uint16_t IsExpanding : 1;
573 };
574
575 class StoreSDNodeBitfields {
576 friend class StoreSDNode;
577 friend class MaskedStoreSDNode;
578
579 uint16_t : NumLSBaseSDNodeBits;
580
581 uint16_t IsTruncating : 1;
582 uint16_t IsCompressing : 1;
583 };
584
585 union {
586 char RawSDNodeBits[sizeof(uint16_t)];
587 SDNodeBitfields SDNodeBits;
588 ConstantSDNodeBitfields ConstantSDNodeBits;
589 MemSDNodeBitfields MemSDNodeBits;
590 LSBaseSDNodeBitfields LSBaseSDNodeBits;
591 LoadSDNodeBitfields LoadSDNodeBits;
592 StoreSDNodeBitfields StoreSDNodeBits;
593 };
594END_TWO_BYTE_PACK()
595#undef BEGIN_TWO_BYTE_PACK
596#undef END_TWO_BYTE_PACK
597
598 // RawSDNodeBits must cover the entirety of the union. This means that all of
599 // the union's members must have size <= RawSDNodeBits. We write the RHS as
600 // "2" instead of sizeof(RawSDNodeBits) because MSVC can't handle the latter.
601 static_assert(sizeof(SDNodeBitfields) <= 2, "field too wide");
602 static_assert(sizeof(ConstantSDNodeBitfields) <= 2, "field too wide");
603 static_assert(sizeof(MemSDNodeBitfields) <= 2, "field too wide");
604 static_assert(sizeof(LSBaseSDNodeBitfields) <= 2, "field too wide");
605 static_assert(sizeof(LoadSDNodeBitfields) <= 2, "field too wide");
606 static_assert(sizeof(StoreSDNodeBitfields) <= 2, "field too wide");
607
608private:
609 friend class SelectionDAG;
610 // TODO: unfriend HandleSDNode once we fix its operand handling.
611 friend class HandleSDNode;
612
613 /// Unique id per SDNode in the DAG.
614 int NodeId = -1;
615
616 /// The values that are used by this operation.
617 SDUse *OperandList = nullptr;
618
619 /// The types of the values this node defines. SDNode's may
620 /// define multiple values simultaneously.
621 const EVT *ValueList;
622
623 /// List of uses for this SDNode.
624 SDUse *UseList = nullptr;
625
626 /// The number of entries in the Operand/Value list.
627 unsigned short NumOperands = 0;
628 unsigned short NumValues;
629
630 // The ordering of the SDNodes. It roughly corresponds to the ordering of the
631 // original LLVM instructions.
632 // This is used for turning off scheduling, because we'll forgo
633 // the normal scheduling algorithms and output the instructions according to
634 // this ordering.
635 unsigned IROrder;
636
637 /// Source line information.
638 DebugLoc debugLoc;
639
640 /// Return a pointer to the specified value type.
641 static const EVT *getValueTypeList(EVT VT);
642
643 SDNodeFlags Flags;
644
645public:
646 /// Unique and persistent id per SDNode in the DAG.
647 /// Used for debug printing.
648 uint16_t PersistentId;
649
650 //===--------------------------------------------------------------------===//
651 // Accessors
652 //
653
654 /// Return the SelectionDAG opcode value for this node. For
655 /// pre-isel nodes (those for which isMachineOpcode returns false), these
656 /// are the opcode values in the ISD and <target>ISD namespaces. For
657 /// post-isel opcodes, see getMachineOpcode.
658 unsigned getOpcode() const { return (unsigned short)NodeType; }
659
660 /// Test if this node has a target-specific opcode (in the
661 /// \<target\>ISD namespace).
662 bool isTargetOpcode() const { return NodeType >= ISD::BUILTIN_OP_END; }
663
664 /// Test if this node has a target-specific opcode that may raise
665 /// FP exceptions (in the \<target\>ISD namespace and greater than
666 /// FIRST_TARGET_STRICTFP_OPCODE). Note that all target memory
667 /// opcodes are currently automatically considered to possibly raise
668 /// FP exceptions as well.
669 bool isTargetStrictFPOpcode() const {
670 return NodeType >= ISD::FIRST_TARGET_STRICTFP_OPCODE;
671 }
672
673 /// Test if this node has a target-specific
674 /// memory-referencing opcode (in the \<target\>ISD namespace and
675 /// greater than FIRST_TARGET_MEMORY_OPCODE).
676 bool isTargetMemoryOpcode() const {
677 return NodeType >= ISD::FIRST_TARGET_MEMORY_OPCODE;
678 }
679
680 /// Return true if the node is an UNDEF value (its opcode is ISD::UNDEF).
681 bool isUndef() const { return NodeType == ISD::UNDEF; }
682
683 /// Test if this node is a memory intrinsic (with valid pointer information).
684 /// INTRINSIC_W_CHAIN and INTRINSIC_VOID nodes are sometimes created for
685 /// non-memory intrinsics (with chains) that are not really instances of
686 /// MemSDNode. For such nodes, we need some extra state to determine the
687 /// proper classof relationship.
688 bool isMemIntrinsic() const {
689 return (NodeType == ISD::INTRINSIC_W_CHAIN ||
690 NodeType == ISD::INTRINSIC_VOID) &&
691 SDNodeBits.IsMemIntrinsic;
692 }
693
694 /// Test if this node is a strict floating point pseudo-op.
695 bool isStrictFPOpcode() {
696 switch (NodeType) {
697 default:
698 return false;
699 case ISD::STRICT_FP16_TO_FP:
700 case ISD::STRICT_FP_TO_FP16:
701#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
702 case ISD::STRICT_##DAGN:
703#include "llvm/IR/ConstrainedOps.def"
704 return true;
705 }
706 }
707
708 /// Test if this node has a post-isel opcode, directly
709 /// corresponding to a MachineInstr opcode.
710 bool isMachineOpcode() const { return NodeType < 0; }
711
712 /// This may only be called if isMachineOpcode returns
713 /// true. It returns the MachineInstr opcode value that the node's opcode
714 /// corresponds to.
715 unsigned getMachineOpcode() const {
716    assert(isMachineOpcode() && "Not a MachineInstr opcode!");
717 return ~NodeType;
718 }
719
720 bool getHasDebugValue() const { return SDNodeBits.HasDebugValue; }
721 void setHasDebugValue(bool b) { SDNodeBits.HasDebugValue = b; }
722
723 bool isDivergent() const { return SDNodeBits.IsDivergent; }
724
725 /// Return true if there are no uses of this node.
726 bool use_empty() const { return UseList == nullptr; }
727
728 /// Return true if there is exactly one use of this node.
729 bool hasOneUse() const {
730 return !use_empty() && std::next(use_begin()) == use_end();
731 }
732
733 /// Return the number of uses of this node. This method takes
734 /// time proportional to the number of uses.
735 size_t use_size() const { return std::distance(use_begin(), use_end()); }
736
737 /// Return the unique node id.
738 int getNodeId() const { return NodeId; }
739
740 /// Set unique node id.
741 void setNodeId(int Id) { NodeId = Id; }
742
743 /// Return the node ordering.
744 unsigned getIROrder() const { return IROrder; }
745
746 /// Set the node ordering.
747 void setIROrder(unsigned Order) { IROrder = Order; }
748
749 /// Return the source location info.
750 const DebugLoc &getDebugLoc() const { return debugLoc; }
751
752 /// Set source location info. Try to avoid this, putting
753 /// it in the constructor is preferable.
754 void setDebugLoc(DebugLoc dl) { debugLoc = std::move(dl); }
755
756 /// This class provides iterator support for SDUse
757 /// operands that use a specific SDNode.
758 class use_iterator
759 : public std::iterator<std::forward_iterator_tag, SDUse, ptrdiff_t> {
760 friend class SDNode;
761
762 SDUse *Op = nullptr;
763
764 explicit use_iterator(SDUse *op) : Op(op) {}
765
766 public:
767 using reference = std::iterator<std::forward_iterator_tag,
768 SDUse, ptrdiff_t>::reference;
769 using pointer = std::iterator<std::forward_iterator_tag,
770 SDUse, ptrdiff_t>::pointer;
771
772 use_iterator() = default;
773 use_iterator(const use_iterator &I) : Op(I.Op) {}
774
775 bool operator==(const use_iterator &x) const {
776 return Op == x.Op;
777 }
778 bool operator!=(const use_iterator &x) const {
779 return !operator==(x);
780 }
781
782 /// Return true if this iterator is at the end of uses list.
783 bool atEnd() const { return Op == nullptr; }
784
785 // Iterator traversal: forward iteration only.
786 use_iterator &operator++() { // Preincrement
787      assert(Op && "Cannot increment end iterator!");
788 Op = Op->getNext();
789 return *this;
790 }
791
792 use_iterator operator++(int) { // Postincrement
793 use_iterator tmp = *this; ++*this; return tmp;
794 }
795
796 /// Retrieve a pointer to the current user node.
797 SDNode *operator*() const {
798      assert(Op && "Cannot dereference end iterator!");
799 return Op->getUser();
800 }
801
802 SDNode *operator->() const { return operator*(); }
803
804 SDUse &getUse() const { return *Op; }
805
806 /// Retrieve the operand # of this use in its user.
807 unsigned getOperandNo() const {
808      assert(Op && "Cannot dereference end iterator!");
809 return (unsigned)(Op - Op->getUser()->OperandList);
810 }
811 };
812
813 /// Provide iteration support to walk over all uses of an SDNode.
814 use_iterator use_begin() const {
815 return use_iterator(UseList);
816 }
817
818 static use_iterator use_end() { return use_iterator(nullptr); }
819
820 inline iterator_range<use_iterator> uses() {
821 return make_range(use_begin(), use_end());
822 }
823 inline iterator_range<use_iterator> uses() const {
824 return make_range(use_begin(), use_end());
825 }
826
827 /// Return true if there are exactly NUSES uses of the indicated value.
828 /// This method ignores uses of other values defined by this operation.
829 bool hasNUsesOfValue(unsigned NUses, unsigned Value) const;
830
831 /// Return true if there are any use of the indicated value.
832 /// This method ignores uses of other values defined by this operation.
833 bool hasAnyUseOfValue(unsigned Value) const;
834
835 /// Return true if this node is the only use of N.
836 bool isOnlyUserOf(const SDNode *N) const;
837
838 /// Return true if this node is an operand of N.
839 bool isOperandOf(const SDNode *N) const;
840
841 /// Return true if this node is a predecessor of N.
842 /// NOTE: Implemented on top of hasPredecessor and every bit as
843 /// expensive. Use carefully.
844 bool isPredecessorOf(const SDNode *N) const {
845 return N->hasPredecessor(this);
846 }
847
848 /// Return true if N is a predecessor of this node.
849 /// N is either an operand of this node, or can be reached by recursively
850 /// traversing up the operands.
851 /// NOTE: This is an expensive method. Use it carefully.
852 bool hasPredecessor(const SDNode *N) const;
853
854 /// Returns true if N is a predecessor of any node in Worklist. This
855 /// helper keeps the Visited and Worklist sets externally to allow union
856 /// searches to be performed in parallel, caching of results across
857 /// queries and incremental addition to Worklist. Stops early if N is
858 /// found but will resume. Remember to clear the Visited and Worklist sets
859 /// if the DAG changes. MaxSteps gives a maximum number of nodes to visit before
860 /// giving up. The TopologicalPrune flag signals that positive NodeIds are
861 /// topologically ordered (Operands have strictly smaller node id) and search
862 /// can be pruned leveraging this.
863 static bool hasPredecessorHelper(const SDNode *N,
864 SmallPtrSetImpl<const SDNode *> &Visited,
865 SmallVectorImpl<const SDNode *> &Worklist,
866 unsigned int MaxSteps = 0,
867 bool TopologicalPrune = false) {
868 SmallVector<const SDNode *, 8> DeferredNodes;
869 if (Visited.count(N))
870 return true;
871
872 // Node Id's are assigned in three places: As a topological
873 // ordering (> 0), during legalization (results in values set to
874 // 0), new nodes (set to -1). If N has a topological id then we
875 // know that all nodes with ids smaller than it cannot be
876 // successors and we need not check them. Filter out all nodes
877 // that can't be matched. We add them to the worklist before exit
878 // in case of multiple calls. Note that during selection the topological id
879 // may be violated if a node's predecessor is selected before it. We mark
880 // this at selection by negating the id of unselected successors and
881 // restricting topological pruning to positive ids.
882
883 int NId = N->getNodeId();
884 // If we Invalidated the Id, reconstruct original NId.
885 if (NId < -1)
886 NId = -(NId + 1);
887
888 bool Found = false;
889 while (!Worklist.empty()) {
890 const SDNode *M = Worklist.pop_back_val();
891 int MId = M->getNodeId();
892 if (TopologicalPrune && M->getOpcode() != ISD::TokenFactor && (NId > 0) &&
893 (MId > 0) && (MId < NId)) {
894 DeferredNodes.push_back(M);
895 continue;
896 }
897 for (const SDValue &OpV : M->op_values()) {
898 SDNode *Op = OpV.getNode();
899 if (Visited.insert(Op).second)
900 Worklist.push_back(Op);
901 if (Op == N)
902 Found = true;
903 }
904 if (Found)
905 break;
906 if (MaxSteps != 0 && Visited.size() >= MaxSteps)
907 break;
908 }
909 // Push deferred nodes back on worklist.
910 Worklist.append(DeferredNodes.begin(), DeferredNodes.end());
911 // If we bailed early, conservatively return found.
912 if (MaxSteps != 0 && Visited.size() >= MaxSteps)
913 return true;
914 return Found;
915 }
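Illustrative call pattern (Root and N are assumed names): the caller owns the Visited/Worklist storage so that repeated queries over an unchanged DAG can share work:

  SmallPtrSet<const SDNode *, 16> Visited;
  SmallVector<const SDNode *, 8> Worklist;
  Worklist.push_back(Root);
  // True if N is reachable by walking up Root's operands.
  bool Reaches = SDNode::hasPredecessorHelper(N, Visited, Worklist);
  // Visited/Worklist may be reused for further queries, but must be cleared
  // whenever the DAG changes.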
916
917 /// Return true if all the users of N are contained in Nodes.
918 /// NOTE: Requires at least one match, but doesn't require them all.
919 static bool areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N);
920
921 /// Return the number of values used by this operation.
922 unsigned getNumOperands() const { return NumOperands; }
923
924 /// Return the maximum number of operands that a SDNode can hold.
925 static constexpr size_t getMaxNumOperands() {
926 return std::numeric_limits<decltype(SDNode::NumOperands)>::max();
927 }
928
929 /// Helper method returns the integer value of a ConstantSDNode operand.
930 inline uint64_t getConstantOperandVal(unsigned Num) const;
931
932 /// Helper method returns the APInt of a ConstantSDNode operand.
933 inline const APInt &getConstantOperandAPInt(unsigned Num) const;
934
935 const SDValue &getOperand(unsigned Num) const {
936    assert(Num < NumOperands && "Invalid child # of SDNode!");
937 return OperandList[Num];
938 }
939
940 using op_iterator = SDUse *;
941
942 op_iterator op_begin() const { return OperandList; }
943 op_iterator op_end() const { return OperandList+NumOperands; }
944 ArrayRef<SDUse> ops() const { return makeArrayRef(op_begin(), op_end()); }
945
946 /// Iterator for directly iterating over the operand SDValue's.
947 struct value_op_iterator
948 : iterator_adaptor_base<value_op_iterator, op_iterator,
949 std::random_access_iterator_tag, SDValue,
950 ptrdiff_t, value_op_iterator *,
951 value_op_iterator *> {
952 explicit value_op_iterator(SDUse *U = nullptr)
953 : iterator_adaptor_base(U) {}
954
955 const SDValue &operator*() const { return I->get(); }
956 };
957
958 iterator_range<value_op_iterator> op_values() const {
959 return make_range(value_op_iterator(op_begin()),
960 value_op_iterator(op_end()));
961 }
962
963 SDVTList getVTList() const {
964 SDVTList X = { ValueList, NumValues };
965 return X;
966 }
967
968 /// If this node has a glue operand, return the node
969 /// to which the glue operand points. Otherwise return NULL.
970 SDNode *getGluedNode() const {
971 if (getNumOperands() != 0 &&
972 getOperand(getNumOperands()-1).getValueType() == MVT::Glue)
973 return getOperand(getNumOperands()-1).getNode();
974 return nullptr;
975 }
976
977 /// If this node has a glue value with a user, return
978 /// the user (there is at most one). Otherwise return NULL.
979 SDNode *getGluedUser() const {
980 for (use_iterator UI = use_begin(), UE = use_end(); UI != UE; ++UI)
981 if (UI.getUse().get().getValueType() == MVT::Glue)
982 return *UI;
983 return nullptr;
984 }
985
986 const SDNodeFlags getFlags() const { return Flags; }
987 void setFlags(SDNodeFlags NewFlags) { Flags = NewFlags; }
988
989 /// Clear any flags in this node that aren't also set in Flags.
990 /// If Flags is not in a defined state then this has no effect.
991 void intersectFlagsWith(const SDNodeFlags Flags);
992
993 /// Return the number of values defined/returned by this operator.
994 unsigned getNumValues() const { return NumValues; }
995
996 /// Return the type of a specified result.
997 EVT getValueType(unsigned ResNo) const {
998    assert(ResNo < NumValues && "Illegal result number!");
999 return ValueList[ResNo];
1000 }
1001
1002 /// Return the type of a specified result as a simple type.
1003 MVT getSimpleValueType(unsigned ResNo) const {
1004 return getValueType(ResNo).getSimpleVT();
1005 }
1006
1007 /// Returns MVT::getSizeInBits(getValueType(ResNo)).
1008 ///
1009 /// If the value type is a scalable vector type, the scalable property will
1010 /// be set and the runtime size will be a positive integer multiple of the
1011 /// base size.
1012 TypeSize getValueSizeInBits(unsigned ResNo) const {
1013 return getValueType(ResNo).getSizeInBits();
1014 }
1015
1016 using value_iterator = const EVT *;
1017
1018 value_iterator value_begin() const { return ValueList; }
1019 value_iterator value_end() const { return ValueList+NumValues; }
1020
1021 /// Return the opcode of this operation for printing.
1022 std::string getOperationName(const SelectionDAG *G = nullptr) const;
1023 static const char* getIndexedModeName(ISD::MemIndexedMode AM);
1024 void print_types(raw_ostream &OS, const SelectionDAG *G) const;
1025 void print_details(raw_ostream &OS, const SelectionDAG *G) const;
1026 void print(raw_ostream &OS, const SelectionDAG *G = nullptr) const;
1027 void printr(raw_ostream &OS, const SelectionDAG *G = nullptr) const;
1028
1029 /// Print a SelectionDAG node and all children down to
1030 /// the leaves. The given SelectionDAG allows target-specific nodes
1031 /// to be printed in human-readable form. Unlike printr, this will
1032 /// print the whole DAG, including children that appear multiple
1033 /// times.
1034 ///
1035 void printrFull(raw_ostream &O, const SelectionDAG *G = nullptr) const;
1036
1037 /// Print a SelectionDAG node and children up to
1038 /// depth "depth." The given SelectionDAG allows target-specific
1039 /// nodes to be printed in human-readable form. Unlike printr, this
1040 /// will print children that appear multiple times wherever they are
1041 /// used.
1042 ///
1043 void printrWithDepth(raw_ostream &O, const SelectionDAG *G = nullptr,
1044 unsigned depth = 100) const;
1045
1046 /// Dump this node, for debugging.
1047 void dump() const;
1048
1049 /// Dump (recursively) this node and its use-def subgraph.
1050 void dumpr() const;
1051
1052 /// Dump this node, for debugging.
1053 /// The given SelectionDAG allows target-specific nodes to be printed
1054 /// in human-readable form.
1055 void dump(const SelectionDAG *G) const;
1056
1057 /// Dump (recursively) this node and its use-def subgraph.
1058 /// The given SelectionDAG allows target-specific nodes to be printed
1059 /// in human-readable form.
1060 void dumpr(const SelectionDAG *G) const;
1061
1062 /// printrFull to dbgs(). The given SelectionDAG allows
1063 /// target-specific nodes to be printed in human-readable form.
1064 /// Unlike dumpr, this will print the whole DAG, including children
1065 /// that appear multiple times.
1066 void dumprFull(const SelectionDAG *G = nullptr) const;
1067
1068 /// printrWithDepth to dbgs(). The given
1069 /// SelectionDAG allows target-specific nodes to be printed in
1070 /// human-readable form. Unlike dumpr, this will print children
1071 /// that appear multiple times wherever they are used.
1072 ///
1073 void dumprWithDepth(const SelectionDAG *G = nullptr,
1074 unsigned depth = 100) const;
1075
1076 /// Gather unique data for the node.
1077 void Profile(FoldingSetNodeID &ID) const;
1078
1079 /// This method should only be used by the SDUse class.
1080 void addUse(SDUse &U) { U.addToList(&UseList); }
1081
1082protected:
1083 static SDVTList getSDVTList(EVT VT) {
1084 SDVTList Ret = { getValueTypeList(VT), 1 };
1085 return Ret;
1086 }
1087
1088 /// Create an SDNode.
1089 ///
1090 /// SDNodes are created without any operands, and never own the operand
1091 /// storage. To add operands, see SelectionDAG::createOperands.
1092 SDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs)
1093 : NodeType(Opc), ValueList(VTs.VTs), NumValues(VTs.NumVTs),
1094 IROrder(Order), debugLoc(std::move(dl)) {
1095 memset(&RawSDNodeBits, 0, sizeof(RawSDNodeBits));
1096    assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");
1097    assert(NumValues == VTs.NumVTs &&
1098           "NumValues wasn't wide enough for its operands!");
1099 }
1100
1101 /// Release the operands and set this node to have zero operands.
1102 void DropOperands();
1103};
1104
1105/// Wrapper class for IR location info (IR ordering and DebugLoc) to be passed
1106/// into SDNode creation functions.
1107/// When an SDNode is created from the DAGBuilder, the DebugLoc is extracted
1108/// from the original Instruction, and IROrder is the ordinal position of
1109/// the instruction.
1110/// When an SDNode is created after the DAG is being built, both DebugLoc and
1111/// the IROrder are propagated from the original SDNode.
1112/// So SDLoc class provides two constructors besides the default one, one to
1113/// be used by the DAGBuilder, the other to be used by others.
1114class SDLoc {
1115private:
1116 DebugLoc DL;
1117 int IROrder = 0;
1118
1119public:
1120 SDLoc() = default;
1121 SDLoc(const SDNode *N) : DL(N->getDebugLoc()), IROrder(N->getIROrder()) {}
1122 SDLoc(const SDValue V) : SDLoc(V.getNode()) {}
1123 SDLoc(const Instruction *I, int Order) : IROrder(Order) {
1124    assert(Order >= 0 && "bad IROrder");
1125 if (I)
1126 DL = I->getDebugLoc();
1127 }
1128
1129 unsigned getIROrder() const { return IROrder; }
1130 const DebugLoc &getDebugLoc() const { return DL; }
1131};
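Typical construction (illustrative): lowering code derives the SDLoc from the value it is rewriting so the DebugLoc and IR order carry over to any new nodes:

  // Op is an existing SDValue being lowered; DAG is the SelectionDAG.
  SDLoc DL(Op);  // copies Op's DebugLoc and IROrder
  SDValue Zero = DAG.getConstant(0, DL, Op.getValueType());
  SDValue Add  = DAG.getNode(ISD::ADD, DL, Op.getValueType(), Op, Zero);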
1132
1133// Define inline functions from the SDValue class.
1134
1135inline SDValue::SDValue(SDNode *node, unsigned resno)
1136 : Node(node), ResNo(resno) {
1137 // Explicitly check for !ResNo to avoid use-after-free, because there are
1138 // callers that use SDValue(N, 0) with a deleted N to indicate successful
1139 // combines.
1140  assert((!Node || !ResNo || ResNo < Node->getNumValues()) &&
1141         "Invalid result number for the given node!");
1142  assert(ResNo < -2U && "Cannot use result numbers reserved for DenseMaps.");
1143}
1144
1145inline unsigned SDValue::getOpcode() const {
1146 return Node->getOpcode();
20) Called C++ object pointer is null
1147}
1148
1149inline EVT SDValue::getValueType() const {
1150 return Node->getValueType(ResNo);
1151}
1152
1153inline unsigned SDValue::getNumOperands() const {
1154 return Node->getNumOperands();
1155}
1156
1157inline const SDValue &SDValue::getOperand(unsigned i) const {
1158 return Node->getOperand(i);
1159}
1160
1161inline uint64_t SDValue::getConstantOperandVal(unsigned i) const {
1162 return Node->getConstantOperandVal(i);
1163}
1164
1165inline const APInt &SDValue::getConstantOperandAPInt(unsigned i) const {
1166 return Node->getConstantOperandAPInt(i);
1167}
1168
1169inline bool SDValue::isTargetOpcode() const {
1170 return Node->isTargetOpcode();
1171}
1172
1173inline bool SDValue::isTargetMemoryOpcode() const {
1174 return Node->isTargetMemoryOpcode();
1175}
1176
1177inline bool SDValue::isMachineOpcode() const {
1178 return Node->isMachineOpcode();
1179}
1180
1181inline unsigned SDValue::getMachineOpcode() const {
1182 return Node->getMachineOpcode();
1183}
1184
1185inline bool SDValue::isUndef() const {
1186 return Node->isUndef();
1187}
1188
1189inline bool SDValue::use_empty() const {
1190 return !Node->hasAnyUseOfValue(ResNo);
1191}
1192
1193inline bool SDValue::hasOneUse() const {
1194 return Node->hasNUsesOfValue(1, ResNo);
1195}
1196
1197inline const DebugLoc &SDValue::getDebugLoc() const {
1198 return Node->getDebugLoc();
1199}
1200
1201inline void SDValue::dump() const {
1202 return Node->dump();
1203}
1204
1205inline void SDValue::dump(const SelectionDAG *G) const {
1206 return Node->dump(G);
1207}
1208
1209inline void SDValue::dumpr() const {
1210 return Node->dumpr();
1211}
1212
1213inline void SDValue::dumpr(const SelectionDAG *G) const {
1214 return Node->dumpr(G);
1215}
1216
1217// Define inline functions from the SDUse class.
1218
1219inline void SDUse::set(const SDValue &V) {
1220 if (Val.getNode()) removeFromList();
1221 Val = V;
1222 if (V.getNode()) V.getNode()->addUse(*this);
1223}
1224
1225inline void SDUse::setInitial(const SDValue &V) {
1226 Val = V;
1227 V.getNode()->addUse(*this);
1228}
1229
1230inline void SDUse::setNode(SDNode *N) {
1231 if (Val.getNode()) removeFromList();
1232 Val.setNode(N);
1233 if (N) N->addUse(*this);
1234}
1235
1236/// This class is used to form a handle around another node that
1237/// is persistent and is updated across invocations of replaceAllUsesWith on its
1238/// operand. This node should be directly created by end-users and not added to
1239/// the AllNodes list.
1240class HandleSDNode : public SDNode {
1241 SDUse Op;
1242
1243public:
1244 explicit HandleSDNode(SDValue X)
1245 : SDNode(ISD::HANDLENODE, 0, DebugLoc(), getSDVTList(MVT::Other)) {
1246 // HandleSDNodes are never inserted into the DAG, so they won't be
1247 // auto-numbered. Use ID 65535 as a sentinel.
1248 PersistentId = 0xffff;
1249
1250 // Manually set up the operand list. This node type is special in that it's
1251 // always stack allocated and SelectionDAG does not manage its operands.
1252 // TODO: This should either (a) not be in the SDNode hierarchy, or (b) not
1253 // be so special.
1254 Op.setUser(this);
1255 Op.setInitial(X);
1256 NumOperands = 1;
1257 OperandList = &Op;
1258 }
1259 ~HandleSDNode();
1260
1261 const SDValue &getValue() const { return Op; }
1262};
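A sketch of the stack-allocated usage pattern the comment above describes; the wrapper function is hypothetical, but HandleSDNode and ReplaceAllUsesWith are the real APIs involved.

#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"

using namespace llvm;

// Hypothetical helper: keep Tracked alive and up to date across a
// ReplaceAllUsesWith that may CSE or delete the node it originally pointed at.
static SDValue replaceAndTrack(SelectionDAG &DAG, SDValue From, SDValue To,
                               SDValue Tracked) {
  HandleSDNode Handle(Tracked);     // stack-allocated, never put in AllNodes
  DAG.ReplaceAllUsesWith(From, To); // may invalidate Tracked's original node
  return Handle.getValue();         // the surviving, possibly updated value
}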
1263
1264class AddrSpaceCastSDNode : public SDNode {
1265private:
1266 unsigned SrcAddrSpace;
1267 unsigned DestAddrSpace;
1268
1269public:
1270 AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl, EVT VT,
1271 unsigned SrcAS, unsigned DestAS);
1272
1273 unsigned getSrcAddressSpace() const { return SrcAddrSpace; }
1274 unsigned getDestAddressSpace() const { return DestAddrSpace; }
1275
1276 static bool classof(const SDNode *N) {
1277 return N->getOpcode() == ISD::ADDRSPACECAST;
1278 }
1279};
1280
1281/// This is an abstract virtual class for memory operations.
1282class MemSDNode : public SDNode {
1283private:
1284 // VT of in-memory value.
1285 EVT MemoryVT;
1286
1287protected:
1288 /// Memory reference information.
1289 MachineMemOperand *MMO;
1290
1291public:
1292 MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTs,
1293 EVT memvt, MachineMemOperand *MMO);
1294
1295 bool readMem() const { return MMO->isLoad(); }
1296 bool writeMem() const { return MMO->isStore(); }
1297
1298 /// Returns the alignment of the memory access.
1299 unsigned getOriginalAlignment() const {
1300 return MMO->getBaseAlignment();
1301 }
1302 unsigned getAlignment() const {
1303 return MMO->getAlignment();
1304 }
1305
1306 /// Return the SubclassData value, without HasDebugValue. This contains an
1307 /// encoding of the volatile flag, as well as bits used by subclasses. This
1308 /// function should only be used to compute a FoldingSetNodeID value.
1309 /// The HasDebugValue bit is masked out because CSE map needs to match
1310 /// nodes with debug info with nodes without debug info. Same is about
1311 /// isDivergent bit.
1312 unsigned getRawSubclassData() const {
1313 uint16_t Data;
1314 union {
1315 char RawSDNodeBits[sizeof(uint16_t)];
1316 SDNodeBitfields SDNodeBits;
1317 };
1318 memcpy(&RawSDNodeBits, &this->RawSDNodeBits, sizeof(this->RawSDNodeBits));
1319 SDNodeBits.HasDebugValue = 0;
1320 SDNodeBits.IsDivergent = false;
1321 memcpy(&Data, &RawSDNodeBits, sizeof(RawSDNodeBits));
1322 return Data;
1323 }
1324
1325 bool isVolatile() const { return MemSDNodeBits.IsVolatile; }
1326 bool isNonTemporal() const { return MemSDNodeBits.IsNonTemporal; }
1327 bool isDereferenceable() const { return MemSDNodeBits.IsDereferenceable; }
1328 bool isInvariant() const { return MemSDNodeBits.IsInvariant; }
1329
1330 // Returns the offset from the location of the access.
1331 int64_t getSrcValueOffset() const { return MMO->getOffset(); }
1332
1333 /// Returns the AA info that describes the dereference.
1334 AAMDNodes getAAInfo() const { return MMO->getAAInfo(); }
1335
1336 /// Returns the Ranges that describes the dereference.
1337 const MDNode *getRanges() const { return MMO->getRanges(); }
1338
1339 /// Returns the synchronization scope ID for this memory operation.
1340 SyncScope::ID getSyncScopeID() const { return MMO->getSyncScopeID(); }
1341
1342 /// Return the atomic ordering requirements for this memory operation. For
1343 /// cmpxchg atomic operations, return the atomic ordering requirements when
1344 /// store occurs.
1345 AtomicOrdering getOrdering() const { return MMO->getOrdering(); }
1346
1347 /// Return true if the memory operation ordering is Unordered or higher.
1348 bool isAtomic() const { return MMO->isAtomic(); }
1349
1350 /// Returns true if the memory operation doesn't imply any ordering
1351 /// constraints on surrounding memory operations beyond the normal memory
1352 /// aliasing rules.
1353 bool isUnordered() const { return MMO->isUnordered(); }
1354
1355 /// Returns true if the memory operation is neither atomic nor volatile.
1356 bool isSimple() const { return !isAtomic() && !isVolatile(); }
1357
1358 /// Return the type of the in-memory value.
1359 EVT getMemoryVT() const { return MemoryVT; }
1360
1361 /// Return a MachineMemOperand object describing the memory
1362 /// reference performed by operation.
1363 MachineMemOperand *getMemOperand() const { return MMO; }
1364
1365 const MachinePointerInfo &getPointerInfo() const {
1366 return MMO->getPointerInfo();
1367 }
1368
1369 /// Return the address space for the associated pointer
1370 unsigned getAddressSpace() const {
1371 return getPointerInfo().getAddrSpace();
1372 }
1373
1374 /// Update this MemSDNode's MachineMemOperand information
1375 /// to reflect the alignment of NewMMO, if it has a greater alignment.
1376 /// This must only be used when the new alignment applies to all users of
1377 /// this MachineMemOperand.
1378 void refineAlignment(const MachineMemOperand *NewMMO) {
1379 MMO->refineAlignment(NewMMO);
1380 }
1381
1382 const SDValue &getChain() const { return getOperand(0); }
1383 const SDValue &getBasePtr() const {
1384 return getOperand(getOpcode() == ISD::STORE ? 2 : 1);
1385 }
1386
1387 // Methods to support isa and dyn_cast
1388 static bool classof(const SDNode *N) {
1389 // For some targets, we lower some target intrinsics to a MemIntrinsicNode
1390 // with either an intrinsic or a target opcode.
1391 return N->getOpcode() == ISD::LOAD ||
1392 N->getOpcode() == ISD::STORE ||
1393 N->getOpcode() == ISD::PREFETCH ||
1394 N->getOpcode() == ISD::ATOMIC_CMP_SWAP ||
1395 N->getOpcode() == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS ||
1396 N->getOpcode() == ISD::ATOMIC_SWAP ||
1397 N->getOpcode() == ISD::ATOMIC_LOAD_ADD ||
1398 N->getOpcode() == ISD::ATOMIC_LOAD_SUB ||
1399 N->getOpcode() == ISD::ATOMIC_LOAD_AND ||
1400 N->getOpcode() == ISD::ATOMIC_LOAD_CLR ||
1401 N->getOpcode() == ISD::ATOMIC_LOAD_OR ||
1402 N->getOpcode() == ISD::ATOMIC_LOAD_XOR ||
1403 N->getOpcode() == ISD::ATOMIC_LOAD_NAND ||
1404 N->getOpcode() == ISD::ATOMIC_LOAD_MIN ||
1405 N->getOpcode() == ISD::ATOMIC_LOAD_MAX ||
1406 N->getOpcode() == ISD::ATOMIC_LOAD_UMIN ||
1407 N->getOpcode() == ISD::ATOMIC_LOAD_UMAX ||
1408 N->getOpcode() == ISD::ATOMIC_LOAD_FADD ||
1409 N->getOpcode() == ISD::ATOMIC_LOAD_FSUB ||
1410 N->getOpcode() == ISD::ATOMIC_LOAD ||
1411 N->getOpcode() == ISD::ATOMIC_STORE ||
1412 N->getOpcode() == ISD::MLOAD ||
1413 N->getOpcode() == ISD::MSTORE ||
1414 N->getOpcode() == ISD::MGATHER ||
1415 N->getOpcode() == ISD::MSCATTER ||
1416 N->isMemIntrinsic() ||
1417 N->isTargetMemoryOpcode();
1418 }
1419};
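For example, a target hook might use this interface to filter memory nodes before custom lowering. The predicate below is illustrative only and is not LLVM API; it assumes byte-sized alignment and store size are comparable, which holds for fixed-width types.

#include "llvm/CodeGen/SelectionDAGNodes.h"

using namespace llvm;

// Hypothetical filter: accept only non-atomic, non-volatile accesses whose
// MachineMemOperand alignment covers the full in-memory type.
static bool isSimpleNaturallyAlignedAccess(const SDNode *N) {
  const auto *M = dyn_cast<MemSDNode>(N);
  if (!M)
    return false;
  return M->isSimple() &&
         M->getAlignment() >= M->getMemoryVT().getStoreSize();
}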
1420
1421/// This is an SDNode representing atomic operations.
1422class AtomicSDNode : public MemSDNode {
1423public:
1424 AtomicSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTL,
1425 EVT MemVT, MachineMemOperand *MMO)
1426 : MemSDNode(Opc, Order, dl, VTL, MemVT, MMO) {
1427 assert(((Opc != ISD::ATOMIC_LOAD && Opc != ISD::ATOMIC_STORE) ||
1428         MMO->isAtomic()) && "then why are we using an AtomicSDNode?");
1429 }
1430
1431 const SDValue &getBasePtr() const { return getOperand(1); }
1432 const SDValue &getVal() const { return getOperand(2); }
1433
1434 /// Returns true if this SDNode represents a cmpxchg atomic operation, false
1435 /// otherwise.
1436 bool isCompareAndSwap() const {
1437 unsigned Op = getOpcode();
1438 return Op == ISD::ATOMIC_CMP_SWAP ||
1439 Op == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS;
1440 }
1441
1442 /// For cmpxchg atomic operations, return the atomic ordering requirements
1443 /// when store does not occur.
1444 AtomicOrdering getFailureOrdering() const {
1445 assert(isCompareAndSwap() && "Must be cmpxchg operation");
1446 return MMO->getFailureOrdering();
1447 }
1448
1449 // Methods to support isa and dyn_cast
1450 static bool classof(const SDNode *N) {
1451 return N->getOpcode() == ISD::ATOMIC_CMP_SWAP ||
1452 N->getOpcode() == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS ||
1453 N->getOpcode() == ISD::ATOMIC_SWAP ||
1454 N->getOpcode() == ISD::ATOMIC_LOAD_ADD ||
1455 N->getOpcode() == ISD::ATOMIC_LOAD_SUB ||
1456 N->getOpcode() == ISD::ATOMIC_LOAD_AND ||
1457 N->getOpcode() == ISD::ATOMIC_LOAD_CLR ||
1458 N->getOpcode() == ISD::ATOMIC_LOAD_OR ||
1459 N->getOpcode() == ISD::ATOMIC_LOAD_XOR ||
1460 N->getOpcode() == ISD::ATOMIC_LOAD_NAND ||
1461 N->getOpcode() == ISD::ATOMIC_LOAD_MIN ||
1462 N->getOpcode() == ISD::ATOMIC_LOAD_MAX ||
1463 N->getOpcode() == ISD::ATOMIC_LOAD_UMIN ||
1464 N->getOpcode() == ISD::ATOMIC_LOAD_UMAX ||
1465 N->getOpcode() == ISD::ATOMIC_LOAD_FADD ||
1466 N->getOpcode() == ISD::ATOMIC_LOAD_FSUB ||
1467 N->getOpcode() == ISD::ATOMIC_LOAD ||
1468 N->getOpcode() == ISD::ATOMIC_STORE;
1469 }
1470};
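A small sketch of how both orderings of a cmpxchg might be consulted when deciding on barriers; the policy encoded here is purely illustrative and not taken from any target.

#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/Support/AtomicOrdering.h"

using namespace llvm;

// Hypothetical helper: a cmpxchg carries two orderings, and the failure
// ordering may be weaker than the success ordering, so check both.
static bool needsAcquireBarrier(const AtomicSDNode *A) {
  if (A->isCompareAndSwap())
    return isAcquireOrStronger(A->getOrdering()) ||
           isAcquireOrStronger(A->getFailureOrdering());
  return isAcquireOrStronger(A->getOrdering());
}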
1471
1472/// This SDNode is used for target intrinsics that touch
1473/// memory and need an associated MachineMemOperand. Its opcode may be
1474/// INTRINSIC_VOID, INTRINSIC_W_CHAIN, PREFETCH, or a target-specific opcode
1475/// with a value not less than FIRST_TARGET_MEMORY_OPCODE.
1476class MemIntrinsicSDNode : public MemSDNode {
1477public:
1478 MemIntrinsicSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl,
1479 SDVTList VTs, EVT MemoryVT, MachineMemOperand *MMO)
1480 : MemSDNode(Opc, Order, dl, VTs, MemoryVT, MMO) {
1481 SDNodeBits.IsMemIntrinsic = true;
1482 }
1483
1484 // Methods to support isa and dyn_cast
1485 static bool classof(const SDNode *N) {
1486 // We lower some target intrinsics to their target opcode early, so a
1487 // node with a target opcode can be of this class.
1488 return N->isMemIntrinsic() ||
1489 N->getOpcode() == ISD::PREFETCH ||
1490 N->isTargetMemoryOpcode();
1491 }
1492};
1493
1494/// This SDNode is used to implement the code generator
1495/// support for the llvm IR shufflevector instruction. It combines elements
1496/// from two input vectors into a new input vector, with the selection and
1497/// ordering of elements determined by an array of integers, referred to as
1498/// the shuffle mask. For input vectors of width N, mask indices of 0..N-1
1499/// refer to elements from the LHS input, and indices from N to 2N-1 the RHS.
1500/// An index of -1 is treated as undef, such that the code generator may put
1501/// any value in the corresponding element of the result.
1502class ShuffleVectorSDNode : public SDNode {
1503 // The memory for Mask is owned by the SelectionDAG's OperandAllocator, and
1504 // is freed when the SelectionDAG object is destroyed.
1505 const int *Mask;
1506
1507protected:
1508 friend class SelectionDAG;
1509
1510 ShuffleVectorSDNode(EVT VT, unsigned Order, const DebugLoc &dl, const int *M)
1511 : SDNode(ISD::VECTOR_SHUFFLE, Order, dl, getSDVTList(VT)), Mask(M) {}
1512
1513public:
1514 ArrayRef<int> getMask() const {
1515 EVT VT = getValueType(0);
1516 return makeArrayRef(Mask, VT.getVectorNumElements());
1517 }
1518
1519 int getMaskElt(unsigned Idx) const {
1520 assert(Idx < getValueType(0).getVectorNumElements() && "Idx out of range!");
1521 return Mask[Idx];
1522 }
1523
1524 bool isSplat() const { return isSplatMask(Mask, getValueType(0)); }
1525
1526 int getSplatIndex() const {
1527 assert(isSplat() && "Cannot get splat index for non-splat!");
1528 EVT VT = getValueType(0);
1529 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
1530 if (Mask[i] >= 0)
1531 return Mask[i];
1532
1533 // We can choose any index value here and be correct because all elements
1534 // are undefined. Return 0 for better potential for callers to simplify.
1535 return 0;
1536 }
1537
1538 static bool isSplatMask(const int *Mask, EVT VT);
1539
1540 /// Change values in a shuffle permute mask assuming
1541 /// the two vector operands have swapped position.
1542 static void commuteMask(MutableArrayRef<int> Mask) {
1543 unsigned NumElems = Mask.size();
1544 for (unsigned i = 0; i != NumElems; ++i) {
1545 int idx = Mask[i];
1546 if (idx < 0)
1547 continue;
1548 else if (idx < (int)NumElems)
1549 Mask[i] = idx + NumElems;
1550 else
1551 Mask[i] = idx - NumElems;
1552 }
1553 }
1554
1555 static bool classof(const SDNode *N) {
1556 return N->getOpcode() == ISD::VECTOR_SHUFFLE;
1557 }
1558};
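As an example of reading the mask through this class, here is a hedged sketch of a matcher; the "reversal" pattern it detects and the function name are illustrative assumptions.

#include "llvm/CodeGen/SelectionDAGNodes.h"

using namespace llvm;

// Hypothetical matcher: does this shuffle reverse the elements of its first
// operand? Negative mask entries mean "undef" and match anything.
static bool isVectorReverse(const ShuffleVectorSDNode *SVN) {
  ArrayRef<int> Mask = SVN->getMask();
  int NumElts = (int)Mask.size();
  for (int i = 0; i != NumElts; ++i) {
    if (Mask[i] < 0)
      continue;
    if (Mask[i] != NumElts - 1 - i)
      return false;
  }
  return true;
}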
1559
1560class ConstantSDNode : public SDNode {
1561 friend class SelectionDAG;
1562
1563 const ConstantInt *Value;
1564
1565 ConstantSDNode(bool isTarget, bool isOpaque, const ConstantInt *val, EVT VT)
1566 : SDNode(isTarget ? ISD::TargetConstant : ISD::Constant, 0, DebugLoc(),
1567 getSDVTList(VT)),
1568 Value(val) {
1569 ConstantSDNodeBits.IsOpaque = isOpaque;
1570 }
1571
1572public:
1573 const ConstantInt *getConstantIntValue() const { return Value; }
1574 const APInt &getAPIntValue() const { return Value->getValue(); }
1575 uint64_t getZExtValue() const { return Value->getZExtValue(); }
1576 int64_t getSExtValue() const { return Value->getSExtValue(); }
1577 uint64_t getLimitedValue(uint64_t Limit = UINT64_MAX) {
1578 return Value->getLimitedValue(Limit);
1579 }
1580
1581 bool isOne() const { return Value->isOne(); }
1582 bool isNullValue() const { return Value->isZero(); }
1583 bool isAllOnesValue() const { return Value->isMinusOne(); }
1584
1585 bool isOpaque() const { return ConstantSDNodeBits.IsOpaque; }
1586
1587 static bool classof(const SDNode *N) {
1588 return N->getOpcode() == ISD::Constant ||
1589 N->getOpcode() == ISD::TargetConstant;
1590 }
1591};
1592
1593uint64_t SDNode::getConstantOperandVal(unsigned Num) const {
1594 return cast<ConstantSDNode>(getOperand(Num))->getZExtValue();
1595}
1596
1597const APInt &SDNode::getConstantOperandAPInt(unsigned Num) const {
1598 return cast<ConstantSDNode>(getOperand(Num))->getAPIntValue();
1599}
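These accessors wrap the common cast<ConstantSDNode> pattern on operands. A hedged sketch of a typical use follows; the shift matcher and its out-of-range policy are assumptions for illustration.

#include "llvm/CodeGen/SelectionDAGNodes.h"

using namespace llvm;

// Hypothetical matcher: V is a left shift by an in-range constant amount.
static bool isShiftByConstant(SDValue V, uint64_t &Amt) {
  if (!V.getNode() || V.getOpcode() != ISD::SHL)
    return false;
  auto *C = dyn_cast<ConstantSDNode>(V.getOperand(1));
  if (!C || C->getAPIntValue().uge(V.getValueSizeInBits()))
    return false; // leave shifts by >= the bit width alone
  Amt = C->getZExtValue();
  return true;
}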
1600
1601class ConstantFPSDNode : public SDNode {
1602 friend class SelectionDAG;
1603
1604 const ConstantFP *Value;
1605
1606 ConstantFPSDNode(bool isTarget, const ConstantFP *val, EVT VT)
1607 : SDNode(isTarget ? ISD::TargetConstantFP : ISD::ConstantFP, 0,
1608 DebugLoc(), getSDVTList(VT)),
1609 Value(val) {}
1610
1611public:
1612 const APFloat& getValueAPF() const { return Value->getValueAPF(); }
1613 const ConstantFP *getConstantFPValue() const { return Value; }
1614
1615 /// Return true if the value is positive or negative zero.
1616 bool isZero() const { return Value->isZero(); }
1617
1618 /// Return true if the value is a NaN.
1619 bool isNaN() const { return Value->isNaN(); }
1620
1621 /// Return true if the value is an infinity
1622 bool isInfinity() const { return Value->isInfinity(); }
1623
1624 /// Return true if the value is negative.
1625 bool isNegative() const { return Value->isNegative(); }
1626
1627 /// We don't rely on operator== working on double values, as
1628 /// it returns true for things that are clearly not equal, like -0.0 and 0.0.
1629 /// As such, this method can be used to do an exact bit-for-bit comparison of
1630 /// two floating point values.
1631
1632 /// We leave the version with the double argument here because it's just so
1633 /// convenient to write "2.0" and the like. Without this function we'd
1634 /// have to duplicate its logic everywhere it's called.
1635 bool isExactlyValue(double V) const {
1636 return Value->getValueAPF().isExactlyValue(V);
1637 }
1638 bool isExactlyValue(const APFloat& V) const;
1639
1640 static bool isValueValidForType(EVT VT, const APFloat& Val);
1641
1642 static bool classof(const SDNode *N) {
1643 return N->getOpcode() == ISD::ConstantFP ||
1644 N->getOpcode() == ISD::TargetConstantFP;
1645 }
1646};
1647
1648/// Returns true if \p V is a constant integer zero.
1649bool isNullConstant(SDValue V);
1650
1651/// Returns true if \p V is an FP constant with a value of positive zero.
1652bool isNullFPConstant(SDValue V);
1653
1654/// Returns true if \p V is an integer constant with all bits set.
1655bool isAllOnesConstant(SDValue V);
1656
1657/// Returns true if \p V is a constant integer one.
1658bool isOneConstant(SDValue V);
1659
1660/// Return the non-bitcasted source operand of \p V if it exists.
1661/// If \p V is not a bitcasted value, it is returned as-is.
1662SDValue peekThroughBitcasts(SDValue V);
1663
1664/// Return the non-bitcasted and one-use source operand of \p V if it exists.
1665/// If \p V is not a bitcasted one-use value, it is returned as-is.
1666SDValue peekThroughOneUseBitcasts(SDValue V);
1667
1668/// Return the non-extracted vector source operand of \p V if it exists.
1669/// If \p V is not an extracted subvector, it is returned as-is.
1670SDValue peekThroughExtractSubvectors(SDValue V);
1671
1672/// Returns true if \p V is a bitwise not operation. Assumes that an all ones
1673/// constant is canonicalized to be operand 1.
1674bool isBitwiseNot(SDValue V, bool AllowUndefs = false);
1675
1676/// Returns the SDNode if it is a constant splat BuildVector or constant int.
1677ConstantSDNode *isConstOrConstSplat(SDValue N, bool AllowUndefs = false,
1678 bool AllowTruncation = false);
1679
1680/// Returns the SDNode if it is a demanded constant splat BuildVector or
1681/// constant int.
1682ConstantSDNode *isConstOrConstSplat(SDValue N, const APInt &DemandedElts,
1683 bool AllowUndefs = false,
1684 bool AllowTruncation = false);
1685
1686/// Returns the SDNode if it is a constant splat BuildVector or constant float.
1687ConstantFPSDNode *isConstOrConstSplatFP(SDValue N, bool AllowUndefs = false);
1688
1689/// Returns the SDNode if it is a demanded constant splat BuildVector or
1690/// constant float.
1691ConstantFPSDNode *isConstOrConstSplatFP(SDValue N, const APInt &DemandedElts,
1692 bool AllowUndefs = false);
1693
1694/// Return true if the value is a constant 0 integer or a splatted vector of
1695/// a constant 0 integer (with no undefs by default).
1696/// Build vector implicit truncation is not an issue for null values.
1697bool isNullOrNullSplat(SDValue V, bool AllowUndefs = false);
1698
1699/// Return true if the value is a constant 1 integer or a splatted vector of a
1700/// constant 1 integer (with no undefs).
1701/// Does not permit build vector implicit truncation.
1702bool isOneOrOneSplat(SDValue V);
1703
1704/// Return true if the value is a constant -1 integer or a splatted vector of a
1705/// constant -1 integer (with no undefs).
1706/// Does not permit build vector implicit truncation.
1707bool isAllOnesOrAllOnesSplat(SDValue V);
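A hedged sketch of how a DAG combine typically uses these predicates; the x & 0 and x & -1 folds below are only an illustration, not code from this header.

#include "llvm/CodeGen/SelectionDAGNodes.h"

using namespace llvm;

// Hypothetical combine: fold trivial AND patterns for scalars and splats.
static SDValue trySimplifyAnd(SDValue N) {
  if (!N.getNode() || N.getOpcode() != ISD::AND)
    return SDValue();
  SDValue LHS = N.getOperand(0), RHS = N.getOperand(1);
  if (isNullOrNullSplat(RHS))
    return RHS;            // x & 0 -> 0
  if (isAllOnesOrAllOnesSplat(RHS))
    return LHS;            // x & -1 -> x
  return SDValue();        // no simplification found
}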
1708
1709class GlobalAddressSDNode : public SDNode {
1710 friend class SelectionDAG;
1711
1712 const GlobalValue *TheGlobal;
1713 int64_t Offset;
1714 unsigned TargetFlags;
1715
1716 GlobalAddressSDNode(unsigned Opc, unsigned Order, const DebugLoc &DL,
1717 const GlobalValue *GA, EVT VT, int64_t o,
1718 unsigned TF);
1719
1720public:
1721 const GlobalValue *getGlobal() const { return TheGlobal; }
1722 int64_t getOffset() const { return Offset; }
1723 unsigned getTargetFlags() const { return TargetFlags; }
1724 // Return the address space this GlobalAddress belongs to.
1725 unsigned getAddressSpace() const;
1726
1727 static bool classof(const SDNode *N) {
1728 return N->getOpcode() == ISD::GlobalAddress ||
1729 N->getOpcode() == ISD::TargetGlobalAddress ||
1730 N->getOpcode() == ISD::GlobalTLSAddress ||
1731 N->getOpcode() == ISD::TargetGlobalTLSAddress;
1732 }
1733};
1734
1735class FrameIndexSDNode : public SDNode {
1736 friend class SelectionDAG;
1737
1738 int FI;
1739
1740 FrameIndexSDNode(int fi, EVT VT, bool isTarg)
1741 : SDNode(isTarg ? ISD::TargetFrameIndex : ISD::FrameIndex,
1742 0, DebugLoc(), getSDVTList(VT)), FI(fi) {
1743 }
1744
1745public:
1746 int getIndex() const { return FI; }
1747
1748 static bool classof(const SDNode *N) {
1749 return N->getOpcode() == ISD::FrameIndex ||
1750 N->getOpcode() == ISD::TargetFrameIndex;
1751 }
1752};
1753
1754/// This SDNode is used for LIFETIME_START/LIFETIME_END values, which indicate
1755/// the offset and size that are started/ended in the underlying FrameIndex.
1756class LifetimeSDNode : public SDNode {
1757 friend class SelectionDAG;
1758 int64_t Size;
1759 int64_t Offset; // -1 if offset is unknown.
1760
1761 LifetimeSDNode(unsigned Opcode, unsigned Order, const DebugLoc &dl,
1762 SDVTList VTs, int64_t Size, int64_t Offset)
1763 : SDNode(Opcode, Order, dl, VTs), Size(Size), Offset(Offset) {}
1764public:
1765 int64_t getFrameIndex() const {
1766 return cast<FrameIndexSDNode>(getOperand(1))->getIndex();
1767 }
1768
1769 bool hasOffset() const { return Offset >= 0; }
1770 int64_t getOffset() const {
1771 assert(hasOffset() && "offset is unknown");
1772 return Offset;
1773 }
1774 int64_t getSize() const {
1775 assert(hasOffset() && "offset is unknown");
1776 return Size;
1777 }
1778
1779 // Methods to support isa and dyn_cast
1780 static bool classof(const SDNode *N) {
1781 return N->getOpcode() == ISD::LIFETIME_START ||
1782 N->getOpcode() == ISD::LIFETIME_END;
1783 }
1784};
1785
1786class JumpTableSDNode : public SDNode {
1787 friend class SelectionDAG;
1788
1789 int JTI;
1790 unsigned TargetFlags;
1791
1792 JumpTableSDNode(int jti, EVT VT, bool isTarg, unsigned TF)
1793 : SDNode(isTarg ? ISD::TargetJumpTable : ISD::JumpTable,
1794 0, DebugLoc(), getSDVTList(VT)), JTI(jti), TargetFlags(TF) {
1795 }
1796
1797public:
1798 int getIndex() const { return JTI; }
1799 unsigned getTargetFlags() const { return TargetFlags; }
1800
1801 static bool classof(const SDNode *N) {
1802 return N->getOpcode() == ISD::JumpTable ||
1803 N->getOpcode() == ISD::TargetJumpTable;
1804 }
1805};
1806
1807class ConstantPoolSDNode : public SDNode {
1808 friend class SelectionDAG;
1809
1810 union {
1811 const Constant *ConstVal;
1812 MachineConstantPoolValue *MachineCPVal;
1813 } Val;
1814 int Offset; // It's a MachineConstantPoolValue if top bit is set.
1815 unsigned Alignment; // Minimum alignment requirement of CP (not log2 value).
1816 unsigned TargetFlags;
1817
1818 ConstantPoolSDNode(bool isTarget, const Constant *c, EVT VT, int o,
1819 unsigned Align, unsigned TF)
1820 : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, 0,
1821 DebugLoc(), getSDVTList(VT)), Offset(o), Alignment(Align),
1822 TargetFlags(TF) {
1823 assert(Offset >= 0 && "Offset is too large");
1824 Val.ConstVal = c;
1825 }
1826
1827 ConstantPoolSDNode(bool isTarget, MachineConstantPoolValue *v,
1828 EVT VT, int o, unsigned Align, unsigned TF)
1829 : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, 0,
1830 DebugLoc(), getSDVTList(VT)), Offset(o), Alignment(Align),
1831 TargetFlags(TF) {
1832 assert(Offset >= 0 && "Offset is too large");
1833 Val.MachineCPVal = v;
1834 Offset |= 1 << (sizeof(unsigned)*CHAR_BIT-1);
1835 }
1836
1837public:
1838 bool isMachineConstantPoolEntry() const {
1839 return Offset < 0;
1840 }
1841
1842 const Constant *getConstVal() const {
1843 assert(!isMachineConstantPoolEntry() && "Wrong constantpool type");
1844 return Val.ConstVal;
1845 }
1846
1847 MachineConstantPoolValue *getMachineCPVal() const {
1848 assert(isMachineConstantPoolEntry() && "Wrong constantpool type");
1849 return Val.MachineCPVal;
1850 }
1851
1852 int getOffset() const {
1853 return Offset & ~(1 << (sizeof(unsigned)*CHAR_BIT-1));
1854 }
1855
1856 // Return the alignment of this constant pool object, which is either 0 (for
1857 // default alignment) or the desired value.
1858 unsigned getAlignment() const { return Alignment; }
1859 unsigned getTargetFlags() const { return TargetFlags; }
1860
1861 Type *getType() const;
1862
1863 static bool classof(const SDNode *N) {
1864 return N->getOpcode() == ISD::ConstantPool ||
1865 N->getOpcode() == ISD::TargetConstantPool;
1866 }
1867};
1868
1869/// Completely target-dependent object reference.
1870class TargetIndexSDNode : public SDNode {
1871 friend class SelectionDAG;
1872
1873 unsigned TargetFlags;
1874 int Index;
1875 int64_t Offset;
1876
1877public:
1878 TargetIndexSDNode(int Idx, EVT VT, int64_t Ofs, unsigned TF)
1879 : SDNode(ISD::TargetIndex, 0, DebugLoc(), getSDVTList(VT)),
1880 TargetFlags(TF), Index(Idx), Offset(Ofs) {}
1881
1882 unsigned getTargetFlags() const { return TargetFlags; }
1883 int getIndex() const { return Index; }
1884 int64_t getOffset() const { return Offset; }
1885
1886 static bool classof(const SDNode *N) {
1887 return N->getOpcode() == ISD::TargetIndex;
1888 }
1889};
1890
1891class BasicBlockSDNode : public SDNode {
1892 friend class SelectionDAG;
1893
1894 MachineBasicBlock *MBB;
1895
1896 /// Debug info is meaningful and potentially useful here, but we create
1897 /// blocks out of order when they're jumped to, which makes it a bit
1898 /// harder. Let's see if we need it first.
1899 explicit BasicBlockSDNode(MachineBasicBlock *mbb)
1900 : SDNode(ISD::BasicBlock, 0, DebugLoc(), getSDVTList(MVT::Other)), MBB(mbb)
1901 {}
1902
1903public:
1904 MachineBasicBlock *getBasicBlock() const { return MBB; }
1905
1906 static bool classof(const SDNode *N) {
1907 return N->getOpcode() == ISD::BasicBlock;
1908 }
1909};
1910
1911/// A "pseudo-class" with methods for operating on BUILD_VECTORs.
1912class BuildVectorSDNode : public SDNode {
1913public:
1914 // These are constructed as SDNodes and then cast to BuildVectorSDNodes.
1915 explicit BuildVectorSDNode() = delete;
1916
1917 /// Check if this is a constant splat, and if so, find the
1918 /// smallest element size that splats the vector. If MinSplatBits is
1919 /// nonzero, the element size must be at least that large. Note that the
1920 /// splat element may be the entire vector (i.e., a one element vector).
1921 /// Returns the splat element value in SplatValue. Any undefined bits in
1922 /// that value are zero, and the corresponding bits in the SplatUndef mask
1923 /// are set. The SplatBitSize value is set to the splat element size in
1924 /// bits. HasAnyUndefs is set to true if any bits in the vector are
1925 /// undefined. isBigEndian describes the endianness of the target.
1926 bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef,
1927 unsigned &SplatBitSize, bool &HasAnyUndefs,
1928 unsigned MinSplatBits = 0,
1929 bool isBigEndian = false) const;
1930
1931 /// Returns the demanded splatted value or a null value if this is not a
1932 /// splat.
1933 ///
1934 /// The DemandedElts mask indicates the elements that must be in the splat.
1935 /// If passed a non-null UndefElements bitvector, it will resize it to match
1936 /// the vector width and set the bits where elements are undef.
1937 SDValue getSplatValue(const APInt &DemandedElts,
1938 BitVector *UndefElements = nullptr) const;
1939
1940 /// Returns the splatted value or a null value if this is not a splat.
1941 ///
1942 /// If passed a non-null UndefElements bitvector, it will resize it to match
1943 /// the vector width and set the bits where elements are undef.
1944 SDValue getSplatValue(BitVector *UndefElements = nullptr) const;
1945
1946 /// Returns the demanded splatted constant or null if this is not a constant
1947 /// splat.
1948 ///
1949 /// The DemandedElts mask indicates the elements that must be in the splat.
1950 /// If passed a non-null UndefElements bitvector, it will resize it to match
1951 /// the vector width and set the bits where elements are undef.
1952 ConstantSDNode *
1953 getConstantSplatNode(const APInt &DemandedElts,
1954 BitVector *UndefElements = nullptr) const;
1955
1956 /// Returns the splatted constant or null if this is not a constant
1957 /// splat.
1958 ///
1959 /// If passed a non-null UndefElements bitvector, it will resize it to match
1960 /// the vector width and set the bits where elements are undef.
1961 ConstantSDNode *
1962 getConstantSplatNode(BitVector *UndefElements = nullptr) const;
1963
1964 /// Returns the demanded splatted constant FP or null if this is not a
1965 /// constant FP splat.
1966 ///
1967 /// The DemandedElts mask indicates the elements that must be in the splat.
1968 /// If passed a non-null UndefElements bitvector, it will resize it to match
1969 /// the vector width and set the bits where elements are undef.
1970 ConstantFPSDNode *
1971 getConstantFPSplatNode(const APInt &DemandedElts,
1972 BitVector *UndefElements = nullptr) const;
1973
1974 /// Returns the splatted constant FP or null if this is not a constant
1975 /// FP splat.
1976 ///
1977 /// If passed a non-null UndefElements bitvector, it will resize it to match
1978 /// the vector width and set the bits where elements are undef.
1979 ConstantFPSDNode *
1980 getConstantFPSplatNode(BitVector *UndefElements = nullptr) const;
1981
1982 /// If this is a constant FP splat and the splatted constant FP is an
1983 /// exact power of 2, return the log base 2 integer value. Otherwise,
1984 /// return -1.
1985 ///
1986 /// The BitWidth specifies the necessary bit precision.
1987 int32_t getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements,
1988 uint32_t BitWidth) const;
1989
1990 bool isConstant() const;
1991
1992 static bool classof(const SDNode *N) {
1993 return N->getOpcode() == ISD::BUILD_VECTOR;
1994 }
1995};
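A sketch of the typical isConstantSplat call sequence follows. In real lowering code IsBigEndian would come from the DataLayout, and the check for a splat of 1 is an arbitrary example, not anything this header prescribes.

#include "llvm/ADT/APInt.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"

using namespace llvm;

// Hypothetical query: does this BUILD_VECTOR splat the integer constant 1?
static bool isSplatOfOne(const BuildVectorSDNode *BV, bool IsBigEndian) {
  APInt SplatValue, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (!BV->isConstantSplat(SplatValue, SplatUndef, SplatBitSize, HasAnyUndefs,
                           /*MinSplatBits=*/0, IsBigEndian))
    return false;
  return SplatValue == 1; // every element splats the constant 1
}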
1996
1997/// An SDNode that holds an arbitrary LLVM IR Value. This is
1998/// used when the SelectionDAG needs to make a simple reference to something
1999/// in the LLVM IR representation.
2000///
2001class SrcValueSDNode : public SDNode {
2002 friend class SelectionDAG;
2003
2004 const Value *V;
2005
2006 /// Create a SrcValue for a general value.
2007 explicit SrcValueSDNode(const Value *v)
2008 : SDNode(ISD::SRCVALUE, 0, DebugLoc(), getSDVTList(MVT::Other)), V(v) {}
2009
2010public:
2011 /// Return the contained Value.
2012 const Value *getValue() const { return V; }
2013
2014 static bool classof(const SDNode *N) {
2015 return N->getOpcode() == ISD::SRCVALUE;
2016 }
2017};
2018
2019class MDNodeSDNode : public SDNode {
2020 friend class SelectionDAG;
2021
2022 const MDNode *MD;
2023
2024 explicit MDNodeSDNode(const MDNode *md)
2025 : SDNode(ISD::MDNODE_SDNODE, 0, DebugLoc(), getSDVTList(MVT::Other)), MD(md)
2026 {}
2027
2028public:
2029 const MDNode *getMD() const { return MD; }
2030
2031 static bool classof(const SDNode *N) {
2032 return N->getOpcode() == ISD::MDNODE_SDNODE;
2033 }
2034};
2035
2036class RegisterSDNode : public SDNode {
2037 friend class SelectionDAG;
2038
2039 unsigned Reg;
2040
2041 RegisterSDNode(unsigned reg, EVT VT)
2042 : SDNode(ISD::Register, 0, DebugLoc(), getSDVTList(VT)), Reg(reg) {}
2043
2044public:
2045 unsigned getReg() const { return Reg; }
2046
2047 static bool classof(const SDNode *N) {
2048 return N->getOpcode() == ISD::Register;
2049 }
2050};
2051
2052class RegisterMaskSDNode : public SDNode {
2053 friend class SelectionDAG;
2054
2055 // The memory for RegMask is not owned by the node.
2056 const uint32_t *RegMask;
2057
2058 RegisterMaskSDNode(const uint32_t *mask)
2059 : SDNode(ISD::RegisterMask, 0, DebugLoc(), getSDVTList(MVT::Untyped)),
2060 RegMask(mask) {}
2061
2062public:
2063 const uint32_t *getRegMask() const { return RegMask; }
2064
2065 static bool classof(const SDNode *N) {
2066 return N->getOpcode() == ISD::RegisterMask;
2067 }
2068};
2069
2070class BlockAddressSDNode : public SDNode {
2071 friend class SelectionDAG;
2072
2073 const BlockAddress *BA;
2074 int64_t Offset;
2075 unsigned TargetFlags;
2076
2077 BlockAddressSDNode(unsigned NodeTy, EVT VT, const BlockAddress *ba,
2078 int64_t o, unsigned Flags)
2079 : SDNode(NodeTy, 0, DebugLoc(), getSDVTList(VT)),
2080 BA(ba), Offset(o), TargetFlags(Flags) {}
2081
2082public:
2083 const BlockAddress *getBlockAddress() const { return BA; }
2084 int64_t getOffset() const { return Offset; }
2085 unsigned getTargetFlags() const { return TargetFlags; }
2086
2087 static bool classof(const SDNode *N) {
2088 return N->getOpcode() == ISD::BlockAddress ||
2089 N->getOpcode() == ISD::TargetBlockAddress;
2090 }
2091};
2092
2093class LabelSDNode : public SDNode {
2094 friend class SelectionDAG;
2095
2096 MCSymbol *Label;
2097
2098 LabelSDNode(unsigned Opcode, unsigned Order, const DebugLoc &dl, MCSymbol *L)
2099 : SDNode(Opcode, Order, dl, getSDVTList(MVT::Other)), Label(L) {
2100 assert(LabelSDNode::classof(this) && "not a label opcode");
2101 }
2102
2103public:
2104 MCSymbol *getLabel() const { return Label; }
2105
2106 static bool classof(const SDNode *N) {
2107 return N->getOpcode() == ISD::EH_LABEL ||
2108 N->getOpcode() == ISD::ANNOTATION_LABEL;
2109 }
2110};
2111
2112class ExternalSymbolSDNode : public SDNode {
2113 friend class SelectionDAG;
2114
2115 const char *Symbol;
2116 unsigned TargetFlags;
2117
2118 ExternalSymbolSDNode(bool isTarget, const char *Sym, unsigned TF, EVT VT)
2119 : SDNode(isTarget ? ISD::TargetExternalSymbol : ISD::ExternalSymbol, 0,
2120 DebugLoc(), getSDVTList(VT)),
2121 Symbol(Sym), TargetFlags(TF) {}
2122
2123public:
2124 const char *getSymbol() const { return Symbol; }
2125 unsigned getTargetFlags() const { return TargetFlags; }
2126
2127 static bool classof(const SDNode *N) {
2128 return N->getOpcode() == ISD::ExternalSymbol ||
2129 N->getOpcode() == ISD::TargetExternalSymbol;
2130 }
2131};
2132
2133class MCSymbolSDNode : public SDNode {
2134 friend class SelectionDAG;
2135
2136 MCSymbol *Symbol;
2137
2138 MCSymbolSDNode(MCSymbol *Symbol, EVT VT)
2139 : SDNode(ISD::MCSymbol, 0, DebugLoc(), getSDVTList(VT)), Symbol(Symbol) {}
2140
2141public:
2142 MCSymbol *getMCSymbol() const { return Symbol; }
2143
2144 static bool classof(const SDNode *N) {
2145 return N->getOpcode() == ISD::MCSymbol;
2146 }
2147};
2148
2149class CondCodeSDNode : public SDNode {
2150 friend class SelectionDAG;
2151
2152 ISD::CondCode Condition;
2153
2154 explicit CondCodeSDNode(ISD::CondCode Cond)
2155 : SDNode(ISD::CONDCODE, 0, DebugLoc(), getSDVTList(MVT::Other)),
2156 Condition(Cond) {}
2157
2158public:
2159 ISD::CondCode get() const { return Condition; }
2160
2161 static bool classof(const SDNode *N) {
2162 return N->getOpcode() == ISD::CONDCODE;
2163 }
2164};
2165
2166/// This class is used to represent EVT's, which are used
2167/// to parameterize some operations.
2168class VTSDNode : public SDNode {
2169 friend class SelectionDAG;
2170
2171 EVT ValueType;
2172
2173 explicit VTSDNode(EVT VT)
2174 : SDNode(ISD::VALUETYPE, 0, DebugLoc(), getSDVTList(MVT::Other)),
2175 ValueType(VT) {}
2176
2177public:
2178 EVT getVT() const { return ValueType; }
2179
2180 static bool classof(const SDNode *N) {
2181 return N->getOpcode() == ISD::VALUETYPE;
2182 }
2183};
2184
2185/// Base class for LoadSDNode and StoreSDNode
2186class LSBaseSDNode : public MemSDNode {
2187public:
2188 LSBaseSDNode(ISD::NodeType NodeTy, unsigned Order, const DebugLoc &dl,
2189 SDVTList VTs, ISD::MemIndexedMode AM, EVT MemVT,
2190 MachineMemOperand *MMO)
2191 : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
2192 LSBaseSDNodeBits.AddressingMode = AM;
2193 assert(getAddressingMode() == AM && "Value truncated");
2194 }
2195
2196 const SDValue &getOffset() const {
2197 return getOperand(getOpcode() == ISD::LOAD ? 2 : 3);
2198 }
2199
2200 /// Return the addressing mode for this load or store:
2201 /// unindexed, pre-inc, pre-dec, post-inc, or post-dec.
2202 ISD::MemIndexedMode getAddressingMode() const {
2203 return static_cast<ISD::MemIndexedMode>(LSBaseSDNodeBits.AddressingMode);
2204 }
2205
2206 /// Return true if this is a pre/post inc/dec load/store.
2207 bool isIndexed() const { return getAddressingMode() != ISD::UNINDEXED; }
2208
2209 /// Return true if this is NOT a pre/post inc/dec load/store.
2210 bool isUnindexed() const { return getAddressingMode() == ISD::UNINDEXED; }
2211
2212 static bool classof(const SDNode *N) {
2213 return N->getOpcode() == ISD::LOAD ||
2214 N->getOpcode() == ISD::STORE;
2215 }
2216};
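For instance, custom lowering code often handles only the unindexed form and defers the pre/post-indexed variants to the generic legalizer; the policy below is hypothetical and shown only to illustrate the accessor.

#include "llvm/CodeGen/SelectionDAGNodes.h"

using namespace llvm;

// Hypothetical policy: only plain (unindexed) loads/stores are custom-lowered;
// pre/post inc/dec forms carry an extra offset operand (see getOffset()).
static bool canCustomLowerMemOp(const LSBaseSDNode *LS) {
  return LS->isUnindexed();
}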
2217
2218/// This class is used to represent ISD::LOAD nodes.
2219class LoadSDNode : public LSBaseSDNode {