Bug Summary

File: llvm/include/llvm/CodeGen/SelectionDAGNodes.h
Warning: line 1097, column 10
Called C++ object pointer is null

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name WebAssemblyISelLowering.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -fno-split-dwarf-inlining -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-12/lib/clang/12.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/build-llvm/lib/Target/WebAssembly -I /build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/lib/Target/WebAssembly -I /build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/build-llvm/include -I /build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include 
-internal-isystem /usr/lib/llvm-12/lib/clang/12.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/build-llvm/lib/Target/WebAssembly -fdebug-prefix-map=/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998=. -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2020-09-28-092409-31635-1 -x c++ /build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp

/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp

1//=- WebAssemblyISelLowering.cpp - WebAssembly DAG Lowering Implementation -==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file implements the WebAssemblyTargetLowering class.
11///
12//===----------------------------------------------------------------------===//
13
14#include "WebAssemblyISelLowering.h"
15#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
16#include "WebAssemblyMachineFunctionInfo.h"
17#include "WebAssemblySubtarget.h"
18#include "WebAssemblyTargetMachine.h"
19#include "llvm/CodeGen/Analysis.h"
20#include "llvm/CodeGen/CallingConvLower.h"
21#include "llvm/CodeGen/MachineInstrBuilder.h"
22#include "llvm/CodeGen/MachineJumpTableInfo.h"
23#include "llvm/CodeGen/MachineModuleInfo.h"
24#include "llvm/CodeGen/MachineRegisterInfo.h"
25#include "llvm/CodeGen/SelectionDAG.h"
26#include "llvm/CodeGen/WasmEHFuncInfo.h"
27#include "llvm/IR/DiagnosticInfo.h"
28#include "llvm/IR/DiagnosticPrinter.h"
29#include "llvm/IR/Function.h"
30#include "llvm/IR/Intrinsics.h"
31#include "llvm/IR/IntrinsicsWebAssembly.h"
32#include "llvm/Support/Debug.h"
33#include "llvm/Support/ErrorHandling.h"
34#include "llvm/Support/raw_ostream.h"
35#include "llvm/Target/TargetOptions.h"
36using namespace llvm;
37
38#define DEBUG_TYPE"wasm-lower" "wasm-lower"
39
40WebAssemblyTargetLowering::WebAssemblyTargetLowering(
41 const TargetMachine &TM, const WebAssemblySubtarget &STI)
42 : TargetLowering(TM), Subtarget(&STI) {
43 auto MVTPtr = Subtarget->hasAddr64() ? MVT::i64 : MVT::i32;
44
45 // Booleans always contain 0 or 1.
46 setBooleanContents(ZeroOrOneBooleanContent);
47 // Except in SIMD vectors
48 setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
49 // We don't know the microarchitecture here, so just reduce register pressure.
50 setSchedulingPreference(Sched::RegPressure);
51 // Tell ISel that we have a stack pointer.
52 setStackPointerRegisterToSaveRestore(
53 Subtarget->hasAddr64() ? WebAssembly::SP64 : WebAssembly::SP32);
54 // Set up the register classes.
55 addRegisterClass(MVT::i32, &WebAssembly::I32RegClass);
56 addRegisterClass(MVT::i64, &WebAssembly::I64RegClass);
57 addRegisterClass(MVT::f32, &WebAssembly::F32RegClass);
58 addRegisterClass(MVT::f64, &WebAssembly::F64RegClass);
59 if (Subtarget->hasSIMD128()) {
60 addRegisterClass(MVT::v16i8, &WebAssembly::V128RegClass);
61 addRegisterClass(MVT::v8i16, &WebAssembly::V128RegClass);
62 addRegisterClass(MVT::v4i32, &WebAssembly::V128RegClass);
63 addRegisterClass(MVT::v4f32, &WebAssembly::V128RegClass);
64 addRegisterClass(MVT::v2i64, &WebAssembly::V128RegClass);
65 addRegisterClass(MVT::v2f64, &WebAssembly::V128RegClass);
66 }
67 // Compute derived properties from the register classes.
68 computeRegisterProperties(Subtarget->getRegisterInfo());
69
70 setOperationAction(ISD::GlobalAddress, MVTPtr, Custom);
71 setOperationAction(ISD::ExternalSymbol, MVTPtr, Custom);
72 setOperationAction(ISD::JumpTable, MVTPtr, Custom);
73 setOperationAction(ISD::BlockAddress, MVTPtr, Custom);
74 setOperationAction(ISD::BRIND, MVT::Other, Custom);
75
76 // Take the default expansion for va_arg, va_copy, and va_end. There is no
77 // default action for va_start, so we do that custom.
78 setOperationAction(ISD::VASTART, MVT::Other, Custom);
79 setOperationAction(ISD::VAARG, MVT::Other, Expand);
80 setOperationAction(ISD::VACOPY, MVT::Other, Expand);
81 setOperationAction(ISD::VAEND, MVT::Other, Expand);
82
83 for (auto T : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
84 // Don't expand the floating-point types to constant pools.
85 setOperationAction(ISD::ConstantFP, T, Legal);
86 // Expand floating-point comparisons.
87 for (auto CC : {ISD::SETO, ISD::SETUO, ISD::SETUEQ, ISD::SETONE,
88 ISD::SETULT, ISD::SETULE, ISD::SETUGT, ISD::SETUGE})
89 setCondCodeAction(CC, T, Expand);
90 // Expand floating-point library function operators.
91 for (auto Op :
92 {ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FMA})
93 setOperationAction(Op, T, Expand);
94 // Note supported floating-point library function operators that otherwise
95 // default to expand.
96 for (auto Op :
97 {ISD::FCEIL, ISD::FFLOOR, ISD::FTRUNC, ISD::FNEARBYINT, ISD::FRINT})
98 setOperationAction(Op, T, Legal);
99 // Support minimum and maximum, which otherwise default to expand.
100 setOperationAction(ISD::FMINIMUM, T, Legal);
101 setOperationAction(ISD::FMAXIMUM, T, Legal);
102 // WebAssembly currently has no builtin f16 support.
103 setOperationAction(ISD::FP16_TO_FP, T, Expand);
104 setOperationAction(ISD::FP_TO_FP16, T, Expand);
105 setLoadExtAction(ISD::EXTLOAD, T, MVT::f16, Expand);
106 setTruncStoreAction(T, MVT::f16, Expand);
107 }
108
109 // Expand unavailable integer operations.
110 for (auto Op :
111 {ISD::BSWAP, ISD::SMUL_LOHI, ISD::UMUL_LOHI, ISD::MULHS, ISD::MULHU,
112 ISD::SDIVREM, ISD::UDIVREM, ISD::SHL_PARTS, ISD::SRA_PARTS,
113 ISD::SRL_PARTS, ISD::ADDC, ISD::ADDE, ISD::SUBC, ISD::SUBE}) {
114 for (auto T : {MVT::i32, MVT::i64})
115 setOperationAction(Op, T, Expand);
116 if (Subtarget->hasSIMD128())
117 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
118 setOperationAction(Op, T, Expand);
119 }
120
121 // SIMD-specific configuration
122 if (Subtarget->hasSIMD128()) {
123 // Hoist bitcasts out of shuffles
124 setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
125
126 // Combine extends of extract_subvectors into widening ops
127 setTargetDAGCombine(ISD::SIGN_EXTEND);
128 setTargetDAGCombine(ISD::ZERO_EXTEND);
129
130 // Support saturating add for i8x16 and i16x8
131 for (auto Op : {ISD::SADDSAT, ISD::UADDSAT})
132 for (auto T : {MVT::v16i8, MVT::v8i16})
133 setOperationAction(Op, T, Legal);
134
135 // Support integer abs
136 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
137 setOperationAction(ISD::ABS, T, Legal);
138
139 // Custom lower BUILD_VECTORs to minimize number of replace_lanes
140 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
141 MVT::v2f64})
142 setOperationAction(ISD::BUILD_VECTOR, T, Custom);
143
144 // We have custom shuffle lowering to expose the shuffle mask
145 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
146 MVT::v2f64})
147 setOperationAction(ISD::VECTOR_SHUFFLE, T, Custom);
148
149 // Custom lowering since wasm shifts must have a scalar shift amount
150 for (auto Op : {ISD::SHL, ISD::SRA, ISD::SRL})
151 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
152 setOperationAction(Op, T, Custom);
153
154 // Custom lower lane accesses to expand out variable indices
155 for (auto Op : {ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_VECTOR_ELT})
156 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
157 MVT::v2f64})
158 setOperationAction(Op, T, Custom);
159
160 // There is no i8x16.mul instruction
161 setOperationAction(ISD::MUL, MVT::v16i8, Expand);
162
163 // There is no vector conditional select instruction
164 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
165 MVT::v2f64})
166 setOperationAction(ISD::SELECT_CC, T, Expand);
167
168 // Expand integer operations supported for scalars but not SIMD
169 for (auto Op : {ISD::CTLZ, ISD::CTTZ, ISD::CTPOP, ISD::SDIV, ISD::UDIV,
170 ISD::SREM, ISD::UREM, ISD::ROTL, ISD::ROTR})
171 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
172 setOperationAction(Op, T, Expand);
173
174 // But we do have integer min and max operations
175 for (auto Op : {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
176 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
177 setOperationAction(Op, T, Legal);
178
179 // Expand float operations supported for scalars but not SIMD
180 for (auto Op : {ISD::FCEIL, ISD::FFLOOR, ISD::FTRUNC, ISD::FNEARBYINT,
181 ISD::FCOPYSIGN, ISD::FLOG, ISD::FLOG2, ISD::FLOG10,
182 ISD::FEXP, ISD::FEXP2, ISD::FRINT})
183 for (auto T : {MVT::v4f32, MVT::v2f64})
184 setOperationAction(Op, T, Expand);
185
186 // Expand operations not supported for i64x2 vectors
187 for (unsigned CC = 0; CC < ISD::SETCC_INVALID; ++CC)
188 setCondCodeAction(static_cast<ISD::CondCode>(CC), MVT::v2i64, Custom);
189
190 // 64x2 conversions are not in the spec
191 for (auto Op :
192 {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT})
193 for (auto T : {MVT::v2i64, MVT::v2f64})
194 setOperationAction(Op, T, Expand);
195 }
196
197 // As a special case, these operators use the type to mean the type to
198 // sign-extend from.
199 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
200 if (!Subtarget->hasSignExt()) {
201 // Sign extends are legal only when extending a vector extract
202 auto Action = Subtarget->hasSIMD128() ? Custom : Expand;
203 for (auto T : {MVT::i8, MVT::i16, MVT::i32})
204 setOperationAction(ISD::SIGN_EXTEND_INREG, T, Action);
205 }
206 for (auto T : MVT::integer_fixedlen_vector_valuetypes())
207 setOperationAction(ISD::SIGN_EXTEND_INREG, T, Expand);
208
209 // Dynamic stack allocation: use the default expansion.
210 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
211 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
212 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVTPtr, Expand);
213
214 setOperationAction(ISD::FrameIndex, MVT::i32, Custom);
215 setOperationAction(ISD::FrameIndex, MVT::i64, Custom);
216 setOperationAction(ISD::CopyToReg, MVT::Other, Custom);
217
218 // Expand these forms; we pattern-match the forms that we can handle in isel.
219 for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64})
220 for (auto Op : {ISD::BR_CC, ISD::SELECT_CC})
221 setOperationAction(Op, T, Expand);
222
223 // We have custom switch handling.
224 setOperationAction(ISD::BR_JT, MVT::Other, Custom);
225
226 // WebAssembly doesn't have:
227 // - Floating-point extending loads.
228 // - Floating-point truncating stores.
229 // - i1 extending loads.
230 // - truncating SIMD stores and most extending loads
231 setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
232 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
233 for (auto T : MVT::integer_valuetypes())
234 for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
235 setLoadExtAction(Ext, T, MVT::i1, Promote);
236 if (Subtarget->hasSIMD128()) {
237 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v4f32,
238 MVT::v2f64}) {
239 for (auto MemT : MVT::fixedlen_vector_valuetypes()) {
240 if (MVT(T) != MemT) {
241 setTruncStoreAction(T, MemT, Expand);
242 for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
243 setLoadExtAction(Ext, T, MemT, Expand);
244 }
245 }
246 }
247 // But some vector extending loads are legal
248 for (auto Ext : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}) {
249 setLoadExtAction(Ext, MVT::v8i16, MVT::v8i8, Legal);
250 setLoadExtAction(Ext, MVT::v4i32, MVT::v4i16, Legal);
251 setLoadExtAction(Ext, MVT::v2i64, MVT::v2i32, Legal);
252 }
253 // And some truncating stores are legal as well
254 setTruncStoreAction(MVT::v8i16, MVT::v8i8, Legal);
255 setTruncStoreAction(MVT::v4i32, MVT::v4i16, Legal);
256 }
257
258 // Don't do anything clever with build_pairs
259 setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
260
261 // Trap lowers to wasm unreachable
262 setOperationAction(ISD::TRAP, MVT::Other, Legal);
263 setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
264
265 // Exception handling intrinsics
266 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
267 setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
268
269 setMaxAtomicSizeInBitsSupported(64);
270
271 // Override the __gnu_f2h_ieee/__gnu_h2f_ieee names so that the f32 name is
272 // consistent with the f64 and f128 names.
273 setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
274 setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
275
276 // Define the emscripten name for return address helper.
277 // TODO: when implementing other Wasm backends, make this generic or only do
278 // this on emscripten depending on what they end up doing.
279 setLibcallName(RTLIB::RETURN_ADDRESS, "emscripten_return_address");
280
281 // Always convert switches to br_tables unless there is only one case, which
282 // is equivalent to a simple branch. This reduces code size for wasm, and we
283 // defer possible jump table optimizations to the VM.
284 setMinimumJumpTableEntries(2);
285}
286
287TargetLowering::AtomicExpansionKind
288WebAssemblyTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
289 // We have wasm instructions for these
290 switch (AI->getOperation()) {
291 case AtomicRMWInst::Add:
292 case AtomicRMWInst::Sub:
293 case AtomicRMWInst::And:
294 case AtomicRMWInst::Or:
295 case AtomicRMWInst::Xor:
296 case AtomicRMWInst::Xchg:
297 return AtomicExpansionKind::None;
298 default:
299 break;
300 }
301 return AtomicExpansionKind::CmpXChg;
302}
303
304FastISel *WebAssemblyTargetLowering::createFastISel(
305 FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo) const {
306 return WebAssembly::createFastISel(FuncInfo, LibInfo);
307}
308
309MVT WebAssemblyTargetLowering::getScalarShiftAmountTy(const DataLayout & /*DL*/,
310 EVT VT) const {
311 unsigned BitWidth = NextPowerOf2(VT.getSizeInBits() - 1);
312 if (BitWidth > 1 && BitWidth < 8)
313 BitWidth = 8;
314
315 if (BitWidth > 64) {
316 // The shift will be lowered to a libcall, and compiler-rt libcalls expect
317 // the count to be an i32.
318 BitWidth = 32;
319 assert(BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) &&((BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) && "32-bit shift counts ought to be enough for anyone"
) ? static_cast<void> (0) : __assert_fail ("BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) && \"32-bit shift counts ought to be enough for anyone\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 320, __PRETTY_FUNCTION__))
320 "32-bit shift counts ought to be enough for anyone")((BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) && "32-bit shift counts ought to be enough for anyone"
) ? static_cast<void> (0) : __assert_fail ("BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) && \"32-bit shift counts ought to be enough for anyone\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 320, __PRETTY_FUNCTION__))
;
321 }
322
323 MVT Result = MVT::getIntegerVT(BitWidth);
324 assert(Result != MVT::INVALID_SIMPLE_VALUE_TYPE &&((Result != MVT::INVALID_SIMPLE_VALUE_TYPE && "Unable to represent scalar shift amount type"
) ? static_cast<void> (0) : __assert_fail ("Result != MVT::INVALID_SIMPLE_VALUE_TYPE && \"Unable to represent scalar shift amount type\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 325, __PRETTY_FUNCTION__))
325 "Unable to represent scalar shift amount type")((Result != MVT::INVALID_SIMPLE_VALUE_TYPE && "Unable to represent scalar shift amount type"
) ? static_cast<void> (0) : __assert_fail ("Result != MVT::INVALID_SIMPLE_VALUE_TYPE && \"Unable to represent scalar shift amount type\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 325, __PRETTY_FUNCTION__))
;
326 return Result;
327}
328
329// Lower an fp-to-int conversion operator from the LLVM opcode, which has an
330// undefined result on invalid/overflow, to the WebAssembly opcode, which
331// traps on invalid/overflow.
332static MachineBasicBlock *LowerFPToInt(MachineInstr &MI, DebugLoc DL,
333 MachineBasicBlock *BB,
334 const TargetInstrInfo &TII,
335 bool IsUnsigned, bool Int64,
336 bool Float64, unsigned LoweredOpcode) {
337 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
338
339 Register OutReg = MI.getOperand(0).getReg();
340 Register InReg = MI.getOperand(1).getReg();
341
342 unsigned Abs = Float64 ? WebAssembly::ABS_F64 : WebAssembly::ABS_F32;
343 unsigned FConst = Float64 ? WebAssembly::CONST_F64 : WebAssembly::CONST_F32;
344 unsigned LT = Float64 ? WebAssembly::LT_F64 : WebAssembly::LT_F32;
345 unsigned GE = Float64 ? WebAssembly::GE_F64 : WebAssembly::GE_F32;
346 unsigned IConst = Int64 ? WebAssembly::CONST_I64 : WebAssembly::CONST_I32;
347 unsigned Eqz = WebAssembly::EQZ_I32;
348 unsigned And = WebAssembly::AND_I32;
349 int64_t Limit = Int64 ? INT64_MIN(-9223372036854775807L -1) : INT32_MIN(-2147483647-1);
350 int64_t Substitute = IsUnsigned ? 0 : Limit;
351 double CmpVal = IsUnsigned ? -(double)Limit * 2.0 : -(double)Limit;
352 auto &Context = BB->getParent()->getFunction().getContext();
353 Type *Ty = Float64 ? Type::getDoubleTy(Context) : Type::getFloatTy(Context);
354
355 const BasicBlock *LLVMBB = BB->getBasicBlock();
356 MachineFunction *F = BB->getParent();
357 MachineBasicBlock *TrueMBB = F->CreateMachineBasicBlock(LLVMBB);
358 MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVMBB);
359 MachineBasicBlock *DoneMBB = F->CreateMachineBasicBlock(LLVMBB);
360
361 MachineFunction::iterator It = ++BB->getIterator();
362 F->insert(It, FalseMBB);
363 F->insert(It, TrueMBB);
364 F->insert(It, DoneMBB);
365
366 // Transfer the remainder of BB and its successor edges to DoneMBB.
367 DoneMBB->splice(DoneMBB->begin(), BB, std::next(MI.getIterator()), BB->end());
368 DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
369
370 BB->addSuccessor(TrueMBB);
371 BB->addSuccessor(FalseMBB);
372 TrueMBB->addSuccessor(DoneMBB);
373 FalseMBB->addSuccessor(DoneMBB);
374
375 unsigned Tmp0, Tmp1, CmpReg, EqzReg, FalseReg, TrueReg;
376 Tmp0 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
377 Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
378 CmpReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
379 EqzReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
380 FalseReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
381 TrueReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
382
383 MI.eraseFromParent();
384 // For signed numbers, we can do a single comparison to determine whether
385 // fabs(x) is within range.
386 if (IsUnsigned) {
387 Tmp0 = InReg;
388 } else {
389 BuildMI(BB, DL, TII.get(Abs), Tmp0).addReg(InReg);
390 }
391 BuildMI(BB, DL, TII.get(FConst), Tmp1)
392 .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, CmpVal)));
393 BuildMI(BB, DL, TII.get(LT), CmpReg).addReg(Tmp0).addReg(Tmp1);
394
395 // For unsigned numbers, we have to do a separate comparison with zero.
396 if (IsUnsigned) {
397 Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
398 Register SecondCmpReg =
399 MRI.createVirtualRegister(&WebAssembly::I32RegClass);
400 Register AndReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
401 BuildMI(BB, DL, TII.get(FConst), Tmp1)
402 .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, 0.0)));
403 BuildMI(BB, DL, TII.get(GE), SecondCmpReg).addReg(Tmp0).addReg(Tmp1);
404 BuildMI(BB, DL, TII.get(And), AndReg).addReg(CmpReg).addReg(SecondCmpReg);
405 CmpReg = AndReg;
406 }
407
408 BuildMI(BB, DL, TII.get(Eqz), EqzReg).addReg(CmpReg);
409
410 // Create the CFG diamond to select between doing the conversion or using
411 // the substitute value.
412 BuildMI(BB, DL, TII.get(WebAssembly::BR_IF)).addMBB(TrueMBB).addReg(EqzReg);
413 BuildMI(FalseMBB, DL, TII.get(LoweredOpcode), FalseReg).addReg(InReg);
414 BuildMI(FalseMBB, DL, TII.get(WebAssembly::BR)).addMBB(DoneMBB);
415 BuildMI(TrueMBB, DL, TII.get(IConst), TrueReg).addImm(Substitute);
416 BuildMI(*DoneMBB, DoneMBB->begin(), DL, TII.get(TargetOpcode::PHI), OutReg)
417 .addReg(FalseReg)
418 .addMBB(FalseMBB)
419 .addReg(TrueReg)
420 .addMBB(TrueMBB);
421
422 return DoneMBB;
423}
424
425static MachineBasicBlock *LowerCallResults(MachineInstr &CallResults,
426 DebugLoc DL, MachineBasicBlock *BB,
427 const TargetInstrInfo &TII) {
428 MachineInstr &CallParams = *CallResults.getPrevNode();
429 assert(CallParams.getOpcode() == WebAssembly::CALL_PARAMS)((CallParams.getOpcode() == WebAssembly::CALL_PARAMS) ? static_cast
<void> (0) : __assert_fail ("CallParams.getOpcode() == WebAssembly::CALL_PARAMS"
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 429, __PRETTY_FUNCTION__))
;
430 assert(CallResults.getOpcode() == WebAssembly::CALL_RESULTS ||((CallResults.getOpcode() == WebAssembly::CALL_RESULTS || CallResults
.getOpcode() == WebAssembly::RET_CALL_RESULTS) ? static_cast<
void> (0) : __assert_fail ("CallResults.getOpcode() == WebAssembly::CALL_RESULTS || CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS"
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 431, __PRETTY_FUNCTION__))
431 CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS)((CallResults.getOpcode() == WebAssembly::CALL_RESULTS || CallResults
.getOpcode() == WebAssembly::RET_CALL_RESULTS) ? static_cast<
void> (0) : __assert_fail ("CallResults.getOpcode() == WebAssembly::CALL_RESULTS || CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS"
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 431, __PRETTY_FUNCTION__))
;
432
433 bool IsIndirect = CallParams.getOperand(0).isReg();
434 bool IsRetCall = CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS;
435
436 unsigned CallOp;
437 if (IsIndirect && IsRetCall) {
438 CallOp = WebAssembly::RET_CALL_INDIRECT;
439 } else if (IsIndirect) {
440 CallOp = WebAssembly::CALL_INDIRECT;
441 } else if (IsRetCall) {
442 CallOp = WebAssembly::RET_CALL;
443 } else {
444 CallOp = WebAssembly::CALL;
445 }
446
447 MachineFunction &MF = *BB->getParent();
448 const MCInstrDesc &MCID = TII.get(CallOp);
449 MachineInstrBuilder MIB(MF, MF.CreateMachineInstr(MCID, DL));
450
451 // See if we must truncate the function pointer.
452 // CALL_INDIRECT takes an i32, but in wasm64 we represent function pointers
453 // as 64-bit for uniformity with other pointer types.
454 if (IsIndirect && MF.getSubtarget<WebAssemblySubtarget>().hasAddr64()) {
455 Register Reg32 =
456 MF.getRegInfo().createVirtualRegister(&WebAssembly::I32RegClass);
457 auto &FnPtr = CallParams.getOperand(0);
458 BuildMI(*BB, CallResults.getIterator(), DL,
459 TII.get(WebAssembly::I32_WRAP_I64), Reg32)
460 .addReg(FnPtr.getReg());
461 FnPtr.setReg(Reg32);
462 }
463
464 // Move the function pointer to the end of the arguments for indirect calls
465 if (IsIndirect) {
466 auto FnPtr = CallParams.getOperand(0);
467 CallParams.RemoveOperand(0);
468 CallParams.addOperand(FnPtr);
469 }
470
471 for (auto Def : CallResults.defs())
472 MIB.add(Def);
473
474 // Add placeholders for the type index and immediate flags
475 if (IsIndirect) {
476 MIB.addImm(0);
477 MIB.addImm(0);
478 }
479
480 for (auto Use : CallParams.uses())
481 MIB.add(Use);
482
483 BB->insert(CallResults.getIterator(), MIB);
484 CallParams.eraseFromParent();
485 CallResults.eraseFromParent();
486
487 return BB;
488}
489
490MachineBasicBlock *WebAssemblyTargetLowering::EmitInstrWithCustomInserter(
491 MachineInstr &MI, MachineBasicBlock *BB) const {
492 const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
493 DebugLoc DL = MI.getDebugLoc();
494
495 switch (MI.getOpcode()) {
496 default:
497 llvm_unreachable("Unexpected instr type to insert")::llvm::llvm_unreachable_internal("Unexpected instr type to insert"
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 497)
;
498 case WebAssembly::FP_TO_SINT_I32_F32:
499 return LowerFPToInt(MI, DL, BB, TII, false, false, false,
500 WebAssembly::I32_TRUNC_S_F32);
501 case WebAssembly::FP_TO_UINT_I32_F32:
502 return LowerFPToInt(MI, DL, BB, TII, true, false, false,
503 WebAssembly::I32_TRUNC_U_F32);
504 case WebAssembly::FP_TO_SINT_I64_F32:
505 return LowerFPToInt(MI, DL, BB, TII, false, true, false,
506 WebAssembly::I64_TRUNC_S_F32);
507 case WebAssembly::FP_TO_UINT_I64_F32:
508 return LowerFPToInt(MI, DL, BB, TII, true, true, false,
509 WebAssembly::I64_TRUNC_U_F32);
510 case WebAssembly::FP_TO_SINT_I32_F64:
511 return LowerFPToInt(MI, DL, BB, TII, false, false, true,
512 WebAssembly::I32_TRUNC_S_F64);
513 case WebAssembly::FP_TO_UINT_I32_F64:
514 return LowerFPToInt(MI, DL, BB, TII, true, false, true,
515 WebAssembly::I32_TRUNC_U_F64);
516 case WebAssembly::FP_TO_SINT_I64_F64:
517 return LowerFPToInt(MI, DL, BB, TII, false, true, true,
518 WebAssembly::I64_TRUNC_S_F64);
519 case WebAssembly::FP_TO_UINT_I64_F64:
520 return LowerFPToInt(MI, DL, BB, TII, true, true, true,
521 WebAssembly::I64_TRUNC_U_F64);
522 case WebAssembly::CALL_RESULTS:
523 case WebAssembly::RET_CALL_RESULTS:
524 return LowerCallResults(MI, DL, BB, TII);
525 }
526}
527
528const char *
529WebAssemblyTargetLowering::getTargetNodeName(unsigned Opcode) const {
530 switch (static_cast<WebAssemblyISD::NodeType>(Opcode)) {
531 case WebAssemblyISD::FIRST_NUMBER:
532 case WebAssemblyISD::FIRST_MEM_OPCODE:
533 break;
534#define HANDLE_NODETYPE(NODE) \
535 case WebAssemblyISD::NODE: \
536 return "WebAssemblyISD::" #NODE;
537#define HANDLE_MEM_NODETYPE(NODE) HANDLE_NODETYPE(NODE)
538#include "WebAssemblyISD.def"
539#undef HANDLE_MEM_NODETYPE
540#undef HANDLE_NODETYPE
541 }
542 return nullptr;
543}
544
545std::pair<unsigned, const TargetRegisterClass *>
546WebAssemblyTargetLowering::getRegForInlineAsmConstraint(
547 const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
548 // First, see if this is a constraint that directly corresponds to a
549 // WebAssembly register class.
550 if (Constraint.size() == 1) {
551 switch (Constraint[0]) {
552 case 'r':
553 assert(VT != MVT::iPTR && "Pointer MVT not expected here")((VT != MVT::iPTR && "Pointer MVT not expected here")
? static_cast<void> (0) : __assert_fail ("VT != MVT::iPTR && \"Pointer MVT not expected here\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 553, __PRETTY_FUNCTION__))
;
554 if (Subtarget->hasSIMD128() && VT.isVector()) {
555 if (VT.getSizeInBits() == 128)
556 return std::make_pair(0U, &WebAssembly::V128RegClass);
557 }
558 if (VT.isInteger() && !VT.isVector()) {
559 if (VT.getSizeInBits() <= 32)
560 return std::make_pair(0U, &WebAssembly::I32RegClass);
561 if (VT.getSizeInBits() <= 64)
562 return std::make_pair(0U, &WebAssembly::I64RegClass);
563 }
564 break;
565 default:
566 break;
567 }
568 }
569
570 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
571}
572
573bool WebAssemblyTargetLowering::isCheapToSpeculateCttz() const {
574 // Assume ctz is a relatively cheap operation.
575 return true;
576}
577
578bool WebAssemblyTargetLowering::isCheapToSpeculateCtlz() const {
579 // Assume clz is a relatively cheap operation.
580 return true;
581}
582
583bool WebAssemblyTargetLowering::isLegalAddressingMode(const DataLayout &DL,
584 const AddrMode &AM,
585 Type *Ty, unsigned AS,
586 Instruction *I) const {
587 // WebAssembly offsets are added as unsigned without wrapping. The
588 // isLegalAddressingMode gives us no way to determine if wrapping could be
589 // happening, so we approximate this by accepting only non-negative offsets.
590 if (AM.BaseOffs < 0)
591 return false;
592
593 // WebAssembly has no scale register operands.
594 if (AM.Scale != 0)
595 return false;
596
597 // Everything else is legal.
598 return true;
599}
600
601bool WebAssemblyTargetLowering::allowsMisalignedMemoryAccesses(
602 EVT /*VT*/, unsigned /*AddrSpace*/, unsigned /*Align*/,
603 MachineMemOperand::Flags /*Flags*/, bool *Fast) const {
604 // WebAssembly supports unaligned accesses, though it should be declared
605 // with the p2align attribute on loads and stores which do so, and there
606 // may be a performance impact. We tell LLVM they're "fast" because
607 // for the kinds of things that LLVM uses this for (merging adjacent stores
608 // of constants, etc.), WebAssembly implementations will either want the
609 // unaligned access or they'll split anyway.
610 if (Fast)
611 *Fast = true;
612 return true;
613}
614
615bool WebAssemblyTargetLowering::isIntDivCheap(EVT VT,
616 AttributeList Attr) const {
617 // The current thinking is that wasm engines will perform this optimization,
618 // so we can save on code size.
619 return true;
620}
621
622bool WebAssemblyTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
623 EVT ExtT = ExtVal.getValueType();
624 EVT MemT = cast<LoadSDNode>(ExtVal->getOperand(0))->getValueType(0);
625 return (ExtT == MVT::v8i16 && MemT == MVT::v8i8) ||
626 (ExtT == MVT::v4i32 && MemT == MVT::v4i16) ||
627 (ExtT == MVT::v2i64 && MemT == MVT::v2i32);
628}
629
630EVT WebAssemblyTargetLowering::getSetCCResultType(const DataLayout &DL,
631 LLVMContext &C,
632 EVT VT) const {
633 if (VT.isVector())
634 return VT.changeVectorElementTypeToInteger();
635
636 // So far, all branch instructions in Wasm take an I32 condition.
637 // The default TargetLowering::getSetCCResultType returns the pointer size,
638 // which would be useful to reduce instruction counts when testing
639 // against 64-bit pointers/values if at some point Wasm supports that.
640 return EVT::getIntegerVT(C, 32);
641}
642
643bool WebAssemblyTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
644 const CallInst &I,
645 MachineFunction &MF,
646 unsigned Intrinsic) const {
647 switch (Intrinsic) {
648 case Intrinsic::wasm_atomic_notify:
649 Info.opc = ISD::INTRINSIC_W_CHAIN;
650 Info.memVT = MVT::i32;
651 Info.ptrVal = I.getArgOperand(0);
652 Info.offset = 0;
653 Info.align = Align(4);
654 // atomic.notify instruction does not really load the memory specified with
655 // this argument, but MachineMemOperand should either be load or store, so
656 // we set this to a load.
657 // FIXME Volatile isn't really correct, but currently all LLVM atomic
658 // instructions are treated as volatiles in the backend, so we should be
659 // consistent. The same applies for wasm_atomic_wait intrinsics too.
660 Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
661 return true;
662 case Intrinsic::wasm_atomic_wait_i32:
663 Info.opc = ISD::INTRINSIC_W_CHAIN;
664 Info.memVT = MVT::i32;
665 Info.ptrVal = I.getArgOperand(0);
666 Info.offset = 0;
667 Info.align = Align(4);
668 Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
669 return true;
670 case Intrinsic::wasm_atomic_wait_i64:
671 Info.opc = ISD::INTRINSIC_W_CHAIN;
672 Info.memVT = MVT::i64;
673 Info.ptrVal = I.getArgOperand(0);
674 Info.offset = 0;
675 Info.align = Align(8);
676 Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
677 return true;
678 case Intrinsic::wasm_load32_zero:
679 case Intrinsic::wasm_load64_zero:
680 Info.opc = ISD::INTRINSIC_W_CHAIN;
681 Info.memVT = Intrinsic == Intrinsic::wasm_load32_zero ? MVT::i32 : MVT::i64;
682 Info.ptrVal = I.getArgOperand(0);
683 Info.offset = 0;
684 Info.align = Info.memVT == MVT::i32 ? Align(4) : Align(8);
685 Info.flags = MachineMemOperand::MOLoad;
686 return true;
687 default:
688 return false;
689 }
690}
691
692//===----------------------------------------------------------------------===//
693// WebAssembly Lowering private implementation.
694//===----------------------------------------------------------------------===//
695
696//===----------------------------------------------------------------------===//
697// Lowering Code
698//===----------------------------------------------------------------------===//
699
700static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg) {
701 MachineFunction &MF = DAG.getMachineFunction();
702 DAG.getContext()->diagnose(
703 DiagnosticInfoUnsupported(MF.getFunction(), Msg, DL.getDebugLoc()));
704}
705
706// Test whether the given calling convention is supported.
707static bool callingConvSupported(CallingConv::ID CallConv) {
708 // We currently support the language-independent target-independent
709 // conventions. We don't yet have a way to annotate calls with properties like
710 // "cold", and we don't have any call-clobbered registers, so these are mostly
711 // all handled the same.
712 return CallConv == CallingConv::C || CallConv == CallingConv::Fast ||
713 CallConv == CallingConv::Cold ||
714 CallConv == CallingConv::PreserveMost ||
715 CallConv == CallingConv::PreserveAll ||
716 CallConv == CallingConv::CXX_FAST_TLS ||
717 CallConv == CallingConv::WASM_EmscriptenInvoke ||
718 CallConv == CallingConv::Swift;
719}
720
/// Lower an outgoing call into WebAssemblyISD::CALL / RET_CALL node form.
///
/// Responsibilities handled here:
/// - Diagnose unsupported calling conventions and argument attributes.
/// - Decide whether a requested tail call can actually be emitted, falling
///   back to a normal call where allowed.
/// - Copy byval arguments into caller-allocated stack objects.
/// - For swiftcc, pad the argument list with swiftself/swifterror so direct
///   and indirect call signatures match.
/// - For varargs calls, spill the non-fixed arguments into a stack buffer
///   and pass a pointer to that buffer as a trailing operand.
/// - Wrap direct-call global addresses so LowerGlobalAddress (and its GOT
///   handling) is not applied to them.
SDValue
WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                     SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc DL = CLI.DL;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  MachineFunction &MF = DAG.getMachineFunction();
  auto Layout = MF.getDataLayout();

  CallingConv::ID CallConv = CLI.CallConv;
  if (!callingConvSupported(CallConv))
    fail(DL, DAG,
         "WebAssembly doesn't support language-specific or target-specific "
         "calling conventions yet");
  if (CLI.IsPatchPoint)
    fail(DL, DAG, "WebAssembly doesn't support patch point yet");

  if (CLI.IsTailCall) {
    // Downgrade to a non-tail call; only emit a diagnostic when the IR
    // *requires* a tail call (musttail), since then falling back is invalid.
    auto NoTail = [&](const char *Msg) {
      if (CLI.CB && CLI.CB->isMustTailCall())
        fail(DL, DAG, Msg);
      CLI.IsTailCall = false;
    };

    if (!Subtarget->hasTailCall())
      NoTail("WebAssembly 'tail-call' feature not enabled");

    // Varargs calls cannot be tail calls because the buffer is on the stack
    if (CLI.IsVarArg)
      NoTail("WebAssembly does not support varargs tail calls");

    // Do not tail call unless caller and callee return types match
    const Function &F = MF.getFunction();
    const TargetMachine &TM = getTargetMachine();
    Type *RetTy = F.getReturnType();
    SmallVector<MVT, 4> CallerRetTys;
    SmallVector<MVT, 4> CalleeRetTys;
    computeLegalValueVTs(F, TM, RetTy, CallerRetTys);
    computeLegalValueVTs(F, TM, CLI.RetTy, CalleeRetTys);
    bool TypesMatch = CallerRetTys.size() == CalleeRetTys.size() &&
                      std::equal(CallerRetTys.begin(), CallerRetTys.end(),
                                 CalleeRetTys.begin());
    if (!TypesMatch)
      NoTail("WebAssembly tail call requires caller and callee return types to "
             "match");

    // If pointers to local stack values are passed, we cannot tail call
    if (CLI.CB) {
      for (auto &Arg : CLI.CB->args()) {
        Value *Val = Arg.get();
        // Trace the value back through pointer operations
        while (true) {
          Value *Src = Val->stripPointerCastsAndAliases();
          if (auto *GEP = dyn_cast<GetElementPtrInst>(Src))
            Src = GEP->getPointerOperand();
          if (Val == Src)
            break;
          Val = Src;
        }
        if (isa<AllocaInst>(Val)) {
          NoTail(
              "WebAssembly does not support tail calling with stack arguments");
          break;
        }
      }
    }
  }

  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;

  // The generic code may have added an sret argument. If we're lowering an
  // invoke function, the ABI requires that the function pointer be the first
  // argument, so we may have to swap the arguments.
  if (CallConv == CallingConv::WASM_EmscriptenInvoke && Outs.size() >= 2 &&
      Outs[0].Flags.isSRet()) {
    std::swap(Outs[0], Outs[1]);
    std::swap(OutVals[0], OutVals[1]);
  }

  bool HasSwiftSelfArg = false;
  bool HasSwiftErrorArg = false;
  unsigned NumFixedArgs = 0;
  for (unsigned I = 0; I < Outs.size(); ++I) {
    const ISD::OutputArg &Out = Outs[I];
    SDValue &OutVal = OutVals[I];
    HasSwiftSelfArg |= Out.Flags.isSwiftSelf();
    HasSwiftErrorArg |= Out.Flags.isSwiftError();
    if (Out.Flags.isNest())
      fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
    if (Out.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
    if (Out.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
    if (Out.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
    if (Out.Flags.isByVal() && Out.Flags.getByValSize() != 0) {
      // Materialize a caller-local copy of the byval argument and pass a
      // frame index pointing at the copy instead of the original pointer.
      auto &MFI = MF.getFrameInfo();
      int FI = MFI.CreateStackObject(Out.Flags.getByValSize(),
                                     Out.Flags.getNonZeroByValAlign(),
                                     /*isSS=*/false);
      SDValue SizeNode =
          DAG.getConstant(Out.Flags.getByValSize(), DL, MVT::i32);
      SDValue FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
      Chain = DAG.getMemcpy(
          Chain, DL, FINode, OutVal, SizeNode, Out.Flags.getNonZeroByValAlign(),
          /*isVolatile*/ false, /*AlwaysInline=*/false,
          /*isTailCall*/ false, MachinePointerInfo(), MachinePointerInfo());
      OutVal = FINode;
    }
    // Count the number of fixed args *after* legalization.
    NumFixedArgs += Out.IsFixed;
  }

  bool IsVarArg = CLI.IsVarArg;
  auto PtrVT = getPointerTy(Layout);

  // For swiftcc, emit additional swiftself and swifterror arguments
  // if there aren't. These additional arguments are also added for callee
  // signature They are necessary to match callee and caller signature for
  // indirect call.
  if (CallConv == CallingConv::Swift) {
    if (!HasSwiftSelfArg) {
      NumFixedArgs++;
      ISD::OutputArg Arg;
      Arg.Flags.setSwiftSelf();
      CLI.Outs.push_back(Arg);
      SDValue ArgVal = DAG.getUNDEF(PtrVT);
      CLI.OutVals.push_back(ArgVal);
    }
    if (!HasSwiftErrorArg) {
      NumFixedArgs++;
      ISD::OutputArg Arg;
      Arg.Flags.setSwiftError();
      CLI.Outs.push_back(Arg);
      SDValue ArgVal = DAG.getUNDEF(PtrVT);
      CLI.OutVals.push_back(ArgVal);
    }
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());

  if (IsVarArg) {
    // Outgoing non-fixed arguments are placed in a buffer. First
    // compute their offsets and the total amount of buffer space needed.
    for (unsigned I = NumFixedArgs; I < Outs.size(); ++I) {
      const ISD::OutputArg &Out = Outs[I];
      SDValue &Arg = OutVals[I];
      EVT VT = Arg.getValueType();
      assert(VT != MVT::iPTR && "Legalized args should be concrete");
      Type *Ty = VT.getTypeForEVT(*DAG.getContext());
      Align Alignment =
          std::max(Out.Flags.getNonZeroOrigAlign(), Layout.getABITypeAlign(Ty));
      unsigned Offset =
          CCInfo.AllocateStack(Layout.getTypeAllocSize(Ty), Alignment);
      CCInfo.addLoc(CCValAssign::getMem(ArgLocs.size(), VT.getSimpleVT(),
                                        Offset, VT.getSimpleVT(),
                                        CCValAssign::Full));
    }
  }

  unsigned NumBytes = CCInfo.getAlignedCallFrameSize();

  SDValue FINode;
  if (IsVarArg && NumBytes) {
    // For non-fixed arguments, next emit stores to store the argument values
    // to the stack buffer at the offsets computed above.
    int FI = MF.getFrameInfo().CreateStackObject(NumBytes,
                                                 Layout.getStackAlignment(),
                                                 /*isSS=*/false);
    unsigned ValNo = 0;
    SmallVector<SDValue, 8> Chains;
    for (SDValue Arg :
         make_range(OutVals.begin() + NumFixedArgs, OutVals.end())) {
      assert(ArgLocs[ValNo].getValNo() == ValNo &&
             "ArgLocs should remain in order and only hold varargs args");
      unsigned Offset = ArgLocs[ValNo++].getLocMemOffset();
      FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
      SDValue Add = DAG.getNode(ISD::ADD, DL, PtrVT, FINode,
                                DAG.getConstant(Offset, DL, PtrVT));
      Chains.push_back(
          DAG.getStore(Chain, DL, Arg, Add,
                       MachinePointerInfo::getFixedStack(MF, FI, Offset)));
    }
    if (!Chains.empty())
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
  } else if (IsVarArg) {
    // A varargs call with no non-fixed arguments still carries a buffer
    // pointer operand; use a null pointer for the empty buffer.
    FINode = DAG.getIntPtrConstant(0, DL);
  }

  if (Callee->getOpcode() == ISD::GlobalAddress) {
    // If the callee is a GlobalAddress node (quite common, every direct call
    // is) turn it into a TargetGlobalAddress node so that LowerGlobalAddress
    // doesn't at MO_GOT which is not needed for direct calls.
    GlobalAddressSDNode* GA = cast<GlobalAddressSDNode>(Callee);
    Callee = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
                                        getPointerTy(DAG.getDataLayout()),
                                        GA->getOffset());
    Callee = DAG.getNode(WebAssemblyISD::Wrapper, DL,
                         getPointerTy(DAG.getDataLayout()), Callee);
  }

  // Compute the operands for the CALLn node.
  SmallVector<SDValue, 16> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add all fixed arguments. Note that for non-varargs calls, NumFixedArgs
  // isn't reliable.
  Ops.append(OutVals.begin(),
             IsVarArg ? OutVals.begin() + NumFixedArgs : OutVals.end());
  // Add a pointer to the vararg buffer.
  if (IsVarArg)
    Ops.push_back(FINode);

  SmallVector<EVT, 8> InTys;
  for (const auto &In : Ins) {
    assert(!In.Flags.isByVal() && "byval is not valid for return values");
    assert(!In.Flags.isNest() && "nest is not valid for return values");
    if (In.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca return values");
    if (In.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs return values");
    if (In.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG,
           "WebAssembly hasn't implemented cons regs last return values");
    // Ignore In.getNonZeroOrigAlign() because all our arguments are passed in
    // registers.
    InTys.push_back(In.VT);
  }

  if (CLI.IsTailCall) {
    // ret_calls do not return values to the current frame
    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
    return DAG.getNode(WebAssemblyISD::RET_CALL, DL, NodeTys, Ops);
  }

  InTys.push_back(MVT::Other);
  SDVTList InTyList = DAG.getVTList(InTys);
  SDValue Res = DAG.getNode(WebAssemblyISD::CALL, DL, InTyList, Ops);

  // The CALL node's results are the callee's return values followed by the
  // output chain.
  for (size_t I = 0; I < Ins.size(); ++I)
    InVals.push_back(Res.getValue(I));

  // Return the chain
  return Res.getValue(Ins.size());
}
972
973bool WebAssemblyTargetLowering::CanLowerReturn(
974 CallingConv::ID /*CallConv*/, MachineFunction & /*MF*/, bool /*IsVarArg*/,
975 const SmallVectorImpl<ISD::OutputArg> &Outs,
976 LLVMContext & /*Context*/) const {
977 // WebAssembly can only handle returning tuples with multivalue enabled
978 return Subtarget->hasMultivalue() || Outs.size() <= 1;
979}
980
981SDValue WebAssemblyTargetLowering::LowerReturn(
982 SDValue Chain, CallingConv::ID CallConv, bool /*IsVarArg*/,
983 const SmallVectorImpl<ISD::OutputArg> &Outs,
984 const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
985 SelectionDAG &DAG) const {
986 assert((Subtarget->hasMultivalue() || Outs.size() <= 1) &&(((Subtarget->hasMultivalue() || Outs.size() <= 1) &&
"MVP WebAssembly can only return up to one value") ? static_cast
<void> (0) : __assert_fail ("(Subtarget->hasMultivalue() || Outs.size() <= 1) && \"MVP WebAssembly can only return up to one value\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 987, __PRETTY_FUNCTION__))
987 "MVP WebAssembly can only return up to one value")(((Subtarget->hasMultivalue() || Outs.size() <= 1) &&
"MVP WebAssembly can only return up to one value") ? static_cast
<void> (0) : __assert_fail ("(Subtarget->hasMultivalue() || Outs.size() <= 1) && \"MVP WebAssembly can only return up to one value\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 987, __PRETTY_FUNCTION__))
;
988 if (!callingConvSupported(CallConv))
989 fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");
990
991 SmallVector<SDValue, 4> RetOps(1, Chain);
992 RetOps.append(OutVals.begin(), OutVals.end());
993 Chain = DAG.getNode(WebAssemblyISD::RETURN, DL, MVT::Other, RetOps);
994
995 // Record the number and types of the return values.
996 for (const ISD::OutputArg &Out : Outs) {
997 assert(!Out.Flags.isByVal() && "byval is not valid for return values")((!Out.Flags.isByVal() && "byval is not valid for return values"
) ? static_cast<void> (0) : __assert_fail ("!Out.Flags.isByVal() && \"byval is not valid for return values\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 997, __PRETTY_FUNCTION__))
;
998 assert(!Out.Flags.isNest() && "nest is not valid for return values")((!Out.Flags.isNest() && "nest is not valid for return values"
) ? static_cast<void> (0) : __assert_fail ("!Out.Flags.isNest() && \"nest is not valid for return values\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 998, __PRETTY_FUNCTION__))
;
999 assert(Out.IsFixed && "non-fixed return value is not valid")((Out.IsFixed && "non-fixed return value is not valid"
) ? static_cast<void> (0) : __assert_fail ("Out.IsFixed && \"non-fixed return value is not valid\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 999, __PRETTY_FUNCTION__))
;
1000 if (Out.Flags.isInAlloca())
1001 fail(DL, DAG, "WebAssembly hasn't implemented inalloca results");
1002 if (Out.Flags.isInConsecutiveRegs())
1003 fail(DL, DAG, "WebAssembly hasn't implemented cons regs results");
1004 if (Out.Flags.isInConsecutiveRegsLast())
1005 fail(DL, DAG, "WebAssembly hasn't implemented cons regs last results");
1006 }
1007
1008 return Chain;
1009}
1010
/// Lower the incoming formal arguments into WebAssemblyISD::ARGUMENT nodes
/// and record the function's parameter and result types in the
/// WebAssemblyFunctionInfo so later passes can emit the wasm signature.
SDValue WebAssemblyTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  if (!callingConvSupported(CallConv))
    fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");

  MachineFunction &MF = DAG.getMachineFunction();
  auto *MFI = MF.getInfo<WebAssemblyFunctionInfo>();

  // Set up the incoming ARGUMENTS value, which serves to represent the liveness
  // of the incoming values before they're represented by virtual registers.
  MF.getRegInfo().addLiveIn(WebAssembly::ARGUMENTS);

  bool HasSwiftErrorArg = false;
  bool HasSwiftSelfArg = false;
  for (const ISD::InputArg &In : Ins) {
    HasSwiftSelfArg |= In.Flags.isSwiftSelf();
    HasSwiftErrorArg |= In.Flags.isSwiftError();
    if (In.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
    if (In.Flags.isNest())
      fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
    if (In.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
    if (In.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
    // Ignore In.getNonZeroOrigAlign() because all our arguments are passed in
    // registers. Unused arguments become UNDEF rather than an ARGUMENT node.
    InVals.push_back(In.Used ? DAG.getNode(WebAssemblyISD::ARGUMENT, DL, In.VT,
                                           DAG.getTargetConstant(InVals.size(),
                                                                 DL, MVT::i32))
                             : DAG.getUNDEF(In.VT));

    // Record the number and types of arguments.
    MFI->addParam(In.VT);
  }

  // For swiftcc, emit additional swiftself and swifterror arguments
  // if there aren't. These additional arguments are also added for callee
  // signature They are necessary to match callee and caller signature for
  // indirect call.
  auto PtrVT = getPointerTy(MF.getDataLayout());
  if (CallConv == CallingConv::Swift) {
    if (!HasSwiftSelfArg) {
      MFI->addParam(PtrVT);
    }
    if (!HasSwiftErrorArg) {
      MFI->addParam(PtrVT);
    }
  }
  // Varargs are copied into a buffer allocated by the caller, and a pointer to
  // the buffer is passed as an argument.
  if (IsVarArg) {
    MVT PtrVT = getPointerTy(MF.getDataLayout());
    Register VarargVreg =
        MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrVT));
    MFI->setVarargBufferVreg(VarargVreg);
    Chain = DAG.getCopyToReg(
        Chain, DL, VarargVreg,
        DAG.getNode(WebAssemblyISD::ARGUMENT, DL, PtrVT,
                    DAG.getTargetConstant(Ins.size(), DL, MVT::i32)));
    MFI->addParam(PtrVT);
  }

  // Record the number and types of arguments and results.
  SmallVector<MVT, 4> Params;
  SmallVector<MVT, 4> Results;
  computeSignatureVTs(MF.getFunction().getFunctionType(), &MF.getFunction(),
                      MF.getFunction(), DAG.getTarget(), Params, Results);
  for (MVT VT : Results)
    MFI->addResult(VT);
  // TODO: Use signatures in WebAssemblyMachineFunctionInfo too and unify
  // the param logic here with ComputeSignatureVTs
  assert(MFI->getParams().size() == Params.size() &&
         std::equal(MFI->getParams().begin(), MFI->getParams().end(),
                    Params.begin()));

  return Chain;
}
1091
1092void WebAssemblyTargetLowering::ReplaceNodeResults(
1093 SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
1094 switch (N->getOpcode()) {
1095 case ISD::SIGN_EXTEND_INREG:
1096 // Do not add any results, signifying that N should not be custom lowered
1097 // after all. This happens because simd128 turns on custom lowering for
1098 // SIGN_EXTEND_INREG, but for non-vector sign extends the result might be an
1099 // illegal type.
1100 break;
1101 default:
1102 llvm_unreachable(::llvm::llvm_unreachable_internal("ReplaceNodeResults not implemented for this op for WebAssembly!"
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1103)
1103 "ReplaceNodeResults not implemented for this op for WebAssembly!")::llvm::llvm_unreachable_internal("ReplaceNodeResults not implemented for this op for WebAssembly!"
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1103)
;
1104 }
1105}
1106
1107//===----------------------------------------------------------------------===//
1108// Custom lowering hooks.
1109//===----------------------------------------------------------------------===//
1110
/// Central dispatch for the operations marked Custom in the constructor.
/// Each case forwards to a dedicated Lower* helper; anything else reaching
/// here is a backend bug.
SDValue WebAssemblyTargetLowering::LowerOperation(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDLoc DL(Op);
  switch (Op.getOpcode()) {
  default:
    llvm_unreachable("unimplemented operation lowering");
    return SDValue();
  case ISD::FrameIndex:
    return LowerFrameIndex(Op, DAG);
  case ISD::GlobalAddress:
    return LowerGlobalAddress(Op, DAG);
  case ISD::ExternalSymbol:
    return LowerExternalSymbol(Op, DAG);
  case ISD::JumpTable:
    return LowerJumpTable(Op, DAG);
  case ISD::BR_JT:
    return LowerBR_JT(Op, DAG);
  case ISD::VASTART:
    return LowerVASTART(Op, DAG);
  case ISD::BlockAddress:
  case ISD::BRIND:
    // Computed gotos are not supported; diagnose and bail out.
    fail(DL, DAG, "WebAssembly hasn't implemented computed gotos");
    return SDValue();
  case ISD::RETURNADDR:
    return LowerRETURNADDR(Op, DAG);
  case ISD::FRAMEADDR:
    return LowerFRAMEADDR(Op, DAG);
  case ISD::CopyToReg:
    return LowerCopyToReg(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT:
  case ISD::INSERT_VECTOR_ELT:
    return LowerAccessVectorElement(Op, DAG);
  case ISD::INTRINSIC_VOID:
  case ISD::INTRINSIC_WO_CHAIN:
  case ISD::INTRINSIC_W_CHAIN:
    return LowerIntrinsic(Op, DAG);
  case ISD::SIGN_EXTEND_INREG:
    return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::BUILD_VECTOR:
    return LowerBUILD_VECTOR(Op, DAG);
  case ISD::VECTOR_SHUFFLE:
    return LowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::SETCC:
    return LowerSETCC(Op, DAG);
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
    return LowerShift(Op, DAG);
  }
}
1161
1162SDValue WebAssemblyTargetLowering::LowerCopyToReg(SDValue Op,
1163 SelectionDAG &DAG) const {
1164 SDValue Src = Op.getOperand(2);
1165 if (isa<FrameIndexSDNode>(Src.getNode())) {
1166 // CopyToReg nodes don't support FrameIndex operands. Other targets select
1167 // the FI to some LEA-like instruction, but since we don't have that, we
1168 // need to insert some kind of instruction that can take an FI operand and
1169 // produces a value usable by CopyToReg (i.e. in a vreg). So insert a dummy
1170 // local.copy between Op and its FI operand.
1171 SDValue Chain = Op.getOperand(0);
1172 SDLoc DL(Op);
1173 unsigned Reg = cast<RegisterSDNode>(Op.getOperand(1))->getReg();
1174 EVT VT = Src.getValueType();
1175 SDValue Copy(DAG.getMachineNode(VT == MVT::i32 ? WebAssembly::COPY_I32
1176 : WebAssembly::COPY_I64,
1177 DL, VT, Src),
1178 0);
1179 return Op.getNode()->getNumValues() == 1
1180 ? DAG.getCopyToReg(Chain, DL, Reg, Copy)
1181 : DAG.getCopyToReg(Chain, DL, Reg, Copy,
1182 Op.getNumOperands() == 4 ? Op.getOperand(3)
1183 : SDValue());
1184 }
1185 return SDValue();
1186}
1187
1188SDValue WebAssemblyTargetLowering::LowerFrameIndex(SDValue Op,
1189 SelectionDAG &DAG) const {
1190 int FI = cast<FrameIndexSDNode>(Op)->getIndex();
1191 return DAG.getTargetFrameIndex(FI, Op.getValueType());
1192}
1193
1194SDValue WebAssemblyTargetLowering::LowerRETURNADDR(SDValue Op,
1195 SelectionDAG &DAG) const {
1196 SDLoc DL(Op);
1197
1198 if (!Subtarget->getTargetTriple().isOSEmscripten()) {
1199 fail(DL, DAG,
1200 "Non-Emscripten WebAssembly hasn't implemented "
1201 "__builtin_return_address");
1202 return SDValue();
1203 }
1204
1205 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
1206 return SDValue();
1207
1208 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1209 MakeLibCallOptions CallOptions;
1210 return makeLibCall(DAG, RTLIB::RETURN_ADDRESS, Op.getValueType(),
1211 {DAG.getConstant(Depth, DL, MVT::i32)}, CallOptions, DL)
1212 .first;
1213}
1214
1215SDValue WebAssemblyTargetLowering::LowerFRAMEADDR(SDValue Op,
1216 SelectionDAG &DAG) const {
1217 // Non-zero depths are not supported by WebAssembly currently. Use the
1218 // legalizer's default expansion, which is to return 0 (what this function is
1219 // documented to do).
1220 if (Op.getConstantOperandVal(0) > 0)
1221 return SDValue();
1222
1223 DAG.getMachineFunction().getFrameInfo().setFrameAddressIsTaken(true);
1224 EVT VT = Op.getValueType();
1225 Register FP =
1226 Subtarget->getRegisterInfo()->getFrameRegister(DAG.getMachineFunction());
1227 return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), FP, VT);
1228}
1229
/// Lower a GlobalAddress node. In non-PIC code this is a simple wrap of the
/// target global address. In PIC code, DSO-local symbols are computed as an
/// offset from __table_base (functions) or __memory_base (data), and all
/// other symbols go through the GOT (MO_GOT).
SDValue WebAssemblyTargetLowering::LowerGlobalAddress(SDValue Op,
                                                      SelectionDAG &DAG) const {
  SDLoc DL(Op);
  const auto *GA = cast<GlobalAddressSDNode>(Op);
  EVT VT = Op.getValueType();
  assert(GA->getTargetFlags() == 0 &&
         "Unexpected target flags on generic GlobalAddressSDNode");
  if (GA->getAddressSpace() != 0)
    fail(DL, DAG, "WebAssembly only expects the 0 address space");

  unsigned OperandFlags = 0;
  if (isPositionIndependent()) {
    const GlobalValue *GV = GA->getGlobal();
    if (getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV)) {
      // DSO-local: emit BASE + REL, where BASE is the appropriate runtime
      // base symbol and REL is the symbol's offset relative to that base.
      MachineFunction &MF = DAG.getMachineFunction();
      MVT PtrVT = getPointerTy(MF.getDataLayout());
      const char *BaseName;
      if (GV->getValueType()->isFunctionTy()) {
        // Function addresses are indices into the indirect-call table.
        BaseName = MF.createExternalSymbolName("__table_base");
        OperandFlags = WebAssemblyII::MO_TABLE_BASE_REL;
      }
      else {
        // Data addresses are offsets into linear memory.
        BaseName = MF.createExternalSymbolName("__memory_base");
        OperandFlags = WebAssemblyII::MO_MEMORY_BASE_REL;
      }
      SDValue BaseAddr =
          DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT,
                      DAG.getTargetExternalSymbol(BaseName, PtrVT));

      SDValue SymAddr = DAG.getNode(
          WebAssemblyISD::WrapperPIC, DL, VT,
          DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT, GA->getOffset(),
                                     OperandFlags));

      return DAG.getNode(ISD::ADD, DL, VT, BaseAddr, SymAddr);
    } else {
      // Not provably DSO-local: load the address through the GOT.
      OperandFlags = WebAssemblyII::MO_GOT;
    }
  }

  return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
                     DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT,
                                                GA->getOffset(), OperandFlags));
}
1274
1275SDValue
1276WebAssemblyTargetLowering::LowerExternalSymbol(SDValue Op,
1277 SelectionDAG &DAG) const {
1278 SDLoc DL(Op);
1279 const auto *ES = cast<ExternalSymbolSDNode>(Op);
1280 EVT VT = Op.getValueType();
1281 assert(ES->getTargetFlags() == 0 &&((ES->getTargetFlags() == 0 && "Unexpected target flags on generic ExternalSymbolSDNode"
) ? static_cast<void> (0) : __assert_fail ("ES->getTargetFlags() == 0 && \"Unexpected target flags on generic ExternalSymbolSDNode\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1282, __PRETTY_FUNCTION__))
1282 "Unexpected target flags on generic ExternalSymbolSDNode")((ES->getTargetFlags() == 0 && "Unexpected target flags on generic ExternalSymbolSDNode"
) ? static_cast<void> (0) : __assert_fail ("ES->getTargetFlags() == 0 && \"Unexpected target flags on generic ExternalSymbolSDNode\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1282, __PRETTY_FUNCTION__))
;
1283 return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1284 DAG.getTargetExternalSymbol(ES->getSymbol(), VT));
1285}
1286
1287SDValue WebAssemblyTargetLowering::LowerJumpTable(SDValue Op,
1288 SelectionDAG &DAG) const {
1289 // There's no need for a Wrapper node because we always incorporate a jump
1290 // table operand into a BR_TABLE instruction, rather than ever
1291 // materializing it in a register.
1292 const JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
1293 return DAG.getTargetJumpTable(JT->getIndex(), Op.getValueType(),
1294 JT->getTargetFlags());
1295}
1296
1297SDValue WebAssemblyTargetLowering::LowerBR_JT(SDValue Op,
1298 SelectionDAG &DAG) const {
1299 SDLoc DL(Op);
1300 SDValue Chain = Op.getOperand(0);
1301 const auto *JT = cast<JumpTableSDNode>(Op.getOperand(1));
1302 SDValue Index = Op.getOperand(2);
1303 assert(JT->getTargetFlags() == 0 && "WebAssembly doesn't set target flags")((JT->getTargetFlags() == 0 && "WebAssembly doesn't set target flags"
) ? static_cast<void> (0) : __assert_fail ("JT->getTargetFlags() == 0 && \"WebAssembly doesn't set target flags\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1303, __PRETTY_FUNCTION__))
;
1304
1305 SmallVector<SDValue, 8> Ops;
1306 Ops.push_back(Chain);
1307 Ops.push_back(Index);
1308
1309 MachineJumpTableInfo *MJTI = DAG.getMachineFunction().getJumpTableInfo();
1310 const auto &MBBs = MJTI->getJumpTables()[JT->getIndex()].MBBs;
1311
1312 // Add an operand for each case.
1313 for (auto MBB : MBBs)
1314 Ops.push_back(DAG.getBasicBlock(MBB));
1315
1316 // Add the first MBB as a dummy default target for now. This will be replaced
1317 // with the proper default target (and the preceding range check eliminated)
1318 // if possible by WebAssemblyFixBrTableDefaults.
1319 Ops.push_back(DAG.getBasicBlock(*MBBs.begin()));
1320 return DAG.getNode(WebAssemblyISD::BR_TABLE, DL, MVT::Other, Ops);
1321}
1322
1323SDValue WebAssemblyTargetLowering::LowerVASTART(SDValue Op,
1324 SelectionDAG &DAG) const {
1325 SDLoc DL(Op);
1326 EVT PtrVT = getPointerTy(DAG.getMachineFunction().getDataLayout());
1327
1328 auto *MFI = DAG.getMachineFunction().getInfo<WebAssemblyFunctionInfo>();
1329 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
1330
1331 SDValue ArgN = DAG.getCopyFromReg(DAG.getEntryNode(), DL,
1332 MFI->getVarargBufferVreg(), PtrVT);
1333 return DAG.getStore(Op.getOperand(0), DL, ArgN, Op.getOperand(1),
1334 MachinePointerInfo(SV));
1335}
1336
1337SDValue WebAssemblyTargetLowering::LowerIntrinsic(SDValue Op,
1338 SelectionDAG &DAG) const {
1339 MachineFunction &MF = DAG.getMachineFunction();
1340 unsigned IntNo;
1341 switch (Op.getOpcode()) {
1342 case ISD::INTRINSIC_VOID:
1343 case ISD::INTRINSIC_W_CHAIN:
1344 IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
1345 break;
1346 case ISD::INTRINSIC_WO_CHAIN:
1347 IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1348 break;
1349 default:
1350 llvm_unreachable("Invalid intrinsic")::llvm::llvm_unreachable_internal("Invalid intrinsic", "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1350)
;
1351 }
1352 SDLoc DL(Op);
1353
1354 switch (IntNo) {
1355 default:
1356 return SDValue(); // Don't custom lower most intrinsics.
1357
1358 case Intrinsic::wasm_lsda: {
1359 EVT VT = Op.getValueType();
1360 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1361 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
1362 auto &Context = MF.getMMI().getContext();
1363 MCSymbol *S = Context.getOrCreateSymbol(Twine("GCC_except_table") +
1364 Twine(MF.getFunctionNumber()));
1365 return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1366 DAG.getMCSymbol(S, PtrVT));
1367 }
1368
1369 case Intrinsic::wasm_throw: {
1370 // We only support C++ exceptions for now
1371 int Tag = cast<ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
1372 if (Tag != CPP_EXCEPTION)
1373 llvm_unreachable("Invalid tag!")::llvm::llvm_unreachable_internal("Invalid tag!", "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1373)
;
1374 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1375 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
1376 const char *SymName = MF.createExternalSymbolName("__cpp_exception");
1377 SDValue SymNode = DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT,
1378 DAG.getTargetExternalSymbol(SymName, PtrVT));
1379 return DAG.getNode(WebAssemblyISD::THROW, DL,
1380 MVT::Other, // outchain type
1381 {
1382 Op.getOperand(0), // inchain
1383 SymNode, // exception symbol
1384 Op.getOperand(3) // thrown value
1385 });
1386 }
1387
1388 case Intrinsic::wasm_shuffle: {
1389 // Drop in-chain and replace undefs, but otherwise pass through unchanged
1390 SDValue Ops[18];
1391 size_t OpIdx = 0;
1392 Ops[OpIdx++] = Op.getOperand(1);
1393 Ops[OpIdx++] = Op.getOperand(2);
1394 while (OpIdx < 18) {
1395 const SDValue &MaskIdx = Op.getOperand(OpIdx + 1);
1396 if (MaskIdx.isUndef() ||
1397 cast<ConstantSDNode>(MaskIdx.getNode())->getZExtValue() >= 32) {
1398 Ops[OpIdx++] = DAG.getConstant(0, DL, MVT::i32);
1399 } else {
1400 Ops[OpIdx++] = MaskIdx;
1401 }
1402 }
1403 return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops);
1404 }
1405 }
1406}
1407
1408SDValue
1409WebAssemblyTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
1410 SelectionDAG &DAG) const {
1411 SDLoc DL(Op);
1412 // If sign extension operations are disabled, allow sext_inreg only if operand
1413 // is a vector extract of an i8 or i16 lane. SIMD does not depend on sign
1414 // extension operations, but allowing sext_inreg in this context lets us have
1415 // simple patterns to select extract_lane_s instructions. Expanding sext_inreg
1416 // everywhere would be simpler in this file, but would necessitate large and
1417 // brittle patterns to undo the expansion and select extract_lane_s
1418 // instructions.
1419 assert(!Subtarget->hasSignExt() && Subtarget->hasSIMD128())((!Subtarget->hasSignExt() && Subtarget->hasSIMD128
()) ? static_cast<void> (0) : __assert_fail ("!Subtarget->hasSignExt() && Subtarget->hasSIMD128()"
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1419, __PRETTY_FUNCTION__))
;
1420 if (Op.getOperand(0).getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1421 return SDValue();
1422
1423 const SDValue &Extract = Op.getOperand(0);
1424 MVT VecT = Extract.getOperand(0).getSimpleValueType();
1425 if (VecT.getVectorElementType().getSizeInBits() > 32)
1426 return SDValue();
1427 MVT ExtractedLaneT =
1428 cast<VTSDNode>(Op.getOperand(1).getNode())->getVT().getSimpleVT();
1429 MVT ExtractedVecT =
1430 MVT::getVectorVT(ExtractedLaneT, 128 / ExtractedLaneT.getSizeInBits());
1431 if (ExtractedVecT == VecT)
1432 return Op;
1433
1434 // Bitcast vector to appropriate type to ensure ISel pattern coverage
1435 const SDNode *Index = Extract.getOperand(1).getNode();
1436 if (!isa<ConstantSDNode>(Index))
1437 return SDValue();
1438 unsigned IndexVal = cast<ConstantSDNode>(Index)->getZExtValue();
1439 unsigned Scale =
1440 ExtractedVecT.getVectorNumElements() / VecT.getVectorNumElements();
1441 assert(Scale > 1)((Scale > 1) ? static_cast<void> (0) : __assert_fail
("Scale > 1", "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1441, __PRETTY_FUNCTION__))
;
1442 SDValue NewIndex =
1443 DAG.getConstant(IndexVal * Scale, DL, Index->getValueType(0));
1444 SDValue NewExtract = DAG.getNode(
1445 ISD::EXTRACT_VECTOR_ELT, DL, Extract.getValueType(),
1446 DAG.getBitcast(ExtractedVecT, Extract.getOperand(0)), NewIndex);
1447 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, Op.getValueType(), NewExtract,
1448 Op.getOperand(1));
1449}
1450
1451SDValue WebAssemblyTargetLowering::LowerBUILD_VECTOR(SDValue Op,
1452 SelectionDAG &DAG) const {
1453 SDLoc DL(Op);
1454 const EVT VecT = Op.getValueType();
1455 const EVT LaneT = Op.getOperand(0).getValueType();
1456 const size_t Lanes = Op.getNumOperands();
1457 bool CanSwizzle = VecT == MVT::v16i8;
1458
1459 // BUILD_VECTORs are lowered to the instruction that initializes the highest
1460 // possible number of lanes at once followed by a sequence of replace_lane
1461 // instructions to individually initialize any remaining lanes.
1462
1463 // TODO: Tune this. For example, lanewise swizzling is very expensive, so
1464 // swizzled lanes should be given greater weight.
1465
1466 // TODO: Investigate building vectors by shuffling together vectors built by
1467 // separately specialized means.
1468
1469 auto IsConstant = [](const SDValue &V) {
1470 return V.getOpcode() == ISD::Constant || V.getOpcode() == ISD::ConstantFP;
1471 };
1472
1473 // Returns the source vector and index vector pair if they exist. Checks for:
1474 // (extract_vector_elt
1475 // $src,
1476 // (sign_extend_inreg (extract_vector_elt $indices, $i))
1477 // )
1478 auto GetSwizzleSrcs = [](size_t I, const SDValue &Lane) {
1479 auto Bail = std::make_pair(SDValue(), SDValue());
1480 if (Lane->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1481 return Bail;
1482 const SDValue &SwizzleSrc = Lane->getOperand(0);
1483 const SDValue &IndexExt = Lane->getOperand(1);
1484 if (IndexExt->getOpcode() != ISD::SIGN_EXTEND_INREG)
1485 return Bail;
1486 const SDValue &Index = IndexExt->getOperand(0);
1487 if (Index->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1488 return Bail;
1489 const SDValue &SwizzleIndices = Index->getOperand(0);
1490 if (SwizzleSrc.getValueType() != MVT::v16i8 ||
1491 SwizzleIndices.getValueType() != MVT::v16i8 ||
1492 Index->getOperand(1)->getOpcode() != ISD::Constant ||
1493 Index->getConstantOperandVal(1) != I)
1494 return Bail;
1495 return std::make_pair(SwizzleSrc, SwizzleIndices);
1496 };
1497
1498 using ValueEntry = std::pair<SDValue, size_t>;
1499 SmallVector<ValueEntry, 16> SplatValueCounts;
1500
1501 using SwizzleEntry = std::pair<std::pair<SDValue, SDValue>, size_t>;
1502 SmallVector<SwizzleEntry, 16> SwizzleCounts;
1503
1504 auto AddCount = [](auto &Counts, const auto &Val) {
1505 auto CountIt = std::find_if(Counts.begin(), Counts.end(),
1506 [&Val](auto E) { return E.first == Val; });
1507 if (CountIt == Counts.end()) {
1508 Counts.emplace_back(Val, 1);
1509 } else {
1510 CountIt->second++;
1511 }
1512 };
1513
1514 auto GetMostCommon = [](auto &Counts) {
1515 auto CommonIt =
1516 std::max_element(Counts.begin(), Counts.end(),
1517 [](auto A, auto B) { return A.second < B.second; });
1518 assert(CommonIt != Counts.end() && "Unexpected all-undef build_vector")((CommonIt != Counts.end() && "Unexpected all-undef build_vector"
) ? static_cast<void> (0) : __assert_fail ("CommonIt != Counts.end() && \"Unexpected all-undef build_vector\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1518, __PRETTY_FUNCTION__))
;
1519 return *CommonIt;
1520 };
1521
1522 size_t NumConstantLanes = 0;
1523
1524 // Count eligible lanes for each type of vector creation op
1525 for (size_t I = 0; I
2.1
'I' is < 'Lanes'
2.1
'I' is < 'Lanes'
2.1
'I' is < 'Lanes'
< Lanes
; ++I) {
3
Loop condition is true. Entering loop body
8
Assuming 'I' is >= 'Lanes'
9
Loop condition is false. Execution continues on line 1541
1526 const SDValue &Lane = Op->getOperand(I);
1527 if (Lane.isUndef())
4
Taking false branch
1528 continue;
1529
1530 AddCount(SplatValueCounts, Lane);
1531
1532 if (IsConstant(Lane)) {
5
Taking false branch
1533 NumConstantLanes++;
1534 } else if (CanSwizzle
5.1
'CanSwizzle' is true
5.1
'CanSwizzle' is true
5.1
'CanSwizzle' is true
) {
6
Taking true branch
1535 auto SwizzleSrcs = GetSwizzleSrcs(I, Lane);
1536 if (SwizzleSrcs.first)
7
Taking true branch
1537 AddCount(SwizzleCounts, SwizzleSrcs);
1538 }
1539 }
1540
1541 SDValue SplatValue;
1542 size_t NumSplatLanes;
1543 std::tie(SplatValue, NumSplatLanes) = GetMostCommon(SplatValueCounts);
1544
1545 SDValue SwizzleSrc;
1546 SDValue SwizzleIndices;
1547 size_t NumSwizzleLanes = 0;
1548 if (SwizzleCounts.size())
10
Assuming the condition is false
11
Taking false branch
1549 std::forward_as_tuple(std::tie(SwizzleSrc, SwizzleIndices),
1550 NumSwizzleLanes) = GetMostCommon(SwizzleCounts);
1551
1552 // Predicate returning true if the lane is properly initialized by the
1553 // original instruction
1554 std::function<bool(size_t, const SDValue &)> IsLaneConstructed;
1555 SDValue Result;
1556 // Prefer swizzles over vector consts over splats
1557 if (NumSwizzleLanes >= NumSplatLanes &&
12
Assuming 'NumSwizzleLanes' is < 'NumSplatLanes'
1558 (!Subtarget->hasUnimplementedSIMD128() ||
1559 NumSwizzleLanes >= NumConstantLanes)) {
1560 Result = DAG.getNode(WebAssemblyISD::SWIZZLE, DL, VecT, SwizzleSrc,
1561 SwizzleIndices);
1562 auto Swizzled = std::make_pair(SwizzleSrc, SwizzleIndices);
1563 IsLaneConstructed = [&, Swizzled](size_t I, const SDValue &Lane) {
1564 return Swizzled == GetSwizzleSrcs(I, Lane);
1565 };
1566 } else if (NumConstantLanes
12.1
'NumConstantLanes' is < 'NumSplatLanes'
12.1
'NumConstantLanes' is < 'NumSplatLanes'
12.1
'NumConstantLanes' is < 'NumSplatLanes'
>= NumSplatLanes &&
1567 Subtarget->hasUnimplementedSIMD128()) {
1568 SmallVector<SDValue, 16> ConstLanes;
1569 for (const SDValue &Lane : Op->op_values()) {
1570 if (IsConstant(Lane)) {
1571 ConstLanes.push_back(Lane);
1572 } else if (LaneT.isFloatingPoint()) {
1573 ConstLanes.push_back(DAG.getConstantFP(0, DL, LaneT));
1574 } else {
1575 ConstLanes.push_back(DAG.getConstant(0, DL, LaneT));
1576 }
1577 }
1578 Result = DAG.getBuildVector(VecT, DL, ConstLanes);
1579 IsLaneConstructed = [&](size_t _, const SDValue &Lane) {
1580 return IsConstant(Lane);
1581 };
1582 }
1583 if (!Result) {
13
Taking true branch
1584 // Use a splat, but possibly a load_splat
1585 LoadSDNode *SplattedLoad;
1586 if ((SplattedLoad = dyn_cast<LoadSDNode>(SplatValue)) &&
14
Assuming 'SplattedLoad' is null
15
Assuming pointer value is null
16
Taking false branch
1587 SplattedLoad->getMemoryVT() == VecT.getVectorElementType()) {
1588 Result = DAG.getMemIntrinsicNode(
1589 WebAssemblyISD::LOAD_SPLAT, DL, DAG.getVTList(VecT),
1590 {SplattedLoad->getChain(), SplattedLoad->getBasePtr(),
1591 SplattedLoad->getOffset()},
1592 SplattedLoad->getMemoryVT(), SplattedLoad->getMemOperand());
1593 } else {
1594 Result = DAG.getSplatBuildVector(VecT, DL, SplatValue);
17
Value assigned to 'Op.Node'
18
Calling 'SelectionDAG::getSplatBuildVector'
1595 }
1596 IsLaneConstructed = [&](size_t _, const SDValue &Lane) {
1597 return Lane == SplatValue;
1598 };
1599 }
1600
1601 // Add replace_lane instructions for any unhandled values
1602 for (size_t I = 0; I < Lanes; ++I) {
1603 const SDValue &Lane = Op->getOperand(I);
1604 if (!Lane.isUndef() && !IsLaneConstructed(I, Lane))
1605 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VecT, Result, Lane,
1606 DAG.getConstant(I, DL, MVT::i32));
1607 }
1608
1609 return Result;
1610}
1611
1612SDValue
1613WebAssemblyTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
1614 SelectionDAG &DAG) const {
1615 SDLoc DL(Op);
1616 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op.getNode())->getMask();
1617 MVT VecType = Op.getOperand(0).getSimpleValueType();
1618 assert(VecType.is128BitVector() && "Unexpected shuffle vector type")((VecType.is128BitVector() && "Unexpected shuffle vector type"
) ? static_cast<void> (0) : __assert_fail ("VecType.is128BitVector() && \"Unexpected shuffle vector type\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1618, __PRETTY_FUNCTION__))
;
1619 size_t LaneBytes = VecType.getVectorElementType().getSizeInBits() / 8;
1620
1621 // Space for two vector args and sixteen mask indices
1622 SDValue Ops[18];
1623 size_t OpIdx = 0;
1624 Ops[OpIdx++] = Op.getOperand(0);
1625 Ops[OpIdx++] = Op.getOperand(1);
1626
1627 // Expand mask indices to byte indices and materialize them as operands
1628 for (int M : Mask) {
1629 for (size_t J = 0; J < LaneBytes; ++J) {
1630 // Lower undefs (represented by -1 in mask) to zero
1631 uint64_t ByteIndex = M == -1 ? 0 : (uint64_t)M * LaneBytes + J;
1632 Ops[OpIdx++] = DAG.getConstant(ByteIndex, DL, MVT::i32);
1633 }
1634 }
1635
1636 return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops);
1637}
1638
1639SDValue WebAssemblyTargetLowering::LowerSETCC(SDValue Op,
1640 SelectionDAG &DAG) const {
1641 SDLoc DL(Op);
1642 // The legalizer does not know how to expand the comparison modes of i64x2
1643 // vectors because no comparison modes are supported. We could solve this by
1644 // expanding all i64x2 SETCC nodes, but that seems to expand f64x2 SETCC nodes
1645 // (which return i64x2 results) as well. So instead we manually unroll i64x2
1646 // comparisons here.
1647 assert(Op->getOperand(0)->getSimpleValueType(0) == MVT::v2i64)((Op->getOperand(0)->getSimpleValueType(0) == MVT::v2i64
) ? static_cast<void> (0) : __assert_fail ("Op->getOperand(0)->getSimpleValueType(0) == MVT::v2i64"
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1647, __PRETTY_FUNCTION__))
;
1648 SmallVector<SDValue, 2> LHS, RHS;
1649 DAG.ExtractVectorElements(Op->getOperand(0), LHS);
1650 DAG.ExtractVectorElements(Op->getOperand(1), RHS);
1651 const SDValue &CC = Op->getOperand(2);
1652 auto MakeLane = [&](unsigned I) {
1653 return DAG.getNode(ISD::SELECT_CC, DL, MVT::i64, LHS[I], RHS[I],
1654 DAG.getConstant(uint64_t(-1), DL, MVT::i64),
1655 DAG.getConstant(uint64_t(0), DL, MVT::i64), CC);
1656 };
1657 return DAG.getBuildVector(Op->getValueType(0), DL,
1658 {MakeLane(0), MakeLane(1)});
1659}
1660
1661SDValue
1662WebAssemblyTargetLowering::LowerAccessVectorElement(SDValue Op,
1663 SelectionDAG &DAG) const {
1664 // Allow constant lane indices, expand variable lane indices
1665 SDNode *IdxNode = Op.getOperand(Op.getNumOperands() - 1).getNode();
1666 if (isa<ConstantSDNode>(IdxNode) || IdxNode->isUndef())
1667 return Op;
1668 else
1669 // Perform default expansion
1670 return SDValue();
1671}
1672
1673static SDValue unrollVectorShift(SDValue Op, SelectionDAG &DAG) {
1674 EVT LaneT = Op.getSimpleValueType().getVectorElementType();
1675 // 32-bit and 64-bit unrolled shifts will have proper semantics
1676 if (LaneT.bitsGE(MVT::i32))
1677 return DAG.UnrollVectorOp(Op.getNode());
1678 // Otherwise mask the shift value to get proper semantics from 32-bit shift
1679 SDLoc DL(Op);
1680 size_t NumLanes = Op.getSimpleValueType().getVectorNumElements();
1681 SDValue Mask = DAG.getConstant(LaneT.getSizeInBits() - 1, DL, MVT::i32);
1682 unsigned ShiftOpcode = Op.getOpcode();
1683 SmallVector<SDValue, 16> ShiftedElements;
1684 DAG.ExtractVectorElements(Op.getOperand(0), ShiftedElements, 0, 0, MVT::i32);
1685 SmallVector<SDValue, 16> ShiftElements;
1686 DAG.ExtractVectorElements(Op.getOperand(1), ShiftElements, 0, 0, MVT::i32);
1687 SmallVector<SDValue, 16> UnrolledOps;
1688 for (size_t i = 0; i < NumLanes; ++i) {
1689 SDValue MaskedShiftValue =
1690 DAG.getNode(ISD::AND, DL, MVT::i32, ShiftElements[i], Mask);
1691 SDValue ShiftedValue = ShiftedElements[i];
1692 if (ShiftOpcode == ISD::SRA)
1693 ShiftedValue = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32,
1694 ShiftedValue, DAG.getValueType(LaneT));
1695 UnrolledOps.push_back(
1696 DAG.getNode(ShiftOpcode, DL, MVT::i32, ShiftedValue, MaskedShiftValue));
1697 }
1698 return DAG.getBuildVector(Op.getValueType(), DL, UnrolledOps);
1699}
1700
1701SDValue WebAssemblyTargetLowering::LowerShift(SDValue Op,
1702 SelectionDAG &DAG) const {
1703 SDLoc DL(Op);
1704
1705 // Only manually lower vector shifts
1706 assert(Op.getSimpleValueType().isVector())((Op.getSimpleValueType().isVector()) ? static_cast<void>
(0) : __assert_fail ("Op.getSimpleValueType().isVector()", "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1706, __PRETTY_FUNCTION__))
;
1707
1708 auto ShiftVal = DAG.getSplatValue(Op.getOperand(1));
1709 if (!ShiftVal)
1710 return unrollVectorShift(Op, DAG);
1711
1712 // Use anyext because none of the high bits can affect the shift
1713 ShiftVal = DAG.getAnyExtOrTrunc(ShiftVal, DL, MVT::i32);
1714
1715 unsigned Opcode;
1716 switch (Op.getOpcode()) {
1717 case ISD::SHL:
1718 Opcode = WebAssemblyISD::VEC_SHL;
1719 break;
1720 case ISD::SRA:
1721 Opcode = WebAssemblyISD::VEC_SHR_S;
1722 break;
1723 case ISD::SRL:
1724 Opcode = WebAssemblyISD::VEC_SHR_U;
1725 break;
1726 default:
1727 llvm_unreachable("unexpected opcode")::llvm::llvm_unreachable_internal("unexpected opcode", "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1727)
;
1728 }
1729
1730 return DAG.getNode(Opcode, DL, Op.getValueType(), Op.getOperand(0), ShiftVal);
1731}
1732
1733//===----------------------------------------------------------------------===//
1734// Custom DAG combine hooks
1735//===----------------------------------------------------------------------===//
1736static SDValue
1737performVECTOR_SHUFFLECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
1738 auto &DAG = DCI.DAG;
1739 auto Shuffle = cast<ShuffleVectorSDNode>(N);
1740
1741 // Hoist vector bitcasts that don't change the number of lanes out of unary
1742 // shuffles, where they are less likely to get in the way of other combines.
1743 // (shuffle (vNxT1 (bitcast (vNxT0 x))), undef, mask) ->
1744 // (vNxT1 (bitcast (vNxT0 (shuffle x, undef, mask))))
1745 SDValue Bitcast = N->getOperand(0);
1746 if (Bitcast.getOpcode() != ISD::BITCAST)
1747 return SDValue();
1748 if (!N->getOperand(1).isUndef())
1749 return SDValue();
1750 SDValue CastOp = Bitcast.getOperand(0);
1751 MVT SrcType = CastOp.getSimpleValueType();
1752 MVT DstType = Bitcast.getSimpleValueType();
1753 if (!SrcType.is128BitVector() ||
1754 SrcType.getVectorNumElements() != DstType.getVectorNumElements())
1755 return SDValue();
1756 SDValue NewShuffle = DAG.getVectorShuffle(
1757 SrcType, SDLoc(N), CastOp, DAG.getUNDEF(SrcType), Shuffle->getMask());
1758 return DAG.getBitcast(DstType, NewShuffle);
1759}
1760
1761static SDValue performVectorWidenCombine(SDNode *N,
1762 TargetLowering::DAGCombinerInfo &DCI) {
1763 auto &DAG = DCI.DAG;
1764 assert(N->getOpcode() == ISD::SIGN_EXTEND ||((N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() ==
ISD::ZERO_EXTEND) ? static_cast<void> (0) : __assert_fail
("N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND"
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1765, __PRETTY_FUNCTION__))
1765 N->getOpcode() == ISD::ZERO_EXTEND)((N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() ==
ISD::ZERO_EXTEND) ? static_cast<void> (0) : __assert_fail
("N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND"
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp"
, 1765, __PRETTY_FUNCTION__))
;
1766
1767 // Combine ({s,z}ext (extract_subvector src, i)) into a widening operation if
1768 // possible before the extract_subvector can be expanded.
1769 auto Extract = N->getOperand(0);
1770 if (Extract.getOpcode() != ISD::EXTRACT_SUBVECTOR)
1771 return SDValue();
1772 auto Source = Extract.getOperand(0);
1773 auto *IndexNode = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
1774 if (IndexNode == nullptr)
1775 return SDValue();
1776 auto Index = IndexNode->getZExtValue();
1777
1778 // Only v8i8 and v4i16 extracts can be widened, and only if the extracted
1779 // subvector is the low or high half of its source.
1780 EVT ResVT = N->getValueType(0);
1781 if (ResVT == MVT::v8i16) {
1782 if (Extract.getValueType() != MVT::v8i8 ||
1783 Source.getValueType() != MVT::v16i8 || (Index != 0 && Index != 8))
1784 return SDValue();
1785 } else if (ResVT == MVT::v4i32) {
1786 if (Extract.getValueType() != MVT::v4i16 ||
1787 Source.getValueType() != MVT::v8i16 || (Index != 0 && Index != 4))
1788 return SDValue();
1789 } else {
1790 return SDValue();
1791 }
1792
1793 bool IsSext = N->getOpcode() == ISD::SIGN_EXTEND;
1794 bool IsLow = Index == 0;
1795
1796 unsigned Op = IsSext ? (IsLow ? WebAssemblyISD::WIDEN_LOW_S
1797 : WebAssemblyISD::WIDEN_HIGH_S)
1798 : (IsLow ? WebAssemblyISD::WIDEN_LOW_U
1799 : WebAssemblyISD::WIDEN_HIGH_U);
1800
1801 return DAG.getNode(Op, SDLoc(N), ResVT, Source);
1802}
1803
1804SDValue
1805WebAssemblyTargetLowering::PerformDAGCombine(SDNode *N,
1806 DAGCombinerInfo &DCI) const {
1807 switch (N->getOpcode()) {
1808 default:
1809 return SDValue();
1810 case ISD::VECTOR_SHUFFLE:
1811 return performVECTOR_SHUFFLECombine(N, DCI);
1812 case ISD::SIGN_EXTEND:
1813 case ISD::ZERO_EXTEND:
1814 return performVectorWidenCombine(N, DCI);
1815 }
1816}

/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAG.h

1//===- llvm/CodeGen/SelectionDAG.h - InstSelection DAG ----------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file declares the SelectionDAG class, and transitively defines the
10// SDNode class and subclasses.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_CODEGEN_SELECTIONDAG_H
15#define LLVM_CODEGEN_SELECTIONDAG_H
16
17#include "llvm/ADT/APFloat.h"
18#include "llvm/ADT/APInt.h"
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/DenseMap.h"
21#include "llvm/ADT/DenseSet.h"
22#include "llvm/ADT/FoldingSet.h"
23#include "llvm/ADT/SetVector.h"
24#include "llvm/ADT/SmallVector.h"
25#include "llvm/ADT/StringMap.h"
26#include "llvm/ADT/ilist.h"
27#include "llvm/ADT/iterator.h"
28#include "llvm/ADT/iterator_range.h"
29#include "llvm/CodeGen/DAGCombine.h"
30#include "llvm/CodeGen/ISDOpcodes.h"
31#include "llvm/CodeGen/MachineFunction.h"
32#include "llvm/CodeGen/MachineMemOperand.h"
33#include "llvm/CodeGen/SelectionDAGNodes.h"
34#include "llvm/CodeGen/ValueTypes.h"
35#include "llvm/IR/DebugLoc.h"
36#include "llvm/IR/Instructions.h"
37#include "llvm/IR/Metadata.h"
38#include "llvm/Support/Allocator.h"
39#include "llvm/Support/ArrayRecycler.h"
40#include "llvm/Support/AtomicOrdering.h"
41#include "llvm/Support/Casting.h"
42#include "llvm/Support/CodeGen.h"
43#include "llvm/Support/ErrorHandling.h"
44#include "llvm/Support/MachineValueType.h"
45#include "llvm/Support/RecyclingAllocator.h"
46#include <algorithm>
47#include <cassert>
48#include <cstdint>
49#include <functional>
50#include <map>
51#include <string>
52#include <tuple>
53#include <utility>
54#include <vector>
55
56namespace llvm {
57
58class AAResults;
59class BlockAddress;
60class BlockFrequencyInfo;
61class Constant;
62class ConstantFP;
63class ConstantInt;
64class DataLayout;
65struct fltSemantics;
66class FunctionLoweringInfo;
67class GlobalValue;
68struct KnownBits;
69class LegacyDivergenceAnalysis;
70class LLVMContext;
71class MachineBasicBlock;
72class MachineConstantPoolValue;
73class MCSymbol;
74class OptimizationRemarkEmitter;
75class ProfileSummaryInfo;
76class SDDbgValue;
77class SDDbgLabel;
78class SelectionDAG;
79class SelectionDAGTargetInfo;
80class TargetLibraryInfo;
81class TargetLowering;
82class TargetMachine;
83class TargetSubtargetInfo;
84class Value;
85
86class SDVTListNode : public FoldingSetNode {
87 friend struct FoldingSetTrait<SDVTListNode>;
88
89 /// A reference to an Interned FoldingSetNodeID for this node.
90 /// The Allocator in SelectionDAG holds the data.
91 /// SDVTList contains all types which are frequently accessed in SelectionDAG.
92 /// The size of this list is not expected to be big so it won't introduce
93 /// a memory penalty.
94 FoldingSetNodeIDRef FastID;
95 const EVT *VTs;
96 unsigned int NumVTs;
97 /// The hash value for SDVTList is fixed, so cache it to avoid
98 /// hash calculation.
99 unsigned HashValue;
100
101public:
102 SDVTListNode(const FoldingSetNodeIDRef ID, const EVT *VT, unsigned int Num) :
103 FastID(ID), VTs(VT), NumVTs(Num) {
104 HashValue = ID.ComputeHash();
105 }
106
107 SDVTList getSDVTList() {
108 SDVTList result = {VTs, NumVTs};
109 return result;
110 }
111};
112
113/// Specialize FoldingSetTrait for SDVTListNode
114/// to avoid computing temp FoldingSetNodeID and hash value.
115template<> struct FoldingSetTrait<SDVTListNode> : DefaultFoldingSetTrait<SDVTListNode> {
116 static void Profile(const SDVTListNode &X, FoldingSetNodeID& ID) {
117 ID = X.FastID;
118 }
119
120 static bool Equals(const SDVTListNode &X, const FoldingSetNodeID &ID,
121 unsigned IDHash, FoldingSetNodeID &TempID) {
122 if (X.HashValue != IDHash)
123 return false;
124 return ID == X.FastID;
125 }
126
127 static unsigned ComputeHash(const SDVTListNode &X, FoldingSetNodeID &TempID) {
128 return X.HashValue;
129 }
130};
131
132template <> struct ilist_alloc_traits<SDNode> {
133 static void deleteNode(SDNode *) {
134 llvm_unreachable("ilist_traits<SDNode> shouldn't see a deleteNode call!")::llvm::llvm_unreachable_internal("ilist_traits<SDNode> shouldn't see a deleteNode call!"
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 134)
;
135 }
136};
137
138/// Keeps track of dbg_value information through SDISel. We do
139/// not build SDNodes for these so as not to perturb the generated code;
140/// instead the info is kept off to the side in this structure. Each SDNode may
141/// have one or more associated dbg_value entries. This information is kept in
142/// DbgValMap.
143/// Byval parameters are handled separately because they don't use alloca's,
144/// which busts the normal mechanism. There is good reason for handling all
145/// parameters separately: they may not have code generated for them, they
146/// should always go at the beginning of the function regardless of other code
147/// motion, and debug info for them is potentially useful even if the parameter
148/// is unused. Right now only byval parameters are handled separately.
149class SDDbgInfo {
150 BumpPtrAllocator Alloc;
151 SmallVector<SDDbgValue*, 32> DbgValues;
152 SmallVector<SDDbgValue*, 32> ByvalParmDbgValues;
153 SmallVector<SDDbgLabel*, 4> DbgLabels;
154 using DbgValMapType = DenseMap<const SDNode *, SmallVector<SDDbgValue *, 2>>;
155 DbgValMapType DbgValMap;
156
157public:
158 SDDbgInfo() = default;
159 SDDbgInfo(const SDDbgInfo &) = delete;
160 SDDbgInfo &operator=(const SDDbgInfo &) = delete;
161
162 void add(SDDbgValue *V, const SDNode *Node, bool isParameter) {
163 if (isParameter) {
164 ByvalParmDbgValues.push_back(V);
165 } else DbgValues.push_back(V);
166 if (Node)
167 DbgValMap[Node].push_back(V);
168 }
169
170 void add(SDDbgLabel *L) {
171 DbgLabels.push_back(L);
172 }
173
174 /// Invalidate all DbgValues attached to the node and remove
175 /// it from the Node-to-DbgValues map.
176 void erase(const SDNode *Node);
177
178 void clear() {
179 DbgValMap.clear();
180 DbgValues.clear();
181 ByvalParmDbgValues.clear();
182 DbgLabels.clear();
183 Alloc.Reset();
184 }
185
186 BumpPtrAllocator &getAlloc() { return Alloc; }
187
188 bool empty() const {
189 return DbgValues.empty() && ByvalParmDbgValues.empty() && DbgLabels.empty();
190 }
191
192 ArrayRef<SDDbgValue*> getSDDbgValues(const SDNode *Node) const {
193 auto I = DbgValMap.find(Node);
194 if (I != DbgValMap.end())
195 return I->second;
196 return ArrayRef<SDDbgValue*>();
197 }
198
199 using DbgIterator = SmallVectorImpl<SDDbgValue*>::iterator;
200 using DbgLabelIterator = SmallVectorImpl<SDDbgLabel*>::iterator;
201
202 DbgIterator DbgBegin() { return DbgValues.begin(); }
203 DbgIterator DbgEnd() { return DbgValues.end(); }
204 DbgIterator ByvalParmDbgBegin() { return ByvalParmDbgValues.begin(); }
205 DbgIterator ByvalParmDbgEnd() { return ByvalParmDbgValues.end(); }
206 DbgLabelIterator DbgLabelBegin() { return DbgLabels.begin(); }
207 DbgLabelIterator DbgLabelEnd() { return DbgLabels.end(); }
208};
209
210void checkForCycles(const SelectionDAG *DAG, bool force = false);
211
212/// This is used to represent a portion of an LLVM function in a low-level
213/// Data Dependence DAG representation suitable for instruction selection.
214/// This DAG is constructed as the first step of instruction selection in order
215/// to allow implementation of machine specific optimizations
216/// and code simplifications.
217///
218/// The representation used by the SelectionDAG is a target-independent
219/// representation, which has some similarities to the GCC RTL representation,
220/// but is significantly more simple, powerful, and is a graph form instead of a
221/// linear form.
222///
223class SelectionDAG {
224 const TargetMachine &TM;
225 const SelectionDAGTargetInfo *TSI = nullptr;
226 const TargetLowering *TLI = nullptr;
227 const TargetLibraryInfo *LibInfo = nullptr;
228 MachineFunction *MF;
229 Pass *SDAGISelPass = nullptr;
230 LLVMContext *Context;
231 CodeGenOpt::Level OptLevel;
232
233 LegacyDivergenceAnalysis * DA = nullptr;
234 FunctionLoweringInfo * FLI = nullptr;
235
236 /// The function-level optimization remark emitter. Used to emit remarks
237 /// whenever manipulating the DAG.
238 OptimizationRemarkEmitter *ORE;
239
240 ProfileSummaryInfo *PSI = nullptr;
241 BlockFrequencyInfo *BFI = nullptr;
242
243 /// The starting token.
244 SDNode EntryNode;
245
246 /// The root of the entire DAG.
247 SDValue Root;
248
249 /// A linked list of nodes in the current DAG.
250 ilist<SDNode> AllNodes;
251
252 /// The AllocatorType for allocating SDNodes. We use
253 /// pool allocation with recycling.
254 using NodeAllocatorType = RecyclingAllocator<BumpPtrAllocator, SDNode,
255 sizeof(LargestSDNode),
256 alignof(MostAlignedSDNode)>;
257
258 /// Pool allocation for nodes.
259 NodeAllocatorType NodeAllocator;
260
261 /// This structure is used to memoize nodes, automatically performing
262 /// CSE with existing nodes when a duplicate is requested.
263 FoldingSet<SDNode> CSEMap;
264
265 /// Pool allocation for machine-opcode SDNode operands.
266 BumpPtrAllocator OperandAllocator;
267 ArrayRecycler<SDUse> OperandRecycler;
268
269 /// Pool allocation for misc. objects that are created once per SelectionDAG.
270 BumpPtrAllocator Allocator;
271
272 /// Tracks dbg_value and dbg_label information through SDISel.
273 SDDbgInfo *DbgInfo;
274
275 using CallSiteInfo = MachineFunction::CallSiteInfo;
276 using CallSiteInfoImpl = MachineFunction::CallSiteInfoImpl;
277
278 struct CallSiteDbgInfo {
279 CallSiteInfo CSInfo;
280 MDNode *HeapAllocSite = nullptr;
281 bool NoMerge = false;
282 };
283
284 DenseMap<const SDNode *, CallSiteDbgInfo> SDCallSiteDbgInfo;
285
286 uint16_t NextPersistentId = 0;
287
288public:
289 /// Clients of various APIs that cause global effects on
290 /// the DAG can optionally implement this interface. This allows the clients
291 /// to handle the various sorts of updates that happen.
292 ///
293 /// A DAGUpdateListener automatically registers itself with DAG when it is
294 /// constructed, and removes itself when destroyed in RAII fashion.
295 struct DAGUpdateListener {
    // Next listener on the DAG's intrusive listener stack (LIFO).
296 DAGUpdateListener *const Next;
    // The DAG this listener is registered with.
297 SelectionDAG &DAG;
298
    // Registering pushes this listener onto the front of DAG.UpdateListeners.
299 explicit DAGUpdateListener(SelectionDAG &D)
300 : Next(D.UpdateListeners), DAG(D) {
301 DAG.UpdateListeners = this;
302 }
303
    // Destruction pops this listener; the assert enforces LIFO (RAII) order.
304 virtual ~DAGUpdateListener() {
305 assert(DAG.UpdateListeners == this &&((DAG.UpdateListeners == this && "DAGUpdateListeners must be destroyed in LIFO order"
) ? static_cast<void> (0) : __assert_fail ("DAG.UpdateListeners == this && \"DAGUpdateListeners must be destroyed in LIFO order\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 306, __PRETTY_FUNCTION__))
306 "DAGUpdateListeners must be destroyed in LIFO order")((DAG.UpdateListeners == this && "DAGUpdateListeners must be destroyed in LIFO order"
) ? static_cast<void> (0) : __assert_fail ("DAG.UpdateListeners == this && \"DAGUpdateListeners must be destroyed in LIFO order\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 306, __PRETTY_FUNCTION__))
;
307 DAG.UpdateListeners = Next;
308 }
309
310 /// The node N that was deleted and, if E is not null, an
311 /// equivalent node E that replaced it.
312 virtual void NodeDeleted(SDNode *N, SDNode *E);
313
314 /// The node N that was updated.
315 virtual void NodeUpdated(SDNode *N);
316
317 /// The node N that was inserted.
318 virtual void NodeInserted(SDNode *N);
319 };
320
    /// Convenience listener that forwards node-deletion events to a callback.
321 struct DAGNodeDeletedListener : public DAGUpdateListener {
    // Invoked with (deleted node, replacement-or-null) on each deletion.
322 std::function<void(SDNode *, SDNode *)> Callback;
323
324 DAGNodeDeletedListener(SelectionDAG &DAG,
325 std::function<void(SDNode *, SDNode *)> Callback)
326 : DAGUpdateListener(DAG), Callback(std::move(Callback)) {}
327
328 void NodeDeleted(SDNode *N, SDNode *E) override { Callback(N, E); }
329
330 private:
    // Out-of-line anchor — presumably the usual LLVM idiom to pin the vtable
    // to a single translation unit; definition lives in a .cpp file.
331 virtual void anchor();
332 };
333
334 /// Help to insert SDNodeFlags automatically in transforming. Use
335 /// RAII to save and resume flags in current scope.
336 class FlagInserter {
337 SelectionDAG &DAG;
338 SDNodeFlags Flags;
339 FlagInserter *LastInserter;
340
341 public:
342 FlagInserter(SelectionDAG &SDAG, SDNode *N)
343 : DAG(SDAG), Flags(N->getFlags()),
344 LastInserter(SDAG.getFlagInserter()) {
345 SDAG.setFlagInserter(this);
346 }
347
348 FlagInserter(const FlagInserter &) = delete;
349 FlagInserter &operator=(const FlagInserter &) = delete;
350 ~FlagInserter() { DAG.setFlagInserter(LastInserter); }
351
352 const SDNodeFlags getFlags() const { return Flags; }
353 };
354
355 /// When true, additional steps are taken to
356 /// ensure that getConstant() and similar functions return DAG nodes that
357 /// have legal types. This is important after type legalization since
358 /// any illegally typed nodes generated after this point will not experience
359 /// type legalization.
360 bool NewNodesMustHaveLegalTypes = false;
361
362private:
363 /// DAGUpdateListener is a friend so it can manipulate the listener stack.
364 friend struct DAGUpdateListener;
365
366 /// Linked list of registered DAGUpdateListener instances.
367 /// This stack is maintained by DAGUpdateListener RAII.
368 DAGUpdateListener *UpdateListeners = nullptr;
369
370 /// Implementation of setSubgraphColor.
371 /// Return whether we had to truncate the search.
372 bool setSubgraphColorHelper(SDNode *N, const char *Color,
373 DenseSet<SDNode *> &visited,
374 int level, bool &printed);
375
  // Placement-new an SDNodeT into pool storage owned by NodeAllocator;
  // nodes are recycled by the allocator rather than individually deleted.
376 template <typename SDNodeT, typename... ArgTypes>
377 SDNodeT *newSDNode(ArgTypes &&... Args) {
378 return new (NodeAllocator.template Allocate<SDNodeT>())
379 SDNodeT(std::forward<ArgTypes>(Args)...);
380 }
381
382 /// Build a synthetic SDNodeT with the given args and extract its subclass
383 /// data as an integer (e.g. for use in a folding set).
384 ///
385 /// The args to this function are the same as the args to SDNodeT's
386 /// constructor, except the second arg (assumed to be a const DebugLoc&) is
387 /// omitted.
  // Builds a throwaway stack SDNodeT solely to read its packed subclass bits.
388 template <typename SDNodeT, typename... ArgTypes>
389 static uint16_t getSyntheticNodeSubclassData(unsigned IROrder,
390 ArgTypes &&... Args) {
391 // The compiler can reduce this expression to a constant iff we pass an
392 // empty DebugLoc. Thankfully, the debug location doesn't have any bearing
393 // on the subclass data.
394 return SDNodeT(IROrder, DebugLoc(), std::forward<ArgTypes>(Args)...)
395 .getRawSubclassData();
396 }
397
  // Memory-node overload of the above: constructs a temporary SDNodeTy with
  // an empty DebugLoc just to extract its raw subclass data.
398 template <typename SDNodeTy>
399 static uint16_t getSyntheticNodeSubclassData(unsigned Opc, unsigned Order,
400 SDVTList VTs, EVT MemoryVT,
401 MachineMemOperand *MMO) {
402 return SDNodeTy(Opc, Order, DebugLoc(), VTs, MemoryVT, MMO)
403 .getRawSubclassData();
404 }
405
406 void createOperands(SDNode *Node, ArrayRef<SDValue> Vals);
407
408 void removeOperands(SDNode *Node) {
409 if (!Node->OperandList)
410 return;
411 OperandRecycler.deallocate(
412 ArrayRecycler<SDUse>::Capacity::get(Node->NumOperands),
413 Node->OperandList);
414 Node->NumOperands = 0;
415 Node->OperandList = nullptr;
416 }
417 void CreateTopologicalOrder(std::vector<SDNode*>& Order);
418
419public:
420 // Maximum depth for recursive analysis such as computeKnownBits, etc.
421 static constexpr unsigned MaxRecursionDepth = 6;
422
423 explicit SelectionDAG(const TargetMachine &TM, CodeGenOpt::Level);
424 SelectionDAG(const SelectionDAG &) = delete;
425 SelectionDAG &operator=(const SelectionDAG &) = delete;
426 ~SelectionDAG();
427
428 /// Prepare this SelectionDAG to process code in the given MachineFunction.
429 void init(MachineFunction &NewMF, OptimizationRemarkEmitter &NewORE,
430 Pass *PassPtr, const TargetLibraryInfo *LibraryInfo,
431 LegacyDivergenceAnalysis * Divergence,
432 ProfileSummaryInfo *PSIin, BlockFrequencyInfo *BFIin);
433
  /// Store the FunctionLoweringInfo for the current function. Only the
  /// pointer is retained; the object itself is owned elsewhere.
434 void setFunctionLoweringInfo(FunctionLoweringInfo * FuncInfo) {
435 FLI = FuncInfo;
436 }
437
438 /// Clear state and free memory necessary to make this
439 /// SelectionDAG ready to process a new block.
440 void clear();
441
442 MachineFunction &getMachineFunction() const { return *MF; }
443 const Pass *getPass() const { return SDAGISelPass; }
444
445 const DataLayout &getDataLayout() const { return MF->getDataLayout(); }
446 const TargetMachine &getTarget() const { return TM; }
447 const TargetSubtargetInfo &getSubtarget() const { return MF->getSubtarget(); }
448 const TargetLowering &getTargetLoweringInfo() const { return *TLI; }
449 const TargetLibraryInfo &getLibInfo() const { return *LibInfo; }
450 const SelectionDAGTargetInfo &getSelectionDAGInfo() const { return *TSI; }
451 const LegacyDivergenceAnalysis *getDivergenceAnalysis() const { return DA; }
452 LLVMContext *getContext() const { return Context; }
453 OptimizationRemarkEmitter &getORE() const { return *ORE; }
454 ProfileSummaryInfo *getPSI() const { return PSI; }
455 BlockFrequencyInfo *getBFI() const { return BFI; }
456
457 FlagInserter *getFlagInserter() { return Inserter; }
458 void setFlagInserter(FlagInserter *FI) { Inserter = FI; }
459
460 /// Just dump dot graph to a user-provided path and title.
461 /// This doesn't open the dot viewer program and
462 /// helps visualization when outside debugging session.
463 /// FileName expects absolute path. If provided
464 /// without any path separators then the file
465 /// will be created in the current directory.
466 /// Error will be emitted if the path is insane.
467#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
468 LLVM_DUMP_METHOD__attribute__((noinline)) __attribute__((__used__)) void dumpDotGraph(const Twine &FileName, const Twine &Title);
469#endif
470
471 /// Pop up a GraphViz/gv window with the DAG rendered using 'dot'.
472 void viewGraph(const std::string &Title);
473 void viewGraph();
474
475#ifndef NDEBUG
476 std::map<const SDNode *, std::string> NodeGraphAttrs;
477#endif
478
479 /// Clear all previously defined node graph attributes.
480 /// Intended to be used from a debugging tool (eg. gdb).
481 void clearGraphAttrs();
482
483 /// Set graph attributes for a node. (eg. "color=red".)
484 void setGraphAttrs(const SDNode *N, const char *Attrs);
485
486 /// Get graph attributes for a node. (eg. "color=red".)
487 /// Used from getNodeAttributes.
488 const std::string getGraphAttrs(const SDNode *N) const;
489
490 /// Convenience for setting node color attribute.
491 void setGraphColor(const SDNode *N, const char *Color);
492
493 /// Convenience for setting subgraph color attribute.
494 void setSubgraphColor(SDNode *N, const char *Color);
495
496 using allnodes_const_iterator = ilist<SDNode>::const_iterator;
497
498 allnodes_const_iterator allnodes_begin() const { return AllNodes.begin(); }
499 allnodes_const_iterator allnodes_end() const { return AllNodes.end(); }
500
501 using allnodes_iterator = ilist<SDNode>::iterator;
502
503 allnodes_iterator allnodes_begin() { return AllNodes.begin(); }
504 allnodes_iterator allnodes_end() { return AllNodes.end(); }
505
506 ilist<SDNode>::size_type allnodes_size() const {
507 return AllNodes.size();
508 }
509
510 iterator_range<allnodes_iterator> allnodes() {
511 return make_range(allnodes_begin(), allnodes_end());
512 }
513 iterator_range<allnodes_const_iterator> allnodes() const {
514 return make_range(allnodes_begin(), allnodes_end());
515 }
516
517 /// Return the root tag of the SelectionDAG.
518 const SDValue &getRoot() const { return Root; }
519
520 /// Return the token chain corresponding to the entry of the function.
521 SDValue getEntryNode() const {
522 return SDValue(const_cast<SDNode *>(&EntryNode), 0);
523 }
524
525 /// Set the current root tag of the SelectionDAG.
526 ///
527 const SDValue &setRoot(SDValue N) {
    // A non-null root must be a chain value (MVT::Other).
528 assert((!N.getNode() || N.getValueType() == MVT::Other) &&(((!N.getNode() || N.getValueType() == MVT::Other) &&
"DAG root value is not a chain!") ? static_cast<void> (
0) : __assert_fail ("(!N.getNode() || N.getValueType() == MVT::Other) && \"DAG root value is not a chain!\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 529, __PRETTY_FUNCTION__))
529 "DAG root value is not a chain!")(((!N.getNode() || N.getValueType() == MVT::Other) &&
"DAG root value is not a chain!") ? static_cast<void> (
0) : __assert_fail ("(!N.getNode() || N.getValueType() == MVT::Other) && \"DAG root value is not a chain!\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 529, __PRETTY_FUNCTION__))
;
    // Cycle-check the incoming subgraph before installing it, and the whole
    // DAG afterwards; both checks are skipped for a null root.
530 if (N.getNode())
531 checkForCycles(N.getNode(), this);
532 Root = N;
533 if (N.getNode())
534 checkForCycles(this);
535 return Root;
536 }
537
538#ifndef NDEBUG
539 void VerifyDAGDiverence();
540#endif
541
542 /// This iterates over the nodes in the SelectionDAG, folding
543 /// certain types of nodes together, or eliminating superfluous nodes. The
544 /// Level argument controls whether Combine is allowed to produce nodes and
545 /// types that are illegal on the target.
546 void Combine(CombineLevel Level, AAResults *AA,
547 CodeGenOpt::Level OptLevel);
548
549 /// This transforms the SelectionDAG into a SelectionDAG that
550 /// only uses types natively supported by the target.
551 /// Returns "true" if it made any changes.
552 ///
553 /// Note that this is an involved process that may invalidate pointers into
554 /// the graph.
555 bool LegalizeTypes();
556
557 /// This transforms the SelectionDAG into a SelectionDAG that is
558 /// compatible with the target instruction selector, as indicated by the
559 /// TargetLowering object.
560 ///
561 /// Note that this is an involved process that may invalidate pointers into
562 /// the graph.
563 void Legalize();
564
565 /// Transforms a SelectionDAG node and any operands to it into a node
566 /// that is compatible with the target instruction selector, as indicated by
567 /// the TargetLowering object.
568 ///
569 /// \returns true if \c N is a valid, legal node after calling this.
570 ///
571 /// This essentially runs a single recursive walk of the \c Legalize process
572 /// over the given node (and its operands). This can be used to incrementally
573 /// legalize the DAG. All of the nodes which are directly replaced,
574 /// potentially including N, are added to the output parameter \c
575 /// UpdatedNodes so that the delta to the DAG can be understood by the
576 /// caller.
577 ///
578 /// When this returns false, N has been legalized in a way that make the
579 /// pointer passed in no longer valid. It may have even been deleted from the
580 /// DAG, and so it shouldn't be used further. When this returns true, the
581 /// N passed in is a legal node, and can be immediately processed as such.
582 /// This may still have done some work on the DAG, and will still populate
583 /// UpdatedNodes with any new nodes replacing those originally in the DAG.
584 bool LegalizeOp(SDNode *N, SmallSetVector<SDNode *, 16> &UpdatedNodes);
585
586 /// This transforms the SelectionDAG into a SelectionDAG
587 /// that only uses vector math operations supported by the target. This is
588 /// necessary as a separate step from Legalize because unrolling a vector
589 /// operation can introduce illegal types, which requires running
590 /// LegalizeTypes again.
591 ///
592 /// This returns true if it made any changes; in that case, LegalizeTypes
593 /// is called again before Legalize.
594 ///
595 /// Note that this is an involved process that may invalidate pointers into
596 /// the graph.
597 bool LegalizeVectors();
598
599 /// This method deletes all unreachable nodes in the SelectionDAG.
600 void RemoveDeadNodes();
601
602 /// Remove the specified node from the system. This node must
603 /// have no referrers.
604 void DeleteNode(SDNode *N);
605
606 /// Return an SDVTList that represents the list of values specified.
607 SDVTList getVTList(EVT VT);
608 SDVTList getVTList(EVT VT1, EVT VT2);
609 SDVTList getVTList(EVT VT1, EVT VT2, EVT VT3);
610 SDVTList getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4);
611 SDVTList getVTList(ArrayRef<EVT> VTs);
612
613 //===--------------------------------------------------------------------===//
614 // Node creation methods.
615
616 /// Create a ConstantSDNode wrapping a constant value.
617 /// If VT is a vector type, the constant is splatted into a BUILD_VECTOR.
618 ///
619 /// If only legal types can be produced, this does the necessary
620 /// transformations (e.g., if the vector element type is illegal).
621 /// @{
622 SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT,
623 bool isTarget = false, bool isOpaque = false);
624 SDValue getConstant(const APInt &Val, const SDLoc &DL, EVT VT,
625 bool isTarget = false, bool isOpaque = false);
626
627 SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget = false,
628 bool IsOpaque = false) {
629 return getConstant(APInt::getAllOnesValue(VT.getScalarSizeInBits()), DL,
630 VT, IsTarget, IsOpaque);
631 }
632
633 SDValue getConstant(const ConstantInt &Val, const SDLoc &DL, EVT VT,
634 bool isTarget = false, bool isOpaque = false);
635 SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL,
636 bool isTarget = false);
637 SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL,
638 bool LegalTypes = true);
639 SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL,
640 bool isTarget = false);
641
642 SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT,
643 bool isOpaque = false) {
644 return getConstant(Val, DL, VT, true, isOpaque);
645 }
646 SDValue getTargetConstant(const APInt &Val, const SDLoc &DL, EVT VT,
647 bool isOpaque = false) {
648 return getConstant(Val, DL, VT, true, isOpaque);
649 }
650 SDValue getTargetConstant(const ConstantInt &Val, const SDLoc &DL, EVT VT,
651 bool isOpaque = false) {
652 return getConstant(Val, DL, VT, true, isOpaque);
653 }
654
655 /// Create a true or false constant of type \p VT using the target's
656 /// BooleanContent for type \p OpVT.
657 SDValue getBoolConstant(bool V, const SDLoc &DL, EVT VT, EVT OpVT);
658 /// @}
659
660 /// Create a ConstantFPSDNode wrapping a constant value.
661 /// If VT is a vector type, the constant is splatted into a BUILD_VECTOR.
662 ///
663 /// If only legal types can be produced, this does the necessary
664 /// transformations (e.g., if the vector element type is illegal).
665 /// The forms that take a double should only be used for simple constants
666 /// that can be exactly represented in VT. No checks are made.
667 /// @{
668 SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT,
669 bool isTarget = false);
670 SDValue getConstantFP(const APFloat &Val, const SDLoc &DL, EVT VT,
671 bool isTarget = false);
672 SDValue getConstantFP(const ConstantFP &V, const SDLoc &DL, EVT VT,
673 bool isTarget = false);
674 SDValue getTargetConstantFP(double Val, const SDLoc &DL, EVT VT) {
675 return getConstantFP(Val, DL, VT, true);
676 }
677 SDValue getTargetConstantFP(const APFloat &Val, const SDLoc &DL, EVT VT) {
678 return getConstantFP(Val, DL, VT, true);
679 }
680 SDValue getTargetConstantFP(const ConstantFP &Val, const SDLoc &DL, EVT VT) {
681 return getConstantFP(Val, DL, VT, true);
682 }
683 /// @}
684
685 SDValue getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT,
686 int64_t offset = 0, bool isTargetGA = false,
687 unsigned TargetFlags = 0);
688 SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT,
689 int64_t offset = 0, unsigned TargetFlags = 0) {
690 return getGlobalAddress(GV, DL, VT, offset, true, TargetFlags);
691 }
692 SDValue getFrameIndex(int FI, EVT VT, bool isTarget = false);
693 SDValue getTargetFrameIndex(int FI, EVT VT) {
694 return getFrameIndex(FI, VT, true);
695 }
696 SDValue getJumpTable(int JTI, EVT VT, bool isTarget = false,
697 unsigned TargetFlags = 0);
698 SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags = 0) {
699 return getJumpTable(JTI, VT, true, TargetFlags);
700 }
701 SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align = None,
702 int Offs = 0, bool isT = false,
703 unsigned TargetFlags = 0);
704 SDValue getTargetConstantPool(const Constant *C, EVT VT,
705 MaybeAlign Align = None, int Offset = 0,
706 unsigned TargetFlags = 0) {
707 return getConstantPool(C, VT, Align, Offset, true, TargetFlags);
708 }
709 SDValue getConstantPool(MachineConstantPoolValue *C, EVT VT,
710 MaybeAlign Align = None, int Offs = 0,
711 bool isT = false, unsigned TargetFlags = 0);
712 SDValue getTargetConstantPool(MachineConstantPoolValue *C, EVT VT,
713 MaybeAlign Align = None, int Offset = 0,
714 unsigned TargetFlags = 0) {
715 return getConstantPool(C, VT, Align, Offset, true, TargetFlags);
716 }
717 SDValue getTargetIndex(int Index, EVT VT, int64_t Offset = 0,
718 unsigned TargetFlags = 0);
719 // When generating a branch to a BB, we don't in general know enough
720 // to provide debug info for the BB at that time, so keep this one around.
721 SDValue getBasicBlock(MachineBasicBlock *MBB);
722 SDValue getBasicBlock(MachineBasicBlock *MBB, SDLoc dl);
723 SDValue getExternalSymbol(const char *Sym, EVT VT);
724 SDValue getExternalSymbol(const char *Sym, const SDLoc &dl, EVT VT);
725 SDValue getTargetExternalSymbol(const char *Sym, EVT VT,
726 unsigned TargetFlags = 0);
727 SDValue getMCSymbol(MCSymbol *Sym, EVT VT);
728
729 SDValue getValueType(EVT);
730 SDValue getRegister(unsigned Reg, EVT VT);
731 SDValue getRegisterMask(const uint32_t *RegMask);
732 SDValue getEHLabel(const SDLoc &dl, SDValue Root, MCSymbol *Label);
733 SDValue getLabelNode(unsigned Opcode, const SDLoc &dl, SDValue Root,
734 MCSymbol *Label);
735 SDValue getBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset = 0,
736 bool isTarget = false, unsigned TargetFlags = 0);
737 SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT,
738 int64_t Offset = 0, unsigned TargetFlags = 0) {
739 return getBlockAddress(BA, VT, Offset, true, TargetFlags);
740 }
741
742 SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg,
743 SDValue N) {
744 return getNode(ISD::CopyToReg, dl, MVT::Other, Chain,
745 getRegister(Reg, N.getValueType()), N);
746 }
747
748 // This version of the getCopyToReg method takes an extra operand, which
749 // indicates that there is potentially an incoming glue value (if Glue is not
750 // null) and that there should be a glue result.
751 SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N,
752 SDValue Glue) {
753 SDVTList VTs = getVTList(MVT::Other, MVT::Glue);
754 SDValue Ops[] = { Chain, getRegister(Reg, N.getValueType()), N, Glue };
755 return getNode(ISD::CopyToReg, dl, VTs,
756 makeArrayRef(Ops, Glue.getNode() ? 4 : 3));
757 }
758
759 // Similar to last getCopyToReg() except parameter Reg is a SDValue
760 SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, SDValue Reg, SDValue N,
761 SDValue Glue) {
762 SDVTList VTs = getVTList(MVT::Other, MVT::Glue);
763 SDValue Ops[] = { Chain, Reg, N, Glue };
764 return getNode(ISD::CopyToReg, dl, VTs,
765 makeArrayRef(Ops, Glue.getNode() ? 4 : 3));
766 }
767
768 SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT) {
769 SDVTList VTs = getVTList(VT, MVT::Other);
770 SDValue Ops[] = { Chain, getRegister(Reg, VT) };
771 return getNode(ISD::CopyFromReg, dl, VTs, Ops);
772 }
773
774 // This version of the getCopyFromReg method takes an extra operand, which
775 // indicates that there is potentially an incoming glue value (if Glue is not
776 // null) and that there should be a glue result.
777 SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT,
778 SDValue Glue) {
779 SDVTList VTs = getVTList(VT, MVT::Other, MVT::Glue);
780 SDValue Ops[] = { Chain, getRegister(Reg, VT), Glue };
781 return getNode(ISD::CopyFromReg, dl, VTs,
782 makeArrayRef(Ops, Glue.getNode() ? 3 : 2));
783 }
784
785 SDValue getCondCode(ISD::CondCode Cond);
786
787 /// Return an ISD::VECTOR_SHUFFLE node. The number of elements in VT,
788 /// which must be a vector type, must match the number of mask elements
789 /// NumElts. An integer mask element equal to -1 is treated as undefined.
790 SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2,
791 ArrayRef<int> Mask);
792
793 /// Return an ISD::BUILD_VECTOR node. The number of elements in VT,
794 /// which must be a vector type, must match the number of operands in Ops.
795 /// The operands must have the same type as (or, for integers, a type wider
796 /// than) VT's element type.
  // Thin forwarding wrapper; element-count/type invariants are not checked
  // here.
797 SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef<SDValue> Ops) {
798 // VerifySDNode (via InsertNode) checks BUILD_VECTOR later.
799 return getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
800 }
801
802 /// Return an ISD::BUILD_VECTOR node. The number of elements in VT,
803 /// which must be a vector type, must match the number of operands in Ops.
804 /// The operands must have the same type as (or, for integers, a type wider
805 /// than) VT's element type.
  // SDUse-operand overload of getBuildVector(); same forwarding behavior.
806 SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef<SDUse> Ops) {
807 // VerifySDNode (via InsertNode) checks BUILD_VECTOR later.
808 return getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
809 }
810
811 /// Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all
812 /// elements. VT must be a vector type. Op's type must be the same as (or,
813 /// for integers, a type wider than) VT's element type.
814 SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op) {
815 // VerifySDNode (via InsertNode) checks BUILD_VECTOR later.
816 if (Op.getOpcode() == ISD::UNDEF) {
19
Calling 'SDValue::getOpcode'
817 assert((VT.getVectorElementType() == Op.getValueType() ||(((VT.getVectorElementType() == Op.getValueType() || (VT.isInteger
() && VT.getVectorElementType().bitsLE(Op.getValueType
()))) && "A splatted value must have a width equal or (for integers) "
"greater than the vector element type!") ? static_cast<void
> (0) : __assert_fail ("(VT.getVectorElementType() == Op.getValueType() || (VT.isInteger() && VT.getVectorElementType().bitsLE(Op.getValueType()))) && \"A splatted value must have a width equal or (for integers) \" \"greater than the vector element type!\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 821, __PRETTY_FUNCTION__))
818 (VT.isInteger() &&(((VT.getVectorElementType() == Op.getValueType() || (VT.isInteger
() && VT.getVectorElementType().bitsLE(Op.getValueType
()))) && "A splatted value must have a width equal or (for integers) "
"greater than the vector element type!") ? static_cast<void
> (0) : __assert_fail ("(VT.getVectorElementType() == Op.getValueType() || (VT.isInteger() && VT.getVectorElementType().bitsLE(Op.getValueType()))) && \"A splatted value must have a width equal or (for integers) \" \"greater than the vector element type!\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 821, __PRETTY_FUNCTION__))
819 VT.getVectorElementType().bitsLE(Op.getValueType()))) &&(((VT.getVectorElementType() == Op.getValueType() || (VT.isInteger
() && VT.getVectorElementType().bitsLE(Op.getValueType
()))) && "A splatted value must have a width equal or (for integers) "
"greater than the vector element type!") ? static_cast<void
> (0) : __assert_fail ("(VT.getVectorElementType() == Op.getValueType() || (VT.isInteger() && VT.getVectorElementType().bitsLE(Op.getValueType()))) && \"A splatted value must have a width equal or (for integers) \" \"greater than the vector element type!\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 821, __PRETTY_FUNCTION__))
820 "A splatted value must have a width equal or (for integers) "(((VT.getVectorElementType() == Op.getValueType() || (VT.isInteger
() && VT.getVectorElementType().bitsLE(Op.getValueType
()))) && "A splatted value must have a width equal or (for integers) "
"greater than the vector element type!") ? static_cast<void
> (0) : __assert_fail ("(VT.getVectorElementType() == Op.getValueType() || (VT.isInteger() && VT.getVectorElementType().bitsLE(Op.getValueType()))) && \"A splatted value must have a width equal or (for integers) \" \"greater than the vector element type!\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 821, __PRETTY_FUNCTION__))
821 "greater than the vector element type!")(((VT.getVectorElementType() == Op.getValueType() || (VT.isInteger
() && VT.getVectorElementType().bitsLE(Op.getValueType
()))) && "A splatted value must have a width equal or (for integers) "
"greater than the vector element type!") ? static_cast<void
> (0) : __assert_fail ("(VT.getVectorElementType() == Op.getValueType() || (VT.isInteger() && VT.getVectorElementType().bitsLE(Op.getValueType()))) && \"A splatted value must have a width equal or (for integers) \" \"greater than the vector element type!\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 821, __PRETTY_FUNCTION__))
;
822 return getNode(ISD::UNDEF, SDLoc(), VT);
823 }
824
825 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Op);
826 return getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
827 }
828
829 // Return a splat ISD::SPLAT_VECTOR node, consisting of Op splatted to all
830 // elements.
831 SDValue getSplatVector(EVT VT, const SDLoc &DL, SDValue Op) {
832 if (Op.getOpcode() == ISD::UNDEF) {
833 assert((VT.getVectorElementType() == Op.getValueType() ||(((VT.getVectorElementType() == Op.getValueType() || (VT.isInteger
() && VT.getVectorElementType().bitsLE(Op.getValueType
()))) && "A splatted value must have a width equal or (for integers) "
"greater than the vector element type!") ? static_cast<void
> (0) : __assert_fail ("(VT.getVectorElementType() == Op.getValueType() || (VT.isInteger() && VT.getVectorElementType().bitsLE(Op.getValueType()))) && \"A splatted value must have a width equal or (for integers) \" \"greater than the vector element type!\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 837, __PRETTY_FUNCTION__))
834 (VT.isInteger() &&(((VT.getVectorElementType() == Op.getValueType() || (VT.isInteger
() && VT.getVectorElementType().bitsLE(Op.getValueType
()))) && "A splatted value must have a width equal or (for integers) "
"greater than the vector element type!") ? static_cast<void
> (0) : __assert_fail ("(VT.getVectorElementType() == Op.getValueType() || (VT.isInteger() && VT.getVectorElementType().bitsLE(Op.getValueType()))) && \"A splatted value must have a width equal or (for integers) \" \"greater than the vector element type!\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 837, __PRETTY_FUNCTION__))
835 VT.getVectorElementType().bitsLE(Op.getValueType()))) &&(((VT.getVectorElementType() == Op.getValueType() || (VT.isInteger
() && VT.getVectorElementType().bitsLE(Op.getValueType
()))) && "A splatted value must have a width equal or (for integers) "
"greater than the vector element type!") ? static_cast<void
> (0) : __assert_fail ("(VT.getVectorElementType() == Op.getValueType() || (VT.isInteger() && VT.getVectorElementType().bitsLE(Op.getValueType()))) && \"A splatted value must have a width equal or (for integers) \" \"greater than the vector element type!\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 837, __PRETTY_FUNCTION__))
836 "A splatted value must have a width equal or (for integers) "(((VT.getVectorElementType() == Op.getValueType() || (VT.isInteger
() && VT.getVectorElementType().bitsLE(Op.getValueType
()))) && "A splatted value must have a width equal or (for integers) "
"greater than the vector element type!") ? static_cast<void
> (0) : __assert_fail ("(VT.getVectorElementType() == Op.getValueType() || (VT.isInteger() && VT.getVectorElementType().bitsLE(Op.getValueType()))) && \"A splatted value must have a width equal or (for integers) \" \"greater than the vector element type!\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 837, __PRETTY_FUNCTION__))
837 "greater than the vector element type!")(((VT.getVectorElementType() == Op.getValueType() || (VT.isInteger
() && VT.getVectorElementType().bitsLE(Op.getValueType
()))) && "A splatted value must have a width equal or (for integers) "
"greater than the vector element type!") ? static_cast<void
> (0) : __assert_fail ("(VT.getVectorElementType() == Op.getValueType() || (VT.isInteger() && VT.getVectorElementType().bitsLE(Op.getValueType()))) && \"A splatted value must have a width equal or (for integers) \" \"greater than the vector element type!\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 837, __PRETTY_FUNCTION__))
;
838 return getNode(ISD::UNDEF, SDLoc(), VT);
839 }
840 return getNode(ISD::SPLAT_VECTOR, DL, VT, Op);
841 }
842
843 /// Returns an ISD::VECTOR_SHUFFLE node semantically equivalent to
844 /// the shuffle node in input but with swapped operands.
845 ///
846 /// Example: shuffle A, B, <0,5,2,7> -> shuffle B, A, <4,1,6,3>
847 SDValue getCommutedVectorShuffle(const ShuffleVectorSDNode &SV);
848
849 /// Convert Op, which must be of float type, to the
850 /// float type VT, by either extending or rounding (by truncation).
851 SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT);
852
853 /// Convert Op, which must be a STRICT operation of float type, to the
854 /// float type VT, by either extending or rounding (by truncation).
855 std::pair<SDValue, SDValue>
856 getStrictFPExtendOrRound(SDValue Op, SDValue Chain, const SDLoc &DL, EVT VT);
857
858 /// Convert Op, which must be of integer type, to the
859 /// integer type VT, by either any-extending or truncating it.
860 SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT);
861
862 /// Convert Op, which must be of integer type, to the
863 /// integer type VT, by either sign-extending or truncating it.
864 SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT);
865
866 /// Convert Op, which must be of integer type, to the
867 /// integer type VT, by either zero-extending or truncating it.
868 SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT);
869
870 /// Return the expression required to zero extend the Op
871 /// value assuming it was the smaller SrcTy value.
872 SDValue getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT);
873
874 /// Convert Op, which must be of integer type, to the integer type VT, by
875 /// either truncating it or performing either zero or sign extension as
876 /// appropriate extension for the pointer's semantics.
877 SDValue getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT);
878
879 /// Return the expression required to extend the Op as a pointer value
880 /// assuming it was the smaller SrcTy value. This may be either a zero extend
881 /// or a sign extend.
882 SDValue getPtrExtendInReg(SDValue Op, const SDLoc &DL, EVT VT);
883
884 /// Convert Op, which must be of integer type, to the integer type VT,
885 /// by using an extension appropriate for the target's
886 /// BooleanContent for type OpVT or truncating it.
887 SDValue getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT, EVT OpVT);
888
889 /// Create a bitwise NOT operation as (XOR Val, -1).
890 SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT);
891
892 /// Create a logical NOT operation as (XOR Val, BooleanOne).
893 SDValue getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT);
894
895 /// Returns sum of the base pointer and offset.
896 /// Unlike getObjectPtrOffset this does not set NoUnsignedWrap by default.
897 SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL,
898 const SDNodeFlags Flags = SDNodeFlags());
899 SDValue getMemBasePlusOffset(SDValue Base, SDValue Offset, const SDLoc &DL,
900 const SDNodeFlags Flags = SDNodeFlags());
901
902 /// Create an add instruction with appropriate flags when used for
903 /// addressing some offset of an object. i.e. if a load is split into multiple
904 /// components, create an add nuw from the base pointer to the offset.
905 SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset) {
906 SDNodeFlags Flags;
907 Flags.setNoUnsignedWrap(true);
908 return getMemBasePlusOffset(Ptr, Offset, SL, Flags);
909 }
910
911 SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, SDValue Offset) {
912 // The object itself can't wrap around the address space, so it shouldn't be
913 // possible for the adds of the offsets to the split parts to overflow.
914 SDNodeFlags Flags;
915 Flags.setNoUnsignedWrap(true);
916 return getMemBasePlusOffset(Ptr, Offset, SL, Flags);
917 }
918
919 /// Return a new CALLSEQ_START node, that starts new call frame, in which
920 /// InSize bytes are set up inside CALLSEQ_START..CALLSEQ_END sequence and
921 /// OutSize specifies part of the frame set up prior to the sequence.
922 SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize,
923 const SDLoc &DL) {
924 SDVTList VTs = getVTList(MVT::Other, MVT::Glue);
925 SDValue Ops[] = { Chain,
926 getIntPtrConstant(InSize, DL, true),
927 getIntPtrConstant(OutSize, DL, true) };
928 return getNode(ISD::CALLSEQ_START, DL, VTs, Ops);
929 }
930
931 /// Return a new CALLSEQ_END node, which always must have a
932 /// glue result (to ensure it's not CSE'd).
933 /// CALLSEQ_END does not have a useful SDLoc.
934 SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2,
935 SDValue InGlue, const SDLoc &DL) {
936 SDVTList NodeTys = getVTList(MVT::Other, MVT::Glue);
937 SmallVector<SDValue, 4> Ops;
938 Ops.push_back(Chain);
939 Ops.push_back(Op1);
940 Ops.push_back(Op2);
941 if (InGlue.getNode())
942 Ops.push_back(InGlue);
943 return getNode(ISD::CALLSEQ_END, DL, NodeTys, Ops);
944 }
945
946 /// Return true if the result of this operation is always undefined.
947 bool isUndef(unsigned Opcode, ArrayRef<SDValue> Ops);
948
949 /// Return an UNDEF node. UNDEF does not have a useful SDLoc.
950 SDValue getUNDEF(EVT VT) {
951 return getNode(ISD::UNDEF, SDLoc(), VT);
952 }
953
954 /// Return a node that represents the runtime scaling 'MulImm * RuntimeVL'.
955 SDValue getVScale(const SDLoc &DL, EVT VT, APInt MulImm) {
956 assert(MulImm.getMinSignedBits() <= VT.getSizeInBits() &&((MulImm.getMinSignedBits() <= VT.getSizeInBits() &&
"Immediate does not fit VT") ? static_cast<void> (0) :
__assert_fail ("MulImm.getMinSignedBits() <= VT.getSizeInBits() && \"Immediate does not fit VT\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 957, __PRETTY_FUNCTION__))
957 "Immediate does not fit VT")((MulImm.getMinSignedBits() <= VT.getSizeInBits() &&
"Immediate does not fit VT") ? static_cast<void> (0) :
__assert_fail ("MulImm.getMinSignedBits() <= VT.getSizeInBits() && \"Immediate does not fit VT\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 957, __PRETTY_FUNCTION__))
;
958 return getNode(ISD::VSCALE, DL, VT,
959 getConstant(MulImm.sextOrTrunc(VT.getSizeInBits()), DL, VT));
960 }
961
962 /// Return a GLOBAL_OFFSET_TABLE node. This does not have a useful SDLoc.
963 SDValue getGLOBAL_OFFSET_TABLE(EVT VT) {
964 return getNode(ISD::GLOBAL_OFFSET_TABLE, SDLoc(), VT);
965 }
966
967 /// Gets or creates the specified node.
968 ///
969 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
970 ArrayRef<SDUse> Ops);
971 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
972 ArrayRef<SDValue> Ops, const SDNodeFlags Flags);
973 SDValue getNode(unsigned Opcode, const SDLoc &DL, ArrayRef<EVT> ResultTys,
974 ArrayRef<SDValue> Ops);
975 SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
976 ArrayRef<SDValue> Ops, const SDNodeFlags Flags);
977
978 // Use flags from current flag inserter.
979 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
980 ArrayRef<SDValue> Ops);
981 SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
982 ArrayRef<SDValue> Ops);
983 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue Operand);
984 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
985 SDValue N2);
986 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
987 SDValue N2, SDValue N3);
988
989 // Specialize based on number of operands.
990 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT);
991 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue Operand,
992 const SDNodeFlags Flags);
993 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
994 SDValue N2, const SDNodeFlags Flags);
995 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
996 SDValue N2, SDValue N3, const SDNodeFlags Flags);
997 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
998 SDValue N2, SDValue N3, SDValue N4);
999 SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
1000 SDValue N2, SDValue N3, SDValue N4, SDValue N5);
1001
1002 // Specialize again based on number of operands for nodes with a VTList
1003 // rather than a single VT.
1004 SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList);
1005 SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, SDValue N);
1006 SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, SDValue N1,
1007 SDValue N2);
1008 SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, SDValue N1,
1009 SDValue N2, SDValue N3);
1010 SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, SDValue N1,
1011 SDValue N2, SDValue N3, SDValue N4);
1012 SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, SDValue N1,
1013 SDValue N2, SDValue N3, SDValue N4, SDValue N5);
1014
1015 /// Compute a TokenFactor to force all the incoming stack arguments to be
1016 /// loaded from the stack. This is used in tail call lowering to protect
1017 /// stack arguments from being clobbered.
1018 SDValue getStackArgumentTokenFactor(SDValue Chain);
1019
1020 LLVM_ATTRIBUTE_DEPRECATED(SDValue getMemcpy(SDValue Chain, const SDLoc &dl,SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue
Dst, SDValue Src, SDValue Size, unsigned Align, bool isVol, bool
AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo
, MachinePointerInfo SrcPtrInfo) __attribute__((deprecated("Use the version that takes Align instead"
)))
1021 SDValue Dst, SDValue Src,SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue
Dst, SDValue Src, SDValue Size, unsigned Align, bool isVol, bool
AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo
, MachinePointerInfo SrcPtrInfo) __attribute__((deprecated("Use the version that takes Align instead"
)))
1022 SDValue Size, unsigned Align,SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue
Dst, SDValue Src, SDValue Size, unsigned Align, bool isVol, bool
AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo
, MachinePointerInfo SrcPtrInfo) __attribute__((deprecated("Use the version that takes Align instead"
)))
1023 bool isVol, bool AlwaysInline,SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue
Dst, SDValue Src, SDValue Size, unsigned Align, bool isVol, bool
AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo
, MachinePointerInfo SrcPtrInfo) __attribute__((deprecated("Use the version that takes Align instead"
)))
1024 bool isTailCall,SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue
Dst, SDValue Src, SDValue Size, unsigned Align, bool isVol, bool
AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo
, MachinePointerInfo SrcPtrInfo) __attribute__((deprecated("Use the version that takes Align instead"
)))
1025 MachinePointerInfo DstPtrInfo,SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue
Dst, SDValue Src, SDValue Size, unsigned Align, bool isVol, bool
AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo
, MachinePointerInfo SrcPtrInfo) __attribute__((deprecated("Use the version that takes Align instead"
)))
1026 MachinePointerInfo SrcPtrInfo),SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue
Dst, SDValue Src, SDValue Size, unsigned Align, bool isVol, bool
AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo
, MachinePointerInfo SrcPtrInfo) __attribute__((deprecated("Use the version that takes Align instead"
)))
1027 "Use the version that takes Align instead")SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue
Dst, SDValue Src, SDValue Size, unsigned Align, bool isVol, bool
AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo
, MachinePointerInfo SrcPtrInfo) __attribute__((deprecated("Use the version that takes Align instead"
)))
{
1028 return getMemcpy(Chain, dl, Dst, Src, Size, llvm::Align(Align), isVol,
1029 AlwaysInline, isTailCall, DstPtrInfo, SrcPtrInfo);
1030 }
1031
1032 SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src,
1033 SDValue Size, Align Alignment, bool isVol,
1034 bool AlwaysInline, bool isTailCall,
1035 MachinePointerInfo DstPtrInfo,
1036 MachinePointerInfo SrcPtrInfo);
1037
1038 LLVM_ATTRIBUTE_DEPRECATED(SDValue getMemmove(SDValue Chain, const SDLoc &dl,SDValue getMemmove(SDValue Chain, const SDLoc &dl, SDValue
Dst, SDValue Src, SDValue Size, unsigned Align, bool isVol, bool
isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo
SrcPtrInfo) __attribute__((deprecated("Use the version that takes Align instead"
)))
1039 SDValue Dst, SDValue Src,SDValue getMemmove(SDValue Chain, const SDLoc &dl, SDValue
Dst, SDValue Src, SDValue Size, unsigned Align, bool isVol, bool
isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo
SrcPtrInfo) __attribute__((deprecated("Use the version that takes Align instead"
)))
1040 SDValue Size, unsigned Align,SDValue getMemmove(SDValue Chain, const SDLoc &dl, SDValue
Dst, SDValue Src, SDValue Size, unsigned Align, bool isVol, bool
isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo
SrcPtrInfo) __attribute__((deprecated("Use the version that takes Align instead"
)))
1041 bool isVol, bool isTailCall,SDValue getMemmove(SDValue Chain, const SDLoc &dl, SDValue
Dst, SDValue Src, SDValue Size, unsigned Align, bool isVol, bool
isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo
SrcPtrInfo) __attribute__((deprecated("Use the version that takes Align instead"
)))
1042 MachinePointerInfo DstPtrInfo,SDValue getMemmove(SDValue Chain, const SDLoc &dl, SDValue
Dst, SDValue Src, SDValue Size, unsigned Align, bool isVol, bool
isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo
SrcPtrInfo) __attribute__((deprecated("Use the version that takes Align instead"
)))
1043 MachinePointerInfo SrcPtrInfo),SDValue getMemmove(SDValue Chain, const SDLoc &dl, SDValue
Dst, SDValue Src, SDValue Size, unsigned Align, bool isVol, bool
isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo
SrcPtrInfo) __attribute__((deprecated("Use the version that takes Align instead"
)))
1044 "Use the version that takes Align instead")SDValue getMemmove(SDValue Chain, const SDLoc &dl, SDValue
Dst, SDValue Src, SDValue Size, unsigned Align, bool isVol, bool
isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo
SrcPtrInfo) __attribute__((deprecated("Use the version that takes Align instead"
)))
{
1045 return getMemmove(Chain, dl, Dst, Src, Size, llvm::Align(Align), isVol,
1046 isTailCall, DstPtrInfo, SrcPtrInfo);
1047 }
1048 SDValue getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src,
1049 SDValue Size, Align Alignment, bool isVol, bool isTailCall,
1050 MachinePointerInfo DstPtrInfo,
1051 MachinePointerInfo SrcPtrInfo);
1052
1053 LLVM_ATTRIBUTE_DEPRECATED(SDValue getMemset(SDValue Chain, const SDLoc &dl,SDValue getMemset(SDValue Chain, const SDLoc &dl, SDValue
Dst, SDValue Src, SDValue Size, unsigned Align, bool isVol, bool
isTailCall, MachinePointerInfo DstPtrInfo) __attribute__((deprecated
("Use the version that takes Align instead")))
1054 SDValue Dst, SDValue Src,SDValue getMemset(SDValue Chain, const SDLoc &dl, SDValue
Dst, SDValue Src, SDValue Size, unsigned Align, bool isVol, bool
isTailCall, MachinePointerInfo DstPtrInfo) __attribute__((deprecated
("Use the version that takes Align instead")))
1055 SDValue Size, unsigned Align,SDValue getMemset(SDValue Chain, const SDLoc &dl, SDValue
Dst, SDValue Src, SDValue Size, unsigned Align, bool isVol, bool
isTailCall, MachinePointerInfo DstPtrInfo) __attribute__((deprecated
("Use the version that takes Align instead")))
1056 bool isVol, bool isTailCall,SDValue getMemset(SDValue Chain, const SDLoc &dl, SDValue
Dst, SDValue Src, SDValue Size, unsigned Align, bool isVol, bool
isTailCall, MachinePointerInfo DstPtrInfo) __attribute__((deprecated
("Use the version that takes Align instead")))
1057 MachinePointerInfo DstPtrInfo),SDValue getMemset(SDValue Chain, const SDLoc &dl, SDValue
Dst, SDValue Src, SDValue Size, unsigned Align, bool isVol, bool
isTailCall, MachinePointerInfo DstPtrInfo) __attribute__((deprecated
("Use the version that takes Align instead")))
1058 "Use the version that takes Align instead")SDValue getMemset(SDValue Chain, const SDLoc &dl, SDValue
Dst, SDValue Src, SDValue Size, unsigned Align, bool isVol, bool
isTailCall, MachinePointerInfo DstPtrInfo) __attribute__((deprecated
("Use the version that takes Align instead")))
{
1059 return getMemset(Chain, dl, Dst, Src, Size, llvm::Align(Align), isVol,
1060 isTailCall, DstPtrInfo);
1061 }
1062 SDValue getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src,
1063 SDValue Size, Align Alignment, bool isVol, bool isTailCall,
1064 MachinePointerInfo DstPtrInfo);
1065
1066 SDValue getAtomicMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst,
1067 unsigned DstAlign, SDValue Src, unsigned SrcAlign,
1068 SDValue Size, Type *SizeTy, unsigned ElemSz,
1069 bool isTailCall, MachinePointerInfo DstPtrInfo,
1070 MachinePointerInfo SrcPtrInfo);
1071
1072 SDValue getAtomicMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst,
1073 unsigned DstAlign, SDValue Src, unsigned SrcAlign,
1074 SDValue Size, Type *SizeTy, unsigned ElemSz,
1075 bool isTailCall, MachinePointerInfo DstPtrInfo,
1076 MachinePointerInfo SrcPtrInfo);
1077
1078 SDValue getAtomicMemset(SDValue Chain, const SDLoc &dl, SDValue Dst,
1079 unsigned DstAlign, SDValue Value, SDValue Size,
1080 Type *SizeTy, unsigned ElemSz, bool isTailCall,
1081 MachinePointerInfo DstPtrInfo);
1082
1083 /// Helper function to make it easier to build SetCC's if you just have an
1084 /// ISD::CondCode instead of an SDValue.
1085 SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS,
1086 ISD::CondCode Cond, SDNodeFlags Flags = SDNodeFlags(),
1087 SDValue Chain = SDValue(), bool IsSignaling = false) {
1088 assert(LHS.getValueType().isVector() == RHS.getValueType().isVector() &&((LHS.getValueType().isVector() == RHS.getValueType().isVector
() && "Cannot compare scalars to vectors") ? static_cast
<void> (0) : __assert_fail ("LHS.getValueType().isVector() == RHS.getValueType().isVector() && \"Cannot compare scalars to vectors\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1089, __PRETTY_FUNCTION__))
1089 "Cannot compare scalars to vectors")((LHS.getValueType().isVector() == RHS.getValueType().isVector
() && "Cannot compare scalars to vectors") ? static_cast
<void> (0) : __assert_fail ("LHS.getValueType().isVector() == RHS.getValueType().isVector() && \"Cannot compare scalars to vectors\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1089, __PRETTY_FUNCTION__))
;
1090 assert(LHS.getValueType().isVector() == VT.isVector() &&((LHS.getValueType().isVector() == VT.isVector() && "Cannot compare scalars to vectors"
) ? static_cast<void> (0) : __assert_fail ("LHS.getValueType().isVector() == VT.isVector() && \"Cannot compare scalars to vectors\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1091, __PRETTY_FUNCTION__))
1091 "Cannot compare scalars to vectors")((LHS.getValueType().isVector() == VT.isVector() && "Cannot compare scalars to vectors"
) ? static_cast<void> (0) : __assert_fail ("LHS.getValueType().isVector() == VT.isVector() && \"Cannot compare scalars to vectors\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1091, __PRETTY_FUNCTION__))
;
1092 assert(Cond != ISD::SETCC_INVALID &&((Cond != ISD::SETCC_INVALID && "Cannot create a setCC of an invalid node."
) ? static_cast<void> (0) : __assert_fail ("Cond != ISD::SETCC_INVALID && \"Cannot create a setCC of an invalid node.\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1093, __PRETTY_FUNCTION__))
1093 "Cannot create a setCC of an invalid node.")((Cond != ISD::SETCC_INVALID && "Cannot create a setCC of an invalid node."
) ? static_cast<void> (0) : __assert_fail ("Cond != ISD::SETCC_INVALID && \"Cannot create a setCC of an invalid node.\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1093, __PRETTY_FUNCTION__))
;
1094 if (Chain)
1095 return getNode(IsSignaling ? ISD::STRICT_FSETCCS : ISD::STRICT_FSETCC, DL,
1096 {VT, MVT::Other}, {Chain, LHS, RHS, getCondCode(Cond)});
1097 return getNode(ISD::SETCC, DL, VT, LHS, RHS, getCondCode(Cond), Flags);
1098 }
1099
1100 /// Helper function to make it easier to build Select's if you just have
1101 /// operands and don't want to check for vector.
1102 SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS,
1103 SDValue RHS) {
1104 assert(LHS.getValueType() == RHS.getValueType() &&((LHS.getValueType() == RHS.getValueType() && "Cannot use select on differing types"
) ? static_cast<void> (0) : __assert_fail ("LHS.getValueType() == RHS.getValueType() && \"Cannot use select on differing types\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1105, __PRETTY_FUNCTION__))
1105 "Cannot use select on differing types")((LHS.getValueType() == RHS.getValueType() && "Cannot use select on differing types"
) ? static_cast<void> (0) : __assert_fail ("LHS.getValueType() == RHS.getValueType() && \"Cannot use select on differing types\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1105, __PRETTY_FUNCTION__))
;
1106 assert(VT.isVector() == LHS.getValueType().isVector() &&((VT.isVector() == LHS.getValueType().isVector() && "Cannot mix vectors and scalars"
) ? static_cast<void> (0) : __assert_fail ("VT.isVector() == LHS.getValueType().isVector() && \"Cannot mix vectors and scalars\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1107, __PRETTY_FUNCTION__))
1107 "Cannot mix vectors and scalars")((VT.isVector() == LHS.getValueType().isVector() && "Cannot mix vectors and scalars"
) ? static_cast<void> (0) : __assert_fail ("VT.isVector() == LHS.getValueType().isVector() && \"Cannot mix vectors and scalars\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1107, __PRETTY_FUNCTION__))
;
1108 auto Opcode = Cond.getValueType().isVector() ? ISD::VSELECT : ISD::SELECT;
1109 return getNode(Opcode, DL, VT, Cond, LHS, RHS);
1110 }
1111
1112 /// Helper function to make it easier to build SelectCC's if you just have an
1113 /// ISD::CondCode instead of an SDValue.
1114 SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True,
1115 SDValue False, ISD::CondCode Cond) {
1116 return getNode(ISD::SELECT_CC, DL, True.getValueType(), LHS, RHS, True,
1117 False, getCondCode(Cond));
1118 }
1119
1120 /// Try to simplify a select/vselect into 1 of its operands or a constant.
1121 SDValue simplifySelect(SDValue Cond, SDValue TVal, SDValue FVal);
1122
1123 /// Try to simplify a shift into 1 of its operands or a constant.
1124 SDValue simplifyShift(SDValue X, SDValue Y);
1125
1126 /// Try to simplify a floating-point binary operation into 1 of its operands
1127 /// or a constant.
1128 SDValue simplifyFPBinop(unsigned Opcode, SDValue X, SDValue Y,
1129 SDNodeFlags Flags);
1130
1131 /// VAArg produces a result and token chain, and takes a pointer
1132 /// and a source value as input.
1133 SDValue getVAArg(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
1134 SDValue SV, unsigned Align);
1135
1136 /// Gets a node for an atomic cmpxchg op. There are two
1137 /// valid Opcodes. ISD::ATOMIC_CMP_SWAP produces the value loaded and a
1138 /// chain result. ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS produces the value loaded,
1139 /// a success flag (initially i1), and a chain.
1140 SDValue getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, EVT MemVT,
1141 SDVTList VTs, SDValue Chain, SDValue Ptr,
1142 SDValue Cmp, SDValue Swp, MachineMemOperand *MMO);
1143
1144 /// Gets a node for an atomic op, produces result (if relevant)
1145 /// and chain and takes 2 operands.
1146 SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain,
1147 SDValue Ptr, SDValue Val, MachineMemOperand *MMO);
1148
1149 /// Gets a node for an atomic op, produces result and chain and
1150 /// takes 1 operand.
1151 SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, EVT VT,
1152 SDValue Chain, SDValue Ptr, MachineMemOperand *MMO);
1153
1154 /// Gets a node for an atomic op, produces result and chain and takes N
1155 /// operands.
1156 SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
1157 SDVTList VTList, ArrayRef<SDValue> Ops,
1158 MachineMemOperand *MMO);
1159
1160 /// Creates a MemIntrinsicNode that may produce a
1161 /// result and takes a list of operands. Opcode may be INTRINSIC_VOID,
1162 /// INTRINSIC_W_CHAIN, or a target-specific opcode with a value not
1163 /// less than FIRST_TARGET_MEMORY_OPCODE.
1164 SDValue getMemIntrinsicNode(
1165 unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
1166 EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment,
1167 MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad |
1168 MachineMemOperand::MOStore,
1169 uint64_t Size = 0, const AAMDNodes &AAInfo = AAMDNodes());
1170
1171 inline SDValue getMemIntrinsicNode(
1172 unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
1173 EVT MemVT, MachinePointerInfo PtrInfo, MaybeAlign Alignment = None,
1174 MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad |
1175 MachineMemOperand::MOStore,
1176 uint64_t Size = 0, const AAMDNodes &AAInfo = AAMDNodes()) {
1177 // Ensure that codegen never sees alignment 0
1178 return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, PtrInfo,
1179 Alignment.getValueOr(getEVTAlign(MemVT)), Flags,
1180 Size, AAInfo);
1181 }
1182
1183 LLVM_ATTRIBUTE_DEPRECATED(inline SDValue getMemIntrinsicNode( unsigned Opcode, const SDLoc
&dl, SDVTList VTList, ArrayRef<SDValue> Ops, EVT MemVT
, MachinePointerInfo PtrInfo, unsigned Alignment, MachineMemOperand
::Flags Flags = MachineMemOperand::MOLoad | MachineMemOperand
::MOStore, uint64_t Size = 0, const AAMDNodes &AAInfo = AAMDNodes
()) __attribute__((deprecated("")))
1184 inline SDValue getMemIntrinsicNode(inline SDValue getMemIntrinsicNode( unsigned Opcode, const SDLoc
&dl, SDVTList VTList, ArrayRef<SDValue> Ops, EVT MemVT
, MachinePointerInfo PtrInfo, unsigned Alignment, MachineMemOperand
::Flags Flags = MachineMemOperand::MOLoad | MachineMemOperand
::MOStore, uint64_t Size = 0, const AAMDNodes &AAInfo = AAMDNodes
()) __attribute__((deprecated("")))
1185 unsigned Opcode, const SDLoc &dl, SDVTList VTList,inline SDValue getMemIntrinsicNode( unsigned Opcode, const SDLoc
&dl, SDVTList VTList, ArrayRef<SDValue> Ops, EVT MemVT
, MachinePointerInfo PtrInfo, unsigned Alignment, MachineMemOperand
::Flags Flags = MachineMemOperand::MOLoad | MachineMemOperand
::MOStore, uint64_t Size = 0, const AAMDNodes &AAInfo = AAMDNodes
()) __attribute__((deprecated("")))
1186 ArrayRef<SDValue> Ops, EVT MemVT, MachinePointerInfo PtrInfo,inline SDValue getMemIntrinsicNode( unsigned Opcode, const SDLoc
&dl, SDVTList VTList, ArrayRef<SDValue> Ops, EVT MemVT
, MachinePointerInfo PtrInfo, unsigned Alignment, MachineMemOperand
::Flags Flags = MachineMemOperand::MOLoad | MachineMemOperand
::MOStore, uint64_t Size = 0, const AAMDNodes &AAInfo = AAMDNodes
()) __attribute__((deprecated("")))
1187 unsigned Alignment,inline SDValue getMemIntrinsicNode( unsigned Opcode, const SDLoc
&dl, SDVTList VTList, ArrayRef<SDValue> Ops, EVT MemVT
, MachinePointerInfo PtrInfo, unsigned Alignment, MachineMemOperand
::Flags Flags = MachineMemOperand::MOLoad | MachineMemOperand
::MOStore, uint64_t Size = 0, const AAMDNodes &AAInfo = AAMDNodes
()) __attribute__((deprecated("")))
1188 MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad |inline SDValue getMemIntrinsicNode( unsigned Opcode, const SDLoc
&dl, SDVTList VTList, ArrayRef<SDValue> Ops, EVT MemVT
, MachinePointerInfo PtrInfo, unsigned Alignment, MachineMemOperand
::Flags Flags = MachineMemOperand::MOLoad | MachineMemOperand
::MOStore, uint64_t Size = 0, const AAMDNodes &AAInfo = AAMDNodes
()) __attribute__((deprecated("")))
1189 MachineMemOperand::MOStore,inline SDValue getMemIntrinsicNode( unsigned Opcode, const SDLoc
&dl, SDVTList VTList, ArrayRef<SDValue> Ops, EVT MemVT
, MachinePointerInfo PtrInfo, unsigned Alignment, MachineMemOperand
::Flags Flags = MachineMemOperand::MOLoad | MachineMemOperand
::MOStore, uint64_t Size = 0, const AAMDNodes &AAInfo = AAMDNodes
()) __attribute__((deprecated("")))
1190 uint64_t Size = 0, const AAMDNodes &AAInfo = AAMDNodes()),inline SDValue getMemIntrinsicNode( unsigned Opcode, const SDLoc
&dl, SDVTList VTList, ArrayRef<SDValue> Ops, EVT MemVT
, MachinePointerInfo PtrInfo, unsigned Alignment, MachineMemOperand
::Flags Flags = MachineMemOperand::MOLoad | MachineMemOperand
::MOStore, uint64_t Size = 0, const AAMDNodes &AAInfo = AAMDNodes
()) __attribute__((deprecated("")))
1191 "")inline SDValue getMemIntrinsicNode( unsigned Opcode, const SDLoc
&dl, SDVTList VTList, ArrayRef<SDValue> Ops, EVT MemVT
, MachinePointerInfo PtrInfo, unsigned Alignment, MachineMemOperand
::Flags Flags = MachineMemOperand::MOLoad | MachineMemOperand
::MOStore, uint64_t Size = 0, const AAMDNodes &AAInfo = AAMDNodes
()) __attribute__((deprecated("")))
{
1192 return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, PtrInfo,
1193 MaybeAlign(Alignment), Flags, Size, AAInfo);
1194 }
1195
1196 SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList,
1197 ArrayRef<SDValue> Ops, EVT MemVT,
1198 MachineMemOperand *MMO);
1199
1200 /// Creates a LifetimeSDNode that starts (`IsStart==true`) or ends
1201 /// (`IsStart==false`) the lifetime of the portion of `FrameIndex` between
1202 /// offsets `Offset` and `Offset + Size`.
1203 SDValue getLifetimeNode(bool IsStart, const SDLoc &dl, SDValue Chain,
1204 int FrameIndex, int64_t Size, int64_t Offset = -1);
1205
1206 /// Create a MERGE_VALUES node from the given operands.
1207 SDValue getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl);
1208
1209 /// Loads are not normal binary operators: their result type is not
1210 /// determined by their operands, and they produce a value AND a token chain.
1211 ///
1212 /// This function will set the MOLoad flag on MMOFlags, but you can set it if
1213 /// you want. The MOStore flag must not be set.
1214 SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
1215 MachinePointerInfo PtrInfo,
1216 MaybeAlign Alignment = MaybeAlign(),
1217 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1218 const AAMDNodes &AAInfo = AAMDNodes(),
1219 const MDNode *Ranges = nullptr);
1220 /// FIXME: Remove once transition to Align is over.
1221 inline SDValue
1222 getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
1223 MachinePointerInfo PtrInfo, unsigned Alignment,
1224 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1225 const AAMDNodes &AAInfo = AAMDNodes(),
1226 const MDNode *Ranges = nullptr) {
1227 return getLoad(VT, dl, Chain, Ptr, PtrInfo, MaybeAlign(Alignment), MMOFlags,
1228 AAInfo, Ranges);
1229 }
1230 SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
1231 MachineMemOperand *MMO);
1232 SDValue
1233 getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain,
1234 SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT,
1235 MaybeAlign Alignment = MaybeAlign(),
1236 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1237 const AAMDNodes &AAInfo = AAMDNodes());
1238 /// FIXME: Remove once transition to Align is over.
1239 inline SDValue
1240 getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain,
1241 SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT,
1242 unsigned Alignment,
1243 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1244 const AAMDNodes &AAInfo = AAMDNodes()) {
1245 return getExtLoad(ExtType, dl, VT, Chain, Ptr, PtrInfo, MemVT,
1246 MaybeAlign(Alignment), MMOFlags, AAInfo);
1247 }
1248 SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT,
1249 SDValue Chain, SDValue Ptr, EVT MemVT,
1250 MachineMemOperand *MMO);
1251 SDValue getIndexedLoad(SDValue OrigLoad, const SDLoc &dl, SDValue Base,
1252 SDValue Offset, ISD::MemIndexedMode AM);
1253 SDValue getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT,
1254 const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset,
1255 MachinePointerInfo PtrInfo, EVT MemVT, Align Alignment,
1256 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1257 const AAMDNodes &AAInfo = AAMDNodes(),
1258 const MDNode *Ranges = nullptr);
1259 inline SDValue getLoad(
1260 ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &dl,
1261 SDValue Chain, SDValue Ptr, SDValue Offset, MachinePointerInfo PtrInfo,
1262 EVT MemVT, MaybeAlign Alignment = MaybeAlign(),
1263 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1264 const AAMDNodes &AAInfo = AAMDNodes(), const MDNode *Ranges = nullptr) {
1265 // Ensures that codegen never sees a None Alignment.
1266 return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, PtrInfo, MemVT,
1267 Alignment.getValueOr(getEVTAlign(MemVT)), MMOFlags, AAInfo,
1268 Ranges);
1269 }
1270 /// FIXME: Remove once transition to Align is over.
1271 inline SDValue
1272 getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT,
1273 const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset,
1274 MachinePointerInfo PtrInfo, EVT MemVT, unsigned Alignment,
1275 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1276 const AAMDNodes &AAInfo = AAMDNodes(),
1277 const MDNode *Ranges = nullptr) {
1278 return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, PtrInfo, MemVT,
1279 MaybeAlign(Alignment), MMOFlags, AAInfo, Ranges);
1280 }
1281 SDValue getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT,
1282 const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset,
1283 EVT MemVT, MachineMemOperand *MMO);
1284
1285 /// Helper function to build ISD::STORE nodes.
1286 ///
1287 /// This function will set the MOStore flag on MMOFlags, but you can set it if
1288 /// you want. The MOLoad and MOInvariant flags must not be set.
1289
1290 SDValue
1291 getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
1292 MachinePointerInfo PtrInfo, Align Alignment,
1293 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1294 const AAMDNodes &AAInfo = AAMDNodes());
1295 inline SDValue
1296 getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
1297 MachinePointerInfo PtrInfo, MaybeAlign Alignment = MaybeAlign(),
1298 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1299 const AAMDNodes &AAInfo = AAMDNodes()) {
1300 return getStore(Chain, dl, Val, Ptr, PtrInfo,
1301 Alignment.getValueOr(getEVTAlign(Val.getValueType())),
1302 MMOFlags, AAInfo);
1303 }
1304 /// FIXME: Remove once transition to Align is over.
1305 inline SDValue
1306 getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
1307 MachinePointerInfo PtrInfo, unsigned Alignment,
1308 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1309 const AAMDNodes &AAInfo = AAMDNodes()) {
1310 return getStore(Chain, dl, Val, Ptr, PtrInfo, MaybeAlign(Alignment),
1311 MMOFlags, AAInfo);
1312 }
1313 SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
1314 MachineMemOperand *MMO);
1315 SDValue
1316 getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
1317 MachinePointerInfo PtrInfo, EVT SVT, Align Alignment,
1318 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1319 const AAMDNodes &AAInfo = AAMDNodes());
1320 inline SDValue
1321 getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
1322 MachinePointerInfo PtrInfo, EVT SVT,
1323 MaybeAlign Alignment = MaybeAlign(),
1324 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1325 const AAMDNodes &AAInfo = AAMDNodes()) {
1326 return getTruncStore(Chain, dl, Val, Ptr, PtrInfo, SVT,
1327 Alignment.getValueOr(getEVTAlign(SVT)), MMOFlags,
1328 AAInfo);
1329 }
1330 /// FIXME: Remove once transition to Align is over.
1331 inline SDValue
1332 getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
1333 MachinePointerInfo PtrInfo, EVT SVT, unsigned Alignment,
1334 MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
1335 const AAMDNodes &AAInfo = AAMDNodes()) {
1336 return getTruncStore(Chain, dl, Val, Ptr, PtrInfo, SVT,
1337 MaybeAlign(Alignment), MMOFlags, AAInfo);
1338 }
1339 SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
1340 SDValue Ptr, EVT SVT, MachineMemOperand *MMO);
1341 SDValue getIndexedStore(SDValue OrigStore, const SDLoc &dl, SDValue Base,
1342 SDValue Offset, ISD::MemIndexedMode AM);
1343
1344 SDValue getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Base,
1345 SDValue Offset, SDValue Mask, SDValue Src0, EVT MemVT,
1346 MachineMemOperand *MMO, ISD::MemIndexedMode AM,
1347 ISD::LoadExtType, bool IsExpanding = false);
1348 SDValue getIndexedMaskedLoad(SDValue OrigLoad, const SDLoc &dl, SDValue Base,
1349 SDValue Offset, ISD::MemIndexedMode AM);
1350 SDValue getMaskedStore(SDValue Chain, const SDLoc &dl, SDValue Val,
1351 SDValue Base, SDValue Offset, SDValue Mask, EVT MemVT,
1352 MachineMemOperand *MMO, ISD::MemIndexedMode AM,
1353 bool IsTruncating = false, bool IsCompressing = false);
1354 SDValue getIndexedMaskedStore(SDValue OrigStore, const SDLoc &dl,
1355 SDValue Base, SDValue Offset,
1356 ISD::MemIndexedMode AM);
1357 SDValue getMaskedGather(SDVTList VTs, EVT VT, const SDLoc &dl,
1358 ArrayRef<SDValue> Ops, MachineMemOperand *MMO,
1359 ISD::MemIndexType IndexType);
1360 SDValue getMaskedScatter(SDVTList VTs, EVT VT, const SDLoc &dl,
1361 ArrayRef<SDValue> Ops, MachineMemOperand *MMO,
1362 ISD::MemIndexType IndexType);
1363
1364 /// Construct a node to track a Value* through the backend.
1365 SDValue getSrcValue(const Value *v);
1366
1367 /// Return an MDNodeSDNode which holds an MDNode.
1368 SDValue getMDNode(const MDNode *MD);
1369
1370 /// Return a bitcast using the SDLoc of the value operand, and casting to the
1371 /// provided type. Use getNode to set a custom SDLoc.
1372 SDValue getBitcast(EVT VT, SDValue V);
1373
1374 /// Return an AddrSpaceCastSDNode.
1375 SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS,
1376 unsigned DestAS);
1377
1378 /// Return a freeze using the SDLoc of the value operand.
1379 SDValue getFreeze(SDValue V);
1380
1381 /// Return an AssertAlignSDNode.
1382 SDValue getAssertAlign(const SDLoc &DL, SDValue V, Align A);
1383
1384 /// Return the specified value casted to
1385 /// the target's desired shift amount type.
1386 SDValue getShiftAmountOperand(EVT LHSTy, SDValue Op);
1387
1388 /// Expand the specified \c ISD::VAARG node as the Legalize pass would.
1389 SDValue expandVAArg(SDNode *Node);
1390
1391 /// Expand the specified \c ISD::VACOPY node as the Legalize pass would.
1392 SDValue expandVACopy(SDNode *Node);
1393
1394 /// Returs an GlobalAddress of the function from the current module with
1395 /// name matching the given ExternalSymbol. Additionally can provide the
1396 /// matched function.
1397 /// Panics the function doesn't exists.
1398 SDValue getSymbolFunctionGlobalAddress(SDValue Op,
1399 Function **TargetFunction = nullptr);
1400
1401 /// *Mutate* the specified node in-place to have the
1402 /// specified operands. If the resultant node already exists in the DAG,
1403 /// this does not modify the specified node, instead it returns the node that
1404 /// already exists. If the resultant node does not exist in the DAG, the
1405 /// input node is returned. As a degenerate case, if you specify the same
1406 /// input operands as the node already has, the input node is returned.
1407 SDNode *UpdateNodeOperands(SDNode *N, SDValue Op);
1408 SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2);
1409 SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
1410 SDValue Op3);
1411 SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
1412 SDValue Op3, SDValue Op4);
1413 SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
1414 SDValue Op3, SDValue Op4, SDValue Op5);
1415 SDNode *UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops);
1416
1417 /// Creates a new TokenFactor containing \p Vals. If \p Vals contains 64k
1418 /// values or more, move values into new TokenFactors in 64k-1 blocks, until
1419 /// the final TokenFactor has less than 64k operands.
1420 SDValue getTokenFactor(const SDLoc &DL, SmallVectorImpl<SDValue> &Vals);
1421
1422 /// *Mutate* the specified machine node's memory references to the provided
1423 /// list.
1424 void setNodeMemRefs(MachineSDNode *N,
1425 ArrayRef<MachineMemOperand *> NewMemRefs);
1426
1427 // Propagates the change in divergence to users
1428 void updateDivergence(SDNode * N);
1429
1430 /// These are used for target selectors to *mutate* the
1431 /// specified node to have the specified return type, Target opcode, and
1432 /// operands. Note that target opcodes are stored as
1433 /// ~TargetOpcode in the node opcode field. The resultant node is returned.
1434 SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT);
1435 SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT, SDValue Op1);
1436 SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT,
1437 SDValue Op1, SDValue Op2);
1438 SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT,
1439 SDValue Op1, SDValue Op2, SDValue Op3);
1440 SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT,
1441 ArrayRef<SDValue> Ops);
1442 SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT1, EVT VT2);
1443 SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT1,
1444 EVT VT2, ArrayRef<SDValue> Ops);
1445 SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT1,
1446 EVT VT2, EVT VT3, ArrayRef<SDValue> Ops);
1447 SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT1,
1448 EVT VT2, SDValue Op1);
1449 SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT1,
1450 EVT VT2, SDValue Op1, SDValue Op2);
1451 SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, SDVTList VTs,
1452 ArrayRef<SDValue> Ops);
1453
1454 /// This *mutates* the specified node to have the specified
1455 /// return type, opcode, and operands.
1456 SDNode *MorphNodeTo(SDNode *N, unsigned Opc, SDVTList VTs,
1457 ArrayRef<SDValue> Ops);
1458
1459 /// Mutate the specified strict FP node to its non-strict equivalent,
1460 /// unlinking the node from its chain and dropping the metadata arguments.
1461 /// The node must be a strict FP node.
1462 SDNode *mutateStrictFPToFP(SDNode *Node);
1463
1464 /// These are used for target selectors to create a new node
1465 /// with specified return type(s), MachineInstr opcode, and operands.
1466 ///
1467 /// Note that getMachineNode returns the resultant node. If there is already
1468 /// a node of the specified opcode and operands, it returns that node instead
1469 /// of the current one.
1470 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT);
1471 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT,
1472 SDValue Op1);
1473 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT,
1474 SDValue Op1, SDValue Op2);
1475 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT,
1476 SDValue Op1, SDValue Op2, SDValue Op3);
1477 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT,
1478 ArrayRef<SDValue> Ops);
1479 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
1480 EVT VT2, SDValue Op1, SDValue Op2);
1481 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
1482 EVT VT2, SDValue Op1, SDValue Op2, SDValue Op3);
1483 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
1484 EVT VT2, ArrayRef<SDValue> Ops);
1485 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
1486 EVT VT2, EVT VT3, SDValue Op1, SDValue Op2);
1487 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
1488 EVT VT2, EVT VT3, SDValue Op1, SDValue Op2,
1489 SDValue Op3);
1490 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
1491 EVT VT2, EVT VT3, ArrayRef<SDValue> Ops);
1492 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl,
1493 ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops);
1494 MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, SDVTList VTs,
1495 ArrayRef<SDValue> Ops);
1496
1497 /// A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
1498 SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT,
1499 SDValue Operand);
1500
1501 /// A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
1502 SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT,
1503 SDValue Operand, SDValue Subreg);
1504
1505 /// Get the specified node if it's already available, or else return NULL.
1506 SDNode *getNodeIfExists(unsigned Opcode, SDVTList VTList,
1507 ArrayRef<SDValue> Ops, const SDNodeFlags Flags);
1508 SDNode *getNodeIfExists(unsigned Opcode, SDVTList VTList,
1509 ArrayRef<SDValue> Ops);
1510
1511 /// Creates a SDDbgValue node.
1512 SDDbgValue *getDbgValue(DIVariable *Var, DIExpression *Expr, SDNode *N,
1513 unsigned R, bool IsIndirect, const DebugLoc &DL,
1514 unsigned O);
1515
1516 /// Creates a constant SDDbgValue node.
1517 SDDbgValue *getConstantDbgValue(DIVariable *Var, DIExpression *Expr,
1518 const Value *C, const DebugLoc &DL,
1519 unsigned O);
1520
1521 /// Creates a FrameIndex SDDbgValue node.
1522 SDDbgValue *getFrameIndexDbgValue(DIVariable *Var, DIExpression *Expr,
1523 unsigned FI, bool IsIndirect,
1524 const DebugLoc &DL, unsigned O);
1525
1526 /// Creates a VReg SDDbgValue node.
1527 SDDbgValue *getVRegDbgValue(DIVariable *Var, DIExpression *Expr,
1528 unsigned VReg, bool IsIndirect,
1529 const DebugLoc &DL, unsigned O);
1530
1531 /// Creates a SDDbgLabel node.
1532 SDDbgLabel *getDbgLabel(DILabel *Label, const DebugLoc &DL, unsigned O);
1533
1534 /// Transfer debug values from one node to another, while optionally
1535 /// generating fragment expressions for split-up values. If \p InvalidateDbg
1536 /// is set, debug values are invalidated after they are transferred.
1537 void transferDbgValues(SDValue From, SDValue To, unsigned OffsetInBits = 0,
1538 unsigned SizeInBits = 0, bool InvalidateDbg = true);
1539
1540 /// Remove the specified node from the system. If any of its
1541 /// operands then becomes dead, remove them as well. Inform UpdateListener
1542 /// for each node deleted.
1543 void RemoveDeadNode(SDNode *N);
1544
1545 /// This method deletes the unreachable nodes in the
1546 /// given list, and any nodes that become unreachable as a result.
1547 void RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes);
1548
1549 /// Modify anything using 'From' to use 'To' instead.
1550 /// This can cause recursive merging of nodes in the DAG. Use the first
1551 /// version if 'From' is known to have a single result, use the second
1552 /// if you have two nodes with identical results (or if 'To' has a superset
1553 /// of the results of 'From'), use the third otherwise.
1554 ///
1555 /// These methods all take an optional UpdateListener, which (if not null) is
1556 /// informed about nodes that are deleted and modified due to recursive
1557 /// changes in the dag.
1558 ///
1559 /// These functions only replace all existing uses. It's possible that as
1560 /// these replacements are being performed, CSE may cause the From node
1561 /// to be given new uses. These new uses of From are left in place, and
1562 /// not automatically transferred to To.
1563 ///
1564 void ReplaceAllUsesWith(SDValue From, SDValue To);
1565 void ReplaceAllUsesWith(SDNode *From, SDNode *To);
1566 void ReplaceAllUsesWith(SDNode *From, const SDValue *To);
1567
1568 /// Replace any uses of From with To, leaving
1569 /// uses of other values produced by From.getNode() alone.
1570 void ReplaceAllUsesOfValueWith(SDValue From, SDValue To);
1571
1572 /// Like ReplaceAllUsesOfValueWith, but for multiple values at once.
1573 /// This correctly handles the case where
1574 /// there is an overlap between the From values and the To values.
1575 void ReplaceAllUsesOfValuesWith(const SDValue *From, const SDValue *To,
1576 unsigned Num);
1577
1578 /// If an existing load has uses of its chain, create a token factor node with
1579 /// that chain and the new memory node's chain and update users of the old
1580 /// chain to the token factor. This ensures that the new memory node will have
1581 /// the same relative memory dependency position as the old load. Returns the
1582 /// new merged load chain.
1583 SDValue makeEquivalentMemoryOrdering(LoadSDNode *Old, SDValue New);
1584
1585 /// Topological-sort the AllNodes list and a
1586 /// assign a unique node id for each node in the DAG based on their
1587 /// topological order. Returns the number of nodes.
1588 unsigned AssignTopologicalOrder();
1589
1590 /// Move node N in the AllNodes list to be immediately
1591 /// before the given iterator Position. This may be used to update the
1592 /// topological ordering when the list of nodes is modified.
1593 void RepositionNode(allnodes_iterator Position, SDNode *N) {
1594 AllNodes.insert(Position, AllNodes.remove(N));
1595 }
1596
1597 /// Returns an APFloat semantics tag appropriate for the given type. If VT is
1598 /// a vector type, the element semantics are returned.
1599 static const fltSemantics &EVTToAPFloatSemantics(EVT VT) {
1600 switch (VT.getScalarType().getSimpleVT().SimpleTy) {
1601 default: llvm_unreachable("Unknown FP format")::llvm::llvm_unreachable_internal("Unknown FP format", "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAG.h"
, 1601)
;
1602 case MVT::f16: return APFloat::IEEEhalf();
1603 case MVT::bf16: return APFloat::BFloat();
1604 case MVT::f32: return APFloat::IEEEsingle();
1605 case MVT::f64: return APFloat::IEEEdouble();
1606 case MVT::f80: return APFloat::x87DoubleExtended();
1607 case MVT::f128: return APFloat::IEEEquad();
1608 case MVT::ppcf128: return APFloat::PPCDoubleDouble();
1609 }
1610 }
1611
1612 /// Add a dbg_value SDNode. If SD is non-null that means the
1613 /// value is produced by SD.
1614 void AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter);
1615
1616 /// Add a dbg_label SDNode.
1617 void AddDbgLabel(SDDbgLabel *DB);
1618
1619 /// Get the debug values which reference the given SDNode.
1620 ArrayRef<SDDbgValue*> GetDbgValues(const SDNode* SD) const {
1621 return DbgInfo->getSDDbgValues(SD);
1622 }
1623
1624public:
1625 /// Return true if there are any SDDbgValue nodes associated
1626 /// with this SelectionDAG.
1627 bool hasDebugValues() const { return !DbgInfo->empty(); }
1628
1629 SDDbgInfo::DbgIterator DbgBegin() const { return DbgInfo->DbgBegin(); }
1630 SDDbgInfo::DbgIterator DbgEnd() const { return DbgInfo->DbgEnd(); }
1631
1632 SDDbgInfo::DbgIterator ByvalParmDbgBegin() const {
1633 return DbgInfo->ByvalParmDbgBegin();
1634 }
1635 SDDbgInfo::DbgIterator ByvalParmDbgEnd() const {
1636 return DbgInfo->ByvalParmDbgEnd();
1637 }
1638
1639 SDDbgInfo::DbgLabelIterator DbgLabelBegin() const {
1640 return DbgInfo->DbgLabelBegin();
1641 }
1642 SDDbgInfo::DbgLabelIterator DbgLabelEnd() const {
1643 return DbgInfo->DbgLabelEnd();
1644 }
1645
1646 /// To be invoked on an SDNode that is slated to be erased. This
1647 /// function mirrors \c llvm::salvageDebugInfo.
1648 void salvageDebugInfo(SDNode &N);
1649
1650 void dump() const;
1651
1652 /// In most cases this function returns the ABI alignment for a given type,
1653 /// except for illegal vector types where the alignment exceeds that of the
1654 /// stack. In such cases we attempt to break the vector down to a legal type
1655 /// and return the ABI alignment for that instead.
1656 Align getReducedAlign(EVT VT, bool UseABI);
1657
1658 /// Create a stack temporary based on the size in bytes and the alignment
1659 SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment);
1660
1661 /// Create a stack temporary, suitable for holding the specified value type.
1662 /// If minAlign is specified, the slot size will have at least that alignment.
1663 SDValue CreateStackTemporary(EVT VT, unsigned minAlign = 1);
1664
1665 /// Create a stack temporary suitable for holding either of the specified
1666 /// value types.
1667 SDValue CreateStackTemporary(EVT VT1, EVT VT2);
1668
1669 SDValue FoldSymbolOffset(unsigned Opcode, EVT VT,
1670 const GlobalAddressSDNode *GA,
1671 const SDNode *N2);
1672
1673 SDValue FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, EVT VT,
1674 ArrayRef<SDValue> Ops);
1675
1676 SDValue FoldConstantVectorArithmetic(unsigned Opcode, const SDLoc &DL, EVT VT,
1677 ArrayRef<SDValue> Ops,
1678 const SDNodeFlags Flags = SDNodeFlags());
1679
1680 /// Fold floating-point operations with 2 operands when both operands are
1681 /// constants and/or undefined.
1682 SDValue foldConstantFPMath(unsigned Opcode, const SDLoc &DL, EVT VT,
1683 SDValue N1, SDValue N2);
1684
1685 /// Constant fold a setcc to true or false.
1686 SDValue FoldSetCC(EVT VT, SDValue N1, SDValue N2, ISD::CondCode Cond,
1687 const SDLoc &dl);
1688
1689 /// See if the specified operand can be simplified with the knowledge that
1690 /// only the bits specified by DemandedBits are used. If so, return the
1691 /// simpler operand, otherwise return a null SDValue.
1692 ///
1693 /// (This exists alongside SimplifyDemandedBits because GetDemandedBits can
1694 /// simplify nodes with multiple uses more aggressively.)
1695 SDValue GetDemandedBits(SDValue V, const APInt &DemandedBits);
1696
1697 /// See if the specified operand can be simplified with the knowledge that
1698 /// only the bits specified by DemandedBits are used in the elements specified
1699 /// by DemandedElts. If so, return the simpler operand, otherwise return a
1700 /// null SDValue.
1701 ///
1702 /// (This exists alongside SimplifyDemandedBits because GetDemandedBits can
1703 /// simplify nodes with multiple uses more aggressively.)
1704 SDValue GetDemandedBits(SDValue V, const APInt &DemandedBits,
1705 const APInt &DemandedElts);
1706
1707 /// Return true if the sign bit of Op is known to be zero.
1708 /// We use this predicate to simplify operations downstream.
1709 bool SignBitIsZero(SDValue Op, unsigned Depth = 0) const;
1710
1711 /// Return true if 'Op & Mask' is known to be zero. We
1712 /// use this predicate to simplify operations downstream. Op and Mask are
1713 /// known to be the same type.
1714 bool MaskedValueIsZero(SDValue Op, const APInt &Mask,
1715 unsigned Depth = 0) const;
1716
1717 /// Return true if 'Op & Mask' is known to be zero in DemandedElts. We
1718 /// use this predicate to simplify operations downstream. Op and Mask are
1719 /// known to be the same type.
1720 bool MaskedValueIsZero(SDValue Op, const APInt &Mask,
1721 const APInt &DemandedElts, unsigned Depth = 0) const;
1722
1723 /// Return true if '(Op & Mask) == Mask'.
1724 /// Op and Mask are known to be the same type.
1725 bool MaskedValueIsAllOnes(SDValue Op, const APInt &Mask,
1726 unsigned Depth = 0) const;
1727
1728 /// Determine which bits of Op are known to be either zero or one and return
1729 /// them in Known. For vectors, the known bits are those that are shared by
1730 /// every vector element.
1731 /// Targets can implement the computeKnownBitsForTargetNode method in the
1732 /// TargetLowering class to allow target nodes to be understood.
1733 KnownBits computeKnownBits(SDValue Op, unsigned Depth = 0) const;
1734
1735 /// Determine which bits of Op are known to be either zero or one and return
1736 /// them in Known. The DemandedElts argument allows us to only collect the
1737 /// known bits that are shared by the requested vector elements.
1738 /// Targets can implement the computeKnownBitsForTargetNode method in the
1739 /// TargetLowering class to allow target nodes to be understood.
1740 KnownBits computeKnownBits(SDValue Op, const APInt &DemandedElts,
1741 unsigned Depth = 0) const;
1742
1743 /// Used to represent the possible overflow behavior of an operation.
1744 /// Never: the operation cannot overflow.
1745 /// Always: the operation will always overflow.
1746 /// Sometime: the operation may or may not overflow.
1747 enum OverflowKind {
1748 OFK_Never,
1749 OFK_Sometime,
1750 OFK_Always,
1751 };
1752
1753 /// Determine if the result of the addition of 2 node can overflow.
1754 OverflowKind computeOverflowKind(SDValue N0, SDValue N1) const;
1755
1756 /// Test if the given value is known to have exactly one bit set. This differs
1757 /// from computeKnownBits in that it doesn't necessarily determine which bit
1758 /// is set.
1759 bool isKnownToBeAPowerOfTwo(SDValue Val) const;
1760
1761 /// Return the number of times the sign bit of the register is replicated into
1762 /// the other bits. We know that at least 1 bit is always equal to the sign
1763 /// bit (itself), but other cases can give us information. For example,
1764 /// immediately after an "SRA X, 2", we know that the top 3 bits are all equal
1765 /// to each other, so we return 3. Targets can implement the
1766 /// ComputeNumSignBitsForTarget method in the TargetLowering class to allow
1767 /// target nodes to be understood.
1768 unsigned ComputeNumSignBits(SDValue Op, unsigned Depth = 0) const;
1769
1770 /// Return the number of times the sign bit of the register is replicated into
1771 /// the other bits. We know that at least 1 bit is always equal to the sign
1772 /// bit (itself), but other cases can give us information. For example,
1773 /// immediately after an "SRA X, 2", we know that the top 3 bits are all equal
1774 /// to each other, so we return 3. The DemandedElts argument allows
1775 /// us to only collect the minimum sign bits of the requested vector elements.
1776 /// Targets can implement the ComputeNumSignBitsForTarget method in the
1777 /// TargetLowering class to allow target nodes to be understood.
1778 unsigned ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
1779 unsigned Depth = 0) const;
1780
1781 /// Return true if the specified operand is an ISD::ADD with a ConstantSDNode
1782 /// on the right-hand side, or if it is an ISD::OR with a ConstantSDNode that
1783 /// is guaranteed to have the same semantics as an ADD. This handles the
1784 /// equivalence:
1785 /// X|Cst == X+Cst iff X&Cst = 0.
1786 bool isBaseWithConstantOffset(SDValue Op) const;
1787
1788 /// Test whether the given SDValue is known to never be NaN. If \p SNaN is
1789 /// true, returns if \p Op is known to never be a signaling NaN (it may still
1790 /// be a qNaN).
1791 bool isKnownNeverNaN(SDValue Op, bool SNaN = false, unsigned Depth = 0) const;
1792
1793 /// \returns true if \p Op is known to never be a signaling NaN.
1794 bool isKnownNeverSNaN(SDValue Op, unsigned Depth = 0) const {
1795 return isKnownNeverNaN(Op, true, Depth);
1796 }
1797
1798 /// Test whether the given floating point SDValue is known to never be
1799 /// positive or negative zero.
1800 bool isKnownNeverZeroFloat(SDValue Op) const;
1801
1802 /// Test whether the given SDValue is known to contain non-zero value(s).
1803 bool isKnownNeverZero(SDValue Op) const;
1804
1805 /// Test whether two SDValues are known to compare equal. This
1806 /// is true if they are the same value, or if one is negative zero and the
1807 /// other positive zero.
1808 bool isEqualTo(SDValue A, SDValue B) const;
1809
1810 /// Return true if A and B have no common bits set. As an example, this can
1811 /// allow an 'add' to be transformed into an 'or'.
1812 bool haveNoCommonBitsSet(SDValue A, SDValue B) const;
1813
1814 /// Test whether \p V has a splatted value for all the demanded elements.
1815 ///
1816 /// On success \p UndefElts will indicate the elements that have UNDEF
1817 /// values instead of the splat value, this is only guaranteed to be correct
1818 /// for \p DemandedElts.
1819 ///
1820 /// NOTE: The function will return true for a demanded splat of UNDEF values.
1821 bool isSplatValue(SDValue V, const APInt &DemandedElts, APInt &UndefElts);
1822
1823 /// Test whether \p V has a splatted value.
1824 bool isSplatValue(SDValue V, bool AllowUndefs = false);
1825
1826 /// If V is a splatted value, return the source vector and its splat index.
1827 SDValue getSplatSourceVector(SDValue V, int &SplatIndex);
1828
1829 /// If V is a splat vector, return its scalar source operand by extracting
1830 /// that element from the source vector.
1831 SDValue getSplatValue(SDValue V);
1832
1833 /// If a SHL/SRA/SRL node \p V has a constant or splat constant shift amount
1834 /// that is less than the element bit-width of the shift node, return it.
1835 const APInt *getValidShiftAmountConstant(SDValue V,
1836 const APInt &DemandedElts) const;
1837
1838 /// If a SHL/SRA/SRL node \p V has constant shift amounts that are all less
1839 /// than the element bit-width of the shift node, return the minimum value.
1840 const APInt *
1841 getValidMinimumShiftAmountConstant(SDValue V,
1842 const APInt &DemandedElts) const;
1843
1844 /// If a SHL/SRA/SRL node \p V has constant shift amounts that are all less
1845 /// than the element bit-width of the shift node, return the maximum value.
1846 const APInt *
1847 getValidMaximumShiftAmountConstant(SDValue V,
1848 const APInt &DemandedElts) const;
1849
1850 /// Match a binop + shuffle pyramid that represents a horizontal reduction
1851 /// over the elements of a vector starting from the EXTRACT_VECTOR_ELT node /p
1852 /// Extract. The reduction must use one of the opcodes listed in /p
1853 /// CandidateBinOps and on success /p BinOp will contain the matching opcode.
1854 /// Returns the vector that is being reduced on, or SDValue() if a reduction
1855 /// was not matched. If \p AllowPartials is set then in the case of a
1856 /// reduction pattern that only matches the first few stages, the extracted
1857 /// subvector of the start of the reduction is returned.
1858 SDValue matchBinOpReduction(SDNode *Extract, ISD::NodeType &BinOp,
1859 ArrayRef<ISD::NodeType> CandidateBinOps,
1860 bool AllowPartials = false);
1861
1862 /// Utility function used by legalize and lowering to
1863 /// "unroll" a vector operation by splitting out the scalars and operating
1864 /// on each element individually. If the ResNE is 0, fully unroll the vector
1865 /// op. If ResNE is less than the width of the vector op, unroll up to ResNE.
1866 /// If the ResNE is greater than the width of the vector op, unroll the
1867 /// vector op and fill the end of the resulting vector with UNDEFS.
1868 SDValue UnrollVectorOp(SDNode *N, unsigned ResNE = 0);
1869
1870 /// Like UnrollVectorOp(), but for the [US](ADD|SUB|MUL)O family of opcodes.
1871 /// This is a separate function because those opcodes have two results.
1872 std::pair<SDValue, SDValue> UnrollVectorOverflowOp(SDNode *N,
1873 unsigned ResNE = 0);
1874
1875 /// Return true if loads are next to each other and can be
1876 /// merged. Check that both are nonvolatile and if LD is loading
1877 /// 'Bytes' bytes from a location that is 'Dist' units away from the
1878 /// location that the 'Base' load is loading from.
1879 bool areNonVolatileConsecutiveLoads(LoadSDNode *LD, LoadSDNode *Base,
1880 unsigned Bytes, int Dist) const;
1881
1882 /// Infer alignment of a load / store address. Return None if it cannot be
1883 /// inferred.
1884 MaybeAlign InferPtrAlign(SDValue Ptr) const;
1885
1886 LLVM_ATTRIBUTE_DEPRECATED(inline unsigned InferPtrAlignment(SDValue Ptr)inline unsigned InferPtrAlignment(SDValue Ptr) const __attribute__
((deprecated("Use InferPtrAlign instead")))
1887 const,inline unsigned InferPtrAlignment(SDValue Ptr) const __attribute__
((deprecated("Use InferPtrAlign instead")))
1888 "Use InferPtrAlign instead")inline unsigned InferPtrAlignment(SDValue Ptr) const __attribute__
((deprecated("Use InferPtrAlign instead")))
{
1889 if (auto A = InferPtrAlign(Ptr))
1890 return A->value();
1891 return 0;
1892 }
1893
1894 /// Compute the VTs needed for the low/hi parts of a type
1895 /// which is split (or expanded) into two not necessarily identical pieces.
1896 std::pair<EVT, EVT> GetSplitDestVTs(const EVT &VT) const;
1897
1898 /// Compute the VTs needed for the low/hi parts of a type, dependent on an
1899 /// enveloping VT that has been split into two identical pieces. Sets the
1900 /// HisIsEmpty flag when hi type has zero storage size.
1901 std::pair<EVT, EVT> GetDependentSplitDestVTs(const EVT &VT, const EVT &EnvVT,
1902 bool *HiIsEmpty) const;
1903
1904 /// Split the vector with EXTRACT_SUBVECTOR using the provides
1905 /// VTs and return the low/high part.
1906 std::pair<SDValue, SDValue> SplitVector(const SDValue &N, const SDLoc &DL,
1907 const EVT &LoVT, const EVT &HiVT);
1908
1909 /// Split the vector with EXTRACT_SUBVECTOR and return the low/high part.
1910 std::pair<SDValue, SDValue> SplitVector(const SDValue &N, const SDLoc &DL) {
1911 EVT LoVT, HiVT;
1912 std::tie(LoVT, HiVT) = GetSplitDestVTs(N.getValueType());
1913 return SplitVector(N, DL, LoVT, HiVT);
1914 }
1915
1916 /// Split the node's operand with EXTRACT_SUBVECTOR and
1917 /// return the low/high part.
1918 std::pair<SDValue, SDValue> SplitVectorOperand(const SDNode *N, unsigned OpNo)
1919 {
1920 return SplitVector(N->getOperand(OpNo), SDLoc(N));
1921 }
1922
1923 /// Widen the vector up to the next power of two using INSERT_SUBVECTOR.
1924 SDValue WidenVector(const SDValue &N, const SDLoc &DL);
1925
1926 /// Append the extracted elements from Start to Count out of the vector Op in
1927 /// Args. If Count is 0, all of the elements will be extracted. The extracted
1928 /// elements will have type EVT if it is provided, and otherwise their type
1929 /// will be Op's element type.
1930 void ExtractVectorElements(SDValue Op, SmallVectorImpl<SDValue> &Args,
1931 unsigned Start = 0, unsigned Count = 0,
1932 EVT EltVT = EVT());
1933
1934 /// Compute the default alignment value for the given type.
1935 Align getEVTAlign(EVT MemoryVT) const;
1936 /// Compute the default alignment value for the given type.
1937 /// FIXME: Remove once transition to Align is over.
1938 inline unsigned getEVTAlignment(EVT MemoryVT) const {
1939 return getEVTAlign(MemoryVT).value();
1940 }
1941
1942 /// Test whether the given value is a constant int or similar node.
1943 SDNode *isConstantIntBuildVectorOrConstantInt(SDValue N);
1944
1945 /// Test whether the given value is a constant FP or similar node.
1946 SDNode *isConstantFPBuildVectorOrConstantFP(SDValue N);
1947
1948 /// \returns true if \p N is any kind of constant or build_vector of
1949 /// constants, int or float. If a vector, it may not necessarily be a splat.
1950 inline bool isConstantValueOfAnyType(SDValue N) {
1951 return isConstantIntBuildVectorOrConstantInt(N) ||
1952 isConstantFPBuildVectorOrConstantFP(N);
1953 }
1954
1955 void addCallSiteInfo(const SDNode *CallNode, CallSiteInfoImpl &&CallInfo) {
1956 SDCallSiteDbgInfo[CallNode].CSInfo = std::move(CallInfo);
1957 }
1958
/// Return the call-site info recorded for \p CallNode (moving it out), or a
/// default-constructed CallSiteInfo if none was recorded.
/// NOTE(review): on a hit this moves CSInfo out but leaves the (moved-from)
/// map entry in place -- presumably each call node is queried at most once;
/// confirm against callers before relying on repeated lookups.
CallSiteInfo getSDCallSiteInfo(const SDNode *CallNode) {
  auto I = SDCallSiteDbgInfo.find(CallNode);
  if (I != SDCallSiteDbgInfo.end())
    return std::move(I->second).CSInfo;
  return CallSiteInfo();
}
1965
1966 void addHeapAllocSite(const SDNode *Node, MDNode *MD) {
1967 SDCallSiteDbgInfo[Node].HeapAllocSite = MD;
1968 }
1969
1970 /// Return the HeapAllocSite type associated with the SDNode, if it exists.
1971 MDNode *getHeapAllocSite(const SDNode *Node) {
1972 auto It = SDCallSiteDbgInfo.find(Node);
1973 if (It == SDCallSiteDbgInfo.end())
1974 return nullptr;
1975 return It->second.HeapAllocSite;
1976 }
1977
1978 void addNoMergeSiteInfo(const SDNode *Node, bool NoMerge) {
1979 if (NoMerge)
1980 SDCallSiteDbgInfo[Node].NoMerge = NoMerge;
1981 }
1982
1983 bool getNoMergeSiteInfo(const SDNode *Node) {
1984 auto I = SDCallSiteDbgInfo.find(Node);
1985 if (I == SDCallSiteDbgInfo.end())
1986 return false;
1987 return I->second.NoMerge;
1988 }
1989
/// Return the current function's default denormal handling kind for the given
/// floating point type.
/// NOTE: dereferences MF unconditionally -- only valid once the DAG has been
/// initialized for a MachineFunction.
DenormalMode getDenormalMode(EVT VT) const {
  return MF->getDenormalMode(EVTToAPFloatSemantics(VT));
}
1995
1996 bool shouldOptForSize() const;
1997
1998private:
1999 void InsertNode(SDNode *N);
2000 bool RemoveNodeFromCSEMaps(SDNode *N);
2001 void AddModifiedNodeToCSEMaps(SDNode *N);
2002 SDNode *FindModifiedNodeSlot(SDNode *N, SDValue Op, void *&InsertPos);
2003 SDNode *FindModifiedNodeSlot(SDNode *N, SDValue Op1, SDValue Op2,
2004 void *&InsertPos);
2005 SDNode *FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
2006 void *&InsertPos);
2007 SDNode *UpdateSDLocOnMergeSDNode(SDNode *N, const SDLoc &loc);
2008
2009 void DeleteNodeNotInCSEMaps(SDNode *N);
2010 void DeallocateNode(SDNode *N);
2011
2012 void allnodes_clear();
2013
2014 /// Look up the node specified by ID in CSEMap. If it exists, return it. If
2015 /// not, return the insertion token that will make insertion faster. This
2016 /// overload is for nodes other than Constant or ConstantFP, use the other one
2017 /// for those.
2018 SDNode *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos);
2019
2020 /// Look up the node specified by ID in CSEMap. If it exists, return it. If
2021 /// not, return the insertion token that will make insertion faster. Performs
2022 /// additional processing for constant nodes.
2023 SDNode *FindNodeOrInsertPos(const FoldingSetNodeID &ID, const SDLoc &DL,
2024 void *&InsertPos);
2025
2026 /// List of non-single value types.
2027 FoldingSet<SDVTListNode> VTListMap;
2028
2029 /// Maps to auto-CSE operations.
2030 std::vector<CondCodeSDNode*> CondCodeNodes;
2031
2032 std::vector<SDNode*> ValueTypeNodes;
2033 std::map<EVT, SDNode*, EVT::compareRawBits> ExtendedValueTypeNodes;
2034 StringMap<SDNode*> ExternalSymbols;
2035
2036 std::map<std::pair<std::string, unsigned>, SDNode *> TargetExternalSymbols;
2037 DenseMap<MCSymbol *, SDNode *> MCSymbols;
2038
2039 FlagInserter *Inserter = nullptr;
2040};
2041
2042template <> struct GraphTraits<SelectionDAG*> : public GraphTraits<SDNode*> {
2043 using nodes_iterator = pointer_iterator<SelectionDAG::allnodes_iterator>;
2044
2045 static nodes_iterator nodes_begin(SelectionDAG *G) {
2046 return nodes_iterator(G->allnodes_begin());
2047 }
2048
2049 static nodes_iterator nodes_end(SelectionDAG *G) {
2050 return nodes_iterator(G->allnodes_end());
2051 }
2052};
2053
2054} // end namespace llvm
2055
2056#endif // LLVM_CODEGEN_SELECTIONDAG_H

/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAGNodes.h

1//===- llvm/CodeGen/SelectionDAGNodes.h - SelectionDAG Nodes ----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file declares the SDNode class and derived classes, which are used to
10// represent the nodes and operations present in a SelectionDAG. These nodes
11// and operations are machine code level operations, with some similarities to
12// the GCC RTL representation.
13//
14// Clients should include the SelectionDAG.h file instead of this file directly.
15//
16//===----------------------------------------------------------------------===//
17
18#ifndef LLVM_CODEGEN_SELECTIONDAGNODES_H
19#define LLVM_CODEGEN_SELECTIONDAGNODES_H
20
21#include "llvm/ADT/APFloat.h"
22#include "llvm/ADT/ArrayRef.h"
23#include "llvm/ADT/BitVector.h"
24#include "llvm/ADT/FoldingSet.h"
25#include "llvm/ADT/GraphTraits.h"
26#include "llvm/ADT/SmallPtrSet.h"
27#include "llvm/ADT/SmallVector.h"
28#include "llvm/ADT/ilist_node.h"
29#include "llvm/ADT/iterator.h"
30#include "llvm/ADT/iterator_range.h"
31#include "llvm/CodeGen/ISDOpcodes.h"
32#include "llvm/CodeGen/MachineMemOperand.h"
33#include "llvm/CodeGen/Register.h"
34#include "llvm/CodeGen/ValueTypes.h"
35#include "llvm/IR/Constants.h"
36#include "llvm/IR/DebugLoc.h"
37#include "llvm/IR/Instruction.h"
38#include "llvm/IR/Instructions.h"
39#include "llvm/IR/Metadata.h"
40#include "llvm/IR/Operator.h"
41#include "llvm/Support/AlignOf.h"
42#include "llvm/Support/AtomicOrdering.h"
43#include "llvm/Support/Casting.h"
44#include "llvm/Support/ErrorHandling.h"
45#include "llvm/Support/MachineValueType.h"
46#include "llvm/Support/TypeSize.h"
47#include <algorithm>
48#include <cassert>
49#include <climits>
50#include <cstddef>
51#include <cstdint>
52#include <cstring>
53#include <iterator>
54#include <string>
55#include <tuple>
56
57namespace llvm {
58
59class APInt;
60class Constant;
61template <typename T> struct DenseMapInfo;
62class GlobalValue;
63class MachineBasicBlock;
64class MachineConstantPoolValue;
65class MCSymbol;
66class raw_ostream;
67class SDNode;
68class SelectionDAG;
69class Type;
70class Value;
71
72void checkForCycles(const SDNode *N, const SelectionDAG *DAG = nullptr,
73 bool force = false);
74
/// This represents a list of ValueType's that has been intern'd by
/// a SelectionDAG. Instances of this simple value class are returned by
/// SelectionDAG::getVTList(...).
///
struct SDVTList {
  const EVT *VTs;       // Interned array of value types; owned by the DAG.
  unsigned int NumVTs;  // Number of entries in VTs.
};
83
namespace ISD {

  /// Node predicates. These inspect an SDNode purely structurally; none of
  /// them mutate the node.

  /// If N is a BUILD_VECTOR node whose elements are all the same constant or
  /// undefined, return true and return the constant value in \p SplatValue.
  bool isConstantSplatVector(const SDNode *N, APInt &SplatValue);

  /// Return true if the specified node is a BUILD_VECTOR where all of the
  /// elements are ~0 or undef.
  bool isBuildVectorAllOnes(const SDNode *N);

  /// Return true if the specified node is a BUILD_VECTOR where all of the
  /// elements are 0 or undef.
  bool isBuildVectorAllZeros(const SDNode *N);

  /// Return true if the specified node is a BUILD_VECTOR node of all
  /// ConstantSDNode or undef.
  bool isBuildVectorOfConstantSDNodes(const SDNode *N);

  /// Return true if the specified node is a BUILD_VECTOR node of all
  /// ConstantFPSDNode or undef.
  bool isBuildVectorOfConstantFPSDNodes(const SDNode *N);

  /// Return true if the node has at least one operand and all operands of the
  /// specified node are ISD::UNDEF.
  bool allOperandsUndef(const SDNode *N);

} // end namespace ISD
113
114//===----------------------------------------------------------------------===//
115/// Unlike LLVM values, Selection DAG nodes may return multiple
116/// values as the result of a computation. Many nodes return multiple values,
117/// from loads (which define a token and a return value) to ADDC (which returns
118/// a result and a carry value), to calls (which may return an arbitrary number
119/// of values).
120///
121/// As such, each use of a SelectionDAG computation must indicate the node that
122/// computes it as well as which return value to use from that node. This pair
123/// of information is represented with the SDValue value type.
124///
class SDValue {
  friend struct DenseMapInfo<SDValue>;

  SDNode *Node = nullptr; // The node defining the value we are using.
  unsigned ResNo = 0;     // Which return value of the node we are using.

public:
  SDValue() = default;
  SDValue(SDNode *node, unsigned resno);

  /// get the index which selects a specific result in the SDNode
  unsigned getResNo() const { return ResNo; }

  /// get the SDNode which holds the desired result
  SDNode *getNode() const { return Node; }

  /// set the SDNode
  void setNode(SDNode *N) { Node = N; }

  // NOTE(review): no null check here -- dereferencing a default-constructed
  // (null) SDValue through operator-> is undefined behavior.
  inline SDNode *operator->() const { return Node; }

  bool operator==(const SDValue &O) const {
    return Node == O.Node && ResNo == O.ResNo;
  }
  bool operator!=(const SDValue &O) const {
    return !operator==(O);
  }
  bool operator<(const SDValue &O) const {
    // Ordered lexicographically by (Node pointer, result number).
    return std::tie(Node, ResNo) < std::tie(O.Node, O.ResNo);
  }
  explicit operator bool() const {
    return Node != nullptr;
  }

  /// Return result number \p R of the same defining node.
  SDValue getValue(unsigned R) const {
    return SDValue(Node, R);
  }

  /// Return true if this node is an operand of N.
  bool isOperandOf(const SDNode *N) const;

  /// Return the ValueType of the referenced return value.
  inline EVT getValueType() const;

  /// Return the simple ValueType of the referenced return value.
  MVT getSimpleValueType() const {
    return getValueType().getSimpleVT();
  }

  /// Returns the size of the value in bits.
  ///
  /// If the value type is a scalable vector type, the scalable property will
  /// be set and the runtime size will be a positive integer multiple of the
  /// base size.
  TypeSize getValueSizeInBits() const {
    return getValueType().getSizeInBits();
  }

  /// Size in bits of the value's scalar (element) type; always a fixed size.
  uint64_t getScalarValueSizeInBits() const {
    return getValueType().getScalarType().getSizeInBits().getFixedSize();
  }

  // Forwarding methods - These forward to the corresponding methods in SDNode.
  inline unsigned getOpcode() const;
  inline unsigned getNumOperands() const;
  inline const SDValue &getOperand(unsigned i) const;
  inline uint64_t getConstantOperandVal(unsigned i) const;
  inline const APInt &getConstantOperandAPInt(unsigned i) const;
  inline bool isTargetMemoryOpcode() const;
  inline bool isTargetOpcode() const;
  inline bool isMachineOpcode() const;
  inline bool isUndef() const;
  inline unsigned getMachineOpcode() const;
  inline const DebugLoc &getDebugLoc() const;
  inline void dump() const;
  inline void dump(const SelectionDAG *G) const;
  inline void dumpr() const;
  inline void dumpr(const SelectionDAG *G) const;

  /// Return true if this operand (which must be a chain) reaches the
  /// specified operand without crossing any side-effecting instructions.
  /// In practice, this looks through token factors and non-volatile loads.
  /// In order to remain efficient, this only
  /// looks a couple of nodes in, it does not do an exhaustive search.
  bool reachesChainWithoutSideEffects(SDValue Dest,
                                      unsigned Depth = 2) const;

  /// Return true if there are no nodes using value ResNo of Node.
  inline bool use_empty() const;

  /// Return true if there is exactly one node using value ResNo of Node.
  inline bool hasOneUse() const;
};
218
219template<> struct DenseMapInfo<SDValue> {
220 static inline SDValue getEmptyKey() {
221 SDValue V;
222 V.ResNo = -1U;
223 return V;
224 }
225
226 static inline SDValue getTombstoneKey() {
227 SDValue V;
228 V.ResNo = -2U;
229 return V;
230 }
231
232 static unsigned getHashValue(const SDValue &Val) {
233 return ((unsigned)((uintptr_t)Val.getNode() >> 4) ^
234 (unsigned)((uintptr_t)Val.getNode() >> 9)) + Val.getResNo();
235 }
236
237 static bool isEqual(const SDValue &LHS, const SDValue &RHS) {
238 return LHS == RHS;
239 }
240};
241
/// Allow casting operators to work directly on
/// SDValues as if they were SDNode*'s.
template<> struct simplify_type<SDValue> {
  using SimpleType = SDNode *;

  static SimpleType getSimplifiedValue(SDValue &Val) {
    return Val.getNode(); // may be null for a default-constructed SDValue
  }
};
template<> struct simplify_type<const SDValue> {
  using SimpleType = /*const*/ SDNode *;

  static SimpleType getSimplifiedValue(const SDValue &Val) {
    return Val.getNode();
  }
};
258
259/// Represents a use of a SDNode. This class holds an SDValue,
260/// which records the SDNode being used and the result number, a
261/// pointer to the SDNode using the value, and Next and Prev pointers,
262/// which link together all the uses of an SDNode.
263///
class SDUse {
  /// Val - The value being used.
  SDValue Val;
  /// User - The user of this value.
  SDNode *User = nullptr;
  /// Prev, Next - Pointers to the uses list of the SDNode referred by
  /// this operand. Prev points at the forward link that targets this use
  /// (either the list head or the previous use's Next), enabling O(1) unlink.
  SDUse **Prev = nullptr;
  SDUse *Next = nullptr;

public:
  SDUse() = default;
  SDUse(const SDUse &U) = delete;          // uses live on intrusive lists;
  SDUse &operator=(const SDUse &) = delete; // copying would corrupt them

  /// Normally SDUse will just implicitly convert to an SDValue that it holds.
  operator const SDValue&() const { return Val; }

  /// If implicit conversion to SDValue doesn't work, the get() method returns
  /// the SDValue.
  const SDValue &get() const { return Val; }

  /// This returns the SDNode that contains this Use.
  SDNode *getUser() { return User; }

  /// Get the next SDUse in the use list.
  SDUse *getNext() const { return Next; }

  /// Convenience function for get().getNode().
  SDNode *getNode() const { return Val.getNode(); }
  /// Convenience function for get().getResNo().
  unsigned getResNo() const { return Val.getResNo(); }
  /// Convenience function for get().getValueType().
  EVT getValueType() const { return Val.getValueType(); }

  /// Convenience function for get().operator==
  bool operator==(const SDValue &V) const {
    return Val == V;
  }

  /// Convenience function for get().operator!=
  bool operator!=(const SDValue &V) const {
    return Val != V;
  }

  /// Convenience function for get().operator<
  bool operator<(const SDValue &V) const {
    return Val < V;
  }

private:
  friend class SelectionDAG;
  friend class SDNode;
  // TODO: unfriend HandleSDNode once we fix its operand handling.
  friend class HandleSDNode;

  void setUser(SDNode *p) { User = p; }

  /// Remove this use from its existing use list, assign it the
  /// given value, and add it to the new value's node's use list.
  inline void set(const SDValue &V);
  /// Like set, but only supports initializing a newly-allocated
  /// SDUse with a non-null value.
  inline void setInitial(const SDValue &V);
  /// Like set, but only sets the Node portion of the value,
  /// leaving the ResNo portion unmodified.
  inline void setNode(SDNode *N);

  /// Push this use onto the front of the use list headed at *List.
  void addToList(SDUse **List) {
    Next = *List;
    if (Next) Next->Prev = &Next;
    Prev = List;
    *List = this;
  }

  /// Unlink this use from its current list.
  /// NOTE(review): assumes the use is on a list (Prev non-null); calling
  /// this on an unlinked SDUse dereferences a null Prev.
  void removeFromList() {
    *Prev = Next;
    if (Next) Next->Prev = Prev;
  }
};
344
345/// simplify_type specializations - Allow casting operators to work directly on
346/// SDValues as if they were SDNode*'s.
/// simplify_type specialization - Allow casting operators to work directly on
/// an SDUse as if it were an SDNode*.
template<> struct simplify_type<SDUse> {
  using SimpleType = SDNode *;

  static SimpleType getSimplifiedValue(SDUse &Val) {
    return Val.getNode();
  }
};
354
355/// These are IR-level optimization flags that may be propagated to SDNodes.
356/// TODO: This data structure should be shared by the IR optimizer and the
357/// the backend.
struct SDNodeFlags {
private:
  // Integer wrap/exact flags.
  bool NoUnsignedWrap : 1;
  bool NoSignedWrap : 1;
  bool Exact : 1;
  // Fast-math flags, mirroring the IR-level FastMathFlags.
  bool NoNaNs : 1;
  bool NoInfs : 1;
  bool NoSignedZeros : 1;
  bool AllowReciprocal : 1;
  bool AllowContract : 1;
  bool ApproximateFuncs : 1;
  bool AllowReassociation : 1;

  // We assume instructions do not raise floating-point exceptions by default,
  // and only those marked explicitly may do so. We could choose to represent
  // this via a positive "FPExcept" flags like on the MI level, but having a
  // negative "NoFPExcept" flag here (that defaults to true) makes the flag
  // intersection logic more straightforward.
  bool NoFPExcept : 1;

public:
  /// Default constructor turns off all optimization flags.
  SDNodeFlags()
      : NoUnsignedWrap(false), NoSignedWrap(false), Exact(false), NoNaNs(false),
        NoInfs(false), NoSignedZeros(false), AllowReciprocal(false),
        AllowContract(false), ApproximateFuncs(false),
        AllowReassociation(false), NoFPExcept(false) {}

  /// Propagate the fast-math-flags from an IR FPMathOperator.
  /// Note: copies only the FP-related flags; the integer wrap/exact flags
  /// are left untouched.
  void copyFMF(const FPMathOperator &FPMO) {
    setNoNaNs(FPMO.hasNoNaNs());
    setNoInfs(FPMO.hasNoInfs());
    setNoSignedZeros(FPMO.hasNoSignedZeros());
    setAllowReciprocal(FPMO.hasAllowReciprocal());
    setAllowContract(FPMO.hasAllowContract());
    setApproximateFuncs(FPMO.hasApproxFunc());
    setAllowReassociation(FPMO.hasAllowReassoc());
  }

  // These are mutators for each flag.
  void setNoUnsignedWrap(bool b) { NoUnsignedWrap = b; }
  void setNoSignedWrap(bool b) { NoSignedWrap = b; }
  void setExact(bool b) { Exact = b; }
  void setNoNaNs(bool b) { NoNaNs = b; }
  void setNoInfs(bool b) { NoInfs = b; }
  void setNoSignedZeros(bool b) { NoSignedZeros = b; }
  void setAllowReciprocal(bool b) { AllowReciprocal = b; }
  void setAllowContract(bool b) { AllowContract = b; }
  void setApproximateFuncs(bool b) { ApproximateFuncs = b; }
  void setAllowReassociation(bool b) { AllowReassociation = b; }
  void setNoFPExcept(bool b) { NoFPExcept = b; }

  // These are accessors for each flag.
  bool hasNoUnsignedWrap() const { return NoUnsignedWrap; }
  bool hasNoSignedWrap() const { return NoSignedWrap; }
  bool hasExact() const { return Exact; }
  bool hasNoNaNs() const { return NoNaNs; }
  bool hasNoInfs() const { return NoInfs; }
  bool hasNoSignedZeros() const { return NoSignedZeros; }
  bool hasAllowReciprocal() const { return AllowReciprocal; }
  bool hasAllowContract() const { return AllowContract; }
  bool hasApproximateFuncs() const { return ApproximateFuncs; }
  bool hasAllowReassociation() const { return AllowReassociation; }
  bool hasNoFPExcept() const { return NoFPExcept; }

  /// Clear any flags in this flag set that aren't also set in Flags. All
  /// flags will be cleared if Flags are undefined.
  void intersectWith(const SDNodeFlags Flags) {
    NoUnsignedWrap &= Flags.NoUnsignedWrap;
    NoSignedWrap &= Flags.NoSignedWrap;
    Exact &= Flags.Exact;
    NoNaNs &= Flags.NoNaNs;
    NoInfs &= Flags.NoInfs;
    NoSignedZeros &= Flags.NoSignedZeros;
    AllowReciprocal &= Flags.AllowReciprocal;
    AllowContract &= Flags.AllowContract;
    ApproximateFuncs &= Flags.ApproximateFuncs;
    AllowReassociation &= Flags.AllowReassociation;
    NoFPExcept &= Flags.NoFPExcept;
  }
};
439
440/// Represents one node in the SelectionDAG.
441///
442class SDNode : public FoldingSetNode, public ilist_node<SDNode> {
443private:
444 /// The operation that this node performs.
445 int16_t NodeType;
446
447protected:
448 // We define a set of mini-helper classes to help us interpret the bits in our
449 // SubclassData. These are designed to fit within a uint16_t so they pack
450 // with NodeType.
451
452#if defined(_AIX) && (!defined(__GNUC__4) || defined(__ibmxl__))
453// Except for GCC; by default, AIX compilers store bit-fields in 4-byte words
454// and give the `pack` pragma push semantics.
455#define BEGIN_TWO_BYTE_PACK() _Pragma("pack(2)")pack(2)
456#define END_TWO_BYTE_PACK() _Pragma("pack(pop)")pack(pop)
457#else
458#define BEGIN_TWO_BYTE_PACK()
459#define END_TWO_BYTE_PACK()
460#endif
461
462BEGIN_TWO_BYTE_PACK()
463 class SDNodeBitfields {
464 friend class SDNode;
465 friend class MemIntrinsicSDNode;
466 friend class MemSDNode;
467 friend class SelectionDAG;
468
469 uint16_t HasDebugValue : 1;
470 uint16_t IsMemIntrinsic : 1;
471 uint16_t IsDivergent : 1;
472 };
473 enum { NumSDNodeBits = 3 };
474
475 class ConstantSDNodeBitfields {
476 friend class ConstantSDNode;
477
478 uint16_t : NumSDNodeBits;
479
480 uint16_t IsOpaque : 1;
481 };
482
483 class MemSDNodeBitfields {
484 friend class MemSDNode;
485 friend class MemIntrinsicSDNode;
486 friend class AtomicSDNode;
487
488 uint16_t : NumSDNodeBits;
489
490 uint16_t IsVolatile : 1;
491 uint16_t IsNonTemporal : 1;
492 uint16_t IsDereferenceable : 1;
493 uint16_t IsInvariant : 1;
494 };
495 enum { NumMemSDNodeBits = NumSDNodeBits + 4 };
496
497 class LSBaseSDNodeBitfields {
498 friend class LSBaseSDNode;
499 friend class MaskedLoadStoreSDNode;
500 friend class MaskedGatherScatterSDNode;
501
502 uint16_t : NumMemSDNodeBits;
503
504 // This storage is shared between disparate class hierarchies to hold an
505 // enumeration specific to the class hierarchy in use.
506 // LSBaseSDNode => enum ISD::MemIndexedMode
507 // MaskedLoadStoreBaseSDNode => enum ISD::MemIndexedMode
508 // MaskedGatherScatterSDNode => enum ISD::MemIndexType
509 uint16_t AddressingMode : 3;
510 };
511 enum { NumLSBaseSDNodeBits = NumMemSDNodeBits + 3 };
512
513 class LoadSDNodeBitfields {
514 friend class LoadSDNode;
515 friend class MaskedLoadSDNode;
516
517 uint16_t : NumLSBaseSDNodeBits;
518
519 uint16_t ExtTy : 2; // enum ISD::LoadExtType
520 uint16_t IsExpanding : 1;
521 };
522
523 class StoreSDNodeBitfields {
524 friend class StoreSDNode;
525 friend class MaskedStoreSDNode;
526
527 uint16_t : NumLSBaseSDNodeBits;
528
529 uint16_t IsTruncating : 1;
530 uint16_t IsCompressing : 1;
531 };
532
533 union {
534 char RawSDNodeBits[sizeof(uint16_t)];
535 SDNodeBitfields SDNodeBits;
536 ConstantSDNodeBitfields ConstantSDNodeBits;
537 MemSDNodeBitfields MemSDNodeBits;
538 LSBaseSDNodeBitfields LSBaseSDNodeBits;
539 LoadSDNodeBitfields LoadSDNodeBits;
540 StoreSDNodeBitfields StoreSDNodeBits;
541 };
542END_TWO_BYTE_PACK()
543#undef BEGIN_TWO_BYTE_PACK
544#undef END_TWO_BYTE_PACK
545
546 // RawSDNodeBits must cover the entirety of the union. This means that all of
547 // the union's members must have size <= RawSDNodeBits. We write the RHS as
548 // "2" instead of sizeof(RawSDNodeBits) because MSVC can't handle the latter.
549 static_assert(sizeof(SDNodeBitfields) <= 2, "field too wide");
550 static_assert(sizeof(ConstantSDNodeBitfields) <= 2, "field too wide");
551 static_assert(sizeof(MemSDNodeBitfields) <= 2, "field too wide");
552 static_assert(sizeof(LSBaseSDNodeBitfields) <= 2, "field too wide");
553 static_assert(sizeof(LoadSDNodeBitfields) <= 2, "field too wide");
554 static_assert(sizeof(StoreSDNodeBitfields) <= 2, "field too wide");
555
556private:
557 friend class SelectionDAG;
558 // TODO: unfriend HandleSDNode once we fix its operand handling.
559 friend class HandleSDNode;
560
561 /// Unique id per SDNode in the DAG.
562 int NodeId = -1;
563
564 /// The values that are used by this operation.
565 SDUse *OperandList = nullptr;
566
567 /// The types of the values this node defines. SDNode's may
568 /// define multiple values simultaneously.
569 const EVT *ValueList;
570
571 /// List of uses for this SDNode.
572 SDUse *UseList = nullptr;
573
574 /// The number of entries in the Operand/Value list.
575 unsigned short NumOperands = 0;
576 unsigned short NumValues;
577
578 // The ordering of the SDNodes. It roughly corresponds to the ordering of the
579 // original LLVM instructions.
580 // This is used for turning off scheduling, because we'll forgo
581 // the normal scheduling algorithms and output the instructions according to
582 // this ordering.
583 unsigned IROrder;
584
585 /// Source line information.
586 DebugLoc debugLoc;
587
588 /// Return a pointer to the specified value type.
589 static const EVT *getValueTypeList(EVT VT);
590
591 SDNodeFlags Flags;
592
593public:
594 /// Unique and persistent id per SDNode in the DAG.
595 /// Used for debug printing.
596 uint16_t PersistentId;
597
//===--------------------------------------------------------------------===//
// Accessors
//

/// Return the SelectionDAG opcode value for this node. For
/// pre-isel nodes (those for which isMachineOpcode returns false), these
/// are the opcode values in the ISD and <target>ISD namespaces. For
/// post-isel opcodes, see getMachineOpcode.
/// (The unsigned short cast maps negative, post-isel NodeType values into
/// the 16-bit opcode space.)
unsigned getOpcode() const { return (unsigned short)NodeType; }

/// Test if this node has a target-specific opcode (in the
/// \<target\>ISD namespace).
bool isTargetOpcode() const { return NodeType >= ISD::BUILTIN_OP_END; }

/// Test if this node has a target-specific opcode that may raise
/// FP exceptions (in the \<target\>ISD namespace and greater than
/// FIRST_TARGET_STRICTFP_OPCODE). Note that all target memory
/// opcode are currently automatically considered to possibly raise
/// FP exceptions as well.
bool isTargetStrictFPOpcode() const {
  return NodeType >= ISD::FIRST_TARGET_STRICTFP_OPCODE;
}

/// Test if this node has a target-specific
/// memory-referencing opcode (in the \<target\>ISD namespace and
/// greater than FIRST_TARGET_MEMORY_OPCODE).
bool isTargetMemoryOpcode() const {
  return NodeType >= ISD::FIRST_TARGET_MEMORY_OPCODE;
}

/// Return true if this node's opcode is ISD::UNDEF.
bool isUndef() const { return NodeType == ISD::UNDEF; }
630
/// Test if this node is a memory intrinsic (with valid pointer information).
/// INTRINSIC_W_CHAIN and INTRINSIC_VOID nodes are sometimes created for
/// non-memory intrinsics (with chains) that are not really instances of
/// MemSDNode. For such nodes, we need some extra state to determine the
/// proper classof relationship.
bool isMemIntrinsic() const {
  return (NodeType == ISD::INTRINSIC_W_CHAIN ||
          NodeType == ISD::INTRINSIC_VOID) &&
         SDNodeBits.IsMemIntrinsic;
}

/// Test if this node is a strict floating point pseudo-op.
/// The case list is generated from llvm/IR/ConstrainedOps.def via the
/// DAG_INSTRUCTION X-macro, plus the two FP16 conversion opcodes.
// NOTE(review): reads only NodeType yet is non-const -- presumably an
// oversight; confirm before changing the signature.
bool isStrictFPOpcode() {
  switch (NodeType) {
  default:
    return false;
  case ISD::STRICT_FP16_TO_FP:
  case ISD::STRICT_FP_TO_FP16:
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
  case ISD::STRICT_##DAGN:
#include "llvm/IR/ConstrainedOps.def"
    return true;
  }
}

/// Test if this node has a post-isel opcode, directly
/// corresponding to a MachineInstr opcode.
bool isMachineOpcode() const { return NodeType < 0; }
659
660 /// This may only be called if isMachineOpcode returns
661 /// true. It returns the MachineInstr opcode value that the node's opcode
662 /// corresponds to.
663 unsigned getMachineOpcode() const {
664 assert(isMachineOpcode() && "Not a MachineInstr opcode!")((isMachineOpcode() && "Not a MachineInstr opcode!") ?
static_cast<void> (0) : __assert_fail ("isMachineOpcode() && \"Not a MachineInstr opcode!\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 664, __PRETTY_FUNCTION__))
;
665 return ~NodeType;
666 }
667
668 bool getHasDebugValue() const { return SDNodeBits.HasDebugValue; }
669 void setHasDebugValue(bool b) { SDNodeBits.HasDebugValue = b; }
670
671 bool isDivergent() const { return SDNodeBits.IsDivergent; }
672
673 /// Return true if there are no uses of this node.
674 bool use_empty() const { return UseList == nullptr; }
675
676 /// Return true if there is exactly one use of this node.
677 bool hasOneUse() const {
678 return !use_empty() && std::next(use_begin()) == use_end();
679 }
680
681 /// Return the number of uses of this node. This method takes
682 /// time proportional to the number of uses.
683 size_t use_size() const { return std::distance(use_begin(), use_end()); }
684
685 /// Return the unique node id.
686 int getNodeId() const { return NodeId; }
687
688 /// Set unique node id.
689 void setNodeId(int Id) { NodeId = Id; }
690
691 /// Return the node ordering.
692 unsigned getIROrder() const { return IROrder; }
693
694 /// Set the node ordering.
695 void setIROrder(unsigned Order) { IROrder = Order; }
696
697 /// Return the source location info.
698 const DebugLoc &getDebugLoc() const { return debugLoc; }
699
700 /// Set source location info. Try to avoid this, putting
701 /// it in the constructor is preferable.
702 void setDebugLoc(DebugLoc dl) { debugLoc = std::move(dl); }
703
704 /// This class provides iterator support for SDUse
705 /// operands that use a specific SDNode.
706 class use_iterator
707 : public std::iterator<std::forward_iterator_tag, SDUse, ptrdiff_t> {
708 friend class SDNode;
709
710 SDUse *Op = nullptr;
711
712 explicit use_iterator(SDUse *op) : Op(op) {}
713
714 public:
715 using reference = std::iterator<std::forward_iterator_tag,
716 SDUse, ptrdiff_t>::reference;
717 using pointer = std::iterator<std::forward_iterator_tag,
718 SDUse, ptrdiff_t>::pointer;
719
720 use_iterator() = default;
721 use_iterator(const use_iterator &I) : Op(I.Op) {}
722
723 bool operator==(const use_iterator &x) const {
724 return Op == x.Op;
725 }
726 bool operator!=(const use_iterator &x) const {
727 return !operator==(x);
728 }
729
730 /// Return true if this iterator is at the end of uses list.
731 bool atEnd() const { return Op == nullptr; }
732
733 // Iterator traversal: forward iteration only.
734 use_iterator &operator++() { // Preincrement
735 assert(Op && "Cannot increment end iterator!")((Op && "Cannot increment end iterator!") ? static_cast
<void> (0) : __assert_fail ("Op && \"Cannot increment end iterator!\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 735, __PRETTY_FUNCTION__))
;
736 Op = Op->getNext();
737 return *this;
738 }
739
740 use_iterator operator++(int) { // Postincrement
741 use_iterator tmp = *this; ++*this; return tmp;
742 }
743
744 /// Retrieve a pointer to the current user node.
745 SDNode *operator*() const {
746 assert(Op && "Cannot dereference end iterator!")((Op && "Cannot dereference end iterator!") ? static_cast
<void> (0) : __assert_fail ("Op && \"Cannot dereference end iterator!\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 746, __PRETTY_FUNCTION__))
;
747 return Op->getUser();
748 }
749
750 SDNode *operator->() const { return operator*(); }
751
752 SDUse &getUse() const { return *Op; }
753
754 /// Retrieve the operand # of this use in its user.
755 unsigned getOperandNo() const {
756 assert(Op && "Cannot dereference end iterator!")((Op && "Cannot dereference end iterator!") ? static_cast
<void> (0) : __assert_fail ("Op && \"Cannot dereference end iterator!\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 756, __PRETTY_FUNCTION__))
;
757 return (unsigned)(Op - Op->getUser()->OperandList);
758 }
759 };
760
761 /// Provide iteration support to walk over all uses of an SDNode.
762 use_iterator use_begin() const {
763 return use_iterator(UseList);
764 }
765
766 static use_iterator use_end() { return use_iterator(nullptr); }
767
768 inline iterator_range<use_iterator> uses() {
769 return make_range(use_begin(), use_end());
770 }
771 inline iterator_range<use_iterator> uses() const {
772 return make_range(use_begin(), use_end());
773 }
774
775 /// Return true if there are exactly NUSES uses of the indicated value.
776 /// This method ignores uses of other values defined by this operation.
777 bool hasNUsesOfValue(unsigned NUses, unsigned Value) const;
778
779 /// Return true if there are any use of the indicated value.
780 /// This method ignores uses of other values defined by this operation.
781 bool hasAnyUseOfValue(unsigned Value) const;
782
783 /// Return true if this node is the only use of N.
784 bool isOnlyUserOf(const SDNode *N) const;
785
786 /// Return true if this node is an operand of N.
787 bool isOperandOf(const SDNode *N) const;
788
789 /// Return true if this node is a predecessor of N.
790 /// NOTE: Implemented on top of hasPredecessor and every bit as
791 /// expensive. Use carefully.
792 bool isPredecessorOf(const SDNode *N) const {
793 return N->hasPredecessor(this);
794 }
795
796 /// Return true if N is a predecessor of this node.
797 /// N is either an operand of this node, or can be reached by recursively
798 /// traversing up the operands.
799 /// NOTE: This is an expensive method. Use it carefully.
800 bool hasPredecessor(const SDNode *N) const;
801
802 /// Returns true if N is a predecessor of any node in Worklist. This
803 /// helper keeps Visited and Worklist sets externally to allow unions
804 /// searches to be performed in parallel, caching of results across
805 /// queries and incremental addition to Worklist. Stops early if N is
806 /// found but will resume. Remember to clear Visited and Worklists
807 /// if DAG changes. MaxSteps gives a maximum number of nodes to visit before
808 /// giving up. The TopologicalPrune flag signals that positive NodeIds are
809 /// topologically ordered (Operands have strictly smaller node id) and search
810 /// can be pruned leveraging this.
811 static bool hasPredecessorHelper(const SDNode *N,
812 SmallPtrSetImpl<const SDNode *> &Visited,
813 SmallVectorImpl<const SDNode *> &Worklist,
814 unsigned int MaxSteps = 0,
815 bool TopologicalPrune = false) {
816 SmallVector<const SDNode *, 8> DeferredNodes;
  // N already proven reachable by an earlier call sharing this Visited set.
817 if (Visited.count(N))
818 return true;
819
820 // Node Id's are assigned in three places: As a topological
821 // ordering (> 0), during legalization (results in values set to
822 // 0), new nodes (set to -1). If N has a topolgical id then we
823 // know that all nodes with ids smaller than it cannot be
824 // successors and we need not check them. Filter out all node
825 // that can't be matches. We add them to the worklist before exit
826 // in case of multiple calls. Note that during selection the topological id
827 // may be violated if a node's predecessor is selected before it. We mark
828 // this at selection negating the id of unselected successors and
829 // restricting topological pruning to positive ids.
830
831 int NId = N->getNodeId();
832 // If we Invalidated the Id, reconstruct original NId.
833 if (NId < -1)
834 NId = -(NId + 1);
835
836 bool Found = false;
  // Walk operand (use -> def) edges: pop a node and scan its operands.
837 while (!Worklist.empty()) {
838 const SDNode *M = Worklist.pop_back_val();
839 int MId = M->getNodeId();
  // Topological prune: with NId > MId > 0, M's operands all have ids
  // strictly below MId < NId, so N cannot be reached through M. Set M
  // aside instead of searching it (it is restored to Worklist on exit).
840 if (TopologicalPrune && M->getOpcode() != ISD::TokenFactor && (NId > 0) &&
841 (MId > 0) && (MId < NId)) {
842 DeferredNodes.push_back(M);
843 continue;
844 }
845 for (const SDValue &OpV : M->op_values()) {
846 SDNode *Op = OpV.getNode();
847 if (Visited.insert(Op).second)
848 Worklist.push_back(Op);
849 if (Op == N)
850 Found = true;
851 }
  // Note: all of M's operands were pushed before breaking, so Visited and
  // Worklist stay consistent for incremental reuse across calls.
852 if (Found)
853 break;
854 if (MaxSteps != 0 && Visited.size() >= MaxSteps)
855 break;
856 }
857 // Push deferred nodes back on worklist.
858 Worklist.append(DeferredNodes.begin(), DeferredNodes.end());
859 // If we bailed early, conservatively return found.
860 if (MaxSteps != 0 && Visited.size() >= MaxSteps)
861 return true;
862 return Found;
863 }
864
865 /// Return true if all the users of N are contained in Nodes.
866 /// NOTE: Requires at least one match, but doesn't require them all.
867 static bool areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N);
868
869 /// Return the number of values used by this operation.
870 unsigned getNumOperands() const { return NumOperands; }
871
872 /// Return the maximum number of operands that a SDNode can hold.
873 static constexpr size_t getMaxNumOperands() {
874 return std::numeric_limits<decltype(SDNode::NumOperands)>::max();
875 }
876
877 /// Helper method returns the integer value of a ConstantSDNode operand.
878 inline uint64_t getConstantOperandVal(unsigned Num) const;
879
880 /// Helper method returns the APInt of a ConstantSDNode operand.
881 inline const APInt &getConstantOperandAPInt(unsigned Num) const;
882
883 const SDValue &getOperand(unsigned Num) const {
884 assert(Num < NumOperands && "Invalid child # of SDNode!")((Num < NumOperands && "Invalid child # of SDNode!"
) ? static_cast<void> (0) : __assert_fail ("Num < NumOperands && \"Invalid child # of SDNode!\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 884, __PRETTY_FUNCTION__))
;
885 return OperandList[Num];
886 }
887
888 using op_iterator = SDUse *;
889
890 op_iterator op_begin() const { return OperandList; }
891 op_iterator op_end() const { return OperandList+NumOperands; }
892 ArrayRef<SDUse> ops() const { return makeArrayRef(op_begin(), op_end()); }
893
894 /// Iterator for directly iterating over the operand SDValue's.
895 struct value_op_iterator
896 : iterator_adaptor_base<value_op_iterator, op_iterator,
897 std::random_access_iterator_tag, SDValue,
898 ptrdiff_t, value_op_iterator *,
899 value_op_iterator *> {
900 explicit value_op_iterator(SDUse *U = nullptr)
901 : iterator_adaptor_base(U) {}
902
903 const SDValue &operator*() const { return I->get(); }
904 };
905
906 iterator_range<value_op_iterator> op_values() const {
907 return make_range(value_op_iterator(op_begin()),
908 value_op_iterator(op_end()));
909 }
910
911 SDVTList getVTList() const {
912 SDVTList X = { ValueList, NumValues };
913 return X;
914 }
915
916 /// If this node has a glue operand, return the node
917 /// to which the glue operand points. Otherwise return NULL.
918 SDNode *getGluedNode() const {
919 if (getNumOperands() != 0 &&
920 getOperand(getNumOperands()-1).getValueType() == MVT::Glue)
921 return getOperand(getNumOperands()-1).getNode();
922 return nullptr;
923 }
924
925 /// If this node has a glue value with a user, return
926 /// the user (there is at most one). Otherwise return NULL.
927 SDNode *getGluedUser() const {
928 for (use_iterator UI = use_begin(), UE = use_end(); UI != UE; ++UI)
929 if (UI.getUse().get().getValueType() == MVT::Glue)
930 return *UI;
931 return nullptr;
932 }
933
934 const SDNodeFlags getFlags() const { return Flags; }
935 void setFlags(SDNodeFlags NewFlags) { Flags = NewFlags; }
936
937 /// Clear any flags in this node that aren't also set in Flags.
938 /// If Flags is not in a defined state then this has no effect.
939 void intersectFlagsWith(const SDNodeFlags Flags);
940
941 /// Return the number of values defined/returned by this operator.
942 unsigned getNumValues() const { return NumValues; }
943
944 /// Return the type of a specified result.
945 EVT getValueType(unsigned ResNo) const {
946 assert(ResNo < NumValues && "Illegal result number!")((ResNo < NumValues && "Illegal result number!") ?
static_cast<void> (0) : __assert_fail ("ResNo < NumValues && \"Illegal result number!\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 946, __PRETTY_FUNCTION__))
;
947 return ValueList[ResNo];
948 }
949
950 /// Return the type of a specified result as a simple type.
951 MVT getSimpleValueType(unsigned ResNo) const {
952 return getValueType(ResNo).getSimpleVT();
953 }
954
955 /// Returns MVT::getSizeInBits(getValueType(ResNo)).
956 ///
957 /// If the value type is a scalable vector type, the scalable property will
958 /// be set and the runtime size will be a positive integer multiple of the
959 /// base size.
960 TypeSize getValueSizeInBits(unsigned ResNo) const {
961 return getValueType(ResNo).getSizeInBits();
962 }
963
964 using value_iterator = const EVT *;
965
966 value_iterator value_begin() const { return ValueList; }
967 value_iterator value_end() const { return ValueList+NumValues; }
968 iterator_range<value_iterator> values() const {
969 return llvm::make_range(value_begin(), value_end());
970 }
971
972 /// Return the opcode of this operation for printing.
973 std::string getOperationName(const SelectionDAG *G = nullptr) const;
974 static const char* getIndexedModeName(ISD::MemIndexedMode AM);
975 void print_types(raw_ostream &OS, const SelectionDAG *G) const;
976 void print_details(raw_ostream &OS, const SelectionDAG *G) const;
977 void print(raw_ostream &OS, const SelectionDAG *G = nullptr) const;
978 void printr(raw_ostream &OS, const SelectionDAG *G = nullptr) const;
979
980 /// Print a SelectionDAG node and all children down to
981 /// the leaves. The given SelectionDAG allows target-specific nodes
982 /// to be printed in human-readable form. Unlike printr, this will
983 /// print the whole DAG, including children that appear multiple
984 /// times.
985 ///
986 void printrFull(raw_ostream &O, const SelectionDAG *G = nullptr) const;
987
988 /// Print a SelectionDAG node and children up to
989 /// depth "depth." The given SelectionDAG allows target-specific
990 /// nodes to be printed in human-readable form. Unlike printr, this
991 /// will print children that appear multiple times wherever they are
992 /// used.
993 ///
994 void printrWithDepth(raw_ostream &O, const SelectionDAG *G = nullptr,
995 unsigned depth = 100) const;
996
997 /// Dump this node, for debugging.
998 void dump() const;
999
1000 /// Dump (recursively) this node and its use-def subgraph.
1001 void dumpr() const;
1002
1003 /// Dump this node, for debugging.
1004 /// The given SelectionDAG allows target-specific nodes to be printed
1005 /// in human-readable form.
1006 void dump(const SelectionDAG *G) const;
1007
1008 /// Dump (recursively) this node and its use-def subgraph.
1009 /// The given SelectionDAG allows target-specific nodes to be printed
1010 /// in human-readable form.
1011 void dumpr(const SelectionDAG *G) const;
1012
1013 /// printrFull to dbgs(). The given SelectionDAG allows
1014 /// target-specific nodes to be printed in human-readable form.
1015 /// Unlike dumpr, this will print the whole DAG, including children
1016 /// that appear multiple times.
1017 void dumprFull(const SelectionDAG *G = nullptr) const;
1018
1019 /// printrWithDepth to dbgs(). The given
1020 /// SelectionDAG allows target-specific nodes to be printed in
1021 /// human-readable form. Unlike dumpr, this will print children
1022 /// that appear multiple times wherever they are used.
1023 ///
1024 void dumprWithDepth(const SelectionDAG *G = nullptr,
1025 unsigned depth = 100) const;
1026
1027 /// Gather unique data for the node.
1028 void Profile(FoldingSetNodeID &ID) const;
1029
1030 /// This method should only be used by the SDUse class.
1031 void addUse(SDUse &U) { U.addToList(&UseList); }
1032
1033protected:
1034 static SDVTList getSDVTList(EVT VT) {
1035 SDVTList Ret = { getValueTypeList(VT), 1 };
1036 return Ret;
1037 }
1038
1039 /// Create an SDNode.
1040 ///
1041 /// SDNodes are created without any operands, and never own the operand
1042 /// storage. To add operands, see SelectionDAG::createOperands.
1043 SDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs)
1044 : NodeType(Opc), ValueList(VTs.VTs), NumValues(VTs.NumVTs),
1045 IROrder(Order), debugLoc(std::move(dl)) {
1046 memset(&RawSDNodeBits, 0, sizeof(RawSDNodeBits));
1047 assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor")((debugLoc.hasTrivialDestructor() && "Expected trivial destructor"
) ? static_cast<void> (0) : __assert_fail ("debugLoc.hasTrivialDestructor() && \"Expected trivial destructor\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1047, __PRETTY_FUNCTION__))
;
1048 assert(NumValues == VTs.NumVTs &&((NumValues == VTs.NumVTs && "NumValues wasn't wide enough for its operands!"
) ? static_cast<void> (0) : __assert_fail ("NumValues == VTs.NumVTs && \"NumValues wasn't wide enough for its operands!\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1049, __PRETTY_FUNCTION__))
1049 "NumValues wasn't wide enough for its operands!")((NumValues == VTs.NumVTs && "NumValues wasn't wide enough for its operands!"
) ? static_cast<void> (0) : __assert_fail ("NumValues == VTs.NumVTs && \"NumValues wasn't wide enough for its operands!\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1049, __PRETTY_FUNCTION__))
;
1050 }
1051
1052 /// Release the operands and set this node to have zero operands.
1053 void DropOperands();
1054};
1055
1056/// Wrapper class for IR location info (IR ordering and DebugLoc) to be passed
1057/// into SDNode creation functions.
1058/// When an SDNode is created from the DAGBuilder, the DebugLoc is extracted
1059/// from the original Instruction, and IROrder is the ordinal position of
1060/// the instruction.
1061/// When an SDNode is created after the DAG is being built, both DebugLoc and
1062/// the IROrder are propagated from the original SDNode.
1063/// So SDLoc class provides two constructors besides the default one, one to
1064/// be used by the DAGBuilder, the other to be used by others.
1065class SDLoc {
1066private:
1067 DebugLoc DL;
1068 int IROrder = 0;
1069
1070public:
1071 SDLoc() = default;
1072 SDLoc(const SDNode *N) : DL(N->getDebugLoc()), IROrder(N->getIROrder()) {}
1073 SDLoc(const SDValue V) : SDLoc(V.getNode()) {}
1074 SDLoc(const Instruction *I, int Order) : IROrder(Order) {
1075 assert(Order >= 0 && "bad IROrder")((Order >= 0 && "bad IROrder") ? static_cast<void
> (0) : __assert_fail ("Order >= 0 && \"bad IROrder\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1075, __PRETTY_FUNCTION__))
;
1076 if (I)
1077 DL = I->getDebugLoc();
1078 }
1079
1080 unsigned getIROrder() const { return IROrder; }
1081 const DebugLoc &getDebugLoc() const { return DL; }
1082};
1083
1084// Define inline functions from the SDValue class.
1085
1086inline SDValue::SDValue(SDNode *node, unsigned resno)
1087 : Node(node), ResNo(resno) {
1088 // Explicitly check for !ResNo to avoid use-after-free, because there are
1089 // callers that use SDValue(N, 0) with a deleted N to indicate successful
1090 // combines.
1091 assert((!Node || !ResNo || ResNo < Node->getNumValues()) &&(((!Node || !ResNo || ResNo < Node->getNumValues()) &&
"Invalid result number for the given node!") ? static_cast<
void> (0) : __assert_fail ("(!Node || !ResNo || ResNo < Node->getNumValues()) && \"Invalid result number for the given node!\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1092, __PRETTY_FUNCTION__))
1092 "Invalid result number for the given node!")(((!Node || !ResNo || ResNo < Node->getNumValues()) &&
"Invalid result number for the given node!") ? static_cast<
void> (0) : __assert_fail ("(!Node || !ResNo || ResNo < Node->getNumValues()) && \"Invalid result number for the given node!\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1092, __PRETTY_FUNCTION__))
;
1093 assert(ResNo < -2U && "Cannot use result numbers reserved for DenseMaps.")((ResNo < -2U && "Cannot use result numbers reserved for DenseMaps."
) ? static_cast<void> (0) : __assert_fail ("ResNo < -2U && \"Cannot use result numbers reserved for DenseMaps.\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1093, __PRETTY_FUNCTION__))
;
1094}
1095
1096inline unsigned SDValue::getOpcode() const {
1097 return Node->getOpcode();
20
Called C++ object pointer is null
1098}
1099
1100inline EVT SDValue::getValueType() const {
1101 return Node->getValueType(ResNo);
1102}
1103
1104inline unsigned SDValue::getNumOperands() const {
1105 return Node->getNumOperands();
1106}
1107
1108inline const SDValue &SDValue::getOperand(unsigned i) const {
1109 return Node->getOperand(i);
1110}
1111
1112inline uint64_t SDValue::getConstantOperandVal(unsigned i) const {
1113 return Node->getConstantOperandVal(i);
1114}
1115
1116inline const APInt &SDValue::getConstantOperandAPInt(unsigned i) const {
1117 return Node->getConstantOperandAPInt(i);
1118}
1119
1120inline bool SDValue::isTargetOpcode() const {
1121 return Node->isTargetOpcode();
1122}
1123
1124inline bool SDValue::isTargetMemoryOpcode() const {
1125 return Node->isTargetMemoryOpcode();
1126}
1127
1128inline bool SDValue::isMachineOpcode() const {
1129 return Node->isMachineOpcode();
1130}
1131
1132inline unsigned SDValue::getMachineOpcode() const {
1133 return Node->getMachineOpcode();
1134}
1135
1136inline bool SDValue::isUndef() const {
1137 return Node->isUndef();
1138}
1139
1140inline bool SDValue::use_empty() const {
1141 return !Node->hasAnyUseOfValue(ResNo);
1142}
1143
1144inline bool SDValue::hasOneUse() const {
1145 return Node->hasNUsesOfValue(1, ResNo);
1146}
1147
1148inline const DebugLoc &SDValue::getDebugLoc() const {
1149 return Node->getDebugLoc();
1150}
1151
1152inline void SDValue::dump() const {
1153 return Node->dump();
1154}
1155
1156inline void SDValue::dump(const SelectionDAG *G) const {
1157 return Node->dump(G);
1158}
1159
1160inline void SDValue::dumpr() const {
1161 return Node->dumpr();
1162}
1163
1164inline void SDValue::dumpr(const SelectionDAG *G) const {
1165 return Node->dumpr(G);
1166}
1167
1168// Define inline functions from the SDUse class.
1169
1170inline void SDUse::set(const SDValue &V) {
1171 if (Val.getNode()) removeFromList();
1172 Val = V;
1173 if (V.getNode()) V.getNode()->addUse(*this);
1174}
1175
1176inline void SDUse::setInitial(const SDValue &V) {
1177 Val = V;
1178 V.getNode()->addUse(*this);
1179}
1180
1181inline void SDUse::setNode(SDNode *N) {
1182 if (Val.getNode()) removeFromList();
1183 Val.setNode(N);
1184 if (N) N->addUse(*this);
1185}
1186
1187/// This class is used to form a handle around another node that
1188/// is persistent and is updated across invocations of replaceAllUsesWith on its
1189/// operand. This node should be directly created by end-users and not added to
1190/// the AllNodes list.
1191class HandleSDNode : public SDNode {
1192 SDUse Op;
1193
1194public:
1195 explicit HandleSDNode(SDValue X)
1196 : SDNode(ISD::HANDLENODE, 0, DebugLoc(), getSDVTList(MVT::Other)) {
1197 // HandleSDNodes are never inserted into the DAG, so they won't be
1198 // auto-numbered. Use ID 65535 as a sentinel.
1199 PersistentId = 0xffff;
1200
1201 // Manually set up the operand list. This node type is special in that it's
1202 // always stack allocated and SelectionDAG does not manage its operands.
1203 // TODO: This should either (a) not be in the SDNode hierarchy, or (b) not
1204 // be so special.
1205 Op.setUser(this);
1206 Op.setInitial(X);
1207 NumOperands = 1;
1208 OperandList = &Op;
1209 }
1210 ~HandleSDNode();
1211
1212 const SDValue &getValue() const { return Op; }
1213};
1214
1215class AddrSpaceCastSDNode : public SDNode {
1216private:
1217 unsigned SrcAddrSpace;
1218 unsigned DestAddrSpace;
1219
1220public:
1221 AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl, EVT VT,
1222 unsigned SrcAS, unsigned DestAS);
1223
1224 unsigned getSrcAddressSpace() const { return SrcAddrSpace; }
1225 unsigned getDestAddressSpace() const { return DestAddrSpace; }
1226
1227 static bool classof(const SDNode *N) {
1228 return N->getOpcode() == ISD::ADDRSPACECAST;
1229 }
1230};
1231
1232/// This is an abstract virtual class for memory operations.
1233class MemSDNode : public SDNode {
1234private:
1235 // VT of in-memory value.
1236 EVT MemoryVT;
1237
1238protected:
1239 /// Memory reference information.
1240 MachineMemOperand *MMO;
1241
1242public:
1243 MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTs,
1244 EVT memvt, MachineMemOperand *MMO);
1245
1246 bool readMem() const { return MMO->isLoad(); }
1247 bool writeMem() const { return MMO->isStore(); }
1248
1249 /// Returns alignment and volatility of the memory access
1250 Align getOriginalAlign() const { return MMO->getBaseAlign(); }
1251 Align getAlign() const { return MMO->getAlign(); }
1252 LLVM_ATTRIBUTE_DEPRECATED(unsigned getOriginalAlignment() const,unsigned getOriginalAlignment() const __attribute__((deprecated
("Use getOriginalAlign() instead")))
1253 "Use getOriginalAlign() instead")unsigned getOriginalAlignment() const __attribute__((deprecated
("Use getOriginalAlign() instead")))
{
1254 return MMO->getBaseAlign().value();
1255 }
1256 // FIXME: Remove once transition to getAlign is over.
1257 unsigned getAlignment() const { return MMO->getAlign().value(); }
1258
1259 /// Return the SubclassData value, without HasDebugValue. This contains an
1260 /// encoding of the volatile flag, as well as bits used by subclasses. This
1261 /// function should only be used to compute a FoldingSetNodeID value.
1262 /// The HasDebugValue bit is masked out because CSE map needs to match
1263 /// nodes with debug info with nodes without debug info. Same is about
1264 /// isDivergent bit.
1265 unsigned getRawSubclassData() const {
1266 uint16_t Data;
1267 union {
1268 char RawSDNodeBits[sizeof(uint16_t)];
1269 SDNodeBitfields SDNodeBits;
1270 };
1271 memcpy(&RawSDNodeBits, &this->RawSDNodeBits, sizeof(this->RawSDNodeBits));
1272 SDNodeBits.HasDebugValue = 0;
1273 SDNodeBits.IsDivergent = false;
1274 memcpy(&Data, &RawSDNodeBits, sizeof(RawSDNodeBits));
1275 return Data;
1276 }
1277
1278 bool isVolatile() const { return MemSDNodeBits.IsVolatile; }
1279 bool isNonTemporal() const { return MemSDNodeBits.IsNonTemporal; }
1280 bool isDereferenceable() const { return MemSDNodeBits.IsDereferenceable; }
1281 bool isInvariant() const { return MemSDNodeBits.IsInvariant; }
1282
1283 // Returns the offset from the location of the access.
1284 int64_t getSrcValueOffset() const { return MMO->getOffset(); }
1285
1286 /// Returns the AA info that describes the dereference.
1287 AAMDNodes getAAInfo() const { return MMO->getAAInfo(); }
1288
1289 /// Returns the Ranges that describes the dereference.
1290 const MDNode *getRanges() const { return MMO->getRanges(); }
1291
1292 /// Returns the synchronization scope ID for this memory operation.
1293 SyncScope::ID getSyncScopeID() const { return MMO->getSyncScopeID(); }
1294
1295 /// Return the atomic ordering requirements for this memory operation. For
1296 /// cmpxchg atomic operations, return the atomic ordering requirements when
1297 /// store occurs.
1298 AtomicOrdering getOrdering() const { return MMO->getOrdering(); }
1299
1300 /// Return true if the memory operation ordering is Unordered or higher.
1301 bool isAtomic() const { return MMO->isAtomic(); }
1302
1303 /// Returns true if the memory operation doesn't imply any ordering
1304 /// constraints on surrounding memory operations beyond the normal memory
1305 /// aliasing rules.
1306 bool isUnordered() const { return MMO->isUnordered(); }
1307
1308 /// Returns true if the memory operation is neither atomic or volatile.
1309 bool isSimple() const { return !isAtomic() && !isVolatile(); }
1310
1311 /// Return the type of the in-memory value.
1312 EVT getMemoryVT() const { return MemoryVT; }
1313
1314 /// Return a MachineMemOperand object describing the memory
1315 /// reference performed by operation.
1316 MachineMemOperand *getMemOperand() const { return MMO; }
1317
1318 const MachinePointerInfo &getPointerInfo() const {
1319 return MMO->getPointerInfo();
1320 }
1321
1322 /// Return the address space for the associated pointer
1323 unsigned getAddressSpace() const {
1324 return getPointerInfo().getAddrSpace();
1325 }
1326
1327 /// Update this MemSDNode's MachineMemOperand information
1328 /// to reflect the alignment of NewMMO, if it has a greater alignment.
1329 /// This must only be used when the new alignment applies to all users of
1330 /// this MachineMemOperand.
1331 void refineAlignment(const MachineMemOperand *NewMMO) {
1332 MMO->refineAlignment(NewMMO);
1333 }
1334
1335 const SDValue &getChain() const { return getOperand(0); }
1336
1337 const SDValue &getBasePtr() const {
1338 switch (getOpcode()) {
1339 case ISD::STORE:
1340 case ISD::MSTORE:
1341 return getOperand(2);
1342 case ISD::MGATHER:
1343 case ISD::MSCATTER:
1344 return getOperand(3);
1345 default:
1346 return getOperand(1);
1347 }
1348 }
1349
1350 // Methods to support isa and dyn_cast
1351 static bool classof(const SDNode *N) {
1352 // For some targets, we lower some target intrinsics to a MemIntrinsicNode
1353 // with either an intrinsic or a target opcode.
1354 return N->getOpcode() == ISD::LOAD ||
1355 N->getOpcode() == ISD::STORE ||
1356 N->getOpcode() == ISD::PREFETCH ||
1357 N->getOpcode() == ISD::ATOMIC_CMP_SWAP ||
1358 N->getOpcode() == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS ||
1359 N->getOpcode() == ISD::ATOMIC_SWAP ||
1360 N->getOpcode() == ISD::ATOMIC_LOAD_ADD ||
1361 N->getOpcode() == ISD::ATOMIC_LOAD_SUB ||
1362 N->getOpcode() == ISD::ATOMIC_LOAD_AND ||
1363 N->getOpcode() == ISD::ATOMIC_LOAD_CLR ||
1364 N->getOpcode() == ISD::ATOMIC_LOAD_OR ||
1365 N->getOpcode() == ISD::ATOMIC_LOAD_XOR ||
1366 N->getOpcode() == ISD::ATOMIC_LOAD_NAND ||
1367 N->getOpcode() == ISD::ATOMIC_LOAD_MIN ||
1368 N->getOpcode() == ISD::ATOMIC_LOAD_MAX ||
1369 N->getOpcode() == ISD::ATOMIC_LOAD_UMIN ||
1370 N->getOpcode() == ISD::ATOMIC_LOAD_UMAX ||
1371 N->getOpcode() == ISD::ATOMIC_LOAD_FADD ||
1372 N->getOpcode() == ISD::ATOMIC_LOAD_FSUB ||
1373 N->getOpcode() == ISD::ATOMIC_LOAD ||
1374 N->getOpcode() == ISD::ATOMIC_STORE ||
1375 N->getOpcode() == ISD::MLOAD ||
1376 N->getOpcode() == ISD::MSTORE ||
1377 N->getOpcode() == ISD::MGATHER ||
1378 N->getOpcode() == ISD::MSCATTER ||
1379 N->isMemIntrinsic() ||
1380 N->isTargetMemoryOpcode();
1381 }
1382};
1383
1384/// This is an SDNode representing atomic operations.
1385class AtomicSDNode : public MemSDNode {
1386public:
1387 AtomicSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTL,
1388 EVT MemVT, MachineMemOperand *MMO)
1389 : MemSDNode(Opc, Order, dl, VTL, MemVT, MMO) {
1390 assert(((Opc != ISD::ATOMIC_LOAD && Opc != ISD::ATOMIC_STORE) ||((((Opc != ISD::ATOMIC_LOAD && Opc != ISD::ATOMIC_STORE
) || MMO->isAtomic()) && "then why are we using an AtomicSDNode?"
) ? static_cast<void> (0) : __assert_fail ("((Opc != ISD::ATOMIC_LOAD && Opc != ISD::ATOMIC_STORE) || MMO->isAtomic()) && \"then why are we using an AtomicSDNode?\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1391, __PRETTY_FUNCTION__))
1391 MMO->isAtomic()) && "then why are we using an AtomicSDNode?")((((Opc != ISD::ATOMIC_LOAD && Opc != ISD::ATOMIC_STORE
) || MMO->isAtomic()) && "then why are we using an AtomicSDNode?"
) ? static_cast<void> (0) : __assert_fail ("((Opc != ISD::ATOMIC_LOAD && Opc != ISD::ATOMIC_STORE) || MMO->isAtomic()) && \"then why are we using an AtomicSDNode?\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1391, __PRETTY_FUNCTION__))
;
1392 }
1393
1394 const SDValue &getBasePtr() const { return getOperand(1); }
1395 const SDValue &getVal() const { return getOperand(2); }
1396
1397 /// Returns true if this SDNode represents cmpxchg atomic operation, false
1398 /// otherwise.
1399 bool isCompareAndSwap() const {
1400 unsigned Op = getOpcode();
1401 return Op == ISD::ATOMIC_CMP_SWAP ||
1402 Op == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS;
1403 }
1404
1405 /// For cmpxchg atomic operations, return the atomic ordering requirements
1406 /// when store does not occur.
1407 AtomicOrdering getFailureOrdering() const {
1408 assert(isCompareAndSwap() && "Must be cmpxchg operation")((isCompareAndSwap() && "Must be cmpxchg operation") ?
static_cast<void> (0) : __assert_fail ("isCompareAndSwap() && \"Must be cmpxchg operation\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1408, __PRETTY_FUNCTION__))
;
1409 return MMO->getFailureOrdering();
1410 }
1411
1412 // Methods to support isa and dyn_cast
1413 static bool classof(const SDNode *N) {
1414 return N->getOpcode() == ISD::ATOMIC_CMP_SWAP ||
1415 N->getOpcode() == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS ||
1416 N->getOpcode() == ISD::ATOMIC_SWAP ||
1417 N->getOpcode() == ISD::ATOMIC_LOAD_ADD ||
1418 N->getOpcode() == ISD::ATOMIC_LOAD_SUB ||
1419 N->getOpcode() == ISD::ATOMIC_LOAD_AND ||
1420 N->getOpcode() == ISD::ATOMIC_LOAD_CLR ||
1421 N->getOpcode() == ISD::ATOMIC_LOAD_OR ||
1422 N->getOpcode() == ISD::ATOMIC_LOAD_XOR ||
1423 N->getOpcode() == ISD::ATOMIC_LOAD_NAND ||
1424 N->getOpcode() == ISD::ATOMIC_LOAD_MIN ||
1425 N->getOpcode() == ISD::ATOMIC_LOAD_MAX ||
1426 N->getOpcode() == ISD::ATOMIC_LOAD_UMIN ||
1427 N->getOpcode() == ISD::ATOMIC_LOAD_UMAX ||
1428 N->getOpcode() == ISD::ATOMIC_LOAD_FADD ||
1429 N->getOpcode() == ISD::ATOMIC_LOAD_FSUB ||
1430 N->getOpcode() == ISD::ATOMIC_LOAD ||
1431 N->getOpcode() == ISD::ATOMIC_STORE;
1432 }
1433};
1434
1435/// This SDNode is used for target intrinsics that touch
1436/// memory and need an associated MachineMemOperand. Its opcode may be
1437/// INTRINSIC_VOID, INTRINSIC_W_CHAIN, PREFETCH, or a target-specific opcode
1438/// with a value not less than FIRST_TARGET_MEMORY_OPCODE.
1439class MemIntrinsicSDNode : public MemSDNode {
1440public:
1441 MemIntrinsicSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl,
1442 SDVTList VTs, EVT MemoryVT, MachineMemOperand *MMO)
1443 : MemSDNode(Opc, Order, dl, VTs, MemoryVT, MMO) {
1444 SDNodeBits.IsMemIntrinsic = true;
1445 }
1446
1447 // Methods to support isa and dyn_cast
1448 static bool classof(const SDNode *N) {
1449 // We lower some target intrinsics to their target opcode
1450 // early a node with a target opcode can be of this class
1451 return N->isMemIntrinsic() ||
1452 N->getOpcode() == ISD::PREFETCH ||
1453 N->isTargetMemoryOpcode();
1454 }
1455};
1456
1457/// This SDNode is used to implement the code generator
1458/// support for the llvm IR shufflevector instruction. It combines elements
1459/// from two input vectors into a new input vector, with the selection and
1460/// ordering of elements determined by an array of integers, referred to as
1461/// the shuffle mask. For input vectors of width N, mask indices of 0..N-1
1462/// refer to elements from the LHS input, and indices from N to 2N-1 the RHS.
1463/// An index of -1 is treated as undef, such that the code generator may put
1464/// any value in the corresponding element of the result.
1465class ShuffleVectorSDNode : public SDNode {
1466 // The memory for Mask is owned by the SelectionDAG's OperandAllocator, and
1467 // is freed when the SelectionDAG object is destroyed.
1468 const int *Mask;
1469
1470protected:
1471 friend class SelectionDAG;
1472
1473 ShuffleVectorSDNode(EVT VT, unsigned Order, const DebugLoc &dl, const int *M)
1474 : SDNode(ISD::VECTOR_SHUFFLE, Order, dl, getSDVTList(VT)), Mask(M) {}
1475
1476public:
1477 ArrayRef<int> getMask() const {
1478 EVT VT = getValueType(0);
1479 return makeArrayRef(Mask, VT.getVectorNumElements());
1480 }
1481
1482 int getMaskElt(unsigned Idx) const {
1483 assert(Idx < getValueType(0).getVectorNumElements() && "Idx out of range!")((Idx < getValueType(0).getVectorNumElements() && "Idx out of range!"
) ? static_cast<void> (0) : __assert_fail ("Idx < getValueType(0).getVectorNumElements() && \"Idx out of range!\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1483, __PRETTY_FUNCTION__))
;
1484 return Mask[Idx];
1485 }
1486
1487 bool isSplat() const { return isSplatMask(Mask, getValueType(0)); }
1488
1489 int getSplatIndex() const {
1490 assert(isSplat() && "Cannot get splat index for non-splat!")((isSplat() && "Cannot get splat index for non-splat!"
) ? static_cast<void> (0) : __assert_fail ("isSplat() && \"Cannot get splat index for non-splat!\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1490, __PRETTY_FUNCTION__))
;
1491 EVT VT = getValueType(0);
1492 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
1493 if (Mask[i] >= 0)
1494 return Mask[i];
1495
1496 // We can choose any index value here and be correct because all elements
1497 // are undefined. Return 0 for better potential for callers to simplify.
1498 return 0;
1499 }
1500
1501 static bool isSplatMask(const int *Mask, EVT VT);
1502
1503 /// Change values in a shuffle permute mask assuming
1504 /// the two vector operands have swapped position.
1505 static void commuteMask(MutableArrayRef<int> Mask) {
1506 unsigned NumElems = Mask.size();
1507 for (unsigned i = 0; i != NumElems; ++i) {
1508 int idx = Mask[i];
1509 if (idx < 0)
1510 continue;
1511 else if (idx < (int)NumElems)
1512 Mask[i] = idx + NumElems;
1513 else
1514 Mask[i] = idx - NumElems;
1515 }
1516 }
1517
1518 static bool classof(const SDNode *N) {
1519 return N->getOpcode() == ISD::VECTOR_SHUFFLE;
1520 }
1521};
1522
1523class ConstantSDNode : public SDNode {
1524 friend class SelectionDAG;
1525
1526 const ConstantInt *Value;
1527
1528 ConstantSDNode(bool isTarget, bool isOpaque, const ConstantInt *val, EVT VT)
1529 : SDNode(isTarget ? ISD::TargetConstant : ISD::Constant, 0, DebugLoc(),
1530 getSDVTList(VT)),
1531 Value(val) {
1532 ConstantSDNodeBits.IsOpaque = isOpaque;
1533 }
1534
1535public:
1536 const ConstantInt *getConstantIntValue() const { return Value; }
1537 const APInt &getAPIntValue() const { return Value->getValue(); }
1538 uint64_t getZExtValue() const { return Value->getZExtValue(); }
1539 int64_t getSExtValue() const { return Value->getSExtValue(); }
1540 uint64_t getLimitedValue(uint64_t Limit = UINT64_MAX(18446744073709551615UL)) {
1541 return Value->getLimitedValue(Limit);
1542 }
1543 MaybeAlign getMaybeAlignValue() const { return Value->getMaybeAlignValue(); }
1544 Align getAlignValue() const { return Value->getAlignValue(); }
1545
1546 bool isOne() const { return Value->isOne(); }
1547 bool isNullValue() const { return Value->isZero(); }
1548 bool isAllOnesValue() const { return Value->isMinusOne(); }
1549
1550 bool isOpaque() const { return ConstantSDNodeBits.IsOpaque; }
1551
1552 static bool classof(const SDNode *N) {
1553 return N->getOpcode() == ISD::Constant ||
1554 N->getOpcode() == ISD::TargetConstant;
1555 }
1556};
1557
1558uint64_t SDNode::getConstantOperandVal(unsigned Num) const {
1559 return cast<ConstantSDNode>(getOperand(Num))->getZExtValue();
1560}
1561
1562const APInt &SDNode::getConstantOperandAPInt(unsigned Num) const {
1563 return cast<ConstantSDNode>(getOperand(Num))->getAPIntValue();
1564}
1565
1566class ConstantFPSDNode : public SDNode {
1567 friend class SelectionDAG;
1568
1569 const ConstantFP *Value;
1570
1571 ConstantFPSDNode(bool isTarget, const ConstantFP *val, EVT VT)
1572 : SDNode(isTarget ? ISD::TargetConstantFP : ISD::ConstantFP, 0,
1573 DebugLoc(), getSDVTList(VT)),
1574 Value(val) {}
1575
1576public:
1577 const APFloat& getValueAPF() const { return Value->getValueAPF(); }
1578 const ConstantFP *getConstantFPValue() const { return Value; }
1579
1580 /// Return true if the value is positive or negative zero.
1581 bool isZero() const { return Value->isZero(); }
1582
1583 /// Return true if the value is a NaN.
1584 bool isNaN() const { return Value->isNaN(); }
1585
1586 /// Return true if the value is an infinity
1587 bool isInfinity() const { return Value->isInfinity(); }
1588
1589 /// Return true if the value is negative.
1590 bool isNegative() const { return Value->isNegative(); }
1591
1592 /// We don't rely on operator== working on double values, as
1593 /// it returns true for things that are clearly not equal, like -0.0 and 0.0.
1594 /// As such, this method can be used to do an exact bit-for-bit comparison of
1595 /// two floating point values.
1596
1597 /// We leave the version with the double argument here because it's just so
1598 /// convenient to write "2.0" and the like. Without this function we'd
1599 /// have to duplicate its logic everywhere it's called.
1600 bool isExactlyValue(double V) const {
1601 return Value->getValueAPF().isExactlyValue(V);
1602 }
1603 bool isExactlyValue(const APFloat& V) const;
1604
1605 static bool isValueValidForType(EVT VT, const APFloat& Val);
1606
1607 static bool classof(const SDNode *N) {
1608 return N->getOpcode() == ISD::ConstantFP ||
1609 N->getOpcode() == ISD::TargetConstantFP;
1610 }
1611};
1612
1613/// Returns true if \p V is a constant integer zero.
1614bool isNullConstant(SDValue V);
1615
1616/// Returns true if \p V is an FP constant with a value of positive zero.
1617bool isNullFPConstant(SDValue V);
1618
1619/// Returns true if \p V is an integer constant with all bits set.
1620bool isAllOnesConstant(SDValue V);
1621
1622/// Returns true if \p V is a constant integer one.
1623bool isOneConstant(SDValue V);
1624
1625/// Return the non-bitcasted source operand of \p V if it exists.
1626/// If \p V is not a bitcasted value, it is returned as-is.
1627SDValue peekThroughBitcasts(SDValue V);
1628
1629/// Return the non-bitcasted and one-use source operand of \p V if it exists.
1630/// If \p V is not a bitcasted one-use value, it is returned as-is.
1631SDValue peekThroughOneUseBitcasts(SDValue V);
1632
1633/// Return the non-extracted vector source operand of \p V if it exists.
1634/// If \p V is not an extracted subvector, it is returned as-is.
1635SDValue peekThroughExtractSubvectors(SDValue V);
1636
1637/// Returns true if \p V is a bitwise not operation. Assumes that an all ones
1638/// constant is canonicalized to be operand 1.
1639bool isBitwiseNot(SDValue V, bool AllowUndefs = false);
1640
1641/// Returns the SDNode if it is a constant splat BuildVector or constant int.
1642ConstantSDNode *isConstOrConstSplat(SDValue N, bool AllowUndefs = false,
1643 bool AllowTruncation = false);
1644
1645/// Returns the SDNode if it is a demanded constant splat BuildVector or
1646/// constant int.
1647ConstantSDNode *isConstOrConstSplat(SDValue N, const APInt &DemandedElts,
1648 bool AllowUndefs = false,
1649 bool AllowTruncation = false);
1650
1651/// Returns the SDNode if it is a constant splat BuildVector or constant float.
1652ConstantFPSDNode *isConstOrConstSplatFP(SDValue N, bool AllowUndefs = false);
1653
1654/// Returns the SDNode if it is a demanded constant splat BuildVector or
1655/// constant float.
1656ConstantFPSDNode *isConstOrConstSplatFP(SDValue N, const APInt &DemandedElts,
1657 bool AllowUndefs = false);
1658
1659/// Return true if the value is a constant 0 integer or a splatted vector of
1660/// a constant 0 integer (with no undefs by default).
1661/// Build vector implicit truncation is not an issue for null values.
1662bool isNullOrNullSplat(SDValue V, bool AllowUndefs = false);
1663
1664/// Return true if the value is a constant 1 integer or a splatted vector of a
1665/// constant 1 integer (with no undefs).
1666/// Does not permit build vector implicit truncation.
1667bool isOneOrOneSplat(SDValue V);
1668
1669/// Return true if the value is a constant -1 integer or a splatted vector of a
1670/// constant -1 integer (with no undefs).
1671/// Does not permit build vector implicit truncation.
1672bool isAllOnesOrAllOnesSplat(SDValue V);
1673
1674class GlobalAddressSDNode : public SDNode {
1675 friend class SelectionDAG;
1676
1677 const GlobalValue *TheGlobal;
1678 int64_t Offset;
1679 unsigned TargetFlags;
1680
1681 GlobalAddressSDNode(unsigned Opc, unsigned Order, const DebugLoc &DL,
1682 const GlobalValue *GA, EVT VT, int64_t o,
1683 unsigned TF);
1684
1685public:
1686 const GlobalValue *getGlobal() const { return TheGlobal; }
1687 int64_t getOffset() const { return Offset; }
1688 unsigned getTargetFlags() const { return TargetFlags; }
1689 // Return the address space this GlobalAddress belongs to.
1690 unsigned getAddressSpace() const;
1691
1692 static bool classof(const SDNode *N) {
1693 return N->getOpcode() == ISD::GlobalAddress ||
1694 N->getOpcode() == ISD::TargetGlobalAddress ||
1695 N->getOpcode() == ISD::GlobalTLSAddress ||
1696 N->getOpcode() == ISD::TargetGlobalTLSAddress;
1697 }
1698};
1699
1700class FrameIndexSDNode : public SDNode {
1701 friend class SelectionDAG;
1702
1703 int FI;
1704
1705 FrameIndexSDNode(int fi, EVT VT, bool isTarg)
1706 : SDNode(isTarg ? ISD::TargetFrameIndex : ISD::FrameIndex,
1707 0, DebugLoc(), getSDVTList(VT)), FI(fi) {
1708 }
1709
1710public:
1711 int getIndex() const { return FI; }
1712
1713 static bool classof(const SDNode *N) {
1714 return N->getOpcode() == ISD::FrameIndex ||
1715 N->getOpcode() == ISD::TargetFrameIndex;
1716 }
1717};
1718
1719/// This SDNode is used for LIFETIME_START/LIFETIME_END values, which indicate
1720/// the offet and size that are started/ended in the underlying FrameIndex.
1721class LifetimeSDNode : public SDNode {
1722 friend class SelectionDAG;
1723 int64_t Size;
1724 int64_t Offset; // -1 if offset is unknown.
1725
1726 LifetimeSDNode(unsigned Opcode, unsigned Order, const DebugLoc &dl,
1727 SDVTList VTs, int64_t Size, int64_t Offset)
1728 : SDNode(Opcode, Order, dl, VTs), Size(Size), Offset(Offset) {}
1729public:
1730 int64_t getFrameIndex() const {
1731 return cast<FrameIndexSDNode>(getOperand(1))->getIndex();
1732 }
1733
1734 bool hasOffset() const { return Offset >= 0; }
1735 int64_t getOffset() const {
1736 assert(hasOffset() && "offset is unknown")((hasOffset() && "offset is unknown") ? static_cast<
void> (0) : __assert_fail ("hasOffset() && \"offset is unknown\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1736, __PRETTY_FUNCTION__))
;
1737 return Offset;
1738 }
1739 int64_t getSize() const {
1740 assert(hasOffset() && "offset is unknown")((hasOffset() && "offset is unknown") ? static_cast<
void> (0) : __assert_fail ("hasOffset() && \"offset is unknown\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1740, __PRETTY_FUNCTION__))
;
1741 return Size;
1742 }
1743
1744 // Methods to support isa and dyn_cast
1745 static bool classof(const SDNode *N) {
1746 return N->getOpcode() == ISD::LIFETIME_START ||
1747 N->getOpcode() == ISD::LIFETIME_END;
1748 }
1749};
1750
1751class JumpTableSDNode : public SDNode {
1752 friend class SelectionDAG;
1753
1754 int JTI;
1755 unsigned TargetFlags;
1756
1757 JumpTableSDNode(int jti, EVT VT, bool isTarg, unsigned TF)
1758 : SDNode(isTarg ? ISD::TargetJumpTable : ISD::JumpTable,
1759 0, DebugLoc(), getSDVTList(VT)), JTI(jti), TargetFlags(TF) {
1760 }
1761
1762public:
1763 int getIndex() const { return JTI; }
1764 unsigned getTargetFlags() const { return TargetFlags; }
1765
1766 static bool classof(const SDNode *N) {
1767 return N->getOpcode() == ISD::JumpTable ||
1768 N->getOpcode() == ISD::TargetJumpTable;
1769 }
1770};
1771
1772class ConstantPoolSDNode : public SDNode {
1773 friend class SelectionDAG;
1774
1775 union {
1776 const Constant *ConstVal;
1777 MachineConstantPoolValue *MachineCPVal;
1778 } Val;
1779 int Offset; // It's a MachineConstantPoolValue if top bit is set.
1780 Align Alignment; // Minimum alignment requirement of CP.
1781 unsigned TargetFlags;
1782
1783 ConstantPoolSDNode(bool isTarget, const Constant *c, EVT VT, int o,
1784 Align Alignment, unsigned TF)
1785 : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, 0,
1786 DebugLoc(), getSDVTList(VT)),
1787 Offset(o), Alignment(Alignment), TargetFlags(TF) {
1788 assert(Offset >= 0 && "Offset is too large")((Offset >= 0 && "Offset is too large") ? static_cast
<void> (0) : __assert_fail ("Offset >= 0 && \"Offset is too large\""
, "/build/llvm-toolchain-snapshot-12~++20200927111121+5811d723998/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1788, __PRETTY_FUNCTION__))
;
1789 Val.ConstVal = c;
1790 }
1791
1792 ConstantPoolSDNode(bool isTarget, MachineConstantPoolValue *v, EVT VT, int o,
1793 Align Alignment, unsigned TF)
1794 : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, 0,
1795