Bug Summary

File: llvm/include/llvm/CodeGen/SelectionDAGNodes.h
Warning: line 1160, column 10
Called C++ object pointer is null
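
The diagnostic class "Called C++ object pointer is null" comes from the analyzer's core checkers (core.CallAndMessage): it is reported when a non-static member function is invoked through an object pointer that is null on at least one feasible path the analyzer explored. As a minimal, hypothetical sketch of the pattern (illustrative only, not the flagged LLVM code path at SelectionDAGNodes.h:1160):

// Hypothetical example of this warning class; all names are illustrative.
struct Node {
  unsigned getOpcode() const { return Opcode; }
  unsigned Opcode = 0;
};

unsigned opcodeOrZero(const Node *N, bool HasNode) {
  const Node *P = nullptr;
  if (HasNode)
    P = N;
  // On the path where HasNode is false, P is still null here, so the
  // analyzer reports "Called C++ object pointer is null" for this call.
  return P->getOpcode();
}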

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name SelectionDAG.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/build-llvm -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/CodeGen/SelectionDAG -I /build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/llvm/lib/CodeGen/SelectionDAG -I include -I /build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/build-llvm=build-llvm -fmacro-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/= -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/build-llvm=build-llvm -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/= -O3 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/build-llvm=build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/= -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2022-01-16-232930-107970-1 -x c++ /build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp

/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp

1//===- SelectionDAG.cpp - Implement the SelectionDAG data structures ------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This implements the SelectionDAG class.
10//
11//===----------------------------------------------------------------------===//
12
13#include "llvm/CodeGen/SelectionDAG.h"
14#include "SDNodeDbgValue.h"
15#include "llvm/ADT/APFloat.h"
16#include "llvm/ADT/APInt.h"
17#include "llvm/ADT/APSInt.h"
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/BitVector.h"
20#include "llvm/ADT/FoldingSet.h"
21#include "llvm/ADT/None.h"
22#include "llvm/ADT/STLExtras.h"
23#include "llvm/ADT/SmallPtrSet.h"
24#include "llvm/ADT/SmallVector.h"
25#include "llvm/ADT/Triple.h"
26#include "llvm/ADT/Twine.h"
27#include "llvm/Analysis/BlockFrequencyInfo.h"
28#include "llvm/Analysis/MemoryLocation.h"
29#include "llvm/Analysis/ProfileSummaryInfo.h"
30#include "llvm/Analysis/ValueTracking.h"
31#include "llvm/CodeGen/Analysis.h"
32#include "llvm/CodeGen/FunctionLoweringInfo.h"
33#include "llvm/CodeGen/ISDOpcodes.h"
34#include "llvm/CodeGen/MachineBasicBlock.h"
35#include "llvm/CodeGen/MachineConstantPool.h"
36#include "llvm/CodeGen/MachineFrameInfo.h"
37#include "llvm/CodeGen/MachineFunction.h"
38#include "llvm/CodeGen/MachineMemOperand.h"
39#include "llvm/CodeGen/RuntimeLibcalls.h"
40#include "llvm/CodeGen/SelectionDAGAddressAnalysis.h"
41#include "llvm/CodeGen/SelectionDAGNodes.h"
42#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
43#include "llvm/CodeGen/TargetFrameLowering.h"
44#include "llvm/CodeGen/TargetLowering.h"
45#include "llvm/CodeGen/TargetRegisterInfo.h"
46#include "llvm/CodeGen/TargetSubtargetInfo.h"
47#include "llvm/CodeGen/ValueTypes.h"
48#include "llvm/IR/Constant.h"
49#include "llvm/IR/Constants.h"
50#include "llvm/IR/DataLayout.h"
51#include "llvm/IR/DebugInfoMetadata.h"
52#include "llvm/IR/DebugLoc.h"
53#include "llvm/IR/DerivedTypes.h"
54#include "llvm/IR/Function.h"
55#include "llvm/IR/GlobalValue.h"
56#include "llvm/IR/Metadata.h"
57#include "llvm/IR/Type.h"
58#include "llvm/IR/Value.h"
59#include "llvm/Support/Casting.h"
60#include "llvm/Support/CodeGen.h"
61#include "llvm/Support/Compiler.h"
62#include "llvm/Support/Debug.h"
63#include "llvm/Support/ErrorHandling.h"
64#include "llvm/Support/KnownBits.h"
65#include "llvm/Support/MachineValueType.h"
66#include "llvm/Support/ManagedStatic.h"
67#include "llvm/Support/MathExtras.h"
68#include "llvm/Support/Mutex.h"
69#include "llvm/Support/raw_ostream.h"
70#include "llvm/Target/TargetMachine.h"
71#include "llvm/Target/TargetOptions.h"
72#include "llvm/Transforms/Utils/SizeOpts.h"
73#include <algorithm>
74#include <cassert>
75#include <cstdint>
76#include <cstdlib>
77#include <limits>
78#include <set>
79#include <string>
80#include <utility>
81#include <vector>
82
83using namespace llvm;
84
85/// makeVTList - Return an instance of the SDVTList struct initialized with the
86/// specified members.
87static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
88 SDVTList Res = {VTs, NumVTs};
89 return Res;
90}
91
92// Default null implementations of the callbacks.
93void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
94void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}
95void SelectionDAG::DAGUpdateListener::NodeInserted(SDNode *) {}
96
97void SelectionDAG::DAGNodeDeletedListener::anchor() {}
98
99#define DEBUG_TYPE "selectiondag"
100
101static cl::opt<bool> EnableMemCpyDAGOpt("enable-memcpy-dag-opt",
102 cl::Hidden, cl::init(true),
103 cl::desc("Gang up loads and stores generated by inlining of memcpy"));
104
105static cl::opt<int> MaxLdStGlue("ldstmemcpy-glue-max",
106 cl::desc("Number limit for gluing ld/st of memcpy."),
107 cl::Hidden, cl::init(0));
108
109static void NewSDValueDbgMsg(SDValue V, StringRef Msg, SelectionDAG *G) {
110 LLVM_DEBUG(dbgs() << Msg; V.getNode()->dump(G););
111}
112
113//===----------------------------------------------------------------------===//
114// ConstantFPSDNode Class
115//===----------------------------------------------------------------------===//
116
117/// isExactlyValue - We don't rely on operator== working on double values, as
118/// it returns true for things that are clearly not equal, like -0.0 and 0.0.
119/// As such, this method can be used to do an exact bit-for-bit comparison of
120/// two floating point values.
121bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
122 return getValueAPF().bitwiseIsEqual(V);
123}
124
125bool ConstantFPSDNode::isValueValidForType(EVT VT,
126 const APFloat& Val) {
127 assert(VT.isFloatingPoint() && "Can only convert between FP types");
128
129 // convert modifies in place, so make a copy.
130 APFloat Val2 = APFloat(Val);
131 bool losesInfo;
132 (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
133 APFloat::rmNearestTiesToEven,
134 &losesInfo);
135 return !losesInfo;
136}
137
138//===----------------------------------------------------------------------===//
139// ISD Namespace
140//===----------------------------------------------------------------------===//
141
142bool ISD::isConstantSplatVector(const SDNode *N, APInt &SplatVal) {
143 if (N->getOpcode() == ISD::SPLAT_VECTOR) {
144 unsigned EltSize =
145 N->getValueType(0).getVectorElementType().getSizeInBits();
146 if (auto *Op0 = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
147 SplatVal = Op0->getAPIntValue().truncOrSelf(EltSize);
148 return true;
149 }
150 if (auto *Op0 = dyn_cast<ConstantFPSDNode>(N->getOperand(0))) {
151 SplatVal = Op0->getValueAPF().bitcastToAPInt().truncOrSelf(EltSize);
152 return true;
153 }
154 }
155
156 auto *BV = dyn_cast<BuildVectorSDNode>(N);
157 if (!BV)
158 return false;
159
160 APInt SplatUndef;
161 unsigned SplatBitSize;
162 bool HasUndefs;
163 unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
164 return BV->isConstantSplat(SplatVal, SplatUndef, SplatBitSize, HasUndefs,
165 EltSize) &&
166 EltSize == SplatBitSize;
167}
168
169// FIXME: AllOnes and AllZeros duplicate a lot of code. Could these be
170// specializations of the more general isConstantSplatVector()?
171
172bool ISD::isConstantSplatVectorAllOnes(const SDNode *N, bool BuildVectorOnly) {
173 // Look through a bit convert.
174 while (N->getOpcode() == ISD::BITCAST)
175 N = N->getOperand(0).getNode();
176
177 if (!BuildVectorOnly && N->getOpcode() == ISD::SPLAT_VECTOR) {
178 APInt SplatVal;
179 return isConstantSplatVector(N, SplatVal) && SplatVal.isAllOnes();
180 }
181
182 if (N->getOpcode() != ISD::BUILD_VECTOR) return false;
183
184 unsigned i = 0, e = N->getNumOperands();
185
186 // Skip over all of the undef values.
187 while (i != e && N->getOperand(i).isUndef())
188 ++i;
189
190 // Do not accept an all-undef vector.
191 if (i == e) return false;
192
193 // Do not accept build_vectors that aren't all constants or which have non-~0
194 // elements. We have to be a bit careful here, as the type of the constant
195 // may not be the same as the type of the vector elements due to type
196 // legalization (the elements are promoted to a legal type for the target and
197 // a vector of a type may be legal when the base element type is not).
198 // We only want to check enough bits to cover the vector elements, because
199 // we care if the resultant vector is all ones, not whether the individual
200 // constants are.
201 SDValue NotZero = N->getOperand(i);
202 unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
203 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
204 if (CN->getAPIntValue().countTrailingOnes() < EltSize)
205 return false;
206 } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
207 if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
208 return false;
209 } else
210 return false;
211
212 // Okay, we have at least one ~0 value, check to see if the rest match or are
213 // undefs. Even with the above element type twiddling, this should be OK, as
214 // the same type legalization should have applied to all the elements.
215 for (++i; i != e; ++i)
216 if (N->getOperand(i) != NotZero && !N->getOperand(i).isUndef())
217 return false;
218 return true;
219}
220
221bool ISD::isConstantSplatVectorAllZeros(const SDNode *N, bool BuildVectorOnly) {
222 // Look through a bit convert.
223 while (N->getOpcode() == ISD::BITCAST)
224 N = N->getOperand(0).getNode();
225
226 if (!BuildVectorOnly && N->getOpcode() == ISD::SPLAT_VECTOR) {
227 APInt SplatVal;
228 return isConstantSplatVector(N, SplatVal) && SplatVal.isZero();
229 }
230
231 if (N->getOpcode() != ISD::BUILD_VECTOR) return false;
232
233 bool IsAllUndef = true;
234 for (const SDValue &Op : N->op_values()) {
235 if (Op.isUndef())
236 continue;
237 IsAllUndef = false;
238 // Do not accept build_vectors that aren't all constants or which have non-0
239 // elements. We have to be a bit careful here, as the type of the constant
240 // may not be the same as the type of the vector elements due to type
241 // legalization (the elements are promoted to a legal type for the target
242 // and a vector of a type may be legal when the base element type is not).
243 // We only want to check enough bits to cover the vector elements, because
244 // we care if the resultant vector is all zeros, not whether the individual
245 // constants are.
246 unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
247 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op)) {
248 if (CN->getAPIntValue().countTrailingZeros() < EltSize)
249 return false;
250 } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Op)) {
251 if (CFPN->getValueAPF().bitcastToAPInt().countTrailingZeros() < EltSize)
252 return false;
253 } else
254 return false;
255 }
256
257 // Do not accept an all-undef vector.
258 if (IsAllUndef)
259 return false;
260 return true;
261}
262
263bool ISD::isBuildVectorAllOnes(const SDNode *N) {
264 return isConstantSplatVectorAllOnes(N, /*BuildVectorOnly*/ true);
265}
266
267bool ISD::isBuildVectorAllZeros(const SDNode *N) {
268 return isConstantSplatVectorAllZeros(N, /*BuildVectorOnly*/ true);
269}
270
271bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) {
272 if (N->getOpcode() != ISD::BUILD_VECTOR)
273 return false;
274
275 for (const SDValue &Op : N->op_values()) {
276 if (Op.isUndef())
277 continue;
278 if (!isa<ConstantSDNode>(Op))
279 return false;
280 }
281 return true;
282}
283
284bool ISD::isBuildVectorOfConstantFPSDNodes(const SDNode *N) {
285 if (N->getOpcode() != ISD::BUILD_VECTOR)
286 return false;
287
288 for (const SDValue &Op : N->op_values()) {
289 if (Op.isUndef())
290 continue;
291 if (!isa<ConstantFPSDNode>(Op))
292 return false;
293 }
294 return true;
295}
296
297bool ISD::allOperandsUndef(const SDNode *N) {
298 // Return false if the node has no operands.
299 // This is "logically inconsistent" with the definition of "all" but
300 // is probably the desired behavior.
301 if (N->getNumOperands() == 0)
302 return false;
303 return all_of(N->op_values(), [](SDValue Op) { return Op.isUndef(); });
304}
305
306bool ISD::matchUnaryPredicate(SDValue Op,
307 std::function<bool(ConstantSDNode *)> Match,
308 bool AllowUndefs) {
309 // FIXME: Add support for scalar UNDEF cases?
310 if (auto *Cst = dyn_cast<ConstantSDNode>(Op))
311 return Match(Cst);
312
313 // FIXME: Add support for vector UNDEF cases?
314 if (ISD::BUILD_VECTOR != Op.getOpcode() &&
315 ISD::SPLAT_VECTOR != Op.getOpcode())
316 return false;
317
318 EVT SVT = Op.getValueType().getScalarType();
319 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
320 if (AllowUndefs && Op.getOperand(i).isUndef()) {
321 if (!Match(nullptr))
322 return false;
323 continue;
324 }
325
326 auto *Cst = dyn_cast<ConstantSDNode>(Op.getOperand(i));
327 if (!Cst || Cst->getValueType(0) != SVT || !Match(Cst))
328 return false;
329 }
330 return true;
331}
332
333bool ISD::matchBinaryPredicate(
334 SDValue LHS, SDValue RHS,
335 std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match,
336 bool AllowUndefs, bool AllowTypeMismatch) {
337 if (!AllowTypeMismatch && LHS.getValueType() != RHS.getValueType())
338 return false;
339
340 // TODO: Add support for scalar UNDEF cases?
341 if (auto *LHSCst = dyn_cast<ConstantSDNode>(LHS))
342 if (auto *RHSCst = dyn_cast<ConstantSDNode>(RHS))
343 return Match(LHSCst, RHSCst);
344
345 // TODO: Add support for vector UNDEF cases?
346 if (LHS.getOpcode() != RHS.getOpcode() ||
347 (LHS.getOpcode() != ISD::BUILD_VECTOR &&
348 LHS.getOpcode() != ISD::SPLAT_VECTOR))
349 return false;
350
351 EVT SVT = LHS.getValueType().getScalarType();
352 for (unsigned i = 0, e = LHS.getNumOperands(); i != e; ++i) {
353 SDValue LHSOp = LHS.getOperand(i);
354 SDValue RHSOp = RHS.getOperand(i);
355 bool LHSUndef = AllowUndefs && LHSOp.isUndef();
356 bool RHSUndef = AllowUndefs && RHSOp.isUndef();
357 auto *LHSCst = dyn_cast<ConstantSDNode>(LHSOp);
358 auto *RHSCst = dyn_cast<ConstantSDNode>(RHSOp);
359 if ((!LHSCst && !LHSUndef) || (!RHSCst && !RHSUndef))
360 return false;
361 if (!AllowTypeMismatch && (LHSOp.getValueType() != SVT ||
362 LHSOp.getValueType() != RHSOp.getValueType()))
363 return false;
364 if (!Match(LHSCst, RHSCst))
365 return false;
366 }
367 return true;
368}
369
370ISD::NodeType ISD::getVecReduceBaseOpcode(unsigned VecReduceOpcode) {
371 switch (VecReduceOpcode) {
372 default:
373 llvm_unreachable("Expected VECREDUCE opcode")::llvm::llvm_unreachable_internal("Expected VECREDUCE opcode"
, "llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp", 373)
;
374 case ISD::VECREDUCE_FADD:
375 case ISD::VECREDUCE_SEQ_FADD:
376 return ISD::FADD;
377 case ISD::VECREDUCE_FMUL:
378 case ISD::VECREDUCE_SEQ_FMUL:
379 return ISD::FMUL;
380 case ISD::VECREDUCE_ADD:
381 return ISD::ADD;
382 case ISD::VECREDUCE_MUL:
383 return ISD::MUL;
384 case ISD::VECREDUCE_AND:
385 return ISD::AND;
386 case ISD::VECREDUCE_OR:
387 return ISD::OR;
388 case ISD::VECREDUCE_XOR:
389 return ISD::XOR;
390 case ISD::VECREDUCE_SMAX:
391 return ISD::SMAX;
392 case ISD::VECREDUCE_SMIN:
393 return ISD::SMIN;
394 case ISD::VECREDUCE_UMAX:
395 return ISD::UMAX;
396 case ISD::VECREDUCE_UMIN:
397 return ISD::UMIN;
398 case ISD::VECREDUCE_FMAX:
399 return ISD::FMAXNUM;
400 case ISD::VECREDUCE_FMIN:
401 return ISD::FMINNUM;
402 }
403}
404
405bool ISD::isVPOpcode(unsigned Opcode) {
406 switch (Opcode) {
407 default:
408 return false;
409#define BEGIN_REGISTER_VP_SDNODE(VPSD, ...) \
410 case ISD::VPSD: \
411 return true;
412#include "llvm/IR/VPIntrinsics.def"
413 }
414}
415
416bool ISD::isVPBinaryOp(unsigned Opcode) {
417 switch (Opcode) {
418 default:
419 break;
420#define BEGIN_REGISTER_VP_SDNODE(VPSD, ...) case ISD::VPSD:
421#define VP_PROPERTY_BINARYOP return true;
422#define END_REGISTER_VP_SDNODE(VPSD) break;
423#include "llvm/IR/VPIntrinsics.def"
424 }
425 return false;
426}
427
428bool ISD::isVPReduction(unsigned Opcode) {
429 switch (Opcode) {
430 default:
431 break;
432#define BEGIN_REGISTER_VP_SDNODE(VPSD, ...) case ISD::VPSD:
433#define VP_PROPERTY_REDUCTION(STARTPOS, ...) return true;
434#define END_REGISTER_VP_SDNODE(VPSD) break;
435#include "llvm/IR/VPIntrinsics.def"
436 }
437 return false;
438}
439
440/// The operand position of the vector mask.
441Optional<unsigned> ISD::getVPMaskIdx(unsigned Opcode) {
442 switch (Opcode) {
443 default:
444 return None;
445#define BEGIN_REGISTER_VP_SDNODE(VPSD, LEGALPOS, TDNAME, MASKPOS, ...) \
446 case ISD::VPSD: \
447 return MASKPOS;
448#include "llvm/IR/VPIntrinsics.def"
449 }
450}
451
452/// The operand position of the explicit vector length parameter.
453Optional<unsigned> ISD::getVPExplicitVectorLengthIdx(unsigned Opcode) {
454 switch (Opcode) {
455 default:
456 return None;
457#define BEGIN_REGISTER_VP_SDNODE(VPSD, LEGALPOS, TDNAME, MASKPOS, EVLPOS) \
458 case ISD::VPSD: \
459 return EVLPOS;
460#include "llvm/IR/VPIntrinsics.def"
461 }
462}
463
464ISD::NodeType ISD::getExtForLoadExtType(bool IsFP, ISD::LoadExtType ExtType) {
465 switch (ExtType) {
466 case ISD::EXTLOAD:
467 return IsFP ? ISD::FP_EXTEND : ISD::ANY_EXTEND;
468 case ISD::SEXTLOAD:
469 return ISD::SIGN_EXTEND;
470 case ISD::ZEXTLOAD:
471 return ISD::ZERO_EXTEND;
472 default:
473 break;
474 }
475
476 llvm_unreachable("Invalid LoadExtType")::llvm::llvm_unreachable_internal("Invalid LoadExtType", "llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp"
, 476)
;
477}
478
479ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
480 // To perform this operation, we just need to swap the L and G bits of the
481 // operation.
482 unsigned OldL = (Operation >> 2) & 1;
483 unsigned OldG = (Operation >> 1) & 1;
484 return ISD::CondCode((Operation & ~6) | // Keep the N, U, E bits
485 (OldL << 1) | // New G bit
486 (OldG << 2)); // New L bit.
487}
488
489static ISD::CondCode getSetCCInverseImpl(ISD::CondCode Op, bool isIntegerLike) {
490 unsigned Operation = Op;
491 if (isIntegerLike)
492 Operation ^= 7; // Flip L, G, E bits, but not U.
493 else
494 Operation ^= 15; // Flip all of the condition bits.
495
496 if (Operation > ISD::SETTRUE2)
497 Operation &= ~8; // Don't let N and U bits get set.
498
499 return ISD::CondCode(Operation);
500}
501
502ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, EVT Type) {
503 return getSetCCInverseImpl(Op, Type.isInteger());
504}
505
506ISD::CondCode ISD::GlobalISel::getSetCCInverse(ISD::CondCode Op,
507 bool isIntegerLike) {
508 return getSetCCInverseImpl(Op, isIntegerLike);
509}
510
511/// For an integer comparison, return 1 if the comparison is a signed operation
512/// and 2 if the result is an unsigned comparison. Return zero if the operation
513/// does not depend on the sign of the input (setne and seteq).
514static int isSignedOp(ISD::CondCode Opcode) {
515 switch (Opcode) {
516 default: llvm_unreachable("Illegal integer setcc operation!");
517 case ISD::SETEQ:
518 case ISD::SETNE: return 0;
519 case ISD::SETLT:
520 case ISD::SETLE:
521 case ISD::SETGT:
522 case ISD::SETGE: return 1;
523 case ISD::SETULT:
524 case ISD::SETULE:
525 case ISD::SETUGT:
526 case ISD::SETUGE: return 2;
527 }
528}
529
530ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
531 EVT Type) {
532 bool IsInteger = Type.isInteger();
533 if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
534 // Cannot fold a signed integer setcc with an unsigned integer setcc.
535 return ISD::SETCC_INVALID;
536
537 unsigned Op = Op1 | Op2; // Combine all of the condition bits.
538
539 // If the N and U bits get set, then the resultant comparison DOES suddenly
540 // care about orderedness, and it is true when ordered.
541 if (Op > ISD::SETTRUE2)
542 Op &= ~16; // Clear the U bit if the N bit is set.
543
544 // Canonicalize illegal integer setcc's.
545 if (IsInteger && Op == ISD::SETUNE) // e.g. SETUGT | SETULT
546 Op = ISD::SETNE;
547
548 return ISD::CondCode(Op);
549}
550
551ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
552 EVT Type) {
553 bool IsInteger = Type.isInteger();
554 if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
555 // Cannot fold a signed setcc with an unsigned setcc.
556 return ISD::SETCC_INVALID;
557
558 // Combine all of the condition bits.
559 ISD::CondCode Result = ISD::CondCode(Op1 & Op2);
560
561 // Canonicalize illegal integer setcc's.
562 if (IsInteger) {
563 switch (Result) {
564 default: break;
565 case ISD::SETUO : Result = ISD::SETFALSE; break; // SETUGT & SETULT
566 case ISD::SETOEQ: // SETEQ & SETU[LG]E
567 case ISD::SETUEQ: Result = ISD::SETEQ ; break; // SETUGE & SETULE
568 case ISD::SETOLT: Result = ISD::SETULT ; break; // SETULT & SETNE
569 case ISD::SETOGT: Result = ISD::SETUGT ; break; // SETUGT & SETNE
570 }
571 }
572
573 return Result;
574}
575
576//===----------------------------------------------------------------------===//
577// SDNode Profile Support
578//===----------------------------------------------------------------------===//
579
580/// AddNodeIDOpcode - Add the node opcode to the NodeID data.
581static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC) {
582 ID.AddInteger(OpC);
583}
584
585/// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
586/// solely with their pointer.
587static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
588 ID.AddPointer(VTList.VTs);
589}
590
591/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
592static void AddNodeIDOperands(FoldingSetNodeID &ID,
593 ArrayRef<SDValue> Ops) {
594 for (auto& Op : Ops) {
595 ID.AddPointer(Op.getNode());
596 ID.AddInteger(Op.getResNo());
597 }
598}
599
600/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
601static void AddNodeIDOperands(FoldingSetNodeID &ID,
602 ArrayRef<SDUse> Ops) {
603 for (auto& Op : Ops) {
604 ID.AddPointer(Op.getNode());
605 ID.AddInteger(Op.getResNo());
606 }
607}
608
609static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned short OpC,
610 SDVTList VTList, ArrayRef<SDValue> OpList) {
611 AddNodeIDOpcode(ID, OpC);
612 AddNodeIDValueTypes(ID, VTList);
613 AddNodeIDOperands(ID, OpList);
614}
615
616/// If this is an SDNode with special info, add this info to the NodeID data.
617static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
618 switch (N->getOpcode()) {
619 case ISD::TargetExternalSymbol:
620 case ISD::ExternalSymbol:
621 case ISD::MCSymbol:
622 llvm_unreachable("Should only be used on nodes with operands")::llvm::llvm_unreachable_internal("Should only be used on nodes with operands"
, "llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp", 622)
;
623 default: break; // Normal nodes don't need extra info.
624 case ISD::TargetConstant:
625 case ISD::Constant: {
626 const ConstantSDNode *C = cast<ConstantSDNode>(N);
627 ID.AddPointer(C->getConstantIntValue());
628 ID.AddBoolean(C->isOpaque());
629 break;
630 }
631 case ISD::TargetConstantFP:
632 case ISD::ConstantFP:
633 ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
634 break;
635 case ISD::TargetGlobalAddress:
636 case ISD::GlobalAddress:
637 case ISD::TargetGlobalTLSAddress:
638 case ISD::GlobalTLSAddress: {
639 const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
640 ID.AddPointer(GA->getGlobal());
641 ID.AddInteger(GA->getOffset());
642 ID.AddInteger(GA->getTargetFlags());
643 break;
644 }
645 case ISD::BasicBlock:
646 ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
647 break;
648 case ISD::Register:
649 ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
650 break;
651 case ISD::RegisterMask:
652 ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
653 break;
654 case ISD::SRCVALUE:
655 ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
656 break;
657 case ISD::FrameIndex:
658 case ISD::TargetFrameIndex:
659 ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
660 break;
661 case ISD::LIFETIME_START:
662 case ISD::LIFETIME_END:
663 if (cast<LifetimeSDNode>(N)->hasOffset()) {
664 ID.AddInteger(cast<LifetimeSDNode>(N)->getSize());
665 ID.AddInteger(cast<LifetimeSDNode>(N)->getOffset());
666 }
667 break;
668 case ISD::PSEUDO_PROBE:
669 ID.AddInteger(cast<PseudoProbeSDNode>(N)->getGuid());
670 ID.AddInteger(cast<PseudoProbeSDNode>(N)->getIndex());
671 ID.AddInteger(cast<PseudoProbeSDNode>(N)->getAttributes());
672 break;
673 case ISD::JumpTable:
674 case ISD::TargetJumpTable:
675 ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
676 ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
677 break;
678 case ISD::ConstantPool:
679 case ISD::TargetConstantPool: {
680 const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
681 ID.AddInteger(CP->getAlign().value());
682 ID.AddInteger(CP->getOffset());
683 if (CP->isMachineConstantPoolEntry())
684 CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
685 else
686 ID.AddPointer(CP->getConstVal());
687 ID.AddInteger(CP->getTargetFlags());
688 break;
689 }
690 case ISD::TargetIndex: {
691 const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
692 ID.AddInteger(TI->getIndex());
693 ID.AddInteger(TI->getOffset());
694 ID.AddInteger(TI->getTargetFlags());
695 break;
696 }
697 case ISD::LOAD: {
698 const LoadSDNode *LD = cast<LoadSDNode>(N);
699 ID.AddInteger(LD->getMemoryVT().getRawBits());
700 ID.AddInteger(LD->getRawSubclassData());
701 ID.AddInteger(LD->getPointerInfo().getAddrSpace());
702 break;
703 }
704 case ISD::STORE: {
705 const StoreSDNode *ST = cast<StoreSDNode>(N);
706 ID.AddInteger(ST->getMemoryVT().getRawBits());
707 ID.AddInteger(ST->getRawSubclassData());
708 ID.AddInteger(ST->getPointerInfo().getAddrSpace());
709 break;
710 }
711 case ISD::VP_LOAD: {
712 const VPLoadSDNode *ELD = cast<VPLoadSDNode>(N);
713 ID.AddInteger(ELD->getMemoryVT().getRawBits());
714 ID.AddInteger(ELD->getRawSubclassData());
715 ID.AddInteger(ELD->getPointerInfo().getAddrSpace());
716 break;
717 }
718 case ISD::VP_STORE: {
719 const VPStoreSDNode *EST = cast<VPStoreSDNode>(N);
720 ID.AddInteger(EST->getMemoryVT().getRawBits());
721 ID.AddInteger(EST->getRawSubclassData());
722 ID.AddInteger(EST->getPointerInfo().getAddrSpace());
723 break;
724 }
725 case ISD::VP_GATHER: {
726 const VPGatherSDNode *EG = cast<VPGatherSDNode>(N);
727 ID.AddInteger(EG->getMemoryVT().getRawBits());
728 ID.AddInteger(EG->getRawSubclassData());
729 ID.AddInteger(EG->getPointerInfo().getAddrSpace());
730 break;
731 }
732 case ISD::VP_SCATTER: {
733 const VPScatterSDNode *ES = cast<VPScatterSDNode>(N);
734 ID.AddInteger(ES->getMemoryVT().getRawBits());
735 ID.AddInteger(ES->getRawSubclassData());
736 ID.AddInteger(ES->getPointerInfo().getAddrSpace());
737 break;
738 }
739 case ISD::MLOAD: {
740 const MaskedLoadSDNode *MLD = cast<MaskedLoadSDNode>(N);
741 ID.AddInteger(MLD->getMemoryVT().getRawBits());
742 ID.AddInteger(MLD->getRawSubclassData());
743 ID.AddInteger(MLD->getPointerInfo().getAddrSpace());
744 break;
745 }
746 case ISD::MSTORE: {
747 const MaskedStoreSDNode *MST = cast<MaskedStoreSDNode>(N);
748 ID.AddInteger(MST->getMemoryVT().getRawBits());
749 ID.AddInteger(MST->getRawSubclassData());
750 ID.AddInteger(MST->getPointerInfo().getAddrSpace());
751 break;
752 }
753 case ISD::MGATHER: {
754 const MaskedGatherSDNode *MG = cast<MaskedGatherSDNode>(N);
755 ID.AddInteger(MG->getMemoryVT().getRawBits());
756 ID.AddInteger(MG->getRawSubclassData());
757 ID.AddInteger(MG->getPointerInfo().getAddrSpace());
758 break;
759 }
760 case ISD::MSCATTER: {
761 const MaskedScatterSDNode *MS = cast<MaskedScatterSDNode>(N);
762 ID.AddInteger(MS->getMemoryVT().getRawBits());
763 ID.AddInteger(MS->getRawSubclassData());
764 ID.AddInteger(MS->getPointerInfo().getAddrSpace());
765 break;
766 }
767 case ISD::ATOMIC_CMP_SWAP:
768 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
769 case ISD::ATOMIC_SWAP:
770 case ISD::ATOMIC_LOAD_ADD:
771 case ISD::ATOMIC_LOAD_SUB:
772 case ISD::ATOMIC_LOAD_AND:
773 case ISD::ATOMIC_LOAD_CLR:
774 case ISD::ATOMIC_LOAD_OR:
775 case ISD::ATOMIC_LOAD_XOR:
776 case ISD::ATOMIC_LOAD_NAND:
777 case ISD::ATOMIC_LOAD_MIN:
778 case ISD::ATOMIC_LOAD_MAX:
779 case ISD::ATOMIC_LOAD_UMIN:
780 case ISD::ATOMIC_LOAD_UMAX:
781 case ISD::ATOMIC_LOAD:
782 case ISD::ATOMIC_STORE: {
783 const AtomicSDNode *AT = cast<AtomicSDNode>(N);
784 ID.AddInteger(AT->getMemoryVT().getRawBits());
785 ID.AddInteger(AT->getRawSubclassData());
786 ID.AddInteger(AT->getPointerInfo().getAddrSpace());
787 break;
788 }
789 case ISD::PREFETCH: {
790 const MemSDNode *PF = cast<MemSDNode>(N);
791 ID.AddInteger(PF->getPointerInfo().getAddrSpace());
792 break;
793 }
794 case ISD::VECTOR_SHUFFLE: {
795 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
796 for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
797 i != e; ++i)
798 ID.AddInteger(SVN->getMaskElt(i));
799 break;
800 }
801 case ISD::TargetBlockAddress:
802 case ISD::BlockAddress: {
803 const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
804 ID.AddPointer(BA->getBlockAddress());
805 ID.AddInteger(BA->getOffset());
806 ID.AddInteger(BA->getTargetFlags());
807 break;
808 }
809 } // end switch (N->getOpcode())
810
811 // Target specific memory nodes could also have address spaces to check.
812 if (N->isTargetMemoryOpcode())
813 ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
814}
815
816/// AddNodeIDNode - Generic routine for adding a nodes info to the NodeID
817/// data.
818static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
819 AddNodeIDOpcode(ID, N->getOpcode());
820 // Add the return value info.
821 AddNodeIDValueTypes(ID, N->getVTList());
822 // Add the operand info.
823 AddNodeIDOperands(ID, N->ops());
824
825 // Handle SDNode leafs with special info.
826 AddNodeIDCustom(ID, N);
827}
828
829//===----------------------------------------------------------------------===//
830// SelectionDAG Class
831//===----------------------------------------------------------------------===//
832
833/// doNotCSE - Return true if CSE should not be performed for this node.
834static bool doNotCSE(SDNode *N) {
835 if (N->getValueType(0) == MVT::Glue)
836 return true; // Never CSE anything that produces a flag.
837
838 switch (N->getOpcode()) {
839 default: break;
840 case ISD::HANDLENODE:
841 case ISD::EH_LABEL:
842 return true; // Never CSE these nodes.
843 }
844
845 // Check that remaining values produced are not flags.
846 for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
847 if (N->getValueType(i) == MVT::Glue)
848 return true; // Never CSE anything that produces a flag.
849
850 return false;
851}
852
853/// RemoveDeadNodes - This method deletes all unreachable nodes in the
854/// SelectionDAG.
855void SelectionDAG::RemoveDeadNodes() {
856 // Create a dummy node (which is not added to allnodes), that adds a reference
857 // to the root node, preventing it from being deleted.
858 HandleSDNode Dummy(getRoot());
859
860 SmallVector<SDNode*, 128> DeadNodes;
861
862 // Add all obviously-dead nodes to the DeadNodes worklist.
863 for (SDNode &Node : allnodes())
864 if (Node.use_empty())
865 DeadNodes.push_back(&Node);
866
867 RemoveDeadNodes(DeadNodes);
868
869 // If the root changed (e.g. it was a dead load, update the root).
870 setRoot(Dummy.getValue());
871}
872
873/// RemoveDeadNodes - This method deletes the unreachable nodes in the
874/// given list, and any nodes that become unreachable as a result.
875void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {
876
877 // Process the worklist, deleting the nodes and adding their uses to the
878 // worklist.
879 while (!DeadNodes.empty()) {
880 SDNode *N = DeadNodes.pop_back_val();
881 // Skip to next node if we've already managed to delete the node. This could
882 // happen if replacing a node causes a node previously added to the node to
883 // be deleted.
884 if (N->getOpcode() == ISD::DELETED_NODE)
885 continue;
886
887 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
888 DUL->NodeDeleted(N, nullptr);
889
890 // Take the node out of the appropriate CSE map.
891 RemoveNodeFromCSEMaps(N);
892
893 // Next, brutally remove the operand list. This is safe to do, as there are
894 // no cycles in the graph.
895 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
896 SDUse &Use = *I++;
897 SDNode *Operand = Use.getNode();
898 Use.set(SDValue());
899
900 // Now that we removed this operand, see if there are no uses of it left.
901 if (Operand->use_empty())
902 DeadNodes.push_back(Operand);
903 }
904
905 DeallocateNode(N);
906 }
907}
908
909void SelectionDAG::RemoveDeadNode(SDNode *N){
910 SmallVector<SDNode*, 16> DeadNodes(1, N);
911
912 // Create a dummy node that adds a reference to the root node, preventing
913 // it from being deleted. (This matters if the root is an operand of the
914 // dead node.)
915 HandleSDNode Dummy(getRoot());
916
917 RemoveDeadNodes(DeadNodes);
918}
919
920void SelectionDAG::DeleteNode(SDNode *N) {
921 // First take this out of the appropriate CSE map.
922 RemoveNodeFromCSEMaps(N);
923
924 // Finally, remove uses due to operands of this node, remove from the
925 // AllNodes list, and delete the node.
926 DeleteNodeNotInCSEMaps(N);
927}
928
929void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
930 assert(N->getIterator() != AllNodes.begin() &&(static_cast <bool> (N->getIterator() != AllNodes.begin
() && "Cannot delete the entry node!") ? void (0) : __assert_fail
("N->getIterator() != AllNodes.begin() && \"Cannot delete the entry node!\""
, "llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp", 931, __extension__
__PRETTY_FUNCTION__))
931 "Cannot delete the entry node!")(static_cast <bool> (N->getIterator() != AllNodes.begin
() && "Cannot delete the entry node!") ? void (0) : __assert_fail
("N->getIterator() != AllNodes.begin() && \"Cannot delete the entry node!\""
, "llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp", 931, __extension__
__PRETTY_FUNCTION__))
;
932 assert(N->use_empty() && "Cannot delete a node that is not dead!")(static_cast <bool> (N->use_empty() && "Cannot delete a node that is not dead!"
) ? void (0) : __assert_fail ("N->use_empty() && \"Cannot delete a node that is not dead!\""
, "llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp", 932, __extension__
__PRETTY_FUNCTION__))
;
933
934 // Drop all of the operands and decrement used node's use counts.
935 N->DropOperands();
936
937 DeallocateNode(N);
938}
939
940void SDDbgInfo::add(SDDbgValue *V, bool isParameter) {
941 assert(!(V->isVariadic() && isParameter))(static_cast <bool> (!(V->isVariadic() && isParameter
)) ? void (0) : __assert_fail ("!(V->isVariadic() && isParameter)"
, "llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp", 941, __extension__
__PRETTY_FUNCTION__))
;
942 if (isParameter)
943 ByvalParmDbgValues.push_back(V);
944 else
945 DbgValues.push_back(V);
946 for (const SDNode *Node : V->getSDNodes())
947 if (Node)
948 DbgValMap[Node].push_back(V);
949}
950
951void SDDbgInfo::erase(const SDNode *Node) {
952 DbgValMapType::iterator I = DbgValMap.find(Node);
953 if (I == DbgValMap.end())
954 return;
955 for (auto &Val: I->second)
956 Val->setIsInvalidated();
957 DbgValMap.erase(I);
958}
959
960void SelectionDAG::DeallocateNode(SDNode *N) {
961 // If we have operands, deallocate them.
962 removeOperands(N);
963
964 NodeAllocator.Deallocate(AllNodes.remove(N));
965
966 // Set the opcode to DELETED_NODE to help catch bugs when node
967 // memory is reallocated.
968 // FIXME: There are places in SDag that have grown a dependency on the opcode
969 // value in the released node.
970 __asan_unpoison_memory_region(&N->NodeType, sizeof(N->NodeType));
971 N->NodeType = ISD::DELETED_NODE;
972
973 // If any of the SDDbgValue nodes refer to this SDNode, invalidate
974 // them and forget about that node.
975 DbgInfo->erase(N);
976}
977
978#ifndef NDEBUG
979/// VerifySDNode - Check the given SDNode. Aborts if it is invalid.
980static void VerifySDNode(SDNode *N) {
981 switch (N->getOpcode()) {
982 default:
983 break;
984 case ISD::BUILD_PAIR: {
985 EVT VT = N->getValueType(0);
986 assert(N->getNumValues() == 1 && "Too many results!")(static_cast <bool> (N->getNumValues() == 1 &&
"Too many results!") ? void (0) : __assert_fail ("N->getNumValues() == 1 && \"Too many results!\""
, "llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp", 986, __extension__
__PRETTY_FUNCTION__))
;
987 assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&(static_cast <bool> (!VT.isVector() && (VT.isInteger
() || VT.isFloatingPoint()) && "Wrong return type!") ?
void (0) : __assert_fail ("!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) && \"Wrong return type!\""
, "llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp", 988, __extension__
__PRETTY_FUNCTION__))
988 "Wrong return type!")(static_cast <bool> (!VT.isVector() && (VT.isInteger
() || VT.isFloatingPoint()) && "Wrong return type!") ?
void (0) : __assert_fail ("!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) && \"Wrong return type!\""
, "llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp", 988, __extension__
__PRETTY_FUNCTION__))
;
989 assert(N->getNumOperands() == 2 && "Wrong number of operands!")(static_cast <bool> (N->getNumOperands() == 2 &&
"Wrong number of operands!") ? void (0) : __assert_fail ("N->getNumOperands() == 2 && \"Wrong number of operands!\""
, "llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp", 989, __extension__
__PRETTY_FUNCTION__))
;
990 assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&(static_cast <bool> (N->getOperand(0).getValueType()
== N->getOperand(1).getValueType() && "Mismatched operand types!"
) ? void (0) : __assert_fail ("N->getOperand(0).getValueType() == N->getOperand(1).getValueType() && \"Mismatched operand types!\""
, "llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp", 991, __extension__
__PRETTY_FUNCTION__))
991 "Mismatched operand types!")(static_cast <bool> (N->getOperand(0).getValueType()
== N->getOperand(1).getValueType() && "Mismatched operand types!"
) ? void (0) : __assert_fail ("N->getOperand(0).getValueType() == N->getOperand(1).getValueType() && \"Mismatched operand types!\""
, "llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp", 991, __extension__
__PRETTY_FUNCTION__))
;
992 assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&(static_cast <bool> (N->getOperand(0).getValueType()
.isInteger() == VT.isInteger() && "Wrong operand type!"
) ? void (0) : __assert_fail ("N->getOperand(0).getValueType().isInteger() == VT.isInteger() && \"Wrong operand type!\""
, "llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp", 993, __extension__
__PRETTY_FUNCTION__))
993 "Wrong operand type!")(static_cast <bool> (N->getOperand(0).getValueType()
.isInteger() == VT.isInteger() && "Wrong operand type!"
) ? void (0) : __assert_fail ("N->getOperand(0).getValueType().isInteger() == VT.isInteger() && \"Wrong operand type!\""
, "llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp", 993, __extension__
__PRETTY_FUNCTION__))
;
994 assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&(static_cast <bool> (VT.getSizeInBits() == 2 * N->getOperand
(0).getValueSizeInBits() && "Wrong return type size")
? void (0) : __assert_fail ("VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() && \"Wrong return type size\""
, "llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp", 995, __extension__
__PRETTY_FUNCTION__))
995 "Wrong return type size")(static_cast <bool> (VT.getSizeInBits() == 2 * N->getOperand
(0).getValueSizeInBits() && "Wrong return type size")
? void (0) : __assert_fail ("VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() && \"Wrong return type size\""
, "llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp", 995, __extension__
__PRETTY_FUNCTION__))
;
996 break;
997 }
998 case ISD::BUILD_VECTOR: {
999 assert(N->getNumValues() == 1 && "Too many results!")(static_cast <bool> (N->getNumValues() == 1 &&
"Too many results!") ? void (0) : __assert_fail ("N->getNumValues() == 1 && \"Too many results!\""
, "llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp", 999, __extension__
__PRETTY_FUNCTION__))
;
1000 assert(N->getValueType(0).isVector() && "Wrong return type!")(static_cast <bool> (N->getValueType(0).isVector() &&
"Wrong return type!") ? void (0) : __assert_fail ("N->getValueType(0).isVector() && \"Wrong return type!\""
, "llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp", 1000, __extension__
__PRETTY_FUNCTION__))
;
1001 assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&(static_cast <bool> (N->getNumOperands() == N->getValueType
(0).getVectorNumElements() && "Wrong number of operands!"
) ? void (0) : __assert_fail ("N->getNumOperands() == N->getValueType(0).getVectorNumElements() && \"Wrong number of operands!\""
, "llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp", 1002, __extension__
__PRETTY_FUNCTION__))
1002 "Wrong number of operands!")(static_cast <bool> (N->getNumOperands() == N->getValueType
(0).getVectorNumElements() && "Wrong number of operands!"
) ? void (0) : __assert_fail ("N->getNumOperands() == N->getValueType(0).getVectorNumElements() && \"Wrong number of operands!\""
, "llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp", 1002, __extension__
__PRETTY_FUNCTION__))
;
1003 EVT EltVT = N->getValueType(0).getVectorElementType();
1004 for (const SDUse &Op : N->ops()) {
1005 assert((Op.getValueType() == EltVT ||
1006 (EltVT.isInteger() && Op.getValueType().isInteger() &&
1007 EltVT.bitsLE(Op.getValueType()))) &&
1008 "Wrong operand type!");
1009 assert(Op.getValueType() == N->getOperand(0).getValueType() &&
1010 "Operands must all have the same type");
1011 }
1012 break;
1013 }
1014 }
1015}
1016#endif // NDEBUG
1017
1018/// Insert a newly allocated node into the DAG.
1019///
1020/// Handles insertion into the all nodes list and CSE map, as well as
1021/// verification and other common operations when a new node is allocated.
1022void SelectionDAG::InsertNode(SDNode *N) {
1023 AllNodes.push_back(N);
1024#ifndef NDEBUG
1025 N->PersistentId = NextPersistentId++;
1026 VerifySDNode(N);
1027#endif
1028 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
1029 DUL->NodeInserted(N);
1030}
1031
1032/// RemoveNodeFromCSEMaps - Take the specified node out of the CSE map that
1033/// correspond to it. This is useful when we're about to delete or repurpose
1034/// the node. We don't want future request for structurally identical nodes
1035/// to return N anymore.
1036bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
1037 bool Erased = false;
1038 switch (N->getOpcode()) {
1039 case ISD::HANDLENODE: return false; // noop.
1040 case ISD::CONDCODE:
1041 assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
1042 "Cond code doesn't exist!");
1043 Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr;
1044 CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr;
1045 break;
1046 case ISD::ExternalSymbol:
1047 Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
1048 break;
1049 case ISD::TargetExternalSymbol: {
1050 ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
1051 Erased = TargetExternalSymbols.erase(std::pair<std::string, unsigned>(
1052 ESN->getSymbol(), ESN->getTargetFlags()));
1053 break;
1054 }
1055 case ISD::MCSymbol: {
1056 auto *MCSN = cast<MCSymbolSDNode>(N);
1057 Erased = MCSymbols.erase(MCSN->getMCSymbol());
1058 break;
1059 }
1060 case ISD::VALUETYPE: {
1061 EVT VT = cast<VTSDNode>(N)->getVT();
1062 if (VT.isExtended()) {
1063 Erased = ExtendedValueTypeNodes.erase(VT);
1064 } else {
1065 Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr;
1066 ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr;
1067 }
1068 break;
1069 }
1070 default:
1071 // Remove it from the CSE Map.
1072 assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!")(static_cast <bool> (N->getOpcode() != ISD::DELETED_NODE
&& "DELETED_NODE in CSEMap!") ? void (0) : __assert_fail
("N->getOpcode() != ISD::DELETED_NODE && \"DELETED_NODE in CSEMap!\""
, "llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp", 1072, __extension__
__PRETTY_FUNCTION__))
;
1073 assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!")(static_cast <bool> (N->getOpcode() != ISD::EntryToken
&& "EntryToken in CSEMap!") ? void (0) : __assert_fail
("N->getOpcode() != ISD::EntryToken && \"EntryToken in CSEMap!\""
, "llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp", 1073, __extension__
__PRETTY_FUNCTION__))
;
1074 Erased = CSEMap.RemoveNode(N);
1075 break;
1076 }
1077#ifndef NDEBUG
1078 // Verify that the node was actually in one of the CSE maps, unless it has a
1079 // flag result (which cannot be CSE'd) or is one of the special cases that are
1080 // not subject to CSE.
1081 if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
1082 !N->isMachineOpcode() && !doNotCSE(N)) {
1083 N->dump(this);
1084 dbgs() << "\n";
1085 llvm_unreachable("Node is not in map!")::llvm::llvm_unreachable_internal("Node is not in map!", "llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp"
, 1085)
;
1086 }
1087#endif
1088 return Erased;
1089}
1090
1091/// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
1092/// maps and modified in place. Add it back to the CSE maps, unless an identical
1093/// node already exists, in which case transfer all its users to the existing
1094/// node. This transfer can potentially trigger recursive merging.
1095void
1096SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
1097 // For node types that aren't CSE'd, just act as if no identical node
1098 // already exists.
1099 if (!doNotCSE(N)) {
1100 SDNode *Existing = CSEMap.GetOrInsertNode(N);
1101 if (Existing != N) {
1102 // If there was already an existing matching node, use ReplaceAllUsesWith
1103 // to replace the dead one with the existing one. This can cause
1104 // recursive merging of other unrelated nodes down the line.
1105 ReplaceAllUsesWith(N, Existing);
1106
1107 // N is now dead. Inform the listeners and delete it.
1108 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
1109 DUL->NodeDeleted(N, Existing);
1110 DeleteNodeNotInCSEMaps(N);
1111 return;
1112 }
1113 }
1114
1115 // If the node doesn't already exist, we updated it. Inform listeners.
1116 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
1117 DUL->NodeUpdated(N);
1118}
1119
1120/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
1121/// were replaced with those specified. If this node is never memoized,
1122/// return null, otherwise return a pointer to the slot it would take. If a
1123/// node already exists with these operands, the slot will be non-null.
1124SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
1125 void *&InsertPos) {
1126 if (doNotCSE(N))
1127 return nullptr;
1128
1129 SDValue Ops[] = { Op };
1130 FoldingSetNodeID ID;
1131 AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
1132 AddNodeIDCustom(ID, N);
1133 SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
1134 if (Node)
1135 Node->intersectFlagsWith(N->getFlags());
1136 return Node;
1137}
1138
1139/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
1140/// were replaced with those specified. If this node is never memoized,
1141/// return null, otherwise return a pointer to the slot it would take. If a
1142/// node already exists with these operands, the slot will be non-null.
1143SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
1144 SDValue Op1, SDValue Op2,
1145 void *&InsertPos) {
1146 if (doNotCSE(N))
1147 return nullptr;
1148
1149 SDValue Ops[] = { Op1, Op2 };
1150 FoldingSetNodeID ID;
1151 AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
1152 AddNodeIDCustom(ID, N);
1153 SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
1154 if (Node)
1155 Node->intersectFlagsWith(N->getFlags());
1156 return Node;
1157}
1158
1159/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
1160/// were replaced with those specified. If this node is never memoized,
1161/// return null, otherwise return a pointer to the slot it would take. If a
1162/// node already exists with these operands, the slot will be non-null.
1163SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
1164 void *&InsertPos) {
1165 if (doNotCSE(N))
1166 return nullptr;
1167
1168 FoldingSetNodeID ID;
1169 AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
1170 AddNodeIDCustom(ID, N);
1171 SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
1172 if (Node)
1173 Node->intersectFlagsWith(N->getFlags());
1174 return Node;
1175}
1176
1177Align SelectionDAG::getEVTAlign(EVT VT) const {
1178 Type *Ty = VT == MVT::iPTR ?
1179 PointerType::get(Type::getInt8Ty(*getContext()), 0) :
1180 VT.getTypeForEVT(*getContext());
1181
1182 return getDataLayout().getABITypeAlign(Ty);
1183}
1184
1185// EntryNode could meaningfully have debug info if we can find it...
1186SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
1187 : TM(tm), OptLevel(OL),
1188 EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
1189 Root(getEntryNode()) {
1190 InsertNode(&EntryNode);
1191 DbgInfo = new SDDbgInfo();
1192}
1193
1194void SelectionDAG::init(MachineFunction &NewMF,
1195 OptimizationRemarkEmitter &NewORE,
1196 Pass *PassPtr, const TargetLibraryInfo *LibraryInfo,
1197 LegacyDivergenceAnalysis * Divergence,
1198 ProfileSummaryInfo *PSIin,
1199 BlockFrequencyInfo *BFIin) {
1200 MF = &NewMF;
1201 SDAGISelPass = PassPtr;
1202 ORE = &NewORE;
1203 TLI = getSubtarget().getTargetLowering();
1204 TSI = getSubtarget().getSelectionDAGInfo();
1205 LibInfo = LibraryInfo;
1206 Context = &MF->getFunction().getContext();
1207 DA = Divergence;
1208 PSI = PSIin;
1209 BFI = BFIin;
1210}
1211
1212SelectionDAG::~SelectionDAG() {
1213 assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
1214 allnodes_clear();
1215 OperandRecycler.clear(OperandAllocator);
1216 delete DbgInfo;
1217}
1218
1219bool SelectionDAG::shouldOptForSize() const {
1220 return MF->getFunction().hasOptSize() ||
1221 llvm::shouldOptimizeForSize(FLI->MBB->getBasicBlock(), PSI, BFI);
1222}
1223
1224void SelectionDAG::allnodes_clear() {
1225 assert(&*AllNodes.begin() == &EntryNode);
1226 AllNodes.remove(AllNodes.begin());
1227 while (!AllNodes.empty())
1228 DeallocateNode(&AllNodes.front());
1229#ifndef NDEBUG
1230 NextPersistentId = 0;
1231#endif
1232}
1233
1234SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
1235 void *&InsertPos) {
1236 SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
1237 if (N) {
1238 switch (N->getOpcode()) {
1239 default: break;
1240 case ISD::Constant:
1241 case ISD::ConstantFP:
1242 llvm_unreachable("Querying for Constant and ConstantFP nodes requires "
1243 "debug location. Use another overload.");
1244 }
1245 }
1246 return N;
1247}
1248
1249SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
1250 const SDLoc &DL, void *&InsertPos) {
1251 SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
1252 if (N) {
1253 switch (N->getOpcode()) {
1254 case ISD::Constant:
1255 case ISD::ConstantFP:
1256 // Erase debug location from the node if the node is used at several
1257 // different places. Do not propagate one location to all uses as it
1258 // will cause a worse single stepping debugging experience.
1259 if (N->getDebugLoc() != DL.getDebugLoc())
1260 N->setDebugLoc(DebugLoc());
1261 break;
1262 default:
1263 // When the node's point of use is located earlier in the instruction
1264 // sequence than its prior point of use, update its debug info to the
1265 // earlier location.
1266 if (DL.getIROrder() && DL.getIROrder() < N->getIROrder())
1267 N->setDebugLoc(DL.getDebugLoc());
1268 break;
1269 }
1270 }
1271 return N;
1272}
1273
1274void SelectionDAG::clear() {
1275 allnodes_clear();
1276 OperandRecycler.clear(OperandAllocator);
1277 OperandAllocator.Reset();
1278 CSEMap.clear();
1279
1280 ExtendedValueTypeNodes.clear();
1281 ExternalSymbols.clear();
1282 TargetExternalSymbols.clear();
1283 MCSymbols.clear();
1284 SDCallSiteDbgInfo.clear();
1285 std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
1286 static_cast<CondCodeSDNode*>(nullptr));
1287 std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
1288 static_cast<SDNode*>(nullptr));
1289
1290 EntryNode.UseList = nullptr;
1291 InsertNode(&EntryNode);
1292 Root = getEntryNode();
1293 DbgInfo->clear();
1294}
1295
1296SDValue SelectionDAG::getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT) {
1297 return VT.bitsGT(Op.getValueType())
1298 ? getNode(ISD::FP_EXTEND, DL, VT, Op)
1299 : getNode(ISD::FP_ROUND, DL, VT, Op, getIntPtrConstant(0, DL));
1300}
1301
1302std::pair<SDValue, SDValue>
1303SelectionDAG::getStrictFPExtendOrRound(SDValue Op, SDValue Chain,
1304 const SDLoc &DL, EVT VT) {
1305 assert(!VT.bitsEq(Op.getValueType()) &&
1306 "Strict no-op FP extend/round not allowed.");
1307 SDValue Res =
1308 VT.bitsGT(Op.getValueType())
1309 ? getNode(ISD::STRICT_FP_EXTEND, DL, {VT, MVT::Other}, {Chain, Op})
1310 : getNode(ISD::STRICT_FP_ROUND, DL, {VT, MVT::Other},
1311 {Chain, Op, getIntPtrConstant(0, DL)});
1312
1313 return std::pair<SDValue, SDValue>(Res, SDValue(Res.getNode(), 1));
1314}
1315
1316SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
1317 return VT.bitsGT(Op.getValueType()) ?
1318 getNode(ISD::ANY_EXTEND, DL, VT, Op) :
1319 getNode(ISD::TRUNCATE, DL, VT, Op);
1320}
1321
1322SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
1323 return VT.bitsGT(Op.getValueType()) ?
1324 getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
1325 getNode(ISD::TRUNCATE, DL, VT, Op);
1326}
1327
1328SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
1329 return VT.bitsGT(Op.getValueType()) ?
1330 getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
1331 getNode(ISD::TRUNCATE, DL, VT, Op);
1332}
1333
1334SDValue SelectionDAG::getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT,
1335 EVT OpVT) {
1336 if (VT.bitsLE(Op.getValueType()))
1337 return getNode(ISD::TRUNCATE, SL, VT, Op);
1338
1339 TargetLowering::BooleanContent BType = TLI->getBooleanContents(OpVT);
1340 return getNode(TLI->getExtendForContent(BType), SL, VT, Op);
1341}
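
The *ExtOrTrunc helpers above choose between an extend and a truncate purely from the relative bit widths of the source and destination types. A hedged usage sketch from hypothetical target-lowering code; DAG, DL and Val are assumed to be in scope and are not taken from this file.

// If Val is narrower than i32 this emits ISD::ZERO_EXTEND, if wider it
// emits ISD::TRUNCATE; when the types already match, getNode() typically
// folds the no-op away.
SDValue AsI32 = DAG.getZExtOrTrunc(Val, DL, MVT::i32);

// Same idea with sign extension, useful when Val carries a signed value.
SDValue AsI64 = DAG.getSExtOrTrunc(Val, DL, MVT::i64);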
1342
1343SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
1344 EVT OpVT = Op.getValueType();
1345 assert(VT.isInteger() && OpVT.isInteger() &&
1346 "Cannot getZeroExtendInReg FP types");
1347 assert(VT.isVector() == OpVT.isVector() &&
1348 "getZeroExtendInReg type should be vector iff the operand "
1349 "type is vector!");
1350 assert((!VT.isVector() ||
1351 VT.getVectorElementCount() == OpVT.getVectorElementCount()) &&
1352 "Vector element counts must match in getZeroExtendInReg");
1353 assert(VT.bitsLE(OpVT) && "Not extending!");
1354 if (OpVT == VT)
1355 return Op;
1356 APInt Imm = APInt::getLowBitsSet(OpVT.getScalarSizeInBits(),
1357 VT.getScalarSizeInBits());
1358 return getNode(ISD::AND, DL, OpVT, Op, getConstant(Imm, DL, OpVT));
1359}
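
getZeroExtendInReg implements a "zero extend in register": the value stays in the wide type, but all bits above the narrow type's width are cleared by an AND with a low-bit mask. The mask computation in isolation, for an i8 value held in a 32-bit register (the concrete values are only an example):

#include "llvm/ADT/APInt.h"
using llvm::APInt;

// Mask keeping the low 8 bits of a 32-bit value: 0x000000FF.
APInt Mask = APInt::getLowBitsSet(/*numBits=*/32, /*loBitsSet=*/8);

// 0xFFFFFFAB & 0x000000FF == 0x000000AB: the i8 payload zero-extended
// within the 32-bit register.
APInt Val(32, 0xFFFFFFABULL);
APInt ZExtInReg = Val & Mask;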
1360
1361SDValue SelectionDAG::getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
1362 // Only unsigned pointer semantics are supported right now. In the future this
1363 // might delegate to TLI to check pointer signedness.
1364 return getZExtOrTrunc(Op, DL, VT);
1365}
1366
1367SDValue SelectionDAG::getPtrExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
1368 // Only unsigned pointer semantics are supported right now. In the future this
1369 // might delegate to TLI to check pointer signedness.
1370 return getZeroExtendInReg(Op, DL, VT);
1371}
1372
1373/// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
1374SDValue SelectionDAG::getNOT(const SDLoc &DL, SDValue Val, EVT VT) {
1375 return getNode(ISD::XOR, DL, VT, Val, getAllOnesConstant(DL, VT));
1376}
1377
1378SDValue SelectionDAG::getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT) {
1379 SDValue TrueValue = getBoolConstant(true, DL, VT, VT);
1380 return getNode(ISD::XOR, DL, VT, Val, TrueValue);
1381}
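
getNOT and getLogicalNOT above both lower negation to an XOR: bitwise NOT uses an all-ones mask, while logical NOT XORs with whatever constant the target treats as "true". The same trick on plain 32-bit integers, purely for illustration:

#include <cstdint>

// Bitwise NOT via XOR with all ones, as getNOT builds at the DAG level.
uint32_t bitwiseNot(uint32_t X) { return X ^ 0xFFFFFFFFu; }

// Logical NOT for a ZeroOrOneBooleanContent target: XOR with 1 flips 0 and 1.
uint32_t logicalNot01(uint32_t B) { return B ^ 1u; }

// For a ZeroOrNegativeOneBooleanContent target, "true" is all ones, so the
// same XOR uses the all-ones mask instead.
uint32_t logicalNotAllOnes(uint32_t B) { return B ^ 0xFFFFFFFFu; }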
1382
1383SDValue SelectionDAG::getBoolConstant(bool V, const SDLoc &DL, EVT VT,
1384 EVT OpVT) {
1385 if (!V)
1386 return getConstant(0, DL, VT);
1387
1388 switch (TLI->getBooleanContents(OpVT)) {
1389 case TargetLowering::ZeroOrOneBooleanContent:
1390 case TargetLowering::UndefinedBooleanContent:
1391 return getConstant(1, DL, VT);
1392 case TargetLowering::ZeroOrNegativeOneBooleanContent:
1393 return getAllOnesConstant(DL, VT);
1394 }
1395 llvm_unreachable("Unexpected boolean content enum!");
1396}
1397
1398SDValue SelectionDAG::getConstant(uint64_t Val, const SDLoc &DL, EVT VT,
1399 bool isT, bool isO) {
1400 EVT EltVT = VT.getScalarType();
1401 assert((EltVT.getSizeInBits() >= 64 ||
1402 (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
1403 "getConstant with a uint64_t value that doesn't fit in the type!");
1404 return getConstant(APInt(EltVT.getSizeInBits(), Val), DL, VT, isT, isO);
1405}
1406
1407SDValue SelectionDAG::getConstant(const APInt &Val, const SDLoc &DL, EVT VT,
1408 bool isT, bool isO) {
1409 return getConstant(*ConstantInt::get(*Context, Val), DL, VT, isT, isO);
1410}
1411
1412SDValue SelectionDAG::getConstant(const ConstantInt &Val, const SDLoc &DL,
1413 EVT VT, bool isT, bool isO) {
1414 assert(VT.isInteger() && "Cannot create FP integer constant!");
1415
1416 EVT EltVT = VT.getScalarType();
1417 const ConstantInt *Elt = &Val;
1418
1419 // In some cases the vector type is legal but the element type is illegal and
1420 // needs to be promoted, for example v8i8 on ARM. In this case, promote the
1421 // inserted value (the type does not need to match the vector element type).
1422 // Any extra bits introduced will be truncated away.
1423 if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
1424 TargetLowering::TypePromoteInteger) {
1425 EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
1426 APInt NewVal = Elt->getValue().zextOrTrunc(EltVT.getSizeInBits());
1427 Elt = ConstantInt::get(*getContext(), NewVal);
1428 }
1429 // In other cases the element type is illegal and needs to be expanded, for
1430 // example v2i64 on MIPS32. In this case, find the nearest legal type, split
1431 // the value into n parts and use a vector type with n-times the elements.
1432 // Then bitcast to the type requested.
1433 // Legalizing constants too early makes the DAGCombiner's job harder so we
1434 // only legalize if the DAG tells us we must produce legal types.
1435 else if (NewNodesMustHaveLegalTypes && VT.isVector() &&
1436 TLI->getTypeAction(*getContext(), EltVT) ==
1437 TargetLowering::TypeExpandInteger) {
1438 const APInt &NewVal = Elt->getValue();
1439 EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
1440 unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits();
1441
1442 // For scalable vectors, try to use a SPLAT_VECTOR_PARTS node.
1443 if (VT.isScalableVector()) {
1444 assert(EltVT.getSizeInBits() % ViaEltSizeInBits == 0 &&
1445 "Can only handle an even split!");
1446 unsigned Parts = EltVT.getSizeInBits() / ViaEltSizeInBits;
1447
1448 SmallVector<SDValue, 2> ScalarParts;
1449 for (unsigned i = 0; i != Parts; ++i)
1450 ScalarParts.push_back(getConstant(
1451 NewVal.extractBits(ViaEltSizeInBits, i * ViaEltSizeInBits), DL,
1452 ViaEltVT, isT, isO));
1453
1454 return getNode(ISD::SPLAT_VECTOR_PARTS, DL, VT, ScalarParts);
1455 }
1456
1457 unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits;
1458 EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts);
1459
1460 // Check the temporary vector is the correct size. If this fails then
1461 // getTypeToTransformTo() probably returned a type whose size (in bits)
1462 // isn't a power-of-2 factor of the requested type size.
1463 assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits());
1464
1465 SmallVector<SDValue, 2> EltParts;
1466 for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i)
1467 EltParts.push_back(getConstant(
1468 NewVal.extractBits(ViaEltSizeInBits, i * ViaEltSizeInBits), DL,
1469 ViaEltVT, isT, isO));
1470
1471 // EltParts is currently in little endian order. If we actually want
1472 // big-endian order then reverse it now.
1473 if (getDataLayout().isBigEndian())
1474 std::reverse(EltParts.begin(), EltParts.end());
1475
1476 // The elements must be reversed when the element order is different
1477 // to the endianness of the elements (because the BITCAST is itself a
1478 // vector shuffle in this situation). However, we do not need any code to
1479 // perform this reversal because getConstant() is producing a vector
1480 // splat.
1481 // This situation occurs in MIPS MSA.
1482
1483 SmallVector<SDValue, 8> Ops;
1484 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
1485 llvm::append_range(Ops, EltParts);
1486
1487 SDValue V =
1488 getNode(ISD::BITCAST, DL, VT, getBuildVector(ViaVecVT, DL, Ops));
1489 return V;
1490 }
1491
1492 assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
1493 "APInt size does not match type size!");
1494 unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
1495 FoldingSetNodeID ID;
1496 AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
1497 ID.AddPointer(Elt);
1498 ID.AddBoolean(isO);
1499 void *IP = nullptr;
1500 SDNode *N = nullptr;
1501 if ((N = FindNodeOrInsertPos(ID, DL, IP)))
1502 if (!VT.isVector())
1503 return SDValue(N, 0);
1504
1505 if (!N) {
1506 N = newSDNode<ConstantSDNode>(isT, isO, Elt, EltVT);
1507 CSEMap.InsertNode(N, IP);
1508 InsertNode(N);
1509 NewSDValueDbgMsg(SDValue(N, 0), "Creating constant: ", this);
1510 }
1511
1512 SDValue Result(N, 0);
1513 if (VT.isScalableVector())
1514 Result = getSplatVector(VT, DL, Result);
1515 else if (VT.isVector())
1516 Result = getSplatBuildVector(VT, DL, Result);
1517
1518 return Result;
1519}
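
For vector result types, the getConstant overload above builds the scalar constant node once and then splats it, via getSplatBuildVector for fixed-width vectors or getSplatVector for scalable ones. A hedged usage sketch; DAG and DL are assumed to be in scope in hypothetical lowering code.

// Scalar request: a single ConstantSDNode of type i32 holding 42.
SDValue Scalar = DAG.getConstant(42, DL, MVT::i32);

// Vector request: the same scalar broadcast into a v4i32 splat
// (a BUILD_VECTOR whose four operands are the constant above).
SDValue Splat = DAG.getConstant(42, DL, MVT::v4i32);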
1520
1521SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, const SDLoc &DL,
1522 bool isTarget) {
1523 return getConstant(Val, DL, TLI->getPointerTy(getDataLayout()), isTarget);
1524}
1525
1526SDValue SelectionDAG::getShiftAmountConstant(uint64_t Val, EVT VT,
1527 const SDLoc &DL, bool LegalTypes) {
1528 assert(VT.isInteger() && "Shift amount is not an integer type!");
1529 EVT ShiftVT = TLI->getShiftAmountTy(VT, getDataLayout(), LegalTypes);
1530 return getConstant(Val, DL, ShiftVT);
1531}
1532
1533SDValue SelectionDAG::getVectorIdxConstant(uint64_t Val, const SDLoc &DL,
1534 bool isTarget) {
1535 return getConstant(Val, DL, TLI->getVectorIdxTy(getDataLayout()), isTarget);
1536}
1537
1538SDValue SelectionDAG::getConstantFP(const APFloat &V, const SDLoc &DL, EVT VT,
1539 bool isTarget) {
1540 return getConstantFP(*ConstantFP::get(*getContext(), V), DL, VT, isTarget);
1541}
1542
1543SDValue SelectionDAG::getConstantFP(const ConstantFP &V, const SDLoc &DL,
1544 EVT VT, bool isTarget) {
1545 assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");
1546
1547 EVT EltVT = VT.getScalarType();
1548
1549 // Do the map lookup using the actual bit pattern for the floating point
1550 // value, so that we don't have problems with 0.0 comparing equal to -0.0, and
1551 // we don't have issues with SNANs.
1552 unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
1553 FoldingSetNodeID ID;
1554 AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
1555 ID.AddPointer(&V);
1556 void *IP = nullptr;
1557 SDNode *N = nullptr;
1558 if ((N = FindNodeOrInsertPos(ID, DL, IP)))
1559 if (!VT.isVector())
1560 return SDValue(N, 0);
1561
1562 if (!N) {
1563 N = newSDNode<ConstantFPSDNode>(isTarget, &V, EltVT);
1564 CSEMap.InsertNode(N, IP);
1565 InsertNode(N);
1566 }
1567
1568 SDValue Result(N, 0);
1569 if (VT.isScalableVector())
1570 Result = getSplatVector(VT, DL, Result);
1571 else if (VT.isVector())
1572 Result = getSplatBuildVector(VT, DL, Result);
1573 NewSDValueDbgMsg(Result, "Creating fp constant: ", this);
1574 return Result;
1575}
1576
1577SDValue SelectionDAG::getConstantFP(double Val, const SDLoc &DL, EVT VT,
1578 bool isTarget) {
1579 EVT EltVT = VT.getScalarType();
1580 if (EltVT == MVT::f32)
1581 return getConstantFP(APFloat((float)Val), DL, VT, isTarget);
1582 if (EltVT == MVT::f64)
1583 return getConstantFP(APFloat(Val), DL, VT, isTarget);
1584 if (EltVT == MVT::f80 || EltVT == MVT::f128 || EltVT == MVT::ppcf128 ||
1585 EltVT == MVT::f16 || EltVT == MVT::bf16) {
1586 bool Ignored;
1587 APFloat APF = APFloat(Val);
1588 APF.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
1589 &Ignored);
1590 return getConstantFP(APF, DL, VT, isTarget);
1591 }
1592 llvm_unreachable("Unsupported type in getConstantFP");
1593}
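
The double-taking getConstantFP above re-rounds the incoming value into the target type's semantics with APFloat::convert before creating the node. The conversion step on its own, going from double to IEEE half (the value is chosen only as an example):

#include "llvm/ADT/APFloat.h"
using namespace llvm;

APFloat Val(1.5); // starts out in IEEEdouble semantics
bool LosesInfo = false;
Val.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &LosesInfo);
// Val now holds 1.5 as a half; LosesInfo stays false because 1.5 is
// exactly representable, and would be true if rounding lost bits.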
1594
1595SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, const SDLoc &DL,
1596 EVT VT, int64_t Offset, bool isTargetGA,
1597 unsigned TargetFlags) {
1598 assert((TargetFlags == 0 || isTargetGA) &&
1599 "Cannot set target flags on target-independent globals");
1600
1601 // Truncate (with sign-extension) the offset value to the pointer size.
1602 unsigned BitWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
1603 if (BitWidth < 64)
1604 Offset = SignExtend64(Offset, BitWidth);
1605
1606 unsigned Opc;
1607 if (GV->isThreadLocal())
1608 Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
1609 else
1610 Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;
1611
1612 FoldingSetNodeID ID;
1613 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1614 ID.AddPointer(GV);
1615 ID.AddInteger(Offset);
1616 ID.AddInteger(TargetFlags);
1617 void *IP = nullptr;
1618 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
1619 return SDValue(E, 0);
1620
1621 auto *N = newSDNode<GlobalAddressSDNode>(
1622 Opc, DL.getIROrder(), DL.getDebugLoc(), GV, VT, Offset, TargetFlags);
1623 CSEMap.InsertNode(N, IP);
1624 InsertNode(N);
1625 return SDValue(N, 0);
1626}
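
getGlobalAddress sign-extends the offset to the pointer width with SignExtend64, so an offset whose top pointer bit is set is treated as negative. The same call in isolation (the concrete values are only an example):

#include "llvm/Support/MathExtras.h"
#include <cstdint>

// For a 32-bit pointer, the 32-bit pattern 0xFFFFFFFF means -1.
int64_t Offset = 0xFFFFFFFFLL;
int64_t Extended = llvm::SignExtend64(Offset, 32); // == -1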
1627
1628SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
1629 unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
1630 FoldingSetNodeID ID;
1631 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1632 ID.AddInteger(FI);
1633 void *IP = nullptr;
1634 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1635 return SDValue(E, 0);
1636
1637 auto *N = newSDNode<FrameIndexSDNode>(FI, VT, isTarget);
1638 CSEMap.InsertNode(N, IP);
1639 InsertNode(N);
1640 return SDValue(N, 0);
1641}
1642
1643SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
1644 unsigned TargetFlags) {
1645 assert((TargetFlags == 0 || isTarget) &&
1646 "Cannot set target flags on target-independent jump tables");
1647 unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
1648 FoldingSetNodeID ID;
1649 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1650 ID.AddInteger(JTI);
1651 ID.AddInteger(TargetFlags);
1652 void *IP = nullptr;
1653 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1654 return SDValue(E, 0);
1655
1656 auto *N = newSDNode<JumpTableSDNode>(JTI, VT, isTarget, TargetFlags);
1657 CSEMap.InsertNode(N, IP);
1658 InsertNode(N);
1659 return SDValue(N, 0);
1660}
1661
1662SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
1663 MaybeAlign Alignment, int Offset,
1664 bool isTarget, unsigned TargetFlags) {
1665 assert((TargetFlags == 0 || isTarget) &&
1666 "Cannot set target flags on target-independent globals");
1667 if (!Alignment)
1668 Alignment = shouldOptForSize()
1669 ? getDataLayout().getABITypeAlign(C->getType())
1670 : getDataLayout().getPrefTypeAlign(C->getType());
1671 unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
1672 FoldingSetNodeID ID;
1673 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1674 ID.AddInteger(Alignment->value());
1675 ID.AddInteger(Offset);
1676 ID.AddPointer(C);
1677 ID.AddInteger(TargetFlags);
1678 void *IP = nullptr;
1679 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1680 return SDValue(E, 0);
1681
1682 auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, *Alignment,
1683 TargetFlags);
1684 CSEMap.InsertNode(N, IP);
1685 InsertNode(N);
1686 SDValue V = SDValue(N, 0);
1687 NewSDValueDbgMsg(V, "Creating new constant pool: ", this);
1688 return V;
1689}
1690
1691SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
1692 MaybeAlign Alignment, int Offset,
1693 bool isTarget, unsigned TargetFlags) {
1694 assert((TargetFlags == 0 || isTarget) &&
1695 "Cannot set target flags on target-independent globals");
1696 if (!Alignment)
1697 Alignment = getDataLayout().getPrefTypeAlign(C->getType());
1698 unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
1699 FoldingSetNodeID ID;
1700 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1701 ID.AddInteger(Alignment->value());
1702 ID.AddInteger(Offset);
1703 C->addSelectionDAGCSEId(ID);
1704 ID.AddInteger(TargetFlags);
1705 void *IP = nullptr;
1706 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1707 return SDValue(E, 0);
1708
1709 auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, *Alignment,
1710 TargetFlags);
1711 CSEMap.InsertNode(N, IP);
1712 InsertNode(N);
1713 return SDValue(N, 0);
1714}
1715
1716SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
1717 unsigned TargetFlags) {
1718 FoldingSetNodeID ID;
1719 AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), None);
1720 ID.AddInteger(Index);
1721 ID.AddInteger(Offset);
1722 ID.AddInteger(TargetFlags);
1723 void *IP = nullptr;
1724 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1725 return SDValue(E, 0);
1726
1727 auto *N = newSDNode<TargetIndexSDNode>(Index, VT, Offset, TargetFlags);
1728 CSEMap.InsertNode(N, IP);
1729 InsertNode(N);
1730 return SDValue(N, 0);
1731}
1732
1733SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
1734 FoldingSetNodeID ID;
1735 AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), None);
1736 ID.AddPointer(MBB);
1737 void *IP = nullptr;
1738 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1739 return SDValue(E, 0);
1740
1741 auto *N = newSDNode<BasicBlockSDNode>(MBB);
1742 CSEMap.InsertNode(N, IP);
1743 InsertNode(N);
1744 return SDValue(N, 0);
1745}
1746
1747SDValue SelectionDAG::getValueType(EVT VT) {
1748 if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
1749 ValueTypeNodes.size())
1750 ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);
1751
1752 SDNode *&N = VT.isExtended() ?
1753 ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];
1754
1755 if (N) return SDValue(N, 0);
1756 N = newSDNode<VTSDNode>(VT);
1757 InsertNode(N);
1758 return SDValue(N, 0);
1759}
1760
1761SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
1762 SDNode *&N = ExternalSymbols[Sym];
1763 if (N) return SDValue(N, 0);
1764 N = newSDNode<ExternalSymbolSDNode>(false, Sym, 0, VT);
1765 InsertNode(N);
1766 return SDValue(N, 0);
1767}
1768
1769SDValue SelectionDAG::getMCSymbol(MCSymbol *Sym, EVT VT) {
1770 SDNode *&N = MCSymbols[Sym];
1771 if (N)
1772 return SDValue(N, 0);
1773 N = newSDNode<MCSymbolSDNode>(Sym, VT);
1774 InsertNode(N);
1775 return SDValue(N, 0);
1776}
1777
1778SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
1779 unsigned TargetFlags) {
1780 SDNode *&N =
1781 TargetExternalSymbols[std::pair<std::string, unsigned>(Sym, TargetFlags)];
1782 if (N) return SDValue(N, 0);
1783 N = newSDNode<ExternalSymbolSDNode>(true, Sym, TargetFlags, VT);
1784 InsertNode(N);
1785 return SDValue(N, 0);
1786}
1787
1788SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
1789 if ((unsigned)Cond >= CondCodeNodes.size())
1790 CondCodeNodes.resize(Cond+1);
1791
1792 if (!CondCodeNodes[Cond]) {
1793 auto *N = newSDNode<CondCodeSDNode>(Cond);
1794 CondCodeNodes[Cond] = N;
1795 InsertNode(N);
1796 }
1797
1798 return SDValue(CondCodeNodes[Cond], 0);
1799}
1800
1801SDValue SelectionDAG::getStepVector(const SDLoc &DL, EVT ResVT) {
1802 APInt One(ResVT.getScalarSizeInBits(), 1);
1803 return getStepVector(DL, ResVT, One);
1804}
1805
1806SDValue SelectionDAG::getStepVector(const SDLoc &DL, EVT ResVT, APInt StepVal) {
1807 assert(ResVT.getScalarSizeInBits() == StepVal.getBitWidth());
1808 if (ResVT.isScalableVector())
1809 return getNode(
1810 ISD::STEP_VECTOR, DL, ResVT,
1811 getTargetConstant(StepVal, DL, ResVT.getVectorElementType()));
1812
1813 SmallVector<SDValue, 16> OpsStepConstants;
1814 for (uint64_t i = 0; i < ResVT.getVectorNumElements(); i++)
1815 OpsStepConstants.push_back(
1816 getConstant(StepVal * i, DL, ResVT.getVectorElementType()));
1817 return getBuildVector(ResVT, DL, OpsStepConstants);
1818}
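
For fixed-width vectors, getStepVector materializes the arithmetic sequence 0, Step, 2*Step, ... as BUILD_VECTOR operands; only scalable vectors need the ISD::STEP_VECTOR node. The sequence itself in plain C++, as a rough sketch of what ends up in the operand list (stepSequence is an illustrative helper, not part of this file):

#include <cstdint>
#include <vector>

// E.g. NumElts = 4, Step = 2 produces {0, 2, 4, 6}.
std::vector<uint64_t> stepSequence(unsigned NumElts, uint64_t Step) {
  std::vector<uint64_t> Elts;
  for (unsigned I = 0; I != NumElts; ++I)
    Elts.push_back(Step * I);
  return Elts;
}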
1819
1820/// Swaps the values of N1 and N2. Swaps all indices in the shuffle mask M that
1821/// point at N1 to point at N2 and indices that point at N2 to point at N1.
1822static void commuteShuffle(SDValue &N1, SDValue &N2, MutableArrayRef<int> M) {
1823 std::swap(N1, N2);
1824 ShuffleVectorSDNode::commuteMask(M);
1825}
1826
1827SDValue SelectionDAG::getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1,
1828 SDValue N2, ArrayRef<int> Mask) {
1829 assert(VT.getVectorNumElements() == Mask.size() &&
1830 "Must have the same number of vector elements as mask elements!");
1831 assert(VT == N1.getValueType() && VT == N2.getValueType() &&
1832 "Invalid VECTOR_SHUFFLE");
1833
1834 // Canonicalize shuffle undef, undef -> undef
1835 if (N1.isUndef() && N2.isUndef())
1836 return getUNDEF(VT);
1837
1838 // Validate that all indices in Mask are within the range of the elements
1839 // input to the shuffle.
1840 int NElts = Mask.size();
1841 assert(llvm::all_of(Mask,
1842 [&](int M) { return M < (NElts * 2) && M >= -1; }) &&
1843 "Index out of range");
1844
1845 // Copy the mask so we can do any needed cleanup.
1846 SmallVector<int, 8> MaskVec(Mask.begin(), Mask.end());
1847
1848 // Canonicalize shuffle v, v -> v, undef
1849 if (N1 == N2) {
1850 N2 = getUNDEF(VT);
1851 for (int i = 0; i != NElts; ++i)
1852 if (MaskVec[i] >= NElts) MaskVec[i] -= NElts;
1853 }
1854
1855 // Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask.
1856 if (N1.isUndef())
1857 commuteShuffle(N1, N2, MaskVec);
1858
1859 if (TLI->hasVectorBlend()) {
1860 // If shuffling a splat, try to blend the splat instead. We do this here so
1861 // that even when this arises during lowering we don't have to re-handle it.
1862 auto BlendSplat = [&](BuildVectorSDNode *BV, int Offset) {
1863 BitVector UndefElements;
1864 SDValue Splat = BV->getSplatValue(&UndefElements);
1865 if (!Splat)
1866 return;
1867
1868 for (int i = 0; i < NElts; ++i) {
1869 if (MaskVec[i] < Offset || MaskVec[i] >= (Offset + NElts))
1870 continue;
1871
1872 // If this input comes from undef, mark it as such.
1873 if (UndefElements[MaskVec[i] - Offset]) {
1874 MaskVec[i] = -1;
1875 continue;
1876 }
1877
1878 // If we can blend a non-undef lane, use that instead.
1879 if (!UndefElements[i])
1880 MaskVec[i] = i + Offset;
1881 }
1882 };
1883 if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
1884 BlendSplat(N1BV, 0);
1885 if (auto *N2BV = dyn_cast<BuildVectorSDNode>(N2))
1886 BlendSplat(N2BV, NElts);
1887 }
1888
1889 // Canonicalize all index into lhs, -> shuffle lhs, undef
1890 // Canonicalize all index into rhs, -> shuffle rhs, undef
1891 bool AllLHS = true, AllRHS = true;
1892 bool N2Undef = N2.isUndef();
1893 for (int i = 0; i != NElts; ++i) {
1894 if (MaskVec[i] >= NElts) {
1895 if (N2Undef)
1896 MaskVec[i] = -1;
1897 else
1898 AllLHS = false;
1899 } else if (MaskVec[i] >= 0) {
1900 AllRHS = false;
1901 }
1902 }
1903 if (AllLHS && AllRHS)
1904 return getUNDEF(VT);
1905 if (AllLHS && !N2Undef)
1906 N2 = getUNDEF(VT);
1907 if (AllRHS) {
1908 N1 = getUNDEF(VT);
1909 commuteShuffle(N1, N2, MaskVec);
1910 }
1911 // Reset our undef status after accounting for the mask.
1912 N2Undef = N2.isUndef();
1913 // Re-check whether both sides ended up undef.
1914 if (N1.isUndef() && N2Undef)
1915 return getUNDEF(VT);
1916
1917 // If Identity shuffle return that node.
1918 bool Identity = true, AllSame = true;
1919 for (int i = 0; i != NElts; ++i) {
1920 if (MaskVec[i] >= 0 && MaskVec[i] != i) Identity = false;
1921 if (MaskVec[i] != MaskVec[0]) AllSame = false;
1922 }
1923 if (Identity && NElts)
1924 return N1;
1925
1926 // Shuffling a constant splat doesn't change the result.
1927 if (N2Undef) {
1928 SDValue V = N1;
1929
1930 // Look through any bitcasts. We check that these don't change the number
1931 // (and size) of elements and just changes their types.
1932 while (V.getOpcode() == ISD::BITCAST)
1933 V = V->getOperand(0);
1934
1935 // A splat should always show up as a build vector node.
1936 if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
1937 BitVector UndefElements;
1938 SDValue Splat = BV->getSplatValue(&UndefElements);
1939 // If this is a splat of an undef, shuffling it is also undef.
1940 if (Splat && Splat.isUndef())
1941 return getUNDEF(VT);
1942
1943 bool SameNumElts =
1944 V.getValueType().getVectorNumElements() == VT.getVectorNumElements();
1945
1946 // We only have a splat which can skip shuffles if there is a splatted
1947 // value and no undef lanes rearranged by the shuffle.
1948 if (Splat && UndefElements.none()) {
1949 // Splat of <x, x, ..., x>, return <x, x, ..., x>, provided that the
1950 // number of elements match or the value splatted is a zero constant.
1951 if (SameNumElts)
1952 return N1;
1953 if (auto *C = dyn_cast<ConstantSDNode>(Splat))
1954 if (C->isZero())
1955 return N1;
1956 }
1957
1958 // If the shuffle itself creates a splat, build the vector directly.
1959 if (AllSame && SameNumElts) {
1960 EVT BuildVT = BV->getValueType(0);
1961 const SDValue &Splatted = BV->getOperand(MaskVec[0]);
1962 SDValue NewBV = getSplatBuildVector(BuildVT, dl, Splatted);
1963
1964 // We may have jumped through bitcasts, so the type of the
1965 // BUILD_VECTOR may not match the type of the shuffle.
1966 if (BuildVT != VT)
1967 NewBV = getNode(ISD::BITCAST, dl, VT, NewBV);
1968 return NewBV;
1969 }
1970 }
1971 }
1972
1973 FoldingSetNodeID ID;
1974 SDValue Ops[2] = { N1, N2 };
1975 AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops);
1976 for (int i = 0; i != NElts; ++i)
1977 ID.AddInteger(MaskVec[i]);
1978
1979 void* IP = nullptr;
1980 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
1981 return SDValue(E, 0);
1982
1983 // Allocate the mask array for the node out of the BumpPtrAllocator, since
1984 // SDNode doesn't have access to it. This memory will be "leaked" when
1985 // the node is deallocated, but recovered when the NodeAllocator is released.
1986 int *MaskAlloc = OperandAllocator.Allocate<int>(NElts);
1987 llvm::copy(MaskVec, MaskAlloc);
1988
1989 auto *N = newSDNode<ShuffleVectorSDNode>(VT, dl.getIROrder(),
1990 dl.getDebugLoc(), MaskAlloc);
1991 createOperands(N, Ops);
1992
1993 CSEMap.InsertNode(N, IP);
1994 InsertNode(N);
1995 SDValue V = SDValue(N, 0);
1996 NewSDValueDbgMsg(V, "Creating new node: ", this);
1997 return V;
1998}
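
Several of the canonicalizations in getVectorShuffle rewrite the mask: indices in [0, NElts) select from the first operand and indices in [NElts, 2*NElts) from the second, so commuting the operands means shifting every defined index by NElts while leaving undef lanes (-1) alone. A plain sketch of that mask commutation, independent of ShuffleVectorSDNode:

#include <vector>

// Commute a two-input shuffle mask for inputs of width NElts.
void commuteMask(std::vector<int> &Mask, int NElts) {
  for (int &M : Mask) {
    if (M < 0)
      continue; // undef lane stays undef
    M = (M < NElts) ? M + NElts : M - NElts;
  }
}
// Example: NElts = 4, {0, 5, -1, 2} becomes {4, 1, -1, 6}.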
1999
2000SDValue SelectionDAG::getCommutedVectorShuffle(const ShuffleVectorSDNode &SV) {
2001 EVT VT = SV.getValueType(0);
2002 SmallVector<int, 8> MaskVec(SV.getMask().begin(), SV.getMask().end());
2003 ShuffleVectorSDNode::commuteMask(MaskVec);
2004
2005 SDValue Op0 = SV.getOperand(0);
2006 SDValue Op1 = SV.getOperand(1);
2007 return getVectorShuffle(VT, SDLoc(&SV), Op1, Op0, MaskVec);
2008}
2009
2010SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) {
2011 FoldingSetNodeID ID;
2012 AddNodeIDNode(ID, ISD::Register, getVTList(VT), None);
2013 ID.AddInteger(RegNo);
2014 void *IP = nullptr;
2015 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
2016 return SDValue(E, 0);
2017
2018 auto *N = newSDNode<RegisterSDNode>(RegNo, VT);
2019 N->SDNodeBits.IsDivergent = TLI->isSDNodeSourceOfDivergence(N, FLI, DA);
2020 CSEMap.InsertNode(N, IP);
2021 InsertNode(N);
2022 return SDValue(N, 0);
2023}
2024
2025SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) {
2026 FoldingSetNodeID ID;
2027 AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), None);
2028 ID.AddPointer(RegMask);
2029 void *IP = nullptr;
2030 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
2031 return SDValue(E, 0);
2032
2033 auto *N = newSDNode<RegisterMaskSDNode>(RegMask);
2034 CSEMap.InsertNode(N, IP);
2035 InsertNode(N);
2036 return SDValue(N, 0);
2037}
2038
2039SDValue SelectionDAG::getEHLabel(const SDLoc &dl, SDValue Root,
2040 MCSymbol *Label) {
2041 return getLabelNode(ISD::EH_LABEL, dl, Root, Label);
2042}
2043
2044SDValue SelectionDAG::getLabelNode(unsigned Opcode, const SDLoc &dl,
2045 SDValue Root, MCSymbol *Label) {
2046 FoldingSetNodeID ID;
2047 SDValue Ops[] = { Root };
2048 AddNodeIDNode(ID, Opcode, getVTList(MVT::Other), Ops);
2049 ID.AddPointer(Label);
2050 void *IP = nullptr;
2051 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
2052 return SDValue(E, 0);
2053
2054 auto *N =
2055 newSDNode<LabelSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), Label);
2056 createOperands(N, Ops);
2057
2058 CSEMap.InsertNode(N, IP);
2059 InsertNode(N);
2060 return SDValue(N, 0);
2061}
2062
2063SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT,
2064 int64_t Offset, bool isTarget,
2065 unsigned TargetFlags) {
2066 unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress;
2067
2068 FoldingSetNodeID ID;
2069 AddNodeIDNode(ID, Opc, getVTList(VT), None);
2070 ID.AddPointer(BA);
2071 ID.AddInteger(Offset);
2072 ID.AddInteger(TargetFlags);
2073 void *IP = nullptr;
2074 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
2075 return SDValue(E, 0);
2076
2077 auto *N = newSDNode<BlockAddressSDNode>(Opc, VT, BA, Offset, TargetFlags);
2078 CSEMap.InsertNode(N, IP);
2079 InsertNode(N);
2080 return SDValue(N, 0);
2081}
2082
2083SDValue SelectionDAG::getSrcValue(const Value *V) {
2084 FoldingSetNodeID ID;
2085 AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), None);
2086 ID.AddPointer(V);
2087
2088 void *IP = nullptr;
2089 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
2090 return SDValue(E, 0);
2091
2092 auto *N = newSDNode<SrcValueSDNode>(V);
2093 CSEMap.InsertNode(N, IP);
2094 InsertNode(N);
2095 return SDValue(N, 0);
2096}
2097
2098SDValue SelectionDAG::getMDNode(const MDNode *MD) {
2099 FoldingSetNodeID ID;
2100 AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), None);
2101 ID.AddPointer(MD);
2102
2103 void *IP = nullptr;
2104 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
2105 return SDValue(E, 0);
2106
2107 auto *N = newSDNode<MDNodeSDNode>(MD);
2108 CSEMap.InsertNode(N, IP);
2109 InsertNode(N);
2110 return SDValue(N, 0);
2111}
2112
2113SDValue SelectionDAG::getBitcast(EVT VT, SDValue V) {
2114 if (VT == V.getValueType())
2115 return V;
2116
2117 return getNode(ISD::BITCAST, SDLoc(V), VT, V);
2118}
2119
2120SDValue SelectionDAG::getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr,
2121 unsigned SrcAS, unsigned DestAS) {
2122 SDValue Ops[] = {Ptr};
2123 FoldingSetNodeID ID;
2124 AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), Ops);
2125 ID.AddInteger(SrcAS);
2126 ID.AddInteger(DestAS);
2127
2128 void *IP = nullptr;
2129 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
2130 return SDValue(E, 0);
2131
2132 auto *N = newSDNode<AddrSpaceCastSDNode>(dl.getIROrder(), dl.getDebugLoc(),
2133 VT, SrcAS, DestAS);
2134 createOperands(N, Ops);
2135
2136 CSEMap.InsertNode(N, IP);
2137 InsertNode(N);
2138 return SDValue(N, 0);
2139}
2140
2141SDValue SelectionDAG::getFreeze(SDValue V) {
2142 return getNode(ISD::FREEZE, SDLoc(V), V.getValueType(), V);
2143}
2144
2145/// getShiftAmountOperand - Return the specified value casted to
2146/// the target's desired shift amount type.
2147SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) {
2148 EVT OpTy = Op.getValueType();
2149 EVT ShTy = TLI->getShiftAmountTy(LHSTy, getDataLayout());
2150 if (OpTy == ShTy || OpTy.isVector()) return Op;
2151
2152 return getZExtOrTrunc(Op, SDLoc(Op), ShTy);
2153}
2154
2155SDValue SelectionDAG::expandVAArg(SDNode *Node) {
2156 SDLoc dl(Node);
2157 const TargetLowering &TLI = getTargetLoweringInfo();
2158 const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
2159 EVT VT = Node->getValueType(0);
2160 SDValue Tmp1 = Node->getOperand(0);
2161 SDValue Tmp2 = Node->getOperand(1);
2162 const MaybeAlign MA(Node->getConstantOperandVal(3));
2163
2164 SDValue VAListLoad = getLoad(TLI.getPointerTy(getDataLayout()), dl, Tmp1,
2165 Tmp2, MachinePointerInfo(V));
2166 SDValue VAList = VAListLoad;
2167
2168 if (MA && *MA > TLI.getMinStackArgumentAlignment()) {
2169 VAList = getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
2170 getConstant(MA->value() - 1, dl, VAList.getValueType()));
2171
2172 VAList =
2173 getNode(ISD::AND, dl, VAList.getValueType(), VAList,
2174 getConstant(-(int64_t)MA->value(), dl, VAList.getValueType()));
2175 }
2176
2177 // Increment the pointer, VAList, to the next vaarg
2178 Tmp1 = getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
2179 getConstant(getDataLayout().getTypeAllocSize(
2180 VT.getTypeForEVT(*getContext())),
2181 dl, VAList.getValueType()));
2182 // Store the incremented VAList to the legalized pointer
2183 Tmp1 =
2184 getStore(VAListLoad.getValue(1), dl, Tmp1, Tmp2, MachinePointerInfo(V));
2185 // Load the actual argument out of the pointer VAList
2186 return getLoad(VT, dl, Tmp1, VAList, MachinePointerInfo());
2187}
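
expandVAArg rounds the va_list pointer up to the requested alignment with the usual add-then-mask idiom: add Align - 1, then clear the low bits by ANDing with -Align (equivalently ~(Align - 1) for powers of two). The same computation on plain integers; alignUp is an illustrative helper, not part of this file.

#include <cstdint>

// Round Ptr up to the next multiple of Align; Align must be a power of two.
uint64_t alignUp(uint64_t Ptr, uint64_t Align) {
  return (Ptr + Align - 1) & ~(Align - 1);
}
// alignUp(13, 8) == 16, alignUp(16, 8) == 16.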
2188
2189SDValue SelectionDAG::expandVACopy(SDNode *Node) {
2190 SDLoc dl(Node);
2191 const TargetLowering &TLI = getTargetLoweringInfo();
2192 // This defaults to loading a pointer from the input and storing it to the
2193 // output, returning the chain.
2194 const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue();
2195 const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue();
2196 SDValue Tmp1 =
2197 getLoad(TLI.getPointerTy(getDataLayout()), dl, Node->getOperand(0),
2198 Node->getOperand(2), MachinePointerInfo(VS));
2199 return getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1),
2200 MachinePointerInfo(VD));
2201}
2202
2203Align SelectionDAG::getReducedAlign(EVT VT, bool UseABI) {
2204 const DataLayout &DL = getDataLayout();
2205 Type *Ty = VT.getTypeForEVT(*getContext());
2206 Align RedAlign = UseABI ? DL.getABITypeAlign(Ty) : DL.getPrefTypeAlign(Ty);
2207
2208 if (TLI->isTypeLegal(VT) || !VT.isVector())
2209 return RedAlign;
2210
2211 const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
2212 const Align StackAlign = TFI->getStackAlign();
2213
2214 // See if we can choose a smaller ABI alignment in cases where it's an
2215 // illegal vector type that will get broken down.
2216 if (RedAlign > StackAlign) {
2217 EVT IntermediateVT;
2218 MVT RegisterVT;
2219 unsigned NumIntermediates;
2220 TLI->getVectorTypeBreakdown(*getContext(), VT, IntermediateVT,
2221 NumIntermediates, RegisterVT);
2222 Ty = IntermediateVT.getTypeForEVT(*getContext());
2223 Align RedAlign2 = UseABI ? DL.getABITypeAlign(Ty) : DL.getPrefTypeAlign(Ty);
2224 if (RedAlign2 < RedAlign)
2225 RedAlign = RedAlign2;
2226 }
2227
2228 return RedAlign;
2229}
2230
2231SDValue SelectionDAG::CreateStackTemporary(TypeSize Bytes, Align Alignment) {
2232 MachineFrameInfo &MFI = MF->getFrameInfo();
2233 const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
2234 int StackID = 0;
2235 if (Bytes.isScalable())
2236 StackID = TFI->getStackIDForScalableVectors();
2237 // The stack id gives an indication of whether the object is scalable or
2238 // not, so it's safe to pass in the minimum size here.
2239 int FrameIdx = MFI.CreateStackObject(Bytes.getKnownMinSize(), Alignment,
2240 false, nullptr, StackID);
2241 return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout()));
2242}
2243
2244SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
2245 Type *Ty = VT.getTypeForEVT(*getContext());
2246 Align StackAlign =
2247 std::max(getDataLayout().getPrefTypeAlign(Ty), Align(minAlign));
2248 return CreateStackTemporary(VT.getStoreSize(), StackAlign);
2249}
2250
2251SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
2252 TypeSize VT1Size = VT1.getStoreSize();
2253 TypeSize VT2Size = VT2.getStoreSize();
2254 assert(VT1Size.isScalable() == VT2Size.isScalable() &&
2255 "Don't know how to choose the maximum size when creating a stack "
2256 "temporary");
2257 TypeSize Bytes =
2258 VT1Size.getKnownMinSize() > VT2Size.getKnownMinSize() ? VT1Size : VT2Size;
2259
2260 Type *Ty1 = VT1.getTypeForEVT(*getContext());
2261 Type *Ty2 = VT2.getTypeForEVT(*getContext());
2262 const DataLayout &DL = getDataLayout();
2263 Align Align = std::max(DL.getPrefTypeAlign(Ty1), DL.getPrefTypeAlign(Ty2));
2264 return CreateStackTemporary(Bytes, Align);
2265}
2266
2267SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, SDValue N2,
2268 ISD::CondCode Cond, const SDLoc &dl) {
2269 EVT OpVT = N1.getValueType();
2270
2271 // These setcc operations always fold.
2272 switch (Cond) {
2273 default: break;
2274 case ISD::SETFALSE:
2275 case ISD::SETFALSE2: return getBoolConstant(false, dl, VT, OpVT);
2276 case ISD::SETTRUE:
2277 case ISD::SETTRUE2: return getBoolConstant(true, dl, VT, OpVT);
2278
2279 case ISD::SETOEQ:
2280 case ISD::SETOGT:
2281 case ISD::SETOGE:
2282 case ISD::SETOLT:
2283 case ISD::SETOLE:
2284 case ISD::SETONE:
2285 case ISD::SETO:
2286 case ISD::SETUO:
2287 case ISD::SETUEQ:
2288 case ISD::SETUNE:
2289 assert(!OpVT.isInteger() && "Illegal setcc for integer!");
2290 break;
2291 }
2292
2293 if (OpVT.isInteger()) {
2294 // For EQ and NE, we can always pick a value for the undef to make the
2295 // predicate pass or fail, so we can return undef.
2296 // Matches behavior in llvm::ConstantFoldCompareInstruction.
2297 // icmp eq/ne X, undef -> undef.
2298 if ((N1.isUndef() || N2.isUndef()) &&
2299 (Cond == ISD::SETEQ || Cond == ISD::SETNE))
2300 return getUNDEF(VT);
2301
2302 // If both operands are undef, we can return undef for int comparison.
2303 // icmp undef, undef -> undef.
2304 if (N1.isUndef() && N2.isUndef())
2305 return getUNDEF(VT);
2306
2307 // icmp X, X -> true/false
2308 // icmp X, undef -> true/false because undef could be X.
2309 if (N1 == N2)
2310 return getBoolConstant(ISD::isTrueWhenEqual(Cond), dl, VT, OpVT);
2311 }
2312
2313 if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2)) {
2314 const APInt &C2 = N2C->getAPIntValue();
2315 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) {
2316 const APInt &C1 = N1C->getAPIntValue();
2317
2318 return getBoolConstant(ICmpInst::compare(C1, C2, getICmpCondCode(Cond)),
2319 dl, VT, OpVT);
2320 }
2321 }
2322
2323 auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
2324 auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
2325
2326 if (N1CFP && N2CFP) {
2327 APFloat::cmpResult R = N1CFP->getValueAPF().compare(N2CFP->getValueAPF());
2328 switch (Cond) {
2329 default: break;
2330 case ISD::SETEQ: if (R==APFloat::cmpUnordered)
2331 return getUNDEF(VT);
2332 LLVM_FALLTHROUGH;
2333 case ISD::SETOEQ: return getBoolConstant(R==APFloat::cmpEqual, dl, VT,
2334 OpVT);
2335 case ISD::SETNE: if (R==APFloat::cmpUnordered)
2336 return getUNDEF(VT);
2337 LLVM_FALLTHROUGH;
2338 case ISD::SETONE: return getBoolConstant(R==APFloat::cmpGreaterThan ||
2339 R==APFloat::cmpLessThan, dl, VT,
2340 OpVT);
2341 case ISD::SETLT: if (R==APFloat::cmpUnordered)
2342 return getUNDEF(VT);
2343 LLVM_FALLTHROUGH;
2344 case ISD::SETOLT: return getBoolConstant(R==APFloat::cmpLessThan, dl, VT,
2345 OpVT);
2346 case ISD::SETGT: if (R==APFloat::cmpUnordered)
2347 return getUNDEF(VT);
2348 LLVM_FALLTHROUGH;
2349 case ISD::SETOGT: return getBoolConstant(R==APFloat::cmpGreaterThan, dl,
2350 VT, OpVT);
2351 case ISD::SETLE: if (R==APFloat::cmpUnordered)
2352 return getUNDEF(VT);
2353 LLVM_FALLTHROUGH;
2354 case ISD::SETOLE: return getBoolConstant(R==APFloat::cmpLessThan ||
2355 R==APFloat::cmpEqual, dl, VT,
2356 OpVT);
2357 case ISD::SETGE: if (R==APFloat::cmpUnordered)
2358 return getUNDEF(VT);
2359 LLVM_FALLTHROUGH;
2360 case ISD::SETOGE: return getBoolConstant(R==APFloat::cmpGreaterThan ||
2361 R==APFloat::cmpEqual, dl, VT, OpVT);
2362 case ISD::SETO: return getBoolConstant(R!=APFloat::cmpUnordered, dl, VT,
2363 OpVT);
2364 case ISD::SETUO: return getBoolConstant(R==APFloat::cmpUnordered, dl, VT,
2365 OpVT);
2366 case ISD::SETUEQ: return getBoolConstant(R==APFloat::cmpUnordered ||
2367 R==APFloat::cmpEqual, dl, VT,
2368 OpVT);
2369 case ISD::SETUNE: return getBoolConstant(R!=APFloat::cmpEqual, dl, VT,
2370 OpVT);
2371 case ISD::SETULT: return getBoolConstant(R==APFloat::cmpUnordered ||
2372 R==APFloat::cmpLessThan, dl, VT,
2373 OpVT);
2374 case ISD::SETUGT: return getBoolConstant(R==APFloat::cmpGreaterThan ||
2375 R==APFloat::cmpUnordered, dl, VT,
2376 OpVT);
2377 case ISD::SETULE: return getBoolConstant(R!=APFloat::cmpGreaterThan, dl,
2378 VT, OpVT);
2379 case ISD::SETUGE: return getBoolConstant(R!=APFloat::cmpLessThan, dl, VT,
2380 OpVT);
2381 }
2382 } else if (N1CFP && OpVT.isSimple() && !N2.isUndef()) {
2383 // Ensure that the constant occurs on the RHS.
2384 ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond);
2385 if (!TLI->isCondCodeLegal(SwappedCond, OpVT.getSimpleVT()))
2386 return SDValue();
2387 return getSetCC(dl, VT, N2, N1, SwappedCond);
2388 } else if ((N2CFP && N2CFP->getValueAPF().isNaN()) ||
2389 (OpVT.isFloatingPoint() && (N1.isUndef() || N2.isUndef()))) {
2390 // If an operand is known to be a nan (or undef that could be a nan), we can
2391 // fold it.
2392 // Choosing NaN for the undef will always make unordered comparisons succeed
2393 // and ordered comparisons fail.
2394 // Matches behavior in llvm::ConstantFoldCompareInstruction.
2395 switch (ISD::getUnorderedFlavor(Cond)) {
2396 default:
2397 llvm_unreachable("Unknown flavor!");
2398 case 0: // Known false.
2399 return getBoolConstant(false, dl, VT, OpVT);
2400 case 1: // Known true.
2401 return getBoolConstant(true, dl, VT, OpVT);
2402 case 2: // Undefined.
2403 return getUNDEF(VT);
2404 }
2405 }
2406
2407 // Could not fold it.
2408 return SDValue();
2409}
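
Illustrative sketch (not part of the annotated source; the helper name is hypothetical): FoldSetCC above is the constant-folding entry point, so a caller that is about to build a comparison can try the fold first and only materialize a SETCC node when nothing folds, e.g. an ordered compare against a NaN constant folds straight to false and an unordered one to true.

static SDValue foldOrBuildSetCC(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
                                SDValue LHS, SDValue RHS, ISD::CondCode CC) {
  // e.g. SETOLT x, NaN -> false, SETUO x, NaN -> true (per the switch above).
  if (SDValue Folded = DAG.FoldSetCC(VT, LHS, RHS, CC, DL))
    return Folded;
  return DAG.getSetCC(DL, VT, LHS, RHS, CC);
}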
2410
2411/// See if the specified operand can be simplified with the knowledge that only
2412/// the bits specified by DemandedBits are used.
2413/// TODO: really we should be making this into the DAG equivalent of
2414/// SimplifyMultipleUseDemandedBits and not generate any new nodes.
2415SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &DemandedBits) {
2416 EVT VT = V.getValueType();
2417
2418 if (VT.isScalableVector())
2419 return SDValue();
2420
2421 APInt DemandedElts = VT.isVector()
2422 ? APInt::getAllOnes(VT.getVectorNumElements())
2423 : APInt(1, 1);
2424 return GetDemandedBits(V, DemandedBits, DemandedElts);
2425}
2426
2427/// See if the specified operand can be simplified with the knowledge that only
2428/// the bits specified by DemandedBits are used in the elements specified by
2429/// DemandedElts.
2430/// TODO: really we should be making this into the DAG equivalent of
2431/// SimplifyMultipleUseDemandedBits and not generate any new nodes.
2432SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &DemandedBits,
2433 const APInt &DemandedElts) {
2434 switch (V.getOpcode()) {
2435 default:
2436 return TLI->SimplifyMultipleUseDemandedBits(V, DemandedBits, DemandedElts,
2437 *this, 0);
2438 case ISD::Constant: {
2439 const APInt &CVal = cast<ConstantSDNode>(V)->getAPIntValue();
2440 APInt NewVal = CVal & DemandedBits;
2441 if (NewVal != CVal)
2442 return getConstant(NewVal, SDLoc(V), V.getValueType());
2443 break;
2444 }
2445 case ISD::SRL:
2446 // Only look at single-use SRLs.
2447 if (!V.getNode()->hasOneUse())
2448 break;
2449 if (auto *RHSC = dyn_cast<ConstantSDNode>(V.getOperand(1))) {
2450 // See if we can recursively simplify the LHS.
2451 unsigned Amt = RHSC->getZExtValue();
2452
2453 // Watch out for shift count overflow though.
2454 if (Amt >= DemandedBits.getBitWidth())
2455 break;
2456 APInt SrcDemandedBits = DemandedBits << Amt;
2457 if (SDValue SimplifyLHS =
2458 GetDemandedBits(V.getOperand(0), SrcDemandedBits))
2459 return getNode(ISD::SRL, SDLoc(V), V.getValueType(), SimplifyLHS,
2460 V.getOperand(1));
2461 }
2462 break;
2463 }
2464 return SDValue();
2465}
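
Illustrative sketch (assumption; the helper name and the 8-bit mask are invented for the example): a caller that only consumes the low byte of a value can ask GetDemandedBits for a simplified operand and gets SDValue() back when nothing simplifies.

static SDValue simplifyForLowByte(SelectionDAG &DAG, SDValue V) {
  unsigned BitWidth = V.getScalarValueSizeInBits();
  if (BitWidth <= 8)
    return SDValue();
  // Only the low 8 bits of V are demanded by the (hypothetical) user.
  APInt LowByte = APInt::getLowBitsSet(BitWidth, 8);
  return DAG.GetDemandedBits(V, LowByte);
}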
2466
2467/// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We
2468/// use this predicate to simplify operations downstream.
2469bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const {
2470 unsigned BitWidth = Op.getScalarValueSizeInBits();
2471 return MaskedValueIsZero(Op, APInt::getSignMask(BitWidth), Depth);
2472}
2473
2474/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use
2475/// this predicate to simplify operations downstream. Mask is known to be zero
2476/// for bits that V cannot have.
2477bool SelectionDAG::MaskedValueIsZero(SDValue V, const APInt &Mask,
2478 unsigned Depth) const {
2479 return Mask.isSubsetOf(computeKnownBits(V, Depth).Zero);
2480}
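
Illustrative sketch (assumption; the helper name is hypothetical): a combine can use MaskedValueIsZero to prove an AND mask is redundant, e.g. (and (zext i8 X to i32), 255) keeps every bit of X that can already be non-zero.

static bool andMaskIsRedundant(SelectionDAG &DAG, SDValue X, const APInt &MaskC) {
  // If every bit cleared by the mask is already known zero in X,
  // then 'and X, MaskC' cannot change X.
  return DAG.MaskedValueIsZero(X, ~MaskC);
}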
2481
2482/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero in
2483/// DemandedElts. We use this predicate to simplify operations downstream.
2484/// Mask is known to be zero for bits that V cannot have.
2485bool SelectionDAG::MaskedValueIsZero(SDValue V, const APInt &Mask,
2486 const APInt &DemandedElts,
2487 unsigned Depth) const {
2488 return Mask.isSubsetOf(computeKnownBits(V, DemandedElts, Depth).Zero);
2489}
2490
2491/// MaskedValueIsAllOnes - Return true if '(Op & Mask) == Mask'.
2492bool SelectionDAG::MaskedValueIsAllOnes(SDValue V, const APInt &Mask,
2493 unsigned Depth) const {
2494 return Mask.isSubsetOf(computeKnownBits(V, Depth).One);
2495}
2496
2497/// isSplatValue - Return true if the vector V has the same value
2498/// across all DemandedElts. For scalable vectors it does not make
2499/// sense to specify which elements are demanded or undefined, therefore
2500/// they are simply ignored.
2501bool SelectionDAG::isSplatValue(SDValue V, const APInt &DemandedElts,
2502 APInt &UndefElts, unsigned Depth) const {
2503 unsigned Opcode = V.getOpcode();
2504 EVT VT = V.getValueType();
2505 assert(VT.isVector() && "Vector type expected");
2506
2507 if (!VT.isScalableVector() && !DemandedElts)
2508 return false; // No demanded elts, better to assume we don't know anything.
2509
2510 if (Depth >= MaxRecursionDepth)
2511 return false; // Limit search depth.
2512
2513 // Deal with some common cases here that work for both fixed and scalable
2514 // vector types.
2515 switch (Opcode) {
2516 case ISD::SPLAT_VECTOR:
2517 UndefElts = V.getOperand(0).isUndef()
2518 ? APInt::getAllOnes(DemandedElts.getBitWidth())
2519 : APInt(DemandedElts.getBitWidth(), 0);
2520 return true;
2521 case ISD::ADD:
2522 case ISD::SUB:
2523 case ISD::AND:
2524 case ISD::XOR:
2525 case ISD::OR: {
2526 APInt UndefLHS, UndefRHS;
2527 SDValue LHS = V.getOperand(0);
2528 SDValue RHS = V.getOperand(1);
2529 if (isSplatValue(LHS, DemandedElts, UndefLHS, Depth + 1) &&
2530 isSplatValue(RHS, DemandedElts, UndefRHS, Depth + 1)) {
2531 UndefElts = UndefLHS | UndefRHS;
2532 return true;
2533 }
2534 return false;
2535 }
2536 case ISD::ABS:
2537 case ISD::TRUNCATE:
2538 case ISD::SIGN_EXTEND:
2539 case ISD::ZERO_EXTEND:
2540 return isSplatValue(V.getOperand(0), DemandedElts, UndefElts, Depth + 1);
2541 default:
2542 if (Opcode >= ISD::BUILTIN_OP_END || Opcode == ISD::INTRINSIC_WO_CHAIN ||
2543 Opcode == ISD::INTRINSIC_W_CHAIN || Opcode == ISD::INTRINSIC_VOID)
2544 return TLI->isSplatValueForTargetNode(V, DemandedElts, UndefElts, Depth);
2545 break;
2546}
2547
2548 // We don't support cases other than those above for scalable vectors at
2549 // the moment.
2550 if (VT.isScalableVector())
2551 return false;
2552
2553 unsigned NumElts = VT.getVectorNumElements();
2554 assert(NumElts == DemandedElts.getBitWidth() && "Vector size mismatch");
2555 UndefElts = APInt::getZero(NumElts);
2556
2557 switch (Opcode) {
2558 case ISD::BUILD_VECTOR: {
2559 SDValue Scl;
2560 for (unsigned i = 0; i != NumElts; ++i) {
2561 SDValue Op = V.getOperand(i);
2562 if (Op.isUndef()) {
2563 UndefElts.setBit(i);
2564 continue;
2565 }
2566 if (!DemandedElts[i])
2567 continue;
2568 if (Scl && Scl != Op)
2569 return false;
2570 Scl = Op;
2571 }
2572 return true;
2573 }
2574 case ISD::VECTOR_SHUFFLE: {
2575 // Check if this is a shuffle node doing a splat.
2576 // TODO: Do we need to handle shuffle(splat, undef, mask)?
2577 int SplatIndex = -1;
2578 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(V)->getMask();
2579 for (int i = 0; i != (int)NumElts; ++i) {
2580 int M = Mask[i];
2581 if (M < 0) {
2582 UndefElts.setBit(i);
2583 continue;
2584 }
2585 if (!DemandedElts[i])
2586 continue;
2587 if (0 <= SplatIndex && SplatIndex != M)
2588 return false;
2589 SplatIndex = M;
2590 }
2591 return true;
2592 }
2593 case ISD::EXTRACT_SUBVECTOR: {
2594 // Offset the demanded elts by the subvector index.
2595 SDValue Src = V.getOperand(0);
2596 // We don't support scalable vectors at the moment.
2597 if (Src.getValueType().isScalableVector())
2598 return false;
2599 uint64_t Idx = V.getConstantOperandVal(1);
2600 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
2601 APInt UndefSrcElts;
2602 APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
2603 if (isSplatValue(Src, DemandedSrcElts, UndefSrcElts, Depth + 1)) {
2604 UndefElts = UndefSrcElts.extractBits(NumElts, Idx);
2605 return true;
2606 }
2607 break;
2608 }
2609 case ISD::ANY_EXTEND_VECTOR_INREG:
2610 case ISD::SIGN_EXTEND_VECTOR_INREG:
2611 case ISD::ZERO_EXTEND_VECTOR_INREG: {
2612 // Widen the demanded elts by the src element count.
2613 SDValue Src = V.getOperand(0);
2614 // We don't support scalable vectors at the moment.
2615 if (Src.getValueType().isScalableVector())
2616 return false;
2617 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
2618 APInt UndefSrcElts;
2619 APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts);
2620 if (isSplatValue(Src, DemandedSrcElts, UndefSrcElts, Depth + 1)) {
2621 UndefElts = UndefSrcElts.truncOrSelf(NumElts);
2622 return true;
2623 }
2624 break;
2625 }
2626 }
2627
2628 return false;
2629}
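
Illustrative sketch (assumption; fixed-width vectors only, helper name is hypothetical): the DemandedElts form restricts the splat query to a subset of lanes, e.g. asking whether just the even elements hold one common value.

static bool evenLanesAreSplat(SelectionDAG &DAG, SDValue Vec) {
  EVT VT = Vec.getValueType();
  if (!VT.isFixedLengthVector())
    return false;
  unsigned NumElts = VT.getVectorNumElements();
  APInt Demanded = APInt::getZero(NumElts);
  for (unsigned I = 0; I < NumElts; I += 2)
    Demanded.setBit(I);
  APInt UndefElts;
  return DAG.isSplatValue(Vec, Demanded, UndefElts);
}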
2630
2631/// Helper wrapper to main isSplatValue function.
2632bool SelectionDAG::isSplatValue(SDValue V, bool AllowUndefs) const {
2633 EVT VT = V.getValueType();
2634 assert(VT.isVector() && "Vector type expected");
2635
2636 APInt UndefElts;
2637 APInt DemandedElts;
2638
2639 // For now we don't support this with scalable vectors.
2640 if (!VT.isScalableVector())
2641 DemandedElts = APInt::getAllOnes(VT.getVectorNumElements());
2642 return isSplatValue(V, DemandedElts, UndefElts) &&
2643 (AllowUndefs || !UndefElts);
2644}
2645
2646SDValue SelectionDAG::getSplatSourceVector(SDValue V, int &SplatIdx) {
2647 V = peekThroughExtractSubvectors(V);
2648
2649 EVT VT = V.getValueType();
2650 unsigned Opcode = V.getOpcode();
2651 switch (Opcode) {
2652 default: {
2653 APInt UndefElts;
2654 APInt DemandedElts;
2655
2656 if (!VT.isScalableVector())
2657 DemandedElts = APInt::getAllOnes(VT.getVectorNumElements());
2658
2659 if (isSplatValue(V, DemandedElts, UndefElts)) {
2660 if (VT.isScalableVector()) {
2661 // DemandedElts and UndefElts are ignored for scalable vectors, since
2662 // the only supported cases are SPLAT_VECTOR nodes.
2663 SplatIdx = 0;
2664 } else {
2665 // Handle case where all demanded elements are UNDEF.
2666 if (DemandedElts.isSubsetOf(UndefElts)) {
2667 SplatIdx = 0;
2668 return getUNDEF(VT);
2669 }
2670 SplatIdx = (UndefElts & DemandedElts).countTrailingOnes();
2671 }
2672 return V;
2673 }
2674 break;
2675 }
2676 case ISD::SPLAT_VECTOR:
2677 SplatIdx = 0;
2678 return V;
2679 case ISD::VECTOR_SHUFFLE: {
2680 if (VT.isScalableVector())
2681 return SDValue();
2682
2683 // Check if this is a shuffle node doing a splat.
2684 // TODO - remove this and rely purely on SelectionDAG::isSplatValue,
2685 // getTargetVShiftNode currently struggles without the splat source.
2686 auto *SVN = cast<ShuffleVectorSDNode>(V);
2687 if (!SVN->isSplat())
2688 break;
2689 int Idx = SVN->getSplatIndex();
2690 int NumElts = V.getValueType().getVectorNumElements();
2691 SplatIdx = Idx % NumElts;
2692 return V.getOperand(Idx / NumElts);
2693 }
2694 }
2695
2696 return SDValue();
2697}
2698
2699SDValue SelectionDAG::getSplatValue(SDValue V, bool LegalTypes) {
2700 int SplatIdx;
2701 if (SDValue SrcVector = getSplatSourceVector(V, SplatIdx)) {
2702 EVT SVT = SrcVector.getValueType().getScalarType();
2703 EVT LegalSVT = SVT;
2704 if (LegalTypes && !TLI->isTypeLegal(SVT)) {
2705 if (!SVT.isInteger())
2706 return SDValue();
2707 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
2708 if (LegalSVT.bitsLT(SVT))
2709 return SDValue();
2710 }
2711 return getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(V), LegalSVT, SrcVector,
2712 getVectorIdxConstant(SplatIdx, SDLoc(V)));
2713 }
2714 return SDValue();
2715}
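
Illustrative sketch (assumption; the helper name is hypothetical): isSplatValue plus getSplatValue recover the scalar that a uniform vector operand repeats, which a combine can then reuse in scalar form.

static SDValue getUniformScalar(SelectionDAG &DAG, SDValue Vec) {
  if (!Vec.getValueType().isVector() ||
      !DAG.isSplatValue(Vec, /*AllowUndefs=*/true))
    return SDValue();
  // Returns an EXTRACT_VECTOR_ELT of the splat source (see above).
  return DAG.getSplatValue(Vec);
}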
2716
2717const APInt *
2718SelectionDAG::getValidShiftAmountConstant(SDValue V,
2719 const APInt &DemandedElts) const {
2720 assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL ||
2721 V.getOpcode() == ISD::SRA) &&
2722 "Unknown shift node");
2723 unsigned BitWidth = V.getScalarValueSizeInBits();
2724 if (ConstantSDNode *SA = isConstOrConstSplat(V.getOperand(1), DemandedElts)) {
2725 // Shifting more than the bitwidth is not valid.
2726 const APInt &ShAmt = SA->getAPIntValue();
2727 if (ShAmt.ult(BitWidth))
2728 return &ShAmt;
2729 }
2730 return nullptr;
2731}
2732
2733const APInt *SelectionDAG::getValidMinimumShiftAmountConstant(
2734 SDValue V, const APInt &DemandedElts) const {
2735 assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL ||
2736 V.getOpcode() == ISD::SRA) &&
2737 "Unknown shift node");
2738 if (const APInt *ValidAmt = getValidShiftAmountConstant(V, DemandedElts))
2739 return ValidAmt;
2740 unsigned BitWidth = V.getScalarValueSizeInBits();
2741 auto *BV = dyn_cast<BuildVectorSDNode>(V.getOperand(1));
2742 if (!BV)
2743 return nullptr;
2744 const APInt *MinShAmt = nullptr;
2745 for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
2746 if (!DemandedElts[i])
2747 continue;
2748 auto *SA = dyn_cast<ConstantSDNode>(BV->getOperand(i));
2749 if (!SA)
2750 return nullptr;
2751 // Shifting more than the bitwidth is not valid.
2752 const APInt &ShAmt = SA->getAPIntValue();
2753 if (ShAmt.uge(BitWidth))
2754 return nullptr;
2755 if (MinShAmt && MinShAmt->ule(ShAmt))
2756 continue;
2757 MinShAmt = &ShAmt;
2758 }
2759 return MinShAmt;
2760}
2761
2762const APInt *SelectionDAG::getValidMaximumShiftAmountConstant(
2763 SDValue V, const APInt &DemandedElts) const {
2764 assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL ||
2765 V.getOpcode() == ISD::SRA) &&
2766 "Unknown shift node");
2767 if (const APInt *ValidAmt = getValidShiftAmountConstant(V, DemandedElts))
2768 return ValidAmt;
2769 unsigned BitWidth = V.getScalarValueSizeInBits();
2770 auto *BV = dyn_cast<BuildVectorSDNode>(V.getOperand(1));
2771 if (!BV)
2772 return nullptr;
2773 const APInt *MaxShAmt = nullptr;
2774 for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
2775 if (!DemandedElts[i])
2776 continue;
2777 auto *SA = dyn_cast<ConstantSDNode>(BV->getOperand(i));
2778 if (!SA)
2779 return nullptr;
2780 // Shifting more than the bitwidth is not valid.
2781 const APInt &ShAmt = SA->getAPIntValue();
2782 if (ShAmt.uge(BitWidth))
2783 return nullptr;
2784 if (MaxShAmt && MaxShAmt->uge(ShAmt))
2785 continue;
2786 MaxShAmt = &ShAmt;
2787 }
2788 return MaxShAmt;
2789}
2790
2791/// Determine which bits of Op are known to be either zero or one and return
2792/// them in Known. For vectors, the known bits are those that are shared by
2793/// every vector element.
2794KnownBits SelectionDAG::computeKnownBits(SDValue Op, unsigned Depth) const {
2795 EVT VT = Op.getValueType();
2796
2797 // TODO: Until we have a plan for how to represent demanded elements for
2798 // scalable vectors, we can just bail out for now.
2799 if (Op.getValueType().isScalableVector()) {
2800 unsigned BitWidth = Op.getScalarValueSizeInBits();
2801 return KnownBits(BitWidth);
2802 }
2803
2804 APInt DemandedElts = VT.isVector()
2805 ? APInt::getAllOnes(VT.getVectorNumElements())
2806 : APInt(1, 1);
2807 return computeKnownBits(Op, DemandedElts, Depth);
2808}
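
Illustrative sketch (assumption; the helper name is hypothetical): a typical client queries known bits to prove a value already fits in a narrower range, e.g. the top 24 bits of (zext i8 X to i32) come back as known zero.

static bool fitsInLowBits(SelectionDAG &DAG, SDValue V, unsigned LowBits) {
  KnownBits Known = DAG.computeKnownBits(V);
  if (LowBits >= Known.getBitWidth())
    return true;
  // All bits above LowBits must be known zero.
  return Known.countMinLeadingZeros() >= Known.getBitWidth() - LowBits;
}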
2809
2810/// Determine which bits of Op are known to be either zero or one and return
2811/// them in Known. The DemandedElts argument allows us to only collect the known
2812/// bits that are shared by the requested vector elements.
2813KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
2814 unsigned Depth) const {
2815 unsigned BitWidth = Op.getScalarValueSizeInBits();
2816
2817 KnownBits Known(BitWidth); // Don't know anything.
2818
2819 // TODO: Until we have a plan for how to represent demanded elements for
2820 // scalable vectors, we can just bail out for now.
2821 if (Op.getValueType().isScalableVector())
2822 return Known;
2823
2824 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
2825 // We know all of the bits for a constant!
2826 return KnownBits::makeConstant(C->getAPIntValue());
2827 }
2828 if (auto *C = dyn_cast<ConstantFPSDNode>(Op)) {
2829 // We know all of the bits for a constant fp!
2830 return KnownBits::makeConstant(C->getValueAPF().bitcastToAPInt());
2831 }
2832
2833 if (Depth >= MaxRecursionDepth)
2834 return Known; // Limit search depth.
2835
2836 KnownBits Known2;
2837 unsigned NumElts = DemandedElts.getBitWidth();
2838 assert((!Op.getValueType().isVector() ||
2839 NumElts == Op.getValueType().getVectorNumElements()) &&
2840 "Unexpected vector size");
2841
2842 if (!DemandedElts)
2843 return Known; // No demanded elts, better to assume we don't know anything.
2844
2845 unsigned Opcode = Op.getOpcode();
2846 switch (Opcode) {
2847 case ISD::BUILD_VECTOR:
2848 // Collect the known bits that are shared by every demanded vector element.
2849 Known.Zero.setAllBits(); Known.One.setAllBits();
2850 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
2851 if (!DemandedElts[i])
2852 continue;
2853
2854 SDValue SrcOp = Op.getOperand(i);
2855 Known2 = computeKnownBits(SrcOp, Depth + 1);
2856
2857 // BUILD_VECTOR can implicitly truncate sources; we must handle this.
2858 if (SrcOp.getValueSizeInBits() != BitWidth) {
2859 assert(SrcOp.getValueSizeInBits() > BitWidth &&
2860 "Expected BUILD_VECTOR implicit truncation");
2861 Known2 = Known2.trunc(BitWidth);
2862 }
2863
2864 // Known bits are the values that are shared by every demanded element.
2865 Known = KnownBits::commonBits(Known, Known2);
2866
2867 // If we don't know any bits, early out.
2868 if (Known.isUnknown())
2869 break;
2870 }
2871 break;
2872 case ISD::VECTOR_SHUFFLE: {
2873 // Collect the known bits that are shared by every vector element referenced
2874 // by the shuffle.
2875 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
2876 Known.Zero.setAllBits(); Known.One.setAllBits();
2877 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
2878 assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
2879 for (unsigned i = 0; i != NumElts; ++i) {
2880 if (!DemandedElts[i])
2881 continue;
2882
2883 int M = SVN->getMaskElt(i);
2884 if (M < 0) {
2885 // For UNDEF elements, we don't know anything about the common state of
2886 // the shuffle result.
2887 Known.resetAll();
2888 DemandedLHS.clearAllBits();
2889 DemandedRHS.clearAllBits();
2890 break;
2891 }
2892
2893 if ((unsigned)M < NumElts)
2894 DemandedLHS.setBit((unsigned)M % NumElts);
2895 else
2896 DemandedRHS.setBit((unsigned)M % NumElts);
2897 }
2898 // Known bits are the values that are shared by every demanded element.
2899 if (!!DemandedLHS) {
2900 SDValue LHS = Op.getOperand(0);
2901 Known2 = computeKnownBits(LHS, DemandedLHS, Depth + 1);
2902 Known = KnownBits::commonBits(Known, Known2);
2903 }
2904 // If we don't know any bits, early out.
2905 if (Known.isUnknown())
2906 break;
2907 if (!!DemandedRHS) {
2908 SDValue RHS = Op.getOperand(1);
2909 Known2 = computeKnownBits(RHS, DemandedRHS, Depth + 1);
2910 Known = KnownBits::commonBits(Known, Known2);
2911 }
2912 break;
2913 }
2914 case ISD::CONCAT_VECTORS: {
2915 // Split DemandedElts and test each of the demanded subvectors.
2916 Known.Zero.setAllBits(); Known.One.setAllBits();
2917 EVT SubVectorVT = Op.getOperand(0).getValueType();
2918 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
2919 unsigned NumSubVectors = Op.getNumOperands();
2920 for (unsigned i = 0; i != NumSubVectors; ++i) {
2921 APInt DemandedSub =
2922 DemandedElts.extractBits(NumSubVectorElts, i * NumSubVectorElts);
2923 if (!!DemandedSub) {
2924 SDValue Sub = Op.getOperand(i);
2925 Known2 = computeKnownBits(Sub, DemandedSub, Depth + 1);
2926 Known = KnownBits::commonBits(Known, Known2);
2927 }
2928 // If we don't know any bits, early out.
2929 if (Known.isUnknown())
2930 break;
2931 }
2932 break;
2933 }
2934 case ISD::INSERT_SUBVECTOR: {
2935 // Demand any elements from the subvector and the remainder from the src it's
2936 // inserted into.
2937 SDValue Src = Op.getOperand(0);
2938 SDValue Sub = Op.getOperand(1);
2939 uint64_t Idx = Op.getConstantOperandVal(2);
2940 unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
2941 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
2942 APInt DemandedSrcElts = DemandedElts;
2943 DemandedSrcElts.insertBits(APInt::getZero(NumSubElts), Idx);
2944
2945 Known.One.setAllBits();
2946 Known.Zero.setAllBits();
2947 if (!!DemandedSubElts) {
2948 Known = computeKnownBits(Sub, DemandedSubElts, Depth + 1);
2949 if (Known.isUnknown())
2950 break; // early-out.
2951 }
2952 if (!!DemandedSrcElts) {
2953 Known2 = computeKnownBits(Src, DemandedSrcElts, Depth + 1);
2954 Known = KnownBits::commonBits(Known, Known2);
2955 }
2956 break;
2957 }
2958 case ISD::EXTRACT_SUBVECTOR: {
2959 // Offset the demanded elts by the subvector index.
2960 SDValue Src = Op.getOperand(0);
2961 // Bail until we can represent demanded elements for scalable vectors.
2962 if (Src.getValueType().isScalableVector())
2963 break;
2964 uint64_t Idx = Op.getConstantOperandVal(1);
2965 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
2966 APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
2967 Known = computeKnownBits(Src, DemandedSrcElts, Depth + 1);
2968 break;
2969 }
2970 case ISD::SCALAR_TO_VECTOR: {
2971 // We know as much about scalar_to_vector as we know about its source,
2972 // which becomes the first element of an otherwise unknown vector.
2973 if (DemandedElts != 1)
2974 break;
2975
2976 SDValue N0 = Op.getOperand(0);
2977 Known = computeKnownBits(N0, Depth + 1);
2978 if (N0.getValueSizeInBits() != BitWidth)
2979 Known = Known.trunc(BitWidth);
2980
2981 break;
2982 }
2983 case ISD::BITCAST: {
2984 SDValue N0 = Op.getOperand(0);
2985 EVT SubVT = N0.getValueType();
2986 unsigned SubBitWidth = SubVT.getScalarSizeInBits();
2987
2988 // Ignore bitcasts from unsupported types.
2989 if (!(SubVT.isInteger() || SubVT.isFloatingPoint()))
2990 break;
2991
2992 // Fast handling of 'identity' bitcasts.
2993 if (BitWidth == SubBitWidth) {
2994 Known = computeKnownBits(N0, DemandedElts, Depth + 1);
2995 break;
2996 }
2997
2998 bool IsLE = getDataLayout().isLittleEndian();
2999
3000 // Bitcast 'small element' vector to 'large element' scalar/vector.
3001 if ((BitWidth % SubBitWidth) == 0) {
3002 assert(N0.getValueType().isVector() && "Expected bitcast from vector");
3003
3004 // Collect known bits for the (larger) output by collecting the known
3005 // bits from each set of sub elements and shift these into place.
3006 // We need to separately call computeKnownBits for each set of
3007 // sub elements as the knownbits for each is likely to be different.
3008 unsigned SubScale = BitWidth / SubBitWidth;
3009 APInt SubDemandedElts(NumElts * SubScale, 0);
3010 for (unsigned i = 0; i != NumElts; ++i)
3011 if (DemandedElts[i])
3012 SubDemandedElts.setBit(i * SubScale);
3013
3014 for (unsigned i = 0; i != SubScale; ++i) {
3015 Known2 = computeKnownBits(N0, SubDemandedElts.shl(i),
3016 Depth + 1);
3017 unsigned Shifts = IsLE ? i : SubScale - 1 - i;
3018 Known.insertBits(Known2, SubBitWidth * Shifts);
3019 }
3020 }
3021
3022 // Bitcast 'large element' scalar/vector to 'small element' vector.
3023 if ((SubBitWidth % BitWidth) == 0) {
3024 assert(Op.getValueType().isVector() && "Expected bitcast to vector");
3025
3026 // Collect known bits for the (smaller) output by collecting the known
3027 // bits from the overlapping larger input elements and extracting the
3028 // sub sections we actually care about.
3029 unsigned SubScale = SubBitWidth / BitWidth;
3030 APInt SubDemandedElts =
3031 APIntOps::ScaleBitMask(DemandedElts, NumElts / SubScale);
3032 Known2 = computeKnownBits(N0, SubDemandedElts, Depth + 1);
3033
3034 Known.Zero.setAllBits(); Known.One.setAllBits();
3035 for (unsigned i = 0; i != NumElts; ++i)
3036 if (DemandedElts[i]) {
3037 unsigned Shifts = IsLE ? i : NumElts - 1 - i;
3038 unsigned Offset = (Shifts % SubScale) * BitWidth;
3039 Known = KnownBits::commonBits(Known,
3040 Known2.extractBits(BitWidth, Offset));
3041 // If we don't know any bits, early out.
3042 if (Known.isUnknown())
3043 break;
3044 }
3045 }
3046 break;
3047 }
3048 case ISD::AND:
3049 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3050 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3051
3052 Known &= Known2;
3053 break;
3054 case ISD::OR:
3055 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3056 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3057
3058 Known |= Known2;
3059 break;
3060 case ISD::XOR:
3061 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3062 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3063
3064 Known ^= Known2;
3065 break;
3066 case ISD::MUL: {
3067 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3068 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3069 Known = KnownBits::mul(Known, Known2);
3070 break;
3071 }
3072 case ISD::MULHU: {
3073 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3074 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3075 Known = KnownBits::mulhu(Known, Known2);
3076 break;
3077 }
3078 case ISD::MULHS: {
3079 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3080 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3081 Known = KnownBits::mulhs(Known, Known2);
3082 break;
3083 }
3084 case ISD::UMUL_LOHI: {
3085 assert((Op.getResNo() == 0 || Op.getResNo() == 1) && "Unknown result");
3086 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3087 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3088 if (Op.getResNo() == 0)
3089 Known = KnownBits::mul(Known, Known2);
3090 else
3091 Known = KnownBits::mulhu(Known, Known2);
3092 break;
3093 }
3094 case ISD::SMUL_LOHI: {
3095 assert((Op.getResNo() == 0 || Op.getResNo() == 1) && "Unknown result");
3096 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3097 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3098 if (Op.getResNo() == 0)
3099 Known = KnownBits::mul(Known, Known2);
3100 else
3101 Known = KnownBits::mulhs(Known, Known2);
3102 break;
3103 }
3104 case ISD::UDIV: {
3105 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3106 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3107 Known = KnownBits::udiv(Known, Known2);
3108 break;
3109 }
3110 case ISD::SELECT:
3111 case ISD::VSELECT:
3112 Known = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1);
3113 // If we don't know any bits, early out.
3114 if (Known.isUnknown())
3115 break;
3116 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth+1);
3117
3118 // Only known if known in both the LHS and RHS.
3119 Known = KnownBits::commonBits(Known, Known2);
3120 break;
3121 case ISD::SELECT_CC:
3122 Known = computeKnownBits(Op.getOperand(3), DemandedElts, Depth+1);
3123 // If we don't know any bits, early out.
3124 if (Known.isUnknown())
3125 break;
3126 Known2 = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1);
3127
3128 // Only known if known in both the LHS and RHS.
3129 Known = KnownBits::commonBits(Known, Known2);
3130 break;
3131 case ISD::SMULO:
3132 case ISD::UMULO:
3133 if (Op.getResNo() != 1)
3134 break;
3135 // The boolean result conforms to getBooleanContents.
3136 // If we know the result of a setcc has the top bits zero, use this info.
3137 // We know that we have an integer-based boolean since these operations
3138 // are only available for integer.
3139 if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
3140 TargetLowering::ZeroOrOneBooleanContent &&
3141 BitWidth > 1)
3142 Known.Zero.setBitsFrom(1);
3143 break;
3144 case ISD::SETCC:
3145 case ISD::STRICT_FSETCC:
3146 case ISD::STRICT_FSETCCS: {
3147 unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0;
3148 // If we know the result of a setcc has the top bits zero, use this info.
3149 if (TLI->getBooleanContents(Op.getOperand(OpNo).getValueType()) ==
3150 TargetLowering::ZeroOrOneBooleanContent &&
3151 BitWidth > 1)
3152 Known.Zero.setBitsFrom(1);
3153 break;
3154 }
3155 case ISD::SHL:
3156 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3157 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3158 Known = KnownBits::shl(Known, Known2);
3159
3160 // Minimum shift low bits are known zero.
3161 if (const APInt *ShMinAmt =
3162 getValidMinimumShiftAmountConstant(Op, DemandedElts))
3163 Known.Zero.setLowBits(ShMinAmt->getZExtValue());
3164 break;
3165 case ISD::SRL:
3166 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3167 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3168 Known = KnownBits::lshr(Known, Known2);
3169
3170 // Minimum shift high bits are known zero.
3171 if (const APInt *ShMinAmt =
3172 getValidMinimumShiftAmountConstant(Op, DemandedElts))
3173 Known.Zero.setHighBits(ShMinAmt->getZExtValue());
3174 break;
3175 case ISD::SRA:
3176 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3177 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3178 Known = KnownBits::ashr(Known, Known2);
3179 // TODO: Add minimum shift high known sign bits.
3180 break;
3181 case ISD::FSHL:
3182 case ISD::FSHR:
3183 if (ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(2), DemandedElts)) {
3184 unsigned Amt = C->getAPIntValue().urem(BitWidth);
3185
3186 // For fshl, 0-shift returns the 1st arg.
3187 // For fshr, 0-shift returns the 2nd arg.
3188 if (Amt == 0) {
3189 Known = computeKnownBits(Op.getOperand(Opcode == ISD::FSHL ? 0 : 1),
3190 DemandedElts, Depth + 1);
3191 break;
3192 }
3193
3194 // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
3195 // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
3196 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3197 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3198 if (Opcode == ISD::FSHL) {
3199 Known.One <<= Amt;
3200 Known.Zero <<= Amt;
3201 Known2.One.lshrInPlace(BitWidth - Amt);
3202 Known2.Zero.lshrInPlace(BitWidth - Amt);
3203 } else {
3204 Known.One <<= BitWidth - Amt;
3205 Known.Zero <<= BitWidth - Amt;
3206 Known2.One.lshrInPlace(Amt);
3207 Known2.Zero.lshrInPlace(Amt);
3208 }
3209 Known.One |= Known2.One;
3210 Known.Zero |= Known2.Zero;
3211 }
3212 break;
3213 case ISD::SIGN_EXTEND_INREG: {
3214 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3215 EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
3216 Known = Known.sextInReg(EVT.getScalarSizeInBits());
3217 break;
3218 }
3219 case ISD::CTTZ:
3220 case ISD::CTTZ_ZERO_UNDEF: {
3221 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3222 // If we have a known 1, its position is our upper bound.
3223 unsigned PossibleTZ = Known2.countMaxTrailingZeros();
3224 unsigned LowBits = Log2_32(PossibleTZ) + 1;
3225 Known.Zero.setBitsFrom(LowBits);
3226 break;
3227 }
3228 case ISD::CTLZ:
3229 case ISD::CTLZ_ZERO_UNDEF: {
3230 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3231 // If we have a known 1, its position is our upper bound.
3232 unsigned PossibleLZ = Known2.countMaxLeadingZeros();
3233 unsigned LowBits = Log2_32(PossibleLZ) + 1;
3234 Known.Zero.setBitsFrom(LowBits);
3235 break;
3236 }
3237 case ISD::CTPOP: {
3238 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3239 // If we know some of the bits are zero, they can't be one.
3240 unsigned PossibleOnes = Known2.countMaxPopulation();
3241 Known.Zero.setBitsFrom(Log2_32(PossibleOnes) + 1);
3242 break;
3243 }
3244 case ISD::PARITY: {
3245 // Parity returns 0 everywhere but the LSB.
3246 Known.Zero.setBitsFrom(1);
3247 break;
3248 }
3249 case ISD::LOAD: {
3250 LoadSDNode *LD = cast<LoadSDNode>(Op);
3251 const Constant *Cst = TLI->getTargetConstantFromLoad(LD);
3252 if (ISD::isNON_EXTLoad(LD) && Cst) {
3253 // Determine any common known bits from the loaded constant pool value.
3254 Type *CstTy = Cst->getType();
3255 if ((NumElts * BitWidth) == CstTy->getPrimitiveSizeInBits()) {
3256 // If it's a vector splat, then we can (quickly) reuse the scalar path.
3257 // NOTE: We assume all elements match and none are UNDEF.
3258 if (CstTy->isVectorTy()) {
3259 if (const Constant *Splat = Cst->getSplatValue()) {
3260 Cst = Splat;
3261 CstTy = Cst->getType();
3262 }
3263 }
3264 // TODO - do we need to handle different bitwidths?
3265 if (CstTy->isVectorTy() && BitWidth == CstTy->getScalarSizeInBits()) {
3266 // Iterate across all vector elements finding common known bits.
3267 Known.One.setAllBits();
3268 Known.Zero.setAllBits();
3269 for (unsigned i = 0; i != NumElts; ++i) {
3270 if (!DemandedElts[i])
3271 continue;
3272 if (Constant *Elt = Cst->getAggregateElement(i)) {
3273 if (auto *CInt = dyn_cast<ConstantInt>(Elt)) {
3274 const APInt &Value = CInt->getValue();
3275 Known.One &= Value;
3276 Known.Zero &= ~Value;
3277 continue;
3278 }
3279 if (auto *CFP = dyn_cast<ConstantFP>(Elt)) {
3280 APInt Value = CFP->getValueAPF().bitcastToAPInt();
3281 Known.One &= Value;
3282 Known.Zero &= ~Value;
3283 continue;
3284 }
3285 }
3286 Known.One.clearAllBits();
3287 Known.Zero.clearAllBits();
3288 break;
3289 }
3290 } else if (BitWidth == CstTy->getPrimitiveSizeInBits()) {
3291 if (auto *CInt = dyn_cast<ConstantInt>(Cst)) {
3292 Known = KnownBits::makeConstant(CInt->getValue());
3293 } else if (auto *CFP = dyn_cast<ConstantFP>(Cst)) {
3294 Known =
3295 KnownBits::makeConstant(CFP->getValueAPF().bitcastToAPInt());
3296 }
3297 }
3298 }
3299 } else if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
3300 // If this is a ZEXTLoad and we are looking at the loaded value.
3301 EVT VT = LD->getMemoryVT();
3302 unsigned MemBits = VT.getScalarSizeInBits();
3303 Known.Zero.setBitsFrom(MemBits);
3304 } else if (const MDNode *Ranges = LD->getRanges()) {
3305 if (LD->getExtensionType() == ISD::NON_EXTLOAD)
3306 computeKnownBitsFromRangeMetadata(*Ranges, Known);
3307 }
3308 break;
3309 }
3310 case ISD::ZERO_EXTEND_VECTOR_INREG: {
3311 EVT InVT = Op.getOperand(0).getValueType();
3312 APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
3313 Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
3314 Known = Known.zext(BitWidth);
3315 break;
3316 }
3317 case ISD::ZERO_EXTEND: {
3318 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3319 Known = Known.zext(BitWidth);
3320 break;
3321 }
3322 case ISD::SIGN_EXTEND_VECTOR_INREG: {
3323 EVT InVT = Op.getOperand(0).getValueType();
3324 APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
3325 Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
3326 // If the sign bit is known to be zero or one, then sext will extend
3327 // it to the top bits, else it will just zext.
3328 Known = Known.sext(BitWidth);
3329 break;
3330 }
3331 case ISD::SIGN_EXTEND: {
3332 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3333 // If the sign bit is known to be zero or one, then sext will extend
3334 // it to the top bits, else it will just zext.
3335 Known = Known.sext(BitWidth);
3336 break;
3337 }
3338 case ISD::ANY_EXTEND_VECTOR_INREG: {
3339 EVT InVT = Op.getOperand(0).getValueType();
3340 APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
3341 Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
3342 Known = Known.anyext(BitWidth);
3343 break;
3344 }
3345 case ISD::ANY_EXTEND: {
3346 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3347 Known = Known.anyext(BitWidth);
3348 break;
3349 }
3350 case ISD::TRUNCATE: {
3351 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3352 Known = Known.trunc(BitWidth);
3353 break;
3354 }
3355 case ISD::AssertZext: {
3356 EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
3357 APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
3358 Known = computeKnownBits(Op.getOperand(0), Depth+1);
3359 Known.Zero |= (~InMask);
3360 Known.One &= (~Known.Zero);
3361 break;
3362 }
3363 case ISD::AssertAlign: {
3364 unsigned LogOfAlign = Log2(cast<AssertAlignSDNode>(Op)->getAlign());
3365 assert(LogOfAlign != 0);
3366 // If a node is guaranteed to be aligned, set low zero bits accordingly as
3367 // well as clearing one bits.
3368 Known.Zero.setLowBits(LogOfAlign);
3369 Known.One.clearLowBits(LogOfAlign);
3370 break;
3371 }
3372 case ISD::FGETSIGN:
3373 // All bits are zero except the low bit.
3374 Known.Zero.setBitsFrom(1);
3375 break;
3376 case ISD::USUBO:
3377 case ISD::SSUBO:
3378 if (Op.getResNo() == 1) {
3379 // If we know the result of a setcc has the top bits zero, use this info.
3380 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
3381 TargetLowering::ZeroOrOneBooleanContent &&
3382 BitWidth > 1)
3383 Known.Zero.setBitsFrom(1);
3384 break;
3385 }
3386 LLVM_FALLTHROUGH;
3387 case ISD::SUB:
3388 case ISD::SUBC: {
3389 assert(Op.getResNo() == 0 &&
3390 "We only compute knownbits for the difference here.");
3391
3392 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3393 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3394 Known = KnownBits::computeForAddSub(/* Add */ false, /* NSW */ false,
3395 Known, Known2);
3396 break;
3397 }
3398 case ISD::UADDO:
3399 case ISD::SADDO:
3400 case ISD::ADDCARRY:
3401 if (Op.getResNo() == 1) {
3402 // If we know the result of a setcc has the top bits zero, use this info.
3403 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
3404 TargetLowering::ZeroOrOneBooleanContent &&
3405 BitWidth > 1)
3406 Known.Zero.setBitsFrom(1);
3407 break;
3408 }
3409 LLVM_FALLTHROUGH;
3410 case ISD::ADD:
3411 case ISD::ADDC:
3412 case ISD::ADDE: {
3413 assert(Op.getResNo() == 0 && "We only compute knownbits for the sum here.");
3414
3415 // With ADDE and ADDCARRY, a carry bit may be added in.
3416 KnownBits Carry(1);
3417 if (Opcode == ISD::ADDE)
3418 // Can't track carry from glue, set carry to unknown.
3419 Carry.resetAll();
3420 else if (Opcode == ISD::ADDCARRY)
3421 // TODO: Compute known bits for the carry operand. Not sure if it is worth
3422 // the trouble (how often will we find a known carry bit). And I haven't
3423 // tested this very much yet, but something like this might work:
3424 // Carry = computeKnownBits(Op.getOperand(2), DemandedElts, Depth + 1);
3425 // Carry = Carry.zextOrTrunc(1, false);
3426 Carry.resetAll();
3427 else
3428 Carry.setAllZero();
3429
3430 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3431 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3432 Known = KnownBits::computeForAddCarry(Known, Known2, Carry);
3433 break;
3434 }
3435 case ISD::SREM: {
3436 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3437 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3438 Known = KnownBits::srem(Known, Known2);
3439 break;
3440 }
3441 case ISD::UREM: {
3442 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3443 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3444 Known = KnownBits::urem(Known, Known2);
3445 break;
3446 }
3447 case ISD::EXTRACT_ELEMENT: {
3448 Known = computeKnownBits(Op.getOperand(0), Depth+1);
3449 const unsigned Index = Op.getConstantOperandVal(1);
3450 const unsigned EltBitWidth = Op.getValueSizeInBits();
3451
3452 // Remove low part of known bits mask
3453 Known.Zero = Known.Zero.getHiBits(Known.getBitWidth() - Index * EltBitWidth);
3454 Known.One = Known.One.getHiBits(Known.getBitWidth() - Index * EltBitWidth);
3455
3456 // Remove high part of known bit mask
3457 Known = Known.trunc(EltBitWidth);
3458 break;
3459 }
3460 case ISD::EXTRACT_VECTOR_ELT: {
3461 SDValue InVec = Op.getOperand(0);
3462 SDValue EltNo = Op.getOperand(1);
3463 EVT VecVT = InVec.getValueType();
3464 // computeKnownBits not yet implemented for scalable vectors.
3465 if (VecVT.isScalableVector())
3466 break;
3467 const unsigned EltBitWidth = VecVT.getScalarSizeInBits();
3468 const unsigned NumSrcElts = VecVT.getVectorNumElements();
3469
3470 // If BitWidth > EltBitWidth the value is implicitly any-extended, so we do
3471 // not know anything about the extended bits.
3472 if (BitWidth > EltBitWidth)
3473 Known = Known.trunc(EltBitWidth);
3474
3475 // If we know the element index, just demand that vector element, else for
3476 // an unknown element index, ignore DemandedElts and demand them all.
3477 APInt DemandedSrcElts = APInt::getAllOnes(NumSrcElts);
3478 auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
3479 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
3480 DemandedSrcElts =
3481 APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue());
3482
3483 Known = computeKnownBits(InVec, DemandedSrcElts, Depth + 1);
3484 if (BitWidth > EltBitWidth)
3485 Known = Known.anyext(BitWidth);
3486 break;
3487 }
3488 case ISD::INSERT_VECTOR_ELT: {
3489 // If we know the element index, split the demand between the
3490 // source vector and the inserted element, otherwise assume we need
3491 // the original demanded vector elements and the value.
3492 SDValue InVec = Op.getOperand(0);
3493 SDValue InVal = Op.getOperand(1);
3494 SDValue EltNo = Op.getOperand(2);
3495 bool DemandedVal = true;
3496 APInt DemandedVecElts = DemandedElts;
3497 auto *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
3498 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
3499 unsigned EltIdx = CEltNo->getZExtValue();
3500 DemandedVal = !!DemandedElts[EltIdx];
3501 DemandedVecElts.clearBit(EltIdx);
3502 }
3503 Known.One.setAllBits();
3504 Known.Zero.setAllBits();
3505 if (DemandedVal) {
3506 Known2 = computeKnownBits(InVal, Depth + 1);
3507 Known = KnownBits::commonBits(Known, Known2.zextOrTrunc(BitWidth));
3508 }
3509 if (!!DemandedVecElts) {
3510 Known2 = computeKnownBits(InVec, DemandedVecElts, Depth + 1);
3511 Known = KnownBits::commonBits(Known, Known2);
3512 }
3513 break;
3514 }
3515 case ISD::BITREVERSE: {
3516 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3517 Known = Known2.reverseBits();
3518 break;
3519 }
3520 case ISD::BSWAP: {
3521 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3522 Known = Known2.byteSwap();
3523 break;
3524 }
3525 case ISD::ABS: {
3526 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3527 Known = Known2.abs();
3528 break;
3529 }
3530 case ISD::USUBSAT: {
3531 // The result of usubsat will never be larger than the LHS.
3532 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3533 Known.Zero.setHighBits(Known2.countMinLeadingZeros());
3534 break;
3535 }
3536 case ISD::UMIN: {
3537 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3538 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3539 Known = KnownBits::umin(Known, Known2);
3540 break;
3541 }
3542 case ISD::UMAX: {
3543 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3544 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3545 Known = KnownBits::umax(Known, Known2);
3546 break;
3547 }
3548 case ISD::SMIN:
3549 case ISD::SMAX: {
3550 // If we have a clamp pattern, we know that the number of sign bits will be
3551 // the minimum of the clamp min/max range.
3552 bool IsMax = (Opcode == ISD::SMAX);
3553 ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr;
3554 if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts)))
3555 if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX))
3556 CstHigh =
3557 isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts);
3558 if (CstLow && CstHigh) {
3559 if (!IsMax)
3560 std::swap(CstLow, CstHigh);
3561
3562 const APInt &ValueLow = CstLow->getAPIntValue();
3563 const APInt &ValueHigh = CstHigh->getAPIntValue();
3564 if (ValueLow.sle(ValueHigh)) {
3565 unsigned LowSignBits = ValueLow.getNumSignBits();
3566 unsigned HighSignBits = ValueHigh.getNumSignBits();
3567 unsigned MinSignBits = std::min(LowSignBits, HighSignBits);
3568 if (ValueLow.isNegative() && ValueHigh.isNegative()) {
3569 Known.One.setHighBits(MinSignBits);
3570 break;
3571 }
3572 if (ValueLow.isNonNegative() && ValueHigh.isNonNegative()) {
3573 Known.Zero.setHighBits(MinSignBits);
3574 break;
3575 }
3576 }
3577 }
3578
3579 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3580 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3581 if (IsMax)
3582 Known = KnownBits::smax(Known, Known2);
3583 else
3584 Known = KnownBits::smin(Known, Known2);
3585 break;
3586 }
3587 case ISD::FP_TO_UINT_SAT: {
3588 // FP_TO_UINT_SAT produces an unsigned value that fits in the saturating VT.
3589 EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
3590 Known.Zero |= APInt::getBitsSetFrom(BitWidth, VT.getScalarSizeInBits());
3591 break;
3592 }
3593 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
3594 if (Op.getResNo() == 1) {
3595 // The boolean result conforms to getBooleanContents.
3596 // If we know the result of a setcc has the top bits zero, use this info.
3597 // We know that we have an integer-based boolean since these operations
3598 // are only available for integer.
3599 if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
3600 TargetLowering::ZeroOrOneBooleanContent &&
3601 BitWidth > 1)
3602 Known.Zero.setBitsFrom(1);
3603 break;
3604 }
3605 LLVM_FALLTHROUGH;
3606 case ISD::ATOMIC_CMP_SWAP:
3607 case ISD::ATOMIC_SWAP:
3608 case ISD::ATOMIC_LOAD_ADD:
3609 case ISD::ATOMIC_LOAD_SUB:
3610 case ISD::ATOMIC_LOAD_AND:
3611 case ISD::ATOMIC_LOAD_CLR:
3612 case ISD::ATOMIC_LOAD_OR:
3613 case ISD::ATOMIC_LOAD_XOR:
3614 case ISD::ATOMIC_LOAD_NAND:
3615 case ISD::ATOMIC_LOAD_MIN:
3616 case ISD::ATOMIC_LOAD_MAX:
3617 case ISD::ATOMIC_LOAD_UMIN:
3618 case ISD::ATOMIC_LOAD_UMAX:
3619 case ISD::ATOMIC_LOAD: {
3620 unsigned MemBits =
3621 cast<AtomicSDNode>(Op)->getMemoryVT().getScalarSizeInBits();
3622 // If we are looking at the loaded value.
3623 if (Op.getResNo() == 0) {
3624 if (TLI->getExtendForAtomicOps() == ISD::ZERO_EXTEND)
3625 Known.Zero.setBitsFrom(MemBits);
3626 }
3627 break;
3628 }
3629 case ISD::FrameIndex:
3630 case ISD::TargetFrameIndex:
3631 TLI->computeKnownBitsForFrameIndex(cast<FrameIndexSDNode>(Op)->getIndex(),
3632 Known, getMachineFunction());
3633 break;
3634
3635 default:
3636 if (Opcode < ISD::BUILTIN_OP_END)
3637 break;
3638 LLVM_FALLTHROUGH;
3639 case ISD::INTRINSIC_WO_CHAIN:
3640 case ISD::INTRINSIC_W_CHAIN:
3641 case ISD::INTRINSIC_VOID:
3642 // Allow the target to implement this method for its nodes.
3643 TLI->computeKnownBitsForTargetNode(Op, Known, DemandedElts, *this, Depth);
3644 break;
3645 }
3646
3647 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
3648 return Known;
3649}
3650
3651SelectionDAG::OverflowKind SelectionDAG::computeOverflowKind(SDValue N0,
3652 SDValue N1) const {
3653 // X + 0 never overflows
3654 if (isNullConstant(N1))
3655 return OFK_Never;
3656
3657 KnownBits N1Known = computeKnownBits(N1);
3658 if (N1Known.Zero.getBoolValue()) {
3659 KnownBits N0Known = computeKnownBits(N0);
3660
3661 bool overflow;
3662 (void)N0Known.getMaxValue().uadd_ov(N1Known.getMaxValue(), overflow);
3663 if (!overflow)
3664 return OFK_Never;
3665 }
3666
3667 // mulhi + 1 never overflows
3668 if (N0.getOpcode() == ISD::UMUL_LOHI && N0.getResNo() == 1 &&
3669 (N1Known.getMaxValue() & 0x01) == N1Known.getMaxValue())
3670 return OFK_Never;
3671
3672 if (N1.getOpcode() == ISD::UMUL_LOHI && N1.getResNo() == 1) {
3673 KnownBits N0Known = computeKnownBits(N0);
3674
3675 if ((N0Known.getMaxValue() & 0x01) == N0Known.getMaxValue())
3676 return OFK_Never;
3677 }
3678
3679 return OFK_Sometime;
3680}
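// Worked example for the known-bits check above (hypothetical 8-bit values):
// if computeKnownBits proves N1's top four bits are zero, N1's maximum value
// is 0x0F; if N0's maximum value is 0xE0, then 0xE0 + 0x0F = 0xEF does not
// wrap, so uadd_ov reports no overflow and the addition is OFK_Never.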
3681
3682bool SelectionDAG::isKnownToBeAPowerOfTwo(SDValue Val) const {
3683 EVT OpVT = Val.getValueType();
3684 unsigned BitWidth = OpVT.getScalarSizeInBits();
3685
3686 // Is the constant a known power of 2?
3687 if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Val))
3688 return Const->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
3689
3690 // A left-shift of a constant one will have exactly one bit set because
3691 // shifting the bit off the end is undefined.
3692 if (Val.getOpcode() == ISD::SHL) {
3693 auto *C = isConstOrConstSplat(Val.getOperand(0));
3694 if (C && C->getAPIntValue() == 1)
3695 return true;
3696 }
3697
3698 // Similarly, a logical right-shift of a constant sign-bit will have exactly
3699 // one bit set.
3700 if (Val.getOpcode() == ISD::SRL) {
3701 auto *C = isConstOrConstSplat(Val.getOperand(0));
3702 if (C && C->getAPIntValue().isSignMask())
3703 return true;
3704 }
3705
3706 // Are all operands of a build vector constant powers of two?
3707 if (Val.getOpcode() == ISD::BUILD_VECTOR)
3708 if (llvm::all_of(Val->ops(), [BitWidth](SDValue E) {
3709 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(E))
3710 return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
3711 return false;
3712 }))
3713 return true;
3714
3715 // Is the operand of a splat vector a constant power of two?
3716 if (Val.getOpcode() == ISD::SPLAT_VECTOR)
3717 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val->getOperand(0)))
3718 if (C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2())
3719 return true;
3720
3721 // More could be done here, though the above checks are enough
3722 // to handle some common cases.
3723
3724 // Fall back to computeKnownBits to catch other known cases.
3725 KnownBits Known = computeKnownBits(Val);
3726 return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
3727}
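// Fallback example (hypothetical 4-bit value): if computeKnownBits proves
// Known.One = 0b0100 and Known.Zero = 0b1011, then exactly one bit can be set
// and that bit is known set, so countMaxPopulation() == countMinPopulation()
// == 1 and Val must be 4, a power of two.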
3728
3729unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const {
3730 EVT VT = Op.getValueType();
3731
3732 // TODO: Assume we don't know anything for now.
3733 if (VT.isScalableVector())
3734 return 1;
3735
3736 APInt DemandedElts = VT.isVector()
3737 ? APInt::getAllOnes(VT.getVectorNumElements())
3738 : APInt(1, 1);
3739 return ComputeNumSignBits(Op, DemandedElts, Depth);
3740}
3741
3742unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
3743 unsigned Depth) const {
3744 EVT VT = Op.getValueType();
3745 assert((VT.isInteger() || VT.isFloatingPoint()) && "Invalid VT!");
3746 unsigned VTBits = VT.getScalarSizeInBits();
3747 unsigned NumElts = DemandedElts.getBitWidth();
3748 unsigned Tmp, Tmp2;
3749 unsigned FirstAnswer = 1;
3750
3751 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
3752 const APInt &Val = C->getAPIntValue();
3753 return Val.getNumSignBits();
3754 }
3755
3756 if (Depth >= MaxRecursionDepth)
3757 return 1; // Limit search depth.
3758
3759 if (!DemandedElts || VT.isScalableVector())
3760 return 1; // No demanded elts, better to assume we don't know anything.
3761
3762 unsigned Opcode = Op.getOpcode();
3763 switch (Opcode) {
3764 default: break;
3765 case ISD::AssertSext:
3766 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
3767 return VTBits-Tmp+1;
3768 case ISD::AssertZext:
3769 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
3770 return VTBits-Tmp;
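// Example: AssertSext of an i16 value held in an i32 register guarantees
// bits [31:15] all match bit 15, i.e. 32 - 16 + 1 = 17 sign bits; AssertZext
// of the same width only guarantees the 16 cleared top bits, i.e. 16 sign bits.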
3771
3772 case ISD::BUILD_VECTOR:
3773 Tmp = VTBits;
3774 for (unsigned i = 0, e = Op.getNumOperands(); (i < e) && (Tmp > 1); ++i) {
3775 if (!DemandedElts[i])
3776 continue;
3777
3778 SDValue SrcOp = Op.getOperand(i);
3779 Tmp2 = ComputeNumSignBits(SrcOp, Depth + 1);
3780
3781 // BUILD_VECTOR can implicitly truncate sources, we must handle this.
3782 if (SrcOp.getValueSizeInBits() != VTBits) {
3783 assert(SrcOp.getValueSizeInBits() > VTBits &&
3784 "Expected BUILD_VECTOR implicit truncation");
3785 unsigned ExtraBits = SrcOp.getValueSizeInBits() - VTBits;
3786 Tmp2 = (Tmp2 > ExtraBits ? Tmp2 - ExtraBits : 1);
3787 }
3788 Tmp = std::min(Tmp, Tmp2);
3789 }
3790 return Tmp;
3791
3792 case ISD::VECTOR_SHUFFLE: {
3793 // Collect the minimum number of sign bits that are shared by every vector
3794 // element referenced by the shuffle.
3795 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
3796 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
3797 assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
3798 for (unsigned i = 0; i != NumElts; ++i) {
3799 int M = SVN->getMaskElt(i);
3800 if (!DemandedElts[i])
3801 continue;
3802 // For UNDEF elements, we don't know anything about the common state of
3803 // the shuffle result.
3804 if (M < 0)
3805 return 1;
3806 if ((unsigned)M < NumElts)
3807 DemandedLHS.setBit((unsigned)M % NumElts);
3808 else
3809 DemandedRHS.setBit((unsigned)M % NumElts);
3810 }
3811 Tmp = std::numeric_limits<unsigned>::max();
3812 if (!!DemandedLHS)
3813 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1);
3814 if (!!DemandedRHS) {
3815 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1);
3816 Tmp = std::min(Tmp, Tmp2);
3817 }
3818 // If we don't know anything, early out and try computeKnownBits fall-back.
3819 if (Tmp == 1)
3820 break;
3821 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3822 return Tmp;
3823 }
3824
3825 case ISD::BITCAST: {
3826 SDValue N0 = Op.getOperand(0);
3827 EVT SrcVT = N0.getValueType();
3828 unsigned SrcBits = SrcVT.getScalarSizeInBits();
3829
3830 // Ignore bitcasts from unsupported types.
3831 if (!(SrcVT.isInteger() || SrcVT.isFloatingPoint()))
3832 break;
3833
3834 // Fast handling of 'identity' bitcasts.
3835 if (VTBits == SrcBits)
3836 return ComputeNumSignBits(N0, DemandedElts, Depth + 1);
3837
3838 bool IsLE = getDataLayout().isLittleEndian();
3839
3840 // Bitcast 'large element' scalar/vector to 'small element' vector.
3841 if ((SrcBits % VTBits) == 0) {
3842 assert(VT.isVector() && "Expected bitcast to vector");
3843
3844 unsigned Scale = SrcBits / VTBits;
3845 APInt SrcDemandedElts =
3846 APIntOps::ScaleBitMask(DemandedElts, NumElts / Scale);
3847
3848 // Fast case - sign splat can be simply split across the small elements.
3849 Tmp = ComputeNumSignBits(N0, SrcDemandedElts, Depth + 1);
3850 if (Tmp == SrcBits)
3851 return VTBits;
3852
3853 // Slow case - determine how far the sign extends into each sub-element.
3854 Tmp2 = VTBits;
3855 for (unsigned i = 0; i != NumElts; ++i)
3856 if (DemandedElts[i]) {
3857 unsigned SubOffset = i % Scale;
3858 SubOffset = (IsLE ? ((Scale - 1) - SubOffset) : SubOffset);
3859 SubOffset = SubOffset * VTBits;
3860 if (Tmp <= SubOffset)
3861 return 1;
3862 Tmp2 = std::min(Tmp2, Tmp - SubOffset);
3863 }
3864 return Tmp2;
3865 }
3866 break;
3867 }
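// Worked example (little-endian, hypothetical): bitcasting a v2i64 whose
// elements have 40 sign bits to v4i32 gives Scale = 2; the high i32 of each
// i64 is entirely sign bits (clamped to 32), while the low i32 keeps
// 40 - 32 = 8, so the answer is 32 if only the high halves are demanded and 8
// once a low half is demanded.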
3868
3869 case ISD::FP_TO_SINT_SAT:
3870 // FP_TO_SINT_SAT produces a signed value that fits in the saturating VT.
3871 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits();
3872 return VTBits - Tmp + 1;
3873 case ISD::SIGN_EXTEND:
3874 Tmp = VTBits - Op.getOperand(0).getScalarValueSizeInBits();
3875 return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1) + Tmp;
3876 case ISD::SIGN_EXTEND_INREG:
3877 // Max of the input and what this extends.
3878 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits();
3879 Tmp = VTBits-Tmp+1;
3880 Tmp2 = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
3881 return std::max(Tmp, Tmp2);
3882 case ISD::SIGN_EXTEND_VECTOR_INREG: {
3883 SDValue Src = Op.getOperand(0);
3884 EVT SrcVT = Src.getValueType();
3885 APInt DemandedSrcElts = DemandedElts.zextOrSelf(SrcVT.getVectorNumElements());
3886 Tmp = VTBits - SrcVT.getScalarSizeInBits();
3887 return ComputeNumSignBits(Src, DemandedSrcElts, Depth+1) + Tmp;
3888 }
3889 case ISD::SRA:
3890 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3891 // SRA X, C -> adds C sign bits.
3892 if (const APInt *ShAmt =
3893 getValidMinimumShiftAmountConstant(Op, DemandedElts))
3894 Tmp = std::min<uint64_t>(Tmp + ShAmt->getZExtValue(), VTBits);
3895 return Tmp;
3896 case ISD::SHL:
3897 if (const APInt *ShAmt =
3898 getValidMaximumShiftAmountConstant(Op, DemandedElts)) {
3899 // shl destroys sign bits, ensure it doesn't shift out all sign bits.
3900 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3901 if (ShAmt->ult(Tmp))
3902 return Tmp - ShAmt->getZExtValue();
3903 }
3904 break;
3905 case ISD::AND:
3906 case ISD::OR:
3907 case ISD::XOR: // NOT is handled here.
3908 // Logical binary ops preserve the number of sign bits at the worst.
3909 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
3910 if (Tmp != 1) {
3911 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1);
3912 FirstAnswer = std::min(Tmp, Tmp2);
3913 // We computed what we know about the sign bits as our first
3914 // answer. Now proceed to the generic code that uses
3915 // computeKnownBits, and pick whichever answer is better.
3916 }
3917 break;
3918
3919 case ISD::SELECT:
3920 case ISD::VSELECT:
3921 Tmp = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1);
3922 if (Tmp == 1) return 1; // Early out.
3923 Tmp2 = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1);
3924 return std::min(Tmp, Tmp2);
3925 case ISD::SELECT_CC:
3926 Tmp = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1);
3927 if (Tmp == 1) return 1; // Early out.
3928 Tmp2 = ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth+1);
3929 return std::min(Tmp, Tmp2);
3930
3931 case ISD::SMIN:
3932 case ISD::SMAX: {
3933 // If we have a clamp pattern, we know that the number of sign bits will be
3934 // the minimum of the clamp min/max range.
3935 bool IsMax = (Opcode == ISD::SMAX);
3936 ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr;
3937 if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts)))
3938 if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX))
3939 CstHigh =
3940 isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts);
3941 if (CstLow && CstHigh) {
3942 if (!IsMax)
3943 std::swap(CstLow, CstHigh);
3944 if (CstLow->getAPIntValue().sle(CstHigh->getAPIntValue())) {
3945 Tmp = CstLow->getAPIntValue().getNumSignBits();
3946 Tmp2 = CstHigh->getAPIntValue().getNumSignBits();
3947 return std::min(Tmp, Tmp2);
3948 }
3949 }
3950
3951 // Fallback - just get the minimum number of sign bits of the operands.
3952 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3953 if (Tmp == 1)
3954 return 1; // Early out.
3955 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
3956 return std::min(Tmp, Tmp2);
3957 }
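// Clamp example: for i32 smax(smin(x, 127), -128), CstHigh = 127 has 25 sign
// bits and CstLow = -128 also has 25, so the clamped result is known to have
// at least min(25, 25) = 25 sign bits regardless of x.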
3958 case ISD::UMIN:
3959 case ISD::UMAX:
3960 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3961 if (Tmp == 1)
3962 return 1; // Early out.
3963 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
3964 return std::min(Tmp, Tmp2);
3965 case ISD::SADDO:
3966 case ISD::UADDO:
3967 case ISD::SSUBO:
3968 case ISD::USUBO:
3969 case ISD::SMULO:
3970 case ISD::UMULO:
3971 if (Op.getResNo() != 1)
3972 break;
3973 // The boolean result conforms to getBooleanContents. Fall through.
3974 // If setcc returns 0/-1, all bits are sign bits.
3975 // We know that we have an integer-based boolean since these operations
3976 // are only available for integer types.
3977 if (TLI->getBooleanContents(VT.isVector(), false) ==
3978 TargetLowering::ZeroOrNegativeOneBooleanContent)
3979 return VTBits;
3980 break;
3981 case ISD::SETCC:
3982 case ISD::STRICT_FSETCC:
3983 case ISD::STRICT_FSETCCS: {
3984 unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0;
3985 // If setcc returns 0/-1, all bits are sign bits.
3986 if (TLI->getBooleanContents(Op.getOperand(OpNo).getValueType()) ==
3987 TargetLowering::ZeroOrNegativeOneBooleanContent)
3988 return VTBits;
3989 break;
3990 }
3991 case ISD::ROTL:
3992 case ISD::ROTR:
3993 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3994
3995 // If we're rotating a 0/-1 value, then it stays a 0/-1 value.
3996 if (Tmp == VTBits)
3997 return VTBits;
3998
3999 if (ConstantSDNode *C =
4000 isConstOrConstSplat(Op.getOperand(1), DemandedElts)) {
4001 unsigned RotAmt = C->getAPIntValue().urem(VTBits);
4002
4003 // Handle rotate right by N like a rotate left by 32-N.
4004 if (Opcode == ISD::ROTR)
4005 RotAmt = (VTBits - RotAmt) % VTBits;
4006
4007 // If we aren't rotating out all of the known-in sign bits, return the
4008 // number that are left. This handles rotl(sext(x), 1) for example.
4009 if (Tmp > (RotAmt + 1)) return (Tmp - RotAmt);
4010 }
4011 break;
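// Rotate example: for rotl(sext i16 x to i32, 1), the input has 17 sign bits;
// rotating left by 1 only moves one of them past the top, so Tmp > RotAmt + 1
// and the result still has 17 - 1 = 16 sign bits.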
4012 case ISD::ADD:
4013 case ISD::ADDC:
4014 // Add can have at most one carry bit. Thus we know that the output
4015 // is, at worst, one more bit than the inputs.
4016 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
4017 if (Tmp == 1) return 1; // Early out.
4018
4019 // Special case decrementing a value (ADD X, -1):
4020 if (ConstantSDNode *CRHS =
4021 isConstOrConstSplat(Op.getOperand(1), DemandedElts))
4022 if (CRHS->isAllOnes()) {
4023 KnownBits Known =
4024 computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4025
4026 // If the input is known to be 0 or 1, the output is 0/-1, which is all
4027 // sign bits set.
4028 if ((Known.Zero | 1).isAllOnes())
4029 return VTBits;
4030
4031 // If we are subtracting one from a positive number, there is no carry
4032 // out of the result.
4033 if (Known.isNonNegative())
4034 return Tmp;
4035 }
4036
4037 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
4038 if (Tmp2 == 1) return 1; // Early out.
4039 return std::min(Tmp, Tmp2) - 1;
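// Example: adding two i32 values that each have at least 20 sign bits can
// disturb at most one of them (a single carry into the sign-replicated
// region), so the sum still has at least 20 - 1 = 19 sign bits.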
4040 case ISD::SUB:
4041 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
4042 if (Tmp2 == 1) return 1; // Early out.
4043
4044 // Handle NEG.
4045 if (ConstantSDNode *CLHS =
4046 isConstOrConstSplat(Op.getOperand(0), DemandedElts))
4047 if (CLHS->isZero()) {
4048 KnownBits Known =
4049 computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
4050 // If the input is known to be 0 or 1, the output is 0/-1, which is all
4051 // sign bits set.
4052 if ((Known.Zero | 1).isAllOnes())
4053 return VTBits;
4054
4055 // If the input is known to be positive (the sign bit is known clear),
4056 // the output of the NEG has the same number of sign bits as the input.
4057 if (Known.isNonNegative())
4058 return Tmp2;
4059
4060 // Otherwise, we treat this like a SUB.
4061 }
4062
4063 // Sub can have at most one carry bit. Thus we know that the output
4064 // is, at worst, one more bit than the inputs.
4065 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
4066 if (Tmp == 1) return 1; // Early out.
4067 return std::min(Tmp, Tmp2) - 1;
4068 case ISD::MUL: {
4069 // The output of the Mul can be at most twice the valid bits in the inputs.
4070 unsigned SignBitsOp0 = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
4071 if (SignBitsOp0 == 1)
4072 break;
4073 unsigned SignBitsOp1 = ComputeNumSignBits(Op.getOperand(1), Depth + 1);
4074 if (SignBitsOp1 == 1)
4075 break;
4076 unsigned OutValidBits =
4077 (VTBits - SignBitsOp0 + 1) + (VTBits - SignBitsOp1 + 1);
4078 return OutValidBits > VTBits ? 1 : VTBits - OutValidBits + 1;
4079 }
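// Example (hypothetical i32 operands): with 20 and 16 sign bits the operands
// occupy 13 and 17 "valid" bits, so the product needs at most 30 valid bits
// and is guaranteed 32 - 30 + 1 = 3 sign bits.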
4080 case ISD::SREM:
4081 // The sign bit is the LHS's sign bit, except when the result of the
4082 // remainder is zero. The magnitude of the result should be less than or
4083 // equal to the magnitude of the LHS. Therefore, the result should have
4084 // at least as many sign bits as the left hand side.
4085 return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
4086 case ISD::TRUNCATE: {
4087 // Check if the sign bits of the source go down as far as the truncated value.
4088 unsigned NumSrcBits = Op.getOperand(0).getScalarValueSizeInBits();
4089 unsigned NumSrcSignBits = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
4090 if (NumSrcSignBits > (NumSrcBits - VTBits))
4091 return NumSrcSignBits - (NumSrcBits - VTBits);
4092 break;
4093 }
4094 case ISD::EXTRACT_ELEMENT: {
4095 const int KnownSign = ComputeNumSignBits(Op.getOperand(0), Depth+1);
4096 const int BitWidth = Op.getValueSizeInBits();
4097 const int Items = Op.getOperand(0).getValueSizeInBits() / BitWidth;
4098
4099 // Get the reverse index (counting from the big end); operand 1 indexes
4100 // elements from the little end, while the sign bits start at the big end.
4101 const int rIndex = Items - 1 - Op.getConstantOperandVal(1);
4102
4103 // If the sign portion ends in our element, the subtraction gives the correct
4104 // result. Otherwise it gives either a negative or a greater-than-bitwidth result.
4105 return std::max(std::min(KnownSign - rIndex * BitWidth, BitWidth), 0);
4106 }
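// Example: splitting an i64 with 40 sign bits into two i32 halves, extracting
// element 1 (the big end) gives rIndex = 0 and min(40, 32) = 32 sign bits,
// while element 0 (the little end) gives rIndex = 1 and 40 - 32 = 8.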
4107 case ISD::INSERT_VECTOR_ELT: {
4108 // If we know the element index, split the demand between the
4109 // source vector and the inserted element, otherwise assume we need
4110 // the original demanded vector elements and the value.
4111 SDValue InVec = Op.getOperand(0);
4112 SDValue InVal = Op.getOperand(1);
4113 SDValue EltNo = Op.getOperand(2);
4114 bool DemandedVal = true;
4115 APInt DemandedVecElts = DemandedElts;
4116 auto *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
4117 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
4118 unsigned EltIdx = CEltNo->getZExtValue();
4119 DemandedVal = !!DemandedElts[EltIdx];
4120 DemandedVecElts.clearBit(EltIdx);
4121 }
4122 Tmp = std::numeric_limits<unsigned>::max();
4123 if (DemandedVal) {
4124 // TODO - handle implicit truncation of inserted elements.
4125 if (InVal.getScalarValueSizeInBits() != VTBits)
4126 break;
4127 Tmp2 = ComputeNumSignBits(InVal, Depth + 1);
4128 Tmp = std::min(Tmp, Tmp2);
4129 }
4130 if (!!DemandedVecElts) {
4131 Tmp2 = ComputeNumSignBits(InVec, DemandedVecElts, Depth + 1);
4132 Tmp = std::min(Tmp, Tmp2);
4133 }
4134 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
4135 return Tmp;
4136 }
4137 case ISD::EXTRACT_VECTOR_ELT: {
4138 SDValue InVec = Op.getOperand(0);
4139 SDValue EltNo = Op.getOperand(1);
4140 EVT VecVT = InVec.getValueType();
4141 // ComputeNumSignBits not yet implemented for scalable vectors.
4142 if (VecVT.isScalableVector())
4143 break;
4144 const unsigned BitWidth = Op.getValueSizeInBits();
4145 const unsigned EltBitWidth = Op.getOperand(0).getScalarValueSizeInBits();
4146 const unsigned NumSrcElts = VecVT.getVectorNumElements();
4147
4148 // If BitWidth > EltBitWidth the value is any-extended, and we do not know
4149 // anything about sign bits. But if the sizes match we can derive knowledge
4150 // about sign bits from the vector operand.
4151 if (BitWidth != EltBitWidth)
4152 break;
4153
4154 // If we know the element index, just demand that vector element, else for
4155 // an unknown element index, ignore DemandedElts and demand them all.
4156 APInt DemandedSrcElts = APInt::getAllOnes(NumSrcElts);
4157 auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
4158 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
4159 DemandedSrcElts =
4160 APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue());
4161
4162 return ComputeNumSignBits(InVec, DemandedSrcElts, Depth + 1);
4163 }
4164 case ISD::EXTRACT_SUBVECTOR: {
4165 // Offset the demanded elts by the subvector index.
4166 SDValue Src = Op.getOperand(0);
4167 // Bail until we can represent demanded elements for scalable vectors.
4168 if (Src.getValueType().isScalableVector())
4169 break;
4170 uint64_t Idx = Op.getConstantOperandVal(1);
4171 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
4172 APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
4173 return ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1);
4174 }
4175 case ISD::CONCAT_VECTORS: {
4176 // Determine the minimum number of sign bits across all demanded
4177 // elts of the input vectors. Early out if the result is already 1.
4178 Tmp = std::numeric_limits<unsigned>::max();
4179 EVT SubVectorVT = Op.getOperand(0).getValueType();
4180 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
4181 unsigned NumSubVectors = Op.getNumOperands();
4182 for (unsigned i = 0; (i < NumSubVectors) && (Tmp > 1); ++i) {
4183 APInt DemandedSub =
4184 DemandedElts.extractBits(NumSubVectorElts, i * NumSubVectorElts);
4185 if (!DemandedSub)
4186 continue;
4187 Tmp2 = ComputeNumSignBits(Op.getOperand(i), DemandedSub, Depth + 1);
4188 Tmp = std::min(Tmp, Tmp2);
4189 }
4190 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
4191 return Tmp;
4192 }
4193 case ISD::INSERT_SUBVECTOR: {
4194 // Demand any elements from the subvector and the remainder from the src it's
4195 // inserted into.
4196 SDValue Src = Op.getOperand(0);
4197 SDValue Sub = Op.getOperand(1);
4198 uint64_t Idx = Op.getConstantOperandVal(2);
4199 unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
4200 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
4201 APInt DemandedSrcElts = DemandedElts;
4202 DemandedSrcElts.insertBits(APInt::getZero(NumSubElts), Idx);
4203
4204 Tmp = std::numeric_limits<unsigned>::max();
4205 if (!!DemandedSubElts) {
4206 Tmp = ComputeNumSignBits(Sub, DemandedSubElts, Depth + 1);
4207 if (Tmp == 1)
4208 return 1; // early-out
4209 }
4210 if (!!DemandedSrcElts) {
4211 Tmp2 = ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1);
4212 Tmp = std::min(Tmp, Tmp2);
4213 }
4214 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
4215 return Tmp;
4216 }
4217 case ISD::ATOMIC_CMP_SWAP:
4218 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
4219 case ISD::ATOMIC_SWAP:
4220 case ISD::ATOMIC_LOAD_ADD:
4221 case ISD::ATOMIC_LOAD_SUB:
4222 case ISD::ATOMIC_LOAD_AND:
4223 case ISD::ATOMIC_LOAD_CLR:
4224 case ISD::ATOMIC_LOAD_OR:
4225 case ISD::ATOMIC_LOAD_XOR:
4226 case ISD::ATOMIC_LOAD_NAND:
4227 case ISD::ATOMIC_LOAD_MIN:
4228 case ISD::ATOMIC_LOAD_MAX:
4229 case ISD::ATOMIC_LOAD_UMIN:
4230 case ISD::ATOMIC_LOAD_UMAX:
4231 case ISD::ATOMIC_LOAD: {
4232 Tmp = cast<AtomicSDNode>(Op)->getMemoryVT().getScalarSizeInBits();
4233 // If we are looking at the loaded value.
4234 if (Op.getResNo() == 0) {
4235 if (Tmp == VTBits)
4236 return 1; // early-out
4237 if (TLI->getExtendForAtomicOps() == ISD::SIGN_EXTEND)
4238 return VTBits - Tmp + 1;
4239 if (TLI->getExtendForAtomicOps() == ISD::ZERO_EXTEND)
4240 return VTBits - Tmp;
4241 }
4242 break;
4243 }
4244 }
4245
4246 // If we are looking at the loaded value of the SDNode.
4247 if (Op.getResNo() == 0) {
4248 // Handle LOADX separately here. The EXTLOAD case will fall through.
4249 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
4250 unsigned ExtType = LD->getExtensionType();
4251 switch (ExtType) {
4252 default: break;
4253 case ISD::SEXTLOAD: // e.g. i16->i32 = '17' bits known.
4254 Tmp = LD->getMemoryVT().getScalarSizeInBits();
4255 return VTBits - Tmp + 1;
4256 case ISD::ZEXTLOAD: // e.g. i16->i32 = '16' bits known.
4257 Tmp = LD->getMemoryVT().getScalarSizeInBits();
4258 return VTBits - Tmp;
4259 case ISD::NON_EXTLOAD:
4260 if (const Constant *Cst = TLI->getTargetConstantFromLoad(LD)) {
4261 // We only need to handle vectors - computeKnownBits should handle
4262 // scalar cases.
4263 Type *CstTy = Cst->getType();
4264 if (CstTy->isVectorTy() &&
4265 (NumElts * VTBits) == CstTy->getPrimitiveSizeInBits()) {
4266 Tmp = VTBits;
4267 for (unsigned i = 0; i != NumElts; ++i) {
4268 if (!DemandedElts[i])
4269 continue;
4270 if (Constant *Elt = Cst->getAggregateElement(i)) {
4271 if (auto *CInt = dyn_cast<ConstantInt>(Elt)) {
4272 const APInt &Value = CInt->getValue();
4273 Tmp = std::min(Tmp, Value.getNumSignBits());
4274 continue;
4275 }
4276 if (auto *CFP = dyn_cast<ConstantFP>(Elt)) {
4277 APInt Value = CFP->getValueAPF().bitcastToAPInt();
4278 Tmp = std::min(Tmp, Value.getNumSignBits());
4279 continue;
4280 }
4281 }
4282 // Unknown type. Conservatively assume no bits match sign bit.
4283 return 1;
4284 }
4285 return Tmp;
4286 }
4287 }
4288 break;
4289 }
4290 }
4291 }
4292
4293 // Allow the target to implement this method for its nodes.
4294 if (Opcode >= ISD::BUILTIN_OP_END ||
4295 Opcode == ISD::INTRINSIC_WO_CHAIN ||
4296 Opcode == ISD::INTRINSIC_W_CHAIN ||
4297 Opcode == ISD::INTRINSIC_VOID) {
4298 unsigned NumBits =
4299 TLI->ComputeNumSignBitsForTargetNode(Op, DemandedElts, *this, Depth);
4300 if (NumBits > 1)
4301 FirstAnswer = std::max(FirstAnswer, NumBits);
4302 }
4303
4304 // Finally, if we can prove that the top bits of the result are 0's or 1's,
4305 // use this information.
4306 KnownBits Known = computeKnownBits(Op, DemandedElts, Depth);
4307 return std::max(FirstAnswer, Known.countMinSignBits());
4308}
4309
4310unsigned SelectionDAG::ComputeMaxSignificantBits(SDValue Op,
4311 unsigned Depth) const {
4312 unsigned SignBits = ComputeNumSignBits(Op, Depth);
4313 return Op.getScalarValueSizeInBits() - SignBits + 1;
4314}
4315
4316unsigned SelectionDAG::ComputeMaxSignificantBits(SDValue Op,
4317 const APInt &DemandedElts,
4318 unsigned Depth) const {
4319 unsigned SignBits = ComputeNumSignBits(Op, DemandedElts, Depth);
4320 return Op.getScalarValueSizeInBits() - SignBits + 1;
4321}
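// Example: an i32 value with 25 known sign bits has at most 32 - 25 + 1 = 8
// significant bits - the 7 low "payload" bits plus one bit for the sign.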
4322
4323bool SelectionDAG::isGuaranteedNotToBeUndefOrPoison(SDValue Op, bool PoisonOnly,
4324 unsigned Depth) const {
4325 // Early out for FREEZE.
4326 if (Op.getOpcode() == ISD::FREEZE)
4327 return true;
4328
4329 // TODO: Assume we don't know anything for now.
4330 EVT VT = Op.getValueType();
4331 if (VT.isScalableVector())
4332 return false;
4333
4334 APInt DemandedElts = VT.isVector()
4335 ? APInt::getAllOnes(VT.getVectorNumElements())
4336 : APInt(1, 1);
4337 return isGuaranteedNotToBeUndefOrPoison(Op, DemandedElts, PoisonOnly, Depth);
4338}
4339
4340bool SelectionDAG::isGuaranteedNotToBeUndefOrPoison(SDValue Op,
4341 const APInt &DemandedElts,
4342 bool PoisonOnly,
4343 unsigned Depth) const {
4344 unsigned Opcode = Op.getOpcode();
4345
4346 // Early out for FREEZE.
4347 if (Opcode == ISD::FREEZE)
4348 return true;
4349
4350 if (Depth >= MaxRecursionDepth)
4351 return false; // Limit search depth.
4352
4353 if (isIntOrFPConstant(Op))
4354 return true;
4355
4356 switch (Opcode) {
4357 case ISD::UNDEF:
4358 return PoisonOnly;
4359
4360 case ISD::BUILD_VECTOR:
4361 // NOTE: BUILD_VECTOR has implicit truncation of wider scalar elements -
4362 // this shouldn't affect the result.
4363 for (unsigned i = 0, e = Op.getNumOperands(); i < e; ++i) {
4364 if (!DemandedElts[i])
4365 continue;
4366 if (!isGuaranteedNotToBeUndefOrPoison(Op.getOperand(i), PoisonOnly,
4367 Depth + 1))
4368 return false;
4369 }
4370 return true;
4371
4372 // TODO: Search for noundef attributes from library functions.
4373
4374 // TODO: Pointers dereferenced by ISD::LOAD/STORE ops are noundef.
4375
4376 default:
4377 // Allow the target to implement this method for its nodes.
4378 if (Opcode >= ISD::BUILTIN_OP_END || Opcode == ISD::INTRINSIC_WO_CHAIN ||
4379 Opcode == ISD::INTRINSIC_W_CHAIN || Opcode == ISD::INTRINSIC_VOID)
4380 return TLI->isGuaranteedNotToBeUndefOrPoisonForTargetNode(
4381 Op, DemandedElts, *this, PoisonOnly, Depth);
4382 break;
4383 }
4384
4385 return false;
4386}
4387
4388bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const {
4389 if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) ||
4390 !isa<ConstantSDNode>(Op.getOperand(1)))
4391 return false;
4392
4393 if (Op.getOpcode() == ISD::OR &&
4394 !MaskedValueIsZero(Op.getOperand(0), Op.getConstantOperandAPInt(1)))
4395 return false;
4396
4397 return true;
4398}
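// Usage example (hypothetical values): an address formed as (or (shl X, 4), 3)
// qualifies, because MaskedValueIsZero proves the low bits of the shifted base
// are clear, so the OR behaves exactly like adding the constant offset 3.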
4399
4400bool SelectionDAG::isKnownNeverNaN(SDValue Op, bool SNaN, unsigned Depth) const {
4401 // If we're told that NaNs won't happen, assume they won't.
4402 if (getTarget().Options.NoNaNsFPMath || Op->getFlags().hasNoNaNs())
4403 return true;
4404
4405 if (Depth >= MaxRecursionDepth)
4406 return false; // Limit search depth.
4407
4408 // TODO: Handle vectors.
4409 // If the value is a constant, we can obviously see if it is a NaN or not.
4410 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) {
4411 return !C->getValueAPF().isNaN() ||
4412 (SNaN && !C->getValueAPF().isSignaling());
4413 }
4414
4415 unsigned Opcode = Op.getOpcode();
4416 switch (Opcode) {
4417 case ISD::FADD:
4418 case ISD::FSUB:
4419 case ISD::FMUL:
4420 case ISD::FDIV:
4421 case ISD::FREM:
4422 case ISD::FSIN:
4423 case ISD::FCOS: {
4424 if (SNaN)
4425 return true;
4426 // TODO: Need isKnownNeverInfinity
4427 return false;
4428 }
4429 case ISD::FCANONICALIZE:
4430 case ISD::FEXP:
4431 case ISD::FEXP2:
4432 case ISD::FTRUNC:
4433 case ISD::FFLOOR:
4434 case ISD::FCEIL:
4435 case ISD::FROUND:
4436 case ISD::FROUNDEVEN:
4437 case ISD::FRINT:
4438 case ISD::FNEARBYINT: {
4439 if (SNaN)
4440 return true;
4441 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4442 }
4443 case ISD::FABS:
4444 case ISD::FNEG:
4445 case ISD::FCOPYSIGN: {
4446 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4447 }
4448 case ISD::SELECT:
4449 return isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
4450 isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
4451 case ISD::FP_EXTEND:
4452 case ISD::FP_ROUND: {
4453 if (SNaN)
4454 return true;
4455 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4456 }
4457 case ISD::SINT_TO_FP:
4458 case ISD::UINT_TO_FP:
4459 return true;
4460 case ISD::FMA:
4461 case ISD::FMAD: {
4462 if (SNaN)
4463 return true;
4464 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
4465 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
4466 isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
4467 }
4468 case ISD::FSQRT: // Needs the operand to be known positive
4469 case ISD::FLOG:
4470 case ISD::FLOG2:
4471 case ISD::FLOG10:
4472 case ISD::FPOWI:
4473 case ISD::FPOW: {
4474 if (SNaN)
4475 return true;
4476 // TODO: Refine on operand
4477 return false;
4478 }
4479 case ISD::FMINNUM:
4480 case ISD::FMAXNUM: {
4481 // Only one needs to be known not-nan, since it will be returned if the
4482 // other ends up being one.
4483 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) ||
4484 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
4485 }
4486 case ISD::FMINNUM_IEEE:
4487 case ISD::FMAXNUM_IEEE: {
4488 if (SNaN)
4489 return true;
4490 // This can return a NaN if either operand is an sNaN, or if both operands
4491 // are NaN.
4492 return (isKnownNeverNaN(Op.getOperand(0), false, Depth + 1) &&
4493 isKnownNeverSNaN(Op.getOperand(1), Depth + 1)) ||
4494 (isKnownNeverNaN(Op.getOperand(1), false, Depth + 1) &&
4495 isKnownNeverSNaN(Op.getOperand(0), Depth + 1));
4496 }
4497 case ISD::FMINIMUM:
4498 case ISD::FMAXIMUM: {
4499 // TODO: Does this quiet the NaN or return the original NaN as-is?
4500 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
4501 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
4502 }
4503 case ISD::EXTRACT_VECTOR_ELT: {
4504 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4505 }
4506 default:
4507 if (Opcode >= ISD::BUILTIN_OP_END ||
4508 Opcode == ISD::INTRINSIC_WO_CHAIN ||
4509 Opcode == ISD::INTRINSIC_W_CHAIN ||
4510 Opcode == ISD::INTRINSIC_VOID) {
4511 return TLI->isKnownNeverNaNForTargetNode(Op, *this, SNaN, Depth);
4512 }
4513
4514 return false;
4515 }
4516}
4517
4518bool SelectionDAG::isKnownNeverZeroFloat(SDValue Op) const {
4519 assert(Op.getValueType().isFloatingPoint() &&
4520 "Floating point type expected");
4521
4522 // If the value is a constant, we can obviously see if it is a zero or not.
4523 // TODO: Add BuildVector support.
4524 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
4525 return !C->isZero();
4526 return false;
4527}
4528
4529bool SelectionDAG::isKnownNeverZero(SDValue Op) const {
4530 assert(!Op.getValueType().isFloatingPoint() &&
4531 "Floating point types unsupported - use isKnownNeverZeroFloat");
4532
4533 // If the value is a constant, we can obviously see if it is a zero or not.
4534 if (ISD::matchUnaryPredicate(Op,
4535 [](ConstantSDNode *C) { return !C->isZero(); }))
4536 return true;
4537
4538 // TODO: Recognize more cases here.
4539 switch (Op.getOpcode()) {
4540 default: break;
4541 case ISD::OR:
4542 if (isKnownNeverZero(Op.getOperand(1)) ||
4543 isKnownNeverZero(Op.getOperand(0)))
4544 return true;
4545 break;
4546 }
4547
4548 return false;
4549}
4550
4551bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
4552 // Check the obvious case.
4553 if (A == B) return true;
4554
4555 // Check for negative and positive zero.
4556 if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
4557 if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
4558 if (CA->isZero() && CB->isZero()) return true;
4559
4560 // Otherwise they may not be equal.
4561 return false;
4562}
4563
4564// FIXME: unify with llvm::haveNoCommonBitsSet.
4565bool SelectionDAG::haveNoCommonBitsSet(SDValue A, SDValue B) const {
4566 assert(A.getValueType() == B.getValueType() &&
4567 "Values must have the same type");
4568 // Match masked merge pattern (X & ~M) op (Y & M)
4569 if (A->getOpcode() == ISD::AND && B->getOpcode() == ISD::AND) {
4570 auto MatchNoCommonBitsPattern = [&](SDValue NotM, SDValue And) {
4571 if (isBitwiseNot(NotM, true)) {
4572 SDValue NotOperand = NotM->getOperand(0);
4573 return NotOperand == And->getOperand(0) ||
4574 NotOperand == And->getOperand(1);
4575 }
4576 return false;
4577 };
4578 if (MatchNoCommonBitsPattern(A->getOperand(0), B) ||
4579 MatchNoCommonBitsPattern(A->getOperand(1), B) ||
4580 MatchNoCommonBitsPattern(B->getOperand(0), A) ||
4581 MatchNoCommonBitsPattern(B->getOperand(1), A))
4582 return true;
4583 }
4584 return KnownBits::haveNoCommonBitsSet(computeKnownBits(A),
4585 computeKnownBits(B));
4586}
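// Usage sketch (hypothetical caller): the masked-merge check above proves
// (X & ~M) and (Y & M) never share a set bit, so a caller may treat ADD and OR
// as interchangeable for such operands, e.g.:
//   if (DAG.haveNoCommonBitsSet(A, B))
//     Result = DAG.getNode(ISD::OR, DL, VT, A, B); // same value as ADD A, B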
4587
4588static SDValue FoldSTEP_VECTOR(const SDLoc &DL, EVT VT, SDValue Step,
4589 SelectionDAG &DAG) {
4590 if (cast<ConstantSDNode>(Step)->isZero())
4591 return DAG.getConstant(0, DL, VT);
4592
4593 return SDValue();
4594}
4595
4596static SDValue FoldBUILD_VECTOR(const SDLoc &DL, EVT VT,
4597 ArrayRef<SDValue> Ops,
4598 SelectionDAG &DAG) {
4599 int NumOps = Ops.size();
4600 assert(NumOps != 0 && "Can't build an empty vector!");
4601 assert(!VT.isScalableVector() &&
4602 "BUILD_VECTOR cannot be used with scalable types");
4603 assert(VT.getVectorNumElements() == (unsigned)NumOps &&
4604 "Incorrect element count in BUILD_VECTOR!");
4605
4606 // BUILD_VECTOR of UNDEFs is UNDEF.
4607 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
4608 return DAG.getUNDEF(VT);
4609
4610 // BUILD_VECTOR of seq extract/insert from the same vector + type is Identity.
4611 SDValue IdentitySrc;
4612 bool IsIdentity = true;
4613 for (int i = 0; i != NumOps; ++i) {
4614 if (Ops[i].getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
4615 Ops[i].getOperand(0).getValueType() != VT ||
4616 (IdentitySrc && Ops[i].getOperand(0) != IdentitySrc) ||
4617 !isa<ConstantSDNode>(Ops[i].getOperand(1)) ||
4618 cast<ConstantSDNode>(Ops[i].getOperand(1))->getAPIntValue() != i) {
4619 IsIdentity = false;
4620 break;
4621 }
4622 IdentitySrc = Ops[i].getOperand(0);
4623 }
4624 if (IsIdentity)
4625 return IdentitySrc;
4626
4627 return SDValue();
4628}
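// Identity example: BUILD_VECTOR (extract_vector_elt X, 0),
// (extract_vector_elt X, 1), ..., (extract_vector_elt X, N-1), where X already
// has the result type, folds back to X itself.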
4629
4630/// Try to simplify vector concatenation to an input value, undef, or build
4631/// vector.
4632static SDValue foldCONCAT_VECTORS(const SDLoc &DL, EVT VT,
4633 ArrayRef<SDValue> Ops,
4634 SelectionDAG &DAG) {
4635 assert(!Ops.empty() && "Can't concatenate an empty list of vectors!");
4636 assert(llvm::all_of(Ops,
4637 [Ops](SDValue Op) {
4638 return Ops[0].getValueType() == Op.getValueType();
4639 }) &&
4640 "Concatenation of vectors with inconsistent value types!");
4641 assert((Ops[0].getValueType().getVectorElementCount() * Ops.size()) ==
4642 VT.getVectorElementCount() &&
4643 "Incorrect element count in vector concatenation!");
4644
4645 if (Ops.size() == 1)
4646 return Ops[0];
4647
4648 // Concat of UNDEFs is UNDEF.
4649 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
4650 return DAG.getUNDEF(VT);
4651
4652 // Scan the operands and look for extract operations from a single source
4653 // that correspond to insertion at the same location via this concatenation:
4654 // concat (extract X, 0*subvec_elts), (extract X, 1*subvec_elts), ...
4655 SDValue IdentitySrc;
4656 bool IsIdentity = true;
4657 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
4658 SDValue Op = Ops[i];
4659 unsigned IdentityIndex = i * Op.getValueType().getVectorMinNumElements();
4660 if (Op.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
4661 Op.getOperand(0).getValueType() != VT ||
4662 (IdentitySrc && Op.getOperand(0) != IdentitySrc) ||
4663 Op.getConstantOperandVal(1) != IdentityIndex) {
4664 IsIdentity = false;
4665 break;
4666 }
4667 assert((!IdentitySrc || IdentitySrc == Op.getOperand(0)) &&
4668 "Unexpected identity source vector for concat of extracts");
4669 IdentitySrc = Op.getOperand(0);
4670 }
4671 if (IsIdentity) {
4672 assert(IdentitySrc && "Failed to set source vector of extracts");
4673 return IdentitySrc;
4674 }
4675
4676 // The code below this point is only designed to work for fixed width
4677 // vectors, so we bail out for now.
4678 if (VT.isScalableVector())
4679 return SDValue();
4680
4681 // A CONCAT_VECTORS with all UNDEF/BUILD_VECTOR operands can be
4682 // simplified to one big BUILD_VECTOR.
4683 // FIXME: Add support for SCALAR_TO_VECTOR as well.
4684 EVT SVT = VT.getScalarType();
4685 SmallVector<SDValue, 16> Elts;
4686 for (SDValue Op : Ops) {
4687 EVT OpVT = Op.getValueType();
4688 if (Op.isUndef())
4689 Elts.append(OpVT.getVectorNumElements(), DAG.getUNDEF(SVT));
4690 else if (Op.getOpcode() == ISD::BUILD_VECTOR)
4691 Elts.append(Op->op_begin(), Op->op_end());
4692 else
4693 return SDValue();
4694 }
4695
4696 // BUILD_VECTOR requires all inputs to be of the same type; find the
4697 // maximum type and extend them all.
4698 for (SDValue Op : Elts)
4699 SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT);
4700
4701 if (SVT.bitsGT(VT.getScalarType())) {
4702 for (SDValue &Op : Elts) {
4703 if (Op.isUndef())
4704 Op = DAG.getUNDEF(SVT);
4705 else
4706 Op = DAG.getTargetLoweringInfo().isZExtFree(Op.getValueType(), SVT)
4707 ? DAG.getZExtOrTrunc(Op, DL, SVT)
4708 : DAG.getSExtOrTrunc(Op, DL, SVT);
4709 }
4710 }
4711
4712 SDValue V = DAG.getBuildVector(VT, DL, Elts);
4713 NewSDValueDbgMsg(V, "New node fold concat vectors: ", &DAG);
4714 return V;
4715}
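// Identity example: concat_vectors (extract_subvector X, 0),
// (extract_subvector X, 4), where X is v8i32 and each half is v4i32, folds to
// X; a concat of undef and BUILD_VECTOR operands instead becomes one wide
// BUILD_VECTOR with narrower scalars extended to the largest element type.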
4716
4717/// Gets or creates the specified node.
4718SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT) {
4719 FoldingSetNodeID ID;
4720 AddNodeIDNode(ID, Opcode, getVTList(VT), None);
4721 void *IP = nullptr;
4722 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
4723 return SDValue(E, 0);
4724
4725 auto *N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(),
4726 getVTList(VT));
4727 CSEMap.InsertNode(N, IP);
4728
4729 InsertNode(N);
4730 SDValue V = SDValue(N, 0);
4731 NewSDValueDbgMsg(V, "Creating new node: ", this);
4732 return V;
4733}
4734
4735SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
4736 SDValue Operand) {
4737 SDNodeFlags Flags;
4738 if (Inserter)
4739 Flags = Inserter->getFlags();
4740 return getNode(Opcode, DL, VT, Operand, Flags);
4741}
4742
4743SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
4744 SDValue Operand, const SDNodeFlags Flags) {
4745 assert(Operand.getOpcode() != ISD::DELETED_NODE &&
4746 "Operand is DELETED_NODE!");
4747 // Constant fold unary operations with an integer constant operand. Even
4748 // opaque constants will be folded, because the folding of unary operations
4749 // doesn't create new constants with different values. Nevertheless, the
4750 // opaque flag is preserved during folding to prevent future folding with
4751 // other constants.
4752 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand)) {
4753 const APInt &Val = C->getAPIntValue();
4754 switch (Opcode) {
4755 default: break;
4756 case ISD::SIGN_EXTEND:
4757 return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT,
4758 C->isTargetOpcode(), C->isOpaque());
4759 case ISD::TRUNCATE:
4760 if (C->isOpaque())
4761 break;
4762 LLVM_FALLTHROUGH;
4763 case ISD::ZERO_EXTEND:
4764 return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT,
4765 C->isTargetOpcode(), C->isOpaque());
4766 case ISD::ANY_EXTEND:
4767 // Some targets like RISCV prefer to sign extend some types.
4768 if (TLI->isSExtCheaperThanZExt(Operand.getValueType(), VT))
4769 return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT,
4770 C->isTargetOpcode(), C->isOpaque());
4771 return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT,
4772 C->isTargetOpcode(), C->isOpaque());
4773 case ISD::UINT_TO_FP:
4774 case ISD::SINT_TO_FP: {
4775 APFloat apf(EVTToAPFloatSemantics(VT),
4776 APInt::getZero(VT.getSizeInBits()));
4777 (void)apf.convertFromAPInt(Val,
4778 Opcode==ISD::SINT_TO_FP,
4779 APFloat::rmNearestTiesToEven);
4780 return getConstantFP(apf, DL, VT);
4781 }
4782 case ISD::BITCAST:
4783 if (VT == MVT::f16 && C->getValueType(0) == MVT::i16)
4784 return getConstantFP(APFloat(APFloat::IEEEhalf(), Val), DL, VT);
4785 if (VT == MVT::f32 && C->getValueType(0) == MVT::i32)
4786 return getConstantFP(APFloat(APFloat::IEEEsingle(), Val), DL, VT);
4787 if (VT == MVT::f64 && C->getValueType(0) == MVT::i64)
4788 return getConstantFP(APFloat(APFloat::IEEEdouble(), Val), DL, VT);
4789 if (VT == MVT::f128 && C->getValueType(0) == MVT::i128)
4790 return getConstantFP(APFloat(APFloat::IEEEquad(), Val), DL, VT);
4791 break;
4792 case ISD::ABS:
4793 return getConstant(Val.abs(), DL, VT, C->isTargetOpcode(),
4794 C->isOpaque());
4795 case ISD::BITREVERSE:
4796 return getConstant(Val.reverseBits(), DL, VT, C->isTargetOpcode(),
4797 C->isOpaque());
4798 case ISD::BSWAP:
4799 return getConstant(Val.byteSwap(), DL, VT, C->isTargetOpcode(),
4800 C->isOpaque());
4801 case ISD::CTPOP:
4802 return getConstant(Val.countPopulation(), DL, VT, C->isTargetOpcode(),
4803 C->isOpaque());
4804 case ISD::CTLZ:
4805 case ISD::CTLZ_ZERO_UNDEF:
4806 return getConstant(Val.countLeadingZeros(), DL, VT, C->isTargetOpcode(),
4807 C->isOpaque());
4808 case ISD::CTTZ:
4809 case ISD::CTTZ_ZERO_UNDEF:
4810 return getConstant(Val.countTrailingZeros(), DL, VT, C->isTargetOpcode(),
4811 C->isOpaque());
4812 case ISD::FP16_TO_FP: {
4813 bool Ignored;
4814 APFloat FPV(APFloat::IEEEhalf(),
4815 (Val.getBitWidth() == 16) ? Val : Val.trunc(16));
4816
4817 // This can return overflow, underflow, or inexact; we don't care.
4818 // FIXME need to be more flexible about rounding mode.
4819 (void)FPV.convert(EVTToAPFloatSemantics(VT),
4820 APFloat::rmNearestTiesToEven, &Ignored);
4821 return getConstantFP(FPV, DL, VT);
4822 }
4823 case ISD::STEP_VECTOR: {
4824 if (SDValue V = FoldSTEP_VECTOR(DL, VT, Operand, *this))
4825 return V;
4826 break;
4827 }
4828 }
4829 }
4830
4831 // Constant fold unary operations with a floating point constant operand.
4832 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand)) {
4833 APFloat V = C->getValueAPF(); // make copy
4834 switch (Opcode) {
4835 case ISD::FNEG:
4836 V.changeSign();
4837 return getConstantFP(V, DL, VT);
4838 case ISD::FABS:
4839 V.clearSign();
4840 return getConstantFP(V, DL, VT);
4841 case ISD::FCEIL: {
4842 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive);
4843 if (fs == APFloat::opOK || fs == APFloat::opInexact)
4844 return getConstantFP(V, DL, VT);
4845 break;
4846 }
4847 case ISD::FTRUNC: {
4848 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero);
4849 if (fs == APFloat::opOK || fs == APFloat::opInexact)
4850 return getConstantFP(V, DL, VT);
4851 break;
4852 }
4853 case ISD::FFLOOR: {
4854 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative);
4855 if (fs == APFloat::opOK || fs == APFloat::opInexact)
4856 return getConstantFP(V, DL, VT);
4857 break;
4858 }
4859 case ISD::FP_EXTEND: {
4860 bool ignored;
4861 // This can return overflow, underflow, or inexact; we don't care.
4862 // FIXME need to be more flexible about rounding mode.
4863 (void)V.convert(EVTToAPFloatSemantics(VT),
4864 APFloat::rmNearestTiesToEven, &ignored);
4865 return getConstantFP(V, DL, VT);
4866 }
4867 case ISD::FP_TO_SINT:
4868 case ISD::FP_TO_UINT: {
4869 bool ignored;
4870 APSInt IntVal(VT.getSizeInBits(), Opcode == ISD::FP_TO_UINT);
4871 // FIXME need to be more flexible about rounding mode.
4872 APFloat::opStatus s =
4873 V.convertToInteger(IntVal, APFloat::rmTowardZero, &ignored);
4874 if (s == APFloat::opInvalidOp) // inexact is OK, in fact usual
4875 break;
4876 return getConstant(IntVal, DL, VT);
4877 }
4878 case ISD::BITCAST:
4879 if (VT == MVT::i16 && C->getValueType(0) == MVT::f16)
4880 return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL, VT);
4881 if (VT == MVT::i16 && C->getValueType(0) == MVT::bf16)
4882 return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL, VT);
4883 if (VT == MVT::i32 && C->getValueType(0) == MVT::f32)
4884 return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), DL, VT);
4885 if (VT == MVT::i64 && C->getValueType(0) == MVT::f64)
4886 return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT);
4887 break;
4888 case ISD::FP_TO_FP16: {
4889 bool Ignored;
4890 // This can return overflow, underflow, or inexact; we don't care.
4891 // FIXME need to be more flexible about rounding mode.
4892 (void)V.convert(APFloat::IEEEhalf(),
4893 APFloat::rmNearestTiesToEven, &Ignored);
4894 return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT);
4895 }
4896 }
4897 }
4898
4899 // Constant fold unary operations with a vector integer or float operand.
4900 switch (Opcode) {
4901 default:
4902 // FIXME: Entirely reasonable to perform folding of other unary
4903 // operations here as the need arises.
4904 break;
4905 case ISD::FNEG:
4906 case ISD::FABS:
4907 case ISD::FCEIL:
4908 case ISD::FTRUNC:
4909 case ISD::FFLOOR:
4910 case ISD::FP_EXTEND:
4911 case ISD::FP_TO_SINT:
4912 case ISD::FP_TO_UINT:
4913 case ISD::TRUNCATE:
4914 case ISD::ANY_EXTEND:
4915 case ISD::ZERO_EXTEND:
4916 case ISD::SIGN_EXTEND:
4917 case ISD::UINT_TO_FP:
4918 case ISD::SINT_TO_FP:
4919 case ISD::ABS:
4920 case ISD::BITREVERSE:
4921 case ISD::BSWAP:
4922 case ISD::CTLZ:
4923 case ISD::CTLZ_ZERO_UNDEF:
4924 case ISD::CTTZ:
4925 case ISD::CTTZ_ZERO_UNDEF:
4926 case ISD::CTPOP: {
4927 SDValue Ops = {Operand};
4928 if (SDValue Fold = FoldConstantArithmetic(Opcode, DL, VT, Ops))
4929 return Fold;
4930 }
4931 }
4932
4933 unsigned OpOpcode = Operand.getNode()->getOpcode();
4934 switch (Opcode) {
4935 case ISD::STEP_VECTOR:
4936 assert(VT.isScalableVector() &&
4937 "STEP_VECTOR can only be used with scalable types");
4938 assert(OpOpcode == ISD::TargetConstant &&
4939 VT.getVectorElementType() == Operand.getValueType() &&
4940 "Unexpected step operand");
4941 break;
4942 case ISD::FREEZE:
4943 assert(VT == Operand.getValueType() && "Unexpected VT!");
4944 break;
4945 case ISD::TokenFactor:
4946 case ISD::MERGE_VALUES:
4947 case ISD::CONCAT_VECTORS:
4948 return Operand; // Factor, merge or concat of one node? No need.
4949 case ISD::BUILD_VECTOR: {
4950 // Attempt to simplify BUILD_VECTOR.
4951 SDValue Ops[] = {Operand};
4952 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
4953 return V;
4954 break;
4955 }
4956 case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node");
4957 case ISD::FP_EXTEND:
4958 assert(VT.isFloatingPoint() &&
4959 Operand.getValueType().isFloatingPoint() && "Invalid FP cast!");
4960 if (Operand.getValueType() == VT) return Operand; // noop conversion.
4961 assert((!VT.isVector() ||
4962 VT.getVectorElementCount() ==
4963 Operand.getValueType().getVectorElementCount()) &&
4964 "Vector element count mismatch!");
4965 assert(Operand.getValueType().bitsLT(VT) &&
4966 "Invalid fpext node, dst < src!");
4967 if (Operand.isUndef())
4968 return getUNDEF(VT);
4969 break;
4970 case ISD::FP_TO_SINT:
4971 case ISD::FP_TO_UINT:
4972 if (Operand.isUndef())
4973 return getUNDEF(VT);
4974 break;
4975 case ISD::SINT_TO_FP:
4976 case ISD::UINT_TO_FP:
4977 // [us]itofp(undef) = 0, because the result value is bounded.
4978 if (Operand.isUndef())
4979 return getConstantFP(0.0, DL, VT);
4980 break;
4981 case ISD::SIGN_EXTEND:
4982 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4983 "Invalid SIGN_EXTEND!");
4984 assert(VT.isVector() == Operand.getValueType().isVector() &&
4985 "SIGN_EXTEND result type type should be vector iff the operand "
4986 "type is vector!");
4987 if (Operand.getValueType() == VT) return Operand; // noop extension
4988 assert((!VT.isVector() ||
4989 VT.getVectorElementCount() ==
4990 Operand.getValueType().getVectorElementCount()) &&
4991 "Vector element count mismatch!");
4992 assert(Operand.getValueType().bitsLT(VT) &&
4993 "Invalid sext node, dst < src!");
4994 if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND)
4995 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
4996 if (OpOpcode == ISD::UNDEF)
4997 // sext(undef) = 0, because the top bits will all be the same.
4998 return getConstant(0, DL, VT);
4999 break;
5000 case ISD::ZERO_EXTEND:
5001 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
5002 "Invalid ZERO_EXTEND!");
5003 assert(VT.isVector() == Operand.getValueType().isVector() &&
5004 "ZERO_EXTEND result type type should be vector iff the operand "
5005 "type is vector!");
5006 if (Operand.getValueType() == VT) return Operand; // noop extension
5007 assert((!VT.isVector() ||
5008 VT.getVectorElementCount() ==
5009 Operand.getValueType().getVectorElementCount()) &&
5010 "Vector element count mismatch!");
5011 assert(Operand.getValueType().bitsLT(VT) &&
5012 "Invalid zext node, dst < src!");
5013 if (OpOpcode == ISD::ZERO_EXTEND) // (zext (zext x)) -> (zext x)
5014 return getNode(ISD::ZERO_EXTEND, DL, VT, Operand.getOperand(0));
5015 if (OpOpcode == ISD::UNDEF)
5016 // zext(undef) = 0, because the top bits will be zero.
5017 return getConstant(0, DL, VT);
5018 break;
5019 case ISD::ANY_EXTEND:
5020 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
5021 "Invalid ANY_EXTEND!");
5022 assert(VT.isVector() == Operand.getValueType().isVector() &&
5023 "ANY_EXTEND result type type should be vector iff the operand "
5024 "type is vector!");
5025 if (Operand.getValueType() == VT) return Operand; // noop extension
5026 assert((!VT.isVector() ||
5027 VT.getVectorElementCount() ==
5028 Operand.getValueType().getVectorElementCount()) &&
5029 "Vector element count mismatch!");
5030 assert(Operand.getValueType().bitsLT(VT) &&
5031 "Invalid anyext node, dst < src!");
5032
5033 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
5034 OpOpcode == ISD::ANY_EXTEND)
5035 // (ext (zext x)) -> (zext x) and (ext (sext x)) -> (sext x)
5036 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
5037 if (OpOpcode == ISD::UNDEF)
5038 return getUNDEF(VT);
5039
5040 // (ext (trunc x)) -> x
5041 if (OpOpcode == ISD::TRUNCATE) {
5042 SDValue OpOp = Operand.getOperand(0);
5043 if (OpOp.getValueType() == VT) {
5044 transferDbgValues(Operand, OpOp);
5045 return OpOp;
5046 }
5047 }
5048 break;
5049 case ISD::TRUNCATE:
5050 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
5051 "Invalid TRUNCATE!");
5052 assert(VT.isVector() == Operand.getValueType().isVector() &&
5053 "TRUNCATE result type type should be vector iff the operand "
5054 "type is vector!");
5055 if (Operand.getValueType() == VT) return Operand; // noop truncate
5056 assert((!VT.isVector() ||
5057 VT.getVectorElementCount() ==
5058 Operand.getValueType().getVectorElementCount()) &&
5059 "Vector element count mismatch!");
5060 assert(Operand.getValueType().bitsGT(VT) &&
5061 "Invalid truncate node, src < dst!");
5062 if (OpOpcode == ISD::TRUNCATE)
5063 return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0));
5064 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
5065 OpOpcode == ISD::ANY_EXTEND) {
5066 // If the source is smaller than the dest, we still need an extend.
5067 if (Operand.getOperand(0).getValueType().getScalarType()
5068 .bitsLT(VT.getScalarType()))
5069 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
5070 if (Operand.getOperand(0).getValueType().bitsGT(VT))
5071 return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0));
5072 return Operand.getOperand(0);
5073 }
5074 if (OpOpcode == ISD::UNDEF)
5075 return getUNDEF(VT);
5076 if (OpOpcode == ISD::VSCALE && !NewNodesMustHaveLegalTypes)
5077 return getVScale(DL, VT, Operand.getConstantOperandAPInt(0));
5078 break;
5079 case ISD::ANY_EXTEND_VECTOR_INREG:
5080 case ISD::ZERO_EXTEND_VECTOR_INREG:
5081 case ISD::SIGN_EXTEND_VECTOR_INREG:
5082 assert(VT.isVector() && "This DAG node is restricted to vector types.");
5083 assert(Operand.getValueType().bitsLE(VT) &&
5084 "The input must be the same size or smaller than the result.");
5085 assert(VT.getVectorMinNumElements() <
5086 Operand.getValueType().getVectorMinNumElements() &&
5087 "The destination vector type must have fewer lanes than the input.");
5088 break;
5089 case ISD::ABS:
5090 assert(VT.isInteger() && VT == Operand.getValueType() &&
5091 "Invalid ABS!");
5092 if (OpOpcode == ISD::UNDEF)
5093 return getUNDEF(VT);
5094 break;
5095 case ISD::BSWAP:
5096 assert(VT.isInteger() && VT == Operand.getValueType() &&
5097 "Invalid BSWAP!");
5098 assert((VT.getScalarSizeInBits() % 16 == 0) &&
5099 "BSWAP types must be a multiple of 16 bits!");
5100 if (OpOpcode == ISD::UNDEF)
5101 return getUNDEF(VT);
5102 break;
5103 case ISD::BITREVERSE:
5104 assert(VT.isInteger() && VT == Operand.getValueType() &&
5105 "Invalid BITREVERSE!");
5106 if (OpOpcode == ISD::UNDEF)
5107 return getUNDEF(VT);
5108 break;
5109 case ISD::BITCAST:
5110 assert(VT.getSizeInBits() == Operand.getValueSizeInBits() &&
5111 "Cannot BITCAST between types of different sizes!");
5112 if (VT == Operand.getValueType()) return Operand; // noop conversion.
5113 if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x)
5114 return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0));
5115 if (OpOpcode == ISD::UNDEF)
5116 return getUNDEF(VT);
5117 break;
5118 case ISD::SCALAR_TO_VECTOR:
5119 assert(VT.isVector() && !Operand.getValueType().isVector() &&
5120 (VT.getVectorElementType() == Operand.getValueType() ||
5121 (VT.getVectorElementType().isInteger() &&
5122 Operand.getValueType().isInteger() &&
5123 VT.getVectorElementType().bitsLE(Operand.getValueType()))) &&
5124 "Illegal SCALAR_TO_VECTOR node!");
5125 if (OpOpcode == ISD::UNDEF)
5126 return getUNDEF(VT);
5127 // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined.
5128 if (OpOpcode == ISD::EXTRACT_VECTOR_ELT &&
5129 isa<ConstantSDNode>(Operand.getOperand(1)) &&
5130 Operand.getConstantOperandVal(1) == 0 &&
5131 Operand.getOperand(0).getValueType() == VT)
5132 return Operand.getOperand(0);
5133 break;
5134 case ISD::FNEG:
5135 // Negation of an unknown bag of bits is still completely undefined.
5136 if (OpOpcode == ISD::UNDEF)
5137 return getUNDEF(VT);
5138
5139 if (OpOpcode == ISD::FNEG) // --X -> X
5140 return Operand.getOperand(0);
5141 break;
5142 case ISD::FABS:
5143 if (OpOpcode == ISD::FNEG) // abs(-X) -> abs(X)
5144 return getNode(ISD::FABS, DL, VT, Operand.getOperand(0));
5145 break;
5146 case ISD::VSCALE:
5147 assert(VT == Operand.getValueType() && "Unexpected VT!");
5148 break;
5149 case ISD::CTPOP:
5150 if (Operand.getValueType().getScalarType() == MVT::i1)
5151 return Operand;
5152 break;
5153 case ISD::CTLZ:
5154 case ISD::CTTZ:
5155 if (Operand.getValueType().getScalarType() == MVT::i1)
5156 return getNOT(DL, Operand, Operand.getValueType());
5157 break;
5158 case ISD::VECREDUCE_SMIN:
5159 case ISD::VECREDUCE_UMAX:
5160 if (Operand.getValueType().getScalarType() == MVT::i1)
5161 return getNode(ISD::VECREDUCE_OR, DL, VT, Operand);
5162 break;
5163 case ISD::VECREDUCE_SMAX:
5164 case ISD::VECREDUCE_UMIN:
5165 if (Operand.getValueType().getScalarType() == MVT::i1)
5166 return getNode(ISD::VECREDUCE_AND, DL, VT, Operand);
5167 break;
5168 }
5169
5170 SDNode *N;
5171 SDVTList VTs = getVTList(VT);
5172 SDValue Ops[] = {Operand};
5173 if (VT != MVT::Glue) { // Don't CSE flag producing nodes
5174 FoldingSetNodeID ID;
5175 AddNodeIDNode(ID, Opcode, VTs, Ops);
5176 void *IP = nullptr;
5177 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
5178 E->intersectFlagsWith(Flags);
5179 return SDValue(E, 0);
5180 }
5181
5182 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5183 N->setFlags(Flags);
5184 createOperands(N, Ops);
5185 CSEMap.InsertNode(N, IP);
5186 } else {
5187 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5188 createOperands(N, Ops);
5189 }
5190
5191 InsertNode(N);
5192 SDValue V = SDValue(N, 0);
5193 NewSDValueDbgMsg(V, "Creating new node: ", this);
5194 return V;
5195}
5196
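[Editorial note, not part of the analyzed source] The integer folds near the top of this function (listing lines 4802-4811) reduce to plain APInt bit-counting. A minimal standalone sketch of those folds, using hypothetical values and assuming only the LLVMSupport headers are available:

// Standalone illustration of the APInt bit-count folds used above
// (countPopulation / countLeadingZeros / countTrailingZeros).
#include "llvm/ADT/APInt.h"
#include <cassert>

int main() {
  llvm::APInt Val(16, 0x00F0);           // hypothetical 16-bit constant
  assert(Val.countPopulation() == 4);    // ISD::CTPOP fold
  assert(Val.countLeadingZeros() == 8);  // ISD::CTLZ fold
  assert(Val.countTrailingZeros() == 4); // ISD::CTTZ fold
  return 0;
}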
5197static llvm::Optional<APInt> FoldValue(unsigned Opcode, const APInt &C1,
5198 const APInt &C2) {
5199 switch (Opcode) {
5200 case ISD::ADD: return C1 + C2;
5201 case ISD::SUB: return C1 - C2;
5202 case ISD::MUL: return C1 * C2;
5203 case ISD::AND: return C1 & C2;
5204 case ISD::OR: return C1 | C2;
5205 case ISD::XOR: return C1 ^ C2;
5206 case ISD::SHL: return C1 << C2;
5207 case ISD::SRL: return C1.lshr(C2);
5208 case ISD::SRA: return C1.ashr(C2);
5209 case ISD::ROTL: return C1.rotl(C2);
5210 case ISD::ROTR: return C1.rotr(C2);
5211 case ISD::SMIN: return C1.sle(C2) ? C1 : C2;
5212 case ISD::SMAX: return C1.sge(C2) ? C1 : C2;
5213 case ISD::UMIN: return C1.ule(C2) ? C1 : C2;
5214 case ISD::UMAX: return C1.uge(C2) ? C1 : C2;
5215 case ISD::SADDSAT: return C1.sadd_sat(C2);
5216 case ISD::UADDSAT: return C1.uadd_sat(C2);
5217 case ISD::SSUBSAT: return C1.ssub_sat(C2);
5218 case ISD::USUBSAT: return C1.usub_sat(C2);
5219 case ISD::UDIV:
5220 if (!C2.getBoolValue())
5221 break;
5222 return C1.udiv(C2);
5223 case ISD::UREM:
5224 if (!C2.getBoolValue())
5225 break;
5226 return C1.urem(C2);
5227 case ISD::SDIV:
5228 if (!C2.getBoolValue())
5229 break;
5230 return C1.sdiv(C2);
5231 case ISD::SREM:
5232 if (!C2.getBoolValue())
5233 break;
5234 return C1.srem(C2);
5235 case ISD::MULHS: {
5236 unsigned FullWidth = C1.getBitWidth() * 2;
5237 APInt C1Ext = C1.sext(FullWidth);
5238 APInt C2Ext = C2.sext(FullWidth);
5239 return (C1Ext * C2Ext).extractBits(C1.getBitWidth(), C1.getBitWidth());
5240 }
5241 case ISD::MULHU: {
5242 unsigned FullWidth = C1.getBitWidth() * 2;
5243 APInt C1Ext = C1.zext(FullWidth);
5244 APInt C2Ext = C2.zext(FullWidth);
5245 return (C1Ext * C2Ext).extractBits(C1.getBitWidth(), C1.getBitWidth());
5246 }
5247 }
5248 return llvm::None;
5249}
5250
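[Editorial note, not part of the analyzed source] FoldValue above is plain APInt arithmetic with a None result for the non-foldable cases. A minimal standalone sketch of the MULHU and UADDSAT cases, with hypothetical 8-bit values, assuming only LLVMSupport is available:

#include "llvm/ADT/APInt.h"
#include <cassert>

int main() {
  using llvm::APInt;
  // ISD::MULHU: widen both operands, multiply, keep the high half
  // (mirrors listing lines 5241-5246).
  APInt C1(8, 200), C2(8, 100);
  APInt Hi = (C1.zext(16) * C2.zext(16)).extractBits(8, 8);
  assert(Hi == 78);                      // 200 * 100 = 20000 = 78 * 256 + 32
  // ISD::UADDSAT: unsigned saturating add clamps at the type maximum.
  assert(C1.uadd_sat(C2) == 255);        // 200 + 100 saturates to 0xFF
  return 0;
}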
5251SDValue SelectionDAG::FoldSymbolOffset(unsigned Opcode, EVT VT,
5252 const GlobalAddressSDNode *GA,
5253 const SDNode *N2) {
5254 if (GA->getOpcode() != ISD::GlobalAddress)
5255 return SDValue();
5256 if (!TLI->isOffsetFoldingLegal(GA))
5257 return SDValue();
5258 auto *C2 = dyn_cast<ConstantSDNode>(N2);
5259 if (!C2)
5260 return SDValue();
5261 int64_t Offset = C2->getSExtValue();
5262 switch (Opcode) {
5263 case ISD::ADD: break;
5264 case ISD::SUB: Offset = -uint64_t(Offset); break;
5265 default: return SDValue();
5266 }
5267 return getGlobalAddress(GA->getGlobal(), SDLoc(C2), VT,
5268 GA->getOffset() + uint64_t(Offset));
5269}
5270
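[Editorial note, not part of the analyzed source] One subtlety in FoldSymbolOffset above: the ISD::SUB case negates through uint64_t so that an INT64_MIN offset does not hit signed-overflow undefined behaviour. A minimal standalone sketch of that detail, with a hypothetical offset value:

#include <cassert>
#include <cstdint>

int main() {
  int64_t Offset = INT64_MIN;
  // Negating through the unsigned type is well defined (wraps modulo 2^64),
  // whereas -Offset on the signed type would be undefined behaviour.
  uint64_t Negated = -static_cast<uint64_t>(Offset);
  assert(Negated == static_cast<uint64_t>(INT64_MIN)); // 0x8000000000000000
  return 0;
}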
5271bool SelectionDAG::isUndef(unsigned Opcode, ArrayRef<SDValue> Ops) {
5272 switch (Opcode) {
5273 case ISD::SDIV:
5274 case ISD::UDIV:
5275 case ISD::SREM:
5276 case ISD::UREM: {
5277 // If a divisor is zero/undef or any element of a divisor vector is
5278 // zero/undef, the whole op is undef.
5279 assert(Ops.size() == 2 && "Div/rem should have 2 operands");
5280 SDValue Divisor = Ops[1];
5281 if (Divisor.isUndef() || isNullConstant(Divisor))
5282 return true;
5283
5284 return ISD::isBuildVectorOfConstantSDNodes(Divisor.getNode()) &&
5285 llvm::any_of(Divisor->op_values(),
5286 [](SDValue V) { return V.isUndef() ||
5287 isNullConstant(V); });
5288 // TODO: Handle signed overflow.
5289 }
5290 // TODO: Handle oversized shifts.
5291 default:
5292 return false;
5293 }
5294}
5295
5296SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL,
5297 EVT VT, ArrayRef<SDValue> Ops) {
5298 // If the opcode is a target-specific ISD node, there's nothing we can
5299 // do here and the operand rules may not line up with the below, so
5300 // bail early.
5301 // We can't create a scalar CONCAT_VECTORS so skip it. It will break
5302 // for concats involving SPLAT_VECTOR. Concats of BUILD_VECTORS are handled by
5303 // foldCONCAT_VECTORS in getNode before this is called.
5304 if (Opcode >= ISD::BUILTIN_OP_END || Opcode == ISD::CONCAT_VECTORS)
5305 return SDValue();
5306
5307 unsigned NumOps = Ops.size();
5308 if (NumOps == 0)
5309 return SDValue();
5310
5311 if (isUndef(Opcode, Ops))
5312 return getUNDEF(VT);
5313
5314 // Handle binops special cases.
5315 if (NumOps == 2) {
5316 if (SDValue CFP = foldConstantFPMath(Opcode, DL, VT, Ops[0], Ops[1]))
5317 return CFP;
5318
5319 if (auto *C1 = dyn_cast<ConstantSDNode>(Ops[0])) {
5320 if (auto *C2 = dyn_cast<ConstantSDNode>(Ops[1])) {
5321 if (C1->isOpaque() || C2->isOpaque())
5322 return SDValue();
5323
5324 Optional<APInt> FoldAttempt =
5325 FoldValue(Opcode, C1->getAPIntValue(), C2->getAPIntValue());
5326 if (!FoldAttempt)
5327 return SDValue();
5328
5329 SDValue Folded = getConstant(FoldAttempt.getValue(), DL, VT);
5330 assert((!Folded || !VT.isVector()) &&
5331 "Can't fold vectors ops with scalar operands");
5332 return Folded;
5333 }
5334 }
5335
5336 // fold (add Sym, c) -> Sym+c
5337 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Ops[0]))
5338 return FoldSymbolOffset(Opcode, VT, GA, Ops[1].getNode());
5339 if (TLI->isCommutativeBinOp(Opcode))
5340 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Ops[1]))
5341 return FoldSymbolOffset(Opcode, VT, GA, Ops[0].getNode());
5342 }
5343
5344 // This is for vector folding only from here on.
5345 if (!VT.isVector())
5346 return SDValue();
5347
5348 ElementCount NumElts = VT.getVectorElementCount();
5349
5350 // See if we can fold through bitcasted integer ops.
5351 // TODO: Can we handle undef elements?
5352 if (NumOps == 2 && VT.isFixedLengthVector() && VT.isInteger() &&
5353 Ops[0].getValueType() == VT && Ops[1].getValueType() == VT &&
5354 Ops[0].getOpcode() == ISD::BITCAST &&
5355 Ops[1].getOpcode() == ISD::BITCAST) {
5356 SDValue N1 = peekThroughBitcasts(Ops[0]);
5357 SDValue N2 = peekThroughBitcasts(Ops[1]);
5358 auto *BV1 = dyn_cast<BuildVectorSDNode>(N1);
5359 auto *BV2 = dyn_cast<BuildVectorSDNode>(N2);
5360 EVT BVVT = N1.getValueType();
5361 if (BV1 && BV2 && BVVT.isInteger() && BVVT == N2.getValueType()) {
5362 bool IsLE = getDataLayout().isLittleEndian();
5363 unsigned EltBits = VT.getScalarSizeInBits();
5364 SmallVector<APInt> RawBits1, RawBits2;
5365 BitVector UndefElts1, UndefElts2;
5366 if (BV1->getConstantRawBits(IsLE, EltBits, RawBits1, UndefElts1) &&
5367 BV2->getConstantRawBits(IsLE, EltBits, RawBits2, UndefElts2) &&
5368 UndefElts1.none() && UndefElts2.none()) {
5369 SmallVector<APInt> RawBits;
5370 for (unsigned I = 0, E = NumElts.getFixedValue(); I != E; ++I) {
5371 Optional<APInt> Fold = FoldValue(Opcode, RawBits1[I], RawBits2[I]);
5372 if (!Fold)
5373 break;
5374 RawBits.push_back(Fold.getValue());
5375 }
5376 if (RawBits.size() == NumElts.getFixedValue()) {
5377 // We have constant folded, but we need to cast this again back to
5378 // the original (possibly legalized) type.
5379 SmallVector<APInt> DstBits;
5380 BitVector DstUndefs;
5381 BuildVectorSDNode::recastRawBits(IsLE, BVVT.getScalarSizeInBits(),
5382 DstBits, RawBits, DstUndefs,
5383 BitVector(RawBits.size(), false));
5384 EVT BVEltVT = BV1->getOperand(0).getValueType();
5385 unsigned BVEltBits = BVEltVT.getSizeInBits();
5386 SmallVector<SDValue> Ops(DstBits.size(), getUNDEF(BVEltVT));
5387 for (unsigned I = 0, E = DstBits.size(); I != E; ++I) {
5388 if (DstUndefs[I])
5389 continue;
5390 Ops[I] = getConstant(DstBits[I].sextOrSelf(BVEltBits), DL, BVEltVT);
5391 }
5392 return getBitcast(VT, getBuildVector(BVVT, DL, Ops));
5393 }
5394 }
5395 }
5396 }
5397
5398 auto IsScalarOrSameVectorSize = [NumElts](const SDValue &Op) {
5399 return !Op.getValueType().isVector() ||
5400 Op.getValueType().getVectorElementCount() == NumElts;
5401 };
5402
5403 auto IsBuildVectorSplatVectorOrUndef = [](const SDValue &Op) {
5404 return Op.isUndef() || Op.getOpcode() == ISD::CONDCODE ||
5405 Op.getOpcode() == ISD::BUILD_VECTOR ||
5406 Op.getOpcode() == ISD::SPLAT_VECTOR;
5407 };
5408
5409 // All operands must be vector types with the same number of elements as
5410 // the result type and must be either UNDEF or a build/splat vector
5411 // or UNDEF scalars.
5412 if (!llvm::all_of(Ops, IsBuildVectorSplatVectorOrUndef) ||
5413 !llvm::all_of(Ops, IsScalarOrSameVectorSize))
5414 return SDValue();
5415
5416 // If we are comparing vectors, then the result needs to be a i1 boolean
5417 // that is then sign-extended back to the legal result type.
5418 EVT SVT = (Opcode == ISD::SETCC ? MVT::i1 : VT.getScalarType());
5419
5420 // Find legal integer scalar type for constant promotion and
5421 // ensure that its scalar size is at least as large as source.
5422 EVT LegalSVT = VT.getScalarType();
5423 if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) {
5424 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
5425 if (LegalSVT.bitsLT(VT.getScalarType()))
5426 return SDValue();
5427 }
5428
5429 // For scalable vector types we know we're dealing with SPLAT_VECTORs. We
5430 // only have one operand to check. For fixed-length vector types we may have
5431 // a combination of BUILD_VECTOR and SPLAT_VECTOR.
5432 unsigned NumVectorElts = NumElts.isScalable() ? 1 : NumElts.getFixedValue();
5433
5434 // Constant fold each scalar lane separately.
5435 SmallVector<SDValue, 4> ScalarResults;
5436 for (unsigned I = 0; I != NumVectorElts; I++) {
5437 SmallVector<SDValue, 4> ScalarOps;
5438 for (SDValue Op : Ops) {
5439 EVT InSVT = Op.getValueType().getScalarType();
5440 if (Op.getOpcode() != ISD::BUILD_VECTOR &&
5441 Op.getOpcode() != ISD::SPLAT_VECTOR) {
5442 if (Op.isUndef())
5443 ScalarOps.push_back(getUNDEF(InSVT));
5444 else
5445 ScalarOps.push_back(Op);
5446 continue;
5447 }
5448
5449 SDValue ScalarOp =
5450 Op.getOperand(Op.getOpcode() == ISD::SPLAT_VECTOR ? 0 : I);
5451 EVT ScalarVT = ScalarOp.getValueType();
5452
5453 // Build vector (integer) scalar operands may need implicit
5454 // truncation - do this before constant folding.
5455 if (ScalarVT.isInteger() && ScalarVT.bitsGT(InSVT))
5456 ScalarOp = getNode(ISD::TRUNCATE, DL, InSVT, ScalarOp);
5457
5458 ScalarOps.push_back(ScalarOp);
5459 }
5460
5461 // Constant fold the scalar operands.
5462 SDValue ScalarResult = getNode(Opcode, DL, SVT, ScalarOps);
5463
5464 // Legalize the (integer) scalar constant if necessary.
5465 if (LegalSVT != SVT)
5466 ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult);
5467
5468 // Scalar folding only succeeded if the result is a constant or UNDEF.
5469 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
5470 ScalarResult.getOpcode() != ISD::ConstantFP)
5471 return SDValue();
5472 ScalarResults.push_back(ScalarResult);
5473 }
5474
5475 SDValue V = NumElts.isScalable() ? getSplatVector(VT, DL, ScalarResults[0])
5476 : getBuildVector(VT, DL, ScalarResults);
5477 NewSDValueDbgMsg(V, "New node fold constant vector: ", this);
5478 return V;
5479}
5480
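[Editorial note, not part of the analyzed source] The tail of FoldConstantArithmetic above folds vector constants one lane at a time and then rebuilds the vector. A hypothetical, simplified helper in the same spirit (the name foldLanes and the shapes are illustrative only, assuming LLVMSupport headers):

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include <cassert>

// Apply one binary APInt fold to matching lanes of two constant operand
// "vectors", mirroring the per-lane loop above.
template <typename FoldFn>
llvm::SmallVector<llvm::APInt, 4>
foldLanes(llvm::ArrayRef<llvm::APInt> A, llvm::ArrayRef<llvm::APInt> B,
          FoldFn Fold) {
  assert(A.size() == B.size() && "operand lane counts must match");
  llvm::SmallVector<llvm::APInt, 4> Result;
  for (unsigned I = 0, E = A.size(); I != E; ++I)
    Result.push_back(Fold(A[I], B[I]));
  return Result;
}
// Usage: foldLanes(L, R, [](const llvm::APInt &X, const llvm::APInt &Y) { return X + Y; });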
5481SDValue SelectionDAG::foldConstantFPMath(unsigned Opcode, const SDLoc &DL,
5482 EVT VT, SDValue N1, SDValue N2) {
5483 // TODO: We don't do any constant folding for strict FP opcodes here, but we
5484 // should. That will require dealing with a potentially non-default
5485 // rounding mode, checking the "opStatus" return value from the APFloat
5486 // math calculations, and possibly other variations.
5487 ConstantFPSDNode *N1CFP = isConstOrConstSplatFP(N1, /*AllowUndefs*/ false);
5488 ConstantFPSDNode *N2CFP = isConstOrConstSplatFP(N2, /*AllowUndefs*/ false);
5489 if (N1CFP && N2CFP) {
5490 APFloat C1 = N1CFP->getValueAPF(); // make copy
5491 const APFloat &C2 = N2CFP->getValueAPF();
5492 switch (Opcode) {
5493 case ISD::FADD:
5494 C1.add(C2, APFloat::rmNearestTiesToEven);
5495 return getConstantFP(C1, DL, VT);
5496 case ISD::FSUB:
5497 C1.subtract(C2, APFloat::rmNearestTiesToEven);
5498 return getConstantFP(C1, DL, VT);
5499 case ISD::FMUL:
5500 C1.multiply(C2, APFloat::rmNearestTiesToEven);
5501 return getConstantFP(C1, DL, VT);
5502 case ISD::FDIV:
5503 C1.divide(C2, APFloat::rmNearestTiesToEven);
5504 return getConstantFP(C1, DL, VT);
5505 case ISD::FREM:
5506 C1.mod(C2);
5507 return getConstantFP(C1, DL, VT);
5508 case ISD::FCOPYSIGN:
5509 C1.copySign(C2);
5510 return getConstantFP(C1, DL, VT);
5511 case ISD::FMINNUM:
5512 return getConstantFP(minnum(C1, C2), DL, VT);
5513 case ISD::FMAXNUM:
5514 return getConstantFP(maxnum(C1, C2), DL, VT);
5515 case ISD::FMINIMUM:
5516 return getConstantFP(minimum(C1, C2), DL, VT);
5517 case ISD::FMAXIMUM:
5518 return getConstantFP(maximum(C1, C2), DL, VT);
5519 default: break;
5520 }
5521 }
5522 if (N1CFP && Opcode == ISD::FP_ROUND) {
5523 APFloat C1 = N1CFP->getValueAPF(); // make copy
5524 bool Unused;
5525 // This can return overflow, underflow, or inexact; we don't care.
5526 // FIXME need to be more flexible about rounding mode.
5527 (void) C1.convert(EVTToAPFloatSemantics(VT), APFloat::rmNearestTiesToEven,
5528 &Unused);
5529 return getConstantFP(C1, DL, VT);
5530 }
5531
5532 switch (Opcode) {
5533 case ISD::FSUB:
5534 // -0.0 - undef --> undef (consistent with "fneg undef")
5535 if (ConstantFPSDNode *N1C = isConstOrConstSplatFP(N1, /*AllowUndefs*/ true))
5536 if (N1C && N1C->getValueAPF().isNegZero() && N2.isUndef())
5537 return getUNDEF(VT);
5538 LLVM_FALLTHROUGH;
5539
5540 case ISD::FADD:
5541 case ISD::FMUL:
5542 case ISD::FDIV:
5543 case ISD::FREM:
5544 // If both operands are undef, the result is undef. If 1 operand is undef,
5545 // the result is NaN. This should match the behavior of the IR optimizer.
5546 if (N1.isUndef() && N2.isUndef())
5547 return getUNDEF(VT);
5548 if (N1.isUndef() || N2.isUndef())
5549 return getConstantFP(APFloat::getNaN(EVTToAPFloatSemantics(VT)), DL, VT);
5550 }
5551 return SDValue();
5552}
5553
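[Editorial note, not part of the analyzed source] foldConstantFPMath above performs the binary FP folds directly on APFloat copies with the default round-to-nearest-even mode. A minimal standalone sketch of the FADD case, with hypothetical f32 constants, assuming only LLVMSupport is available:

#include "llvm/ADT/APFloat.h"
#include <cassert>

int main() {
  using llvm::APFloat;
  // ISD::FADD fold as performed above: in-place add on a copy of the first
  // constant, which would then become the new ConstantFP node.
  APFloat C1(1.5f), C2(2.25f);
  C1.add(C2, APFloat::rmNearestTiesToEven);
  assert(C1.convertToFloat() == 3.75f);  // exactly representable, no rounding
  return 0;
}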
5554SDValue SelectionDAG::getAssertAlign(const SDLoc &DL, SDValue Val, Align A) {
5555 assert(Val.getValueType().isInteger() && "Invalid AssertAlign!");
5556
5557 // There's no need to assert on a byte-aligned pointer. All pointers are at
5558 // least byte aligned.
5559 if (A == Align(1))
5560 return Val;
5561
5562 FoldingSetNodeID ID;
5563 AddNodeIDNode(ID, ISD::AssertAlign, getVTList(Val.getValueType()), {Val});
5564 ID.AddInteger(A.value());
5565
5566 void *IP = nullptr;
5567 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
5568 return SDValue(E, 0);
5569
5570 auto *N = newSDNode<AssertAlignSDNo