Bug Summary

File: llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
Warning: line 6881, column 36
Called C++ object pointer is null
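
The flagged call site (line 6881, column 36) lies outside the excerpt reproduced below, but the warning class is simple: on some path the analyzer can prove that a member function is invoked through a pointer whose value is null. The sketch below is a minimal, hypothetical illustration of that pattern and of the guard that satisfies the checker; the function names and the use of BuildVectorSDNode are assumptions for illustration only, not the actual code at line 6881.

// Illustrative only, not the code the analyzer flagged. dyn_cast<> returns
// nullptr when N is not a BuildVectorSDNode, so the unchecked member call is
// exactly what "Called C++ object pointer is null" reports.
static unsigned countBuildVectorOps(const SDNode *N) {
  const auto *BV = dyn_cast<BuildVectorSDNode>(N);
  return BV->getNumOperands(); // warning: BV may be null here
}

// Guarded form the checker accepts: test the dyn_cast result before use.
static unsigned countBuildVectorOpsGuarded(const SDNode *N) {
  if (const auto *BV = dyn_cast<BuildVectorSDNode>(N))
    return BV->getNumOperands();
  return 0;
}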

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name SelectionDAG.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/CodeGen/SelectionDAG -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/CodeGen/SelectionDAG -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/CodeGen/SelectionDAG -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/include -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/include -D NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/CodeGen/SelectionDAG -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e=. -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-09-04-040900-46481-1 -x c++ /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp

/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp

1//===- SelectionDAG.cpp - Implement the SelectionDAG data structures ------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This implements the SelectionDAG class.
10//
11//===----------------------------------------------------------------------===//
12
13#include "llvm/CodeGen/SelectionDAG.h"
14#include "SDNodeDbgValue.h"
15#include "llvm/ADT/APFloat.h"
16#include "llvm/ADT/APInt.h"
17#include "llvm/ADT/APSInt.h"
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/BitVector.h"
20#include "llvm/ADT/FoldingSet.h"
21#include "llvm/ADT/None.h"
22#include "llvm/ADT/STLExtras.h"
23#include "llvm/ADT/SmallPtrSet.h"
24#include "llvm/ADT/SmallVector.h"
25#include "llvm/ADT/Triple.h"
26#include "llvm/ADT/Twine.h"
27#include "llvm/Analysis/BlockFrequencyInfo.h"
28#include "llvm/Analysis/MemoryLocation.h"
29#include "llvm/Analysis/ProfileSummaryInfo.h"
30#include "llvm/Analysis/ValueTracking.h"
31#include "llvm/CodeGen/FunctionLoweringInfo.h"
32#include "llvm/CodeGen/ISDOpcodes.h"
33#include "llvm/CodeGen/MachineBasicBlock.h"
34#include "llvm/CodeGen/MachineConstantPool.h"
35#include "llvm/CodeGen/MachineFrameInfo.h"
36#include "llvm/CodeGen/MachineFunction.h"
37#include "llvm/CodeGen/MachineMemOperand.h"
38#include "llvm/CodeGen/RuntimeLibcalls.h"
39#include "llvm/CodeGen/SelectionDAGAddressAnalysis.h"
40#include "llvm/CodeGen/SelectionDAGNodes.h"
41#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
42#include "llvm/CodeGen/TargetFrameLowering.h"
43#include "llvm/CodeGen/TargetLowering.h"
44#include "llvm/CodeGen/TargetRegisterInfo.h"
45#include "llvm/CodeGen/TargetSubtargetInfo.h"
46#include "llvm/CodeGen/ValueTypes.h"
47#include "llvm/IR/Constant.h"
48#include "llvm/IR/Constants.h"
49#include "llvm/IR/DataLayout.h"
50#include "llvm/IR/DebugInfoMetadata.h"
51#include "llvm/IR/DebugLoc.h"
52#include "llvm/IR/DerivedTypes.h"
53#include "llvm/IR/Function.h"
54#include "llvm/IR/GlobalValue.h"
55#include "llvm/IR/Metadata.h"
56#include "llvm/IR/Type.h"
57#include "llvm/IR/Value.h"
58#include "llvm/Support/Casting.h"
59#include "llvm/Support/CodeGen.h"
60#include "llvm/Support/Compiler.h"
61#include "llvm/Support/Debug.h"
62#include "llvm/Support/ErrorHandling.h"
63#include "llvm/Support/KnownBits.h"
64#include "llvm/Support/MachineValueType.h"
65#include "llvm/Support/ManagedStatic.h"
66#include "llvm/Support/MathExtras.h"
67#include "llvm/Support/Mutex.h"
68#include "llvm/Support/raw_ostream.h"
69#include "llvm/Target/TargetMachine.h"
70#include "llvm/Target/TargetOptions.h"
71#include "llvm/Transforms/Utils/SizeOpts.h"
72#include <algorithm>
73#include <cassert>
74#include <cstdint>
75#include <cstdlib>
76#include <limits>
77#include <set>
78#include <string>
79#include <utility>
80#include <vector>
81
82using namespace llvm;
83
84/// makeVTList - Return an instance of the SDVTList struct initialized with the
85/// specified members.
86static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
87 SDVTList Res = {VTs, NumVTs};
88 return Res;
89}
90
91// Default null implementations of the callbacks.
92void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
93void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}
94void SelectionDAG::DAGUpdateListener::NodeInserted(SDNode *) {}
95
96void SelectionDAG::DAGNodeDeletedListener::anchor() {}
97
98#define DEBUG_TYPE "selectiondag"
99
100static cl::opt<bool> EnableMemCpyDAGOpt("enable-memcpy-dag-opt",
101 cl::Hidden, cl::init(true),
102 cl::desc("Gang up loads and stores generated by inlining of memcpy"));
103
104static cl::opt<int> MaxLdStGlue("ldstmemcpy-glue-max",
105 cl::desc("Number limit for gluing ld/st of memcpy."),
106 cl::Hidden, cl::init(0));
107
108static void NewSDValueDbgMsg(SDValue V, StringRef Msg, SelectionDAG *G) {
109 LLVM_DEBUG(dbgs() << Msg; V.getNode()->dump(G););
110}
111
112//===----------------------------------------------------------------------===//
113// ConstantFPSDNode Class
114//===----------------------------------------------------------------------===//
115
116/// isExactlyValue - We don't rely on operator== working on double values, as
117/// it returns true for things that are clearly not equal, like -0.0 and 0.0.
118/// As such, this method can be used to do an exact bit-for-bit comparison of
119/// two floating point values.
120bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
121 return getValueAPF().bitwiseIsEqual(V);
122}
123
124bool ConstantFPSDNode::isValueValidForType(EVT VT,
125 const APFloat& Val) {
126 assert(VT.isFloatingPoint() && "Can only convert between FP types");
127
128 // convert modifies in place, so make a copy.
129 APFloat Val2 = APFloat(Val);
130 bool losesInfo;
131 (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
132 APFloat::rmNearestTiesToEven,
133 &losesInfo);
134 return !losesInfo;
135}
136
137//===----------------------------------------------------------------------===//
138// ISD Namespace
139//===----------------------------------------------------------------------===//
140
141bool ISD::isConstantSplatVector(const SDNode *N, APInt &SplatVal) {
142 if (N->getOpcode() == ISD::SPLAT_VECTOR) {
143 unsigned EltSize =
144 N->getValueType(0).getVectorElementType().getSizeInBits();
145 if (auto *Op0 = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
146 SplatVal = Op0->getAPIntValue().truncOrSelf(EltSize);
147 return true;
148 }
149 if (auto *Op0 = dyn_cast<ConstantFPSDNode>(N->getOperand(0))) {
150 SplatVal = Op0->getValueAPF().bitcastToAPInt().truncOrSelf(EltSize);
151 return true;
152 }
153 }
154
155 auto *BV = dyn_cast<BuildVectorSDNode>(N);
156 if (!BV)
157 return false;
158
159 APInt SplatUndef;
160 unsigned SplatBitSize;
161 bool HasUndefs;
162 unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
163 return BV->isConstantSplat(SplatVal, SplatUndef, SplatBitSize, HasUndefs,
164 EltSize) &&
165 EltSize == SplatBitSize;
166}
167
168// FIXME: AllOnes and AllZeros duplicate a lot of code. Could these be
169// specializations of the more general isConstantSplatVector()?
170
171bool ISD::isConstantSplatVectorAllOnes(const SDNode *N, bool BuildVectorOnly) {
172 // Look through a bit convert.
173 while (N->getOpcode() == ISD::BITCAST)
174 N = N->getOperand(0).getNode();
175
176 if (!BuildVectorOnly && N->getOpcode() == ISD::SPLAT_VECTOR) {
177 APInt SplatVal;
178 return isConstantSplatVector(N, SplatVal) && SplatVal.isAllOnesValue();
179 }
180
181 if (N->getOpcode() != ISD::BUILD_VECTOR) return false;
182
183 unsigned i = 0, e = N->getNumOperands();
184
185 // Skip over all of the undef values.
186 while (i != e && N->getOperand(i).isUndef())
187 ++i;
188
189 // Do not accept an all-undef vector.
190 if (i == e) return false;
191
192 // Do not accept build_vectors that aren't all constants or which have non-~0
193 // elements. We have to be a bit careful here, as the type of the constant
194 // may not be the same as the type of the vector elements due to type
195 // legalization (the elements are promoted to a legal type for the target and
196 // a vector of a type may be legal when the base element type is not).
197 // We only want to check enough bits to cover the vector elements, because
198 // we care if the resultant vector is all ones, not whether the individual
199 // constants are.
200 SDValue NotZero = N->getOperand(i);
201 unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
202 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
203 if (CN->getAPIntValue().countTrailingOnes() < EltSize)
204 return false;
205 } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
206 if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
207 return false;
208 } else
209 return false;
210
211 // Okay, we have at least one ~0 value, check to see if the rest match or are
212 // undefs. Even with the above element type twiddling, this should be OK, as
213 // the same type legalization should have applied to all the elements.
214 for (++i; i != e; ++i)
215 if (N->getOperand(i) != NotZero && !N->getOperand(i).isUndef())
216 return false;
217 return true;
218}
219
220bool ISD::isConstantSplatVectorAllZeros(const SDNode *N, bool BuildVectorOnly) {
221 // Look through a bit convert.
222 while (N->getOpcode() == ISD::BITCAST)
223 N = N->getOperand(0).getNode();
224
225 if (!BuildVectorOnly && N->getOpcode() == ISD::SPLAT_VECTOR) {
226 APInt SplatVal;
227 return isConstantSplatVector(N, SplatVal) && SplatVal.isNullValue();
228 }
229
230 if (N->getOpcode() != ISD::BUILD_VECTOR) return false;
231
232 bool IsAllUndef = true;
233 for (const SDValue &Op : N->op_values()) {
234 if (Op.isUndef())
235 continue;
236 IsAllUndef = false;
237 // Do not accept build_vectors that aren't all constants or which have non-0
238 // elements. We have to be a bit careful here, as the type of the constant
239 // may not be the same as the type of the vector elements due to type
240 // legalization (the elements are promoted to a legal type for the target
241 // and a vector of a type may be legal when the base element type is not).
242 // We only want to check enough bits to cover the vector elements, because
243 // we care if the resultant vector is all zeros, not whether the individual
244 // constants are.
245 unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
246 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op)) {
247 if (CN->getAPIntValue().countTrailingZeros() < EltSize)
248 return false;
249 } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Op)) {
250 if (CFPN->getValueAPF().bitcastToAPInt().countTrailingZeros() < EltSize)
251 return false;
252 } else
253 return false;
254 }
255
256 // Do not accept an all-undef vector.
257 if (IsAllUndef)
258 return false;
259 return true;
260}
261
262bool ISD::isBuildVectorAllOnes(const SDNode *N) {
263 return isConstantSplatVectorAllOnes(N, /*BuildVectorOnly*/ true);
264}
265
266bool ISD::isBuildVectorAllZeros(const SDNode *N) {
267 return isConstantSplatVectorAllZeros(N, /*BuildVectorOnly*/ true);
268}
269
270bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) {
271 if (N->getOpcode() != ISD::BUILD_VECTOR)
272 return false;
273
274 for (const SDValue &Op : N->op_values()) {
275 if (Op.isUndef())
276 continue;
277 if (!isa<ConstantSDNode>(Op))
278 return false;
279 }
280 return true;
281}
282
283bool ISD::isBuildVectorOfConstantFPSDNodes(const SDNode *N) {
284 if (N->getOpcode() != ISD::BUILD_VECTOR)
285 return false;
286
287 for (const SDValue &Op : N->op_values()) {
288 if (Op.isUndef())
289 continue;
290 if (!isa<ConstantFPSDNode>(Op))
291 return false;
292 }
293 return true;
294}
295
296bool ISD::allOperandsUndef(const SDNode *N) {
297 // Return false if the node has no operands.
298 // This is "logically inconsistent" with the definition of "all" but
299 // is probably the desired behavior.
300 if (N->getNumOperands() == 0)
301 return false;
302 return all_of(N->op_values(), [](SDValue Op) { return Op.isUndef(); });
303}
304
305bool ISD::matchUnaryPredicate(SDValue Op,
306 std::function<bool(ConstantSDNode *)> Match,
307 bool AllowUndefs) {
308 // FIXME: Add support for scalar UNDEF cases?
309 if (auto *Cst = dyn_cast<ConstantSDNode>(Op))
310 return Match(Cst);
311
312 // FIXME: Add support for vector UNDEF cases?
313 if (ISD::BUILD_VECTOR != Op.getOpcode() &&
314 ISD::SPLAT_VECTOR != Op.getOpcode())
315 return false;
316
317 EVT SVT = Op.getValueType().getScalarType();
318 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
319 if (AllowUndefs && Op.getOperand(i).isUndef()) {
320 if (!Match(nullptr))
321 return false;
322 continue;
323 }
324
325 auto *Cst = dyn_cast<ConstantSDNode>(Op.getOperand(i));
326 if (!Cst || Cst->getValueType(0) != SVT || !Match(Cst))
327 return false;
328 }
329 return true;
330}
331
332bool ISD::matchBinaryPredicate(
333 SDValue LHS, SDValue RHS,
334 std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match,
335 bool AllowUndefs, bool AllowTypeMismatch) {
336 if (!AllowTypeMismatch && LHS.getValueType() != RHS.getValueType())
337 return false;
338
339 // TODO: Add support for scalar UNDEF cases?
340 if (auto *LHSCst = dyn_cast<ConstantSDNode>(LHS))
341 if (auto *RHSCst = dyn_cast<ConstantSDNode>(RHS))
342 return Match(LHSCst, RHSCst);
343
344 // TODO: Add support for vector UNDEF cases?
345 if (LHS.getOpcode() != RHS.getOpcode() ||
346 (LHS.getOpcode() != ISD::BUILD_VECTOR &&
347 LHS.getOpcode() != ISD::SPLAT_VECTOR))
348 return false;
349
350 EVT SVT = LHS.getValueType().getScalarType();
351 for (unsigned i = 0, e = LHS.getNumOperands(); i != e; ++i) {
352 SDValue LHSOp = LHS.getOperand(i);
353 SDValue RHSOp = RHS.getOperand(i);
354 bool LHSUndef = AllowUndefs && LHSOp.isUndef();
355 bool RHSUndef = AllowUndefs && RHSOp.isUndef();
356 auto *LHSCst = dyn_cast<ConstantSDNode>(LHSOp);
357 auto *RHSCst = dyn_cast<ConstantSDNode>(RHSOp);
358 if ((!LHSCst && !LHSUndef) || (!RHSCst && !RHSUndef))
359 return false;
360 if (!AllowTypeMismatch && (LHSOp.getValueType() != SVT ||
361 LHSOp.getValueType() != RHSOp.getValueType()))
362 return false;
363 if (!Match(LHSCst, RHSCst))
364 return false;
365 }
366 return true;
367}
368
369ISD::NodeType ISD::getVecReduceBaseOpcode(unsigned VecReduceOpcode) {
370 switch (VecReduceOpcode) {
371 default:
372 llvm_unreachable("Expected VECREDUCE opcode");
373 case ISD::VECREDUCE_FADD:
374 case ISD::VECREDUCE_SEQ_FADD:
375 return ISD::FADD;
376 case ISD::VECREDUCE_FMUL:
377 case ISD::VECREDUCE_SEQ_FMUL:
378 return ISD::FMUL;
379 case ISD::VECREDUCE_ADD:
380 return ISD::ADD;
381 case ISD::VECREDUCE_MUL:
382 return ISD::MUL;
383 case ISD::VECREDUCE_AND:
384 return ISD::AND;
385 case ISD::VECREDUCE_OR:
386 return ISD::OR;
387 case ISD::VECREDUCE_XOR:
388 return ISD::XOR;
389 case ISD::VECREDUCE_SMAX:
390 return ISD::SMAX;
391 case ISD::VECREDUCE_SMIN:
392 return ISD::SMIN;
393 case ISD::VECREDUCE_UMAX:
394 return ISD::UMAX;
395 case ISD::VECREDUCE_UMIN:
396 return ISD::UMIN;
397 case ISD::VECREDUCE_FMAX:
398 return ISD::FMAXNUM;
399 case ISD::VECREDUCE_FMIN:
400 return ISD::FMINNUM;
401 }
402}
403
404bool ISD::isVPOpcode(unsigned Opcode) {
405 switch (Opcode) {
406 default:
407 return false;
408#define BEGIN_REGISTER_VP_SDNODE(SDOPC, ...) \
409 case ISD::SDOPC: \
410 return true;
411#include "llvm/IR/VPIntrinsics.def"
412 }
413}
414
415/// The operand position of the vector mask.
416Optional<unsigned> ISD::getVPMaskIdx(unsigned Opcode) {
417 switch (Opcode) {
418 default:
419 return None;
420#define BEGIN_REGISTER_VP_SDNODE(SDOPC, LEGALPOS, TDNAME, MASKPOS, ...) \
421 case ISD::SDOPC: \
422 return MASKPOS;
423#include "llvm/IR/VPIntrinsics.def"
424 }
425}
426
427/// The operand position of the explicit vector length parameter.
428Optional<unsigned> ISD::getVPExplicitVectorLengthIdx(unsigned Opcode) {
429 switch (Opcode) {
430 default:
431 return None;
432#define BEGIN_REGISTER_VP_SDNODE(SDOPC, LEGALPOS, TDNAME, MASKPOS, EVLPOS) \
433 case ISD::SDOPC: \
434 return EVLPOS;
435#include "llvm/IR/VPIntrinsics.def"
436 }
437}
438
439ISD::NodeType ISD::getExtForLoadExtType(bool IsFP, ISD::LoadExtType ExtType) {
440 switch (ExtType) {
441 case ISD::EXTLOAD:
442 return IsFP ? ISD::FP_EXTEND : ISD::ANY_EXTEND;
443 case ISD::SEXTLOAD:
444 return ISD::SIGN_EXTEND;
445 case ISD::ZEXTLOAD:
446 return ISD::ZERO_EXTEND;
447 default:
448 break;
449 }
450
451 llvm_unreachable("Invalid LoadExtType");
452}
453
454ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
455 // To perform this operation, we just need to swap the L and G bits of the
456 // operation.
457 unsigned OldL = (Operation >> 2) & 1;
458 unsigned OldG = (Operation >> 1) & 1;
459 return ISD::CondCode((Operation & ~6) | // Keep the N, U, E bits
460 (OldL << 1) | // New G bit
461 (OldG << 2)); // New L bit.
462}
463
464static ISD::CondCode getSetCCInverseImpl(ISD::CondCode Op, bool isIntegerLike) {
465 unsigned Operation = Op;
466 if (isIntegerLike)
467 Operation ^= 7; // Flip L, G, E bits, but not U.
468 else
469 Operation ^= 15; // Flip all of the condition bits.
470
471 if (Operation > ISD::SETTRUE2)
472 Operation &= ~8; // Don't let N and U bits get set.
473
474 return ISD::CondCode(Operation);
475}
476
477ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, EVT Type) {
478 return getSetCCInverseImpl(Op, Type.isInteger());
479}
480
481ISD::CondCode ISD::GlobalISel::getSetCCInverse(ISD::CondCode Op,
482 bool isIntegerLike) {
483 return getSetCCInverseImpl(Op, isIntegerLike);
484}
485
486/// For an integer comparison, return 1 if the comparison is a signed operation
487/// and 2 if the result is an unsigned comparison. Return zero if the operation
488/// does not depend on the sign of the input (setne and seteq).
489static int isSignedOp(ISD::CondCode Opcode) {
490 switch (Opcode) {
491 default: llvm_unreachable("Illegal integer setcc operation!");
492 case ISD::SETEQ:
493 case ISD::SETNE: return 0;
494 case ISD::SETLT:
495 case ISD::SETLE:
496 case ISD::SETGT:
497 case ISD::SETGE: return 1;
498 case ISD::SETULT:
499 case ISD::SETULE:
500 case ISD::SETUGT:
501 case ISD::SETUGE: return 2;
502 }
503}
504
505ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
506 EVT Type) {
507 bool IsInteger = Type.isInteger();
508 if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
509 // Cannot fold a signed integer setcc with an unsigned integer setcc.
510 return ISD::SETCC_INVALID;
511
512 unsigned Op = Op1 | Op2; // Combine all of the condition bits.
513
514 // If the N and U bits get set, then the resultant comparison DOES suddenly
515 // care about orderedness, and it is true when ordered.
516 if (Op > ISD::SETTRUE2)
517 Op &= ~16; // Clear the U bit if the N bit is set.
518
519 // Canonicalize illegal integer setcc's.
520 if (IsInteger && Op == ISD::SETUNE) // e.g. SETUGT | SETULT
521 Op = ISD::SETNE;
522
523 return ISD::CondCode(Op);
524}
525
526ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
527 EVT Type) {
528 bool IsInteger = Type.isInteger();
529 if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
530 // Cannot fold a signed setcc with an unsigned setcc.
531 return ISD::SETCC_INVALID;
532
533 // Combine all of the condition bits.
534 ISD::CondCode Result = ISD::CondCode(Op1 & Op2);
535
536 // Canonicalize illegal integer setcc's.
537 if (IsInteger) {
538 switch (Result) {
539 default: break;
540 case ISD::SETUO : Result = ISD::SETFALSE; break; // SETUGT & SETULT
541 case ISD::SETOEQ: // SETEQ & SETU[LG]E
542 case ISD::SETUEQ: Result = ISD::SETEQ ; break; // SETUGE & SETULE
543 case ISD::SETOLT: Result = ISD::SETULT ; break; // SETULT & SETNE
544 case ISD::SETOGT: Result = ISD::SETUGT ; break; // SETUGT & SETNE
545 }
546 }
547
548 return Result;
549}
550
551//===----------------------------------------------------------------------===//
552// SDNode Profile Support
553//===----------------------------------------------------------------------===//
554
555/// AddNodeIDOpcode - Add the node opcode to the NodeID data.
556static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC) {
557 ID.AddInteger(OpC);
558}
559
560/// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
561/// solely with their pointer.
562static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
563 ID.AddPointer(VTList.VTs);
564}
565
566/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
567static void AddNodeIDOperands(FoldingSetNodeID &ID,
568 ArrayRef<SDValue> Ops) {
569 for (auto& Op : Ops) {
570 ID.AddPointer(Op.getNode());
571 ID.AddInteger(Op.getResNo());
572 }
573}
574
575/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
576static void AddNodeIDOperands(FoldingSetNodeID &ID,
577 ArrayRef<SDUse> Ops) {
578 for (auto& Op : Ops) {
579 ID.AddPointer(Op.getNode());
580 ID.AddInteger(Op.getResNo());
581 }
582}
583
584static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned short OpC,
585 SDVTList VTList, ArrayRef<SDValue> OpList) {
586 AddNodeIDOpcode(ID, OpC);
587 AddNodeIDValueTypes(ID, VTList);
588 AddNodeIDOperands(ID, OpList);
589}
590
591/// If this is an SDNode with special info, add this info to the NodeID data.
592static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
593 switch (N->getOpcode()) {
594 case ISD::TargetExternalSymbol:
595 case ISD::ExternalSymbol:
596 case ISD::MCSymbol:
597 llvm_unreachable("Should only be used on nodes with operands");
598 default: break; // Normal nodes don't need extra info.
599 case ISD::TargetConstant:
600 case ISD::Constant: {
601 const ConstantSDNode *C = cast<ConstantSDNode>(N);
602 ID.AddPointer(C->getConstantIntValue());
603 ID.AddBoolean(C->isOpaque());
604 break;
605 }
606 case ISD::TargetConstantFP:
607 case ISD::ConstantFP:
608 ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
609 break;
610 case ISD::TargetGlobalAddress:
611 case ISD::GlobalAddress:
612 case ISD::TargetGlobalTLSAddress:
613 case ISD::GlobalTLSAddress: {
614 const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
615 ID.AddPointer(GA->getGlobal());
616 ID.AddInteger(GA->getOffset());
617 ID.AddInteger(GA->getTargetFlags());
618 break;
619 }
620 case ISD::BasicBlock:
621 ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
622 break;
623 case ISD::Register:
624 ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
625 break;
626 case ISD::RegisterMask:
627 ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
628 break;
629 case ISD::SRCVALUE:
630 ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
631 break;
632 case ISD::FrameIndex:
633 case ISD::TargetFrameIndex:
634 ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
635 break;
636 case ISD::LIFETIME_START:
637 case ISD::LIFETIME_END:
638 if (cast<LifetimeSDNode>(N)->hasOffset()) {
639 ID.AddInteger(cast<LifetimeSDNode>(N)->getSize());
640 ID.AddInteger(cast<LifetimeSDNode>(N)->getOffset());
641 }
642 break;
643 case ISD::PSEUDO_PROBE:
644 ID.AddInteger(cast<PseudoProbeSDNode>(N)->getGuid());
645 ID.AddInteger(cast<PseudoProbeSDNode>(N)->getIndex());
646 ID.AddInteger(cast<PseudoProbeSDNode>(N)->getAttributes());
647 break;
648 case ISD::JumpTable:
649 case ISD::TargetJumpTable:
650 ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
651 ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
652 break;
653 case ISD::ConstantPool:
654 case ISD::TargetConstantPool: {
655 const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
656 ID.AddInteger(CP->getAlign().value());
657 ID.AddInteger(CP->getOffset());
658 if (CP->isMachineConstantPoolEntry())
659 CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
660 else
661 ID.AddPointer(CP->getConstVal());
662 ID.AddInteger(CP->getTargetFlags());
663 break;
664 }
665 case ISD::TargetIndex: {
666 const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
667 ID.AddInteger(TI->getIndex());
668 ID.AddInteger(TI->getOffset());
669 ID.AddInteger(TI->getTargetFlags());
670 break;
671 }
672 case ISD::LOAD: {
673 const LoadSDNode *LD = cast<LoadSDNode>(N);
674 ID.AddInteger(LD->getMemoryVT().getRawBits());
675 ID.AddInteger(LD->getRawSubclassData());
676 ID.AddInteger(LD->getPointerInfo().getAddrSpace());
677 break;
678 }
679 case ISD::STORE: {
680 const StoreSDNode *ST = cast<StoreSDNode>(N);
681 ID.AddInteger(ST->getMemoryVT().getRawBits());
682 ID.AddInteger(ST->getRawSubclassData());
683 ID.AddInteger(ST->getPointerInfo().getAddrSpace());
684 break;
685 }
686 case ISD::VP_LOAD: {
687 const VPLoadSDNode *ELD = cast<VPLoadSDNode>(N);
688 ID.AddInteger(ELD->getMemoryVT().getRawBits());
689 ID.AddInteger(ELD->getRawSubclassData());
690 ID.AddInteger(ELD->getPointerInfo().getAddrSpace());
691 break;
692 }
693 case ISD::VP_STORE: {
694 const VPStoreSDNode *EST = cast<VPStoreSDNode>(N);
695 ID.AddInteger(EST->getMemoryVT().getRawBits());
696 ID.AddInteger(EST->getRawSubclassData());
697 ID.AddInteger(EST->getPointerInfo().getAddrSpace());
698 break;
699 }
700 case ISD::VP_GATHER: {
701 const VPGatherSDNode *EG = cast<VPGatherSDNode>(N);
702 ID.AddInteger(EG->getMemoryVT().getRawBits());
703 ID.AddInteger(EG->getRawSubclassData());
704 ID.AddInteger(EG->getPointerInfo().getAddrSpace());
705 break;
706 }
707 case ISD::VP_SCATTER: {
708 const VPScatterSDNode *ES = cast<VPScatterSDNode>(N);
709 ID.AddInteger(ES->getMemoryVT().getRawBits());
710 ID.AddInteger(ES->getRawSubclassData());
711 ID.AddInteger(ES->getPointerInfo().getAddrSpace());
712 break;
713 }
714 case ISD::MLOAD: {
715 const MaskedLoadSDNode *MLD = cast<MaskedLoadSDNode>(N);
716 ID.AddInteger(MLD->getMemoryVT().getRawBits());
717 ID.AddInteger(MLD->getRawSubclassData());
718 ID.AddInteger(MLD->getPointerInfo().getAddrSpace());
719 break;
720 }
721 case ISD::MSTORE: {
722 const MaskedStoreSDNode *MST = cast<MaskedStoreSDNode>(N);
723 ID.AddInteger(MST->getMemoryVT().getRawBits());
724 ID.AddInteger(MST->getRawSubclassData());
725 ID.AddInteger(MST->getPointerInfo().getAddrSpace());
726 break;
727 }
728 case ISD::MGATHER: {
729 const MaskedGatherSDNode *MG = cast<MaskedGatherSDNode>(N);
730 ID.AddInteger(MG->getMemoryVT().getRawBits());
731 ID.AddInteger(MG->getRawSubclassData());
732 ID.AddInteger(MG->getPointerInfo().getAddrSpace());
733 break;
734 }
735 case ISD::MSCATTER: {
736 const MaskedScatterSDNode *MS = cast<MaskedScatterSDNode>(N);
737 ID.AddInteger(MS->getMemoryVT().getRawBits());
738 ID.AddInteger(MS->getRawSubclassData());
739 ID.AddInteger(MS->getPointerInfo().getAddrSpace());
740 break;
741 }
742 case ISD::ATOMIC_CMP_SWAP:
743 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
744 case ISD::ATOMIC_SWAP:
745 case ISD::ATOMIC_LOAD_ADD:
746 case ISD::ATOMIC_LOAD_SUB:
747 case ISD::ATOMIC_LOAD_AND:
748 case ISD::ATOMIC_LOAD_CLR:
749 case ISD::ATOMIC_LOAD_OR:
750 case ISD::ATOMIC_LOAD_XOR:
751 case ISD::ATOMIC_LOAD_NAND:
752 case ISD::ATOMIC_LOAD_MIN:
753 case ISD::ATOMIC_LOAD_MAX:
754 case ISD::ATOMIC_LOAD_UMIN:
755 case ISD::ATOMIC_LOAD_UMAX:
756 case ISD::ATOMIC_LOAD:
757 case ISD::ATOMIC_STORE: {
758 const AtomicSDNode *AT = cast<AtomicSDNode>(N);
759 ID.AddInteger(AT->getMemoryVT().getRawBits());
760 ID.AddInteger(AT->getRawSubclassData());
761 ID.AddInteger(AT->getPointerInfo().getAddrSpace());
762 break;
763 }
764 case ISD::PREFETCH: {
765 const MemSDNode *PF = cast<MemSDNode>(N);
766 ID.AddInteger(PF->getPointerInfo().getAddrSpace());
767 break;
768 }
769 case ISD::VECTOR_SHUFFLE: {
770 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
771 for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
772 i != e; ++i)
773 ID.AddInteger(SVN->getMaskElt(i));
774 break;
775 }
776 case ISD::TargetBlockAddress:
777 case ISD::BlockAddress: {
778 const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
779 ID.AddPointer(BA->getBlockAddress());
780 ID.AddInteger(BA->getOffset());
781 ID.AddInteger(BA->getTargetFlags());
782 break;
783 }
784 } // end switch (N->getOpcode())
785
786 // Target specific memory nodes could also have address spaces to check.
787 if (N->isTargetMemoryOpcode())
788 ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
789}
790
791/// AddNodeIDNode - Generic routine for adding a nodes info to the NodeID
792/// data.
793static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
794 AddNodeIDOpcode(ID, N->getOpcode());
795 // Add the return value info.
796 AddNodeIDValueTypes(ID, N->getVTList());
797 // Add the operand info.
798 AddNodeIDOperands(ID, N->ops());
799
800 // Handle SDNode leafs with special info.
801 AddNodeIDCustom(ID, N);
802}
803
804//===----------------------------------------------------------------------===//
805// SelectionDAG Class
806//===----------------------------------------------------------------------===//
807
808/// doNotCSE - Return true if CSE should not be performed for this node.
809static bool doNotCSE(SDNode *N) {
810 if (N->getValueType(0) == MVT::Glue)
811 return true; // Never CSE anything that produces a flag.
812
813 switch (N->getOpcode()) {
814 default: break;
815 case ISD::HANDLENODE:
816 case ISD::EH_LABEL:
817 return true; // Never CSE these nodes.
818 }
819
820 // Check that remaining values produced are not flags.
821 for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
822 if (N->getValueType(i) == MVT::Glue)
823 return true; // Never CSE anything that produces a flag.
824
825 return false;
826}
827
828/// RemoveDeadNodes - This method deletes all unreachable nodes in the
829/// SelectionDAG.
830void SelectionDAG::RemoveDeadNodes() {
831 // Create a dummy node (which is not added to allnodes), that adds a reference
832 // to the root node, preventing it from being deleted.
833 HandleSDNode Dummy(getRoot());
834
835 SmallVector<SDNode*, 128> DeadNodes;
836
837 // Add all obviously-dead nodes to the DeadNodes worklist.
838 for (SDNode &Node : allnodes())
839 if (Node.use_empty())
840 DeadNodes.push_back(&Node);
841
842 RemoveDeadNodes(DeadNodes);
843
844 // If the root changed (e.g. it was a dead load, update the root).
845 setRoot(Dummy.getValue());
846}
847
848/// RemoveDeadNodes - This method deletes the unreachable nodes in the
849/// given list, and any nodes that become unreachable as a result.
850void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {
851
852 // Process the worklist, deleting the nodes and adding their uses to the
853 // worklist.
854 while (!DeadNodes.empty()) {
855 SDNode *N = DeadNodes.pop_back_val();
856 // Skip to next node if we've already managed to delete the node. This could
857 // happen if replacing a node causes a node previously added to the node to
858 // be deleted.
859 if (N->getOpcode() == ISD::DELETED_NODE)
860 continue;
861
862 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
863 DUL->NodeDeleted(N, nullptr);
864
865 // Take the node out of the appropriate CSE map.
866 RemoveNodeFromCSEMaps(N);
867
868 // Next, brutally remove the operand list. This is safe to do, as there are
869 // no cycles in the graph.
870 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
871 SDUse &Use = *I++;
872 SDNode *Operand = Use.getNode();
873 Use.set(SDValue());
874
875 // Now that we removed this operand, see if there are no uses of it left.
876 if (Operand->use_empty())
877 DeadNodes.push_back(Operand);
878 }
879
880 DeallocateNode(N);
881 }
882}
883
884void SelectionDAG::RemoveDeadNode(SDNode *N){
885 SmallVector<SDNode*, 16> DeadNodes(1, N);
886
887 // Create a dummy node that adds a reference to the root node, preventing
888 // it from being deleted. (This matters if the root is an operand of the
889 // dead node.)
890 HandleSDNode Dummy(getRoot());
891
892 RemoveDeadNodes(DeadNodes);
893}
894
895void SelectionDAG::DeleteNode(SDNode *N) {
896 // First take this out of the appropriate CSE map.
897 RemoveNodeFromCSEMaps(N);
898
899 // Finally, remove uses due to operands of this node, remove from the
900 // AllNodes list, and delete the node.
901 DeleteNodeNotInCSEMaps(N);
902}
903
904void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
905 assert(N->getIterator() != AllNodes.begin() &&
906 "Cannot delete the entry node!");
907 assert(N->use_empty() && "Cannot delete a node that is not dead!");
908
909 // Drop all of the operands and decrement used node's use counts.
910 N->DropOperands();
911
912 DeallocateNode(N);
913}
914
915void SDDbgInfo::add(SDDbgValue *V, bool isParameter) {
916 assert(!(V->isVariadic() && isParameter));
917 if (isParameter)
918 ByvalParmDbgValues.push_back(V);
919 else
920 DbgValues.push_back(V);
921 for (const SDNode *Node : V->getSDNodes())
922 if (Node)
923 DbgValMap[Node].push_back(V);
924}
925
926void SDDbgInfo::erase(const SDNode *Node) {
927 DbgValMapType::iterator I = DbgValMap.find(Node);
928 if (I == DbgValMap.end())
929 return;
930 for (auto &Val: I->second)
931 Val->setIsInvalidated();
932 DbgValMap.erase(I);
933}
934
935void SelectionDAG::DeallocateNode(SDNode *N) {
936 // If we have operands, deallocate them.
937 removeOperands(N);
938
939 NodeAllocator.Deallocate(AllNodes.remove(N));
940
941 // Set the opcode to DELETED_NODE to help catch bugs when node
942 // memory is reallocated.
943 // FIXME: There are places in SDag that have grown a dependency on the opcode
944 // value in the released node.
945 __asan_unpoison_memory_region(&N->NodeType, sizeof(N->NodeType));
946 N->NodeType = ISD::DELETED_NODE;
947
948 // If any of the SDDbgValue nodes refer to this SDNode, invalidate
949 // them and forget about that node.
950 DbgInfo->erase(N);
951}
952
953#ifndef NDEBUG
954/// VerifySDNode - Sanity check the given SDNode. Aborts if it is invalid.
955static void VerifySDNode(SDNode *N) {
956 switch (N->getOpcode()) {
957 default:
958 break;
959 case ISD::BUILD_PAIR: {
960 EVT VT = N->getValueType(0);
961 assert(N->getNumValues() == 1 && "Too many results!");
962 assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
963 "Wrong return type!");
964 assert(N->getNumOperands() == 2 && "Wrong number of operands!");
965 assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
966 "Mismatched operand types!");
967 assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
968 "Wrong operand type!");
969 assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
970 "Wrong return type size");
971 break;
972 }
973 case ISD::BUILD_VECTOR: {
974 assert(N->getNumValues() == 1 && "Too many results!");
975 assert(N->getValueType(0).isVector() && "Wrong return type!");
976 assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
977 "Wrong number of operands!");
978 EVT EltVT = N->getValueType(0).getVectorElementType();
979 for (const SDUse &Op : N->ops()) {
980 assert((Op.getValueType() == EltVT ||
981 (EltVT.isInteger() && Op.getValueType().isInteger() &&
982 EltVT.bitsLE(Op.getValueType()))) &&
983 "Wrong operand type!");
984 assert(Op.getValueType() == N->getOperand(0).getValueType() &&
985 "Operands must all have the same type");
986 }
987 break;
988 }
989 }
990}
991#endif // NDEBUG
992
993/// Insert a newly allocated node into the DAG.
994///
995/// Handles insertion into the all nodes list and CSE map, as well as
996/// verification and other common operations when a new node is allocated.
997void SelectionDAG::InsertNode(SDNode *N) {
998 AllNodes.push_back(N);
999#ifndef NDEBUG
1000 N->PersistentId = NextPersistentId++;
1001 VerifySDNode(N);
1002#endif
1003 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
1004 DUL->NodeInserted(N);
1005}
1006
1007/// RemoveNodeFromCSEMaps - Take the specified node out of the CSE map that
1008/// correspond to it. This is useful when we're about to delete or repurpose
1009/// the node. We don't want future request for structurally identical nodes
1010/// to return N anymore.
1011bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
1012 bool Erased = false;
1013 switch (N->getOpcode()) {
1014 case ISD::HANDLENODE: return false; // noop.
1015 case ISD::CONDCODE:
1016 assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
1017 "Cond code doesn't exist!");
1018 Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr;
1019 CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr;
1020 break;
1021 case ISD::ExternalSymbol:
1022 Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
1023 break;
1024 case ISD::TargetExternalSymbol: {
1025 ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
1026 Erased = TargetExternalSymbols.erase(std::pair<std::string, unsigned>(
1027 ESN->getSymbol(), ESN->getTargetFlags()));
1028 break;
1029 }
1030 case ISD::MCSymbol: {
1031 auto *MCSN = cast<MCSymbolSDNode>(N);
1032 Erased = MCSymbols.erase(MCSN->getMCSymbol());
1033 break;
1034 }
1035 case ISD::VALUETYPE: {
1036 EVT VT = cast<VTSDNode>(N)->getVT();
1037 if (VT.isExtended()) {
1038 Erased = ExtendedValueTypeNodes.erase(VT);
1039 } else {
1040 Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr;
1041 ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr;
1042 }
1043 break;
1044 }
1045 default:
1046 // Remove it from the CSE Map.
1047 assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
1048 assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
1049 Erased = CSEMap.RemoveNode(N);
1050 break;
1051 }
1052#ifndef NDEBUG
1053 // Verify that the node was actually in one of the CSE maps, unless it has a
1054 // flag result (which cannot be CSE'd) or is one of the special cases that are
1055 // not subject to CSE.
1056 if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
1057 !N->isMachineOpcode() && !doNotCSE(N)) {
1058 N->dump(this);
1059 dbgs() << "\n";
1060 llvm_unreachable("Node is not in map!");
1061 }
1062#endif
1063 return Erased;
1064}
1065
1066/// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
1067/// maps and modified in place. Add it back to the CSE maps, unless an identical
1068/// node already exists, in which case transfer all its users to the existing
1069/// node. This transfer can potentially trigger recursive merging.
1070void
1071SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
1072 // For node types that aren't CSE'd, just act as if no identical node
1073 // already exists.
1074 if (!doNotCSE(N)) {
1075 SDNode *Existing = CSEMap.GetOrInsertNode(N);
1076 if (Existing != N) {
1077 // If there was already an existing matching node, use ReplaceAllUsesWith
1078 // to replace the dead one with the existing one. This can cause
1079 // recursive merging of other unrelated nodes down the line.
1080 ReplaceAllUsesWith(N, Existing);
1081
1082 // N is now dead. Inform the listeners and delete it.
1083 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
1084 DUL->NodeDeleted(N, Existing);
1085 DeleteNodeNotInCSEMaps(N);
1086 return;
1087 }
1088 }
1089
1090 // If the node doesn't already exist, we updated it. Inform listeners.
1091 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
1092 DUL->NodeUpdated(N);
1093}
1094
1095/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
1096/// were replaced with those specified. If this node is never memoized,
1097/// return null, otherwise return a pointer to the slot it would take. If a
1098/// node already exists with these operands, the slot will be non-null.
1099SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
1100 void *&InsertPos) {
1101 if (doNotCSE(N))
1102 return nullptr;
1103
1104 SDValue Ops[] = { Op };
1105 FoldingSetNodeID ID;
1106 AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
1107 AddNodeIDCustom(ID, N);
1108 SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
1109 if (Node)
1110 Node->intersectFlagsWith(N->getFlags());
1111 return Node;
1112}
1113
1114/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
1115/// were replaced with those specified. If this node is never memoized,
1116/// return null, otherwise return a pointer to the slot it would take. If a
1117/// node already exists with these operands, the slot will be non-null.
1118SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
1119 SDValue Op1, SDValue Op2,
1120 void *&InsertPos) {
1121 if (doNotCSE(N))
1122 return nullptr;
1123
1124 SDValue Ops[] = { Op1, Op2 };
1125 FoldingSetNodeID ID;
1126 AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
1127 AddNodeIDCustom(ID, N);
1128 SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
1129 if (Node)
1130 Node->intersectFlagsWith(N->getFlags());
1131 return Node;
1132}
1133
1134/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
1135/// were replaced with those specified. If this node is never memoized,
1136/// return null, otherwise return a pointer to the slot it would take. If a
1137/// node already exists with these operands, the slot will be non-null.
1138SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
1139 void *&InsertPos) {
1140 if (doNotCSE(N))
1141 return nullptr;
1142
1143 FoldingSetNodeID ID;
1144 AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
1145 AddNodeIDCustom(ID, N);
1146 SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
1147 if (Node)
1148 Node->intersectFlagsWith(N->getFlags());
1149 return Node;
1150}
1151
1152Align SelectionDAG::getEVTAlign(EVT VT) const {
1153 Type *Ty = VT == MVT::iPTR ?
1154 PointerType::get(Type::getInt8Ty(*getContext()), 0) :
1155 VT.getTypeForEVT(*getContext());
1156
1157 return getDataLayout().getABITypeAlign(Ty);
1158}
1159
1160// EntryNode could meaningfully have debug info if we can find it...
1161SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
1162 : TM(tm), OptLevel(OL),
1163 EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
1164 Root(getEntryNode()) {
1165 InsertNode(&EntryNode);
1166 DbgInfo = new SDDbgInfo();
1167}
1168
1169void SelectionDAG::init(MachineFunction &NewMF,
1170 OptimizationRemarkEmitter &NewORE,
1171 Pass *PassPtr, const TargetLibraryInfo *LibraryInfo,
1172 LegacyDivergenceAnalysis * Divergence,
1173 ProfileSummaryInfo *PSIin,
1174 BlockFrequencyInfo *BFIin) {
1175 MF = &NewMF;
1176 SDAGISelPass = PassPtr;
1177 ORE = &NewORE;
1178 TLI = getSubtarget().getTargetLowering();
1179 TSI = getSubtarget().getSelectionDAGInfo();
1180 LibInfo = LibraryInfo;
1181 Context = &MF->getFunction().getContext();
1182 DA = Divergence;
1183 PSI = PSIin;
1184 BFI = BFIin;
1185}
1186
1187SelectionDAG::~SelectionDAG() {
1188 assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
1189 allnodes_clear();
1190 OperandRecycler.clear(OperandAllocator);
1191 delete DbgInfo;
1192}
1193
1194bool SelectionDAG::shouldOptForSize() const {
1195 return MF->getFunction().hasOptSize() ||
1196 llvm::shouldOptimizeForSize(FLI->MBB->getBasicBlock(), PSI, BFI);
1197}
1198
1199void SelectionDAG::allnodes_clear() {
1200 assert(&*AllNodes.begin() == &EntryNode);
1201 AllNodes.remove(AllNodes.begin());
1202 while (!AllNodes.empty())
1203 DeallocateNode(&AllNodes.front());
1204#ifndef NDEBUG
1205 NextPersistentId = 0;
1206#endif
1207}
1208
1209SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
1210 void *&InsertPos) {
1211 SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
1212 if (N) {
1213 switch (N->getOpcode()) {
1214 default: break;
1215 case ISD::Constant:
1216 case ISD::ConstantFP:
1217 llvm_unreachable("Querying for Constant and ConstantFP nodes requires "
1218 "debug location. Use another overload.");
1219 }
1220 }
1221 return N;
1222}
1223
1224SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
1225 const SDLoc &DL, void *&InsertPos) {
1226 SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
1227 if (N) {
1228 switch (N->getOpcode()) {
1229 case ISD::Constant:
1230 case ISD::ConstantFP:
1231 // Erase debug location from the node if the node is used at several
1232 // different places. Do not propagate one location to all uses as it
1233 // will cause a worse single stepping debugging experience.
1234 if (N->getDebugLoc() != DL.getDebugLoc())
1235 N->setDebugLoc(DebugLoc());
1236 break;
1237 default:
1238 // When the node's point of use is located earlier in the instruction
1239 // sequence than its prior point of use, update its debug info to the
1240 // earlier location.
1241 if (DL.getIROrder() && DL.getIROrder() < N->getIROrder())
1242 N->setDebugLoc(DL.getDebugLoc());
1243 break;
1244 }
1245 }
1246 return N;
1247}
1248
1249void SelectionDAG::clear() {
1250 allnodes_clear();
1251 OperandRecycler.clear(OperandAllocator);
1252 OperandAllocator.Reset();
1253 CSEMap.clear();
1254
1255 ExtendedValueTypeNodes.clear();
1256 ExternalSymbols.clear();
1257 TargetExternalSymbols.clear();
1258 MCSymbols.clear();
1259 SDCallSiteDbgInfo.clear();
1260 std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
1261 static_cast<CondCodeSDNode*>(nullptr));
1262 std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
1263 static_cast<SDNode*>(nullptr));
1264
1265 EntryNode.UseList = nullptr;
1266 InsertNode(&EntryNode);
1267 Root = getEntryNode();
1268 DbgInfo->clear();
1269}
1270
1271SDValue SelectionDAG::getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT) {
1272 return VT.bitsGT(Op.getValueType())
1273 ? getNode(ISD::FP_EXTEND, DL, VT, Op)
1274 : getNode(ISD::FP_ROUND, DL, VT, Op, getIntPtrConstant(0, DL));
1275}
1276
1277std::pair<SDValue, SDValue>
1278SelectionDAG::getStrictFPExtendOrRound(SDValue Op, SDValue Chain,
1279 const SDLoc &DL, EVT VT) {
1280 assert(!VT.bitsEq(Op.getValueType()) &&
1281 "Strict no-op FP extend/round not allowed.");
1282 SDValue Res =
1283 VT.bitsGT(Op.getValueType())
1284 ? getNode(ISD::STRICT_FP_EXTEND, DL, {VT, MVT::Other}, {Chain, Op})
1285 : getNode(ISD::STRICT_FP_ROUND, DL, {VT, MVT::Other},
1286 {Chain, Op, getIntPtrConstant(0, DL)});
1287
1288 return std::pair<SDValue, SDValue>(Res, SDValue(Res.getNode(), 1));
1289}
1290
1291SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
1292 return VT.bitsGT(Op.getValueType()) ?
1293 getNode(ISD::ANY_EXTEND, DL, VT, Op) :
1294 getNode(ISD::TRUNCATE, DL, VT, Op);
1295}
1296
1297SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
1298 return VT.bitsGT(Op.getValueType()) ?
1299 getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
1300 getNode(ISD::TRUNCATE, DL, VT, Op);
1301}
1302
1303SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
1304 return VT.bitsGT(Op.getValueType()) ?
1305 getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
1306 getNode(ISD::TRUNCATE, DL, VT, Op);
1307}
1308
1309SDValue SelectionDAG::getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT,
1310 EVT OpVT) {
1311 if (VT.bitsLE(Op.getValueType()))
1312 return getNode(ISD::TRUNCATE, SL, VT, Op);
1313
1314 TargetLowering::BooleanContent BType = TLI->getBooleanContents(OpVT);
1315 return getNode(TLI->getExtendForContent(BType), SL, VT, Op);
1316}
1317
1318SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
1319 EVT OpVT = Op.getValueType();
1320 assert(VT.isInteger() && OpVT.isInteger() &&
1321 "Cannot getZeroExtendInReg FP types");
1322 assert(VT.isVector() == OpVT.isVector() &&
1323 "getZeroExtendInReg type should be vector iff the operand "
1324 "type is vector!");
1325 assert((!VT.isVector() ||
1326 VT.getVectorElementCount() == OpVT.getVectorElementCount()) &&
1327 "Vector element counts must match in getZeroExtendInReg");
1328 assert(VT.bitsLE(OpVT) && "Not extending!");
1329 if (OpVT == VT)
1330 return Op;
1331 APInt Imm = APInt::getLowBitsSet(OpVT.getScalarSizeInBits(),
1332 VT.getScalarSizeInBits());
1333 return getNode(ISD::AND, DL, OpVT, Op, getConstant(Imm, DL, OpVT));
1334}
1335
1336SDValue SelectionDAG::getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
1337 // Only unsigned pointer semantics are supported right now. In the future this
1338 // might delegate to TLI to check pointer signedness.
1339 return getZExtOrTrunc(Op, DL, VT);
1340}
1341
1342SDValue SelectionDAG::getPtrExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
1343 // Only unsigned pointer semantics are supported right now. In the future this
1344 // might delegate to TLI to check pointer signedness.
1345 return getZeroExtendInReg(Op, DL, VT);
1346}
1347
1348/// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
1349SDValue SelectionDAG::getNOT(const SDLoc &DL, SDValue Val, EVT VT) {
1350 EVT EltVT = VT.getScalarType();
1351 SDValue NegOne =
1352 getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL, VT);
1353 return getNode(ISD::XOR, DL, VT, Val, NegOne);
1354}
1355
1356SDValue SelectionDAG::getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT) {
1357 SDValue TrueValue = getBoolConstant(true, DL, VT, VT);
1358 return getNode(ISD::XOR, DL, VT, Val, TrueValue);
1359}
1360
1361SDValue SelectionDAG::getBoolConstant(bool V, const SDLoc &DL, EVT VT,
1362 EVT OpVT) {
1363 if (!V)
1364 return getConstant(0, DL, VT);
1365
1366 switch (TLI->getBooleanContents(OpVT)) {
1367 case TargetLowering::ZeroOrOneBooleanContent:
1368 case TargetLowering::UndefinedBooleanContent:
1369 return getConstant(1, DL, VT);
1370 case TargetLowering::ZeroOrNegativeOneBooleanContent:
1371 return getAllOnesConstant(DL, VT);
1372 }
1373 llvm_unreachable("Unexpected boolean content enum!");
1374}
1375
1376SDValue SelectionDAG::getConstant(uint64_t Val, const SDLoc &DL, EVT VT,
1377 bool isT, bool isO) {
1378 EVT EltVT = VT.getScalarType();
1379 assert((EltVT.getSizeInBits() >= 64 ||
1380 (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
1381 "getConstant with a uint64_t value that doesn't fit in the type!");
1382 return getConstant(APInt(EltVT.getSizeInBits(), Val), DL, VT, isT, isO);
1383}
1384
1385SDValue SelectionDAG::getConstant(const APInt &Val, const SDLoc &DL, EVT VT,
1386 bool isT, bool isO) {
1387 return getConstant(*ConstantInt::get(*Context, Val), DL, VT, isT, isO);
1388}
1389
1390SDValue SelectionDAG::getConstant(const ConstantInt &Val, const SDLoc &DL,
1391 EVT VT, bool isT, bool isO) {
1392 assert(VT.isInteger() && "Cannot create FP integer constant!");
1393
1394 EVT EltVT = VT.getScalarType();
1395 const ConstantInt *Elt = &Val;
1396
1397 // In some cases the vector type is legal but the element type is illegal and
1398 // needs to be promoted, for example v8i8 on ARM. In this case, promote the
1399 // inserted value (the type does not need to match the vector element type).
1400 // Any extra bits introduced will be truncated away.
1401 if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
1402 TargetLowering::TypePromoteInteger) {
1403 EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
1404 APInt NewVal = Elt->getValue().zextOrTrunc(EltVT.getSizeInBits());
1405 Elt = ConstantInt::get(*getContext(), NewVal);
1406 }
1407 // In other cases the element type is illegal and needs to be expanded, for
1408 // example v2i64 on MIPS32. In this case, find the nearest legal type, split
1409 // the value into n parts and use a vector type with n-times the elements.
1410 // Then bitcast to the type requested.
1411 // Legalizing constants too early makes the DAGCombiner's job harder so we
1412 // only legalize if the DAG tells us we must produce legal types.
1413 else if (NewNodesMustHaveLegalTypes && VT.isVector() &&
1414 TLI->getTypeAction(*getContext(), EltVT) ==
1415 TargetLowering::TypeExpandInteger) {
1416 const APInt &NewVal = Elt->getValue();
1417 EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
1418 unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits();
1419
1420 // For scalable vectors, try to use a SPLAT_VECTOR_PARTS node.
1421 if (VT.isScalableVector()) {
1422 assert(EltVT.getSizeInBits() % ViaEltSizeInBits == 0 &&
1423 "Can only handle an even split!");
1424 unsigned Parts = EltVT.getSizeInBits() / ViaEltSizeInBits;
1425
1426 SmallVector<SDValue, 2> ScalarParts;
1427 for (unsigned i = 0; i != Parts; ++i)
1428 ScalarParts.push_back(getConstant(
1429 NewVal.extractBits(ViaEltSizeInBits, i * ViaEltSizeInBits), DL,
1430 ViaEltVT, isT, isO));
1431
1432 return getNode(ISD::SPLAT_VECTOR_PARTS, DL, VT, ScalarParts);
1433 }
1434
1435 unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits;
1436 EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts);
1437
1438 // Check the temporary vector is the correct size. If this fails then
1439 // getTypeToTransformTo() probably returned a type whose size (in bits)
1440 // isn't a power-of-2 factor of the requested type size.
1441 assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits());
1442
1443 SmallVector<SDValue, 2> EltParts;
1444 for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i)
1445 EltParts.push_back(getConstant(
1446 NewVal.extractBits(ViaEltSizeInBits, i * ViaEltSizeInBits), DL,
1447 ViaEltVT, isT, isO));
1448
1449 // EltParts is currently in little endian order. If we actually want
1450 // big-endian order then reverse it now.
1451 if (getDataLayout().isBigEndian())
1452 std::reverse(EltParts.begin(), EltParts.end());
1453
1454 // The elements must be reversed when the element order is different
1455 // to the endianness of the elements (because the BITCAST is itself a
1456 // vector shuffle in this situation). However, we do not need any code to
1457 // perform this reversal because getConstant() is producing a vector
1458 // splat.
1459 // This situation occurs in MIPS MSA.
1460
1461 SmallVector<SDValue, 8> Ops;
1462 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
1463 llvm::append_range(Ops, EltParts);
1464
1465 SDValue V =
1466 getNode(ISD::BITCAST, DL, VT, getBuildVector(ViaVecVT, DL, Ops));
1467 return V;
1468 }
1469
1470 assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
1471 "APInt size does not match type size!");
1472 unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
1473 FoldingSetNodeID ID;
1474 AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
1475 ID.AddPointer(Elt);
1476 ID.AddBoolean(isO);
1477 void *IP = nullptr;
1478 SDNode *N = nullptr;
1479 if ((N = FindNodeOrInsertPos(ID, DL, IP)))
1480 if (!VT.isVector())
1481 return SDValue(N, 0);
1482
1483 if (!N) {
1484 N = newSDNode<ConstantSDNode>(isT, isO, Elt, EltVT);
1485 CSEMap.InsertNode(N, IP);
1486 InsertNode(N);
1487 NewSDValueDbgMsg(SDValue(N, 0), "Creating constant: ", this);
1488 }
1489
1490 SDValue Result(N, 0);
1491 if (VT.isScalableVector())
1492 Result = getSplatVector(VT, DL, Result);
1493 else if (VT.isVector())
1494 Result = getSplatBuildVector(VT, DL, Result);
1495
1496 return Result;
1497}
1498
1499SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, const SDLoc &DL,
1500 bool isTarget) {
1501 return getConstant(Val, DL, TLI->getPointerTy(getDataLayout()), isTarget);
1502}
1503
1504SDValue SelectionDAG::getShiftAmountConstant(uint64_t Val, EVT VT,
1505 const SDLoc &DL, bool LegalTypes) {
1506 assert(VT.isInteger() && "Shift amount is not an integer type!");
1507 EVT ShiftVT = TLI->getShiftAmountTy(VT, getDataLayout(), LegalTypes);
1508 return getConstant(Val, DL, ShiftVT);
1509}
1510
1511SDValue SelectionDAG::getVectorIdxConstant(uint64_t Val, const SDLoc &DL,
1512 bool isTarget) {
1513 return getConstant(Val, DL, TLI->getVectorIdxTy(getDataLayout()), isTarget);
1514}
1515
1516SDValue SelectionDAG::getConstantFP(const APFloat &V, const SDLoc &DL, EVT VT,
1517 bool isTarget) {
1518 return getConstantFP(*ConstantFP::get(*getContext(), V), DL, VT, isTarget);
1519}
1520
1521SDValue SelectionDAG::getConstantFP(const ConstantFP &V, const SDLoc &DL,
1522 EVT VT, bool isTarget) {
1523 assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");
1524
1525 EVT EltVT = VT.getScalarType();
1526
1527 // Do the map lookup using the actual bit pattern for the floating point
1528 // value, so that we don't have problems with 0.0 comparing equal to -0.0, and
1529 // we don't have issues with SNANs.
1530 unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
1531 FoldingSetNodeID ID;
1532 AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
1533 ID.AddPointer(&V);
1534 void *IP = nullptr;
1535 SDNode *N = nullptr;
1536 if ((N = FindNodeOrInsertPos(ID, DL, IP)))
1537 if (!VT.isVector())
1538 return SDValue(N, 0);
1539
1540 if (!N) {
1541 N = newSDNode<ConstantFPSDNode>(isTarget, &V, EltVT);
1542 CSEMap.InsertNode(N, IP);
1543 InsertNode(N);
1544 }
1545
1546 SDValue Result(N, 0);
1547 if (VT.isScalableVector())
1548 Result = getSplatVector(VT, DL, Result);
1549 else if (VT.isVector())
1550 Result = getSplatBuildVector(VT, DL, Result);
1551 NewSDValueDbgMsg(Result, "Creating fp constant: ", this);
1552 return Result;
1553}
1554
1555SDValue SelectionDAG::getConstantFP(double Val, const SDLoc &DL, EVT VT,
1556 bool isTarget) {
1557 EVT EltVT = VT.getScalarType();
1558 if (EltVT == MVT::f32)
1559 return getConstantFP(APFloat((float)Val), DL, VT, isTarget);
1560 if (EltVT == MVT::f64)
1561 return getConstantFP(APFloat(Val), DL, VT, isTarget);
1562 if (EltVT == MVT::f80 || EltVT == MVT::f128 || EltVT == MVT::ppcf128 ||
1563 EltVT == MVT::f16 || EltVT == MVT::bf16) {
1564 bool Ignored;
1565 APFloat APF = APFloat(Val);
1566 APF.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
1567 &Ignored);
1568 return getConstantFP(APF, DL, VT, isTarget);
1569 }
1570 llvm_unreachable("Unsupported type in getConstantFP");
1571}
1572
1573SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, const SDLoc &DL,
1574 EVT VT, int64_t Offset, bool isTargetGA,
1575 unsigned TargetFlags) {
1576 assert((TargetFlags == 0 || isTargetGA) &&
1577 "Cannot set target flags on target-independent globals");
1578
1579 // Truncate (with sign-extension) the offset value to the pointer size.
1580 unsigned BitWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
1581 if (BitWidth < 64)
1582 Offset = SignExtend64(Offset, BitWidth);
1583
1584 unsigned Opc;
1585 if (GV->isThreadLocal())
1586 Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
1587 else
1588 Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;
1589
1590 FoldingSetNodeID ID;
1591 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1592 ID.AddPointer(GV);
1593 ID.AddInteger(Offset);
1594 ID.AddInteger(TargetFlags);
1595 void *IP = nullptr;
1596 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
1597 return SDValue(E, 0);
1598
1599 auto *N = newSDNode<GlobalAddressSDNode>(
1600 Opc, DL.getIROrder(), DL.getDebugLoc(), GV, VT, Offset, TargetFlags);
1601 CSEMap.InsertNode(N, IP);
1602 InsertNode(N);
1603 return SDValue(N, 0);
1604}
1605
1606SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
1607 unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
1608 FoldingSetNodeID ID;
1609 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1610 ID.AddInteger(FI);
1611 void *IP = nullptr;
1612 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1613 return SDValue(E, 0);
1614
1615 auto *N = newSDNode<FrameIndexSDNode>(FI, VT, isTarget);
1616 CSEMap.InsertNode(N, IP);
1617 InsertNode(N);
1618 return SDValue(N, 0);
1619}
1620
1621SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
1622 unsigned TargetFlags) {
1623 assert((TargetFlags == 0 || isTarget) &&
1624 "Cannot set target flags on target-independent jump tables");
1625 unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
1626 FoldingSetNodeID ID;
1627 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1628 ID.AddInteger(JTI);
1629 ID.AddInteger(TargetFlags);
1630 void *IP = nullptr;
1631 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1632 return SDValue(E, 0);
1633
1634 auto *N = newSDNode<JumpTableSDNode>(JTI, VT, isTarget, TargetFlags);
1635 CSEMap.InsertNode(N, IP);
1636 InsertNode(N);
1637 return SDValue(N, 0);
1638}
1639
1640SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
1641 MaybeAlign Alignment, int Offset,
1642 bool isTarget, unsigned TargetFlags) {
1643 assert((TargetFlags == 0 || isTarget) &&
1644 "Cannot set target flags on target-independent globals");
1645 if (!Alignment)
1646 Alignment = shouldOptForSize()
1647 ? getDataLayout().getABITypeAlign(C->getType())
1648 : getDataLayout().getPrefTypeAlign(C->getType());
1649 unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
1650 FoldingSetNodeID ID;
1651 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1652 ID.AddInteger(Alignment->value());
1653 ID.AddInteger(Offset);
1654 ID.AddPointer(C);
1655 ID.AddInteger(TargetFlags);
1656 void *IP = nullptr;
1657 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1658 return SDValue(E, 0);
1659
1660 auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, *Alignment,
1661 TargetFlags);
1662 CSEMap.InsertNode(N, IP);
1663 InsertNode(N);
1664 SDValue V = SDValue(N, 0);
1665 NewSDValueDbgMsg(V, "Creating new constant pool: ", this);
1666 return V;
1667}
1668
1669SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
1670 MaybeAlign Alignment, int Offset,
1671 bool isTarget, unsigned TargetFlags) {
1672 assert((TargetFlags == 0 || isTarget) &&
1673 "Cannot set target flags on target-independent globals");
1674 if (!Alignment)
1675 Alignment = getDataLayout().getPrefTypeAlign(C->getType());
1676 unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
1677 FoldingSetNodeID ID;
1678 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1679 ID.AddInteger(Alignment->value());
1680 ID.AddInteger(Offset);
1681 C->addSelectionDAGCSEId(ID);
1682 ID.AddInteger(TargetFlags);
1683 void *IP = nullptr;
1684 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1685 return SDValue(E, 0);
1686
1687 auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, *Alignment,
1688 TargetFlags);
1689 CSEMap.InsertNode(N, IP);
1690 InsertNode(N);
1691 return SDValue(N, 0);
1692}
1693
1694SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
1695 unsigned TargetFlags) {
1696 FoldingSetNodeID ID;
1697 AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), None);
1698 ID.AddInteger(Index);
1699 ID.AddInteger(Offset);
1700 ID.AddInteger(TargetFlags);
1701 void *IP = nullptr;
1702 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1703 return SDValue(E, 0);
1704
1705 auto *N = newSDNode<TargetIndexSDNode>(Index, VT, Offset, TargetFlags);
1706 CSEMap.InsertNode(N, IP);
1707 InsertNode(N);
1708 return SDValue(N, 0);
1709}
1710
1711SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
1712 FoldingSetNodeID ID;
1713 AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), None);
1714 ID.AddPointer(MBB);
1715 void *IP = nullptr;
1716 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1717 return SDValue(E, 0);
1718
1719 auto *N = newSDNode<BasicBlockSDNode>(MBB);
1720 CSEMap.InsertNode(N, IP);
1721 InsertNode(N);
1722 return SDValue(N, 0);
1723}
1724
1725SDValue SelectionDAG::getValueType(EVT VT) {
1726 if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
1727 ValueTypeNodes.size())
1728 ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);
1729
1730 SDNode *&N = VT.isExtended() ?
1731 ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];
1732
1733 if (N) return SDValue(N, 0);
1734 N = newSDNode<VTSDNode>(VT);
1735 InsertNode(N);
1736 return SDValue(N, 0);
1737}
1738
1739SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
1740 SDNode *&N = ExternalSymbols[Sym];
1741 if (N) return SDValue(N, 0);
1742 N = newSDNode<ExternalSymbolSDNode>(false, Sym, 0, VT);
1743 InsertNode(N);
1744 return SDValue(N, 0);
1745}
1746
1747SDValue SelectionDAG::getMCSymbol(MCSymbol *Sym, EVT VT) {
1748 SDNode *&N = MCSymbols[Sym];
1749 if (N)
1750 return SDValue(N, 0);
1751 N = newSDNode<MCSymbolSDNode>(Sym, VT);
1752 InsertNode(N);
1753 return SDValue(N, 0);
1754}
1755
1756SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
1757 unsigned TargetFlags) {
1758 SDNode *&N =
1759 TargetExternalSymbols[std::pair<std::string, unsigned>(Sym, TargetFlags)];
1760 if (N) return SDValue(N, 0);
1761 N = newSDNode<ExternalSymbolSDNode>(true, Sym, TargetFlags, VT);
1762 InsertNode(N);
1763 return SDValue(N, 0);
1764}
1765
1766SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
1767 if ((unsigned)Cond >= CondCodeNodes.size())
1768 CondCodeNodes.resize(Cond+1);
1769
1770 if (!CondCodeNodes[Cond]) {
1771 auto *N = newSDNode<CondCodeSDNode>(Cond);
1772 CondCodeNodes[Cond] = N;
1773 InsertNode(N);
1774 }
1775
1776 return SDValue(CondCodeNodes[Cond], 0);
1777}
1778
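/// Return a vector of type ResVT whose lanes hold the arithmetic sequence
/// 0, StepVal, 2*StepVal, ...; scalable result types use ISD::STEP_VECTOR,
/// fixed-width ones are built as a BUILD_VECTOR of constants.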
1779SDValue SelectionDAG::getStepVector(const SDLoc &DL, EVT ResVT) {
1780 APInt One(ResVT.getScalarSizeInBits(), 1);
1781 return getStepVector(DL, ResVT, One);
1782}
1783
1784SDValue SelectionDAG::getStepVector(const SDLoc &DL, EVT ResVT, APInt StepVal) {
1785 assert(ResVT.getScalarSizeInBits() == StepVal.getBitWidth());
1786 if (ResVT.isScalableVector())
1787 return getNode(
1788 ISD::STEP_VECTOR, DL, ResVT,
1789 getTargetConstant(StepVal, DL, ResVT.getVectorElementType()));
1790
1791 SmallVector<SDValue, 16> OpsStepConstants;
1792 for (uint64_t i = 0; i < ResVT.getVectorNumElements(); i++)
1793 OpsStepConstants.push_back(
1794 getConstant(StepVal * i, DL, ResVT.getVectorElementType()));
1795 return getBuildVector(ResVT, DL, OpsStepConstants);
1796}
1797
1798/// Swaps the values of N1 and N2. Swaps all indices in the shuffle mask M that
1799/// point at N1 to point at N2 and indices that point at N2 to point at N1.
1800static void commuteShuffle(SDValue &N1, SDValue &N2, MutableArrayRef<int> M) {
1801 std::swap(N1, N2);
1802 ShuffleVectorSDNode::commuteMask(M);
1803}
1804
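/// Build an ISD::VECTOR_SHUFFLE of N1 and N2 with the given mask, first
/// canonicalizing the operands and mask (undef inputs, duplicated inputs,
/// splats and identity shuffles) so later combines see a normalized node.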
1805SDValue SelectionDAG::getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1,
1806 SDValue N2, ArrayRef<int> Mask) {
1807 assert(VT.getVectorNumElements() == Mask.size() &&
1808 "Must have the same number of vector elements as mask elements!");
1809 assert(VT == N1.getValueType() && VT == N2.getValueType() &&
1810 "Invalid VECTOR_SHUFFLE");
1811
1812 // Canonicalize shuffle undef, undef -> undef
1813 if (N1.isUndef() && N2.isUndef())
1814 return getUNDEF(VT);
1815
1816 // Validate that all indices in Mask are within the range of the elements
1817 // input to the shuffle.
1818 int NElts = Mask.size();
1819 assert(llvm::all_of(Mask,
1820 [&](int M) { return M < (NElts * 2) && M >= -1; }) &&
1821 "Index out of range");
1822
1823 // Copy the mask so we can do any needed cleanup.
1824 SmallVector<int, 8> MaskVec(Mask.begin(), Mask.end());
1825
1826 // Canonicalize shuffle v, v -> v, undef
1827 if (N1 == N2) {
1828 N2 = getUNDEF(VT);
1829 for (int i = 0; i != NElts; ++i)
1830 if (MaskVec[i] >= NElts) MaskVec[i] -= NElts;
1831 }
1832
1833 // Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask.
1834 if (N1.isUndef())
1835 commuteShuffle(N1, N2, MaskVec);
1836
1837 if (TLI->hasVectorBlend()) {
1838 // If shuffling a splat, try to blend the splat instead. We do this here so
1839 // that even when this arises during lowering we don't have to re-handle it.
1840 auto BlendSplat = [&](BuildVectorSDNode *BV, int Offset) {
1841 BitVector UndefElements;
1842 SDValue Splat = BV->getSplatValue(&UndefElements);
1843 if (!Splat)
1844 return;
1845
1846 for (int i = 0; i < NElts; ++i) {
1847 if (MaskVec[i] < Offset || MaskVec[i] >= (Offset + NElts))
1848 continue;
1849
1850 // If this input comes from undef, mark it as such.
1851 if (UndefElements[MaskVec[i] - Offset]) {
1852 MaskVec[i] = -1;
1853 continue;
1854 }
1855
1856 // If we can blend a non-undef lane, use that instead.
1857 if (!UndefElements[i])
1858 MaskVec[i] = i + Offset;
1859 }
1860 };
1861 if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
1862 BlendSplat(N1BV, 0);
1863 if (auto *N2BV = dyn_cast<BuildVectorSDNode>(N2))
1864 BlendSplat(N2BV, NElts);
1865 }
1866
1867 // Canonicalize: if all indices point into lhs -> shuffle lhs, undef
1868 // Canonicalize: if all indices point into rhs -> shuffle rhs, undef
1869 bool AllLHS = true, AllRHS = true;
1870 bool N2Undef = N2.isUndef();
1871 for (int i = 0; i != NElts; ++i) {
1872 if (MaskVec[i] >= NElts) {
1873 if (N2Undef)
1874 MaskVec[i] = -1;
1875 else
1876 AllLHS = false;
1877 } else if (MaskVec[i] >= 0) {
1878 AllRHS = false;
1879 }
1880 }
1881 if (AllLHS && AllRHS)
1882 return getUNDEF(VT);
1883 if (AllLHS && !N2Undef)
1884 N2 = getUNDEF(VT);
1885 if (AllRHS) {
1886 N1 = getUNDEF(VT);
1887 commuteShuffle(N1, N2, MaskVec);
1888 }
1889 // Reset our undef status after accounting for the mask.
1890 N2Undef = N2.isUndef();
1891 // Re-check whether both sides ended up undef.
1892 if (N1.isUndef() && N2Undef)
1893 return getUNDEF(VT);
1894
1895 // If this is an identity shuffle, return that node.
1896 bool Identity = true, AllSame = true;
1897 for (int i = 0; i != NElts; ++i) {
1898 if (MaskVec[i] >= 0 && MaskVec[i] != i) Identity = false;
1899 if (MaskVec[i] != MaskVec[0]) AllSame = false;
1900 }
1901 if (Identity && NElts)
1902 return N1;
1903
1904 // Shuffling a constant splat doesn't change the result.
1905 if (N2Undef) {
1906 SDValue V = N1;
1907
1908 // Look through any bitcasts. We check that these don't change the number
1909 // (and size) of elements and just change their types.
1910 while (V.getOpcode() == ISD::BITCAST)
1911 V = V->getOperand(0);
1912
1913 // A splat should always show up as a build vector node.
1914 if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
1915 BitVector UndefElements;
1916 SDValue Splat = BV->getSplatValue(&UndefElements);
1917 // If this is a splat of an undef, shuffling it is also undef.
1918 if (Splat && Splat.isUndef())
1919 return getUNDEF(VT);
1920
1921 bool SameNumElts =
1922 V.getValueType().getVectorNumElements() == VT.getVectorNumElements();
1923
1924 // We only have a splat which can skip shuffles if there is a splatted
1925 // value and no undef lanes rearranged by the shuffle.
1926 if (Splat && UndefElements.none()) {
1927 // Splat of <x, x, ..., x> -> return <x, x, ..., x>, provided that the
1928 // number of elements matches or the splatted value is a zero constant.
1929 if (SameNumElts)
1930 return N1;
1931 if (auto *C = dyn_cast<ConstantSDNode>(Splat))
1932 if (C->isNullValue())
1933 return N1;
1934 }
1935
1936 // If the shuffle itself creates a splat, build the vector directly.
1937 if (AllSame && SameNumElts) {
1938 EVT BuildVT = BV->getValueType(0);
1939 const SDValue &Splatted = BV->getOperand(MaskVec[0]);
1940 SDValue NewBV = getSplatBuildVector(BuildVT, dl, Splatted);
1941
1942 // We may have jumped through bitcasts, so the type of the
1943 // BUILD_VECTOR may not match the type of the shuffle.
1944 if (BuildVT != VT)
1945 NewBV = getNode(ISD::BITCAST, dl, VT, NewBV);
1946 return NewBV;
1947 }
1948 }
1949 }
1950
1951 FoldingSetNodeID ID;
1952 SDValue Ops[2] = { N1, N2 };
1953 AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops);
1954 for (int i = 0; i != NElts; ++i)
1955 ID.AddInteger(MaskVec[i]);
1956
1957 void* IP = nullptr;
1958 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
1959 return SDValue(E, 0);
1960
1961 // Allocate the mask array for the node out of the BumpPtrAllocator, since
1962 // SDNode doesn't have access to it. This memory will be "leaked" when
1963 // the node is deallocated, but recovered when the NodeAllocator is released.
1964 int *MaskAlloc = OperandAllocator.Allocate<int>(NElts);
1965 llvm::copy(MaskVec, MaskAlloc);
1966
1967 auto *N = newSDNode<ShuffleVectorSDNode>(VT, dl.getIROrder(),
1968 dl.getDebugLoc(), MaskAlloc);
1969 createOperands(N, Ops);
1970
1971 CSEMap.InsertNode(N, IP);
1972 InsertNode(N);
1973 SDValue V = SDValue(N, 0);
1974 NewSDValueDbgMsg(V, "Creating new node: ", this);
1975 return V;
1976}
1977
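/// Return a shuffle equivalent to SV with its two operands swapped and the
/// mask commuted to match.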
1978SDValue SelectionDAG::getCommutedVectorShuffle(const ShuffleVectorSDNode &SV) {
1979 EVT VT = SV.getValueType(0);
1980 SmallVector<int, 8> MaskVec(SV.getMask().begin(), SV.getMask().end());
1981 ShuffleVectorSDNode::commuteMask(MaskVec);
1982
1983 SDValue Op0 = SV.getOperand(0);
1984 SDValue Op1 = SV.getOperand(1);
1985 return getVectorShuffle(VT, SDLoc(&SV), Op1, Op0, MaskVec);
1986}
1987
1988SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) {
1989 FoldingSetNodeID ID;
1990 AddNodeIDNode(ID, ISD::Register, getVTList(VT), None);
1991 ID.AddInteger(RegNo);
1992 void *IP = nullptr;
1993 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1994 return SDValue(E, 0);
1995
1996 auto *N = newSDNode<RegisterSDNode>(RegNo, VT);
1997 N->SDNodeBits.IsDivergent = TLI->isSDNodeSourceOfDivergence(N, FLI, DA);
1998 CSEMap.InsertNode(N, IP);
1999 InsertNode(N);
2000 return SDValue(N, 0);
2001}
2002
2003SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) {
2004 FoldingSetNodeID ID;
2005 AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), None);
2006 ID.AddPointer(RegMask);
2007 void *IP = nullptr;
2008 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
2009 return SDValue(E, 0);
2010
2011 auto *N = newSDNode<RegisterMaskSDNode>(RegMask);
2012 CSEMap.InsertNode(N, IP);
2013 InsertNode(N);
2014 return SDValue(N, 0);
2015}
2016
2017SDValue SelectionDAG::getEHLabel(const SDLoc &dl, SDValue Root,
2018 MCSymbol *Label) {
2019 return getLabelNode(ISD::EH_LABEL, dl, Root, Label);
2020}
2021
2022SDValue SelectionDAG::getLabelNode(unsigned Opcode, const SDLoc &dl,
2023 SDValue Root, MCSymbol *Label) {
2024 FoldingSetNodeID ID;
2025 SDValue Ops[] = { Root };
2026 AddNodeIDNode(ID, Opcode, getVTList(MVT::Other), Ops);
2027 ID.AddPointer(Label);
2028 void *IP = nullptr;
2029 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
2030 return SDValue(E, 0);
2031
2032 auto *N =
2033 newSDNode<LabelSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), Label);
2034 createOperands(N, Ops);
2035
2036 CSEMap.InsertNode(N, IP);
2037 InsertNode(N);
2038 return SDValue(N, 0);
2039}
2040
2041SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT,
2042 int64_t Offset, bool isTarget,
2043 unsigned TargetFlags) {
2044 unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress;
2045
2046 FoldingSetNodeID ID;
2047 AddNodeIDNode(ID, Opc, getVTList(VT), None);
2048 ID.AddPointer(BA);
2049 ID.AddInteger(Offset);
2050 ID.AddInteger(TargetFlags);
2051 void *IP = nullptr;
2052 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
2053 return SDValue(E, 0);
2054
2055 auto *N = newSDNode<BlockAddressSDNode>(Opc, VT, BA, Offset, TargetFlags);
2056 CSEMap.InsertNode(N, IP);
2057 InsertNode(N);
2058 return SDValue(N, 0);
2059}
2060
2061SDValue SelectionDAG::getSrcValue(const Value *V) {
2062 FoldingSetNodeID ID;
2063 AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), None);
2064 ID.AddPointer(V);
2065
2066 void *IP = nullptr;
2067 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
2068 return SDValue(E, 0);
2069
2070 auto *N = newSDNode<SrcValueSDNode>(V);
2071 CSEMap.InsertNode(N, IP);
2072 InsertNode(N);
2073 return SDValue(N, 0);
2074}
2075
2076SDValue SelectionDAG::getMDNode(const MDNode *MD) {
2077 FoldingSetNodeID ID;
2078 AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), None);
2079 ID.AddPointer(MD);
2080
2081 void *IP = nullptr;
2082 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
2083 return SDValue(E, 0);
2084
2085 auto *N = newSDNode<MDNodeSDNode>(MD);
2086 CSEMap.InsertNode(N, IP);
2087 InsertNode(N);
2088 return SDValue(N, 0);
2089}
2090
2091SDValue SelectionDAG::getBitcast(EVT VT, SDValue V) {
2092 if (VT == V.getValueType())
2093 return V;
2094
2095 return getNode(ISD::BITCAST, SDLoc(V), VT, V);
2096}
2097
2098SDValue SelectionDAG::getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr,
2099 unsigned SrcAS, unsigned DestAS) {
2100 SDValue Ops[] = {Ptr};
2101 FoldingSetNodeID ID;
2102 AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), Ops);
2103 ID.AddInteger(SrcAS);
2104 ID.AddInteger(DestAS);
2105
2106 void *IP = nullptr;
2107 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
2108 return SDValue(E, 0);
2109
2110 auto *N = newSDNode<AddrSpaceCastSDNode>(dl.getIROrder(), dl.getDebugLoc(),
2111 VT, SrcAS, DestAS);
2112 createOperands(N, Ops);
2113
2114 CSEMap.InsertNode(N, IP);
2115 InsertNode(N);
2116 return SDValue(N, 0);
2117}
2118
2119SDValue SelectionDAG::getFreeze(SDValue V) {
2120 return getNode(ISD::FREEZE, SDLoc(V), V.getValueType(), V);
2121}
2122
2123/// getShiftAmountOperand - Return the specified value casted to
2124/// the target's desired shift amount type.
2125SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) {
2126 EVT OpTy = Op.getValueType();
2127 EVT ShTy = TLI->getShiftAmountTy(LHSTy, getDataLayout());
2128 if (OpTy == ShTy || OpTy.isVector()) return Op;
2129
2130 return getZExtOrTrunc(Op, SDLoc(Op), ShTy);
2131}
2132
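/// Expand a VAARG node into explicit DAG operations: load the va_list
/// pointer, round it up if the argument needs extra alignment, advance it
/// past the argument, store it back, and finally load the argument value.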
2133SDValue SelectionDAG::expandVAArg(SDNode *Node) {
2134 SDLoc dl(Node);
2135 const TargetLowering &TLI = getTargetLoweringInfo();
2136 const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
2137 EVT VT = Node->getValueType(0);
2138 SDValue Tmp1 = Node->getOperand(0);
2139 SDValue Tmp2 = Node->getOperand(1);
2140 const MaybeAlign MA(Node->getConstantOperandVal(3));
2141
2142 SDValue VAListLoad = getLoad(TLI.getPointerTy(getDataLayout()), dl, Tmp1,
2143 Tmp2, MachinePointerInfo(V));
2144 SDValue VAList = VAListLoad;
2145
2146 if (MA && *MA > TLI.getMinStackArgumentAlignment()) {
2147 VAList = getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
2148 getConstant(MA->value() - 1, dl, VAList.getValueType()));
2149
2150 VAList =
2151 getNode(ISD::AND, dl, VAList.getValueType(), VAList,
2152 getConstant(-(int64_t)MA->value(), dl, VAList.getValueType()));
2153 }
2154
2155 // Increment the pointer, VAList, to the next vaarg
2156 Tmp1 = getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
2157 getConstant(getDataLayout().getTypeAllocSize(
2158 VT.getTypeForEVT(*getContext())),
2159 dl, VAList.getValueType()));
2160 // Store the incremented VAList to the legalized pointer
2161 Tmp1 =
2162 getStore(VAListLoad.getValue(1), dl, Tmp1, Tmp2, MachinePointerInfo(V));
2163 // Load the actual argument out of the pointer VAList
2164 return getLoad(VT, dl, Tmp1, VAList, MachinePointerInfo());
2165}
2166
2167SDValue SelectionDAG::expandVACopy(SDNode *Node) {
2168 SDLoc dl(Node);
2169 const TargetLowering &TLI = getTargetLoweringInfo();
2170 // This defaults to loading a pointer from the input and storing it to the
2171 // output, returning the chain.
2172 const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue();
2173 const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue();
2174 SDValue Tmp1 =
2175 getLoad(TLI.getPointerTy(getDataLayout()), dl, Node->getOperand(0),
2176 Node->getOperand(2), MachinePointerInfo(VS));
2177 return getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1),
2178 MachinePointerInfo(VD));
2179}
2180
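/// Compute the alignment to use for a stack temporary of type VT. For an
/// illegal vector type that will be broken down, a smaller alignment based
/// on the intermediate type may be chosen to avoid over-aligning the stack.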
2181Align SelectionDAG::getReducedAlign(EVT VT, bool UseABI) {
2182 const DataLayout &DL = getDataLayout();
2183 Type *Ty = VT.getTypeForEVT(*getContext());
2184 Align RedAlign = UseABI ? DL.getABITypeAlign(Ty) : DL.getPrefTypeAlign(Ty);
2185
2186 if (TLI->isTypeLegal(VT) || !VT.isVector())
2187 return RedAlign;
2188
2189 const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
2190 const Align StackAlign = TFI->getStackAlign();
2191
2192 // See if we can choose a smaller ABI alignment in cases where it's an
2193 // illegal vector type that will get broken down.
2194 if (RedAlign > StackAlign) {
2195 EVT IntermediateVT;
2196 MVT RegisterVT;
2197 unsigned NumIntermediates;
2198 TLI->getVectorTypeBreakdown(*getContext(), VT, IntermediateVT,
2199 NumIntermediates, RegisterVT);
2200 Ty = IntermediateVT.getTypeForEVT(*getContext());
2201 Align RedAlign2 = UseABI ? DL.getABITypeAlign(Ty) : DL.getPrefTypeAlign(Ty);
2202 if (RedAlign2 < RedAlign)
2203 RedAlign = RedAlign2;
2204 }
2205
2206 return RedAlign;
2207}
2208
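/// Create a stack object of the given byte size and alignment and return a
/// frame index node for it; scalable sizes are tagged with the target's
/// stack ID for scalable vectors.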
2209SDValue SelectionDAG::CreateStackTemporary(TypeSize Bytes, Align Alignment) {
2210 MachineFrameInfo &MFI = MF->getFrameInfo();
2211 const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
2212 int StackID = 0;
2213 if (Bytes.isScalable())
2214 StackID = TFI->getStackIDForScalableVectors();
2215 // The stack id gives an indication of whether the object is scalable or
2216 // not, so it's safe to pass in the minimum size here.
2217 int FrameIdx = MFI.CreateStackObject(Bytes.getKnownMinSize(), Alignment,
2218 false, nullptr, StackID);
2219 return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout()));
2220}
2221
2222SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
2223 Type *Ty = VT.getTypeForEVT(*getContext());
2224 Align StackAlign =
2225 std::max(getDataLayout().getPrefTypeAlign(Ty), Align(minAlign));
2226 return CreateStackTemporary(VT.getStoreSize(), StackAlign);
2227}
2228
2229SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
2230 TypeSize VT1Size = VT1.getStoreSize();
2231 TypeSize VT2Size = VT2.getStoreSize();
2232 assert(VT1Size.isScalable() == VT2Size.isScalable() &&
2233 "Don't know how to choose the maximum size when creating a stack "
2234 "temporary");
2235 TypeSize Bytes =
2236 VT1Size.getKnownMinSize() > VT2Size.getKnownMinSize() ? VT1Size : VT2Size;
2237
2238 Type *Ty1 = VT1.getTypeForEVT(*getContext());
2239 Type *Ty2 = VT2.getTypeForEVT(*getContext());
2240 const DataLayout &DL = getDataLayout();
2241 Align Align = std::max(DL.getPrefTypeAlign(Ty1), DL.getPrefTypeAlign(Ty2));
2242 return CreateStackTemporary(Bytes, Align);
2243}
2244
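/// Try to constant-fold a SETCC with operands N1, N2 and condition Cond,
/// returning a boolean constant or UNDEF; an empty SDValue means the
/// comparison could not be folded.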
2245SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, SDValue N2,
2246 ISD::CondCode Cond, const SDLoc &dl) {
2247 EVT OpVT = N1.getValueType();
2248
2249 // These setcc operations always fold.
2250 switch (Cond) {
2251 default: break;
2252 case ISD::SETFALSE:
2253 case ISD::SETFALSE2: return getBoolConstant(false, dl, VT, OpVT);
2254 case ISD::SETTRUE:
2255 case ISD::SETTRUE2: return getBoolConstant(true, dl, VT, OpVT);
2256
2257 case ISD::SETOEQ:
2258 case ISD::SETOGT:
2259 case ISD::SETOGE:
2260 case ISD::SETOLT:
2261 case ISD::SETOLE:
2262 case ISD::SETONE:
2263 case ISD::SETO:
2264 case ISD::SETUO:
2265 case ISD::SETUEQ:
2266 case ISD::SETUNE:
2267 assert(!OpVT.isInteger() && "Illegal setcc for integer!");
2268 break;
2269 }
2270
2271 if (OpVT.isInteger()) {
2272 // For EQ and NE, we can always pick a value for the undef to make the
2273 // predicate pass or fail, so we can return undef.
2274 // Matches behavior in llvm::ConstantFoldCompareInstruction.
2275 // icmp eq/ne X, undef -> undef.
2276 if ((N1.isUndef() || N2.isUndef()) &&
2277 (Cond == ISD::SETEQ || Cond == ISD::SETNE))
2278 return getUNDEF(VT);
2279
2280 // If both operands are undef, we can return undef for int comparison.
2281 // icmp undef, undef -> undef.
2282 if (N1.isUndef() && N2.isUndef())
2283 return getUNDEF(VT);
2284
2285 // icmp X, X -> true/false
2286 // icmp X, undef -> true/false because undef could be X.
2287 if (N1 == N2)
2288 return getBoolConstant(ISD::isTrueWhenEqual(Cond), dl, VT, OpVT);
2289 }
2290
2291 if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2)) {
2292 const APInt &C2 = N2C->getAPIntValue();
2293 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) {
2294 const APInt &C1 = N1C->getAPIntValue();
2295
2296 switch (Cond) {
2297 default: llvm_unreachable("Unknown integer setcc!");
2298 case ISD::SETEQ: return getBoolConstant(C1 == C2, dl, VT, OpVT);
2299 case ISD::SETNE: return getBoolConstant(C1 != C2, dl, VT, OpVT);
2300 case ISD::SETULT: return getBoolConstant(C1.ult(C2), dl, VT, OpVT);
2301 case ISD::SETUGT: return getBoolConstant(C1.ugt(C2), dl, VT, OpVT);
2302 case ISD::SETULE: return getBoolConstant(C1.ule(C2), dl, VT, OpVT);
2303 case ISD::SETUGE: return getBoolConstant(C1.uge(C2), dl, VT, OpVT);
2304 case ISD::SETLT: return getBoolConstant(C1.slt(C2), dl, VT, OpVT);
2305 case ISD::SETGT: return getBoolConstant(C1.sgt(C2), dl, VT, OpVT);
2306 case ISD::SETLE: return getBoolConstant(C1.sle(C2), dl, VT, OpVT);
2307 case ISD::SETGE: return getBoolConstant(C1.sge(C2), dl, VT, OpVT);
2308 }
2309 }
2310 }
2311
2312 auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
2313 auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
2314
2315 if (N1CFP && N2CFP) {
2316 APFloat::cmpResult R = N1CFP->getValueAPF().compare(N2CFP->getValueAPF());
2317 switch (Cond) {
2318 default: break;
2319 case ISD::SETEQ: if (R==APFloat::cmpUnordered)
2320 return getUNDEF(VT);
2321 LLVM_FALLTHROUGH;
2322 case ISD::SETOEQ: return getBoolConstant(R==APFloat::cmpEqual, dl, VT,
2323 OpVT);
2324 case ISD::SETNE: if (R==APFloat::cmpUnordered)
2325 return getUNDEF(VT);
2326 LLVM_FALLTHROUGH;
2327 case ISD::SETONE: return getBoolConstant(R==APFloat::cmpGreaterThan ||
2328 R==APFloat::cmpLessThan, dl, VT,
2329 OpVT);
2330 case ISD::SETLT: if (R==APFloat::cmpUnordered)
2331 return getUNDEF(VT);
2332 LLVM_FALLTHROUGH;
2333 case ISD::SETOLT: return getBoolConstant(R==APFloat::cmpLessThan, dl, VT,
2334 OpVT);
2335 case ISD::SETGT: if (R==APFloat::cmpUnordered)
2336 return getUNDEF(VT);
2337 LLVM_FALLTHROUGH;
2338 case ISD::SETOGT: return getBoolConstant(R==APFloat::cmpGreaterThan, dl,
2339 VT, OpVT);
2340 case ISD::SETLE: if (R==APFloat::cmpUnordered)
2341 return getUNDEF(VT);
2342 LLVM_FALLTHROUGH;
2343 case ISD::SETOLE: return getBoolConstant(R==APFloat::cmpLessThan ||
2344 R==APFloat::cmpEqual, dl, VT,
2345 OpVT);
2346 case ISD::SETGE: if (R==APFloat::cmpUnordered)
2347 return getUNDEF(VT);
2348 LLVM_FALLTHROUGH;
2349 case ISD::SETOGE: return getBoolConstant(R==APFloat::cmpGreaterThan ||
2350 R==APFloat::cmpEqual, dl, VT, OpVT);
2351 case ISD::SETO: return getBoolConstant(R!=APFloat::cmpUnordered, dl, VT,
2352 OpVT);
2353 case ISD::SETUO: return getBoolConstant(R==APFloat::cmpUnordered, dl, VT,
2354 OpVT);
2355 case ISD::SETUEQ: return getBoolConstant(R==APFloat::cmpUnordered ||
2356 R==APFloat::cmpEqual, dl, VT,
2357 OpVT);
2358 case ISD::SETUNE: return getBoolConstant(R!=APFloat::cmpEqual, dl, VT,
2359 OpVT);
2360 case ISD::SETULT: return getBoolConstant(R==APFloat::cmpUnordered ||
2361 R==APFloat::cmpLessThan, dl, VT,
2362 OpVT);
2363 case ISD::SETUGT: return getBoolConstant(R==APFloat::cmpGreaterThan ||
2364 R==APFloat::cmpUnordered, dl, VT,
2365 OpVT);
2366 case ISD::SETULE: return getBoolConstant(R!=APFloat::cmpGreaterThan, dl,
2367 VT, OpVT);
2368 case ISD::SETUGE: return getBoolConstant(R!=APFloat::cmpLessThan, dl, VT,
2369 OpVT);
2370 }
2371 } else if (N1CFP && OpVT.isSimple() && !N2.isUndef()) {
2372 // Ensure that the constant occurs on the RHS.
2373 ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond);
2374 if (!TLI->isCondCodeLegal(SwappedCond, OpVT.getSimpleVT()))
2375 return SDValue();
2376 return getSetCC(dl, VT, N2, N1, SwappedCond);
2377 } else if ((N2CFP && N2CFP->getValueAPF().isNaN()) ||
2378 (OpVT.isFloatingPoint() && (N1.isUndef() || N2.isUndef()))) {
2379 // If an operand is known to be a nan (or undef that could be a nan), we can
2380 // fold it.
2381 // Choosing NaN for the undef will always make unordered comparison succeed
2382 // and ordered comparison fail.
2383 // Matches behavior in llvm::ConstantFoldCompareInstruction.
2384 switch (ISD::getUnorderedFlavor(Cond)) {
2385 default:
2386 llvm_unreachable("Unknown flavor!");
2387 case 0: // Known false.
2388 return getBoolConstant(false, dl, VT, OpVT);
2389 case 1: // Known true.
2390 return getBoolConstant(true, dl, VT, OpVT);
2391 case 2: // Undefined.
2392 return getUNDEF(VT);
2393 }
2394 }
2395
2396 // Could not fold it.
2397 return SDValue();
2398}
2399
2400/// See if the specified operand can be simplified with the knowledge that only
2401/// the bits specified by DemandedBits are used.
2402/// TODO: really we should be making this into the DAG equivalent of
2403/// SimplifyMultipleUseDemandedBits and not generate any new nodes.
2404SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &DemandedBits) {
2405 EVT VT = V.getValueType();
2406
2407 if (VT.isScalableVector())
2408 return SDValue();
2409
2410 APInt DemandedElts = VT.isVector()
2411 ? APInt::getAllOnesValue(VT.getVectorNumElements())
2412 : APInt(1, 1);
2413 return GetDemandedBits(V, DemandedBits, DemandedElts);
2414}
2415
2416/// See if the specified operand can be simplified with the knowledge that only
2417/// the bits specified by DemandedBits are used in the elements specified by
2418/// DemandedElts.
2419/// TODO: really we should be making this into the DAG equivalent of
2420/// SimplifyMultipleUseDemandedBits and not generate any new nodes.
2421SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &DemandedBits,
2422 const APInt &DemandedElts) {
2423 switch (V.getOpcode()) {
2424 default:
2425 return TLI->SimplifyMultipleUseDemandedBits(V, DemandedBits, DemandedElts,
2426 *this, 0);
2427 case ISD::Constant: {
2428 const APInt &CVal = cast<ConstantSDNode>(V)->getAPIntValue();
2429 APInt NewVal = CVal & DemandedBits;
2430 if (NewVal != CVal)
2431 return getConstant(NewVal, SDLoc(V), V.getValueType());
2432 break;
2433 }
2434 case ISD::SRL:
2435 // Only look at single-use SRLs.
2436 if (!V.getNode()->hasOneUse())
2437 break;
2438 if (auto *RHSC = dyn_cast<ConstantSDNode>(V.getOperand(1))) {
2439 // See if we can recursively simplify the LHS.
2440 unsigned Amt = RHSC->getZExtValue();
2441
2442 // Watch out for shift count overflow though.
2443 if (Amt >= DemandedBits.getBitWidth())
2444 break;
2445 APInt SrcDemandedBits = DemandedBits << Amt;
2446 if (SDValue SimplifyLHS =
2447 GetDemandedBits(V.getOperand(0), SrcDemandedBits))
2448 return getNode(ISD::SRL, SDLoc(V), V.getValueType(), SimplifyLHS,
2449 V.getOperand(1));
2450 }
2451 break;
2452 }
2453 return SDValue();
2454}
2455
2456/// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We
2457/// use this predicate to simplify operations downstream.
2458bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const {
2459 unsigned BitWidth = Op.getScalarValueSizeInBits();
2460 return MaskedValueIsZero(Op, APInt::getSignMask(BitWidth), Depth);
2461}
2462
2463/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use
2464/// this predicate to simplify operations downstream. Mask is known to be zero
2465/// for bits that V cannot have.
2466bool SelectionDAG::MaskedValueIsZero(SDValue V, const APInt &Mask,
2467 unsigned Depth) const {
2468 return Mask.isSubsetOf(computeKnownBits(V, Depth).Zero);
2469}
2470
2471/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero in
2472/// DemandedElts. We use this predicate to simplify operations downstream.
2473/// Mask is known to be zero for bits that V cannot have.
2474bool SelectionDAG::MaskedValueIsZero(SDValue V, const APInt &Mask,
2475 const APInt &DemandedElts,
2476 unsigned Depth) const {
2477 return Mask.isSubsetOf(computeKnownBits(V, DemandedElts, Depth).Zero);
2478}
2479
2480/// MaskedValueIsAllOnes - Return true if '(Op & Mask) == Mask'.
2481bool SelectionDAG::MaskedValueIsAllOnes(SDValue V, const APInt &Mask,
2482 unsigned Depth) const {
2483 return Mask.isSubsetOf(computeKnownBits(V, Depth).One);
2484}
2485
2486/// isSplatValue - Return true if the vector V has the same value
2487/// across all DemandedElts. For scalable vectors it does not make
2488/// sense to specify which elements are demanded or undefined, therefore
2489/// they are simply ignored.
2490bool SelectionDAG::isSplatValue(SDValue V, const APInt &DemandedElts,
2491 APInt &UndefElts, unsigned Depth) {
2492 EVT VT = V.getValueType();
2493 assert(VT.isVector() && "Vector type expected");
2494
2495 if (!VT.isScalableVector() && !DemandedElts)
2496 return false; // No demanded elts, better to assume we don't know anything.
2497
2498 if (Depth >= MaxRecursionDepth)
2499 return false; // Limit search depth.
2500
2501 // Deal with some common cases here that work for both fixed and scalable
2502 // vector types.
2503 switch (V.getOpcode()) {
2504 case ISD::SPLAT_VECTOR:
2505 UndefElts = V.getOperand(0).isUndef()
2506 ? APInt::getAllOnesValue(DemandedElts.getBitWidth())
2507 : APInt(DemandedElts.getBitWidth(), 0);
2508 return true;
2509 case ISD::ADD:
2510 case ISD::SUB:
2511 case ISD::AND:
2512 case ISD::XOR:
2513 case ISD::OR: {
2514 APInt UndefLHS, UndefRHS;
2515 SDValue LHS = V.getOperand(0);
2516 SDValue RHS = V.getOperand(1);
2517 if (isSplatValue(LHS, DemandedElts, UndefLHS, Depth + 1) &&
2518 isSplatValue(RHS, DemandedElts, UndefRHS, Depth + 1)) {
2519 UndefElts = UndefLHS | UndefRHS;
2520 return true;
2521 }
2522 return false;
2523 }
2524 case ISD::ABS:
2525 case ISD::TRUNCATE:
2526 case ISD::SIGN_EXTEND:
2527 case ISD::ZERO_EXTEND:
2528 return isSplatValue(V.getOperand(0), DemandedElts, UndefElts, Depth + 1);
2529 }
2530
2531 // We don't support other cases than those above for scalable vectors at
2532 // the moment.
2533 if (VT.isScalableVector())
2534 return false;
2535
2536 unsigned NumElts = VT.getVectorNumElements();
2537 assert(NumElts == DemandedElts.getBitWidth() && "Vector size mismatch");
2538 UndefElts = APInt::getNullValue(NumElts);
2539
2540 switch (V.getOpcode()) {
2541 case ISD::BUILD_VECTOR: {
2542 SDValue Scl;
2543 for (unsigned i = 0; i != NumElts; ++i) {
2544 SDValue Op = V.getOperand(i);
2545 if (Op.isUndef()) {
2546 UndefElts.setBit(i);
2547 continue;
2548 }
2549 if (!DemandedElts[i])
2550 continue;
2551 if (Scl && Scl != Op)
2552 return false;
2553 Scl = Op;
2554 }
2555 return true;
2556 }
2557 case ISD::VECTOR_SHUFFLE: {
2558 // Check if this is a shuffle node doing a splat.
2559 // TODO: Do we need to handle shuffle(splat, undef, mask)?
2560 int SplatIndex = -1;
2561 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(V)->getMask();
2562 for (int i = 0; i != (int)NumElts; ++i) {
2563 int M = Mask[i];
2564 if (M < 0) {
2565 UndefElts.setBit(i);
2566 continue;
2567 }
2568 if (!DemandedElts[i])
2569 continue;
2570 if (0 <= SplatIndex && SplatIndex != M)
2571 return false;
2572 SplatIndex = M;
2573 }
2574 return true;
2575 }
2576 case ISD::EXTRACT_SUBVECTOR: {
2577 // Offset the demanded elts by the subvector index.
2578 SDValue Src = V.getOperand(0);
2579 // We don't support scalable vectors at the moment.
2580 if (Src.getValueType().isScalableVector())
2581 return false;
2582 uint64_t Idx = V.getConstantOperandVal(1);
2583 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
2584 APInt UndefSrcElts;
2585 APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
2586 if (isSplatValue(Src, DemandedSrcElts, UndefSrcElts, Depth + 1)) {
2587 UndefElts = UndefSrcElts.extractBits(NumElts, Idx);
2588 return true;
2589 }
2590 break;
2591 }
2592 }
2593
2594 return false;
2595}
2596
2597/// Helper wrapper to main isSplatValue function.
2598bool SelectionDAG::isSplatValue(SDValue V, bool AllowUndefs) {
2599 EVT VT = V.getValueType();
2600 assert(VT.isVector() && "Vector type expected");
2601
2602 APInt UndefElts;
2603 APInt DemandedElts;
2604
2605 // For now we don't support this with scalable vectors.
2606 if (!VT.isScalableVector())
2607 DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
2608 return isSplatValue(V, DemandedElts, UndefElts) &&
2609 (AllowUndefs || !UndefElts);
2610}
2611
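/// If V is splatted from a single lane, return the vector that lane lives in
/// and set SplatIdx to its index; returns an empty SDValue otherwise.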
2612SDValue SelectionDAG::getSplatSourceVector(SDValue V, int &SplatIdx) {
2613 V = peekThroughExtractSubvectors(V);
2614
2615 EVT VT = V.getValueType();
2616 unsigned Opcode = V.getOpcode();
2617 switch (Opcode) {
2618 default: {
2619 APInt UndefElts;
2620 APInt DemandedElts;
2621
2622 if (!VT.isScalableVector())
2623 DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
2624
2625 if (isSplatValue(V, DemandedElts, UndefElts)) {
2626 if (VT.isScalableVector()) {
2627 // DemandedElts and UndefElts are ignored for scalable vectors, since
2628 // the only supported cases are SPLAT_VECTOR nodes.
2629 SplatIdx = 0;
2630 } else {
2631 // Handle case where all demanded elements are UNDEF.
2632 if (DemandedElts.isSubsetOf(UndefElts)) {
2633 SplatIdx = 0;
2634 return getUNDEF(VT);
2635 }
2636 SplatIdx = (UndefElts & DemandedElts).countTrailingOnes();
2637 }
2638 return V;
2639 }
2640 break;
2641 }
2642 case ISD::SPLAT_VECTOR:
2643 SplatIdx = 0;
2644 return V;
2645 case ISD::VECTOR_SHUFFLE: {
2646 if (VT.isScalableVector())
2647 return SDValue();
2648
2649 // Check if this is a shuffle node doing a splat.
2650 // TODO - remove this and rely purely on SelectionDAG::isSplatValue,
2651 // getTargetVShiftNode currently struggles without the splat source.
2652 auto *SVN = cast<ShuffleVectorSDNode>(V);
2653 if (!SVN->isSplat())
2654 break;
2655 int Idx = SVN->getSplatIndex();
2656 int NumElts = V.getValueType().getVectorNumElements();
2657 SplatIdx = Idx % NumElts;
2658 return V.getOperand(Idx / NumElts);
2659 }
2660 }
2661
2662 return SDValue();
2663}
2664
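/// If V is a splat vector, return its splatted scalar as an
/// EXTRACT_VECTOR_ELT of the splat source; with LegalTypes set, the extract
/// is produced in a legal (possibly wider) integer type.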
2665SDValue SelectionDAG::getSplatValue(SDValue V, bool LegalTypes) {
2666 int SplatIdx;
2667 if (SDValue SrcVector = getSplatSourceVector(V, SplatIdx)) {
2668 EVT SVT = SrcVector.getValueType().getScalarType();
2669 EVT LegalSVT = SVT;
2670 if (LegalTypes && !TLI->isTypeLegal(SVT)) {
2671 if (!SVT.isInteger())
2672 return SDValue();
2673 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
2674 if (LegalSVT.bitsLT(SVT))
2675 return SDValue();
2676 }
2677 return getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(V), LegalSVT, SrcVector,
2678 getVectorIdxConstant(SplatIdx, SDLoc(V)));
2679 }
2680 return SDValue();
2681}
2682
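/// If the shift amount of V (a SHL/SRL/SRA node) is a uniform constant that
/// is smaller than the bit width, return it; otherwise return nullptr.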
2683const APInt *
2684SelectionDAG::getValidShiftAmountConstant(SDValue V,
2685 const APInt &DemandedElts) const {
2686 assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL ||
2687 V.getOpcode() == ISD::SRA) &&
2688 "Unknown shift node");
2689 unsigned BitWidth = V.getScalarValueSizeInBits();
2690 if (ConstantSDNode *SA = isConstOrConstSplat(V.getOperand(1), DemandedElts)) {
2691 // Shifting more than the bitwidth is not valid.
2692 const APInt &ShAmt = SA->getAPIntValue();
2693 if (ShAmt.ult(BitWidth))
2694 return &ShAmt;
2695 }
2696 return nullptr;
2697}
2698
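/// Like getValidShiftAmountConstant, but for non-uniform BUILD_VECTOR shift
/// amounts: return the smallest demanded constant shift amount when every
/// demanded lane is a valid in-range constant, otherwise nullptr.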
2699const APInt *SelectionDAG::getValidMinimumShiftAmountConstant(
2700 SDValue V, const APInt &DemandedElts) const {
2701 assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL ||
2702 V.getOpcode() == ISD::SRA) &&
2703 "Unknown shift node");
2704 if (const APInt *ValidAmt = getValidShiftAmountConstant(V, DemandedElts))
2705 return ValidAmt;
2706 unsigned BitWidth = V.getScalarValueSizeInBits();
2707 auto *BV = dyn_cast<BuildVectorSDNode>(V.getOperand(1));
2708 if (!BV)
2709 return nullptr;
2710 const APInt *MinShAmt = nullptr;
2711 for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
2712 if (!DemandedElts[i])
2713 continue;
2714 auto *SA = dyn_cast<ConstantSDNode>(BV->getOperand(i));
2715 if (!SA)
2716 return nullptr;
2717 // Shifting more than the bitwidth is not valid.
2718 const APInt &ShAmt = SA->getAPIntValue();
2719 if (ShAmt.uge(BitWidth))
2720 return nullptr;
2721 if (MinShAmt && MinShAmt->ule(ShAmt))
2722 continue;
2723 MinShAmt = &ShAmt;
2724 }
2725 return MinShAmt;
2726}
2727
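/// Like getValidShiftAmountConstant, but for non-uniform BUILD_VECTOR shift
/// amounts: return the largest demanded constant shift amount when every
/// demanded lane is a valid in-range constant, otherwise nullptr.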
2728const APInt *SelectionDAG::getValidMaximumShiftAmountConstant(
2729 SDValue V, const APInt &DemandedElts) const {
2730 assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL ||
2731 V.getOpcode() == ISD::SRA) &&
2732 "Unknown shift node");
2733 if (const APInt *ValidAmt = getValidShiftAmountConstant(V, DemandedElts))
2734 return ValidAmt;
2735 unsigned BitWidth = V.getScalarValueSizeInBits();
2736 auto *BV = dyn_cast<BuildVectorSDNode>(V.getOperand(1));
2737 if (!BV)
2738 return nullptr;
2739 const APInt *MaxShAmt = nullptr;
2740 for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
2741 if (!DemandedElts[i])
2742 continue;
2743 auto *SA = dyn_cast<ConstantSDNode>(BV->getOperand(i));
2744 if (!SA)
2745 return nullptr;
2746 // Shifting more than the bitwidth is not valid.
2747 const APInt &ShAmt = SA->getAPIntValue();
2748 if (ShAmt.uge(BitWidth))
2749 return nullptr;
2750 if (MaxShAmt && MaxShAmt->uge(ShAmt))
2751 continue;
2752 MaxShAmt = &ShAmt;
2753 }
2754 return MaxShAmt;
2755}
2756
2757/// Determine which bits of Op are known to be either zero or one and return
2758/// them in Known. For vectors, the known bits are those that are shared by
2759/// every vector element.
2760KnownBits SelectionDAG::computeKnownBits(SDValue Op, unsigned Depth) const {
2761 EVT VT = Op.getValueType();
2762
2763 // TODO: Until we have a plan for how to represent demanded elements for
2764 // scalable vectors, we can just bail out for now.
2765 if (Op.getValueType().isScalableVector()) {
2766 unsigned BitWidth = Op.getScalarValueSizeInBits();
2767 return KnownBits(BitWidth);
2768 }
2769
2770 APInt DemandedElts = VT.isVector()
2771 ? APInt::getAllOnesValue(VT.getVectorNumElements())
2772 : APInt(1, 1);
2773 return computeKnownBits(Op, DemandedElts, Depth);
2774}
2775
2776/// Determine which bits of Op are known to be either zero or one and return
2777/// them in Known. The DemandedElts argument allows us to only collect the known
2778/// bits that are shared by the requested vector elements.
2779KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
2780 unsigned Depth) const {
2781 unsigned BitWidth = Op.getScalarValueSizeInBits();
2782
2783 KnownBits Known(BitWidth); // Don't know anything.
2784
2785 // TODO: Until we have a plan for how to represent demanded elements for
2786 // scalable vectors, we can just bail out for now.
2787 if (Op.getValueType().isScalableVector())
2788 return Known;
2789
2790 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
2791 // We know all of the bits for a constant!
2792 return KnownBits::makeConstant(C->getAPIntValue());
2793 }
2794 if (auto *C = dyn_cast<ConstantFPSDNode>(Op)) {
2795 // We know all of the bits for a constant fp!
2796 return KnownBits::makeConstant(C->getValueAPF().bitcastToAPInt());
2797 }
2798
2799 if (Depth >= MaxRecursionDepth)
2800 return Known; // Limit search depth.
2801
2802 KnownBits Known2;
2803 unsigned NumElts = DemandedElts.getBitWidth();
2804 assert((!Op.getValueType().isVector() ||
2805 NumElts == Op.getValueType().getVectorNumElements()) &&
2806 "Unexpected vector size");
2807
2808 if (!DemandedElts)
2809 return Known; // No demanded elts, better to assume we don't know anything.
2810
2811 unsigned Opcode = Op.getOpcode();
2812 switch (Opcode) {
2813 case ISD::BUILD_VECTOR:
2814 // Collect the known bits that are shared by every demanded vector element.
2815 Known.Zero.setAllBits(); Known.One.setAllBits();
2816 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
2817 if (!DemandedElts[i])
2818 continue;
2819
2820 SDValue SrcOp = Op.getOperand(i);
2821 Known2 = computeKnownBits(SrcOp, Depth + 1);
2822
2823 // BUILD_VECTOR can implicitly truncate sources; we must handle this.
2824 if (SrcOp.getValueSizeInBits() != BitWidth) {
2825 assert(SrcOp.getValueSizeInBits() > BitWidth &&
2826 "Expected BUILD_VECTOR implicit truncation");
2827 Known2 = Known2.trunc(BitWidth);
2828 }
2829
2830 // Known bits are the values that are shared by every demanded element.
2831 Known = KnownBits::commonBits(Known, Known2);
2832
2833 // If we don't know any bits, early out.
2834 if (Known.isUnknown())
2835 break;
2836 }
2837 break;
2838 case ISD::VECTOR_SHUFFLE: {
2839 // Collect the known bits that are shared by every vector element referenced
2840 // by the shuffle.
2841 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
2842 Known.Zero.setAllBits(); Known.One.setAllBits();
2843 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
2844 assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
2845 for (unsigned i = 0; i != NumElts; ++i) {
2846 if (!DemandedElts[i])
2847 continue;
2848
2849 int M = SVN->getMaskElt(i);
2850 if (M < 0) {
2851 // For UNDEF elements, we don't know anything about the common state of
2852 // the shuffle result.
2853 Known.resetAll();
2854 DemandedLHS.clearAllBits();
2855 DemandedRHS.clearAllBits();
2856 break;
2857 }
2858
2859 if ((unsigned)M < NumElts)
2860 DemandedLHS.setBit((unsigned)M % NumElts);
2861 else
2862 DemandedRHS.setBit((unsigned)M % NumElts);
2863 }
2864 // Known bits are the values that are shared by every demanded element.
2865 if (!!DemandedLHS) {
2866 SDValue LHS = Op.getOperand(0);
2867 Known2 = computeKnownBits(LHS, DemandedLHS, Depth + 1);
2868 Known = KnownBits::commonBits(Known, Known2);
2869 }
2870 // If we don't know any bits, early out.
2871 if (Known.isUnknown())
2872 break;
2873 if (!!DemandedRHS) {
2874 SDValue RHS = Op.getOperand(1);
2875 Known2 = computeKnownBits(RHS, DemandedRHS, Depth + 1);
2876 Known = KnownBits::commonBits(Known, Known2);
2877 }
2878 break;
2879 }
2880 case ISD::CONCAT_VECTORS: {
2881 // Split DemandedElts and test each of the demanded subvectors.
2882 Known.Zero.setAllBits(); Known.One.setAllBits();
2883 EVT SubVectorVT = Op.getOperand(0).getValueType();
2884 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
2885 unsigned NumSubVectors = Op.getNumOperands();
2886 for (unsigned i = 0; i != NumSubVectors; ++i) {
2887 APInt DemandedSub =
2888 DemandedElts.extractBits(NumSubVectorElts, i * NumSubVectorElts);
2889 if (!!DemandedSub) {
2890 SDValue Sub = Op.getOperand(i);
2891 Known2 = computeKnownBits(Sub, DemandedSub, Depth + 1);
2892 Known = KnownBits::commonBits(Known, Known2);
2893 }
2894 // If we don't know any bits, early out.
2895 if (Known.isUnknown())
2896 break;
2897 }
2898 break;
2899 }
2900 case ISD::INSERT_SUBVECTOR: {
2901 // Demand any elements from the subvector and the remainder from the src it's
2902 // inserted into.
2903 SDValue Src = Op.getOperand(0);
2904 SDValue Sub = Op.getOperand(1);
2905 uint64_t Idx = Op.getConstantOperandVal(2);
2906 unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
2907 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
2908 APInt DemandedSrcElts = DemandedElts;
2909 DemandedSrcElts.insertBits(APInt::getNullValue(NumSubElts), Idx);
2910
2911 Known.One.setAllBits();
2912 Known.Zero.setAllBits();
2913 if (!!DemandedSubElts) {
2914 Known = computeKnownBits(Sub, DemandedSubElts, Depth + 1);
2915 if (Known.isUnknown())
2916 break; // early-out.
2917 }
2918 if (!!DemandedSrcElts) {
2919 Known2 = computeKnownBits(Src, DemandedSrcElts, Depth + 1);
2920 Known = KnownBits::commonBits(Known, Known2);
2921 }
2922 break;
2923 }
2924 case ISD::EXTRACT_SUBVECTOR: {
2925 // Offset the demanded elts by the subvector index.
2926 SDValue Src = Op.getOperand(0);
2927 // Bail until we can represent demanded elements for scalable vectors.
2928 if (Src.getValueType().isScalableVector())
2929 break;
2930 uint64_t Idx = Op.getConstantOperandVal(1);
2931 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
2932 APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
2933 Known = computeKnownBits(Src, DemandedSrcElts, Depth + 1);
2934 break;
2935 }
2936 case ISD::SCALAR_TO_VECTOR: {
2937 // We know as much about a scalar_to_vector as we know about its source,
2938 // which becomes the first element of an otherwise unknown vector.
2939 if (DemandedElts != 1)
2940 break;
2941
2942 SDValue N0 = Op.getOperand(0);
2943 Known = computeKnownBits(N0, Depth + 1);
2944 if (N0.getValueSizeInBits() != BitWidth)
2945 Known = Known.trunc(BitWidth);
2946
2947 break;
2948 }
2949 case ISD::BITCAST: {
2950 SDValue N0 = Op.getOperand(0);
2951 EVT SubVT = N0.getValueType();
2952 unsigned SubBitWidth = SubVT.getScalarSizeInBits();
2953
2954 // Ignore bitcasts from unsupported types.
2955 if (!(SubVT.isInteger() || SubVT.isFloatingPoint()))
2956 break;
2957
2958 // Fast handling of 'identity' bitcasts.
2959 if (BitWidth == SubBitWidth) {
2960 Known = computeKnownBits(N0, DemandedElts, Depth + 1);
2961 break;
2962 }
2963
2964 bool IsLE = getDataLayout().isLittleEndian();
2965
2966 // Bitcast 'small element' vector to 'large element' scalar/vector.
2967 if ((BitWidth % SubBitWidth) == 0) {
2968 assert(N0.getValueType().isVector() && "Expected bitcast from vector");
2969
2970 // Collect known bits for the (larger) output by collecting the known
2971 // bits from each set of sub elements and shift these into place.
2972 // We need to separately call computeKnownBits for each set of
2973 // sub elements as the knownbits for each is likely to be different.
2974 unsigned SubScale = BitWidth / SubBitWidth;
2975 APInt SubDemandedElts(NumElts * SubScale, 0);
2976 for (unsigned i = 0; i != NumElts; ++i)
2977 if (DemandedElts[i])
2978 SubDemandedElts.setBit(i * SubScale);
2979
2980 for (unsigned i = 0; i != SubScale; ++i) {
2981 Known2 = computeKnownBits(N0, SubDemandedElts.shl(i),
2982 Depth + 1);
2983 unsigned Shifts = IsLE ? i : SubScale - 1 - i;
2984 Known.insertBits(Known2, SubBitWidth * Shifts);
2985 }
2986 }
2987
2988 // Bitcast 'large element' scalar/vector to 'small element' vector.
2989 if ((SubBitWidth % BitWidth) == 0) {
2990 assert(Op.getValueType().isVector() && "Expected bitcast to vector");
2991
2992 // Collect known bits for the (smaller) output by collecting the known
2993 // bits from the overlapping larger input elements and extracting the
2994 // sub sections we actually care about.
2995 unsigned SubScale = SubBitWidth / BitWidth;
2996 APInt SubDemandedElts(NumElts / SubScale, 0);
2997 for (unsigned i = 0; i != NumElts; ++i)
2998 if (DemandedElts[i])
2999 SubDemandedElts.setBit(i / SubScale);
3000
3001 Known2 = computeKnownBits(N0, SubDemandedElts, Depth + 1);
3002
3003 Known.Zero.setAllBits(); Known.One.setAllBits();
3004 for (unsigned i = 0; i != NumElts; ++i)
3005 if (DemandedElts[i]) {
3006 unsigned Shifts = IsLE ? i : NumElts - 1 - i;
3007 unsigned Offset = (Shifts % SubScale) * BitWidth;
3008 Known = KnownBits::commonBits(Known,
3009 Known2.extractBits(BitWidth, Offset));
3010 // If we don't know any bits, early out.
3011 if (Known.isUnknown())
3012 break;
3013 }
3014 }
3015 break;
3016 }
3017 case ISD::AND:
3018 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3019 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3020
3021 Known &= Known2;
3022 break;
3023 case ISD::OR:
3024 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3025 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3026
3027 Known |= Known2;
3028 break;
3029 case ISD::XOR:
3030 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3031 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3032
3033 Known ^= Known2;
3034 break;
3035 case ISD::MUL: {
3036 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3037 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3038 Known = KnownBits::mul(Known, Known2);
3039 break;
3040 }
3041 case ISD::MULHU: {
3042 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3043 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3044 Known = KnownBits::mulhu(Known, Known2);
3045 break;
3046 }
3047 case ISD::MULHS: {
3048 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3049 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3050 Known = KnownBits::mulhs(Known, Known2);
3051 break;
3052 }
3053 case ISD::UMUL_LOHI: {
3054 assert((Op.getResNo() == 0 || Op.getResNo() == 1) && "Unknown result");
3055 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3056 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3057 if (Op.getResNo() == 0)
3058 Known = KnownBits::mul(Known, Known2);
3059 else
3060 Known = KnownBits::mulhu(Known, Known2);
3061 break;
3062 }
3063 case ISD::SMUL_LOHI: {
3064 assert((Op.getResNo() == 0 || Op.getResNo() == 1) && "Unknown result");
3065 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3066 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3067 if (Op.getResNo() == 0)
3068 Known = KnownBits::mul(Known, Known2);
3069 else
3070 Known = KnownBits::mulhs(Known, Known2);
3071 break;
3072 }
3073 case ISD::UDIV: {
3074 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3075 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3076 Known = KnownBits::udiv(Known, Known2);
3077 break;
3078 }
3079 case ISD::SELECT:
3080 case ISD::VSELECT:
3081 Known = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1);
3082 // If we don't know any bits, early out.
3083 if (Known.isUnknown())
3084 break;
3085 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth+1);
3086
3087 // Only known if known in both the LHS and RHS.
3088 Known = KnownBits::commonBits(Known, Known2);
3089 break;
3090 case ISD::SELECT_CC:
3091 Known = computeKnownBits(Op.getOperand(3), DemandedElts, Depth+1);
3092 // If we don't know any bits, early out.
3093 if (Known.isUnknown())
3094 break;
3095 Known2 = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1);
3096
3097 // Only known if known in both the LHS and RHS.
3098 Known = KnownBits::commonBits(Known, Known2);
3099 break;
3100 case ISD::SMULO:
3101 case ISD::UMULO:
3102 if (Op.getResNo() != 1)
3103 break;
3104 // The boolean result conforms to getBooleanContents.
3105 // If we know the result of a setcc has the top bits zero, use this info.
3106 // We know that we have an integer-based boolean since these operations
3107 // are only available for integer.
3108 if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
3109 TargetLowering::ZeroOrOneBooleanContent &&
3110 BitWidth > 1)
3111 Known.Zero.setBitsFrom(1);
3112 break;
3113 case ISD::SETCC:
3114 case ISD::STRICT_FSETCC:
3115 case ISD::STRICT_FSETCCS: {
3116 unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0;
3117 // If we know the result of a setcc has the top bits zero, use this info.
3118 if (TLI->getBooleanContents(Op.getOperand(OpNo).getValueType()) ==
3119 TargetLowering::ZeroOrOneBooleanContent &&
3120 BitWidth > 1)
3121 Known.Zero.setBitsFrom(1);
3122 break;
3123 }
3124 case ISD::SHL:
3125 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3126 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3127 Known = KnownBits::shl(Known, Known2);
3128
3129 // Minimum shift low bits are known zero.
3130 if (const APInt *ShMinAmt =
3131 getValidMinimumShiftAmountConstant(Op, DemandedElts))
3132 Known.Zero.setLowBits(ShMinAmt->getZExtValue());
3133 break;
3134 case ISD::SRL:
3135 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3136 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3137 Known = KnownBits::lshr(Known, Known2);
3138
3139 // Minimum shift high bits are known zero.
3140 if (const APInt *ShMinAmt =
3141 getValidMinimumShiftAmountConstant(Op, DemandedElts))
3142 Known.Zero.setHighBits(ShMinAmt->getZExtValue());
3143 break;
3144 case ISD::SRA:
3145 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3146 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3147 Known = KnownBits::ashr(Known, Known2);
3148 // TODO: Add minimum shift high known sign bits.
3149 break;
3150 case ISD::FSHL:
3151 case ISD::FSHR:
3152 if (ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(2), DemandedElts)) {
3153 unsigned Amt = C->getAPIntValue().urem(BitWidth);
3154
3155 // For fshl, 0-shift returns the 1st arg.
3156 // For fshr, 0-shift returns the 2nd arg.
3157 if (Amt == 0) {
3158 Known = computeKnownBits(Op.getOperand(Opcode == ISD::FSHL ? 0 : 1),
3159 DemandedElts, Depth + 1);
3160 break;
3161 }
3162
3163 // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
3164 // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
3165 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3166 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3167 if (Opcode == ISD::FSHL) {
3168 Known.One <<= Amt;
3169 Known.Zero <<= Amt;
3170 Known2.One.lshrInPlace(BitWidth - Amt);
3171 Known2.Zero.lshrInPlace(BitWidth - Amt);
3172 } else {
3173 Known.One <<= BitWidth - Amt;
3174 Known.Zero <<= BitWidth - Amt;
3175 Known2.One.lshrInPlace(Amt);
3176 Known2.Zero.lshrInPlace(Amt);
3177 }
3178 Known.One |= Known2.One;
3179 Known.Zero |= Known2.Zero;
3180 }
3181 break;
3182 case ISD::SIGN_EXTEND_INREG: {
3183 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3184 EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
3185 Known = Known.sextInReg(EVT.getScalarSizeInBits());
3186 break;
3187 }
3188 case ISD::CTTZ:
3189 case ISD::CTTZ_ZERO_UNDEF: {
3190 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3191 // If we have a known 1, its position is our upper bound.
3192 unsigned PossibleTZ = Known2.countMaxTrailingZeros();
3193 unsigned LowBits = Log2_32(PossibleTZ) + 1;
3194 Known.Zero.setBitsFrom(LowBits);
3195 break;
3196 }
3197 case ISD::CTLZ:
3198 case ISD::CTLZ_ZERO_UNDEF: {
3199 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3200 // If we have a known 1, its position is our upper bound.
3201 unsigned PossibleLZ = Known2.countMaxLeadingZeros();
3202 unsigned LowBits = Log2_32(PossibleLZ) + 1;
3203 Known.Zero.setBitsFrom(LowBits);
3204 break;
3205 }
3206 case ISD::CTPOP: {
3207 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3208 // If we know some of the bits are zero, they can't be one.
3209 unsigned PossibleOnes = Known2.countMaxPopulation();
3210 Known.Zero.setBitsFrom(Log2_32(PossibleOnes) + 1);
3211 break;
3212 }
3213 case ISD::PARITY: {
3214 // Parity returns 0 everywhere but the LSB.
3215 Known.Zero.setBitsFrom(1);
3216 break;
3217 }
3218 case ISD::LOAD: {
3219 LoadSDNode *LD = cast<LoadSDNode>(Op);
3220 const Constant *Cst = TLI->getTargetConstantFromLoad(LD);
3221 if (ISD::isNON_EXTLoad(LD) && Cst) {
3222 // Determine any common known bits from the loaded constant pool value.
3223 Type *CstTy = Cst->getType();
3224 if ((NumElts * BitWidth) == CstTy->getPrimitiveSizeInBits()) {
3225 // If it's a vector splat, then we can (quickly) reuse the scalar path.
3226 // NOTE: We assume all elements match and none are UNDEF.
3227 if (CstTy->isVectorTy()) {
3228 if (const Constant *Splat = Cst->getSplatValue()) {
3229 Cst = Splat;
3230 CstTy = Cst->getType();
3231 }
3232 }
3233 // TODO - do we need to handle different bitwidths?
3234 if (CstTy->isVectorTy() && BitWidth == CstTy->getScalarSizeInBits()) {
3235 // Iterate across all vector elements finding common known bits.
3236 Known.One.setAllBits();
3237 Known.Zero.setAllBits();
3238 for (unsigned i = 0; i != NumElts; ++i) {
3239 if (!DemandedElts[i])
3240 continue;
3241 if (Constant *Elt = Cst->getAggregateElement(i)) {
3242 if (auto *CInt = dyn_cast<ConstantInt>(Elt)) {
3243 const APInt &Value = CInt->getValue();
3244 Known.One &= Value;
3245 Known.Zero &= ~Value;
3246 continue;
3247 }
3248 if (auto *CFP = dyn_cast<ConstantFP>(Elt)) {
3249 APInt Value = CFP->getValueAPF().bitcastToAPInt();
3250 Known.One &= Value;
3251 Known.Zero &= ~Value;
3252 continue;
3253 }
3254 }
3255 Known.One.clearAllBits();
3256 Known.Zero.clearAllBits();
3257 break;
3258 }
3259 } else if (BitWidth == CstTy->getPrimitiveSizeInBits()) {
3260 if (auto *CInt = dyn_cast<ConstantInt>(Cst)) {
3261 Known = KnownBits::makeConstant(CInt->getValue());
3262 } else if (auto *CFP = dyn_cast<ConstantFP>(Cst)) {
3263 Known =
3264 KnownBits::makeConstant(CFP->getValueAPF().bitcastToAPInt());
3265 }
3266 }
3267 }
3268 } else if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
3269 // If this is a ZEXTLoad and we are looking at the loaded value.
3270 EVT VT = LD->getMemoryVT();
3271 unsigned MemBits = VT.getScalarSizeInBits();
3272 Known.Zero.setBitsFrom(MemBits);
3273 } else if (const MDNode *Ranges = LD->getRanges()) {
3274 if (LD->getExtensionType() == ISD::NON_EXTLOAD)
3275 computeKnownBitsFromRangeMetadata(*Ranges, Known);
3276 }
3277 break;
3278 }
3279 case ISD::ZERO_EXTEND_VECTOR_INREG: {
3280 EVT InVT = Op.getOperand(0).getValueType();
3281 APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
3282 Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
3283 Known = Known.zext(BitWidth);
3284 break;
3285 }
3286 case ISD::ZERO_EXTEND: {
3287 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3288 Known = Known.zext(BitWidth);
3289 break;
3290 }
3291 case ISD::SIGN_EXTEND_VECTOR_INREG: {
3292 EVT InVT = Op.getOperand(0).getValueType();
3293 APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
3294 Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
3295 // If the sign bit is known to be zero or one, then sext will extend
3296 // it to the top bits, else it will just zext.
3297 Known = Known.sext(BitWidth);
3298 break;
3299 }
3300 case ISD::SIGN_EXTEND: {
3301 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3302 // If the sign bit is known to be zero or one, then sext will extend
3303 // it to the top bits, else it will just zext.
3304 Known = Known.sext(BitWidth);
3305 break;
3306 }
3307 case ISD::ANY_EXTEND_VECTOR_INREG: {
3308 EVT InVT = Op.getOperand(0).getValueType();
3309 APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
3310 Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
3311 Known = Known.anyext(BitWidth);
3312 break;
3313 }
3314 case ISD::ANY_EXTEND: {
3315 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3316 Known = Known.anyext(BitWidth);
3317 break;
3318 }
3319 case ISD::TRUNCATE: {
3320 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3321 Known = Known.trunc(BitWidth);
3322 break;
3323 }
3324 case ISD::AssertZext: {
3325 EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
3326 APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
3327 Known = computeKnownBits(Op.getOperand(0), Depth+1);
3328 Known.Zero |= (~InMask);
3329 Known.One &= (~Known.Zero);
3330 break;
3331 }
3332 case ISD::AssertAlign: {
3333 unsigned LogOfAlign = Log2(cast<AssertAlignSDNode>(Op)->getAlign());
3334 assert(LogOfAlign != 0);
3335 // If a node is guaranteed to be aligned, set low zero bits accordingly as
3336 // well as clearing one bits.
3337 Known.Zero.setLowBits(LogOfAlign);
3338 Known.One.clearLowBits(LogOfAlign);
3339 break;
3340 }
3341 case ISD::FGETSIGN:
3342 // All bits are zero except the low bit.
3343 Known.Zero.setBitsFrom(1);
3344 break;
3345 case ISD::USUBO:
3346 case ISD::SSUBO:
3347 if (Op.getResNo() == 1) {
3348 // If we know the result of a setcc has the top bits zero, use this info.
3349 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
3350 TargetLowering::ZeroOrOneBooleanContent &&
3351 BitWidth > 1)
3352 Known.Zero.setBitsFrom(1);
3353 break;
3354 }
3355 LLVM_FALLTHROUGH;
3356 case ISD::SUB:
3357 case ISD::SUBC: {
3358 assert(Op.getResNo() == 0 &&
3359 "We only compute knownbits for the difference here.");
3360
3361 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3362 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3363 Known = KnownBits::computeForAddSub(/* Add */ false, /* NSW */ false,
3364 Known, Known2);
3365 break;
3366 }
3367 case ISD::UADDO:
3368 case ISD::SADDO:
3369 case ISD::ADDCARRY:
3370 if (Op.getResNo() == 1) {
3371 // If we know the result of a setcc has the top bits zero, use this info.
3372 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
3373 TargetLowering::ZeroOrOneBooleanContent &&
3374 BitWidth > 1)
3375 Known.Zero.setBitsFrom(1);
3376 break;
3377 }
3378 LLVM_FALLTHROUGH;
3379 case ISD::ADD:
3380 case ISD::ADDC:
3381 case ISD::ADDE: {
3382 assert(Op.getResNo() == 0 && "We only compute knownbits for the sum here.");
3383
3384 // With ADDE and ADDCARRY, a carry bit may be added in.
3385 KnownBits Carry(1);
3386 if (Opcode == ISD::ADDE)
3387 // Can't track carry from glue, set carry to unknown.
3388 Carry.resetAll();
3389 else if (Opcode == ISD::ADDCARRY)
3390 // TODO: Compute known bits for the carry operand. Not sure if it is worth
3391 // the trouble (how often will we find a known carry bit). And I haven't
3392 // tested this very much yet, but something like this might work:
3393 // Carry = computeKnownBits(Op.getOperand(2), DemandedElts, Depth + 1);
3394 // Carry = Carry.zextOrTrunc(1, false);
3395 Carry.resetAll();
3396 else
3397 Carry.setAllZero();
3398
3399 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3400 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3401 Known = KnownBits::computeForAddCarry(Known, Known2, Carry);
3402 break;
3403 }
3404 case ISD::SREM: {
3405 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3406 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3407 Known = KnownBits::srem(Known, Known2);
3408 break;
3409 }
3410 case ISD::UREM: {
3411 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3412 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3413 Known = KnownBits::urem(Known, Known2);
3414 break;
3415 }
3416 case ISD::EXTRACT_ELEMENT: {
3417 Known = computeKnownBits(Op.getOperand(0), Depth+1);
3418 const unsigned Index = Op.getConstantOperandVal(1);
3419 const unsigned EltBitWidth = Op.getValueSizeInBits();
3420
3421 // Remove low part of known bits mask
3422 Known.Zero = Known.Zero.getHiBits(Known.getBitWidth() - Index * EltBitWidth);
3423 Known.One = Known.One.getHiBits(Known.getBitWidth() - Index * EltBitWidth);
3424
3425 // Remove high part of known bit mask
3426 Known = Known.trunc(EltBitWidth);
3427 break;
3428 }
3429 case ISD::EXTRACT_VECTOR_ELT: {
3430 SDValue InVec = Op.getOperand(0);
3431 SDValue EltNo = Op.getOperand(1);
3432 EVT VecVT = InVec.getValueType();
3433 // computeKnownBits not yet implemented for scalable vectors.
3434 if (VecVT.isScalableVector())
3435 break;
3436 const unsigned EltBitWidth = VecVT.getScalarSizeInBits();
3437 const unsigned NumSrcElts = VecVT.getVectorNumElements();
3438
3439 // If BitWidth > EltBitWidth the value is anyext:ed. So we do not know
3440 // anything about the extended bits.
3441 if (BitWidth > EltBitWidth)
3442 Known = Known.trunc(EltBitWidth);
3443
3444 // If we know the element index, just demand that vector element, else for
3445 // an unknown element index, ignore DemandedElts and demand them all.
3446 APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts);
3447 auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
3448 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
3449 DemandedSrcElts =
3450 APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue());
3451
3452 Known = computeKnownBits(InVec, DemandedSrcElts, Depth + 1);
3453 if (BitWidth > EltBitWidth)
3454 Known = Known.anyext(BitWidth);
3455 break;
3456 }
3457 case ISD::INSERT_VECTOR_ELT: {
3458 // If we know the element index, split the demand between the
3459 // source vector and the inserted element, otherwise assume we need
3460 // the original demanded vector elements and the value.
3461 SDValue InVec = Op.getOperand(0);
3462 SDValue InVal = Op.getOperand(1);
3463 SDValue EltNo = Op.getOperand(2);
3464 bool DemandedVal = true;
3465 APInt DemandedVecElts = DemandedElts;
3466 auto *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
3467 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
3468 unsigned EltIdx = CEltNo->getZExtValue();
3469 DemandedVal = !!DemandedElts[EltIdx];
3470 DemandedVecElts.clearBit(EltIdx);
3471 }
3472 Known.One.setAllBits();
3473 Known.Zero.setAllBits();
3474 if (DemandedVal) {
3475 Known2 = computeKnownBits(InVal, Depth + 1);
3476 Known = KnownBits::commonBits(Known, Known2.zextOrTrunc(BitWidth));
3477 }
3478 if (!!DemandedVecElts) {
3479 Known2 = computeKnownBits(InVec, DemandedVecElts, Depth + 1);
3480 Known = KnownBits::commonBits(Known, Known2);
3481 }
3482 break;
3483 }
3484 case ISD::BITREVERSE: {
3485 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3486 Known = Known2.reverseBits();
3487 break;
3488 }
3489 case ISD::BSWAP: {
3490 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3491 Known = Known2.byteSwap();
3492 break;
3493 }
3494 case ISD::ABS: {
3495 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3496 Known = Known2.abs();
3497 break;
3498 }
3499 case ISD::USUBSAT: {
3500 // The result of usubsat will never be larger than the LHS.
3501 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3502 Known.Zero.setHighBits(Known2.countMinLeadingZeros());
3503 break;
3504 }
3505 case ISD::UMIN: {
3506 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3507 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3508 Known = KnownBits::umin(Known, Known2);
3509 break;
3510 }
3511 case ISD::UMAX: {
3512 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3513 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3514 Known = KnownBits::umax(Known, Known2);
3515 break;
3516 }
3517 case ISD::SMIN:
3518 case ISD::SMAX: {
3519 // If we have a clamp pattern, we know that the number of sign bits will be
3520 // the minimum of the clamp min/max range.
3521 bool IsMax = (Opcode == ISD::SMAX);
3522 ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr;
3523 if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts)))
3524 if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX))
3525 CstHigh =
3526 isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts);
3527 if (CstLow && CstHigh) {
3528 if (!IsMax)
3529 std::swap(CstLow, CstHigh);
3530
3531 const APInt &ValueLow = CstLow->getAPIntValue();
3532 const APInt &ValueHigh = CstHigh->getAPIntValue();
3533 if (ValueLow.sle(ValueHigh)) {
3534 unsigned LowSignBits = ValueLow.getNumSignBits();
3535 unsigned HighSignBits = ValueHigh.getNumSignBits();
3536 unsigned MinSignBits = std::min(LowSignBits, HighSignBits);
3537 if (ValueLow.isNegative() && ValueHigh.isNegative()) {
3538 Known.One.setHighBits(MinSignBits);
3539 break;
3540 }
3541 if (ValueLow.isNonNegative() && ValueHigh.isNonNegative()) {
3542 Known.Zero.setHighBits(MinSignBits);
3543 break;
3544 }
3545 }
3546 }
3547
3548 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3549 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3550 if (IsMax)
3551 Known = KnownBits::smax(Known, Known2);
3552 else
3553 Known = KnownBits::smin(Known, Known2);
3554 break;
3555 }
3556 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
3557 if (Op.getResNo() == 1) {
3558 // The boolean result conforms to getBooleanContents.
3559 // If we know the result of a setcc has the top bits zero, use this info.
3560 // We know that we have an integer-based boolean since these operations
3561 // are only available for integer.
3562 if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
3563 TargetLowering::ZeroOrOneBooleanContent &&
3564 BitWidth > 1)
3565 Known.Zero.setBitsFrom(1);
3566 break;
3567 }
3568 LLVM_FALLTHROUGH;
3569 case ISD::ATOMIC_CMP_SWAP:
3570 case ISD::ATOMIC_SWAP:
3571 case ISD::ATOMIC_LOAD_ADD:
3572 case ISD::ATOMIC_LOAD_SUB:
3573 case ISD::ATOMIC_LOAD_AND:
3574 case ISD::ATOMIC_LOAD_CLR:
3575 case ISD::ATOMIC_LOAD_OR:
3576 case ISD::ATOMIC_LOAD_XOR:
3577 case ISD::ATOMIC_LOAD_NAND:
3578 case ISD::ATOMIC_LOAD_MIN:
3579 case ISD::ATOMIC_LOAD_MAX:
3580 case ISD::ATOMIC_LOAD_UMIN:
3581 case ISD::ATOMIC_LOAD_UMAX:
3582 case ISD::ATOMIC_LOAD: {
3583 unsigned MemBits =
3584 cast<AtomicSDNode>(Op)->getMemoryVT().getScalarSizeInBits();
3585 // If we are looking at the loaded value.
3586 if (Op.getResNo() == 0) {
3587 if (TLI->getExtendForAtomicOps() == ISD::ZERO_EXTEND)
3588 Known.Zero.setBitsFrom(MemBits);
3589 }
3590 break;
3591 }
3592 case ISD::FrameIndex:
3593 case ISD::TargetFrameIndex:
3594 TLI->computeKnownBitsForFrameIndex(cast<FrameIndexSDNode>(Op)->getIndex(),
3595 Known, getMachineFunction());
3596 break;
3597
3598 default:
3599 if (Opcode < ISD::BUILTIN_OP_END)
3600 break;
3601 LLVM_FALLTHROUGH;
3602 case ISD::INTRINSIC_WO_CHAIN:
3603 case ISD::INTRINSIC_W_CHAIN:
3604 case ISD::INTRINSIC_VOID:
3605 // Allow the target to implement this method for its nodes.
3606 TLI->computeKnownBitsForTargetNode(Op, Known, DemandedElts, *this, Depth);
3607 break;
3608 }
3609
3610 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
3611 return Known;
3612}
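
(Illustrative sketch, not part of SelectionDAG.cpp: a typical consumer of computeKnownBits in a DAG combine, assuming the usual SelectionDAG headers are in scope. The helper name foldAndOfKnownZeroMask and the fold itself are hypothetical; only the SelectionDAG, KnownBits and APInt calls are real API.)

static SDValue foldAndOfKnownZeroMask(SDValue N, SelectionDAG &DAG) {
  // N is assumed to be an ISD::AND node with a constant right-hand side.
  SDValue LHS = N.getOperand(0);
  auto *MaskC = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (!MaskC)
    return SDValue();
  KnownBits Known = DAG.computeKnownBits(LHS);
  // Every bit the mask keeps is already known zero in LHS -> the AND is zero.
  if (MaskC->getAPIntValue().isSubsetOf(Known.Zero))
    return DAG.getConstant(0, SDLoc(N), N.getValueType());
  // Every bit the mask clears is already known zero in LHS -> the AND is a no-op.
  if ((~MaskC->getAPIntValue()).isSubsetOf(Known.Zero))
    return LHS;
  return SDValue();
}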
3613
3614SelectionDAG::OverflowKind SelectionDAG::computeOverflowKind(SDValue N0,
3615 SDValue N1) const {
3616 // X + 0 never overflows
3617 if (isNullConstant(N1))
3618 return OFK_Never;
3619
3620 KnownBits N1Known = computeKnownBits(N1);
3621 if (N1Known.Zero.getBoolValue()) {
3622 KnownBits N0Known = computeKnownBits(N0);
3623
3624 bool overflow;
3625 (void)N0Known.getMaxValue().uadd_ov(N1Known.getMaxValue(), overflow);
3626 if (!overflow)
3627 return OFK_Never;
3628 }
3629
3630 // mulhi + 1 never overflows
3631 if (N0.getOpcode() == ISD::UMUL_LOHI && N0.getResNo() == 1 &&
3632 (N1Known.getMaxValue() & 0x01) == N1Known.getMaxValue())
3633 return OFK_Never;
3634
3635 if (N1.getOpcode() == ISD::UMUL_LOHI && N1.getResNo() == 1) {
3636 KnownBits N0Known = computeKnownBits(N0);
3637
3638 if ((N0Known.getMaxValue() & 0x01) == N0Known.getMaxValue())
3639 return OFK_Never;
3640 }
3641
3642 return OFK_Sometime;
3643}
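
(Illustrative sketch, not part of SelectionDAG.cpp: computeOverflowKind is typically used to prove that an unsigned add can never wrap before tagging or rewriting it. The helper name tryAddNoWrap is hypothetical.)

static SDValue tryAddNoWrap(SDValue X, SDValue Y, const SDLoc &DL,
                            SelectionDAG &DAG) {
  // Only rebuild the add when overflow is provably impossible.
  if (DAG.computeOverflowKind(X, Y) != SelectionDAG::OFK_Never)
    return SDValue();
  SDNodeFlags Flags;
  Flags.setNoUnsignedWrap(true);
  return DAG.getNode(ISD::ADD, DL, X.getValueType(), X, Y, Flags);
}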
3644
3645bool SelectionDAG::isKnownToBeAPowerOfTwo(SDValue Val) const {
3646 EVT OpVT = Val.getValueType();
3647 unsigned BitWidth = OpVT.getScalarSizeInBits();
3648
3649 // Is the constant a known power of 2?
3650 if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Val))
3651 return Const->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
3652
3653 // A left-shift of a constant one will have exactly one bit set because
3654 // shifting the bit off the end is undefined.
3655 if (Val.getOpcode() == ISD::SHL) {
3656 auto *C = isConstOrConstSplat(Val.getOperand(0));
3657 if (C && C->getAPIntValue() == 1)
3658 return true;
3659 }
3660
3661 // Similarly, a logical right-shift of a constant sign-bit will have exactly
3662 // one bit set.
3663 if (Val.getOpcode() == ISD::SRL) {
3664 auto *C = isConstOrConstSplat(Val.getOperand(0));
3665 if (C && C->getAPIntValue().isSignMask())
3666 return true;
3667 }
3668
3669 // Are all operands of a build vector constant powers of two?
3670 if (Val.getOpcode() == ISD::BUILD_VECTOR)
3671 if (llvm::all_of(Val->ops(), [BitWidth](SDValue E) {
3672 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(E))
3673 return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
3674 return false;
3675 }))
3676 return true;
3677
3678 // Is the operand of a splat vector a constant power of two?
3679 if (Val.getOpcode() == ISD::SPLAT_VECTOR)
3680 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val->getOperand(0)))
3681 if (C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2())
3682 return true;
3683
3684 // More could be done here, though the above checks are enough
3685 // to handle some common cases.
3686
3687 // Fall back to computeKnownBits to catch other known cases.
3688 KnownBits Known = computeKnownBits(Val);
3689 return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
3690}
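
(Illustrative sketch, not part of SelectionDAG.cpp: a classic use of isKnownToBeAPowerOfTwo is turning an unsigned remainder by a power of two into a mask. The helper name foldURemOfPow2 is hypothetical.)

static SDValue foldURemOfPow2(SDValue N, SelectionDAG &DAG) {
  // N is assumed to be an ISD::UREM node: X urem Pow2 --> X & (Pow2 - 1).
  SDValue X = N.getOperand(0), Divisor = N.getOperand(1);
  if (!DAG.isKnownToBeAPowerOfTwo(Divisor))
    return SDValue();
  SDLoc DL(N);
  EVT VT = N.getValueType();
  SDValue One = DAG.getConstant(1, DL, VT);
  SDValue Mask = DAG.getNode(ISD::SUB, DL, VT, Divisor, One);
  return DAG.getNode(ISD::AND, DL, VT, X, Mask);
}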
3691
3692unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const {
3693 EVT VT = Op.getValueType();
3694
3695 // TODO: Assume we don't know anything for now.
3696 if (VT.isScalableVector())
3697 return 1;
3698
3699 APInt DemandedElts = VT.isVector()
3700 ? APInt::getAllOnesValue(VT.getVectorNumElements())
3701 : APInt(1, 1);
3702 return ComputeNumSignBits(Op, DemandedElts, Depth);
3703}
3704
3705unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
3706 unsigned Depth) const {
3707 EVT VT = Op.getValueType();
3708 assert((VT.isInteger() || VT.isFloatingPoint()) && "Invalid VT!");
3709 unsigned VTBits = VT.getScalarSizeInBits();
3710 unsigned NumElts = DemandedElts.getBitWidth();
3711 unsigned Tmp, Tmp2;
3712 unsigned FirstAnswer = 1;
3713
3714 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
3715 const APInt &Val = C->getAPIntValue();
3716 return Val.getNumSignBits();
3717 }
3718
3719 if (Depth >= MaxRecursionDepth)
3720 return 1; // Limit search depth.
3721
3722 if (!DemandedElts || VT.isScalableVector())
3723 return 1; // No demanded elts, better to assume we don't know anything.
3724
3725 unsigned Opcode = Op.getOpcode();
3726 switch (Opcode) {
3727 default: break;
3728 case ISD::AssertSext:
3729 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
3730 return VTBits-Tmp+1;
3731 case ISD::AssertZext:
3732 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
3733 return VTBits-Tmp;
3734
3735 case ISD::BUILD_VECTOR:
3736 Tmp = VTBits;
3737 for (unsigned i = 0, e = Op.getNumOperands(); (i < e) && (Tmp > 1); ++i) {
3738 if (!DemandedElts[i])
3739 continue;
3740
3741 SDValue SrcOp = Op.getOperand(i);
3742 Tmp2 = ComputeNumSignBits(SrcOp, Depth + 1);
3743
3744 // BUILD_VECTOR can implicitly truncate sources, we must handle this.
3745 if (SrcOp.getValueSizeInBits() != VTBits) {
3746 assert(SrcOp.getValueSizeInBits() > VTBits &&
3747 "Expected BUILD_VECTOR implicit truncation");
3748 unsigned ExtraBits = SrcOp.getValueSizeInBits() - VTBits;
3749 Tmp2 = (Tmp2 > ExtraBits ? Tmp2 - ExtraBits : 1);
3750 }
3751 Tmp = std::min(Tmp, Tmp2);
3752 }
3753 return Tmp;
3754
3755 case ISD::VECTOR_SHUFFLE: {
3756 // Collect the minimum number of sign bits that are shared by every vector
3757 // element referenced by the shuffle.
3758 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
3759 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
3760 assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
3761 for (unsigned i = 0; i != NumElts; ++i) {
3762 int M = SVN->getMaskElt(i);
3763 if (!DemandedElts[i])
3764 continue;
3765 // For UNDEF elements, we don't know anything about the common state of
3766 // the shuffle result.
3767 if (M < 0)
3768 return 1;
3769 if ((unsigned)M < NumElts)
3770 DemandedLHS.setBit((unsigned)M % NumElts);
3771 else
3772 DemandedRHS.setBit((unsigned)M % NumElts);
3773 }
3774 Tmp = std::numeric_limits<unsigned>::max();
3775 if (!!DemandedLHS)
3776 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1);
3777 if (!!DemandedRHS) {
3778 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1);
3779 Tmp = std::min(Tmp, Tmp2);
3780 }
3781 // If we don't know anything, early out and try computeKnownBits fall-back.
3782 if (Tmp == 1)
3783 break;
3784 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3785 return Tmp;
3786 }
3787
3788 case ISD::BITCAST: {
3789 SDValue N0 = Op.getOperand(0);
3790 EVT SrcVT = N0.getValueType();
3791 unsigned SrcBits = SrcVT.getScalarSizeInBits();
3792
3793 // Ignore bitcasts from unsupported types.
3794 if (!(SrcVT.isInteger() || SrcVT.isFloatingPoint()))
3795 break;
3796
3797 // Fast handling of 'identity' bitcasts.
3798 if (VTBits == SrcBits)
3799 return ComputeNumSignBits(N0, DemandedElts, Depth + 1);
3800
3801 bool IsLE = getDataLayout().isLittleEndian();
3802
3803 // Bitcast 'large element' scalar/vector to 'small element' vector.
3804 if ((SrcBits % VTBits) == 0) {
3805 assert(VT.isVector() && "Expected bitcast to vector");
3806
3807 unsigned Scale = SrcBits / VTBits;
3808 APInt SrcDemandedElts(NumElts / Scale, 0);
3809 for (unsigned i = 0; i != NumElts; ++i)
3810 if (DemandedElts[i])
3811 SrcDemandedElts.setBit(i / Scale);
3812
3813 // Fast case - sign splat can be simply split across the small elements.
3814 Tmp = ComputeNumSignBits(N0, SrcDemandedElts, Depth + 1);
3815 if (Tmp == SrcBits)
3816 return VTBits;
3817
3818 // Slow case - determine how far the sign extends into each sub-element.
3819 Tmp2 = VTBits;
3820 for (unsigned i = 0; i != NumElts; ++i)
3821 if (DemandedElts[i]) {
3822 unsigned SubOffset = i % Scale;
3823 SubOffset = (IsLE ? ((Scale - 1) - SubOffset) : SubOffset);
3824 SubOffset = SubOffset * VTBits;
3825 if (Tmp <= SubOffset)
3826 return 1;
3827 Tmp2 = std::min(Tmp2, Tmp - SubOffset);
3828 }
3829 return Tmp2;
3830 }
3831 break;
3832 }
3833
3834 case ISD::SIGN_EXTEND:
3835 Tmp = VTBits - Op.getOperand(0).getScalarValueSizeInBits();
3836 return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1) + Tmp;
3837 case ISD::SIGN_EXTEND_INREG:
3838 // Max of the input and what this extends.
3839 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits();
3840 Tmp = VTBits-Tmp+1;
3841 Tmp2 = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
3842 return std::max(Tmp, Tmp2);
3843 case ISD::SIGN_EXTEND_VECTOR_INREG: {
3844 SDValue Src = Op.getOperand(0);
3845 EVT SrcVT = Src.getValueType();
3846 APInt DemandedSrcElts = DemandedElts.zextOrSelf(SrcVT.getVectorNumElements());
3847 Tmp = VTBits - SrcVT.getScalarSizeInBits();
3848 return ComputeNumSignBits(Src, DemandedSrcElts, Depth+1) + Tmp;
3849 }
3850 case ISD::SRA:
3851 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3852 // SRA X, C -> adds C sign bits.
3853 if (const APInt *ShAmt =
3854 getValidMinimumShiftAmountConstant(Op, DemandedElts))
3855 Tmp = std::min<uint64_t>(Tmp + ShAmt->getZExtValue(), VTBits);
3856 return Tmp;
3857 case ISD::SHL:
3858 if (const APInt *ShAmt =
3859 getValidMaximumShiftAmountConstant(Op, DemandedElts)) {
3860 // shl destroys sign bits, ensure it doesn't shift out all sign bits.
3861 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3862 if (ShAmt->ult(Tmp))
3863 return Tmp - ShAmt->getZExtValue();
3864 }
3865 break;
3866 case ISD::AND:
3867 case ISD::OR:
3868 case ISD::XOR: // NOT is handled here.
3869 // Logical binary ops preserve the number of sign bits at the worst.
3870 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
3871 if (Tmp != 1) {
3872 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1);
3873 FirstAnswer = std::min(Tmp, Tmp2);
3874 // We computed what we know about the sign bits as our first
3875 // answer. Now proceed to the generic code that uses
3876 // computeKnownBits, and pick whichever answer is better.
3877 }
3878 break;
3879
3880 case ISD::SELECT:
3881 case ISD::VSELECT:
3882 Tmp = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1);
3883 if (Tmp == 1) return 1; // Early out.
3884 Tmp2 = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1);
3885 return std::min(Tmp, Tmp2);
3886 case ISD::SELECT_CC:
3887 Tmp = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1);
3888 if (Tmp == 1) return 1; // Early out.
3889 Tmp2 = ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth+1);
3890 return std::min(Tmp, Tmp2);
3891
3892 case ISD::SMIN:
3893 case ISD::SMAX: {
3894 // If we have a clamp pattern, we know that the number of sign bits will be
3895 // the minimum of the clamp min/max range.
3896 bool IsMax = (Opcode == ISD::SMAX);
3897 ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr;
3898 if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts)))
3899 if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX))
3900 CstHigh =
3901 isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts);
3902 if (CstLow && CstHigh) {
3903 if (!IsMax)
3904 std::swap(CstLow, CstHigh);
3905 if (CstLow->getAPIntValue().sle(CstHigh->getAPIntValue())) {
3906 Tmp = CstLow->getAPIntValue().getNumSignBits();
3907 Tmp2 = CstHigh->getAPIntValue().getNumSignBits();
3908 return std::min(Tmp, Tmp2);
3909 }
3910 }
3911
3912 // Fallback - just get the minimum number of sign bits of the operands.
3913 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3914 if (Tmp == 1)
3915 return 1; // Early out.
3916 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
3917 return std::min(Tmp, Tmp2);
3918 }
3919 case ISD::UMIN:
3920 case ISD::UMAX:
3921 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3922 if (Tmp == 1)
3923 return 1; // Early out.
3924 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
3925 return std::min(Tmp, Tmp2);
3926 case ISD::SADDO:
3927 case ISD::UADDO:
3928 case ISD::SSUBO:
3929 case ISD::USUBO:
3930 case ISD::SMULO:
3931 case ISD::UMULO:
3932 if (Op.getResNo() != 1)
3933 break;
3934 // The boolean result conforms to getBooleanContents. Fall through.
3935 // If setcc returns 0/-1, all bits are sign bits.
3936 // We know that we have an integer-based boolean since these operations
3937 // are only available for integer.
3938 if (TLI->getBooleanContents(VT.isVector(), false) ==
3939 TargetLowering::ZeroOrNegativeOneBooleanContent)
3940 return VTBits;
3941 break;
3942 case ISD::SETCC:
3943 case ISD::STRICT_FSETCC:
3944 case ISD::STRICT_FSETCCS: {
3945 unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0;
3946 // If setcc returns 0/-1, all bits are sign bits.
3947 if (TLI->getBooleanContents(Op.getOperand(OpNo).getValueType()) ==
3948 TargetLowering::ZeroOrNegativeOneBooleanContent)
3949 return VTBits;
3950 break;
3951 }
3952 case ISD::ROTL:
3953 case ISD::ROTR:
3954 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3955
3956 // If we're rotating a 0/-1 value, then it stays a 0/-1 value.
3957 if (Tmp == VTBits)
3958 return VTBits;
3959
3960 if (ConstantSDNode *C =
3961 isConstOrConstSplat(Op.getOperand(1), DemandedElts)) {
3962 unsigned RotAmt = C->getAPIntValue().urem(VTBits);
3963
3964 // Handle rotate right by N like a rotate left by 32-N.
3965 if (Opcode == ISD::ROTR)
3966 RotAmt = (VTBits - RotAmt) % VTBits;
3967
3968 // If we aren't rotating out all of the known-in sign bits, return the
3969 // number that are left. This handles rotl(sext(x), 1) for example.
3970 if (Tmp > (RotAmt + 1)) return (Tmp - RotAmt);
3971 }
3972 break;
3973 case ISD::ADD:
3974 case ISD::ADDC:
3975 // Add can have at most one carry bit. Thus we know that the output
3976 // is, at worst, one more bit than the inputs.
3977 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3978 if (Tmp == 1) return 1; // Early out.
3979
3980 // Special case decrementing a value (ADD X, -1):
3981 if (ConstantSDNode *CRHS =
3982 isConstOrConstSplat(Op.getOperand(1), DemandedElts))
3983 if (CRHS->isAllOnesValue()) {
3984 KnownBits Known =
3985 computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3986
3987 // If the input is known to be 0 or 1, the output is 0/-1, which is all
3988 // sign bits set.
3989 if ((Known.Zero | 1).isAllOnesValue())
3990 return VTBits;
3991
3992 // If we are subtracting one from a positive number, there is no carry
3993 // out of the result.
3994 if (Known.isNonNegative())
3995 return Tmp;
3996 }
3997
3998 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
3999 if (Tmp2 == 1) return 1; // Early out.
4000 return std::min(Tmp, Tmp2) - 1;
4001 case ISD::SUB:
4002 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
4003 if (Tmp2 == 1) return 1; // Early out.
4004
4005 // Handle NEG.
4006 if (ConstantSDNode *CLHS =
4007 isConstOrConstSplat(Op.getOperand(0), DemandedElts))
4008 if (CLHS->isNullValue()) {
4009 KnownBits Known =
4010 computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
4011 // If the input is known to be 0 or 1, the output is 0/-1, which is all
4012 // sign bits set.
4013 if ((Known.Zero | 1).isAllOnesValue())
4014 return VTBits;
4015
4016 // If the input is known to be positive (the sign bit is known clear),
4017 // the output of the NEG has the same number of sign bits as the input.
4018 if (Known.isNonNegative())
4019 return Tmp2;
4020
4021 // Otherwise, we treat this like a SUB.
4022 }
4023
4024 // Sub can have at most one carry bit. Thus we know that the output
4025 // is, at worst, one more bit than the inputs.
4026 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
4027 if (Tmp == 1) return 1; // Early out.
4028 return std::min(Tmp, Tmp2) - 1;
4029 case ISD::MUL: {
4030 // The output of the Mul can be at most twice the valid bits in the inputs.
4031 unsigned SignBitsOp0 = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
4032 if (SignBitsOp0 == 1)
4033 break;
4034 unsigned SignBitsOp1 = ComputeNumSignBits(Op.getOperand(1), Depth + 1);
4035 if (SignBitsOp1 == 1)
4036 break;
4037 unsigned OutValidBits =
4038 (VTBits - SignBitsOp0 + 1) + (VTBits - SignBitsOp1 + 1);
4039 return OutValidBits > VTBits ? 1 : VTBits - OutValidBits + 1;
4040 }
4041 case ISD::SREM:
4042 // The sign bit is the LHS's sign bit, except when the result of the
4043 // remainder is zero. The magnitude of the result should be less than or
4044 // equal to the magnitude of the LHS. Therefore, the result should have
4045 // at least as many sign bits as the left hand side.
4046 return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
4047 case ISD::TRUNCATE: {
4048 // Check if the sign bits of source go down as far as the truncated value.
4049 unsigned NumSrcBits = Op.getOperand(0).getScalarValueSizeInBits();
4050 unsigned NumSrcSignBits = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
4051 if (NumSrcSignBits > (NumSrcBits - VTBits))
4052 return NumSrcSignBits - (NumSrcBits - VTBits);
4053 break;
4054 }
4055 case ISD::EXTRACT_ELEMENT: {
4056 const int KnownSign = ComputeNumSignBits(Op.getOperand(0), Depth+1);
4057 const int BitWidth = Op.getValueSizeInBits();
4058 const int Items = Op.getOperand(0).getValueSizeInBits() / BitWidth;
4059
4060 // Get reverse index (starting from 1), Op1 value indexes elements from
4061 // little end. Sign starts at big end.
4062 const int rIndex = Items - 1 - Op.getConstantOperandVal(1);
4063
4064 // If the sign portion ends in our element the subtraction gives the correct
4065 // result. Otherwise it gives either a negative or a > bitwidth result.
4066 return std::max(std::min(KnownSign - rIndex * BitWidth, BitWidth), 0);
4067 }
4068 case ISD::INSERT_VECTOR_ELT: {
4069 // If we know the element index, split the demand between the
4070 // source vector and the inserted element, otherwise assume we need
4071 // the original demanded vector elements and the value.
4072 SDValue InVec = Op.getOperand(0);
4073 SDValue InVal = Op.getOperand(1);
4074 SDValue EltNo = Op.getOperand(2);
4075 bool DemandedVal = true;
4076 APInt DemandedVecElts = DemandedElts;
4077 auto *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
4078 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
4079 unsigned EltIdx = CEltNo->getZExtValue();
4080 DemandedVal = !!DemandedElts[EltIdx];
4081 DemandedVecElts.clearBit(EltIdx);
4082 }
4083 Tmp = std::numeric_limits<unsigned>::max();
4084 if (DemandedVal) {
4085 // TODO - handle implicit truncation of inserted elements.
4086 if (InVal.getScalarValueSizeInBits() != VTBits)
4087 break;
4088 Tmp2 = ComputeNumSignBits(InVal, Depth + 1);
4089 Tmp = std::min(Tmp, Tmp2);
4090 }
4091 if (!!DemandedVecElts) {
4092 Tmp2 = ComputeNumSignBits(InVec, DemandedVecElts, Depth + 1);
4093 Tmp = std::min(Tmp, Tmp2);
4094 }
4095 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
4096 return Tmp;
4097 }
4098 case ISD::EXTRACT_VECTOR_ELT: {
4099 SDValue InVec = Op.getOperand(0);
4100 SDValue EltNo = Op.getOperand(1);
4101 EVT VecVT = InVec.getValueType();
4102 // ComputeNumSignBits not yet implemented for scalable vectors.
4103 if (VecVT.isScalableVector())
4104 break;
4105 const unsigned BitWidth = Op.getValueSizeInBits();
4106 const unsigned EltBitWidth = Op.getOperand(0).getScalarValueSizeInBits();
4107 const unsigned NumSrcElts = VecVT.getVectorNumElements();
4108
4109 // If BitWidth > EltBitWidth the value is anyext:ed, and we do not know
4110 // anything about sign bits. But if the sizes match we can derive knowledge
4111 // about sign bits from the vector operand.
4112 if (BitWidth != EltBitWidth)
4113 break;
4114
4115 // If we know the element index, just demand that vector element, else for
4116 // an unknown element index, ignore DemandedElts and demand them all.
4117 APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts);
4118 auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
4119 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
4120 DemandedSrcElts =
4121 APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue());
4122
4123 return ComputeNumSignBits(InVec, DemandedSrcElts, Depth + 1);
4124 }
4125 case ISD::EXTRACT_SUBVECTOR: {
4126 // Offset the demanded elts by the subvector index.
4127 SDValue Src = Op.getOperand(0);
4128 // Bail until we can represent demanded elements for scalable vectors.
4129 if (Src.getValueType().isScalableVector())
4130 break;
4131 uint64_t Idx = Op.getConstantOperandVal(1);
4132 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
4133 APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
4134 return ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1);
4135 }
4136 case ISD::CONCAT_VECTORS: {
4137 // Determine the minimum number of sign bits across all demanded
4138 // elts of the input vectors. Early out if the result is already 1.
4139 Tmp = std::numeric_limits<unsigned>::max();
4140 EVT SubVectorVT = Op.getOperand(0).getValueType();
4141 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
4142 unsigned NumSubVectors = Op.getNumOperands();
4143 for (unsigned i = 0; (i < NumSubVectors) && (Tmp > 1); ++i) {
4144 APInt DemandedSub =
4145 DemandedElts.extractBits(NumSubVectorElts, i * NumSubVectorElts);
4146 if (!DemandedSub)
4147 continue;
4148 Tmp2 = ComputeNumSignBits(Op.getOperand(i), DemandedSub, Depth + 1);
4149 Tmp = std::min(Tmp, Tmp2);
4150 }
4151 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
4152 return Tmp;
4153 }
4154 case ISD::INSERT_SUBVECTOR: {
4155 // Demand any elements from the subvector and the remainder from the src it's
4156 // inserted into.
4157 SDValue Src = Op.getOperand(0);
4158 SDValue Sub = Op.getOperand(1);
4159 uint64_t Idx = Op.getConstantOperandVal(2);
4160 unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
4161 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
4162 APInt DemandedSrcElts = DemandedElts;
4163 DemandedSrcElts.insertBits(APInt::getNullValue(NumSubElts), Idx);
4164
4165 Tmp = std::numeric_limits<unsigned>::max();
4166 if (!!DemandedSubElts) {
4167 Tmp = ComputeNumSignBits(Sub, DemandedSubElts, Depth + 1);
4168 if (Tmp == 1)
4169 return 1; // early-out
4170 }
4171 if (!!DemandedSrcElts) {
4172 Tmp2 = ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1);
4173 Tmp = std::min(Tmp, Tmp2);
4174 }
4175 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
4176 return Tmp;
4177 }
4178 case ISD::ATOMIC_CMP_SWAP:
4179 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
4180 case ISD::ATOMIC_SWAP:
4181 case ISD::ATOMIC_LOAD_ADD:
4182 case ISD::ATOMIC_LOAD_SUB:
4183 case ISD::ATOMIC_LOAD_AND:
4184 case ISD::ATOMIC_LOAD_CLR:
4185 case ISD::ATOMIC_LOAD_OR:
4186 case ISD::ATOMIC_LOAD_XOR:
4187 case ISD::ATOMIC_LOAD_NAND:
4188 case ISD::ATOMIC_LOAD_MIN:
4189 case ISD::ATOMIC_LOAD_MAX:
4190 case ISD::ATOMIC_LOAD_UMIN:
4191 case ISD::ATOMIC_LOAD_UMAX:
4192 case ISD::ATOMIC_LOAD: {
4193 Tmp = cast<AtomicSDNode>(Op)->getMemoryVT().getScalarSizeInBits();
4194 // If we are looking at the loaded value.
4195 if (Op.getResNo() == 0) {
4196 if (Tmp == VTBits)
4197 return 1; // early-out
4198 if (TLI->getExtendForAtomicOps() == ISD::SIGN_EXTEND)
4199 return VTBits - Tmp + 1;
4200 if (TLI->getExtendForAtomicOps() == ISD::ZERO_EXTEND)
4201 return VTBits - Tmp;
4202 }
4203 break;
4204 }
4205 }
4206
4207 // If we are looking at the loaded value of the SDNode.
4208 if (Op.getResNo() == 0) {
4209 // Handle LOADX separately here. EXTLOAD case will fallthrough.
4210 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
4211 unsigned ExtType = LD->getExtensionType();
4212 switch (ExtType) {
4213 default: break;
4214 case ISD::SEXTLOAD: // e.g. i16->i32 = '17' bits known.
4215 Tmp = LD->getMemoryVT().getScalarSizeInBits();
4216 return VTBits - Tmp + 1;
4217 case ISD::ZEXTLOAD: // e.g. i16->i32 = '16' bits known.
4218 Tmp = LD->getMemoryVT().getScalarSizeInBits();
4219 return VTBits - Tmp;
4220 case ISD::NON_EXTLOAD:
4221 if (const Constant *Cst = TLI->getTargetConstantFromLoad(LD)) {
4222 // We only need to handle vectors - computeKnownBits should handle
4223 // scalar cases.
4224 Type *CstTy = Cst->getType();
4225 if (CstTy->isVectorTy() &&
4226 (NumElts * VTBits) == CstTy->getPrimitiveSizeInBits()) {
4227 Tmp = VTBits;
4228 for (unsigned i = 0; i != NumElts; ++i) {
4229 if (!DemandedElts[i])
4230 continue;
4231 if (Constant *Elt = Cst->getAggregateElement(i)) {
4232 if (auto *CInt = dyn_cast<ConstantInt>(Elt)) {
4233 const APInt &Value = CInt->getValue();
4234 Tmp = std::min(Tmp, Value.getNumSignBits());
4235 continue;
4236 }
4237 if (auto *CFP = dyn_cast<ConstantFP>(Elt)) {
4238 APInt Value = CFP->getValueAPF().bitcastToAPInt();
4239 Tmp = std::min(Tmp, Value.getNumSignBits());
4240 continue;
4241 }
4242 }
4243 // Unknown type. Conservatively assume no bits match sign bit.
4244 return 1;
4245 }
4246 return Tmp;
4247 }
4248 }
4249 break;
4250 }
4251 }
4252 }
4253
4254 // Allow the target to implement this method for its nodes.
4255 if (Opcode >= ISD::BUILTIN_OP_END ||
4256 Opcode == ISD::INTRINSIC_WO_CHAIN ||
4257 Opcode == ISD::INTRINSIC_W_CHAIN ||
4258 Opcode == ISD::INTRINSIC_VOID) {
4259 unsigned NumBits =
4260 TLI->ComputeNumSignBitsForTargetNode(Op, DemandedElts, *this, Depth);
4261 if (NumBits > 1)
4262 FirstAnswer = std::max(FirstAnswer, NumBits);
4263 }
4264
4265 // Finally, if we can prove that the top bits of the result are 0's or 1's,
4266 // use this information.
4267 KnownBits Known = computeKnownBits(Op, DemandedElts, Depth);
4268
4269 APInt Mask;
4270 if (Known.isNonNegative()) { // sign bit is 0
4271 Mask = Known.Zero;
4272 } else if (Known.isNegative()) { // sign bit is 1;
4273 Mask = Known.One;
4274 } else {
4275 // Nothing known.
4276 return FirstAnswer;
4277 }
4278
4279 // Okay, we know that the sign bit in Mask is set. Use CLO to determine
4280 // the number of identical bits in the top of the input value.
4281 Mask <<= Mask.getBitWidth()-VTBits;
4282 return std::max(FirstAnswer, Mask.countLeadingOnes());
4283}
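
(Illustrative sketch, not part of SelectionDAG.cpp: ComputeNumSignBits is commonly used to prove that a truncation preserves the signed value. The helper name truncIsSignedNoop is hypothetical.)

static bool truncIsSignedNoop(SDValue Op, unsigned NarrowBits,
                              SelectionDAG &DAG) {
  // Truncating to NarrowBits keeps the signed value iff at least
  // FullBits - NarrowBits + 1 of the top bits are copies of the sign bit.
  unsigned FullBits = Op.getScalarValueSizeInBits();
  return DAG.ComputeNumSignBits(Op) > FullBits - NarrowBits;
}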
4284
4285bool SelectionDAG::isGuaranteedNotToBeUndefOrPoison(SDValue Op, bool PoisonOnly,
4286 unsigned Depth) const {
4287 // Early out for FREEZE.
4288 if (Op.getOpcode() == ISD::FREEZE)
4289 return true;
4290
4291 // TODO: Assume we don't know anything for now.
4292 EVT VT = Op.getValueType();
4293 if (VT.isScalableVector())
4294 return false;
4295
4296 APInt DemandedElts = VT.isVector()
4297 ? APInt::getAllOnesValue(VT.getVectorNumElements())
4298 : APInt(1, 1);
4299 return isGuaranteedNotToBeUndefOrPoison(Op, DemandedElts, PoisonOnly, Depth);
4300}
4301
4302bool SelectionDAG::isGuaranteedNotToBeUndefOrPoison(SDValue Op,
4303 const APInt &DemandedElts,
4304 bool PoisonOnly,
4305 unsigned Depth) const {
4306 unsigned Opcode = Op.getOpcode();
4307
4308 // Early out for FREEZE.
4309 if (Opcode == ISD::FREEZE)
4310 return true;
4311
4312 if (Depth >= MaxRecursionDepth)
4313 return false; // Limit search depth.
4314
4315 if (isIntOrFPConstant(Op))
4316 return true;
4317
4318 switch (Opcode) {
4319 case ISD::UNDEF:
4320 return PoisonOnly;
4321
4322 case ISD::BUILD_VECTOR:
4323 // NOTE: BUILD_VECTOR has implicit truncation of wider scalar elements -
4324 // this shouldn't affect the result.
4325 for (unsigned i = 0, e = Op.getNumOperands(); i < e; ++i) {
4326 if (!DemandedElts[i])
4327 continue;
4328 if (!isGuaranteedNotToBeUndefOrPoison(Op.getOperand(i), PoisonOnly,
4329 Depth + 1))
4330 return false;
4331 }
4332 return true;
4333
4334 // TODO: Search for noundef attributes from library functions.
4335
4336 // TODO: Pointers dereferenced by ISD::LOAD/STORE ops are noundef.
4337
4338 default:
4339 // Allow the target to implement this method for its nodes.
4340 if (Opcode >= ISD::BUILTIN_OP_END || Opcode == ISD::INTRINSIC_WO_CHAIN ||
4341 Opcode == ISD::INTRINSIC_W_CHAIN || Opcode == ISD::INTRINSIC_VOID)
4342 return TLI->isGuaranteedNotToBeUndefOrPoisonForTargetNode(
4343 Op, DemandedElts, *this, PoisonOnly, Depth);
4344 break;
4345 }
4346
4347 return false;
4348}
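
(Illustrative sketch, not part of SelectionDAG.cpp: the main client of this query is freeze simplification, since freeze(X) is X whenever X can never be undef or poison. The helper name simplifyFreeze is hypothetical.)

static SDValue simplifyFreeze(SDValue N, SelectionDAG &DAG) {
  // N is assumed to be an ISD::FREEZE node.
  SDValue X = N.getOperand(0);
  if (DAG.isGuaranteedNotToBeUndefOrPoison(X, /*PoisonOnly=*/false))
    return X;
  return SDValue();
}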
4349
4350bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const {
4351 if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) ||
4352 !isa<ConstantSDNode>(Op.getOperand(1)))
4353 return false;
4354
4355 if (Op.getOpcode() == ISD::OR &&
4356 !MaskedValueIsZero(Op.getOperand(0), Op.getConstantOperandAPInt(1)))
4357 return false;
4358
4359 return true;
4360}
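
(Illustrative sketch, not part of SelectionDAG.cpp: address lowering code typically uses isBaseWithConstantOffset to split an address into a base and an immediate. The helper name splitBaseAndOffset is hypothetical.)

static bool splitBaseAndOffset(SDValue Addr, SDValue &Base, int64_t &Offset,
                               SelectionDAG &DAG) {
  // Accepts (add Base, C) and the disjoint (or Base, C) form validated above.
  if (!DAG.isBaseWithConstantOffset(Addr))
    return false;
  Base = Addr.getOperand(0);
  Offset = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
  return true;
}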
4361
4362bool SelectionDAG::isKnownNeverNaN(SDValue Op, bool SNaN, unsigned Depth) const {
4363 // If we're told that NaNs won't happen, assume they won't.
4364 if (getTarget().Options.NoNaNsFPMath || Op->getFlags().hasNoNaNs())
4365 return true;
4366
4367 if (Depth >= MaxRecursionDepth)
4368 return false; // Limit search depth.
4369
4370 // TODO: Handle vectors.
4371 // If the value is a constant, we can obviously see if it is a NaN or not.
4372 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) {
4373 return !C->getValueAPF().isNaN() ||
4374 (SNaN && !C->getValueAPF().isSignaling());
4375 }
4376
4377 unsigned Opcode = Op.getOpcode();
4378 switch (Opcode) {
4379 case ISD::FADD:
4380 case ISD::FSUB:
4381 case ISD::FMUL:
4382 case ISD::FDIV:
4383 case ISD::FREM:
4384 case ISD::FSIN:
4385 case ISD::FCOS: {
4386 if (SNaN)
4387 return true;
4388 // TODO: Need isKnownNeverInfinity
4389 return false;
4390 }
4391 case ISD::FCANONICALIZE:
4392 case ISD::FEXP:
4393 case ISD::FEXP2:
4394 case ISD::FTRUNC:
4395 case ISD::FFLOOR:
4396 case ISD::FCEIL:
4397 case ISD::FROUND:
4398 case ISD::FROUNDEVEN:
4399 case ISD::FRINT:
4400 case ISD::FNEARBYINT: {
4401 if (SNaN)
4402 return true;
4403 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4404 }
4405 case ISD::FABS:
4406 case ISD::FNEG:
4407 case ISD::FCOPYSIGN: {
4408 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4409 }
4410 case ISD::SELECT:
4411 return isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
4412 isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
4413 case ISD::FP_EXTEND:
4414 case ISD::FP_ROUND: {
4415 if (SNaN)
4416 return true;
4417 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4418 }
4419 case ISD::SINT_TO_FP:
4420 case ISD::UINT_TO_FP:
4421 return true;
4422 case ISD::FMA:
4423 case ISD::FMAD: {
4424 if (SNaN)
4425 return true;
4426 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
4427 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
4428 isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
4429 }
4430 case ISD::FSQRT: // Needs the operand known to be positive
4431 case ISD::FLOG:
4432 case ISD::FLOG2:
4433 case ISD::FLOG10:
4434 case ISD::FPOWI:
4435 case ISD::FPOW: {
4436 if (SNaN)
4437 return true;
4438 // TODO: Refine on operand
4439 return false;
4440 }
4441 case ISD::FMINNUM:
4442 case ISD::FMAXNUM: {
4443 // Only one needs to be known not-nan, since it will be returned if the
4444 // other ends up being one.
4445 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) ||
4446 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
4447 }
4448 case ISD::FMINNUM_IEEE:
4449 case ISD::FMAXNUM_IEEE: {
4450 if (SNaN)
4451 return true;
4452 // This can return a NaN if either operand is an sNaN, or if both operands
4453 // are NaN.
4454 return (isKnownNeverNaN(Op.getOperand(0), false, Depth + 1) &&
4455 isKnownNeverSNaN(Op.getOperand(1), Depth + 1)) ||
4456 (isKnownNeverNaN(Op.getOperand(1), false, Depth + 1) &&
4457 isKnownNeverSNaN(Op.getOperand(0), Depth + 1));
4458 }
4459 case ISD::FMINIMUM:
4460 case ISD::FMAXIMUM: {
4461 // TODO: Does this quiet or return the original NaN as-is?
4462 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
4463 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
4464 }
4465 case ISD::EXTRACT_VECTOR_ELT: {
4466 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4467 }
4468 default:
4469 if (Opcode >= ISD::BUILTIN_OP_END ||
4470 Opcode == ISD::INTRINSIC_WO_CHAIN ||
4471 Opcode == ISD::INTRINSIC_W_CHAIN ||
4472 Opcode == ISD::INTRINSIC_VOID) {
4473 return TLI->isKnownNeverNaNForTargetNode(Op, *this, SNaN, Depth);
4474 }
4475
4476 return false;
4477 }
4478}
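
(Illustrative sketch, not part of SelectionDAG.cpp: one way a combine can use isKnownNeverNaN is to decide that an FP comparison never has to consider the unordered case. The helper name comparisonIgnoresNaNs is hypothetical.)

static bool comparisonIgnoresNaNs(SDValue LHS, SDValue RHS,
                                  SelectionDAG &DAG) {
  // If neither operand can ever be a NaN, ordered and unordered predicates
  // behave identically.
  return DAG.isKnownNeverNaN(LHS) && DAG.isKnownNeverNaN(RHS);
}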
4479
4480bool SelectionDAG::isKnownNeverZeroFloat(SDValue Op) const {
4481 assert(Op.getValueType().isFloatingPoint() &&
4482 "Floating point type expected");
4483
4484 // If the value is a constant, we can obviously see if it is a zero or not.
4485 // TODO: Add BuildVector support.
4486 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
4487 return !C->isZero();
4488 return false;
4489}
4490
4491bool SelectionDAG::isKnownNeverZero(SDValue Op) const {
4492 assert(!Op.getValueType().isFloatingPoint() &&
4493 "Floating point types unsupported - use isKnownNeverZeroFloat");
4494
4495 // If the value is a constant, we can obviously see if it is a zero or not.
4496 if (ISD::matchUnaryPredicate(
4497 Op, [](ConstantSDNode *C) { return !C->isNullValue(); }))
4498 return true;
4499
4500 // TODO: Recognize more cases here.
4501 switch (Op.getOpcode()) {
4502 default: break;
4503 case ISD::OR:
4504 if (isKnownNeverZero(Op.getOperand(1)) ||
4505 isKnownNeverZero(Op.getOperand(0)))
4506 return true;
4507 break;
4508 }
4509
4510 return false;
4511}
4512
4513bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
4514 // Check the obvious case.
4515 if (A == B) return true;
4516
4517 // Check for negative and positive zero.
4518 if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
4519 if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
4520 if (CA->isZero() && CB->isZero()) return true;
4521
4522 // Otherwise they may not be equal.
4523 return false;
4524}
4525
4526// FIXME: unify with llvm::haveNoCommonBitsSet.
4527// FIXME: could also handle masked merge pattern (X & ~M) op (Y & M)
4528bool SelectionDAG::haveNoCommonBitsSet(SDValue A, SDValue B) const {
4529 assert(A.getValueType() == B.getValueType() &&(static_cast<void> (0))
4530 "Values must have the same type")(static_cast<void> (0));
4531 return KnownBits::haveNoCommonBitsSet(computeKnownBits(A),
4532 computeKnownBits(B));
4533}
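
// Editor's illustrative sketch (not part of the original source): a minimal,
// self-contained demonstration of the KnownBits query used above. Two values
// have no common bits set when every bit position is known zero in at least
// one of them. The helper name below is hypothetical.
LLVM_ATTRIBUTE_UNUSED static bool exampleDisjointKnownBits() {
  KnownBits LHS(8), RHS(8);
  LHS.Zero = APInt(8, 0xF0); // bits 4-7 known zero, bits 0-3 unknown
  RHS.Zero = APInt(8, 0x0F); // bits 0-3 known zero, bits 4-7 unknown
  // Every bit is known zero on at least one side, so no bit can be set in
  // both values simultaneously.
  return KnownBits::haveNoCommonBitsSet(LHS, RHS); // true
}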
4534
4535static SDValue FoldSTEP_VECTOR(const SDLoc &DL, EVT VT, SDValue Step,
4536 SelectionDAG &DAG) {
4537 if (cast<ConstantSDNode>(Step)->isNullValue())
4538 return DAG.getConstant(0, DL, VT);
4539
4540 return SDValue();
4541}
4542
4543static SDValue FoldBUILD_VECTOR(const SDLoc &DL, EVT VT,
4544 ArrayRef<SDValue> Ops,
4545 SelectionDAG &DAG) {
4546 int NumOps = Ops.size();
4547 assert(NumOps != 0 && "Can't build an empty vector!")(static_cast<void> (0));
4548 assert(!VT.isScalableVector() &&(static_cast<void> (0))
4549 "BUILD_VECTOR cannot be used with scalable types")(static_cast<void> (0));
4550 assert(VT.getVectorNumElements() == (unsigned)NumOps &&(static_cast<void> (0))
4551 "Incorrect element count in BUILD_VECTOR!")(static_cast<void> (0));
4552
4553 // BUILD_VECTOR of UNDEFs is UNDEF.
4554 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
4555 return DAG.getUNDEF(VT);
4556
4557 // BUILD_VECTOR of sequential extracts from the same vector and type is the identity.
4558 SDValue IdentitySrc;
4559 bool IsIdentity = true;
4560 for (int i = 0; i != NumOps; ++i) {
4561 if (Ops[i].getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
4562 Ops[i].getOperand(0).getValueType() != VT ||
4563 (IdentitySrc && Ops[i].getOperand(0) != IdentitySrc) ||
4564 !isa<ConstantSDNode>(Ops[i].getOperand(1)) ||
4565 cast<ConstantSDNode>(Ops[i].getOperand(1))->getAPIntValue() != i) {
4566 IsIdentity = false;
4567 break;
4568 }
4569 IdentitySrc = Ops[i].getOperand(0);
4570 }
4571 if (IsIdentity)
4572 return IdentitySrc;
4573
4574 return SDValue();
4575}
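
// Editor's note (illustrative, not part of the original source): the identity
// fold above recognizes the round trip
//   BUILD_VECTOR (extract_vector_elt V, 0), (extract_vector_elt V, 1), ...
// where every element is extracted, in order, from the same source vector V
// of the result type, and simply returns V.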
4576
4577/// Try to simplify vector concatenation to an input value, undef, or build
4578/// vector.
4579static SDValue foldCONCAT_VECTORS(const SDLoc &DL, EVT VT,
4580 ArrayRef<SDValue> Ops,
4581 SelectionDAG &DAG) {
4582 assert(!Ops.empty() && "Can't concatenate an empty list of vectors!")(static_cast<void> (0));
4583 assert(llvm::all_of(Ops,(static_cast<void> (0))
4584 [Ops](SDValue Op) {(static_cast<void> (0))
4585 return Ops[0].getValueType() == Op.getValueType();(static_cast<void> (0))
4586 }) &&(static_cast<void> (0))
4587 "Concatenation of vectors with inconsistent value types!")(static_cast<void> (0));
4588 assert((Ops[0].getValueType().getVectorElementCount() * Ops.size()) ==(static_cast<void> (0))
4589 VT.getVectorElementCount() &&(static_cast<void> (0))
4590 "Incorrect element count in vector concatenation!")(static_cast<void> (0));
4591
4592 if (Ops.size() == 1)
4593 return Ops[0];
4594
4595 // Concat of UNDEFs is UNDEF.
4596 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
4597 return DAG.getUNDEF(VT);
4598
4599 // Scan the operands and look for extract operations from a single source
4600 // that correspond to insertion at the same location via this concatenation:
4601 // concat (extract X, 0*subvec_elts), (extract X, 1*subvec_elts), ...
4602 SDValue IdentitySrc;
4603 bool IsIdentity = true;
4604 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
4605 SDValue Op = Ops[i];
4606 unsigned IdentityIndex = i * Op.getValueType().getVectorMinNumElements();
4607 if (Op.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
4608 Op.getOperand(0).getValueType() != VT ||
4609 (IdentitySrc && Op.getOperand(0) != IdentitySrc) ||
4610 Op.getConstantOperandVal(1) != IdentityIndex) {
4611 IsIdentity = false;
4612 break;
4613 }
4614 assert((!IdentitySrc || IdentitySrc == Op.getOperand(0)) &&(static_cast<void> (0))
4615 "Unexpected identity source vector for concat of extracts")(static_cast<void> (0));
4616 IdentitySrc = Op.getOperand(0);
4617 }
4618 if (IsIdentity) {
4619 assert(IdentitySrc && "Failed to set source vector of extracts")(static_cast<void> (0));
4620 return IdentitySrc;
4621 }
4622
4623 // The code below this point is only designed to work for fixed width
4624 // vectors, so we bail out for now.
4625 if (VT.isScalableVector())
4626 return SDValue();
4627
4628 // A CONCAT_VECTOR with all UNDEF/BUILD_VECTOR operands can be
4629 // simplified to one big BUILD_VECTOR.
4630 // FIXME: Add support for SCALAR_TO_VECTOR as well.
4631 EVT SVT = VT.getScalarType();
4632 SmallVector<SDValue, 16> Elts;
4633 for (SDValue Op : Ops) {
4634 EVT OpVT = Op.getValueType();
4635 if (Op.isUndef())
4636 Elts.append(OpVT.getVectorNumElements(), DAG.getUNDEF(SVT));
4637 else if (Op.getOpcode() == ISD::BUILD_VECTOR)
4638 Elts.append(Op->op_begin(), Op->op_end());
4639 else
4640 return SDValue();
4641 }
4642
4643 // BUILD_VECTOR requires all inputs to be of the same type, find the
4644 // maximum type and extend them all.
4645 for (SDValue Op : Elts)
4646 SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT);
4647
4648 if (SVT.bitsGT(VT.getScalarType())) {
4649 for (SDValue &Op : Elts) {
4650 if (Op.isUndef())
4651 Op = DAG.getUNDEF(SVT);
4652 else
4653 Op = DAG.getTargetLoweringInfo().isZExtFree(Op.getValueType(), SVT)
4654 ? DAG.getZExtOrTrunc(Op, DL, SVT)
4655 : DAG.getSExtOrTrunc(Op, DL, SVT);
4656 }
4657 }
4658
4659 SDValue V = DAG.getBuildVector(VT, DL, Elts);
4660 NewSDValueDbgMsg(V, "New node fold concat vectors: ", &DAG);
4661 return V;
4662}
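
// Editor's note (illustrative, not part of the original source): two of the
// folds above, written as DAG patterns:
//   concat (extract_subvector V, 0), (extract_subvector V, N) -> V
//     when V has 2*N elements of the result type, and
//   concat (build_vector a, b), (build_vector c, d) -> build_vector a, b, c, d
//     with any narrower scalar operands extended to the widest one first.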
4663
4664/// Gets or creates the specified node.
4665SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT) {
4666 FoldingSetNodeID ID;
4667 AddNodeIDNode(ID, Opcode, getVTList(VT), None);
4668 void *IP = nullptr;
4669 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
4670 return SDValue(E, 0);
4671
4672 auto *N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(),
4673 getVTList(VT));
4674 CSEMap.InsertNode(N, IP);
4675
4676 InsertNode(N);
4677 SDValue V = SDValue(N, 0);
4678 NewSDValueDbgMsg(V, "Creating new node: ", this);
4679 return V;
4680}
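
// Editor's usage sketch (illustrative, not part of the original source): the
// FoldingSetNodeID/CSEMap lookup above uniques nodes, so asking for the same
// (opcode, value type) pair twice returns the existing SDNode. Assuming a
// SelectionDAG &DAG and an SDLoc DL:
//   SDValue A = DAG.getNode(ISD::UNDEF, DL, MVT::i32);
//   SDValue B = DAG.getNode(ISD::UNDEF, DL, MVT::i32);
//   assert(A.getNode() == B.getNode() && "CSE returned the existing node");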
4681
4682SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
4683 SDValue Operand) {
4684 SDNodeFlags Flags;
4685 if (Inserter)
4686 Flags = Inserter->getFlags();
4687 return getNode(Opcode, DL, VT, Operand, Flags);
4688}
4689
4690SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
4691 SDValue Operand, const SDNodeFlags Flags) {
4692 assert(Operand.getOpcode() != ISD::DELETED_NODE &&(static_cast<void> (0))
4693 "Operand is DELETED_NODE!")(static_cast<void> (0));
4694 // Constant fold unary operations with an integer constant operand. Even
4695 // an opaque constant will be folded, because the folding of unary operations
4696 // doesn't create new constants with different values. Nevertheless, the
4697 // opaque flag is preserved during folding to prevent future folding with
4698 // other constants.
4699 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand)) {
4700 const APInt &Val = C->getAPIntValue();
4701 switch (Opcode) {
4702 default: break;
4703 case ISD::SIGN_EXTEND:
4704 return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT,
4705 C->isTargetOpcode(), C->isOpaque());
4706 case ISD::TRUNCATE:
4707 if (C->isOpaque())
4708 break;
4709 LLVM_FALLTHROUGH[[gnu::fallthrough]];
4710 case ISD::ZERO_EXTEND:
4711 return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT,
4712 C->isTargetOpcode(), C->isOpaque());
4713 case ISD::ANY_EXTEND:
4714 // Some targets like RISCV prefer to sign extend some types.
4715 if (TLI->isSExtCheaperThanZExt(Operand.getValueType(), VT))
4716 return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT,
4717 C->isTargetOpcode(), C->isOpaque());
4718 return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT,
4719 C->isTargetOpcode(), C->isOpaque());
4720 case ISD::UINT_TO_FP:
4721 case ISD::SINT_TO_FP: {
4722 APFloat apf(EVTToAPFloatSemantics(VT),
4723 APInt::getNullValue(VT.getSizeInBits()));
4724 (void)apf.convertFromAPInt(Val,
4725 Opcode==ISD::SINT_TO_FP,
4726 APFloat::rmNearestTiesToEven);
4727 return getConstantFP(apf, DL, VT);
4728 }
4729 case ISD::BITCAST:
4730 if (VT == MVT::f16 && C->getValueType(0) == MVT::i16)
4731 return getConstantFP(APFloat(APFloat::IEEEhalf(), Val), DL, VT);
4732 if (VT == MVT::f32 && C->getValueType(0) == MVT::i32)
4733 return getConstantFP(APFloat(APFloat::IEEEsingle(), Val), DL, VT);
4734 if (VT == MVT::f64 && C->getValueType(0) == MVT::i64)
4735 return getConstantFP(APFloat(APFloat::IEEEdouble(), Val), DL, VT);
4736 if (VT == MVT::f128 && C->getValueType(0) == MVT::i128)
4737 return getConstantFP(APFloat(APFloat::IEEEquad(), Val), DL, VT);
4738 break;
4739 case ISD::ABS:
4740 return getConstant(Val.abs(), DL, VT, C->isTargetOpcode(),
4741 C->isOpaque());
4742 case ISD::BITREVERSE:
4743 return getConstant(Val.reverseBits(), DL, VT, C->isTargetOpcode(),
4744 C->isOpaque());
4745 case ISD::BSWAP:
4746 return getConstant(Val.byteSwap(), DL, VT, C->isTargetOpcode(),
4747 C->isOpaque());
4748 case ISD::CTPOP:
4749 return getConstant(Val.countPopulation(), DL, VT, C->isTargetOpcode(),
4750 C->isOpaque());
4751 case ISD::CTLZ:
4752 case ISD::CTLZ_ZERO_UNDEF:
4753 return getConstant(Val.countLeadingZeros(), DL, VT, C->isTargetOpcode(),
4754 C->isOpaque());
4755 case ISD::CTTZ:
4756 case ISD::CTTZ_ZERO_UNDEF:
4757 return getConstant(Val.countTrailingZeros(), DL, VT, C->isTargetOpcode(),
4758 C->isOpaque());
4759 case ISD::FP16_TO_FP: {
4760 bool Ignored;
4761 APFloat FPV(APFloat::IEEEhalf(),
4762 (Val.getBitWidth() == 16) ? Val : Val.trunc(16));
4763
4764 // This can return overflow, underflow, or inexact; we don't care.
4765 // FIXME need to be more flexible about rounding mode.
4766 (void)FPV.convert(EVTToAPFloatSemantics(VT),
4767 APFloat::rmNearestTiesToEven, &Ignored);
4768 return getConstantFP(FPV, DL, VT);
4769 }
4770 case ISD::STEP_VECTOR: {
4771 if (SDValue V = FoldSTEP_VECTOR(DL, VT, Operand, *this))
4772 return V;
4773 break;
4774 }
4775 }
4776 }
4777
4778 // Constant fold unary operations with a floating point constant operand.
4779 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand)) {
4780 APFloat V = C->getValueAPF(); // make copy
4781 switch (Opcode) {
4782 case ISD::FNEG:
4783 V.changeSign();
4784 return getConstantFP(V, DL, VT);
4785 case ISD::FABS:
4786 V.clearSign();
4787 return getConstantFP(V, DL, VT);
4788 case ISD::FCEIL: {
4789 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive);
4790 if (fs == APFloat::opOK || fs == APFloat::opInexact)
4791 return getConstantFP(V, DL, VT);
4792 break;
4793 }
4794 case ISD::FTRUNC: {
4795 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero);
4796 if (fs == APFloat::opOK || fs == APFloat::opInexact)
4797 return getConstantFP(V, DL, VT);
4798 break;
4799 }
4800 case ISD::FFLOOR: {
4801 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative);
4802 if (fs == APFloat::opOK || fs == APFloat::opInexact)
4803 return getConstantFP(V, DL, VT);
4804 break;
4805 }
4806 case ISD::FP_EXTEND: {
4807 bool ignored;
4808 // This can return overflow, underflow, or inexact; we don't care.
4809 // FIXME need to be more flexible about rounding mode.
4810 (void)V.convert(EVTToAPFloatSemantics(VT),
4811 APFloat::rmNearestTiesToEven, &ignored);
4812 return getConstantFP(V, DL, VT);
4813 }
4814 case ISD::FP_TO_SINT:
4815 case ISD::FP_TO_UINT: {
4816 bool ignored;
4817 APSInt IntVal(VT.getSizeInBits(), Opcode == ISD::FP_TO_UINT);
4818 // FIXME need to be more flexible about rounding mode.
4819 APFloat::opStatus s =
4820 V.convertToInteger(IntVal, APFloat::rmTowardZero, &ignored);
4821 if (s == APFloat::opInvalidOp) // inexact is OK, in fact usual
4822 break;
4823 return getConstant(IntVal, DL, VT);
4824 }
4825 case ISD::BITCAST:
4826 if (VT == MVT::i16 && C->getValueType(0) == MVT::f16)
4827 return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL, VT);
4828 if (VT == MVT::i16 && C->getValueType(0) == MVT::bf16)
4829 return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL, VT);
4830 if (VT == MVT::i32 && C->getValueType(0) == MVT::f32)
4831 return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), DL, VT);
4832 if (VT == MVT::i64 && C->getValueType(0) == MVT::f64)
4833 return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT);
4834 break;
4835 case ISD::FP_TO_FP16: {
4836 bool Ignored;
4837 // This can return overflow, underflow, or inexact; we don't care.
4838 // FIXME need to be more flexible about rounding mode.
4839 (void)V.convert(APFloat::IEEEhalf(),
4840 APFloat::rmNearestTiesToEven, &Ignored);
4841 return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT);
4842 }
4843 }
4844 }
4845
4846 // Constant fold unary operations with a vector integer or float operand.
4847 switch (Opcode) {
4848 default:
4849 // FIXME: Entirely reasonable to perform folding of other unary
4850 // operations here as the need arises.
4851 break;
4852 case ISD::FNEG:
4853 case ISD::FABS:
4854 case ISD::FCEIL:
4855 case ISD::FTRUNC:
4856 case ISD::FFLOOR:
4857 case ISD::FP_EXTEND:
4858 case ISD::FP_TO_SINT:
4859 case ISD::FP_TO_UINT:
4860 case ISD::TRUNCATE:
4861 case ISD::ANY_EXTEND:
4862 case ISD::ZERO_EXTEND:
4863 case ISD::SIGN_EXTEND:
4864 case ISD::UINT_TO_FP:
4865 case ISD::SINT_TO_FP:
4866 case ISD::ABS:
4867 case ISD::BITREVERSE:
4868 case ISD::BSWAP:
4869 case ISD::CTLZ:
4870 case ISD::CTLZ_ZERO_UNDEF:
4871 case ISD::CTTZ:
4872 case ISD::CTTZ_ZERO_UNDEF:
4873 case ISD::CTPOP: {
4874 SDValue Ops = {Operand};
4875 if (SDValue Fold = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops))
4876 return Fold;
4877 }
4878 }
4879
4880 unsigned OpOpcode = Operand.getNode()->getOpcode();
4881 switch (Opcode) {
4882 case ISD::STEP_VECTOR:
4883 assert(VT.isScalableVector() &&(static_cast<void> (0))
4884 "STEP_VECTOR can only be used with scalable types")(static_cast<void> (0));
4885 assert(OpOpcode == ISD::TargetConstant &&(static_cast<void> (0))
4886 VT.getVectorElementType() == Operand.getValueType() &&(static_cast<void> (0))
4887 "Unexpected step operand")(static_cast<void> (0));
4888 break;
4889 case ISD::FREEZE:
4890 assert(VT == Operand.getValueType() && "Unexpected VT!")(static_cast<void> (0));
4891 break;
4892 case ISD::TokenFactor:
4893 case ISD::MERGE_VALUES:
4894 case ISD::CONCAT_VECTORS:
4895 return Operand; // Factor, merge or concat of one node? No need.
4896 case ISD::BUILD_VECTOR: {
4897 // Attempt to simplify BUILD_VECTOR.
4898 SDValue Ops[] = {Operand};
4899 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
4900 return V;
4901 break;
4902 }
4903 case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node")__builtin_unreachable();
4904 case ISD::FP_EXTEND:
4905 assert(VT.isFloatingPoint() &&(static_cast<void> (0))
4906 Operand.getValueType().isFloatingPoint() && "Invalid FP cast!")(static_cast<void> (0));
4907 if (Operand.getValueType() == VT) return Operand; // noop conversion.
4908 assert((!VT.isVector() ||(static_cast<void> (0))
4909 VT.getVectorElementCount() ==(static_cast<void> (0))
4910 Operand.getValueType().getVectorElementCount()) &&(static_cast<void> (0))
4911 "Vector element count mismatch!")(static_cast<void> (0));
4912 assert(Operand.getValueType().bitsLT(VT) &&(static_cast<void> (0))
4913 "Invalid fpext node, dst < src!")(static_cast<void> (0));
4914 if (Operand.isUndef())
4915 return getUNDEF(VT);
4916 break;
4917 case ISD::FP_TO_SINT:
4918 case ISD::FP_TO_UINT:
4919 if (Operand.isUndef())
4920 return getUNDEF(VT);
4921 break;
4922 case ISD::SINT_TO_FP:
4923 case ISD::UINT_TO_FP:
4924 // [us]itofp(undef) = 0, because the result value is bounded.
4925 if (Operand.isUndef())
4926 return getConstantFP(0.0, DL, VT);
4927 break;
4928 case ISD::SIGN_EXTEND:
4929 assert(VT.isInteger() && Operand.getValueType().isInteger() &&(static_cast<void> (0))
4930 "Invalid SIGN_EXTEND!")(static_cast<void> (0));
4931 assert(VT.isVector() == Operand.getValueType().isVector() &&(static_cast<void> (0))
4932 "SIGN_EXTEND result type type should be vector iff the operand "(static_cast<void> (0))
4933 "type is vector!")(static_cast<void> (0));
4934 if (Operand.getValueType() == VT) return Operand; // noop extension
4935 assert((!VT.isVector() ||(static_cast<void> (0))
4936 VT.getVectorElementCount() ==(static_cast<void> (0))
4937 Operand.getValueType().getVectorElementCount()) &&(static_cast<void> (0))
4938 "Vector element count mismatch!")(static_cast<void> (0));
4939 assert(Operand.getValueType().bitsLT(VT) &&(static_cast<void> (0))
4940 "Invalid sext node, dst < src!")(static_cast<void> (0));
4941 if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND)
4942 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
4943 if (OpOpcode == ISD::UNDEF)
4944 // sext(undef) = 0, because the top bits will all be the same.
4945 return getConstant(0, DL, VT);
4946 break;
4947 case ISD::ZERO_EXTEND:
4948 assert(VT.isInteger() && Operand.getValueType().isInteger() &&(static_cast<void> (0))
4949 "Invalid ZERO_EXTEND!")(static_cast<void> (0));
4950 assert(VT.isVector() == Operand.getValueType().isVector() &&(static_cast<void> (0))
4951 "ZERO_EXTEND result type type should be vector iff the operand "(static_cast<void> (0))
4952 "type is vector!")(static_cast<void> (0));
4953 if (Operand.getValueType() == VT) return Operand; // noop extension
4954 assert((!VT.isVector() ||(static_cast<void> (0))
4955 VT.getVectorElementCount() ==(static_cast<void> (0))
4956 Operand.getValueType().getVectorElementCount()) &&(static_cast<void> (0))
4957 "Vector element count mismatch!")(static_cast<void> (0));
4958 assert(Operand.getValueType().bitsLT(VT) &&(static_cast<void> (0))
4959 "Invalid zext node, dst < src!")(static_cast<void> (0));
4960 if (OpOpcode == ISD::ZERO_EXTEND) // (zext (zext x)) -> (zext x)
4961 return getNode(ISD::ZERO_EXTEND, DL, VT, Operand.getOperand(0));
4962 if (OpOpcode == ISD::UNDEF)
4963 // zext(undef) = 0, because the top bits will be zero.
4964 return getConstant(0, DL, VT);
4965 break;
4966 case ISD::ANY_EXTEND:
4967 assert(VT.isInteger() && Operand.getValueType().isInteger() &&(static_cast<void> (0))
4968 "Invalid ANY_EXTEND!")(static_cast<void> (0));
4969 assert(VT.isVector() == Operand.getValueType().isVector() &&(static_cast<void> (0))
4970 "ANY_EXTEND result type type should be vector iff the operand "(static_cast<void> (0))
4971 "type is vector!")(static_cast<void> (0));
4972 if (Operand.getValueType() == VT) return Operand; // noop extension
4973 assert((!VT.isVector() ||(static_cast<void> (0))
4974 VT.getVectorElementCount() ==(static_cast<void> (0))
4975 Operand.getValueType().getVectorElementCount()) &&(static_cast<void> (0))
4976 "Vector element count mismatch!")(static_cast<void> (0));
4977 assert(Operand.getValueType().bitsLT(VT) &&(static_cast<void> (0))
4978 "Invalid anyext node, dst < src!")(static_cast<void> (0));
4979
4980 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
4981 OpOpcode == ISD::ANY_EXTEND)
4982 // (ext (zext x)) -> (zext x) and (ext (sext x)) -> (sext x)
4983 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
4984 if (OpOpcode == ISD::UNDEF)
4985 return getUNDEF(VT);
4986
4987 // (ext (trunc x)) -> x
4988 if (OpOpcode == ISD::TRUNCATE) {
4989 SDValue OpOp = Operand.getOperand(0);
4990 if (OpOp.getValueType() == VT) {
4991 transferDbgValues(Operand, OpOp);
4992 return OpOp;
4993 }
4994 }
4995 break;
4996 case ISD::TRUNCATE:
4997 assert(VT.isInteger() && Operand.getValueType().isInteger() &&(static_cast<void> (0))
4998 "Invalid TRUNCATE!")(static_cast<void> (0));
4999 assert(VT.isVector() == Operand.getValueType().isVector() &&(static_cast<void> (0))
5000 "TRUNCATE result type type should be vector iff the operand "(static_cast<void> (0))
5001 "type is vector!")(static_cast<void> (0));
5002 if (Operand.getValueType() == VT) return Operand; // noop truncate
5003 assert((!VT.isVector() ||(static_cast<void> (0))
5004 VT.getVectorElementCount() ==(static_cast<void> (0))
5005 Operand.getValueType().getVectorElementCount()) &&(static_cast<void> (0))
5006 "Vector element count mismatch!")(static_cast<void> (0));
5007 assert(Operand.getValueType().bitsGT(VT) &&(static_cast<void> (0))
5008 "Invalid truncate node, src < dst!")(static_cast<void> (0));
5009 if (OpOpcode == ISD::TRUNCATE)
5010 return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0));
5011 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
5012 OpOpcode == ISD::ANY_EXTEND) {
5013 // If the source is smaller than the dest, we still need an extend.
5014 if (Operand.getOperand(0).getValueType().getScalarType()
5015 .bitsLT(VT.getScalarType()))
5016 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
5017 if (Operand.getOperand(0).getValueType().bitsGT(VT))
5018 return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0));
5019 return Operand.getOperand(0);
5020 }
5021 if (OpOpcode == ISD::UNDEF)
5022 return getUNDEF(VT);
5023 break;
5024 case ISD::ANY_EXTEND_VECTOR_INREG:
5025 case ISD::ZERO_EXTEND_VECTOR_INREG:
5026 case ISD::SIGN_EXTEND_VECTOR_INREG:
5027 assert(VT.isVector() && "This DAG node is restricted to vector types.")(static_cast<void> (0));
5028 assert(Operand.getValueType().bitsLE(VT) &&(static_cast<void> (0))
5029 "The input must be the same size or smaller than the result.")(static_cast<void> (0));
5030 assert(VT.getVectorMinNumElements() <(static_cast<void> (0))
5031 Operand.getValueType().getVectorMinNumElements() &&(static_cast<void> (0))
5032 "The destination vector type must have fewer lanes than the input.")(static_cast<void> (0));
5033 break;
5034 case ISD::ABS:
5035 assert(VT.isInteger() && VT == Operand.getValueType() &&(static_cast<void> (0))
5036 "Invalid ABS!")(static_cast<void> (0));
5037 if (OpOpcode == ISD::UNDEF)
5038 return getUNDEF(VT);
5039 break;
5040 case ISD::BSWAP:
5041 assert(VT.isInteger() && VT == Operand.getValueType() &&(static_cast<void> (0))
5042 "Invalid BSWAP!")(static_cast<void> (0));
5043 assert((VT.getScalarSizeInBits() % 16 == 0) &&(static_cast<void> (0))
5044 "BSWAP types must be a multiple of 16 bits!")(static_cast<void> (0));
5045 if (OpOpcode == ISD::UNDEF)
5046 return getUNDEF(VT);
5047 break;
5048 case ISD::BITREVERSE:
5049 assert(VT.isInteger() && VT == Operand.getValueType() &&(static_cast<void> (0))
5050 "Invalid BITREVERSE!")(static_cast<void> (0));
5051 if (OpOpcode == ISD::UNDEF)
5052 return getUNDEF(VT);
5053 break;
5054 case ISD::BITCAST:
5055 // Basic sanity checking.
5056 assert(VT.getSizeInBits() == Operand.getValueSizeInBits() &&(static_cast<void> (0))
5057 "Cannot BITCAST between types of different sizes!")(static_cast<void> (0));
5058 if (VT == Operand.getValueType()) return Operand; // noop conversion.
5059 if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x)
5060 return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0));
5061 if (OpOpcode == ISD::UNDEF)
5062 return getUNDEF(VT);
5063 break;
5064 case ISD::SCALAR_TO_VECTOR:
5065 assert(VT.isVector() && !Operand.getValueType().isVector() &&(static_cast<void> (0))
5066 (VT.getVectorElementType() == Operand.getValueType() ||(static_cast<void> (0))
5067 (VT.getVectorElementType().isInteger() &&(static_cast<void> (0))
5068 Operand.getValueType().isInteger() &&(static_cast<void> (0))
5069 VT.getVectorElementType().bitsLE(Operand.getValueType()))) &&(static_cast<void> (0))
5070 "Illegal SCALAR_TO_VECTOR node!")(static_cast<void> (0));
5071 if (OpOpcode == ISD::UNDEF)
5072 return getUNDEF(VT);
5073 // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined.
5074 if (OpOpcode == ISD::EXTRACT_VECTOR_ELT &&
5075 isa<ConstantSDNode>(Operand.getOperand(1)) &&
5076 Operand.getConstantOperandVal(1) == 0 &&
5077 Operand.getOperand(0).getValueType() == VT)
5078 return Operand.getOperand(0);
5079 break;
5080 case ISD::FNEG:
5081 // Negation of an unknown bag of bits is still completely undefined.
5082 if (OpOpcode == ISD::UNDEF)
5083 return getUNDEF(VT);
5084
5085 if (OpOpcode == ISD::FNEG) // --X -> X
5086 return Operand.getOperand(0);
5087 break;
5088 case ISD::FABS:
5089 if (OpOpcode == ISD::FNEG) // abs(-X) -> abs(X)
5090 return getNode(ISD::FABS, DL, VT, Operand.getOperand(0));
5091 break;
5092 case ISD::VSCALE:
5093 assert(VT == Operand.getValueType() && "Unexpected VT!")(static_cast<void> (0));
5094 break;
5095 case ISD::CTPOP:
5096 if (Operand.getValueType().getScalarType() == MVT::i1)
5097 return Operand;
5098 break;
5099 case ISD::CTLZ:
5100 case ISD::CTTZ:
5101 if (Operand.getValueType().getScalarType() == MVT::i1)
5102 return getNOT(DL, Operand, Operand.getValueType());
5103 break;
5104 case ISD::VECREDUCE_SMIN:
5105 case ISD::VECREDUCE_UMAX:
5106 if (Operand.getValueType().getScalarType() == MVT::i1)
5107 return getNode(ISD::VECREDUCE_OR, DL, VT, Operand);
5108 break;
5109 case ISD::VECREDUCE_SMAX:
5110 case ISD::VECREDUCE_UMIN:
5111 if (Operand.getValueType().getScalarType() == MVT::i1)
5112 return getNode(ISD::VECREDUCE_AND, DL, VT, Operand);
5113 break;
5114 }
5115
5116 SDNode *N;
5117 SDVTList VTs = getVTList(VT);
5118 SDValue Ops[] = {Operand};
5119 if (VT != MVT::Glue) { // Don't CSE flag producing nodes
5120 FoldingSetNodeID ID;
5121 AddNodeIDNode(ID, Opcode, VTs, Ops);
5122 void *IP = nullptr;
5123 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
5124 E->intersectFlagsWith(Flags);
5125 return SDValue(E, 0);
5126 }
5127
5128 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5129 N->setFlags(Flags);
5130 createOperands(N, Ops);
5131 CSEMap.InsertNode(N, IP);
5132 } else {
5133 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5134 createOperands(N, Ops);
5135 }
5136
5137 InsertNode(N);
5138 SDValue V = SDValue(N, 0);
5139 NewSDValueDbgMsg(V, "Creating new node: ", this);
5140 return V;
5141}
5142
5143static llvm::Optional<APInt> FoldValue(unsigned Opcode, const APInt &C1,
5144 const APInt &C2) {
5145 switch (Opcode) {
5146 case ISD::ADD: return C1 + C2;
5147 case ISD::SUB: return C1 - C2;
5148 case ISD::MUL: return C1 * C2;
5149 case ISD::AND: return C1 & C2;
5150 case ISD::OR: return C1 | C2;
5151 case ISD::XOR: return C1 ^ C2;
5152 case ISD::SHL: return C1 << C2;
5153 case ISD::SRL: return C1.lshr(C2);
5154 case ISD::SRA: return C1.ashr(C2);
5155 case ISD::ROTL: return C1.rotl(C2);
5156 case ISD::ROTR: return C1.rotr(C2);
5157 case ISD::SMIN: return C1.sle(C2) ? C1 : C2;
5158 case ISD::SMAX: return C1.sge(C2) ? C1 : C2;
5159 case ISD::UMIN: return C1.ule(C2) ? C1 : C2;
5160 case ISD::UMAX: return C1.uge(C2) ? C1 : C2;
5161 case ISD::SADDSAT: return C1.sadd_sat(C2);
5162 case ISD::UADDSAT: return C1.uadd_sat(C2);
5163 case ISD::SSUBSAT: return C1.ssub_sat(C2);
5164 case ISD::USUBSAT: return C1.usub_sat(C2);
5165 case ISD::UDIV:
5166 if (!C2.getBoolValue())
5167 break;
5168 return C1.udiv(C2);
5169 case ISD::UREM:
5170 if (!C2.getBoolValue())
5171 break;
5172 return C1.urem(C2);
5173 case ISD::SDIV:
5174 if (!C2.getBoolValue())
5175 break;
5176 return C1.sdiv(C2);
5177 case ISD::SREM:
5178 if (!C2.getBoolValue())
5179 break;
5180 return C1.srem(C2);
5181 case ISD::MULHS: {
5182 unsigned FullWidth = C1.getBitWidth() * 2;
5183 APInt C1Ext = C1.sext(FullWidth);
5184 APInt C2Ext = C2.sext(FullWidth);
5185 return (C1Ext * C2Ext).extractBits(C1.getBitWidth(), C1.getBitWidth());
5186 }
5187 case ISD::MULHU: {
5188 unsigned FullWidth = C1.getBitWidth() * 2;
5189 APInt C1Ext = C1.zext(FullWidth);
5190 APInt C2Ext = C2.zext(FullWidth);
5191 return (C1Ext * C2Ext).extractBits(C1.getBitWidth(), C1.getBitWidth());
5192 }
5193 }
5194 return llvm::None;
5195}
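
// Editor's illustrative sketch (not part of the original source): the
// MULHS/MULHU cases above take the high half of the double-width product.
// A minimal, self-contained version of the unsigned case (the helper name is
// hypothetical):
LLVM_ATTRIBUTE_UNUSED static APInt exampleMulHighUnsigned(const APInt &C1,
                                                          const APInt &C2) {
  unsigned BW = C1.getBitWidth();
  // Widen both operands to 2*BW bits, multiply, then keep the top BW bits.
  return (C1.zext(2 * BW) * C2.zext(2 * BW)).extractBits(BW, BW);
}
// For example, with BW == 8, 0x40 * 0x40 == 0x1000, so the high byte is 0x10.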
5196
5197SDValue SelectionDAG::FoldSymbolOffset(unsigned Opcode, EVT VT,
5198 const GlobalAddressSDNode *GA,
5199 const SDNode *N2) {
5200 if (GA->getOpcode() != ISD::GlobalAddress)
5201 return SDValue();
5202 if (!TLI->isOffsetFoldingLegal(GA))
5203 return SDValue();
5204 auto *C2 = dyn_cast<ConstantSDNode>(N2);
5205 if (!C2)
5206 return SDValue();
5207 int64_t Offset = C2->getSExtValue();
5208 switch (Opcode) {
5209 case ISD::ADD: break;
5210 case ISD::SUB: Offset = -uint64_t(Offset); break;
5211 default: return SDValue();
5212 }
5213 return getGlobalAddress(GA->getGlobal(), SDLoc(C2), VT,
5214 GA->getOffset() + uint64_t(Offset));
5215}
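
// Editor's note (illustrative, not part of the original source): written as
// DAG patterns, the fold above is
//   (add (GlobalAddress @g, off), c) -> (GlobalAddress @g, off + c)
//   (sub (GlobalAddress @g, off), c) -> (GlobalAddress @g, off - c)
// and it only fires when the target reports offset folding as legal for @g.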
5216
5217bool SelectionDAG::isUndef(unsigned Opcode, ArrayRef<SDValue> Ops) {
5218 switch (Opcode) {
5219 case ISD::SDIV:
5220 case ISD::UDIV:
5221 case ISD::SREM:
5222 case ISD::UREM: {
5223 // If a divisor is zero/undef or any element of a divisor vector is
5224 // zero/undef, the whole op is undef.
5225 assert(Ops.size() == 2 && "Div/rem should have 2 operands")(static_cast<void> (0));
5226 SDValue Divisor = Ops[1];
5227 if (Divisor.isUndef() || isNullConstant(Divisor))
5228 return true;
5229
5230 return ISD::isBuildVectorOfConstantSDNodes(Divisor.getNode()) &&
5231 llvm::any_of(Divisor->op_values(),
5232 [](SDValue V) { return V.isUndef() ||
5233 isNullConstant(V); });
5234 // TODO: Handle signed overflow.
5235 }
5236 // TODO: Handle oversized shifts.
5237 default:
5238 return false;
5239 }
5240}
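
// Editor's note (illustrative, not part of the original source): examples of
// the rule above, written as DAG patterns:
//   (udiv x, 0)                         -> undef
//   (sdiv x, undef)                     -> undef
//   (urem x, (build_vector 1, 0, 3, 4)) -> undef  ; one lane divides by zero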
5241
5242SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL,
5243 EVT VT, ArrayRef<SDValue> Ops) {
5244 // If the opcode is a target-specific ISD node, there's nothing we can
5245 // do here and the operand rules may not line up with the below, so
5246 // bail early.
5247 // We can't create a scalar CONCAT_VECTORS so skip it. It will break
5248 // for concats involving SPLAT_VECTOR. Concats of BUILD_VECTORS are handled by
5249 // foldCONCAT_VECTORS in getNode before this is called.
5250 if (Opcode >= ISD::BUILTIN_OP_END || Opcode == ISD::CONCAT_VECTORS)
5251 return SDValue();
5252
5253 // For now, the array Ops should only contain two values.
5254 // This enforcement will be removed once this function is merged with
5255 // FoldConstantVectorArithmetic
5256 if (Ops.size() != 2)
5257 return SDValue();
5258
5259 if (isUndef(Opcode, Ops))
5260 return getUNDEF(VT);
5261
5262 SDNode *N1 = Ops[0].getNode();
5263 SDNode *N2 = Ops[1].getNode();
5264
5265 // Handle the case of two scalars.
5266 if (auto *C1 = dyn_cast<ConstantSDNode>(N1)) {
5267 if (auto *C2 = dyn_cast<ConstantSDNode>(N2)) {
5268 if (C1->isOpaque() || C2->isOpaque())
5269 return SDValue();
5270
5271 Optional<APInt> FoldAttempt =
5272 FoldValue(Opcode, C1->getAPIntValue(), C2->getAPIntValue());
5273 if (!FoldAttempt)
5274 return SDValue();
5275
5276 SDValue Folded = getConstant(FoldAttempt.getValue(), DL, VT);
5277 assert((!Folded || !VT.isVector()) &&(static_cast<void> (0))
5278 "Can't fold vectors ops with scalar operands")(static_cast<void> (0));
5279 return Folded;
5280 }
5281 }
5282
5283 // fold (add Sym, c) -> Sym+c
5284 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N1))
5285 return FoldSymbolOffset(Opcode, VT, GA, N2);
5286 if (TLI->isCommutativeBinOp(Opcode))
5287 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N2))
5288 return FoldSymbolOffset(Opcode, VT, GA, N1);
5289
5290 // For fixed width vectors, extract each constant element and fold them
5291 // individually. Either input may be an undef value.
5292 bool IsBVOrSV1 = N1->getOpcode() == ISD::BUILD_VECTOR ||
5293 N1->getOpcode() == ISD::SPLAT_VECTOR;
5294 if (!IsBVOrSV1 && !N1->isUndef())
5295 return SDValue();
5296 bool IsBVOrSV2 = N2->getOpcode() == ISD::BUILD_VECTOR ||
5297 N2->getOpcode() == ISD::SPLAT_VECTOR;
5298 if (!IsBVOrSV2 && !N2->isUndef())
5299 return SDValue();
5300 // If both operands are undef, that's handled the same way as scalars.
5301 if (!IsBVOrSV1 && !IsBVOrSV2)
5302 return SDValue();
5303
5304 EVT SVT = VT.getScalarType();
5305 EVT LegalSVT = SVT;
5306 if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) {
5307 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
5308 if (LegalSVT.bitsLT(SVT))
5309 return SDValue();
5310 }
5311
5312 SmallVector<SDValue, 4> Outputs;
5313 unsigned NumOps = 0;
5314 if (IsBVOrSV1)
5315 NumOps = std::max(NumOps, N1->getNumOperands());
5316 if (IsBVOrSV2)
5317 NumOps = std::max(NumOps, N2->getNumOperands());
5318 assert(NumOps != 0 && "Expected non-zero operands")(static_cast<void> (0));
5319 // Scalable vectors should only be SPLAT_VECTOR or UNDEF here. We only need
5320 // one iteration for that.
5321 assert((!VT.isScalableVector() || NumOps == 1) &&(static_cast<void> (0))
5322 "Scalable vector should only have one scalar")(static_cast<void> (0));
5323
5324 for (unsigned I = 0; I != NumOps; ++I) {
5325 // We can have a fixed length SPLAT_VECTOR and a BUILD_VECTOR so we need
5326 // to use operand 0 of the SPLAT_VECTOR for each fixed element.
5327 SDValue V1;
5328 if (N1->getOpcode() == ISD::BUILD_VECTOR)
5329 V1 = N1->getOperand(I);
5330 else if (N1->getOpcode() == ISD::SPLAT_VECTOR)
5331 V1 = N1->getOperand(0);
5332 else
5333 V1 = getUNDEF(SVT);
5334
5335 SDValue V2;
5336 if (N2->getOpcode() == ISD::BUILD_VECTOR)
5337 V2 = N2->getOperand(I);
5338 else if (N2->getOpcode() == ISD::SPLAT_VECTOR)
5339 V2 = N2->getOperand(0);
5340 else
5341 V2 = getUNDEF(SVT);
5342
5343 if (SVT.isInteger()) {
5344 if (V1.getValueType().bitsGT(SVT))
5345 V1 = getNode(ISD::TRUNCATE, DL, SVT, V1);
5346 if (V2.getValueType().bitsGT(SVT))
5347 V2 = getNode(ISD::TRUNCATE, DL, SVT, V2);
5348 }
5349
5350 if (V1.getValueType() != SVT || V2.getValueType() != SVT)
5351 return SDValue();
5352
5353 // Fold one vector element.
5354 SDValue ScalarResult = getNode(Opcode, DL, SVT, V1, V2);
5355 if (LegalSVT != SVT)
5356 ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult);
5357
5358 // Scalar folding only succeeded if the result is a constant or UNDEF.
5359 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
5360 ScalarResult.getOpcode() != ISD::ConstantFP)
5361 return SDValue();
5362 Outputs.push_back(ScalarResult);
5363 }
5364
5365 if (N1->getOpcode() == ISD::BUILD_VECTOR ||
5366 N2->getOpcode() == ISD::BUILD_VECTOR) {
5367 assert(VT.getVectorNumElements() == Outputs.size() &&(static_cast<void> (0))
5368 "Vector size mismatch!")(static_cast<void> (0));
5369
5370 // Build a big vector out of the scalar elements we generated.
5371 return getBuildVector(VT, SDLoc(), Outputs);
5372 }
5373
5374 assert((N1->getOpcode() == ISD::SPLAT_VECTOR ||(static_cast<void> (0))
5375 N2->getOpcode() == ISD::SPLAT_VECTOR) &&(static_cast<void> (0))
5376 "One operand should be a splat vector")(static_cast<void> (0));
5377
5378 assert(Outputs.size() == 1 && "Vector size mismatch!")(static_cast<void> (0));
5379 return getSplatVector(VT, SDLoc(), Outputs[0]);
5380}
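
// Editor's note (illustrative, not part of the original source): a worked
// example of the element-wise path above, assuming a fixed-length v4i32:
//   (add (build_vector 1, 2, 3, 4), (build_vector 10, 20, 30, 40))
//     -> (build_vector 11, 22, 33, 44)
// Each lane is folded through a scalar getNode call, and the whole fold is
// abandoned if any lane fails to reduce to a constant or UNDEF.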
5381
5382// TODO: Merge with FoldConstantArithmetic
5383SDValue SelectionDAG::FoldConstantVectorArithmetic(unsigned Opcode,
5384 const SDLoc &DL, EVT VT,
5385 ArrayRef<SDValue> Ops,
5386 const SDNodeFlags Flags) {
5387 // If the opcode is a target-specific ISD node, there's nothing we can
5388 // do here and the operand rules may not line up with the below, so
5389 // bail early.
5390 if (Opcode >= ISD::BUILTIN_OP_END)
5391 return SDValue();
5392
5393 if (isUndef(Opcode, Ops))
5394 return getUNDEF(VT);
5395
5396 // We can only fold vectors - maybe merge with FoldConstantArithmetic someday?
5397 if (!VT.isVector())
5398 return SDValue();
5399
5400 ElementCount NumElts = VT.getVectorElementCount();
5401
5402 auto IsScalarOrSameVectorSize = [NumElts](const SDValue &Op) {
5403 return !Op.getValueType().isVector() ||
5404 Op.getValueType().getVectorElementCount() == NumElts;
5405 };
5406
5407 auto IsConstantBuildVectorSplatVectorOrUndef = [](const SDValue &Op) {
5408 APInt SplatVal;
5409 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op);
5410 return Op.isUndef() || Op.getOpcode() == ISD::CONDCODE ||
5411 (BV && BV->isConstant()) ||
5412 (Op.getOpcode() == ISD::SPLAT_VECTOR &&
5413 ISD::isConstantSplatVector(Op.getNode(), SplatVal));
5414 };
5415
5416 // All operands must be vector types with the same number of elements as
5417 // the result type and must be either UNDEF or a build vector of constant
5418 // or UNDEF scalars.
5419 if (!llvm::all_of(Ops, IsConstantBuildVectorSplatVectorOrUndef) ||
5420 !llvm::all_of(Ops, IsScalarOrSameVectorSize))
5421 return SDValue();
5422
5423 // If we are comparing vectors, then the result needs to be an i1 boolean
5424 // that is then sign-extended back to the legal result type.
5425 EVT SVT = (Opcode == ISD::SETCC ? MVT::i1 : VT.getScalarType());
5426
5427 // Find legal integer scalar type for constant promotion and
5428 // ensure that its scalar size is at least as large as source.
5429 EVT LegalSVT = VT.getScalarType();
5430 if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) {
5431 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
5432 if (LegalSVT.bitsLT(VT.getScalarType()))
5433 return SDValue();
5434 }
5435
5436 // For scalable vector types we know we're dealing with SPLAT_VECTORs. We
5437 // only have one operand to check. For fixed-length vector types we may have
5438 // a combination of BUILD_VECTOR and SPLAT_VECTOR.
5439 unsigned NumOperands = NumElts.isScalable() ? 1 : NumElts.getFixedValue();
5440
5441 // Constant fold each scalar lane separately.
5442 SmallVector<SDValue, 4> ScalarResults;
5443 for (unsigned I = 0; I != NumOperands; I++) {
5444 SmallVector<SDValue, 4> ScalarOps;
5445 for (SDValue Op : Ops) {
5446 EVT InSVT = Op.getValueType().getScalarType();
5447 if (Op.getOpcode() != ISD::BUILD_VECTOR &&
5448 Op.getOpcode() != ISD::SPLAT_VECTOR) {
5449 // We've checked that this is UNDEF or a constant of some kind.
5450 if (Op.isUndef())
5451 ScalarOps.push_back(getUNDEF(InSVT));
5452 else
5453 ScalarOps.push_back(Op);
5454 continue;
5455 }
5456
5457 SDValue ScalarOp =
5458 Op.getOperand(Op.getOpcode() == ISD::SPLAT_VECTOR ? 0 : I);
5459 EVT ScalarVT = ScalarOp.getValueType();
5460
5461 // Build vector (integer) scalar operands may need implicit
5462 // truncation - do this before constant folding.
5463 if (ScalarVT.isInteger() && ScalarVT.bitsGT(InSVT))
5464 ScalarOp = getNode(ISD::TRUNCATE, DL, InSVT, ScalarOp);
5465
5466 ScalarOps.push_back(ScalarOp);
5467 }
5468
5469 // Constant fold the scalar operands.
5470 SDValue ScalarResult = getNode(Opcode, DL, SVT, ScalarOps, Flags);
5471
5472 // Legalize the (integer) scalar constant if necessary.
5473 if (LegalSVT != SVT)
5474 ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult);
5475
5476 // Scalar folding only succeeded if the result is a constant or UNDEF.
5477 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
5478 ScalarResult.getOpcode() != ISD::ConstantFP)
5479 return SDValue();
5480 ScalarResults.push_back(ScalarResult);
5481 }
5482
5483 SDValue V = NumElts.isScalable() ? getSplatVector(VT, DL, ScalarResults[0])
5484 : getBuildVector(VT, DL, ScalarResults);
5485 NewSDValueDbgMsg(V, "New node fold constant vector: ", this);
5486 return V;
5487}
5488
5489SDValue SelectionDAG::foldConstantFPMath(unsigned Opcode, const SDLoc &DL,
5490 EVT VT, SDValue N1, SDValue N2) {
5491 // TODO: We don't do any constant folding for strict FP opcodes here, but we
5492 // should. That will require dealing with a potentially non-default
5493 // rounding mode, checking the "opStatus" return value from the APFloat
5494 // math calculations, and possibly other variations.
5495 auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1.getNode());
5496 auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2.getNode());
5497 if (N1CFP && N2CFP) {
5498 APFloat C1 = N1CFP->getValueAPF(), C2 = N2CFP->getValueAPF();
5499 switch (Opcode) {
5500 case ISD::FADD:
5501 C1.add(C2, APFloat::rmNearestTiesToEven);
5502 return getConstantFP(C1, DL, VT);
5503 case ISD::FSUB:
5504 C1.subtract(C2, APFloat::rmNearestTiesToEven);
5505 return getConstantFP(C1, DL, VT);
5506 case ISD::FMUL:
5507 C1.multiply(C2, APFloat::rmNearestTiesToEven);
5508 return getConstantFP(C1, DL, VT);
5509 case ISD::FDIV:
5510 C1.divide(C2, APFloat::rmNearestTiesToEven);
5511 return getConstantFP(C1, DL, VT);
5512 case ISD::FREM:
5513 C1.mod(C2);
5514 return getConstantFP(C1, DL, VT);
5515 case ISD::FCOPYSIGN:
5516 C1.copySign(C2);
5517 return getConstantFP(C1, DL, VT);
5518 default: break;
5519 }
5520 }
5521 if (N1CFP && Opcode == ISD::FP_ROUND) {
5522 APFloat C1 = N1CFP->getValueAPF(); // make copy
5523 bool Unused;
5524 // This can return overflow, underflow, or inexact; we don't care.
5525 // FIXME need to be more flexible about rounding mode.
5526 (void) C1.convert(EVTToAPFloatSemantics(VT), APFloat::rmNearestTiesToEven,
5527 &Unused);
5528 return getConstantFP(C1, DL, VT);
5529 }
5530
5531 switch (Opcode) {
5532 case ISD::FSUB:
5533 // -0.0 - undef --> undef (consistent with "fneg undef")
5534 if (N1CFP && N1CFP->getValueAPF().isNegZero() && N2.isUndef())
5535 return getUNDEF(VT);
5536 LLVM_FALLTHROUGH[[gnu::fallthrough]];
5537
5538 case ISD::FADD:
5539 case ISD::FMUL:
5540 case ISD::FDIV:
5541 case ISD::FREM:
5542 // If both operands are undef, the result is undef. If 1 operand is undef,
5543 // the result is NaN. This should match the behavior of the IR optimizer.
5544 if (N1.isUndef() && N2.isUndef())
5545 return getUNDEF(VT);
5546 if (N1.isUndef() || N2.isUndef())
5547 return getConstantFP(APFloat::getNaN(EVTToAPFloatSemantics(VT)), DL, VT);
5548 }
5549 return SDValue();
5550}
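
// Editor's illustrative sketch (not part of the original source): the binary
// FP folds above all use APFloat arithmetic in round-to-nearest-even mode and
// ignore the returned opStatus. A minimal, self-contained example (the helper
// name is hypothetical):
LLVM_ATTRIBUTE_UNUSED static APFloat exampleConstantFoldFAdd() {
  APFloat C1(1.5f), C2(2.25f);
  // Mirrors the ISD::FADD case: C1 += C2 under rmNearestTiesToEven.
  C1.add(C2, APFloat::rmNearestTiesToEven); // C1 is now 3.75
  return C1;
}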
5551
5552SDValue SelectionDAG::getAssertAlign(const SDLoc &DL, SDValue Val, Align A) {
5553 assert(Val.getValueType().isInteger() && "Invalid AssertAlign!")(static_cast<void> (0));
5554
5555 // There's no need to assert on a byte-aligned pointer. All pointers are at
5556 // least byte aligned.
5557 if (A == Align(1))
5558 return Val;
5559
5560 FoldingSetNodeID ID;
5561 AddNodeIDNode(ID, ISD::AssertAlign, getVTList(Val.getValueType()), {Val});
5562 ID.AddInteger(A.value());
5563
5564 void *IP = nullptr;
5565 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
5566 return SDValue(E, 0);
5567
5568 auto *N = newSDNode<AssertAlignSDNode>(DL.getIROrder(), DL.getDebugLoc(),
5569 Val.getValueType(), A);
5570 createOperands(N, {Val});
5571
5572 CSEMap.InsertNode(N, IP);
5573 InsertNode(N);
5574
5575 SDValue V(N, 0);
5576 NewSDValueDbgMsg(V, "Creating new node: ", this);
5577 return V;
5578}
5579
5580SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
5581 SDValue N1, SDValue N2) {
5582 SDNodeFlags Flags;
5583 if (Inserter)
5584 Flags = Inserter->getFlags();
5585 return getNode(Opcode, DL, VT, N1, N2, Flags);
5586}
5587
5588SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
5589 SDValue N1, SDValue N2, const SDNodeFlags Flags) {
5590 assert(N1.getOpcode() != ISD::DELETED_NODE &&(static_cast<void> (0))
5591 N2.getOpcode() != ISD::DELETED_NODE &&(static_cast<void> (0))
5592 "Operand is DELETED_NODE!")(static_cast<void> (0));
5593 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
5594 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
5595 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
5596 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
5597
5598 // Canonicalize constant to RHS if commutative.
5599 if (TLI->isCommutativeBinOp(Opcode)) {
5600 if (N1C && !N2C) {
5601 std::swap(N1C, N2C);
5602 std::swap(N1, N2);
5603 } else if (N1CFP && !N2CFP) {
5604 std::swap(N1CFP, N2CFP);
5605 std::swap(N1, N2);
5606 }
5607 }
5608
5609 switch (Opcode) {
5610 default: break;
5611 case ISD::TokenFactor:
5612 assert(VT == MVT::Other && N1.getValueType() == MVT::Other &&(static_cast<void> (0))
5613 N2.getValueType() == MVT::Other && "Invalid token factor!")(static_cast<void> (0));
5614 // Fold trivial token factors.
5615 if (N1.getOpcode() == ISD::EntryToken) return N2;
5616 if (N2.getOpcode() == ISD::EntryToken) return N1;
5617 if (N1 == N2) return N1;
5618 break;
5619 case ISD::BUILD_VECTOR: {
5620 // Attempt to simplify BUILD_VECTOR.
5621 SDValue Ops[] = {N1, N2};
5622 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
5623 return V;
5624 break;
5625 }
5626 case ISD::CONCAT_VECTORS: {
5627 SDValue Ops[] = {N1, N2};
5628 if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this))
5629 return V;
5630 break;
5631 }
5632 case ISD::AND:
5633 assert(VT.isInteger() && "This operator does not apply to FP types!")(static_cast<void> (0));
5634 assert(N1.getValueType() == N2.getValueType() &&(static_cast<void> (0))
5635 N1.getValueType() == VT && "Binary operator types must match!")(static_cast<void> (0));
5636 // (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's
5637 // worth handling here.
5638 if (N2C && N2C->isNullValue())
5639 return N2;
5640 if (N2C && N2C->isAllOnesValue()) // X & -1 -> X
5641 return N1;
5642 break;
5643 case ISD::OR:
5644 case ISD::XOR:
5645 case ISD::ADD:
5646 case ISD::SUB:
5647 assert(VT.isInteger() && "This operator does not apply to FP types!")(static_cast<void> (0));
5648 assert(N1.getValueType() == N2.getValueType() &&(static_cast<void> (0))
5649 N1.getValueType() == VT && "Binary operator types must match!")(static_cast<void> (0));
5650 // (X ^|+- 0) -> X. This commonly occurs when legalizing i64 values, so
5651 // it's worth handling here.
5652 if (N2C && N2C->isNullValue())
5653 return N1;
5654 if ((Opcode == ISD::ADD || Opcode == ISD::SUB) && VT.isVector() &&
5655 VT.getVectorElementType() == MVT::i1)
5656 return getNode(ISD::XOR, DL, VT, N1, N2);
5657 break;
5658 case ISD::MUL:
5659 assert(VT.isInteger() && "This operator does not apply to FP types!")(static_cast<void> (0));
5660 assert(N1.getValueType() == N2.getValueType() &&(static_cast<void> (0))
5661 N1.getValueType() == VT && "Binary operator types must match!")(static_cast<void> (0));
5662 if (VT.isVector() && VT.getVectorElementType() == MVT::i1)
5663 return getNode(ISD::AND, DL, VT, N1, N2);
5664 if (N2C && (N1.getOpcode() == ISD::VSCALE) && Flags.hasNoSignedWrap()) {
5665 const APInt &MulImm = N1->getConstantOperandAPInt(0);
5666 const APInt &N2CImm = N2C->getAPIntValue();
5667 return getVScale(DL, VT, MulImm * N2CImm);
5668 }
5669 break;
5670 case ISD::UDIV:
5671 case ISD::UREM:
5672 case ISD::MULHU:
5673 case ISD::MULHS:
5674 case ISD::SDIV:
5675 case ISD::SREM:
5676 case ISD::SADDSAT:
5677 case ISD::SSUBSAT:
5678 case ISD::UADDSAT:
5679 case ISD::USUBSAT:
5680 assert(VT.isInteger() && "This operator does not apply to FP types!")(static_cast<void> (0));
5681 assert(N1.getValueType() == N2.getValueType() &&(static_cast<void> (0))
5682 N1.getValueType() == VT && "Binary operator types must match!")(static_cast<void> (0));
5683 if (VT.isVector() && VT.getVectorElementType() == MVT::i1) {
5684 // fold (add_sat x, y) -> (or x, y) for bool types.
5685 if (Opcode == ISD::SADDSAT || Opcode == ISD::UADDSAT)
5686 return getNode(ISD::OR, DL, VT, N1, N2);
5687 // fold (sub_sat x, y) -> (and x, ~y) for bool types.
5688 if (Opcode == ISD::SSUBSAT || Opcode == ISD::USUBSAT)
5689 return getNode(ISD::AND, DL, VT, N1, getNOT(DL, N2, VT));
5690 }
5691 break;
5692 case ISD::SMIN:
5693 case ISD::UMAX:
5694 assert(VT.isInteger() && "This operator does not apply to FP types!")(static_cast<void> (0));
5695 assert(N1.getValueType() == N2.getValueType() &&(static_cast<void> (0))
5696 N1.getValueType() == VT && "Binary operator types must match!")(static_cast<void> (0));
5697 if (VT.isVector() && VT.getVectorElementType() == MVT::i1)
5698 return getNode(ISD::OR, DL, VT, N1, N2);
5699 break;
5700 case ISD::SMAX:
5701 case ISD::UMIN:
5702 assert(VT.isInteger() && "This operator does not apply to FP types!")(static_cast<void> (0));
5703 assert(N1.getValueType() == N2.getValueType() &&(static_cast<void> (0))
5704 N1.getValueType() == VT && "Binary operator types must match!")(static_cast<void> (0));
5705 if (VT.isVector() && VT.getVectorElementType() == MVT::i1)
5706 return getNode(ISD::AND, DL, VT, N1, N2);
5707 break;
5708 case ISD::FADD:
5709 case ISD::FSUB:
5710 case ISD::FMUL:
5711 case ISD::FDIV:
5712 case ISD::FREM:
5713 assert(VT.isFloatingPoint() && "This operator only applies to FP types!")(static_cast<void> (0));
5714 assert(N1.getValueType() == N2.getValueType() &&(static_cast<void> (0))
5715 N1.getValueType() == VT && "Binary operator types must match!")(static_cast<void> (0));
5716 if (SDValue V = simplifyFPBinop(Opcode, N1, N2, Flags))
5717 return V;
5718 break;
5719 case ISD::FCOPYSIGN: // N1 and result must match. N1/N2 need not match.
5720 assert(N1.getValueType() == VT &&(static_cast<void> (0))
5721 N1.getValueType().isFloatingPoint() &&(static_cast<void> (0))
5722 N2.getValueType().isFloatingPoint() &&(static_cast<void> (0))
5723 "Invalid FCOPYSIGN!")(static_cast<void> (0));
5724 break;
5725 case ISD::SHL:
5726 if (N2C && (N1.getOpcode() == ISD::VSCALE) && Flags.hasNoSignedWrap()) {
5727 const APInt &MulImm = N1->getConstantOperandAPInt(0);
5728 const APInt &ShiftImm = N2C->getAPIntValue();
5729 return getVScale(DL, VT, MulImm << ShiftImm);
5730 }
5731 LLVM_FALLTHROUGH[[gnu::fallthrough]];
5732 case ISD::SRA:
5733 case ISD::SRL:
5734 if (SDValue V = simplifyShift(N1, N2))
5735 return V;
5736 LLVM_FALLTHROUGH[[gnu::fallthrough]];
5737 case ISD::ROTL:
5738 case ISD::ROTR:
5739 assert(VT == N1.getValueType() &&(static_cast<void> (0))
5740 "Shift operators return type must be the same as their first arg")(static_cast<void> (0));
5741 assert(VT.isInteger() && N2.getValueType().isInteger() &&(static_cast<void> (0))
5742 "Shifts only work on integers")(static_cast<void> (0));
5743 assert((!VT.isVector() || VT == N2.getValueType()) &&(static_cast<void> (0))
5744 "Vector shift amounts must be in the same as their first arg")(static_cast<void> (0));
5745 // Verify that the shift amount VT is big enough to hold valid shift
5746 // amounts. This catches things like trying to shift an i1024 value by an
5747 // i8, which is easy to fall into in generic code that uses
5748 // TLI.getShiftAmount().
5749 assert(N2.getValueType().getScalarSizeInBits() >=(static_cast<void> (0))
5750 Log2_32_Ceil(VT.getScalarSizeInBits()) &&(static_cast<void> (0))
5751 "Invalid use of small shift amount with oversized value!")(static_cast<void> (0));
5752
5753 // Always fold shifts of i1 values so the code generator doesn't need to
5754 // handle them. Since we know the size of the shift has to be less than the
5755 // size of the value, the shift/rotate count is guaranteed to be zero.
5756 if (VT == MVT::i1)
5757 return N1;
5758 if (N2C && N2C->isNullValue())
5759 return N1;
5760 break;
5761 case ISD::FP_ROUND:
5762 assert(VT.isFloatingPoint() &&(static_cast<void> (0))
5763 N1.getValueType().isFloatingPoint() &&(static_cast<void> (0))
5764 VT.bitsLE(N1.getValueType()) &&(static_cast<void> (0))
5765 N2C && (N2C->getZExtValue() == 0 || N2C->getZExtValue() == 1) &&(static_cast<void> (0))
5766 "Invalid FP_ROUND!")(static_cast<void> (0));
5767 if (N1.getValueType() == VT) return N1; // noop conversion.
5768 break;
5769 case ISD::AssertSext:
5770 case ISD::AssertZext: {
5771 EVT EVT = cast<VTSDNode>(N2)->getVT();
5772 assert(VT == N1.getValueType() && "Not an inreg extend!")(static_cast<void> (0));
5773 assert(VT.isInteger() && EVT.isInteger() &&(static_cast<void> (0))
5774 "Cannot *_EXTEND_INREG FP types")(static_cast<void> (0));
5775 assert(!EVT.isVector() &&(static_cast<void> (0))
5776 "AssertSExt/AssertZExt type should be the vector element type "(static_cast<void> (0))
5777 "rather than the vector type!")(static_cast<void> (0));
5778 assert(EVT.bitsLE(VT.getScalarType()) && "Not extending!")(static_cast<void> (0));
5779 if (VT.getScalarType() == EVT) return N1; // noop assertion.
5780 break;
5781 }
5782 case ISD::SIGN_EXTEND_INREG: {
5783 EVT EVT = cast<VTSDNode>(N2)->getVT();
5784 assert(VT == N1.getValueType() && "Not an inreg extend!")(static_cast<void> (0));
5785 assert(VT.isInteger() && EVT.isInteger() &&(static_cast<void> (0))
5786 "Cannot *_EXTEND_INREG FP types")(static_cast<void> (0));
5787 assert(EVT.isVector() == VT.isVector() &&(static_cast<void> (0))
5788 "SIGN_EXTEND_INREG type should be vector iff the operand "(static_cast<void> (0))
5789 "type is vector!")(static_cast<void> (0));
5790 assert((!EVT.isVector() ||(static_cast<void> (0))
5791 EVT.getVectorElementCount() == VT.getVectorElementCount()) &&(static_cast<void> (0))
5792 "Vector element counts must match in SIGN_EXTEND_INREG")(static_cast<void> (0));
5793 assert(EVT.bitsLE(VT) && "Not extending!")(static_cast<void> (0));
5794 if (EVT == VT) return N1; // Not actually extending
5795
5796 auto SignExtendInReg = [&](APInt Val, llvm::EVT ConstantVT) {
5797 unsigned FromBits = EVT.getScalarSizeInBits();
5798 Val <<= Val.getBitWidth() - FromBits;
5799 Val.ashrInPlace(Val.getBitWidth() - FromBits);
5800 return getConstant(Val, DL, ConstantVT);
5801 };
5802
5803 if (N1C) {
5804 const APInt &Val = N1C->getAPIntValue();
5805 return SignExtendInReg(Val, VT);
5806 }
5807
5808 if (ISD::isBuildVectorOfConstantSDNodes(N1.getNode())) {
5809 SmallVector<SDValue, 8> Ops;
5810 llvm::EVT OpVT = N1.getOperand(0).getValueType();
5811 for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
5812 SDValue Op = N1.getOperand(i);
5813 if (Op.isUndef()) {
5814 Ops.push_back(getUNDEF(OpVT));
5815 continue;
5816 }
5817 ConstantSDNode *C = cast<ConstantSDNode>(Op);
5818 APInt Val = C->getAPIntValue();
5819 Ops.push_back(SignExtendInReg(Val, OpVT));
5820 }
5821 return getBuildVector(VT, DL, Ops);
5822 }
5823 break;
5824 }
5825 case ISD::FP_TO_SINT_SAT:
5826 case ISD::FP_TO_UINT_SAT: {
5827 assert(VT.isInteger() && cast<VTSDNode>(N2)->getVT().isInteger() &&(static_cast<void> (0))
5828 N1.getValueType().isFloatingPoint() && "Invalid FP_TO_*INT_SAT")(static_cast<void> (0));
5829 assert(N1.getValueType().isVector() == VT.isVector() &&(static_cast<void> (0))
5830 "FP_TO_*INT_SAT type should be vector iff the operand type is "(static_cast<void> (0))
5831 "vector!")(static_cast<void> (0));
5832 assert((!VT.isVector() || VT.getVectorNumElements() ==(static_cast<void> (0))
5833 N1.getValueType().getVectorNumElements()) &&(static_cast<void> (0))
5834 "Vector element counts must match in FP_TO_*INT_SAT")(static_cast<void> (0));
5835 assert(!cast<VTSDNode>(N2)->getVT().isVector() &&(static_cast<void> (0))
5836 "Type to saturate to must be a scalar.")(static_cast<void> (0));
5837 assert(cast<VTSDNode>(N2)->getVT().bitsLE(VT.getScalarType()) &&(static_cast<void> (0))
5838 "Not extending!")(static_cast<void> (0));
5839 break;
5840 }
5841 case ISD::EXTRACT_VECTOR_ELT:
5842 assert(VT.getSizeInBits() >= N1.getValueType().getScalarSizeInBits() &&(static_cast<void> (0))
5843 "The result of EXTRACT_VECTOR_ELT must be at least as wide as the \(static_cast<void> (0))
5844 element type of the vector.")(static_cast<void> (0));
5845
5846 // Extract from an undefined value or using an undefined index is undefined.
5847 if (N1.isUndef() || N2.isUndef())
5848 return getUNDEF(VT);
5849
5850 // EXTRACT_VECTOR_ELT of out-of-bounds element is an UNDEF for fixed length
5851 // vectors. For scalable vectors we will provide appropriate support for
5852 // dealing with arbitrary indices.
5853 if (N2C && N1.getValueType().isFixedLengthVector() &&
5854 N2C->getAPIntValue().uge(N1.getValueType().getVectorNumElements()))
5855 return getUNDEF(VT);
5856
5857 // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is
5858 // expanding copies of large vectors from registers. This only works for
5859 // fixed length vectors, since we need to know the exact number of
5860 // elements.
5861 if (N2C && N1.getOperand(0).getValueType().isFixedLengthVector() &&
5862 N1.getOpcode() == ISD::CONCAT_VECTORS && N1.getNumOperands() > 0) {
5863 unsigned Factor =
5864 N1.getOperand(0).getValueType().getVectorNumElements();
5865 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
5866 N1.getOperand(N2C->getZExtValue() / Factor),
5867 getVectorIdxConstant(N2C->getZExtValue() % Factor, DL));
5868 }
5869
5870 // EXTRACT_VECTOR_ELT of BUILD_VECTOR or SPLAT_VECTOR is often formed while
5871 // lowering is expanding large vector constants.
5872 if (N2C && (N1.getOpcode() == ISD::BUILD_VECTOR ||
5873 N1.getOpcode() == ISD::SPLAT_VECTOR)) {
5874 assert((N1.getOpcode() != ISD::BUILD_VECTOR ||(static_cast<void> (0))
5875 N1.getValueType().isFixedLengthVector()) &&(static_cast<void> (0))
5876 "BUILD_VECTOR used for scalable vectors")(static_cast<void> (0));
5877 unsigned Index =
5878 N1.getOpcode() == ISD::BUILD_VECTOR ? N2C->getZExtValue() : 0;
5879 SDValue Elt = N1.getOperand(Index);
5880
5881 if (VT != Elt.getValueType())
5882 // If the vector element type is not legal, the BUILD_VECTOR operands
5883 // are promoted and implicitly truncated, and the result implicitly
5884 // extended. Make that explicit here.
5885 Elt = getAnyExtOrTrunc(Elt, DL, VT);
5886
5887 return Elt;
5888 }
5889
5890 // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector
5891 // operations are lowered to scalars.
5892 if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) {
5893 // If the indices are the same, return the inserted element else
5894 // if the indices are known different, extract the element from
5895 // the original vector.
5896 SDValue N1Op2 = N1.getOperand(2);
5897 ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2);
5898
5899 if (N1Op2C && N2C) {
5900 if (N1Op2C->getZExtValue() == N2C->getZExtValue()) {
5901 if (VT == N1.getOperand(1).getValueType())
5902 return N1.getOperand(1);
5903 return getSExtOrTrunc(N1.getOperand(1), DL, VT);
5904 }
5905 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2);
5906 }
5907 }
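// For example (hypothetical operands, constant indices only):
//   extract_vector_elt (insert_vector_elt v, x, 3), 3 --> x
//   extract_vector_elt (insert_vector_elt v, x, 3), 1 --> extract_vector_elt v, 1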
5908
5909 // EXTRACT_VECTOR_ELT of v1iX EXTRACT_SUBVECTOR could be formed
5910 // when vector types are scalarized and v1iX is legal.
5911 // vextract (v1iX extract_subvector(vNiX, Idx)) -> vextract(vNiX,Idx).
5912 // Here we are completely ignoring the extract element index (N2),
5913 // which is fine for fixed width vectors, since any index other than 0
5914 // is undefined anyway. However, this cannot be ignored for scalable
5915 // vectors - in theory we could support this, but we don't want to do this
5916 // without a profitability check.
5917 if (N1.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
5918 N1.getValueType().isFixedLengthVector() &&
5919 N1.getValueType().getVectorNumElements() == 1) {
5920 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0),
5921 N1.getOperand(1));
5922 }
5923 break;
5924 case ISD::EXTRACT_ELEMENT:
5925 assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!");
5926 assert(!N1.getValueType().isVector() && !VT.isVector() &&
5927 (N1.getValueType().isInteger() == VT.isInteger()) &&
5928 N1.getValueType() != VT &&
5929 "Wrong types for EXTRACT_ELEMENT!");
5930
5931 // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding
5932 // 64-bit integers into 32-bit parts. Instead of building the extract of
5933 // the BUILD_PAIR, only to have legalize rip it apart, just do it now.
5934 if (N1.getOpcode() == ISD::BUILD_PAIR)
5935 return N1.getOperand(N2C->getZExtValue());
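// For example (hypothetical i64 value built from two i32 halves):
//   extract_element (build_pair lo, hi), 0 --> lo
//   extract_element (build_pair lo, hi), 1 --> hi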
5936
5937 // EXTRACT_ELEMENT of a constant int is also very common.
5938 if (N1C) {
5939 unsigned ElementSize = VT.getSizeInBits();
5940 unsigned Shift = ElementSize * N2C->getZExtValue();
5941 const APInt &Val = N1C->getAPIntValue();
5942 return getConstant(Val.extractBits(ElementSize, Shift), DL, VT);
5943 }
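// For example, with a hypothetical i64 constant 0x1122334455667788 split
// into i32 elements: element 0 is 0x55667788 (bits [31:0]) and element 1
// is 0x11223344 (bits [63:32]), via extractBits(32, 32 * index).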
5944 break;
5945 case ISD::EXTRACT_SUBVECTOR: {
5946 EVT N1VT = N1.getValueType();
5947 assert(VT.isVector() && N1VT.isVector() &&
5948 "Extract subvector VTs must be vectors!");
5949 assert(VT.getVectorElementType() == N1VT.getVectorElementType() &&
5950 "Extract subvector VTs must have the same element type!");
5951 assert((VT.isFixedLengthVector() || N1VT.isScalableVector()) &&
5952 "Cannot extract a scalable vector from a fixed length vector!");
5953 assert((VT.isScalableVector() != N1VT.isScalableVector() ||
5954 VT.getVectorMinNumElements() <= N1VT.getVectorMinNumElements()) &&
5955 "Extract subvector must be from larger vector to smaller vector!");
5956 assert(N2C && "Extract subvector index must be a constant");
5957 assert((VT.isScalableVector() != N1VT.isScalableVector() ||
5958 (VT.getVectorMinNumElements() + N2C->getZExtValue()) <=
5959 N1VT.getVectorMinNumElements()) &&
5960 "Extract subvector overflow!");
5961 assert(N2C->getAPIntValue().getBitWidth() ==
5962 TLI->getVectorIdxTy(getDataLayout()).getFixedSizeInBits() &&
5963 "Constant index for EXTRACT_SUBVECTOR has an invalid size");
5964
5965 // Trivial extraction.
5966 if (VT == N1VT)
5967 return N1;
5968
5969 // EXTRACT_SUBVECTOR of an UNDEF is an UNDEF.
5970 if (N1.isUndef())
5971 return getUNDEF(VT);
5972
5973 // EXTRACT_SUBVECTOR of CONCAT_VECTOR can be simplified if the pieces of
5974 // the concat have the same type as the extract.
5975 if (N1.getOpcode() == ISD::CONCAT_VECTORS && N1.getNumOperands() > 0 &&
5976 VT == N1.getOperand(0).getValueType()) {
5977 unsigned Factor = VT.getVectorMinNumElements();
5978 return N1.getOperand(N2C->getZExtValue() / Factor);
5979 }
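// For example, with hypothetical v4i32 concat pieces (so Factor = 4):
//   extract_subvector (concat_vectors t0:v4i32, t1:v4i32), 4 --> t1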
5980
5981 // EXTRACT_SUBVECTOR of INSERT_SUBVECTOR is often created
5982 // during shuffle legalization.
5983 if (N1.getOpcode() == ISD::INSERT_SUBVECTOR && N2 == N1.getOperand(2) &&
5984 VT == N1.getOperand(1).getValueType())
5985 return N1.getOperand(1);
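// For example (hypothetical operands):
//   extract_subvector (insert_subvector v, sub:v4i32, 4), 4 --> sub
// when the extracted type is also v4i32, i.e. the same index and type as
// the insertion.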
5986 break;
5987 }
5988 }
5989
5990 // Perform trivial constant folding.
5991 if (SDValue SV = FoldConstantArithmetic(Opcode, DL, VT, {N1, N2}))
5992 return SV;
5993
5994 if (SDValue V = foldConstantFPMath(Opcode, DL, VT, N1, N2))
5995 return V;