LLVM 23.0.0git
SelectionDAG.cpp
Go to the documentation of this file.
1//===- SelectionDAG.cpp - Implement the SelectionDAG data structures ------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This implements the SelectionDAG class.
10//
11//===----------------------------------------------------------------------===//
12
14#include "SDNodeDbgValue.h"
15#include "llvm/ADT/APFloat.h"
16#include "llvm/ADT/APInt.h"
17#include "llvm/ADT/APSInt.h"
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/BitVector.h"
20#include "llvm/ADT/DenseSet.h"
21#include "llvm/ADT/FoldingSet.h"
22#include "llvm/ADT/STLExtras.h"
25#include "llvm/ADT/Twine.h"
51#include "llvm/IR/Constant.h"
52#include "llvm/IR/Constants.h"
53#include "llvm/IR/DataLayout.h"
55#include "llvm/IR/DebugLoc.h"
57#include "llvm/IR/Function.h"
58#include "llvm/IR/GlobalValue.h"
59#include "llvm/IR/Metadata.h"
60#include "llvm/IR/Type.h"
64#include "llvm/Support/Debug.h"
74#include <algorithm>
75#include <cassert>
76#include <cstdint>
77#include <cstdlib>
78#include <limits>
79#include <optional>
80#include <string>
81#include <utility>
82#include <vector>
83
84using namespace llvm;
85using namespace llvm::SDPatternMatch;
86
87/// makeVTList - Return an instance of the SDVTList struct initialized with the
88/// specified members.
89static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
90 SDVTList Res = {VTs, NumVTs};
91 return Res;
92}
93
94// Default null implementations of the callbacks.
98
// Out-of-line anchor methods: pin the listeners' vtables to this
// translation unit so the compiler emits them exactly once.
void SelectionDAG::DAGNodeDeletedListener::anchor() {}
void SelectionDAG::DAGNodeInsertedListener::anchor() {}
101
102#define DEBUG_TYPE "selectiondag"
103
// If enabled, loads/stores produced when inlining memcpy are clustered
// ("ganged up") via glue rather than emitted independently.
static cl::opt<bool> EnableMemCpyDAGOpt("enable-memcpy-dag-opt",
    cl::Hidden, cl::init(true),
    cl::desc("Gang up loads and stores generated by inlining of memcpy"));

// Cap on the number of memcpy loads/stores glued together.
// NOTE(review): init(0) — presumably 0 selects a default limit at the use
// site; confirm against the consumer of this flag.
static cl::opt<int> MaxLdStGlue("ldstmemcpy-glue-max",
    cl::desc("Number limit for gluing ld/st of memcpy."),
    cl::Hidden, cl::init(0));

// Step budget for predecessor searches over the DAG (see desc).
// NOTE(review): the declaration's leading type/storage line is elided in
// this rendering.
    MaxSteps("has-predecessor-max-steps", cl::Hidden, cl::init(8192),
             cl::desc("DAG combiner limit number of steps when searching DAG "
                      "for predecessor nodes"));
116
118 LLVM_DEBUG(dbgs() << Msg; V.getNode()->dump(G););
119}
120
122
123//===----------------------------------------------------------------------===//
124// ConstantFPSDNode Class
125//===----------------------------------------------------------------------===//
126
127/// isExactlyValue - We don't rely on operator== working on double values, as
128/// it returns true for things that are clearly not equal, like -0.0 and 0.0.
129/// As such, this method can be used to do an exact bit-for-bit comparison of
130/// two floating point values.
  // Exact bit-for-bit comparison (see doc comment above); the enclosing
  // signature line (isExactlyValue) is elided in this rendering.
  return getValueAPF().bitwiseIsEqual(V);
}
134
// NOTE(review): the leading signature line is elided in this rendering.
                                          const APFloat& Val) {
  assert(VT.isFloatingPoint() && "Can only convert between FP types");

  // convert modifies in place, so make a copy.
  APFloat Val2 = APFloat(Val);
  bool losesInfo;
  // NOTE(review): the conversion call (presumably APFloat::convert into
  // VT's semantics) is elided here; it sets losesInfo.
              &losesInfo);
  // The value is representable in VT exactly when no precision was lost.
  return !losesInfo;
}
146
147//===----------------------------------------------------------------------===//
148// ISD Namespace
149//===----------------------------------------------------------------------===//
150
151bool ISD::isConstantSplatVector(const SDNode *N, APInt &SplatVal) {
152 if (N->getOpcode() == ISD::SPLAT_VECTOR) {
153 if (auto OptAPInt = N->getOperand(0)->bitcastToAPInt()) {
154 unsigned EltSize =
155 N->getValueType(0).getVectorElementType().getSizeInBits();
156 SplatVal = OptAPInt->trunc(EltSize);
157 return true;
158 }
159 }
160
161 auto *BV = dyn_cast<BuildVectorSDNode>(N);
162 if (!BV)
163 return false;
164
165 APInt SplatUndef;
166 unsigned SplatBitSize;
167 bool HasUndefs;
168 unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
169 // Endianness does not matter here. We are checking for a splat given the
170 // element size of the vector, and if we find such a splat for little endian
171 // layout, then that should be valid also for big endian (as the full vector
172 // size is known to be a multiple of the element size).
173 const bool IsBigEndian = false;
174 return BV->isConstantSplat(SplatVal, SplatUndef, SplatBitSize, HasUndefs,
175 EltSize, IsBigEndian) &&
176 EltSize == SplatBitSize;
177}
178
179// FIXME: AllOnes and AllZeros duplicate a lot of code. Could these be
180// specializations of the more general isConstantSplatVector()?
181
182bool ISD::isConstantSplatVectorAllOnes(const SDNode *N, bool BuildVectorOnly) {
183 // Look through a bit convert.
184 while (N->getOpcode() == ISD::BITCAST)
185 N = N->getOperand(0).getNode();
186
187 if (!BuildVectorOnly && N->getOpcode() == ISD::SPLAT_VECTOR) {
188 APInt SplatVal;
189 return isConstantSplatVector(N, SplatVal) && SplatVal.isAllOnes();
190 }
191
192 if (N->getOpcode() != ISD::BUILD_VECTOR) return false;
193
194 unsigned i = 0, e = N->getNumOperands();
195
196 // Skip over all of the undef values.
197 while (i != e && N->getOperand(i).isUndef())
198 ++i;
199
200 // Do not accept an all-undef vector.
201 if (i == e) return false;
202
203 // Do not accept build_vectors that aren't all constants or which have non-~0
204 // elements. We have to be a bit careful here, as the type of the constant
205 // may not be the same as the type of the vector elements due to type
206 // legalization (the elements are promoted to a legal type for the target and
207 // a vector of a type may be legal when the base element type is not).
208 // We only want to check enough bits to cover the vector elements, because
209 // we care if the resultant vector is all ones, not whether the individual
210 // constants are.
211 SDValue NotZero = N->getOperand(i);
212 if (auto OptAPInt = NotZero->bitcastToAPInt()) {
213 unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
214 if (OptAPInt->countr_one() < EltSize)
215 return false;
216 } else
217 return false;
218
219 // Okay, we have at least one ~0 value, check to see if the rest match or are
220 // undefs. Even with the above element type twiddling, this should be OK, as
221 // the same type legalization should have applied to all the elements.
222 for (++i; i != e; ++i)
223 if (N->getOperand(i) != NotZero && !N->getOperand(i).isUndef())
224 return false;
225 return true;
226}
227
228bool ISD::isConstantSplatVectorAllZeros(const SDNode *N, bool BuildVectorOnly) {
229 // Look through a bit convert.
230 while (N->getOpcode() == ISD::BITCAST)
231 N = N->getOperand(0).getNode();
232
233 if (!BuildVectorOnly && N->getOpcode() == ISD::SPLAT_VECTOR) {
234 APInt SplatVal;
235 return isConstantSplatVector(N, SplatVal) && SplatVal.isZero();
236 }
237
238 if (N->getOpcode() != ISD::BUILD_VECTOR) return false;
239
240 bool IsAllUndef = true;
241 for (const SDValue &Op : N->op_values()) {
242 if (Op.isUndef())
243 continue;
244 IsAllUndef = false;
245 // Do not accept build_vectors that aren't all constants or which have non-0
246 // elements. We have to be a bit careful here, as the type of the constant
247 // may not be the same as the type of the vector elements due to type
248 // legalization (the elements are promoted to a legal type for the target
249 // and a vector of a type may be legal when the base element type is not).
250 // We only want to check enough bits to cover the vector elements, because
251 // we care if the resultant vector is all zeros, not whether the individual
252 // constants are.
253 if (auto OptAPInt = Op->bitcastToAPInt()) {
254 unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
255 if (OptAPInt->countr_zero() < EltSize)
256 return false;
257 } else
258 return false;
259 }
260
261 // Do not accept an all-undef vector.
262 if (IsAllUndef)
263 return false;
264 return true;
265}
266
  // Delegates to the splat check restricted to BUILD_VECTOR nodes.
  // NOTE(review): the function signature line is elided in this rendering.
  return isConstantSplatVectorAllOnes(N, /*BuildVectorOnly*/ true);
}
270
  // Delegates to the splat check restricted to BUILD_VECTOR nodes.
  // NOTE(review): the function signature line is elided in this rendering.
  return isConstantSplatVectorAllZeros(N, /*BuildVectorOnly*/ true);
}
274
  // NOTE(review): the signature line is elided; per the body this answers
  // "is every (non-undef) operand of this BUILD_VECTOR some constant kind".
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    // NOTE(review): the operand test guarding this return is elided in this
    // rendering — presumably a constant-node kind check; confirm upstream.
      return false;
  }
  return true;
}
287
  // NOTE(review): the signature line is elided; this mirrors the predicate
  // above for a second constant-node kind (likely FP); confirm upstream.
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    // NOTE(review): the operand test guarding this return is elided in this
    // rendering.
      return false;
  }
  return true;
}
300
/// Return true if every element of vector \p N can be represented in
/// NewEltSize bits (signed or unsigned per \p Signed) without changing value.
bool ISD::isVectorShrinkable(const SDNode *N, unsigned NewEltSize,
                             bool Signed) {
  assert(N->getValueType(0).isVector() && "Expected a vector!");

  // Can't "shrink" to an equal or wider element width.
  unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
  if (EltSize <= NewEltSize)
    return false;

  // A zext is shrinkable iff its source already fits in NewEltSize and we
  // are asking about unsigned shrinking (symmetrically for sext below).
  if (N->getOpcode() == ISD::ZERO_EXTEND) {
    return (N->getOperand(0).getValueType().getScalarSizeInBits() <=
            NewEltSize) &&
           !Signed;
  }
  if (N->getOpcode() == ISD::SIGN_EXTEND) {
    return (N->getOperand(0).getValueType().getScalarSizeInBits() <=
            NewEltSize) &&
           Signed;
  }
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    // NOTE(review): the operand filter guarding this return is elided in
    // this rendering (presumably rejects non-constant operands).
      return false;

    // Each constant element must round-trip through the narrower width.
    APInt C = Op->getAsAPIntVal().trunc(EltSize);
    if (Signed && C.trunc(NewEltSize).sext(EltSize) != C)
      return false;
    if (!Signed && C.trunc(NewEltSize).zext(EltSize) != C)
      return false;
  }

  return true;
}
337
  // NOTE(review): the signature line is elided; per the body this reports
  // whether every operand of the node is undef.
  // Return false if the node has no operands.
  // This is "logically inconsistent" with the definition of "all" but
  // is probably the desired behavior.
  if (N->getNumOperands() == 0)
    return false;
  return all_of(N->op_values(), [](SDValue Op) { return Op.isUndef(); });
}
346
  // True exactly for a FREEZE whose single input is undef (signature elided).
  return N->getOpcode() == ISD::FREEZE && N->getOperand(0).isUndef();
}
350
template <typename ConstNodeType>
// NOTE(review): the signature line is elided here; per the explicit
// instantiations below this is ISD::matchUnaryPredicateImpl(SDValue Op, ...).
                   std::function<bool(ConstNodeType *)> Match,
                   bool AllowUndefs, bool AllowTruncation) {
  // FIXME: Add support for scalar UNDEF cases?
  if (auto *C = dyn_cast<ConstNodeType>(Op))
    return Match(C);

  // FIXME: Add support for vector UNDEF cases?
  if (ISD::BUILD_VECTOR != Op.getOpcode() &&
      ISD::SPLAT_VECTOR != Op.getOpcode())
    return false;

  EVT SVT = Op.getValueType().getScalarType();
  for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
    // An undef lane passes only if the predicate accepts "no constant".
    if (AllowUndefs && Op.getOperand(i).isUndef()) {
      if (!Match(nullptr))
        return false;
      continue;
    }

    // Each lane must be the right constant kind, of the scalar VT unless
    // truncation is allowed, and must satisfy the predicate.
    auto *Cst = dyn_cast<ConstNodeType>(Op.getOperand(i));
    if (!Cst || (!AllowTruncation && Cst->getValueType(0) != SVT) ||
        !Match(Cst))
      return false;
  }
  return true;
}
// Build used template types.
// NOTE(review): the explicit instantiation lines are partially elided in
// this rendering; only the template argument lists are visible.
    SDValue, std::function<bool(ConstantSDNode *)>, bool, bool);
    SDValue, std::function<bool(ConstantFPSDNode *)>, bool, bool);
384
// NOTE(review): the leading signature line is elided in this rendering
// (presumably ISD::matchBinaryPredicate); it matches a predicate across
// corresponding constant lanes of LHS and RHS.
    SDValue LHS, SDValue RHS,
    std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match,
    bool AllowUndefs, bool AllowTypeMismatch) {
  if (!AllowTypeMismatch && LHS.getValueType() != RHS.getValueType())
    return false;

  // TODO: Add support for scalar UNDEF cases?
  if (auto *LHSCst = dyn_cast<ConstantSDNode>(LHS))
    if (auto *RHSCst = dyn_cast<ConstantSDNode>(RHS))
      return Match(LHSCst, RHSCst);

  // TODO: Add support for vector UNDEF cases?
  if (LHS.getOpcode() != RHS.getOpcode() ||
      (LHS.getOpcode() != ISD::BUILD_VECTOR &&
       LHS.getOpcode() != ISD::SPLAT_VECTOR))
    return false;

  EVT SVT = LHS.getValueType().getScalarType();
  for (unsigned i = 0, e = LHS.getNumOperands(); i != e; ++i) {
    SDValue LHSOp = LHS.getOperand(i);
    SDValue RHSOp = RHS.getOperand(i);
    bool LHSUndef = AllowUndefs && LHSOp.isUndef();
    bool RHSUndef = AllowUndefs && RHSOp.isUndef();
    auto *LHSCst = dyn_cast<ConstantSDNode>(LHSOp);
    auto *RHSCst = dyn_cast<ConstantSDNode>(RHSOp);
    // Each lane must be a constant (or an allowed undef) on both sides.
    if ((!LHSCst && !LHSUndef) || (!RHSCst && !RHSUndef))
      return false;
    // Unless mismatches are allowed, lane types must agree with the scalar VT.
    if (!AllowTypeMismatch && (LHSOp.getValueType() != SVT ||
                               LHSOp.getValueType() != RHSOp.getValueType()))
      return false;
    if (!Match(LHSCst, RHSCst))
      return false;
  }
  return true;
}
421
  // Map a min opcode to the max of the same signedness and vice versa.
  // NOTE(review): the function signature line is elided in this rendering.
  switch (MinMaxOpc) {
  default:
    llvm_unreachable("unrecognized opcode");
  case ISD::UMIN:
    return ISD::UMAX;
  case ISD::UMAX:
    return ISD::UMIN;
  case ISD::SMIN:
    return ISD::SMAX;
  case ISD::SMAX:
    return ISD::SMIN;
  }
}
436
  // Flip the signedness of a min/max opcode (SMIN<->UMIN, SMAX<->UMAX).
  // NOTE(review): the function signature line is elided in this rendering.
  switch (MinMaxOpc) {
  default:
    llvm_unreachable("unrecognized min/max opcode");
  case ISD::SMIN:
    return ISD::UMIN;
  case ISD::SMAX:
    return ISD::UMAX;
  case ISD::UMIN:
    return ISD::SMIN;
  case ISD::UMAX:
    return ISD::SMAX;
  }
}
451
  // Map a (VP_)REDUCE opcode to the scalar binary opcode it reduces with.
  // NOTE(review): the signature line and the companion VECREDUCE_* case
  // labels (paired with each VP_REDUCE_*) are elided in this rendering.
  switch (VecReduceOpcode) {
  default:
    llvm_unreachable("Expected VECREDUCE opcode");
  case ISD::VP_REDUCE_FADD:
  case ISD::VP_REDUCE_SEQ_FADD:
    return ISD::FADD;
  case ISD::VP_REDUCE_FMUL:
  case ISD::VP_REDUCE_SEQ_FMUL:
    return ISD::FMUL;
  case ISD::VP_REDUCE_ADD:
    return ISD::ADD;
  case ISD::VP_REDUCE_MUL:
    return ISD::MUL;
  case ISD::VP_REDUCE_AND:
    return ISD::AND;
  case ISD::VP_REDUCE_OR:
    return ISD::OR;
  case ISD::VP_REDUCE_XOR:
    return ISD::XOR;
  case ISD::VP_REDUCE_SMAX:
    return ISD::SMAX;
  case ISD::VP_REDUCE_SMIN:
    return ISD::SMIN;
  case ISD::VP_REDUCE_UMAX:
    return ISD::UMAX;
  case ISD::VP_REDUCE_UMIN:
    return ISD::UMIN;
  case ISD::VP_REDUCE_FMAX:
    return ISD::FMAXNUM;
  case ISD::VP_REDUCE_FMIN:
    return ISD::FMINNUM;
  case ISD::VP_REDUCE_FMAXIMUM:
    return ISD::FMAXIMUM;
  case ISD::VP_REDUCE_FMINIMUM:
    return ISD::FMINIMUM;
  }
}
507
  // Map a masked div/rem opcode to its unmasked equivalent.
  // NOTE(review): the function signature line is elided in this rendering.
  switch (MaskedOpc) {
  case ISD::MASKED_UDIV:
    return ISD::UDIV;
  case ISD::MASKED_SDIV:
    return ISD::SDIV;
  case ISD::MASKED_UREM:
    return ISD::UREM;
  case ISD::MASKED_SREM:
    return ISD::SREM;
  default:
    llvm_unreachable("Expected masked binop opcode");
  }
}
522
/// Return true if \p Opcode is any vector-predicated (VP) SDNode opcode.
/// The case labels are generated from VPIntrinsics.def.
bool ISD::isVPOpcode(unsigned Opcode) {
  switch (Opcode) {
  default:
    return false;
#define BEGIN_REGISTER_VP_SDNODE(VPSD, ...) \
  case ISD::VPSD: \
    return true;
#include "llvm/IR/VPIntrinsics.def"
  }
}
533
/// Return true if \p Opcode is a VP opcode registered with the BINARYOP
/// property in VPIntrinsics.def; the macros expand each VP node into either
/// "case ...: return true;" (binary) or "case ...: break;" (otherwise).
bool ISD::isVPBinaryOp(unsigned Opcode) {
  switch (Opcode) {
  default:
    break;
#define BEGIN_REGISTER_VP_SDNODE(VPSD, ...) case ISD::VPSD:
#define VP_PROPERTY_BINARYOP return true;
#define END_REGISTER_VP_SDNODE(VPSD) break;
#include "llvm/IR/VPIntrinsics.def"
  }
  return false;
}
545
546bool ISD::isVPReduction(unsigned Opcode) {
547 switch (Opcode) {
548 default:
549 return false;
550 case ISD::VP_REDUCE_ADD:
551 case ISD::VP_REDUCE_MUL:
552 case ISD::VP_REDUCE_AND:
553 case ISD::VP_REDUCE_OR:
554 case ISD::VP_REDUCE_XOR:
555 case ISD::VP_REDUCE_SMAX:
556 case ISD::VP_REDUCE_SMIN:
557 case ISD::VP_REDUCE_UMAX:
558 case ISD::VP_REDUCE_UMIN:
559 case ISD::VP_REDUCE_FMAX:
560 case ISD::VP_REDUCE_FMIN:
561 case ISD::VP_REDUCE_FMAXIMUM:
562 case ISD::VP_REDUCE_FMINIMUM:
563 case ISD::VP_REDUCE_FADD:
564 case ISD::VP_REDUCE_FMUL:
565 case ISD::VP_REDUCE_SEQ_FADD:
566 case ISD::VP_REDUCE_SEQ_FMUL:
567 return true;
568 }
569}
570
/// The operand position of the vector mask.
/// Returns std::nullopt for non-VP opcodes; the table is generated from
/// VPIntrinsics.def (MASKPOS field).
std::optional<unsigned> ISD::getVPMaskIdx(unsigned Opcode) {
  switch (Opcode) {
  default:
    return std::nullopt;
#define BEGIN_REGISTER_VP_SDNODE(VPSD, LEGALPOS, TDNAME, MASKPOS, ...) \
  case ISD::VPSD: \
    return MASKPOS;
#include "llvm/IR/VPIntrinsics.def"
  }
}
582
/// The operand position of the explicit vector length parameter.
/// Returns std::nullopt for non-VP opcodes; the table is generated from
/// VPIntrinsics.def (EVLPOS field).
std::optional<unsigned> ISD::getVPExplicitVectorLengthIdx(unsigned Opcode) {
  switch (Opcode) {
  default:
    return std::nullopt;
#define BEGIN_REGISTER_VP_SDNODE(VPSD, LEGALPOS, TDNAME, MASKPOS, EVLPOS) \
  case ISD::VPSD: \
    return EVLPOS;
#include "llvm/IR/VPIntrinsics.def"
  }
}
594
/// Map a VP opcode to its functional (non-predicated) equivalent, if one is
/// registered in VPIntrinsics.def; std::nullopt otherwise.
std::optional<unsigned> ISD::getBaseOpcodeForVP(unsigned VPOpcode,
                                                bool hasFPExcept) {
  // FIXME: Return strict opcodes in case of fp exceptions.
  switch (VPOpcode) {
  default:
    return std::nullopt;
#define BEGIN_REGISTER_VP_SDNODE(VPOPC, ...) case ISD::VPOPC:
#define VP_PROPERTY_FUNCTIONAL_SDOPC(SDOPC) return ISD::SDOPC;
#define END_REGISTER_VP_SDNODE(VPOPC) break;
#include "llvm/IR/VPIntrinsics.def"
  }
  // VP opcodes without a registered functional equivalent fall through here.
  return std::nullopt;
}
608
/// Inverse of getBaseOpcodeForVP: map a non-predicated opcode to its VP
/// counterpart registered in VPIntrinsics.def, or std::nullopt.
std::optional<unsigned> ISD::getVPForBaseOpcode(unsigned Opcode) {
  switch (Opcode) {
  default:
    return std::nullopt;
#define BEGIN_REGISTER_VP_SDNODE(VPOPC, ...) break;
#define VP_PROPERTY_FUNCTIONAL_SDOPC(SDOPC) case ISD::SDOPC:
#define END_REGISTER_VP_SDNODE(VPOPC) return ISD::VPOPC;
#include "llvm/IR/VPIntrinsics.def"
  }
}
619
  // Map a LoadExtType to the matching extension opcode (EXTLOAD picks
  // FP_EXTEND vs ANY_EXTEND based on IsFP).
  // NOTE(review): the function signature line is elided in this rendering.
  switch (ExtType) {
  case ISD::EXTLOAD:
    return IsFP ? ISD::FP_EXTEND : ISD::ANY_EXTEND;
  case ISD::SEXTLOAD:
    return ISD::SIGN_EXTEND;
  case ISD::ZEXTLOAD:
    return ISD::ZERO_EXTEND;
  default:
    break;
  }

  llvm_unreachable("Invalid LoadExtType");
}
634
  // To perform this operation, we just need to swap the L and G bits of the
  // operation. (N, U and E bits are unchanged by swapping the operands.)
  // NOTE(review): the function signature line is elided in this rendering.
  unsigned OldL = (Operation >> 2) & 1;
  unsigned OldG = (Operation >> 1) & 1;
  return ISD::CondCode((Operation & ~6) | // Keep the N, U, E bits
                       (OldL << 1) |      // New G bit
                       (OldG << 2));      // New L bit.
}
644
  // Invert a condition code by flipping its predicate bits.
  // NOTE(review): the function signature line is elided in this rendering.
  unsigned Operation = Op;
  if (isIntegerLike)
    Operation ^= 7; // Flip L, G, E bits, but not U.
  else
    Operation ^= 15; // Flip all of the condition bits.

  // NOTE(review): the condition guarding the next statement is elided in
  // this rendering.
    Operation &= ~8; // Don't let N and U bits get set.

  return ISD::CondCode(Operation);
}
657
661
// Thin public wrapper around getSetCCInverseImpl.
// NOTE(review): the leading signature line is elided in this rendering.
                            bool isIntegerLike) {
  return getSetCCInverseImpl(Op, isIntegerLike);
}
666
667/// For an integer comparison, return 1 if the comparison is a signed operation
668/// and 2 if the result is an unsigned comparison. Return zero if the operation
669/// does not depend on the sign of the input (setne and seteq).
670static int isSignedOp(ISD::CondCode Opcode) {
671 switch (Opcode) {
672 default: llvm_unreachable("Illegal integer setcc operation!");
673 case ISD::SETEQ:
674 case ISD::SETNE: return 0;
675 case ISD::SETLT:
676 case ISD::SETLE:
677 case ISD::SETGT:
678 case ISD::SETGE: return 1;
679 case ISD::SETULT:
680 case ISD::SETULE:
681 case ISD::SETUGT:
682 case ISD::SETUGE: return 2;
683 }
684}
685
// OR-combine two condition codes (true when either comparison holds).
// NOTE(review): the leading signature line is elided in this rendering.
                                       EVT Type) {
  bool IsInteger = Type.isInteger();
  if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed integer setcc with an unsigned integer setcc.
    return ISD::SETCC_INVALID;

  unsigned Op = Op1 | Op2; // Combine all of the condition bits.

  // If the N and U bits get set, then the resultant comparison DOES suddenly
  // care about orderedness, and it is true when ordered.
  if (Op > ISD::SETTRUE2)
    Op &= ~16; // Clear the U bit if the N bit is set.

  // Canonicalize illegal integer setcc's.
  if (IsInteger && Op == ISD::SETUNE) // e.g. SETUGT | SETULT
    Op = ISD::SETNE;

  return ISD::CondCode(Op);
}
706
// AND-combine two condition codes (true when both comparisons hold).
// NOTE(review): the leading signature line is elided in this rendering.
                                       EVT Type) {
  bool IsInteger = Type.isInteger();
  if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed setcc with an unsigned setcc.
    return ISD::SETCC_INVALID;

  // Combine all of the condition bits.
  ISD::CondCode Result = ISD::CondCode(Op1 & Op2);

  // Canonicalize illegal integer setcc's.
  if (IsInteger) {
    switch (Result) {
    default: break;
    case ISD::SETUO : Result = ISD::SETFALSE; break;  // SETUGT & SETULT
    case ISD::SETOEQ:                                 // SETEQ  & SETU[LG]E
    case ISD::SETUEQ: Result = ISD::SETEQ   ; break;  // SETUGE & SETULE
    case ISD::SETOLT: Result = ISD::SETULT  ; break;  // SETULT & SETNE
    case ISD::SETOGT: Result = ISD::SETUGT  ; break;  // SETUGT & SETNE
    }
  }

  return Result;
}
731
732//===----------------------------------------------------------------------===//
733// SDNode Profile Support
734//===----------------------------------------------------------------------===//
735
/// AddNodeIDOpcode - Add the node opcode to the NodeID data.
/// The opcode is the primary discriminator in the CSE FoldingSet profile.
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC) {
  ID.AddInteger(OpC);
}
740
/// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
/// solely with their pointer.
// NOTE(review): the function signature line is elided in this rendering.
  ID.AddPointer(VTList.VTs);
}
746
/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
// NOTE(review): the signature (taking an operand list) is elided here.
  for (const auto &Op : Ops) {
    ID.AddPointer(Op.getNode());  // identity of the producing node
    ID.AddInteger(Op.getResNo()); // which result of that node is used
  }
}
755
/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
// NOTE(review): the signature of this second overload is elided here; the
// body profiles operands identically to the overload above.
  for (const auto &Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}
764
/// Build the FoldingSet profile for a prospective node from its opcode,
/// result type list, and operands. The order of the three Add* calls must
/// match every other profile construction, or CSE lookups will miss.
static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned OpC,
                          SDVTList VTList, ArrayRef<SDValue> OpList) {
  AddNodeIDOpcode(ID, OpC);
  AddNodeIDValueTypes(ID, VTList);
  AddNodeIDOperands(ID, OpList);
}
771
/// If this is an SDNode with special info, add this info to the NodeID data.
// NOTE(review): the function signature and a number of case labels / cast<>
// statements are elided in this rendering; all visible code is kept
// verbatim. Each case folds the node-specific payload into the profile so
// that nodes differing only in that payload never CSE together.
  switch (N->getOpcode()) {
  case ISD::MCSymbol:
    llvm_unreachable("Should only be used on nodes with operands");
  default: break; // Normal nodes don't need extra info.
  case ISD::Constant: {
    ID.AddPointer(C->getConstantIntValue());
    ID.AddBoolean(C->isOpaque());
    break;
  }
  case ISD::ConstantFP:
    ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
    break;
    ID.AddPointer(GA->getGlobal());
    ID.AddInteger(GA->getOffset());
    ID.AddInteger(GA->getTargetFlags());
    break;
  }
  case ISD::BasicBlock:
    break;
  case ISD::Register:
    ID.AddInteger(cast<RegisterSDNode>(N)->getReg().id());
    break;
    ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
    break;
  case ISD::SRCVALUE:
    ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
    break;
  case ISD::FrameIndex:
    ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
    break;
    ID.AddInteger(cast<PseudoProbeSDNode>(N)->getGuid());
    ID.AddInteger(cast<PseudoProbeSDNode>(N)->getIndex());
    ID.AddInteger(cast<PseudoProbeSDNode>(N)->getAttributes());
    break;
  case ISD::JumpTable:
    ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
    ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
    break;
    ID.AddInteger(CP->getAlign().value());
    ID.AddInteger(CP->getOffset());
    else
      ID.AddPointer(CP->getConstVal());
    ID.AddInteger(CP->getTargetFlags());
    break;
  }
  case ISD::TargetIndex: {
    ID.AddInteger(TI->getIndex());
    ID.AddInteger(TI->getOffset());
    ID.AddInteger(TI->getTargetFlags());
    break;
  }
  // Memory nodes profile memory VT, subclass data, address space, and MMO
  // flags, so loads/stores with different memory semantics stay distinct.
  case ISD::LOAD: {
    const LoadSDNode *LD = cast<LoadSDNode>(N);
    ID.AddInteger(LD->getMemoryVT().getRawBits());
    ID.AddInteger(LD->getRawSubclassData());
    ID.AddInteger(LD->getPointerInfo().getAddrSpace());
    ID.AddInteger(LD->getMemOperand()->getFlags());
    break;
  }
  case ISD::STORE: {
    const StoreSDNode *ST = cast<StoreSDNode>(N);
    ID.AddInteger(ST->getMemoryVT().getRawBits());
    ID.AddInteger(ST->getRawSubclassData());
    ID.AddInteger(ST->getPointerInfo().getAddrSpace());
    ID.AddInteger(ST->getMemOperand()->getFlags());
    break;
  }
  case ISD::VP_LOAD: {
    const VPLoadSDNode *ELD = cast<VPLoadSDNode>(N);
    ID.AddInteger(ELD->getMemoryVT().getRawBits());
    ID.AddInteger(ELD->getRawSubclassData());
    ID.AddInteger(ELD->getPointerInfo().getAddrSpace());
    ID.AddInteger(ELD->getMemOperand()->getFlags());
    break;
  }
  case ISD::VP_LOAD_FF: {
    const auto *LD = cast<VPLoadFFSDNode>(N);
    ID.AddInteger(LD->getMemoryVT().getRawBits());
    ID.AddInteger(LD->getRawSubclassData());
    ID.AddInteger(LD->getPointerInfo().getAddrSpace());
    ID.AddInteger(LD->getMemOperand()->getFlags());
    break;
  }
  case ISD::VP_STORE: {
    const VPStoreSDNode *EST = cast<VPStoreSDNode>(N);
    ID.AddInteger(EST->getMemoryVT().getRawBits());
    ID.AddInteger(EST->getRawSubclassData());
    ID.AddInteger(EST->getPointerInfo().getAddrSpace());
    ID.AddInteger(EST->getMemOperand()->getFlags());
    break;
  }
  case ISD::EXPERIMENTAL_VP_STRIDED_LOAD: {
    ID.AddInteger(SLD->getMemoryVT().getRawBits());
    ID.AddInteger(SLD->getRawSubclassData());
    ID.AddInteger(SLD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::EXPERIMENTAL_VP_STRIDED_STORE: {
    ID.AddInteger(SST->getMemoryVT().getRawBits());
    ID.AddInteger(SST->getRawSubclassData());
    ID.AddInteger(SST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::VP_GATHER: {
    ID.AddInteger(EG->getMemoryVT().getRawBits());
    ID.AddInteger(EG->getRawSubclassData());
    ID.AddInteger(EG->getPointerInfo().getAddrSpace());
    ID.AddInteger(EG->getMemOperand()->getFlags());
    break;
  }
  case ISD::VP_SCATTER: {
    ID.AddInteger(ES->getMemoryVT().getRawBits());
    ID.AddInteger(ES->getRawSubclassData());
    ID.AddInteger(ES->getPointerInfo().getAddrSpace());
    ID.AddInteger(ES->getMemOperand()->getFlags());
    break;
  }
  case ISD::MLOAD: {
    ID.AddInteger(MLD->getMemoryVT().getRawBits());
    ID.AddInteger(MLD->getRawSubclassData());
    ID.AddInteger(MLD->getPointerInfo().getAddrSpace());
    ID.AddInteger(MLD->getMemOperand()->getFlags());
    break;
  }
  case ISD::MSTORE: {
    ID.AddInteger(MST->getMemoryVT().getRawBits());
    ID.AddInteger(MST->getRawSubclassData());
    ID.AddInteger(MST->getPointerInfo().getAddrSpace());
    ID.AddInteger(MST->getMemOperand()->getFlags());
    break;
  }
  case ISD::MGATHER: {
    ID.AddInteger(MG->getMemoryVT().getRawBits());
    ID.AddInteger(MG->getRawSubclassData());
    ID.AddInteger(MG->getPointerInfo().getAddrSpace());
    ID.AddInteger(MG->getMemOperand()->getFlags());
    break;
  }
  case ISD::MSCATTER: {
    ID.AddInteger(MS->getMemoryVT().getRawBits());
    ID.AddInteger(MS->getRawSubclassData());
    ID.AddInteger(MS->getPointerInfo().getAddrSpace());
    ID.AddInteger(MS->getMemOperand()->getFlags());
    break;
  }
  // NOTE(review): several ATOMIC_* case labels preceding these are elided.
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE: {
    const AtomicSDNode *AT = cast<AtomicSDNode>(N);
    ID.AddInteger(AT->getMemoryVT().getRawBits());
    ID.AddInteger(AT->getRawSubclassData());
    ID.AddInteger(AT->getPointerInfo().getAddrSpace());
    ID.AddInteger(AT->getMemOperand()->getFlags());
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
    for (int M : Mask)
      ID.AddInteger(M);
    break;
  }
  case ISD::ADDRSPACECAST: {
    ID.AddInteger(ASC->getSrcAddressSpace());
    ID.AddInteger(ASC->getDestAddressSpace());
    break;
  }
  case ISD::BlockAddress: {
    ID.AddPointer(BA->getBlockAddress());
    ID.AddInteger(BA->getOffset());
    ID.AddInteger(BA->getTargetFlags());
    break;
  }
  case ISD::AssertAlign:
    ID.AddInteger(cast<AssertAlignSDNode>(N)->getAlign().value());
    break;
  case ISD::PREFETCH:
    // Handled by MemIntrinsicSDNode check after the switch.
    break;
    ID.AddPointer(cast<MDNodeSDNode>(N)->getMD());
    break;
  } // end switch (N->getOpcode())

  // MemIntrinsic nodes could also have subclass data, address spaces, and flags
  // to check.
  if (auto *MN = dyn_cast<MemIntrinsicSDNode>(N)) {
    ID.AddInteger(MN->getRawSubclassData());
    ID.AddInteger(MN->getMemoryVT().getRawBits());
    for (const MachineMemOperand *MMO : MN->memoperands()) {
      ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
      ID.AddInteger(MMO->getFlags());
    }
  }
}
1015
/// AddNodeIDNode - Generic routine for adding a nodes info to the NodeID
/// data.
// NOTE(review): the signature line (taking the existing node) and the final
// call into the custom-profile helper are elided in this rendering.
  AddNodeIDOpcode(ID, N->getOpcode());
  // Add the return value info.
  AddNodeIDValueTypes(ID, N->getVTList());
  // Add the operand info.
  AddNodeIDOperands(ID, N->ops());

  // Handle SDNode leafs with special info.
}
1028
1029//===----------------------------------------------------------------------===//
1030// SelectionDAG Class
1031//===----------------------------------------------------------------------===//
1032
1033/// doNotCSE - Return true if CSE should not be performed for this node.
1034static bool doNotCSE(SDNode *N) {
1035 if (N->getValueType(0) == MVT::Glue)
1036 return true; // Never CSE anything that produces a glue result.
1037
1038 switch (N->getOpcode()) {
1039 default: break;
1040 case ISD::HANDLENODE:
1041 case ISD::EH_LABEL:
1042 return true; // Never CSE these nodes.
1043 }
1044
1045 // Check that remaining values produced are not flags.
1046 for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
1047 if (N->getValueType(i) == MVT::Glue)
1048 return true; // Never CSE anything that produces a glue result.
1049
1050 return false;
1051}
1052
/// Construct a DemandedElts mask which demands all elements of \p V.
/// If \p V is not a fixed-length vector, then this will return a single bit.
  EVT VT = V.getValueType();
  // Since the number of lanes in a scalable vector is unknown at compile time,
  // we track one bit which is implicitly broadcast to all lanes. This means
  // that all lanes in a scalable vector are considered demanded.
  // NOTE(review): the fixed-length branch of this conditional expression is
  // elided in this rendering; only the single-bit fallback is visible.
                  : APInt(1, 1);
}
1063
/// RemoveDeadNodes - This method deletes all unreachable nodes in the
/// SelectionDAG.
// NOTE(review): the function signature line is elided in this rendering.
  // Create a dummy node (which is not added to allnodes), that adds a reference
  // to the root node, preventing it from being deleted.
  HandleSDNode Dummy(getRoot());

  SmallVector<SDNode*, 128> DeadNodes;

  // Add all obviously-dead nodes to the DeadNodes worklist.
  for (SDNode &Node : allnodes())
    if (Node.use_empty())
      DeadNodes.push_back(&Node);

  // Delete them and everything that becomes unreachable as a result.
  RemoveDeadNodes(DeadNodes);

  // If the root changed (e.g. it was a dead load, update the root).
  setRoot(Dummy.getValue());
}
1083
/// RemoveDeadNodes - This method deletes the unreachable nodes in the
/// given list, and any nodes that become unreachable as a result.
// NOTE(review): the function signature line is elided in this rendering.

  // Process the worklist, deleting the nodes and adding their uses to the
  // worklist.
  while (!DeadNodes.empty()) {
    SDNode *N = DeadNodes.pop_back_val();
    // Skip to next node if we've already managed to delete the node. This could
    // happen if replacing a node causes a node previously added to the node to
    // be deleted.
    if (N->getOpcode() == ISD::DELETED_NODE)
      continue;

    // Notify all registered listeners before the node disappears.
    for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
      DUL->NodeDeleted(N, nullptr);

    // Take the node out of the appropriate CSE map.
    RemoveNodeFromCSEMaps(N);

    // Next, brutally remove the operand list. This is safe to do, as there are
    // no cycles in the graph.
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
      SDUse &Use = *I++;
      SDNode *Operand = Use.getNode();
      Use.set(SDValue());

      // Now that we removed this operand, see if there are no uses of it left.
      if (Operand->use_empty())
        DeadNodes.push_back(Operand);
    }

    DeallocateNode(N);
  }
}
1119
1121 SmallVector<SDNode*, 16> DeadNodes(1, N);
1122
1123 // Create a dummy node that adds a reference to the root node, preventing
1124 // it from being deleted. (This matters if the root is an operand of the
1125 // dead node.)
1126 HandleSDNode Dummy(getRoot());
1127
1128 RemoveDeadNodes(DeadNodes);
1129}
1130
1132 // First take this out of the appropriate CSE map.
1133 RemoveNodeFromCSEMaps(N);
1134
1135 // Finally, remove uses due to operands of this node, remove from the
1136 // AllNodes list, and delete the node.
1137 DeleteNodeNotInCSEMaps(N);
1138}
1139
/// Delete \p N, which must already have been removed from the CSE maps and
/// must have no remaining uses. Operands are dropped first so the use counts
/// of the operand nodes are decremented before deallocation.
void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
  assert(N->getIterator() != AllNodes.begin() &&
         "Cannot delete the entry node!");
  assert(N->use_empty() && "Cannot delete a node that is not dead!");

  // Drop all of the operands and decrement used node's use counts.
  N->DropOperands();

  DeallocateNode(N);
}
1150
1151void SDDbgInfo::add(SDDbgValue *V, bool isParameter) {
1152 assert(!(V->isVariadic() && isParameter));
1153 if (isParameter)
1154 ByvalParmDbgValues.push_back(V);
1155 else
1156 DbgValues.push_back(V);
1157 for (const SDNode *Node : V->getSDNodes())
1158 if (Node)
1159 DbgValMap[Node].push_back(V);
1160}
1161
  // NOTE(review): the signature line is missing from this rendering;
  // presumably 'void SDDbgInfo::erase(const SDNode *Node) {' — confirm
  // against upstream SelectionDAG.cpp.
  // Invalidate every SDDbgValue that referred to this node and drop the
  // index entry so future lookups miss.
  DbgValMapType::iterator I = DbgValMap.find(Node);
  if (I == DbgValMap.end())
    return;
  for (auto &Val: I->second)
    Val->setIsInvalidated();
  DbgValMap.erase(I);
}
1170
/// Return a node's memory to the allocator, after invalidating everything
/// that still points at it (debug values, extra info).
void SelectionDAG::DeallocateNode(SDNode *N) {
  // If we have operands, deallocate them.
  // NOTE(review): a statement is missing here in this rendering (dropped
  // line; likely the operand-deallocation call) — confirm against upstream.

  NodeAllocator.Deallocate(AllNodes.remove(N));

  // Set the opcode to DELETED_NODE to help catch bugs when node
  // memory is reallocated.
  // FIXME: There are places in SDag that have grown a dependency on the opcode
  // value in the released node.
  __asan_unpoison_memory_region(&N->NodeType, sizeof(N->NodeType));
  N->NodeType = ISD::DELETED_NODE;

  // If any of the SDDbgValue nodes refer to this SDNode, invalidate
  // them and forget about that node.
  DbgInfo->erase(N);

  // Invalidate extra info.
  SDEI.erase(N);
}
1191
#ifndef NDEBUG
/// VerifySDNode - Check the given SDNode. Aborts if it is invalid.
/// Only a handful of opcodes have structural invariants checked here; all
/// other opcodes fall through the default case.
void SelectionDAG::verifyNode(SDNode *N) const {
  switch (N->getOpcode()) {
  default:
    if (N->isTargetOpcode())
      // NOTE(review): the statement guarded by this 'if' is missing from this
      // rendering (dropped line) — confirm against upstream.
    break;
  case ISD::BUILD_PAIR: {
    // BUILD_PAIR glues two equal-typed scalars into one double-width scalar.
    EVT VT = N->getValueType(0);
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
           "Wrong return type!");
    assert(N->getNumOperands() == 2 && "Wrong number of operands!");
    assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
           "Mismatched operand types!");
    assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
           "Wrong operand type!");
    // Result must be exactly twice the width of each operand.
    assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
           "Wrong return type size");
    break;
  }
  case ISD::BUILD_VECTOR: {
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(N->getValueType(0).isVector() && "Wrong return type!");
    assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
           "Wrong number of operands!");
    EVT EltVT = N->getValueType(0).getVectorElementType();
    for (const SDUse &Op : N->ops()) {
      // Operands may be wider integers than the element type (implicitly
      // truncated), but they must all agree with each other.
      assert((Op.getValueType() == EltVT ||
              (EltVT.isInteger() && Op.getValueType().isInteger() &&
               EltVT.bitsLE(Op.getValueType()))) &&
             "Wrong operand type!");
      assert(Op.getValueType() == N->getOperand(0).getValueType() &&
             "Operands must all have the same type");
    }
    break;
  }
  case ISD::SADDO:
  case ISD::UADDO:
  case ISD::SSUBO:
  case ISD::USUBO:
    // Overflow ops produce (result, overflow-flag) from two matching
    // integer operands.
    assert(N->getNumValues() == 2 && "Wrong number of results!");
    assert(N->getVTList().NumVTs == 2 && N->getNumOperands() == 2 &&
           "Invalid add/sub overflow op!");
    assert(N->getVTList().VTs[0].isInteger() &&
           N->getVTList().VTs[1].isInteger() &&
           N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
           N->getOperand(0).getValueType() == N->getVTList().VTs[0] &&
           "Binary operator types must match!");
    break;
  }
}
#endif // NDEBUG
1246
/// Insert a newly allocated node into the DAG.
///
/// Handles insertion into the all nodes list and CSE map, as well as
/// verification and other common operations when a new node is allocated.
void SelectionDAG::InsertNode(SDNode *N) {
  AllNodes.push_back(N);
#ifndef NDEBUG
  // Persistent IDs provide stable node numbering in debug builds.
  N->PersistentId = NextPersistentId++;
  verifyNode(N);
#endif
  // Notify every registered listener about the new node.
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeInserted(N);
}
1260
/// RemoveNodeFromCSEMaps - Take the specified node out of the CSE map that
/// correspond to it. This is useful when we're about to delete or repurpose
/// the node. We don't want future request for structurally identical nodes
/// to return N anymore.
/// Returns true if the node was found in (and erased from) some CSE map.
bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
  bool Erased = false;
  switch (N->getOpcode()) {
  case ISD::HANDLENODE: return false;  // noop.
  case ISD::CONDCODE:
    // Condition codes live in a simple side table, not the CSEMap.
    assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
           "Cond code doesn't exist!");
    Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr;
    CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr;
    break;
  // NOTE(review): a 'case' label (presumably ISD::ExternalSymbol) is missing
  // here in this rendering (dropped line) — confirm against upstream.
    Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
    break;
  // NOTE(review): another 'case' label with an opening '{' (presumably
  // ISD::TargetExternalSymbol) is missing here; the lone '}' after the break
  // below closes that case's scope.
    ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
    Erased = TargetExternalSymbols.erase(std::pair<std::string, unsigned>(
        ESN->getSymbol(), ESN->getTargetFlags()));
    break;
  }
  case ISD::MCSymbol: {
    auto *MCSN = cast<MCSymbolSDNode>(N);
    Erased = MCSymbols.erase(MCSN->getMCSymbol());
    break;
  }
  case ISD::VALUETYPE: {
    // Extended VTs and simple VTs are kept in different side tables.
    EVT VT = cast<VTSDNode>(N)->getVT();
    if (VT.isExtended()) {
      Erased = ExtendedValueTypeNodes.erase(VT);
    } else {
      Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr;
      ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr;
    }
    break;
  }
  default:
    // Remove it from the CSE Map.
    assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
    assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
    Erased = CSEMap.RemoveNode(N);
    break;
  }
#ifndef NDEBUG
  // Verify that the node was actually in one of the CSE maps, unless it has a
  // glue result (which cannot be CSE'd) or is one of the special cases that are
  // not subject to CSE.
  if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
      !N->isMachineOpcode() && !doNotCSE(N)) {
    N->dump(this);
    dbgs() << "\n";
    llvm_unreachable("Node is not in map!");
  }
#endif
  return Erased;
}
1319
1320/// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
1321/// maps and modified in place. Add it back to the CSE maps, unless an identical
1322/// node already exists, in which case transfer all its users to the existing
1323/// node. This transfer can potentially trigger recursive merging.
1324void
1325SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
1326 // For node types that aren't CSE'd, just act as if no identical node
1327 // already exists.
1328 if (!doNotCSE(N)) {
1329 SDNode *Existing = CSEMap.GetOrInsertNode(N);
1330 if (Existing != N) {
1331 // If there was already an existing matching node, use ReplaceAllUsesWith
1332 // to replace the dead one with the existing one. This can cause
1333 // recursive merging of other unrelated nodes down the line.
1334 Existing->intersectFlagsWith(N->getFlags());
1335 if (auto *MemNode = dyn_cast<MemSDNode>(Existing))
1336 MemNode->refineRanges(cast<MemSDNode>(N)->memoperands());
1337 ReplaceAllUsesWith(N, Existing);
1338
1339 // N is now dead. Inform the listeners and delete it.
1340 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
1341 DUL->NodeDeleted(N, Existing);
1342 DeleteNodeNotInCSEMaps(N);
1343 return;
1344 }
1345 }
1346
1347 // If the node doesn't already exist, we updated it. Inform listeners.
1348 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
1349 DUL->NodeUpdated(N);
1350}
1351
/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  // NOTE(review): a statement is missing here in this rendering (dropped
  // line; likely the custom-ID hashing call) — confirm against upstream.
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}
1370
/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
                                           SDValue Op1, SDValue Op2,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op1, Op2 };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  // NOTE(review): a statement is missing here in this rendering (dropped
  // line; likely the custom-ID hashing call) — confirm against upstream.
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}
1390
/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  // NOTE(review): a statement is missing here in this rendering (dropped
  // line; likely the custom-ID hashing call) — confirm against upstream.
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}
1408
1410 Type *Ty = VT == MVT::iPTR ? PointerType::get(*getContext(), 0)
1411 : VT.getTypeForEVT(*getContext());
1412
1413 return getDataLayout().getABITypeAlign(Ty);
1414}
1415
1416// EntryNode could meaningfully have debug info if we can find it...
1418 : TM(tm), OptLevel(OL), EntryNode(ISD::EntryToken, 0, DebugLoc(),
1419 getVTList(MVT::Other, MVT::Glue)),
1420 Root(getEntryNode()) {
1421 InsertNode(&EntryNode);
1422 DbgInfo = new SDDbgInfo();
1423}
1424
1426 OptimizationRemarkEmitter &NewORE, Pass *PassPtr,
1427 const TargetLibraryInfo *LibraryInfo,
1428 const LibcallLoweringInfo *LibcallsInfo,
1429 UniformityInfo *NewUA, ProfileSummaryInfo *PSIin,
1431 FunctionVarLocs const *VarLocs) {
1432 MF = &NewMF;
1433 SDAGISelPass = PassPtr;
1434 ORE = &NewORE;
1437 LibInfo = LibraryInfo;
1438 Libcalls = LibcallsInfo;
1439 Context = &MF->getFunction().getContext();
1440 UA = NewUA;
1441 PSI = PSIin;
1442 BFI = BFIin;
1443 MMI = &MMIin;
1444 FnVarLocs = VarLocs;
1445}
1446
1448 assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
1449 allnodes_clear();
1450 OperandRecycler.clear(OperandAllocator);
1451 delete DbgInfo;
1452}
1453
1455 return llvm::shouldOptimizeForSize(FLI->MBB->getBasicBlock(), PSI, BFI);
1456}
1457
/// Release every node in the DAG. The entry node is unlinked first — it is
/// a member of SelectionDAG itself (see the constructor), not allocator
/// memory, so it must not be passed to DeallocateNode.
void SelectionDAG::allnodes_clear() {
  assert(&*AllNodes.begin() == &EntryNode);
  AllNodes.remove(AllNodes.begin());
  while (!AllNodes.empty())
    DeallocateNode(&AllNodes.front());
#ifndef NDEBUG
  // Restart debug node numbering for the next use of this DAG.
  NextPersistentId = 0;
#endif
}
1467
1468SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
1469 void *&InsertPos) {
1470 SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
1471 if (N) {
1472 switch (N->getOpcode()) {
1473 default: break;
1474 case ISD::Constant:
1475 case ISD::ConstantFP:
1476 llvm_unreachable("Querying for Constant and ConstantFP nodes requires "
1477 "debug location. Use another overload.");
1478 }
1479 }
1480 return N;
1481}
1482
1483SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
1484 const SDLoc &DL, void *&InsertPos) {
1485 SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
1486 if (N) {
1487 switch (N->getOpcode()) {
1488 case ISD::Constant:
1489 case ISD::ConstantFP:
1490 // Erase debug location from the node if the node is used at several
1491 // different places. Do not propagate one location to all uses as it
1492 // will cause a worse single stepping debugging experience.
1493 if (N->getDebugLoc() != DL.getDebugLoc())
1494 N->setDebugLoc(DebugLoc());
1495 break;
1496 default:
1497 // When the node's point of use is located earlier in the instruction
1498 // sequence than its prior point of use, update its debug info to the
1499 // earlier location.
1500 if (DL.getIROrder() && DL.getIROrder() < N->getIROrder())
1501 N->setDebugLoc(DL.getDebugLoc());
1502 break;
1503 }
1504 }
1505 return N;
1506}
1507
1509 allnodes_clear();
1510 OperandRecycler.clear(OperandAllocator);
1511 OperandAllocator.Reset();
1512 CSEMap.clear();
1513
1514 ExtendedValueTypeNodes.clear();
1515 ExternalSymbols.clear();
1516 TargetExternalSymbols.clear();
1517 MCSymbols.clear();
1518 SDEI.clear();
1519 llvm::fill(CondCodeNodes, nullptr);
1520 llvm::fill(ValueTypeNodes, nullptr);
1521
1522 EntryNode.UseList = nullptr;
1523 InsertNode(&EntryNode);
1524 Root = getEntryNode();
1525 DbgInfo->clear();
1526}
1527
1529 return VT.bitsGT(Op.getValueType())
1530 ? getNode(ISD::FP_EXTEND, DL, VT, Op)
1531 : getNode(ISD::FP_ROUND, DL, VT, Op,
1532 getIntPtrConstant(0, DL, /*isTarget=*/true));
1533}
1534
1535std::pair<SDValue, SDValue>
1537 const SDLoc &DL, EVT VT) {
1538 assert(!VT.bitsEq(Op.getValueType()) &&
1539 "Strict no-op FP extend/round not allowed.");
1540 SDValue Res =
1541 VT.bitsGT(Op.getValueType())
1542 ? getNode(ISD::STRICT_FP_EXTEND, DL, {VT, MVT::Other}, {Chain, Op})
1543 : getNode(ISD::STRICT_FP_ROUND, DL, {VT, MVT::Other},
1544 {Chain, Op, getIntPtrConstant(0, DL, /*isTarget=*/true)});
1545
1546 return std::pair<SDValue, SDValue>(Res, SDValue(Res.getNode(), 1));
1547}
1548
1550 return VT.bitsGT(Op.getValueType()) ?
1551 getNode(ISD::ANY_EXTEND, DL, VT, Op) :
1552 getNode(ISD::TRUNCATE, DL, VT, Op);
1553}
1554
1556 return VT.bitsGT(Op.getValueType()) ?
1557 getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
1558 getNode(ISD::TRUNCATE, DL, VT, Op);
1559}
1560
1562 return VT.bitsGT(Op.getValueType()) ?
1563 getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
1564 getNode(ISD::TRUNCATE, DL, VT, Op);
1565}
1566
1568 EVT VT) {
1569 assert(!VT.isVector());
1570 auto Type = Op.getValueType();
1571 SDValue DestOp;
1572 if (Type == VT)
1573 return Op;
1574 auto Size = Op.getValueSizeInBits();
1575 DestOp = getBitcast(EVT::getIntegerVT(*Context, Size), Op);
1576 if (DestOp.getValueType() == VT)
1577 return DestOp;
1578
1579 return getAnyExtOrTrunc(DestOp, DL, VT);
1580}
1581
1583 EVT VT) {
1584 assert(!VT.isVector());
1585 auto Type = Op.getValueType();
1586 SDValue DestOp;
1587 if (Type == VT)
1588 return Op;
1589 auto Size = Op.getValueSizeInBits();
1590 DestOp = getBitcast(MVT::getIntegerVT(Size), Op);
1591 if (DestOp.getValueType() == VT)
1592 return DestOp;
1593
1594 return getSExtOrTrunc(DestOp, DL, VT);
1595}
1596
1598 EVT VT) {
1599 assert(!VT.isVector());
1600 auto Type = Op.getValueType();
1601 SDValue DestOp;
1602 if (Type == VT)
1603 return Op;
1604 auto Size = Op.getValueSizeInBits();
1605 DestOp = getBitcast(MVT::getIntegerVT(Size), Op);
1606 if (DestOp.getValueType() == VT)
1607 return DestOp;
1608
1609 return getZExtOrTrunc(DestOp, DL, VT);
1610}
1611
1613 EVT OpVT) {
1614 if (VT.bitsLE(Op.getValueType()))
1615 return getNode(ISD::TRUNCATE, SL, VT, Op);
1616
1617 TargetLowering::BooleanContent BType = TLI->getBooleanContents(OpVT);
1618 return getNode(TLI->getExtendForContent(BType), SL, VT, Op);
1619}
1620
1622 EVT OpVT = Op.getValueType();
1623 assert(VT.isInteger() && OpVT.isInteger() &&
1624 "Cannot getZeroExtendInReg FP types");
1625 assert(VT.isVector() == OpVT.isVector() &&
1626 "getZeroExtendInReg type should be vector iff the operand "
1627 "type is vector!");
1628 assert((!VT.isVector() ||
1630 "Vector element counts must match in getZeroExtendInReg");
1631 assert(VT.getScalarType().bitsLE(OpVT.getScalarType()) && "Not extending!");
1632 if (OpVT == VT)
1633 return Op;
1634 // TODO: Use computeKnownBits instead of AssertZext.
1635 if (Op.getOpcode() == ISD::AssertZext && cast<VTSDNode>(Op.getOperand(1))
1636 ->getVT()
1637 .getScalarType()
1638 .bitsLE(VT.getScalarType()))
1639 return Op;
1641 VT.getScalarSizeInBits());
1642 return getNode(ISD::AND, DL, OpVT, Op, getConstant(Imm, DL, OpVT));
1643}
1644
1646 SDValue EVL, const SDLoc &DL,
1647 EVT VT) {
1648 EVT OpVT = Op.getValueType();
1649 assert(VT.isInteger() && OpVT.isInteger() &&
1650 "Cannot getVPZeroExtendInReg FP types");
1651 assert(VT.isVector() && OpVT.isVector() &&
1652 "getVPZeroExtendInReg type and operand type should be vector!");
1654 "Vector element counts must match in getZeroExtendInReg");
1655 assert(VT.getScalarType().bitsLE(OpVT.getScalarType()) && "Not extending!");
1656 if (OpVT == VT)
1657 return Op;
1659 VT.getScalarSizeInBits());
1660 return getNode(ISD::VP_AND, DL, OpVT, Op, getConstant(Imm, DL, OpVT), Mask,
1661 EVL);
1662}
1663
1665 // Only unsigned pointer semantics are supported right now. In the future this
1666 // might delegate to TLI to check pointer signedness.
1667 return getZExtOrTrunc(Op, DL, VT);
1668}
1669
1671 // Only unsigned pointer semantics are supported right now. In the future this
1672 // might delegate to TLI to check pointer signedness.
1673 return getZeroExtendInReg(Op, DL, VT);
1674}
1675
1677 return getNode(ISD::SUB, DL, VT, getConstant(0, DL, VT), Val);
1678}
1679
1680/// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
1682 return getNode(ISD::XOR, DL, VT, Val, getAllOnesConstant(DL, VT));
1683}
1684
1686 SDValue TrueValue = getBoolConstant(true, DL, VT, VT);
1687 return getNode(ISD::XOR, DL, VT, Val, TrueValue);
1688}
1689
1691 SDValue Mask, SDValue EVL, EVT VT) {
1692 SDValue TrueValue = getBoolConstant(true, DL, VT, VT);
1693 return getNode(ISD::VP_XOR, DL, VT, Val, TrueValue, Mask, EVL);
1694}
1695
1697 SDValue Mask, SDValue EVL) {
1698 return getVPZExtOrTrunc(DL, VT, Op, Mask, EVL);
1699}
1700
1702 SDValue Mask, SDValue EVL) {
1703 if (VT.bitsGT(Op.getValueType()))
1704 return getNode(ISD::VP_ZERO_EXTEND, DL, VT, Op, Mask, EVL);
1705 if (VT.bitsLT(Op.getValueType()))
1706 return getNode(ISD::VP_TRUNCATE, DL, VT, Op, Mask, EVL);
1707 return Op;
1708}
1709
1711 EVT OpVT) {
1712 if (!V)
1713 return getConstant(0, DL, VT);
1714
1715 switch (TLI->getBooleanContents(OpVT)) {
1718 return getConstant(1, DL, VT);
1720 return getAllOnesConstant(DL, VT);
1721 }
1722 llvm_unreachable("Unexpected boolean content enum!");
1723}
1724
1726 bool isT, bool isO) {
1727 return getConstant(APInt(VT.getScalarSizeInBits(), Val, /*isSigned=*/false),
1728 DL, VT, isT, isO);
1729}
1730
1732 bool isT, bool isO) {
1733 return getConstant(*ConstantInt::get(*Context, Val), DL, VT, isT, isO);
1734}
1735
1737 EVT VT, bool isT, bool isO) {
1738 assert(VT.isInteger() && "Cannot create FP integer constant!");
1739
1740 EVT EltVT = VT.getScalarType();
1741 const ConstantInt *Elt = &Val;
1742
1743 // Vector splats are explicit within the DAG, with ConstantSDNode holding the
1744 // to-be-splatted scalar ConstantInt.
1745 if (isa<VectorType>(Elt->getType()))
1746 Elt = ConstantInt::get(*getContext(), Elt->getValue());
1747
1748 // In some cases the vector type is legal but the element type is illegal and
1749 // needs to be promoted, for example v8i8 on ARM. In this case, promote the
1750 // inserted value (the type does not need to match the vector element type).
1751 // Any extra bits introduced will be truncated away.
1752 if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
1754 EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
1755 APInt NewVal;
1756 if (TLI->isSExtCheaperThanZExt(VT.getScalarType(), EltVT))
1757 NewVal = Elt->getValue().sextOrTrunc(EltVT.getSizeInBits());
1758 else
1759 NewVal = Elt->getValue().zextOrTrunc(EltVT.getSizeInBits());
1760 Elt = ConstantInt::get(*getContext(), NewVal);
1761 }
1762 // In other cases the element type is illegal and needs to be expanded, for
1763 // example v2i64 on MIPS32. In this case, find the nearest legal type, split
1764 // the value into n parts and use a vector type with n-times the elements.
1765 // Then bitcast to the type requested.
1766 // Legalizing constants too early makes the DAGCombiner's job harder so we
1767 // only legalize if the DAG tells us we must produce legal types.
1768 else if (NewNodesMustHaveLegalTypes && VT.isVector() &&
1769 TLI->getTypeAction(*getContext(), EltVT) ==
1771 const APInt &NewVal = Elt->getValue();
1772 EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
1773 unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits();
1774
1775 // For scalable vectors, try to use a SPLAT_VECTOR_PARTS node.
1776 if (VT.isScalableVector() ||
1777 TLI->isOperationLegal(ISD::SPLAT_VECTOR, VT)) {
1778 assert(EltVT.getSizeInBits() % ViaEltSizeInBits == 0 &&
1779 "Can only handle an even split!");
1780 unsigned Parts = EltVT.getSizeInBits() / ViaEltSizeInBits;
1781
1782 SmallVector<SDValue, 2> ScalarParts;
1783 for (unsigned i = 0; i != Parts; ++i)
1784 ScalarParts.push_back(getConstant(
1785 NewVal.extractBits(ViaEltSizeInBits, i * ViaEltSizeInBits), DL,
1786 ViaEltVT, isT, isO));
1787
1788 return getNode(ISD::SPLAT_VECTOR_PARTS, DL, VT, ScalarParts);
1789 }
1790
1791 unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits;
1792 EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts);
1793
1794 // Check the temporary vector is the correct size. If this fails then
1795 // getTypeToTransformTo() probably returned a type whose size (in bits)
1796 // isn't a power-of-2 factor of the requested type size.
1797 assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits());
1798
1799 SmallVector<SDValue, 2> EltParts;
1800 for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i)
1801 EltParts.push_back(getConstant(
1802 NewVal.extractBits(ViaEltSizeInBits, i * ViaEltSizeInBits), DL,
1803 ViaEltVT, isT, isO));
1804
1805 // EltParts is currently in little endian order. If we actually want
1806 // big-endian order then reverse it now.
1807 if (getDataLayout().isBigEndian())
1808 std::reverse(EltParts.begin(), EltParts.end());
1809
1810 // The elements must be reversed when the element order is different
1811 // to the endianness of the elements (because the BITCAST is itself a
1812 // vector shuffle in this situation). However, we do not need any code to
1813 // perform this reversal because getConstant() is producing a vector
1814 // splat.
1815 // This situation occurs in MIPS MSA.
1816
1818 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
1819 llvm::append_range(Ops, EltParts);
1820
1821 SDValue V =
1822 getNode(ISD::BITCAST, DL, VT, getBuildVector(ViaVecVT, DL, Ops));
1823 return V;
1824 }
1825
1826 assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
1827 "APInt size does not match type size!");
1828 unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
1829 SDVTList VTs = getVTList(EltVT);
1831 AddNodeIDNode(ID, Opc, VTs, {});
1832 ID.AddPointer(Elt);
1833 ID.AddBoolean(isO);
1834 void *IP = nullptr;
1835 SDNode *N = nullptr;
1836 if ((N = FindNodeOrInsertPos(ID, DL, IP)))
1837 if (!VT.isVector())
1838 return SDValue(N, 0);
1839
1840 if (!N) {
1841 N = newSDNode<ConstantSDNode>(isT, isO, Elt, VTs);
1842 if (!isT)
1843 N->setDebugLoc(DL.getDebugLoc());
1844 CSEMap.InsertNode(N, IP);
1845 InsertNode(N);
1846 NewSDValueDbgMsg(SDValue(N, 0), "Creating constant: ", this);
1847 }
1848
1849 SDValue Result(N, 0);
1850 if (VT.isVector())
1851 Result = getSplat(VT, DL, Result);
1852 return Result;
1853}
1854
1856 bool isT, bool isO) {
1857 unsigned Size = VT.getScalarSizeInBits();
1858 return getConstant(APInt(Size, Val, /*isSigned=*/true), DL, VT, isT, isO);
1859}
1860
1862 bool IsOpaque) {
1864 IsTarget, IsOpaque);
1865}
1866
1868 bool isTarget) {
1869 return getConstant(Val, DL, TLI->getPointerTy(getDataLayout()), isTarget);
1870}
1871
1873 const SDLoc &DL) {
1874 assert(VT.isInteger() && "Shift amount is not an integer type!");
1875 EVT ShiftVT = TLI->getShiftAmountTy(VT, getDataLayout());
1876 return getConstant(Val, DL, ShiftVT);
1877}
1878
1880 const SDLoc &DL) {
1881 assert(Val.ult(VT.getScalarSizeInBits()) && "Out of range shift");
1882 return getShiftAmountConstant(Val.getZExtValue(), VT, DL);
1883}
1884
1886 bool isTarget) {
1887 return getConstant(Val, DL, TLI->getVectorIdxTy(getDataLayout()), isTarget);
1888}
1889
1891 bool isTarget) {
1892 return getConstantFP(*ConstantFP::get(*getContext(), V), DL, VT, isTarget);
1893}
1894
1896 EVT VT, bool isTarget) {
1897 assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");
1898
1899 EVT EltVT = VT.getScalarType();
1900 const ConstantFP *Elt = &V;
1901
1902 // Vector splats are explicit within the DAG, with ConstantFPSDNode holding
1903 // the to-be-splatted scalar ConstantFP.
1904 if (isa<VectorType>(Elt->getType()))
1905 Elt = ConstantFP::get(*getContext(), Elt->getValue());
1906
1907 // Do the map lookup using the actual bit pattern for the floating point
1908 // value, so that we don't have problems with 0.0 comparing equal to -0.0, and
1909 // we don't have issues with SNANs.
1910 unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
1911 SDVTList VTs = getVTList(EltVT);
1913 AddNodeIDNode(ID, Opc, VTs, {});
1914 ID.AddPointer(Elt);
1915 void *IP = nullptr;
1916 SDNode *N = nullptr;
1917 if ((N = FindNodeOrInsertPos(ID, DL, IP)))
1918 if (!VT.isVector())
1919 return SDValue(N, 0);
1920
1921 if (!N) {
1922 N = newSDNode<ConstantFPSDNode>(isTarget, Elt, VTs);
1923 CSEMap.InsertNode(N, IP);
1924 InsertNode(N);
1925 }
1926
1927 SDValue Result(N, 0);
1928 if (VT.isVector())
1929 Result = getSplat(VT, DL, Result);
1930 NewSDValueDbgMsg(Result, "Creating fp constant: ", this);
1931 return Result;
1932}
1933
1935 bool isTarget) {
1936 EVT EltVT = VT.getScalarType();
1937 if (EltVT == MVT::f32)
1938 return getConstantFP(APFloat((float)Val), DL, VT, isTarget);
1939 if (EltVT == MVT::f64)
1940 return getConstantFP(APFloat(Val), DL, VT, isTarget);
1941 if (EltVT == MVT::f80 || EltVT == MVT::f128 || EltVT == MVT::ppcf128 ||
1942 EltVT == MVT::f16 || EltVT == MVT::bf16) {
1943 bool Ignored;
1944 APFloat APF = APFloat(Val);
1946 &Ignored);
1947 return getConstantFP(APF, DL, VT, isTarget);
1948 }
1949 llvm_unreachable("Unsupported type in getConstantFP");
1950}
1951
1953 EVT VT, int64_t Offset, bool isTargetGA,
1954 unsigned TargetFlags) {
1955 assert((TargetFlags == 0 || isTargetGA) &&
1956 "Cannot set target flags on target-independent globals");
1957
1958 // Truncate (with sign-extension) the offset value to the pointer size.
1960 if (BitWidth < 64)
1962
1963 unsigned Opc;
1964 if (GV->isThreadLocal())
1966 else
1968
1969 SDVTList VTs = getVTList(VT);
1971 AddNodeIDNode(ID, Opc, VTs, {});
1972 ID.AddPointer(GV);
1973 ID.AddInteger(Offset);
1974 ID.AddInteger(TargetFlags);
1975 void *IP = nullptr;
1976 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
1977 return SDValue(E, 0);
1978
1979 auto *N = newSDNode<GlobalAddressSDNode>(
1980 Opc, DL.getIROrder(), DL.getDebugLoc(), GV, VTs, Offset, TargetFlags);
1981 CSEMap.InsertNode(N, IP);
1982 InsertNode(N);
1983 return SDValue(N, 0);
1984}
1985
1987 SDVTList VTs = getVTList(MVT::Untyped);
1990 ID.AddPointer(GV);
1991 void *IP = nullptr;
1992 if (SDNode *E = FindNodeOrInsertPos(ID, SDLoc(), IP))
1993 return SDValue(E, 0);
1994
1995 auto *N = newSDNode<DeactivationSymbolSDNode>(GV, VTs);
1996 CSEMap.InsertNode(N, IP);
1997 InsertNode(N);
1998 return SDValue(N, 0);
1999}
2000
/// Return a (possibly CSE'd) FrameIndex node for stack slot FI.
SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
  unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
  SDVTList VTs = getVTList(VT);
  // NOTE(review): the FoldingSetNodeID declaration line is missing from this
  // rendering (dropped line) — confirm against upstream.
  AddNodeIDNode(ID, Opc, VTs, {});
  ID.AddInteger(FI);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<FrameIndexSDNode>(FI, VTs, isTarget);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}
2016
/// Return a (possibly CSE'd) JumpTable node for jump-table index JTI.
/// Target flags are only allowed on target jump tables.
SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
                                   unsigned TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent jump tables");
  unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
  SDVTList VTs = getVTList(VT);
  // NOTE(review): the FoldingSetNodeID declaration line is missing from this
  // rendering (dropped line) — confirm against upstream.
  AddNodeIDNode(ID, Opc, VTs, {});
  ID.AddInteger(JTI);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<JumpTableSDNode>(JTI, VTs, isTarget, TargetFlags);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}
2036
2038 const SDLoc &DL) {
2040 return getNode(ISD::JUMP_TABLE_DEBUG_INFO, DL, MVT::Other, Chain,
2041 getTargetConstant(static_cast<uint64_t>(JTI), DL, PTy, true));
2042}
2043
2045 MaybeAlign Alignment, int Offset,
2046 bool isTarget, unsigned TargetFlags) {
2047 assert((TargetFlags == 0 || isTarget) &&
2048 "Cannot set target flags on target-independent globals");
2049 if (!Alignment)
2050 Alignment = shouldOptForSize()
2051 ? getDataLayout().getABITypeAlign(C->getType())
2052 : getDataLayout().getPrefTypeAlign(C->getType());
2053 unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
2054 SDVTList VTs = getVTList(VT);
2056 AddNodeIDNode(ID, Opc, VTs, {});
2057 ID.AddInteger(Alignment->value());
2058 ID.AddInteger(Offset);
2059 ID.AddPointer(C);
2060 ID.AddInteger(TargetFlags);
2061 void *IP = nullptr;
2062 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
2063 return SDValue(E, 0);
2064
2065 auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VTs, Offset, *Alignment,
2066 TargetFlags);
2067 CSEMap.InsertNode(N, IP);
2068 InsertNode(N);
2069 SDValue V = SDValue(N, 0);
2070 NewSDValueDbgMsg(V, "Creating new constant pool: ", this);
2071 return V;
2072}
2073
2075 MaybeAlign Alignment, int Offset,
2076 bool isTarget, unsigned TargetFlags) {
2077 assert((TargetFlags == 0 || isTarget) &&
2078 "Cannot set target flags on target-independent globals");
2079 if (!Alignment)
2080 Alignment = getDataLayout().getPrefTypeAlign(C->getType());
2081 unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
2082 SDVTList VTs = getVTList(VT);
2084 AddNodeIDNode(ID, Opc, VTs, {});
2085 ID.AddInteger(Alignment->value());
2086 ID.AddInteger(Offset);
2087 C->addSelectionDAGCSEId(ID);
2088 ID.AddInteger(TargetFlags);
2089 void *IP = nullptr;
2090 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
2091 return SDValue(E, 0);
2092
2093 auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VTs, Offset, *Alignment,
2094 TargetFlags);
2095 CSEMap.InsertNode(N, IP);
2096 InsertNode(N);
2097 return SDValue(N, 0);
2098}
2099
2102 AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), {});
2103 ID.AddPointer(MBB);
2104 void *IP = nullptr;
2105 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
2106 return SDValue(E, 0);
2107
2108 auto *N = newSDNode<BasicBlockSDNode>(MBB);
2109 CSEMap.InsertNode(N, IP);
2110 InsertNode(N);
2111 return SDValue(N, 0);
2112}
2113
2115 if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
2116 ValueTypeNodes.size())
2117 ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);
2118
2119 SDNode *&N = VT.isExtended() ?
2120 ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];
2121
2122 if (N) return SDValue(N, 0);
2123 N = newSDNode<VTSDNode>(VT);
2124 InsertNode(N);
2125 return SDValue(N, 0);
2126}
2127
2129 SDNode *&N = ExternalSymbols[Sym];
2130 if (N) return SDValue(N, 0);
2131 N = newSDNode<ExternalSymbolSDNode>(false, Sym, 0, getVTList(VT));
2132 InsertNode(N);
2133 return SDValue(N, 0);
2134}
2135
// Overload: resolve a runtime-libcall implementation to its symbol name and
// forward to the const char* overload above.
// NOTE(review): extraction artifact — the line defining SymName (doxygen 2137,
// presumably a libcall-name lookup via TLI/RTLIB) is missing from this listing.
2136SDValue SelectionDAG::getExternalSymbol(RTLIB::LibcallImpl Libcall, EVT VT) {
2138 return getExternalSymbol(SymName.data(), VT);
2139}
2140
// SelectionDAG::getMCSymbol: return the memoized MCSymbolSDNode for Sym
// (cached in the MCSymbols map).
// NOTE(review): extraction artifact — the signature line (doxygen 2141) is
// missing from this listing.
2142 SDNode *&N = MCSymbols[Sym];
2143 if (N)
2144 return SDValue(N, 0);
2145 N = newSDNode<MCSymbolSDNode>(Sym, getVTList(VT));
2146 InsertNode(N);
2147 return SDValue(N, 0);
2148}
2149
// SelectionDAG::getTargetExternalSymbol: memoized target-specific external
// symbol node, keyed by (symbol name, target flags).
// NOTE(review): extraction artifact — the signature line (doxygen 2150) is
// missing from this listing.
2151 unsigned TargetFlags) {
2152 SDNode *&N =
2153 TargetExternalSymbols[std::pair<std::string, unsigned>(Sym, TargetFlags)];
2154 if (N) return SDValue(N, 0);
2155 N = newSDNode<ExternalSymbolSDNode>(true, Sym, TargetFlags, getVTList(VT));
2156 InsertNode(N);
2157 return SDValue(N, 0);
2158}
2159
// Overload: resolve a runtime-libcall implementation to its symbol name and
// forward to the const char* overload above.
// NOTE(review): extraction artifact — the signature line (doxygen 2160) and the
// line defining SymName (doxygen 2162) are missing from this listing.
2161 EVT VT, unsigned TargetFlags) {
2163 return getTargetExternalSymbol(SymName.data(), VT, TargetFlags);
2164}
2165
// SelectionDAG::getCondCode: return the unique CondCodeSDNode for Cond,
// lazily growing and filling the CondCodeNodes cache.
// NOTE(review): extraction artifact — the signature line (doxygen 2166) is
// missing from this listing.
2167 if ((unsigned)Cond >= CondCodeNodes.size())
2168 CondCodeNodes.resize(Cond+1);
2169
2170 if (!CondCodeNodes[Cond]) {
2171 auto *N = newSDNode<CondCodeSDNode>(Cond);
2172 CondCodeNodes[Cond] = N;
2173 InsertNode(N);
2174 }
2175
2176 return SDValue(CondCodeNodes[Cond], 0);
2177}
2178
// SelectionDAG::getVScale: return MulImm * vscale as a VT value. Folds to a
// plain constant when MulImm is zero or when the function's vscale_range
// attribute pins vscale to a single value.
// NOTE(review): extraction artifact — the signature line (doxygen 2179) is
// missing from this listing.
2180 assert(MulImm.getBitWidth() == VT.getSizeInBits() &&
2181 "APInt size does not match type size!");
2182
2183 if (MulImm == 0)
2184 return getConstant(0, DL, VT);
2185
// If vscale is known exactly (from the vscale_range attribute), fold now.
2186 const MachineFunction &MF = getMachineFunction();
2187 const Function &F = MF.getFunction();
2188 ConstantRange CR = getVScaleRange(&F, 64);
2189 if (const APInt *C = CR.getSingleElement())
2190 return getConstant(MulImm * C->getZExtValue(), DL, VT);
2191
2192 return getNode(ISD::VSCALE, DL, VT, getConstant(MulImm, DL, VT));
2193}
2194
2195/// \returns a value of type \p VT that represents the runtime value of \p
2196/// Quantity, i.e. scaled by vscale if it's scalable, or a fixed constant
2197/// otherwise. Quantity should be a FixedOrScalableQuantity, i.e. ElementCount
2198/// or TypeSize.
// NOTE(review): extraction artifact — the first line of the signature (doxygen
// 2200, the static helper's name and leading parameters) is missing here.
2199template <typename Ty>
2201 EVT VT, Ty Quantity) {
// Scalable quantities become (known-min * vscale); fixed ones a constant.
2202 if (Quantity.isScalable())
2203 return DAG.getVScale(
2204 DL, VT, APInt(VT.getSizeInBits(), Quantity.getKnownMinValue()));
2205
2206 return DAG.getConstant(Quantity.getKnownMinValue(), DL, VT);
2207}
2208
// SelectionDAG::getElementCount: runtime value of an ElementCount (delegates
// to the getFixedOrScalableQuantity helper above).
// NOTE(review): extraction artifact — the signature line (doxygen 2209) is
// missing from this listing.
2210 ElementCount EC) {
2211 return getFixedOrScalableQuantity(*this, DL, VT, EC);
2212}
2213
// Runtime value of a TypeSize TS (delegates to getFixedOrScalableQuantity).
// NOTE(review): extraction artifact — the entire signature (doxygen 2214) is
// missing; judging by the TS parameter this is the TypeSize counterpart of
// getElementCount above — confirm against upstream SelectionDAG.cpp.
2215 return getFixedOrScalableQuantity(*this, DL, VT, TS);
2216}
2217
// Builds a GET_ACTIVE_LANE_MASK node covering lanes [0, EC): base index 0 and
// trip count EC, with the mask type taken from the target's setcc result type
// for DataVT.
// NOTE(review): extraction artifact — the signature line (doxygen 2218) is
// missing; the exact member name is not visible here — confirm upstream.
2219 ElementCount EC) {
2220 EVT IdxVT = TLI->getVectorIdxTy(getDataLayout());
2221 EVT MaskVT = TLI->getSetCCResultType(getDataLayout(), *getContext(), DataVT);
2222 return getNode(ISD::GET_ACTIVE_LANE_MASK, DL, MaskVT,
2223 getConstant(0, DL, IdxVT), getElementCount(DL, IdxVT, EC));
2224}
2225
// SelectionDAG::getStepVector (unit-step overload): <0, 1, 2, ...> of ResVT,
// implemented via the explicit-step overload below with step = 1.
// NOTE(review): extraction artifact — the signature line (doxygen 2226) is
// missing from this listing.
2227 APInt One(ResVT.getScalarSizeInBits(), 1);
2228 return getStepVector(DL, ResVT, One);
2229}
2230
// SelectionDAG::getStepVector: <0, Step, 2*Step, ...>. Scalable vectors use an
// ISD::STEP_VECTOR node; fixed vectors are materialized as a BUILD_VECTOR of
// per-lane constants.
// NOTE(review): extraction artifact — the signature line (doxygen 2231) is
// missing from this listing.
2232 const APInt &StepVal) {
2233 assert(ResVT.getScalarSizeInBits() == StepVal.getBitWidth());
2234 if (ResVT.isScalableVector())
2235 return getNode(
2236 ISD::STEP_VECTOR, DL, ResVT,
2237 getTargetConstant(StepVal, DL, ResVT.getVectorElementType()));
2238
2239 SmallVector<SDValue, 16> OpsStepConstants;
2240 for (uint64_t i = 0; i < ResVT.getVectorNumElements(); i++)
2241 OpsStepConstants.push_back(
2242 getConstant(StepVal * i, DL, ResVT.getVectorElementType()));
2243 return getBuildVector(ResVT, DL, OpsStepConstants);
2244}
2245
2246/// Swaps the values of N1 and N2. Swaps all indices in the shuffle mask M that
2247/// point at N1 to point at N2 and indices that point at N2 to point at N1.
2252
// SelectionDAG::getVectorShuffle: build (or CSE-reuse) a VECTOR_SHUFFLE of
// N1/N2 with Mask, after applying a series of canonicalizations: undef
// operands, identical operands, splat blending, all-LHS/all-RHS masks,
// identity masks, and shuffles of constant splats.
// NOTE(review): extraction artifact — the first signature line (doxygen 2253),
// the FoldingSetNodeID declaration (2397) and the AddNodeIDNode call (2399)
// are missing from this listing.
2254 SDValue N2, ArrayRef<int> Mask) {
2255 assert(VT.getVectorNumElements() == Mask.size() &&
2256 "Must have the same number of vector elements as mask elements!");
2257 assert(VT == N1.getValueType() && VT == N2.getValueType() &&
2258 "Invalid VECTOR_SHUFFLE");
2259
2260 // Canonicalize shuffle undef, undef -> undef
2261 if (N1.isUndef() && N2.isUndef())
2262 return getUNDEF(VT);
2263
2264 // Validate that all indices in Mask are within the range of the elements
2265 // input to the shuffle.
2266 int NElts = Mask.size();
2267 assert(llvm::all_of(Mask,
2268 [&](int M) { return M < (NElts * 2) && M >= -1; }) &&
2269 "Index out of range");
2270
2271 // Copy the mask so we can do any needed cleanup.
2272 SmallVector<int, 8> MaskVec(Mask);
2273
2274 // Canonicalize shuffle v, v -> v, undef
2275 if (N1 == N2) {
2276 N2 = getUNDEF(VT);
2277 for (int i = 0; i != NElts; ++i)
2278 if (MaskVec[i] >= NElts) MaskVec[i] -= NElts;
2279 }
2280
2281 // Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask.
2282 if (N1.isUndef())
2283 commuteShuffle(N1, N2, MaskVec);
2284
2285 if (TLI->hasVectorBlend()) {
2286 // If shuffling a splat, try to blend the splat instead. We do this here so
2287 // that even when this arises during lowering we don't have to re-handle it.
2288 auto BlendSplat = [&](BuildVectorSDNode *BV, int Offset) {
2289 BitVector UndefElements;
2290 SDValue Splat = BV->getSplatValue(&UndefElements);
2291 if (!Splat)
2292 return;
2293
2294 for (int i = 0; i < NElts; ++i) {
// Only rewrite lanes that select from this operand's half of the mask.
2295 if (MaskVec[i] < Offset || MaskVec[i] >= (Offset + NElts))
2296 continue;
2297
2298 // If this input comes from undef, mark it as such.
2299 if (UndefElements[MaskVec[i] - Offset]) {
2300 MaskVec[i] = -1;
2301 continue;
2302 }
2303
2304 // If we can blend a non-undef lane, use that instead.
2305 if (!UndefElements[i])
2306 MaskVec[i] = i + Offset;
2307 }
2308 };
2309 if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
2310 BlendSplat(N1BV, 0);
2311 if (auto *N2BV = dyn_cast<BuildVectorSDNode>(N2))
2312 BlendSplat(N2BV, NElts);
2313 }
2314
2315 // Canonicalize all index into lhs, -> shuffle lhs, undef
2316 // Canonicalize all index into rhs, -> shuffle rhs, undef
2317 bool AllLHS = true, AllRHS = true;
2318 bool N2Undef = N2.isUndef();
2319 for (int i = 0; i != NElts; ++i) {
2320 if (MaskVec[i] >= NElts) {
2321 if (N2Undef)
2322 MaskVec[i] = -1;
2323 else
2324 AllLHS = false;
2325 } else if (MaskVec[i] >= 0) {
2326 AllRHS = false;
2327 }
2328 }
// All lanes undef (both flags survive only if every index was cleared).
2329 if (AllLHS && AllRHS)
2330 return getUNDEF(VT);
2331 if (AllLHS && !N2Undef)
2332 N2 = getUNDEF(VT);
2333 if (AllRHS) {
2334 N1 = getUNDEF(VT);
2335 commuteShuffle(N1, N2, MaskVec);
2336 }
2337 // Reset our undef status after accounting for the mask.
2338 N2Undef = N2.isUndef();
2339 // Re-check whether both sides ended up undef.
2340 if (N1.isUndef() && N2Undef)
2341 return getUNDEF(VT);
2342
2343 // If Identity shuffle return that node.
2344 bool Identity = true, AllSame = true;
2345 for (int i = 0; i != NElts; ++i) {
2346 if (MaskVec[i] >= 0 && MaskVec[i] != i) Identity = false;
2347 if (MaskVec[i] != MaskVec[0]) AllSame = false;
2348 }
2349 if (Identity && NElts)
2350 return N1;
2351
2352 // Shuffling a constant splat doesn't change the result.
2353 if (N2Undef) {
2354 SDValue V = N1;
2355
2356 // Look through any bitcasts. We check that these don't change the number
2357 // (and size) of elements and just changes their types.
2358 while (V.getOpcode() == ISD::BITCAST)
2359 V = V->getOperand(0);
2360
2361 // A splat should always show up as a build vector node.
2362 if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
2363 BitVector UndefElements;
2364 SDValue Splat = BV->getSplatValue(&UndefElements);
2365 // If this is a splat of an undef, shuffling it is also undef.
2366 if (Splat && Splat.isUndef())
2367 return getUNDEF(VT);
2368
2369 bool SameNumElts =
2370 V.getValueType().getVectorNumElements() == VT.getVectorNumElements();
2371
2372 // We only have a splat which can skip shuffles if there is a splatted
2373 // value and no undef lanes rearranged by the shuffle.
2374 if (Splat && UndefElements.none()) {
2375 // Splat of <x, x, ..., x>, return <x, x, ..., x>, provided that the
2376 // number of elements match or the value splatted is a zero constant.
2377 if (SameNumElts || isNullConstant(Splat))
2378 return N1;
2379 }
2380
2381 // If the shuffle itself creates a splat, build the vector directly.
2382 if (AllSame && SameNumElts) {
2383 EVT BuildVT = BV->getValueType(0);
2384 const SDValue &Splatted = BV->getOperand(MaskVec[0]);
2385 SDValue NewBV = getSplatBuildVector(BuildVT, dl, Splatted);
2386
2387 // We may have jumped through bitcasts, so the type of the
2388 // BUILD_VECTOR may not match the type of the shuffle.
2389 if (BuildVT != VT)
2390 NewBV = getNode(ISD::BITCAST, dl, VT, NewBV);
2391 return NewBV;
2392 }
2393 }
2394 }
2395
// No fold applied: CSE-lookup then create the ShuffleVectorSDNode.
2396 SDVTList VTs = getVTList(VT);
2398 SDValue Ops[2] = { N1, N2 };
2400 for (int i = 0; i != NElts; ++i)
2401 ID.AddInteger(MaskVec[i]);
2402
2403 void* IP = nullptr;
2404 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
2405 return SDValue(E, 0);
2406
2407 // Allocate the mask array for the node out of the BumpPtrAllocator, since
2408 // SDNode doesn't have access to it. This memory will be "leaked" when
2409 // the node is deallocated, but recovered when the NodeAllocator is released.
2410 int *MaskAlloc = OperandAllocator.Allocate<int>(NElts);
2411 llvm::copy(MaskVec, MaskAlloc);
2412
2413 auto *N = newSDNode<ShuffleVectorSDNode>(VTs, dl.getIROrder(),
2414 dl.getDebugLoc(), MaskAlloc);
2415 createOperands(N, Ops);
2416
2417 CSEMap.InsertNode(N, IP);
2418 InsertNode(N);
2419 SDValue V = SDValue(N, 0);
2420 NewSDValueDbgMsg(V, "Creating new node: ", this);
2421 return V;
2422}
2423
// SelectionDAG::getCommutedVectorShuffle: rebuild SV with its two operands
// swapped, delegating back to getVectorShuffle.
// NOTE(review): extraction artifact — the signature line (doxygen 2424) and
// the statement at doxygen 2427 (presumably the mask-commuting call) are
// missing from this listing.
2425 EVT VT = SV.getValueType(0);
2426 SmallVector<int, 8> MaskVec(SV.getMask());
2428
2429 SDValue Op0 = SV.getOperand(0);
2430 SDValue Op1 = SV.getOperand(1);
2431 return getVectorShuffle(VT, SDLoc(&SV), Op1, Op0, MaskVec);
2432}
2433
// SelectionDAG::getRegister: CSE-unified RegisterSDNode for Reg; divergence is
// computed via the target hook at creation time.
// NOTE(review): extraction artifact — the signature line (doxygen 2434) and
// the FoldingSetNodeID declaration (2436) are missing from this listing.
2435 SDVTList VTs = getVTList(VT);
2437 AddNodeIDNode(ID, ISD::Register, VTs, {});
2438 ID.AddInteger(Reg.id());
2439 void *IP = nullptr;
2440 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
2441 return SDValue(E, 0);
2442
2443 auto *N = newSDNode<RegisterSDNode>(Reg, VTs);
// Registers can be a source of divergence; ask the target.
2444 N->SDNodeBits.IsDivergent = TLI->isSDNodeSourceOfDivergence(N, FLI, UA);
2445 CSEMap.InsertNode(N, IP);
2446 InsertNode(N);
2447 return SDValue(N, 0);
2448}
2449
// SelectionDAG::getRegisterMask: CSE-unified RegisterMaskSDNode keyed by the
// mask pointer.
// NOTE(review): extraction artifact — the signature line and FoldingSetNodeID
// declaration (doxygen 2450-2451) are missing from this listing.
2452 AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), {});
2453 ID.AddPointer(RegMask);
2454 void *IP = nullptr;
2455 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
2456 return SDValue(E, 0);
2457
2458 auto *N = newSDNode<RegisterMaskSDNode>(RegMask);
2459 CSEMap.InsertNode(N, IP);
2460 InsertNode(N);
2461 return SDValue(N, 0);
2462}
2463
// SelectionDAG::getEHLabel: convenience wrapper producing an EH_LABEL node.
// NOTE(review): extraction artifact — the signature line (doxygen 2464) is
// missing from this listing.
2465 MCSymbol *Label) {
2466 return getLabelNode(ISD::EH_LABEL, dl, Root, Label);
2467}
2468
// SelectionDAG::getLabelNode: CSE-unified LabelSDNode chained off Root,
// keyed by (opcode, chain operand, label symbol).
// NOTE(review): extraction artifact — the FoldingSetNodeID declaration
// (doxygen 2471) is missing from this listing.
2469SDValue SelectionDAG::getLabelNode(unsigned Opcode, const SDLoc &dl,
2470 SDValue Root, MCSymbol *Label) {
2472 SDValue Ops[] = { Root };
2473 AddNodeIDNode(ID, Opcode, getVTList(MVT::Other), Ops);
2474 ID.AddPointer(Label);
2475 void *IP = nullptr;
2476 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
2477 return SDValue(E, 0);
2478
2479 auto *N =
2480 newSDNode<LabelSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), Label);
2481 createOperands(N, Ops);
2482
2483 CSEMap.InsertNode(N, IP);
2484 InsertNode(N);
2485 return SDValue(N, 0);
2486}
2487
// SelectionDAG::getBlockAddress: CSE-unified (Target)BlockAddress node keyed
// by (block address, offset, target flags).
// NOTE(review): extraction artifact — the signature line (doxygen 2488) and
// the FoldingSetNodeID declaration (2494) are missing from this listing.
2489 int64_t Offset, bool isTarget,
2490 unsigned TargetFlags) {
2491 unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress;
2492 SDVTList VTs = getVTList(VT);
2493
2495 AddNodeIDNode(ID, Opc, VTs, {});
2496 ID.AddPointer(BA);
2497 ID.AddInteger(Offset);
2498 ID.AddInteger(TargetFlags);
2499 void *IP = nullptr;
2500 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
2501 return SDValue(E, 0);
2502
2503 auto *N = newSDNode<BlockAddressSDNode>(Opc, VTs, BA, Offset, TargetFlags);
2504 CSEMap.InsertNode(N, IP);
2505 InsertNode(N);
2506 return SDValue(N, 0);
2507}
2508
// SelectionDAG::getSrcValue: CSE-unified SRCVALUE node wrapping an IR Value
// pointer (used e.g. to carry pointer provenance for va_arg lowering).
// NOTE(review): extraction artifact — the signature line and FoldingSetNodeID
// declaration (doxygen 2509-2510) are missing from this listing.
2511 AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), {});
2512 ID.AddPointer(V);
2513
2514 void *IP = nullptr;
2515 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
2516 return SDValue(E, 0);
2517
2518 auto *N = newSDNode<SrcValueSDNode>(V);
2519 CSEMap.InsertNode(N, IP);
2520 InsertNode(N);
2521 return SDValue(N, 0);
2522}
2523
// SelectionDAG::getMDNode: CSE-unified MDNODE_SDNODE wrapping a metadata node.
// NOTE(review): extraction artifact — the signature line and FoldingSetNodeID
// declaration (doxygen 2524-2525) are missing from this listing.
2526 AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), {});
2527 ID.AddPointer(MD);
2528
2529 void *IP = nullptr;
2530 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
2531 return SDValue(E, 0);
2532
2533 auto *N = newSDNode<MDNodeSDNode>(MD);
2534 CSEMap.InsertNode(N, IP);
2535 InsertNode(N);
2536 return SDValue(N, 0);
2537}
2538
// SelectionDAG::getBitcast: bitcast V to VT, returning V unchanged when the
// types already match (avoids creating a no-op BITCAST node).
// NOTE(review): extraction artifact — the signature line (doxygen 2539) is
// missing from this listing.
2540 if (VT == V.getValueType())
2541 return V;
2542
2543 return getNode(ISD::BITCAST, SDLoc(V), VT, V);
2544}
2545
// SelectionDAG::getAddrSpaceCast: CSE-unified ADDRSPACECAST of Ptr from SrcAS
// to DestAS.
// NOTE(review): extraction artifact — the signature line (doxygen 2546) and
// the FoldingSetNodeID declaration / AddNodeIDNode call (2550-2551) are
// missing from this listing.
2547 unsigned SrcAS, unsigned DestAS) {
2548 SDVTList VTs = getVTList(VT);
2549 SDValue Ops[] = {Ptr};
2552 ID.AddInteger(SrcAS);
2553 ID.AddInteger(DestAS);
2554
2555 void *IP = nullptr;
2556 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
2557 return SDValue(E, 0);
2558
2559 auto *N = newSDNode<AddrSpaceCastSDNode>(dl.getIROrder(), dl.getDebugLoc(),
2560 VTs, SrcAS, DestAS);
2561 createOperands(N, Ops);
2562
2563 CSEMap.InsertNode(N, IP);
2564 InsertNode(N);
2565 return SDValue(N, 0);
2566}
2567
// SelectionDAG::getFreeze: wrap V in an ISD::FREEZE node of the same type.
// NOTE(review): extraction artifact — the signature line (doxygen 2568) is
// missing from this listing.
2569 return getNode(ISD::FREEZE, SDLoc(V), V.getValueType(), V);
2570}
2571
// Freeze-if-needed helper: return V unchanged when the demanded elements are
// already guaranteed not undef/poison, otherwise insert a FREEZE.
// NOTE(review): extraction artifact — the signature line (doxygen 2572) is
// missing, so the member name is not visible here — confirm upstream.
2573 bool PoisonOnly) {
2574 if (isGuaranteedNotToBeUndefOrPoison(V, DemandedElts, PoisonOnly))
2575 return V;
2576 return getFreeze(V);
2577}
2578
2579/// getShiftAmountOperand - Return the specified value casted to
2580/// the target's desired shift amount type.
// NOTE(review): extraction artifact — the signature line (doxygen 2581) is
// missing from this listing.
2582 EVT OpTy = Op.getValueType();
2583 EVT ShTy = TLI->getShiftAmountTy(LHSTy, getDataLayout());
// Already the right type (or a vector, which is left untouched): no cast.
2584 if (OpTy == ShTy || OpTy.isVector()) return Op;
2585
2586 return getZExtOrTrunc(Op, SDLoc(Op), ShTy);
2587}
2588
// SelectionDAG::expandVAArg: default lowering of a VAARG node — load the
// va_list pointer, optionally over-align it, bump it past the argument, store
// it back, then load the argument itself.
// NOTE(review): extraction artifact — the signature line (doxygen 2589) and
// the TargetLowering reference declaration (2591) are missing here.
2590 SDLoc dl(Node);
2592 const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
2593 EVT VT = Node->getValueType(0);
2594 SDValue Tmp1 = Node->getOperand(0);
2595 SDValue Tmp2 = Node->getOperand(1);
// Operand 3 carries the argument's over-alignment (0 = none).
2596 const MaybeAlign MA(Node->getConstantOperandVal(3));
2597
2598 SDValue VAListLoad = getLoad(TLI.getPointerTy(getDataLayout()), dl, Tmp1,
2599 Tmp2, MachinePointerInfo(V));
2600 SDValue VAList = VAListLoad;
2601
// Round the pointer up when the argument needs more than the default
// stack-argument alignment: VAList = (VAList + MA - 1) & -MA.
2602 if (MA && *MA > TLI.getMinStackArgumentAlignment()) {
2603 VAList = getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
2604 getConstant(MA->value() - 1, dl, VAList.getValueType()));
2605
2606 VAList = getNode(
2607 ISD::AND, dl, VAList.getValueType(), VAList,
2608 getSignedConstant(-(int64_t)MA->value(), dl, VAList.getValueType()));
2609 }
2610
2611 // Increment the pointer, VAList, to the next vaarg
2612 Tmp1 = getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
2613 getConstant(getDataLayout().getTypeAllocSize(
2614 VT.getTypeForEVT(*getContext())),
2615 dl, VAList.getValueType()));
2616 // Store the incremented VAList to the legalized pointer
2617 Tmp1 =
2618 getStore(VAListLoad.getValue(1), dl, Tmp1, Tmp2, MachinePointerInfo(V));
2619 // Load the actual argument out of the pointer VAList
2620 return getLoad(VT, dl, Tmp1, VAList, MachinePointerInfo());
2621}
2622
// SelectionDAG::expandVACopy: default lowering of VACOPY — copy the va_list
// pointer value from the source slot to the destination slot.
// NOTE(review): extraction artifact — the signature line (doxygen 2623) and
// the TargetLowering reference declaration (2625) are missing here.
2624 SDLoc dl(Node);
2626 // This defaults to loading a pointer from the input and storing it to the
2627 // output, returning the chain.
2628 const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue();
2629 const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue();
2630 SDValue Tmp1 =
2631 getLoad(TLI.getPointerTy(getDataLayout()), dl, Node->getOperand(0),
2632 Node->getOperand(2), MachinePointerInfo(VS));
// Chain the store on the load's output chain (Tmp1.getValue(1)).
2633 return getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1),
2634 MachinePointerInfo(VD));
2635}
2636
// SelectionDAG::getReducedAlign: ABI/preferred alignment for VT, reduced for
// illegal vector types that will be broken down, and clamped to the stack
// alignment when the stack cannot be realigned.
// NOTE(review): extraction artifact — the signature line (doxygen 2637) is
// missing from this listing.
2638 const DataLayout &DL = getDataLayout();
2639 Type *Ty = VT.getTypeForEVT(*getContext());
2640 Align RedAlign = UseABI ? DL.getABITypeAlign(Ty) : DL.getPrefTypeAlign(Ty);
2641
// Legal (or scalar) types keep their natural alignment.
2642 if (TLI->isTypeLegal(VT) || !VT.isVector())
2643 return RedAlign;
2644
2645 const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
2646 const Align StackAlign = TFI->getStackAlign();
2647
2648 // See if we can choose a smaller ABI alignment in cases where it's an
2649 // illegal vector type that will get broken down.
2650 if (RedAlign > StackAlign) {
2651 EVT IntermediateVT;
2652 MVT RegisterVT;
2653 unsigned NumIntermediates;
2654 TLI->getVectorTypeBreakdown(*getContext(), VT, IntermediateVT,
2655 NumIntermediates, RegisterVT);
2656 Ty = IntermediateVT.getTypeForEVT(*getContext());
2657 Align RedAlign2 = UseABI ? DL.getABITypeAlign(Ty) : DL.getPrefTypeAlign(Ty);
2658 if (RedAlign2 < RedAlign)
2659 RedAlign = RedAlign2;
2660
2661 if (!getMachineFunction().getFrameInfo().isStackRealignable())
2662 // If the stack is not realignable, the alignment should be limited to the
2663 // StackAlignment
2664 RedAlign = std::min(RedAlign, StackAlign);
2665 }
2666
2667 return RedAlign;
2668}
2669
// SelectionDAG::CreateStackTemporary (TypeSize/Align overload): create a stack
// object of the given size and alignment and return its frame-index node.
// Scalable sizes are tagged with the target's scalable-vector stack ID.
// NOTE(review): extraction artifact — the signature line (doxygen 2670) is
// missing from this listing.
2671 MachineFrameInfo &MFI = MF->getFrameInfo();
2672 const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
2673 int StackID = 0;
2674 if (Bytes.isScalable())
2675 StackID = TFI->getStackIDForScalableVectors();
2676 // The stack id gives an indication of whether the object is scalable or
2677 // not, so it's safe to pass in the minimum size here.
2678 int FrameIdx = MFI.CreateStackObject(Bytes.getKnownMinValue(), Alignment,
2679 false, nullptr, StackID);
2680 return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout()));
2681}
2682
// SelectionDAG::CreateStackTemporary (EVT overload): stack slot sized for VT,
// aligned to at least minAlign and the type's preferred alignment.
// NOTE(review): extraction artifact — the signature line (doxygen 2683) is
// missing from this listing.
2684 Type *Ty = VT.getTypeForEVT(*getContext());
2685 Align StackAlign =
2686 std::max(getDataLayout().getPrefTypeAlign(Ty), Align(minAlign));
2687 return CreateStackTemporary(VT.getStoreSize(), StackAlign);
2688}
2689
// SelectionDAG::CreateStackTemporary (two-type overload): stack slot large and
// aligned enough to hold either VT1 or VT2 (both must agree on scalability).
// NOTE(review): extraction artifact — the signature line (doxygen 2690) is
// missing from this listing.
2691 TypeSize VT1Size = VT1.getStoreSize();
2692 TypeSize VT2Size = VT2.getStoreSize();
2693 assert(VT1Size.isScalable() == VT2Size.isScalable() &&
2694 "Don't know how to choose the maximum size when creating a stack "
2695 "temporary");
// Sizes share scalability, so comparing known-min values picks the larger.
2696 TypeSize Bytes = VT1Size.getKnownMinValue() > VT2Size.getKnownMinValue()
2697 ? VT1Size
2698 : VT2Size;
2699
2700 Type *Ty1 = VT1.getTypeForEVT(*getContext());
2701 Type *Ty2 = VT2.getTypeForEVT(*getContext());
2702 const DataLayout &DL = getDataLayout();
2703 Align Align = std::max(DL.getPrefTypeAlign(Ty1), DL.getPrefTypeAlign(Ty2));
2704 return CreateStackTemporary(Bytes, Align);
2705}
2706
// SelectionDAG::FoldSetCC: try to constant-fold (setcc N1, N2, Cond). Handles
// always-true/false conditions, undef operands (matching
// llvm::ConstantFoldCompareInstruction), integer-constant and FP-constant
// operand pairs, canonicalizing an FP constant to the RHS, and known-NaN
// operands. Returns an empty SDValue when no fold applies.
// NOTE(review): extraction artifact — the signature line (doxygen 2707), the
// integer-constant guard lines (2764/2766/2769), the SwappedCond definition
// (2835) and several `case ISD::SET...:` labels in the FP switch (2789, 2800,
// 2805, 2811, 2817, 2822, 2825, 2828) are missing from this listing.
2708 ISD::CondCode Cond, const SDLoc &dl,
2709 SDNodeFlags Flags) {
2710 EVT OpVT = N1.getValueType();
2711
// Helper: "undef" boolean result, degraded to 0 when the target's boolean
// contents require specific high bits.
2712 auto GetUndefBooleanConstant = [&]() {
2713 if (VT.getScalarType() == MVT::i1 ||
2714 TLI->getBooleanContents(OpVT) ==
2716 return getUNDEF(VT);
2717 // ZeroOrOne / ZeroOrNegative require specific values for the high bits,
2718 // so we cannot use getUNDEF(). Return zero instead.
2719 return getConstant(0, dl, VT);
2720 };
2721
2722 // These setcc operations always fold.
2723 switch (Cond) {
2724 default: break;
2725 case ISD::SETFALSE:
2726 case ISD::SETFALSE2: return getBoolConstant(false, dl, VT, OpVT);
2727 case ISD::SETTRUE:
2728 case ISD::SETTRUE2: return getBoolConstant(true, dl, VT, OpVT);
2729
2730 case ISD::SETOEQ:
2731 case ISD::SETOGT:
2732 case ISD::SETOGE:
2733 case ISD::SETOLT:
2734 case ISD::SETOLE:
2735 case ISD::SETONE:
2736 case ISD::SETO:
2737 case ISD::SETUO:
2738 case ISD::SETUEQ:
2739 case ISD::SETUNE:
2740 assert(!OpVT.isInteger() && "Illegal setcc for integer!");
2741 break;
2742 }
2743
2744 if (OpVT.isInteger()) {
2745 // For EQ and NE, we can always pick a value for the undef to make the
2746 // predicate pass or fail, so we can return undef.
2747 // Matches behavior in llvm::ConstantFoldCompareInstruction.
2748 // icmp eq/ne X, undef -> undef.
2749 if ((N1.isUndef() || N2.isUndef()) &&
2750 (Cond == ISD::SETEQ || Cond == ISD::SETNE))
2751 return GetUndefBooleanConstant();
2752
2753 // If both operands are undef, we can return undef for int comparison.
2754 // icmp undef, undef -> undef.
2755 if (N1.isUndef() && N2.isUndef())
2756 return GetUndefBooleanConstant();
2757
2758 // icmp X, X -> true/false
2759 // icmp X, undef -> true/false because undef could be X.
2760 if (N1.isUndef() || N2.isUndef() || N1 == N2)
2761 return getBoolConstant(ISD::isTrueWhenEqual(Cond), dl, VT, OpVT);
2762 }
2763
// Integer-constant fold (guards for N1C/N2C dropped by extraction).
2765 const APInt &C2 = N2C->getAPIntValue();
2767 const APInt &C1 = N1C->getAPIntValue();
2768
2770 dl, VT, OpVT);
2771 }
2772 }
2773
2774 auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
2775 auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
2776
2777 if (N1CFP && N2CFP) {
// Both operands are FP constants: compare once and fold per condition code.
// Unordered (NaN) results make the non-O/U conditions undefined.
2778 APFloat::cmpResult R = N1CFP->getValueAPF().compare(N2CFP->getValueAPF());
2779 switch (Cond) {
2780 default: break;
2781 case ISD::SETEQ: if (R==APFloat::cmpUnordered)
2782 return GetUndefBooleanConstant();
2783 [[fallthrough]];
2784 case ISD::SETOEQ: return getBoolConstant(R==APFloat::cmpEqual, dl, VT,
2785 OpVT);
2786 case ISD::SETNE: if (R==APFloat::cmpUnordered)
2787 return GetUndefBooleanConstant();
2788 [[fallthrough]];
2790 R==APFloat::cmpLessThan, dl, VT,
2791 OpVT);
2792 case ISD::SETLT: if (R==APFloat::cmpUnordered)
2793 return GetUndefBooleanConstant();
2794 [[fallthrough]];
2795 case ISD::SETOLT: return getBoolConstant(R==APFloat::cmpLessThan, dl, VT,
2796 OpVT);
2797 case ISD::SETGT: if (R==APFloat::cmpUnordered)
2798 return GetUndefBooleanConstant();
2799 [[fallthrough]];
2801 VT, OpVT);
2802 case ISD::SETLE: if (R==APFloat::cmpUnordered)
2803 return GetUndefBooleanConstant();
2804 [[fallthrough]];
2806 R==APFloat::cmpEqual, dl, VT,
2807 OpVT);
2808 case ISD::SETGE: if (R==APFloat::cmpUnordered)
2809 return GetUndefBooleanConstant();
2810 [[fallthrough]];
2812 R==APFloat::cmpEqual, dl, VT, OpVT);
2813 case ISD::SETO: return getBoolConstant(R!=APFloat::cmpUnordered, dl, VT,
2814 OpVT);
2815 case ISD::SETUO: return getBoolConstant(R==APFloat::cmpUnordered, dl, VT,
2816 OpVT);
2818 R==APFloat::cmpEqual, dl, VT,
2819 OpVT);
2820 case ISD::SETUNE: return getBoolConstant(R!=APFloat::cmpEqual, dl, VT,
2821 OpVT);
2823 R==APFloat::cmpLessThan, dl, VT,
2824 OpVT);
2826 R==APFloat::cmpUnordered, dl, VT,
2827 OpVT);
2829 VT, OpVT);
2830 case ISD::SETUGE: return getBoolConstant(R!=APFloat::cmpLessThan, dl, VT,
2831 OpVT);
2832 }
2833 } else if (N1CFP && OpVT.isSimple() && !N2.isUndef()) {
2834 // Ensure that the constant occurs on the RHS.
2836 if (!TLI->isCondCodeLegal(SwappedCond, OpVT.getSimpleVT()))
2837 return SDValue();
2838 return getSetCC(dl, VT, N2, N1, SwappedCond, /*Chain=*/{},
2839 /*IsSignaling=*/false, Flags);
2840 } else if ((N2CFP && N2CFP->getValueAPF().isNaN()) ||
2841 (OpVT.isFloatingPoint() && (N1.isUndef() || N2.isUndef()))) {
2842 // If an operand is known to be a nan (or undef that could be a nan), we can
2843 // fold it.
2844 // Choosing NaN for the undef will always make unordered comparison succeed
2845 // and ordered comparison fails.
2846 // Matches behavior in llvm::ConstantFoldCompareInstruction.
2847 switch (ISD::getUnorderedFlavor(Cond)) {
2848 default:
2849 llvm_unreachable("Unknown flavor!");
2850 case 0: // Known false.
2851 return getBoolConstant(false, dl, VT, OpVT);
2852 case 1: // Known true.
2853 return getBoolConstant(true, dl, VT, OpVT);
2854 case 2: // Undefined.
2855 return GetUndefBooleanConstant();
2856 }
2857 }
2858
2859 // Could not fold it.
2860 return SDValue();
2861}
2862
2863/// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We
2864/// use this predicate to simplify operations downstream.
// NOTE(review): extraction artifact — the signature line (doxygen 2865) and
// the return statement (2867, presumably a MaskedValueIsZero query on the
// sign-bit mask) are missing from this listing.
2866 unsigned BitWidth = Op.getScalarValueSizeInBits();
2868}
2869
2870// TODO: Should have argument to specify if sign bit of nan is ignorable.
// Returns true when the FP sign bit of Op is known to be zero, recursing up to
// MaxRecursionDepth through sign-preserving or sign-clearing opcodes.
// NOTE(review): extraction artifact — the signature line (doxygen 2871) is
// missing from this listing.
2872 if (Depth >= MaxRecursionDepth)
2873 return false; // Limit search depth.
2874
2875 unsigned Opc = Op.getOpcode();
2876 switch (Opc) {
2877 case ISD::FABS:
2878 return true;
2879 case ISD::AssertNoFPClass: {
// The assert's mask must exclude both NaNs and all negative classes for
// the sign bit to be provably zero.
2880 FPClassTest NoFPClass =
2881 static_cast<FPClassTest>(Op.getConstantOperandVal(1));
2882
2883 const FPClassTest TestMask = fcNan | fcNegative;
2884 return (NoFPClass & TestMask) == TestMask;
2885 }
2886 case ISD::ARITH_FENCE:
2887 return SignBitIsZeroFP(Op.getOperand(0), Depth + 1);
2888 case ISD::FEXP:
2889 case ISD::FEXP2:
2890 case ISD::FEXP10:
// exp-family results are non-negative except for NaN inputs.
2891 return Op->getFlags().hasNoNaNs();
2892 case ISD::FMINNUM:
2893 case ISD::FMINNUM_IEEE:
2894 case ISD::FMINIMUM:
2895 case ISD::FMINIMUMNUM:
2896 return SignBitIsZeroFP(Op.getOperand(1), Depth + 1) &&
2897 SignBitIsZeroFP(Op.getOperand(0), Depth + 1);
2898 case ISD::FMAXNUM:
2899 case ISD::FMAXNUM_IEEE:
2900 case ISD::FMAXIMUM:
2901 case ISD::FMAXIMUMNUM:
2902 // TODO: If we can ignore the sign bit of nans, only one side being known 0
2903 // is sufficient.
2904 return SignBitIsZeroFP(Op.getOperand(1), Depth + 1) &&
2905 SignBitIsZeroFP(Op.getOperand(0), Depth + 1);
2906 default:
2907 return false;
2908 }
2909
2910 llvm_unreachable("covered opcode switch");
2911}
2912
2913/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use
2914/// this predicate to simplify operations downstream. Mask is known to be zero
2915/// for bits that V cannot have.
// NOTE(review): extraction artifact — the signature line (doxygen 2916) is
// missing from this listing.
2917 unsigned Depth) const {
2918 return Mask.isSubsetOf(computeKnownBits(V, Depth).Zero);
2919}
2920
2921/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero in
2922/// DemandedElts. We use this predicate to simplify operations downstream.
2923/// Mask is known to be zero for bits that V cannot have.
// NOTE(review): extraction artifact — the signature line (doxygen 2924) is
// missing from this listing.
2925 const APInt &DemandedElts,
2926 unsigned Depth) const {
2927 return Mask.isSubsetOf(computeKnownBits(V, DemandedElts, Depth).Zero);
2928}
2929
2930/// MaskedVectorIsZero - Return true if 'Op' is known to be zero in
2931/// DemandedElts. We use this predicate to simplify operations downstream.
// NOTE(review): extraction artifact — the signature line (doxygen 2932) is
// missing from this listing.
2933 unsigned Depth /* = 0 */) const {
2934 return computeKnownBits(V, DemandedElts, Depth).isZero();
2935}
2936
2937/// MaskedValueIsAllOnes - Return true if '(Op & Mask) == Mask'.
// NOTE(review): extraction artifact — the signature line (doxygen 2938) is
// missing from this listing.
2939 unsigned Depth) const {
2940 return Mask.isSubsetOf(computeKnownBits(V, Depth).One);
2941}
2942
// Returns an APInt with one bit per vector element of Op, set when that
// demanded element is known to be all-zero (per-element MaskedVectorIsZero
// query). Fixed-width vectors only.
// NOTE(review): extraction artifact — the signature line (doxygen 2943) is
// missing from this listing.
2944 const APInt &DemandedElts,
2945 unsigned Depth) const {
2946 EVT VT = Op.getValueType();
2947 assert(VT.isVector() && !VT.isScalableVector() && "Only for fixed vectors!");
2948
2949 unsigned NumElts = VT.getVectorNumElements();
2950 assert(DemandedElts.getBitWidth() == NumElts && "Unexpected demanded mask.");
2951
2952 APInt KnownZeroElements = APInt::getZero(NumElts);
2953 for (unsigned EltIdx = 0; EltIdx != NumElts; ++EltIdx) {
2954 if (!DemandedElts[EltIdx])
2955 continue; // Don't query elements that are not demanded.
2956 APInt Mask = APInt::getOneBitSet(NumElts, EltIdx);
2957 if (MaskedVectorIsZero(Op, Mask, Depth))
2958 KnownZeroElements.setBit(EltIdx);
2959 }
2960 return KnownZeroElements;
2961}
2962
2963/// isSplatValue - Return true if the vector V has the same value
2964/// across all DemandedElts. For scalable vectors, we don't know the
2965/// number of lanes at compile time. Instead, we use a 1 bit APInt
2966/// to represent a conservative value for all lanes; that is, that
2967/// one bit value is implicitly splatted across all lanes.
// On success UndefElts is set to the lanes whose value is undef (and hence
// compatible with any splat value).
// NOTE(review): extraction artifact — the `case ISD::EXTRACT_SUBVECTOR:` label
// (doxygen 3087) and the case labels at doxygen 3103-3105 (the truncate-style
// group ending in the "widen" handler) are missing from this listing.
2968bool SelectionDAG::isSplatValue(SDValue V, const APInt &DemandedElts,
2969 APInt &UndefElts, unsigned Depth) const {
2970 unsigned Opcode = V.getOpcode();
2971 EVT VT = V.getValueType();
2972 assert(VT.isVector() && "Vector type expected");
2973 assert((!VT.isScalableVector() || DemandedElts.getBitWidth() == 1) &&
2974 "scalable demanded bits are ignored");
2975
2976 if (!DemandedElts)
2977 return false; // No demanded elts, better to assume we don't know anything.
2978
2979 if (Depth >= MaxRecursionDepth)
2980 return false; // Limit search depth.
2981
2982 // Deal with some common cases here that work for both fixed and scalable
2983 // vector types.
2984 switch (Opcode) {
2985 case ISD::SPLAT_VECTOR:
2986 UndefElts = V.getOperand(0).isUndef()
2987 ? APInt::getAllOnes(DemandedElts.getBitWidth())
2988 : APInt(DemandedElts.getBitWidth(), 0);
2989 return true;
2990 case ISD::ADD:
2991 case ISD::SUB:
2992 case ISD::AND:
2993 case ISD::XOR:
2994 case ISD::OR: {
2995 APInt UndefLHS, UndefRHS;
2996 SDValue LHS = V.getOperand(0);
2997 SDValue RHS = V.getOperand(1);
2998 // Only recognize splats with the same demanded undef elements for both
2999 // operands, otherwise we might fail to handle binop-specific undef
3000 // handling.
3001 // e.g. (and undef, 0) -> 0 etc.
3002 if (isSplatValue(LHS, DemandedElts, UndefLHS, Depth + 1) &&
3003 isSplatValue(RHS, DemandedElts, UndefRHS, Depth + 1) &&
3004 (DemandedElts & UndefLHS) == (DemandedElts & UndefRHS)) {
3005 UndefElts = UndefLHS | UndefRHS;
3006 return true;
3007 }
3008 return false;
3009 }
3010 case ISD::ABS:
3011 case ISD::TRUNCATE:
3012 case ISD::SIGN_EXTEND:
3013 case ISD::ZERO_EXTEND:
// Lane-wise unary ops preserve splat-ness of their operand.
3014 return isSplatValue(V.getOperand(0), DemandedElts, UndefElts, Depth + 1);
3015 default:
3016 if (Opcode >= ISD::BUILTIN_OP_END || Opcode == ISD::INTRINSIC_WO_CHAIN ||
3017 Opcode == ISD::INTRINSIC_W_CHAIN || Opcode == ISD::INTRINSIC_VOID)
3018 return TLI->isSplatValueForTargetNode(V, DemandedElts, UndefElts, *this,
3019 Depth);
3020 break;
3021 }
3022
3023 // We don't support other cases than those above for scalable vectors at
3024 // the moment.
3025 if (VT.isScalableVector())
3026 return false;
3027
3028 unsigned NumElts = VT.getVectorNumElements();
3029 assert(NumElts == DemandedElts.getBitWidth() && "Vector size mismatch");
3030 UndefElts = APInt::getZero(NumElts);
3031
3032 switch (Opcode) {
3033 case ISD::BUILD_VECTOR: {
// Splat iff all demanded, non-undef operands are the same scalar.
3034 SDValue Scl;
3035 for (unsigned i = 0; i != NumElts; ++i) {
3036 SDValue Op = V.getOperand(i);
3037 if (Op.isUndef()) {
3038 UndefElts.setBit(i);
3039 continue;
3040 }
3041 if (!DemandedElts[i])
3042 continue;
3043 if (Scl && Scl != Op)
3044 return false;
3045 Scl = Op;
3046 }
3047 return true;
3048 }
3049 case ISD::VECTOR_SHUFFLE: {
3050 // Check if this is a shuffle node doing a splat or a shuffle of a splat.
3051 APInt DemandedLHS = APInt::getZero(NumElts);
3052 APInt DemandedRHS = APInt::getZero(NumElts);
3053 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(V)->getMask();
3054 for (int i = 0; i != (int)NumElts; ++i) {
3055 int M = Mask[i];
3056 if (M < 0) {
3057 UndefElts.setBit(i);
3058 continue;
3059 }
3060 if (!DemandedElts[i])
3061 continue;
3062 if (M < (int)NumElts)
3063 DemandedLHS.setBit(M);
3064 else
3065 DemandedRHS.setBit(M - NumElts);
3066 }
3067
3068 // If we aren't demanding either op, assume there's no splat.
3069 // If we are demanding both ops, assume there's no splat.
3070 if ((DemandedLHS.isZero() && DemandedRHS.isZero()) ||
3071 (!DemandedLHS.isZero() && !DemandedRHS.isZero()))
3072 return false;
3073
3074 // See if the demanded elts of the source op is a splat or we only demand
3075 // one element, which should always be a splat.
3076 // TODO: Handle source ops splats with undefs.
3077 auto CheckSplatSrc = [&](SDValue Src, const APInt &SrcElts) {
3078 APInt SrcUndefs;
3079 return (SrcElts.popcount() == 1) ||
3080 (isSplatValue(Src, SrcElts, SrcUndefs, Depth + 1) &&
3081 (SrcElts & SrcUndefs).isZero());
3082 };
3083 if (!DemandedLHS.isZero())
3084 return CheckSplatSrc(V.getOperand(0), DemandedLHS);
3085 return CheckSplatSrc(V.getOperand(1), DemandedRHS);
3086 }
3088 // Offset the demanded elts by the subvector index.
3089 SDValue Src = V.getOperand(0);
3090 // We don't support scalable vectors at the moment.
3091 if (Src.getValueType().isScalableVector())
3092 return false;
3093 uint64_t Idx = V.getConstantOperandVal(1);
3094 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3095 APInt UndefSrcElts;
3096 APInt DemandedSrcElts = DemandedElts.zext(NumSrcElts).shl(Idx);
3097 if (isSplatValue(Src, DemandedSrcElts, UndefSrcElts, Depth + 1)) {
3098 UndefElts = UndefSrcElts.extractBits(NumElts, Idx);
3099 return true;
3100 }
3101 break;
3102 }
3106 // Widen the demanded elts by the src element count.
3107 SDValue Src = V.getOperand(0);
3108 // We don't support scalable vectors at the moment.
3109 if (Src.getValueType().isScalableVector())
3110 return false;
3111 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3112 APInt UndefSrcElts;
3113 APInt DemandedSrcElts = DemandedElts.zext(NumSrcElts);
3114 if (isSplatValue(Src, DemandedSrcElts, UndefSrcElts, Depth + 1)) {
3115 UndefElts = UndefSrcElts.trunc(NumElts);
3116 return true;
3117 }
3118 break;
3119 }
3120 case ISD::BITCAST: {
3121 SDValue Src = V.getOperand(0);
3122 EVT SrcVT = Src.getValueType();
3123 unsigned SrcBitWidth = SrcVT.getScalarSizeInBits();
3124 unsigned BitWidth = VT.getScalarSizeInBits();
3125
3126 // Ignore bitcasts from unsupported types.
3127 // TODO: Add fp support?
3128 if (!SrcVT.isVector() || !SrcVT.isInteger() || !VT.isInteger())
3129 break;
3130
3131 // Bitcast 'small element' vector to 'large element' vector.
3132 if ((BitWidth % SrcBitWidth) == 0) {
3133 // See if each sub element is a splat.
3134 unsigned Scale = BitWidth / SrcBitWidth;
3135 unsigned NumSrcElts = SrcVT.getVectorNumElements();
3136 APInt ScaledDemandedElts =
3137 APIntOps::ScaleBitMask(DemandedElts, NumSrcElts);
3138 for (unsigned I = 0; I != Scale; ++I) {
3139 APInt SubUndefElts;
3140 APInt SubDemandedElt = APInt::getOneBitSet(Scale, I);
3141 APInt SubDemandedElts = APInt::getSplat(NumSrcElts, SubDemandedElt);
3142 SubDemandedElts &= ScaledDemandedElts;
3143 if (!isSplatValue(Src, SubDemandedElts, SubUndefElts, Depth + 1))
3144 return false;
3145 // TODO: Add support for merging sub undef elements.
3146 if (!SubUndefElts.isZero())
3147 return false;
3148 }
3149 return true;
3150 }
3151 break;
3152 }
3153 }
3154
3155 return false;
3156}
3157
3158/// Helper wrapper to main isSplatValue function.
3159bool SelectionDAG::isSplatValue(SDValue V, bool AllowUndefs) const {
3160 EVT VT = V.getValueType();
3161 assert(VT.isVector() && "Vector type expected");
3162
3163 APInt UndefElts;
3164 // Since the number of lanes in a scalable vector is unknown at compile time,
3165 // we track one bit which is implicitly broadcast to all lanes. This means
3166 // that all lanes in a scalable vector are considered demanded.
3167 APInt DemandedElts
3169 return isSplatValue(V, DemandedElts, UndefElts) &&
3170 (AllowUndefs || !UndefElts);
3171}
3172
3175
3176 EVT VT = V.getValueType();
3177 unsigned Opcode = V.getOpcode();
3178 switch (Opcode) {
3179 default: {
3180 APInt UndefElts;
3181 // Since the number of lanes in a scalable vector is unknown at compile time,
3182 // we track one bit which is implicitly broadcast to all lanes. This means
3183 // that all lanes in a scalable vector are considered demanded.
3184 APInt DemandedElts
3186
3187 if (isSplatValue(V, DemandedElts, UndefElts)) {
3188 if (VT.isScalableVector()) {
3189 // DemandedElts and UndefElts are ignored for scalable vectors, since
3190 // the only supported cases are SPLAT_VECTOR nodes.
3191 SplatIdx = 0;
3192 } else {
3193 // Handle case where all demanded elements are UNDEF.
3194 if (DemandedElts.isSubsetOf(UndefElts)) {
3195 SplatIdx = 0;
3196 return getUNDEF(VT);
3197 }
3198 SplatIdx = (UndefElts & DemandedElts).countr_one();
3199 }
3200 return V;
3201 }
3202 break;
3203 }
3204 case ISD::SPLAT_VECTOR:
3205 SplatIdx = 0;
3206 return V;
3207 case ISD::VECTOR_SHUFFLE: {
3208 assert(!VT.isScalableVector());
3209 // Check if this is a shuffle node doing a splat.
3210 // TODO - remove this and rely purely on SelectionDAG::isSplatValue,
3211 // getTargetVShiftNode currently struggles without the splat source.
3212 auto *SVN = cast<ShuffleVectorSDNode>(V);
3213 if (!SVN->isSplat())
3214 break;
3215 int Idx = SVN->getSplatIndex();
3216 int NumElts = V.getValueType().getVectorNumElements();
3217 SplatIdx = Idx % NumElts;
3218 return V.getOperand(Idx / NumElts);
3219 }
3220 }
3221
3222 return SDValue();
3223}
3224
3226 int SplatIdx;
3227 if (SDValue SrcVector = getSplatSourceVector(V, SplatIdx)) {
3228 EVT SVT = SrcVector.getValueType().getScalarType();
3229 EVT LegalSVT = SVT;
3230 if (LegalTypes && !TLI->isTypeLegal(SVT)) {
3231 if (!SVT.isInteger())
3232 return SDValue();
3233 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
3234 if (LegalSVT.bitsLT(SVT))
3235 return SDValue();
3236 }
3237 return getExtractVectorElt(SDLoc(V), LegalSVT, SrcVector, SplatIdx);
3238 }
3239 return SDValue();
3240}
3241
3242std::optional<ConstantRange>
3244 unsigned Depth) const {
3245 assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL ||
3246 V.getOpcode() == ISD::SRA) &&
3247 "Unknown shift node");
3248 // Shifting more than the bitwidth is not valid.
3249 unsigned BitWidth = V.getScalarValueSizeInBits();
3250
3251 if (auto *Cst = dyn_cast<ConstantSDNode>(V.getOperand(1))) {
3252 const APInt &ShAmt = Cst->getAPIntValue();
3253 if (ShAmt.uge(BitWidth))
3254 return std::nullopt;
3255 return ConstantRange(ShAmt);
3256 }
3257
3258 if (auto *BV = dyn_cast<BuildVectorSDNode>(V.getOperand(1))) {
3259 const APInt *MinAmt = nullptr, *MaxAmt = nullptr;
3260 for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
3261 if (!DemandedElts[i])
3262 continue;
3263 auto *SA = dyn_cast<ConstantSDNode>(BV->getOperand(i));
3264 if (!SA) {
3265 MinAmt = MaxAmt = nullptr;
3266 break;
3267 }
3268 const APInt &ShAmt = SA->getAPIntValue();
3269 if (ShAmt.uge(BitWidth))
3270 return std::nullopt;
3271 if (!MinAmt || MinAmt->ugt(ShAmt))
3272 MinAmt = &ShAmt;
3273 if (!MaxAmt || MaxAmt->ult(ShAmt))
3274 MaxAmt = &ShAmt;
3275 }
3276 assert(((!MinAmt && !MaxAmt) || (MinAmt && MaxAmt)) &&
3277 "Failed to find matching min/max shift amounts");
3278 if (MinAmt && MaxAmt)
3279 return ConstantRange(*MinAmt, *MaxAmt + 1);
3280 }
3281
3282 // Use computeKnownBits to find a hidden constant/knownbits (usually type
3283 // legalized). e.g. Hidden behind multiple bitcasts/build_vector/casts etc.
3284 KnownBits KnownAmt = computeKnownBits(V.getOperand(1), DemandedElts, Depth);
3285 if (KnownAmt.getMaxValue().ult(BitWidth))
3286 return ConstantRange::fromKnownBits(KnownAmt, /*IsSigned=*/false);
3287
3288 return std::nullopt;
3289}
3290
3291std::optional<unsigned>
3293 unsigned Depth) const {
3294 assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL ||
3295 V.getOpcode() == ISD::SRA) &&
3296 "Unknown shift node");
3297 if (std::optional<ConstantRange> AmtRange =
3298 getValidShiftAmountRange(V, DemandedElts, Depth))
3299 if (const APInt *ShAmt = AmtRange->getSingleElement())
3300 return ShAmt->getZExtValue();
3301 return std::nullopt;
3302}
3303
3304std::optional<unsigned>
3306 APInt DemandedElts = getDemandAllEltsMask(V);
3307 return getValidShiftAmount(V, DemandedElts, Depth);
3308}
3309
3310std::optional<unsigned>
3312 unsigned Depth) const {
3313 assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL ||
3314 V.getOpcode() == ISD::SRA) &&
3315 "Unknown shift node");
3316 if (std::optional<ConstantRange> AmtRange =
3317 getValidShiftAmountRange(V, DemandedElts, Depth))
3318 return AmtRange->getUnsignedMin().getZExtValue();
3319 return std::nullopt;
3320}
3321
3322std::optional<unsigned>
3324 APInt DemandedElts = getDemandAllEltsMask(V);
3325 return getValidMinimumShiftAmount(V, DemandedElts, Depth);
3326}
3327
3328std::optional<unsigned>
3330 unsigned Depth) const {
3331 assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL ||
3332 V.getOpcode() == ISD::SRA) &&
3333 "Unknown shift node");
3334 if (std::optional<ConstantRange> AmtRange =
3335 getValidShiftAmountRange(V, DemandedElts, Depth))
3336 return AmtRange->getUnsignedMax().getZExtValue();
3337 return std::nullopt;
3338}
3339
3340std::optional<unsigned>
3342 APInt DemandedElts = getDemandAllEltsMask(V);
3343 return getValidMaximumShiftAmount(V, DemandedElts, Depth);
3344}
3345
3346/// Determine which bits of Op are known to be either zero or one and return
3347/// them in Known. For vectors, the known bits are those that are shared by
3348/// every vector element.
3350 APInt DemandedElts = getDemandAllEltsMask(Op);
3351 return computeKnownBits(Op, DemandedElts, Depth);
3352}
3353
3354/// Determine which bits of Op are known to be either zero or one and return
3355/// them in Known. The DemandedElts argument allows us to only collect the known
3356/// bits that are shared by the requested vector elements.
3358 unsigned Depth) const {
3359 unsigned BitWidth = Op.getScalarValueSizeInBits();
3360
3361 KnownBits Known(BitWidth); // Don't know anything.
3362
3363 if (auto OptAPInt = Op->bitcastToAPInt()) {
3364 // We know all of the bits for a constant!
3365 return KnownBits::makeConstant(*std::move(OptAPInt));
3366 }
3367
3368 if (Depth >= MaxRecursionDepth)
3369 return Known; // Limit search depth.
3370
3371 KnownBits Known2;
3372 unsigned NumElts = DemandedElts.getBitWidth();
3373 assert((!Op.getValueType().isScalableVector() || NumElts == 1) &&
3374 "DemandedElts for scalable vectors must be 1 to represent all lanes");
3375 assert((!Op.getValueType().isFixedLengthVector() ||
3376 NumElts == Op.getValueType().getVectorNumElements()) &&
3377 "Unexpected vector size");
3378
3379 if (!DemandedElts)
3380 return Known; // No demanded elts, better to assume we don't know anything.
3381
3382 unsigned Opcode = Op.getOpcode();
3383 switch (Opcode) {
3384 case ISD::MERGE_VALUES:
3385 return computeKnownBits(Op.getOperand(Op.getResNo()), DemandedElts,
3386 Depth + 1);
3387 case ISD::SPLAT_VECTOR: {
3388 SDValue SrcOp = Op.getOperand(0);
3389 assert(SrcOp.getValueSizeInBits() >= BitWidth &&
3390 "Expected SPLAT_VECTOR implicit truncation");
3391 // Implicitly truncate the bits to match the official semantics of
3392 // SPLAT_VECTOR.
3393 Known = computeKnownBits(SrcOp, Depth + 1).trunc(BitWidth);
3394 break;
3395 }
3397 unsigned ScalarSize = Op.getOperand(0).getScalarValueSizeInBits();
3398 assert(ScalarSize * Op.getNumOperands() == BitWidth &&
3399 "Expected SPLAT_VECTOR_PARTS scalars to cover element width");
3400 for (auto [I, SrcOp] : enumerate(Op->ops())) {
3401 Known.insertBits(computeKnownBits(SrcOp, Depth + 1), ScalarSize * I);
3402 }
3403 break;
3404 }
3405 case ISD::STEP_VECTOR: {
3406 const APInt &Step = Op.getConstantOperandAPInt(0);
3407
3408 if (Step.isPowerOf2())
3409 Known.Zero.setLowBits(Step.logBase2());
3410
3412
3413 if (!isUIntN(BitWidth, Op.getValueType().getVectorMinNumElements()))
3414 break;
3415 const APInt MinNumElts =
3416 APInt(BitWidth, Op.getValueType().getVectorMinNumElements());
3417
3418 bool Overflow;
3419 const APInt MaxNumElts = getVScaleRange(&F, BitWidth)
3421 .umul_ov(MinNumElts, Overflow);
3422 if (Overflow)
3423 break;
3424
3425 const APInt MaxValue = (MaxNumElts - 1).umul_ov(Step, Overflow);
3426 if (Overflow)
3427 break;
3428
3429 Known.Zero.setHighBits(MaxValue.countl_zero());
3430 break;
3431 }
3432 case ISD::BUILD_VECTOR:
3433 assert(!Op.getValueType().isScalableVector());
3434 // Collect the known bits that are shared by every demanded vector element.
3435 Known.setAllConflict();
3436 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
3437 if (!DemandedElts[i])
3438 continue;
3439
3440 SDValue SrcOp = Op.getOperand(i);
3441 Known2 = computeKnownBits(SrcOp, Depth + 1);
3442
3443 // BUILD_VECTOR can implicitly truncate sources, we must handle this.
3444 if (SrcOp.getValueSizeInBits() != BitWidth) {
3445 assert(SrcOp.getValueSizeInBits() > BitWidth &&
3446 "Expected BUILD_VECTOR implicit truncation");
3447 Known2 = Known2.trunc(BitWidth);
3448 }
3449
3450 // Known bits are the values that are shared by every demanded element.
3451 Known = Known.intersectWith(Known2);
3452
3453 // If we don't know any bits, early out.
3454 if (Known.isUnknown())
3455 break;
3456 }
3457 break;
3458 case ISD::VECTOR_COMPRESS: {
3459 SDValue Vec = Op.getOperand(0);
3460 SDValue PassThru = Op.getOperand(2);
3461 Known = computeKnownBits(PassThru, DemandedElts, Depth + 1);
3462 // If we don't know any bits, early out.
3463 if (Known.isUnknown())
3464 break;
3465 Known2 = computeKnownBits(Vec, Depth + 1);
3466 Known = Known.intersectWith(Known2);
3467 break;
3468 }
3469 case ISD::VECTOR_SHUFFLE: {
3470 assert(!Op.getValueType().isScalableVector());
3471 // Collect the known bits that are shared by every vector element referenced
3472 // by the shuffle.
3473 APInt DemandedLHS, DemandedRHS;
3475 assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
3476 if (!getShuffleDemandedElts(NumElts, SVN->getMask(), DemandedElts,
3477 DemandedLHS, DemandedRHS))
3478 break;
3479
3480 // Known bits are the values that are shared by every demanded element.
3481 Known.setAllConflict();
3482 if (!!DemandedLHS) {
3483 SDValue LHS = Op.getOperand(0);
3484 Known2 = computeKnownBits(LHS, DemandedLHS, Depth + 1);
3485 Known = Known.intersectWith(Known2);
3486 }
3487 // If we don't know any bits, early out.
3488 if (Known.isUnknown())
3489 break;
3490 if (!!DemandedRHS) {
3491 SDValue RHS = Op.getOperand(1);
3492 Known2 = computeKnownBits(RHS, DemandedRHS, Depth + 1);
3493 Known = Known.intersectWith(Known2);
3494 }
3495 break;
3496 }
3497 case ISD::VSCALE: {
3499 const APInt &Multiplier = Op.getConstantOperandAPInt(0);
3500 Known = getVScaleRange(&F, BitWidth).multiply(Multiplier).toKnownBits();
3501 break;
3502 }
3503 case ISD::CONCAT_VECTORS: {
3504 if (Op.getValueType().isScalableVector())
3505 break;
3506 // Split DemandedElts and test each of the demanded subvectors.
3507 Known.setAllConflict();
3508 EVT SubVectorVT = Op.getOperand(0).getValueType();
3509 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
3510 unsigned NumSubVectors = Op.getNumOperands();
3511 for (unsigned i = 0; i != NumSubVectors; ++i) {
3512 APInt DemandedSub =
3513 DemandedElts.extractBits(NumSubVectorElts, i * NumSubVectorElts);
3514 if (!!DemandedSub) {
3515 SDValue Sub = Op.getOperand(i);
3516 Known2 = computeKnownBits(Sub, DemandedSub, Depth + 1);
3517 Known = Known.intersectWith(Known2);
3518 }
3519 // If we don't know any bits, early out.
3520 if (Known.isUnknown())
3521 break;
3522 }
3523 break;
3524 }
3525 case ISD::INSERT_SUBVECTOR: {
3526 if (Op.getValueType().isScalableVector())
3527 break;
3528 // Demand any elements from the subvector and the remainder from the src its
3529 // inserted into.
3530 SDValue Src = Op.getOperand(0);
3531 SDValue Sub = Op.getOperand(1);
3532 uint64_t Idx = Op.getConstantOperandVal(2);
3533 unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
3534 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
3535 APInt DemandedSrcElts = DemandedElts;
3536 DemandedSrcElts.clearBits(Idx, Idx + NumSubElts);
3537
3538 Known.setAllConflict();
3539 if (!!DemandedSubElts) {
3540 Known = computeKnownBits(Sub, DemandedSubElts, Depth + 1);
3541 if (Known.isUnknown())
3542 break; // early-out.
3543 }
3544 if (!!DemandedSrcElts) {
3545 Known2 = computeKnownBits(Src, DemandedSrcElts, Depth + 1);
3546 Known = Known.intersectWith(Known2);
3547 }
3548 break;
3549 }
3551 // Offset the demanded elts by the subvector index.
3552 SDValue Src = Op.getOperand(0);
3553
3554 APInt DemandedSrcElts;
3555 if (Src.getValueType().isScalableVector())
3556 DemandedSrcElts = APInt(1, 1); // <=> 'demand all elements'
3557 else {
3558 uint64_t Idx = Op.getConstantOperandVal(1);
3559 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3560 DemandedSrcElts = DemandedElts.zext(NumSrcElts).shl(Idx);
3561 }
3562 Known = computeKnownBits(Src, DemandedSrcElts, Depth + 1);
3563 break;
3564 }
3565 case ISD::SCALAR_TO_VECTOR: {
3566 if (Op.getValueType().isScalableVector())
3567 break;
3568 // We know about scalar_to_vector as much as we know about it source,
3569 // which becomes the first element of otherwise unknown vector.
3570 if (DemandedElts != 1)
3571 break;
3572
3573 SDValue N0 = Op.getOperand(0);
3574 Known = computeKnownBits(N0, Depth + 1);
3575 if (N0.getValueSizeInBits() != BitWidth)
3576 Known = Known.trunc(BitWidth);
3577
3578 break;
3579 }
3580 case ISD::BITCAST: {
3581 if (Op.getValueType().isScalableVector())
3582 break;
3583
3584 SDValue N0 = Op.getOperand(0);
3585 EVT SubVT = N0.getValueType();
3586 unsigned SubBitWidth = SubVT.getScalarSizeInBits();
3587
3588 // Ignore bitcasts from unsupported types.
3589 if (!(SubVT.isInteger() || SubVT.isFloatingPoint()))
3590 break;
3591
3592 // Fast handling of 'identity' bitcasts.
3593 if (BitWidth == SubBitWidth) {
3594 Known = computeKnownBits(N0, DemandedElts, Depth + 1);
3595 break;
3596 }
3597
3598 bool IsLE = getDataLayout().isLittleEndian();
3599
3600 // Bitcast 'small element' vector to 'large element' scalar/vector.
3601 if ((BitWidth % SubBitWidth) == 0) {
3602 assert(N0.getValueType().isVector() && "Expected bitcast from vector");
3603
3604 // Collect known bits for the (larger) output by collecting the known
3605 // bits from each set of sub elements and shift these into place.
3606 // We need to separately call computeKnownBits for each set of
3607 // sub elements as the knownbits for each is likely to be different.
3608 unsigned SubScale = BitWidth / SubBitWidth;
3609 APInt SubDemandedElts(NumElts * SubScale, 0);
3610 for (unsigned i = 0; i != NumElts; ++i)
3611 if (DemandedElts[i])
3612 SubDemandedElts.setBit(i * SubScale);
3613
3614 for (unsigned i = 0; i != SubScale; ++i) {
3615 Known2 = computeKnownBits(N0, SubDemandedElts.shl(i),
3616 Depth + 1);
3617 unsigned Shifts = IsLE ? i : SubScale - 1 - i;
3618 Known.insertBits(Known2, SubBitWidth * Shifts);
3619 }
3620 }
3621
3622 // Bitcast 'large element' scalar/vector to 'small element' vector.
3623 if ((SubBitWidth % BitWidth) == 0) {
3624 assert(Op.getValueType().isVector() && "Expected bitcast to vector");
3625
3626 // Collect known bits for the (smaller) output by collecting the known
3627 // bits from the overlapping larger input elements and extracting the
3628 // sub sections we actually care about.
3629 unsigned SubScale = SubBitWidth / BitWidth;
3630 APInt SubDemandedElts =
3631 APIntOps::ScaleBitMask(DemandedElts, NumElts / SubScale);
3632 Known2 = computeKnownBits(N0, SubDemandedElts, Depth + 1);
3633
3634 Known.setAllConflict();
3635 for (unsigned i = 0; i != NumElts; ++i)
3636 if (DemandedElts[i]) {
3637 unsigned Shifts = IsLE ? i : NumElts - 1 - i;
3638 unsigned Offset = (Shifts % SubScale) * BitWidth;
3639 Known = Known.intersectWith(Known2.extractBits(BitWidth, Offset));
3640 // If we don't know any bits, early out.
3641 if (Known.isUnknown())
3642 break;
3643 }
3644 }
3645 break;
3646 }
3647 case ISD::AND:
3648 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3649 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3650
3651 Known &= Known2;
3652 break;
3653 case ISD::OR:
3654 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3655 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3656
3657 Known |= Known2;
3658 break;
3659 case ISD::XOR:
3660 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3661 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3662
3663 Known ^= Known2;
3664 break;
3665 case ISD::MUL: {
3666 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3667 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3668 bool SelfMultiply = Op.getOperand(0) == Op.getOperand(1);
3669 // TODO: SelfMultiply can be poison, but not undef.
3670 if (SelfMultiply)
3671 SelfMultiply &= isGuaranteedNotToBeUndefOrPoison(
3672 Op.getOperand(0), DemandedElts, false, Depth + 1);
3673 Known = KnownBits::mul(Known, Known2, SelfMultiply);
3674
3675 // If the multiplication is known not to overflow, the product of a number
3676 // with itself is non-negative. Only do this if we didn't already computed
3677 // the opposite value for the sign bit.
3678 if (Op->getFlags().hasNoSignedWrap() &&
3679 Op.getOperand(0) == Op.getOperand(1) &&
3680 !Known.isNegative())
3681 Known.makeNonNegative();
3682 break;
3683 }
3684 case ISD::MULHU: {
3685 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3686 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3687 Known = KnownBits::mulhu(Known, Known2);
3688 break;
3689 }
3690 case ISD::MULHS: {
3691 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3692 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3693 Known = KnownBits::mulhs(Known, Known2);
3694 break;
3695 }
3696 case ISD::ABDU: {
3697 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3698 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3699 Known = KnownBits::abdu(Known, Known2);
3700 break;
3701 }
3702 case ISD::ABDS: {
3703 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3704 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3705 Known = KnownBits::abds(Known, Known2);
3706 unsigned SignBits1 =
3707 ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
3708 if (SignBits1 == 1)
3709 break;
3710 unsigned SignBits0 =
3711 ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3712 Known.Zero.setHighBits(std::min(SignBits0, SignBits1) - 1);
3713 break;
3714 }
3715 case ISD::UMUL_LOHI: {
3716 assert((Op.getResNo() == 0 || Op.getResNo() == 1) && "Unknown result");
3717 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3718 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3719 bool SelfMultiply = Op.getOperand(0) == Op.getOperand(1);
3720 if (Op.getResNo() == 0)
3721 Known = KnownBits::mul(Known, Known2, SelfMultiply);
3722 else
3723 Known = KnownBits::mulhu(Known, Known2);
3724 break;
3725 }
3726 case ISD::SMUL_LOHI: {
3727 assert((Op.getResNo() == 0 || Op.getResNo() == 1) && "Unknown result");
3728 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3729 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3730 bool SelfMultiply = Op.getOperand(0) == Op.getOperand(1);
3731 if (Op.getResNo() == 0)
3732 Known = KnownBits::mul(Known, Known2, SelfMultiply);
3733 else
3734 Known = KnownBits::mulhs(Known, Known2);
3735 break;
3736 }
3737 case ISD::AVGFLOORU: {
3738 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3739 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3740 Known = KnownBits::avgFloorU(Known, Known2);
3741 break;
3742 }
3743 case ISD::AVGCEILU: {
3744 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3745 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3746 Known = KnownBits::avgCeilU(Known, Known2);
3747 break;
3748 }
3749 case ISD::AVGFLOORS: {
3750 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3751 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3752 Known = KnownBits::avgFloorS(Known, Known2);
3753 break;
3754 }
3755 case ISD::AVGCEILS: {
3756 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3757 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3758 Known = KnownBits::avgCeilS(Known, Known2);
3759 break;
3760 }
3761 case ISD::SELECT:
3762 case ISD::VSELECT:
3763 Known = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1);
3764 // If we don't know any bits, early out.
3765 if (Known.isUnknown())
3766 break;
3767 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth+1);
3768
3769 // Only known if known in both the LHS and RHS.
3770 Known = Known.intersectWith(Known2);
3771 break;
3772 case ISD::SELECT_CC:
3773 Known = computeKnownBits(Op.getOperand(3), DemandedElts, Depth+1);
3774 // If we don't know any bits, early out.
3775 if (Known.isUnknown())
3776 break;
3777 Known2 = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1);
3778
3779 // Only known if known in both the LHS and RHS.
3780 Known = Known.intersectWith(Known2);
3781 break;
3782 case ISD::SMULO:
3783 case ISD::UMULO:
3784 if (Op.getResNo() != 1)
3785 break;
3786 // The boolean result conforms to getBooleanContents.
3787 // If we know the result of a setcc has the top bits zero, use this info.
3788 // We know that we have an integer-based boolean since these operations
3789 // are only available for integer.
3790 if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
3792 BitWidth > 1)
3793 Known.Zero.setBitsFrom(1);
3794 break;
3795 case ISD::SETCC:
3796 case ISD::SETCCCARRY:
3797 case ISD::STRICT_FSETCC:
3798 case ISD::STRICT_FSETCCS: {
3799 unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0;
3800 // If we know the result of a setcc has the top bits zero, use this info.
3801 if (TLI->getBooleanContents(Op.getOperand(OpNo).getValueType()) ==
3803 BitWidth > 1)
3804 Known.Zero.setBitsFrom(1);
3805 break;
3806 }
3807 case ISD::SHL: {
3808 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3809 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3810
3811 bool NUW = Op->getFlags().hasNoUnsignedWrap();
3812 bool NSW = Op->getFlags().hasNoSignedWrap();
3813
3814 bool ShAmtNonZero = Known2.isNonZero();
3815
3816 Known = KnownBits::shl(Known, Known2, NUW, NSW, ShAmtNonZero);
3817
3818 // Minimum shift low bits are known zero.
3819 if (std::optional<unsigned> ShMinAmt =
3820 getValidMinimumShiftAmount(Op, DemandedElts, Depth + 1))
3821 Known.Zero.setLowBits(*ShMinAmt);
3822 break;
3823 }
3824 case ISD::SRL:
3825 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3826 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3827 Known = KnownBits::lshr(Known, Known2, /*ShAmtNonZero=*/false,
3828 Op->getFlags().hasExact());
3829
3830 // Minimum shift high bits are known zero.
3831 if (std::optional<unsigned> ShMinAmt =
3832 getValidMinimumShiftAmount(Op, DemandedElts, Depth + 1))
3833 Known.Zero.setHighBits(*ShMinAmt);
3834 break;
3835 case ISD::SRA:
3836 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3837 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3838 Known = KnownBits::ashr(Known, Known2, /*ShAmtNonZero=*/false,
3839 Op->getFlags().hasExact());
3840 break;
3841 case ISD::ROTL:
3842 case ISD::ROTR:
3843 if (ConstantSDNode *C =
3844 isConstOrConstSplat(Op.getOperand(1), DemandedElts)) {
3845 unsigned Amt = C->getAPIntValue().urem(BitWidth);
3846
3847 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3848
3849 // Canonicalize to ROTR.
3850 if (Opcode == ISD::ROTL && Amt != 0)
3851 Amt = BitWidth - Amt;
3852
3853 Known.Zero = Known.Zero.rotr(Amt);
3854 Known.One = Known.One.rotr(Amt);
3855 }
3856 break;
3857 case ISD::FSHL:
3858 case ISD::FSHR:
3859 if (ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(2), DemandedElts)) {
3860 unsigned Amt = C->getAPIntValue().urem(BitWidth);
3861
3862 // For fshl, 0-shift returns the 1st arg.
3863 // For fshr, 0-shift returns the 2nd arg.
3864 if (Amt == 0) {
3865 Known = computeKnownBits(Op.getOperand(Opcode == ISD::FSHL ? 0 : 1),
3866 DemandedElts, Depth + 1);
3867 break;
3868 }
3869
3870 // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
3871 // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
3872 const APInt ShAmt(BitWidth, Amt);
3873 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3874 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3875 Known = Opcode == ISD::FSHL ? KnownBits::fshl(Known, Known2, ShAmt)
3876 : KnownBits::fshr(Known, Known2, ShAmt);
3877 }
3878 break;
3879 case ISD::SHL_PARTS:
3880 case ISD::SRA_PARTS:
3881 case ISD::SRL_PARTS: {
3882 assert((Op.getResNo() == 0 || Op.getResNo() == 1) && "Unknown result");
3883
3884 // Collect lo/hi source values and concatenate.
3885 unsigned LoBits = Op.getOperand(0).getScalarValueSizeInBits();
3886 unsigned HiBits = Op.getOperand(1).getScalarValueSizeInBits();
3887 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3888 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3889 Known = Known2.concat(Known);
3890
3891 // Collect shift amount.
3892 Known2 = computeKnownBits(Op.getOperand(2), DemandedElts, Depth + 1);
3893
3894 if (Opcode == ISD::SHL_PARTS)
3895 Known = KnownBits::shl(Known, Known2);
3896 else if (Opcode == ISD::SRA_PARTS)
3897 Known = KnownBits::ashr(Known, Known2);
3898 else // if (Opcode == ISD::SRL_PARTS)
3899 Known = KnownBits::lshr(Known, Known2);
3900
3901 // TODO: Minimum shift low/high bits are known zero.
3902
3903 if (Op.getResNo() == 0)
3904 Known = Known.extractBits(LoBits, 0);
3905 else
3906 Known = Known.extractBits(HiBits, LoBits);
3907 break;
3908 }
3910 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3911 EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
3912 Known = Known.sextInReg(EVT.getScalarSizeInBits());
3913 break;
3914 }
3915 case ISD::CTTZ:
3916 case ISD::CTTZ_ZERO_UNDEF: {
3917 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3918 // If we have a known 1, its position is our upper bound.
3919 unsigned PossibleTZ = Known2.countMaxTrailingZeros();
3920 unsigned LowBits = llvm::bit_width(PossibleTZ);
3921 Known.Zero.setBitsFrom(LowBits);
3922 break;
3923 }
3924 case ISD::CTLZ:
3925 case ISD::CTLZ_ZERO_UNDEF: {
3926 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3927 // If we have a known 1, its position is our upper bound.
3928 unsigned PossibleLZ = Known2.countMaxLeadingZeros();
3929 unsigned LowBits = llvm::bit_width(PossibleLZ);
3930 Known.Zero.setBitsFrom(LowBits);
3931 break;
3932 }
3933 case ISD::CTLS: {
3934 unsigned MinRedundantSignBits =
3935 ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1) - 1;
3936 ConstantRange Range(APInt(BitWidth, MinRedundantSignBits),
3938 Known = Range.toKnownBits();
3939 break;
3940 }
3941 case ISD::CTPOP: {
3942 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3943 // If we know some of the bits are zero, they can't be one.
3944 unsigned PossibleOnes = Known2.countMaxPopulation();
3945 Known.Zero.setBitsFrom(llvm::bit_width(PossibleOnes));
3946 break;
3947 }
3948 case ISD::PARITY: {
3949 // Parity returns 0 everywhere but the LSB.
3950 Known.Zero.setBitsFrom(1);
3951 break;
3952 }
3953 case ISD::CLMUL: {
3954 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3955 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3956 Known = KnownBits::clmul(Known, Known2);
3957 break;
3958 }
3959 case ISD::MGATHER:
3960 case ISD::MLOAD: {
3961 ISD::LoadExtType ETy =
3962 (Opcode == ISD::MGATHER)
3963 ? cast<MaskedGatherSDNode>(Op)->getExtensionType()
3964 : cast<MaskedLoadSDNode>(Op)->getExtensionType();
3965 if (ETy == ISD::ZEXTLOAD) {
3966 EVT MemVT = cast<MemSDNode>(Op)->getMemoryVT();
3967 KnownBits Known0(MemVT.getScalarSizeInBits());
3968 return Known0.zext(BitWidth);
3969 }
3970 break;
3971 }
3972 case ISD::LOAD: {
3974 const Constant *Cst = TLI->getTargetConstantFromLoad(LD);
3975 if (ISD::isNON_EXTLoad(LD) && Cst) {
3976 // Determine any common known bits from the loaded constant pool value.
3977 Type *CstTy = Cst->getType();
3978 if ((NumElts * BitWidth) == CstTy->getPrimitiveSizeInBits() &&
3979 !Op.getValueType().isScalableVector()) {
3980 // If its a vector splat, then we can (quickly) reuse the scalar path.
3981 // NOTE: We assume all elements match and none are UNDEF.
3982 if (CstTy->isVectorTy()) {
3983 if (const Constant *Splat = Cst->getSplatValue()) {
3984 Cst = Splat;
3985 CstTy = Cst->getType();
3986 }
3987 }
3988 // TODO - do we need to handle different bitwidths?
3989 if (CstTy->isVectorTy() && BitWidth == CstTy->getScalarSizeInBits()) {
3990 // Iterate across all vector elements finding common known bits.
3991 Known.setAllConflict();
3992 for (unsigned i = 0; i != NumElts; ++i) {
3993 if (!DemandedElts[i])
3994 continue;
3995 if (Constant *Elt = Cst->getAggregateElement(i)) {
3996 if (auto *CInt = dyn_cast<ConstantInt>(Elt)) {
3997 const APInt &Value = CInt->getValue();
3998 Known.One &= Value;
3999 Known.Zero &= ~Value;
4000 continue;
4001 }
4002 if (auto *CFP = dyn_cast<ConstantFP>(Elt)) {
4003 APInt Value = CFP->getValueAPF().bitcastToAPInt();
4004 Known.One &= Value;
4005 Known.Zero &= ~Value;
4006 continue;
4007 }
4008 }
4009 Known.One.clearAllBits();
4010 Known.Zero.clearAllBits();
4011 break;
4012 }
4013 } else if (BitWidth == CstTy->getPrimitiveSizeInBits()) {
4014 if (auto *CInt = dyn_cast<ConstantInt>(Cst)) {
4015 Known = KnownBits::makeConstant(CInt->getValue());
4016 } else if (auto *CFP = dyn_cast<ConstantFP>(Cst)) {
4017 Known =
4018 KnownBits::makeConstant(CFP->getValueAPF().bitcastToAPInt());
4019 }
4020 }
4021 }
4022 } else if (Op.getResNo() == 0) {
4023 unsigned ScalarMemorySize = LD->getMemoryVT().getScalarSizeInBits();
4024 KnownBits KnownScalarMemory(ScalarMemorySize);
4025 if (const MDNode *MD = LD->getRanges())
4026 computeKnownBitsFromRangeMetadata(*MD, KnownScalarMemory);
4027
4028 // Extend the Known bits from memory to the size of the scalar result.
4029 if (ISD::isZEXTLoad(Op.getNode()))
4030 Known = KnownScalarMemory.zext(BitWidth);
4031 else if (ISD::isSEXTLoad(Op.getNode()))
4032 Known = KnownScalarMemory.sext(BitWidth);
4033 else if (ISD::isEXTLoad(Op.getNode()))
4034 Known = KnownScalarMemory.anyext(BitWidth);
4035 else
4036 Known = KnownScalarMemory;
4037 assert(Known.getBitWidth() == BitWidth);
4038 return Known;
4039 }
4040 break;
4041 }
4043 if (Op.getValueType().isScalableVector())
4044 break;
4045 EVT InVT = Op.getOperand(0).getValueType();
4046 APInt InDemandedElts = DemandedElts.zext(InVT.getVectorNumElements());
4047 Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
4048 Known = Known.zext(BitWidth);
4049 break;
4050 }
4051 case ISD::ZERO_EXTEND: {
4052 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4053 Known = Known.zext(BitWidth);
4054 break;
4055 }
4057 if (Op.getValueType().isScalableVector())
4058 break;
4059 EVT InVT = Op.getOperand(0).getValueType();
4060 APInt InDemandedElts = DemandedElts.zext(InVT.getVectorNumElements());
4061 Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
4062 // If the sign bit is known to be zero or one, then sext will extend
4063 // it to the top bits, else it will just zext.
4064 Known = Known.sext(BitWidth);
4065 break;
4066 }
4067 case ISD::SIGN_EXTEND: {
4068 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4069 // If the sign bit is known to be zero or one, then sext will extend
4070 // it to the top bits, else it will just zext.
4071 Known = Known.sext(BitWidth);
4072 break;
4073 }
4075 if (Op.getValueType().isScalableVector())
4076 break;
4077 EVT InVT = Op.getOperand(0).getValueType();
4078 APInt InDemandedElts = DemandedElts.zext(InVT.getVectorNumElements());
4079 Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
4080 Known = Known.anyext(BitWidth);
4081 break;
4082 }
4083 case ISD::ANY_EXTEND: {
4084 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4085 Known = Known.anyext(BitWidth);
4086 break;
4087 }
4088 case ISD::TRUNCATE: {
4089 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4090 Known = Known.trunc(BitWidth);
4091 break;
4092 }
4093 case ISD::TRUNCATE_SSAT_S: {
4094 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4095 Known = Known.truncSSat(BitWidth);
4096 break;
4097 }
4098 case ISD::TRUNCATE_SSAT_U: {
4099 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4100 Known = Known.truncSSatU(BitWidth);
4101 break;
4102 }
4103 case ISD::TRUNCATE_USAT_U: {
4104 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4105 Known = Known.truncUSat(BitWidth);
4106 break;
4107 }
4108 case ISD::AssertZext: {
4109 EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
4111 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4112 Known.Zero |= (~InMask);
4113 Known.One &= (~Known.Zero);
4114 break;
4115 }
4116 case ISD::AssertAlign: {
4117 unsigned LogOfAlign = Log2(cast<AssertAlignSDNode>(Op)->getAlign());
4118 assert(LogOfAlign != 0);
4119
4120 // TODO: Should use maximum with source
4121 // If a node is guaranteed to be aligned, set low zero bits accordingly as
4122 // well as clearing one bits.
4123 Known.Zero.setLowBits(LogOfAlign);
4124 Known.One.clearLowBits(LogOfAlign);
4125 break;
4126 }
4127 case ISD::AssertNoFPClass: {
4128 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4129
4130 FPClassTest NoFPClass =
4131 static_cast<FPClassTest>(Op.getConstantOperandVal(1));
4132 const FPClassTest NegativeTestMask = fcNan | fcNegative;
4133 if ((NoFPClass & NegativeTestMask) == NegativeTestMask) {
4134 // Cannot be negative.
4135 Known.makeNonNegative();
4136 }
4137
4138 const FPClassTest PositiveTestMask = fcNan | fcPositive;
4139 if ((NoFPClass & PositiveTestMask) == PositiveTestMask) {
4140 // Cannot be positive.
4141 Known.makeNegative();
4142 }
4143
4144 break;
4145 }
4146 case ISD::FABS:
4147 // fabs clears the sign bit
4148 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4149 Known.makeNonNegative();
4150 break;
4151 case ISD::FGETSIGN:
4152 // All bits are zero except the low bit.
4153 Known.Zero.setBitsFrom(1);
4154 break;
4155 case ISD::ADD: {
4156 SDNodeFlags Flags = Op.getNode()->getFlags();
4157 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4158 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
4159 bool SelfAdd = Op.getOperand(0) == Op.getOperand(1) &&
4161 Op.getOperand(0), DemandedElts, false, Depth + 1);
4162 Known = KnownBits::add(Known, Known2, Flags.hasNoSignedWrap(),
4163 Flags.hasNoUnsignedWrap(), SelfAdd);
4164 break;
4165 }
4166 case ISD::SUB: {
4167 SDNodeFlags Flags = Op.getNode()->getFlags();
4168 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4169 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
4170 Known = KnownBits::sub(Known, Known2, Flags.hasNoSignedWrap(),
4171 Flags.hasNoUnsignedWrap());
4172 break;
4173 }
4174 case ISD::USUBO:
4175 case ISD::SSUBO:
4176 case ISD::USUBO_CARRY:
4177 case ISD::SSUBO_CARRY:
4178 if (Op.getResNo() == 1) {
4179 // If we know the result of a setcc has the top bits zero, use this info.
4180 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
4182 BitWidth > 1)
4183 Known.Zero.setBitsFrom(1);
4184 break;
4185 }
4186 [[fallthrough]];
4187 case ISD::SUBC: {
4188 assert(Op.getResNo() == 0 &&
4189 "We only compute knownbits for the difference here.");
4190
4191 // With USUBO_CARRY and SSUBO_CARRY a borrow bit may be added in.
4192 KnownBits Borrow(1);
4193 if (Opcode == ISD::USUBO_CARRY || Opcode == ISD::SSUBO_CARRY) {
4194 Borrow = computeKnownBits(Op.getOperand(2), DemandedElts, Depth + 1);
4195 // Borrow has bit width 1
4196 Borrow = Borrow.trunc(1);
4197 } else {
4198 Borrow.setAllZero();
4199 }
4200
4201 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4202 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
4203 Known = KnownBits::computeForSubBorrow(Known, Known2, Borrow);
4204 break;
4205 }
4206 case ISD::UADDO:
4207 case ISD::SADDO:
4208 case ISD::UADDO_CARRY:
4209 case ISD::SADDO_CARRY:
4210 if (Op.getResNo() == 1) {
4211 // If we know the result of a setcc has the top bits zero, use this info.
4212 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
4214 BitWidth > 1)
4215 Known.Zero.setBitsFrom(1);
4216 break;
4217 }
4218 [[fallthrough]];
4219 case ISD::ADDC:
4220 case ISD::ADDE: {
4221 assert(Op.getResNo() == 0 && "We only compute knownbits for the sum here.");
4222
4223 // With ADDE and UADDO_CARRY, a carry bit may be added in.
4224 KnownBits Carry(1);
4225 if (Opcode == ISD::ADDE)
4226 // Can't track carry from glue, set carry to unknown.
4227 Carry.resetAll();
4228 else if (Opcode == ISD::UADDO_CARRY || Opcode == ISD::SADDO_CARRY) {
4229 Carry = computeKnownBits(Op.getOperand(2), DemandedElts, Depth + 1);
4230 // Carry has bit width 1
4231 Carry = Carry.trunc(1);
4232 } else {
4233 Carry.setAllZero();
4234 }
4235
4236 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4237 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
4238 Known = KnownBits::computeForAddCarry(Known, Known2, Carry);
4239 break;
4240 }
4241 case ISD::UDIV: {
4242 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4243 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
4244 Known = KnownBits::udiv(Known, Known2, Op->getFlags().hasExact());
4245 break;
4246 }
4247 case ISD::SDIV: {
4248 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4249 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
4250 Known = KnownBits::sdiv(Known, Known2, Op->getFlags().hasExact());
4251 break;
4252 }
4253 case ISD::SREM: {
4254 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4255 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
4256 Known = KnownBits::srem(Known, Known2);
4257 break;
4258 }
4259 case ISD::UREM: {
4260 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4261 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
4262 Known = KnownBits::urem(Known, Known2);
4263 break;
4264 }
4265 case ISD::EXTRACT_ELEMENT: {
4266 Known = computeKnownBits(Op.getOperand(0), Depth+1);
4267 const unsigned Index = Op.getConstantOperandVal(1);
4268 const unsigned EltBitWidth = Op.getValueSizeInBits();
4269
4270 // Remove low part of known bits mask
4271 Known.Zero = Known.Zero.getHiBits(Known.getBitWidth() - Index * EltBitWidth);
4272 Known.One = Known.One.getHiBits(Known.getBitWidth() - Index * EltBitWidth);
4273
4274 // Remove high part of known bit mask
4275 Known = Known.trunc(EltBitWidth);
4276 break;
4277 }
4279 SDValue InVec = Op.getOperand(0);
4280 SDValue EltNo = Op.getOperand(1);
4281 EVT VecVT = InVec.getValueType();
4282 // computeKnownBits not yet implemented for scalable vectors.
4283 if (VecVT.isScalableVector())
4284 break;
4285 const unsigned EltBitWidth = VecVT.getScalarSizeInBits();
4286 const unsigned NumSrcElts = VecVT.getVectorNumElements();
4287
4288 // If BitWidth > EltBitWidth the value is anyext:ed. So we do not know
4289 // anything about the extended bits.
4290 if (BitWidth > EltBitWidth)
4291 Known = Known.trunc(EltBitWidth);
4292
4293 // If we know the element index, just demand that vector element, else for
4294 // an unknown element index, ignore DemandedElts and demand them all.
4295 APInt DemandedSrcElts = APInt::getAllOnes(NumSrcElts);
4296 auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
4297 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
4298 DemandedSrcElts =
4299 APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue());
4300
4301 Known = computeKnownBits(InVec, DemandedSrcElts, Depth + 1);
4302 if (BitWidth > EltBitWidth)
4303 Known = Known.anyext(BitWidth);
4304 break;
4305 }
4307 if (Op.getValueType().isScalableVector())
4308 break;
4309
4310 // If we know the element index, split the demand between the
4311 // source vector and the inserted element, otherwise assume we need
4312 // the original demanded vector elements and the value.
4313 SDValue InVec = Op.getOperand(0);
4314 SDValue InVal = Op.getOperand(1);
4315 SDValue EltNo = Op.getOperand(2);
4316 bool DemandedVal = true;
4317 APInt DemandedVecElts = DemandedElts;
4318 auto *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
4319 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
4320 unsigned EltIdx = CEltNo->getZExtValue();
4321 DemandedVal = !!DemandedElts[EltIdx];
4322 DemandedVecElts.clearBit(EltIdx);
4323 }
4324 Known.setAllConflict();
4325 if (DemandedVal) {
4326 Known2 = computeKnownBits(InVal, Depth + 1);
4327 Known = Known.intersectWith(Known2.zextOrTrunc(BitWidth));
4328 }
4329 if (!!DemandedVecElts) {
4330 Known2 = computeKnownBits(InVec, DemandedVecElts, Depth + 1);
4331 Known = Known.intersectWith(Known2);
4332 }
4333 break;
4334 }
4335 case ISD::BITREVERSE: {
4336 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4337 Known = Known2.reverseBits();
4338 break;
4339 }
4340 case ISD::BSWAP: {
4341 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4342 Known = Known2.byteSwap();
4343 break;
4344 }
4345 case ISD::ABS: {
4346 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4347 Known = Known2.abs();
4348 Known.Zero.setHighBits(
4349 ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1) - 1);
4350 break;
4351 }
4352 case ISD::USUBSAT: {
4353 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4354 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
4355 Known = KnownBits::usub_sat(Known, Known2);
4356 break;
4357 }
4358 case ISD::UMIN: {
4359 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4360 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
4361 Known = KnownBits::umin(Known, Known2);
4362 break;
4363 }
4364 case ISD::UMAX: {
4365 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4366 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
4367 Known = KnownBits::umax(Known, Known2);
4368 break;
4369 }
4370 case ISD::SMIN:
4371 case ISD::SMAX: {
4372 // If we have a clamp pattern, we know that the number of sign bits will be
4373 // the minimum of the clamp min/max range.
4374 bool IsMax = (Opcode == ISD::SMAX);
4375 ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr;
4376 if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts)))
4377 if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX))
4378 CstHigh =
4379 isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts);
4380 if (CstLow && CstHigh) {
4381 if (!IsMax)
4382 std::swap(CstLow, CstHigh);
4383
4384 const APInt &ValueLow = CstLow->getAPIntValue();
4385 const APInt &ValueHigh = CstHigh->getAPIntValue();
4386 if (ValueLow.sle(ValueHigh)) {
4387 unsigned LowSignBits = ValueLow.getNumSignBits();
4388 unsigned HighSignBits = ValueHigh.getNumSignBits();
4389 unsigned MinSignBits = std::min(LowSignBits, HighSignBits);
4390 if (ValueLow.isNegative() && ValueHigh.isNegative()) {
4391 Known.One.setHighBits(MinSignBits);
4392 break;
4393 }
4394 if (ValueLow.isNonNegative() && ValueHigh.isNonNegative()) {
4395 Known.Zero.setHighBits(MinSignBits);
4396 break;
4397 }
4398 }
4399 }
4400
4401 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4402 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
4403 if (IsMax)
4404 Known = KnownBits::smax(Known, Known2);
4405 else
4406 Known = KnownBits::smin(Known, Known2);
4407
4408 // For SMAX, if CstLow is non-negative we know the result will be
4409 // non-negative and thus all sign bits are 0.
4410 // TODO: There's an equivalent of this for smin with negative constant for
4411 // known ones.
4412 if (IsMax && CstLow) {
4413 const APInt &ValueLow = CstLow->getAPIntValue();
4414 if (ValueLow.isNonNegative()) {
4415 unsigned SignBits = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
4416 Known.Zero.setHighBits(std::min(SignBits, ValueLow.getNumSignBits()));
4417 }
4418 }
4419
4420 break;
4421 }
4422 case ISD::UINT_TO_FP: {
4423 Known.makeNonNegative();
4424 break;
4425 }
4426 case ISD::SINT_TO_FP: {
4427 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4428 if (Known2.isNonNegative())
4429 Known.makeNonNegative();
4430 else if (Known2.isNegative())
4431 Known.makeNegative();
4432 break;
4433 }
4434 case ISD::FP_TO_UINT_SAT: {
4435 // FP_TO_UINT_SAT produces an unsigned value that fits in the saturating VT.
4436 EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
4438 break;
4439 }
4440 case ISD::ATOMIC_LOAD: {
4441 // If we are looking at the loaded value.
4442 if (Op.getResNo() == 0) {
4443 auto *AT = cast<AtomicSDNode>(Op);
4444 unsigned ScalarMemorySize = AT->getMemoryVT().getScalarSizeInBits();
4445 KnownBits KnownScalarMemory(ScalarMemorySize);
4446 if (const MDNode *MD = AT->getRanges())
4447 computeKnownBitsFromRangeMetadata(*MD, KnownScalarMemory);
4448
4449 switch (AT->getExtensionType()) {
4450 case ISD::ZEXTLOAD:
4451 Known = KnownScalarMemory.zext(BitWidth);
4452 break;
4453 case ISD::SEXTLOAD:
4454 Known = KnownScalarMemory.sext(BitWidth);
4455 break;
4456 case ISD::EXTLOAD:
4457 switch (TLI->getExtendForAtomicOps()) {
4458 case ISD::ZERO_EXTEND:
4459 Known = KnownScalarMemory.zext(BitWidth);
4460 break;
4461 case ISD::SIGN_EXTEND:
4462 Known = KnownScalarMemory.sext(BitWidth);
4463 break;
4464 default:
4465 Known = KnownScalarMemory.anyext(BitWidth);
4466 break;
4467 }
4468 break;
4469 case ISD::NON_EXTLOAD:
4470 Known = KnownScalarMemory;
4471 break;
4472 }
4473 assert(Known.getBitWidth() == BitWidth);
4474 }
4475 break;
4476 }
4478 if (Op.getResNo() == 1) {
4479 // The boolean result conforms to getBooleanContents.
4480 // If we know the result of a setcc has the top bits zero, use this info.
4481 // We know that we have an integer-based boolean since these operations
4482 // are only available for integer.
4483 if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
4485 BitWidth > 1)
4486 Known.Zero.setBitsFrom(1);
4487 break;
4488 }
4489 [[fallthrough]];
4491 case ISD::ATOMIC_SWAP:
4502 case ISD::ATOMIC_LOAD_UMAX: {
4503 // If we are looking at the loaded value.
4504 if (Op.getResNo() == 0) {
4505 auto *AT = cast<AtomicSDNode>(Op);
4506 unsigned MemBits = AT->getMemoryVT().getScalarSizeInBits();
4507
4508 if (TLI->getExtendForAtomicOps() == ISD::ZERO_EXTEND)
4509 Known.Zero.setBitsFrom(MemBits);
4510 }
4511 break;
4512 }
4513 case ISD::FrameIndex:
4515 TLI->computeKnownBitsForFrameIndex(cast<FrameIndexSDNode>(Op)->getIndex(),
4516 Known, getMachineFunction());
4517 break;
4518
4519 default:
4520 if (Opcode < ISD::BUILTIN_OP_END)
4521 break;
4522 [[fallthrough]];
4526 // TODO: Probably okay to remove after audit; here to reduce change size
4527 // in initial enablement patch for scalable vectors
4528 if (Op.getValueType().isScalableVector())
4529 break;
4530
4531 // Allow the target to implement this method for its nodes.
4532 TLI->computeKnownBitsForTargetNode(Op, Known, DemandedElts, *this, Depth);
4533 break;
4534 }
4535
4536 return Known;
4537}
4538
4539/// Convert ConstantRange OverflowResult into SelectionDAG::OverflowKind.
4552
4555 // X + 0 never overflow
4556 if (isNullConstant(N1))
4557 return OFK_Never;
4558
4559 // If both operands each have at least two sign bits, the addition
4560 // cannot overflow.
4561 if (ComputeNumSignBits(N0) > 1 && ComputeNumSignBits(N1) > 1)
4562 return OFK_Never;
4563
4564 // TODO: Add ConstantRange::signedAddMayOverflow handling.
4565 return OFK_Sometime;
4566}
4567
4570 // X + 0 never overflow
4571 if (isNullConstant(N1))
4572 return OFK_Never;
4573
4574 // mulhi + 1 never overflow
4575 KnownBits N1Known = computeKnownBits(N1);
4576 if (N0.getOpcode() == ISD::UMUL_LOHI && N0.getResNo() == 1 &&
4577 N1Known.getMaxValue().ult(2))
4578 return OFK_Never;
4579
4580 KnownBits N0Known = computeKnownBits(N0);
4581 if (N1.getOpcode() == ISD::UMUL_LOHI && N1.getResNo() == 1 &&
4582 N0Known.getMaxValue().ult(2))
4583 return OFK_Never;
4584
4585 // Fallback to ConstantRange::unsignedAddMayOverflow handling.
4586 ConstantRange N0Range = ConstantRange::fromKnownBits(N0Known, false);
4587 ConstantRange N1Range = ConstantRange::fromKnownBits(N1Known, false);
4588 return mapOverflowResult(N0Range.unsignedAddMayOverflow(N1Range));
4589}
4590
4593 // X - 0 never overflow
4594 if (isNullConstant(N1))
4595 return OFK_Never;
4596
4597 // If both operands each have at least two sign bits, the subtraction
4598 // cannot overflow.
4599 if (ComputeNumSignBits(N0) > 1 && ComputeNumSignBits(N1) > 1)
4600 return OFK_Never;
4601
4602 KnownBits N0Known = computeKnownBits(N0);
4603 KnownBits N1Known = computeKnownBits(N1);
4604 ConstantRange N0Range = ConstantRange::fromKnownBits(N0Known, true);
4605 ConstantRange N1Range = ConstantRange::fromKnownBits(N1Known, true);
4606 return mapOverflowResult(N0Range.signedSubMayOverflow(N1Range));
4607}
4608
4611 // X - 0 never overflow
4612 if (isNullConstant(N1))
4613 return OFK_Never;
4614
4615 ConstantRange N0Range =
4616 computeConstantRangeIncludingKnownBits(N0, /*ForSigned=*/false);
4617 ConstantRange N1Range =
4618 computeConstantRangeIncludingKnownBits(N1, /*ForSigned=*/false);
4619 return mapOverflowResult(N0Range.unsignedSubMayOverflow(N1Range));
4620}
4621
4624 // X * 0 and X * 1 never overflow.
4625 if (isNullConstant(N1) || isOneConstant(N1))
4626 return OFK_Never;
4627
4630 return mapOverflowResult(N0Range.unsignedMulMayOverflow(N1Range));
4631}
4632
4635 // X * 0 and X * 1 never overflow.
4636 if (isNullConstant(N1) || isOneConstant(N1))
4637 return OFK_Never;
4638
4639 // Get the size of the result.
4640 unsigned BitWidth = N0.getScalarValueSizeInBits();
4641
4642 // Sum of the sign bits.
4643 unsigned SignBits = ComputeNumSignBits(N0) + ComputeNumSignBits(N1);
4644
4645 // If we have enough sign bits, then there's no overflow.
4646 if (SignBits > BitWidth + 1)
4647 return OFK_Never;
4648
4649 if (SignBits == BitWidth + 1) {
4650 // The overflow occurs when the true multiplication of the
4651 // the operands is the minimum negative number.
4652 KnownBits N0Known = computeKnownBits(N0);
4653 KnownBits N1Known = computeKnownBits(N1);
4654 // If one of the operands is non-negative, then there's no
4655 // overflow.
4656 if (N0Known.isNonNegative() || N1Known.isNonNegative())
4657 return OFK_Never;
4658 }
4659
4660 return OFK_Sometime;
4661}
4662
4664 unsigned Depth) const {
4665 APInt DemandedElts = getDemandAllEltsMask(Op);
4666 return computeConstantRange(Op, DemandedElts, ForSigned, Depth);
4667}
4668
4670 const APInt &DemandedElts,
4671 bool ForSigned,
4672 unsigned Depth) const {
4673 EVT VT = Op.getValueType();
4674 unsigned BitWidth = VT.getScalarSizeInBits();
4675
4676 if (Depth >= MaxRecursionDepth)
4677 return ConstantRange::getFull(BitWidth);
4678
4679 if (ConstantSDNode *C = isConstOrConstSplat(Op, DemandedElts))
4680 return ConstantRange(C->getAPIntValue());
4681
4682 unsigned Opcode = Op.getOpcode();
4683 switch (Opcode) {
4684 case ISD::VSCALE: {
4686 const APInt &Multiplier = Op.getConstantOperandAPInt(0);
4687 return getVScaleRange(&F, BitWidth).multiply(Multiplier);
4688 }
4689 default:
4690 break;
4691 }
4692
4693 return ConstantRange::getFull(BitWidth);
4694}
4695
4698 unsigned Depth) const {
4699 APInt DemandedElts = getDemandAllEltsMask(Op);
4700 return computeConstantRangeIncludingKnownBits(Op, DemandedElts, ForSigned,
4701 Depth);
4702}
4703
4705 SDValue Op, const APInt &DemandedElts, bool ForSigned,
4706 unsigned Depth) const {
4707 KnownBits Known = computeKnownBits(Op, DemandedElts, Depth);
4708 ConstantRange CR1 = ConstantRange::fromKnownBits(Known, ForSigned);
4709 ConstantRange CR2 = computeConstantRange(Op, DemandedElts, ForSigned, Depth);
4712 return CR1.intersectWith(CR2, RangeType);
4713}
4714
4716 unsigned Depth) const {
4717 APInt DemandedElts = getDemandAllEltsMask(Val);
4718 return isKnownToBeAPowerOfTwo(Val, DemandedElts, OrZero, Depth);
4719}
4720
4722 const APInt &DemandedElts,
4723 bool OrZero, unsigned Depth) const {
4724 if (Depth >= MaxRecursionDepth)
4725 return false; // Limit search depth.
4726
4727 EVT OpVT = Val.getValueType();
4728 unsigned BitWidth = OpVT.getScalarSizeInBits();
4729 [[maybe_unused]] unsigned NumElts = DemandedElts.getBitWidth();
4730 assert((!OpVT.isScalableVector() || NumElts == 1) &&
4731 "DemandedElts for scalable vectors must be 1 to represent all lanes");
4732 assert(
4733 (!OpVT.isFixedLengthVector() || NumElts == OpVT.getVectorNumElements()) &&
4734 "Unexpected vector size");
4735
4736 auto IsPowerOfTwoOrZero = [BitWidth, OrZero](const ConstantSDNode *C) {
4737 APInt V = C->getAPIntValue().zextOrTrunc(BitWidth);
4738 return (OrZero && V.isZero()) || V.isPowerOf2();
4739 };
4740
4741 // Is the constant a known power of 2 or zero?
4742 if (ISD::matchUnaryPredicate(Val, IsPowerOfTwoOrZero))
4743 return true;
4744
4745 switch (Val.getOpcode()) {
4746 case ISD::BUILD_VECTOR:
4747 // Are all operands of a build vector constant powers of two or zero?
4748 if (all_of(enumerate(Val->ops()), [&](auto P) {
4749 auto *C = dyn_cast<ConstantSDNode>(P.value());
4750 return !DemandedElts[P.index()] || (C && IsPowerOfTwoOrZero(C));
4751 }))
4752 return true;
4753 break;
4754
4755 case ISD::SPLAT_VECTOR:
4756 // Is the operand of a splat vector a constant power of two?
4757 if (auto *C = dyn_cast<ConstantSDNode>(Val->getOperand(0)))
4758 if (IsPowerOfTwoOrZero(C))
4759 return true;
4760 break;
4761
4763 SDValue InVec = Val.getOperand(0);
4764 SDValue EltNo = Val.getOperand(1);
4765 EVT VecVT = InVec.getValueType();
4766
4767 // Skip scalable vectors or implicit extensions.
4768 if (VecVT.isScalableVector() ||
4769 OpVT.getScalarSizeInBits() != VecVT.getScalarSizeInBits())
4770 break;
4771
4772 // If we know the element index, just demand that vector element, else for
4773 // an unknown element index, ignore DemandedElts and demand them all.
4774 const unsigned NumSrcElts = VecVT.getVectorNumElements();
4775 auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
4776 APInt DemandedSrcElts =
4777 ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts)
4778 ? APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue())
4779 : APInt::getAllOnes(NumSrcElts);
4780 return isKnownToBeAPowerOfTwo(InVec, DemandedSrcElts, OrZero, Depth + 1);
4781 }
4782
4783 case ISD::AND: {
4784 // Looking for `x & -x` pattern:
4785 // If x == 0:
4786 // x & -x -> 0
4787 // If x != 0:
4788 // x & -x -> non-zero pow2
4789 // so if we find the pattern return whether we know `x` is non-zero.
4790 SDValue X, Z;
4791 if (sd_match(Val, m_And(m_Value(X), m_Neg(m_Deferred(X)))) ||
4792 (sd_match(Val, m_And(m_Value(X), m_Sub(m_Value(Z), m_Deferred(X)))) &&
4793 MaskedVectorIsZero(Z, DemandedElts, Depth + 1)))
4794 return OrZero || isKnownNeverZero(X, DemandedElts, Depth);
4795 break;
4796 }
4797
4798 case ISD::SHL: {
4799 // A left-shift of a constant one will have exactly one bit set because
4800 // shifting the bit off the end is undefined.
4801 auto *C = isConstOrConstSplat(Val.getOperand(0), DemandedElts);
4802 if (C && C->getAPIntValue() == 1)
4803 return true;
4804 return (OrZero || isKnownNeverZero(Val, DemandedElts, Depth)) &&
4805 isKnownToBeAPowerOfTwo(Val.getOperand(0), DemandedElts, OrZero,
4806 Depth + 1);
4807 }
4808
4809 case ISD::SRL: {
4810 // A logical right-shift of a constant sign-bit will have exactly
4811 // one bit set.
4812 auto *C = isConstOrConstSplat(Val.getOperand(0), DemandedElts);
4813 if (C && C->getAPIntValue().isSignMask())
4814 return true;
4815 return (OrZero || isKnownNeverZero(Val, DemandedElts, Depth)) &&
4816 isKnownToBeAPowerOfTwo(Val.getOperand(0), DemandedElts, OrZero,
4817 Depth + 1);
4818 }
4819
4820 case ISD::TRUNCATE:
4821 return (OrZero || isKnownNeverZero(Val, DemandedElts, Depth)) &&
4822 isKnownToBeAPowerOfTwo(Val.getOperand(0), DemandedElts, OrZero,
4823 Depth + 1);
4824
4825 case ISD::ROTL:
4826 case ISD::ROTR:
4827 return isKnownToBeAPowerOfTwo(Val.getOperand(0), DemandedElts, OrZero,
4828 Depth + 1);
4829 case ISD::BSWAP:
4830 case ISD::BITREVERSE:
4831 return isKnownToBeAPowerOfTwo(Val.getOperand(0), DemandedElts, OrZero,
4832 Depth + 1);
4833
4834 case ISD::SMIN:
4835 case ISD::SMAX:
4836 case ISD::UMIN:
4837 case ISD::UMAX:
4838 return isKnownToBeAPowerOfTwo(Val.getOperand(1), DemandedElts, OrZero,
4839 Depth + 1) &&
4840 isKnownToBeAPowerOfTwo(Val.getOperand(0), DemandedElts, OrZero,
4841 Depth + 1);
4842
4843 case ISD::SELECT:
4844 case ISD::VSELECT:
4845 return isKnownToBeAPowerOfTwo(Val.getOperand(2), DemandedElts, OrZero,
4846 Depth + 1) &&
4847 isKnownToBeAPowerOfTwo(Val.getOperand(1), DemandedElts, OrZero,
4848 Depth + 1);
4849
4850 case ISD::ZERO_EXTEND:
4851 return isKnownToBeAPowerOfTwo(Val.getOperand(0), /*OrZero=*/false,
4852 Depth + 1);
4853
4854 case ISD::VSCALE:
4855 // vscale(power-of-two) is a power-of-two
4856 return isKnownToBeAPowerOfTwo(Val.getOperand(0), /*OrZero=*/false,
4857 Depth + 1);
4858
4859 case ISD::VECTOR_SHUFFLE: {
4861 // Demanded elements with undef shuffle mask elements are unknown
4862 // - we cannot guarantee they are a power of two, so return false.
4863 APInt DemandedLHS, DemandedRHS;
4865 assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
4866 if (!getShuffleDemandedElts(NumElts, SVN->getMask(), DemandedElts,
4867 DemandedLHS, DemandedRHS))
4868 return false;
4869
4870 // All demanded elements from LHS must be known power of two.
4871 if (!!DemandedLHS && !isKnownToBeAPowerOfTwo(Val.getOperand(0), DemandedLHS,
4872 OrZero, Depth + 1))
4873 return false;
4874
4875 // All demanded elements from RHS must be known power of two.
4876 if (!!DemandedRHS && !isKnownToBeAPowerOfTwo(Val.getOperand(1), DemandedRHS,
4877 OrZero, Depth + 1))
4878 return false;
4879
4880 return true;
4881 }
4882 }
4883
4884 // More could be done here, though the above checks are enough
4885 // to handle some common cases.
4886 return false;
4887}
4888
4890 if (ConstantFPSDNode *C1 = isConstOrConstSplatFP(Val, true))
4891 return C1->getValueAPF().getExactLog2Abs() >= 0;
4892
4893 if (Val.getOpcode() == ISD::UINT_TO_FP || Val.getOpcode() == ISD::SINT_TO_FP)
4894 return isKnownToBeAPowerOfTwo(Val.getOperand(0), Depth + 1);
4895
4896 return false;
4897}
4898
4900 APInt DemandedElts = getDemandAllEltsMask(Op);
4901 return ComputeNumSignBits(Op, DemandedElts, Depth);
4902}
4903
4904unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
4905 unsigned Depth) const {
4906 EVT VT = Op.getValueType();
4907 assert((VT.isInteger() || VT.isFloatingPoint()) && "Invalid VT!");
4908 unsigned VTBits = VT.getScalarSizeInBits();
4909 unsigned NumElts = DemandedElts.getBitWidth();
4910 unsigned Tmp, Tmp2;
4911 unsigned FirstAnswer = 1;
4912
4913 assert((!VT.isScalableVector() || NumElts == 1) &&
4914 "DemandedElts for scalable vectors must be 1 to represent all lanes");
4915
4916 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
4917 const APInt &Val = C->getAPIntValue();
4918 return Val.getNumSignBits();
4919 }
4920
4921 if (Depth >= MaxRecursionDepth)
4922 return 1; // Limit search depth.
4923
4924 if (!DemandedElts)
4925 return 1; // No demanded elts, better to assume we don't know anything.
4926
4927 unsigned Opcode = Op.getOpcode();
4928 switch (Opcode) {
4929 default: break;
4930 case ISD::AssertSext:
4931 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
4932 return VTBits-Tmp+1;
4933 case ISD::AssertZext:
4934 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
4935 return VTBits-Tmp;
4936 case ISD::FREEZE:
4937 if (isGuaranteedNotToBeUndefOrPoison(Op.getOperand(0), DemandedElts,
4938 /*PoisonOnly=*/false))
4939 return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
4940 break;
4941 case ISD::MERGE_VALUES:
4942 return ComputeNumSignBits(Op.getOperand(Op.getResNo()), DemandedElts,
4943 Depth + 1);
4944 case ISD::SPLAT_VECTOR: {
4945 // Check if the sign bits of source go down as far as the truncated value.
4946 unsigned NumSrcBits = Op.getOperand(0).getValueSizeInBits();
4947 unsigned NumSrcSignBits = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
4948 if (NumSrcSignBits > (NumSrcBits - VTBits))
4949 return NumSrcSignBits - (NumSrcBits - VTBits);
4950 break;
4951 }
4952 case ISD::BUILD_VECTOR:
4953 assert(!VT.isScalableVector());
4954 Tmp = VTBits;
4955 for (unsigned i = 0, e = Op.getNumOperands(); (i < e) && (Tmp > 1); ++i) {
4956 if (!DemandedElts[i])
4957 continue;
4958
4959 SDValue SrcOp = Op.getOperand(i);
4960 // BUILD_VECTOR can implicitly truncate sources, we handle this specially
4961 // for constant nodes to ensure we only look at the sign bits.
4963 APInt T = C->getAPIntValue().trunc(VTBits);
4964 Tmp2 = T.getNumSignBits();
4965 } else {
4966 Tmp2 = ComputeNumSignBits(SrcOp, Depth + 1);
4967
4968 if (SrcOp.getValueSizeInBits() != VTBits) {
4969 assert(SrcOp.getValueSizeInBits() > VTBits &&
4970 "Expected BUILD_VECTOR implicit truncation");
4971 unsigned ExtraBits = SrcOp.getValueSizeInBits() - VTBits;
4972 Tmp2 = (Tmp2 > ExtraBits ? Tmp2 - ExtraBits : 1);
4973 }
4974 }
4975 Tmp = std::min(Tmp, Tmp2);
4976 }
4977 return Tmp;
4978
4979 case ISD::VECTOR_COMPRESS: {
4980 SDValue Vec = Op.getOperand(0);
4981 SDValue PassThru = Op.getOperand(2);
4982 Tmp = ComputeNumSignBits(PassThru, DemandedElts, Depth + 1);
4983 if (Tmp == 1)
4984 return 1;
4985 Tmp2 = ComputeNumSignBits(Vec, Depth + 1);
4986 Tmp = std::min(Tmp, Tmp2);
4987 return Tmp;
4988 }
4989
4990 case ISD::VECTOR_SHUFFLE: {
4991 // Collect the minimum number of sign bits that are shared by every vector
4992 // element referenced by the shuffle.
4993 APInt DemandedLHS, DemandedRHS;
4995 assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
4996 if (!getShuffleDemandedElts(NumElts, SVN->getMask(), DemandedElts,
4997 DemandedLHS, DemandedRHS))
4998 return 1;
4999
5000 Tmp = std::numeric_limits<unsigned>::max();
5001 if (!!DemandedLHS)
5002 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1);
5003 if (!!DemandedRHS) {
5004 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1);
5005 Tmp = std::min(Tmp, Tmp2);
5006 }
5007 // If we don't know anything, early out and try computeKnownBits fall-back.
5008 if (Tmp == 1)
5009 break;
5010 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
5011 return Tmp;
5012 }
5013
5014 case ISD::BITCAST: {
5015 if (VT.isScalableVector())
5016 break;
5017 SDValue N0 = Op.getOperand(0);
5018 EVT SrcVT = N0.getValueType();
5019 unsigned SrcBits = SrcVT.getScalarSizeInBits();
5020
5021 // Ignore bitcasts from unsupported types..
5022 if (!(SrcVT.isInteger() || SrcVT.isFloatingPoint()))
5023 break;
5024
5025 // Fast handling of 'identity' bitcasts.
5026 if (VTBits == SrcBits)
5027 return ComputeNumSignBits(N0, DemandedElts, Depth + 1);
5028
5029 bool IsLE = getDataLayout().isLittleEndian();
5030
5031 // Bitcast 'large element' scalar/vector to 'small element' vector.
5032 if ((SrcBits % VTBits) == 0) {
5033 assert(VT.isVector() && "Expected bitcast to vector");
5034
5035 unsigned Scale = SrcBits / VTBits;
5036 APInt SrcDemandedElts =
5037 APIntOps::ScaleBitMask(DemandedElts, NumElts / Scale);
5038
5039 // Fast case - sign splat can be simply split across the small elements.
5040 Tmp = ComputeNumSignBits(N0, SrcDemandedElts, Depth + 1);
5041 if (Tmp == SrcBits)
5042 return VTBits;
5043
5044 // Slow case - determine how far the sign extends into each sub-element.
5045 Tmp2 = VTBits;
5046 for (unsigned i = 0; i != NumElts; ++i)
5047 if (DemandedElts[i]) {
5048 unsigned SubOffset = i % Scale;
5049 SubOffset = (IsLE ? ((Scale - 1) - SubOffset) : SubOffset);
5050 SubOffset = SubOffset * VTBits;
5051 if (Tmp <= SubOffset)
5052 return 1;
5053 Tmp2 = std::min(Tmp2, Tmp - SubOffset);
5054 }
5055 return Tmp2;
5056 }
5057 break;
5058 }
5059
5061 // FP_TO_SINT_SAT produces a signed value that fits in the saturating VT.
5062 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits();
5063 return VTBits - Tmp + 1;
5064 case ISD::SIGN_EXTEND:
5065 Tmp = VTBits - Op.getOperand(0).getScalarValueSizeInBits();
5066 return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1) + Tmp;
5068 // Max of the input and what this extends.
5069 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits();
5070 Tmp = VTBits-Tmp+1;
5071 Tmp2 = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
5072 return std::max(Tmp, Tmp2);
5074 if (VT.isScalableVector())
5075 break;
5076 SDValue Src = Op.getOperand(0);
5077 EVT SrcVT = Src.getValueType();
5078 APInt DemandedSrcElts = DemandedElts.zext(SrcVT.getVectorNumElements());
5079 Tmp = VTBits - SrcVT.getScalarSizeInBits();
5080 return ComputeNumSignBits(Src, DemandedSrcElts, Depth+1) + Tmp;
5081 }
5082 case ISD::SRA:
5083 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
5084 // SRA X, C -> adds C sign bits.
5085 if (std::optional<unsigned> ShAmt =
5086 getValidMinimumShiftAmount(Op, DemandedElts, Depth + 1))
5087 Tmp = std::min(Tmp + *ShAmt, VTBits);
5088 return Tmp;
5089 case ISD::SHL:
5090 if (std::optional<ConstantRange> ShAmtRange =
5091 getValidShiftAmountRange(Op, DemandedElts, Depth + 1)) {
5092 unsigned MaxShAmt = ShAmtRange->getUnsignedMax().getZExtValue();
5093 unsigned MinShAmt = ShAmtRange->getUnsignedMin().getZExtValue();
5094 // Try to look through ZERO/SIGN/ANY_EXTEND. If all extended bits are
5095 // shifted out, then we can compute the number of sign bits for the
5096 // operand being extended. A future improvement could be to pass along the
5097 // "shifted left by" information in the recursive calls to
5098 // ComputeKnownSignBits. Allowing us to handle this more generically.
5099 if (ISD::isExtOpcode(Op.getOperand(0).getOpcode())) {
5100 SDValue Ext = Op.getOperand(0);
5101 EVT ExtVT = Ext.getValueType();
5102 SDValue Extendee = Ext.getOperand(0);
5103 EVT ExtendeeVT = Extendee.getValueType();
5104 unsigned SizeDifference =
5105 ExtVT.getScalarSizeInBits() - ExtendeeVT.getScalarSizeInBits();
5106 if (SizeDifference <= MinShAmt) {
5107 Tmp = SizeDifference +
5108 ComputeNumSignBits(Extendee, DemandedElts, Depth + 1);
5109 if (MaxShAmt < Tmp)
5110 return Tmp - MaxShAmt;
5111 }
5112 }
5113 // shl destroys sign bits, ensure it doesn't shift out all sign bits.
5114 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
5115 if (MaxShAmt < Tmp)
5116 return Tmp - MaxShAmt;
5117 }
5118 break;
5119 case ISD::AND:
5120 case ISD::OR:
5121 case ISD::XOR: // NOT is handled here.
5122 // Logical binary ops preserve the number of sign bits at the worst.
5123 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
5124 if (Tmp != 1) {
5125 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1);
5126 FirstAnswer = std::min(Tmp, Tmp2);
5127 // We computed what we know about the sign bits as our first
5128 // answer. Now proceed to the generic code that uses
5129 // computeKnownBits, and pick whichever answer is better.
5130 }
5131 break;
5132
5133 case ISD::SELECT:
5134 case ISD::VSELECT:
5135 Tmp = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1);
5136 if (Tmp == 1) return 1; // Early out.
5137 Tmp2 = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1);
5138 return std::min(Tmp, Tmp2);
5139 case ISD::SELECT_CC:
5140 Tmp = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1);
5141 if (Tmp == 1) return 1; // Early out.
5142 Tmp2 = ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth+1);
5143 return std::min(Tmp, Tmp2);
5144
5145 case ISD::SMIN:
5146 case ISD::SMAX: {
5147 // If we have a clamp pattern, we know that the number of sign bits will be
5148 // the minimum of the clamp min/max range.
5149 bool IsMax = (Opcode == ISD::SMAX);
5150 ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr;
5151 if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts)))
5152 if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX))
5153 CstHigh =
5154 isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts);
5155 if (CstLow && CstHigh) {
5156 if (!IsMax)
5157 std::swap(CstLow, CstHigh);
5158 if (CstLow->getAPIntValue().sle(CstHigh->getAPIntValue())) {
5159 Tmp = CstLow->getAPIntValue().getNumSignBits();
5160 Tmp2 = CstHigh->getAPIntValue().getNumSignBits();
5161 return std::min(Tmp, Tmp2);
5162 }
5163 }
5164
5165 // Fallback - just get the minimum number of sign bits of the operands.
5166 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
5167 if (Tmp == 1)
5168 return 1; // Early out.
5169 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
5170 return std::min(Tmp, Tmp2);
5171 }
5172 case ISD::UMIN:
5173 case ISD::UMAX:
5174 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
5175 if (Tmp == 1)
5176 return 1; // Early out.
5177 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
5178 return std::min(Tmp, Tmp2);
5179 case ISD::SSUBO_CARRY:
5180 case ISD::USUBO_CARRY:
5181 // sub_carry(x,x,c) -> 0/-1 (sext carry)
5182 if (Op.getResNo() == 0 && Op.getOperand(0) == Op.getOperand(1))
5183 return VTBits;
5184 [[fallthrough]];
5185 case ISD::SADDO:
5186 case ISD::UADDO:
5187 case ISD::SADDO_CARRY:
5188 case ISD::UADDO_CARRY:
5189 case ISD::SSUBO:
5190 case ISD::USUBO:
5191 case ISD::SMULO:
5192 case ISD::UMULO:
5193 if (Op.getResNo() != 1)
5194 break;
5195 // The boolean result conforms to getBooleanContents. Fall through.
5196 // If setcc returns 0/-1, all bits are sign bits.
5197 // We know that we have an integer-based boolean since these operations
5198 // are only available for integer.
5199 if (TLI->getBooleanContents(VT.isVector(), false) ==
5201 return VTBits;
5202 break;
5203 case ISD::SETCC:
5204 case ISD::SETCCCARRY:
5205 case ISD::STRICT_FSETCC:
5206 case ISD::STRICT_FSETCCS: {
5207 unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0;
5208 // If setcc returns 0/-1, all bits are sign bits.
5209 if (TLI->getBooleanContents(Op.getOperand(OpNo).getValueType()) ==
5211 return VTBits;
5212 break;
5213 }
5214 case ISD::ROTL:
5215 case ISD::ROTR:
5216 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
5217
5218 // If we're rotating an 0/-1 value, then it stays an 0/-1 value.
5219 if (Tmp == VTBits)
5220 return VTBits;
5221
5222 if (ConstantSDNode *C =
5223 isConstOrConstSplat(Op.getOperand(1), DemandedElts)) {
5224 unsigned RotAmt = C->getAPIntValue().urem(VTBits);
5225
5226 // Handle rotate right by N like a rotate left by 32-N.
5227 if (Opcode == ISD::ROTR)
5228 RotAmt = (VTBits - RotAmt) % VTBits;
5229
5230 // If we aren't rotating out all of the known-in sign bits, return the
5231 // number that are left. This handles rotl(sext(x), 1) for example.
5232 if (Tmp > (RotAmt + 1)) return (Tmp - RotAmt);
5233 }
5234 break;
5235 case ISD::ADD:
5236 case ISD::ADDC:
5237 // TODO: Move Operand 1 check before Operand 0 check
5238 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
5239 if (Tmp == 1) return 1; // Early out.
5240
5241 // Special case decrementing a value (ADD X, -1):
5242 if (ConstantSDNode *CRHS =
5243 isConstOrConstSplat(Op.getOperand(1), DemandedElts))
5244 if (CRHS->isAllOnes()) {
5245 KnownBits Known =
5246 computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
5247
5248 // If the input is known to be 0 or 1, the output is 0/-1, which is all
5249 // sign bits set.
5250 if ((Known.Zero | 1).isAllOnes())
5251 return VTBits;
5252
5253 // If we are subtracting one from a positive number, there is no carry
5254 // out of the result.
5255 if (Known.isNonNegative())
5256 return Tmp;
5257 }
5258
5259 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
5260 if (Tmp2 == 1) return 1; // Early out.
5261
5262 // Add can have at most one carry bit. Thus we know that the output
5263 // is, at worst, one more bit than the inputs.
5264 return std::min(Tmp, Tmp2) - 1;
5265 case ISD::SUB:
5266 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
5267 if (Tmp2 == 1) return 1; // Early out.
5268
5269 // Handle NEG.
5270 if (ConstantSDNode *CLHS =
5271 isConstOrConstSplat(Op.getOperand(0), DemandedElts))
5272 if (CLHS->isZero()) {
5273 KnownBits Known =
5274 computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
5275 // If the input is known to be 0 or 1, the output is 0/-1, which is all
5276 // sign bits set.
5277 if ((Known.Zero | 1).isAllOnes())
5278 return VTBits;
5279
5280 // If the input is known to be positive (the sign bit is known clear),
5281 // the output of the NEG has the same number of sign bits as the input.
5282 if (Known.isNonNegative())
5283 return Tmp2;
5284
5285 // Otherwise, we treat this like a SUB.
5286 }
5287
5288 // Sub can have at most one carry bit. Thus we know that the output
5289 // is, at worst, one more bit than the inputs.
5290 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
5291 if (Tmp == 1) return 1; // Early out.
5292 return std::min(Tmp, Tmp2) - 1;
5293 case ISD::MUL: {
5294 // The output of the Mul can be at most twice the valid bits in the inputs.
5295 unsigned SignBitsOp0 = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
5296 if (SignBitsOp0 == 1)
5297 break;
5298 unsigned SignBitsOp1 = ComputeNumSignBits(Op.getOperand(1), Depth + 1);
5299 if (SignBitsOp1 == 1)
5300 break;
5301 unsigned OutValidBits =
5302 (VTBits - SignBitsOp0 + 1) + (VTBits - SignBitsOp1 + 1);
5303 return OutValidBits > VTBits ? 1 : VTBits - OutValidBits + 1;
5304 }
5305 case ISD::AVGCEILS:
5306 case ISD::AVGFLOORS:
5307 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
5308 if (Tmp == 1)
5309 return 1; // Early out.
5310 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
5311 return std::min(Tmp, Tmp2);
5312 case ISD::SREM:
5313 // The sign bit is the LHS's sign bit, except when the result of the
5314 // remainder is zero. The magnitude of the result should be less than or
5315 // equal to the magnitude of the LHS. Therefore, the result should have
5316 // at least as many sign bits as the left hand side.
5317 return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
5318 case ISD::TRUNCATE: {
5319 // Check if the sign bits of source go down as far as the truncated value.
5320 unsigned NumSrcBits = Op.getOperand(0).getScalarValueSizeInBits();
5321 unsigned NumSrcSignBits = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
5322 if (NumSrcSignBits > (NumSrcBits - VTBits))
5323 return NumSrcSignBits - (NumSrcBits - VTBits);
5324 break;
5325 }
5326 case ISD::EXTRACT_ELEMENT: {
5327 if (VT.isScalableVector())
5328 break;
5329 const int KnownSign = ComputeNumSignBits(Op.getOperand(0), Depth+1);
5330 const int BitWidth = Op.getValueSizeInBits();
5331 const int Items = Op.getOperand(0).getValueSizeInBits() / BitWidth;
5332
5333 // Get reverse index (starting from 1), Op1 value indexes elements from
5334 // little end. Sign starts at big end.
5335 const int rIndex = Items - 1 - Op.getConstantOperandVal(1);
5336
5337 // If the sign portion ends in our element the subtraction gives correct
5338 // result. Otherwise it gives either negative or > bitwidth result
5339 return std::clamp(KnownSign - rIndex * BitWidth, 1, BitWidth);
5340 }
5342 if (VT.isScalableVector())
5343 break;
5344 // If we know the element index, split the demand between the
5345 // source vector and the inserted element, otherwise assume we need
5346 // the original demanded vector elements and the value.
5347 SDValue InVec = Op.getOperand(0);
5348 SDValue InVal = Op.getOperand(1);
5349 SDValue EltNo = Op.getOperand(2);
5350 bool DemandedVal = true;
5351 APInt DemandedVecElts = DemandedElts;
5352 auto *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
5353 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
5354 unsigned EltIdx = CEltNo->getZExtValue();
5355 DemandedVal = !!DemandedElts[EltIdx];
5356 DemandedVecElts.clearBit(EltIdx);
5357 }
5358 Tmp = std::numeric_limits<unsigned>::max();
5359 if (DemandedVal) {
5360 // TODO - handle implicit truncation of inserted elements.
5361 if (InVal.getScalarValueSizeInBits() != VTBits)
5362 break;
5363 Tmp2 = ComputeNumSignBits(InVal, Depth + 1);
5364 Tmp = std::min(Tmp, Tmp2);
5365 }
5366 if (!!DemandedVecElts) {
5367 Tmp2 = ComputeNumSignBits(InVec, DemandedVecElts, Depth + 1);
5368 Tmp = std::min(Tmp, Tmp2);
5369 }
5370 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
5371 return Tmp;
5372 }
5374 SDValue InVec = Op.getOperand(0);
5375 SDValue EltNo = Op.getOperand(1);
5376 EVT VecVT = InVec.getValueType();
5377 // ComputeNumSignBits not yet implemented for scalable vectors.
5378 if (VecVT.isScalableVector())
5379 break;
5380 const unsigned BitWidth = Op.getValueSizeInBits();
5381 const unsigned EltBitWidth = Op.getOperand(0).getScalarValueSizeInBits();
5382 const unsigned NumSrcElts = VecVT.getVectorNumElements();
5383
5384 // If BitWidth > EltBitWidth the value is anyext:ed, and we do not know
5385 // anything about sign bits. But if the sizes match we can derive knowledge
5386 // about sign bits from the vector operand.
5387 if (BitWidth != EltBitWidth)
5388 break;
5389
5390 // If we know the element index, just demand that vector element, else for
5391 // an unknown element index, ignore DemandedElts and demand them all.
5392 APInt DemandedSrcElts = APInt::getAllOnes(NumSrcElts);
5393 auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
5394 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
5395 DemandedSrcElts =
5396 APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue());
5397
5398 return ComputeNumSignBits(InVec, DemandedSrcElts, Depth + 1);
5399 }
5401 // Offset the demanded elts by the subvector index.
5402 SDValue Src = Op.getOperand(0);
5403
5404 APInt DemandedSrcElts;
5405 if (Src.getValueType().isScalableVector())
5406 DemandedSrcElts = APInt(1, 1);
5407 else {
5408 uint64_t Idx = Op.getConstantOperandVal(1);
5409 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
5410 DemandedSrcElts = DemandedElts.zext(NumSrcElts).shl(Idx);
5411 }
5412 return ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1);
5413 }
5414 case ISD::CONCAT_VECTORS: {
5415 if (VT.isScalableVector())
5416 break;
5417 // Determine the minimum number of sign bits across all demanded
5418 // elts of the input vectors. Early out if the result is already 1.
5419 Tmp = std::numeric_limits<unsigned>::max();
5420 EVT SubVectorVT = Op.getOperand(0).getValueType();
5421 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
5422 unsigned NumSubVectors = Op.getNumOperands();
5423 for (unsigned i = 0; (i < NumSubVectors) && (Tmp > 1); ++i) {
5424 APInt DemandedSub =
5425 DemandedElts.extractBits(NumSubVectorElts, i * NumSubVectorElts);
5426 if (!DemandedSub)
5427 continue;
5428 Tmp2 = ComputeNumSignBits(Op.getOperand(i), DemandedSub, Depth + 1);
5429 Tmp = std::min(Tmp, Tmp2);
5430 }
5431 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
5432 return Tmp;
5433 }
5434 case ISD::INSERT_SUBVECTOR: {
5435 if (VT.isScalableVector())
5436 break;
5437 // Demand any elements from the subvector and the remainder from the src its
5438 // inserted into.
5439 SDValue Src = Op.getOperand(0);
5440 SDValue Sub = Op.getOperand(1);
5441 uint64_t Idx = Op.getConstantOperandVal(2);
5442 unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
5443 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
5444 APInt DemandedSrcElts = DemandedElts;
5445 DemandedSrcElts.clearBits(Idx, Idx + NumSubElts);
5446
5447 Tmp = std::numeric_limits<unsigned>::max();
5448 if (!!DemandedSubElts) {
5449 Tmp = ComputeNumSignBits(Sub, DemandedSubElts, Depth + 1);
5450 if (Tmp == 1)
5451 return 1; // early-out
5452 }
5453 if (!!DemandedSrcElts) {
5454 Tmp2 = ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1);
5455 Tmp = std::min(Tmp, Tmp2);
5456 }
5457 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
5458 return Tmp;
5459 }
5460 case ISD::LOAD: {
5461 // If we are looking at the loaded value of the SDNode.
5462 if (Op.getResNo() != 0)
5463 break;
5464
5466 if (const MDNode *Ranges = LD->getRanges()) {
5467 if (DemandedElts != 1)
5468 break;
5469
5471 if (VTBits > CR.getBitWidth()) {
5472 switch (LD->getExtensionType()) {
5473 case ISD::SEXTLOAD:
5474 CR = CR.signExtend(VTBits);
5475 break;
5476 case ISD::ZEXTLOAD:
5477 CR = CR.zeroExtend(VTBits);
5478 break;
5479 default:
5480 break;
5481 }
5482 }
5483
5484 if (VTBits != CR.getBitWidth())
5485 break;
5486 return std::min(CR.getSignedMin().getNumSignBits(),
5488 }
5489
5490 unsigned ExtType = LD->getExtensionType();
5491 switch (ExtType) {
5492 default:
5493 break;
5494 case ISD::SEXTLOAD: // e.g. i16->i32 = '17' bits known.
5495 Tmp = LD->getMemoryVT().getScalarSizeInBits();
5496 return VTBits - Tmp + 1;
5497 case ISD::ZEXTLOAD: // e.g. i16->i32 = '16' bits known.
5498 Tmp = LD->getMemoryVT().getScalarSizeInBits();
5499 return VTBits - Tmp;
5500 case ISD::NON_EXTLOAD:
5501 if (const Constant *Cst = TLI->getTargetConstantFromLoad(LD)) {
5502 // We only need to handle vectors - computeKnownBits should handle
5503 // scalar cases.
5504 Type *CstTy = Cst->getType();
5505 if (CstTy->isVectorTy() && !VT.isScalableVector() &&
5506 (NumElts * VTBits) == CstTy->getPrimitiveSizeInBits() &&
5507 VTBits == CstTy->getScalarSizeInBits()) {
5508 Tmp = VTBits;
5509 for (unsigned i = 0; i != NumElts; ++i) {
5510 if (!DemandedElts[i])
5511 continue;
5512 if (Constant *Elt = Cst->getAggregateElement(i)) {
5513 if (auto *CInt = dyn_cast<ConstantInt>(Elt)) {
5514 const APInt &Value = CInt->getValue();
5515 Tmp = std::min(Tmp, Value.getNumSignBits());
5516 continue;
5517 }
5518 if (auto *CFP = dyn_cast<ConstantFP>(Elt)) {
5519 APInt Value = CFP->getValueAPF().bitcastToAPInt();
5520 Tmp = std::min(Tmp, Value.getNumSignBits());
5521 continue;
5522 }
5523 }
5524 // Unknown type. Conservatively assume no bits match sign bit.
5525 return 1;
5526 }
5527 return Tmp;
5528 }
5529 }
5530 break;
5531 }
5532
5533 break;
5534 }
5537 case ISD::ATOMIC_SWAP:
5549 case ISD::ATOMIC_LOAD: {
5550 auto *AT = cast<AtomicSDNode>(Op);
5551 // If we are looking at the loaded value.
5552 if (Op.getResNo() == 0) {
5553 Tmp = AT->getMemoryVT().getScalarSizeInBits();
5554 if (Tmp == VTBits)
5555 return 1; // early-out
5556
5557 // For atomic_load, prefer to use the extension type.
5558 if (Op->getOpcode() == ISD::ATOMIC_LOAD) {
5559 switch (AT->getExtensionType()) {
5560 default:
5561 break;
5562 case ISD::SEXTLOAD:
5563 return VTBits - Tmp + 1;
5564 case ISD::ZEXTLOAD:
5565 return VTBits - Tmp;
5566 }
5567 }
5568
5569 if (TLI->getExtendForAtomicOps() == ISD::SIGN_EXTEND)
5570 return VTBits - Tmp + 1;
5571 if (TLI->getExtendForAtomicOps() == ISD::ZERO_EXTEND)
5572 return VTBits - Tmp;
5573 }
5574 break;
5575 }
5576 }
5577
5578 // Allow the target to implement this method for its nodes.
5579 if (Opcode >= ISD::BUILTIN_OP_END ||
5580 Opcode == ISD::INTRINSIC_WO_CHAIN ||
5581 Opcode == ISD::INTRINSIC_W_CHAIN ||
5582 Opcode == ISD::INTRINSIC_VOID) {
5583 // TODO: This can probably be removed once target code is audited. This
5584 // is here purely to reduce patch size and review complexity.
5585 if (!VT.isScalableVector()) {
5586 unsigned NumBits =
5587 TLI->ComputeNumSignBitsForTargetNode(Op, DemandedElts, *this, Depth);
5588 if (NumBits > 1)
5589 FirstAnswer = std::max(FirstAnswer, NumBits);
5590 }
5591 }
5592
5593 // Finally, if we can prove that the top bits of the result are 0's or 1's,
5594 // use this information.
5595 KnownBits Known = computeKnownBits(Op, DemandedElts, Depth);
5596 return std::max(FirstAnswer, Known.countMinSignBits());
5597}
5598
5600 unsigned Depth) const {
 // Significant bits = scalar bit width minus redundant sign bits, plus one so
 // the sign bit itself is always counted (an all-sign-bits value needs 1 bit).
5601 unsigned SignBits = ComputeNumSignBits(Op, Depth);
5602 return Op.getScalarValueSizeInBits() - SignBits + 1;
5603}
5604
5606 const APInt &DemandedElts,
5607 unsigned Depth) const {
 // Variant restricted to the demanded vector elements: significant bits =
 // scalar width minus redundant sign bits, plus one for the sign bit itself.
5608 unsigned SignBits = ComputeNumSignBits(Op, DemandedElts, Depth);
5609 return Op.getScalarValueSizeInBits() - SignBits + 1;
5610}
5611
5613 unsigned Depth) const {
5614 // Early out for FREEZE.
 // A FREEZE result is well-defined by construction, so it can never be
 // undef or poison regardless of its operand.
5615 if (Op.getOpcode() == ISD::FREEZE)
5616 return true;
5617
 // Otherwise demand every element and defer to the element-aware overload.
5618 APInt DemandedElts = getDemandAllEltsMask(Op);
5619 return isGuaranteedNotToBeUndefOrPoison(Op, DemandedElts, PoisonOnly, Depth);
5620}
5621
5623 const APInt &DemandedElts,
5624 bool PoisonOnly,
5625 unsigned Depth) const {
 // Conservatively decide whether Op, for the demanded elements only, is
 // guaranteed not to be undef/poison. When PoisonOnly is set, undef values
 // are acceptable and only poison disqualifies (see the ISD::UNDEF case).
5626 unsigned Opcode = Op.getOpcode();
5627
5628 // Early out for FREEZE.
5629 if (Opcode == ISD::FREEZE)
5630 return true;
5631
 // Bound the recursion; give a conservative "don't know" answer past it.
5632 if (Depth >= MaxRecursionDepth)
5633 return false; // Limit search depth.
5634
 // Plain integer/FP constants are always well-defined.
5635 if (isIntOrFPConstant(Op))
5636 return true;
5637
5638 switch (Opcode) {
 // Node kinds that are always well-defined values.
5639 case ISD::CONDCODE:
5640 case ISD::VALUETYPE:
5641 case ISD::FrameIndex:
5643 case ISD::CopyFromReg:
5644 return true;
5645
5646 case ISD::POISON:
5647 return false;
5648
 // Undef is tolerated only when the query is restricted to poison.
5649 case ISD::UNDEF:
5650 return PoisonOnly;
5651
5652 case ISD::BUILD_VECTOR:
5653 // NOTE: BUILD_VECTOR has implicit truncation of wider scalar elements -
5654 // this shouldn't affect the result.
 // Every demanded lane must come from a well-defined scalar operand.
5655 for (unsigned i = 0, e = Op.getNumOperands(); i < e; ++i) {
5656 if (!DemandedElts[i])
5657 continue;
5659 Depth + 1))
5660 return false;
5661 }
5662 return true;
5663
 // Map the demanded elements into the source vector's lane space and
 // recurse on the source.
5665 SDValue Src = Op.getOperand(0);
5666 if (Src.getValueType().isScalableVector())
5667 break;
5668 uint64_t Idx = Op.getConstantOperandVal(1);
5669 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
5670 APInt DemandedSrcElts = DemandedElts.zext(NumSrcElts).shl(Idx);
5671 return isGuaranteedNotToBeUndefOrPoison(Src, DemandedSrcElts, PoisonOnly,
5672 Depth + 1);
5673 }
5674
5675 case ISD::INSERT_SUBVECTOR: {
5676 if (Op.getValueType().isScalableVector())
5677 break;
 // Split the demand between the inserted subvector and the remaining
 // lanes of the base vector, then check both sides.
5678 SDValue Src = Op.getOperand(0);
5679 SDValue Sub = Op.getOperand(1);
5680 uint64_t Idx = Op.getConstantOperandVal(2);
5681 unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
5682 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
5683 APInt DemandedSrcElts = DemandedElts;
5684 DemandedSrcElts.clearBits(Idx, Idx + NumSubElts);
5685
5686 if (!!DemandedSubElts && !isGuaranteedNotToBeUndefOrPoison(
5687 Sub, DemandedSubElts, PoisonOnly, Depth + 1))
5688 return false;
5689 if (!!DemandedSrcElts && !isGuaranteedNotToBeUndefOrPoison(
5690 Src, DemandedSrcElts, PoisonOnly, Depth + 1))
5691 return false;
5692 return true;
5693 }
5694
 // With a known in-bounds index, only that single source lane matters.
5696 SDValue Src = Op.getOperand(0);
5697 auto *IndexC = dyn_cast<ConstantSDNode>(Op.getOperand(1));
5698 EVT SrcVT = Src.getValueType();
5699 if (SrcVT.isFixedLengthVector() && IndexC &&
5700 IndexC->getAPIntValue().ult(SrcVT.getVectorNumElements())) {
5701 APInt DemandedSrcElts = APInt::getOneBitSet(SrcVT.getVectorNumElements(),
5702 IndexC->getZExtValue())
5703 return isGuaranteedNotToBeUndefOrPoison(Src, DemandedSrcElts, PoisonOnly,
5704 Depth + 1);
5705 }
5706 break;
5707 }
5708
 // For a known in-bounds insertion index, check the inserted scalar (if its
 // lane is demanded) and the remaining demanded lanes of the base vector.
5710 SDValue InVec = Op.getOperand(0);
5711 SDValue InVal = Op.getOperand(1);
5712 SDValue EltNo = Op.getOperand(2);
5713 EVT VT = InVec.getValueType();
5714 auto *IndexC = dyn_cast<ConstantSDNode>(EltNo);
5715 if (IndexC && VT.isFixedLengthVector() &&
5716 IndexC->getAPIntValue().ult(VT.getVectorNumElements())) {
5717 if (DemandedElts[IndexC->getZExtValue()] &&
5719 return false;
5720 APInt InVecDemandedElts = DemandedElts;
5721 InVecDemandedElts.clearBit(IndexC->getZExtValue());
5722 if (!!InVecDemandedElts &&
5724 peekThroughInsertVectorElt(InVec, InVecDemandedElts),
5725 InVecDemandedElts, PoisonOnly, Depth + 1))
5726 return false;
5727 return true;
5728 }
5729 break;
5730 }
5731
5733 // Check upper (known undef) elements.
5734 if (DemandedElts.ugt(1) && !PoisonOnly)
5735 return false;
5736 // Check element zero.
5737 if (DemandedElts[0] && !isGuaranteedNotToBeUndefOrPoison(
5738 Op.getOperand(0), PoisonOnly, Depth + 1))
5739 return false;
5740 return true;
5741
5742 case ISD::SPLAT_VECTOR:
 // All lanes mirror the scalar operand; check it once.
5743 return isGuaranteedNotToBeUndefOrPoison(Op.getOperand(0), PoisonOnly,
5744 Depth + 1);
5745
5746 case ISD::VECTOR_SHUFFLE: {
 // Partition the demand across the two shuffle inputs; undef mask entries
 // are rejected up front (AllowUndefElts=false).
5747 APInt DemandedLHS, DemandedRHS;
5748 auto *SVN = cast<ShuffleVectorSDNode>(Op);
5749 if (!getShuffleDemandedElts(DemandedElts.getBitWidth(), SVN->getMask(),
5750 DemandedElts, DemandedLHS, DemandedRHS,
5751 /*AllowUndefElts=*/false))
5752 return false;
5753 if (!DemandedLHS.isZero() &&
5754 !isGuaranteedNotToBeUndefOrPoison(Op.getOperand(0), DemandedLHS,
5755 PoisonOnly, Depth + 1))
5756 return false;
5757 if (!DemandedRHS.isZero() &&
5758 !isGuaranteedNotToBeUndefOrPoison(Op.getOperand(1), DemandedRHS,
5759 PoisonOnly, Depth + 1))
5760 return false;
5761 return true;
5762 }
5763
5764 case ISD::SHL:
5765 case ISD::SRL:
5766 case ISD::SRA:
5767 // Shift amount operand is checked by canCreateUndefOrPoison. So it is
5768 // enough to check operand 0 if Op can't create undef/poison.
5769 return !canCreateUndefOrPoison(Op, DemandedElts, PoisonOnly,
5770 /*ConsiderFlags*/ true, Depth) &&
5771 isGuaranteedNotToBeUndefOrPoison(Op.getOperand(0), DemandedElts,
5772 PoisonOnly, Depth + 1);
5773
5774 case ISD::BSWAP:
5775 case ISD::CTPOP:
5776 case ISD::BITREVERSE:
5777 case ISD::AND:
5778 case ISD::OR:
5779 case ISD::XOR:
5780 case ISD::ADD:
5781 case ISD::SUB:
5782 case ISD::MUL:
5783 case ISD::SADDSAT:
5784 case ISD::UADDSAT:
5785 case ISD::SSUBSAT:
5786 case ISD::USUBSAT:
5787 case ISD::SSHLSAT:
5788 case ISD::USHLSAT:
5789 case ISD::SMIN:
5790 case ISD::SMAX:
5791 case ISD::UMIN:
5792 case ISD::UMAX:
5793 case ISD::ZERO_EXTEND:
5794 case ISD::SIGN_EXTEND:
5795 case ISD::ANY_EXTEND:
5796 case ISD::TRUNCATE:
5797 case ISD::VSELECT: {
5798 // If Op can't create undef/poison and none of its operands are undef/poison
5799 // then Op is never undef/poison. A difference from the more common check
5800 // below, outside the switch, is that we handle elementwise operations for
5801 // which the DemandedElts mask is valid for all operands here.
5802 return !canCreateUndefOrPoison(Op, DemandedElts, PoisonOnly,
5803 /*ConsiderFlags*/ true, Depth) &&
5804 all_of(Op->ops(), [&](SDValue V) {
5805 return isGuaranteedNotToBeUndefOrPoison(V, DemandedElts,
5806 PoisonOnly, Depth + 1);
5807 });
5808 }
5809
5810 // TODO: Search for noundef attributes from library functions.
5811
5812 // TODO: Pointers dereferenced by ISD::LOAD/STORE ops are noundef.
5813
5814 default:
5815 // Allow the target to implement this method for its nodes.
5816 if (Opcode >= ISD::BUILTIN_OP_END || Opcode == ISD::INTRINSIC_WO_CHAIN ||
5817 Opcode == ISD::INTRINSIC_W_CHAIN || Opcode == ISD::INTRINSIC_VOID)
5818 return TLI->isGuaranteedNotToBeUndefOrPoisonForTargetNode(
5819 Op, DemandedElts, *this, PoisonOnly, Depth);
5820 break;
5821 }
5822
5823 // If Op can't create undef/poison and none of its operands are undef/poison
5824 // then Op is never undef/poison.
5825 // NOTE: TargetNodes can handle this in themselves in
5826 // isGuaranteedNotToBeUndefOrPoisonForTargetNode or let
5827 // TargetLowering::isGuaranteedNotToBeUndefOrPoisonForTargetNode handle it.
5828 return !canCreateUndefOrPoison(Op, PoisonOnly, /*ConsiderFlags*/ true,
5829 Depth) &&
5830 all_of(Op->ops(), [&](SDValue V) {
5831 return isGuaranteedNotToBeUndefOrPoison(V, PoisonOnly, Depth + 1);
5832 });
5833}
5834
5836 bool ConsiderFlags,
5837 unsigned Depth) const {
 // Convenience wrapper: demand every element and defer to the
 // DemandedElts-aware overload.
5838 APInt DemandedElts = getDemandAllEltsMask(Op);
5839 return canCreateUndefOrPoison(Op, DemandedElts, PoisonOnly, ConsiderFlags,
5840 Depth);
5841}
5842
5844 bool PoisonOnly, bool ConsiderFlags,
5845 unsigned Depth) const {
5846 if (ConsiderFlags && Op->hasPoisonGeneratingFlags())
5847 return true;
5848
5849 unsigned Opcode = Op.getOpcode();
5850 switch (Opcode) {
5851 case ISD::AssertSext:
5852 case ISD::AssertZext:
5853 case ISD::AssertAlign:
5855 // Assertion nodes can create poison if the assertion fails.
5856 return true;
5857
5858 case ISD::FREEZE:
5862 case ISD::SADDSAT:
5863 case ISD::UADDSAT:
5864 case ISD::SSUBSAT:
5865 case ISD::USUBSAT:
5866 case ISD::MULHU:
5867 case ISD::MULHS:
5868 case ISD::AVGFLOORS:
5869 case ISD::AVGFLOORU:
5870 case ISD::AVGCEILS:
5871 case ISD::AVGCEILU:
5872 case ISD::ABDU:
5873 case ISD::ABDS:
5874 case ISD::SMIN:
5875 case ISD::SMAX:
5876 case ISD::SCMP:
5877 case ISD::UMIN:
5878 case ISD::UMAX:
5879 case ISD::UCMP:
5880 case ISD::AND:
5881 case ISD::XOR:
5882 case ISD::ROTL:
5883 case ISD::ROTR:
5884 case ISD::FSHL:
5885 case ISD::FSHR:
5886 case ISD::BSWAP:
5887 case ISD::CTTZ:
5888 case ISD::CTLZ:
5889 case ISD::CTLS:
5890 case ISD::CTPOP:
5891 case ISD::BITREVERSE:
5892 case ISD::PARITY:
5893 case ISD::SIGN_EXTEND:
5894 case ISD::TRUNCATE:
5898 case ISD::BITCAST:
5899 case ISD::BUILD_VECTOR:
5900 case ISD::BUILD_PAIR:
5901 case ISD::SPLAT_VECTOR:
5902 case ISD::FABS:
5903 return false;
5904
5905 case ISD::ABS:
5906 // ISD::ABS defines abs(INT_MIN) -> INT_MIN and never generates poison.
5907 // Different to Intrinsic::abs.
5908 return false;
5909
5910 case ISD::ADDC:
5911 case ISD::SUBC:
5912 case ISD::ADDE:
5913 case ISD::SUBE:
5914 case ISD::SADDO:
5915 case ISD::SSUBO:
5916 case ISD::SMULO:
5917 case ISD::SADDO_CARRY:
5918 case ISD::SSUBO_CARRY:
5919 case ISD::UADDO:
5920 case ISD::USUBO:
5921 case ISD::UMULO:
5922 case ISD::UADDO_CARRY:
5923 case ISD::USUBO_CARRY:
5924 // No poison on result or overflow flags.
5925 return false;
5926
5927 case ISD::SELECT_CC:
5928 case ISD::SETCC: {
5929 // Integer setcc cannot create undef or poison.
5930 if (Op.getOperand(0).getValueType().isInteger())
5931 return false;
5932
5933 // FP compares are more complicated. They can create poison for nan/infinity
5934 // based on options and flags. The options and flags also cause special
5935 // nonan condition codes to be used. Those condition codes may be preserved
5936 // even if the nonan flag is dropped somewhere.
5937 unsigned CCOp = Opcode == ISD::SETCC ? 2 : 4;
5938 ISD::CondCode CCCode = cast<CondCodeSDNode>(Op.getOperand(CCOp))->get();
5939 return (unsigned)CCCode & 0x10U;
5940 }
5941
5942 case ISD::OR:
5943 case ISD::ZERO_EXTEND:
5944 case ISD::SELECT:
5945 case ISD::VSELECT:
5946 case ISD::ADD:
5947 case ISD::SUB:
5948 case ISD::MUL:
5949 case ISD::FNEG:
5950 case ISD::FADD:
5951 case ISD::FSUB:
5952 case ISD::FMUL:
5953 case ISD::FDIV:
5954 case ISD::FREM:
5955 case ISD::FCOPYSIGN:
5956 case ISD::FMA:
5957 case ISD::FMAD:
5958 case ISD::FMULADD:
5959 case ISD::FP_EXTEND:
5965 // No poison except from flags (which is handled above)
5966 return false;
5967
5968 case ISD::SHL:
5969 case ISD::SRL:
5970 case ISD::SRA:
5971 // If the max shift amount isn't in range, then the shift can
5972 // create poison.
5973 return !getValidMaximumShiftAmount(Op, DemandedElts, Depth + 1);
5974
5977 // If the amount is zero then the result will be poison.
5978 // TODO: Add isKnownNeverZero DemandedElts handling.
5979 return !isKnownNeverZero(Op.getOperand(0), Depth + 1);
5980
5982 // Check if we demand any upper (undef) elements.
5983 return !PoisonOnly && DemandedElts.ugt(1);
5984
5987 // Ensure that the element index is in bounds.
5988 EVT VecVT = Op.getOperand(0).getValueType();
5989 SDValue Idx = Op.getOperand(Opcode == ISD::INSERT_VECTOR_ELT ? 2 : 1);
5990 KnownBits KnownIdx = computeKnownBits(Idx, Depth + 1);
5991 return KnownIdx.getMaxValue().uge(VecVT.getVectorMinNumElements());
5992 }
5993
5994 case ISD::VECTOR_SHUFFLE: {
5995 // Check for any demanded shuffle element that is undef.
5996 auto *SVN = cast<ShuffleVectorSDNode>(Op);
5997 for (auto [Idx, Elt] : enumerate(SVN->getMask()))
5998 if (Elt < 0 && DemandedElts[Idx])
5999 return true;
6000 return false;
6001 }
6002
6004 return false;
6005
6006 default:
6007 // Allow the target to implement this method for its nodes.
6008 if (Opcode >= ISD::BUILTIN_OP_END || Opcode == ISD::INTRINSIC_WO_CHAIN ||
6009 Opcode == ISD::INTRINSIC_W_CHAIN || Opcode == ISD::INTRINSIC_VOID)
6010 return TLI->canCreateUndefOrPoisonForTargetNode(
6011 Op, DemandedElts, *this, PoisonOnly, ConsiderFlags, Depth);
6012 break;
6013 }
6014
6015 // Be conservative and return true.
6016 return true;
6017}
6018
6019bool SelectionDAG::isADDLike(SDValue Op, bool NoWrap) const {
6020 unsigned Opcode = Op.getOpcode();
6021 if (Opcode == ISD::OR)
6022 return Op->getFlags().hasDisjoint() ||
6023 haveNoCommonBitsSet(Op.getOperand(0), Op.getOperand(1));
6024 if (Opcode == ISD::XOR)
6025 return !NoWrap && isMinSignedConstant(Op.getOperand(1));
6026 return false;
6027}
6028
6030 return Op.getNumOperands() == 2 && isa<ConstantSDNode>(Op.getOperand(1)) &&
6031 (Op.isAnyAdd() || isADDLike(Op));
6032}
6033
6035 FPClassTest InterestedClasses,
6036 unsigned Depth) const {
6037 APInt DemandedElts = getDemandAllEltsMask(Op);
6038 return computeKnownFPClass(Op, DemandedElts, InterestedClasses, Depth);
6039}
6040
6042 const APInt &DemandedElts,
6043 FPClassTest InterestedClasses,
6044 unsigned Depth) const {
6045 KnownFPClass Known;
6046
6047 if (const auto *CFP = dyn_cast<ConstantFPSDNode>(Op))
6048 return KnownFPClass(CFP->getValueAPF());
6049
6050 if (Depth >= MaxRecursionDepth)
6051 return Known;
6052
6053 if (Op.getOpcode() == ISD::UNDEF)
6054 return Known;
6055
6056 EVT VT = Op.getValueType();
6057 assert(VT.isFloatingPoint() && "Computing KnownFPClass on non-FP op!");
6058 assert((!VT.isFixedLengthVector() ||
6059 DemandedElts.getBitWidth() == VT.getVectorNumElements()) &&
6060 "Unexpected vector size");
6061
6062 if (!DemandedElts)
6063 return Known;
6064
6065 unsigned Opcode = Op.getOpcode();
6066 switch (Opcode) {
6067 case ISD::POISON: {
6068 Known.KnownFPClasses = fcNone;
6069 Known.SignBit = false;
6070 break;
6071 }
6072 case ISD::FNEG: {
6073 Known = computeKnownFPClass(Op.getOperand(0), DemandedElts,
6074 InterestedClasses, Depth + 1);
6075 Known.fneg();
6076 break;
6077 }
6078 case ISD::BUILD_VECTOR: {
6079 assert(!VT.isScalableVector());
6080 bool First = true;
6081 for (unsigned I = 0, E = Op.getNumOperands(); I != E; ++I) {
6082 if (!DemandedElts[I])
6083 continue;
6084
6085 if (First) {
6086 Known =
6087 computeKnownFPClass(Op.getOperand(I), InterestedClasses, Depth + 1);
6088 First = false;
6089 } else {
6090 Known |=
6091 computeKnownFPClass(Op.getOperand(I), InterestedClasses, Depth + 1);
6092 }
6093
6094 if (Known.isUnknown())
6095 break;
6096 }
6097 break;
6098 }
6100 SDValue Src = Op.getOperand(0);
6101 auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
6102 EVT SrcVT = Src.getValueType();
6103 if (SrcVT.isFixedLengthVector() && CIdx) {
6104 if (CIdx->getAPIntValue().ult(SrcVT.getVectorNumElements())) {
6105 APInt DemandedSrcElts = APInt::getOneBitSet(
6106 SrcVT.getVectorNumElements(), CIdx->getZExtValue());
6107 Known = computeKnownFPClass(Src, DemandedSrcElts, InterestedClasses,
6108 Depth + 1);
6109 } else {
6110 // Out of bounds index is poison.
6111 Known.KnownFPClasses = fcNone;
6112 }
6113 } else {
6114 Known = computeKnownFPClass(Src, InterestedClasses, Depth + 1);
6115 }
6116 break;
6117 }
6118 case ISD::SPLAT_VECTOR: {
6119 Known = computeKnownFPClass(Op.getOperand(0), InterestedClasses, Depth + 1);
6120 break;
6121 }
6122 case ISD::BITCAST: {
6123 // FIXME: It should not be necessary to check for an elementwise bitcast.
6124 // If a bitcast is not elementwise between vector / scalar types,
6125 // computeKnownBits already splices the known bits of the source elements
6126 // appropriately so as to line up with the bits of the result's demanded
6127 // elements.
6128 EVT SrcVT = Op.getOperand(0).getValueType();
6129 if (VT.isScalableVector() || SrcVT.isScalableVector())
6130 break;
6131 unsigned VTNumElts = VT.isVector() ? VT.getVectorNumElements() : 1;
6132 unsigned SrcVTNumElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1;
6133 if (VTNumElts != SrcVTNumElts)
6134 break;
6135
6136 KnownBits Bits = computeKnownBits(Op, DemandedElts, Depth + 1);
6137 Known = KnownFPClass::bitcast(VT.getFltSemantics(), Bits);
6138 break;
6139 }
6140 case ISD::FABS: {
6141 Known = computeKnownFPClass(Op.getOperand(0), DemandedElts,
6142 InterestedClasses, Depth + 1);
6143 Known.fabs();
6144 break;
6145 }
6146 case ISD::FCOPYSIGN: {
6147 Known = computeKnownFPClass(Op.getOperand(0), DemandedElts,
6148 InterestedClasses, Depth + 1);
6149 KnownFPClass KnownSign = computeKnownFPClass(Op.getOperand(1), DemandedElts,
6150 InterestedClasses, Depth + 1);
6151 Known.copysign(KnownSign);
6152 break;
6153 }
6154 case ISD::AssertNoFPClass: {
6155 Known = computeKnownFPClass(Op.getOperand(0), DemandedElts,
6156 InterestedClasses, Depth + 1);
6157 FPClassTest AssertedClasses =
6158 static_cast<FPClassTest>(Op->getConstantOperandVal(1));
6159 Known.KnownFPClasses &= ~AssertedClasses;
6160 break;
6161 }
6162 case ISD::SELECT:
6163 case ISD::VSELECT: {
6164 // TODO: Add adjustKnownFPClassForSelectArm clamp recognition as in
6165 // IR-level ValueTracking.
6166 KnownFPClass KnownFalseClass = computeKnownFPClass(
6167 Op.getOperand(2), DemandedElts, InterestedClasses, Depth + 1);
6168 if (KnownFalseClass.isUnknown())
6169 break;
6170 KnownFPClass KnownTrueClass = computeKnownFPClass(
6171 Op.getOperand(1), DemandedElts, InterestedClasses, Depth + 1);
6172 Known = KnownTrueClass.intersectWith(KnownFalseClass);
6173 break;
6174 }
6175 default:
6176 if (Opcode >= ISD::BUILTIN_OP_END || Opcode == ISD::INTRINSIC_WO_CHAIN ||
6177 Opcode == ISD::INTRINSIC_W_CHAIN || Opcode == ISD::INTRINSIC_VOID) {
6178 TLI->computeKnownFPClassForTargetNode(Op, Known, DemandedElts, *this,
6179 Depth);
6180 }
6181 break;
6182 }
6183
6184 return Known;
6185}
6186
6188 unsigned Depth) const {
6189 APInt DemandedElts = getDemandAllEltsMask(Op);
6190 return isKnownNeverNaN(Op, DemandedElts, SNaN, Depth);
6191}
6192
6194 bool SNaN, unsigned Depth) const {
6195 assert(!DemandedElts.isZero() && "No demanded elements");
6196
6197 // If we're told that NaNs won't happen, assume they won't.
6198 if (Op->getFlags().hasNoNaNs())
6199 return true;
6200
6201 if (Depth >= MaxRecursionDepth)
6202 return false; // Limit search depth.
6203
6204 unsigned Opcode = Op.getOpcode();
6205 switch (Opcode) {
6206 case ISD::FADD:
6207 case ISD::FSUB:
6208 case ISD::FMUL:
6209 case ISD::FDIV:
6210 case ISD::FREM:
6211 case ISD::FSIN:
6212 case ISD::FCOS:
6213 case ISD::FTAN:
6214 case ISD::FASIN:
6215 case ISD::FACOS:
6216 case ISD::FATAN:
6217 case ISD::FATAN2:
6218 case ISD::FSINH:
6219 case ISD::FCOSH:
6220 case ISD::FTANH:
6221 case ISD::FMA:
6222 case ISD::FMULADD:
6223 case ISD::FMAD: {
6224 if (SNaN)
6225 return true;
6226 // TODO: Need isKnownNeverInfinity
6227 return false;
6228 }
6229 case ISD::FCANONICALIZE:
6230 case ISD::FEXP:
6231 case ISD::FEXP2:
6232 case ISD::FEXP10:
6233 case ISD::FTRUNC:
6234 case ISD::FFLOOR:
6235 case ISD::FCEIL:
6236 case ISD::FROUND:
6237 case ISD::FROUNDEVEN:
6238 case ISD::LROUND:
6239 case ISD::LLROUND:
6240 case ISD::FRINT:
6241 case ISD::LRINT:
6242 case ISD::LLRINT:
6243 case ISD::FNEARBYINT:
6244 case ISD::FLDEXP: {
6245 if (SNaN)
6246 return true;
6247 return isKnownNeverNaN(Op.getOperand(0), DemandedElts, SNaN, Depth + 1);
6248 }
6249 case ISD::FABS:
6250 case ISD::FNEG:
6251 case ISD::FCOPYSIGN: {
6252 return isKnownNeverNaN(Op.getOperand(0), DemandedElts, SNaN, Depth + 1);
6253 }
6254 case ISD::SELECT:
6255 return isKnownNeverNaN(Op.getOperand(1), DemandedElts, SNaN, Depth + 1) &&
6256 isKnownNeverNaN(Op.getOperand(2), DemandedElts, SNaN, Depth + 1);
6257 case ISD::FP_EXTEND:
6258 case ISD::FP_ROUND: {
6259 if (SNaN)
6260 return true;
6261 return isKnownNeverNaN(Op.getOperand(0), DemandedElts, SNaN, Depth + 1);
6262 }
6263 case ISD::SINT_TO_FP:
6264 case ISD::UINT_TO_FP:
6265 return true;
6266 case ISD::FSQRT: // Need is known positive
6267 case ISD::FLOG:
6268 case ISD::FLOG2:
6269 case ISD::FLOG10:
6270 case ISD::FPOWI:
6271 case ISD::FPOW: {
6272 if (SNaN)
6273 return true;
6274 // TODO: Refine on operand
6275 return false;
6276 }
6277 case ISD::FMINNUM:
6278 case ISD::FMAXNUM:
6279 case ISD::FMINIMUMNUM:
6280 case ISD::FMAXIMUMNUM: {
6281 // Only one needs to be known not-nan, since it will be returned if the
6282 // other ends up being one.
6283 return isKnownNeverNaN(Op.getOperand(0), DemandedElts, SNaN, Depth + 1) ||
6284 isKnownNeverNaN(Op.getOperand(1), DemandedElts, SNaN, Depth + 1);
6285 }
6286 case ISD::FMINNUM_IEEE:
6287 case ISD::FMAXNUM_IEEE: {
6288 if (SNaN)
6289 return true;
6290 // This can return a NaN if either operand is an sNaN, or if both operands
6291 // are NaN.
6292 return (isKnownNeverNaN(Op.getOperand(0), DemandedElts, false, Depth + 1) &&
6293 isKnownNeverSNaN(Op.getOperand(1), DemandedElts, Depth + 1)) ||
6294 (isKnownNeverNaN(Op.getOperand(1), DemandedElts, false, Depth + 1) &&
6295 isKnownNeverSNaN(Op.getOperand(0), DemandedElts, Depth + 1));
6296 }
6297 case ISD::FMINIMUM:
6298 case ISD::FMAXIMUM: {
6299 // TODO: Does this quiet or return the origina NaN as-is?
6300 return isKnownNeverNaN(Op.getOperand(0), DemandedElts, SNaN, Depth + 1) &&
6301 isKnownNeverNaN(Op.getOperand(1), DemandedElts, SNaN, Depth + 1);
6302 }
6304 SDValue Src = Op.getOperand(0);
6305 auto *Idx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
6306 EVT SrcVT = Src.getValueType();
6307 if (SrcVT.isFixedLengthVector() && Idx &&
6308 Idx->getAPIntValue().ult(SrcVT.getVectorNumElements())) {
6309 APInt DemandedSrcElts = APInt::getOneBitSet(SrcVT.getVectorNumElements(),
6310 Idx->getZExtValue());
6311 return isKnownNeverNaN(Src, DemandedSrcElts, SNaN, Depth + 1);
6312 }
6313 return isKnownNeverNaN(Src, SNaN, Depth + 1);
6314 }
6316 SDValue Src = Op.getOperand(0);
6317 if (Src.getValueType().isFixedLengthVector()) {
6318 unsigned Idx = Op.getConstantOperandVal(1);
6319 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
6320 APInt DemandedSrcElts = DemandedElts.zext(NumSrcElts).shl(Idx);
6321 return isKnownNeverNaN(Src, DemandedSrcElts, SNaN, Depth + 1);
6322 }
6323 return isKnownNeverNaN(Src, SNaN, Depth + 1);
6324 }
6325 case ISD::INSERT_SUBVECTOR: {
6326 SDValue BaseVector = Op.getOperand(0);
6327 SDValue SubVector = Op.getOperand(1);
6328 EVT BaseVectorVT = BaseVector.getValueType();
6329 if (BaseVectorVT.isFixedLengthVector()) {
6330 unsigned Idx = Op.getConstantOperandVal(2);
6331 unsigned NumBaseElts = BaseVectorVT.getVectorNumElements();
6332 unsigned NumSubElts = SubVector.getValueType().getVectorNumElements();
6333
6334 // Clear/Extract the bits at the position where the subvector will be
6335 // inserted.
6336 APInt DemandedMask =
6337 APInt::getBitsSet(NumBaseElts, Idx, Idx + NumSubElts);
6338 APInt DemandedSrcElts = DemandedElts & ~DemandedMask;
6339 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
6340
6341 bool NeverNaN = true;
6342 if (!DemandedSrcElts.isZero())
6343 NeverNaN &=
6344 isKnownNeverNaN(BaseVector, DemandedSrcElts, SNaN, Depth + 1);
6345 if (NeverNaN && !DemandedSubElts.isZero())
6346 NeverNaN &=
6347 isKnownNeverNaN(SubVector, DemandedSubElts, SNaN, Depth + 1);
6348 return NeverNaN;
6349 }
6350 return isKnownNeverNaN(BaseVector, SNaN, Depth + 1) &&
6351 isKnownNeverNaN(SubVector, SNaN, Depth + 1);
6352 }
6353 case ISD::BUILD_VECTOR: {
6354 unsigned NumElts = Op.getNumOperands();
6355 for (unsigned I = 0; I != NumElts; ++I)
6356 if (DemandedElts[I] &&
6357 !isKnownNeverNaN(Op.getOperand(I), SNaN, Depth + 1))
6358 return false;
6359 return true;
6360 }
6361 case ISD::SPLAT_VECTOR:
6362 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
6363 case ISD::AssertNoFPClass: {
6364 FPClassTest NoFPClass =
6365 static_cast<FPClassTest>(Op.getConstantOperandVal(1));
6366 if ((NoFPClass & fcNan) == fcNan)
6367 return true;
6368 if (SNaN && (NoFPClass & fcSNan) == fcSNan)
6369 return true;
6370 return isKnownNeverNaN(Op.getOperand(0), DemandedElts, SNaN, Depth + 1);
6371 }
6372 default:
6373 if (Opcode >= ISD::BUILTIN_OP_END || Opcode == ISD::INTRINSIC_WO_CHAIN ||
6374 Opcode == ISD::INTRINSIC_W_CHAIN || Opcode == ISD::INTRINSIC_VOID) {
6375 return TLI->isKnownNeverNaNForTargetNode(Op, DemandedElts, *this, SNaN,
6376 Depth);
6377 }
6378 break;
6379 }
6380
6381 FPClassTest NanMask = SNaN ? fcSNan : fcNan;
6382 KnownFPClass Known = computeKnownFPClass(Op, DemandedElts, NanMask, Depth);
6383 return Known.isKnownNever(NanMask);
6384}
6385
6387 APInt DemandedElts = getDemandAllEltsMask(Op);
6388 return isKnownNeverLogicalZero(Op, DemandedElts, Depth);
6389}
6390
6392 const APInt &DemandedElts,
6393 unsigned Depth) const {
6394 assert(!DemandedElts.isZero() && "No demanded elements");
6395 EVT VT = Op.getValueType();
6396 KnownFPClass Known =
6397 computeKnownFPClass(Op, DemandedElts, fcZero | fcSubnormal, Depth);
6398 return Known.isKnownNeverLogicalZero(getDenormalMode(VT));
6399}
6400
6402 APInt DemandedElts = getDemandAllEltsMask(Op);
6403 return isKnownNeverZero(Op, DemandedElts, Depth);
6404}
6405
6407 unsigned Depth) const {
6408 if (Depth >= MaxRecursionDepth)
6409 return false; // Limit search depth.
6410
6411 EVT OpVT = Op.getValueType();
6412 unsigned BitWidth = OpVT.getScalarSizeInBits();
6413
6414 assert(!Op.getValueType().isFloatingPoint() &&
6415 "Floating point types unsupported - use isKnownNeverLogicalZero");
6416
6417 // If the value is a constant, we can obviously see if it is a zero or not.
6418 auto IsNeverZero = [BitWidth](const ConstantSDNode *C) {
6419 APInt V = C->getAPIntValue().zextOrTrunc(BitWidth);
6420 return !V.isZero();
6421 };
6422
6423 if (ISD::matchUnaryPredicate(Op, IsNeverZero))
6424 return true;
6425
6426 // TODO: Recognize more cases here. Most of the cases are also incomplete to
6427 // some degree.
6428 switch (Op.getOpcode()) {
6429 default:
6430 break;
6431
6432 case ISD::BUILD_VECTOR:
6433 // Are all operands of a build vector constant non-zero?
6434 if (all_of(enumerate(Op->ops()), [&](auto P) {
6435 auto *C = dyn_cast<ConstantSDNode>(P.value());
6436 return !DemandedElts[P.index()] || (C && IsNeverZero(C));
6437 }))
6438 return true;
6439 break;
6440
6441 case ISD::SPLAT_VECTOR:
6442 // Is the operand of a splat vector a constant non-zero?
6443 if (auto *C = dyn_cast<ConstantSDNode>(Op->getOperand(0)))
6444 if (IsNeverZero(C))
6445 return true;
6446 break;
6447
6449 SDValue InVec = Op.getOperand(0);
6450 SDValue EltNo = Op.getOperand(1);
6451 EVT VecVT = InVec.getValueType();
6452
6453 // Skip scalable vectors or implicit extensions.
6454 if (VecVT.isScalableVector() ||
6455 OpVT.getScalarSizeInBits() != VecVT.getScalarSizeInBits())
6456 break;
6457
6458 // If we know the element index, just demand that vector element, else for
6459 // an unknown element index, ignore DemandedElts and demand them all.
6460 const unsigned NumSrcElts = VecVT.getVectorNumElements();
6461 APInt DemandedSrcElts = APInt::getAllOnes(NumSrcElts);
6462 auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
6463 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
6464 DemandedSrcElts =
6465 APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue());
6466
6467 return isKnownNeverZero(InVec, DemandedSrcElts, Depth + 1);
6468 }
6469
6470 case ISD::OR:
6471 return isKnownNeverZero(Op.getOperand(1), DemandedElts, Depth + 1) ||
6472 isKnownNeverZero(Op.getOperand(0), DemandedElts, Depth + 1);
6473
6474 case ISD::VSELECT:
6475 case ISD::SELECT:
6476 return isKnownNeverZero(Op.getOperand(1), DemandedElts, Depth + 1) &&
6477 isKnownNeverZero(Op.getOperand(2), DemandedElts, Depth + 1);
6478
6479 case ISD::SHL: {
6480 if (Op->getFlags().hasNoSignedWrap() || Op->getFlags().hasNoUnsignedWrap())
6481 return isKnownNeverZero(Op.getOperand(0), DemandedElts, Depth + 1);
6482 KnownBits ValKnown =
6483 computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
6484 // 1 << X is never zero.
6485 if (ValKnown.One[0])
6486 return true;
6487 // If max shift cnt of known ones is non-zero, result is non-zero.
6488 APInt MaxCnt = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1)
6489 .getMaxValue();
6490 if (MaxCnt.ult(ValKnown.getBitWidth()) &&
6491 !ValKnown.One.shl(MaxCnt).isZero())
6492 return true;
6493 break;
6494 }
6495
6496 case ISD::VECTOR_SHUFFLE: {
6497 if (Op.getValueType().isScalableVector())
6498 return false;
6499
6500 unsigned NumElts = DemandedElts.getBitWidth();
6501
6502 // All demanded elements from LHS and RHS must be known non-zero.
6503 // Demanded elements with undef shuffle mask elements are unknown.
6504
6505 APInt DemandedLHS, DemandedRHS;
6506 auto *SVN = cast<ShuffleVectorSDNode>(Op);
6507 assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
6508 if (!getShuffleDemandedElts(NumElts, SVN->getMask(), DemandedElts,
6509 DemandedLHS, DemandedRHS))
6510 return false;
6511
6512 return (!DemandedLHS ||
6513 isKnownNeverZero(Op.getOperand(0), DemandedLHS, Depth + 1)) &&
6514 (!DemandedRHS ||
6515 isKnownNeverZero(Op.getOperand(1), DemandedRHS, Depth + 1));
6516 }
6517
6518 case ISD::UADDSAT:
6519 case ISD::UMAX:
6520 return isKnownNeverZero(Op.getOperand(1), DemandedElts, Depth + 1) ||
6521 isKnownNeverZero(Op.getOperand(0), DemandedElts, Depth + 1);
6522
6523 case ISD::UMIN:
6524 return isKnownNeverZero(Op.getOperand(1), DemandedElts, Depth + 1) &&
6525 isKnownNeverZero(Op.getOperand(0), DemandedElts, Depth + 1);
6526
6527 // For smin/smax: If either operand is known negative/positive
6528 // respectively we don't need the other to be known at all.
6529 case ISD::SMAX: {
6530 KnownBits Op1 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
6531 if (Op1.isStrictlyPositive())
6532 return true;
6533
6534 KnownBits Op0 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
6535 if (Op0.isStrictlyPositive())
6536 return true;
6537
6538 if (Op1.isNonZero() && Op0.isNonZero())
6539 return true;
6540
6541 return isKnownNeverZero(Op.getOperand(1), DemandedElts, Depth + 1) &&
6542 isKnownNeverZero(Op.getOperand(0), DemandedElts, Depth + 1);
6543 }
6544 case ISD::SMIN: {
6545 KnownBits Op1 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
6546 if (Op1.isNegative())
6547 return true;
6548
6549 KnownBits Op0 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
6550 if (Op0.isNegative())
6551 return true;
6552
6553 if (Op1.isNonZero() && Op0.isNonZero())
6554 return true;
6555
6556 return isKnownNeverZero(Op.getOperand(1), DemandedElts, Depth + 1) &&
6557 isKnownNeverZero(Op.getOperand(0), DemandedElts, Depth + 1);
6558 }
6559
6560 case ISD::ROTL:
6561 case ISD::ROTR:
6562 case ISD::BITREVERSE:
6563 case ISD::BSWAP:
6564 case ISD::CTPOP:
6565 case ISD::ABS:
6566 return isKnownNeverZero(Op.getOperand(0), DemandedElts, Depth + 1);
6567
6568 case ISD::SRA:
6569 case ISD::SRL: {
6570 if (Op->getFlags().hasExact())
6571 return isKnownNeverZero(Op.getOperand(0), DemandedElts, Depth + 1);
6572 KnownBits ValKnown =
6573 computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
6574 if (ValKnown.isNegative())
6575 return true;
6576 // If max shift cnt of known ones is non-zero, result is non-zero.
6577 APInt MaxCnt = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1)
6578 .getMaxValue();
6579 if (MaxCnt.ult(ValKnown.getBitWidth()) &&
6580 !ValKnown.One.lshr(MaxCnt).isZero())
6581 return true;
6582 break;
6583 }
6584 case ISD::UDIV:
6585 case ISD::SDIV:
6586 // div exact can only produce a zero if the dividend is zero.
6587 // TODO: For udiv this is also true if Op1 u<= Op0
6588 if (Op->getFlags().hasExact())
6589 return isKnownNeverZero(Op.getOperand(0), Depth + 1);
6590 break;
6591
6592 case ISD::ADD:
6593 if (Op->getFlags().hasNoUnsignedWrap())
6594 if (isKnownNeverZero(Op.getOperand(1), DemandedElts, Depth + 1) ||
6595 isKnownNeverZero(Op.getOperand(0), DemandedElts, Depth + 1))
6596 return true;
6597 // TODO: There are a lot more cases we can prove for add.
6598 break;
6599
6600 case ISD::SUB: {
6601 if (isNullConstant(Op.getOperand(0)))
6602 return isKnownNeverZero(Op.getOperand(1), DemandedElts, Depth + 1);
6603
6604 std::optional<bool> ne = KnownBits::ne(
6605 computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1),
6606 computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1));
6607 return ne && *ne;
6608 }
6609
6610 case ISD::MUL:
6611 if (Op->getFlags().hasNoSignedWrap() || Op->getFlags().hasNoUnsignedWrap())
6612 if (isKnownNeverZero(Op.getOperand(1), Depth + 1) &&
6613 isKnownNeverZero(Op.getOperand(0), Depth + 1))
6614 return true;
6615 break;
6616
6617 case ISD::ZERO_EXTEND:
6618 case ISD::SIGN_EXTEND:
6619 return isKnownNeverZero(Op.getOperand(0), DemandedElts, Depth + 1);
6620 case ISD::VSCALE: {
6622 const APInt &Multiplier = Op.getConstantOperandAPInt(0);
6623 ConstantRange CR =
6624 getVScaleRange(&F, Op.getScalarValueSizeInBits()).multiply(Multiplier);
6625 if (!CR.contains(APInt(CR.getBitWidth(), 0)))
6626 return true;
6627 break;
6628 }
6629 }
6630
6632}
6633
6635 if (ConstantFPSDNode *C1 = isConstOrConstSplatFP(Op, true))
6636 return !C1->isNegative();
6637
6638 switch (Op.getOpcode()) {
6639 case ISD::FABS:
6640 case ISD::FEXP:
6641 case ISD::FEXP2:
6642 case ISD::FEXP10:
6643 return true;
6644 default:
6645 return false;
6646 }
6647
6648 llvm_unreachable("covered opcode switch");
6649}
6650
6652 assert(Use.getValueType().isFloatingPoint());
6653 const SDNode *User = Use.getUser();
6654 if (User->getFlags().hasNoSignedZeros())
6655 return true;
6656
6657 unsigned OperandNo = Use.getOperandNo();
6658 // Check if this use is insensitive to the sign of zero
6659 switch (User->getOpcode()) {
6660 case ISD::SETCC:
6661 // Comparisons: IEEE-754 specifies +0.0 == -0.0.
6662 case ISD::FABS:
6663 // fabs always produces +0.0.
6664 return true;
6665 case ISD::FCOPYSIGN:
6666 // copysign overwrites the sign bit of the first operand.
6667 return OperandNo == 0;
6668 case ISD::FADD:
6669 case ISD::FSUB: {
6670 // Arithmetic with non-zero constants fixes the uncertainty around the
6671 // sign bit.
6672 SDValue Other = User->getOperand(1 - OperandNo);
6674 }
6675 case ISD::FP_TO_SINT:
6676 case ISD::FP_TO_UINT:
6677 // fp-to-int conversions normalize signed zeros.
6678 return true;
6679 default:
6680 return false;
6681 }
6682}
6683
6685 if (Op->getFlags().hasNoSignedZeros())
6686 return true;
6687 // FIXME: Limit the amount of checked uses to not introduce a compile-time
6688 // regression. Ideally, this should be implemented as a demanded-bits
6689 // optimization that stems from the users.
6690 if (Op->use_size() > 2)
6691 return false;
6692 return all_of(Op->uses(),
6693 [&](const SDUse &Use) { return canIgnoreSignBitOfZero(Use); });
6694}
6695
6697 // Check the obvious case.
6698 if (A == B) return true;
6699
6700 // For negative and positive zero.
6703 if (CA->isZero() && CB->isZero()) return true;
6704
6705 // Otherwise they may not be equal.
6706 return false;
6707}
6708
6709// Only bits set in Mask must be negated, other bits may be arbitrary.
6711 if (isBitwiseNot(V, AllowUndefs))
6712 return V.getOperand(0);
6713
6714 // Handle any_extend (not (truncate X)) pattern, where Mask only sets
6715 // bits in the non-extended part.
6716 ConstantSDNode *MaskC = isConstOrConstSplat(Mask);
6717 if (!MaskC || V.getOpcode() != ISD::ANY_EXTEND)
6718 return SDValue();
6719 SDValue ExtArg = V.getOperand(0);
6720 if (ExtArg.getScalarValueSizeInBits() >=
6721 MaskC->getAPIntValue().getActiveBits() &&
6722 isBitwiseNot(ExtArg, AllowUndefs) &&
6723 ExtArg.getOperand(0).getOpcode() == ISD::TRUNCATE &&
6724 ExtArg.getOperand(0).getOperand(0).getValueType() == V.getValueType())
6725 return ExtArg.getOperand(0).getOperand(0);
6726 return SDValue();
6727}
6728
6730 // Match masked merge pattern (X & ~M) op (Y & M)
6731 // Including degenerate case (X & ~M) op M
6732 auto MatchNoCommonBitsPattern = [&](SDValue Not, SDValue Mask,
6733 SDValue Other) {
6734 if (SDValue NotOperand =
6735 getBitwiseNotOperand(Not, Mask, /* AllowUndefs */ true)) {
6736 if (NotOperand->getOpcode() == ISD::ZERO_EXTEND ||
6737 NotOperand->getOpcode() == ISD::TRUNCATE)
6738 NotOperand = NotOperand->getOperand(0);
6739
6740 if (Other == NotOperand)
6741 return true;
6742 if (Other->getOpcode() == ISD::AND)
6743 return NotOperand == Other->getOperand(0) ||
6744 NotOperand == Other->getOperand(1);
6745 }
6746 return false;
6747 };
6748
6749 if (A->getOpcode() == ISD::ZERO_EXTEND || A->getOpcode() == ISD::TRUNCATE)
6750 A = A->getOperand(0);
6751
6752 if (B->getOpcode() == ISD::ZERO_EXTEND || B->getOpcode() == ISD::TRUNCATE)
6753 B = B->getOperand(0);
6754
6755 if (A->getOpcode() == ISD::AND)
6756 return MatchNoCommonBitsPattern(A->getOperand(0), A->getOperand(1), B) ||
6757 MatchNoCommonBitsPattern(A->getOperand(1), A->getOperand(0), B);
6758 return false;
6759}
6760
6761// FIXME: unify with llvm::haveNoCommonBitsSet.
6763 assert(A.getValueType() == B.getValueType() &&
6764 "Values must have the same type");
6767 return true;
6770}
6771
6772static SDValue FoldSTEP_VECTOR(const SDLoc &DL, EVT VT, SDValue Step,
6773 SelectionDAG &DAG) {
6774 if (cast<ConstantSDNode>(Step)->isZero())
6775 return DAG.getConstant(0, DL, VT);
6776
6777 return SDValue();
6778}
6779
6782 SelectionDAG &DAG) {
6783 int NumOps = Ops.size();
6784 assert(NumOps != 0 && "Can't build an empty vector!");
6785 assert(!VT.isScalableVector() &&
6786 "BUILD_VECTOR cannot be used with scalable types");
6787 assert(VT.getVectorNumElements() == (unsigned)NumOps &&
6788 "Incorrect element count in BUILD_VECTOR!");
6789
6790 // BUILD_VECTOR of UNDEFs is UNDEF.
6791 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
6792 return DAG.getUNDEF(VT);
6793
6794 // BUILD_VECTOR of seq extract/insert from the same vector + type is Identity.
6795 SDValue IdentitySrc;
6796 bool IsIdentity = true;
6797 for (int i = 0; i != NumOps; ++i) {
6798 if (Ops[i].getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
6799 Ops[i].getOperand(0).getValueType() != VT ||
6800 (IdentitySrc && Ops[i].getOperand(0) != IdentitySrc) ||
6801 !isa<ConstantSDNode>(Ops[i].getOperand(1)) ||
6802 Ops[i].getConstantOperandAPInt(1) != i) {
6803 IsIdentity = false;
6804 break;
6805 }
6806 IdentitySrc = Ops[i].getOperand(0);
6807 }
6808 if (IsIdentity)
6809 return IdentitySrc;
6810
6811 return SDValue();
6812}
6813
6814/// Try to simplify vector concatenation to an input value, undef, or build
6815/// vector.
6818 SelectionDAG &DAG) {
6819 assert(!Ops.empty() && "Can't concatenate an empty list of vectors!");
6821 [Ops](SDValue Op) {
6822 return Ops[0].getValueType() == Op.getValueType();
6823 }) &&
6824 "Concatenation of vectors with inconsistent value types!");
6825 assert((Ops[0].getValueType().getVectorElementCount() * Ops.size()) ==
6826 VT.getVectorElementCount() &&
6827 "Incorrect element count in vector concatenation!");
6828
6829 if (Ops.size() == 1)
6830 return Ops[0];
6831
6832 // Concat of UNDEFs is UNDEF.
6833 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
6834 return DAG.getUNDEF(VT);
6835
6836 // Scan the operands and look for extract operations from a single source
6837 // that correspond to insertion at the same location via this concatenation:
6838 // concat (extract X, 0*subvec_elts), (extract X, 1*subvec_elts), ...
6839 SDValue IdentitySrc;
6840 bool IsIdentity = true;
6841 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
6842 SDValue Op = Ops[i];
6843 unsigned IdentityIndex = i * Op.getValueType().getVectorMinNumElements();
6844 if (Op.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
6845 Op.getOperand(0).getValueType() != VT ||
6846 (IdentitySrc && Op.getOperand(0) != IdentitySrc) ||
6847 Op.getConstantOperandVal(1) != IdentityIndex) {
6848 IsIdentity = false;
6849 break;
6850 }
6851 assert((!IdentitySrc || IdentitySrc == Op.getOperand(0)) &&
6852 "Unexpected identity source vector for concat of extracts");
6853 IdentitySrc = Op.getOperand(0);
6854 }
6855 if (IsIdentity) {
6856 assert(IdentitySrc && "Failed to set source vector of extracts");
6857 return IdentitySrc;
6858 }
6859
6860 // The code below this point is only designed to work for fixed width
6861 // vectors, so we bail out for now.
6862 if (VT.isScalableVector())
6863 return SDValue();
6864
6865 // A CONCAT_VECTOR of scalar sources, such as UNDEF, BUILD_VECTOR and
6866 // single-element INSERT_VECTOR_ELT operands can be simplified to one big
6867 // BUILD_VECTOR.
6868 // FIXME: Add support for SCALAR_TO_VECTOR as well.
6869 EVT SVT = VT.getScalarType();
6871 for (SDValue Op : Ops) {
6872 EVT OpVT = Op.getValueType();
6873 if (Op.isUndef())
6874 Elts.append(OpVT.getVectorNumElements(), DAG.getUNDEF(SVT));
6875 else if (Op.getOpcode() == ISD::BUILD_VECTOR)
6876 Elts.append(Op->op_begin(), Op->op_end());
6877 else if (Op.getOpcode() == ISD::INSERT_VECTOR_ELT &&
6878 OpVT.getVectorNumElements() == 1 &&
6879 isNullConstant(Op.getOperand(2)))
6880 Elts.push_back(Op.getOperand(1));
6881 else
6882 return SDValue();
6883 }
6884
6885 // BUILD_VECTOR requires all inputs to be of the same type, find the
6886 // maximum type and extend them all.
6887 for (SDValue Op : Elts)
6888 SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT);
6889
6890 if (SVT.bitsGT(VT.getScalarType())) {
6891 for (SDValue &Op : Elts) {
6892 if (Op.isUndef())
6893 Op = DAG.getUNDEF(SVT);
6894 else
6895 Op = DAG.getTargetLoweringInfo().isZExtFree(Op.getValueType(), SVT)
6896 ? DAG.getZExtOrTrunc(Op, DL, SVT)
6897 : DAG.getSExtOrTrunc(Op, DL, SVT);
6898 }
6899 }
6900
6901 SDValue V = DAG.getBuildVector(VT, DL, Elts);
6902 NewSDValueDbgMsg(V, "New node fold concat vectors: ", &DAG);
6903 return V;
6904}
6905
/// Gets or creates the specified node.
///
/// Zero-operand form: returns the existing node with this (Opcode, VT)
/// profile from the CSE map if one exists, otherwise creates and registers
/// a new one. The returned SDValue always refers to result number 0.
SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT) {
 SDVTList VTs = getVTList(VT);
 // Build the node's folding profile and probe the CSE map so structurally
 // identical nodes are shared rather than duplicated.
 AddNodeIDNode(ID, Opcode, VTs, {});
 void *IP = nullptr;
 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
 return SDValue(E, 0);

 // No existing node: allocate a fresh one, insert it at the position the
 // failed lookup reserved, and register it with the DAG.
 auto *N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
 CSEMap.InsertNode(N, IP);

 InsertNode(N);
 SDValue V = SDValue(N, 0);
 NewSDValueDbgMsg(V, "Creating new node: ", this);
 return V;
}
6923
6924SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
6925 SDValue N1) {
6926 SDNodeFlags Flags;
6927 if (Inserter)
6928 Flags = Inserter->getFlags();
6929 return getNode(Opcode, DL, VT, N1, Flags);
6930}
6931
// Gets or creates the requested unary node. Performs opcode-specific constant
// folding and algebraic simplification first (possibly returning an operand
// or a constant instead of building a node), then CSEs against existing
// nodes unless the result type is MVT::Glue.
//
// NOTE(review): this block was recovered from a documentation rendering that
// dropped a number of physical source lines (mostly assert continuations,
// some case labels and local declarations). Each such spot is marked
// "[extraction gap]" below -- verify against the full source before editing.
SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                              SDValue N1, const SDNodeFlags Flags) {
  assert(N1.getOpcode() != ISD::DELETED_NODE && "Operand is DELETED_NODE!");

  // Constant fold unary operations with a vector integer or float operand.
  switch (Opcode) {
  default:
    // FIXME: Entirely reasonable to perform folding of other unary
    // operations here as the need arises.
    break;
  case ISD::FNEG:
  case ISD::FABS:
  case ISD::FCEIL:
  case ISD::FTRUNC:
  case ISD::FFLOOR:
  case ISD::FP_EXTEND:
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
  case ISD::FP_TO_FP16:
  case ISD::FP_TO_BF16:
  case ISD::TRUNCATE:
  case ISD::ANY_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::SIGN_EXTEND:
  case ISD::UINT_TO_FP:
  case ISD::SINT_TO_FP:
  case ISD::FP16_TO_FP:
  case ISD::BF16_TO_FP:
  case ISD::BITCAST:
  case ISD::ABS:
  case ISD::BITREVERSE:
  case ISD::BSWAP:
  case ISD::CTLZ:
  // [extraction gap] (likely 'case ISD::CTLZ_ZERO_UNDEF:' -- confirm)
  case ISD::CTTZ:
  // [extraction gap] (likely 'case ISD::CTTZ_ZERO_UNDEF:' -- confirm)
  case ISD::CTPOP:
  case ISD::CTLS:
  case ISD::STEP_VECTOR: {
    SDValue Ops = {N1};
    if (SDValue Fold = FoldConstantArithmetic(Opcode, DL, VT, Ops))
      return Fold;
  }
  }

  // Opcode of the single operand, consulted by many of the per-opcode
  // simplifications below.
  unsigned OpOpcode = N1.getNode()->getOpcode();
  switch (Opcode) {
  case ISD::STEP_VECTOR:
    assert(VT.isScalableVector() &&
           "STEP_VECTOR can only be used with scalable types");
    assert(OpOpcode == ISD::TargetConstant &&
           VT.getVectorElementType() == N1.getValueType() &&
           "Unexpected step operand");
    break;
  case ISD::FREEZE:
    assert(VT == N1.getValueType() && "Unexpected VT!");
    // freeze(x) is a no-op when x is provably neither undef nor poison.
    if (isGuaranteedNotToBeUndefOrPoison(N1, /*PoisonOnly=*/false))
      return N1;
    break;
  case ISD::TokenFactor:
  case ISD::MERGE_VALUES:
  // [extraction gap] (likely 'case ISD::CONCAT_VECTORS:' given the comment
  // below -- confirm)
    return N1; // Factor, merge or concat of one node? No need.
  case ISD::BUILD_VECTOR: {
    // Attempt to simplify BUILD_VECTOR.
    SDValue Ops[] = {N1};
    if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
      return V;
    break;
  }
  case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node");
  case ISD::FP_EXTEND:
    // [extraction gap] (first line of this assert is missing)
           "Invalid FP cast!");
    if (N1.getValueType() == VT) return N1; // noop conversion.
    assert((!VT.isVector() || VT.getVectorElementCount() ==
    // [extraction gap] (assert continuation missing)
           "Vector element count mismatch!");
    assert(N1.getValueType().bitsLT(VT) && "Invalid fpext node, dst < src!");
    if (N1.isUndef())
      return getUNDEF(VT);
    break;
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
    if (N1.isUndef())
      return getUNDEF(VT);
    break;
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    // [us]itofp(undef) = 0, because the result value is bounded.
    if (N1.isUndef())
      return getConstantFP(0.0, DL, VT);
    break;
  case ISD::SIGN_EXTEND:
    assert(VT.isInteger() && N1.getValueType().isInteger() &&
           "Invalid SIGN_EXTEND!");
    assert(VT.isVector() == N1.getValueType().isVector() &&
           "SIGN_EXTEND result type type should be vector iff the operand "
           "type is vector!");
    if (N1.getValueType() == VT) return N1; // noop extension
    assert((!VT.isVector() || VT.getVectorElementCount() ==
    // [extraction gap] (assert continuation missing)
           "Vector element count mismatch!");
    assert(N1.getValueType().bitsLT(VT) && "Invalid sext node, dst < src!");
    // (sext (sext x)) -> (sext x); (sext (zext x)) -> (zext x), preserving
    // the nonneg flag of the inner zext.
    if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND) {
      SDNodeFlags Flags;
      if (OpOpcode == ISD::ZERO_EXTEND)
        Flags.setNonNeg(N1->getFlags().hasNonNeg());
      SDValue NewVal = getNode(OpOpcode, DL, VT, N1.getOperand(0), Flags);
      transferDbgValues(N1, NewVal);
      return NewVal;
    }

    if (OpOpcode == ISD::POISON)
      return getPOISON(VT);

    if (N1.isUndef())
      // sext(undef) = 0, because the top bits will all be the same.
      return getConstant(0, DL, VT);

    // Skip unnecessary sext_inreg pattern:
    // (sext (trunc x)) -> x iff the upper bits are all signbits.
    if (OpOpcode == ISD::TRUNCATE) {
      SDValue OpOp = N1.getOperand(0);
      if (OpOp.getValueType() == VT) {
        unsigned NumSignExtBits =
        // [extraction gap] (initializer expression missing)
        if (ComputeNumSignBits(OpOp) > NumSignExtBits) {
          transferDbgValues(N1, OpOp);
          return OpOp;
        }
      }
    }
    break;
  case ISD::ZERO_EXTEND:
    assert(VT.isInteger() && N1.getValueType().isInteger() &&
           "Invalid ZERO_EXTEND!");
    assert(VT.isVector() == N1.getValueType().isVector() &&
           "ZERO_EXTEND result type type should be vector iff the operand "
           "type is vector!");
    if (N1.getValueType() == VT) return N1; // noop extension
    assert((!VT.isVector() || VT.getVectorElementCount() ==
    // [extraction gap] (assert continuation missing)
           "Vector element count mismatch!");
    assert(N1.getValueType().bitsLT(VT) && "Invalid zext node, dst < src!");
    if (OpOpcode == ISD::ZERO_EXTEND) { // (zext (zext x)) -> (zext x)
      SDNodeFlags Flags;
      Flags.setNonNeg(N1->getFlags().hasNonNeg());
      SDValue NewVal =
          getNode(ISD::ZERO_EXTEND, DL, VT, N1.getOperand(0), Flags);
      transferDbgValues(N1, NewVal);
      return NewVal;
    }

    if (OpOpcode == ISD::POISON)
      return getPOISON(VT);

    if (N1.isUndef())
      // zext(undef) = 0, because the top bits will be zero.
      return getConstant(0, DL, VT);

    // Skip unnecessary zext_inreg pattern:
    // (zext (trunc x)) -> x iff the upper bits are known zero.
    // TODO: Remove (zext (trunc (and x, c))) exception which some targets
    // use to recognise zext_inreg patterns.
    if (OpOpcode == ISD::TRUNCATE) {
      SDValue OpOp = N1.getOperand(0);
      if (OpOp.getValueType() == VT) {
        if (OpOp.getOpcode() != ISD::AND) {
          // [extraction gap] (declaration of 'HiBits' missing; presumably the
          // mask of bits above the truncated width -- confirm)
          if (MaskedValueIsZero(OpOp, HiBits)) {
            transferDbgValues(N1, OpOp);
            return OpOp;
          }
        }
      }
    }
    break;
  case ISD::ANY_EXTEND:
    assert(VT.isInteger() && N1.getValueType().isInteger() &&
           "Invalid ANY_EXTEND!");
    assert(VT.isVector() == N1.getValueType().isVector() &&
           "ANY_EXTEND result type type should be vector iff the operand "
           "type is vector!");
    if (N1.getValueType() == VT) return N1; // noop extension
    assert((!VT.isVector() || VT.getVectorElementCount() ==
    // [extraction gap] (assert continuation missing)
           "Vector element count mismatch!");
    assert(N1.getValueType().bitsLT(VT) && "Invalid anyext node, dst < src!");

    if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
        OpOpcode == ISD::ANY_EXTEND) {
      SDNodeFlags Flags;
      if (OpOpcode == ISD::ZERO_EXTEND)
        Flags.setNonNeg(N1->getFlags().hasNonNeg());
      // (ext (zext x)) -> (zext x) and (ext (sext x)) -> (sext x)
      return getNode(OpOpcode, DL, VT, N1.getOperand(0), Flags);
    }
    if (N1.isUndef())
      return getUNDEF(VT);

    // (ext (trunc x)) -> x
    if (OpOpcode == ISD::TRUNCATE) {
      SDValue OpOp = N1.getOperand(0);
      if (OpOp.getValueType() == VT) {
        transferDbgValues(N1, OpOp);
        return OpOp;
      }
    }
    break;
  case ISD::TRUNCATE:
    assert(VT.isInteger() && N1.getValueType().isInteger() &&
           "Invalid TRUNCATE!");
    assert(VT.isVector() == N1.getValueType().isVector() &&
           "TRUNCATE result type type should be vector iff the operand "
           "type is vector!");
    if (N1.getValueType() == VT) return N1; // noop truncate
    assert((!VT.isVector() || VT.getVectorElementCount() ==
    // [extraction gap] (assert continuation missing)
           "Vector element count mismatch!");
    assert(N1.getValueType().bitsGT(VT) && "Invalid truncate node, src < dst!");
    // (trunc (trunc x)) -> (trunc x)
    if (OpOpcode == ISD::TRUNCATE)
      return getNode(ISD::TRUNCATE, DL, VT, N1.getOperand(0));
    if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
        OpOpcode == ISD::ANY_EXTEND) {
      // If the source is smaller than the dest, we still need an extend.
      // [extraction gap] (condition head missing; compares the extend
      // source's scalar type against VT's scalar type -- confirm)
              VT.getScalarType())) {
        SDNodeFlags Flags;
        if (OpOpcode == ISD::ZERO_EXTEND)
          Flags.setNonNeg(N1->getFlags().hasNonNeg());
        return getNode(OpOpcode, DL, VT, N1.getOperand(0), Flags);
      }
      if (N1.getOperand(0).getValueType().bitsGT(VT))
        return getNode(ISD::TRUNCATE, DL, VT, N1.getOperand(0));
      return N1.getOperand(0);
    }
    if (N1.isUndef())
      return getUNDEF(VT);
    if (OpOpcode == ISD::VSCALE && !NewNodesMustHaveLegalTypes)
      return getVScale(DL, VT,
      // [extraction gap] (second argument missing)
    break;
  // [extraction gap] (likely the *_EXTEND_VECTOR_INREG case labels given the
  // asserts below -- confirm)
    assert(VT.isVector() && "This DAG node is restricted to vector types.");
    assert(N1.getValueType().bitsLE(VT) &&
           "The input must be the same size or smaller than the result.");
    // [extraction gap] (first part of this assert missing)
           "The destination vector type must have fewer lanes than the input.");
    break;
  case ISD::ABS:
    assert(VT.isInteger() && VT == N1.getValueType() && "Invalid ABS!");
    if (N1.isUndef())
      return getConstant(0, DL, VT);
    break;
  case ISD::BSWAP:
    assert(VT.isInteger() && VT == N1.getValueType() && "Invalid BSWAP!");
    assert((VT.getScalarSizeInBits() % 16 == 0) &&
           "BSWAP types must be a multiple of 16 bits!");
    if (N1.isUndef())
      return getUNDEF(VT);
    // bswap(bswap(X)) -> X.
    if (OpOpcode == ISD::BSWAP)
      return N1.getOperand(0);
    break;
  case ISD::BITREVERSE:
    assert(VT.isInteger() && VT == N1.getValueType() && "Invalid BITREVERSE!");
    if (N1.isUndef())
      return getUNDEF(VT);
    break;
  case ISD::BITCAST:
    // [extraction gap] (first line of this assert missing)
           "Cannot BITCAST between types of different sizes!");
    if (VT == N1.getValueType()) return N1; // noop conversion.
    if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x)
      return getNode(ISD::BITCAST, DL, VT, N1.getOperand(0));
    if (N1.isUndef())
      return getUNDEF(VT);
    break;
  // [extraction gap] (likely 'case ISD::SCALAR_TO_VECTOR:' given the assert
  // text below -- confirm)
    assert(VT.isVector() && !N1.getValueType().isVector() &&
           (VT.getVectorElementType() == N1.getValueType() ||
           // [extraction gap] (assert subexpression missing)
            N1.getValueType().isInteger() &&
           // [extraction gap] (assert subexpression missing)
           "Illegal SCALAR_TO_VECTOR node!");
    if (N1.isUndef())
      return getUNDEF(VT);
    // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined.
    if (OpOpcode == ISD::EXTRACT_VECTOR_ELT &&
        // [extraction gap] (condition term missing)
        N1.getConstantOperandVal(1) == 0 &&
        N1.getOperand(0).getValueType() == VT)
      return N1.getOperand(0);
    break;
  case ISD::FNEG:
    // Negation of an unknown bag of bits is still completely undefined.
    if (N1.isUndef())
      return getUNDEF(VT);

    if (OpOpcode == ISD::FNEG) // --X -> X
      return N1.getOperand(0);
    break;
  case ISD::FABS:
    if (OpOpcode == ISD::FNEG) // abs(-X) -> abs(X)
      return getNode(ISD::FABS, DL, VT, N1.getOperand(0));
    break;
  case ISD::VSCALE:
    assert(VT == N1.getValueType() && "Unexpected VT!");
    break;
  case ISD::CTPOP:
    // popcount of an i1 is the i1 itself.
    if (N1.getValueType().getScalarType() == MVT::i1)
      return N1;
    break;
  case ISD::CTLZ:
  case ISD::CTTZ:
    // For i1: count is 1 when the bit is 0, i.e. NOT of the operand.
    if (N1.getValueType().getScalarType() == MVT::i1)
      return getNOT(DL, N1, N1.getValueType());
    break;
  case ISD::CTLS:
    if (N1.getValueType().getScalarType() == MVT::i1)
      return getConstant(0, DL, VT);
    break;
  case ISD::VECREDUCE_ADD:
    // i1 add-reduction is parity, i.e. XOR-reduction.
    if (N1.getValueType().getScalarType() == MVT::i1)
      return getNode(ISD::VECREDUCE_XOR, DL, VT, N1);
    break;
  // [extraction gap] (case labels missing for this arm)
    if (N1.getValueType().getScalarType() == MVT::i1)
      return getNode(ISD::VECREDUCE_OR, DL, VT, N1);
    break;
  // [extraction gap] (case labels missing for this arm)
    if (N1.getValueType().getScalarType() == MVT::i1)
      return getNode(ISD::VECREDUCE_AND, DL, VT, N1);
    break;
  case ISD::SPLAT_VECTOR:
    assert(VT.isVector() && "Wrong return type!");
    // FIXME: Hexagon uses i32 scalar for a floating point zero vector so allow
    // that for now.
    // [extraction gap] (first part of this assert missing)
           (VT.isFloatingPoint() && N1.getValueType() == MVT::i32) ||
           // [extraction gap] (assert subexpression missing)
            N1.getValueType().isInteger() &&
           // [extraction gap] (assert subexpression missing)
           "Wrong operand type!");
    break;
  }

  // No simplification applied: build (or CSE) the node itself.
  SDNode *N;
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = {N1};
  if (VT != MVT::Glue) { // Don't CSE glue producing nodes
    // [extraction gap] (likely 'FoldingSetNodeID ID;' -- confirm)
    AddNodeIDNode(ID, Opcode, VTs, Ops);
    void *IP = nullptr;
    if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
      // Reuse the existing node, narrowing its flags to what both callers
      // can guarantee.
      E->intersectFlagsWith(Flags);
      return SDValue(E, 0);
    }

    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
    N->setFlags(Flags);
    createOperands(N, Ops);
    CSEMap.InsertNode(N, IP);
  } else {
    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
    createOperands(N, Ops);
  }

  InsertNode(N);
  SDValue V = SDValue(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}
7312
7313static std::optional<APInt> FoldValue(unsigned Opcode, const APInt &C1,
7314 const APInt &C2) {
7315 switch (Opcode) {
7316 case ISD::ADD: return C1 + C2;
7317 case ISD::SUB: return C1 - C2;
7318 case ISD::MUL: return C1 * C2;
7319 case ISD::AND: return C1 & C2;
7320 case ISD::OR: return C1 | C2;
7321 case ISD::XOR: return C1 ^ C2;
7322 case ISD::SHL: return C1 << C2;
7323 case ISD::SRL: return C1.lshr(C2);
7324 case ISD::SRA: return C1.ashr(C2);
7325 case ISD::ROTL: return C1.rotl(C2);
7326 case ISD::ROTR: return C1.rotr(C2);
7327 case ISD::SMIN: return C1.sle(C2) ? C1 : C2;
7328 case ISD::SMAX: return C1.sge(C2) ? C1 : C2;
7329 case ISD::UMIN: return C1.ule(C2) ? C1 : C2;
7330 case ISD::UMAX: return C1.uge(C2) ? C1 : C2;
7331 case ISD::SADDSAT: return C1.sadd_sat(C2);
7332 case ISD::UADDSAT: return C1.uadd_sat(C2);
7333 case ISD::SSUBSAT: return C1.ssub_sat(C2);
7334 case ISD::USUBSAT: return C1.usub_sat(C2);
7335 case ISD::SSHLSAT: return C1.sshl_sat(C2);
7336 case ISD::USHLSAT: return C1.ushl_sat(C2);
7337 case ISD::UDIV:
7338 if (!C2.getBoolValue())
7339 break;
7340 return C1.udiv(C2);
7341 case ISD::UREM:
7342 if (!C2.getBoolValue())
7343 break;
7344 return C1.urem(C2);
7345 case ISD::SDIV:
7346 if (!C2.getBoolValue())
7347 break;
7348 return C1.sdiv(C2);
7349 case ISD::SREM:
7350 if (!C2.getBoolValue())
7351 break;
7352 return C1.srem(C2);
7353 case ISD::AVGFLOORS:
7354 return APIntOps::avgFloorS(C1, C2);
7355 case ISD::AVGFLOORU:
7356 return APIntOps::avgFloorU(C1, C2);
7357 case ISD::AVGCEILS:
7358 return APIntOps::avgCeilS(C1, C2);
7359 case ISD::AVGCEILU:
7360 return APIntOps::avgCeilU(C1, C2);
7361 case ISD::ABDS:
7362 return APIntOps::abds(C1, C2);
7363 case ISD::ABDU:
7364 return APIntOps::abdu(C1, C2);
7365 case ISD::MULHS:
7366 return APIntOps::mulhs(C1, C2);
7367 case ISD::MULHU:
7368 return APIntOps::mulhu(C1, C2);
7369 case ISD::CLMUL:
7370 return APIntOps::clmul(C1, C2);
7371 case ISD::CLMULR:
7372 return APIntOps::clmulr(C1, C2);
7373 case ISD::CLMULH:
7374 return APIntOps::clmulh(C1, C2);
7375 }
7376 return std::nullopt;
7377}
7378// Handle constant folding with UNDEF.
7379// TODO: Handle more cases.
7380static std::optional<APInt> FoldValueWithUndef(unsigned Opcode, const APInt &C1,
7381 bool IsUndef1, const APInt &C2,
7382 bool IsUndef2) {
7383 if (!(IsUndef1 || IsUndef2))
7384 return FoldValue(Opcode, C1, C2);
7385
7386 // Fold and(x, undef) -> 0
7387 // Fold mul(x, undef) -> 0
7388 if (Opcode == ISD::AND || Opcode == ISD::MUL)
7389 return APInt::getZero(C1.getBitWidth());
7390
7391 return std::nullopt;
7392}
7393
// Fold a global address plus/minus a constant into a single GlobalAddress
// node with an adjusted offset, when the target permits it. Returns an empty
// SDValue when the fold does not apply.
//
// NOTE(review): the first line of this definition's signature (presumably
// 'SDValue SelectionDAG::FoldSymbolOffset(unsigned Opcode, EVT VT,') was
// dropped by the doc extraction -- confirm against the full source.
                                             const GlobalAddressSDNode *GA,
                                             const SDNode *N2) {
  // Only plain (non-target) GlobalAddress nodes are eligible, and only when
  // the target says folding an offset into this reference is legal.
  if (GA->getOpcode() != ISD::GlobalAddress)
    return SDValue();
  if (!TLI->isOffsetFoldingLegal(GA))
    return SDValue();
  auto *C2 = dyn_cast<ConstantSDNode>(N2);
  if (!C2)
    return SDValue();
  int64_t Offset = C2->getSExtValue();
  switch (Opcode) {
  case ISD::ADD:
  case ISD::PTRADD:
    break;
  // Negate in unsigned arithmetic to avoid UB when Offset == INT64_MIN.
  case ISD::SUB: Offset = -uint64_t(Offset); break;
  default: return SDValue();
  }
  return getGlobalAddress(GA->getGlobal(), SDLoc(C2), VT,
                          GA->getOffset() + uint64_t(Offset));
}
7415
// Returns true when applying Opcode to Ops is guaranteed to produce UNDEF
// (currently: any div/rem whose divisor is zero or undef, including per-lane
// in constant build vectors).
//
// NOTE(review): the signature line of this definition (presumably
// 'bool SelectionDAG::isUndef(unsigned Opcode, ArrayRef<SDValue> Ops) {')
// was dropped by the doc extraction -- confirm against the full source.
  switch (Opcode) {
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::SREM:
  case ISD::UREM: {
    // If a divisor is zero/undef or any element of a divisor vector is
    // zero/undef, the whole op is undef.
    assert(Ops.size() == 2 && "Div/rem should have 2 operands");
    SDValue Divisor = Ops[1];
    if (Divisor.isUndef() || isNullConstant(Divisor))
      return true;

    return ISD::isBuildVectorOfConstantSDNodes(Divisor.getNode()) &&
           llvm::any_of(Divisor->op_values(),
                        [](SDValue V) { return V.isUndef() ||
                                        isNullConstant(V); });
    // TODO: Handle signed overflow.
  }
  // TODO: Handle oversized shifts.
  default:
    return false;
  }
}
7440
// Attempt to constant fold an arbitrary-arity arithmetic node to a constant
// (scalar or vector), returning an empty SDValue when no fold applies.
// Handles scalar integer/FP constants, fshl/fshr, fma-family ops,
// bitcasted build vectors, step_vector arithmetic, and element-wise folding
// of build/splat vectors.
//
// NOTE(review): the first two signature lines of this definition (presumably
// 'SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc
// &DL, EVT VT, ArrayRef<SDValue> Ops,') were dropped by the doc extraction,
// as were several interior lines marked "[extraction gap]" below -- verify
// against the full source before editing.
                                             SDNodeFlags Flags) {
  // If the opcode is a target-specific ISD node, there's nothing we can
  // do here and the operand rules may not line up with the below, so
  // bail early.
  // We can't create a scalar CONCAT_VECTORS so skip it. It will break
  // for concats involving SPLAT_VECTOR. Concats of BUILD_VECTORS are handled by
  // foldCONCAT_VECTORS in getNode before this is called.
  if (Opcode >= ISD::BUILTIN_OP_END || Opcode == ISD::CONCAT_VECTORS)
    return SDValue();

  unsigned NumOps = Ops.size();
  if (NumOps == 0)
    return SDValue();

  if (isUndef(Opcode, Ops))
    return getUNDEF(VT);

  // Handle unary special cases.
  if (NumOps == 1) {
    SDValue N1 = Ops[0];

    // Constant fold unary operations with an integer constant operand. Even
    // opaque constant will be folded, because the folding of unary operations
    // doesn't create new constants with different values. Nevertheless, the
    // opaque flag is preserved during folding to prevent future folding with
    // other constants.
    if (auto *C = dyn_cast<ConstantSDNode>(N1)) {
      const APInt &Val = C->getAPIntValue();
      switch (Opcode) {
      case ISD::SIGN_EXTEND:
        return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT,
                           C->isTargetOpcode(), C->isOpaque());
      case ISD::TRUNCATE:
        if (C->isOpaque())
          break;
        [[fallthrough]];
      case ISD::ZERO_EXTEND:
        return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT,
                           C->isTargetOpcode(), C->isOpaque());
      case ISD::ANY_EXTEND:
        // Some targets like RISCV prefer to sign extend some types.
        if (TLI->isSExtCheaperThanZExt(N1.getValueType(), VT))
          return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT,
                             C->isTargetOpcode(), C->isOpaque());
        return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT,
                           C->isTargetOpcode(), C->isOpaque());
      case ISD::ABS:
        return getConstant(Val.abs(), DL, VT, C->isTargetOpcode(),
                           C->isOpaque());
      case ISD::BITREVERSE:
        return getConstant(Val.reverseBits(), DL, VT, C->isTargetOpcode(),
                           C->isOpaque());
      case ISD::BSWAP:
        return getConstant(Val.byteSwap(), DL, VT, C->isTargetOpcode(),
                           C->isOpaque());
      case ISD::CTPOP:
        return getConstant(Val.popcount(), DL, VT, C->isTargetOpcode(),
                           C->isOpaque());
      case ISD::CTLZ:
      // [extraction gap] (likely 'case ISD::CTLZ_ZERO_UNDEF:' -- confirm)
        return getConstant(Val.countl_zero(), DL, VT, C->isTargetOpcode(),
                           C->isOpaque());
      case ISD::CTTZ:
      // [extraction gap] (likely 'case ISD::CTTZ_ZERO_UNDEF:' -- confirm)
        return getConstant(Val.countr_zero(), DL, VT, C->isTargetOpcode(),
                           C->isOpaque());
      case ISD::CTLS:
        // CTLS returns the number of extra sign bits so subtract one.
        return getConstant(Val.getNumSignBits() - 1, DL, VT,
                           C->isTargetOpcode(), C->isOpaque());
      case ISD::UINT_TO_FP:
      case ISD::SINT_TO_FP: {
        // [extraction gap] (declaration of 'FPV' missing)
        (void)FPV.convertFromAPInt(Val, Opcode == ISD::SINT_TO_FP,
        // [extraction gap] (rounding-mode argument line missing)
        return getConstantFP(FPV, DL, VT);
      }
      case ISD::FP16_TO_FP:
      case ISD::BF16_TO_FP: {
        bool Ignored;
        APFloat FPV(Opcode == ISD::FP16_TO_FP ? APFloat::IEEEhalf()
                                              : APFloat::BFloat(),
                    (Val.getBitWidth() == 16) ? Val : Val.trunc(16));

        // This can return overflow, underflow, or inexact; we don't care.
        // FIXME need to be more flexible about rounding mode.
        // [extraction gap] (the convert call's first line is missing)
                          &Ignored);
        return getConstantFP(FPV, DL, VT);
      }
      case ISD::STEP_VECTOR:
        if (SDValue V = FoldSTEP_VECTOR(DL, VT, N1, *this))
          return V;
        break;
      case ISD::BITCAST:
        // int -> float bitcasts fold by reinterpreting the bits in the
        // matching IEEE semantics.
        if (VT == MVT::f16 && C->getValueType(0) == MVT::i16)
          return getConstantFP(APFloat(APFloat::IEEEhalf(), Val), DL, VT);
        if (VT == MVT::f32 && C->getValueType(0) == MVT::i32)
          return getConstantFP(APFloat(APFloat::IEEEsingle(), Val), DL, VT);
        if (VT == MVT::f64 && C->getValueType(0) == MVT::i64)
          return getConstantFP(APFloat(APFloat::IEEEdouble(), Val), DL, VT);
        if (VT == MVT::f128 && C->getValueType(0) == MVT::i128)
          return getConstantFP(APFloat(APFloat::IEEEquad(), Val), DL, VT);
        break;
      }
    }

    // Constant fold unary operations with a floating point constant operand.
    if (auto *C = dyn_cast<ConstantFPSDNode>(N1)) {
      APFloat V = C->getValueAPF(); // make copy
      switch (Opcode) {
      case ISD::FNEG:
        V.changeSign();
        return getConstantFP(V, DL, VT);
      case ISD::FABS:
        V.clearSign();
        return getConstantFP(V, DL, VT);
      case ISD::FCEIL: {
        APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive);
        // [extraction gap] (status check condition missing)
          return getConstantFP(V, DL, VT);
        return SDValue();
      }
      case ISD::FTRUNC: {
        APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero);
        // [extraction gap] (status check condition missing)
          return getConstantFP(V, DL, VT);
        return SDValue();
      }
      case ISD::FFLOOR: {
        APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative);
        // [extraction gap] (status check condition missing)
          return getConstantFP(V, DL, VT);
        return SDValue();
      }
      case ISD::FP_EXTEND: {
        bool ignored;
        // This can return overflow, underflow, or inexact; we don't care.
        // FIXME need to be more flexible about rounding mode.
        (void)V.convert(VT.getFltSemantics(), APFloat::rmNearestTiesToEven,
                        &ignored);
        return getConstantFP(V, DL, VT);
      }
      case ISD::FP_TO_SINT:
      case ISD::FP_TO_UINT: {
        bool ignored;
        APSInt IntVal(VT.getSizeInBits(), Opcode == ISD::FP_TO_UINT);
        // FIXME need to be more flexible about rounding mode.
        // [extraction gap] (declaration of 's' missing)
            V.convertToInteger(IntVal, APFloat::rmTowardZero, &ignored);
        if (s == APFloat::opInvalidOp) // inexact is OK, in fact usual
          break;
        return getConstant(IntVal, DL, VT);
      }
      case ISD::FP_TO_FP16:
      case ISD::FP_TO_BF16: {
        bool Ignored;
        // This can return overflow, underflow, or inexact; we don't care.
        // FIXME need to be more flexible about rounding mode.
        (void)V.convert(Opcode == ISD::FP_TO_FP16 ? APFloat::IEEEhalf()
                                                  : APFloat::BFloat(),
        // [extraction gap] (rounding-mode argument line missing)
        return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT);
      }
      case ISD::BITCAST:
        // float -> int bitcasts fold to the raw bit pattern.
        if (VT == MVT::i16 && C->getValueType(0) == MVT::f16)
          return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL,
                             VT);
        if (VT == MVT::i16 && C->getValueType(0) == MVT::bf16)
          return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL,
                             VT);
        if (VT == MVT::i32 && C->getValueType(0) == MVT::f32)
          return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), DL,
                             VT);
        if (VT == MVT::i64 && C->getValueType(0) == MVT::f64)
          return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT);
        break;
      }
    }

    // Early-out if we failed to constant fold a bitcast.
    if (Opcode == ISD::BITCAST)
      return SDValue();
  }

  // Handle binops special cases.
  if (NumOps == 2) {
    if (SDValue CFP = foldConstantFPMath(Opcode, DL, VT, Ops))
      return CFP;

    if (auto *C1 = dyn_cast<ConstantSDNode>(Ops[0])) {
      if (auto *C2 = dyn_cast<ConstantSDNode>(Ops[1])) {
        if (C1->isOpaque() || C2->isOpaque())
          return SDValue();

        std::optional<APInt> FoldAttempt =
            FoldValue(Opcode, C1->getAPIntValue(), C2->getAPIntValue());
        if (!FoldAttempt)
          return SDValue();

        SDValue Folded = getConstant(*FoldAttempt, DL, VT);
        assert((!Folded || !VT.isVector()) &&
               "Can't fold vectors ops with scalar operands");
        return Folded;
      }
    }

    // fold (add Sym, c) -> Sym+c
    // [extraction gap] (GlobalAddressSDNode dyn_cast guard missing)
      return FoldSymbolOffset(Opcode, VT, GA, Ops[1].getNode());
    if (TLI->isCommutativeBinOp(Opcode))
    // [extraction gap] (GlobalAddressSDNode dyn_cast guard missing)
        return FoldSymbolOffset(Opcode, VT, GA, Ops[0].getNode());

    // fold (sext_in_reg c1) -> c2
    if (Opcode == ISD::SIGN_EXTEND_INREG) {
      EVT EVT = cast<VTSDNode>(Ops[1])->getVT();

      // Sign extend from EVT's width within a value of ConstantVT's width.
      auto SignExtendInReg = [&](APInt Val, llvm::EVT ConstantVT) {
        unsigned FromBits = EVT.getScalarSizeInBits();
        Val <<= Val.getBitWidth() - FromBits;
        Val.ashrInPlace(Val.getBitWidth() - FromBits);
        return getConstant(Val, DL, ConstantVT);
      };

      if (auto *C1 = dyn_cast<ConstantSDNode>(Ops[0])) {
        const APInt &Val = C1->getAPIntValue();
        return SignExtendInReg(Val, VT);
      }

      // [extraction gap] (guard for the build-vector path missing)
        SmallVector<SDValue, 8> ScalarOps;
        llvm::EVT OpVT = Ops[0].getOperand(0).getValueType();
        for (int I = 0, E = VT.getVectorNumElements(); I != E; ++I) {
          SDValue Op = Ops[0].getOperand(I);
          if (Op.isUndef()) {
            ScalarOps.push_back(getUNDEF(OpVT));
            continue;
          }
          const APInt &Val = cast<ConstantSDNode>(Op)->getAPIntValue();
          ScalarOps.push_back(SignExtendInReg(Val, OpVT));
        }
        return getBuildVector(VT, DL, ScalarOps);
      }

      if (Ops[0].getOpcode() == ISD::SPLAT_VECTOR &&
          isa<ConstantSDNode>(Ops[0].getOperand(0)))
        return getNode(ISD::SPLAT_VECTOR, DL, VT,
                       SignExtendInReg(Ops[0].getConstantOperandAPInt(0),
                                       Ops[0].getOperand(0).getValueType()));
    }
  }

  // Handle fshl/fshr special cases.
  if (Opcode == ISD::FSHL || Opcode == ISD::FSHR) {
    auto *C1 = dyn_cast<ConstantSDNode>(Ops[0]);
    auto *C2 = dyn_cast<ConstantSDNode>(Ops[1]);
    auto *C3 = dyn_cast<ConstantSDNode>(Ops[2]);

    if (C1 && C2 && C3) {
      if (C1->isOpaque() || C2->isOpaque() || C3->isOpaque())
        return SDValue();
      const APInt &V1 = C1->getAPIntValue(), &V2 = C2->getAPIntValue(),
                  &V3 = C3->getAPIntValue();

      APInt FoldedVal = Opcode == ISD::FSHL ? APIntOps::fshl(V1, V2, V3)
                                            : APIntOps::fshr(V1, V2, V3);
      return getConstant(FoldedVal, DL, VT);
    }
  }

  // Handle fma/fmad special cases.
  if (Opcode == ISD::FMA || Opcode == ISD::FMAD || Opcode == ISD::FMULADD) {
    assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
    assert(Ops[0].getValueType() == VT && Ops[1].getValueType() == VT &&
           Ops[2].getValueType() == VT && "FMA types must match!");
    // [extraction gap] (ConstantFPSDNode dyn_casts for C1/C2/C3 missing)
    if (C1 && C2 && C3) {
      APFloat V1 = C1->getValueAPF();
      const APFloat &V2 = C2->getValueAPF();
      const APFloat &V3 = C3->getValueAPF();
      if (Opcode == ISD::FMAD || Opcode == ISD::FMULADD) {
      // [extraction gap] (separate multiply and add calls missing)
      } else
      // [extraction gap] (fused multiply-add call missing)
      return getConstantFP(V1, DL, VT);
    }
  }

  // This is for vector folding only from here on.
  if (!VT.isVector())
    return SDValue();

  ElementCount NumElts = VT.getVectorElementCount();

  // See if we can fold through any bitcasted integer ops.
  if (NumOps == 2 && VT.isFixedLengthVector() && VT.isInteger() &&
      Ops[0].getValueType() == VT && Ops[1].getValueType() == VT &&
      (Ops[0].getOpcode() == ISD::BITCAST ||
       Ops[1].getOpcode() == ISD::BITCAST)) {
    // [extraction gap] (declarations of 'N1'/'N2' missing; presumably the
    // operands peeked through their bitcasts -- confirm)
    auto *BV1 = dyn_cast<BuildVectorSDNode>(N1);
    auto *BV2 = dyn_cast<BuildVectorSDNode>(N2);
    if (BV1 && BV2 && N1.getValueType().isInteger() &&
        N2.getValueType().isInteger()) {
      bool IsLE = getDataLayout().isLittleEndian();
      unsigned EltBits = VT.getScalarSizeInBits();
      SmallVector<APInt> RawBits1, RawBits2;
      BitVector UndefElts1, UndefElts2;
      if (BV1->getConstantRawBits(IsLE, EltBits, RawBits1, UndefElts1) &&
          BV2->getConstantRawBits(IsLE, EltBits, RawBits2, UndefElts2)) {
        SmallVector<APInt> RawBits;
        for (unsigned I = 0, E = NumElts.getFixedValue(); I != E; ++I) {
          std::optional<APInt> Fold = FoldValueWithUndef(
              Opcode, RawBits1[I], UndefElts1[I], RawBits2[I], UndefElts2[I]);
          if (!Fold)
            break;
          RawBits.push_back(*Fold);
        }
        if (RawBits.size() == NumElts.getFixedValue()) {
          // We have constant folded, but we might need to cast this again back
          // to the original (possibly legalized) type.
          EVT BVVT, BVEltVT;
          if (N1.getValueType() == VT) {
            BVVT = N1.getValueType();
            BVEltVT = BV1->getOperand(0).getValueType();
          } else {
            BVVT = N2.getValueType();
            BVEltVT = BV2->getOperand(0).getValueType();
          }
          unsigned BVEltBits = BVEltVT.getSizeInBits();
          SmallVector<APInt> DstBits;
          BitVector DstUndefs;
          // [extraction gap] (the recastRawBits call head is missing)
                            DstBits, RawBits, DstUndefs,
                            BitVector(RawBits.size(), false));
          SmallVector<SDValue> Ops(DstBits.size(), getUNDEF(BVEltVT));
          for (unsigned I = 0, E = DstBits.size(); I != E; ++I) {
            if (DstUndefs[I])
              continue;
            Ops[I] = getConstant(DstBits[I].sext(BVEltBits), DL, BVEltVT);
          }
          return getBitcast(VT, getBuildVector(BVVT, DL, Ops));
        }
      }
    }
    // Logic ops can be folded from raw integer bits - mainly for AVX512 masks.
    if (ISD::isBitwiseLogicOp(Opcode) && isa<ConstantSDNode>(N1) &&
        isa<ConstantSDNode>(N2)) {
      if (SDValue Res = FoldConstantArithmetic(Opcode, DL, N1.getValueType(),
                                               {N1, N2}, Flags))
        return getBitcast(VT, Res);
    }
  }

  // Fold (mul step_vector(C0), C1) to (step_vector(C0 * C1)).
  //      (shl step_vector(C0), C1) -> (step_vector(C0 << C1))
  if ((Opcode == ISD::MUL || Opcode == ISD::SHL) &&
      Ops[0].getOpcode() == ISD::STEP_VECTOR) {
    APInt RHSVal;
    if (ISD::isConstantSplatVector(Ops[1].getNode(), RHSVal)) {
      APInt NewStep = Opcode == ISD::MUL
                          ? Ops[0].getConstantOperandAPInt(0) * RHSVal
                          : Ops[0].getConstantOperandAPInt(0) << RHSVal;
      return getStepVector(DL, VT, NewStep);
    }
  }

  auto IsScalarOrSameVectorSize = [NumElts](const SDValue &Op) {
    return !Op.getValueType().isVector() ||
           Op.getValueType().getVectorElementCount() == NumElts;
  };

  auto IsBuildVectorSplatVectorOrUndef = [](const SDValue &Op) {
    return Op.isUndef() || Op.getOpcode() == ISD::CONDCODE ||
           Op.getOpcode() == ISD::BUILD_VECTOR ||
           Op.getOpcode() == ISD::SPLAT_VECTOR;
  };

  // All operands must be vector types with the same number of elements as
  // the result type and must be either UNDEF or a build/splat vector
  // or UNDEF scalars.
  if (!llvm::all_of(Ops, IsBuildVectorSplatVectorOrUndef) ||
      !llvm::all_of(Ops, IsScalarOrSameVectorSize))
    return SDValue();

  // If we are comparing vectors, then the result needs to be a i1 boolean that
  // is then extended back to the legal result type depending on how booleans
  // are represented.
  EVT SVT = (Opcode == ISD::SETCC ? MVT::i1 : VT.getScalarType());
  ISD::NodeType ExtendCode =
      (Opcode == ISD::SETCC && SVT != VT.getScalarType())
          ? TargetLowering::getExtendForContent(TLI->getBooleanContents(VT))
  // [extraction gap] (the ':' arm of this ternary is missing)

  // Find legal integer scalar type for constant promotion and
  // ensure that its scalar size is at least as large as source.
  EVT LegalSVT = VT.getScalarType();
  if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) {
    LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
    if (LegalSVT.bitsLT(VT.getScalarType()))
      return SDValue();
  }

  // For scalable vector types we know we're dealing with SPLAT_VECTORs. We
  // only have one operand to check. For fixed-length vector types we may have
  // a combination of BUILD_VECTOR and SPLAT_VECTOR.
  unsigned NumVectorElts = NumElts.isScalable() ? 1 : NumElts.getFixedValue();

  // Constant fold each scalar lane separately.
  SmallVector<SDValue, 4> ScalarResults;
  for (unsigned I = 0; I != NumVectorElts; I++) {
    SmallVector<SDValue, 4> ScalarOps;
    for (SDValue Op : Ops) {
      EVT InSVT = Op.getValueType().getScalarType();
      if (Op.getOpcode() != ISD::BUILD_VECTOR &&
          Op.getOpcode() != ISD::SPLAT_VECTOR) {
        if (Op.isUndef())
          ScalarOps.push_back(getUNDEF(InSVT));
        else
          ScalarOps.push_back(Op);
        continue;
      }

      SDValue ScalarOp =
          Op.getOperand(Op.getOpcode() == ISD::SPLAT_VECTOR ? 0 : I);
      EVT ScalarVT = ScalarOp.getValueType();

      // Build vector (integer) scalar operands may need implicit
      // truncation - do this before constant folding.
      if (ScalarVT.isInteger() && ScalarVT.bitsGT(InSVT)) {
        // Don't create illegally-typed nodes unless they're constants or undef
        // - if we fail to constant fold we can't guarantee the (dead) nodes
        // we're creating will be cleaned up before being visited for
        // legalization.
        if (NewNodesMustHaveLegalTypes && !ScalarOp.isUndef() &&
            !isa<ConstantSDNode>(ScalarOp) &&
            TLI->getTypeAction(*getContext(), InSVT) !=
        // [extraction gap] (comparison RHS missing; presumably
        // TargetLowering::TypeLegal -- confirm)
          return SDValue();
        ScalarOp = getNode(ISD::TRUNCATE, DL, InSVT, ScalarOp);
      }

      ScalarOps.push_back(ScalarOp);
    }

    // Constant fold the scalar operands.
    SDValue ScalarResult = getNode(Opcode, DL, SVT, ScalarOps, Flags);

    // Scalar folding only succeeded if the result is a constant or UNDEF.
    if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
        ScalarResult.getOpcode() != ISD::ConstantFP)
      return SDValue();

    // Legalize the (integer) scalar constant if necessary. We only do
    // this once we know the folding succeeded, since otherwise we would
    // get a node with illegal type which has a user.
    if (LegalSVT != SVT)
      ScalarResult = getNode(ExtendCode, DL, LegalSVT, ScalarResult);

    ScalarResults.push_back(ScalarResult);
  }

  SDValue V = NumElts.isScalable() ? getSplatVector(VT, DL, ScalarResults[0])
                                   : getBuildVector(VT, DL, ScalarResults);
  NewSDValueDbgMsg(V, "New node fold constant vector: ", this);
  return V;
}
7915
7918 // TODO: Add support for unary/ternary fp opcodes.
7919 if (Ops.size() != 2)
7920 return SDValue();
7921
7922 // TODO: We don't do any constant folding for strict FP opcodes here, but we
7923 // should. That will require dealing with a potentially non-default
7924 // rounding mode, checking the "opStatus" return value from the APFloat
7925 // math calculations, and possibly other variations.
7926 SDValue N1 = Ops[0];
7927 SDValue N2 = Ops[1];
7928 ConstantFPSDNode *N1CFP = isConstOrConstSplatFP(N1, /*AllowUndefs*/ false);
7929 ConstantFPSDNode *N2CFP = isConstOrConstSplatFP(N2, /*AllowUndefs*/ false);
7930 if (N1CFP && N2CFP) {
7931 APFloat C1 = N1CFP->getValueAPF(); // make copy
7932 const APFloat &C2 = N2CFP->getValueAPF();
7933 switch (Opcode) {
7934 case ISD::FADD:
7936 return getConstantFP(C1, DL, VT);
7937 case ISD::FSUB:
7939 return getConstantFP(C1, DL, VT);
7940 case ISD::FMUL:
7942 return getConstantFP(C1, DL, VT);
7943 case ISD::FDIV:
7945 return getConstantFP(C1, DL, VT);
7946 case ISD::FREM:
7947 C1.mod(C2);
7948 return getConstantFP(C1, DL, VT);
7949 case ISD::FCOPYSIGN:
7950 C1.copySign(C2);
7951 return getConstantFP(C1, DL, VT);
7952 case ISD::FMINNUM:
7953 return getConstantFP(minnum(C1, C2), DL, VT);
7954 case ISD::FMAXNUM:
7955 return getConstantFP(maxnum(C1, C2), DL, VT);
7956 case ISD::FMINIMUM:
7957 return getConstantFP(minimum(C1, C2), DL, VT);
7958 case ISD::FMAXIMUM:
7959 return getConstantFP(maximum(C1, C2), DL, VT);
7960 case ISD::FMINIMUMNUM:
7961 return getConstantFP(minimumnum(C1, C2), DL, VT);
7962 case ISD::FMAXIMUMNUM:
7963 return getConstantFP(maximumnum(C1, C2), DL, VT);
7964 default: break;
7965 }
7966 }
7967 if (N1CFP && Opcode == ISD::FP_ROUND) {
7968 APFloat C1 = N1CFP->getValueAPF(); // make copy
7969 bool Unused;
7970 // This can return overflow, underflow, or inexact; we don't care.
7971 // FIXME need to be more flexible about rounding mode.
7973 &Unused);
7974 return getConstantFP(C1, DL, VT);
7975 }
7976
7977 switch (Opcode) {
7978 case ISD::FSUB:
7979 // -0.0 - undef --> undef (consistent with "fneg undef")
7980 if (ConstantFPSDNode *N1C = isConstOrConstSplatFP(N1, /*AllowUndefs*/ true))
7981 if (N1C && N1C->getValueAPF().isNegZero() && N2.isUndef())
7982 return getUNDEF(VT);
7983 [[fallthrough]];
7984
7985 case ISD::FADD:
7986 case ISD::FMUL:
7987 case ISD::FDIV:
7988 case ISD::FREM:
7989 // If both operands are undef, the result is undef. If 1 operand is undef,
7990 // the result is NaN. This should match the behavior of the IR optimizer.
7991 if (N1.isUndef() && N2.isUndef())
7992 return getUNDEF(VT);
7993 if (N1.isUndef() || N2.isUndef())
7995 }
7996 return SDValue();
7997}
7998
8000 const SDLoc &DL, EVT DstEltVT) {
8001 EVT SrcEltVT = BV->getValueType(0).getVectorElementType();
8002
8003 // If this is already the right type, we're done.
8004 if (SrcEltVT == DstEltVT)
8005 return SDValue(BV, 0);
8006
8007 unsigned SrcBitSize = SrcEltVT.getSizeInBits();
8008 unsigned DstBitSize = DstEltVT.getSizeInBits();
8009
8010 // If this is a conversion of N elements of one type to N elements of another
8011 // type, convert each element. This handles FP<->INT cases.
8012 if (SrcBitSize == DstBitSize) {
8014 for (SDValue Op : BV->op_values()) {
8015 // If the vector element type is not legal, the BUILD_VECTOR operands
8016 // are promoted and implicitly truncated. Make that explicit here.
8017 if (Op.getValueType() != SrcEltVT)
8018 Op = getNode(ISD::TRUNCATE, DL, SrcEltVT, Op);
8019 Ops.push_back(getBitcast(DstEltVT, Op));
8020 }
8021 EVT VT = EVT::getVectorVT(*getContext(), DstEltVT,
8023 return getBuildVector(VT, DL, Ops);
8024 }
8025
8026 // Otherwise, we're growing or shrinking the elements. To avoid having to
8027 // handle annoying details of growing/shrinking FP values, we convert them to
8028 // int first.
8029 if (SrcEltVT.isFloatingPoint()) {
8030 // Convert the input float vector to a int vector where the elements are the
8031 // same sizes.
8032 EVT IntEltVT = EVT::getIntegerVT(*getContext(), SrcEltVT.getSizeInBits());
8033 if (SDValue Tmp = FoldConstantBuildVector(BV, DL, IntEltVT))
8035 DstEltVT);
8036 return SDValue();
8037 }
8038
8039 // Now we know the input is an integer vector. If the output is a FP type,
8040 // convert to integer first, then to FP of the right size.
8041 if (DstEltVT.isFloatingPoint()) {
8042 EVT IntEltVT = EVT::getIntegerVT(*getContext(), DstEltVT.getSizeInBits());
8043 if (SDValue Tmp = FoldConstantBuildVector(BV, DL, IntEltVT))
8045 DstEltVT);
8046 return SDValue();
8047 }
8048
8049 // Okay, we know the src/dst types are both integers of differing types.
8050 assert(SrcEltVT.isInteger() && DstEltVT.isInteger());
8051
8052 // Extract the constant raw bit data.
8053 BitVector UndefElements;
8054 SmallVector<APInt> RawBits;
8055 bool IsLE = getDataLayout().isLittleEndian();
8056 if (!BV->getConstantRawBits(IsLE, DstBitSize, RawBits, UndefElements))
8057 return SDValue();
8058
8060 for (unsigned I = 0, E = RawBits.size(); I != E; ++I) {
8061 if (UndefElements[I])
8062 Ops.push_back(getUNDEF(DstEltVT));
8063 else
8064 Ops.push_back(getConstant(RawBits[I], DL, DstEltVT));
8065 }
8066
8067 EVT VT = EVT::getVectorVT(*getContext(), DstEltVT, Ops.size());
8068 return getBuildVector(VT, DL, Ops);
8069}
8070
8072 assert(Val.getValueType().isInteger() && "Invalid AssertAlign!");
8073
8074 // There's no need to assert on a byte-aligned pointer. All pointers are at
8075 // least byte aligned.
8076 if (A == Align(1))
8077 return Val;
8078
8079 SDVTList VTs = getVTList(Val.getValueType());
8081 AddNodeIDNode(ID, ISD::AssertAlign, VTs, {Val});
8082 ID.AddInteger(A.value());
8083
8084 void *IP = nullptr;
8085 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
8086 return SDValue(E, 0);
8087
8088 auto *N =
8089 newSDNode<AssertAlignSDNode>(DL.getIROrder(), DL.getDebugLoc(), VTs, A);
8090 createOperands(N, {Val});
8091
8092 CSEMap.InsertNode(N, IP);
8093 InsertNode(N);
8094
8095 SDValue V(N, 0);
8096 NewSDValueDbgMsg(V, "Creating new node: ", this);
8097 return V;
8098}
8099
8100SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
8101 SDValue N1, SDValue N2) {
8102 SDNodeFlags Flags;
8103 if (Inserter)
8104 Flags = Inserter->getFlags();
8105 return getNode(Opcode, DL, VT, N1, N2, Flags);
8106}
8107
8109 SDValue &N2) const {
8110 if (!TLI->isCommutativeBinOp(Opcode))
8111 return;
8112
8113 // Canonicalize:
8114 // binop(const, nonconst) -> binop(nonconst, const)
8117 bool N1CFP = isConstantFPBuildVectorOrConstantFP(N1);
8118 bool N2CFP = isConstantFPBuildVectorOrConstantFP(N2);
8119 if ((N1C && !N2C) || (N1CFP && !N2CFP))
8120 std::swap(N1, N2);
8121
8122 // Canonicalize:
8123 // binop(splat(x), step_vector) -> binop(step_vector, splat(x))
8124 else if (N1.getOpcode() == ISD::SPLAT_VECTOR &&
8126 std::swap(N1, N2);
8127}
8128
/// Two-operand getNode with explicit flags. Pipeline: (1) canonicalize
/// commutative operands, (2) run opcode-specific verification asserts and
/// trivial/identity folds, (3) fold poison/undef operands, (4) attempt full
/// constant folding via FoldConstantArithmetic, and (5) CSE-lookup or create
/// the new node.
8129SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
8130 SDValue N1, SDValue N2, const SDNodeFlags Flags) {
8132 N2.getOpcode() != ISD::DELETED_NODE &&
8133 "Operand is DELETED_NODE!");
8134
8135 canonicalizeCommutativeBinop(Opcode, N1, N2);
8136
8137 auto *N1C = dyn_cast<ConstantSDNode>(N1);
8138 auto *N2C = dyn_cast<ConstantSDNode>(N2);
8139
8140 // Don't allow undefs in vector splats - we might be returning N2 when folding
8141 // to zero etc.
8142 ConstantSDNode *N2CV =
8143 isConstOrConstSplat(N2, /*AllowUndefs*/ false, /*AllowTruncation*/ true);
8144
  // Opcode-specific verification (asserts) and trivial/identity folds.
8145 switch (Opcode) {
8146 default: break;
8147 case ISD::TokenFactor:
8148 assert(VT == MVT::Other && N1.getValueType() == MVT::Other &&
8149 N2.getValueType() == MVT::Other && "Invalid token factor!");
8150 // Fold trivial token factors.
8151 if (N1.getOpcode() == ISD::EntryToken) return N2;
8152 if (N2.getOpcode() == ISD::EntryToken) return N1;
8153 if (N1 == N2) return N1;
8154 break;
8155 case ISD::BUILD_VECTOR: {
8156 // Attempt to simplify BUILD_VECTOR.
8157 SDValue Ops[] = {N1, N2};
8158 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
8159 return V;
8160 break;
8161 }
8162 case ISD::CONCAT_VECTORS: {
8163 SDValue Ops[] = {N1, N2};
8164 if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this))
8165 return V;
8166 break;
8167 }
8168 case ISD::AND:
8169 assert(VT.isInteger() && "This operator does not apply to FP types!");
8170 assert(N1.getValueType() == N2.getValueType() &&
8171 N1.getValueType() == VT && "Binary operator types must match!");
8172 // (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's
8173 // worth handling here.
8174 if (N2CV && N2CV->isZero())
8175 return N2;
8176 if (N2CV && N2CV->isAllOnes()) // X & -1 -> X
8177 return N1;
8178 break;
8179 case ISD::OR:
8180 case ISD::XOR:
8181 case ISD::ADD:
8182 case ISD::PTRADD:
8183 case ISD::SUB:
8184 assert(VT.isInteger() && "This operator does not apply to FP types!");
8185 assert(N1.getValueType() == N2.getValueType() &&
8186 N1.getValueType() == VT && "Binary operator types must match!");
8187 // The equal operand types requirement is unnecessarily strong for PTRADD.
8188 // However, the SelectionDAGBuilder does not generate PTRADDs with different
8189 // operand types, and we'd need to re-implement GEP's non-standard wrapping
8190 // logic everywhere where PTRADDs may be folded or combined to properly
8191 // support them. If/when we introduce pointer types to the SDAG, we will
8192 // need to relax this constraint.
8193
8194 // (X ^|+- 0) -> X. This commonly occurs when legalizing i64 values, so
8195 // it's worth handling here.
8196 if (N2CV && N2CV->isZero())
8197 return N1;
8198 if ((Opcode == ISD::ADD || Opcode == ISD::SUB) &&
8199 VT.getScalarType() == MVT::i1)
8200 return getNode(ISD::XOR, DL, VT, N1, N2);
8201 // Fold (add (vscale * C0), (vscale * C1)) to (vscale * (C0 + C1)).
8202 if (Opcode == ISD::ADD && N1.getOpcode() == ISD::VSCALE &&
8203 N2.getOpcode() == ISD::VSCALE) {
8204 const APInt &C1 = N1->getConstantOperandAPInt(0);
8205 const APInt &C2 = N2->getConstantOperandAPInt(0);
8206 return getVScale(DL, VT, C1 + C2);
8207 }
8208 break;
8209 case ISD::MUL:
8210 assert(VT.isInteger() && "This operator does not apply to FP types!");
8211 assert(N1.getValueType() == N2.getValueType() &&
8212 N1.getValueType() == VT && "Binary operator types must match!");
8213 if (VT.getScalarType() == MVT::i1)
8214 return getNode(ISD::AND, DL, VT, N1, N2);
8215 if (N2CV && N2CV->isZero())
8216 return N2;
8217 if (N2C && (N1.getOpcode() == ISD::VSCALE) && Flags.hasNoSignedWrap()) {
8218 const APInt &MulImm = N1->getConstantOperandAPInt(0);
8219 const APInt &N2CImm = N2C->getAPIntValue();
8220 return getVScale(DL, VT, MulImm * N2CImm);
8221 }
8222 break;
8223 case ISD::UDIV:
8224 case ISD::UREM:
8225 case ISD::MULHU:
8226 case ISD::MULHS:
8227 case ISD::SDIV:
8228 case ISD::SREM:
8229 case ISD::SADDSAT:
8230 case ISD::SSUBSAT:
8231 case ISD::UADDSAT:
8232 case ISD::USUBSAT:
8233 assert(VT.isInteger() && "This operator does not apply to FP types!");
8234 assert(N1.getValueType() == N2.getValueType() &&
8235 N1.getValueType() == VT && "Binary operator types must match!");
8236 if (VT.getScalarType() == MVT::i1) {
8237 // fold (add_sat x, y) -> (or x, y) for bool types.
8238 if (Opcode == ISD::SADDSAT || Opcode == ISD::UADDSAT)
8239 return getNode(ISD::OR, DL, VT, N1, N2);
8240 // fold (sub_sat x, y) -> (and x, ~y) for bool types.
8241 if (Opcode == ISD::SSUBSAT || Opcode == ISD::USUBSAT)
8242 return getNode(ISD::AND, DL, VT, N1, getNOT(DL, N2, VT));
8243 }
8244 break;
8245 case ISD::SCMP:
8246 case ISD::UCMP:
8247 assert(N1.getValueType() == N2.getValueType() &&
8248 "Types of operands of UCMP/SCMP must match");
8249 assert(N1.getValueType().isVector() == VT.isVector() &&
8250 "Operands and return type of must both be scalars or vectors");
8251 if (VT.isVector())
8254 "Result and operands must have the same number of elements");
8255 break;
8256 case ISD::AVGFLOORS:
8257 case ISD::AVGFLOORU:
8258 case ISD::AVGCEILS:
8259 case ISD::AVGCEILU:
8260 assert(VT.isInteger() && "This operator does not apply to FP types!");
8261 assert(N1.getValueType() == N2.getValueType() &&
8262 N1.getValueType() == VT && "Binary operator types must match!");
8263 break;
8264 case ISD::ABDS:
8265 case ISD::ABDU:
8266 assert(VT.isInteger() && "This operator does not apply to FP types!");
8267 assert(N1.getValueType() == N2.getValueType() &&
8268 N1.getValueType() == VT && "Binary operator types must match!");
8269 if (VT.getScalarType() == MVT::i1)
8270 return getNode(ISD::XOR, DL, VT, N1, N2);
8271 break;
8272 case ISD::SMIN:
8273 case ISD::UMAX:
8274 assert(VT.isInteger() && "This operator does not apply to FP types!");
8275 assert(N1.getValueType() == N2.getValueType() &&
8276 N1.getValueType() == VT && "Binary operator types must match!");
8277 if (VT.getScalarType() == MVT::i1)
8278 return getNode(ISD::OR, DL, VT, N1, N2);
8279 break;
8280 case ISD::SMAX:
8281 case ISD::UMIN:
8282 assert(VT.isInteger() && "This operator does not apply to FP types!");
8283 assert(N1.getValueType() == N2.getValueType() &&
8284 N1.getValueType() == VT && "Binary operator types must match!");
8285 if (VT.getScalarType() == MVT::i1)
8286 return getNode(ISD::AND, DL, VT, N1, N2);
8287 break;
8288 case ISD::FADD:
8289 case ISD::FSUB:
8290 case ISD::FMUL:
8291 case ISD::FDIV:
8292 case ISD::FREM:
8293 assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
8294 assert(N1.getValueType() == N2.getValueType() &&
8295 N1.getValueType() == VT && "Binary operator types must match!");
8296 if (SDValue V = simplifyFPBinop(Opcode, N1, N2, Flags))
8297 return V;
8298 break;
8299 case ISD::FCOPYSIGN: // N1 and result must match. N1/N2 need not match.
8300 assert(N1.getValueType() == VT &&
8303 "Invalid FCOPYSIGN!");
8304 break;
8305 case ISD::SHL:
8306 if (N2C && (N1.getOpcode() == ISD::VSCALE) && Flags.hasNoSignedWrap()) {
8307 const APInt &MulImm = N1->getConstantOperandAPInt(0);
8308 const APInt &ShiftImm = N2C->getAPIntValue();
8309 return getVScale(DL, VT, MulImm << ShiftImm);
8310 }
8311 [[fallthrough]];
8312 case ISD::SRA:
8313 case ISD::SRL:
8314 if (SDValue V = simplifyShift(N1, N2))
8315 return V;
8316 [[fallthrough]];
8317 case ISD::ROTL:
8318 case ISD::ROTR:
8319 case ISD::SSHLSAT:
8320 case ISD::USHLSAT:
8321 assert(VT == N1.getValueType() &&
8322 "Shift operators return type must be the same as their first arg");
8323 assert(VT.isInteger() && N2.getValueType().isInteger() &&
8324 "Shifts only work on integers");
8325 assert((!VT.isVector() || VT == N2.getValueType()) &&
8326 "Vector shift amounts must be in the same as their first arg");
8327 // Verify that the shift amount VT is big enough to hold valid shift
8328 // amounts. This catches things like trying to shift an i1024 value by an
8329 // i8, which is easy to fall into in generic code that uses
8330 // TLI.getShiftAmount().
8333 "Invalid use of small shift amount with oversized value!");
8334
8335 // Always fold shifts of i1 values so the code generator doesn't need to
8336 // handle them. Since we know the size of the shift has to be less than the
8337 // size of the value, the shift/rotate count is guaranteed to be zero.
8338 if (VT == MVT::i1)
8339 return N1;
8340 if (N2CV && N2CV->isZero())
8341 return N1;
8342 break;
8343 case ISD::FP_ROUND:
8345 VT.bitsLE(N1.getValueType()) && N2C &&
8346 (N2C->getZExtValue() == 0 || N2C->getZExtValue() == 1) &&
8347 N2.getOpcode() == ISD::TargetConstant && "Invalid FP_ROUND!");
8348 if (N1.getValueType() == VT) return N1; // noop conversion.
8349 break;
8350 case ISD::IS_FPCLASS: {
8352 "IS_FPCLASS is used for a non-floating type");
8353 assert(isa<ConstantSDNode>(N2) && "FPClassTest is not Constant");
8354 // is.fpclass(poison, mask) -> poison
8355 if (N1.getOpcode() == ISD::POISON)
8356 return getPOISON(VT);
8357 FPClassTest Mask = static_cast<FPClassTest>(N2->getAsZExtVal());
8358 // If all tests are made, it doesn't matter what the value is.
8359 if ((Mask & fcAllFlags) == fcAllFlags)
8360 return getBoolConstant(true, DL, VT, N1.getValueType());
8361 if ((Mask & fcAllFlags) == 0)
8362 return getBoolConstant(false, DL, VT, N1.getValueType());
8363 break;
8364 }
8365 case ISD::AssertNoFPClass: {
8367 "AssertNoFPClass is used for a non-floating type");
8368 assert(isa<ConstantSDNode>(N2) && "NoFPClass is not Constant");
8369 FPClassTest NoFPClass = static_cast<FPClassTest>(N2->getAsZExtVal());
8370 assert(llvm::to_underlying(NoFPClass) <=
8372 "FPClassTest value too large");
8373 (void)NoFPClass;
8374 break;
8375 }
8376 case ISD::AssertSext:
8377 case ISD::AssertZext: {
8378 EVT EVT = cast<VTSDNode>(N2)->getVT();
8379 assert(VT == N1.getValueType() && "Not an inreg extend!");
8380 assert(VT.isInteger() && EVT.isInteger() &&
8381 "Cannot *_EXTEND_INREG FP types");
8382 assert(!EVT.isVector() &&
8383 "AssertSExt/AssertZExt type should be the vector element type "
8384 "rather than the vector type!");
8385 assert(EVT.bitsLE(VT.getScalarType()) && "Not extending!");
8386 if (VT.getScalarType() == EVT) return N1; // noop assertion.
8387 break;
8388 }
8390 EVT EVT = cast<VTSDNode>(N2)->getVT();
8391 assert(VT == N1.getValueType() && "Not an inreg extend!");
8392 assert(VT.isInteger() && EVT.isInteger() &&
8393 "Cannot *_EXTEND_INREG FP types");
8394 assert(EVT.isVector() == VT.isVector() &&
8395 "SIGN_EXTEND_INREG type should be vector iff the operand "
8396 "type is vector!");
8397 assert((!EVT.isVector() ||
8399 "Vector element counts must match in SIGN_EXTEND_INREG");
8400 assert(EVT.getScalarType().bitsLE(VT.getScalarType()) && "Not extending!");
8401 if (EVT == VT) return N1; // Not actually extending
8402 break;
8403 }
8405 case ISD::FP_TO_UINT_SAT: {
8406 assert(VT.isInteger() && cast<VTSDNode>(N2)->getVT().isInteger() &&
8407 N1.getValueType().isFloatingPoint() && "Invalid FP_TO_*INT_SAT");
8408 assert(N1.getValueType().isVector() == VT.isVector() &&
8409 "FP_TO_*INT_SAT type should be vector iff the operand type is "
8410 "vector!");
8411 assert((!VT.isVector() || VT.getVectorElementCount() ==
8413 "Vector element counts must match in FP_TO_*INT_SAT");
8414 assert(!cast<VTSDNode>(N2)->getVT().isVector() &&
8415 "Type to saturate to must be a scalar.");
8416 assert(cast<VTSDNode>(N2)->getVT().bitsLE(VT.getScalarType()) &&
8417 "Not extending!");
8418 break;
8419 }
8422 "The result of EXTRACT_VECTOR_ELT must be at least as wide as the \
8423 element type of the vector.");
8424
8425 // Extract from an undefined value or using an undefined index is undefined.
8426 if (N1.isUndef() || N2.isUndef())
8427 return getUNDEF(VT);
8428
8429 // EXTRACT_VECTOR_ELT of out-of-bounds element is POISON for fixed length
8430 // vectors. For scalable vectors we will provide appropriate support for
8431 // dealing with arbitrary indices.
8432 if (N2C && N1.getValueType().isFixedLengthVector() &&
8433 N2C->getAPIntValue().uge(N1.getValueType().getVectorNumElements()))
8434 return getPOISON(VT);
8435
8436 // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is
8437 // expanding copies of large vectors from registers. This only works for
8438 // fixed length vectors, since we need to know the exact number of
8439 // elements.
8440 if (N2C && N1.getOpcode() == ISD::CONCAT_VECTORS &&
8442 unsigned Factor = N1.getOperand(0).getValueType().getVectorNumElements();
8443 return getExtractVectorElt(DL, VT,
8444 N1.getOperand(N2C->getZExtValue() / Factor),
8445 N2C->getZExtValue() % Factor);
8446 }
8447
8448 // EXTRACT_VECTOR_ELT of BUILD_VECTOR or SPLAT_VECTOR is often formed while
8449 // lowering is expanding large vector constants.
8450 if (N2C && (N1.getOpcode() == ISD::BUILD_VECTOR ||
8451 N1.getOpcode() == ISD::SPLAT_VECTOR)) {
8454 "BUILD_VECTOR used for scalable vectors");
8455 unsigned Index =
8456 N1.getOpcode() == ISD::BUILD_VECTOR ? N2C->getZExtValue() : 0;
8457 SDValue Elt = N1.getOperand(Index);
8458
8459 if (VT != Elt.getValueType())
8460 // If the vector element type is not legal, the BUILD_VECTOR operands
8461 // are promoted and implicitly truncated, and the result implicitly
8462 // extended. Make that explicit here.
8463 Elt = getAnyExtOrTrunc(Elt, DL, VT);
8464
8465 return Elt;
8466 }
8467
8468 // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector
8469 // operations are lowered to scalars.
8470 if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) {
8471 // If the indices are the same, return the inserted element else
8472 // if the indices are known different, extract the element from
8473 // the original vector.
8474 SDValue N1Op2 = N1.getOperand(2);
8476
8477 if (N1Op2C && N2C) {
8478 if (N1Op2C->getZExtValue() == N2C->getZExtValue()) {
8479 if (VT == N1.getOperand(1).getValueType())
8480 return N1.getOperand(1);
8481 if (VT.isFloatingPoint()) {
8483 return getFPExtendOrRound(N1.getOperand(1), DL, VT);
8484 }
8485 return getSExtOrTrunc(N1.getOperand(1), DL, VT);
8486 }
8487 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2);
8488 }
8489 }
8490
8491 // EXTRACT_VECTOR_ELT of v1iX EXTRACT_SUBVECTOR could be formed
8492 // when vector types are scalarized and v1iX is legal.
8493 // vextract (v1iX extract_subvector(vNiX, Idx)) -> vextract(vNiX,Idx).
8494 // Here we are completely ignoring the extract element index (N2),
8495 // which is fine for fixed width vectors, since any index other than 0
8496 // is undefined anyway. However, this cannot be ignored for scalable
8497 // vectors - in theory we could support this, but we don't want to do this
8498 // without a profitability check.
8499 if (N1.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
8501 N1.getValueType().getVectorNumElements() == 1) {
8502 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0),
8503 N1.getOperand(1));
8504 }
8505 break;
8507 assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!");
8508 assert(!N1.getValueType().isVector() && !VT.isVector() &&
8509 (N1.getValueType().isInteger() == VT.isInteger()) &&
8510 N1.getValueType() != VT &&
8511 "Wrong types for EXTRACT_ELEMENT!");
8512
8513 // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding
8514 // 64-bit integers into 32-bit parts. Instead of building the extract of
8515 // the BUILD_PAIR, only to have legalize rip it apart, just do it now.
8516 if (N1.getOpcode() == ISD::BUILD_PAIR)
8517 return N1.getOperand(N2C->getZExtValue());
8518
8519 // EXTRACT_ELEMENT of a constant int is also very common.
8520 if (N1C) {
8521 unsigned ElementSize = VT.getSizeInBits();
8522 unsigned Shift = ElementSize * N2C->getZExtValue();
8523 const APInt &Val = N1C->getAPIntValue();
8524 return getConstant(Val.extractBits(ElementSize, Shift), DL, VT);
8525 }
8526 break;
8528 EVT N1VT = N1.getValueType();
8529 assert(VT.isVector() && N1VT.isVector() &&
8530 "Extract subvector VTs must be vectors!");
8532 "Extract subvector VTs must have the same element type!");
8533 assert((VT.isFixedLengthVector() || N1VT.isScalableVector()) &&
8534 "Cannot extract a scalable vector from a fixed length vector!");
8535 assert((VT.isScalableVector() != N1VT.isScalableVector() ||
8537 "Extract subvector must be from larger vector to smaller vector!");
8538 assert(N2C && "Extract subvector index must be a constant");
8539 assert((VT.isScalableVector() != N1VT.isScalableVector() ||
8540 (VT.getVectorMinNumElements() + N2C->getZExtValue()) <=
8541 N1VT.getVectorMinNumElements()) &&
8542 "Extract subvector overflow!");
8543 assert(N2C->getAPIntValue().getBitWidth() ==
8544 TLI->getVectorIdxWidth(getDataLayout()) &&
8545 "Constant index for EXTRACT_SUBVECTOR has an invalid size");
8546 assert(N2C->getZExtValue() % VT.getVectorMinNumElements() == 0 &&
8547 "Extract index is not a multiple of the output vector length");
8548
8549 // Trivial extraction.
8550 if (VT == N1VT)
8551 return N1;
8552
8553 // EXTRACT_SUBVECTOR of an UNDEF is an UNDEF.
8554 if (N1.isUndef())
8555 return getUNDEF(VT);
8556
8557 // EXTRACT_SUBVECTOR of CONCAT_VECTOR can be simplified if the pieces of
8558 // the concat have the same type as the extract.
8559 if (N1.getOpcode() == ISD::CONCAT_VECTORS &&
8560 VT == N1.getOperand(0).getValueType()) {
8561 unsigned Factor = VT.getVectorMinNumElements();
8562 return N1.getOperand(N2C->getZExtValue() / Factor);
8563 }
8564
8565 // EXTRACT_SUBVECTOR of INSERT_SUBVECTOR is often created
8566 // during shuffle legalization.
8567 if (N1.getOpcode() == ISD::INSERT_SUBVECTOR && N2 == N1.getOperand(2) &&
8568 VT == N1.getOperand(1).getValueType())
8569 return N1.getOperand(1);
8570 break;
8571 }
8572 }
8573
  // For the opcodes listed below, a poison operand makes the whole result
  // poison (the code returns the poison operand itself).
8574 if (N1.getOpcode() == ISD::POISON || N2.getOpcode() == ISD::POISON) {
8575 switch (Opcode) {
8576 case ISD::XOR:
8577 case ISD::ADD:
8578 case ISD::PTRADD:
8579 case ISD::SUB:
8581 case ISD::UDIV:
8582 case ISD::SDIV:
8583 case ISD::UREM:
8584 case ISD::SREM:
8585 case ISD::MUL:
8586 case ISD::AND:
8587 case ISD::SSUBSAT:
8588 case ISD::USUBSAT:
8589 case ISD::UMIN:
8590 case ISD::OR:
8591 case ISD::SADDSAT:
8592 case ISD::UADDSAT:
8593 case ISD::UMAX:
8594 case ISD::SMAX:
8595 case ISD::SMIN:
8596 // fold op(arg1, poison) -> poison, fold op(poison, arg2) -> poison.
8597 return N2.getOpcode() == ISD::POISON ? N2 : N1;
8598 }
8599 }
8600
8601 // Canonicalize an UNDEF to the RHS, even over a constant.
8602 if (N1.getOpcode() == ISD::UNDEF && N2.getOpcode() != ISD::UNDEF) {
8603 if (TLI->isCommutativeBinOp(Opcode)) {
8604 std::swap(N1, N2);
8605 } else {
8606 switch (Opcode) {
8607 case ISD::PTRADD:
8608 case ISD::SUB:
8609 // fold op(undef, non_undef_arg2) -> undef.
8610 return N1;
8612 case ISD::UDIV:
8613 case ISD::SDIV:
8614 case ISD::UREM:
8615 case ISD::SREM:
8616 case ISD::SSUBSAT:
8617 case ISD::USUBSAT:
8618 // fold op(undef, non_undef_arg2) -> 0.
8619 return getConstant(0, DL, VT);
8620 }
8621 }
8622 }
8623
8624 // Fold a bunch of operators when the RHS is undef.
8625 if (N2.getOpcode() == ISD::UNDEF) {
8626 switch (Opcode) {
8627 case ISD::XOR:
8628 if (N1.getOpcode() == ISD::UNDEF)
8629 // Handle undef ^ undef -> 0 special case. This is a common
8630 // idiom (misuse).
8631 return getConstant(0, DL, VT);
8632 [[fallthrough]];
8633 case ISD::ADD:
8634 case ISD::PTRADD:
8635 case ISD::SUB:
8636 // fold op(arg1, undef) -> undef.
8637 return N2;
8638 case ISD::UDIV:
8639 case ISD::SDIV:
8640 case ISD::UREM:
8641 case ISD::SREM:
8642 // fold op(arg1, undef) -> poison.
8643 return getPOISON(VT);
8644 case ISD::MUL:
8645 case ISD::AND:
8646 case ISD::SSUBSAT:
8647 case ISD::USUBSAT:
8648 case ISD::UMIN:
8649 // fold op(undef, undef) -> undef, fold op(arg1, undef) -> 0.
8650 return N1.getOpcode() == ISD::UNDEF ? N2 : getConstant(0, DL, VT);
8651 case ISD::OR:
8652 case ISD::SADDSAT:
8653 case ISD::UADDSAT:
8654 case ISD::UMAX:
8655 // fold op(undef, undef) -> undef, fold op(arg1, undef) -> -1.
8656 return N1.getOpcode() == ISD::UNDEF ? N2 : getAllOnesConstant(DL, VT);
8657 case ISD::SMAX:
8658 // fold op(undef, undef) -> undef, fold op(arg1, undef) -> MAX_INT.
8659 return N1.getOpcode() == ISD::UNDEF
8660 ? N2
8661 : getConstant(
8663 VT);
8664 case ISD::SMIN:
8665 // fold op(undef, undef) -> undef, fold op(arg1, undef) -> MIN_INT.
8666 return N1.getOpcode() == ISD::UNDEF
8667 ? N2
8668 : getConstant(
8670 VT);
8671 }
8672 }
8673
8674 // Perform trivial constant folding.
8675 if (SDValue SV = FoldConstantArithmetic(Opcode, DL, VT, {N1, N2}, Flags))
8676 return SV;
8677
8678 // Memoize this node if possible.
8679 SDNode *N;
8680 SDVTList VTs = getVTList(VT);
8681 SDValue Ops[] = {N1, N2};
8682 if (VT != MVT::Glue) {
8684 AddNodeIDNode(ID, Opcode, VTs, Ops);
8685 void *IP = nullptr;
8686 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
8687 E->intersectFlagsWith(Flags);
8688 return SDValue(E, 0);
8689 }
8690
8691 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
8692 N->setFlags(Flags);
8693 createOperands(N, Ops);
8694 CSEMap.InsertNode(N, IP);
8695 } else {
    // Glue-producing nodes are intentionally not inserted into the CSE map.
8696 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
8697 createOperands(N, Ops);
8698 }
8699
8700 InsertNode(N);
8701 SDValue V = SDValue(N, 0);
8702 NewSDValueDbgMsg(V, "Creating new node: ", this);
8703 return V;
8704}
8705
8706SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
8707 SDValue N1, SDValue N2, SDValue N3) {
8708 SDNodeFlags Flags;
8709 if (Inserter)
8710 Flags = Inserter->getFlags();
8711 return getNode(Opcode, DL, VT, N1, N2, N3, Flags);
8712}
8713
// Three-operand getNode: performs opcode-specific simplifications, then
// trivial constant folding, then CSE-memoized node creation.
SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                              SDValue N1, SDValue N2, SDValue N3,
                              const SDNodeFlags Flags) {
  // NOTE(review): the opening `assert(N1.getOpcode() != ISD::DELETED_NODE &&`
  // appears to have been dropped from this copy -- the fragment below is the
  // tail of a debug-only check that no operand is an already-deleted node.
  N2.getOpcode() != ISD::DELETED_NODE &&
         N3.getOpcode() != ISD::DELETED_NODE &&
         "Operand is DELETED_NODE!");
  // Perform various simplifications.
  switch (Opcode) {
  case ISD::BUILD_VECTOR: {
    // Attempt to simplify BUILD_VECTOR.
    SDValue Ops[] = {N1, N2, N3};
    if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
      return V;
    break;
  }
  case ISD::CONCAT_VECTORS: {
    // Fold a concatenation where possible (e.g. all-undef, single source).
    SDValue Ops[] = {N1, N2, N3};
    if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this))
      return V;
    break;
  }
  case ISD::SETCC: {
    assert(VT.isInteger() && "SETCC result type must be an integer!");
    assert(N1.getValueType() == N2.getValueType() &&
           "SETCC operands must have the same type!");
    assert(VT.isVector() == N1.getValueType().isVector() &&
           "SETCC type should be vector iff the operand type is vector!");
    // NOTE(review): the right-hand side of this element-count assert looks
    // truncated in this copy (presumably
    // N1.getValueType().getVectorElementCount()) -- confirm upstream.
    assert((!VT.isVector() || VT.getVectorElementCount() ==
           "SETCC vector element counts must match!");
    // Use FoldSetCC to simplify SETCC's.
    if (SDValue V =
            FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL, Flags))
      return V;
    break;
  }
  case ISD::SELECT:
  case ISD::VSELECT:
    if (SDValue V = simplifySelect(N1, N2, N3))
      return V;
    break;
    // NOTE(review): a `case` label (presumably ISD::VECTOR_SHUFFLE) appears
    // to be missing immediately before this unreachable.
    llvm_unreachable("should use getVectorShuffle constructor!");
    // NOTE(review): a `case` label appears to be missing here; a funnel
    // shift by zero returning the first operand suggests ISD::FSHL --
    // confirm upstream.
    if (isNullConstant(N3))
      return N1;
    break;
    // NOTE(review): a `case` label appears to be missing here; returning the
    // second operand for a zero shift suggests ISD::FSHR -- confirm upstream.
    if (isNullConstant(N3))
      return N2;
    break;
    // NOTE(review): the `case ISD::INSERT_VECTOR_ELT: {` label appears to be
    // missing here (the assert messages below name that opcode), and several
    // of the following asserts look truncated.
    assert(VT.isVector() && VT == N1.getValueType() &&
           "INSERT_VECTOR_ELT vector type mismatch");
           "INSERT_VECTOR_ELT scalar fp/int mismatch");
    assert((!VT.isFloatingPoint() ||
            VT.getVectorElementType() == N2.getValueType()) &&
           "INSERT_VECTOR_ELT fp scalar type mismatch");
    assert((!VT.isInteger() ||
           "INSERT_VECTOR_ELT int scalar size mismatch");

    auto *N3C = dyn_cast<ConstantSDNode>(N3);
    // INSERT_VECTOR_ELT into out-of-bounds element is an UNDEF, except
    // for scalable vectors where we will generate appropriate code to
    // deal with out-of-bounds cases correctly.
    if (N3C && VT.isFixedLengthVector() &&
        N3C->getZExtValue() >= VT.getVectorNumElements())
      return getUNDEF(VT);

    // Undefined index can be assumed out-of-bounds, so that's UNDEF too.
    if (N3.isUndef())
      return getUNDEF(VT);

    // If inserting poison, just use the input vector.
    if (N2.getOpcode() == ISD::POISON)
      return N1;

    // Inserting undef into undef/poison is still undef.
    if (N2.getOpcode() == ISD::UNDEF && N1.isUndef())
      return getUNDEF(VT);

    // If the inserted element is an UNDEF, just use the input vector.
    // But not if skipping the insert could make the result more poisonous.
    if (N2.isUndef()) {
      if (N3C && VT.isFixedLengthVector()) {
        APInt EltMask =
            APInt::getOneBitSet(VT.getVectorNumElements(), N3C->getZExtValue());
        if (isGuaranteedNotToBePoison(N1, EltMask))
          return N1;
      } else if (isGuaranteedNotToBePoison(N1))
        return N1;
    }
    break;
  }
  case ISD::INSERT_SUBVECTOR: {
    // If inserting poison, just use the input vector,
    if (N2.getOpcode() == ISD::POISON)
      return N1;

    // Inserting undef into undef/poison is still undef.
    if (N2.getOpcode() == ISD::UNDEF && N1.isUndef())
      return getUNDEF(VT);

    EVT N2VT = N2.getValueType();
    assert(VT == N1.getValueType() &&
           "Dest and insert subvector source types must match!");
    assert(VT.isVector() && N2VT.isVector() &&
           "Insert subvector VTs must be vectors!");
    // NOTE(review): several of the asserts below have truncated heads or
    // tails in this copy (element-type equality, constant-index check,
    // index-width check) -- confirm against upstream.
           "Insert subvector VTs must have the same element type!");
    assert((VT.isScalableVector() || N2VT.isFixedLengthVector()) &&
           "Cannot insert a scalable vector into a fixed length vector!");
    assert((VT.isScalableVector() != N2VT.isScalableVector() ||
           "Insert subvector must be from smaller vector to larger vector!");
           "Insert subvector index must be constant");
    assert((VT.isScalableVector() != N2VT.isScalableVector() ||
            (N2VT.getVectorMinNumElements() + N3->getAsZExtVal()) <=
           "Insert subvector overflow!");
               TLI->getVectorIdxWidth(getDataLayout()) &&
           "Constant index for INSERT_SUBVECTOR has an invalid size");

    // Trivial insertion.
    if (VT == N2VT)
      return N2;

    // If this is an insert of an extracted vector into an undef/poison vector,
    // we can just use the input to the extract. But not if skipping the
    // extract+insert could make the result more poisonous.
    if (N1.isUndef() && N2.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
        N2.getOperand(1) == N3 && N2.getOperand(0).getValueType() == VT) {
      if (N1.getOpcode() == ISD::POISON)
        return N2.getOperand(0);
      if (VT.isFixedLengthVector() && N2VT.isFixedLengthVector()) {
        unsigned LoBit = N3->getAsZExtVal();
        unsigned HiBit = LoBit + N2VT.getVectorNumElements();
        APInt EltMask =
            APInt::getBitsSet(VT.getVectorNumElements(), LoBit, HiBit);
        if (isGuaranteedNotToBePoison(N2.getOperand(0), ~EltMask))
          return N2.getOperand(0);
      } else if (isGuaranteedNotToBePoison(N2.getOperand(0)))
        return N2.getOperand(0);
    }

    // If the inserted subvector is UNDEF, just use the input vector.
    // But not if skipping the insert could make the result more poisonous.
    if (N2.isUndef()) {
      if (VT.isFixedLengthVector()) {
        unsigned LoBit = N3->getAsZExtVal();
        unsigned HiBit = LoBit + N2VT.getVectorNumElements();
        APInt EltMask =
            APInt::getBitsSet(VT.getVectorNumElements(), LoBit, HiBit);
        if (isGuaranteedNotToBePoison(N1, EltMask))
          return N1;
      } else if (isGuaranteedNotToBePoison(N1))
        return N1;
    }
    break;
  }
  case ISD::BITCAST:
    // Fold bit_convert nodes from a type to themselves.
    if (N1.getValueType() == VT)
      return N1;
    break;
  case ISD::VP_TRUNCATE:
  case ISD::VP_SIGN_EXTEND:
  case ISD::VP_ZERO_EXTEND:
    // Don't create noop casts.
    if (N1.getValueType() == VT)
      return N1;
    break;
  case ISD::VECTOR_COMPRESS: {
    [[maybe_unused]] EVT VecVT = N1.getValueType();
    [[maybe_unused]] EVT MaskVT = N2.getValueType();
    [[maybe_unused]] EVT PassthruVT = N3.getValueType();
    assert(VT == VecVT && "Vector and result type don't match.");
    assert(VecVT.isVector() && MaskVT.isVector() && PassthruVT.isVector() &&
           "All inputs must be vectors.");
    assert(VecVT == PassthruVT && "Vector and passthru types don't match.");
    // NOTE(review): the head of this element-count assert looks truncated.
           "Vector and mask must have same number of elements.");

    // Compressing an undef vector or using an undef mask yields nothing
    // selected, so the result is just the passthru.
    if (N1.isUndef() || N2.isUndef())
      return N3;

    break;
  }
  // NOTE(review): the PARTIAL_REDUCE_*MLA `case` labels appear to be
  // missing here (the assert messages below name PARTIAL_REDUCE_MLA).
    [[maybe_unused]] EVT AccVT = N1.getValueType();
    [[maybe_unused]] EVT Input1VT = N2.getValueType();
    [[maybe_unused]] EVT Input2VT = N3.getValueType();
    assert(Input1VT.isVector() && Input1VT == Input2VT &&
           "Expected the second and third operands of the PARTIAL_REDUCE_MLA "
           "node to have the same type!");
    assert(VT.isVector() && VT == AccVT &&
           "Expected the first operand of the PARTIAL_REDUCE_MLA node to have "
           "the same type as its result!");
    // NOTE(review): heads of the two asserts below look truncated.
           AccVT.getVectorElementCount()) &&
           "Expected the element count of the second and third operands of the "
           "PARTIAL_REDUCE_MLA node to be a positive integer multiple of the "
           "element count of the first operand and the result!");
           "Expected the second and third operands of the PARTIAL_REDUCE_MLA "
           "node to have an element type which is the same as or smaller than "
           "the element type of the first operand and result!");
    break;
  }
  }

  // Perform trivial constant folding for arithmetic operators.
  switch (Opcode) {
  case ISD::FMA:
  case ISD::FMAD:
  case ISD::SETCC:
  case ISD::FSHL:
  case ISD::FSHR:
    if (SDValue SV =
            FoldConstantArithmetic(Opcode, DL, VT, {N1, N2, N3}, Flags))
      return SV;
    break;
  }

  // Memoize node if it doesn't produce a glue result.
  SDNode *N;
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = {N1, N2, N3};
  if (VT != MVT::Glue) {
    // NOTE(review): the declaration of `ID` (presumably
    // `FoldingSetNodeID ID;`) appears to be missing here.
    AddNodeIDNode(ID, Opcode, VTs, Ops);
    void *IP = nullptr;
    // If an equivalent node already exists, merge flags and reuse it (CSE).
    if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
      E->intersectFlagsWith(Flags);
      return SDValue(E, 0);
    }

    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
    N->setFlags(Flags);
    createOperands(N, Ops);
    CSEMap.InsertNode(N, IP);
  } else {
    // Glue-producing nodes are never CSE'd.
    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
    createOperands(N, Ops);
  }

  InsertNode(N);
  SDValue V = SDValue(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}
8973
8974SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
8975 SDValue N1, SDValue N2, SDValue N3, SDValue N4,
8976 const SDNodeFlags Flags) {
8977 SDValue Ops[] = { N1, N2, N3, N4 };
8978 return getNode(Opcode, DL, VT, Ops, Flags);
8979}
8980
8981SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
8982 SDValue N1, SDValue N2, SDValue N3, SDValue N4) {
8983 SDNodeFlags Flags;
8984 if (Inserter)
8985 Flags = Inserter->getFlags();
8986 return getNode(Opcode, DL, VT, N1, N2, N3, N4, Flags);
8987}
8988
8989SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
8990 SDValue N1, SDValue N2, SDValue N3, SDValue N4,
8991 SDValue N5, const SDNodeFlags Flags) {
8992 SDValue Ops[] = { N1, N2, N3, N4, N5 };
8993 return getNode(Opcode, DL, VT, Ops, Flags);
8994}
8995
8996SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
8997 SDValue N1, SDValue N2, SDValue N3, SDValue N4,
8998 SDValue N5) {
8999 SDNodeFlags Flags;
9000 if (Inserter)
9001 Flags = Inserter->getFlags();
9002 return getNode(Opcode, DL, VT, N1, N2, N3, N4, N5, Flags);
9003}
9004
9005/// getStackArgumentTokenFactor - Compute a TokenFactor to force all
9006/// the incoming stack arguments to be loaded from the stack.
9008 SmallVector<SDValue, 8> ArgChains;
9009
9010 // Include the original chain at the beginning of the list. When this is
9011 // used by target LowerCall hooks, this helps legalize find the
9012 // CALLSEQ_BEGIN node.
9013 ArgChains.push_back(Chain);
9014
9015 // Add a chain value for each stack argument.
9016 for (SDNode *U : getEntryNode().getNode()->users())
9017 if (LoadSDNode *L = dyn_cast<LoadSDNode>(U))
9018 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
9019 if (FI->getIndex() < 0)
9020 ArgChains.push_back(SDValue(L, 1));
9021
9022 // Build a tokenfactor for all the chains.
9023 return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
9024}
9025
9026/// getMemsetValue - Vectorized representation of the memset value
9027/// operand.
9029 const SDLoc &dl) {
9030 assert(!Value.isUndef());
9031
9032 unsigned NumBits = VT.getScalarSizeInBits();
9034 assert(C->getAPIntValue().getBitWidth() == 8);
9035 APInt Val = APInt::getSplat(NumBits, C->getAPIntValue());
9036 if (VT.isInteger()) {
9037 bool IsOpaque = VT.getSizeInBits() > 64 ||
9038 !DAG.getTargetLoweringInfo().isLegalStoreImmediate(C->getSExtValue());
9039 return DAG.getConstant(Val, dl, VT, false, IsOpaque);
9040 }
9041 return DAG.getConstantFP(APFloat(VT.getFltSemantics(), Val), dl, VT);
9042 }
9043
9044 assert(Value.getValueType() == MVT::i8 && "memset with non-byte fill value?");
9045 EVT IntVT = VT.getScalarType();
9046 if (!IntVT.isInteger())
9047 IntVT = EVT::getIntegerVT(*DAG.getContext(), IntVT.getSizeInBits());
9048
9049 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, IntVT, Value);
9050 if (NumBits > 8) {
9051 // Use a multiplication with 0x010101... to extend the input to the
9052 // required length.
9053 APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01));
9054 Value = DAG.getNode(ISD::MUL, dl, IntVT, Value,
9055 DAG.getConstant(Magic, dl, IntVT));
9056 }
9057
9058 if (VT != Value.getValueType() && !VT.isInteger())
9059 Value = DAG.getBitcast(VT.getScalarType(), Value);
9060 if (VT != Value.getValueType())
9061 Value = DAG.getSplatBuildVector(VT, dl, Value);
9062
9063 return Value;
9064}
9065
9066/// getMemsetStringVal - Similar to getMemsetValue. Except this is only
9067/// used when a memcpy is turned into a memset when the source is a constant
9068/// string ptr.
9070 const TargetLowering &TLI,
9071 const ConstantDataArraySlice &Slice) {
9072 // Handle vector with all elements zero.
9073 if (Slice.Array == nullptr) {
9074 if (VT.isInteger())
9075 return DAG.getConstant(0, dl, VT);
9076 return DAG.getNode(ISD::BITCAST, dl, VT,
9077 DAG.getConstant(0, dl, VT.changeTypeToInteger()));
9078 }
9079
9080 assert(!VT.isVector() && "Can't handle vector type here!");
9081 unsigned NumVTBits = VT.getSizeInBits();
9082 unsigned NumVTBytes = NumVTBits / 8;
9083 unsigned NumBytes = std::min(NumVTBytes, unsigned(Slice.Length));
9084
9085 APInt Val(NumVTBits, 0);
9086 if (DAG.getDataLayout().isLittleEndian()) {
9087 for (unsigned i = 0; i != NumBytes; ++i)
9088 Val |= (uint64_t)(unsigned char)Slice[i] << i*8;
9089 } else {
9090 for (unsigned i = 0; i != NumBytes; ++i)
9091 Val |= (uint64_t)(unsigned char)Slice[i] << (NumVTBytes-i-1)*8;
9092 }
9093
9094 // If the "cost" of materializing the integer immediate is less than the cost
9095 // of a load, then it is cost effective to turn the load into the immediate.
9096 Type *Ty = VT.getTypeForEVT(*DAG.getContext());
9097 if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty))
9098 return DAG.getConstant(Val, dl, VT);
9099 return SDValue();
9100}
9101
9103 const SDLoc &DL,
9104 const SDNodeFlags Flags) {
9105 SDValue Index = getTypeSize(DL, Base.getValueType(), Offset);
9106 return getMemBasePlusOffset(Base, Index, DL, Flags);
9107}
9108
9110 const SDLoc &DL,
9111 const SDNodeFlags Flags) {
9112 assert(Offset.getValueType().isInteger());
9113 EVT BasePtrVT = Ptr.getValueType();
9114 if (TLI->shouldPreservePtrArith(this->getMachineFunction().getFunction(),
9115 BasePtrVT))
9116 return getNode(ISD::PTRADD, DL, BasePtrVT, Ptr, Offset, Flags);
9117 // InBounds only applies to PTRADD, don't set it if we generate ADD.
9118 SDNodeFlags AddFlags = Flags;
9119 AddFlags.setInBounds(false);
9120 return getNode(ISD::ADD, DL, BasePtrVT, Ptr, Offset, AddFlags);
9121}
9122
9123/// Returns true if memcpy source is constant data.
9125 uint64_t SrcDelta = 0;
9126 GlobalAddressSDNode *G = nullptr;
9127 if (Src.getOpcode() == ISD::GlobalAddress)
9129 else if (Src->isAnyAdd() &&
9130 Src.getOperand(0).getOpcode() == ISD::GlobalAddress &&
9131 Src.getOperand(1).getOpcode() == ISD::Constant) {
9132 G = cast<GlobalAddressSDNode>(Src.getOperand(0));
9133 SrcDelta = Src.getConstantOperandVal(1);
9134 }
9135 if (!G)
9136 return false;
9137
9138 return getConstantDataArrayInfo(G->getGlobal(), Slice, 8,
9139 SrcDelta + G->getOffset());
9140}
9141
9143 SelectionDAG &DAG) {
9144 // On Darwin, -Os means optimize for size without hurting performance, so
9145 // only really optimize for size when -Oz (MinSize) is used.
9147 return MF.getFunction().hasMinSize();
9148 return DAG.shouldOptForSize();
9149}
9150
9152 SmallVector<SDValue, 32> &OutChains, unsigned From,
9153 unsigned To, SmallVector<SDValue, 16> &OutLoadChains,
9154 SmallVector<SDValue, 16> &OutStoreChains) {
9155 assert(OutLoadChains.size() && "Missing loads in memcpy inlining");
9156 assert(OutStoreChains.size() && "Missing stores in memcpy inlining");
9157 SmallVector<SDValue, 16> GluedLoadChains;
9158 for (unsigned i = From; i < To; ++i) {
9159 OutChains.push_back(OutLoadChains[i]);
9160 GluedLoadChains.push_back(OutLoadChains[i]);
9161 }
9162
9163 // Chain for all loads.
9164 SDValue LoadToken = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
9165 GluedLoadChains);
9166
9167 for (unsigned i = From; i < To; ++i) {
9168 StoreSDNode *ST = dyn_cast<StoreSDNode>(OutStoreChains[i]);
9169 SDValue NewStore = DAG.getTruncStore(LoadToken, dl, ST->getValue(),
9170 ST->getBasePtr(), ST->getMemoryVT(),
9171 ST->getMemOperand());
9172 OutChains.push_back(NewStore);
9173 }
9174}
9175
    // Lower an inline-expandable memcpy into an explicit sequence of loads
    // and stores (or plain stores when the source is constant data).
    // NOTE(review): the head of this definition (presumably
    // `static SDValue getMemcpyLoadsAndStores(`) was dropped in this copy --
    // confirm against upstream.
    SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src,
    uint64_t Size, Align Alignment, bool isVol, bool AlwaysInline,
    MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo,
    const AAMDNodes &AAInfo, BatchAAResults *BatchAA) {
  // Turn a memcpy of undef to nop.
  // FIXME: We need to honor volatile even is Src is undef.
  if (Src.isUndef())
    return Chain;

  // Expand memcpy to a series of load and store ops if the size operand falls
  // below a certain threshold.
  // TODO: In the AlwaysInline case, if the size is big then generate a loop
  // rather than maybe a humongous number of loads and stores.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const DataLayout &DL = DAG.getDataLayout();
  LLVMContext &C = *DAG.getContext();
  std::vector<EVT> MemOps;
  bool DstAlignCanChange = false;
  // NOTE(review): the definition of `MF` (presumably
  // `MachineFunction &MF = DAG.getMachineFunction();`) appears to be missing.
  MachineFrameInfo &MFI = MF.getFrameInfo();
  bool OptSize = shouldLowerMemFuncForSize(MF, DAG);
  // NOTE(review): the definition of `FI` (presumably a dyn_cast of Dst to
  // FrameIndexSDNode) appears to be missing here.
  if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
    DstAlignCanChange = true;
  MaybeAlign SrcAlign = DAG.InferPtrAlign(Src);
  if (!SrcAlign || Alignment > *SrcAlign)
    SrcAlign = Alignment;
  assert(SrcAlign && "SrcAlign must be set");
  // NOTE(review): the declaration of `Slice` (ConstantDataArraySlice)
  // appears to be missing here.
  // If marked as volatile, perform a copy even when marked as constant.
  bool CopyFromConstant = !isVol && isMemSrcFromConstant(Src, Slice);
  bool isZeroConstant = CopyFromConstant && Slice.Array == nullptr;
  unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize);
  // A copy of all-zero constant data is really a memset.
  const MemOp Op = isZeroConstant
                       ? MemOp::Set(Size, DstAlignCanChange, Alignment,
                                    /*IsZeroMemset*/ true, isVol)
                       : MemOp::Copy(Size, DstAlignCanChange, Alignment,
                                     *SrcAlign, isVol, CopyFromConstant);
  if (!TLI.findOptimalMemOpLowering(
          C, MemOps, Limit, Op, DstPtrInfo.getAddrSpace(),
          SrcPtrInfo.getAddrSpace(), MF.getFunction().getAttributes(), nullptr))
    return SDValue();

  if (DstAlignCanChange) {
    Type *Ty = MemOps[0].getTypeForEVT(C);
    Align NewAlign = DL.getABITypeAlign(Ty);

    // Don't promote to an alignment that would require dynamic stack
    // realignment which may conflict with optimizations such as tail call
    // optimization.
    // NOTE(review): the definition of `TRI` (target register info from the
    // subtarget) appears to be missing here.
    if (!TRI->hasStackRealignment(MF))
      if (MaybeAlign StackAlign = DL.getStackAlignment())
        NewAlign = std::min(NewAlign, *StackAlign);

    if (NewAlign > Alignment) {
      // Give the stack frame object a larger alignment if needed.
      if (MFI.getObjectAlign(FI->getIndex()) < NewAlign)
        MFI.setObjectAlignment(FI->getIndex(), NewAlign);
      Alignment = NewAlign;
    }
  }

  // Prepare AAInfo for loads/stores after lowering this memcpy.
  AAMDNodes NewAAInfo = AAInfo;
  NewAAInfo.TBAA = NewAAInfo.TBAAStruct = nullptr;

  const Value *SrcVal = dyn_cast_if_present<const Value *>(SrcPtrInfo.V);
  bool isConstant =
      BatchAA && SrcVal &&
      BatchAA->pointsToConstantMemory(MemoryLocation(SrcVal, Size, AAInfo));

  MachineMemOperand::Flags MMOFlags =
  // NOTE(review): the initializer of MMOFlags appears truncated here
  // (presumably selecting MOVolatile vs. MONone based on isVol).
  SmallVector<SDValue, 16> OutLoadChains;
  SmallVector<SDValue, 16> OutStoreChains;
  SmallVector<SDValue, 32> OutChains;
  unsigned NumMemOps = MemOps.size();
  uint64_t SrcOff = 0, DstOff = 0;
  for (unsigned i = 0; i != NumMemOps; ++i) {
    EVT VT = MemOps[i];
    unsigned VTSize = VT.getSizeInBits() / 8;
    SDValue Value, Store;

    if (VTSize > Size) {
      // Issuing an unaligned load / store pair that overlaps with the previous
      // pair. Adjust the offset accordingly.
      assert(i == NumMemOps-1 && i != 0);
      SrcOff -= VTSize - Size;
      DstOff -= VTSize - Size;
    }

    if (CopyFromConstant &&
        (isZeroConstant || (VT.isInteger() && !VT.isVector()))) {
      // It's unlikely a store of a vector immediate can be done in a single
      // instruction. It would require a load from a constantpool first.
      // We only handle zero vectors here.
      // FIXME: Handle other cases where store of vector immediate is done in
      // a single instruction.
      ConstantDataArraySlice SubSlice;
      if (SrcOff < Slice.Length) {
        SubSlice = Slice;
        SubSlice.move(SrcOff);
      } else {
        // This is an out-of-bounds access and hence UB. Pretend we read zero.
        SubSlice.Array = nullptr;
        SubSlice.Offset = 0;
        SubSlice.Length = VTSize;
      }
      Value = getMemsetStringVal(VT, dl, DAG, TLI, SubSlice);
      if (Value.getNode()) {
        Store = DAG.getStore(
            Chain, dl, Value,
            DAG.getObjectPtrOffset(dl, Dst, TypeSize::getFixed(DstOff)),
            DstPtrInfo.getWithOffset(DstOff), Alignment, MMOFlags, NewAAInfo);
        OutChains.push_back(Store);
      }
    }

    if (!Store.getNode()) {
      // The type might not be legal for the target. This should only happen
      // if the type is smaller than a legal type, as on PPC, so the right
      // thing to do is generate a LoadExt/StoreTrunc pair. These simplify
      // to Load/Store if NVT==VT.
      // FIXME does the case above also need this?
      EVT NVT = TLI.getTypeToTransformTo(C, VT);
      assert(NVT.bitsGE(VT));

      bool isDereferenceable =
        SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL);
      MachineMemOperand::Flags SrcMMOFlags = MMOFlags;
      if (isDereferenceable)
      // NOTE(review): the statement governed by this `if` appears to be
      // missing (presumably OR-ing in MODereferenceable).
      if (isConstant)
        SrcMMOFlags |= MachineMemOperand::MOInvariant;

      Value = DAG.getExtLoad(
          ISD::EXTLOAD, dl, NVT, Chain,
          DAG.getObjectPtrOffset(dl, Src, TypeSize::getFixed(SrcOff)),
          SrcPtrInfo.getWithOffset(SrcOff), VT,
          commonAlignment(*SrcAlign, SrcOff), SrcMMOFlags, NewAAInfo);
      OutLoadChains.push_back(Value.getValue(1));

      Store = DAG.getTruncStore(
          Chain, dl, Value,
          DAG.getObjectPtrOffset(dl, Dst, TypeSize::getFixed(DstOff)),
          DstPtrInfo.getWithOffset(DstOff), VT, Alignment, MMOFlags, NewAAInfo);
      OutStoreChains.push_back(Store);
    }
    SrcOff += VTSize;
    DstOff += VTSize;
    Size -= VTSize;
  }

  unsigned GluedLdStLimit = MaxLdStGlue == 0 ?
  // NOTE(review): the ternary's arms appear to be missing here (presumably
  // a TLI query for the max glued stores, else MaxLdStGlue).
  unsigned NumLdStInMemcpy = OutStoreChains.size();

  if (NumLdStInMemcpy) {
    // It may be that memcpy might be converted to memset if it's memcpy
    // of constants. In such a case, we won't have loads and stores, but
    // just stores. In the absence of loads, there is nothing to gang up.
    if ((GluedLdStLimit <= 1) || !EnableMemCpyDAGOpt) {
      // If target does not care, just leave as it.
      for (unsigned i = 0; i < NumLdStInMemcpy; ++i) {
        OutChains.push_back(OutLoadChains[i]);
        OutChains.push_back(OutStoreChains[i]);
      }
    } else {
      // Ld/St less than/equal limit set by target.
      if (NumLdStInMemcpy <= GluedLdStLimit) {
        chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, 0,
                                     NumLdStInMemcpy, OutLoadChains,
                                     OutStoreChains);
      } else {
        unsigned NumberLdChain = NumLdStInMemcpy / GluedLdStLimit;
        unsigned RemainingLdStInMemcpy = NumLdStInMemcpy % GluedLdStLimit;
        unsigned GlueIter = 0;

        // Residual ld/st.
        if (RemainingLdStInMemcpy) {
          // NOTE(review): the callee name on this call appears to be missing
          // (presumably chainLoadsAndStoresForMemcpy).
              DAG, dl, OutChains, NumLdStInMemcpy - RemainingLdStInMemcpy,
              NumLdStInMemcpy, OutLoadChains, OutStoreChains);
        }

        // Gang up the remaining ld/st in groups of GluedLdStLimit, walking
        // backwards from the residual boundary.
        for (unsigned cnt = 0; cnt < NumberLdChain; ++cnt) {
          unsigned IndexFrom = NumLdStInMemcpy - RemainingLdStInMemcpy -
                               GlueIter - GluedLdStLimit;
          unsigned IndexTo = NumLdStInMemcpy - RemainingLdStInMemcpy - GlueIter;
          chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, IndexFrom, IndexTo,
                                       OutLoadChains, OutStoreChains);
          GlueIter += GluedLdStLimit;
        }
      }
    }
  }
  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}
9376
                                        // Lower an inline-expandable memmove:
                                        // issue ALL loads first, token-factor
                                        // them, then issue all stores, so the
                                        // copy is correct even when the
                                        // ranges overlap.
                                        // NOTE(review): the head of this
                                        // definition (presumably `static
                                        // SDValue getMemmoveLoadsAndStores(
                                        // SelectionDAG &DAG, const SDLoc
                                        // &dl,`) was dropped in this copy.
                                        SDValue Chain, SDValue Dst, SDValue Src,
                                        uint64_t Size, Align Alignment,
                                        bool isVol, bool AlwaysInline,
                                        MachinePointerInfo DstPtrInfo,
                                        MachinePointerInfo SrcPtrInfo,
                                        const AAMDNodes &AAInfo) {
  // Turn a memmove of undef to nop.
  // FIXME: We need to honor volatile even is Src is undef.
  if (Src.isUndef())
    return Chain;

  // Expand memmove to a series of load and store ops if the size operand falls
  // below a certain threshold.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const DataLayout &DL = DAG.getDataLayout();
  LLVMContext &C = *DAG.getContext();
  std::vector<EVT> MemOps;
  bool DstAlignCanChange = false;
  // NOTE(review): the definition of `MF` (presumably
  // `MachineFunction &MF = DAG.getMachineFunction();`) appears to be missing.
  MachineFrameInfo &MFI = MF.getFrameInfo();
  bool OptSize = shouldLowerMemFuncForSize(MF, DAG);
  // NOTE(review): the definition of `FI` (presumably a dyn_cast of Dst to
  // FrameIndexSDNode) appears to be missing here.
  if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
    DstAlignCanChange = true;
  MaybeAlign SrcAlign = DAG.InferPtrAlign(Src);
  if (!SrcAlign || Alignment > *SrcAlign)
    SrcAlign = Alignment;
  assert(SrcAlign && "SrcAlign must be set");
  unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize);
  if (!TLI.findOptimalMemOpLowering(
          C, MemOps, Limit,
          MemOp::Copy(Size, DstAlignCanChange, Alignment, *SrcAlign, isVol),
          DstPtrInfo.getAddrSpace(), SrcPtrInfo.getAddrSpace(),
          MF.getFunction().getAttributes(), nullptr))
    return SDValue();

  if (DstAlignCanChange) {
    Type *Ty = MemOps[0].getTypeForEVT(C);
    Align NewAlign = DL.getABITypeAlign(Ty);

    // Don't promote to an alignment that would require dynamic stack
    // realignment which may conflict with optimizations such as tail call
    // optimization.
    // NOTE(review): the definition of `TRI` (target register info from the
    // subtarget) appears to be missing here.
    if (!TRI->hasStackRealignment(MF))
      if (MaybeAlign StackAlign = DL.getStackAlignment())
        NewAlign = std::min(NewAlign, *StackAlign);

    if (NewAlign > Alignment) {
      // Give the stack frame object a larger alignment if needed.
      if (MFI.getObjectAlign(FI->getIndex()) < NewAlign)
        MFI.setObjectAlignment(FI->getIndex(), NewAlign);
      Alignment = NewAlign;
    }
  }

  // Prepare AAInfo for loads/stores after lowering this memmove.
  AAMDNodes NewAAInfo = AAInfo;
  NewAAInfo.TBAA = NewAAInfo.TBAAStruct = nullptr;

  MachineMemOperand::Flags MMOFlags =
  // NOTE(review): the initializer of MMOFlags appears truncated here
  // (presumably selecting MOVolatile vs. MONone based on isVol).
  uint64_t SrcOff = 0;
  SmallVector<SDValue, 8> LoadValues;
  SmallVector<SDValue, 8> LoadChains;
  SmallVector<SDValue, 8> OutChains;
  unsigned NumMemOps = MemOps.size();
  // Pass 1: issue all the loads.
  for (unsigned i = 0; i < NumMemOps; i++) {
    EVT VT = MemOps[i];
    unsigned VTSize = VT.getSizeInBits() / 8;
    SDValue Value;
    bool IsOverlapping = false;

    if (i == NumMemOps - 1 && i != 0 && VTSize > Size - SrcOff) {
      // Issuing an unaligned load / store pair that overlaps with the previous
      // pair. Adjust the offset accordingly.
      SrcOff = Size - VTSize;
      IsOverlapping = true;
    }

    // Calculate the actual alignment at the current offset. The alignment at
    // SrcOff may be lower than the base alignment, especially when using
    // overlapping loads.
    Align SrcAlignAtOffset = commonAlignment(*SrcAlign, SrcOff);
    if (IsOverlapping) {
      // Verify that the target allows misaligned memory accesses at the
      // adjusted offset when using overlapping loads.
      unsigned Fast;
      if (!TLI.allowsMisalignedMemoryAccesses(VT, SrcPtrInfo.getAddrSpace(),
                                              SrcAlignAtOffset, MMOFlags,
                                              &Fast) ||
          !Fast) {
        // This should have been caught by findOptimalMemOpLowering, but verify
        // here for safety.
        return SDValue();
      }
    }

    bool isDereferenceable =
        SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL);
    MachineMemOperand::Flags SrcMMOFlags = MMOFlags;
    if (isDereferenceable)
    // NOTE(review): the statement governed by this `if` appears to be
    // missing (presumably OR-ing in MODereferenceable).
    Value =
        DAG.getLoad(VT, dl, Chain,
                    DAG.getObjectPtrOffset(dl, Src, TypeSize::getFixed(SrcOff)),
                    SrcPtrInfo.getWithOffset(SrcOff), SrcAlignAtOffset,
                    SrcMMOFlags, NewAAInfo);
    LoadValues.push_back(Value);
    LoadChains.push_back(Value.getValue(1));
    SrcOff += VTSize;
  }
  // All loads must complete before any store may begin (overlap safety).
  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
  OutChains.clear();
  uint64_t DstOff = 0;
  // Pass 2: issue all the stores, each chained on the combined load token.
  for (unsigned i = 0; i < NumMemOps; i++) {
    EVT VT = MemOps[i];
    unsigned VTSize = VT.getSizeInBits() / 8;
    SDValue Store;
    bool IsOverlapping = false;

    if (i == NumMemOps - 1 && i != 0 && VTSize > Size - DstOff) {
      // Issuing an unaligned load / store pair that overlaps with the previous
      // pair. Adjust the offset accordingly.
      DstOff = Size - VTSize;
      IsOverlapping = true;
    }

    // Calculate the actual alignment at the current offset. The alignment at
    // DstOff may be lower than the base alignment, especially when using
    // overlapping stores.
    Align DstAlignAtOffset = commonAlignment(Alignment, DstOff);
    if (IsOverlapping) {
      // Verify that the target allows misaligned memory accesses at the
      // adjusted offset when using overlapping stores.
      unsigned Fast;
      if (!TLI.allowsMisalignedMemoryAccesses(VT, DstPtrInfo.getAddrSpace(),
                                              DstAlignAtOffset, MMOFlags,
                                              &Fast) ||
          !Fast) {
        // This should have been caught by findOptimalMemOpLowering, but verify
        // here for safety.
        return SDValue();
      }
    }
    Store = DAG.getStore(
        Chain, dl, LoadValues[i],
        DAG.getObjectPtrOffset(dl, Dst, TypeSize::getFixed(DstOff)),
        DstPtrInfo.getWithOffset(DstOff), DstAlignAtOffset, MMOFlags,
        NewAAInfo);
    OutChains.push_back(Store);
    DstOff += VTSize;
  }

  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}
9534
9535/// Lower the call to 'memset' intrinsic function into a series of store
9536/// operations.
9537///
9538/// \param DAG Selection DAG where lowered code is placed.
9539/// \param dl Link to corresponding IR location.
9540/// \param Chain Control flow dependency.
9541/// \param Dst Pointer to destination memory location.
9542/// \param Src Value of byte to write into the memory.
9543/// \param Size Number of bytes to write.
9544/// \param Alignment Alignment of the destination in bytes.
9545/// \param isVol True if destination is volatile.
9546/// \param AlwaysInline Makes sure no function call is generated.
9547/// \param DstPtrInfo IR information on the memory pointer.
9548/// \returns New head in the control flow, if lowering was successful, empty
9549/// SDValue otherwise.
9550///
9551/// The function tries to replace 'llvm.memset' intrinsic with several store
9552/// operations and value calculation code. This is usually profitable for small
9553/// memory size or when the semantic requires inlining.
9555 SDValue Chain, SDValue Dst, SDValue Src,
9556 uint64_t Size, Align Alignment, bool isVol,
9557 bool AlwaysInline, MachinePointerInfo DstPtrInfo,
9558 const AAMDNodes &AAInfo) {
9559 // Turn a memset of undef to nop.
9560 // FIXME: We need to honor volatile even is Src is undef.
9561 if (Src.isUndef())
9562 return Chain;
9563
9564 // Expand memset to a series of load/store ops if the size operand
9565 // falls below a certain threshold.
9566 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9567 std::vector<EVT> MemOps;
9568 bool DstAlignCanChange = false;
9569 LLVMContext &C = *DAG.getContext();
9571 MachineFrameInfo &MFI = MF.getFrameInfo();
9572 bool OptSize = shouldLowerMemFuncForSize(MF, DAG);
9574 if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
9575 DstAlignCanChange = true;
9576 bool IsZeroVal = isNullConstant(Src);
9577 unsigned Limit = AlwaysInline ? ~0 : TLI.getMaxStoresPerMemset(OptSize);
9578
9579 EVT LargestVT;
9580 if (!TLI.findOptimalMemOpLowering(
9581 C, MemOps, Limit,
9582 MemOp::Set(Size, DstAlignCanChange, Alignment, IsZeroVal, isVol),
9583 DstPtrInfo.getAddrSpace(), ~0u, MF.getFunction().getAttributes(),
9584 &LargestVT))
9585 return SDValue();
9586
9587 if (DstAlignCanChange) {
9588 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
9589 const DataLayout &DL = DAG.getDataLayout();
9590 Align NewAlign = DL.getABITypeAlign(Ty);
9591
9592 // Don't promote to an alignment that would require dynamic stack
9593 // realignment which may conflict with optimizations such as tail call
9594 // optimization.
9596 if (!TRI->hasStackRealignment(MF))
9597 if (MaybeAlign StackAlign = DL.getStackAlignment())
9598 NewAlign = std::min(NewAlign, *StackAlign);
9599
9600 if (NewAlign > Alignment) {
9601 // Give the stack frame object a larger alignment if needed.
9602 if (MFI.getObjectAlign(FI->getIndex()) < NewAlign)
9603 MFI.setObjectAlignment(FI->getIndex(), NewAlign);
9604 Alignment = NewAlign;
9605 }
9606 }
9607
9608 SmallVector<SDValue, 8> OutChains;
9609 uint64_t DstOff = 0;
9610 unsigned NumMemOps = MemOps.size();
9611
9612 // Find the largest store and generate the bit pattern for it.
9613 // If target didn't set LargestVT, compute it from MemOps.
9614 if (!LargestVT.isSimple()) {
9615 LargestVT = MemOps[0];
9616 for (unsigned i = 1; i < NumMemOps; i++)
9617 if (MemOps[i].bitsGT(LargestVT))
9618 LargestVT = MemOps[i];
9619 }
9620 SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl);
9621
9622 // Prepare AAInfo for loads/stores after lowering this memset.
9623 AAMDNodes NewAAInfo = AAInfo;
9624 NewAAInfo.TBAA = NewAAInfo.TBAAStruct = nullptr;
9625
9626 for (unsigned i = 0; i < NumMemOps; i++) {
9627 EVT VT = MemOps[i];
9628 unsigned VTSize = VT.getSizeInBits() / 8;
9629 // The target should specify store types that exactly cover the memset size
9630 // (with the last store potentially being oversized for overlapping stores).
9631 assert(Size > 0 && "Target specified more stores than needed in "
9632 "findOptimalMemOpLowering");
9633 if (VTSize > Size) {
9634 // Issuing an unaligned load / store pair that overlaps with the previous
9635 // pair. Adjust the offset accordingly.
9636 assert(i == NumMemOps-1 && i != 0);
9637 DstOff -= VTSize - Size;
9638 }
9639
9640 // If this store is smaller than the largest store see whether we can get
9641 // the smaller value for free with a truncate or extract vector element and
9642 // then store.
9643 SDValue Value = MemSetValue;
9644 if (VT.bitsLT(LargestVT)) {
9645 unsigned Index;
9646 unsigned NElts = LargestVT.getSizeInBits() / VT.getSizeInBits();
9647 EVT SVT = EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(), NElts);
9648 if (!LargestVT.isVector() && !VT.isVector() &&
9649 TLI.isTruncateFree(LargestVT, VT))
9650 Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue);
9651 else if (LargestVT.isVector() && !VT.isVector() &&
9653 LargestVT.getTypeForEVT(*DAG.getContext()),
9654 VT.getSizeInBits(), Index) &&
9655 TLI.isTypeLegal(SVT) &&
9656 LargestVT.getSizeInBits() == SVT.getSizeInBits()) {
9657 // Target which can combine store(extractelement VectorTy, Idx) can get
9658 // the smaller value for free.
9659 SDValue TailValue = DAG.getNode(ISD::BITCAST, dl, SVT, MemSetValue);
9660 Value = DAG.getExtractVectorElt(dl, VT, TailValue, Index);
9661 } else
9662 Value = getMemsetValue(Src, VT, DAG, dl);
9663 }
9664 assert(Value.getValueType() == VT && "Value with wrong type.");
9665 SDValue Store = DAG.getStore(
9666 Chain, dl, Value,
9667 DAG.getObjectPtrOffset(dl, Dst, TypeSize::getFixed(DstOff)),
9668 DstPtrInfo.getWithOffset(DstOff), Alignment,
9670 NewAAInfo);
9671 OutChains.push_back(Store);
9672 DstOff += VT.getSizeInBits() / 8;
9673 // For oversized overlapping stores, only subtract the remaining bytes.
9674 // For normal stores, subtract the full store size.
9675 if (VTSize > Size) {
9676 Size = 0;
9677 } else {
9678 Size -= VTSize;
9679 }
9680 }
9681
9682 // After processing all stores, Size should be exactly 0. Any remaining bytes
9683 // indicate a bug in the target's findOptimalMemOpLowering implementation.
9684 assert(Size == 0 && "Target's findOptimalMemOpLowering did not specify "
9685 "stores that exactly cover the memset size");
9686
9687 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
9688}
9689
9691 unsigned AS) {
  // NOTE(review): the opening line of this helper's signature (it also takes
  // the TargetLowering, per the TLI-> use below) was lost in extraction; only
  // the trailing `unsigned AS` parameter is visible. Purpose: fail fast when a
  // memory intrinsic in address space AS is about to become a libcall but the
  // target cannot treat the AS->0 pointer cast as a no-op.
 9692 // Lowering memcpy / memset / memmove intrinsics to calls is only valid if all
 9693 // pointer operands can be losslessly bitcasted to pointers of address space 0
 9694 if (AS != 0 && !TLI->getTargetMachine().isNoopAddrSpaceCast(AS, 0)) {
  // report_fatal_error aborts compilation; the offending address space is
  // included in the message.
 9695 report_fatal_error("cannot lower memory intrinsic in address space " +
 9696 Twine(AS));
 9697 }
 9698}
9699
9701 const SelectionDAG *SelDAG,
9702 bool AllowReturnsFirstArg) {
  // NOTE(review): the first signature line (taking `const CallInst *CI`, per
  // the uses below) was lost in extraction.
  // A null call site or one not marked `tail` can never be a tail call.
9703 if (!CI || !CI->isTailCall())
9704 return false;
9705 // TODO: Fix "returns-first-arg" determination so it doesn't depend on which
9706 // helper symbol we lower to.
  // Delegate the real position check to isInTailCallPosition; the final
  // argument's continuation line was lost in extraction — presumably it ANDs
  // AllowReturnsFirstArg with a returns-first-arg query on *CI (confirm
  // against upstream).
9707 return isInTailCallPosition(*CI, SelDAG->getTarget(),
9708 AllowReturnsFirstArg &&
9710}
9711
9712 static std::pair<SDValue, SDValue>
  // NOTE(review): two signature lines were lost in extraction; judging by the
  // visible call sites (getStrcmp/getStrstr/getMemcmp/getStrcpy/getStrlen)
  // they carry the helper name plus `SDValue Chain, const SDLoc &dl,
  // TargetLowering::ArgListTy Args` — confirm against upstream.
9715 const CallInst *CI, RTLIB::Libcall Call,
9716 SelectionDAG *DAG, const TargetLowering *TLI) {
  // Resolve the abstract libcall to a concrete implementation symbol.
9717 RTLIB::LibcallImpl LCImpl = DAG->getLibcalls().getLibcallImpl(Call);
9718
  // No implementation available on this target: return an empty pair so the
  // caller can fall back to other lowering.
9719 if (LCImpl == RTLIB::Unsupported)
9720 return {};
9721
  // These helpers may return their result, so allow returns-first-arg
  // tail-call folding.
9723 bool IsTailCall =
9724 isInTailCallPositionWrapper(CI, DAG, /*AllowReturnsFirstArg=*/true);
9725 SDValue Callee =
9726 DAG->getExternalSymbol(LCImpl, TLI->getPointerTy(DAG->getDataLayout()));
9727
  // NOTE(review): the declaration of CLI (a TargetLowering::CallLoweringInfo)
  // and the `.setLibCallee(...)` line were lost in extraction; the visible
  // continuation passes CI->getType(), Callee and the argument list.
9728 CLI.setDebugLoc(dl)
9729 .setChain(Chain)
9731 CI->getType(), Callee, std::move(Args))
9732 .setTailCall(IsTailCall);
9733
  // Returns {result value, output chain}.
9734 return TLI->LowerCallTo(CLI);
9735}
9736
// Lower a strcmp(S1, S2) call site to the STRCMP libcall via the shared
// runtime-call helper; returns {result value, chain}.
9737std::pair<SDValue, SDValue> SelectionDAG::getStrcmp(SDValue Chain,
9738 const SDLoc &dl, SDValue S1,
9739 SDValue S2,
9740 const CallInst *CI) {
  // NOTE(review): the declaration of PT (the pointer argument type) sat on a
  // line lost in extraction — confirm against upstream.
9742 TargetLowering::ArgListTy Args = {{S1, PT}, {S2, PT}};
9743 return getRuntimeCallSDValueHelper(Chain, dl, std::move(Args), CI,
9744 RTLIB::STRCMP, this, TLI);
9745}
9746
// Lower a strstr(S1, S2) call site to the STRSTR libcall via the shared
// runtime-call helper; returns {result value, chain}.
9747std::pair<SDValue, SDValue> SelectionDAG::getStrstr(SDValue Chain,
9748 const SDLoc &dl, SDValue S1,
9749 SDValue S2,
9750 const CallInst *CI) {
  // NOTE(review): the declaration of PT (the pointer argument type) sat on a
  // line lost in extraction — confirm against upstream.
9752 TargetLowering::ArgListTy Args = {{S1, PT}, {S2, PT}};
9753 return getRuntimeCallSDValueHelper(Chain, dl, std::move(Args), CI,
9754 RTLIB::STRSTR, this, TLI);
9755}
9756
9757std::pair<SDValue, SDValue> SelectionDAG::getMemccpy(SDValue Chain,
9758 const SDLoc &dl,
9759 SDValue Dst, SDValue Src,
9761 const CallInst *CI) {
9763
9765 {Dst, PT},
9766 {Src, PT},
9769 return getRuntimeCallSDValueHelper(Chain, dl, std::move(Args), CI,
9770 RTLIB::MEMCCPY, this, TLI);
9771}
9772
// Lower a memcmp(Mem0, Mem1, Size) call site to the MEMCMP libcall via the
// shared runtime-call helper; returns {result value, chain}.
9773std::pair<SDValue, SDValue>
  // NOTE(review): the signature line naming SelectionDAG::getMemcmp and its
  // leading parameters (Chain, dl, Mem0) was lost in extraction, as were the
  // Args declaration and the {Size, ...} entry — confirm against upstream.
9775 SDValue Mem1, SDValue Size, const CallInst *CI) {
9778 {Mem0, PT},
9779 {Mem1, PT},
9781 return getRuntimeCallSDValueHelper(Chain, dl, std::move(Args), CI,
9782 RTLIB::MEMCMP, this, TLI);
9783}
9784
// Lower a strcpy(Dst, Src) call site to the STRCPY libcall via the shared
// runtime-call helper; returns {result value, chain}.
9785std::pair<SDValue, SDValue> SelectionDAG::getStrcpy(SDValue Chain,
9786 const SDLoc &dl,
9787 SDValue Dst, SDValue Src,
9788 const CallInst *CI) {
  // NOTE(review): the declaration of PT (the pointer argument type) sat on a
  // line lost in extraction — confirm against upstream.
9790 TargetLowering::ArgListTy Args = {{Dst, PT}, {Src, PT}};
9791 return getRuntimeCallSDValueHelper(Chain, dl, std::move(Args), CI,
9792 RTLIB::STRCPY, this, TLI);
9793}
9794
// Lower a strlen(Src) call site to the STRLEN libcall via the shared
// runtime-call helper; returns {result value, chain}.
9795std::pair<SDValue, SDValue> SelectionDAG::getStrlen(SDValue Chain,
9796 const SDLoc &dl,
9797 SDValue Src,
9798 const CallInst *CI) {
9799 // Emit a library call.
  // NOTE(review): the Args declaration (single {Src, pointer-type} entry,
  // judging by the siblings above) sat on lines lost in extraction.
9802 return getRuntimeCallSDValueHelper(Chain, dl, std::move(Args), CI,
9803 RTLIB::STRLEN, this, TLI);
9804}
9805
9807 SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size,
9808 Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI,
9809 std::optional<bool> OverrideTailCall, MachinePointerInfo DstPtrInfo,
9810 MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo,
9811 BatchAAResults *BatchAA) {
9812 // Check to see if we should lower the memcpy to loads and stores first.
9813 // For cases within the target-specified limits, this is the best choice.
9815 if (ConstantSize) {
9816 // Memcpy with size zero? Just return the original chain.
9817 if (ConstantSize->isZero())
9818 return Chain;
9819
9821 *this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), Alignment,
9822 isVol, false, DstPtrInfo, SrcPtrInfo, AAInfo, BatchAA);
9823 if (Result.getNode())
9824 return Result;
9825 }
9826
9827 // Then check to see if we should lower the memcpy with target-specific
9828 // code. If the target chooses to do this, this is the next best.
9829 if (TSI) {
9830 SDValue Result = TSI->EmitTargetCodeForMemcpy(
9831 *this, dl, Chain, Dst, Src, Size, Alignment, isVol, AlwaysInline,
9832 DstPtrInfo, SrcPtrInfo);
9833 if (Result.getNode())
9834 return Result;
9835 }
9836
9837 // If we really need inline code and the target declined to provide it,
9838 // use a (potentially long) sequence of loads and stores.
9839 if (AlwaysInline) {
9840 assert(ConstantSize && "AlwaysInline requires a constant size!");
9842 *this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), Alignment,
9843 isVol, true, DstPtrInfo, SrcPtrInfo, AAInfo, BatchAA);
9844 }
9845
9848
9849 // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc
9850 // memcpy is not guaranteed to be safe. libc memcpys aren't required to
9851 // respect volatile, so they may do things like read or write memory
9852 // beyond the given memory regions. But fixing this isn't easy, and most
9853 // people don't care.
9854
9855 // Emit a library call.
9858 Args.emplace_back(Dst, PtrTy);
9859 Args.emplace_back(Src, PtrTy);
9860 Args.emplace_back(Size, getDataLayout().getIntPtrType(*getContext()));
9861 // FIXME: pass in SDLoc
9863 bool IsTailCall = false;
9864 RTLIB::LibcallImpl MemCpyImpl = TLI->getMemcpyImpl();
9865
9866 if (OverrideTailCall.has_value()) {
9867 IsTailCall = *OverrideTailCall;
9868 } else {
9869 bool LowersToMemcpy = MemCpyImpl == RTLIB::impl_memcpy;
9870 IsTailCall = isInTailCallPositionWrapper(CI, this, LowersToMemcpy);
9871 }
9872
9873 CLI.setDebugLoc(dl)
9874 .setChain(Chain)
9875 .setLibCallee(
9876 Libcalls->getLibcallImplCallingConv(MemCpyImpl),
9877 Dst.getValueType().getTypeForEVT(*getContext()),
9878 getExternalSymbol(MemCpyImpl, TLI->getPointerTy(getDataLayout())),
9879 std::move(Args))
9881 .setTailCall(IsTailCall);
9882
9883 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
9884 return CallResult.second;
9885}
9886
9888 SDValue Dst, SDValue Src, SDValue Size,
9889 Type *SizeTy, unsigned ElemSz,
9890 bool isTailCall,
9891 MachinePointerInfo DstPtrInfo,
9892 MachinePointerInfo SrcPtrInfo) {
9893 // Emit a library call.
9896 Args.emplace_back(Dst, ArgTy);
9897 Args.emplace_back(Src, ArgTy);
9898 Args.emplace_back(Size, SizeTy);
9899
9900 RTLIB::Libcall LibraryCall =
9902 RTLIB::LibcallImpl LibcallImpl = Libcalls->getLibcallImpl(LibraryCall);
9903 if (LibcallImpl == RTLIB::Unsupported)
9904 report_fatal_error("Unsupported element size");
9905
9907 CLI.setDebugLoc(dl)
9908 .setChain(Chain)
9909 .setLibCallee(
9910 Libcalls->getLibcallImplCallingConv(LibcallImpl),
9912 getExternalSymbol(LibcallImpl, TLI->getPointerTy(getDataLayout())),
9913 std::move(Args))
9915 .setTailCall(isTailCall);
9916
9917 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
9918 return CallResult.second;
9919}
9920
9922 SDValue Src, SDValue Size, Align Alignment,
9923 bool isVol, const CallInst *CI,
9924 std::optional<bool> OverrideTailCall,
9925 MachinePointerInfo DstPtrInfo,
9926 MachinePointerInfo SrcPtrInfo,
9927 const AAMDNodes &AAInfo,
9928 BatchAAResults *BatchAA) {
9929 // Check to see if we should lower the memmove to loads and stores first.
9930 // For cases within the target-specified limits, this is the best choice.
9932 if (ConstantSize) {
9933 // Memmove with size zero? Just return the original chain.
9934 if (ConstantSize->isZero())
9935 return Chain;
9936
9938 *this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), Alignment,
9939 isVol, false, DstPtrInfo, SrcPtrInfo, AAInfo);
9940 if (Result.getNode())
9941 return Result;
9942 }
9943
9944 // Then check to see if we should lower the memmove with target-specific
9945 // code. If the target chooses to do this, this is the next best.
9946 if (TSI) {
9947 SDValue Result =
9948 TSI->EmitTargetCodeForMemmove(*this, dl, Chain, Dst, Src, Size,
9949 Alignment, isVol, DstPtrInfo, SrcPtrInfo);
9950 if (Result.getNode())
9951 return Result;
9952 }
9953
9956
9957 // FIXME: If the memmove is volatile, lowering it to plain libc memmove may
9958 // not be safe. See memcpy above for more details.
9959
9960 // Emit a library call.
9963 Args.emplace_back(Dst, PtrTy);
9964 Args.emplace_back(Src, PtrTy);
9965 Args.emplace_back(Size, getDataLayout().getIntPtrType(*getContext()));
9966 // FIXME: pass in SDLoc
9968
9969 RTLIB::LibcallImpl MemmoveImpl = Libcalls->getLibcallImpl(RTLIB::MEMMOVE);
9970
9971 bool IsTailCall = false;
9972 if (OverrideTailCall.has_value()) {
9973 IsTailCall = *OverrideTailCall;
9974 } else {
9975 bool LowersToMemmove = MemmoveImpl == RTLIB::impl_memmove;
9976 IsTailCall = isInTailCallPositionWrapper(CI, this, LowersToMemmove);
9977 }
9978
9979 CLI.setDebugLoc(dl)
9980 .setChain(Chain)
9981 .setLibCallee(
9982 Libcalls->getLibcallImplCallingConv(MemmoveImpl),
9983 Dst.getValueType().getTypeForEVT(*getContext()),
9984 getExternalSymbol(MemmoveImpl, TLI->getPointerTy(getDataLayout())),
9985 std::move(Args))
9987 .setTailCall(IsTailCall);
9988
9989 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
9990 return CallResult.second;
9991}
9992
9994 SDValue Dst, SDValue Src, SDValue Size,
9995 Type *SizeTy, unsigned ElemSz,
9996 bool isTailCall,
9997 MachinePointerInfo DstPtrInfo,
9998 MachinePointerInfo SrcPtrInfo) {
9999 // Emit a library call.
10001 Type *IntPtrTy = getDataLayout().getIntPtrType(*getContext());
10002 Args.emplace_back(Dst, IntPtrTy);
10003 Args.emplace_back(Src, IntPtrTy);
10004 Args.emplace_back(Size, SizeTy);
10005
10006 RTLIB::Libcall LibraryCall =
10008 RTLIB::LibcallImpl LibcallImpl = Libcalls->getLibcallImpl(LibraryCall);
10009 if (LibcallImpl == RTLIB::Unsupported)
10010 report_fatal_error("Unsupported element size");
10011
10013 CLI.setDebugLoc(dl)
10014 .setChain(Chain)
10015 .setLibCallee(
10016 Libcalls->getLibcallImplCallingConv(LibcallImpl),
10018 getExternalSymbol(LibcallImpl, TLI->getPointerTy(getDataLayout())),
10019 std::move(Args))
10021 .setTailCall(isTailCall);
10022
10023 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
10024 return CallResult.second;
10025}
10026
10028 SDValue Src, SDValue Size, Align Alignment,
10029 bool isVol, bool AlwaysInline,
10030 const CallInst *CI,
10031 MachinePointerInfo DstPtrInfo,
10032 const AAMDNodes &AAInfo) {
10033 // Check to see if we should lower the memset to stores first.
10034 // For cases within the target-specified limits, this is the best choice.
10036 if (ConstantSize) {
10037 // Memset with size zero? Just return the original chain.
10038 if (ConstantSize->isZero())
10039 return Chain;
10040
10041 SDValue Result = getMemsetStores(*this, dl, Chain, Dst, Src,
10042 ConstantSize->getZExtValue(), Alignment,
10043 isVol, false, DstPtrInfo, AAInfo);
10044
10045 if (Result.getNode())
10046 return Result;
10047 }
10048
10049 // Then check to see if we should lower the memset with target-specific
10050 // code. If the target chooses to do this, this is the next best.
10051 if (TSI) {
10052 SDValue Result = TSI->EmitTargetCodeForMemset(
10053 *this, dl, Chain, Dst, Src, Size, Alignment, isVol, AlwaysInline, DstPtrInfo);
10054 if (Result.getNode())
10055 return Result;
10056 }
10057
10058 // If we really need inline code and the target declined to provide it,
10059 // use a (potentially long) sequence of loads and stores.
10060 if (AlwaysInline) {
10061 assert(ConstantSize && "AlwaysInline requires a constant size!");
10062 SDValue Result = getMemsetStores(*this, dl, Chain, Dst, Src,
10063 ConstantSize->getZExtValue(), Alignment,
10064 isVol, true, DstPtrInfo, AAInfo);
10065 assert(Result &&
10066 "getMemsetStores must return a valid sequence when AlwaysInline");
10067 return Result;
10068 }
10069
10071
10072 // Emit a library call.
10073 auto &Ctx = *getContext();
10074 const auto& DL = getDataLayout();
10075
10077 // FIXME: pass in SDLoc
10078 CLI.setDebugLoc(dl).setChain(Chain);
10079
10080 RTLIB::LibcallImpl BzeroImpl = Libcalls->getLibcallImpl(RTLIB::BZERO);
10081 bool UseBZero = BzeroImpl != RTLIB::Unsupported && isNullConstant(Src);
10082
10083 // If zeroing out and bzero is present, use it.
10084 if (UseBZero) {
10086 Args.emplace_back(Dst, PointerType::getUnqual(Ctx));
10087 Args.emplace_back(Size, DL.getIntPtrType(Ctx));
10088 CLI.setLibCallee(
10089 Libcalls->getLibcallImplCallingConv(BzeroImpl), Type::getVoidTy(Ctx),
10090 getExternalSymbol(BzeroImpl, TLI->getPointerTy(DL)), std::move(Args));
10091 } else {
10092 RTLIB::LibcallImpl MemsetImpl = Libcalls->getLibcallImpl(RTLIB::MEMSET);
10093
10095 Args.emplace_back(Dst, PointerType::getUnqual(Ctx));
10096 Args.emplace_back(Src, Src.getValueType().getTypeForEVT(Ctx));
10097 Args.emplace_back(Size, DL.getIntPtrType(Ctx));
10098 CLI.setLibCallee(Libcalls->getLibcallImplCallingConv(MemsetImpl),
10099 Dst.getValueType().getTypeForEVT(Ctx),
10100 getExternalSymbol(MemsetImpl, TLI->getPointerTy(DL)),
10101 std::move(Args));
10102 }
10103
10104 RTLIB::LibcallImpl MemsetImpl = Libcalls->getLibcallImpl(RTLIB::MEMSET);
10105 bool LowersToMemset = MemsetImpl == RTLIB::impl_memset;
10106
10107 // If we're going to use bzero, make sure not to tail call unless the
10108 // subsequent return doesn't need a value, as bzero doesn't return the first
10109 // arg unlike memset.
10110 bool ReturnsFirstArg = CI && funcReturnsFirstArgOfCall(*CI) && !UseBZero;
10111 bool IsTailCall =
10112 CI && CI->isTailCall() &&
10113 isInTailCallPosition(*CI, getTarget(), ReturnsFirstArg && LowersToMemset);
10114 CLI.setDiscardResult().setTailCall(IsTailCall);
10115
10116 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
10117 return CallResult.second;
10118}
10119
10122 Type *SizeTy, unsigned ElemSz,
10123 bool isTailCall,
10124 MachinePointerInfo DstPtrInfo) {
10125 // Emit a library call.
10127 Args.emplace_back(Dst, getDataLayout().getIntPtrType(*getContext()));
10128 Args.emplace_back(Value, Type::getInt8Ty(*getContext()));
10129 Args.emplace_back(Size, SizeTy);
10130
10131 RTLIB::Libcall LibraryCall =
10133 RTLIB::LibcallImpl LibcallImpl = Libcalls->getLibcallImpl(LibraryCall);
10134 if (LibcallImpl == RTLIB::Unsupported)
10135 report_fatal_error("Unsupported element size");
10136
10138 CLI.setDebugLoc(dl)
10139 .setChain(Chain)
10140 .setLibCallee(
10141 Libcalls->getLibcallImplCallingConv(LibcallImpl),
10143 getExternalSymbol(LibcallImpl, TLI->getPointerTy(getDataLayout())),
10144 std::move(Args))
10146 .setTailCall(isTailCall);
10147
10148 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
10149 return CallResult.second;
10150}
10151
10152SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
10154 MachineMemOperand *MMO,
10155 ISD::LoadExtType ExtType) {
10157 AddNodeIDNode(ID, Opcode, VTList, Ops);
10158 ID.AddInteger(MemVT.getRawBits());
10159 ID.AddInteger(getSyntheticNodeSubclassData<AtomicSDNode>(
10160 dl.getIROrder(), Opcode, VTList, MemVT, MMO, ExtType));
10161 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
10162 ID.AddInteger(MMO->getFlags());
10163 void* IP = nullptr;
10164 if (auto *E = cast_or_null<AtomicSDNode>(FindNodeOrInsertPos(ID, dl, IP))) {
10165 E->refineAlignment(MMO);
10166 E->refineRanges(MMO);
10167 return SDValue(E, 0);
10168 }
10169
10170 auto *N = newSDNode<AtomicSDNode>(dl.getIROrder(), dl.getDebugLoc(), Opcode,
10171 VTList, MemVT, MMO, ExtType);
10172 createOperands(N, Ops);
10173
10174 CSEMap.InsertNode(N, IP);
10175 InsertNode(N);
10176 SDValue V(N, 0);
10177 NewSDValueDbgMsg(V, "Creating new node: ", this);
10178 return V;
10179}
10180
  // NOTE(review): the first signature line (SelectionDAG::getAtomicCmpSwap
  // taking Opcode and dl, per the uses below) was lost in extraction.
10182 EVT MemVT, SDVTList VTs, SDValue Chain,
10183 SDValue Ptr, SDValue Cmp, SDValue Swp,
10184 MachineMemOperand *MMO) {
  // NOTE(review): the assert's continuation line was lost in extraction —
  // presumably it also admits ATOMIC_CMP_SWAP_WITH_SUCCESS; confirm upstream.
10185 assert(Opcode == ISD::ATOMIC_CMP_SWAP ||
  // Compare value and swap value must have the same type.
10187 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
10188
  // cmpxchg takes four operands: chain, address, expected value, new value.
10189 SDValue Ops[] = {Chain, Ptr, Cmp, Swp};
10190 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
10191}
10192
10193SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
10194 SDValue Chain, SDValue Ptr, SDValue Val,
10195 MachineMemOperand *MMO) {
10196 assert((Opcode == ISD::ATOMIC_LOAD_ADD || Opcode == ISD::ATOMIC_LOAD_SUB ||
10197 Opcode == ISD::ATOMIC_LOAD_AND || Opcode == ISD::ATOMIC_LOAD_CLR ||
10198 Opcode == ISD::ATOMIC_LOAD_OR || Opcode == ISD::ATOMIC_LOAD_XOR ||
10199 Opcode == ISD::ATOMIC_LOAD_NAND || Opcode == ISD::ATOMIC_LOAD_MIN ||
10200 Opcode == ISD::ATOMIC_LOAD_MAX || Opcode == ISD::ATOMIC_LOAD_UMIN ||
10201 Opcode == ISD::ATOMIC_LOAD_UMAX || Opcode == ISD::ATOMIC_LOAD_FADD ||
10202 Opcode == ISD::ATOMIC_LOAD_FSUB || Opcode == ISD::ATOMIC_LOAD_FMAX ||
10203 Opcode == ISD::ATOMIC_LOAD_FMIN ||
10204 Opcode == ISD::ATOMIC_LOAD_FMINIMUM ||
10205 Opcode == ISD::ATOMIC_LOAD_FMAXIMUM ||
10206 Opcode == ISD::ATOMIC_LOAD_UINC_WRAP ||
10207 Opcode == ISD::ATOMIC_LOAD_UDEC_WRAP ||
10208 Opcode == ISD::ATOMIC_LOAD_USUB_COND ||
10209 Opcode == ISD::ATOMIC_LOAD_USUB_SAT || Opcode == ISD::ATOMIC_SWAP ||
10210 Opcode == ISD::ATOMIC_STORE) &&
10211 "Invalid Atomic Op");
10212
10213 EVT VT = Val.getValueType();
10214
10215 SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other) :
10216 getVTList(VT, MVT::Other);
10217 SDValue Ops[] = {Chain, Ptr, Val};
10218 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
10219}
10220
  // NOTE(review): the first signature line (SelectionDAG::getAtomicLoad
  // taking ExtType and dl, per the uses below) was lost in extraction.
10222 EVT MemVT, EVT VT, SDValue Chain,
10223 SDValue Ptr, MachineMemOperand *MMO) {
  // An atomic load produces {loaded value of type VT, output chain}.
10224 SDVTList VTs = getVTList(VT, MVT::Other);
10225 SDValue Ops[] = {Chain, Ptr};
  // Delegate to the generic atomic builder; ExtType describes any extension
  // from MemVT to VT.
10226 return getAtomic(ISD::ATOMIC_LOAD, dl, MemVT, VTs, Ops, MMO, ExtType);
10227}
10228
10229/// getMergeValues - Create a MERGE_VALUES node from the given operands.
  // NOTE(review): the signature line and the declaration of the VTs vector
  // (a SmallVector of EVT, per the reserve/push_back uses) were lost in
  // extraction — confirm against upstream.
  // A single operand needs no merge node; return it directly.
10231 if (Ops.size() == 1)
10232 return Ops[0];
10233
  // Collect one result type per operand, in operand order.
10235 VTs.reserve(Ops.size());
10236 for (const SDValue &Op : Ops)
10237 VTs.push_back(Op.getValueType());
10238 return getNode(ISD::MERGE_VALUES, dl, getVTList(VTs), Ops);
10239}
10240
10242 unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
10243 EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment,
10245 const AAMDNodes &AAInfo) {
10246 if (Size.hasValue() && !Size.getValue())
10248
10250 MachineMemOperand *MMO =
10251 MF.getMachineMemOperand(PtrInfo, Flags, Size, Alignment, AAInfo);
10252
10253 return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO);
10254}
10255
10257 SDVTList VTList,
10258 ArrayRef<SDValue> Ops, EVT MemVT,
10259 MachineMemOperand *MMO) {
10260 return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, ArrayRef(MMO));
10261}
10262
10264 SDVTList VTList,
10265 ArrayRef<SDValue> Ops, EVT MemVT,
10267 assert(!MMOs.empty() && "Must have at least one MMO");
10268 assert(
10269 (Opcode == ISD::INTRINSIC_VOID || Opcode == ISD::INTRINSIC_W_CHAIN ||
10270 Opcode == ISD::PREFETCH ||
10271 (Opcode <= (unsigned)std::numeric_limits<int>::max() &&
10272 Opcode >= ISD::BUILTIN_OP_END && TSI->isTargetMemoryOpcode(Opcode))) &&
10273 "Opcode is not a memory-accessing opcode!");
10274
10276 if (MMOs.size() == 1) {
10277 MemRefs = MMOs[0];
10278 } else {
10279 // Allocate: [size_t count][MMO*][MMO*]...
10280 size_t AllocSize =
10281 sizeof(size_t) + MMOs.size() * sizeof(MachineMemOperand *);
10282 void *Buffer = Allocator.Allocate(AllocSize, alignof(size_t));
10283 size_t *CountPtr = static_cast<size_t *>(Buffer);
10284 *CountPtr = MMOs.size();
10285 MachineMemOperand **Array =
10286 reinterpret_cast<MachineMemOperand **>(CountPtr + 1);
10287 llvm::copy(MMOs, Array);
10288 MemRefs = Array;
10289 }
10290
10291 // Memoize the node unless it returns a glue result.
10293 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
10295 AddNodeIDNode(ID, Opcode, VTList, Ops);
10296 ID.AddInteger(getSyntheticNodeSubclassData<MemIntrinsicSDNode>(
10297 Opcode, dl.getIROrder(), VTList, MemVT, MemRefs));
10298 ID.AddInteger(MemVT.getRawBits());
10299 for (const MachineMemOperand *MMO : MMOs) {
10300 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
10301 ID.AddInteger(MMO->getFlags());
10302 }
10303 void *IP = nullptr;
10304 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
10305 cast<MemIntrinsicSDNode>(E)->refineAlignment(MMOs);
10306 return SDValue(E, 0);
10307 }
10308
10309 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
10310 VTList, MemVT, MemRefs);
10311 createOperands(N, Ops);
10312 CSEMap.InsertNode(N, IP);
10313 } else {
10314 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
10315 VTList, MemVT, MemRefs);
10316 createOperands(N, Ops);
10317 }
10318 InsertNode(N);
10319 SDValue V(N, 0);
10320 NewSDValueDbgMsg(V, "Creating new node: ", this);
10321 return V;
10322}
10323
10325 SDValue Chain, int FrameIndex) {
10326 const unsigned Opcode = IsStart ? ISD::LIFETIME_START : ISD::LIFETIME_END;
10327 const auto VTs = getVTList(MVT::Other);
10328 SDValue Ops[2] = {
10329 Chain,
10330 getFrameIndex(FrameIndex,
10331 getTargetLoweringInfo().getFrameIndexTy(getDataLayout()),
10332 true)};
10333
10335 AddNodeIDNode(ID, Opcode, VTs, Ops);
10336 ID.AddInteger(FrameIndex);
10337 void *IP = nullptr;
10338 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
10339 return SDValue(E, 0);
10340
10341 LifetimeSDNode *N =
10342 newSDNode<LifetimeSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), VTs);
10343 createOperands(N, Ops);
10344 CSEMap.InsertNode(N, IP);
10345 InsertNode(N);
10346 SDValue V(N, 0);
10347 NewSDValueDbgMsg(V, "Creating new node: ", this);
10348 return V;
10349}
10350
10352 uint64_t Guid, uint64_t Index,
10353 uint32_t Attr) {
10354 const unsigned Opcode = ISD::PSEUDO_PROBE;
10355 const auto VTs = getVTList(MVT::Other);
10356 SDValue Ops[] = {Chain};
10358 AddNodeIDNode(ID, Opcode, VTs, Ops);
10359 ID.AddInteger(Guid);
10360 ID.AddInteger(Index);
10361 void *IP = nullptr;
10362 if (SDNode *E = FindNodeOrInsertPos(ID, Dl, IP))
10363 return SDValue(E, 0);
10364
10365 auto *N = newSDNode<PseudoProbeSDNode>(
10366 Opcode, Dl.getIROrder(), Dl.getDebugLoc(), VTs, Guid, Index, Attr);
10367 createOperands(N, Ops);
10368 CSEMap.InsertNode(N, IP);
10369 InsertNode(N);
10370 SDValue V(N, 0);
10371 NewSDValueDbgMsg(V, "Creating new node: ", this);
10372 return V;
10373}
10374
10375/// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
10376/// MachinePointerInfo record from it. This is particularly useful because the
10377/// code generator has many cases where it doesn't bother passing in a
10378/// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
10380 SelectionDAG &DAG, SDValue Ptr,
10381 int64_t Offset = 0) {
10382 // If this is FI+Offset, we can model it.
10383 if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr))
10385 FI->getIndex(), Offset);
10386
10387 // If this is (FI+Offset1)+Offset2, we can model it.
10388 if (Ptr.getOpcode() != ISD::ADD ||
10391 return Info;
10392
10393 int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
10395 DAG.getMachineFunction(), FI,
10396 Offset + cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue());
10397}
10398
10399/// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
10400/// MachinePointerInfo record from it. This is particularly useful because the
10401/// code generator has many cases where it doesn't bother passing in a
10402/// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
10404 SelectionDAG &DAG, SDValue Ptr,
10405 SDValue OffsetOp) {
10406 // If the 'Offset' value isn't a constant, we can't handle this.
10408 return InferPointerInfo(Info, DAG, Ptr, OffsetNode->getSExtValue());
10409 if (OffsetOp.isUndef())
10410 return InferPointerInfo(Info, DAG, Ptr);
10411 return Info;
10412}
10413
10415 EVT VT, const SDLoc &dl, SDValue Chain,
10416 SDValue Ptr, SDValue Offset,
10417 MachinePointerInfo PtrInfo, EVT MemVT,
10418 Align Alignment,
10419 MachineMemOperand::Flags MMOFlags,
10420 const AAMDNodes &AAInfo, const MDNode *Ranges) {
10421 assert(Chain.getValueType() == MVT::Other &&
10422 "Invalid chain type");
10423
10424 MMOFlags |= MachineMemOperand::MOLoad;
10425 assert((MMOFlags & MachineMemOperand::MOStore) == 0);
10426 // If we don't have a PtrInfo, infer the trivial frame index case to simplify
10427 // clients.
10428 if (PtrInfo.V.isNull())
10429 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr, Offset);
10430
10431 TypeSize Size = MemVT.getStoreSize();
10433 MachineMemOperand *MMO = MF.getMachineMemOperand(PtrInfo, MMOFlags, Size,
10434 Alignment, AAInfo, Ranges);
10435 return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO);
10436}
10437
10439 EVT VT, const SDLoc &dl, SDValue Chain,
10440 SDValue Ptr, SDValue Offset, EVT MemVT,
10441 MachineMemOperand *MMO) {
10442 if (VT == MemVT) {
10443 ExtType = ISD::NON_EXTLOAD;
10444 } else if (ExtType == ISD::NON_EXTLOAD) {
10445 assert(VT == MemVT && "Non-extending load from different memory type!");
10446 } else {
10447 // Extending load.
10448 assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) &&
10449 "Should only be an extending load, not truncating!");
10450 assert(VT.isInteger() == MemVT.isInteger() &&
10451 "Cannot convert from FP to Int or Int -> FP!");
10452 assert(VT.isVector() == MemVT.isVector() &&
10453 "Cannot use an ext load to convert to or from a vector!");
10454 assert((!VT.isVector() ||
10456 "Cannot use an ext load to change the number of vector elements!");
10457 }
10458
10459 assert((!MMO->getRanges() ||
10461 ->getBitWidth() == MemVT.getScalarSizeInBits() &&
10462 MemVT.isInteger())) &&
10463 "Range metadata and load type must match!");
10464
10465 bool Indexed = AM != ISD::UNINDEXED;
10466 assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!");
10467
10468 SDVTList VTs = Indexed ?
10469 getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
10470 SDValue Ops[] = { Chain, Ptr, Offset };
10472 AddNodeIDNode(ID, ISD::LOAD, VTs, Ops);
10473 ID.AddInteger(MemVT.getRawBits());
10474 ID.AddInteger(getSyntheticNodeSubclassData<LoadSDNode>(
10475 dl.getIROrder(), VTs, AM, ExtType, MemVT, MMO));
10476 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
10477 ID.AddInteger(MMO->getFlags());
10478 void *IP = nullptr;
10479 if (auto *E = cast_or_null<LoadSDNode>(FindNodeOrInsertPos(ID, dl, IP))) {
10480 E->refineAlignment(MMO);
10481 E->refineRanges(MMO);
10482 return SDValue(E, 0);
10483 }
10484 auto *N = newSDNode<LoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
10485 ExtType, MemVT, MMO);
10486 createOperands(N, Ops);
10487
10488 CSEMap.InsertNode(N, IP);
10489 InsertNode(N);
10490 SDValue V(N, 0);
10491 NewSDValueDbgMsg(V, "Creating new node: ", this);
10492 return V;
10493}
10494
10496 SDValue Ptr, MachinePointerInfo PtrInfo,
10497 MaybeAlign Alignment,
10498 MachineMemOperand::Flags MMOFlags,
10499 const AAMDNodes &AAInfo, const MDNode *Ranges) {
10501 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
10502 PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges);
10503}
10504
10506 SDValue Ptr, MachineMemOperand *MMO) {
10508 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
10509 VT, MMO);
10510}
10511
10513 EVT VT, SDValue Chain, SDValue Ptr,
10514 MachinePointerInfo PtrInfo, EVT MemVT,
10515 MaybeAlign Alignment,
10516 MachineMemOperand::Flags MMOFlags,
10517 const AAMDNodes &AAInfo) {
10519 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, PtrInfo,
10520 MemVT, Alignment, MMOFlags, AAInfo);
10521}
10522
10524 EVT VT, SDValue Chain, SDValue Ptr, EVT MemVT,
10525 MachineMemOperand *MMO) {
10527 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
10528 MemVT, MMO);
10529}
10530
10534 LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
10535 assert(LD->getOffset().isUndef() && "Load is already a indexed load!");
10536 // Don't propagate the invariant or dereferenceable flags.
10537 auto MMOFlags =
10538 LD->getMemOperand()->getFlags() &
10540 return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
10541 LD->getChain(), Base, Offset, LD->getPointerInfo(),
10542 LD->getMemoryVT(), LD->getAlign(), MMOFlags, LD->getAAInfo());
10543}
10544
10546 SDValue Ptr, MachinePointerInfo PtrInfo,
10547 Align Alignment,
10548 MachineMemOperand::Flags MMOFlags,
10549 const AAMDNodes &AAInfo) {
10550 assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
10551
10552 MMOFlags |= MachineMemOperand::MOStore;
10553 assert((MMOFlags & MachineMemOperand::MOLoad) == 0);
10554
10555 if (PtrInfo.V.isNull())
10556 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr);
10557
10560 MachineMemOperand *MMO =
10561 MF.getMachineMemOperand(PtrInfo, MMOFlags, Size, Alignment, AAInfo);
10562 return getStore(Chain, dl, Val, Ptr, MMO);
10563}
10564
10566 SDValue Ptr, MachineMemOperand *MMO) {
10568 return getStore(Chain, dl, Val, Ptr, Undef, Val.getValueType(), MMO,
10570}
10571
10573 SDValue Ptr, SDValue Offset, EVT SVT,
10575 bool IsTruncating) {
10576 assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
10577 EVT VT = Val.getValueType();
10578 if (VT == SVT) {
10579 IsTruncating = false;
10580 } else if (!IsTruncating) {
10581 assert(VT == SVT && "No-truncating store from different memory type!");
10582 } else {
10584 "Should only be a truncating store, not extending!");
10585 assert(VT.isInteger() == SVT.isInteger() && "Can't do FP-INT conversion!");
10586 assert(VT.isVector() == SVT.isVector() &&
10587 "Cannot use trunc store to convert to or from a vector!");
10588 assert((!VT.isVector() ||
10590 "Cannot use trunc store to change the number of vector elements!");
10591 }
10592
10593 bool Indexed = AM != ISD::UNINDEXED;
10594 assert((Indexed || Offset.isUndef()) && "Unindexed store with an offset!");
10595 SDVTList VTs = Indexed ? getVTList(Ptr.getValueType(), MVT::Other)
10596 : getVTList(MVT::Other);
10597 SDValue Ops[] = {Chain, Val, Ptr, Offset};
10600 ID.AddInteger(SVT.getRawBits());
10601 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
10602 dl.getIROrder(), VTs, AM, IsTruncating, SVT, MMO));
10603 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
10604 ID.AddInteger(MMO->getFlags());
10605 void *IP = nullptr;
10606 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
10607 cast<StoreSDNode>(E)->refineAlignment(MMO);
10608 return SDValue(E, 0);
10609 }
10610 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
10611 IsTruncating, SVT, MMO);
10612 createOperands(N, Ops);
10613
10614 CSEMap.InsertNode(N, IP);
10615 InsertNode(N);
10616 SDValue V(N, 0);
10617 NewSDValueDbgMsg(V, "Creating new node: ", this);
10618 return V;
10619}
10620
10622 SDValue Ptr, MachinePointerInfo PtrInfo,
10623 EVT SVT, Align Alignment,
10624 MachineMemOperand::Flags MMOFlags,
10625 const AAMDNodes &AAInfo) {
10626 assert(Chain.getValueType() == MVT::Other &&
10627 "Invalid chain type");
10628
10629 MMOFlags |= MachineMemOperand::MOStore;
10630 assert((MMOFlags & MachineMemOperand::MOLoad) == 0);
10631
10632 if (PtrInfo.V.isNull())
10633 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr);
10634
10636 MachineMemOperand *MMO = MF.getMachineMemOperand(
10637 PtrInfo, MMOFlags, SVT.getStoreSize(), Alignment, AAInfo);
10638 return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO);
10639}
10640
10642 SDValue Ptr, EVT SVT,
10643 MachineMemOperand *MMO) {
10645 return getStore(Chain, dl, Val, Ptr, Undef, SVT, MMO, ISD::UNINDEXED, true);
10646}
10647
10651 StoreSDNode *ST = cast<StoreSDNode>(OrigStore);
10652 assert(ST->getOffset().isUndef() && "Store is already a indexed store!");
10653 return getStore(ST->getChain(), dl, ST->getValue(), Base, Offset,
10654 ST->getMemoryVT(), ST->getMemOperand(), AM,
10655 ST->isTruncatingStore());
10656}
10657
10659 ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &dl,
10660 SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Mask, SDValue EVL,
10661 MachinePointerInfo PtrInfo, EVT MemVT, Align Alignment,
10662 MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo,
10663 const MDNode *Ranges, bool IsExpanding) {
10664 MMOFlags |= MachineMemOperand::MOLoad;
10665 assert((MMOFlags & MachineMemOperand::MOStore) == 0);
10666 // If we don't have a PtrInfo, infer the trivial frame index case to simplify
10667 // clients.
10668 if (PtrInfo.V.isNull())
10669 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr, Offset);
10670
10671 TypeSize Size = MemVT.getStoreSize();
10673 MachineMemOperand *MMO = MF.getMachineMemOperand(PtrInfo, MMOFlags, Size,
10674 Alignment, AAInfo, Ranges);
10675 return getLoadVP(AM, ExtType, VT, dl, Chain, Ptr, Offset, Mask, EVL, MemVT,
10676 MMO, IsExpanding);
10677}
10678
10680 ISD::LoadExtType ExtType, EVT VT,
10681 const SDLoc &dl, SDValue Chain, SDValue Ptr,
10682 SDValue Offset, SDValue Mask, SDValue EVL,
10683 EVT MemVT, MachineMemOperand *MMO,
10684 bool IsExpanding) {
10685 assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
10686 assert(Mask.getValueType().getVectorElementCount() ==
10687 VT.getVectorElementCount() &&
10688 "Vector width mismatch between mask and data");
10689
10690 bool Indexed = AM != ISD::UNINDEXED;
10691 assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!");
10692
10693 SDVTList VTs = Indexed ? getVTList(VT, Ptr.getValueType(), MVT::Other)
10694 : getVTList(VT, MVT::Other);
10695 SDValue Ops[] = {Chain, Ptr, Offset, Mask, EVL};
10697 AddNodeIDNode(ID, ISD::VP_LOAD, VTs, Ops);
10698 ID.AddInteger(MemVT.getRawBits());
10699 ID.AddInteger(getSyntheticNodeSubclassData<VPLoadSDNode>(
10700 dl.getIROrder(), VTs, AM, ExtType, IsExpanding, MemVT, MMO));
10701 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
10702 ID.AddInteger(MMO->getFlags());
10703 void *IP = nullptr;
10704 if (auto *E = cast_or_null<VPLoadSDNode>(FindNodeOrInsertPos(ID, dl, IP))) {
10705 E->refineAlignment(MMO);
10706 E->refineRanges(MMO);
10707 return SDValue(E, 0);
10708 }
10709 auto *N = newSDNode<VPLoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
10710 ExtType, IsExpanding, MemVT, MMO);
10711 createOperands(N, Ops);
10712
10713 CSEMap.InsertNode(N, IP);
10714 InsertNode(N);
10715 SDValue V(N, 0);
10716 NewSDValueDbgMsg(V, "Creating new node: ", this);
10717 return V;
10718}
10719
10721 SDValue Ptr, SDValue Mask, SDValue EVL,
10722 MachinePointerInfo PtrInfo,
10723 MaybeAlign Alignment,
10724 MachineMemOperand::Flags MMOFlags,
10725 const AAMDNodes &AAInfo, const MDNode *Ranges,
10726 bool IsExpanding) {
10728 return getLoadVP(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
10729 Mask, EVL, PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges,
10730 IsExpanding);
10731}
10732
10734 SDValue Ptr, SDValue Mask, SDValue EVL,
10735 MachineMemOperand *MMO, bool IsExpanding) {
10737 return getLoadVP(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
10738 Mask, EVL, VT, MMO, IsExpanding);
10739}
10740
10742 EVT VT, SDValue Chain, SDValue Ptr,
10743 SDValue Mask, SDValue EVL,
10744 MachinePointerInfo PtrInfo, EVT MemVT,
10745 MaybeAlign Alignment,
10746 MachineMemOperand::Flags MMOFlags,
10747 const AAMDNodes &AAInfo, bool IsExpanding) {
10749 return getLoadVP(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, Mask,
10750 EVL, PtrInfo, MemVT, Alignment, MMOFlags, AAInfo, nullptr,
10751 IsExpanding);
10752}
10753
10755 EVT VT, SDValue Chain, SDValue Ptr,
10756 SDValue Mask, SDValue EVL, EVT MemVT,
10757 MachineMemOperand *MMO, bool IsExpanding) {
10759 return getLoadVP(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, Mask,
10760 EVL, MemVT, MMO, IsExpanding);
10761}
10762
10766 auto *LD = cast<VPLoadSDNode>(OrigLoad);
10767 assert(LD->getOffset().isUndef() && "Load is already a indexed load!");
10768 // Don't propagate the invariant or dereferenceable flags.
10769 auto MMOFlags =
10770 LD->getMemOperand()->getFlags() &
10772 return getLoadVP(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
10773 LD->getChain(), Base, Offset, LD->getMask(),
10774 LD->getVectorLength(), LD->getPointerInfo(),
10775 LD->getMemoryVT(), LD->getAlign(), MMOFlags, LD->getAAInfo(),
10776 nullptr, LD->isExpandingLoad());
10777}
10778
10780 SDValue Ptr, SDValue Offset, SDValue Mask,
10781 SDValue EVL, EVT MemVT, MachineMemOperand *MMO,
10782 ISD::MemIndexedMode AM, bool IsTruncating,
10783 bool IsCompressing) {
10784 assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
10785 assert(Mask.getValueType().getVectorElementCount() ==
10787 "Vector width mismatch between mask and data");
10788
10789 bool Indexed = AM != ISD::UNINDEXED;
10790 assert((Indexed || Offset.isUndef()) && "Unindexed vp_store with an offset!");
10791 SDVTList VTs = Indexed ? getVTList(Ptr.getValueType(), MVT::Other)
10792 : getVTList(MVT::Other);
10793 SDValue Ops[] = {Chain, Val, Ptr, Offset, Mask, EVL};
10795 AddNodeIDNode(ID, ISD::VP_STORE, VTs, Ops);
10796 ID.AddInteger(MemVT.getRawBits());
10797 ID.AddInteger(getSyntheticNodeSubclassData<VPStoreSDNode>(
10798 dl.getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO));
10799 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
10800 ID.AddInteger(MMO->getFlags());
10801 void *IP = nullptr;
10802 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
10803 cast<VPStoreSDNode>(E)->refineAlignment(MMO);
10804 return SDValue(E, 0);
10805 }
10806 auto *N = newSDNode<VPStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
10807 IsTruncating, IsCompressing, MemVT, MMO);
10808 createOperands(N, Ops);
10809
10810 CSEMap.InsertNode(N, IP);
10811 InsertNode(N);
10812 SDValue V(N, 0);
10813 NewSDValueDbgMsg(V, "Creating new node: ", this);
10814 return V;
10815}
10816
10818 SDValue Val, SDValue Ptr, SDValue Mask,
10819 SDValue EVL, MachinePointerInfo PtrInfo,
10820 EVT SVT, Align Alignment,
10821 MachineMemOperand::Flags MMOFlags,
10822 const AAMDNodes &AAInfo,
10823 bool IsCompressing) {
10824 assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
10825
10826 MMOFlags |= MachineMemOperand::MOStore;
10827 assert((MMOFlags & MachineMemOperand::MOLoad) == 0);
10828
10829 if (PtrInfo.V.isNull())
10830 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr);
10831
10833 MachineMemOperand *MMO = MF.getMachineMemOperand(
10834 PtrInfo, MMOFlags, SVT.getStoreSize(), Alignment, AAInfo);
10835 return getTruncStoreVP(Chain, dl, Val, Ptr, Mask, EVL, SVT, MMO,
10836 IsCompressing);
10837}
10838
10840 SDValue Val, SDValue Ptr, SDValue Mask,
10841 SDValue EVL, EVT SVT,
10842 MachineMemOperand *MMO,
10843 bool IsCompressing) {
10844 EVT VT = Val.getValueType();
10845
10846 assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
10847 if (VT == SVT)
10848 return getStoreVP(Chain, dl, Val, Ptr, getUNDEF(Ptr.getValueType()), Mask,
10849 EVL, VT, MMO, ISD::UNINDEXED,
10850 /*IsTruncating*/ false, IsCompressing);
10851
10853 "Should only be a truncating store, not extending!");
10854 assert(VT.isInteger() == SVT.isInteger() && "Can't do FP-INT conversion!");
10855 assert(VT.isVector() == SVT.isVector() &&
10856 "Cannot use trunc store to convert to or from a vector!");
10857 assert((!VT.isVector() ||
10859 "Cannot use trunc store to change the number of vector elements!");
10860
10861 SDVTList VTs = getVTList(MVT::Other);
10863 SDValue Ops[] = {Chain, Val, Ptr, Undef, Mask, EVL};
10865 AddNodeIDNode(ID, ISD::VP_STORE, VTs, Ops);
10866 ID.AddInteger(SVT.getRawBits());
10867 ID.AddInteger(getSyntheticNodeSubclassData<VPStoreSDNode>(
10868 dl.getIROrder(), VTs, ISD::UNINDEXED, true, IsCompressing, SVT, MMO));
10869 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
10870 ID.AddInteger(MMO->getFlags());
10871 void *IP = nullptr;
10872 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
10873 cast<VPStoreSDNode>(E)->refineAlignment(MMO);
10874 return SDValue(E, 0);
10875 }
10876 auto *N =
10877 newSDNode<VPStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
10878 ISD::UNINDEXED, true, IsCompressing, SVT, MMO);
10879 createOperands(N, Ops);
10880
10881 CSEMap.InsertNode(N, IP);
10882 InsertNode(N);
10883 SDValue V(N, 0);
10884 NewSDValueDbgMsg(V, "Creating new node: ", this);
10885 return V;
10886}
10887
10891 auto *ST = cast<VPStoreSDNode>(OrigStore);
10892 assert(ST->getOffset().isUndef() && "Store is already an indexed store!");
10893 SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
10894 SDValue Ops[] = {ST->getChain(), ST->getValue(), Base,
10895 Offset, ST->getMask(), ST->getVectorLength()};
10897 AddNodeIDNode(ID, ISD::VP_STORE, VTs, Ops);
10898 ID.AddInteger(ST->getMemoryVT().getRawBits());
10899 ID.AddInteger(ST->getRawSubclassData());
10900 ID.AddInteger(ST->getPointerInfo().getAddrSpace());
10901 ID.AddInteger(ST->getMemOperand()->getFlags());
10902 void *IP = nullptr;
10903 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
10904 return SDValue(E, 0);
10905
10906 auto *N = newSDNode<VPStoreSDNode>(
10907 dl.getIROrder(), dl.getDebugLoc(), VTs, AM, ST->isTruncatingStore(),
10908 ST->isCompressingStore(), ST->getMemoryVT(), ST->getMemOperand());
10909 createOperands(N, Ops);
10910
10911 CSEMap.InsertNode(N, IP);
10912 InsertNode(N);
10913 SDValue V(N, 0);
10914 NewSDValueDbgMsg(V, "Creating new node: ", this);
10915 return V;
10916}
10917
10919 ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &DL,
10920 SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask,
10921 SDValue EVL, EVT MemVT, MachineMemOperand *MMO, bool IsExpanding) {
10922 bool Indexed = AM != ISD::UNINDEXED;
10923 assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!");
10924
10925 SDValue Ops[] = {Chain, Ptr, Offset, Stride, Mask, EVL};
10926 SDVTList VTs = Indexed ? getVTList(VT, Ptr.getValueType(), MVT::Other)
10927 : getVTList(VT, MVT::Other);
10929 AddNodeIDNode(ID, ISD::EXPERIMENTAL_VP_STRIDED_LOAD, VTs, Ops);
10930 ID.AddInteger(VT.getRawBits());
10931 ID.AddInteger(getSyntheticNodeSubclassData<VPStridedLoadSDNode>(
10932 DL.getIROrder(), VTs, AM, ExtType, IsExpanding, MemVT, MMO));
10933 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
10934
10935 void *IP = nullptr;
10936 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
10937 cast<VPStridedLoadSDNode>(E)->refineAlignment(MMO);
10938 return SDValue(E, 0);
10939 }
10940
10941 auto *N =
10942 newSDNode<VPStridedLoadSDNode>(DL.getIROrder(), DL.getDebugLoc(), VTs, AM,
10943 ExtType, IsExpanding, MemVT, MMO);
10944 createOperands(N, Ops);
10945 CSEMap.InsertNode(N, IP);
10946 InsertNode(N);
10947 SDValue V(N, 0);
10948 NewSDValueDbgMsg(V, "Creating new node: ", this);
10949 return V;
10950}
10951
10953 SDValue Ptr, SDValue Stride,
10954 SDValue Mask, SDValue EVL,
10955 MachineMemOperand *MMO,
10956 bool IsExpanding) {
10958 return getStridedLoadVP(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, DL, Chain, Ptr,
10959 Undef, Stride, Mask, EVL, VT, MMO, IsExpanding);
10960}
10961
10963 ISD::LoadExtType ExtType, const SDLoc &DL, EVT VT, SDValue Chain,
10964 SDValue Ptr, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT,
10965 MachineMemOperand *MMO, bool IsExpanding) {
10967 return getStridedLoadVP(ISD::UNINDEXED, ExtType, VT, DL, Chain, Ptr, Undef,
10968 Stride, Mask, EVL, MemVT, MMO, IsExpanding);
10969}
10970
10972 SDValue Val, SDValue Ptr,
10973 SDValue Offset, SDValue Stride,
10974 SDValue Mask, SDValue EVL, EVT MemVT,
10975 MachineMemOperand *MMO,
10977 bool IsTruncating, bool IsCompressing) {
10978 assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
10979 bool Indexed = AM != ISD::UNINDEXED;
10980 assert((Indexed || Offset.isUndef()) && "Unindexed vp_store with an offset!");
10981 SDVTList VTs = Indexed ? getVTList(Ptr.getValueType(), MVT::Other)
10982 : getVTList(MVT::Other);
10983 SDValue Ops[] = {Chain, Val, Ptr, Offset, Stride, Mask, EVL};
10985 AddNodeIDNode(ID, ISD::EXPERIMENTAL_VP_STRIDED_STORE, VTs, Ops);
10986 ID.AddInteger(MemVT.getRawBits());
10987 ID.AddInteger(getSyntheticNodeSubclassData<VPStridedStoreSDNode>(
10988 DL.getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO));
10989 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
10990 void *IP = nullptr;
10991 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
10992 cast<VPStridedStoreSDNode>(E)->refineAlignment(MMO);
10993 return SDValue(E, 0);
10994 }
10995 auto *N = newSDNode<VPStridedStoreSDNode>(DL.getIROrder(), DL.getDebugLoc(),
10996 VTs, AM, IsTruncating,
10997 IsCompressing, MemVT, MMO);
10998 createOperands(N, Ops);
10999
11000 CSEMap.InsertNode(N, IP);
11001 InsertNode(N);
11002 SDValue V(N, 0);
11003 NewSDValueDbgMsg(V, "Creating new node: ", this);
11004 return V;
11005}
11006
11008 SDValue Val, SDValue Ptr,
11009 SDValue Stride, SDValue Mask,
11010 SDValue EVL, EVT SVT,
11011 MachineMemOperand *MMO,
11012 bool IsCompressing) {
11013 EVT VT = Val.getValueType();
11014
11015 assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
11016 if (VT == SVT)
11017 return getStridedStoreVP(Chain, DL, Val, Ptr, getUNDEF(Ptr.getValueType()),
11018 Stride, Mask, EVL, VT, MMO, ISD::UNINDEXED,
11019 /*IsTruncating*/ false, IsCompressing);
11020
11022 "Should only be a truncating store, not extending!");
11023 assert(VT.isInteger() == SVT.isInteger() && "Can't do FP-INT conversion!");
11024 assert(VT.isVector() == SVT.isVector() &&
11025 "Cannot use trunc store to convert to or from a vector!");
11026 assert((!VT.isVector() ||
11028 "Cannot use trunc store to change the number of vector elements!");
11029
11030 SDVTList VTs = getVTList(MVT::Other);
11032 SDValue Ops[] = {Chain, Val, Ptr, Undef, Stride, Mask, EVL};
11034 AddNodeIDNode(ID, ISD::EXPERIMENTAL_VP_STRIDED_STORE, VTs, Ops);
11035 ID.AddInteger(SVT.getRawBits());
11036 ID.AddInteger(getSyntheticNodeSubclassData<VPStridedStoreSDNode>(
11037 DL.getIROrder(), VTs, ISD::UNINDEXED, true, IsCompressing, SVT, MMO));
11038 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
11039 void *IP = nullptr;
11040 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
11041 cast<VPStridedStoreSDNode>(E)->refineAlignment(MMO);
11042 return SDValue(E, 0);
11043 }
11044 auto *N = newSDNode<VPStridedStoreSDNode>(DL.getIROrder(), DL.getDebugLoc(),
11045 VTs, ISD::UNINDEXED, true,
11046 IsCompressing, SVT, MMO);
11047 createOperands(N, Ops);
11048
11049 CSEMap.InsertNode(N, IP);
11050 InsertNode(N);
11051 SDValue V(N, 0);
11052 NewSDValueDbgMsg(V, "Creating new node: ", this);
11053 return V;
11054}
11055
11058 ISD::MemIndexType IndexType) {
11059 assert(Ops.size() == 6 && "Incompatible number of operands");
11060
11062 AddNodeIDNode(ID, ISD::VP_GATHER, VTs, Ops);
11063 ID.AddInteger(VT.getRawBits());
11064 ID.AddInteger(getSyntheticNodeSubclassData<VPGatherSDNode>(
11065 dl.getIROrder(), VTs, VT, MMO, IndexType));
11066 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
11067 ID.AddInteger(MMO->getFlags());
11068 void *IP = nullptr;
11069 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
11070 cast<VPGatherSDNode>(E)->refineAlignment(MMO);
11071 return SDValue(E, 0);
11072 }
11073
11074 auto *N = newSDNode<VPGatherSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
11075 VT, MMO, IndexType);
11076 createOperands(N, Ops);
11077
11078 assert(N->getMask().getValueType().getVectorElementCount() ==
11079 N->getValueType(0).getVectorElementCount() &&
11080 "Vector width mismatch between mask and data");
11081 assert(N->getIndex().getValueType().getVectorElementCount().isScalable() ==
11082 N->getValueType(0).getVectorElementCount().isScalable() &&
11083 "Scalable flags of index and data do not match");
11085 N->getIndex().getValueType().getVectorElementCount(),
11086 N->getValueType(0).getVectorElementCount()) &&
11087 "Vector width mismatch between index and data");
11088 assert(isa<ConstantSDNode>(N->getScale()) &&
11089 N->getScale()->getAsAPIntVal().isPowerOf2() &&
11090 "Scale should be a constant power of 2");
11091
11092 CSEMap.InsertNode(N, IP);
11093 InsertNode(N);
11094 SDValue V(N, 0);
11095 NewSDValueDbgMsg(V, "Creating new node: ", this);
11096 return V;
11097}
11098
11101 MachineMemOperand *MMO,
11102 ISD::MemIndexType IndexType) {
11103 assert(Ops.size() == 7 && "Incompatible number of operands");
11104
11106 AddNodeIDNode(ID, ISD::VP_SCATTER, VTs, Ops);
11107 ID.AddInteger(VT.getRawBits());
11108 ID.AddInteger(getSyntheticNodeSubclassData<VPScatterSDNode>(
11109 dl.getIROrder(), VTs, VT, MMO, IndexType));
11110 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
11111 ID.AddInteger(MMO->getFlags());
11112 void *IP = nullptr;
11113 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
11114 cast<VPScatterSDNode>(E)->refineAlignment(MMO);
11115 return SDValue(E, 0);
11116 }
11117 auto *N = newSDNode<VPScatterSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
11118 VT, MMO, IndexType);
11119 createOperands(N, Ops);
11120
11121 assert(N->getMask().getValueType().getVectorElementCount() ==
11122 N->getValue().getValueType().getVectorElementCount() &&
11123 "Vector width mismatch between mask and data");
11124 assert(
11125 N->getIndex().getValueType().getVectorElementCount().isScalable() ==
11126 N->getValue().getValueType().getVectorElementCount().isScalable() &&
11127 "Scalable flags of index and data do not match");
11129 N->getIndex().getValueType().getVectorElementCount(),
11130 N->getValue().getValueType().getVectorElementCount()) &&
11131 "Vector width mismatch between index and data");
11132 assert(isa<ConstantSDNode>(N->getScale()) &&
11133 N->getScale()->getAsAPIntVal().isPowerOf2() &&
11134 "Scale should be a constant power of 2");
11135
11136 CSEMap.InsertNode(N, IP);
11137 InsertNode(N);
11138 SDValue V(N, 0);
11139 NewSDValueDbgMsg(V, "Creating new node: ", this);
11140 return V;
11141}
11142
11145 SDValue PassThru, EVT MemVT,
11146 MachineMemOperand *MMO,
11148 ISD::LoadExtType ExtTy, bool isExpanding) {
11149 bool Indexed = AM != ISD::UNINDEXED;
11150 assert((Indexed || Offset.isUndef()) &&
11151 "Unindexed masked load with an offset!");
11152 SDVTList VTs = Indexed ? getVTList(VT, Base.getValueType(), MVT::Other)
11153 : getVTList(VT, MVT::Other);
11154 SDValue Ops[] = {Chain, Base, Offset, Mask, PassThru};
11157 ID.AddInteger(MemVT.getRawBits());
11158 ID.AddInteger(getSyntheticNodeSubclassData<MaskedLoadSDNode>(
11159 dl.getIROrder(), VTs, AM, ExtTy, isExpanding, MemVT, MMO));
11160 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
11161 ID.AddInteger(MMO->getFlags());
11162 void *IP = nullptr;
11163 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
11164 cast<MaskedLoadSDNode>(E)->refineAlignment(MMO);
11165 return SDValue(E, 0);
11166 }
11167 auto *N = newSDNode<MaskedLoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
11168 AM, ExtTy, isExpanding, MemVT, MMO);
11169 createOperands(N, Ops);
11170
11171 CSEMap.InsertNode(N, IP);
11172 InsertNode(N);
11173 SDValue V(N, 0);
11174 NewSDValueDbgMsg(V, "Creating new node: ", this);
11175 return V;
11176}
11177
11182 assert(LD->getOffset().isUndef() && "Masked load is already a indexed load!");
11183 return getMaskedLoad(OrigLoad.getValueType(), dl, LD->getChain(), Base,
11184 Offset, LD->getMask(), LD->getPassThru(),
11185 LD->getMemoryVT(), LD->getMemOperand(), AM,
11186 LD->getExtensionType(), LD->isExpandingLoad());
11187}
11188
11191 SDValue Mask, EVT MemVT,
11192 MachineMemOperand *MMO,
11193 ISD::MemIndexedMode AM, bool IsTruncating,
11194 bool IsCompressing) {
11195 assert(Chain.getValueType() == MVT::Other &&
11196 "Invalid chain type");
11197 bool Indexed = AM != ISD::UNINDEXED;
11198 assert((Indexed || Offset.isUndef()) &&
11199 "Unindexed masked store with an offset!");
11200 SDVTList VTs = Indexed ? getVTList(Base.getValueType(), MVT::Other)
11201 : getVTList(MVT::Other);
11202 SDValue Ops[] = {Chain, Val, Base, Offset, Mask};
11205 ID.AddInteger(MemVT.getRawBits());
11206 ID.AddInteger(getSyntheticNodeSubclassData<MaskedStoreSDNode>(
11207 dl.getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO));
11208 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
11209 ID.AddInteger(MMO->getFlags());
11210 void *IP = nullptr;
11211 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
11212 cast<MaskedStoreSDNode>(E)->refineAlignment(MMO);
11213 return SDValue(E, 0);
11214 }
11215 auto *N =
11216 newSDNode<MaskedStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
11217 IsTruncating, IsCompressing, MemVT, MMO);
11218 createOperands(N, Ops);
11219
11220 CSEMap.InsertNode(N, IP);
11221 InsertNode(N);
11222 SDValue V(N, 0);
11223 NewSDValueDbgMsg(V, "Creating new node: ", this);
11224 return V;
11225}
11226
11231 assert(ST->getOffset().isUndef() &&
11232 "Masked store is already a indexed store!");
11233 return getMaskedStore(ST->getChain(), dl, ST->getValue(), Base, Offset,
11234 ST->getMask(), ST->getMemoryVT(), ST->getMemOperand(),
11235 AM, ST->isTruncatingStore(), ST->isCompressingStore());
11236}
11237
11240 MachineMemOperand *MMO,
11241 ISD::MemIndexType IndexType,
11242 ISD::LoadExtType ExtTy) {
11243 assert(Ops.size() == 6 && "Incompatible number of operands");
11244
11247 ID.AddInteger(MemVT.getRawBits());
11248 ID.AddInteger(getSyntheticNodeSubclassData<MaskedGatherSDNode>(
11249 dl.getIROrder(), VTs, MemVT, MMO, IndexType, ExtTy));
11250 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
11251 ID.AddInteger(MMO->getFlags());
11252 void *IP = nullptr;
11253 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
11254 cast<MaskedGatherSDNode>(E)->refineAlignment(MMO);
11255 return SDValue(E, 0);
11256 }
11257
11258 auto *N = newSDNode<MaskedGatherSDNode>(dl.getIROrder(), dl.getDebugLoc(),
11259 VTs, MemVT, MMO, IndexType, ExtTy);
11260 createOperands(N, Ops);
11261
11262 assert(N->getPassThru().getValueType() == N->getValueType(0) &&
11263 "Incompatible type of the PassThru value in MaskedGatherSDNode");
11264 assert(N->getMask().getValueType().getVectorElementCount() ==
11265 N->getValueType(0).getVectorElementCount() &&
11266 "Vector width mismatch between mask and data");
11267 assert(N->getIndex().getValueType().getVectorElementCount().isScalable() ==
11268 N->getValueType(0).getVectorElementCount().isScalable() &&
11269 "Scalable flags of index and data do not match");
11271 N->getIndex().getValueType().getVectorElementCount(),
11272 N->getValueType(0).getVectorElementCount()) &&
11273 "Vector width mismatch between index and data");
11274 assert(isa<ConstantSDNode>(N->getScale()) &&
11275 N->getScale()->getAsAPIntVal().isPowerOf2() &&
11276 "Scale should be a constant power of 2");
11277
11278 CSEMap.InsertNode(N, IP);
11279 InsertNode(N);
11280 SDValue V(N, 0);
11281 NewSDValueDbgMsg(V, "Creating new node: ", this);
11282 return V;
11283}
11284
11287 MachineMemOperand *MMO,
11288 ISD::MemIndexType IndexType,
11289 bool IsTrunc) {
11290 assert(Ops.size() == 6 && "Incompatible number of operands");
11291
11294 ID.AddInteger(MemVT.getRawBits());
11295 ID.AddInteger(getSyntheticNodeSubclassData<MaskedScatterSDNode>(
11296 dl.getIROrder(), VTs, MemVT, MMO, IndexType, IsTrunc));
11297 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
11298 ID.AddInteger(MMO->getFlags());
11299 void *IP = nullptr;
11300 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
11301 cast<MaskedScatterSDNode>(E)->refineAlignment(MMO);
11302 return SDValue(E, 0);
11303 }
11304
11305 auto *N = newSDNode<MaskedScatterSDNode>(dl.getIROrder(), dl.getDebugLoc(),
11306 VTs, MemVT, MMO, IndexType, IsTrunc);
11307 createOperands(N, Ops);
11308
11309 assert(N->getMask().getValueType().getVectorElementCount() ==
11310 N->getValue().getValueType().getVectorElementCount() &&
11311 "Vector width mismatch between mask and data");
11312 assert(
11313 N->getIndex().getValueType().getVectorElementCount().isScalable() ==
11314 N->getValue().getValueType().getVectorElementCount().isScalable() &&
11315 "Scalable flags of index and data do not match");
11317 N->getIndex().getValueType().getVectorElementCount(),
11318 N->getValue().getValueType().getVectorElementCount()) &&
11319 "Vector width mismatch between index and data");
11320 assert(isa<ConstantSDNode>(N->getScale()) &&
11321 N->getScale()->getAsAPIntVal().isPowerOf2() &&
11322 "Scale should be a constant power of 2");
11323
11324 CSEMap.InsertNode(N, IP);
11325 InsertNode(N);
11326 SDValue V(N, 0);
11327 NewSDValueDbgMsg(V, "Creating new node: ", this);
11328 return V;
11329}
11330
11332 const SDLoc &dl, ArrayRef<SDValue> Ops,
11333 MachineMemOperand *MMO,
11334 ISD::MemIndexType IndexType) {
11335 assert(Ops.size() == 7 && "Incompatible number of operands");
11336
11339 ID.AddInteger(MemVT.getRawBits());
11340 ID.AddInteger(getSyntheticNodeSubclassData<MaskedHistogramSDNode>(
11341 dl.getIROrder(), VTs, MemVT, MMO, IndexType));
11342 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
11343 ID.AddInteger(MMO->getFlags());
11344 void *IP = nullptr;
11345 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
11346 cast<MaskedGatherSDNode>(E)->refineAlignment(MMO);
11347 return SDValue(E, 0);
11348 }
11349
11350 auto *N = newSDNode<MaskedHistogramSDNode>(dl.getIROrder(), dl.getDebugLoc(),
11351 VTs, MemVT, MMO, IndexType);
11352 createOperands(N, Ops);
11353
11354 assert(N->getMask().getValueType().getVectorElementCount() ==
11355 N->getIndex().getValueType().getVectorElementCount() &&
11356 "Vector width mismatch between mask and data");
11357 assert(isa<ConstantSDNode>(N->getScale()) &&
11358 N->getScale()->getAsAPIntVal().isPowerOf2() &&
11359 "Scale should be a constant power of 2");
11360 assert(N->getInc().getValueType().isInteger() && "Non integer update value");
11361
11362 CSEMap.InsertNode(N, IP);
11363 InsertNode(N);
11364 SDValue V(N, 0);
11365 NewSDValueDbgMsg(V, "Creating new node: ", this);
11366 return V;
11367}
11368
11370 SDValue Ptr, SDValue Mask, SDValue EVL,
11371 MachineMemOperand *MMO) {
11372 SDVTList VTs = getVTList(VT, EVL.getValueType(), MVT::Other);
11373 SDValue Ops[] = {Chain, Ptr, Mask, EVL};
11375 AddNodeIDNode(ID, ISD::VP_LOAD_FF, VTs, Ops);
11376 ID.AddInteger(VT.getRawBits());
11377 ID.AddInteger(getSyntheticNodeSubclassData<VPLoadFFSDNode>(DL.getIROrder(),
11378 VTs, VT, MMO));
11379 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
11380 ID.AddInteger(MMO->getFlags());
11381 void *IP = nullptr;
11382 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
11383 cast<VPLoadFFSDNode>(E)->refineAlignment(MMO);
11384 return SDValue(E, 0);
11385 }
11386 auto *N = newSDNode<VPLoadFFSDNode>(DL.getIROrder(), DL.getDebugLoc(), VTs,
11387 VT, MMO);
11388 createOperands(N, Ops);
11389
11390 CSEMap.InsertNode(N, IP);
11391 InsertNode(N);
11392 SDValue V(N, 0);
11393 NewSDValueDbgMsg(V, "Creating new node: ", this);
11394 return V;
11395}
11396
11398 EVT MemVT, MachineMemOperand *MMO) {
11399 assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
11400 SDVTList VTs = getVTList(MVT::Other);
11401 SDValue Ops[] = {Chain, Ptr};
11404 ID.AddInteger(MemVT.getRawBits());
11405 ID.AddInteger(getSyntheticNodeSubclassData<FPStateAccessSDNode>(
11406 ISD::GET_FPENV_MEM, dl.getIROrder(), VTs, MemVT, MMO));
11407 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
11408 ID.AddInteger(MMO->getFlags());
11409 void *IP = nullptr;
11410 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
11411 return SDValue(E, 0);
11412
11413 auto *N = newSDNode<FPStateAccessSDNode>(ISD::GET_FPENV_MEM, dl.getIROrder(),
11414 dl.getDebugLoc(), VTs, MemVT, MMO);
11415 createOperands(N, Ops);
11416
11417 CSEMap.InsertNode(N, IP);
11418 InsertNode(N);
11419 SDValue V(N, 0);
11420 NewSDValueDbgMsg(V, "Creating new node: ", this);
11421 return V;
11422}
11423
11425 EVT MemVT, MachineMemOperand *MMO) {
11426 assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
11427 SDVTList VTs = getVTList(MVT::Other);
11428 SDValue Ops[] = {Chain, Ptr};
11431 ID.AddInteger(MemVT.getRawBits());
11432 ID.AddInteger(getSyntheticNodeSubclassData<FPStateAccessSDNode>(
11433 ISD::SET_FPENV_MEM, dl.getIROrder(), VTs, MemVT, MMO));
11434 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
11435 ID.AddInteger(MMO->getFlags());
11436 void *IP = nullptr;
11437 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
11438 return SDValue(E, 0);
11439
11440 auto *N = newSDNode<FPStateAccessSDNode>(ISD::SET_FPENV_MEM, dl.getIROrder(),
11441 dl.getDebugLoc(), VTs, MemVT, MMO);
11442 createOperands(N, Ops);
11443
11444 CSEMap.InsertNode(N, IP);
11445 InsertNode(N);
11446 SDValue V(N, 0);
11447 NewSDValueDbgMsg(V, "Creating new node: ", this);
11448 return V;
11449}
11450
11452 // select undef, T, F --> T (if T is a constant), otherwise F
11453 // select, ?, undef, F --> F
11454 // select, ?, T, undef --> T
11455 if (Cond.isUndef())
11456 return isConstantValueOfAnyType(T) ? T : F;
11457 if (T.isUndef())
11459 if (F.isUndef())
11461
11462 // select true, T, F --> T
11463 // select false, T, F --> F
11464 if (auto C = isBoolConstant(Cond))
11465 return *C ? T : F;
11466
11467 // select ?, T, T --> T
11468 if (T == F)
11469 return T;
11470
11471 return SDValue();
11472}
11473
// SelectionDAG::simplifyShift — fold a shift (SHL/SRA/SRL) with undef, zero,
// or out-of-range operands; returns a null SDValue() when no fold applies.
// NOTE(review): extraction dropped the signature (orig. line 11474) and the
// condition line guarding the two zero-operand folds (orig. line 11484 —
// presumably a null-constant test on X or Y); restore from upstream.
11475 // shift undef, Y --> 0 (can always assume that the undef value is 0)
11476 if (X.isUndef())
11477 return getConstant(0, SDLoc(X.getNode()), X.getValueType());
11478 // shift X, undef --> undef (because it may shift by the bitwidth)
11479 if (Y.isUndef())
11480 return getUNDEF(X.getValueType());
11481
11482 // shift 0, Y --> 0
11483 // shift X, 0 --> X
11485 return X;
11486
11487 // shift X, C >= bitwidth(X) --> undef
11488 // All vector elements must be too big (or undef) to avoid partial undefs.
11489 auto isShiftTooBig = [X](ConstantSDNode *Val) {
11490 return !Val || Val->getAPIntValue().uge(X.getScalarValueSizeInBits());
11491 };
// The trailing 'true' allows undef elements to satisfy the predicate.
11492 if (ISD::matchUnaryPredicate(Y, isShiftTooBig, true))
11493 return getUNDEF(X.getValueType());
11494
11495 // shift i1/vXi1 X, Y --> X (any non-zero shift amount is undefined).
11496 if (X.getValueType().getScalarType() == MVT::i1)
11497 return X;
11498
11499 return SDValue();
11500}
11501
// SelectionDAG::simplifyFPBinop — fold FP binops using fast-math flags and a
// constant/splat-constant RHS; returns a null SDValue() when no fold applies.
// NOTE(review): the first line of the signature (orig. line 11502) was
// dropped by extraction; only its continuation survives below.
11503 SDNodeFlags Flags) {
11504 // If this operation has 'nnan' or 'ninf' and at least 1 disallowed operand
11505 // (an undef operand can be chosen to be Nan/Inf), then the result of this
11506 // operation is poison. That result can be relaxed to undef.
11507 ConstantFPSDNode *XC = isConstOrConstSplatFP(X, /* AllowUndefs */ true);
11508 ConstantFPSDNode *YC = isConstOrConstSplatFP(Y, /* AllowUndefs */ true);
11509 bool HasNan = (XC && XC->getValueAPF().isNaN()) ||
11510 (YC && YC->getValueAPF().isNaN());
11511 bool HasInf = (XC && XC->getValueAPF().isInfinity()) ||
11512 (YC && YC->getValueAPF().isInfinity());
11513
11514 if (Flags.hasNoNaNs() && (HasNan || X.isUndef() || Y.isUndef()))
11515 return getUNDEF(X.getValueType());
11516
11517 if (Flags.hasNoInfs() && (HasInf || X.isUndef() || Y.isUndef()))
11518 return getUNDEF(X.getValueType());
11519
// All remaining folds need a constant RHS.
11520 if (!YC)
11521 return SDValue();
11522
11523 // X + -0.0 --> X
11524 if (Opcode == ISD::FADD)
11525 if (YC->getValueAPF().isNegZero())
11526 return X;
11527
11528 // X - +0.0 --> X
11529 if (Opcode == ISD::FSUB)
11530 if (YC->getValueAPF().isPosZero())
11531 return X;
11532
11533 // X * 1.0 --> X
11534 // X / 1.0 --> X
11535 if (Opcode == ISD::FMUL || Opcode == ISD::FDIV)
11536 if (YC->getValueAPF().isExactlyValue(1.0))
11537 return X;
11538
11539 // X * 0.0 --> 0.0
// Requires nnan (0*NaN=NaN) and nsz (0*-x=-0.0) to be a valid fold.
11540 if (Opcode == ISD::FMUL && Flags.hasNoNaNs() && Flags.hasNoSignedZeros())
11541 if (YC->getValueAPF().isZero())
11542 return getConstantFP(0.0, SDLoc(Y), Y.getValueType());
11543
11544 return SDValue();
11545}
11546
// SelectionDAG::getVAArg — build a VAARG node (value + output chain) with the
// alignment encoded as a target constant operand.
// NOTE(review): the first line of the signature (orig. line 11547) was
// dropped by extraction.
11548 SDValue Ptr, SDValue SV, unsigned Align) {
11549 SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, dl, MVT::i32) };
11550 return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops);
11551}
11552
// getNode overloads taking SDUse / SDValue operand lists; both forward to the
// arity-specific or flags-taking overloads.
// NOTE(review): extraction dropped the `ArrayRef<SDUse> Ops) {` signature
// continuation (orig. line 11554), the SmallVector copy of the SDUse array
// (orig. line 11565), and the `ArrayRef<SDValue> Ops) {` continuation of the
// second overload (orig. line 11570); restore from upstream.
11553SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
11555 switch (Ops.size()) {
11556 case 0: return getNode(Opcode, DL, VT);
11557 case 1: return getNode(Opcode, DL, VT, Ops[0].get());
11558 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
11559 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
11560 default: break;
11561 }
11562
11563 // Copy from an SDUse array into an SDValue array for use with
11564 // the regular getNode logic.
11566 return getNode(Opcode, DL, VT, NewOps);
11567}
11568
// Flags come from the active FlagInserter when one is installed.
11569SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
11571 SDNodeFlags Flags;
11572 if (Inserter)
11573 Flags = Inserter->getFlags();
11574 return getNode(Opcode, DL, VT, Ops, Flags);
11575}
11576
// Generic N-ary getNode for a single result type: dispatches small arities to
// the specialized overloads, applies opcode-specific folds/canonicalizations,
// then CSEs and creates the node (glue-typed results are never memoized).
// NOTE(review): extraction dropped orig. line 11601 (presumably
// `case ISD::CONCAT_VECTORS:` given the fold call below) and orig. line 11666
// (presumably `FoldingSetNodeID ID;`); restore from upstream.
11577SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
11578 ArrayRef<SDValue> Ops, const SDNodeFlags Flags) {
11579 unsigned NumOps = Ops.size();
11580 switch (NumOps) {
11581 case 0: return getNode(Opcode, DL, VT);
11582 case 1: return getNode(Opcode, DL, VT, Ops[0], Flags);
11583 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Flags);
11584 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2], Flags);
11585 default: break;
11586 }
11587
11588#ifndef NDEBUG
11589 for (const auto &Op : Ops)
11590 assert(Op.getOpcode() != ISD::DELETED_NODE &&
11591 "Operand is DELETED_NODE!");
11592#endif
11593
11594 switch (Opcode) {
11595 default: break;
11596 case ISD::BUILD_VECTOR:
11597 // Attempt to simplify BUILD_VECTOR.
11598 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
11599 return V;
11600 break;
11602 if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this))
11603 return V;
11604 break;
11605 case ISD::SELECT_CC:
11606 assert(NumOps == 5 && "SELECT_CC takes 5 operands!");
11607 assert(Ops[0].getValueType() == Ops[1].getValueType() &&
11608 "LHS and RHS of condition must have same type!");
11609 assert(Ops[2].getValueType() == Ops[3].getValueType() &&
11610 "True and False arms of SelectCC must have same type!");
11611 assert(Ops[2].getValueType() == VT &&
11612 "select_cc node must be of same type as true and false value!");
11613 assert((!Ops[0].getValueType().isVector() ||
11614 Ops[0].getValueType().getVectorElementCount() ==
11615 VT.getVectorElementCount()) &&
11616 "Expected select_cc with vector result to have the same sized "
11617 "comparison type!");
11618 break;
11619 case ISD::BR_CC:
11620 assert(NumOps == 5 && "BR_CC takes 5 operands!");
11621 assert(Ops[2].getValueType() == Ops[3].getValueType() &&
11622 "LHS/RHS of comparison should match types!");
11623 break;
// For i1 masks the VP arithmetic ops collapse to cheaper logical ops.
11624 case ISD::VP_ADD:
11625 case ISD::VP_SUB:
11626 // If it is VP_ADD/VP_SUB mask operation then turn it to VP_XOR
11627 if (VT.getScalarType() == MVT::i1)
11628 Opcode = ISD::VP_XOR;
11629 break;
11630 case ISD::VP_MUL:
11631 // If it is VP_MUL mask operation then turn it to VP_AND
11632 if (VT.getScalarType() == MVT::i1)
11633 Opcode = ISD::VP_AND;
11634 break;
11635 case ISD::VP_REDUCE_MUL:
11636 // If it is VP_REDUCE_MUL mask operation then turn it to VP_REDUCE_AND
11637 if (VT == MVT::i1)
11638 Opcode = ISD::VP_REDUCE_AND;
11639 break;
11640 case ISD::VP_REDUCE_ADD:
11641 // If it is VP_REDUCE_ADD mask operation then turn it to VP_REDUCE_XOR
11642 if (VT == MVT::i1)
11643 Opcode = ISD::VP_REDUCE_XOR;
11644 break;
11645 case ISD::VP_REDUCE_SMAX:
11646 case ISD::VP_REDUCE_UMIN:
11647 // If it is VP_REDUCE_SMAX/VP_REDUCE_UMIN mask operation then turn it to
11648 // VP_REDUCE_AND.
11649 if (VT == MVT::i1)
11650 Opcode = ISD::VP_REDUCE_AND;
11651 break;
11652 case ISD::VP_REDUCE_SMIN:
11653 case ISD::VP_REDUCE_UMAX:
11654 // If it is VP_REDUCE_SMIN/VP_REDUCE_UMAX mask operation then turn it to
11655 // VP_REDUCE_OR.
11656 if (VT == MVT::i1)
11657 Opcode = ISD::VP_REDUCE_OR;
11658 break;
11659 }
11660
11661 // Memoize nodes.
11662 SDNode *N;
11663 SDVTList VTs = getVTList(VT);
11664
11665 if (VT != MVT::Glue) {
11667 AddNodeIDNode(ID, Opcode, VTs, Ops);
11668 void *IP = nullptr;
11669
// CSE hit: merge flags conservatively and reuse the existing node.
11670 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
11671 E->intersectFlagsWith(Flags);
11672 return SDValue(E, 0);
11673 }
11674
11675 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
11676 createOperands(N, Ops);
11677
11678 CSEMap.InsertNode(N, IP);
11679 } else {
// Glue results are never CSE'd.
11680 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
11681 createOperands(N, Ops);
11682 }
11683
11684 N->setFlags(Flags);
11685 InsertNode(N);
11686 SDValue V(N, 0);
11687 NewSDValueDbgMsg(V, "Creating new node: ", this);
11688 return V;
11689}
11690
11691SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
11692 ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops) {
11693 SDNodeFlags Flags;
11694 if (Inserter)
11695 Flags = Inserter->getFlags();
11696 return getNode(Opcode, DL, getVTList(ResultTys), Ops, Flags);
11697}
11698
// Forwarding getNode overloads: result-type-list form and VT-list form; the
// latter seeds flags from the active FlagInserter when one is installed.
// NOTE(review): extraction dropped the signature continuations at orig. lines
// 11700 (`ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops,`) and 11706
// (`ArrayRef<SDValue> Ops) {`); restore from upstream.
11699SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
11701 const SDNodeFlags Flags) {
11702 return getNode(Opcode, DL, getVTList(ResultTys), Ops, Flags);
11703}
11704
11705SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
11707 SDNodeFlags Flags;
11708 if (Inserter)
11709 Flags = Inserter->getFlags();
11710 return getNode(Opcode, DL, VTList, Ops, Flags);
11711}
11712
// Generic N-ary getNode for multi-result VT lists: single-VT lists forward to
// the EVT overload; multi-result opcodes get validated/folded, then the node
// is CSE'd and created (glue-typed last results are never memoized).
// NOTE(review): extraction dropped several hyperlinked lines — orig. 11787-88
// (presumably ConstantSDNode dyn_casts of Ops[0]/Ops[1] named LHS/RHS), 11815
// (presumably a ConstantFPSDNode dyn_cast of Ops[0] named C opening the
// `if` whose body follows), 11827 (`case ISD::STRICT_FP_EXTEND:`), 11842
// (`case ISD::STRICT_FP_ROUND:`), and 11863 (`FoldingSetNodeID ID;`).
// Restore from upstream before compiling.
11713SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
11714 ArrayRef<SDValue> Ops, const SDNodeFlags Flags) {
11715 if (VTList.NumVTs == 1)
11716 return getNode(Opcode, DL, VTList.VTs[0], Ops, Flags);
11717
11718#ifndef NDEBUG
11719 for (const auto &Op : Ops)
11720 assert(Op.getOpcode() != ISD::DELETED_NODE &&
11721 "Operand is DELETED_NODE!");
11722#endif
11723
11724 switch (Opcode) {
11725 case ISD::SADDO:
11726 case ISD::UADDO:
11727 case ISD::SSUBO:
11728 case ISD::USUBO: {
11729 assert(VTList.NumVTs == 2 && Ops.size() == 2 &&
11730 "Invalid add/sub overflow op!");
11731 assert(VTList.VTs[0].isInteger() && VTList.VTs[1].isInteger() &&
11732 Ops[0].getValueType() == Ops[1].getValueType() &&
11733 Ops[0].getValueType() == VTList.VTs[0] &&
11734 "Binary operator types must match!");
11735 SDValue N1 = Ops[0], N2 = Ops[1];
11736 canonicalizeCommutativeBinop(Opcode, N1, N2);
11737
11738 // (X +- 0) -> X with zero-overflow.
11739 ConstantSDNode *N2CV = isConstOrConstSplat(N2, /*AllowUndefs*/ false,
11740 /*AllowTruncation*/ true);
11741 if (N2CV && N2CV->isZero()) {
11742 SDValue ZeroOverFlow = getConstant(0, DL, VTList.VTs[1]);
11743 return getNode(ISD::MERGE_VALUES, DL, VTList, {N1, ZeroOverFlow}, Flags);
11744 }
11745
// i1/vXi1 overflow ops reduce to pure logic; freeze first so the two uses of
// each operand observe a consistent value if it was undef/poison.
11746 if (VTList.VTs[0].getScalarType() == MVT::i1 &&
11747 VTList.VTs[1].getScalarType() == MVT::i1) {
11748 SDValue F1 = getFreeze(N1);
11749 SDValue F2 = getFreeze(N2);
11750 // {vXi1,vXi1} (u/s)addo(vXi1 x, vXi1y) -> {xor(x,y),and(x,y)}
11751 if (Opcode == ISD::UADDO || Opcode == ISD::SADDO)
11752 return getNode(ISD::MERGE_VALUES, DL, VTList,
11753 {getNode(ISD::XOR, DL, VTList.VTs[0], F1, F2),
11754 getNode(ISD::AND, DL, VTList.VTs[1], F1, F2)},
11755 Flags);
11756 // {vXi1,vXi1} (u/s)subo(vXi1 x, vXi1y) -> {xor(x,y),and(~x,y)}
11757 if (Opcode == ISD::USUBO || Opcode == ISD::SSUBO) {
11758 SDValue NotF1 = getNOT(DL, F1, VTList.VTs[0]);
11759 return getNode(ISD::MERGE_VALUES, DL, VTList,
11760 {getNode(ISD::XOR, DL, VTList.VTs[0], F1, F2),
11761 getNode(ISD::AND, DL, VTList.VTs[1], NotF1, F2)},
11762 Flags);
11763 }
11764 }
11765 break;
11766 }
11767 case ISD::SADDO_CARRY:
11768 case ISD::UADDO_CARRY:
11769 case ISD::SSUBO_CARRY:
11770 case ISD::USUBO_CARRY:
11771 assert(VTList.NumVTs == 2 && Ops.size() == 3 &&
11772 "Invalid add/sub overflow op!");
11773 assert(VTList.VTs[0].isInteger() && VTList.VTs[1].isInteger() &&
11774 Ops[0].getValueType() == Ops[1].getValueType() &&
11775 Ops[0].getValueType() == VTList.VTs[0] &&
11776 Ops[2].getValueType() == VTList.VTs[1] &&
11777 "Binary operator types must match!");
11778 break;
11779 case ISD::SMUL_LOHI:
11780 case ISD::UMUL_LOHI: {
11781 assert(VTList.NumVTs == 2 && Ops.size() == 2 && "Invalid mul lo/hi op!");
11782 assert(VTList.VTs[0].isInteger() && VTList.VTs[0] == VTList.VTs[1] &&
11783 VTList.VTs[0] == Ops[0].getValueType() &&
11784 VTList.VTs[0] == Ops[1].getValueType() &&
11785 "Binary operator types must match!");
11786 // Constant fold.
11789 if (LHS && RHS) {
// Widen to 2*Width, multiply, then split the product into Hi/Lo halves.
11790 unsigned Width = VTList.VTs[0].getScalarSizeInBits();
11791 unsigned OutWidth = Width * 2;
11792 APInt Val = LHS->getAPIntValue();
11793 APInt Mul = RHS->getAPIntValue();
11794 if (Opcode == ISD::SMUL_LOHI) {
11795 Val = Val.sext(OutWidth);
11796 Mul = Mul.sext(OutWidth);
11797 } else {
11798 Val = Val.zext(OutWidth);
11799 Mul = Mul.zext(OutWidth);
11800 }
11801 Val *= Mul;
11802
11803 SDValue Hi =
11804 getConstant(Val.extractBits(Width, Width), DL, VTList.VTs[0]);
11805 SDValue Lo = getConstant(Val.trunc(Width), DL, VTList.VTs[0]);
11806 return getNode(ISD::MERGE_VALUES, DL, VTList, {Lo, Hi}, Flags);
11807 }
11808 break;
11809 }
11810 case ISD::FFREXP: {
11811 assert(VTList.NumVTs == 2 && Ops.size() == 1 && "Invalid ffrexp op!");
11812 assert(VTList.VTs[0].isFloatingPoint() && VTList.VTs[1].isInteger() &&
11813 VTList.VTs[0] == Ops[0].getValueType() && "frexp type mismatch");
11814
11816 int FrexpExp;
11817 APFloat FrexpMant =
11818 frexp(C->getValueAPF(), FrexpExp, APFloat::rmNearestTiesToEven);
11819 SDValue Result0 = getConstantFP(FrexpMant, DL, VTList.VTs[0]);
// Non-finite inputs report exponent 0.
11820 SDValue Result1 = getSignedConstant(FrexpMant.isFinite() ? FrexpExp : 0,
11821 DL, VTList.VTs[1]);
11822 return getNode(ISD::MERGE_VALUES, DL, VTList, {Result0, Result1}, Flags);
11823 }
11824
11825 break;
11826 }
11828 assert(VTList.NumVTs == 2 && Ops.size() == 2 &&
11829 "Invalid STRICT_FP_EXTEND!");
11830 assert(VTList.VTs[0].isFloatingPoint() &&
11831 Ops[1].getValueType().isFloatingPoint() && "Invalid FP cast!");
11832 assert(VTList.VTs[0].isVector() == Ops[1].getValueType().isVector() &&
11833 "STRICT_FP_EXTEND result type should be vector iff the operand "
11834 "type is vector!");
11835 assert((!VTList.VTs[0].isVector() ||
11836 VTList.VTs[0].getVectorElementCount() ==
11837 Ops[1].getValueType().getVectorElementCount()) &&
11838 "Vector element count mismatch!");
11839 assert(Ops[1].getValueType().bitsLT(VTList.VTs[0]) &&
11840 "Invalid fpext node, dst <= src!");
11841 break;
11843 assert(VTList.NumVTs == 2 && Ops.size() == 3 && "Invalid STRICT_FP_ROUND!");
11844 assert(VTList.VTs[0].isVector() == Ops[1].getValueType().isVector() &&
11845 "STRICT_FP_ROUND result type should be vector iff the operand "
11846 "type is vector!");
11847 assert((!VTList.VTs[0].isVector() ||
11848 VTList.VTs[0].getVectorElementCount() ==
11849 Ops[1].getValueType().getVectorElementCount()) &&
11850 "Vector element count mismatch!");
11851 assert(VTList.VTs[0].isFloatingPoint() &&
11852 Ops[1].getValueType().isFloatingPoint() &&
11853 VTList.VTs[0].bitsLT(Ops[1].getValueType()) &&
11854 Ops[2].getOpcode() == ISD::TargetConstant &&
11855 (Ops[2]->getAsZExtVal() == 0 || Ops[2]->getAsZExtVal() == 1) &&
11856 "Invalid STRICT_FP_ROUND!");
11857 break;
11858 }
11859
11860 // Memoize the node unless it returns a glue result.
11861 SDNode *N;
11862 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
11864 AddNodeIDNode(ID, Opcode, VTList, Ops);
11865 void *IP = nullptr;
11866 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
11867 E->intersectFlagsWith(Flags);
11868 return SDValue(E, 0);
11869 }
11870
11871 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
11872 createOperands(N, Ops);
11873 CSEMap.InsertNode(N, IP);
11874 } else {
11875 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
11876 createOperands(N, Ops);
11877 }
11878
11879 N->setFlags(Flags);
11880 InsertNode(N);
11881 SDValue V(N, 0);
11882 NewSDValueDbgMsg(V, "Creating new node: ", this);
11883 return V;
11884}
11885
11886SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
11887 SDVTList VTList) {
11888 return getNode(Opcode, DL, VTList, ArrayRef<SDValue>());
11889}
11890
11891SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
11892 SDValue N1) {
11893 SDValue Ops[] = { N1 };
11894 return getNode(Opcode, DL, VTList, Ops);
11895}
11896
11897SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
11898 SDValue N1, SDValue N2) {
11899 SDValue Ops[] = { N1, N2 };
11900 return getNode(Opcode, DL, VTList, Ops);
11901}
11902
11903SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
11904 SDValue N1, SDValue N2, SDValue N3) {
11905 SDValue Ops[] = { N1, N2, N3 };
11906 return getNode(Opcode, DL, VTList, Ops);
11907}
11908
11909SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
11910 SDValue N1, SDValue N2, SDValue N3, SDValue N4) {
11911 SDValue Ops[] = { N1, N2, N3, N4 };
11912 return getNode(Opcode, DL, VTList, Ops);
11913}
11914
11915SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
11916 SDValue N1, SDValue N2, SDValue N3, SDValue N4,
11917 SDValue N5) {
11918 SDValue Ops[] = { N1, N2, N3, N4, N5 };
11919 return getNode(Opcode, DL, VTList, Ops);
11920}
11921
// getVTList overloads: return uniqued, allocator-owned SDVTList instances for
// 1/2/3/4/N value types; repeated requests for the same type list return the
// same backing array, keyed through VTListMap.
// NOTE(review): extraction dropped the signatures and leading
// `FoldingSetNodeID ID;` declarations of each overload (orig. lines 11922,
// 11929-11930, 11947-11948, 11967-11968, 11989 and 11991); restore from
// upstream before compiling.
11923 if (!VT.isExtended())
// Simple types share a static per-MVT singleton; no allocation needed.
11924 return makeVTList(SDNode::getValueTypeList(VT.getSimpleVT()), 1);
11925
// Extended types are uniqued through the EVTs set.
11926 return makeVTList(&(*EVTs.insert(VT).first), 1);
11927}
11928
11931 ID.AddInteger(2U);
11932 ID.AddInteger(VT1.getRawBits());
11933 ID.AddInteger(VT2.getRawBits());
11934
11935 void *IP = nullptr;
11936 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
11937 if (!Result) {
11938 EVT *Array = Allocator.Allocate<EVT>(2);
11939 Array[0] = VT1;
11940 Array[1] = VT2;
11941 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2);
11942 VTListMap.InsertNode(Result, IP);
11943 }
11944 return Result->getSDVTList();
11945}
11946
11949 ID.AddInteger(3U);
11950 ID.AddInteger(VT1.getRawBits());
11951 ID.AddInteger(VT2.getRawBits());
11952 ID.AddInteger(VT3.getRawBits());
11953
11954 void *IP = nullptr;
11955 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
11956 if (!Result) {
11957 EVT *Array = Allocator.Allocate<EVT>(3);
11958 Array[0] = VT1;
11959 Array[1] = VT2;
11960 Array[2] = VT3;
11961 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3);
11962 VTListMap.InsertNode(Result, IP);
11963 }
11964 return Result->getSDVTList();
11965}
11966
11969 ID.AddInteger(4U);
11970 ID.AddInteger(VT1.getRawBits());
11971 ID.AddInteger(VT2.getRawBits());
11972 ID.AddInteger(VT3.getRawBits());
11973 ID.AddInteger(VT4.getRawBits());
11974
11975 void *IP = nullptr;
11976 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
11977 if (!Result) {
11978 EVT *Array = Allocator.Allocate<EVT>(4);
11979 Array[0] = VT1;
11980 Array[1] = VT2;
11981 Array[2] = VT3;
11982 Array[3] = VT4;
11983 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4);
11984 VTListMap.InsertNode(Result, IP);
11985 }
11986 return Result->getSDVTList();
11987}
11988
// Generic ArrayRef overload.
11990 unsigned NumVTs = VTs.size();
11992 ID.AddInteger(NumVTs);
11993 for (unsigned index = 0; index < NumVTs; index++) {
11994 ID.AddInteger(VTs[index].getRawBits());
11995 }
11996
11997 void *IP = nullptr;
11998 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
11999 if (!Result) {
12000 EVT *Array = Allocator.Allocate<EVT>(NumVTs);
12001 llvm::copy(VTs, Array);
12002 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs);
12003 VTListMap.InsertNode(Result, IP);
12004 }
12005 return Result->getSDVTList();
12006}
12007
12008
// NOTE(review): extraction dropped the signature lines of every overload in
// this family (orig. lines 12015, 12040, 12069-12070, 12075-12076,
// 12082-12083, 12089-12090) plus one line after each operand-update section
// (orig. lines 12034, 12063, 12114 — presumably divergence-update calls);
// restore from upstream before compiling.
12009/// UpdateNodeOperands - *Mutate* the specified node in-place to have the
12010/// specified operands. If the resultant node already exists in the DAG,
12011/// this does not modify the specified node, instead it returns the node that
12012/// already exists. If the resultant node does not exist in the DAG, the
12013/// input node is returned. As a degenerate case, if you specify the same
12014/// input operands as the node already has, the input node is returned.
12016 assert(N->getNumOperands() == 1 && "Update with wrong number of operands");
12017
12018 // Check to see if there is no change.
12019 if (Op == N->getOperand(0)) return N;
12020
12021 // See if the modified node already exists.
12022 void *InsertPos = nullptr;
12023 if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos))
12024 return Existing;
12025
12026 // Nope it doesn't. Remove the node from its current place in the maps.
12027 if (InsertPos)
12028 if (!RemoveNodeFromCSEMaps(N))
12029 InsertPos = nullptr;
12030
12031 // Now we update the operands.
12032 N->OperandList[0].set(Op);
12033
12035 // If this gets put into a CSE map, add it.
12036 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
12037 return N;
12038}
12039
// Two-operand variant; only actually-changed operands are rewritten.
12041 assert(N->getNumOperands() == 2 && "Update with wrong number of operands");
12042
12043 // Check to see if there is no change.
12044 if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1))
12045 return N; // No operands changed, just return the input node.
12046
12047 // See if the modified node already exists.
12048 void *InsertPos = nullptr;
12049 if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos))
12050 return Existing;
12051
12052 // Nope it doesn't. Remove the node from its current place in the maps.
12053 if (InsertPos)
12054 if (!RemoveNodeFromCSEMaps(N))
12055 InsertPos = nullptr;
12056
12057 // Now we update the operands.
12058 if (N->OperandList[0] != Op1)
12059 N->OperandList[0].set(Op1);
12060 if (N->OperandList[1] != Op2)
12061 N->OperandList[1].set(Op2);
12062
12064 // If this gets put into a CSE map, add it.
12065 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
12066 return N;
12067}
12068
// 3/4/5-operand variants forward to the ArrayRef form below.
12071 SDValue Ops[] = { Op1, Op2, Op3 };
12072 return UpdateNodeOperands(N, Ops);
12073}
12074
12077 SDValue Op3, SDValue Op4) {
12078 SDValue Ops[] = { Op1, Op2, Op3, Op4 };
12079 return UpdateNodeOperands(N, Ops);
12080}
12081
12084 SDValue Op3, SDValue Op4, SDValue Op5) {
12085 SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 };
12086 return UpdateNodeOperands(N, Ops);
12087}
12088
// Generic ArrayRef variant; the workhorse for the forwarding overloads above.
12091 unsigned NumOps = Ops.size();
12092 assert(N->getNumOperands() == NumOps &&
12093 "Update with wrong number of operands");
12094
12095 // If no operands changed just return the input node.
12096 if (std::equal(Ops.begin(), Ops.end(), N->op_begin()))
12097 return N;
12098
12099 // See if the modified node already exists.
12100 void *InsertPos = nullptr;
12101 if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, InsertPos))
12102 return Existing;
12103
12104 // Nope it doesn't. Remove the node from its current place in the maps.
12105 if (InsertPos)
12106 if (!RemoveNodeFromCSEMaps(N))
12107 InsertPos = nullptr;
12108
12109 // Now we update the operands.
12110 for (unsigned i = 0; i != NumOps; ++i)
12111 if (N->OperandList[i] != Ops[i])
12112 N->OperandList[i].set(Ops[i]);
12113
12115 // If this gets put into a CSE map, add it.
12116 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
12117 return N;
12118}
12119
// NOTE(review): the function's signature (orig. line 12122, presumably
// `void SDNode::DropOperands() {`) was dropped by extraction.
12120/// DropOperands - Release the operands and set this node to have
12121/// zero operands.
12123 // Unlike the code in MorphNodeTo that does this, we don't need to
12124 // watch for dead nodes here.
12125 for (op_iterator I = op_begin(), E = op_end(); I != E; ) {
// Advance before clearing: Use.set() unlinks this SDUse from the use list.
12126 SDUse &Use = *I++;
12127 Use.set(SDValue());
12128 }
12129}
12130
// SelectionDAG::setNodeMemRefs — attach memory-operand references to a
// MachineSDNode; a single MMO is stored inline, multiple MMOs are copied into
// an allocator-owned array.
// NOTE(review): the first line of the signature (orig. line 12131) was
// dropped by extraction.
12132 ArrayRef<MachineMemOperand *> NewMemRefs) {
12133 if (NewMemRefs.empty()) {
12134 N->clearMemRefs();
12135 return;
12136 }
12137
12138 // Check if we can avoid allocating by storing a single reference directly.
12139 if (NewMemRefs.size() == 1) {
12140 N->MemRefs = NewMemRefs[0];
12141 N->NumMemRefs = 1;
12142 return;
12143 }
12144
12145 MachineMemOperand **MemRefsBuffer =
12146 Allocator.template Allocate<MachineMemOperand *>(NewMemRefs.size());
12147 llvm::copy(NewMemRefs, MemRefsBuffer);
12148 N->MemRefs = MemRefsBuffer;
12149 N->NumMemRefs = static_cast<int>(NewMemRefs.size());
12150}
12151
// NOTE(review): extraction dropped the first signature line of every overload
// in this family (orig. lines 12155, 12161, 12168, 12176, 12184-12185, 12190,
// 12196, 12202/12204, 12209, 12217-12218) and one statement inside the
// `New != N` branch of the core overload (orig. line 12224, presumably a
// RemoveDeadNode(N) call); restore from upstream before compiling.
12152/// SelectNodeTo - These are wrappers around MorphNodeTo that accept a
12153/// machine opcode.
12154///
12156 EVT VT) {
12157 SDVTList VTs = getVTList(VT);
12158 return SelectNodeTo(N, MachineOpc, VTs, {});
12159}
12160
12162 EVT VT, SDValue Op1) {
12163 SDVTList VTs = getVTList(VT);
12164 SDValue Ops[] = { Op1 };
12165 return SelectNodeTo(N, MachineOpc, VTs, Ops);
12166}
12167
12169 EVT VT, SDValue Op1,
12170 SDValue Op2) {
12171 SDVTList VTs = getVTList(VT);
12172 SDValue Ops[] = { Op1, Op2 };
12173 return SelectNodeTo(N, MachineOpc, VTs, Ops);
12174}
12175
12177 EVT VT, SDValue Op1,
12178 SDValue Op2, SDValue Op3) {
12179 SDVTList VTs = getVTList(VT);
12180 SDValue Ops[] = { Op1, Op2, Op3 };
12181 return SelectNodeTo(N, MachineOpc, VTs, Ops);
12182}
12183
12186 SDVTList VTs = getVTList(VT);
12187 return SelectNodeTo(N, MachineOpc, VTs, Ops);
12188}
12189
12191 EVT VT1, EVT VT2, ArrayRef<SDValue> Ops) {
12192 SDVTList VTs = getVTList(VT1, VT2);
12193 return SelectNodeTo(N, MachineOpc, VTs, Ops);
12194}
12195
12197 EVT VT1, EVT VT2) {
12198 SDVTList VTs = getVTList(VT1, VT2);
12199 return SelectNodeTo(N, MachineOpc, VTs, {});
12200}
12201
12203 EVT VT1, EVT VT2, EVT VT3,
12205 SDVTList VTs = getVTList(VT1, VT2, VT3);
12206 return SelectNodeTo(N, MachineOpc, VTs, Ops);
12207}
12208
12210 EVT VT1, EVT VT2,
12211 SDValue Op1, SDValue Op2) {
12212 SDVTList VTs = getVTList(VT1, VT2);
12213 SDValue Ops[] = { Op1, Op2 };
12214 return SelectNodeTo(N, MachineOpc, VTs, Ops);
12215}
12216
// Core overload: morphs N into machine node ~MachineOpc (the complement marks
// a target opcode) and replaces all uses if CSE returned a different node.
12219 SDNode *New = MorphNodeTo(N, ~MachineOpc, VTs, Ops);
12220 // Reset the NodeID to -1.
12221 New->setNodeId(-1);
12222 if (New != N) {
12223 ReplaceAllUsesWith(N, New);
12225 }
12226 return New;
12227}
12228
12229/// UpdateSDLocOnMergeSDNode - If the opt level is -O0 then it throws away
12230/// the line number information on the merged node since it is not possible to
12231/// preserve the information that operation is associated with multiple lines.
12232/// This will make the debugger working better at -O0, were there is a higher
12233/// probability having other instructions associated with that line.
12234///
12235/// For IROrder, we keep the smaller of the two
12236SDNode *SelectionDAG::UpdateSDLocOnMergeSDNode(SDNode *N, const SDLoc &OLoc) {
12237 DebugLoc NLoc = N->getDebugLoc();
12238 if (NLoc && OptLevel == CodeGenOptLevel::None && OLoc.getDebugLoc() != NLoc) {
12239 N->setDebugLoc(DebugLoc());
12240 }
12241 unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder());
12242 N->setIROrder(Order);
12243 return N;
12244}
12245
// NOTE(review): extraction dropped the signature (orig. lines 12262-12263),
// the `FoldingSetNodeID ID;` declaration (orig. line 12267), and the
// MachineSDNode dyn_cast opening the memrefs-clearing statement (orig. line
// 12293); restore from upstream before compiling.
12246/// MorphNodeTo - This *mutates* the specified node to have the specified
12247/// return type, opcode, and operands.
12248///
12249/// Note that MorphNodeTo returns the resultant node. If there is already a
12250/// node of the specified opcode and operands, it returns that node instead of
12251/// the current one. Note that the SDLoc need not be the same.
12252///
12253/// Using MorphNodeTo is faster than creating a new node and swapping it in
12254/// with ReplaceAllUsesWith both because it often avoids allocating a new
12255/// node, and because it doesn't require CSE recalculation for any of
12256/// the node's users.
12257///
12258/// However, note that MorphNodeTo recursively deletes dead nodes from the DAG.
12259/// As a consequence it isn't appropriate to use from within the DAG combiner or
12260/// the legalizer which maintain worklists that would need to be updated when
12261/// deleting things.
12264 // If an identical node already exists, use it.
12265 void *IP = nullptr;
12266 if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) {
12268 AddNodeIDNode(ID, Opc, VTs, Ops);
12269 if (SDNode *ON = FindNodeOrInsertPos(ID, SDLoc(N), IP))
12270 return UpdateSDLocOnMergeSDNode(ON, SDLoc(N));
12271 }
12272
12273 if (!RemoveNodeFromCSEMaps(N))
12274 IP = nullptr;
12275
12276 // Start the morphing.
12277 N->NodeType = Opc;
12278 N->ValueList = VTs.VTs;
12279 N->NumValues = VTs.NumVTs;
12280
12281 // Clear the operands list, updating used nodes to remove this from their
12282 // use list. Keep track of any operands that become dead as a result.
12283 SmallPtrSet<SDNode*, 16> DeadNodeSet;
12284 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
12285 SDUse &Use = *I++;
12286 SDNode *Used = Use.getNode();
12287 Use.set(SDValue());
12288 if (Used->use_empty())
12289 DeadNodeSet.insert(Used);
12290 }
12291
12292 // For MachineNode, initialize the memory references information.
12294 MN->clearMemRefs();
12295
12296 // Swap for an appropriately sized array from the recycler.
12297 removeOperands(N);
12298 createOperands(N, Ops);
12299
12300 // Delete any nodes that are still dead after adding the uses for the
12301 // new operands.
12302 if (!DeadNodeSet.empty()) {
12303 SmallVector<SDNode *, 16> DeadNodes;
12304 for (SDNode *N : DeadNodeSet)
12305 if (N->use_empty())
12306 DeadNodes.push_back(N);
12307 RemoveDeadNodes(DeadNodes);
12308 }
12309
12310 if (IP)
12311 CSEMap.InsertNode(N, IP); // Memoize the new node.
12312 return N;
12313}
12314
// SelectionDAG::mutateStrictFPToFP — rewrite a STRICT_* FP node into its
// non-strict equivalent: re-link the chain around the node, drop the chain
// operand, and morph the node (or return an existing equivalent node).
// NOTE(review): extraction dropped the signature (orig. line 12315), the
// operand SmallVector declaration (orig. line 12335), and the two statements
// of the `else` branch (orig. lines 12350-12351 — presumably replace-uses /
// remove-dead-node calls); restore from upstream before compiling.
12316 unsigned OrigOpc = Node->getOpcode();
12317 unsigned NewOpc;
12318 switch (OrigOpc) {
12319 default:
12320 llvm_unreachable("mutateStrictFPToFP called with unexpected opcode!");
// Opcode mapping is generated from the constrained-ops table: each
// STRICT_<OP> maps to <OP>, and strict compares map to SETCC.
12321#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
12322 case ISD::STRICT_##DAGN: NewOpc = ISD::DAGN; break;
12323#define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
12324 case ISD::STRICT_##DAGN: NewOpc = ISD::SETCC; break;
12325#include "llvm/IR/ConstrainedOps.def"
12326 }
12327
12328 assert(Node->getNumValues() == 2 && "Unexpected number of results!");
12329
12330 // We're taking this node out of the chain, so we need to re-link things.
12331 SDValue InputChain = Node->getOperand(0);
12332 SDValue OutputChain = SDValue(Node, 1);
12333 ReplaceAllUsesOfValueWith(OutputChain, InputChain);
12334
// Collect all operands except the chain (operand 0).
12336 for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i)
12337 Ops.push_back(Node->getOperand(i));
12338
12339 SDVTList VTs = getVTList(Node->getValueType(0));
12340 SDNode *Res = MorphNodeTo(Node, NewOpc, VTs, Ops);
12341
12342 // MorphNodeTo can operate in two ways: if an existing node with the
12343 // specified operands exists, it can just return it. Otherwise, it
12344 // updates the node in place to have the requested operands.
12345 if (Res == Node) {
12346 // If we updated the node in place, reset the node ID. To the isel,
12347 // this should be just like a newly allocated machine node.
12348 Res->setNodeId(-1);
12349 } else {
12352 }
12353
12354 return Res;
12355}
12356
// NOTE(review): extraction dropped the first signature line of every wrapper
// in this family (orig. lines 12363, 12369, 12376, 12383, 12391-12392, 12397,
// 12405, 12413/12415, 12420, 12428, 12437/12439, 12444/12446); restore from
// upstream before compiling. All wrappers forward to the SDVTList/ArrayRef
// core overload below.
12357/// getMachineNode - These are used for target selectors to create a new node
12358/// with specified return type(s), MachineInstr opcode, and operands.
12359///
12360/// Note that getMachineNode returns the resultant node. If there is already a
12361/// node of the specified opcode and operands, it returns that node instead of
12362/// the current one.
12364 EVT VT) {
12365 SDVTList VTs = getVTList(VT);
12366 return getMachineNode(Opcode, dl, VTs, {});
12367}
12368
12370 EVT VT, SDValue Op1) {
12371 SDVTList VTs = getVTList(VT);
12372 SDValue Ops[] = { Op1 };
12373 return getMachineNode(Opcode, dl, VTs, Ops);
12374}
12375
12377 EVT VT, SDValue Op1, SDValue Op2) {
12378 SDVTList VTs = getVTList(VT);
12379 SDValue Ops[] = { Op1, Op2 };
12380 return getMachineNode(Opcode, dl, VTs, Ops);
12381}
12382
12384 EVT VT, SDValue Op1, SDValue Op2,
12385 SDValue Op3) {
12386 SDVTList VTs = getVTList(VT);
12387 SDValue Ops[] = { Op1, Op2, Op3 };
12388 return getMachineNode(Opcode, dl, VTs, Ops);
12389}
12390
12393 SDVTList VTs = getVTList(VT);
12394 return getMachineNode(Opcode, dl, VTs, Ops);
12395}
12396
12398 EVT VT1, EVT VT2, SDValue Op1,
12399 SDValue Op2) {
12400 SDVTList VTs = getVTList(VT1, VT2);
12401 SDValue Ops[] = { Op1, Op2 };
12402 return getMachineNode(Opcode, dl, VTs, Ops);
12403}
12404
12406 EVT VT1, EVT VT2, SDValue Op1,
12407 SDValue Op2, SDValue Op3) {
12408 SDVTList VTs = getVTList(VT1, VT2);
12409 SDValue Ops[] = { Op1, Op2, Op3 };
12410 return getMachineNode(Opcode, dl, VTs, Ops);
12411}
12412
12414 EVT VT1, EVT VT2,
12416 SDVTList VTs = getVTList(VT1, VT2);
12417 return getMachineNode(Opcode, dl, VTs, Ops);
12418}
12419
12421 EVT VT1, EVT VT2, EVT VT3,
12422 SDValue Op1, SDValue Op2) {
12423 SDVTList VTs = getVTList(VT1, VT2, VT3);
12424 SDValue Ops[] = { Op1, Op2 };
12425 return getMachineNode(Opcode, dl, VTs, Ops);
12426}
12427
12429 EVT VT1, EVT VT2, EVT VT3,
12430 SDValue Op1, SDValue Op2,
12431 SDValue Op3) {
12432 SDVTList VTs = getVTList(VT1, VT2, VT3);
12433 SDValue Ops[] = { Op1, Op2, Op3 };
12434 return getMachineNode(Opcode, dl, VTs, Ops);
12435}
12436
12438 EVT VT1, EVT VT2, EVT VT3,
12440 SDVTList VTs = getVTList(VT1, VT2, VT3);
12441 return getMachineNode(Opcode, dl, VTs, Ops);
12442}
12443
12445 ArrayRef<EVT> ResultTys,
12447 SDVTList VTs = getVTList(ResultTys);
12448 return getMachineNode(Opcode, dl, VTs, Ops);
12449}
12450
// Core getMachineNode: CSEs (unless the last result is glue) and allocates a
// MachineSDNode whose stored opcode is the bitwise complement of the target
// opcode, matching the ~Opcode convention used by isel.
// NOTE(review): extraction dropped the first signature line (orig. 12451),
// the `ArrayRef<SDValue> Ops) {` continuation (orig. 12453), the
// `MachineSDNode *N;` declaration (orig. 12455), and `FoldingSetNodeID ID;`
// (orig. 12459); restore from upstream before compiling.
12452 SDVTList VTs,
12454 bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue;
12456 void *IP = nullptr;
12457
12458 if (DoCSE) {
12460 AddNodeIDNode(ID, ~Opcode, VTs, Ops);
12461 IP = nullptr;
12462 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
// Reuse the existing node, reconciling its debug location first.
12463 return cast<MachineSDNode>(UpdateSDLocOnMergeSDNode(E, DL));
12464 }
12465 }
12466
12467 // Allocate a new MachineSDNode.
12468 N = newSDNode<MachineSDNode>(~Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
12469 createOperands(N, Ops);
12470
12471 if (DoCSE)
12472 CSEMap.InsertNode(N, IP);
12473
12474 InsertNode(N);
12475 NewSDValueDbgMsg(SDValue(N, 0), "Creating new machine node: ", this);
12476 return N;
12477}
12478
// NOTE(review): the first line of the signature (orig. line 12481) was
// dropped by extraction.
12479/// getTargetExtractSubreg - A convenience function for creating
12480/// TargetOpcode::EXTRACT_SUBREG nodes.
12482 SDValue Operand) {
// The subregister index is encoded as an i32 target constant operand.
12483 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32);
12484 SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
12485 VT, Operand, SRIdxVal);
12486 return SDValue(Subreg, 0);
12487}
12488
// NOTE(review): the first line of the signature (orig. line 12491) was
// dropped by extraction.
12489/// getTargetInsertSubreg - A convenience function for creating
12490/// TargetOpcode::INSERT_SUBREG nodes.
12492 SDValue Operand, SDValue Subreg) {
// The subregister index is encoded as an i32 target constant operand.
12493 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32);
12494 SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
12495 VT, Operand, Subreg, SRIdxVal);
12496 return SDValue(Result, 0);
12497}
12498
12499/// getNodeIfExists - Get the specified node if it's already available, or
12500/// else return NULL.
// NOTE(review): declarator lines (12501-12502) lost in extraction. This
// flag-less overload pulls the current flags off the node inserter (if any)
// and delegates to the flags-taking overload below.
12503 bool AllowCommute) {
12504 SDNodeFlags Flags;
12505 if (Inserter)
12506 Flags = Inserter->getFlags();
12507 return getNodeIfExists(Opcode, VTList, Ops, Flags, AllowCommute);
12508}
12509
// NOTE(review): declarator lines (12510-12511) and the "FoldingSetNodeID ID;"
// local (12518) lost in extraction. Looks up an existing CSE'd node matching
// (Opcode, VTList, Ops); returns nullptr if absent.
12512 const SDNodeFlags Flags,
12513 bool AllowCommute) {
// Glue-producing nodes are never CSE'd, so there is nothing to find.
12514 if (VTList.VTs[VTList.NumVTs - 1] == MVT::Glue)
12515 return nullptr;
12516
12517 auto Lookup = [&](ArrayRef<SDValue> LookupOps) -> SDNode * {
12519 AddNodeIDNode(ID, Opcode, VTList, LookupOps);
12520 void *IP = nullptr;
12521 if (SDNode *E = FindNodeOrInsertPos(ID, IP)) {
// Narrow the existing node's flags to those compatible with the request.
12522 E->intersectFlagsWith(Flags);
12523 return E;
12524 }
12525 return nullptr;
12526 };
12527
12528 if (SDNode *Existing = Lookup(Ops))
12529 return Existing;
12530
// For commutative binops, also try the operands in swapped order.
12531 if (AllowCommute && TLI->isCommutativeBinOp(Opcode))
12532 return Lookup({Ops[1], Ops[0]});
12533
12534 return nullptr;
12535}
12536
12537/// doesNodeExist - Check if a node exists without modifying its flags.
// NOTE(review): the second declarator line (12539, presumably
// "ArrayRef<SDValue> Ops) {") and the "FoldingSetNodeID ID;" local (12541)
// lost in extraction.
12538bool SelectionDAG::doesNodeExist(unsigned Opcode, SDVTList VTList,
// Glue-producing nodes are never CSE'd, so they never "exist" here.
12540 if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) {
12542 AddNodeIDNode(ID, Opcode, VTList, Ops);
12543 void *IP = nullptr;
12544 if (FindNodeOrInsertPos(ID, SDLoc(), IP))
12545 return true;
12546 }
12547 return false;
12548}
12549
12550/// getDbgValue - Creates a SDDbgValue node.
12551///
12552/// SDNode
// NOTE(review): declarator line (12553) lost in extraction. Builds a
// non-variadic SDDbgValue whose single location operand is result R of node N.
12554 SDNode *N, unsigned R, bool IsIndirect,
12555 const DebugLoc &DL, unsigned O) {
12556 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
12557 "Expected inlined-at fields to agree");
// Placement-new into the debug-info allocator; freed with the DAG.
12558 return new (DbgInfo->getAlloc())
12559 SDDbgValue(DbgInfo->getAlloc(), Var, Expr, SDDbgOperand::fromNode(N, R),
12560 {}, IsIndirect, DL, O,
12561 /*IsVariadic=*/false);
12562}
12563
12564/// Constant
// NOTE(review): declarator line (12565) lost in extraction. Builds a
// non-variadic, direct SDDbgValue whose location is the IR constant C.
12566 DIExpression *Expr,
12567 const Value *C,
12568 const DebugLoc &DL, unsigned O) {
12569 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
12570 "Expected inlined-at fields to agree");
12571 return new (DbgInfo->getAlloc())
12572 SDDbgValue(DbgInfo->getAlloc(), Var, Expr, SDDbgOperand::fromConst(C), {},
12573 /*IsIndirect=*/false, DL, O,
12574 /*IsVariadic=*/false);
12575}
12576
12577/// FrameIndex
// NOTE(review): declarator line (12578) lost in extraction. Convenience
// overload with no extra SDNode dependencies; forwards to the dependency-
// taking overload below with an empty list.
12579 DIExpression *Expr, unsigned FI,
12580 bool IsIndirect,
12581 const DebugLoc &DL,
12582 unsigned O) {
12583 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
12584 "Expected inlined-at fields to agree");
12585 return getFrameIndexDbgValue(Var, Expr, FI, {}, IsIndirect, DL, O);
12586}
12587
12588/// FrameIndex with dependencies
// NOTE(review): declarator line (12589) lost in extraction. Builds a
// non-variadic SDDbgValue located at frame index FI, additionally keeping the
// given dependency nodes alive for ordering purposes.
12590 DIExpression *Expr, unsigned FI,
12591 ArrayRef<SDNode *> Dependencies,
12592 bool IsIndirect,
12593 const DebugLoc &DL,
12594 unsigned O) {
12595 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
12596 "Expected inlined-at fields to agree");
12597 return new (DbgInfo->getAlloc())
12598 SDDbgValue(DbgInfo->getAlloc(), Var, Expr, SDDbgOperand::fromFrameIdx(FI),
12599 Dependencies, IsIndirect, DL, O,
12600 /*IsVariadic=*/false);
12601}
12602
12603/// VReg
// NOTE(review): declarator line (12604) lost in extraction. Builds a
// non-variadic SDDbgValue located in virtual register VReg.
12605 Register VReg, bool IsIndirect,
12606 const DebugLoc &DL, unsigned O) {
12607 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
12608 "Expected inlined-at fields to agree");
12609 return new (DbgInfo->getAlloc())
12610 SDDbgValue(DbgInfo->getAlloc(), Var, Expr, SDDbgOperand::fromVReg(VReg),
12611 {}, IsIndirect, DL, O,
12612 /*IsVariadic=*/false);
12613}
12614
// NOTE(review): declarator lines (12615-12616) lost in extraction. This is the
// fully general SDDbgValue factory: arbitrary location-operand list, extra
// dependencies, and an explicit IsVariadic flag; the other getters above all
// reduce to special cases of it.
12617 ArrayRef<SDNode *> Dependencies,
12618 bool IsIndirect, const DebugLoc &DL,
12619 unsigned O, bool IsVariadic) {
12620 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
12621 "Expected inlined-at fields to agree");
12622 return new (DbgInfo->getAlloc())
12623 SDDbgValue(DbgInfo->getAlloc(), Var, Expr, Locs, Dependencies, IsIndirect,
12624 DL, O, IsVariadic);
12625}
12626
// NOTE(review): the declarator line (12627) and two local declarations (12645,
// presumably the "ToLocOp" counterpart of FromLocOp, and 12647, presumably the
// "ClonedDVs" vector) were lost in extraction; recover from the original file.
// Transfers SDDbgValues attached to From over to To, optionally narrowing them
// to a fragment [OffsetInBits, OffsetInBits+SizeInBits) and optionally
// invalidating the originals.
12628 unsigned OffsetInBits, unsigned SizeInBits,
12629 bool InvalidateDbg) {
12630 SDNode *FromNode = From.getNode();
12631 SDNode *ToNode = To.getNode();
12632 assert(FromNode && ToNode && "Can't modify dbg values");
12633
// Self/intranode transfers would clone a value onto the node it already
// describes; bail out instead (see PR35338).
12634 // PR35338
12635 // TODO: assert(From != To && "Redundant dbg value transfer");
12636 // TODO: assert(FromNode != ToNode && "Intranode dbg value transfer");
12637 if (From == To || FromNode == ToNode)
12638 return;
12639
// Fast path: nothing attached to the source node.
12640 if (!FromNode->getHasDebugValue())
12641 return;
12642
12643 SDDbgOperand FromLocOp =
12644 SDDbgOperand::fromNode(From.getNode(), From.getResNo());
12646
12648 for (SDDbgValue *Dbg : GetDbgValues(FromNode)) {
12649 if (Dbg->isInvalidated())
12650 continue;
12651
12652 // TODO: assert(!Dbg->isInvalidated() && "Transfer of invalid dbg value");
12653
12654 // Create a new location ops vector that is equal to the old vector, but
12655 // with each instance of FromLocOp replaced with ToLocOp.
12656 bool Changed = false;
12657 auto NewLocOps = Dbg->copyLocationOps();
12658 std::replace_if(
12659 NewLocOps.begin(), NewLocOps.end(),
12660 [&Changed, FromLocOp](const SDDbgOperand &Op) {
12661 bool Match = Op == FromLocOp;
12662 Changed |= Match;
12663 return Match;
12664 },
12665 ToLocOp);
12666 // Ignore this SDDbgValue if we didn't find a matching location.
12667 if (!Changed)
12668 continue;
12669
12670 DIVariable *Var = Dbg->getVariable();
12671 auto *Expr = Dbg->getExpression();
12672 // If a fragment is requested, update the expression.
12673 if (SizeInBits) {
12674 // When splitting a larger (e.g., sign-extended) value whose
12675 // lower bits are described with an SDDbgValue, do not attempt
12676 // to transfer the SDDbgValue to the upper bits.
12677 if (auto FI = Expr->getFragmentInfo())
12678 if (OffsetInBits + SizeInBits > FI->SizeInBits)
12679 continue;
12680 auto Fragment = DIExpression::createFragmentExpression(Expr, OffsetInBits,
12681 SizeInBits);
// createFragmentExpression may fail; skip this value rather than emit a
// malformed fragment.
12682 if (!Fragment)
12683 continue;
12684 Expr = *Fragment;
12685 }
12686
12687 auto AdditionalDependencies = Dbg->getAdditionalDependencies();
12688 // Clone the SDDbgValue and move it to To.
12689 SDDbgValue *Clone = getDbgValueList(
12690 Var, Expr, NewLocOps, AdditionalDependencies, Dbg->isIndirect(),
12691 Dbg->getDebugLoc(), std::max(ToNode->getIROrder(), Dbg->getOrder()),
12692 Dbg->isVariadic());
12693 ClonedDVs.push_back(Clone);
12694
12695 if (InvalidateDbg) {
12696 // Invalidate value and indicate the SDDbgValue should not be emitted.
12697 Dbg->setIsInvalidated();
12698 Dbg->setIsEmitted();
12699 }
12700 }
12701
// Attach the clones after the walk so we don't mutate the list we iterate.
12702 for (SDDbgValue *Dbg : ClonedDVs) {
12703 assert(is_contained(Dbg->getSDNodes(), ToNode) &&
12704 "Transferred DbgValues should depend on the new SDNode");
12705 AddDbgValue(Dbg, false);
12706 }
12707}
12708
// NOTE(review): the declarator line (12709, presumably
// "void SelectionDAG::salvageDebugInfo(SDNode &N) {") and several interior
// lines (12719 ClonedDVs decl, 12731 the Offset local, 12756-12757 the ADD
// expression ops, 12764-12766 the convertToVariadicExpression call, 12770 the
// RHS operand construction) were lost in extraction; recover from the
// original file. Rewrites debug values that reference the dying node N in
// terms of N's operands, for ISD::ADD and ISD::TRUNCATE.
12710 if (!N.getHasDebugValue())
12711 return;
12712
// Frame-index nodes are better described directly as a frame index location.
12713 auto GetLocationOperand = [](SDNode *Node, unsigned ResNo) {
12714 if (auto *FISDN = dyn_cast<FrameIndexSDNode>(Node))
12715 return SDDbgOperand::fromFrameIdx(FISDN->getIndex());
12716 return SDDbgOperand::fromNode(Node, ResNo);
12717 };
12718
12720 for (auto *DV : GetDbgValues(&N)) {
12721 if (DV->isInvalidated())
12722 continue;
12723 switch (N.getOpcode()) {
12724 default:
12725 break;
12726 case ISD::ADD: {
12727 SDValue N0 = N.getOperand(0);
12728 SDValue N1 = N.getOperand(1);
// Only salvage when the LHS is non-constant (constants canonicalize RHS).
12729 if (!isa<ConstantSDNode>(N0)) {
12730 bool RHSConstant = isa<ConstantSDNode>(N1);
12732 if (RHSConstant)
12733 Offset = N.getConstantOperandVal(1);
12734 // We are not allowed to turn indirect debug values variadic, so
12735 // don't salvage those.
12736 if (!RHSConstant && DV->isIndirect())
12737 continue;
12738
12739 // Rewrite an ADD constant node into a DIExpression. Since we are
12740 // performing arithmetic to compute the variable's *value* in the
12741 // DIExpression, we need to mark the expression with a
12742 // DW_OP_stack_value.
12743 auto *DIExpr = DV->getExpression();
12744 auto NewLocOps = DV->copyLocationOps();
12745 bool Changed = false;
12746 size_t OrigLocOpsSize = NewLocOps.size();
12747 for (size_t i = 0; i < OrigLocOpsSize; ++i) {
12748 // We're not given a ResNo to compare against because the whole
12749 // node is going away. We know that any ISD::ADD only has one
12750 // result, so we can assume any node match is using the result.
12751 if (NewLocOps[i].getKind() != SDDbgOperand::SDNODE ||
12752 NewLocOps[i].getSDNode() != &N)
12753 continue;
12754 NewLocOps[i] = GetLocationOperand(N0.getNode(), N0.getResNo());
12755 if (RHSConstant) {
12758 DIExpr = DIExpression::appendOpsToArg(DIExpr, ExprOps, i, true);
12759 } else {
12760 // Convert to a variadic expression (if not already).
12761 // convertToVariadicExpression() returns a const pointer, so we use
12762 // a temporary const variable here.
12763 const auto *TmpDIExpr =
// The RHS becomes a fresh location operand; DW_OP_LLVM_arg-style ops
// (pushed into ExprOps) add it to the salvaged value.
12767 ExprOps.push_back(NewLocOps.size());
12768 ExprOps.push_back(dwarf::DW_OP_plus);
12769 SDDbgOperand RHS =
12771 NewLocOps.push_back(RHS);
12772 DIExpr = DIExpression::appendOpsToArg(TmpDIExpr, ExprOps, i, true);
12773 }
12774 Changed = true;
12775 }
12776 (void)Changed;
12777 assert(Changed && "Salvage target doesn't use N");
12778
// Appending a location operand forces the variadic form.
12779 bool IsVariadic =
12780 DV->isVariadic() || OrigLocOpsSize != NewLocOps.size();
12781
12782 auto AdditionalDependencies = DV->getAdditionalDependencies();
12783 SDDbgValue *Clone = getDbgValueList(
12784 DV->getVariable(), DIExpr, NewLocOps, AdditionalDependencies,
12785 DV->isIndirect(), DV->getDebugLoc(), DV->getOrder(), IsVariadic);
12786 ClonedDVs.push_back(Clone);
12787 DV->setIsInvalidated();
12788 DV->setIsEmitted();
12789 LLVM_DEBUG(dbgs() << "SALVAGE: Rewriting";
12790 N0.getNode()->dumprFull(this);
12791 dbgs() << " into " << *DIExpr << '\n');
12792 }
12793 break;
12794 }
12795 case ISD::TRUNCATE: {
12796 SDValue N0 = N.getOperand(0);
12797 TypeSize FromSize = N0.getValueSizeInBits();
12798 TypeSize ToSize = N.getValueSizeInBits(0);
12799
// Express the truncation as DWARF convert/mask ops on the wider source.
12800 DIExpression *DbgExpression = DV->getExpression();
12801 auto ExtOps = DIExpression::getExtOps(FromSize, ToSize, false);
12802 auto NewLocOps = DV->copyLocationOps();
12803 bool Changed = false;
12804 for (size_t i = 0; i < NewLocOps.size(); ++i) {
12805 if (NewLocOps[i].getKind() != SDDbgOperand::SDNODE ||
12806 NewLocOps[i].getSDNode() != &N)
12807 continue;
12808
12809 NewLocOps[i] = GetLocationOperand(N0.getNode(), N0.getResNo());
12810 DbgExpression = DIExpression::appendOpsToArg(DbgExpression, ExtOps, i);
12811 Changed = true;
12812 }
12813 assert(Changed && "Salvage target doesn't use N");
12814 (void)Changed;
12815
12816 SDDbgValue *Clone =
12817 getDbgValueList(DV->getVariable(), DbgExpression, NewLocOps,
12818 DV->getAdditionalDependencies(), DV->isIndirect(),
12819 DV->getDebugLoc(), DV->getOrder(), DV->isVariadic());
12820
12821 ClonedDVs.push_back(Clone);
12822 DV->setIsInvalidated();
12823 DV->setIsEmitted();
12824 LLVM_DEBUG(dbgs() << "SALVAGE: Rewriting"; N0.getNode()->dumprFull(this);
12825 dbgs() << " into " << *DbgExpression << '\n');
12826 break;
12827 }
12828 }
12829 }
12830
// Attach the clones after the walk so we don't mutate the list we iterate.
12831 for (SDDbgValue *Dbg : ClonedDVs) {
12832 assert((!Dbg->getSDNodes().empty() ||
12833 llvm::any_of(Dbg->getLocationOps(),
12834 [&](const SDDbgOperand &Op) {
12835 return Op.getKind() == SDDbgOperand::FRAMEIX;
12836 })) &&
12837 "Salvaged DbgValue should depend on a new SDNode");
12838 AddDbgValue(Dbg, false);
12839 }
12840}
12841
12842/// Creates a SDDbgLabel node.
// NOTE(review): declarator line (12843) lost in extraction.
12844 const DebugLoc &DL, unsigned O) {
12845 assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(DL) &&
12846 "Expected inlined-at fields to agree");
// Placement-new into the debug-info allocator; freed with the DAG.
12847 return new (DbgInfo->getAlloc()) SDDbgLabel(Label, DL, O);
12848}
12849
12850namespace {
12851
12852/// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node
12853/// pointed to by a use iterator is deleted, increment the use iterator
12854/// so that it doesn't dangle.
12855///
12856class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener {
// NOTE(review): the member declarations (12857-12858, presumably the UI/UE
// use-iterator references) and the constructor parameter lines (12868-12869)
// were lost in extraction; recover from the original file.
12859
12860 void NodeDeleted(SDNode *N, SDNode *E) override {
12861 // Increment the iterator as needed.
// Skip past every pending use owned by the just-deleted node.
12862 while (UI != UE && N == UI->getUser())
12863 ++UI;
12864 }
12865
12866public:
12867 RAUWUpdateListener(SelectionDAG &d,
12870 : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
12871};
12872
12873} // end anonymous namespace
12874
12875/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
12876/// This can cause recursive merging of nodes in the DAG.
12877///
12878/// This version assumes From has a single result value.
12879///
// NOTE(review): the declarator line (12880) and line 12915 (presumably
// "updateDivergence(User);") were lost in extraction; recover from the
// original file.
12881 SDNode *From = FromN.getNode();
12882 assert(From->getNumValues() == 1 && FromN.getResNo() == 0 &&
12883 "Cannot replace with this method!");
12884 assert(From != To.getNode() && "Cannot replace uses of with self");
12885
12886 // Preserve Debug Values
12887 transferDbgValues(FromN, To);
12888 // Preserve extra info.
12889 copyExtraInfo(From, To.getNode());
12890
12891 // Iterate over all the existing uses of From. New uses will be added
12892 // to the beginning of the use list, which we avoid visiting.
12893 // This specifically avoids visiting uses of From that arise while the
12894 // replacement is happening, because any such uses would be the result
12895 // of CSE: If an existing node looks like From after one of its operands
12896 // is replaced by To, we don't want to replace of all its users with To
12897 // too. See PR3018 for more info.
12898 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
// Listener keeps UI valid if recursive CSE deletes the current user.
12899 RAUWUpdateListener Listener(*this, UI, UE);
12900 while (UI != UE) {
12901 SDNode *User = UI->getUser();
12902
12903 // This node is about to morph, remove its old self from the CSE maps.
12904 RemoveNodeFromCSEMaps(User);
12905
12906 // A user can appear in a use list multiple times, and when this
12907 // happens the uses are usually next to each other in the list.
12908 // To help reduce the number of CSE recomputations, process all
12909 // the uses of this user that we can find this way.
12910 do {
12911 SDUse &Use = *UI;
12912 ++UI;
12913 Use.set(To);
12914 if (To->isDivergent() != From->isDivergent())
12916 } while (UI != UE && UI->getUser() == User);
12917 // Now that we have modified User, add it back to the CSE maps. If it
12918 // already exists there, recursively merge the results together.
12919 AddModifiedNodeToCSEMaps(User);
12920 }
12921
12922 // If we just RAUW'd the root, take note.
12923 if (FromN == getRoot())
12924 setRoot(To);
12925}
12926
12927/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
12928/// This can cause recursive merging of nodes in the DAG.
12929///
12930/// This version assumes that for each value of From, there is a
12931/// corresponding value in To in the same position with the same type.
12932///
// NOTE(review): the declarator line (12933) and line 12973 (presumably
// "updateDivergence(User);") were lost in extraction; recover from the
// original file.
12934#ifndef NDEBUG
12935 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
12936 assert((!From->hasAnyUseOfValue(i) ||
12937 From->getValueType(i) == To->getValueType(i)) &&
12938 "Cannot use this version of ReplaceAllUsesWith!");
12939#endif
12940
12941 // Handle the trivial case.
12942 if (From == To)
12943 return;
12944
12945 // Preserve Debug Info. Only do this if there's a use.
12946 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
12947 if (From->hasAnyUseOfValue(i)) {
12948 assert((i < To->getNumValues()) && "Invalid To location");
12949 transferDbgValues(SDValue(From, i), SDValue(To, i));
12950 }
12951 // Preserve extra info.
12952 copyExtraInfo(From, To);
12953
12954 // Iterate over just the existing users of From. See the comments in
12955 // the ReplaceAllUsesWith above.
12956 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
12957 RAUWUpdateListener Listener(*this, UI, UE);
12958 while (UI != UE) {
12959 SDNode *User = UI->getUser();
12960
12961 // This node is about to morph, remove its old self from the CSE maps.
12962 RemoveNodeFromCSEMaps(User);
12963
12964 // A user can appear in a use list multiple times, and when this
12965 // happens the uses are usually next to each other in the list.
12966 // To help reduce the number of CSE recomputations, process all
12967 // the uses of this user that we can find this way.
12968 do {
12969 SDUse &Use = *UI;
12970 ++UI;
// setNode keeps the use's result number, swapping only the defining node.
12971 Use.setNode(To);
12972 if (To->isDivergent() != From->isDivergent())
12974 } while (UI != UE && UI->getUser() == User);
12975
12976 // Now that we have modified User, add it back to the CSE maps. If it
12977 // already exists there, recursively merge the results together.
12978 AddModifiedNodeToCSEMaps(User);
12979 }
12980
12981 // If we just RAUW'd the root, take note.
12982 if (From == getRoot().getNode())
12983 setRoot(SDValue(To, getRoot().getResNo()));
12984}
12985
12986/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
12987/// This can cause recursive merging of nodes in the DAG.
12988///
12989/// This version can replace From with any result values. To must match the
12990/// number and types of values returned by From.
// NOTE(review): the declarator line (12991) and line 13027 (presumably
// "updateDivergence(User);") were lost in extraction; recover from the
// original file.
12992 if (From->getNumValues() == 1) // Handle the simple case efficiently.
12993 return ReplaceAllUsesWith(SDValue(From, 0), To[0]);
12994
12995 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i) {
12996 // Preserve Debug Info.
12997 transferDbgValues(SDValue(From, i), To[i]);
12998 // Preserve extra info.
12999 copyExtraInfo(From, To[i].getNode());
13000 }
13001
13002 // Iterate over just the existing users of From. See the comments in
13003 // the ReplaceAllUsesWith above.
13004 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
13005 RAUWUpdateListener Listener(*this, UI, UE);
13006 while (UI != UE) {
13007 SDNode *User = UI->getUser();
13008
13009 // This node is about to morph, remove its old self from the CSE maps.
13010 RemoveNodeFromCSEMaps(User);
13011
13012 // A user can appear in a use list multiple times, and when this happens the
13013 // uses are usually next to each other in the list. To help reduce the
13014 // number of CSE and divergence recomputations, process all the uses of this
13015 // user that we can find this way.
13016 bool To_IsDivergent = false;
13017 do {
13018 SDUse &Use = *UI;
// Each use may map to a different replacement value, keyed by result no.
13019 const SDValue &ToOp = To[Use.getResNo()];
13020 ++UI;
13021 Use.set(ToOp);
// Chains (MVT::Other) never carry divergence.
13022 if (ToOp.getValueType() != MVT::Other)
13023 To_IsDivergent |= ToOp->isDivergent();
13024 } while (UI != UE && UI->getUser() == User);
13025
13026 if (To_IsDivergent != From->isDivergent())
13028
13029 // Now that we have modified User, add it back to the CSE maps. If it
13030 // already exists there, recursively merge the results together.
13031 AddModifiedNodeToCSEMaps(User);
13032 }
13033
13034 // If we just RAUW'd the root, take note.
13035 if (From == getRoot().getNode())
13036 setRoot(SDValue(To[getRoot().getResNo()]));
13037}
13038
13039/// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
13040/// uses of other values produced by From.getNode() alone. The Deleted
13041/// vector is handled the same way as for ReplaceAllUsesWith.
// NOTE(review): the declarator line (13042) and line 13088 (presumably
// "updateDivergence(User);") were lost in extraction; recover from the
// original file.
13043 // Handle the really simple, really trivial case efficiently.
13044 if (From == To) return;
13045
13046 // Handle the simple, trivial, case efficiently.
13047 if (From.getNode()->getNumValues() == 1) {
13048 ReplaceAllUsesWith(From, To);
13049 return;
13050 }
13051
13052 // Preserve Debug Info.
13053 transferDbgValues(From, To);
13054 copyExtraInfo(From.getNode(), To.getNode());
13055
13056 // Iterate over just the existing users of From. See the comments in
13057 // the ReplaceAllUsesWith above.
13058 SDNode::use_iterator UI = From.getNode()->use_begin(),
13059 UE = From.getNode()->use_end();
13060 RAUWUpdateListener Listener(*this, UI, UE);
13061 while (UI != UE) {
13062 SDNode *User = UI->getUser();
// CSE-map removal is deferred until we find a use of the right result, so
// users touching only other results are left untouched.
13063 bool UserRemovedFromCSEMaps = false;
13064
13065 // A user can appear in a use list multiple times, and when this
13066 // happens the uses are usually next to each other in the list.
13067 // To help reduce the number of CSE recomputations, process all
13068 // the uses of this user that we can find this way.
13069 do {
13070 SDUse &Use = *UI;
13071
13072 // Skip uses of different values from the same node.
13073 if (Use.getResNo() != From.getResNo()) {
13074 ++UI;
13075 continue;
13076 }
13077
13078 // If this node hasn't been modified yet, it's still in the CSE maps,
13079 // so remove its old self from the CSE maps.
13080 if (!UserRemovedFromCSEMaps) {
13081 RemoveNodeFromCSEMaps(User);
13082 UserRemovedFromCSEMaps = true;
13083 }
13084
13085 ++UI;
13086 Use.set(To);
13087 if (To->isDivergent() != From->isDivergent())
13089 } while (UI != UE && UI->getUser() == User);
13090 // We are iterating over all uses of the From node, so if a use
13091 // doesn't use the specific value, no changes are made.
13092 if (!UserRemovedFromCSEMaps)
13093 continue;
13094
13095 // Now that we have modified User, add it back to the CSE maps. If it
13096 // already exists there, recursively merge the results together.
13097 AddModifiedNodeToCSEMaps(User);
13098 }
13099
13100 // If we just RAUW'd the root, take note.
13101 if (From == getRoot())
13102 setRoot(To);
13103}
13104
13105namespace {
13106
13107/// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith
13108/// to record information about a use.
13109struct UseMemo {
// The using node; nulled by RAUOVWUpdateListener if deleted mid-replacement.
13110 SDNode *User;
// Index into the From/To arrays identifying which value this use refers to.
13111 unsigned Index;
// The concrete use operand to rewrite.
13112 SDUse *Use;
13113};
13114
13115/// operator< - Sort Memos by User.
13116bool operator<(const UseMemo &L, const UseMemo &R) {
13117 return (intptr_t)L.User < (intptr_t)R.User;
13118}
13119
13120/// RAUOVWUpdateListener - Helper for ReplaceAllUsesOfValuesWith - When the node
13121/// pointed to by a UseMemo is deleted, set the User to nullptr to indicate that
13122/// the node already has been taken care of recursively.
13123class RAUOVWUpdateListener : public SelectionDAG::DAGUpdateListener {
13124 SmallVectorImpl<UseMemo> &Uses;
13125
13126 void NodeDeleted(SDNode *N, SDNode *E) override {
13127 for (UseMemo &Memo : Uses)
13128 if (Memo.User == N)
13129 Memo.User = nullptr;
13130 }
13131
13132public:
13133 RAUOVWUpdateListener(SelectionDAG &d, SmallVectorImpl<UseMemo> &uses)
13134 : SelectionDAG::DAGUpdateListener(d), Uses(uses) {}
13135};
13136
13137} // end anonymous namespace
13138
13139/// Return true if a glue output should propagate divergence information.
// NOTE(review): the declarator line (13140, presumably
// "static bool gluePropagatesDivergence(const SDNode *Node) {") was lost in
// extraction; recover from the original file.
13141 switch (Node->getOpcode()) {
// Register copies glue for scheduling only; the glue carries no value
// divergence.
13142 case ISD::CopyFromReg:
13143 case ISD::CopyToReg:
13144 return false;
13145 default:
13146 return true;
13147 }
13148
// Every case above returns, so this is never reached; it documents that the
// switch is exhaustive.
13149 llvm_unreachable("covered opcode switch");
13150}
13151
// NOTE(review): the declarator line (13152, presumably
// "bool SelectionDAG::calculateDivergence(SDNode *N) {") was lost in
// extraction; recover from the original file. Recomputes whether N is
// divergent from target hooks and its operands' divergence bits.
13153 if (TLI->isSDNodeAlwaysUniform(N)) {
13154 assert(!TLI->isSDNodeSourceOfDivergence(N, FLI, UA) &&
13155 "Conflicting divergence information!");
13156 return false;
13157 }
13158 if (TLI->isSDNodeSourceOfDivergence(N, FLI, UA))
13159 return true;
// Otherwise divergence is inherited from any divergent operand.
13160 for (const auto &Op : N->ops()) {
13161 EVT VT = Op.getValueType();
13162
13163 // Skip Chain. It does not carry divergence.
// Glue only propagates divergence for opcodes where it carries a value
// (see gluePropagatesDivergence above).
13164 if (VT != MVT::Other && Op.getNode()->isDivergent() &&
13165 (VT != MVT::Glue || gluePropagatesDivergence(Op.getNode())))
13166 return true;
13167 }
13168 return false;
13169}
13170
// NOTE(review): the declarator line (13171, presumably
// "void SelectionDAG::updateDivergence(SDNode *N) {") was lost in extraction;
// recover from the original file. Recomputes N's divergence bit and, when it
// changes, propagates the change forward through all transitive users.
13172 SmallVector<SDNode *, 16> Worklist(1, N);
13173 do {
13174 N = Worklist.pop_back_val();
13175 bool IsDivergent = calculateDivergence(N);
13176 if (N->SDNodeBits.IsDivergent != IsDivergent) {
13177 N->SDNodeBits.IsDivergent = IsDivergent;
// Only enqueue users when the bit actually flipped, so the walk terminates.
13178 llvm::append_range(Worklist, N->users());
13179 }
13180 } while (!Worklist.empty());
13181}
13182
// Kahn's algorithm: seed Order with operand-less nodes, then release each
// user once all of its operands have been emitted.
// NOTE(review): line 13184 (presumably the "Degree" DenseMap declaration) was
// lost in extraction; recover from the original file.
13183void SelectionDAG::CreateTopologicalOrder(std::vector<SDNode *> &Order) {
13185 Order.reserve(AllNodes.size());
13186 for (auto &N : allnodes()) {
13187 unsigned NOps = N.getNumOperands();
13188 Degree[&N] = NOps;
13189 if (0 == NOps)
13190 Order.push_back(&N);
13191 }
// Order grows while we iterate; indexing (not iterators) keeps this valid.
13192 for (size_t I = 0; I != Order.size(); ++I) {
13193 SDNode *N = Order[I];
13194 for (auto *U : N->users()) {
13195 unsigned &UnsortedOps = Degree[U];
13196 if (0 == --UnsortedOps)
13197 Order.push_back(U);
13198 }
13199 }
13200}
13201
13202#if !defined(NDEBUG) && LLVM_ENABLE_ABI_BREAKING_CHECKS
13203void SelectionDAG::VerifyDAGDivergence() {
13204 std::vector<SDNode *> TopoOrder;
13205 CreateTopologicalOrder(TopoOrder);
13206 for (auto *N : TopoOrder) {
13207 assert(calculateDivergence(N) == N->isDivergent() &&
13208 "Divergence bit inconsistency detected");
13209 }
13210}
13211#endif
13212
13213/// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
13214/// uses of other values produced by From.getNode() alone. The same value
13215/// may appear in both the From and To list. The Deleted vector is
13216/// handled the same way as for ReplaceAllUsesWith.
// NOTE(review): the declarator line (13217), line 13230 (presumably the
// "Uses" SmallVector declaration) and line 13243 (presumably the sort call on
// Uses) were lost in extraction; recover from the original file.
13218 const SDValue *To,
13219 unsigned Num){
13220 // Handle the simple, trivial case efficiently.
13221 if (Num == 1)
13222 return ReplaceAllUsesOfValueWith(*From, *To);
13223
13224 transferDbgValues(*From, *To);
13225 copyExtraInfo(From->getNode(), To->getNode());
13226
13227 // Read up all the uses and make records of them. This helps
13228 // processing new uses that are introduced during the
13229 // replacement process.
13231 for (unsigned i = 0; i != Num; ++i) {
13232 unsigned FromResNo = From[i].getResNo();
13233 SDNode *FromNode = From[i].getNode();
13234 for (SDUse &Use : FromNode->uses()) {
13235 if (Use.getResNo() == FromResNo) {
13236 UseMemo Memo = {Use.getUser(), i, &Use};
13237 Uses.push_back(Memo);
13238 }
13239 }
13240 }
13241
13242 // Sort the uses, so that all the uses from a given User are together.
// Listener nulls out memos whose user dies during recursive CSE merging.
13244 RAUOVWUpdateListener Listener(*this, Uses);
13245
13246 for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
13247 UseIndex != UseIndexEnd; ) {
13248 // We know that this user uses some value of From. If it is the right
13249 // value, update it.
13250 SDNode *User = Uses[UseIndex].User;
13251 // If the node has been deleted by recursive CSE updates when updating
13252 // another node, then just skip this entry.
13253 if (User == nullptr) {
13254 ++UseIndex;
13255 continue;
13256 }
13257
13258 // This node is about to morph, remove its old self from the CSE maps.
13259 RemoveNodeFromCSEMaps(User);
13260
13261 // The Uses array is sorted, so all the uses for a given User
13262 // are next to each other in the list.
13263 // To help reduce the number of CSE recomputations, process all
13264 // the uses of this user that we can find this way.
13265 do {
13266 unsigned i = Uses[UseIndex].Index;
13267 SDUse &Use = *Uses[UseIndex].Use;
13268 ++UseIndex;
13269
13270 Use.set(To[i]);
13271 } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);
13272
13273 // Now that we have modified User, add it back to the CSE maps. If it
13274 // already exists there, recursively merge the results together.
13275 AddModifiedNodeToCSEMaps(User);
13276 }
13277}
13278
13279/// AssignTopologicalOrder - Assign a unique node id for each node in the DAG
13280/// based on their topological order. It returns the maximum id and a vector
13281/// of the SDNodes* in assigned order by reference.
// NOTE(review): the declarator line (13282, presumably
// "unsigned SelectionDAG::AssignTopologicalOrder() {"), the loop header at
// 13297 and line 13339 were lost in extraction; recover from the original
// file.
13283 unsigned DAGSize = 0;
13284
13285 // SortedPos tracks the progress of the algorithm. Nodes before it are
13286 // sorted, nodes after it are unsorted. When the algorithm completes
13287 // it is at the end of the list.
13288 allnodes_iterator SortedPos = allnodes_begin();
13289
13290 // Visit all the nodes. Move nodes with no operands to the front of
13291 // the list immediately. Annotate nodes that do have operands with their
13292 // operand count. Before we do this, the Node Id fields of the nodes
13293 // may contain arbitrary values. After, the Node Id fields for nodes
13294 // before SortedPos will contain the topological sort index, and the
13295 // Node Id fields for nodes At SortedPos and after will contain the
13296 // count of outstanding operands.
13298 checkForCycles(&N, this);
13299 unsigned Degree = N.getNumOperands();
13300 if (Degree == 0) {
13301 // A node with no uses, add it to the result array immediately.
13302 N.setNodeId(DAGSize++);
13303 allnodes_iterator Q(&N);
// Splice the node into sorted position within the intrusive list.
13304 if (Q != SortedPos)
13305 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
13306 assert(SortedPos != AllNodes.end() && "Overran node list");
13307 ++SortedPos;
13308 } else {
13309 // Temporarily use the Node Id as scratch space for the degree count.
13310 N.setNodeId(Degree);
13311 }
13312 }
13313
13314 // Visit all the nodes. As we iterate, move nodes into sorted order,
13315 // such that by the time the end is reached all nodes will be sorted.
13316 for (SDNode &Node : allnodes()) {
13317 SDNode *N = &Node;
13318 checkForCycles(N, this);
13319 // N is in sorted position, so all its uses have one less operand
13320 // that needs to be sorted.
13321 for (SDNode *P : N->users()) {
13322 unsigned Degree = P->getNodeId();
13323 assert(Degree != 0 && "Invalid node degree");
13324 --Degree;
13325 if (Degree == 0) {
13326 // All of P's operands are sorted, so P may sorted now.
13327 P->setNodeId(DAGSize++);
13328 if (P->getIterator() != SortedPos)
13329 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
13330 assert(SortedPos != AllNodes.end() && "Overran node list");
13331 ++SortedPos;
13332 } else {
13333 // Update P's outstanding operand count.
13334 P->setNodeId(Degree);
13335 }
13336 }
// Reaching SortedPos while still iterating means some node was never
// released — i.e. the DAG contains a cycle. Diagnose and abort.
13337 if (Node.getIterator() == SortedPos) {
13338#ifndef NDEBUG
13340 SDNode *S = &*++I;
13341 dbgs() << "Overran sorted position:\n";
13342 S->dumprFull(this); dbgs() << "\n";
13343 dbgs() << "Checking if this is due to cycles\n";
13344 checkForCycles(this, true);
13345#endif
13346 llvm_unreachable(nullptr);
13347 }
13348 }
13349
13350 assert(SortedPos == AllNodes.end() &&
13351 "Topological sort incomplete!");
13352 assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
13353 "First node in topological sort is not the entry token!");
13354 assert(AllNodes.front().getNodeId() == 0 &&
13355 "First node in topological sort has non-zero id!");
13356 assert(AllNodes.front().getNumOperands() == 0 &&
13357 "First node in topological sort has operands!");
13358 assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
13359 "Last node in topologic sort has unexpected id!");
13360 assert(AllNodes.back().use_empty() &&
13361 "Last node in topologic sort has users!");
13362 assert(DAGSize == allnodes_size() && "Node count mismatch!");
13363 return DAGSize;
13364}
13365
// NOTE(review): the declarator line (13366) was lost in extraction; this is a
// const topological-ordering helper that fills SortedNodes without mutating
// the DAG (unlike AssignTopologicalOrder above). Recover the exact name from
// the original file.
13367 SmallVectorImpl<const SDNode *> &SortedNodes) const {
13368 SortedNodes.clear();
13369 // Node -> remaining number of outstanding operands.
13370 DenseMap<const SDNode *, unsigned> RemainingOperands;
13371
13372 // Put nodes without any operands into SortedNodes first.
13373 for (const SDNode &N : allnodes()) {
13374 checkForCycles(&N, this);
13375 unsigned NumOperands = N.getNumOperands();
13376 if (NumOperands == 0)
13377 SortedNodes.push_back(&N);
13378 else
13379 // Record their total number of outstanding operands.
13380 RemainingOperands[&N] = NumOperands;
13381 }
13382
13383 // A node is pushed into SortedNodes when all of its operands (predecessors in
13384 // the graph) are also in SortedNodes.
// SortedNodes grows while we iterate; indexing keeps the loop valid.
13385 for (unsigned i = 0U; i < SortedNodes.size(); ++i) {
13386 const SDNode *N = SortedNodes[i];
13387 for (const SDNode *U : N->users()) {
13388 // HandleSDNode is never part of a DAG and therefore has no entry in
13389 // RemainingOperands.
13390 if (U->getOpcode() == ISD::HANDLENODE)
13391 continue;
13392 unsigned &NumRemOperands = RemainingOperands[U];
13393 assert(NumRemOperands && "Invalid number of remaining operands");
13394 --NumRemOperands;
13395 if (!NumRemOperands)
13396 SortedNodes.push_back(U);
13397 }
13398 }
13399
13400 assert(SortedNodes.size() == AllNodes.size() && "Node count mismatch");
13401 assert(SortedNodes.front()->getOpcode() == ISD::EntryToken &&
13402 "First node in topological sort is not the entry token");
13403 assert(SortedNodes.front()->getNumOperands() == 0 &&
13404 "First node in topological sort has operands");
13405}
13406
13407/// AddDbgValue - Add a dbg_value SDNode. If SD is non-null that means the
13408/// value is produced by SD.
13409void SelectionDAG::AddDbgValue(SDDbgValue *DB, bool isParameter) {
13410 for (SDNode *SD : DB->getSDNodes()) {
13411 if (!SD)
13412 continue;
13413 assert(DbgInfo->getSDDbgValues(SD).empty() || SD->getHasDebugValue());
13414 SD->setHasDebugValue(true);
13415 }
13416 DbgInfo->add(DB, isParameter);
13417}
13418
13419void SelectionDAG::AddDbgLabel(SDDbgLabel *DB) { DbgInfo->add(DB); }
13420
// Give a new memory operation the same chain position as an old chain by
// joining them with a TokenFactor and redirecting the old chain's users to it.
// Returns the chain callers should use after the replacement.
// NOTE(review): the signature line (embedded 13421, first parameter
// `SDValue OldChain`) was dropped by extraction.
13422 SDValue NewMemOpChain) {
13423 assert(isa<MemSDNode>(NewMemOpChain) && "Expected a memop node");
13424 assert(NewMemOpChain.getValueType() == MVT::Other && "Expected a token VT");
13425 // The new memory operation must have the same position as the old load in
13426 // terms of memory dependency. Create a TokenFactor for the old load and new
13427 // memory operation and update uses of the old load's output chain to use that
13428 // TokenFactor.
// Nothing to do when the chains already coincide or nobody uses the old one.
13429 if (OldChain == NewMemOpChain || OldChain.use_empty())
13430 return NewMemOpChain;
13431
13432 SDValue TokenFactor = getNode(ISD::TokenFactor, SDLoc(OldChain), MVT::Other,
13433 OldChain, NewMemOpChain);
13434 ReplaceAllUsesOfValueWith(OldChain, TokenFactor);
// RAUW also rewrote the TokenFactor's own operand; restore it to OldChain.
13435 UpdateNodeOperands(TokenFactor.getNode(), OldChain, NewMemOpChain);
13436 return TokenFactor;
13437 }
13438
// Convenience overload: order a new memop relative to an old load's output
// chain (result #1 of the load and of the new memop).
// NOTE(review): the signature line (embedded 13439, first parameter
// `LoadSDNode *OldLoad`) was dropped by extraction.
13440 SDValue NewMemOp) {
13441 assert(isa<MemSDNode>(NewMemOp.getNode()) && "Expected a memop node");
13442 SDValue OldChain = SDValue(OldLoad, 1);
13443 SDValue NewMemOpChain = NewMemOp.getValue(1);
13444 return makeEquivalentMemoryOrdering(OldChain, NewMemOpChain);
13445 }
13446
// Resolve an ExternalSymbol node to a GlobalAddress for the Function of the
// same name in the current module; fatal error if no such function exists.
// Optionally reports the resolved Function through OutFunction.
// NOTE(review): the signature line (embedded 13447, first parameter
// `SDValue Op`) was dropped by extraction.
13448 Function **OutFunction) {
13449 assert(isa<ExternalSymbolSDNode>(Op) && "Node should be an ExternalSymbol");
13450
13451 auto *Symbol = cast<ExternalSymbolSDNode>(Op)->getSymbol();
13452 auto *Module = MF->getFunction().getParent();
13453 auto *Function = Module->getFunction(Symbol);
13454
13455 if (OutFunction != nullptr)
13456 *OutFunction = Function;
13457
13458 if (Function != nullptr) {
// Use the pointer type of the function's own address space.
13459 auto PtrTy = TLI->getPointerTy(getDataLayout(), Function->getAddressSpace());
13460 return getGlobalAddress(Function, SDLoc(Op), PtrTy);
13461 }
13462
13463 std::string ErrorStr;
13464 raw_string_ostream ErrorFormatter(ErrorStr);
13465 ErrorFormatter << "Undefined external symbol ";
13466 ErrorFormatter << '"' << Symbol << '"';
13467 report_fatal_error(Twine(ErrorStr));
13468 }
13469
13470//===----------------------------------------------------------------------===//
13471// SDNode Class
13472//===----------------------------------------------------------------------===//
13473
// Tail of llvm::isNullConstant — true iff V is an integer constant zero.
// NOTE(review): the signature and the dyn_cast line defining `Const`
// (embedded 13474-13475) were dropped by extraction.
13476 return Const != nullptr && Const->isZero();
13477 }
13478
// Tail of llvm::isNullConstantOrUndef — true iff V is undef or constant zero.
// NOTE(review): the signature line (embedded 13479) was dropped by extraction.
13480 return V.isUndef() || isNullConstant(V);
13481 }
13482
// Tail of llvm::isNullFPConstant — true iff V is FP positive zero (zero and
// not negative, i.e. +0.0).
// NOTE(review): the signature and dyn_cast lines (embedded 13483-13484) were
// dropped by extraction.
13485 return Const != nullptr && Const->isZero() && !Const->isNegative();
13486 }
13487
// Tail of llvm::isAllOnesConstant — true iff V is an all-ones constant.
// NOTE(review): the signature and dyn_cast lines (embedded 13488-13489) were
// dropped by extraction.
13490 return Const != nullptr && Const->isAllOnes();
13491 }
13492
// Tail of llvm::isOneConstant — true iff V is the integer constant one.
// NOTE(review): the signature and dyn_cast lines (embedded 13493-13494) were
// dropped by extraction.
13495 return Const != nullptr && Const->isOne();
13496 }
13497
// Tail of llvm::isMinSignedConstant — true iff V is the minimum signed value
// for its width.
// NOTE(review): the signature and dyn_cast lines (embedded 13498-13499) were
// dropped by extraction.
13500 return Const != nullptr && Const->isMinSignedValue();
13501 }
13502
// Returns true if V (as operand number OperandNo of Opcode) is the identity
// element of that operation, e.g. 0 for ADD/OR/XOR, 1 for MUL, all-ones for
// AND. FP cases additionally consult fast-math Flags (signed zeros, NaNs,
// infinities).
13503 bool llvm::isNeutralConstant(unsigned Opcode, SDNodeFlags Flags, SDValue V,
13504 unsigned OperandNo) {
13505 // NOTE: The cases should match with IR's ConstantExpr::getBinOpIdentity().
13506 // TODO: Target-specific opcodes could be added.
13507 if (auto *ConstV = isConstOrConstSplat(V, /*AllowUndefs*/ false,
13508 /*AllowTruncation*/ true)) {
// Truncate to the element width since AllowTruncation may yield a wider APInt.
13509 APInt Const = ConstV->getAPIntValue().trunc(V.getScalarValueSizeInBits());
13510 switch (Opcode) {
13511 case ISD::ADD:
13512 case ISD::OR:
13513 case ISD::XOR:
13514 case ISD::UMAX:
13515 return Const.isZero();
13516 case ISD::MUL:
13517 return Const.isOne();
13518 case ISD::AND:
13519 case ISD::UMIN:
13520 return Const.isAllOnes();
13521 case ISD::SMAX:
13522 return Const.isMinSignedValue();
13523 case ISD::SMIN:
13524 return Const.isMaxSignedValue();
// Non-commutative ops only have a right identity.
13525 case ISD::SUB:
13526 case ISD::SHL:
13527 case ISD::SRA:
13528 case ISD::SRL:
13529 return OperandNo == 1 && Const.isZero();
13530 case ISD::UDIV:
13531 case ISD::SDIV:
13532 return OperandNo == 1 && Const.isOne();
13533 }
13534 } else if (auto *ConstFP = isConstOrConstSplatFP(V)) {
13535 switch (Opcode) {
// +0.0 is only an FADD identity with nsz; -0.0 always is (x + -0.0 == x).
13536 case ISD::FADD:
13537 return ConstFP->isZero() &&
13538 (Flags.hasNoSignedZeros() || ConstFP->isNegative());
13539 case ISD::FSUB:
13540 return OperandNo == 1 && ConstFP->isZero() &&
13541 (Flags.hasNoSignedZeros() || !ConstFP->isNegative());
13542 case ISD::FMUL:
13543 return ConstFP->isExactlyValue(1.0);
13544 case ISD::FDIV:
13545 return OperandNo == 1 && ConstFP->isExactlyValue(1.0);
13546 case ISD::FMINNUM:
13547 case ISD::FMAXNUM: {
13548 // Neutral element for fminnum is NaN, Inf or FLT_MAX, depending on FMF.
13549 EVT VT = V.getValueType();
13550 const fltSemantics &Semantics = VT.getFltSemantics();
13551 APFloat NeutralAF = !Flags.hasNoNaNs()
13552 ? APFloat::getQNaN(Semantics)
13553 : !Flags.hasNoInfs()
13554 ? APFloat::getInf(Semantics)
13555 : APFloat::getLargest(Semantics);
// fmaxnum's identity is the negated value (-Inf / lowest).
13556 if (Opcode == ISD::FMAXNUM)
13557 NeutralAF.changeSign();
13558
13559 return ConstFP->isExactlyValue(NeutralAF);
13560 }
13561 }
13562 }
13563 return false;
13564 }
13565
// llvm::peekThroughBitcasts — strip any chain of BITCAST nodes and return the
// underlying value.
// NOTE(review): the signature line (embedded 13566) was dropped by extraction.
13567 while (V.getOpcode() == ISD::BITCAST)
13568 V = V.getOperand(0);
13569 return V;
13570 }
13571
// llvm::peekThroughOneUseBitcasts — like peekThroughBitcasts, but only strips
// bitcasts whose source has a single use.
// NOTE(review): the signature line (embedded 13572) was dropped by extraction.
13573 while (V.getOpcode() == ISD::BITCAST && V.getOperand(0).hasOneUse())
13574 V = V.getOperand(0);
13575 return V;
13576 }
13577
// llvm::peekThroughExtractSubvectors — strip any chain of EXTRACT_SUBVECTOR
// nodes and return the widest source vector.
// NOTE(review): the signature line (embedded 13578) was dropped by extraction.
13579 while (V.getOpcode() == ISD::EXTRACT_SUBVECTOR)
13580 V = V.getOperand(0);
13581 return V;
13582 }
13583
// llvm::peekThroughInsertVectorElt — skip INSERT_VECTOR_ELT nodes whose
// inserted lane is a known constant index outside DemandedElts, returning the
// vector underneath.
// NOTE(review): the signature line (embedded 13584) was dropped by extraction.
13585 while (V.getOpcode() == ISD::INSERT_VECTOR_ELT) {
13586 SDValue InVec = V.getOperand(0);
13587 SDValue EltNo = V.getOperand(2);
13588 EVT VT = InVec.getValueType();
13589 auto *IndexC = dyn_cast<ConstantSDNode>(EltNo);
// Only skip inserts at an in-range constant lane the caller does not demand.
13590 if (IndexC && VT.isFixedLengthVector() &&
13591 IndexC->getAPIntValue().ult(VT.getVectorNumElements()) &&
13592 !DemandedElts[IndexC->getZExtValue()]) {
13593 V = InVec;
13594 continue;
13595 }
13596 break;
13597 }
13598 return V;
13599 }
13600
// llvm::peekThroughTruncates — strip any chain of TRUNCATE nodes and return
// the underlying (wider) value.
// NOTE(review): the signature line (embedded 13601) was dropped by extraction.
13602 while (V.getOpcode() == ISD::TRUNCATE)
13603 V = V.getOperand(0);
13604 return V;
13605 }
13606
13607bool llvm::isBitwiseNot(SDValue V, bool AllowUndefs) {
13608 if (V.getOpcode() != ISD::XOR)
13609 return false;
13610 V = peekThroughBitcasts(V.getOperand(1));
13611 unsigned NumBits = V.getScalarValueSizeInBits();
13612 ConstantSDNode *C =
13613 isConstOrConstSplat(V, AllowUndefs, /*AllowTruncation*/ true);
13614 return C && (C->getAPIntValue().countr_one() >= NumBits);
13615}
13616
// Convenience overload of llvm::isConstOrConstSplat that demands all elements.
// NOTE(review): the signature line (embedded 13617, parameters
// `SDValue N, bool AllowUndefs`) was dropped by extraction.
13618 bool AllowTruncation) {
13619 APInt DemandedElts = getDemandAllEltsMask(N);
13620 return isConstOrConstSplat(N, DemandedElts, AllowUndefs, AllowTruncation);
13621 }
13622
// llvm::isConstOrConstSplat (DemandedElts form): return the ConstantSDNode if
// N is a scalar constant, a SPLAT_VECTOR of a constant, or a BUILD_VECTOR
// that splats a constant across the demanded elements; nullptr otherwise.
// NOTE(review): three lines were dropped by extraction — the signature
// (embedded 13623), the scalar dyn_cast guard (13626), and the
// BuildVectorSDNode dyn_cast guard (13641).
13624 bool AllowUndefs,
13625 bool AllowTruncation) {
13627 return CN;
13628
13629 // SplatVectors can truncate their operands. Ignore that case here unless
13630 // AllowTruncation is set.
13631 if (N->getOpcode() == ISD::SPLAT_VECTOR) {
13632 EVT VecEltVT = N->getValueType(0).getVectorElementType();
13633 if (auto *CN = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
13634 EVT CVT = CN->getValueType(0);
13635 assert(CVT.bitsGE(VecEltVT) && "Illegal splat_vector element extension");
13636 if (AllowTruncation || CVT == VecEltVT)
13637 return CN;
13638 }
13639 }
13640
13642 BitVector UndefElements;
13643 ConstantSDNode *CN = BV->getConstantSplatNode(DemandedElts, &UndefElements);
13644
13645 // BuildVectors can truncate their operands. Ignore that case here unless
13646 // AllowTruncation is set.
13647 // TODO: Look into whether we should allow UndefElements in non-DemandedElts
13648 if (CN && (UndefElements.none() || AllowUndefs)) {
13649 EVT CVT = CN->getValueType(0);
13650 EVT NSVT = N.getValueType().getScalarType();
13651 assert(CVT.bitsGE(NSVT) && "Illegal build vector element extension");
13652 if (AllowTruncation || (CVT == NSVT))
13653 return CN;
13654 }
13655 }
13656
13657 return nullptr;
13658 }
13659
// Convenience overload of llvm::isConstOrConstSplatFP that demands all
// elements.
// NOTE(review): the signature line (embedded 13660) was dropped by extraction.
13661 APInt DemandedElts = getDemandAllEltsMask(N);
13662 return isConstOrConstSplatFP(N, DemandedElts, AllowUndefs);
13663 }
13664
// llvm::isConstOrConstSplatFP (DemandedElts form): return the
// ConstantFPSDNode if N is a scalar FP constant, a BUILD_VECTOR splat over
// the demanded elements, or a SPLAT_VECTOR of an FP constant.
// NOTE(review): three lines were dropped by extraction — the signature
// (embedded 13665), the scalar dyn_cast guard (13668), and the
// BuildVectorSDNode dyn_cast guard (13671).
13666 const APInt &DemandedElts,
13667 bool AllowUndefs) {
13669 return CN;
13670
13672 BitVector UndefElements;
13673 ConstantFPSDNode *CN =
13674 BV->getConstantFPSplatNode(DemandedElts, &UndefElements);
13675 // TODO: Look into whether we should allow UndefElements in non-DemandedElts
13676 if (CN && (UndefElements.none() || AllowUndefs))
13677 return CN;
13678 }
13679
13680 if (N.getOpcode() == ISD::SPLAT_VECTOR)
13681 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N.getOperand(0)))
13682 return CN;
13683
13684 return nullptr;
13685 }
13686
13687bool llvm::isNullOrNullSplat(SDValue N, bool AllowUndefs) {
13688 // TODO: may want to use peekThroughBitcast() here.
13689 ConstantSDNode *C =
13690 isConstOrConstSplat(N, AllowUndefs, /*AllowTruncation=*/true);
13691 return C && C->isZero();
13692}
13693
13694bool llvm::isOneOrOneSplat(SDValue N, bool AllowUndefs) {
13695 ConstantSDNode *C =
13696 isConstOrConstSplat(N, AllowUndefs, /*AllowTruncation*/ true);
13697 return C && C->isOne();
13698}
13699
13700bool llvm::isOneOrOneSplatFP(SDValue N, bool AllowUndefs) {
13701 ConstantFPSDNode *C = isConstOrConstSplatFP(N, AllowUndefs);
13702 return C && C->isExactlyValue(1.0);
13703}
13704
// True iff N is an all-ones constant or all-ones splat whose constant is
// exactly the scalar width (no implicit truncation/extension).
13705 bool llvm::isAllOnesOrAllOnesSplat(SDValue N, bool AllowUndefs) {
// NOTE(review): one statement line (embedded 13706) was dropped by
// extraction here — presumably a peek-through of N; confirm against upstream.
13707 unsigned BitWidth = N.getScalarValueSizeInBits();
13708 ConstantSDNode *C = isConstOrConstSplat(N, AllowUndefs);
13709 return C && C->isAllOnes() && C->getValueSizeInBits(0) == BitWidth;
13710 }
13711
13712bool llvm::isOnesOrOnesSplat(SDValue N, bool AllowUndefs) {
13713 ConstantSDNode *C = isConstOrConstSplat(N, AllowUndefs);
13714 return C && APInt::isSameValue(C->getAPIntValue(),
13715 APInt(C->getAPIntValue().getBitWidth(), 1));
13716}
13717
// True iff N is a constant zero or zero splat (truncated constants allowed).
13718 bool llvm::isZeroOrZeroSplat(SDValue N, bool AllowUndefs) {
// NOTE(review): one statement line (embedded 13719) was dropped by
// extraction here — presumably a peek-through of N; confirm against upstream.
13720 ConstantSDNode *C = isConstOrConstSplat(N, AllowUndefs, true);
13721 return C && C->isZero();
13722 }
13723
13724bool llvm::isZeroOrZeroSplatFP(SDValue N, bool AllowUndefs) {
13725 ConstantFPSDNode *C = isConstOrConstSplatFP(N, AllowUndefs);
13726 return C && C->isZero();
13727}
13728
13732
// MemSDNode constructor: caches aggregate memory-operand properties
// (volatile/nontemporal are OR-ed, dereferenceable/invariant are AND-ed
// across all MMOs) into the node's bitfields.
// NOTE(review): the lines carrying the constructor name (embedded 13733),
// part of the parameter list (13735), and the start of the trailing size
// assert (13756-13757) were dropped by extraction.
13734 unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTs, EVT memvt,
13736 : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MemRefs(memrefs) {
13737 bool IsVolatile = false;
13738 bool IsNonTemporal = false;
13739 bool IsDereferenceable = true;
13740 bool IsInvariant = true;
13741 for (const MachineMemOperand *MMO : memoperands()) {
13742 IsVolatile |= MMO->isVolatile();
13743 IsNonTemporal |= MMO->isNonTemporal();
13744 IsDereferenceable &= MMO->isDereferenceable();
13745 IsInvariant &= MMO->isInvariant();
13746 }
13747 MemSDNodeBits.IsVolatile = IsVolatile;
13748 MemSDNodeBits.IsNonTemporal = IsNonTemporal;
13749 MemSDNodeBits.IsDereferenceable = IsDereferenceable;
13750 MemSDNodeBits.IsInvariant = IsInvariant;
13751
13752 // For the single-MMO case, we check here that the size of the memory operand
13753 // fits within the size of the MMO. This is because the MMO might indicate
13754 // only a possible address range instead of specifying the affected memory
13755 // addresses precisely.
13758 getMemOperand()->getSize().getValue())) &&
13759 "Size mismatch!");
13760 }
13761
13762/// Profile - Gather unique data for the node.
13763///
13765 AddNodeIDNode(ID, this);
13766}
13767
13768namespace {
13769
13770 struct EVTArray {
13771 std::vector<EVT> VTs;
13772
13773 EVTArray() {
13774 VTs.reserve(MVT::VALUETYPE_SIZE);
13775 for (unsigned i = 0; i < MVT::VALUETYPE_SIZE; ++i)
13776 VTs.push_back(MVT((MVT::SimpleValueType)i));
13777 }
13778 };
13779
13780} // end anonymous namespace
13781
13782/// getValueTypeList - Return a pointer to the specified value type.
13783///
13784const EVT *SDNode::getValueTypeList(MVT VT) {
13785 static EVTArray SimpleVTArray;
13786
13787 assert(VT < MVT::VALUETYPE_SIZE && "Value type out of range!");
13788 return &SimpleVTArray.VTs[VT.SimpleTy];
13789}
13790
13791/// hasAnyUseOfValue - Return true if there are any use of the indicated
13792/// value. This method ignores uses of other values defined by this operation.
13793bool SDNode::hasAnyUseOfValue(unsigned Value) const {
13794 assert(Value < getNumValues() && "Bad value!");
13795
13796 for (SDUse &U : uses())
13797 if (U.getResNo() == Value)
13798 return true;
13799
13800 return false;
13801}
13802
13803/// isOnlyUserOf - Return true if this node is the only use of N.
13804bool SDNode::isOnlyUserOf(const SDNode *N) const {
13805 bool Seen = false;
13806 for (const SDNode *User : N->users()) {
13807 if (User == this)
13808 Seen = true;
13809 else
13810 return false;
13811 }
13812
13813 return Seen;
13814}
13815
13816/// Return true if the only users of N are contained in Nodes.
13818 bool Seen = false;
13819 for (const SDNode *User : N->users()) {
13820 if (llvm::is_contained(Nodes, User))
13821 Seen = true;
13822 else
13823 return false;
13824 }
13825
13826 return Seen;
13827}
13828
13829/// Return true if the referenced return value is an operand of N.
13830bool SDValue::isOperandOf(const SDNode *N) const {
13831 return is_contained(N->op_values(), *this);
13832}
13833
13834bool SDNode::isOperandOf(const SDNode *N) const {
13835 return any_of(N->op_values(),
13836 [this](SDValue Op) { return this == Op.getNode(); });
13837}
13838
13839/// reachesChainWithoutSideEffects - Return true if this operand (which must
13840/// be a chain) reaches the specified operand without crossing any
13841/// side-effecting instructions on any chain path. In practice, this looks
13842/// through token factors and non-volatile loads. In order to remain efficient,
13843/// this only looks a couple of nodes in, it does not do an exhaustive search.
13844///
13845/// Note that we only need to examine chains when we're searching for
13846/// side-effects; SelectionDAG requires that all side-effects are represented
13847/// by chains, even if another operand would force a specific ordering. This
13848/// constraint is necessary to allow transformations like splitting loads.
13850 unsigned Depth) const {
13851 if (*this == Dest) return true;
13852
13853 // Don't search too deeply, we just want to be able to see through
13854 // TokenFactor's etc.
13855 if (Depth == 0) return false;
13856
13857 // If this is a token factor, all inputs to the TF happen in parallel.
13858 if (getOpcode() == ISD::TokenFactor) {
13859 // First, try a shallow search.
13860 if (is_contained((*this)->ops(), Dest)) {
13861 // We found the chain we want as an operand of this TokenFactor.
13862 // Essentially, we reach the chain without side-effects if we could
13863 // serialize the TokenFactor into a simple chain of operations with
13864 // Dest as the last operation. This is automatically true if the
13865 // chain has one use: there are no other ordering constraints.
13866 // If the chain has more than one use, we give up: some other
13867 // use of Dest might force a side-effect between Dest and the current
13868 // node.
13869 if (Dest.hasOneUse())
13870 return true;
13871 }
13872 // Next, try a deep search: check whether every operand of the TokenFactor
13873 // reaches Dest.
13874 return llvm::all_of((*this)->ops(), [=](SDValue Op) {
13875 return Op.reachesChainWithoutSideEffects(Dest, Depth - 1);
13876 });
13877 }
13878
13879 // Loads don't have side effects, look through them.
13880 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) {
13881 if (Ld->isUnordered())
13882 return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1);
13883 }
13884 return false;
13885}
13886
// True if N is reachable from this node by following operands (i.e. N is a
// predecessor in the DAG).
13887 bool SDNode::hasPredecessor(const SDNode *N) const {
// NOTE(review): the declarations of Visited and Worklist (embedded
// 13888-13889) were dropped by extraction.
13890 Worklist.push_back(this);
13891 return hasPredecessorHelper(N, Visited, Worklist);
13892 }
13893
// SDNode::intersectFlagsWith — keep only the node flags also present in the
// given Flags (bitwise AND of flag sets).
// NOTE(review): the signature line (embedded 13894) was dropped by extraction.
13895 this->Flags &= Flags;
13896 }
13897
// Match an extract-lane-0 of a shuffle+binop "pyramid" — i.e. a horizontal
// reduction — returning the vector being reduced (or a cheap subvector for a
// partial match when AllowPartials) and reporting the binop in BinOp.
// NOTE(review): three lines were dropped by extraction — the line naming this
// function (embedded 13899), the declaration of `Shuffle` (13967), and part
// of an EXTRACT_SUBVECTOR opcode check (13995).
13898 SDValue
13900 ArrayRef<ISD::NodeType> CandidateBinOps,
13901 bool AllowPartials) {
13902 // The pattern must end in an extract from index 0.
13903 if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
13904 !isNullConstant(Extract->getOperand(1)))
13905 return SDValue();
13906
13907 // Match against one of the candidate binary ops.
13908 SDValue Op = Extract->getOperand(0);
13909 if (llvm::none_of(CandidateBinOps, [Op](ISD::NodeType BinOp) {
13910 return Op.getOpcode() == unsigned(BinOp);
13911 }))
13912 return SDValue();
13913
13914 // Floating-point reductions may require relaxed constraints on the final step
13915 // of the reduction because they may reorder intermediate operations.
13916 unsigned CandidateBinOp = Op.getOpcode();
13917 if (Op.getValueType().isFloatingPoint()) {
13918 SDNodeFlags Flags = Op->getFlags();
13919 switch (CandidateBinOp) {
13920 case ISD::FADD:
13921 if (!Flags.hasNoSignedZeros() || !Flags.hasAllowReassociation())
13922 return SDValue();
13923 break;
13924 default:
13925 llvm_unreachable("Unhandled FP opcode for binop reduction");
13926 }
13927 }
13928
13929 // Matching failed - attempt to see if we did enough stages that a partial
13930 // reduction from a subvector is possible.
13931 auto PartialReduction = [&](SDValue Op, unsigned NumSubElts) {
13932 if (!AllowPartials || !Op)
13933 return SDValue();
13934 EVT OpVT = Op.getValueType();
13935 EVT OpSVT = OpVT.getScalarType();
13936 EVT SubVT = EVT::getVectorVT(*getContext(), OpSVT, NumSubElts);
13937 if (!TLI->isExtractSubvectorCheap(SubVT, OpVT, 0))
13938 return SDValue();
13939 BinOp = (ISD::NodeType)CandidateBinOp;
13940 return getExtractSubvector(SDLoc(Op), SubVT, Op, 0);
13941 };
13942
13943 // At each stage, we're looking for something that looks like:
13944 // %s = shufflevector <8 x i32> %op, <8 x i32> undef,
13945 // <8 x i32> <i32 2, i32 3, i32 undef, i32 undef,
13946 // i32 undef, i32 undef, i32 undef, i32 undef>
13947 // %a = binop <8 x i32> %op, %s
13948 // Where the mask changes according to the stage. E.g. for a 3-stage pyramid,
13949 // we expect something like:
13950 // <4,5,6,7,u,u,u,u>
13951 // <2,3,u,u,u,u,u,u>
13952 // <1,u,u,u,u,u,u,u>
13953 // While a partial reduction match would be:
13954 // <2,3,u,u,u,u,u,u>
13955 // <1,u,u,u,u,u,u,u>
13956 unsigned Stages = Log2_32(Op.getValueType().getVectorNumElements());
13957 SDValue PrevOp;
// Walk the pyramid top-down; MaskEnd doubles each stage.
13958 for (unsigned i = 0; i < Stages; ++i) {
13959 unsigned MaskEnd = (1 << i);
13960
13961 if (Op.getOpcode() != CandidateBinOp)
13962 return PartialReduction(PrevOp, MaskEnd);
13963
13964 SDValue Op0 = Op.getOperand(0);
13965 SDValue Op1 = Op.getOperand(1);
13966
13968 if (Shuffle) {
13969 Op = Op1;
13970 } else {
13971 Shuffle = dyn_cast<ShuffleVectorSDNode>(Op1);
13972 Op = Op0;
13973 }
13974
13975 // The first operand of the shuffle should be the same as the other operand
13976 // of the binop.
13977 if (!Shuffle || Shuffle->getOperand(0) != Op)
13978 return PartialReduction(PrevOp, MaskEnd);
13979
13980 // Verify the shuffle has the expected (at this stage of the pyramid) mask.
13981 for (int Index = 0; Index < (int)MaskEnd; ++Index)
13982 if (Shuffle->getMaskElt(Index) != (int)(MaskEnd + Index))
13983 return PartialReduction(PrevOp, MaskEnd);
13984
13985 PrevOp = Op;
13986 }
13987
13988 // Handle subvector reductions, which tend to appear after the shuffle
13989 // reduction stages.
13990 while (Op.getOpcode() == CandidateBinOp) {
13991 unsigned NumElts = Op.getValueType().getVectorNumElements();
13992 SDValue Op0 = Op.getOperand(0);
13993 SDValue Op1 = Op.getOperand(1);
13994 if (Op0.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
13996 Op0.getOperand(0) != Op1.getOperand(0))
13997 break;
13998 SDValue Src = Op0.getOperand(0);
13999 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
14000 if (NumSrcElts != (2 * NumElts))
14001 break;
// The two extracts must cover the low and high halves, in either order.
14002 if (!(Op0.getConstantOperandAPInt(1) == 0 &&
14003 Op1.getConstantOperandAPInt(1) == NumElts) &&
14004 !(Op1.getConstantOperandAPInt(1) == 0 &&
14005 Op0.getConstantOperandAPInt(1) == NumElts))
14006 break;
14007 Op = Src;
14008 }
14009
14010 BinOp = (ISD::NodeType)CandidateBinOp;
14011 return Op;
14012 }
14013
// SelectionDAG::UnrollVectorOp — scalarize a vector operation into per-element
// ops and rebuild the result vector(s). ResNE selects the result width
// (0 = fully unroll); lanes past the source width are padded with UNDEF.
// NOTE(review): three lines were dropped by extraction — the signature
// (embedded 14014), the declaration of `Scalars` (14064), and a case label
// (14100, the VT-carrying extension case) before the cast<VTSDNode> line.
14015 EVT VT = N->getValueType(0);
14016 EVT EltVT = VT.getVectorElementType();
14017 unsigned NE = VT.getVectorNumElements();
14018
14019 SDLoc dl(N);
14020
14021 // If ResNE is 0, fully unroll the vector op.
14022 if (ResNE == 0)
14023 ResNE = NE;
14024 else if (NE > ResNE)
14025 NE = ResNE;
14026
// Two-result nodes are unrolled pairwise into (Scalars0, Scalars1).
14027 if (N->getNumValues() == 2) {
14028 SmallVector<SDValue, 8> Scalars0, Scalars1;
14029 SmallVector<SDValue, 4> Operands(N->getNumOperands());
14030 EVT VT1 = N->getValueType(1);
14031 EVT EltVT1 = VT1.getVectorElementType();
14032
14033 unsigned i;
14034 for (i = 0; i != NE; ++i) {
14035 for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
14036 SDValue Operand = N->getOperand(j);
14037 EVT OperandVT = Operand.getValueType();
14038
14039 // A vector operand; extract a single element.
14040 EVT OperandEltVT = OperandVT.getVectorElementType();
14041 Operands[j] = getExtractVectorElt(dl, OperandEltVT, Operand, i);
14042 }
14043
14044 SDValue EltOp = getNode(N->getOpcode(), dl, {EltVT, EltVT1}, Operands);
14045 Scalars0.push_back(EltOp);
14046 Scalars1.push_back(EltOp.getValue(1));
14047 }
14048
// Pad both results with UNDEF out to the requested width.
14049 for (; i < ResNE; ++i) {
14050 Scalars0.push_back(getUNDEF(EltVT));
14051 Scalars1.push_back(getUNDEF(EltVT1));
14052 }
14053
14054 EVT VecVT = EVT::getVectorVT(*getContext(), EltVT, ResNE);
14055 EVT VecVT1 = EVT::getVectorVT(*getContext(), EltVT1, ResNE);
14056 SDValue Vec0 = getBuildVector(VecVT, dl, Scalars0);
14057 SDValue Vec1 = getBuildVector(VecVT1, dl, Scalars1);
14058 return getMergeValues({Vec0, Vec1}, dl);
14059 }
14060
14061 assert(N->getNumValues() == 1 &&
14062 "Can't unroll a vector with multiple results!");
14063
14065 SmallVector<SDValue, 4> Operands(N->getNumOperands());
14066
14067 unsigned i;
14068 for (i= 0; i != NE; ++i) {
14069 for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
14070 SDValue Operand = N->getOperand(j);
14071 EVT OperandVT = Operand.getValueType();
14072 if (OperandVT.isVector()) {
14073 // A vector operand; extract a single element.
14074 EVT OperandEltVT = OperandVT.getVectorElementType();
14075 Operands[j] = getExtractVectorElt(dl, OperandEltVT, Operand, i);
14076 } else {
14077 // A scalar operand; just use it as is.
14078 Operands[j] = Operand;
14079 }
14080 }
14081
// Some opcodes need special per-element lowering (select, shifts, casts).
14082 switch (N->getOpcode()) {
14083 default: {
14084 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands,
14085 N->getFlags()));
14086 break;
14087 }
14088 case ISD::VSELECT:
14089 Scalars.push_back(getNode(ISD::SELECT, dl, EltVT, Operands));
14090 break;
14091 case ISD::SHL:
14092 case ISD::SRA:
14093 case ISD::SRL:
14094 case ISD::ROTL:
14095 case ISD::ROTR:
14096 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0],
14097 getShiftAmountOperand(Operands[0].getValueType(),
14098 Operands[1])));
14099 break;
14101 EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType();
14102 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
14103 Operands[0],
14104 getValueType(ExtVT)));
14105 break;
14106 }
14107 case ISD::ADDRSPACECAST: {
14108 const auto *ASC = cast<AddrSpaceCastSDNode>(N);
14109 Scalars.push_back(getAddrSpaceCast(dl, EltVT, Operands[0],
14110 ASC->getSrcAddressSpace(),
14111 ASC->getDestAddressSpace()));
14112 break;
14113 }
14114 }
14115 }
14116
14117 for (; i < ResNE; ++i)
14118 Scalars.push_back(getUNDEF(EltVT));
14119
14120 EVT VecVT = EVT::getVectorVT(*getContext(), EltVT, ResNE);
14121 return getBuildVector(VecVT, dl, Scalars);
14122 }
14123
14124std::pair<SDValue, SDValue> SelectionDAG::UnrollVectorOverflowOp(
14125 SDNode *N, unsigned ResNE) {
14126 unsigned Opcode = N->getOpcode();
14127 assert((Opcode == ISD::UADDO || Opcode == ISD::SADDO ||
14128 Opcode == ISD::USUBO || Opcode == ISD::SSUBO ||
14129 Opcode == ISD::UMULO || Opcode == ISD::SMULO) &&
14130 "Expected an overflow opcode");
14131
14132 EVT ResVT = N->getValueType(0);
14133 EVT OvVT = N->getValueType(1);
14134 EVT ResEltVT = ResVT.getVectorElementType();
14135 EVT OvEltVT = OvVT.getVectorElementType();
14136 SDLoc dl(N);
14137
14138 // If ResNE is 0, fully unroll the vector op.
14139 unsigned NE = ResVT.getVectorNumElements();
14140 if (ResNE == 0)
14141 ResNE = NE;
14142 else if (NE > ResNE)
14143 NE = ResNE;
14144
14145 SmallVector<SDValue, 8> LHSScalars;
14146 SmallVector<SDValue, 8> RHSScalars;
14147 ExtractVectorElements(N->getOperand(0), LHSScalars, 0, NE);
14148 ExtractVectorElements(N->getOperand(1), RHSScalars, 0, NE);
14149
14150 EVT SVT = TLI->getSetCCResultType(getDataLayout(), *getContext(), ResEltVT);
14151 SDVTList VTs = getVTList(ResEltVT, SVT);
14152 SmallVector<SDValue, 8> ResScalars;
14153 SmallVector<SDValue, 8> OvScalars;
14154 for (unsigned i = 0; i < NE; ++i) {
14155 SDValue Res = getNode(Opcode, dl, VTs, LHSScalars[i], RHSScalars[i]);
14156 SDValue Ov =
14157 getSelect(dl, OvEltVT, Res.getValue(1),
14158 getBoolConstant(true, dl, OvEltVT, ResVT),
14159 getConstant(0, dl, OvEltVT));
14160
14161 ResScalars.push_back(Res);
14162 OvScalars.push_back(Ov);
14163 }
14164
14165 ResScalars.append(ResNE - NE, getUNDEF(ResEltVT));
14166 OvScalars.append(ResNE - NE, getUNDEF(OvEltVT));
14167
14168 EVT NewResVT = EVT::getVectorVT(*getContext(), ResEltVT, ResNE);
14169 EVT NewOvVT = EVT::getVectorVT(*getContext(), OvEltVT, ResNE);
14170 return std::make_pair(getBuildVector(NewResVT, dl, ResScalars),
14171 getBuildVector(NewOvVT, dl, OvScalars));
14172}
14173
// True iff LD loads exactly `Bytes` bytes located `Dist * Bytes` past Base,
// with both loads simple, un-indexed, and on the same chain.
// NOTE(review): the signature lines (embedded 14174-14175, parameters
// `LoadSDNode *LD, LoadSDNode *Base`) were dropped by extraction.
14176 unsigned Bytes,
14177 int Dist) const {
14178 if (LD->isVolatile() || Base->isVolatile())
14179 return false;
14180 // TODO: probably too restrictive for atomics, revisit
14181 if (!LD->isSimple())
14182 return false;
14183 if (LD->isIndexed() || Base->isIndexed())
14184 return false;
14185 if (LD->getChain() != Base->getChain())
14186 return false;
14187 EVT VT = LD->getMemoryVT();
14188 if (VT.getSizeInBits() / 8 != Bytes)
14189 return false;
14190
// Decompose both addresses into base+index+offset and compare.
14191 auto BaseLocDecomp = BaseIndexOffset::match(Base, *this);
14192 auto LocDecomp = BaseIndexOffset::match(LD, *this);
14193
14194 int64_t Offset = 0;
14195 if (BaseLocDecomp.equalBaseIndex(LocDecomp, *this, Offset))
14196 return (Dist * (int64_t)Bytes == Offset);
14197 return false;
14198 }
14199
14200/// InferPtrAlignment - Infer alignment of a load / store address. Return
14201/// std::nullopt if it cannot be inferred.
14203 // If this is a GlobalAddress + cst, return the alignment.
14204 const GlobalValue *GV = nullptr;
14205 int64_t GVOffset = 0;
14206 if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
14207 unsigned PtrWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
14208 KnownBits Known(PtrWidth);
14210 unsigned AlignBits = Known.countMinTrailingZeros();
14211 if (AlignBits)
14212 return commonAlignment(Align(1ull << std::min(31U, AlignBits)), GVOffset);
14213 }
14214
14215 // If this is a direct reference to a stack slot, use information about the
14216 // stack slot's alignment.
14217 int FrameIdx = INT_MIN;
14218 int64_t FrameOffset = 0;
14220 FrameIdx = FI->getIndex();
14221 } else if (isBaseWithConstantOffset(Ptr) &&
14223 // Handle FI+Cst
14224 FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
14225 FrameOffset = Ptr.getConstantOperandVal(1);
14226 }
14227
14228 if (FrameIdx != INT_MIN) {
14230 return commonAlignment(MFI.getObjectAlign(FrameIdx), FrameOffset);
14231 }
14232
14233 return std::nullopt;
14234}
14235
14236/// Split the scalar node with EXTRACT_ELEMENT using the provided
14237/// VTs and return the low/high part.
14238std::pair<SDValue, SDValue> SelectionDAG::SplitScalar(const SDValue &N,
14239 const SDLoc &DL,
14240 const EVT &LoVT,
14241 const EVT &HiVT) {
14242 assert(!LoVT.isVector() && !HiVT.isVector() && !N.getValueType().isVector() &&
14243 "Split node must be a scalar type");
14244 SDValue Lo =
14246 SDValue Hi =
14248 return std::make_pair(Lo, Hi);
14249}
14250
14251/// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type
14252/// which is split (or expanded) into two not necessarily identical pieces.
14253std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const {
14254 // Currently all types are split in half.
14255 EVT LoVT, HiVT;
14256 if (!VT.isVector())
14257 LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT);
14258 else
14259 LoVT = HiVT = VT.getHalfNumVectorElementsVT(*getContext());
14260
14261 return std::make_pair(LoVT, HiVT);
14262}
14263
14264/// GetDependentSplitDestVTs - Compute the VTs needed for the low/hi parts of a
14265/// type, dependent on an enveloping VT that has been split into two identical
14266/// pieces. Sets the HiIsEmpty flag when hi type has zero storage size.
14267std::pair<EVT, EVT>
14269 bool *HiIsEmpty) const {
14270 EVT EltTp = VT.getVectorElementType();
14271 // Examples:
14272 // custom VL=8 with enveloping VL=8/8 yields 8/0 (hi empty)
14273 // custom VL=9 with enveloping VL=8/8 yields 8/1
14274 // custom VL=10 with enveloping VL=8/8 yields 8/2
14275 // etc.
14276 ElementCount VTNumElts = VT.getVectorElementCount();
14277 ElementCount EnvNumElts = EnvVT.getVectorElementCount();
14278 assert(VTNumElts.isScalable() == EnvNumElts.isScalable() &&
14279 "Mixing fixed width and scalable vectors when enveloping a type");
14280 EVT LoVT, HiVT;
14281 if (VTNumElts.getKnownMinValue() > EnvNumElts.getKnownMinValue()) {
14282 LoVT = EVT::getVectorVT(*getContext(), EltTp, EnvNumElts);
14283 HiVT = EVT::getVectorVT(*getContext(), EltTp, VTNumElts - EnvNumElts);
14284 *HiIsEmpty = false;
14285 } else {
14286 // Flag that hi type has zero storage size, but return split envelop type
14287 // (this would be easier if vector types with zero elements were allowed).
14288 LoVT = EVT::getVectorVT(*getContext(), EltTp, VTNumElts);
14289 HiVT = EVT::getVectorVT(*getContext(), EltTp, EnvNumElts);
14290 *HiIsEmpty = true;
14291 }
14292 return std::make_pair(LoVT, HiVT);
14293}
14294
14295/// SplitVector - Split the vector with EXTRACT_SUBVECTOR and return the
14296/// low/high part.
14297std::pair<SDValue, SDValue>
14298SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT,
14299 const EVT &HiVT) {
14300 assert(LoVT.isScalableVector() == HiVT.isScalableVector() &&
14301 LoVT.isScalableVector() == N.getValueType().isScalableVector() &&
14302 "Splitting vector with an invalid mixture of fixed and scalable "
14303 "vector types");
14305 N.getValueType().getVectorMinNumElements() &&
14306 "More vector elements requested than available!");
14307 SDValue Lo, Hi;
14308 Lo = getExtractSubvector(DL, LoVT, N, 0);
14309 // For scalable vectors it is safe to use LoVT.getVectorMinNumElements()
14310 // (rather than having to use ElementCount), because EXTRACT_SUBVECTOR scales
14311 // IDX with the runtime scaling factor of the result vector type. For
14312 // fixed-width result vectors, that runtime scaling factor is 1.
14315 return std::make_pair(Lo, Hi);
14316}
14317
// Split a VP explicit-vector-length operand for a split vector type:
// lo = umin(evl, halfnumelts), hi = usubsat(evl, halfnumelts).
// NOTE(review): two lines were dropped by extraction — the start of the
// even-split assert (embedded 14323) and part of the half-element-count
// expression (14326).
14318 std::pair<SDValue, SDValue> SelectionDAG::SplitEVL(SDValue N, EVT VecVT,
14319 const SDLoc &DL) {
14320 // Split the vector length parameter.
14321 // %evl -> umin(%evl, %halfnumelts) and usubsat(%evl - %halfnumelts).
14322 EVT VT = N.getValueType();
14324 "Expecting the mask to be an evenly-sized vector");
14325 SDValue HalfNumElts = getElementCount(
14327 SDValue Lo = getNode(ISD::UMIN, DL, VT, N, HalfNumElts);
14328 SDValue Hi = getNode(ISD::USUBSAT, DL, VT, N, HalfNumElts);
14329 return std::make_pair(Lo, Hi);
14330 }
14331
14332/// Widen the vector up to the next power of two using INSERT_SUBVECTOR.
14334 EVT VT = N.getValueType();
14337 return getInsertSubvector(DL, getUNDEF(WideVT), N, 0);
14338}
14339
// SelectionDAG::ExtractVectorElements — append Count scalar extracts of Op's
// elements [Start, Start+Count) to Args, using EltVT (or Op's element type
// when EltVT is the default EVT()).
// NOTE(review): the signature lines (embedded 14340-14341, parameters
// `SDValue Op, SmallVectorImpl<SDValue> &Args`) and the early-return
// statement (14346) were dropped by extraction.
14342 unsigned Start, unsigned Count,
14343 EVT EltVT) {
14344 EVT VT = Op.getValueType();
14345 if (Count == 0)
14347 if (EltVT == EVT())
14348 EltVT = VT.getVectorElementType();
14349 SDLoc SL(Op);
14350 for (unsigned i = Start, e = Start + Count; i != e; ++i) {
14351 Args.push_back(getExtractVectorElt(SL, EltVT, Op, i));
14352 }
14353 }
14354
14355// getAddressSpace - Return the address space this GlobalAddress belongs to.
14357 return getGlobal()->getType()->getAddressSpace();
14358}
14359
// ConstantPoolSDNode::getType — type of the pool entry, from either the
// machine constant-pool value or the IR constant.
// NOTE(review): the signature and the machine-entry guard condition
// (embedded 14360-14361) were dropped by extraction.
14362 return Val.MachineCPVal->getType();
14363 return Val.ConstVal->getType();
14364 }
14365
14366bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue, APInt &SplatUndef,
14367 unsigned &SplatBitSize,
14368 bool &HasAnyUndefs,
14369 unsigned MinSplatBits,
14370 bool IsBigEndian) const {
14371 EVT VT = getValueType(0);
14372 assert(VT.isVector() && "Expected a vector type");
14373 unsigned VecWidth = VT.getSizeInBits();
14374 if (MinSplatBits > VecWidth)
14375 return false;
14376
14377 // FIXME: The widths are based on this node's type, but build vectors can
14378 // truncate their operands.
14379 SplatValue = APInt(VecWidth, 0);
14380 SplatUndef = APInt(VecWidth, 0);
14381
14382 // Get the bits. Bits with undefined values (when the corresponding element
14383 // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared
14384 // in SplatValue. If any of the values are not constant, give up and return
14385 // false.
14386 unsigned int NumOps = getNumOperands();
14387 assert(NumOps > 0 && "isConstantSplat has 0-size build vector");
14388 unsigned EltWidth = VT.getScalarSizeInBits();
14389
14390 for (unsigned j = 0; j < NumOps; ++j) {
14391 unsigned i = IsBigEndian ? NumOps - 1 - j : j;
14392 SDValue OpVal = getOperand(i);
14393 unsigned BitPos = j * EltWidth;
14394
14395 if (OpVal.isUndef())
14396 SplatUndef.setBits(BitPos, BitPos + EltWidth);
14397 else if (auto *CN = dyn_cast<ConstantSDNode>(OpVal))
14398 SplatValue.insertBits(CN->getAPIntValue().zextOrTrunc(EltWidth), BitPos);
14399 else if (auto *CN = dyn_cast<ConstantFPSDNode>(OpVal))
14400 SplatValue.insertBits(CN->getValueAPF().bitcastToAPInt(), BitPos);
14401 else
14402 return false;
14403 }
14404
14405 // The build_vector is all constants or undefs. Find the smallest element
14406 // size that splats the vector.
14407 HasAnyUndefs = (SplatUndef != 0);
14408
14409 // FIXME: This does not work for vectors with elements less than 8 bits.
14410 while (VecWidth > 8) {
14411 // If we can't split in half, stop here.
14412 if (VecWidth & 1)
14413 break;
14414
14415 unsigned HalfSize = VecWidth / 2;
14416 APInt HighValue = SplatValue.extractBits(HalfSize, HalfSize);
14417 APInt LowValue = SplatValue.extractBits(HalfSize, 0);
14418 APInt HighUndef = SplatUndef.extractBits(HalfSize, HalfSize);
14419 APInt LowUndef = SplatUndef.extractBits(HalfSize, 0);
14420
14421 // If the two halves do not match (ignoring undef bits), stop here.
14422 if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
14423 MinSplatBits > HalfSize)
14424 break;
14425
14426 SplatValue = HighValue | LowValue;
14427 SplatUndef = HighUndef & LowUndef;
14428
14429 VecWidth = HalfSize;
14430 }
14431
14432 // FIXME: The loop above only tries to split in halves. But if the input
14433 // vector for example is <3 x i16> it wouldn't be able to detect a
14434 // SplatBitSize of 16. No idea if that is a design flaw currently limiting
14435 // optimizations. I guess that back in the days when this helper was created
14436 // vectors normally was power-of-2 sized.
14437
14438 SplatBitSize = VecWidth;
14439 return true;
14440}
14441
// Return the single operand splatted across all demanded elements, or an
// empty SDValue if two demanded, defined elements differ. Demanded undef
// elements are recorded in UndefElements (when provided) and do not block a
// splat. If every demanded element is undef, the first demanded operand
// (an undef) is returned.
// NOTE(review): the first signature line (14442) is missing from this
// rendering.
14443 BitVector *UndefElements) const {
14444 unsigned NumOps = getNumOperands();
14445 if (UndefElements) {
14446 UndefElements->clear();
14447 UndefElements->resize(NumOps);
14448 }
14449 assert(NumOps == DemandedElts.getBitWidth() && "Unexpected vector size");
14450 if (!DemandedElts)
14451 return SDValue();
14452 SDValue Splatted;
14453 for (unsigned i = 0; i != NumOps; ++i) {
14454 if (!DemandedElts[i])
14455 continue;
14456 SDValue Op = getOperand(i);
14457 if (Op.isUndef()) {
14458 if (UndefElements)
14459 (*UndefElements)[i] = true;
14460 } else if (!Splatted) {
14461 Splatted = Op;
14462 } else if (Splatted != Op) {
14463 return SDValue();
14464 }
14465 }
14466
14467 if (!Splatted) {
14468 unsigned FirstDemandedIdx = DemandedElts.countr_zero();
14469 assert(getOperand(FirstDemandedIdx).isUndef() &&
14470 "Can only have a splat without a constant for all undefs.");
14471 return getOperand(FirstDemandedIdx);
14472 }
14473
14474 return Splatted;
14475}
14476
// Convenience overload: query the splat value with all elements demanded.
// NOTE(review): the signature line (14477) is missing from this rendering.
14478 APInt DemandedElts = APInt::getAllOnes(getNumOperands());
14479 return getSplatValue(DemandedElts, UndefElements);
14480}
14481
// Find the shortest power-of-two-length sequence of operands that, repeated,
// produces this build vector (restricted to the demanded elements). On
// success Sequence holds that pattern (possibly containing undefs) and true
// is returned; otherwise Sequence is left empty and false is returned.
// NOTE(review): the first signature line (14482) and line 14508 (presumably
// fetching the I-th operand into Op) are missing from this rendering.
14483 SmallVectorImpl<SDValue> &Sequence,
14484 BitVector *UndefElements) const {
14485 unsigned NumOps = getNumOperands();
14486 Sequence.clear();
14487 if (UndefElements) {
14488 UndefElements->clear();
14489 UndefElements->resize(NumOps);
14490 }
14491 assert(NumOps == DemandedElts.getBitWidth() && "Unexpected vector size");
14492 if (!DemandedElts || NumOps < 2 || !isPowerOf2_32(NumOps))
14493 return false;
14494
14495 // Set the undefs even if we don't find a sequence (like getSplatValue).
14496 if (UndefElements)
14497 for (unsigned I = 0; I != NumOps; ++I)
14498 if (DemandedElts[I] && getOperand(I).isUndef())
14499 (*UndefElements)[I] = true;
14500
14501 // Iteratively widen the sequence length looking for repetitions.
14502 for (unsigned SeqLen = 1; SeqLen < NumOps; SeqLen *= 2) {
14503 Sequence.append(SeqLen, SDValue());
14504 for (unsigned I = 0; I != NumOps; ++I) {
14505 if (!DemandedElts[I])
14506 continue;
14507 SDValue &SeqOp = Sequence[I % SeqLen];
14509 if (Op.isUndef()) {
// An undef only fills a sequence slot that is still empty; it never
// overwrites a previously seen concrete value.
14510 if (!SeqOp)
14511 SeqOp = Op;
14512 continue;
14513 }
// A concrete operand conflicting with an earlier concrete value at the
// same slot kills this sequence length.
14514 if (SeqOp && !SeqOp.isUndef() && SeqOp != Op) {
14515 Sequence.clear();
14516 break;
14517 }
14518 SeqOp = Op;
14519 }
14520 if (!Sequence.empty())
14521 return true;
14522 }
14523
14524 assert(Sequence.empty() && "Failed to empty non-repeating sequence pattern");
14525 return false;
14526}
14527
// Convenience overload: search for a repeated sequence with all elements
// demanded.
// NOTE(review): the first signature line (14528) is missing from this
// rendering.
14529 BitVector *UndefElements) const {
14530 APInt DemandedElts = APInt::getAllOnes(getNumOperands());
14531 return getRepeatedSequence(DemandedElts, Sequence, UndefElements);
14532}
14533
// Return the splat value as a ConstantSDNode, or null if the splatted
// operand is not a constant integer.
// NOTE(review): the signature lines (14534-14535) and the start of the
// return expression (14537, presumably a dyn_cast_or_null<ConstantSDNode>)
// are missing from this rendering.
14536 BitVector *UndefElements) const {
14538 getSplatValue(DemandedElts, UndefElements));
14539}
14540
// All-elements overload: null result if the node is not a constant-integer
// splat.
// NOTE(review): the signature lines (14541-14542) are missing from this
// rendering.
14543 return dyn_cast_or_null<ConstantSDNode>(getSplatValue(UndefElements));
14544}
14545
// Return the splat value as a ConstantFPSDNode, or null if the splatted
// operand is not a floating-point constant.
// NOTE(review): the signature lines (14546-14547) and the start of the
// return expression (14549, presumably a dyn_cast_or_null<ConstantFPSDNode>)
// are missing from this rendering.
14548 BitVector *UndefElements) const {
14550 getSplatValue(DemandedElts, UndefElements));
14551}
14552
14557
// If this build vector is a splat of an FP constant that converts exactly
// (toward zero) to a BitWidth-bit integer power of two, return log2 of that
// integer; otherwise return -1.
// NOTE(review): the function-name line (14559) and the dyn_cast operand of
// the if-init (14562) are missing from this rendering.
14558int32_t
14560 uint32_t BitWidth) const {
14561 if (ConstantFPSDNode *CN =
14563 bool IsExact;
14564 APSInt IntVal(BitWidth);
14565 const APFloat &APF = CN->getValueAPF();
// Reject any conversion that is inexact or raises a status other than opOK
// (overflow, invalid, ...).
14566 if (APF.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact) !=
14567 APFloat::opOK ||
14568 !IsExact)
14569 return -1;
14570
// exactLogBase2 returns -1 itself when IntVal is not a power of two.
14571 return IntVal.exactLogBase2();
14572 }
14573 return -1;
14574}
14575
// Extract the raw constant bits of this build vector, recast to
// DstEltSizeInBits-wide elements via recastRawBits. Fails (returns false)
// if any operand is neither undef nor a Constant/ConstantFP.
// NOTE(review): the first signature line (14576) and line 14594 (presumably
// fetching the I-th operand into Op) are missing from this rendering.
14577 bool IsLittleEndian, unsigned DstEltSizeInBits,
14578 SmallVectorImpl<APInt> &RawBitElements, BitVector &UndefElements) const {
14579 // Early-out if this contains anything but Undef/Constant/ConstantFP.
14580 if (!isConstant())
14581 return false;
14582
14583 unsigned NumSrcOps = getNumOperands();
14584 unsigned SrcEltSizeInBits = getValueType(0).getScalarSizeInBits();
14585 assert(((NumSrcOps * SrcEltSizeInBits) % DstEltSizeInBits) == 0 &&
14586 "Invalid bitcast scale");
14587
14588 // Extract raw src bits.
14589 SmallVector<APInt> SrcBitElements(NumSrcOps,
14590 APInt::getZero(SrcEltSizeInBits));
14591 BitVector SrcUndeElements(NumSrcOps, false);
14592
14593 for (unsigned I = 0; I != NumSrcOps; ++I) {
14595 if (Op.isUndef()) {
14596 SrcUndeElements.set(I);
14597 continue;
14598 }
14599 auto *CInt = dyn_cast<ConstantSDNode>(Op);
14600 auto *CFP = dyn_cast<ConstantFPSDNode>(Op);
14601 assert((CInt || CFP) && "Unknown constant");
// Integer constants may be wider than the element type (build vectors can
// truncate); FP constants are taken via their bit pattern.
14602 SrcBitElements[I] = CInt ? CInt->getAPIntValue().trunc(SrcEltSizeInBits)
14603 : CFP->getValueAPF().bitcastToAPInt();
14604 }
14605
14606 // Recast to dst width.
14607 recastRawBits(IsLittleEndian, DstEltSizeInBits, RawBitElements,
14608 SrcBitElements, UndefElements, SrcUndeElements);
14609 return true;
14610}
14611
14612void BuildVectorSDNode::recastRawBits(bool IsLittleEndian,
14613 unsigned DstEltSizeInBits,
14614 SmallVectorImpl<APInt> &DstBitElements,
14615 ArrayRef<APInt> SrcBitElements,
14616 BitVector &DstUndefElements,
14617 const BitVector &SrcUndefElements) {
14618 unsigned NumSrcOps = SrcBitElements.size();
14619 unsigned SrcEltSizeInBits = SrcBitElements[0].getBitWidth();
14620 assert(((NumSrcOps * SrcEltSizeInBits) % DstEltSizeInBits) == 0 &&
14621 "Invalid bitcast scale");
14622 assert(NumSrcOps == SrcUndefElements.size() &&
14623 "Vector size mismatch");
14624
14625 unsigned NumDstOps = (NumSrcOps * SrcEltSizeInBits) / DstEltSizeInBits;
14626 DstUndefElements.clear();
14627 DstUndefElements.resize(NumDstOps, false);
14628 DstBitElements.assign(NumDstOps, APInt::getZero(DstEltSizeInBits));
14629
14630 // Concatenate src elements constant bits together into dst element.
14631 if (SrcEltSizeInBits <= DstEltSizeInBits) {
14632 unsigned Scale = DstEltSizeInBits / SrcEltSizeInBits;
14633 for (unsigned I = 0; I != NumDstOps; ++I) {
14634 DstUndefElements.set(I);
14635 APInt &DstBits = DstBitElements[I];
14636 for (unsigned J = 0; J != Scale; ++J) {
14637 unsigned Idx = (I * Scale) + (IsLittleEndian ? J : (Scale - J - 1));
14638 if (SrcUndefElements[Idx])
14639 continue;
14640 DstUndefElements.reset(I);
14641 const APInt &SrcBits = SrcBitElements[Idx];
14642 assert(SrcBits.getBitWidth() == SrcEltSizeInBits &&
14643 "Illegal constant bitwidths");
14644 DstBits.insertBits(SrcBits, J * SrcEltSizeInBits);
14645 }
14646 }
14647 return;
14648 }
14649
14650 // Split src element constant bits into dst elements.
14651 unsigned Scale = SrcEltSizeInBits / DstEltSizeInBits;
14652 for (unsigned I = 0; I != NumSrcOps; ++I) {
14653 if (SrcUndefElements[I]) {
14654 DstUndefElements.set(I * Scale, (I + 1) * Scale);
14655 continue;
14656 }
14657 const APInt &SrcBits = SrcBitElements[I];
14658 for (unsigned J = 0; J != Scale; ++J) {
14659 unsigned Idx = (I * Scale) + (IsLittleEndian ? J : (Scale - J - 1));
14660 APInt &DstBits = DstBitElements[Idx];
14661 DstBits = SrcBits.extractBits(DstEltSizeInBits, J * DstEltSizeInBits);
14662 }
14663 }
14664}
14665
// Return true if every operand of this build vector is undef, ISD::Constant
// or ISD::ConstantFP.
// NOTE(review): the function signature line (14666) is missing from this
// rendering.
14667 for (const SDValue &Op : op_values()) {
14668 unsigned Opc = Op.getOpcode();
14669 if (!Op.isUndef() && Opc != ISD::Constant && Opc != ISD::ConstantFP)
14670 return false;
14671 }
14672 return true;
14673}
14674
// Detect an arithmetic sequence Start + I * Stride (mod 2^EltSize) across
// the constant-integer elements of this build vector, tolerating undef
// elements. Returns {Start, Stride} on success, std::nullopt otherwise.
// NOTE(review): the function-name line (14676) and line 14688 (presumably
// fetching the I-th operand into Op) are missing from this rendering.
14675std::optional<std::pair<APInt, APInt>>
14677 unsigned NumOps = getNumOperands();
14678 if (NumOps < 2)
14679 return std::nullopt;
14680
14681 unsigned EltSize = getValueType(0).getScalarSizeInBits();
14682 APInt Start, Stride;
14683 int FirstIdx = -1, SecondIdx = -1;
14684
14685 // Find the first two non-undef constant elements to determine Start and
14686 // Stride, then verify all remaining elements match the sequence.
14687 for (unsigned I = 0; I < NumOps; ++I) {
14689 if (Op->isUndef())
14690 continue;
14691 if (!isa<ConstantSDNode>(Op))
14692 return std::nullopt;
14693
14694 APInt Val = getConstantOperandAPInt(I).trunc(EltSize);
14695 if (FirstIdx < 0) {
14696 FirstIdx = I;
14697 Start = Val;
14698 } else if (SecondIdx < 0) {
14699 SecondIdx = I;
14700 // Compute stride using modular arithmetic. Simple division would handle
14701 // common strides (1, 2, -1, etc.), but modular inverse maximizes matches.
14702 // Example: <0, poison, poison, 0xFF> has stride 0x55 since 3*0x55 = 0xFF
14703 // Note that modular arithmetic is agnostic to signed/unsigned.
14704 unsigned IdxDiff = I - FirstIdx;
14705 APInt ValDiff = Val - Start;
14706
14707 // Step 1: Factor out common powers of 2 from IdxDiff and ValDiff.
14708 unsigned CommonPow2Bits = llvm::countr_zero(IdxDiff);
14709 if (ValDiff.countr_zero() < CommonPow2Bits)
14710 return std::nullopt; // ValDiff not divisible by 2^CommonPow2Bits
14711 IdxDiff >>= CommonPow2Bits;
14712 ValDiff.lshrInPlace(CommonPow2Bits);
14713
14714 // Step 2: IdxDiff is now odd, so its inverse mod 2^EltSize exists.
14715 // TODO: There are 2^CommonPow2Bits valid strides; currently we only try
14716 // one, but we could try all candidates to handle more cases.
14717 Stride = ValDiff * APInt(EltSize, IdxDiff).multiplicativeInverse();
14718 if (Stride.isZero())
14719 return std::nullopt;
14720
14721 // Step 3: Adjust Start based on the first defined element's index.
14722 Start -= Stride * FirstIdx;
14723 } else {
14724 // Verify this element matches the sequence.
14725 if (Val != Start + Stride * I)
14726 return std::nullopt;
14727 }
14728 }
14729
14730 // Need at least two defined elements.
14731 if (SecondIdx < 0)
14732 return std::nullopt;
14733
14734 return std::make_pair(Start, Stride);
14735}
14736
// Return true if the shuffle mask selects the same (or undef) source element
// everywhere, i.e. the shuffle is a splat.
// NOTE(review): the function signature line (14737) is missing from this
// rendering.
14738 // Find the first non-undef value in the shuffle mask.
14739 unsigned i, e;
14740 for (i = 0, e = Mask.size(); i != e && Mask[i] < 0; ++i)
14741 /* search */;
14742
14743 // If all elements are undefined, this shuffle can be considered a splat
14744 // (although it should eventually get simplified away completely).
14745 if (i == e)
14746 return true;
14747
14748 // Make sure all remaining elements are either undef or the same as the first
14749 // non-undef value.
14750 for (int Idx = Mask[i]; i != e; ++i)
14751 if (Mask[i] >= 0 && Mask[i] != Idx)
14752 return false;
14753 return true;
14754}
14755
14756// Returns true if it is a constant integer BuildVector or constant integer,
14757// possibly hidden by a bitcast.
// NOTE(review): the first signature line (14758) and lines 14760/14765 are
// missing from this rendering — presumably a bitcast peek-through and a
// build-vector-of-constants check guarding the bare "return true" below.
14759 SDValue N, bool AllowOpaques) const {
14761
// Opaque constants are deliberately kept out of folding unless the caller
// opts in via AllowOpaques.
14762 if (auto *C = dyn_cast<ConstantSDNode>(N))
14763 return AllowOpaques || !C->isOpaque();
14764
14766 return true;
14767
14768 // Treat a GlobalAddress supporting constant offset folding as a
14769 // constant integer.
14770 if (auto *GA = dyn_cast<GlobalAddressSDNode>(N))
14771 if (GA->getOpcode() == ISD::GlobalAddress &&
14772 TLI->isOffsetFoldingLegal(GA))
14773 return true;
14774
// A SPLAT_VECTOR of a constant scalar also counts as a constant integer.
14775 if ((N.getOpcode() == ISD::SPLAT_VECTOR) &&
14776 isa<ConstantSDNode>(N.getOperand(0)))
14777 return true;
14778 return false;
14779}
14780
14781// Returns true if it is a constant float BuildVector or constant float.
// NOTE(review): the signature line (14782) and the conditions guarding the
// two "return true" statements below (14783 and 14786, presumably a
// ConstantFPSDNode check and a build-vector-of-FP-constants check) are
// missing from this rendering.
14784 return true;
14785
14787 return true;
14788
// A SPLAT_VECTOR of a constant FP scalar also counts.
14789 if ((N.getOpcode() == ISD::SPLAT_VECTOR) &&
14790 isa<ConstantFPSDNode>(N.getOperand(0)))
14791 return true;
14792
14793 return false;
14794}
14795
/// Interpret N as a boolean constant under the target's boolean-contents
/// convention for its type: returns true/false when the (possibly splatted,
/// possibly truncated) constant matches the convention's true/false encoding,
/// std::nullopt when N is not such a constant or encodes neither value.
/// NOTE(review): the three switch case labels (lines 14805, 14811, 14817 —
/// presumably ZeroOrOne, ZeroOrNegativeOne and Undefined BooleanContent) are
/// missing from this rendering.
14796std::optional<bool> SelectionDAG::isBoolConstant(SDValue N) const {
14797 ConstantSDNode *Const =
14798 isConstOrConstSplat(N, false, /*AllowTruncation=*/true);
14799 if (!Const)
14800 return std::nullopt;
14801
14802 EVT VT = N->getValueType(0);
14803 const APInt CVal = Const->getAPIntValue().trunc(VT.getScalarSizeInBits());
14804 switch (TLI->getBooleanContents(N.getValueType())) {
14806 if (CVal.isOne())
14807 return true;
14808 if (CVal.isZero())
14809 return false;
14810 return std::nullopt;
14812 if (CVal.isAllOnes())
14813 return true;
14814 if (CVal.isZero())
14815 return false;
14816 return std::nullopt;
14818 return CVal[0];
14819 }
14820 llvm_unreachable("Unknown BooleanContent enum");
14821}
14822
/// Allocate and initialize the operand list of Node from Vals, and compute
/// the node's divergence bit from its operands and from target-specific
/// sources of divergence.
/// NOTE(review): line 14825 (the condition of the second assert, whose
/// message is "too many operands to fit into SDNode") is missing from this
/// rendering.
14823void SelectionDAG::createOperands(SDNode *Node, ArrayRef<SDValue> Vals) {
14824 assert(!Node->OperandList && "Node already has operands");
14826 "too many operands to fit into SDNode");
14827 SDUse *Ops = OperandRecycler.allocate(
14828 ArrayRecycler<SDUse>::Capacity::get(Vals.size()), OperandAllocator);
14829
14830 bool IsDivergent = false;
14831 for (unsigned I = 0; I != Vals.size(); ++I) {
14832 Ops[I].setUser(Node);
14833 Ops[I].setInitial(Vals[I]);
14834 EVT VT = Ops[I].getValueType();
14835
14836 // Skip Chain. It does not carry divergence.
14837 if (VT != MVT::Other &&
14838 (VT != MVT::Glue || gluePropagatesDivergence(Ops[I].getNode())) &&
14839 Ops[I].getNode()->isDivergent()) {
14840 IsDivergent = true;
14841 }
14842 }
14843 Node->NumOperands = Vals.size();
14844 Node->OperandList = Ops;
// Always-uniform nodes keep their default divergence bit regardless of
// operand divergence.
14845 if (!TLI->isSDNodeAlwaysUniform(Node)) {
14846 IsDivergent |= TLI->isSDNodeSourceOfDivergence(Node, FLI, UA);
14847 Node->SDNodeBits.IsDivergent = IsDivergent;
14848 }
14849 checkForCycles(Node);
14850}
14851
// Build a TokenFactor over Vals, folding the excess into nested TokenFactors
// whenever Vals exceeds the maximum SDNode operand count. Each iteration
// replaces the last Limit chains with a single combined TokenFactor.
// NOTE(review): the signature lines (14852-14853) are missing from this
// rendering.
14854 size_t Limit = SDNode::getMaxNumOperands();
14855 while (Vals.size() > Limit) {
14856 unsigned SliceIdx = Vals.size() - Limit;
14857 auto ExtractedTFs = ArrayRef<SDValue>(Vals).slice(SliceIdx, Limit);
14858 SDValue NewTF = getNode(ISD::TokenFactor, DL, MVT::Other, ExtractedTFs);
14859 Vals.erase(Vals.begin() + SliceIdx, Vals.end());
14860 Vals.emplace_back(NewTF);
14861 }
14862 return getNode(ISD::TokenFactor, DL, MVT::Other, Vals);
14863}
14864
// Return the neutral (identity) element for the given binary opcode in type
// VT, or an empty SDValue if the opcode has no known neutral element. For FP
// min/max operations the neutral element depends on the fast-math flags.
// NOTE(review): the first signature line (14865) and the return statements
// for the SMAX/SMIN cases (14881 and 14883, presumably signed-min and
// signed-max constants) are missing from this rendering.
14866 EVT VT, SDNodeFlags Flags) {
14867 switch (Opcode) {
14868 default:
14869 return SDValue();
14870 case ISD::ADD:
14871 case ISD::OR:
14872 case ISD::XOR:
14873 case ISD::UMAX:
14874 return getConstant(0, DL, VT);
14875 case ISD::MUL:
14876 return getConstant(1, DL, VT);
14877 case ISD::AND:
14878 case ISD::UMIN:
14879 return getAllOnesConstant(DL, VT);
14880 case ISD::SMAX:
14882 case ISD::SMIN:
14884 case ISD::FADD:
14885 // If flags allow, prefer positive zero since it's generally cheaper
14886 // to materialize on most targets.
14887 return getConstantFP(Flags.hasNoSignedZeros() ? 0.0 : -0.0, DL, VT);
14888 case ISD::FMUL:
14889 return getConstantFP(1.0, DL, VT);
14890 case ISD::FMINNUM:
14891 case ISD::FMAXNUM: {
14892 // Neutral element for fminnum is NaN, Inf or FLT_MAX, depending on FMF.
14893 const fltSemantics &Semantics = VT.getFltSemantics();
14894 APFloat NeutralAF = !Flags.hasNoNaNs() ? APFloat::getQNaN(Semantics) :
14895 !Flags.hasNoInfs() ? APFloat::getInf(Semantics) :
14896 APFloat::getLargest(Semantics);
14897 if (Opcode == ISD::FMAXNUM)
14898 NeutralAF.changeSign();
14899
14900 return getConstantFP(NeutralAF, DL, VT);
14901 }
14902 case ISD::FMINIMUM:
14903 case ISD::FMAXIMUM: {
14904 // Neutral element for fminimum is Inf or FLT_MAX, depending on FMF.
14905 const fltSemantics &Semantics = VT.getFltSemantics();
14906 APFloat NeutralAF = !Flags.hasNoInfs() ? APFloat::getInf(Semantics)
14907 : APFloat::getLargest(Semantics);
14908 if (Opcode == ISD::FMAXIMUM)
14909 NeutralAF.changeSign();
14910
14911 return getConstantFP(NeutralAF, DL, VT);
14912 }
14913
14914 }
14915}
14916
// Build a partial-reduce "subtract" form of the given multiply-accumulate
// opcode: for FP, negate RHS and emit a PARTIAL_REDUCE_FMLA; for integers,
// compute -(MLA(-Acc, LHS, RHS)) so the product is effectively subtracted
// from the accumulator.
// NOTE(review): the first signature line (14917) and the condition of the
// integer-path assert (14926, message "Unexpected opcode") are missing from
// this rendering.
14918 SDValue Acc, SDValue LHS,
14919 SDValue RHS) {
14920 EVT AccVT = Acc.getValueType();
14921 if (AccVT.isFloatingPoint()) {
14922 assert(Opc == ISD::PARTIAL_REDUCE_FMLA && "Unexpected opcode");
14923 SDValue NegRHS = getNode(ISD::FNEG, DL, RHS.getValueType(), RHS);
14924 return getNode(Opc, DL, AccVT, Acc, LHS, NegRHS);
14925 }
14927 "Unexpected opcode");
14928 SDValue NegAcc = getNegative(Acc, DL, AccVT);
14929 SDValue MLA = getNode(Opc, DL, AccVT, NegAcc, LHS, RHS);
14930 return getNegative(MLA, DL, AccVT);
14931}
14932
14933/// Helper used to make a call to a library function that has one argument of
14934/// pointer type.
14935///
14936/// Such functions include 'fegetmode', 'fesetenv' and some others, which are
14937/// used to get or set floating-point state. They have one argument of pointer
14938/// type, which points to the memory region containing bits of the
14939/// floating-point state. The value returned by such function is ignored in the
14940/// created call.
14941///
14942/// \param LibFunc Reference to library function (value of RTLIB::Libcall).
14943/// \param Ptr Pointer used to save/load state.
14944/// \param InChain Ingoing token chain.
14945/// \returns Outgoing chain token.
// NOTE(review): the first signature line (14946) and the declarations of
// Args (14950) and CLI (14959) are missing from this rendering.
14947 SDValue InChain,
14948 const SDLoc &DLoc) {
14949 assert(InChain.getValueType() == MVT::Other && "Expected token chain");
14951 Args.emplace_back(Ptr, Ptr.getValueType().getTypeForEVT(*getContext()));
14952 RTLIB::LibcallImpl LibcallImpl =
14953 Libcalls->getLibcallImpl(static_cast<RTLIB::Libcall>(LibFunc));
14954 if (LibcallImpl == RTLIB::Unsupported)
14955 reportFatalUsageError("emitting call to unsupported libcall");
14956
14957 SDValue Callee =
14958 getExternalSymbol(LibcallImpl, TLI->getPointerTy(getDataLayout()));
// The call returns void as far as lowering is concerned; only the out-chain
// is used by the caller.
14960 CLI.setDebugLoc(DLoc).setChain(InChain).setLibCallee(
14961 Libcalls->getLibcallImplCallingConv(LibcallImpl),
14962 Type::getVoidTy(*getContext()), Callee, std::move(Args));
14963 return TLI->LowerCallTo(CLI).second;
14964}
14965
// Propagate per-node extra info (e.g. PCSections metadata) from a node being
// replaced (From) to its replacement (To). For info that requires deep
// propagation, copies the info to every new node reachable from To that was
// not already reachable from From, using a depth-limited, iteratively
// deepened traversal to bound recursion.
// NOTE(review): the function signature line (14966) and the declarations of
// Visited (15014) and StartFrom (15046) are missing from this rendering.
14967 assert(From && To && "Invalid SDNode; empty source SDValue?");
14968 auto I = SDEI.find(From);
14969 if (I == SDEI.end())
14970 return;
14971
14972 // Use of operator[] on the DenseMap may cause an insertion, which invalidates
14973 // the iterator, hence the need to make a copy to prevent a use-after-free.
14974 NodeExtraInfo NEI = I->second;
14975 if (LLVM_LIKELY(!NEI.PCSections)) {
14976 // No deep copy required for the types of extra info set.
14977 //
14978 // FIXME: Investigate if other types of extra info also need deep copy. This
14979 // depends on the types of nodes they can be attached to: if some extra info
14980 // is only ever attached to nodes where a replacement To node is always the
14981 // node where later use and propagation of the extra info has the intended
14982 // semantics, no deep copy is required.
14983 SDEI[To] = std::move(NEI);
14984 return;
14985 }
14986
14987 const SDNode *EntrySDN = getEntryNode().getNode();
14988
14989 // We need to copy NodeExtraInfo to all _new_ nodes that are being introduced
14990 // through the replacement of From with To. Otherwise, replacements of a node
14991 // (From) with more complex nodes (To and its operands) may result in lost
14992 // extra info where the root node (To) is insignificant in further propagating
14993 // and using extra info when further lowering to MIR.
14994 //
14995 // In the first step pre-populate the visited set with the nodes reachable
14996 // from the old From node. This avoids copying NodeExtraInfo to parts of the
14997 // DAG that is not new and should be left untouched.
14998 SmallVector<const SDNode *> Leafs{From}; // Leafs reachable with VisitFrom.
14999 DenseSet<const SDNode *> FromReach; // The set of nodes reachable from From.
15000 auto VisitFrom = [&](auto &&Self, const SDNode *N, int MaxDepth) {
15001 if (MaxDepth == 0) {
15002 // Remember this node in case we need to increase MaxDepth and continue
15003 // populating FromReach from this node.
15004 Leafs.emplace_back(N);
15005 return;
15006 }
15007 if (!FromReach.insert(N).second)
15008 return;
15009 for (const SDValue &Op : N->op_values())
15010 Self(Self, Op.getNode(), MaxDepth - 1);
15011 };
15012
15013 // Copy extra info to To and all its transitive operands (that are new).
15015 auto DeepCopyTo = [&](auto &&Self, const SDNode *N) {
15016 if (FromReach.contains(N))
15017 return true;
15018 if (!Visited.insert(N).second)
15019 return true;
15020 if (EntrySDN == N)
15021 return false;
15022 for (const SDValue &Op : N->op_values()) {
15023 if (N == To && Op.getNode() == EntrySDN) {
15024 // Special case: New node's operand is the entry node; just need to
15025 // copy extra info to new node.
15026 break;
15027 }
15028 if (!Self(Self, Op.getNode()))
15029 return false;
15030 }
15031 // Copy only if entry node was not reached.
15032 SDEI[N] = std::move(NEI);
15033 return true;
15034 };
15035
15036 // We first try with a lower MaxDepth, assuming that the path to common
15037 // operands between From and To is relatively short. This significantly
15038 // improves performance in the common case. The initial MaxDepth is big
15039 // enough to avoid retry in the common case; the last MaxDepth is large
15040 // enough to avoid having to use the fallback below (and protects from
15041 // potential stack exhaustion from recursion).
15042 for (int PrevDepth = 0, MaxDepth = 16; MaxDepth <= 1024;
15043 PrevDepth = MaxDepth, MaxDepth *= 2, Visited.clear()) {
15044 // StartFrom is the previous (or initial) set of leafs reachable at the
15045 // previous maximum depth.
15047 std::swap(StartFrom, Leafs);
15048 for (const SDNode *N : StartFrom)
15049 VisitFrom(VisitFrom, N, MaxDepth - PrevDepth);
15050 if (LLVM_LIKELY(DeepCopyTo(DeepCopyTo, To)))
15051 return;
15052 // This should happen very rarely (reached the entry node).
15053 LLVM_DEBUG(dbgs() << __func__ << ": MaxDepth=" << MaxDepth << " too low\n");
15054 assert(!Leafs.empty());
15055 }
15056
15057 // This should not happen - but if it did, that means the subgraph reachable
15058 // from From has depth greater or equal to maximum MaxDepth, and VisitFrom()
15059 // could not visit all reachable common operands. Consequently, we were able
15060 // to reach the entry node.
15061 errs() << "warning: incomplete propagation of SelectionDAG::NodeExtraInfo\n";
15062 assert(false && "From subgraph too complex - increase max. MaxDepth?");
15063 // Best-effort fallback if assertions disabled.
15064 SDEI[To] = std::move(NEI);
15065}
15066
15067#ifndef NDEBUG
// Depth-first cycle detector: Visited holds the nodes on the current DFS
// path, Checked holds nodes already proven cycle-free. Revisiting a node
// still on the path means the DAG contains a cycle; dump it and abort.
// NOTE(review): the two SmallPtrSetImpl parameter lines (15069-15070) are
// missing from this rendering.
15068static void checkForCyclesHelper(const SDNode *N,
15071 const llvm::SelectionDAG *DAG) {
15072 // If this node has already been checked, don't check it again.
15073 if (Checked.count(N))
15074 return;
15075
15076 // If a node has already been visited on this depth-first walk, reject it as
15077 // a cycle.
15078 if (!Visited.insert(N).second) {
15079 errs() << "Detected cycle in SelectionDAG\n";
15080 dbgs() << "Offending node:\n";
15081 N->dumprFull(DAG); dbgs() << "\n";
15082 abort();
15083 }
15084
15085 for (const SDValue &Op : N->op_values())
15086 checkForCyclesHelper(Op.getNode(), Visited, Checked, DAG);
15087
// N's entire subtree is clean; remove it from the active path.
15088 Checked.insert(N);
15089 Visited.erase(N);
15090}
15091#endif
15092
// Entry point for DAG cycle checking from a given node. Only active in
// asserts builds; runs either when forced by the caller or unconditionally
// under EXPENSIVE_CHECKS.
// NOTE(review): the first signature line (15093) and the declarations of the
// visited/checked sets (15103-15104) are missing from this rendering.
15094 const llvm::SelectionDAG *DAG,
15095 bool force) {
15096#ifndef NDEBUG
15097 bool check = force;
15098#ifdef EXPENSIVE_CHECKS
15099 check = true;
15100#endif // EXPENSIVE_CHECKS
15101 if (check) {
15102 assert(N && "Checking nonexistent SDNode");
15105 checkForCyclesHelper(N, visited, checked, DAG);
15106 }
15107#endif // !NDEBUG
15108}
15109
15110void llvm::checkForCycles(const llvm::SelectionDAG *DAG, bool force) {
15111 checkForCycles(DAG->getRoot().getNode(), DAG, force);
15112}
return SDValue()
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static bool isConstant(const MachineInstr &MI)
constexpr LLT S1
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
This file implements the APSInt class, which is a simple class that represents an arbitrary sized int...
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
#define X(NUM, ENUM, NAME)
Definition ELF.h:851
This file implements the BitVector class.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static std::optional< bool > isBigEndian(const SmallDenseMap< int64_t, int64_t, 8 > &MemOffset2Idx, int64_t LowestIdx)
Given a map from byte offsets in memory to indices in a load/store, determine if that map corresponds...
#define __asan_unpoison_memory_region(p, size)
Definition Compiler.h:592
#define LLVM_LIKELY(EXPR)
Definition Compiler.h:335
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file defines the DenseSet and SmallDenseSet classes.
This file contains constants used for implementing Dwarf debug support.
This file defines a hash set that can be used to remove duplication of nodes in a graph.
iv users
Definition IVUsers.cpp:48
std::pair< Instruction::BinaryOps, Value * > OffsetOp
Find all possible pairs (BinOp, RHS) that BinOp V, RHS can be simplified.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static Register getMemsetValue(Register Val, LLT Ty, MachineIRBuilder &MIB)
static bool shouldLowerMemFuncForSize(const MachineFunction &MF)
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
Definition Lint.cpp:539
static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG)
static bool isConstantSplatVector(SDValue N, APInt &SplatValue, unsigned MinSizeInBits)
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
#define G(x, y, z)
Definition MD5.cpp:55
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
Register const TargetRegisterInfo * TRI
This file provides utility analysis objects describing memory locations.
This file contains the declarations for metadata subclasses.
#define T
static MCRegister getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
#define P(N)
PowerPC Reduce CR logical Operation
const SmallVectorImpl< MachineOperand > & Cond
Remove Loads Into Fake Uses
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
Contains matchers for matching SelectionDAG nodes and values.
static Type * getValueType(Value *V, bool LookThroughCmp=false)
Returns the "element type" of the given value/instruction V.
This file contains some templates that are useful if you are working with the STL at all.
static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow)
static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src, uint64_t Size, Align Alignment, bool isVol, bool AlwaysInline, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo, BatchAAResults *BatchAA)
static SDValue getFixedOrScalableQuantity(SelectionDAG &DAG, const SDLoc &DL, EVT VT, Ty Quantity)
static std::pair< SDValue, SDValue > getRuntimeCallSDValueHelper(SDValue Chain, const SDLoc &dl, TargetLowering::ArgListTy &&Args, const CallInst *CI, RTLIB::Libcall Call, SelectionDAG *DAG, const TargetLowering *TLI)
static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src, uint64_t Size, Align Alignment, bool isVol, bool AlwaysInline, MachinePointerInfo DstPtrInfo, const AAMDNodes &AAInfo)
Lower the call to 'memset' intrinsic function into a series of store operations.
static std::optional< APInt > FoldValueWithUndef(unsigned Opcode, const APInt &C1, bool IsUndef1, const APInt &C2, bool IsUndef2)
static SDValue FoldSTEP_VECTOR(const SDLoc &DL, EVT VT, SDValue Step, SelectionDAG &DAG)
static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned OpC, SDVTList VTList, ArrayRef< SDValue > OpList)
static SDValue getMemsetStringVal(EVT VT, const SDLoc &dl, SelectionDAG &DAG, const TargetLowering &TLI, const ConstantDataArraySlice &Slice)
getMemsetStringVal - Similar to getMemsetValue.
static cl::opt< bool > EnableMemCpyDAGOpt("enable-memcpy-dag-opt", cl::Hidden, cl::init(true), cl::desc("Gang up loads and stores generated by inlining of memcpy"))
static bool haveNoCommonBitsSetCommutative(SDValue A, SDValue B)
static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList)
AddNodeIDValueTypes - Value type lists are intern'd so we can represent them solely with their pointe...
static void commuteShuffle(SDValue &N1, SDValue &N2, MutableArrayRef< int > M)
Swaps the values of N1 and N2.
static bool isMemSrcFromConstant(SDValue Src, ConstantDataArraySlice &Slice)
Returns true if memcpy source is constant data.
static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src, uint64_t Size, Align Alignment, bool isVol, bool AlwaysInline, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo)
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC)
AddNodeIDOpcode - Add the node opcode to the NodeID data.
static ISD::CondCode getSetCCInverseImpl(ISD::CondCode Op, bool isIntegerLike)
static bool doNotCSE(SDNode *N)
doNotCSE - Return true if CSE should not be performed for this node.
static cl::opt< int > MaxLdStGlue("ldstmemcpy-glue-max", cl::desc("Number limit for gluing ld/st of memcpy."), cl::Hidden, cl::init(0))
static void AddNodeIDOperands(FoldingSetNodeID &ID, ArrayRef< SDValue > Ops)
AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static SDValue foldCONCAT_VECTORS(const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops, SelectionDAG &DAG)
Try to simplify vector concatenation to an input value, undef, or build vector.
static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info, SelectionDAG &DAG, SDValue Ptr, int64_t Offset=0)
InferPointerInfo - If the specified ptr/offset is a frame index, infer a MachinePointerInfo record fr...
static bool isInTailCallPositionWrapper(const CallInst *CI, const SelectionDAG *SelDAG, bool AllowReturnsFirstArg)
static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N)
If this is an SDNode with special info, add this info to the NodeID data.
static bool gluePropagatesDivergence(const SDNode *Node)
Return true if a glue output should propagate divergence information.
static void NewSDValueDbgMsg(SDValue V, StringRef Msg, SelectionDAG *G)
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs)
makeVTList - Return an instance of the SDVTList struct initialized with the specified members.
static void checkForCyclesHelper(const SDNode *N, SmallPtrSetImpl< const SDNode * > &Visited, SmallPtrSetImpl< const SDNode * > &Checked, const llvm::SelectionDAG *DAG)
static void chainLoadsAndStoresForMemcpy(SelectionDAG &DAG, const SDLoc &dl, SmallVector< SDValue, 32 > &OutChains, unsigned From, unsigned To, SmallVector< SDValue, 16 > &OutLoadChains, SmallVector< SDValue, 16 > &OutStoreChains)
static int isSignedOp(ISD::CondCode Opcode)
For an integer comparison, return 1 if the comparison is a signed operation and 2 if the result is an...
static std::optional< APInt > FoldValue(unsigned Opcode, const APInt &C1, const APInt &C2)
static SDValue FoldBUILD_VECTOR(const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops, SelectionDAG &DAG)
static void checkAddrSpaceIsValidForLibcall(const TargetLowering *TLI, unsigned AS)
static cl::opt< unsigned > MaxSteps("has-predecessor-max-steps", cl::Hidden, cl::init(8192), cl::desc("DAG combiner limit number of steps when searching DAG " "for predecessor nodes"))
static APInt getDemandAllEltsMask(SDValue V)
Construct a DemandedElts mask which demands all elements of V.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
#define LLVM_DEBUG(...)
Definition Debug.h:114
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
This file describes how to lower LLVM code to machine code.
static void removeOperands(MachineInstr &MI, unsigned i)
static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR)
Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
static int Lookup(ArrayRef< TableEntry > Table, unsigned Opcode)
static unsigned getSize(unsigned Kind)
static const fltSemantics & IEEEsingle()
Definition APFloat.h:296
cmpResult
IEEE-754R 5.11: Floating Point Comparison Relations.
Definition APFloat.h:334
static constexpr roundingMode rmTowardZero
Definition APFloat.h:348
static const fltSemantics & BFloat()
Definition APFloat.h:295
static const fltSemantics & IEEEquad()
Definition APFloat.h:298
static const fltSemantics & IEEEdouble()
Definition APFloat.h:297
static constexpr roundingMode rmTowardNegative
Definition APFloat.h:347
static constexpr roundingMode rmNearestTiesToEven
Definition APFloat.h:344
static constexpr roundingMode rmTowardPositive
Definition APFloat.h:346
static const fltSemantics & IEEEhalf()
Definition APFloat.h:294
opStatus
IEEE-754R 7: Default exception handling.
Definition APFloat.h:360
static APFloat getQNaN(const fltSemantics &Sem, bool Negative=false, const APInt *payload=nullptr)
Factory for QNaN values.
Definition APFloat.h:1175
opStatus divide(const APFloat &RHS, roundingMode RM)
Definition APFloat.h:1263
void copySign(const APFloat &RHS)
Definition APFloat.h:1357
LLVM_ABI opStatus convert(const fltSemantics &ToSemantics, roundingMode RM, bool *losesInfo)
Definition APFloat.cpp:5890
opStatus subtract(const APFloat &RHS, roundingMode RM)
Definition APFloat.h:1245
bool isExactlyValue(double V) const
We don't rely on operator== working on double values, as it returns true for things that are clearly ...
Definition APFloat.h:1517
opStatus add(const APFloat &RHS, roundingMode RM)
Definition APFloat.h:1236
bool isFinite() const
Definition APFloat.h:1539
opStatus convertFromAPInt(const APInt &Input, bool IsSigned, roundingMode RM)
Definition APFloat.h:1402
opStatus multiply(const APFloat &RHS, roundingMode RM)
Definition APFloat.h:1254
opStatus fusedMultiplyAdd(const APFloat &Multiplicand, const APFloat &Addend, roundingMode RM)
Definition APFloat.h:1290
bool isZero() const
Definition APFloat.h:1530
static APFloat getLargest(const fltSemantics &Sem, bool Negative=false)
Returns the largest finite number in the given semantics.
Definition APFloat.h:1193
opStatus convertToInteger(MutableArrayRef< integerPart > Input, unsigned int Width, bool IsSigned, roundingMode RM, bool *IsExact) const
Definition APFloat.h:1387
static APFloat getInf(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Infinity.
Definition APFloat.h:1153
opStatus mod(const APFloat &RHS)
Definition APFloat.h:1281
bool isPosZero() const
Definition APFloat.h:1545
bool isNegZero() const
Definition APFloat.h:1546
void changeSign()
Definition APFloat.h:1352
static APFloat getNaN(const fltSemantics &Sem, bool Negative=false, uint64_t payload=0)
Factory for NaN values.
Definition APFloat.h:1164
Class for arbitrary precision integers.
Definition APInt.h:78
LLVM_ABI APInt umul_ov(const APInt &RHS, bool &Overflow) const
Definition APInt.cpp:2022
LLVM_ABI APInt usub_sat(const APInt &RHS) const
Definition APInt.cpp:2106
LLVM_ABI APInt udiv(const APInt &RHS) const
Unsigned division operation.
Definition APInt.cpp:1615
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition APInt.h:235
void clearBit(unsigned BitPosition)
Set a given bit to 0.
Definition APInt.h:1429
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
Definition APInt.cpp:1054
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
Definition APInt.h:230
uint64_t getZExtValue() const
Get zero extended value.
Definition APInt.h:1563
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
Definition APInt.h:1414
unsigned popcount() const
Count the number of bits set.
Definition APInt.h:1693
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
Definition APInt.h:1408
LLVM_ABI APInt getHiBits(unsigned numBits) const
Compute an APInt containing numBits highbits from this APInt.
Definition APInt.cpp:639
LLVM_ABI APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
Definition APInt.cpp:1075
unsigned getActiveBits() const
Compute the number of active bits in the value.
Definition APInt.h:1535
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
Definition APInt.cpp:967
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
Definition APInt.h:1353
APInt abs() const
Get the absolute value.
Definition APInt.h:1818
LLVM_ABI APInt sadd_sat(const APInt &RHS) const
Definition APInt.cpp:2077
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
Definition APInt.h:372
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
Definition APInt.h:1189
static APInt getBitsSet(unsigned numBits, unsigned loBit, unsigned hiBit)
Get a value with a block of bits set.
Definition APInt.h:259
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition APInt.h:381
LLVM_ABI APInt urem(const APInt &RHS) const
Unsigned remainder operation.
Definition APInt.cpp:1708
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition APInt.h:1511
bool ult(const APInt &RHS) const
Unsigned less than comparison.
Definition APInt.h:1118
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
Definition APInt.h:210
bool isNegative() const
Determine sign of this APInt.
Definition APInt.h:330
LLVM_ABI APInt sdiv(const APInt &RHS) const
Signed division function for APInt.
Definition APInt.cpp:1686
void clearAllBits()
Set every bit to 0.
Definition APInt.h:1419
LLVM_ABI APInt rotr(unsigned rotateAmt) const
Rotate right by rotateAmt.
Definition APInt.cpp:1196
LLVM_ABI APInt reverseBits() const
Definition APInt.cpp:789
void ashrInPlace(unsigned ShiftAmt)
Arithmetic right-shift this APInt by ShiftAmt in place.
Definition APInt.h:841
bool sle(const APInt &RHS) const
Signed less or equal comparison.
Definition APInt.h:1173
unsigned countr_zero() const
Count the number of trailing zero bits.
Definition APInt.h:1662
unsigned getNumSignBits() const
Computes the number of leading bits of this APInt that are equal to its sign bit.
Definition APInt.h:1651
unsigned countl_zero() const
The APInt version of std::countl_zero.
Definition APInt.h:1621
static LLVM_ABI APInt getSplat(unsigned NewLen, const APInt &V)
Return a value containing V broadcasted over NewLen bits.
Definition APInt.cpp:651
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
Definition APInt.h:220
LLVM_ABI APInt sshl_sat(const APInt &RHS) const
Definition APInt.cpp:2137
LLVM_ABI APInt ushl_sat(const APInt &RHS) const
Definition APInt.cpp:2151
LLVM_ABI APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
Definition APInt.cpp:1083
static bool isSameValue(const APInt &I1, const APInt &I2, bool SignedCompare=false)
Determine if two APInts have the same value, after zero-extending or sign-extending (if SignedCompare...
Definition APInt.h:555
LLVM_ABI APInt rotl(unsigned rotateAmt) const
Rotate left by rotateAmt.
Definition APInt.cpp:1183
LLVM_ABI void insertBits(const APInt &SubBits, unsigned bitPosition)
Insert the bits from a smaller APInt starting at bitPosition.
Definition APInt.cpp:397
void clearLowBits(unsigned loBits)
Set bottom loBits bits to 0.
Definition APInt.h:1458
unsigned logBase2() const
Definition APInt.h:1784
LLVM_ABI APInt uadd_sat(const APInt &RHS) const
Definition APInt.cpp:2087
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
Definition APInt.h:834
LLVM_ABI APInt multiplicativeInverse() const
Definition APInt.cpp:1316
LLVM_ABI APInt srem(const APInt &RHS) const
Function for signed remainder operation.
Definition APInt.cpp:1787
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
Definition APInt.h:335
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition APInt.h:1157
LLVM_ABI APInt sext(unsigned width) const
Sign extend to a new width.
Definition APInt.cpp:1027
void setBits(unsigned loBit, unsigned hiBit)
Set the bits from loBit (inclusive) to hiBit (exclusive) to 1.
Definition APInt.h:1390
APInt shl(unsigned shiftAmt) const
Left-shift function.
Definition APInt.h:880
LLVM_ABI APInt byteSwap() const
Definition APInt.cpp:767
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
Definition APInt.h:1264
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition APInt.h:441
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition APInt.h:307
void clearBits(unsigned LoBit, unsigned HiBit)
Clear the bits from LoBit (inclusive) to HiBit (exclusive) to 0.
Definition APInt.h:1440
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
Definition APInt.h:201
void setLowBits(unsigned loBits)
Set the bottom loBits bits.
Definition APInt.h:1411
LLVM_ABI APInt extractBits(unsigned numBits, unsigned bitPosition) const
Return an APInt with the extracted bits [bitPosition,bitPosition+numBits).
Definition APInt.cpp:482
bool sge(const APInt &RHS) const
Signed greater or equal comparison.
Definition APInt.h:1244
bool isOne() const
Determine if this is a value of 1.
Definition APInt.h:390
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
Definition APInt.h:287
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
Definition APInt.h:240
void lshrInPlace(unsigned ShiftAmt)
Logical right-shift this APInt by ShiftAmt in place.
Definition APInt.h:865
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
Definition APInt.h:858
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
Definition APInt.h:1228
LLVM_ABI APInt ssub_sat(const APInt &RHS) const
Definition APInt.cpp:2096
An arbitrary precision integer that knows its signedness.
Definition APSInt.h:24
unsigned getSrcAddressSpace() const
unsigned getDestAddressSpace() const
static Capacity get(size_t N)
Get the capacity of an array that can hold at least N elements.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
This is an SDNode representing atomic operations.
static LLVM_ABI BaseIndexOffset match(const SDNode *N, const SelectionDAG &DAG)
Parses tree in N for base, index, offset addresses.
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal=false)
BitVector & reset()
Definition BitVector.h:411
void resize(unsigned N, bool t=false)
resize - Grow or shrink the bitvector.
Definition BitVector.h:360
void clear()
clear - Removes all bits from the bitvector.
Definition BitVector.h:354
BitVector & set()
Definition BitVector.h:370
bool none() const
none - Returns true if none of the bits are set.
Definition BitVector.h:207
size_type size() const
size - Returns the number of bits in this bitvector.
Definition BitVector.h:178
const BlockAddress * getBlockAddress() const
The address of a basic block.
Definition Constants.h:1065
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
A "pseudo-class" with methods for operating on BUILD_VECTORs.
LLVM_ABI bool getConstantRawBits(bool IsLittleEndian, unsigned DstEltSizeInBits, SmallVectorImpl< APInt > &RawBitElements, BitVector &UndefElements) const
Extract the raw bit data from a build vector of Undef, Constant or ConstantFP node elements.
static LLVM_ABI void recastRawBits(bool IsLittleEndian, unsigned DstEltSizeInBits, SmallVectorImpl< APInt > &DstBitElements, ArrayRef< APInt > SrcBitElements, BitVector &DstUndefElements, const BitVector &SrcUndefElements)
Recast bit data SrcBitElements to DstEltSizeInBits wide elements.
LLVM_ABI bool getRepeatedSequence(const APInt &DemandedElts, SmallVectorImpl< SDValue > &Sequence, BitVector *UndefElements=nullptr) const
Find the shortest repeating sequence of values in the build vector.
LLVM_ABI ConstantFPSDNode * getConstantFPSplatNode(const APInt &DemandedElts, BitVector *UndefElements=nullptr) const
Returns the demanded splatted constant FP or null if this is not a constant FP splat.
LLVM_ABI SDValue getSplatValue(const APInt &DemandedElts, BitVector *UndefElements=nullptr) const
Returns the demanded splatted value or a null value if this is not a splat.
LLVM_ABI bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef, unsigned &SplatBitSize, bool &HasAnyUndefs, unsigned MinSplatBits=0, bool isBigEndian=false) const
Check if this is a constant splat, and if so, find the smallest element size that splats the vector.
LLVM_ABI ConstantSDNode * getConstantSplatNode(const APInt &DemandedElts, BitVector *UndefElements=nullptr) const
Returns the demanded splatted constant or null if this is not a constant splat.
LLVM_ABI int32_t getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements, uint32_t BitWidth) const
If this is a constant FP splat and the splatted constant FP is an exact power or 2,...
LLVM_ABI std::optional< std::pair< APInt, APInt > > isArithmeticSequence() const
If this BuildVector is constant and represents an arithmetic sequence "<a, a+n, a+2n,...
LLVM_ABI bool isConstant() const
This class represents a function call, abstracting a target machine's calling convention.
bool isTailCall() const
static LLVM_ABI bool isValueValidForType(EVT VT, const APFloat &Val)
const APFloat & getValueAPF() const
bool isExactlyValue(double V) const
We don't rely on operator== working on double values, as it returns true for things that are clearly ...
ConstantFP - Floating Point Values [float, double].
Definition Constants.h:420
const APFloat & getValue() const
Definition Constants.h:464
This is the shared class of boolean and integer constants.
Definition Constants.h:87
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition Constants.h:162
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition Constants.h:159
MachineConstantPoolValue * getMachineCPVal() const
const Constant * getConstVal() const
LLVM_ABI Type * getType() const
This class represents a range of values.
LLVM_ABI ConstantRange multiply(const ConstantRange &Other) const
Return a new range representing the possible values resulting from a multiplication of a value in thi...
PreferredRangeType
If represented precisely, the result of some range operations may consist of multiple disjoint ranges...
const APInt * getSingleElement() const
If this set contains a single element, return it, otherwise return null.
static LLVM_ABI ConstantRange fromKnownBits(const KnownBits &Known, bool IsSigned)
Initialize a range based on a known bits constraint.
LLVM_ABI OverflowResult unsignedSubMayOverflow(const ConstantRange &Other) const
Return whether unsigned sub of the two ranges always/never overflows.
LLVM_ABI OverflowResult unsignedAddMayOverflow(const ConstantRange &Other) const
Return whether unsigned add of the two ranges always/never overflows.
LLVM_ABI KnownBits toKnownBits() const
Return known bits for values in this range.
LLVM_ABI ConstantRange zeroExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
LLVM_ABI APInt getSignedMin() const
Return the smallest signed value contained in the ConstantRange.
LLVM_ABI OverflowResult unsignedMulMayOverflow(const ConstantRange &Other) const
Return whether unsigned mul of the two ranges always/never overflows.
LLVM_ABI ConstantRange signExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
LLVM_ABI APInt getUnsignedMax() const
Return the largest unsigned value contained in the ConstantRange.
LLVM_ABI ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
LLVM_ABI APInt getSignedMax() const
Return the largest signed value contained in the ConstantRange.
OverflowResult
Represents whether an operation on the given constant range is known to always or never overflow.
@ AlwaysOverflowsHigh
Always overflows in the direction of signed/unsigned max value.
@ AlwaysOverflowsLow
Always overflows in the direction of signed/unsigned min value.
@ MayOverflow
May or may not overflow.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
LLVM_ABI OverflowResult signedSubMayOverflow(const ConstantRange &Other) const
Return whether signed sub of the two ranges always/never overflows.
uint64_t getZExtValue() const
const APInt & getAPIntValue() const
This is an important base class in LLVM.
Definition Constant.h:43
LLVM_ABI Constant * getSplatValue(bool AllowPoison=false) const
If all elements of the vector constant have the same value, return that value.
LLVM_ABI Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
DWARF expression.
static LLVM_ABI ExtOps getExtOps(unsigned FromSize, unsigned ToSize, bool Signed)
Returns the ops for a zero- or sign-extension in a DIExpression.
static LLVM_ABI void appendOffset(SmallVectorImpl< uint64_t > &Ops, int64_t Offset)
Append Ops with operations to apply the Offset.
static LLVM_ABI DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
static LLVM_ABI const DIExpression * convertToVariadicExpression(const DIExpression *Expr)
If Expr is a non-variadic expression (i.e.
static LLVM_ABI std::optional< DIExpression * > createFragmentExpression(const DIExpression *Expr, unsigned OffsetInBits, unsigned SizeInBits)
Create a DIExpression to describe one part of an aggregate variable that is fragmented across multipl...
Base class for variables.
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
bool isLittleEndian() const
Layout endianness...
Definition DataLayout.h:217
LLVM_ABI IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space.
LLVM_ABI Align getABITypeAlign(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
LLVM_ABI unsigned getPointerTypeSizeInBits(Type *) const
The pointer representation size in bits for this type.
LLVM_ABI Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
A debug info location.
Definition DebugLoc.h:123
Implements a dense probed hash-table based set.
Definition DenseSet.h:279
const char * getSymbol() const
FoldingSetNodeID - This class is used to gather all the unique data bits of a node.
Definition FoldingSet.h:210
Data structure describing the variable locations in a function.
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
Definition Function.h:711
AttributeList getAttributes() const
Return the attribute list for this Function.
Definition Function.h:354
LLVM_ABI unsigned getAddressSpace() const
const GlobalValue * getGlobal() const
bool isThreadLocal() const
If the value is "Thread Local", its value isn't shared by the threads.
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
PointerType * getType() const
Global values are always pointers.
This class is used to form a handle around another node that is persistent and is updated across invo...
const SDValue & getValue() const
static LLVM_ABI bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
Tracks which library functions to use for a particular subtarget.
LLVM_ABI CallingConv::ID getLibcallImplCallingConv(RTLIB::LibcallImpl Call) const
Get the CallingConv that should be used for the specified libcall.
LLVM_ABI RTLIB::LibcallImpl getLibcallImpl(RTLIB::Libcall Call) const
Return the lowering's selection of implementation call for Call.
This SDNode is used for LIFETIME_START/LIFETIME_END values.
This class is used to represent ISD::LOAD nodes.
static LocationSize precise(uint64_t Value)
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition MCSymbol.h:42
Metadata node.
Definition Metadata.h:1080
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1444
Machine Value Type.
SimpleValueType SimpleTy
static MVT getIntegerVT(unsigned BitWidth)
Abstract base class for all machine specific constantpool value subclasses.
virtual void addSelectionDAGCSEId(FoldingSetNodeID &ID)=0
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
LLVM_ABI int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
bool isFixedObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a fixed stack object.
void setObjectAlignment(int ObjectIdx, Align Alignment)
setObjectAlignment - Change the alignment of the specified stack object.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
Function & getFunction()
Return the LLVM function that this machine code represents.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
A description of a memory reference used in the backend.
const MDNode * getRanges() const
Return the range tag for the memory reference.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
const MachinePointerInfo & getPointerInfo() const
Flags getFlags() const
Return the raw flags of the source value,.
This class contains meta information specific to a module.
An SDNode that represents everything that will be needed to construct a MachineInstr.
This class is used to represent an MGATHER node.
This class is used to represent an MLOAD node.
This class is used to represent an MSCATTER node.
This class is used to represent an MSTORE node.
This SDNode is used for target intrinsics that touch memory and need an associated MachineMemOperand.
size_t getNumMemOperands() const
Return the number of memory operands.
LLVM_ABI MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTs, EVT memvt, PointerUnion< MachineMemOperand *, MachineMemOperand ** > memrefs)
Constructor that supports single or multiple MMOs.
PointerUnion< MachineMemOperand *, MachineMemOperand ** > MemRefs
Memory reference information.
MachineMemOperand * getMemOperand() const
Return the unique MachineMemOperand object describing the memory reference performed by operation.
const MachinePointerInfo & getPointerInfo() const
ArrayRef< MachineMemOperand * > memoperands() const
Return the memory operands for this node.
unsigned getRawSubclassData() const
Return the SubclassData value, without HasDebugValue.
EVT getMemoryVT() const
Return the type of the in-memory value.
Representation for a specific memory location.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
Function * getFunction(StringRef Name) const
Look up the specified function in the module symbol table.
Definition Module.cpp:235
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
Definition ArrayRef.h:298
The optimization diagnostic interface.
Pass interface - Implemented by all 'passes'.
Definition Pass.h:99
Class to represent pointers.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
unsigned getAddressSpace() const
Return the address space of the Pointer type.
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
A discriminated union of two or more pointer types, with the discriminator in the low bits of the poi...
bool isNull() const
Test if the pointer held in the union is null, regardless of which type it is.
Analysis providing profile information.
void Deallocate(SubClass *E)
Deallocate - Release storage for the pointed-to object.
Wrapper class representing virtual and physical registers.
Definition Register.h:20
Keeps track of dbg_value information through SDISel.
LLVM_ABI void add(SDDbgValue *V, bool isParameter)
LLVM_ABI void erase(const SDNode *Node)
Invalidate all DbgValues attached to the node and remove it from the Node-to-DbgValues map.
Holds the information from a dbg_label node through SDISel.
Holds the information for a single machine location through SDISel; either an SDNode,...
static SDDbgOperand fromNode(SDNode *Node, unsigned ResNo)
static SDDbgOperand fromFrameIdx(unsigned FrameIdx)
static SDDbgOperand fromVReg(Register VReg)
static SDDbgOperand fromConst(const Value *Const)
@ SDNODE
Value is the result of an expression.
Holds the information from a dbg_value node through SDISel.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
const DebugLoc & getDebugLoc() const
unsigned getIROrder() const
This class provides iterator support for SDUse operands that use a specific SDNode.
Represents one node in the SelectionDAG.
ArrayRef< SDUse > ops() const
const APInt & getAsAPIntVal() const
Helper method returns the APInt value of a ConstantSDNode.
LLVM_ABI void dumprFull(const SelectionDAG *G=nullptr) const
printrFull to dbgs().
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
bool isDivergent() const
LLVM_ABI bool isOnlyUserOf(const SDNode *N) const
Return true if this node is the only use of N.
iterator_range< value_op_iterator > op_values() const
unsigned getIROrder() const
Return the node ordering.
static constexpr size_t getMaxNumOperands()
Return the maximum number of operands that a SDNode can hold.
iterator_range< use_iterator > uses()
MemSDNodeBitfields MemSDNodeBits
LLVM_ABI void Profile(FoldingSetNodeID &ID) const
Gather unique data for the node.
bool getHasDebugValue() const
SDNodeFlags getFlags() const
void setNodeId(int Id)
Set unique node id.
LLVM_ABI void intersectFlagsWith(const SDNodeFlags Flags)
Clear any flags in this node that aren't also set in Flags.
static bool hasPredecessorHelper(const SDNode *N, SmallPtrSetImpl< const SDNode * > &Visited, SmallVectorImpl< const SDNode * > &Worklist, unsigned int MaxSteps=0, bool TopologicalPrune=false)
Returns true if N is a predecessor of any node in Worklist.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
bool use_empty() const
Return true if there are no uses of this node.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
unsigned getNumOperands() const
Return the number of values used by this operation.
const SDValue & getOperand(unsigned Num) const
static LLVM_ABI bool areOnlyUsersOf(ArrayRef< const SDNode * > Nodes, const SDNode *N)
Return true if all the users of N are contained in Nodes.
use_iterator use_begin() const
Provide iteration support to walk over all uses of an SDNode.
LLVM_ABI bool isOperandOf(const SDNode *N) const
Return true if this node is an operand of N.
const APInt & getConstantOperandAPInt(unsigned Num) const
Helper method returns the APInt of a ConstantSDNode operand.
std::optional< APInt > bitcastToAPInt() const
LLVM_ABI bool hasPredecessor(const SDNode *N) const
Return true if N is a predecessor of this node.
LLVM_ABI bool hasAnyUseOfValue(unsigned Value) const
Return true if there are any use of the indicated value.
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
bool isUndef() const
Returns true if the node type is UNDEF or POISON.
op_iterator op_end() const
op_iterator op_begin() const
static use_iterator use_end()
LLVM_ABI void DropOperands()
Release the operands and set this node to have zero operands.
SDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs)
Create an SDNode.
Represents a use of a SDNode.
SDNode * getUser()
This returns the SDNode that contains this Use.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
bool isUndef() const
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
LLVM_ABI bool isOperandOf(const SDNode *N) const
Return true if the referenced return value is an operand of N.
SDValue()=default
LLVM_ABI bool reachesChainWithoutSideEffects(SDValue Dest, unsigned Depth=2) const
Return true if this operand (which must be a chain) reaches the specified operand without crossing an...
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
bool use_empty() const
Return true if there are no nodes using value ResNo of Node.
const APInt & getConstantOperandAPInt(unsigned i) const
uint64_t getScalarValueSizeInBits() const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
uint64_t getConstantOperandVal(unsigned i) const
unsigned getOpcode() const
virtual void verifyTargetNode(const SelectionDAG &DAG, const SDNode *N) const
Checks that the given target-specific node is valid. Aborts if it is not.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
LLVM_ABI SDValue getElementCount(const SDLoc &DL, EVT VT, ElementCount EC)
LLVM_ABI Align getReducedAlign(EVT VT, bool UseABI)
In most cases this function returns the ABI alignment for a given type, except for illegal vector typ...
LLVM_ABI SDValue getVPZeroExtendInReg(SDValue Op, SDValue Mask, SDValue EVL, const SDLoc &DL, EVT VT)
Return the expression required to zero extend the Op value assuming it was the smaller SrcTy value.
LLVM_ABI SDValue getShiftAmountOperand(EVT LHSTy, SDValue Op)
Return the specified value casted to the target's desired shift amount type.
LLVM_ABI SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI std::pair< SDValue, SDValue > getMemccpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue C, SDValue Size, const CallInst *CI)
Lower a memccpy operation into a target library call and return the resulting chain and call result a...
LLVM_ABI bool isKnownNeverLogicalZero(SDValue Op, const APInt &DemandedElts, unsigned Depth=0) const
Test whether the given floating point SDValue (or all elements of it, if it is a vector) is known to ...
LLVM_ABI SDValue getExtLoadVP(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment, MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo, bool IsExpanding=false)
SDValue getExtractVectorElt(const SDLoc &DL, EVT VT, SDValue Vec, unsigned Idx)
Extract element at Idx from Vec.
LLVM_ABI SDValue getSplatSourceVector(SDValue V, int &SplatIndex)
If V is a splatted value, return the source vector and its splat index.
LLVM_ABI SDValue getLabelNode(unsigned Opcode, const SDLoc &dl, SDValue Root, MCSymbol *Label)
LLVM_ABI OverflowKind computeOverflowForUnsignedSub(SDValue N0, SDValue N1) const
Determine if the result of the unsigned sub of 2 nodes can overflow.
LLVM_ABI unsigned ComputeMaxSignificantBits(SDValue Op, unsigned Depth=0) const
Get the upper bound on bit size for this Value Op as a signed integer.
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
LLVM_ABI std::pair< SDValue, SDValue > getStrlen(SDValue Chain, const SDLoc &dl, SDValue Src, const CallInst *CI)
Lower a strlen operation into a target library call and return the resulting chain and call result as...
LLVM_ABI SDValue getMaskedGather(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType, ISD::LoadExtType ExtTy)
LLVM_ABI SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS, unsigned DestAS)
Return an AddrSpaceCastSDNode.
LLVM_ABI SDValue FoldSetCC(EVT VT, SDValue N1, SDValue N2, ISD::CondCode Cond, const SDLoc &dl, SDNodeFlags Flags={})
Constant fold a setcc to true or false.
bool isKnownNeverSNaN(SDValue Op, const APInt &DemandedElts, unsigned Depth=0) const
LLVM_ABI std::optional< bool > isBoolConstant(SDValue N) const
Check if a value \p N is a constant using the target's BooleanContent for its type.
LLVM_ABI SDValue getStackArgumentTokenFactor(SDValue Chain)
Compute a TokenFactor to force all the incoming stack arguments to be loaded from the stack.
const TargetSubtargetInfo & getSubtarget() const
LLVM_ABI ConstantRange computeConstantRange(SDValue Op, bool ForSigned, unsigned Depth=0) const
Determine the possible constant range of an integer or vector of integers.
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL)
LLVM_ABI void updateDivergence(SDNode *N)
LLVM_ABI SDValue getSplatValue(SDValue V, bool LegalTypes=false)
If V is a splat vector, return its scalar source operand by extracting that element from the source v...
LLVM_ABI SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget=false, bool IsOpaque=false)
LLVM_ABI MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
LLVM_ABI void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
LLVM_ABI SDValue getNeutralElement(unsigned Opcode, const SDLoc &DL, EVT VT, SDNodeFlags Flags)
Get the (commutative) neutral element for the given opcode, if it exists.
LLVM_ABI SDValue getAtomicMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Value, SDValue Size, Type *SizeTy, unsigned ElemSz, bool isTailCall, MachinePointerInfo DstPtrInfo)
LLVM_ABI SDValue getAtomicLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT MemVT, EVT VT, SDValue Chain, SDValue Ptr, MachineMemOperand *MMO)
LLVM_ABI SDNode * getNodeIfExists(unsigned Opcode, SDVTList VTList, ArrayRef< SDValue > Ops, const SDNodeFlags Flags, bool AllowCommute=false)
Get the specified node if it's already available, or else return NULL.
LLVM_ABI SDValue getPseudoProbeNode(const SDLoc &Dl, SDValue Chain, uint64_t Guid, uint64_t Index, uint32_t Attr)
Creates a PseudoProbeSDNode with function GUID Guid and the index of the block Index it is probing,...
LLVM_ABI SDValue getFreeze(SDValue V)
Return a freeze using the SDLoc of the value operand.
LLVM_ABI SDNode * SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT)
These are used for target selectors to mutate the specified node to have the specified return type,...
LLVM_ABI void init(MachineFunction &NewMF, OptimizationRemarkEmitter &NewORE, Pass *PassPtr, const TargetLibraryInfo *LibraryInfo, const LibcallLoweringInfo *LibcallsInfo, UniformityInfo *UA, ProfileSummaryInfo *PSIin, BlockFrequencyInfo *BFIin, MachineModuleInfo &MMI, FunctionVarLocs const *FnVarLocs)
Prepare this SelectionDAG to process code in the given MachineFunction.
LLVM_ABI SelectionDAG(const TargetMachine &TM, CodeGenOptLevel)
LLVM_ABI SDValue getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, MachinePointerInfo DstPtrInfo, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI SDValue getBitcastedSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by first bitcasting (from potentia...
LLVM_ABI SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offs=0, bool isT=false, unsigned TargetFlags=0)
LLVM_ABI SDValue getStridedLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &DL, SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, bool IsExpanding=false)
LLVM_ABI SDValue getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDVTList VTs, SDValue Chain, SDValue Ptr, SDValue Cmp, SDValue Swp, MachineMemOperand *MMO)
Gets a node for an atomic cmpxchg op.
LLVM_ABI SDValue makeEquivalentMemoryOrdering(SDValue OldChain, SDValue NewMemOpChain)
If an existing load has uses of its chain, create a token factor node with that chain and the new mem...
LLVM_ABI bool isConstantIntBuildVectorOrConstantInt(SDValue N, bool AllowOpaques=true) const
Test whether the given value is a constant int or similar node.
LLVM_ABI void ReplaceAllUsesOfValuesWith(const SDValue *From, const SDValue *To, unsigned Num)
Like ReplaceAllUsesOfValueWith, but for multiple values at once.
LLVM_ABI SDValue getJumpTableDebugInfo(int JTI, SDValue Chain, const SDLoc &DL)
LLVM_ABI SDValue getSymbolFunctionGlobalAddress(SDValue Op, Function **TargetFunction=nullptr)
Return a GlobalAddress of the function from the current module with name matching the given ExternalS...
LLVM_ABI std::optional< unsigned > getValidMaximumShiftAmount(SDValue V, const APInt &DemandedElts, unsigned Depth=0) const
If a SHL/SRA/SRL node V has shift amounts that are all less than the element bit-width of the shift n...
LLVM_ABI SDValue UnrollVectorOp(SDNode *N, unsigned ResNE=0)
Utility function used by legalize and lowering to "unroll" a vector operation by splitting out the sc...
LLVM_ABI SDValue getVScale(const SDLoc &DL, EVT VT, APInt MulImm)
Return a node that represents the runtime scaling 'MulImm * RuntimeVL'.
LLVM_ABI SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
OverflowKind
Used to represent the possible overflow behavior of an operation.
static LLVM_ABI unsigned getHasPredecessorMaxSteps()
LLVM_ABI bool haveNoCommonBitsSet(SDValue A, SDValue B) const
Return true if A and B have no common bits set.
SDValue getExtractSubvector(const SDLoc &DL, EVT VT, SDValue Vec, unsigned Idx)
Return the VT typed sub-vector of Vec at Idx.
LLVM_ABI bool cannotBeOrderedNegativeFP(SDValue Op) const
Test whether the given float value is known to be positive.
LLVM_ABI SDValue getRegister(Register Reg, EVT VT)
LLVM_ABI bool calculateDivergence(SDNode *N)
LLVM_ABI std::pair< SDValue, SDValue > getStrcmp(SDValue Chain, const SDLoc &dl, SDValue S0, SDValue S1, const CallInst *CI)
Lower a strcmp operation into a target library call and return the resulting chain and call result as...
LLVM_ABI SDValue getGetFPEnv(SDValue Chain, const SDLoc &dl, SDValue Ptr, EVT MemVT, MachineMemOperand *MMO)
LLVM_ABI SDValue getAssertAlign(const SDLoc &DL, SDValue V, Align A)
Return an AssertAlignSDNode.
LLVM_ABI SDNode * mutateStrictFPToFP(SDNode *Node)
Mutate the specified strict FP node to its non-strict equivalent, unlinking the node from its chain a...
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
LLVM_ABI bool canIgnoreSignBitOfZero(const SDUse &Use) const
Check if a use of a float value is insensitive to signed zeros.
LLVM_ABI bool SignBitIsZeroFP(SDValue Op, unsigned Depth=0) const
Return true if the sign bit of Op is known to be zero, for a floating-point value.
LLVM_ABI SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=LocationSize::precise(0), const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
SDValue getInsertSubvector(const SDLoc &DL, SDValue Vec, SDValue SubVec, unsigned Idx)
Insert SubVec at the Idx element of Vec.
LLVM_ABI SDValue getBitcastedZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by first bitcasting (from potentia...
LLVM_ABI SDValue getStepVector(const SDLoc &DL, EVT ResVT, const APInt &StepVal)
Returns a vector of type ResVT whose elements contain the linear sequence <0, Step,...
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false, SDNodeFlags Flags={})
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
LLVM_ABI SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain, SDValue Ptr, SDValue Val, MachineMemOperand *MMO)
Gets a node for an atomic op, produces result (if relevant) and chain and takes 2 operands.
LLVM_ABI SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), BatchAAResults *BatchAA=nullptr)
LLVM_ABI Align getEVTAlign(EVT MemoryVT) const
Compute the default alignment value for the given type.
LLVM_ABI bool shouldOptForSize() const
LLVM_ABI SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
LLVM_ABI SDValue getVPZExtOrTrunc(const SDLoc &DL, EVT VT, SDValue Op, SDValue Mask, SDValue EVL)
Convert a vector-predicated Op, which must be an integer vector, to the vector-type VT,...
const TargetLowering & getTargetLoweringInfo() const
LLVM_ABI bool isEqualTo(SDValue A, SDValue B) const
Test whether two SDValues are known to compare equal.
static constexpr unsigned MaxRecursionDepth
LLVM_ABI SDValue getStridedStoreVP(SDValue Chain, const SDLoc &DL, SDValue Val, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
bool isGuaranteedNotToBePoison(SDValue Op, unsigned Depth=0) const
Return true if this function can prove that Op is never poison.
LLVM_ABI SDValue expandVACopy(SDNode *Node)
Expand the specified ISD::VACOPY node as the Legalize pass would.
LLVM_ABI SDValue getIndexedMaskedLoad(SDValue OrigLoad, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
LLVM_ABI APInt computeVectorKnownZeroElements(SDValue Op, const APInt &DemandedElts, unsigned Depth=0) const
For each demanded element of a vector, see if it is known to be zero.
LLVM_ABI void AddDbgValue(SDDbgValue *DB, bool isParameter)
Add a dbg_value SDNode.
bool NewNodesMustHaveLegalTypes
When true, additional steps are taken to ensure that getConstant() and similar functions return DAG n...
LLVM_ABI std::pair< EVT, EVT > GetSplitDestVTs(const EVT &VT) const
Compute the VTs needed for the low/hi parts of a type which is split (or expanded) into two not neces...
LLVM_ABI void salvageDebugInfo(SDNode &N)
To be invoked on an SDNode that is slated to be erased.
LLVM_ABI SDNode * MorphNodeTo(SDNode *N, unsigned Opc, SDVTList VTs, ArrayRef< SDValue > Ops)
This mutates the specified node to have the specified return type, opcode, and operands.
LLVM_ABI std::pair< SDValue, SDValue > UnrollVectorOverflowOp(SDNode *N, unsigned ResNE=0)
Like UnrollVectorOp(), but for the [US](ADD|SUB|MUL)O family of opcodes.
allnodes_const_iterator allnodes_begin() const
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
LLVM_ABI SDValue getGatherVP(SDVTList VTs, EVT VT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
LLVM_ABI SDValue getBitcastedAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by first bitcasting (from potentia...
LLVM_ABI bool isSplatValue(SDValue V, const APInt &DemandedElts, APInt &UndefElts, unsigned Depth=0) const
Test whether V has a splatted value for all the demanded elements.
LLVM_ABI void DeleteNode(SDNode *N)
Remove the specified node from the system.
LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
LLVM_ABI SDDbgValue * getDbgValueList(DIVariable *Var, DIExpression *Expr, ArrayRef< SDDbgOperand > Locs, ArrayRef< SDNode * > Dependencies, bool IsIndirect, const DebugLoc &DL, unsigned O, bool IsVariadic)
Creates a SDDbgValue node from a list of locations.
LLVM_ABI std::pair< SDValue, SDValue > getStrcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, const CallInst *CI)
Lower a strcpy operation into a target library call and return the resulting chain and call result as...
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build Select's if you just have operands and don't want to check...
LLVM_ABI SDValue getNegative(SDValue Val, const SDLoc &DL, EVT VT)
Create negative operation as (SUB 0, Val).
LLVM_ABI std::optional< unsigned > getValidShiftAmount(SDValue V, const APInt &DemandedElts, unsigned Depth=0) const
If a SHL/SRA/SRL node V has a uniform shift amount that is less than the element bit-width of the shi...
LLVM_ABI void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
LLVM_ABI SDValue simplifySelect(SDValue Cond, SDValue TVal, SDValue FVal)
Try to simplify a select/vselect into 1 of its operands or a constant.
LLVM_ABI SDValue getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT)
Return the expression required to zero extend the Op value assuming it was the smaller SrcTy value.
LLVM_ABI bool isConstantFPBuildVectorOrConstantFP(SDValue N) const
Test whether the given value is a constant FP or similar node.
const DataLayout & getDataLayout() const
SDValue getPartialReduceMLS(unsigned Opc, const SDLoc &DL, SDValue Acc, SDValue LHS, SDValue RHS)
Get an expression that implements a partial multiply-subtract reduction.
LLVM_ABI SDValue expandVAArg(SDNode *Node)
Expand the specified ISD::VAARG node as the Legalize pass would.
LLVM_ABI SDValue getTokenFactor(const SDLoc &DL, SmallVectorImpl< SDValue > &Vals)
Creates a new TokenFactor containing Vals.
LLVM_ABI bool doesNodeExist(unsigned Opcode, SDVTList VTList, ArrayRef< SDValue > Ops)
Check if a node exists without modifying its flags.
LLVM_ABI ConstantRange computeConstantRangeIncludingKnownBits(SDValue Op, bool ForSigned, unsigned Depth=0) const
Combine constant ranges from computeConstantRange() and computeKnownBits().
const SelectionDAGTargetInfo & getSelectionDAGInfo() const
LLVM_ABI bool areNonVolatileConsecutiveLoads(LoadSDNode *LD, LoadSDNode *Base, unsigned Bytes, int Dist) const
Return true if loads are next to each other and can be merged.
LLVM_ABI SDValue getMaskedHistogram(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
LLVM_ABI SDDbgLabel * getDbgLabel(DILabel *Label, const DebugLoc &DL, unsigned O)
Creates a SDDbgLabel node.
LLVM_ABI SDValue getStoreVP(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, SDValue Offset, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
LLVM_ABI OverflowKind computeOverflowForUnsignedMul(SDValue N0, SDValue N1) const
Determine if the result of the unsigned mul of 2 nodes can overflow.
LLVM_ABI void copyExtraInfo(SDNode *From, SDNode *To)
Copy extra info associated with one node to another.
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
LLVM_ABI SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
LLVM_ABI SDValue getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, bool isTargetGA=false, unsigned TargetFlags=0)
LLVM_ABI SDValue getVAArg(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue SV, unsigned Align)
VAArg produces a result and token chain, and takes a pointer and a source value as input.
LLVM_ABI SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI SDValue getLoadFFVP(EVT VT, const SDLoc &DL, SDValue Chain, SDValue Ptr, SDValue Mask, SDValue EVL, MachineMemOperand *MMO)
LLVM_ABI SDValue getTypeSize(const SDLoc &DL, EVT VT, TypeSize TS)
LLVM_ABI SDValue getMDNode(const MDNode *MD)
Return an MDNodeSDNode which holds an MDNode.
LLVM_ABI void clear()
Clear state and free memory necessary to make this SelectionDAG ready to process a new block.
LLVM_ABI std::pair< SDValue, SDValue > getMemcmp(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, const CallInst *CI)
Lower a memcmp operation into a target library call and return the resulting chain and call result as...
LLVM_ABI void ReplaceAllUsesWith(SDValue From, SDValue To)
Modify anything using 'From' to use 'To' instead.
LLVM_ABI SDValue getCommutedVectorShuffle(const ShuffleVectorSDNode &SV)
Returns an ISD::VECTOR_SHUFFLE node semantically equivalent to the shuffle node in input but with swa...
LLVM_ABI std::pair< SDValue, SDValue > SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the vector with EXTRACT_SUBVECTOR using the provided VTs and return the low/high part.
LLVM_ABI SDValue makeStateFunctionCall(unsigned LibFunc, SDValue Ptr, SDValue InChain, const SDLoc &DLoc)
Helper used to make a call to a library function that has one argument of pointer type.
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(SDValue Op, bool PoisonOnly=false, unsigned Depth=0) const
Return true if this function can prove that Op is never poison and, if PoisonOnly is false,...
LLVM_ABI SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
LLVM_ABI SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
LLVM_ABI SDValue getIndexedLoadVP(SDValue OrigLoad, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
LLVM_ABI SDValue getSrcValue(const Value *v)
Construct a node to track a Value* through the backend.
SDValue getSplatVector(EVT VT, const SDLoc &DL, SDValue Op)
LLVM_ABI SDValue getAtomicMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Type *SizeTy, unsigned ElemSz, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
LLVM_ABI OverflowKind computeOverflowForSignedMul(SDValue N0, SDValue N1) const
Determine if the result of the signed mul of 2 nodes can overflow.
LLVM_ABI MaybeAlign InferPtrAlign(SDValue Ptr) const
Infer alignment of a load / store address.
LLVM_ABI void dump() const
Dump the textual format of this DAG.
LLVM_ABI bool MaskedValueIsAllOnes(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if '(Op & Mask) == Mask'.
LLVM_ABI bool SignBitIsZero(SDValue Op, unsigned Depth=0) const
Return true if the sign bit of Op is known to be zero.
LLVM_ABI void RemoveDeadNodes()
This method deletes all unreachable nodes in the SelectionDAG.
LLVM_ABI void RemoveDeadNode(SDNode *N)
Remove the specified node from the system.
LLVM_ABI void AddDbgLabel(SDDbgLabel *DB)
Add a dbg_label SDNode.
bool isConstantValueOfAnyType(SDValue N) const
LLVM_ABI SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
LLVM_ABI SDValue getBasicBlock(MachineBasicBlock *MBB)
LLVM_ABI SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
LLVM_ABI SDDbgValue * getVRegDbgValue(DIVariable *Var, DIExpression *Expr, Register VReg, bool IsIndirect, const DebugLoc &DL, unsigned O)
Creates a VReg SDDbgValue node.
LLVM_ABI KnownFPClass computeKnownFPClass(SDValue Op, FPClassTest InterestedClasses, unsigned Depth=0) const
Determine floating-point class information about Op.
LLVM_ABI SDValue getEHLabel(const SDLoc &dl, SDValue Root, MCSymbol *Label)
LLVM_ABI SDValue getIndexedStoreVP(SDValue OrigStore, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
LLVM_ABI bool isKnownNeverZero(SDValue Op, unsigned Depth=0) const
Test whether the given SDValue is known to contain non-zero value(s).
LLVM_ABI SDValue getIndexedStore(SDValue OrigStore, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
LLVM_ABI SDValue FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops, SDNodeFlags Flags=SDNodeFlags())
LLVM_ABI std::optional< unsigned > getValidMinimumShiftAmount(SDValue V, const APInt &DemandedElts, unsigned Depth=0) const
If a SHL/SRA/SRL node V has shift amounts that are all less than the element bit-width of the shift n...
LLVM_ABI SDValue getSetFPEnv(SDValue Chain, const SDLoc &dl, SDValue Ptr, EVT MemVT, MachineMemOperand *MMO)
LLVM_ABI SDValue getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT, EVT OpVT)
Convert Op, which must be of integer type, to the integer type VT, by using an extension appropriate ...
LLVM_ABI SDValue getMaskedStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Base, SDValue Offset, SDValue Mask, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
LLVM_ABI SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
LLVM_ABI std::pair< SDValue, SDValue > getStrictFPExtendOrRound(SDValue Op, SDValue Chain, const SDLoc &DL, EVT VT)
Convert Op, which must be a STRICT operation of float type, to the float type VT, by either extending...
LLVM_ABI std::pair< SDValue, SDValue > SplitEVL(SDValue N, EVT VecVT, const SDLoc &DL)
Split the explicit vector length parameter of a VP operation.
LLVM_ABI SDValue getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either truncating it or perform...
LLVM_ABI SDValue getVPLogicalNOT(const SDLoc &DL, SDValue Val, SDValue Mask, SDValue EVL, EVT VT)
Create a vector-predicated logical NOT operation as (VP_XOR Val, BooleanOne, Mask,...
LLVM_ABI SDValue getMaskFromElementCount(const SDLoc &DL, EVT VT, ElementCount Len)
Return a vector with the first 'Len' lanes set to true and remaining lanes set to false.
LLVM_ABI SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
iterator_range< allnodes_iterator > allnodes()
LLVM_ABI SDValue getBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, bool isTarget=false, unsigned TargetFlags=0)
LLVM_ABI SDValue WidenVector(const SDValue &N, const SDLoc &DL)
Widen the vector up to the next power of two using INSERT_SUBVECTOR.
const LibcallLoweringInfo & getLibcalls() const
LLVM_ABI SDValue getLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo, EVT MemVT, Align Alignment, MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo, const MDNode *Ranges=nullptr, bool IsExpanding=false)
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDDbgValue * getConstantDbgValue(DIVariable *Var, DIExpression *Expr, const Value *C, const DebugLoc &DL, unsigned O)
Creates a constant SDDbgValue node.
LLVM_ABI SDValue getScatterVP(SDVTList VTs, EVT VT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
LLVM_ABI SDValue getValueType(EVT)
LLVM_ABI SDValue getLifetimeNode(bool IsStart, const SDLoc &dl, SDValue Chain, int FrameIndex)
Creates a LifetimeSDNode that starts (IsStart==true) or ends (IsStart==false) the lifetime of the Fra...
ArrayRef< SDDbgValue * > GetDbgValues(const SDNode *SD) const
Get the debug values which reference the given SDNode.
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
LLVM_ABI OverflowKind computeOverflowForSignedAdd(SDValue N0, SDValue N1) const
Determine if the result of the signed addition of 2 nodes can overflow.
LLVM_ABI SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by tr...
LLVM_ABI unsigned AssignTopologicalOrder()
Topological-sort the AllNodes list and assign a unique node id for each node in the DAG based on th...
ilist< SDNode >::size_type allnodes_size() const
LLVM_ABI bool isKnownNeverNaN(SDValue Op, const APInt &DemandedElts, bool SNaN=false, unsigned Depth=0) const
Test whether the given SDValue (or all elements of it, if it is a vector) is known to never be NaN in...
LLVM_ABI SDValue FoldConstantBuildVector(BuildVectorSDNode *BV, const SDLoc &DL, EVT DstEltVT)
Fold BUILD_VECTOR of constants/undefs to the destination type BUILD_VECTOR of constants/undefs elemen...
LLVM_ABI SDValue getAtomicMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Type *SizeTy, unsigned ElemSz, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
LLVM_ABI SDValue getIndexedMaskedStore(SDValue OrigStore, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
LLVM_ABI SDValue getTruncStoreVP(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo, bool IsCompressing=false)
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
LLVM_ABI unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
LLVM_ABI bool MaskedVectorIsZero(SDValue Op, const APInt &DemandedElts, unsigned Depth=0) const
Return true if 'Op' is known to be zero in DemandedElts.
LLVM_ABI SDValue getBoolConstant(bool V, const SDLoc &DL, EVT VT, EVT OpVT)
Create a true or false constant of type VT using the target's BooleanContent for type OpVT.
LLVM_ABI SDDbgValue * getFrameIndexDbgValue(DIVariable *Var, DIExpression *Expr, unsigned FI, bool IsIndirect, const DebugLoc &DL, unsigned O)
Creates a FrameIndex SDDbgValue node.
LLVM_ABI SDValue getExtStridedLoadVP(ISD::LoadExtType ExtType, const SDLoc &DL, EVT VT, SDValue Chain, SDValue Ptr, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, bool IsExpanding=false)
LLVM_ABI SDValue getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), BatchAAResults *BatchAA=nullptr)
LLVM_ABI SDValue getJumpTable(int JTI, EVT VT, bool isTarget=false, unsigned TargetFlags=0)
LLVM_ABI bool isBaseWithConstantOffset(SDValue Op) const
Return true if the specified operand is an ISD::ADD with a ConstantSDNode on the right-hand side,...
LLVM_ABI SDValue getVPPtrExtOrTrunc(const SDLoc &DL, EVT VT, SDValue Op, SDValue Mask, SDValue EVL)
Convert a vector-predicated Op, which must be of integer type, to the vector-type integer type VT,...
LLVM_ABI SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI void getTopologicallyOrderedNodes(SmallVectorImpl< const SDNode * > &SortedNodes) const
Get all the nodes in their topological order without modifying any states.
LLVM_ABI void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
LLVM_ABI std::pair< SDValue, SDValue > getStrstr(SDValue Chain, const SDLoc &dl, SDValue S0, SDValue S1, const CallInst *CI)
Lower a strstr operation into a target library call and return the resulting chain and call result as...
LLVM_ABI SDValue getPtrExtendInReg(SDValue Op, const SDLoc &DL, EVT VT)
Return the expression required to extend the Op as a pointer value assuming it was the smaller SrcTy ...
LLVM_ABI bool canCreateUndefOrPoison(SDValue Op, const APInt &DemandedElts, bool PoisonOnly=false, bool ConsiderFlags=true, unsigned Depth=0) const
Return true if Op can create undef or poison from non-undef & non-poison operands.
LLVM_ABI OverflowKind computeOverflowForUnsignedAdd(SDValue N0, SDValue N1) const
Determine if the result of the unsigned addition of 2 nodes can overflow.
SDValue getPOISON(EVT VT)
Return a POISON node. POISON does not have a useful SDLoc.
SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op)
Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all elements.
LLVM_ABI SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
LLVM_ABI SDValue getTruncStridedStoreVP(SDValue Chain, const SDLoc &DL, SDValue Val, SDValue Ptr, SDValue Stride, SDValue Mask, SDValue EVL, EVT SVT, MachineMemOperand *MMO, bool IsCompressing=false)
LLVM_ABI void canonicalizeCommutativeBinop(unsigned Opcode, SDValue &N1, SDValue &N2) const
Swap N1 and N2 if Opcode is a commutative binary opcode and the canonical form expects the opposite o...
LLVM_ABI KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
LLVM_ABI SDValue getRegisterMask(const uint32_t *RegMask)
LLVM_ABI SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
LLVM_ABI SDValue getCondCode(ISD::CondCode Cond)
LLVM_ABI bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
LLVM_ABI bool isKnownToBeAPowerOfTwoFP(SDValue Val, unsigned Depth=0) const
Test if the given fp value is known to be an integer power-of-2, either positive or negative.
LLVM_ABI OverflowKind computeOverflowForSignedSub(SDValue N0, SDValue N1) const
Determine if the result of the signed sub of 2 nodes can overflow.
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
LLVM_ABI SDValue simplifyFPBinop(unsigned Opcode, SDValue X, SDValue Y, SDNodeFlags Flags)
Try to simplify a floating-point binary operation into 1 of its operands or a constant.
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
LLVM_ABI bool isKnownToBeAPowerOfTwo(SDValue Val, bool OrZero=false, unsigned Depth=0) const
Test if the given value is known to have exactly one bit set.
LLVM_ABI SDValue getDeactivationSymbol(const GlobalValue *GV)
LLVM_ABI SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
LLVM_ABI SDValue getMCSymbol(MCSymbol *Sym, EVT VT)
LLVM_ABI bool isUndef(unsigned Opcode, ArrayRef< SDValue > Ops)
Return true if the result of this operation is always undefined.
LLVM_ABI SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
LLVM_ABI SDNode * UpdateNodeOperands(SDNode *N, SDValue Op)
Mutate the specified node in-place to have the specified operands.
LLVM_ABI std::pair< EVT, EVT > GetDependentSplitDestVTs(const EVT &VT, const EVT &EnvVT, bool *HiIsEmpty) const
Compute the VTs needed for the low/hi parts of a type, dependent on an enveloping VT that has been sp...
LLVM_ABI SDValue foldConstantFPMath(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops)
Fold floating-point operations when all operands are constants and/or undefined.
LLVM_ABI std::optional< ConstantRange > getValidShiftAmountRange(SDValue V, const APInt &DemandedElts, unsigned Depth) const
If a SHL/SRA/SRL node V has shift amounts that are all less than the element bit-width of the shift n...
LLVM_ABI SDValue FoldSymbolOffset(unsigned Opcode, EVT VT, const GlobalAddressSDNode *GA, const SDNode *N2)
LLVM_ABI SDValue getIndexedLoad(SDValue OrigLoad, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
LLVM_ABI SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand, SDValue Subreg)
A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
LLVM_ABI SDDbgValue * getDbgValue(DIVariable *Var, DIExpression *Expr, SDNode *N, unsigned R, bool IsIndirect, const DebugLoc &DL, unsigned O)
Creates a SDDbgValue node.
LLVM_ABI SDValue getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Base, SDValue Offset, SDValue Mask, SDValue Src0, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, ISD::LoadExtType, bool IsExpanding=false)
DenormalMode getDenormalMode(EVT VT) const
Return the current function's default denormal handling kind for the given floating point type.
SDValue getSplat(EVT VT, const SDLoc &DL, SDValue Op)
Returns a node representing a splat of one value into all lanes of the provided vector type.
LLVM_ABI std::pair< SDValue, SDValue > SplitScalar(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the scalar node with EXTRACT_ELEMENT using the provided VTs and return the low/high part.
LLVM_ABI SDValue matchBinOpReduction(SDNode *Extract, ISD::NodeType &BinOp, ArrayRef< ISD::NodeType > CandidateBinOps, bool AllowPartials=false)
Match a binop + shuffle pyramid that represents a horizontal reduction over the elements of a vector ...
LLVM_ABI bool isADDLike(SDValue Op, bool NoWrap=false) const
Return true if the specified operand is an ISD::OR or ISD::XOR node that can be treated as an ISD::AD...
LLVM_ABI SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
LLVM_ABI SDValue simplifyShift(SDValue X, SDValue Y)
Try to simplify a shift into 1 of its operands or a constant.
LLVM_ABI void transferDbgValues(SDValue From, SDValue To, unsigned OffsetInBits=0, unsigned SizeInBits=0, bool InvalidateDbg=true)
Transfer debug values from one node to another, while optionally generating fragment expressions for ...
LLVM_ABI SDValue getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a logical NOT operation as (XOR Val, BooleanOne).
LLVM_ABI SDValue getMaskedScatter(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType, bool IsTruncating=false)
ilist< SDNode >::iterator allnodes_iterator
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
int getMaskElt(unsigned Idx) const
ArrayRef< int > getMask() const
static void commuteMask(MutableArrayRef< int > Mask)
Change values in a shuffle permute mask assuming the two vector operands have swapped position.
static LLVM_ABI bool isSplatMask(ArrayRef< int > Mask)
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
bool erase(PtrType Ptr)
Remove pointer from the set.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void assign(size_type NumElts, ValueParamT Elt)
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
iterator erase(const_iterator CI)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class is used to represent ISD::STORE nodes.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition StringRef.h:137
Information about stack frame layout on the target.
virtual TargetStackID::Value getStackIDForScalableVectors() const
Returns the StackID that scalable vectors should be associated with.
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
Completely target-dependent object reference.
unsigned getTargetFlags() const
Provides information about what library functions are available for the current target.
virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const
Return true if it is beneficial to convert a load of a constant to just the constant itself.
const TargetMachine & getTargetMachine() const
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
unsigned getMaxStoresPerMemcpy(bool OptSize) const
Get maximum # of store operations permitted for llvm.memcpy.
unsigned getMaxStoresPerMemset(bool OptSize) const
Get maximum # of store operations permitted for llvm.memset.
virtual bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *=nullptr) const
Determine if the target supports unaligned memory accesses.
virtual bool shallExtractConstSplatVectorElementToStore(Type *VectorTy, unsigned ElemSizeInBits, unsigned &Index) const
Return true if the target shall perform extract vector element and store given that the vector is kno...
virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const
Return true if it's free to truncate a value of type FromTy to type ToTy.
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
BooleanContent
Enum that describes how the target represents true/false values.
virtual unsigned getMaxGluedStoresPerMemcpy() const
Get maximum # of store operations to be glued together.
std::vector< ArgListEntry > ArgListTy
unsigned getMaxStoresPerMemmove(bool OptSize) const
Get maximum # of store operations permitted for llvm.memmove.
virtual bool isLegalStoreImmediate(int64_t Value) const
Return true if the specified immediate is legal for the value input of a store instruction.
static ISD::NodeType getExtendForContent(BooleanContent Content)
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual bool findOptimalMemOpLowering(LLVMContext &Context, std::vector< EVT > &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS, unsigned SrcAS, const AttributeList &FuncAttributes, EVT *LargestVT=nullptr) const
Determines the optimal series of memory ops to replace the memset / memcpy.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
Primary interface to the complete machine description for the target machine.
virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const
Returns true if a cast between SrcAS and DestAS is a noop.
const Triple & getTargetTriple() const
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const SelectionDAGTargetInfo * getSelectionDAGInfo() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
virtual const TargetLowering * getTargetLowering() const
bool isOSDarwin() const
Is this a "Darwin" OS (macOS, iOS, tvOS, watchOS, DriverKit, XROS, or bridgeOS).
Definition Triple.h:644
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition TypeSize.h:343
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:290
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:313
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Definition Type.cpp:286
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
Definition Type.cpp:311
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition Type.cpp:201
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:236
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
LLVM_ABI unsigned getOperandNo() const
Return the operand # of this use in its User.
Definition Use.cpp:35
LLVM_ABI void set(Value *Val)
Definition Value.h:883
User * getUser() const
Returns the User that contains this Use.
Definition Use.h:61
Value * getOperand(unsigned i) const
Definition User.h:207
This class is used to represent an VP_GATHER node.
This class is used to represent a VP_LOAD node.
This class is used to represent an VP_SCATTER node.
This class is used to represent a VP_STORE node.
This class is used to represent an EXPERIMENTAL_VP_STRIDED_LOAD node.
This class is used to represent an EXPERIMENTAL_VP_STRIDED_STORE node.
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:255
std::pair< iterator, bool > insert(const ValueT &V)
Definition DenseSet.h:202
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
Definition DenseSet.h:175
constexpr bool hasKnownScalarFactor(const FixedOrScalableQuantity &RHS) const
Returns true if there exists a value X where RHS.multiplyCoefficientBy(X) will result in a value whos...
Definition TypeSize.h:269
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
static constexpr bool isKnownLE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:230
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
constexpr bool isKnownEven() const
A return value of true indicates we know at compile time that the number of elements (vscale * Min) i...
Definition TypeSize.h:176
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
constexpr LeafTy divideCoefficientBy(ScalarTy RHS) const
We do not provide the '/' operator here because division for polynomial types does not work in the sa...
Definition TypeSize.h:252
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:237
A raw_ostream that writes to an std::string.
CallInst * Call
Changed
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
LLVM_ABI APInt clmulr(const APInt &LHS, const APInt &RHS)
Perform a reversed carry-less multiply.
Definition APInt.cpp:3252
LLVM_ABI APInt mulhu(const APInt &C1, const APInt &C2)
Performs (2*N)-bit multiplication on zero-extended operands.
Definition APInt.cpp:3182
LLVM_ABI APInt avgCeilU(const APInt &C1, const APInt &C2)
Compute the ceil of the unsigned average of C1 and C2.
Definition APInt.cpp:3169
LLVM_ABI APInt avgFloorU(const APInt &C1, const APInt &C2)
Compute the floor of the unsigned average of C1 and C2.
Definition APInt.cpp:3159
LLVM_ABI APInt fshr(const APInt &Hi, const APInt &Lo, const APInt &Shift)
Perform a funnel shift right.
Definition APInt.cpp:3233
LLVM_ABI APInt mulhs(const APInt &C1, const APInt &C2)
Performs (2*N)-bit multiplication on sign-extended operands.
Definition APInt.cpp:3174
LLVM_ABI APInt clmul(const APInt &LHS, const APInt &RHS)
Perform a carry-less multiply, also known as XOR multiplication, and return low-bits.
Definition APInt.cpp:3242
APInt abds(const APInt &A, const APInt &B)
Determine the absolute difference of two APInts considered to be signed.
Definition APInt.h:2297
LLVM_ABI APInt fshl(const APInt &Hi, const APInt &Lo, const APInt &Shift)
Perform a funnel shift left.
Definition APInt.cpp:3224
LLVM_ABI APInt ScaleBitMask(const APInt &A, unsigned NewBitWidth, bool MatchAllBits=false)
Splat/Merge neighboring bits to widen/narrow the bitmask represented by.
Definition APInt.cpp:3060
LLVM_ABI APInt clmulh(const APInt &LHS, const APInt &RHS)
Perform a carry-less multiply, and return high-bits.
Definition APInt.cpp:3257
APInt abdu(const APInt &A, const APInt &B)
Determine the absolute difference of two APInts considered to be unsigned.
Definition APInt.h:2302
LLVM_ABI APInt avgFloorS(const APInt &C1, const APInt &C2)
Compute the floor of the signed average of C1 and C2.
Definition APInt.cpp:3154
LLVM_ABI APInt avgCeilS(const APInt &C1, const APInt &C2)
Compute the ceil of the signed average of C1 and C2.
Definition APInt.cpp:3164
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ Fast
Attempts to make calls as fast as possible (e.g.
Definition CallingConv.h:41
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
LLVM_ABI CondCode getSetCCInverse(CondCode Operation, bool isIntegerLike)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
ISD namespace - This namespace contains an enum which represents all of the SelectionDAG node types a...
Definition ISDOpcodes.h:24
LLVM_ABI CondCode getSetCCAndOperation(CondCode Op1, CondCode Op2, EVT Type)
Return the result of a logical AND between different comparisons of identical values: ((X op1 Y) & (X...
LLVM_ABI bool isConstantSplatVectorAllOnes(const SDNode *N, bool BuildVectorOnly=false)
Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where all of the elements are ~0 ...
bool isNON_EXTLoad(const SDNode *N)
Returns true if the specified node is a non-extending load.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
Definition ISDOpcodes.h:41
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
Definition ISDOpcodes.h:819
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
Definition ISDOpcodes.h:261
@ CTLZ_ZERO_UNDEF
Definition ISDOpcodes.h:788
@ TargetConstantPool
Definition ISDOpcodes.h:189
@ MDNODE_SDNODE
MDNODE_SDNODE - This is a node that holds an MDNode*, which is used to reference metadata in the IR.
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
Definition ISDOpcodes.h:511
@ PTRADD
PTRADD represents pointer arithmetic semantics, for targets that opt in using shouldPreservePtrArith(...
@ DELETED_NODE
DELETED_NODE - This is an illegal value that is used to catch errors.
Definition ISDOpcodes.h:45
@ POISON
POISON - A poison node.
Definition ISDOpcodes.h:236
@ PARTIAL_REDUCE_SMLA
PARTIAL_REDUCE_[U|S]MLA(Accumulator, Input1, Input2) The partial reduction nodes sign or zero extend ...
@ VECREDUCE_SEQ_FADD
Generic reduction nodes.
@ MLOAD
Masked load and store - consecutive vector load and store operations with additional mask operand tha...
@ FGETSIGN
INT = FGETSIGN(FP) - Return the sign bit of the specified floating point value as an integer 0/1 valu...
Definition ISDOpcodes.h:538
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
Definition ISDOpcodes.h:275
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
Definition ISDOpcodes.h:600
@ JUMP_TABLE_DEBUG_INFO
JUMP_TABLE_DEBUG_INFO - Jumptable debug info.
@ BSWAP
Byte Swap and Counting operators.
Definition ISDOpcodes.h:779
@ TargetBlockAddress
Definition ISDOpcodes.h:191
@ DEACTIVATION_SYMBOL
Untyped node storing deactivation symbol reference (DeactivationSymbolSDNode).
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, val, ptr) This corresponds to "store atomic" instruction.
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
Definition ISDOpcodes.h:294
@ FMAD
FMAD - Perform a * b + c, while getting the same result as the separately rounded operations.
Definition ISDOpcodes.h:522
@ ADD
Simple integer binary arithmetic operators.
Definition ISDOpcodes.h:264
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
Definition ISDOpcodes.h:853
@ ATOMIC_LOAD_USUB_COND
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
Definition ISDOpcodes.h:518
@ FATAN2
FATAN2 - atan2, inspired by libm.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
Definition ISDOpcodes.h:220
@ GlobalAddress
Definition ISDOpcodes.h:88
@ ATOMIC_CMP_SWAP_WITH_SUCCESS
Val, Success, OUTCHAIN = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap) N.b.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
Definition ISDOpcodes.h:880
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
Definition ISDOpcodes.h:584
@ VECREDUCE_FMAX
FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
@ FADD
Simple binary floating point operators.
Definition ISDOpcodes.h:417
@ VECREDUCE_FMAXIMUM
FMINIMUM/FMAXIMUM nodes propagate NaNs and signed zeroes using the llvm.minimum and llvm....
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
Definition ISDOpcodes.h:747
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low ...
Definition ISDOpcodes.h:910
@ FP16_TO_FP
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-preci...
@ FMULADD
FMULADD - Performs a * b + c, with, or without, intermediate rounding.
Definition ISDOpcodes.h:528
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition ISDOpcodes.h:993
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
Definition ISDOpcodes.h:254
@ CLMUL
Carry-less multiplication operations.
Definition ISDOpcodes.h:774
@ FLDEXP
FLDEXP - ldexp, inspired by libm (op0 * 2**op1).
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
@ GlobalTLSAddress
Definition ISDOpcodes.h:89
@ SRCVALUE
SRCVALUE - This is a node type that holds a Value* that is used to make reference to a value in the L...
@ EH_LABEL
EH_LABEL - Represents a label in mid basic block used to track locations needed for debug and excepti...
@ ATOMIC_LOAD_USUB_SAT
@ PARTIAL_REDUCE_UMLA
@ SIGN_EXTEND
Conversion operators.
Definition ISDOpcodes.h:844
@ AVGCEILS
AVGCEILS/AVGCEILU - Rounding averaging add - Add two integers using an integer of type i[N+2],...
Definition ISDOpcodes.h:715
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
Definition ISDOpcodes.h:665
@ TargetExternalSymbol
Definition ISDOpcodes.h:190
@ VECREDUCE_FADD
These reductions have relaxed evaluation order semantics, and have a single vector operand.
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
Definition ISDOpcodes.h:787
@ TargetJumpTable
Definition ISDOpcodes.h:188
@ TargetIndex
TargetIndex - Like a constant pool entry, but with completely target-dependent semantics.
Definition ISDOpcodes.h:198
@ PARTIAL_REDUCE_FMLA
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
@ TRUNCATE_SSAT_U
Definition ISDOpcodes.h:873
@ SETCCCARRY
Like SetCC, ops #0 and #1 are the LHS and RHS operands to compare, but op #2 is a boolean indicating ...
Definition ISDOpcodes.h:827
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ BR_CC
BR_CC - Conditional branch.
@ SSUBO
Same for subtraction.
Definition ISDOpcodes.h:352
@ STEP_VECTOR
STEP_VECTOR(IMM) - Returns a scalable vector whose lanes are comprised of a linear sequence of unsign...
Definition ISDOpcodes.h:691
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
Definition ISDOpcodes.h:541
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
Definition ISDOpcodes.h:548
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
Definition ISDOpcodes.h:374
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
Definition ISDOpcodes.h:796
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
@ UNDEF
UNDEF - An undefined node.
Definition ISDOpcodes.h:233
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
Definition ISDOpcodes.h:247
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
Definition ISDOpcodes.h:672
@ AssertAlign
AssertAlign - These nodes record if a register contains a value that has a known alignment and the tr...
Definition ISDOpcodes.h:69
@ GET_ACTIVE_LANE_MASK
GET_ACTIVE_LANE_MASK - this corresponds to the llvm.get.active.lane.mask intrinsic.
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
Definition ISDOpcodes.h:230
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
Definition ISDOpcodes.h:348
@ TargetGlobalAddress
TargetGlobalAddress - Like GlobalAddress, but the DAG does no folding or anything else with this node...
Definition ISDOpcodes.h:185
@ ARITH_FENCE
ARITH_FENCE - This corresponds to an arithmetic fence intrinsic.
@ CTLS
Count leading redundant sign bits.
Definition ISDOpcodes.h:792
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
Definition ISDOpcodes.h:704
@ ATOMIC_LOAD_FMAXIMUM
@ SHL
Shift and rotation operations.
Definition ISDOpcodes.h:765
@ AssertNoFPClass
AssertNoFPClass - These nodes record if a register contains a float value that is known to be not som...
Definition ISDOpcodes.h:78
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
Definition ISDOpcodes.h:649
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
Definition ISDOpcodes.h:614
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or maximumNumber on two values,...
@ EntryToken
EntryToken - This is the marker used to indicate the start of a region.
Definition ISDOpcodes.h:48
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
Definition ISDOpcodes.h:576
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value,...
Definition ISDOpcodes.h:224
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition ISDOpcodes.h:850
@ TargetConstantFP
Definition ISDOpcodes.h:180
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
Definition ISDOpcodes.h:811
@ VSCALE
VSCALE(IMM) - Returns the runtime scaling factor used to calculate the number of elements within a sc...
@ ATOMIC_CMP_SWAP
Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap) For double-word atomic operations: ValLo,...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum maximum on two values, following IEEE-754 definition...
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
Definition ISDOpcodes.h:386
@ SMULO
Same for multiplication.
Definition ISDOpcodes.h:356
@ ATOMIC_LOAD_FMINIMUM
@ TargetFrameIndex
Definition ISDOpcodes.h:187
@ VECTOR_SPLICE_LEFT
VECTOR_SPLICE_LEFT(VEC1, VEC2, OFFSET) - Shifts CONCAT_VECTORS(VEC1, VEC2) left by OFFSET elements an...
Definition ISDOpcodes.h:653
@ ANY_EXTEND_VECTOR_INREG
ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register any-extension of the low la...
Definition ISDOpcodes.h:899
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
Definition ISDOpcodes.h:888
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
Definition ISDOpcodes.h:727
@ MASKED_UDIV
Masked vector arithmetic that returns poison on disabled lanes.
@ LIFETIME_START
This corresponds to the llvm.lifetime.
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
Definition ISDOpcodes.h:978
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
Definition ISDOpcodes.h:805
@ UADDO_CARRY
Carry-using nodes for multiple precision addition and subtraction.
Definition ISDOpcodes.h:328
@ MGATHER
Masked gather and scatter - load and store operations for a vector of random addresses with additiona...
@ HANDLENODE
HANDLENODE node - Used as a handle for various purposes.
@ BF16_TO_FP
BF16_TO_FP, FP_TO_BF16 - These operators are used to perform promotions and truncation for bfloat16.
@ ATOMIC_LOAD_UDEC_WRAP
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
Definition ISDOpcodes.h:500
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition ISDOpcodes.h:926
@ TargetConstant
TargetConstant* - Like Constant*, but the DAG does not do any folding, simplification,...
Definition ISDOpcodes.h:179
@ STRICT_FP_EXTEND
X = STRICT_FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
Definition ISDOpcodes.h:505
@ AND
Bitwise operators - logical and, logical or, logical xor.
Definition ISDOpcodes.h:739
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
Definition ISDOpcodes.h:205
@ GET_FPENV_MEM
Gets the current floating-point environment.
@ PSEUDO_PROBE
Pseudo probe for AutoFDO, as a place holder in a basic block to improve the sample counts quality.
@ SCMP
[US]CMP - 3-way comparison of signed or unsigned integers.
Definition ISDOpcodes.h:735
@ AVGFLOORS
AVGFLOORS/AVGFLOORU - Averaging add - Add two integers using an integer of type i[N+1],...
Definition ISDOpcodes.h:710
@ VECTOR_SPLICE_RIGHT
VECTOR_SPLICE_RIGHT(VEC1, VEC2, OFFSET) - Shifts CONCAT_VECTORS(VEC1,VEC2) right by OFFSET elements a...
Definition ISDOpcodes.h:657
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
Definition ISDOpcodes.h:304
@ SPLAT_VECTOR_PARTS
SPLAT_VECTOR_PARTS(SCALAR1, SCALAR2, ...) - Returns a vector with the scalar values joined together a...
Definition ISDOpcodes.h:681
@ FREEZE
FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF),...
Definition ISDOpcodes.h:241
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
Definition ISDOpcodes.h:565
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
Definition ISDOpcodes.h:53
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
@ ExternalSymbol
Definition ISDOpcodes.h:93
@ FFREXP
FFREXP - frexp, extract fractional and exponent component of a floating-point value.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
Definition ISDOpcodes.h:959
@ VECTOR_COMPRESS
VECTOR_COMPRESS(Vec, Mask, Passthru) consecutively place vector elements based on mask e....
Definition ISDOpcodes.h:699
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low ...
Definition ISDOpcodes.h:921
@ ADDRSPACECAST
ADDRSPACECAST - This operator converts between pointers of different address spaces.
Definition ISDOpcodes.h:997
@ EXPERIMENTAL_VECTOR_HISTOGRAM
Experimental vector histogram intrinsic Operands: Input Chain, Inc, Mask, Base, Index,...
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
Definition ISDOpcodes.h:945
@ VECREDUCE_FMINIMUM
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
Definition ISDOpcodes.h:856
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
@ VECREDUCE_SEQ_FMUL
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
Definition ISDOpcodes.h:833
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
Definition ISDOpcodes.h:62
@ ATOMIC_LOAD_UINC_WRAP
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
Definition ISDOpcodes.h:534
@ PARTIAL_REDUCE_SUMLA
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
Definition ISDOpcodes.h:365
@ SET_FPENV_MEM
Sets the current floating point environment.
@ FMINIMUMNUM
FMINIMUMNUM/FMAXIMUMNUM - minimumnum/maximumnum that is the same as FMINNUM_IEEE and FMAXNUM_IEEE besid...
@ TRUNCATE_SSAT_S
TRUNCATE_[SU]SAT_[SU] - Truncate for saturated operand [SU] located in middle, prefix for SAT means i...
Definition ISDOpcodes.h:871
@ ABDS
ABDS/ABDU - Absolute difference - Return the absolute difference between two numbers interpreted as s...
Definition ISDOpcodes.h:722
@ TRUNCATE_USAT_U
Definition ISDOpcodes.h:875
@ SADDO_CARRY
Carry-using overflow-aware nodes for multiple precision addition and subtraction.
Definition ISDOpcodes.h:338
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
Definition ISDOpcodes.h:213
@ TargetGlobalTLSAddress
Definition ISDOpcodes.h:186
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
Definition ISDOpcodes.h:556
LLVM_ABI NodeType getOppositeSignednessMinMaxOpcode(unsigned MinMaxOpc)
Given a MinMaxOpc of ISD::(U|S)MIN or ISD::(U|S)MAX, returns the corresponding opcode with the opposi...
LLVM_ABI bool isBuildVectorOfConstantSDNodes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR node of all ConstantSDNode or undef.
LLVM_ABI NodeType getExtForLoadExtType(bool IsFP, LoadExtType)
bool isZEXTLoad(const SDNode *N)
Returns true if the specified node is a ZEXTLOAD.
bool isExtOpcode(unsigned Opcode)
LLVM_ABI bool isConstantSplatVectorAllZeros(const SDNode *N, bool BuildVectorOnly=false)
Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where all of the elements are 0 o...
LLVM_ABI NodeType getUnmaskedBinOpOpcode(unsigned MaskedOpc)
Given a MaskedOpc of ISD::MASKED_(U|S)(DIV|REM), returns the unmasked ISD::(U|S)(DIV|REM).
LLVM_ABI bool isVectorShrinkable(const SDNode *N, unsigned NewEltSize, bool Signed)
Returns true if the specified node is a vector where all elements can be truncated to the specified e...
LLVM_ABI bool isVPBinaryOp(unsigned Opcode)
Whether this is a vector-predicated binary operation opcode.
LLVM_ABI CondCode getSetCCInverse(CondCode Operation, EVT Type)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
LLVM_ABI std::optional< unsigned > getBaseOpcodeForVP(unsigned Opcode, bool hasFPExcept)
Translate this VP Opcode to its corresponding non-VP Opcode.
bool isBitwiseLogicOp(unsigned Opcode)
Whether this is bitwise logic opcode.
bool isTrueWhenEqual(CondCode Cond)
Return true if the specified condition returns true if the two operands to the condition are equal.
LLVM_ABI std::optional< unsigned > getVPMaskIdx(unsigned Opcode)
The operand position of the vector mask.
unsigned getUnorderedFlavor(CondCode Cond)
This function returns 0 if the condition is always false if an operand is a NaN, 1 if the condition i...
LLVM_ABI std::optional< unsigned > getVPExplicitVectorLengthIdx(unsigned Opcode)
The operand position of the explicit vector length parameter.
bool isEXTLoad(const SDNode *N)
Returns true if the specified node is a EXTLOAD.
LLVM_ABI bool allOperandsUndef(const SDNode *N)
Return true if the node has at least one operand and all operands of the specified node are ISD::UNDE...
LLVM_ABI bool isFreezeUndef(const SDNode *N)
Return true if the specified node is FREEZE(UNDEF).
LLVM_ABI CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y).
LLVM_ABI std::optional< unsigned > getVPForBaseOpcode(unsigned Opcode)
Translate this non-VP Opcode to its corresponding VP Opcode.
MemIndexType
MemIndexType enum - This enum defines how to interpret MGATHER/SCATTER's index parameter when calcula...
LLVM_ABI bool isBuildVectorAllZeros(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are 0 or undef.
bool matchUnaryPredicateImpl(SDValue Op, std::function< bool(ConstNodeType *)> Match, bool AllowUndefs=false, bool AllowTruncation=false)
Attempt to match a unary predicate against a scalar/splat constant or every element of a constant BUI...
LLVM_ABI bool isConstantSplatVector(const SDNode *N, APInt &SplatValue)
Node predicates.
LLVM_ABI NodeType getInverseMinMaxOpcode(unsigned MinMaxOpc)
Given a MinMaxOpc of ISD::(U|S)MIN or ISD::(U|S)MAX, returns ISD::(U|S)MAX and ISD::(U|S)MIN,...
LLVM_ABI bool matchBinaryPredicate(SDValue LHS, SDValue RHS, std::function< bool(ConstantSDNode *, ConstantSDNode *)> Match, bool AllowUndefs=false, bool AllowTypeMismatch=false)
Attempt to match a binary predicate against a pair of scalar/splat constants or every element of a pa...
LLVM_ABI bool isVPReduction(unsigned Opcode)
Whether this is a vector-predicated reduction opcode.
bool matchUnaryPredicate(SDValue Op, std::function< bool(ConstantSDNode *)> Match, bool AllowUndefs=false, bool AllowTruncation=false)
Hook for matching ConstantSDNode predicate.
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
LLVM_ABI bool isBuildVectorOfConstantFPSDNodes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR node of all ConstantFPSDNode or undef.
bool isSEXTLoad(const SDNode *N)
Returns true if the specified node is a SEXTLOAD.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
LLVM_ABI bool isBuildVectorAllOnes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are ~0 or undef.
LLVM_ABI NodeType getVecReduceBaseOpcode(unsigned VecReduceOpcode)
Get underlying scalar opcode for VECREDUCE opcode.
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
LLVM_ABI bool isVPOpcode(unsigned Opcode)
Whether this is a vector-predicated Opcode.
LLVM_ABI CondCode getSetCCOrOperation(CondCode Op1, CondCode Op2, EVT Type)
Return the result of a logical OR between different comparisons of identical values: ((X op1 Y) | (X ...
BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)
Matches a register negated by a G_SUB.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
match_deferred< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
auto m_Value()
Match an arbitrary value and ignore it.
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
LLVM_ABI Libcall getMEMCPY_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize)
getMEMCPY_ELEMENT_UNORDERED_ATOMIC - Return MEMCPY_ELEMENT_UNORDERED_ATOMIC_* value for the given ele...
LLVM_ABI Libcall getMEMSET_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize)
getMEMSET_ELEMENT_UNORDERED_ATOMIC - Return MEMSET_ELEMENT_UNORDERED_ATOMIC_* value for the given ele...
LLVM_ABI Libcall getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize)
getMEMMOVE_ELEMENT_UNORDERED_ATOMIC - Return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_* value for the given e...
bool sd_match(SDNode *N, const SelectionDAG *DAG, Pattern &&P)
initializer< Ty > init(const Ty &Val)
@ DW_OP_LLVM_arg
Only used in LLVM metadata.
Definition Dwarf.h:149
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:668
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
This is an optimization pass for GlobalISel generic memory operations.
GenericUniformityInfo< SSAContext > UniformityInfo
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
Definition MathExtras.h:344
@ Offset
Definition DWP.cpp:557
bool operator<(int64_t V1, const APSInt &V2)
Definition APSInt.h:360
ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred)
getICmpCondCode - Return the ISD condition code corresponding to the given LLVM IR integer condition ...
Definition Analysis.cpp:237
void fill(R &&Range, T &&Value)
Provide wrappers to std::fill which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1758
LLVM_ABI SDValue peekThroughExtractSubvectors(SDValue V)
Return the non-extracted vector source operand of V if it exists.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1738
MaybeAlign getAlign(const CallInst &I, unsigned Index)
LLVM_ABI bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
LLVM_ABI bool isAllOnesOrAllOnesSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndefs=false)
Return true if the value is a constant -1 integer or a splatted vector of a constant -1 integer (with...
Definition Utils.cpp:1564
LLVM_ABI SDValue getBitwiseNotOperand(SDValue V, SDValue Mask, bool AllowUndefs)
If V is a bitwise not, returns the inverted operand.
@ Undef
Value of the register doesn't matter.
LLVM_ABI SDValue peekThroughBitcasts(SDValue V)
Return the non-bitcasted source operand of V if it exists.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2553
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
Definition bit.h:315
bool isIntOrFPConstant(SDValue V)
Return true if V is either a integer or FP constant.
auto dyn_cast_if_present(const Y &Val)
dyn_cast_if_present<X> - Functionally identical to dyn_cast, except that a null (or none in the case ...
Definition Casting.h:732
LLVM_ABI bool getConstantDataArrayInfo(const Value *V, ConstantDataArraySlice &Slice, unsigned ElementSize, uint64_t Offset=0)
Returns true if the value V is a pointer into a ConstantDataArray.
LLVM_ABI bool isOneOrOneSplatFP(SDValue V, bool AllowUndefs=false)
Return true if the value is a constant floating-point value, or a splatted vector of a constant float...
int bit_width(T Value)
Returns the number of bits needed to represent Value if Value is nonzero.
Definition bit.h:325
LLVM_READONLY APFloat maximum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 maximum semantics.
Definition APFloat.h:1728
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2207
constexpr bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
Definition MathExtras.h:243
LLVM_ABI bool shouldOptimizeForSize(const MachineFunction *MF, ProfileSummaryInfo *PSI, const MachineBlockFrequencyInfo *BFI, PGSOQueryType QueryType=PGSOQueryType::Other)
Returns true if machine function MF is suggested to be size-optimized based on the profile.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:633
auto cast_or_null(const Y &Val)
Definition Casting.h:714
LLVM_ABI bool isNullOrNullSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndefs=false)
Return true if the value is a constant 0 integer or a splatted vector of a constant 0 integer (with n...
Definition Utils.cpp:1546
LLVM_ABI bool isMinSignedConstant(SDValue V)
Returns true if V is a constant min signed integer value.
LLVM_ABI ConstantFPSDNode * isConstOrConstSplatFP(SDValue N, bool AllowUndefs=false)
Returns the SDNode if it is a constant splat BuildVector or constant float.
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
APFloat frexp(const APFloat &X, int &Exp, APFloat::roundingMode RM)
Equivalent of C standard library function.
Definition APFloat.h:1640
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition bit.h:204
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1745
LLVM_ABI bool getShuffleDemandedElts(int SrcWidth, ArrayRef< int > Mask, const APInt &DemandedElts, APInt &DemandedLHS, APInt &DemandedRHS, bool AllowUndefElts=false)
Transform a shuffle mask's output demanded element mask into demanded element masks for the 2 operand...
LLVM_READONLY APFloat maxnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2008 maxNum semantics.
Definition APFloat.h:1683
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition MathExtras.h:331
LLVM_ABI bool isBitwiseNot(SDValue V, bool AllowUndefs=false)
Returns true if V is a bitwise not operation.
LLVM_ABI SDValue peekThroughInsertVectorElt(SDValue V, const APInt &DemandedElts)
Recursively peek through INSERT_VECTOR_ELT nodes, returning the source vector operand of V,...
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:279
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
LLVM_ABI void checkForCycles(const SelectionDAG *DAG, bool force=false)
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1635
LLVM_READONLY APFloat minimumnum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 minimumNumber semantics.
Definition APFloat.h:1714
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI SDValue peekThroughTruncates(SDValue V)
Return the non-truncated source operand of V if it exists.
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1752
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
constexpr std::underlying_type_t< Enum > to_underlying(Enum E)
Returns underlying integer value of an enum.
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
LLVM_ABI SDValue peekThroughOneUseBitcasts(SDValue V)
Return the non-bitcasted and one-use source operand of V if it exists.
CodeGenOptLevel
Code generation optimization level.
Definition CodeGen.h:82
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI bool isOneOrOneSplat(SDValue V, bool AllowUndefs=false)
Return true if the value is a constant 1 integer or a splatted vector of a constant 1 integer (with n...
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
@ Other
Any other memory.
Definition ModRef.h:68
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
Definition ModRef.h:74
LLVM_READONLY APFloat minnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2008 minNum semantics.
Definition APFloat.h:1664
@ Mul
Product of integers.
@ Sub
Subtraction of integers.
LLVM_ABI bool isNullConstantOrUndef(SDValue V)
Returns true if V is a constant integer zero or an UNDEF node.
bool isInTailCallPosition(const CallBase &Call, const TargetMachine &TM, bool ReturnsFirstArg=false)
Test if the given instruction is in a position to be optimized with a tail-call.
Definition Analysis.cpp:539
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI ConstantSDNode * isConstOrConstSplat(SDValue N, bool AllowUndefs=false, bool AllowTruncation=false)
Returns the SDNode if it is a constant splat BuildVector or constant int.
OutputIt copy(R &&Range, OutputIt Out)
Definition STLExtras.h:1884
constexpr unsigned BitWidth
bool funcReturnsFirstArgOfCall(const CallInst &CI)
Returns true if the parent of CI returns CI's first argument after calling CI.
Definition Analysis.cpp:719
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI bool isZeroOrZeroSplat(SDValue N, bool AllowUndefs=false)
Return true if the value is a constant 0 integer or a splatted vector of a constant 0 integer (with n...
LLVM_ABI bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1946
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
Definition Alignment.h:201
LLVM_ABI bool isNullFPConstant(SDValue V)
Returns true if V is an FP constant with a value of positive zero.
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
Definition MathExtras.h:572
unsigned Log2(Align A)
Returns the log2 of the alignment.
Definition Alignment.h:197
LLVM_ABI bool isZeroOrZeroSplatFP(SDValue N, bool AllowUndefs=false)
Return true if the value is a constant (+/-)0.0 floating-point value or a splatted vector thereof (wi...
LLVM_ABI void computeKnownBitsFromRangeMetadata(const MDNode &Ranges, KnownBits &Known)
Compute known bits from the range metadata.
LLVM_READONLY APFloat minimum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 minimum semantics.
Definition APFloat.h:1701
LLVM_READONLY APFloat maximumnum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 maximumNumber semantics.
Definition APFloat.h:1741
LLVM_ABI bool isOnesOrOnesSplat(SDValue N, bool AllowUndefs=false)
Return true if the value is a constant 1 integer or a splatted vector of a constant 1 integer (with n...
LLVM_ABI bool isNeutralConstant(unsigned Opc, SDNodeFlags Flags, SDValue V, unsigned OperandNo)
Returns true if V is a neutral element of Opc with Flags.
LLVM_ABI bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
constexpr uint64_t NextPowerOf2(uint64_t A)
Returns the next power of two (in 64-bits) that is strictly greater than A.
Definition MathExtras.h:373
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
Definition Error.cpp:177
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:872
#define N
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Definition Metadata.h:763
MDNode * TBAAStruct
The tag for type-based alias analysis (tbaa struct).
Definition Metadata.h:783
MDNode * TBAA
The tag for type-based alias analysis.
Definition Metadata.h:780
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
Represents offset+length into a ConstantDataArray.
uint64_t Length
Length of the slice.
uint64_t Offset
Slice starts at this Offset.
void move(uint64_t Delta)
Moves the Offset and adjusts Length accordingly.
const ConstantDataArray * Array
ConstantDataArray pointer.
Extended Value Type.
Definition ValueTypes.h:35
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
Definition ValueTypes.h:403
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
Definition ValueTypes.h:145
intptr_t getRawBits() const
Definition ValueTypes.h:528
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
Definition ValueTypes.h:70
EVT changeTypeToInteger() const
Return the type converted to an equivalently sized integer or vector with integer element type.
Definition ValueTypes.h:129
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
Definition ValueTypes.h:292
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
Definition ValueTypes.h:308
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
Definition ValueTypes.h:155
ElementCount getVectorElementCount() const
Definition ValueTypes.h:358
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition ValueTypes.h:381
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
Definition ValueTypes.h:367
uint64_t getScalarSizeInBits() const
Definition ValueTypes.h:393
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition ValueTypes.h:324
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
Definition ValueTypes.h:61
bool isFixedLengthVector() const
Definition ValueTypes.h:189
bool isVector() const
Return true if this is a vector value type.
Definition ValueTypes.h:176
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
Definition ValueTypes.h:331
bool bitsGE(EVT VT) const
Return true if this has no less bits than VT.
Definition ValueTypes.h:300
bool bitsEq(EVT VT) const
Return true if this has the same number of bits as VT.
Definition ValueTypes.h:264
LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
Definition ValueTypes.h:182
EVT getVectorElementType() const
Given a vector type, return the type of each element.
Definition ValueTypes.h:336
bool isExtended() const
Test if the given EVT is extended (as opposed to being simple).
Definition ValueTypes.h:150
LLVM_ABI const fltSemantics & getFltSemantics() const
Returns an APFloat semantics tag appropriate for the value type.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
Definition ValueTypes.h:344
bool bitsLE(EVT VT) const
Return true if this has no more bits than VT.
Definition ValueTypes.h:316
EVT getHalfNumVectorElementsVT(LLVMContext &Context) const
Definition ValueTypes.h:469
bool isInteger() const
Return true if this is an integer or a vector integer type.
Definition ValueTypes.h:160
static KnownBits makeConstant(const APInt &C)
Create known bits from a known constant.
Definition KnownBits.h:315
LLVM_ABI KnownBits sextInReg(unsigned SrcBitWidth) const
Return known bits for a in-register sign extension of the value we're tracking.
static LLVM_ABI KnownBits mulhu(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits from zero-extended multiply-hi.
unsigned countMinSignBits() const
Returns the number of times the sign bit is replicated into the other bits.
Definition KnownBits.h:269
static LLVM_ABI KnownBits smax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smax(LHS, RHS).
bool isNonNegative() const
Returns true if this value is known to be non-negative.
Definition KnownBits.h:106
bool isZero() const
Returns true if value is all zero.
Definition KnownBits.h:78
void makeNonNegative()
Make this value non-negative.
Definition KnownBits.h:125
static LLVM_ABI KnownBits usub_sat(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from llvm.usub.sat(LHS, RHS)
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
Definition KnownBits.h:256
static LLVM_ABI KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for ashr(LHS, RHS).
static LLVM_ABI KnownBits urem(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for urem(LHS, RHS).
bool isUnknown() const
Returns true if we don't know any bits.
Definition KnownBits.h:64
unsigned countMaxTrailingZeros() const
Returns the maximum number of trailing zero bits possible.
Definition KnownBits.h:288
static LLVM_ABI std::optional< bool > ne(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_NE result.
void makeNegative()
Make this value negative.
Definition KnownBits.h:120
void setAllConflict()
Make all bits known to be both zero and one.
Definition KnownBits.h:97
KnownBits trunc(unsigned BitWidth) const
Return known bits for a truncation of the value we're tracking.
Definition KnownBits.h:165
KnownBits byteSwap() const
Definition KnownBits.h:553
static LLVM_ABI KnownBits fshl(const KnownBits &LHS, const KnownBits &RHS, const APInt &Amt)
Compute known bits for fshl(LHS, RHS, Amt).
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
Definition KnownBits.h:303
void setAllZero()
Make all bits known to be zero and discard any previous information.
Definition KnownBits.h:84
KnownBits reverseBits() const
Definition KnownBits.h:557
KnownBits concat(const KnownBits &Lo) const
Concatenate the bits from Lo onto the bottom of *this.
Definition KnownBits.h:247
unsigned getBitWidth() const
Get the bit width of this value.
Definition KnownBits.h:44
static LLVM_ABI KnownBits umax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umax(LHS, RHS).
KnownBits zext(unsigned BitWidth) const
Return known bits for a zero extension of the value we're tracking.
Definition KnownBits.h:176
void resetAll()
Resets the known state of all bits.
Definition KnownBits.h:72
static KnownBits add(const KnownBits &LHS, const KnownBits &RHS, bool NSW=false, bool NUW=false, bool SelfAdd=false)
Compute knownbits resulting from addition of LHS and RHS.
Definition KnownBits.h:361
static LLVM_ABI KnownBits lshr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for lshr(LHS, RHS).
bool isNonZero() const
Returns true if this value is known to be non-zero.
Definition KnownBits.h:109
static LLVM_ABI KnownBits abdu(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for abdu(LHS, RHS).
KnownBits extractBits(unsigned NumBits, unsigned BitPosition) const
Return a subset of the known bits from [bitPosition,bitPosition+numBits).
Definition KnownBits.h:239
static LLVM_ABI KnownBits avgFloorU(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from APIntOps::avgFloorU.
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
Definition KnownBits.h:325
KnownBits sext(unsigned BitWidth) const
Return known bits for a sign extension of the value we're tracking.
Definition KnownBits.h:184
static LLVM_ABI KnownBits computeForSubBorrow(const KnownBits &LHS, KnownBits RHS, const KnownBits &Borrow)
Compute known bits results from subtracting RHS from LHS with 1-bit Borrow.
KnownBits zextOrTrunc(unsigned BitWidth) const
Return known bits for a zero extension or truncation of the value we're tracking.
Definition KnownBits.h:200
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
Definition KnownBits.h:146
static LLVM_ABI KnownBits fshr(const KnownBits &LHS, const KnownBits &RHS, const APInt &Amt)
Compute known bits for fshr(LHS, RHS, Amt).
static LLVM_ABI KnownBits abds(KnownBits LHS, KnownBits RHS)
Compute known bits for abds(LHS, RHS).
static LLVM_ABI KnownBits smin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smin(LHS, RHS).
static LLVM_ABI KnownBits mulhs(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits from sign-extended multiply-hi.
static LLVM_ABI KnownBits srem(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for srem(LHS, RHS).
static LLVM_ABI KnownBits udiv(const KnownBits &LHS, const KnownBits &RHS, bool Exact=false)
Compute known bits for udiv(LHS, RHS).
bool isStrictlyPositive() const
Returns true if this value is known to be positive.
Definition KnownBits.h:112
static LLVM_ABI KnownBits sdiv(const KnownBits &LHS, const KnownBits &RHS, bool Exact=false)
Compute known bits for sdiv(LHS, RHS).
static LLVM_ABI KnownBits avgFloorS(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from APIntOps::avgFloorS.
static bool haveNoCommonBitsSet(const KnownBits &LHS, const KnownBits &RHS)
Return true if LHS and RHS have no common bits set.
Definition KnownBits.h:340
bool isNegative() const
Returns true if this value is known to be negative.
Definition KnownBits.h:103
LLVM_ABI KnownBits truncSSat(unsigned BitWidth) const
Truncate with signed saturation (signed input -> signed output)
static LLVM_ABI KnownBits computeForAddCarry(const KnownBits &LHS, const KnownBits &RHS, const KnownBits &Carry)
Compute known bits resulting from adding LHS, RHS and a 1-bit Carry.
Definition KnownBits.cpp:54
static KnownBits sub(const KnownBits &LHS, const KnownBits &RHS, bool NSW=false, bool NUW=false)
Compute knownbits resulting from subtraction of LHS and RHS.
Definition KnownBits.h:376
unsigned countMaxLeadingZeros() const
Returns the maximum number of leading zero bits possible.
Definition KnownBits.h:294
void insertBits(const KnownBits &SubBits, unsigned BitPosition)
Insert the bits from a smaller known bits starting at bitPosition.
Definition KnownBits.h:233
static LLVM_ABI KnownBits avgCeilU(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from APIntOps::avgCeilU.
static LLVM_ABI KnownBits mul(const KnownBits &LHS, const KnownBits &RHS, bool NoUndefSelfMultiply=false)
Compute known bits resulting from multiplying LHS and RHS.
KnownBits anyext(unsigned BitWidth) const
Return known bits for an "any" extension of the value we're tracking, where we don't know anything ab...
Definition KnownBits.h:171
static LLVM_ABI KnownBits clmul(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for clmul(LHS, RHS).
LLVM_ABI KnownBits abs(bool IntMinIsPoison=false) const
Compute known bits for the absolute value.
LLVM_ABI KnownBits truncUSat(unsigned BitWidth) const
Truncate with unsigned saturation (unsigned input -> unsigned output)
static LLVM_ABI KnownBits shl(const KnownBits &LHS, const KnownBits &RHS, bool NUW=false, bool NSW=false, bool ShAmtNonZero=false)
Compute known bits for shl(LHS, RHS).
static LLVM_ABI KnownBits umin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umin(LHS, RHS).
LLVM_ABI KnownBits truncSSatU(unsigned BitWidth) const
Truncate with signed saturation to unsigned (signed input -> unsigned output)
static LLVM_ABI KnownBits avgCeilS(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from APIntOps::avgCeilS.
FPClassTest KnownFPClasses
Floating-point classes the value could be one of.
void copysign(const KnownFPClass &Sign)
LLVM_ABI bool isKnownNeverLogicalZero(DenormalMode Mode) const
Return true if it's known this can never be interpreted as a zero.
bool isUnknown() const
KnownFPClass intersectWith(const KnownFPClass &RHS) const
std::optional< bool > SignBit
std::nullopt if the sign bit is unknown, true if the sign bit is definitely set or false if the sign ...
bool isKnownNever(FPClassTest Mask) const
Return true if it's known this can never be one of the mask entries.
static LLVM_ABI KnownFPClass bitcast(const fltSemantics &FltSemantics, const KnownBits &Bits)
Report known values for a bitcast into a float with provided semantics.
This class contains a discriminated union of information about pointers in memory operands,...
LLVM_ABI bool isDereferenceable(unsigned Size, LLVMContext &C, const DataLayout &DL) const
Return true if memory region [V, V+Offset+Size) is known to be dereferenceable.
LLVM_ABI unsigned getAddrSpace() const
Return the LLVM IR address space number that this pointer points into.
PointerUnion< const Value *, const PseudoSourceValue * > V
This is the IR pointer value for the access, or it is null if unknown.
MachinePointerInfo getWithOffset(int64_t O) const
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition Alignment.h:106
static MemOp Set(uint64_t Size, bool DstAlignCanChange, Align DstAlign, bool IsZeroMemset, bool IsVolatile)
static MemOp Copy(uint64_t Size, bool DstAlignCanChange, Align DstAlign, Align SrcAlign, bool IsVolatile, bool MemcpyStrSrc=false)
static StringRef getLibcallImplName(RTLIB::LibcallImpl CallImpl)
Get the libcall routine name for the specified libcall implementation.
These are IR-level optimization flags that may be propagated to SDNodes.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
unsigned int NumVTs
Clients of various APIs that cause global effects on the DAG can optionally implement this interface.
virtual void NodeDeleted(SDNode *N, SDNode *E)
The node N that was deleted and, if E is not null, an equivalent node E that replaced it.
virtual void NodeInserted(SDNode *N)
The node N that was inserted.
virtual void NodeUpdated(SDNode *N)
The node N that was updated.
This structure contains all information that is necessary for lowering calls.
CallLoweringInfo & setLibCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList)
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setTailCall(bool Value=true)
CallLoweringInfo & setChain(SDValue InChain)