Bug Summary

File: llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
Warning: line 7292, column 28
Called C++ object pointer is null

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name DAGCombiner.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -fhalf-no-semantic-interposition -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210506100649+6304c0836a4d/build-llvm/lib/CodeGen/SelectionDAG -resource-dir /usr/lib/llvm-13/lib/clang/13.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-13~++20210506100649+6304c0836a4d/build-llvm/lib/CodeGen/SelectionDAG -I /build/llvm-toolchain-snapshot-13~++20210506100649+6304c0836a4d/llvm/lib/CodeGen/SelectionDAG -I /build/llvm-toolchain-snapshot-13~++20210506100649+6304c0836a4d/build-llvm/include -I /build/llvm-toolchain-snapshot-13~++20210506100649+6304c0836a4d/llvm/include -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-13/lib/clang/13.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210506100649+6304c0836a4d/build-llvm/lib/CodeGen/SelectionDAG -fdebug-prefix-map=/build/llvm-toolchain-snapshot-13~++20210506100649+6304c0836a4d=. -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-05-07-005843-9350-1 -x c++ /build/llvm-toolchain-snapshot-13~++20210506100649+6304c0836a4d/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
1//===- DAGCombiner.cpp - Implement a DAG node combiner --------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass combines dag nodes to form fewer, simpler DAG nodes. It can be run
10// both before and after the DAG is legalized.
11//
12// This pass is not a substitute for the LLVM IR instcombine pass. This pass is
13// primarily intended to handle simplification opportunities that are implicit
14// in the LLVM IR and exposed by the various codegen lowering phases.
15//
16//===----------------------------------------------------------------------===//
17
18#include "llvm/ADT/APFloat.h"
19#include "llvm/ADT/APInt.h"
20#include "llvm/ADT/ArrayRef.h"
21#include "llvm/ADT/DenseMap.h"
22#include "llvm/ADT/IntervalMap.h"
23#include "llvm/ADT/None.h"
24#include "llvm/ADT/Optional.h"
25#include "llvm/ADT/STLExtras.h"
26#include "llvm/ADT/SetVector.h"
27#include "llvm/ADT/SmallBitVector.h"
28#include "llvm/ADT/SmallPtrSet.h"
29#include "llvm/ADT/SmallSet.h"
30#include "llvm/ADT/SmallVector.h"
31#include "llvm/ADT/Statistic.h"
32#include "llvm/Analysis/AliasAnalysis.h"
33#include "llvm/Analysis/MemoryLocation.h"
34#include "llvm/Analysis/TargetLibraryInfo.h"
35#include "llvm/Analysis/VectorUtils.h"
36#include "llvm/CodeGen/DAGCombine.h"
37#include "llvm/CodeGen/ISDOpcodes.h"
38#include "llvm/CodeGen/MachineFrameInfo.h"
39#include "llvm/CodeGen/MachineFunction.h"
40#include "llvm/CodeGen/MachineMemOperand.h"
41#include "llvm/CodeGen/RuntimeLibcalls.h"
42#include "llvm/CodeGen/SelectionDAG.h"
43#include "llvm/CodeGen/SelectionDAGAddressAnalysis.h"
44#include "llvm/CodeGen/SelectionDAGNodes.h"
45#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
46#include "llvm/CodeGen/TargetLowering.h"
47#include "llvm/CodeGen/TargetRegisterInfo.h"
48#include "llvm/CodeGen/TargetSubtargetInfo.h"
49#include "llvm/CodeGen/ValueTypes.h"
50#include "llvm/IR/Attributes.h"
51#include "llvm/IR/Constant.h"
52#include "llvm/IR/DataLayout.h"
53#include "llvm/IR/DerivedTypes.h"
54#include "llvm/IR/Function.h"
55#include "llvm/IR/LLVMContext.h"
56#include "llvm/IR/Metadata.h"
57#include "llvm/Support/Casting.h"
58#include "llvm/Support/CodeGen.h"
59#include "llvm/Support/CommandLine.h"
60#include "llvm/Support/Compiler.h"
61#include "llvm/Support/Debug.h"
62#include "llvm/Support/ErrorHandling.h"
63#include "llvm/Support/KnownBits.h"
64#include "llvm/Support/MachineValueType.h"
65#include "llvm/Support/MathExtras.h"
66#include "llvm/Support/raw_ostream.h"
67#include "llvm/Target/TargetMachine.h"
68#include "llvm/Target/TargetOptions.h"
69#include <algorithm>
70#include <cassert>
71#include <cstdint>
72#include <functional>
73#include <iterator>
74#include <string>
75#include <tuple>
76#include <utility>
77
78using namespace llvm;
79
80#define DEBUG_TYPE "dagcombine"
81
82STATISTIC(NodesCombined , "Number of dag nodes combined");
83STATISTIC(PreIndexedNodes , "Number of pre-indexed nodes created");
84STATISTIC(PostIndexedNodes, "Number of post-indexed nodes created");
85STATISTIC(OpsNarrowed , "Number of load/op/store narrowed");
86STATISTIC(LdStFP2Int , "Number of fp load/store pairs transformed to int");
87STATISTIC(SlicedLoads, "Number of load sliced");
88STATISTIC(NumFPLogicOpsConv, "Number of logic ops converted to fp ops");
89
90static cl::opt<bool>
91CombinerGlobalAA("combiner-global-alias-analysis", cl::Hidden,
92 cl::desc("Enable DAG combiner's use of IR alias analysis"));
93
94static cl::opt<bool>
95UseTBAA("combiner-use-tbaa", cl::Hidden, cl::init(true),
96 cl::desc("Enable DAG combiner's use of TBAA"));
97
98#ifndef NDEBUG
99static cl::opt<std::string>
100CombinerAAOnlyFunc("combiner-aa-only-func", cl::Hidden,
101 cl::desc("Only use DAG-combiner alias analysis in this"
102 " function"));
103#endif
104
105/// Hidden option to stress test load slicing, i.e., when this option
106/// is enabled, load slicing bypasses most of its profitability guards.
107static cl::opt<bool>
108StressLoadSlicing("combiner-stress-load-slicing", cl::Hidden,
109 cl::desc("Bypass the profitability model of load slicing"),
110 cl::init(false));
111
112static cl::opt<bool>
113 MaySplitLoadIndex("combiner-split-load-index", cl::Hidden, cl::init(true),
114 cl::desc("DAG combiner may split indexing from loads"));
115
116static cl::opt<bool>
117 EnableStoreMerging("combiner-store-merging", cl::Hidden, cl::init(true),
118 cl::desc("DAG combiner enable merging multiple stores "
119 "into a wider store"));
120
121static cl::opt<unsigned> TokenFactorInlineLimit(
122 "combiner-tokenfactor-inline-limit", cl::Hidden, cl::init(2048),
123 cl::desc("Limit the number of operands to inline for Token Factors"));
124
125static cl::opt<unsigned> StoreMergeDependenceLimit(
126 "combiner-store-merge-dependence-limit", cl::Hidden, cl::init(10),
127 cl::desc("Limit the number of times for the same StoreNode and RootNode "
128 "to bail out in store merging dependence check"));
129
130static cl::opt<bool> EnableReduceLoadOpStoreWidth(
131 "combiner-reduce-load-op-store-width", cl::Hidden, cl::init(true),
132 cl::desc("DAG combiner enable reducing the width of load/op/store "
133 "sequence"));
134
135static cl::opt<bool> EnableShrinkLoadReplaceStoreWithStore(
136 "combiner-shrink-load-replace-store-with-store", cl::Hidden, cl::init(true),
137 cl::desc("DAG combiner enable load/<replace bytes>/store with "
138 "a narrower store"));
139
140namespace {
141
142 class DAGCombiner {
143 SelectionDAG &DAG;
144 const TargetLowering &TLI;
145 const SelectionDAGTargetInfo *STI;
146 CombineLevel Level;
147 CodeGenOpt::Level OptLevel;
148 bool LegalDAG = false;
149 bool LegalOperations = false;
150 bool LegalTypes = false;
151 bool ForCodeSize;
152 bool DisableGenericCombines;
153
154 /// Worklist of all of the nodes that need to be simplified.
155 ///
156 /// This must behave as a stack -- new nodes to process are pushed onto the
157 /// back and when processing we pop off of the back.
158 ///
159 /// The worklist will not contain duplicates but may contain null entries
160 /// due to nodes being deleted from the underlying DAG.
161 SmallVector<SDNode *, 64> Worklist;
162
163 /// Mapping from an SDNode to its position on the worklist.
164 ///
165 /// This is used to find and remove nodes from the worklist (by nulling
166 /// them) when they are deleted from the underlying DAG. It relies on
167 /// stable indices of nodes within the worklist.
168 DenseMap<SDNode *, unsigned> WorklistMap;
169 /// This records all nodes attempted to be added to the worklist since we
170 /// last considered a new worklist entry. Since we do not add duplicate
171 /// nodes to the worklist, this is different from the tail of the worklist.
172 SmallSetVector<SDNode *, 32> PruningList;
173
174 /// Set of nodes which have been combined (at least once).
175 ///
176 /// This is used to allow us to reliably add any operands of a DAG node
177 /// which have not yet been combined to the worklist.
178 SmallPtrSet<SDNode *, 32> CombinedNodes;
179
180 /// Map from candidate StoreNode to the pair of RootNode and count.
181 /// The count is used to track how many times we have seen the StoreNode
182 /// with the same RootNode bail out in the dependence check. If we have
183 /// seen the bail-out for the same pair more than a set limit, we won't
184 /// consider the StoreNode with the same RootNode as a store merging
185 /// candidate again.
186 DenseMap<SDNode *, std::pair<SDNode *, unsigned>> StoreRootCountMap;
187
188 // AA - Used for DAG load/store alias analysis.
189 AliasAnalysis *AA;
190
191 /// When an instruction is simplified, add all users of the instruction to
192 /// the work lists because they might get more simplified now.
193 void AddUsersToWorklist(SDNode *N) {
194 for (SDNode *Node : N->uses())
195 AddToWorklist(Node);
196 }
197
198 /// Convenient shorthand to add a node and all of its users to the worklist.
199 void AddToWorklistWithUsers(SDNode *N) {
200 AddUsersToWorklist(N);
201 AddToWorklist(N);
202 }
203
204 // Prune potentially dangling nodes. This is called after
205 // any visit to a node, but should also be called during a visit after any
206 // failed combine which may have created a DAG node.
207 void clearAddedDanglingWorklistEntries() {
208 // Check any nodes added to the worklist to see if they are prunable.
209 while (!PruningList.empty()) {
210 auto *N = PruningList.pop_back_val();
211 if (N->use_empty())
212 recursivelyDeleteUnusedNodes(N);
213 }
214 }
215
216 SDNode *getNextWorklistEntry() {
217 // Before we do any work, remove nodes that are not in use.
218 clearAddedDanglingWorklistEntries();
219 SDNode *N = nullptr;
220 // The Worklist holds the SDNodes in order, but it may contain null
221 // entries.
222 while (!N && !Worklist.empty()) {
223 N = Worklist.pop_back_val();
224 }
225
226 if (N) {
227 bool GoodWorklistEntry = WorklistMap.erase(N);
228 (void)GoodWorklistEntry;
229 assert(GoodWorklistEntry &&
230 "Found a worklist entry without a corresponding map entry!");
231 }
232 return N;
233 }
234
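[Editor's note] The worklist machinery above pairs a stack with a node-to-index map so removal is O(1): a removed node is nulled in place and skipped when popping. Below is a minimal standalone sketch of that pattern, offered as an illustration only: it uses standard containers instead of LLVM's SmallVector/DenseMap, and the TombstoneWorklist name is invented for this note.

#include <cstddef>
#include <unordered_map>
#include <vector>

template <typename T> class TombstoneWorklist {
  std::vector<T *> Stack;                       // may contain null tombstones
  std::unordered_map<T *, std::size_t> IndexOf; // element -> stable stack index

public:
  void push(T *N) {
    // Only queue N once; emplace fails if N is already present.
    if (IndexOf.emplace(N, Stack.size()).second)
      Stack.push_back(N);
  }

  void remove(T *N) {
    auto It = IndexOf.find(N);
    if (It == IndexOf.end())
      return;                    // not queued
    Stack[It->second] = nullptr; // tombstone: O(1) instead of O(n) erase
    IndexOf.erase(It);
  }

  T *pop() {
    T *N = nullptr;
    while (!N && !Stack.empty()) { // skip tombstones left by remove()
      N = Stack.back();
      Stack.pop_back();
    }
    if (N)
      IndexOf.erase(N);
    return N;
  }
};
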
235 /// Call the node-specific routine that folds each particular type of node.
236 SDValue visit(SDNode *N);
237
238 public:
239 DAGCombiner(SelectionDAG &D, AliasAnalysis *AA, CodeGenOpt::Level OL)
240 : DAG(D), TLI(D.getTargetLoweringInfo()),
241 STI(D.getSubtarget().getSelectionDAGInfo()),
242 Level(BeforeLegalizeTypes), OptLevel(OL), AA(AA) {
243 ForCodeSize = DAG.shouldOptForSize();
244 DisableGenericCombines = STI && STI->disableGenericCombines(OptLevel);
245
246 MaximumLegalStoreInBits = 0;
247 // We use the minimum store size here, since that's all we can guarantee
248 // for the scalable vector types.
249 for (MVT VT : MVT::all_valuetypes())
250 if (EVT(VT).isSimple() && VT != MVT::Other &&
251 TLI.isTypeLegal(EVT(VT)) &&
252 VT.getSizeInBits().getKnownMinSize() >= MaximumLegalStoreInBits)
253 MaximumLegalStoreInBits = VT.getSizeInBits().getKnownMinSize();
254 }
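
[Editor's note] A hedged worked instance of the comment above: for a scalable type such as nxv4i32 the size is "vscale x 128" bits, where vscale is unknown until runtime, so only the known minimum of 128 bits can be guaranteed. Tracking getKnownMinSize() therefore yields a MaximumLegalStoreInBits that is safe for both fixed and scalable vector types.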
255
256 void ConsiderForPruning(SDNode *N) {
257 // Mark this for potential pruning.
258 PruningList.insert(N);
259 }
260
261 /// Add to the worklist making sure its instance is at the back (next to be
262 /// processed).
263 void AddToWorklist(SDNode *N) {
264 assert(N->getOpcode() != ISD::DELETED_NODE &&
265 "Deleted Node added to Worklist");
266
267 // Skip handle nodes as they can't usefully be combined and confuse the
268 // zero-use deletion strategy.
269 if (N->getOpcode() == ISD::HANDLENODE)
270 return;
271
272 ConsiderForPruning(N);
273
274 if (WorklistMap.insert(std::make_pair(N, Worklist.size())).second)
275 Worklist.push_back(N);
276 }
277
278 /// Remove all instances of N from the worklist.
279 void removeFromWorklist(SDNode *N) {
280 CombinedNodes.erase(N);
281 PruningList.remove(N);
282 StoreRootCountMap.erase(N);
283
284 auto It = WorklistMap.find(N);
285 if (It == WorklistMap.end())
286 return; // Not in the worklist.
287
288 // Null out the entry rather than erasing it to avoid a linear operation.
289 Worklist[It->second] = nullptr;
290 WorklistMap.erase(It);
291 }
292
293 void deleteAndRecombine(SDNode *N);
294 bool recursivelyDeleteUnusedNodes(SDNode *N);
295
296 /// Replaces all uses of the results of one DAG node with new values.
297 SDValue CombineTo(SDNode *N, const SDValue *To, unsigned NumTo,
298 bool AddTo = true);
299
300 /// Replaces all uses of the results of one DAG node with new values.
301 SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true) {
302 return CombineTo(N, &Res, 1, AddTo);
303 }
304
305 /// Replaces all uses of the results of one DAG node with new values.
306 SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1,
307 bool AddTo = true) {
308 SDValue To[] = { Res0, Res1 };
309 return CombineTo(N, To, 2, AddTo);
310 }
311
312 void CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO);
313
314 private:
315 unsigned MaximumLegalStoreInBits;
316
317 /// Check the specified integer node value to see if it can be simplified or
318 /// if things it uses can be simplified by bit propagation.
319 /// If so, return true.
320 bool SimplifyDemandedBits(SDValue Op) {
321 unsigned BitWidth = Op.getScalarValueSizeInBits();
322 APInt DemandedBits = APInt::getAllOnesValue(BitWidth);
323 return SimplifyDemandedBits(Op, DemandedBits);
324 }
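
[Editor's note] A hedged usage sketch of the demanded-bits interface; the mask values here are illustrative, not from this file. The wrapper above demands every bit, but a caller that only needs the low byte of an i32 could pass a narrower mask so producers of the dead upper bits can be simplified away:

  APInt AllBits = APInt::getAllOnesValue(32);  // 0xFFFFFFFF: demand everything
  APInt LowByte = APInt::getLowBitsSet(32, 8); // 0x000000FF: demand low byte
  SimplifyDemandedBits(Op, LowByte);           // upper 24 bits treated as dead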
325
326 bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits) {
327 TargetLowering::TargetLoweringOpt TLO(DAG, LegalTypes, LegalOperations);
328 KnownBits Known;
329 if (!TLI.SimplifyDemandedBits(Op, DemandedBits, Known, TLO, 0, false))
330 return false;
331
332 // Revisit the node.
333 AddToWorklist(Op.getNode());
334
335 CommitTargetLoweringOpt(TLO);
336 return true;
337 }
338
339 /// Check the specified vector node value to see if it can be simplified or
340 /// if things it uses can be simplified as it only uses some of the
341 /// elements. If so, return true.
342 bool SimplifyDemandedVectorElts(SDValue Op) {
343 // TODO: For now just pretend it cannot be simplified.
344 if (Op.getValueType().isScalableVector())
345 return false;
346
347 unsigned NumElts = Op.getValueType().getVectorNumElements();
348 APInt DemandedElts = APInt::getAllOnesValue(NumElts);
349 return SimplifyDemandedVectorElts(Op, DemandedElts);
350 }
351
352 bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
353 const APInt &DemandedElts,
354 bool AssumeSingleUse = false);
355 bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedElts,
356 bool AssumeSingleUse = false);
357
358 bool CombineToPreIndexedLoadStore(SDNode *N);
359 bool CombineToPostIndexedLoadStore(SDNode *N);
360 SDValue SplitIndexingFromLoad(LoadSDNode *LD);
361 bool SliceUpLoad(SDNode *N);
362
363 // Scalars have size 0 to distinguish from singleton vectors.
364 SDValue ForwardStoreValueToDirectLoad(LoadSDNode *LD);
365 bool getTruncatedStoreValue(StoreSDNode *ST, SDValue &Val);
366 bool extendLoadedValueToExtension(LoadSDNode *LD, SDValue &Val);
367
368 /// Replace an ISD::EXTRACT_VECTOR_ELT of a load with a narrowed
369 /// load.
370 ///
371 /// \param EVE ISD::EXTRACT_VECTOR_ELT to be replaced.
372 /// \param InVecVT type of the input vector to EVE with bitcasts resolved.
373 /// \param EltNo index of the vector element to load.
374 /// \param OriginalLoad load that EVE came from to be replaced.
375 /// \returns EVE on success SDValue() on failure.
376 SDValue scalarizeExtractedVectorLoad(SDNode *EVE, EVT InVecVT,
377 SDValue EltNo,
378 LoadSDNode *OriginalLoad);
379 void ReplaceLoadWithPromotedLoad(SDNode *Load, SDNode *ExtLoad);
380 SDValue PromoteOperand(SDValue Op, EVT PVT, bool &Replace);
381 SDValue SExtPromoteOperand(SDValue Op, EVT PVT);
382 SDValue ZExtPromoteOperand(SDValue Op, EVT PVT);
383 SDValue PromoteIntBinOp(SDValue Op);
384 SDValue PromoteIntShiftOp(SDValue Op);
385 SDValue PromoteExtend(SDValue Op);
386 bool PromoteLoad(SDValue Op);
387
388 /// Call the node-specific routine that knows how to fold each
389 /// particular type of node. If that doesn't do anything, try the
390 /// target-specific DAG combines.
391 SDValue combine(SDNode *N);
392
393 // Visitation implementation - Implement dag node combining for different
394 // node types. The semantics are as follows:
395 // Return Value:
396 // SDValue.getNode() == 0 - No change was made
397 // SDValue.getNode() == N - N was replaced, is dead and has been handled.
398 // otherwise - N should be replaced by the returned Operand.
399 //
400 SDValue visitTokenFactor(SDNode *N);
401 SDValue visitMERGE_VALUES(SDNode *N);
402 SDValue visitADD(SDNode *N);
403 SDValue visitADDLike(SDNode *N);
404 SDValue visitADDLikeCommutative(SDValue N0, SDValue N1, SDNode *LocReference);
405 SDValue visitSUB(SDNode *N);
406 SDValue visitADDSAT(SDNode *N);
407 SDValue visitSUBSAT(SDNode *N);
408 SDValue visitADDC(SDNode *N);
409 SDValue visitADDO(SDNode *N);
410 SDValue visitUADDOLike(SDValue N0, SDValue N1, SDNode *N);
411 SDValue visitSUBC(SDNode *N);
412 SDValue visitSUBO(SDNode *N);
413 SDValue visitADDE(SDNode *N);
414 SDValue visitADDCARRY(SDNode *N);
415 SDValue visitSADDO_CARRY(SDNode *N);
416 SDValue visitADDCARRYLike(SDValue N0, SDValue N1, SDValue CarryIn, SDNode *N);
417 SDValue visitSUBE(SDNode *N);
418 SDValue visitSUBCARRY(SDNode *N);
419 SDValue visitSSUBO_CARRY(SDNode *N);
420 SDValue visitMUL(SDNode *N);
421 SDValue visitMULFIX(SDNode *N);
422 SDValue useDivRem(SDNode *N);
423 SDValue visitSDIV(SDNode *N);
424 SDValue visitSDIVLike(SDValue N0, SDValue N1, SDNode *N);
425 SDValue visitUDIV(SDNode *N);
426 SDValue visitUDIVLike(SDValue N0, SDValue N1, SDNode *N);
427 SDValue visitREM(SDNode *N);
428 SDValue visitMULHU(SDNode *N);
429 SDValue visitMULHS(SDNode *N);
430 SDValue visitSMUL_LOHI(SDNode *N);
431 SDValue visitUMUL_LOHI(SDNode *N);
432 SDValue visitMULO(SDNode *N);
433 SDValue visitIMINMAX(SDNode *N);
434 SDValue visitAND(SDNode *N);
435 SDValue visitANDLike(SDValue N0, SDValue N1, SDNode *N);
436 SDValue visitOR(SDNode *N);
437 SDValue visitORLike(SDValue N0, SDValue N1, SDNode *N);
438 SDValue visitXOR(SDNode *N);
439 SDValue SimplifyVBinOp(SDNode *N);
440 SDValue visitSHL(SDNode *N);
441 SDValue visitSRA(SDNode *N);
442 SDValue visitSRL(SDNode *N);
443 SDValue visitFunnelShift(SDNode *N);
444 SDValue visitRotate(SDNode *N);
445 SDValue visitABS(SDNode *N);
446 SDValue visitBSWAP(SDNode *N);
447 SDValue visitBITREVERSE(SDNode *N);
448 SDValue visitCTLZ(SDNode *N);
449 SDValue visitCTLZ_ZERO_UNDEF(SDNode *N);
450 SDValue visitCTTZ(SDNode *N);
451 SDValue visitCTTZ_ZERO_UNDEF(SDNode *N);
452 SDValue visitCTPOP(SDNode *N);
453 SDValue visitSELECT(SDNode *N);
454 SDValue visitVSELECT(SDNode *N);
455 SDValue visitSELECT_CC(SDNode *N);
456 SDValue visitSETCC(SDNode *N);
457 SDValue visitSETCCCARRY(SDNode *N);
458 SDValue visitSIGN_EXTEND(SDNode *N);
459 SDValue visitZERO_EXTEND(SDNode *N);
460 SDValue visitANY_EXTEND(SDNode *N);
461 SDValue visitAssertExt(SDNode *N);
462 SDValue visitAssertAlign(SDNode *N);
463 SDValue visitSIGN_EXTEND_INREG(SDNode *N);
464 SDValue visitSIGN_EXTEND_VECTOR_INREG(SDNode *N);
465 SDValue visitZERO_EXTEND_VECTOR_INREG(SDNode *N);
466 SDValue visitTRUNCATE(SDNode *N);
467 SDValue visitBITCAST(SDNode *N);
468 SDValue visitFREEZE(SDNode *N);
469 SDValue visitBUILD_PAIR(SDNode *N);
470 SDValue visitFADD(SDNode *N);
471 SDValue visitSTRICT_FADD(SDNode *N);
472 SDValue visitFSUB(SDNode *N);
473 SDValue visitFMUL(SDNode *N);
474 SDValue visitFMA(SDNode *N);
475 SDValue visitFDIV(SDNode *N);
476 SDValue visitFREM(SDNode *N);
477 SDValue visitFSQRT(SDNode *N);
478 SDValue visitFCOPYSIGN(SDNode *N);
479 SDValue visitFPOW(SDNode *N);
480 SDValue visitSINT_TO_FP(SDNode *N);
481 SDValue visitUINT_TO_FP(SDNode *N);
482 SDValue visitFP_TO_SINT(SDNode *N);
483 SDValue visitFP_TO_UINT(SDNode *N);
484 SDValue visitFP_ROUND(SDNode *N);
485 SDValue visitFP_EXTEND(SDNode *N);
486 SDValue visitFNEG(SDNode *N);
487 SDValue visitFABS(SDNode *N);
488 SDValue visitFCEIL(SDNode *N);
489 SDValue visitFTRUNC(SDNode *N);
490 SDValue visitFFLOOR(SDNode *N);
491 SDValue visitFMINNUM(SDNode *N);
492 SDValue visitFMAXNUM(SDNode *N);
493 SDValue visitFMINIMUM(SDNode *N);
494 SDValue visitFMAXIMUM(SDNode *N);
495 SDValue visitBRCOND(SDNode *N);
496 SDValue visitBR_CC(SDNode *N);
497 SDValue visitLOAD(SDNode *N);
498
499 SDValue replaceStoreChain(StoreSDNode *ST, SDValue BetterChain);
500 SDValue replaceStoreOfFPConstant(StoreSDNode *ST);
501
502 SDValue visitSTORE(SDNode *N);
503 SDValue visitLIFETIME_END(SDNode *N);
504 SDValue visitINSERT_VECTOR_ELT(SDNode *N);
505 SDValue visitEXTRACT_VECTOR_ELT(SDNode *N);
506 SDValue visitBUILD_VECTOR(SDNode *N);
507 SDValue visitCONCAT_VECTORS(SDNode *N);
508 SDValue visitEXTRACT_SUBVECTOR(SDNode *N);
509 SDValue visitVECTOR_SHUFFLE(SDNode *N);
510 SDValue visitSCALAR_TO_VECTOR(SDNode *N);
511 SDValue visitINSERT_SUBVECTOR(SDNode *N);
512 SDValue visitMLOAD(SDNode *N);
513 SDValue visitMSTORE(SDNode *N);
514 SDValue visitMGATHER(SDNode *N);
515 SDValue visitMSCATTER(SDNode *N);
516 SDValue visitFP_TO_FP16(SDNode *N);
517 SDValue visitFP16_TO_FP(SDNode *N);
518 SDValue visitVECREDUCE(SDNode *N);
519
520 SDValue visitFADDForFMACombine(SDNode *N);
521 SDValue visitFSUBForFMACombine(SDNode *N);
522 SDValue visitFMULForFMADistributiveCombine(SDNode *N);
523
524 SDValue XformToShuffleWithZero(SDNode *N);
525 bool reassociationCanBreakAddressingModePattern(unsigned Opc,
526 const SDLoc &DL, SDValue N0,
527 SDValue N1);
528 SDValue reassociateOpsCommutative(unsigned Opc, const SDLoc &DL, SDValue N0,
529 SDValue N1);
530 SDValue reassociateOps(unsigned Opc, const SDLoc &DL, SDValue N0,
531 SDValue N1, SDNodeFlags Flags);
532
533 SDValue visitShiftByConstant(SDNode *N);
534
535 SDValue foldSelectOfConstants(SDNode *N);
536 SDValue foldVSelectOfConstants(SDNode *N);
537 SDValue foldBinOpIntoSelect(SDNode *BO);
538 bool SimplifySelectOps(SDNode *SELECT, SDValue LHS, SDValue RHS);
539 SDValue hoistLogicOpWithSameOpcodeHands(SDNode *N);
540 SDValue SimplifySelect(const SDLoc &DL, SDValue N0, SDValue N1, SDValue N2);
541 SDValue SimplifySelectCC(const SDLoc &DL, SDValue N0, SDValue N1,
542 SDValue N2, SDValue N3, ISD::CondCode CC,
543 bool NotExtCompare = false);
544 SDValue convertSelectOfFPConstantsToLoadOffset(
545 const SDLoc &DL, SDValue N0, SDValue N1, SDValue N2, SDValue N3,
546 ISD::CondCode CC);
547 SDValue foldSignChangeInBitcast(SDNode *N);
548 SDValue foldSelectCCToShiftAnd(const SDLoc &DL, SDValue N0, SDValue N1,
549 SDValue N2, SDValue N3, ISD::CondCode CC);
550 SDValue foldLogicOfSetCCs(bool IsAnd, SDValue N0, SDValue N1,
551 const SDLoc &DL);
552 SDValue foldSubToUSubSat(EVT DstVT, SDNode *N);
553 SDValue unfoldMaskedMerge(SDNode *N);
554 SDValue unfoldExtremeBitClearingToShifts(SDNode *N);
555 SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
556 const SDLoc &DL, bool foldBooleans);
557 SDValue rebuildSetCC(SDValue N);
558
559 bool isSetCCEquivalent(SDValue N, SDValue &LHS, SDValue &RHS,
560 SDValue &CC, bool MatchStrict = false) const;
561 bool isOneUseSetCC(SDValue N) const;
562
563 SDValue SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp,
564 unsigned HiOp);
565 SDValue CombineConsecutiveLoads(SDNode *N, EVT VT);
566 SDValue CombineExtLoad(SDNode *N);
567 SDValue CombineZExtLogicopShiftLoad(SDNode *N);
568 SDValue combineRepeatedFPDivisors(SDNode *N);
569 SDValue combineInsertEltToShuffle(SDNode *N, unsigned InsIndex);
570 SDValue ConstantFoldBITCASTofBUILD_VECTOR(SDNode *, EVT);
571 SDValue BuildSDIV(SDNode *N);
572 SDValue BuildSDIVPow2(SDNode *N);
573 SDValue BuildUDIV(SDNode *N);
574 SDValue BuildLogBase2(SDValue V, const SDLoc &DL);
575 SDValue BuildDivEstimate(SDValue N, SDValue Op, SDNodeFlags Flags);
576 SDValue buildRsqrtEstimate(SDValue Op, SDNodeFlags Flags);
577 SDValue buildSqrtEstimate(SDValue Op, SDNodeFlags Flags);
578 SDValue buildSqrtEstimateImpl(SDValue Op, SDNodeFlags Flags, bool Recip);
579 SDValue buildSqrtNROneConst(SDValue Arg, SDValue Est, unsigned Iterations,
580 SDNodeFlags Flags, bool Reciprocal);
581 SDValue buildSqrtNRTwoConst(SDValue Arg, SDValue Est, unsigned Iterations,
582 SDNodeFlags Flags, bool Reciprocal);
583 SDValue MatchBSwapHWordLow(SDNode *N, SDValue N0, SDValue N1,
584 bool DemandHighBits = true);
585 SDValue MatchBSwapHWord(SDNode *N, SDValue N0, SDValue N1);
586 SDValue MatchRotatePosNeg(SDValue Shifted, SDValue Pos, SDValue Neg,
587 SDValue InnerPos, SDValue InnerNeg,
588 unsigned PosOpcode, unsigned NegOpcode,
589 const SDLoc &DL);
590 SDValue MatchFunnelPosNeg(SDValue N0, SDValue N1, SDValue Pos, SDValue Neg,
591 SDValue InnerPos, SDValue InnerNeg,
592 unsigned PosOpcode, unsigned NegOpcode,
593 const SDLoc &DL);
594 SDValue MatchRotate(SDValue LHS, SDValue RHS, const SDLoc &DL);
595 SDValue MatchLoadCombine(SDNode *N);
596 SDValue mergeTruncStores(StoreSDNode *N);
597 SDValue ReduceLoadWidth(SDNode *N);
598 SDValue ReduceLoadOpStoreWidth(SDNode *N);
599 SDValue splitMergedValStore(StoreSDNode *ST);
600 SDValue TransformFPLoadStorePair(SDNode *N);
601 SDValue convertBuildVecZextToZext(SDNode *N);
602 SDValue reduceBuildVecExtToExtBuildVec(SDNode *N);
603 SDValue reduceBuildVecTruncToBitCast(SDNode *N);
604 SDValue reduceBuildVecToShuffle(SDNode *N);
605 SDValue createBuildVecShuffle(const SDLoc &DL, SDNode *N,
606 ArrayRef<int> VectorMask, SDValue VecIn1,
607 SDValue VecIn2, unsigned LeftIdx,
608 bool DidSplitVec);
609 SDValue matchVSelectOpSizesWithSetCC(SDNode *Cast);
610
611 /// Walk up chain skipping non-aliasing memory nodes,
612 /// looking for aliasing nodes and adding them to the Aliases vector.
613 void GatherAllAliases(SDNode *N, SDValue OriginalChain,
614 SmallVectorImpl<SDValue> &Aliases);
615
616 /// Return true if there is any possibility that the two addresses overlap.
617 bool isAlias(SDNode *Op0, SDNode *Op1) const;
618
619 /// Walk up chain skipping non-aliasing memory nodes, looking for a better
620 /// chain (aliasing node.)
621 SDValue FindBetterChain(SDNode *N, SDValue Chain);
622
623 /// Try to replace a store and any possibly adjacent stores on
624 /// consecutive chains with better chains. Return true only if St is
625 /// replaced.
626 ///
627 /// Notice that other chains may still be replaced even if the function
628 /// returns false.
629 bool findBetterNeighborChains(StoreSDNode *St);
630
631 // Helper for findBetterNeighborChains. Walk up the store chain, adding
632 // additional chained stores that do not overlap and can be parallelized.
633 bool parallelizeChainedStores(StoreSDNode *St);
634
635 /// Holds a pointer to an LSBaseSDNode as well as information on where it
636 /// is located in a sequence of memory operations connected by a chain.
637 struct MemOpLink {
638 // Ptr to the mem node.
639 LSBaseSDNode *MemNode;
640
641 // Offset from the base ptr.
642 int64_t OffsetFromBase;
643
644 MemOpLink(LSBaseSDNode *N, int64_t Offset)
645 : MemNode(N), OffsetFromBase(Offset) {}
646 };
647
648 // Classify the origin of a stored value.
649 enum class StoreSource { Unknown, Constant, Extract, Load };
650 StoreSource getStoreSource(SDValue StoreVal) {
651 switch (StoreVal.getOpcode()) {
652 case ISD::Constant:
653 case ISD::ConstantFP:
654 return StoreSource::Constant;
655 case ISD::EXTRACT_VECTOR_ELT:
656 case ISD::EXTRACT_SUBVECTOR:
657 return StoreSource::Extract;
658 case ISD::LOAD:
659 return StoreSource::Load;
660 default:
661 return StoreSource::Unknown;
662 }
663 }
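
[Editor's note] A hedged usage sketch, not a quote from this file: store merging typically classifies the candidate's stored value first and bails out when the source kind is unknown, since only constants, extracted vector elements, and loads have merge strategies.

  StoreSource Src = getStoreSource(St->getValue());
  if (Src == StoreSource::Unknown)
    return false; // nothing below knows how to merge this value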
664
665 /// This is a helper function for visitMUL to check the profitability
666 /// of folding (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2).
667 /// MulNode is the original multiply, AddNode is (add x, c1),
668 /// and ConstNode is c2.
669 bool isMulAddWithConstProfitable(SDNode *MulNode,
670 SDValue &AddNode,
671 SDValue &ConstNode);
672
673 /// This is a helper function for visitAND and visitZERO_EXTEND. Returns
674 /// true if the (and (load x) c) pattern matches an extload. ExtVT returns
675 /// the type of the loaded value to be extended.
676 bool isAndLoadExtLoad(ConstantSDNode *AndC, LoadSDNode *LoadN,
677 EVT LoadResultTy, EVT &ExtVT);
678
679 /// Helper function to calculate whether the given Load/Store can have its
680 /// width reduced to ExtVT.
681 bool isLegalNarrowLdSt(LSBaseSDNode *LDSTN, ISD::LoadExtType ExtType,
682 EVT &MemVT, unsigned ShAmt = 0);
683
684 /// Used by BackwardsPropagateMask to find suitable loads.
685 bool SearchForAndLoads(SDNode *N, SmallVectorImpl<LoadSDNode*> &Loads,
686 SmallPtrSetImpl<SDNode*> &NodesWithConsts,
687 ConstantSDNode *Mask, SDNode *&NodeToMask);
688 /// Attempt to propagate a given AND node back to load leaves so that they
689 /// can be combined into narrow loads.
690 bool BackwardsPropagateMask(SDNode *N);
691
692 /// Helper function for mergeConsecutiveStores which merges the component
693 /// store chains.
694 SDValue getMergeStoreChains(SmallVectorImpl<MemOpLink> &StoreNodes,
695 unsigned NumStores);
696
697 /// This is a helper function for mergeConsecutiveStores. When the source
698 /// elements of the consecutive stores are all constants or all extracted
699 /// vector elements, try to merge them into one larger store introducing
700 /// bitcasts if necessary. \return True if a merged store was created.
701 bool mergeStoresOfConstantsOrVecElts(SmallVectorImpl<MemOpLink> &StoreNodes,
702 EVT MemVT, unsigned NumStores,
703 bool IsConstantSrc, bool UseVector,
704 bool UseTrunc);
705
706 /// This is a helper function for mergeConsecutiveStores. Stores that
707 /// potentially may be merged with St are placed in StoreNodes. RootNode is
708 /// a chain predecessor to all store candidates.
709 void getStoreMergeCandidates(StoreSDNode *St,
710 SmallVectorImpl<MemOpLink> &StoreNodes,
711 SDNode *&Root);
712
713 /// Helper function for mergeConsecutiveStores. Checks if candidate stores
714 /// have indirect dependency through their operands. RootNode is the
715 /// predecessor to all stores calculated by getStoreMergeCandidates and is
716 /// used to prune the dependency check. \return True if safe to merge.
717 bool checkMergeStoreCandidatesForDependencies(
718 SmallVectorImpl<MemOpLink> &StoreNodes, unsigned NumStores,
719 SDNode *RootNode);
720
721 /// This is a helper function for mergeConsecutiveStores. Given a list of
722 /// store candidates, find the first N that are consecutive in memory.
723 /// Returns 0 if there are not at least 2 consecutive stores to try merging.
724 unsigned getConsecutiveStores(SmallVectorImpl<MemOpLink> &StoreNodes,
725 int64_t ElementSizeBytes) const;
726
727 /// This is a helper function for mergeConsecutiveStores. It is used for
728 /// store chains that are composed entirely of constant values.
729 bool tryStoreMergeOfConstants(SmallVectorImpl<MemOpLink> &StoreNodes,
730 unsigned NumConsecutiveStores,
731 EVT MemVT, SDNode *Root, bool AllowVectors);
732
733 /// This is a helper function for mergeConsecutiveStores. It is used for
734 /// store chains that are composed entirely of extracted vector elements.
735 /// When extracting multiple vector elements, try to store them in one
736 /// vector store rather than a sequence of scalar stores.
737 bool tryStoreMergeOfExtracts(SmallVectorImpl<MemOpLink> &StoreNodes,
738 unsigned NumConsecutiveStores, EVT MemVT,
739 SDNode *Root);
740
741 /// This is a helper function for mergeConsecutiveStores. It is used for
742 /// store chains that are composed entirely of loaded values.
743 bool tryStoreMergeOfLoads(SmallVectorImpl<MemOpLink> &StoreNodes,
744 unsigned NumConsecutiveStores, EVT MemVT,
745 SDNode *Root, bool AllowVectors,
746 bool IsNonTemporalStore, bool IsNonTemporalLoad);
747
748 /// Merge consecutive store operations into a wide store.
749 /// This optimization uses wide integers or vectors when possible.
750 /// \return true if stores were merged.
751 bool mergeConsecutiveStores(StoreSDNode *St);
752
753 /// Try to transform a truncation where C is a constant:
754 /// (trunc (and X, C)) -> (and (trunc X), (trunc C))
755 ///
756 /// \p N needs to be a truncation and its first operand an AND. Other
757 /// requirements are checked by the function (e.g. that trunc is
758 /// single-use) and if missed an empty SDValue is returned.
759 SDValue distributeTruncateThroughAnd(SDNode *N);
760
761 /// Helper function to determine whether the target supports the operation
762 /// given by \p Opcode for type \p VT, that is, whether the operation
763 /// is legal or custom before legalizing operations, and whether it is
764 /// legal (but not custom) after legalization.
765 bool hasOperation(unsigned Opcode, EVT VT) {
766 return TLI.isOperationLegalOrCustom(Opcode, VT, LegalOperations);
767 }
768
769 public:
770 /// Runs the dag combiner on all nodes in the work list
771 void Run(CombineLevel AtLevel);
772
773 SelectionDAG &getDAG() const { return DAG; }
774
775 /// Returns a type large enough to hold any valid shift amount - before type
776 /// legalization these can be huge.
777 EVT getShiftAmountTy(EVT LHSTy) {
778 assert(LHSTy.isInteger() && "Shift amount is not an integer type!");
779 return TLI.getShiftAmountTy(LHSTy, DAG.getDataLayout(), LegalTypes);
780 }
781
782 /// This method returns true if we are running before type legalization or
783 /// if the specified VT is legal.
784 bool isTypeLegal(const EVT &VT) {
785 if (!LegalTypes) return true;
786 return TLI.isTypeLegal(VT);
787 }
788
789 /// Convenience wrapper around TargetLowering::getSetCCResultType
790 EVT getSetCCResultType(EVT VT) const {
791 return TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
792 }
793
794 void ExtendSetCCUses(const SmallVectorImpl<SDNode *> &SetCCs,
795 SDValue OrigLoad, SDValue ExtLoad,
796 ISD::NodeType ExtType);
797 };
798
799/// This class is a DAGUpdateListener that removes any deleted
800/// nodes from the worklist.
801class WorklistRemover : public SelectionDAG::DAGUpdateListener {
802 DAGCombiner &DC;
803
804public:
805 explicit WorklistRemover(DAGCombiner &dc)
806 : SelectionDAG::DAGUpdateListener(dc.getDAG()), DC(dc) {}
807
808 void NodeDeleted(SDNode *N, SDNode *E) override {
809 DC.removeFromWorklist(N);
810 }
811};
812
813class WorklistInserter : public SelectionDAG::DAGUpdateListener {
814 DAGCombiner &DC;
815
816public:
817 explicit WorklistInserter(DAGCombiner &dc)
818 : SelectionDAG::DAGUpdateListener(dc.getDAG()), DC(dc) {}
819
820 // FIXME: Ideally we could add N to the worklist, but this causes exponential
821 // compile time costs in large DAGs, e.g. Halide.
822 void NodeInserted(SDNode *N) override { DC.ConsiderForPruning(N); }
823};
824
825} // end anonymous namespace
826
827//===----------------------------------------------------------------------===//
828// TargetLowering::DAGCombinerInfo implementation
829//===----------------------------------------------------------------------===//
830
831void TargetLowering::DAGCombinerInfo::AddToWorklist(SDNode *N) {
832 ((DAGCombiner*)DC)->AddToWorklist(N);
833}
834
835SDValue TargetLowering::DAGCombinerInfo::
836CombineTo(SDNode *N, ArrayRef<SDValue> To, bool AddTo) {
837 return ((DAGCombiner*)DC)->CombineTo(N, &To[0], To.size(), AddTo);
838}
839
840SDValue TargetLowering::DAGCombinerInfo::
841CombineTo(SDNode *N, SDValue Res, bool AddTo) {
842 return ((DAGCombiner*)DC)->CombineTo(N, Res, AddTo);
843}
844
845SDValue TargetLowering::DAGCombinerInfo::
846CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo) {
847 return ((DAGCombiner*)DC)->CombineTo(N, Res0, Res1, AddTo);
848}
849
850bool TargetLowering::DAGCombinerInfo::
851recursivelyDeleteUnusedNodes(SDNode *N) {
852 return ((DAGCombiner*)DC)->recursivelyDeleteUnusedNodes(N);
853}
854
855void TargetLowering::DAGCombinerInfo::
856CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO) {
857 return ((DAGCombiner*)DC)->CommitTargetLoweringOpt(TLO);
858}
859
860//===----------------------------------------------------------------------===//
861// Helper Functions
862//===----------------------------------------------------------------------===//
863
864void DAGCombiner::deleteAndRecombine(SDNode *N) {
865 removeFromWorklist(N);
866
867 // If the operands of this node are only used by the node, they will now be
868 // dead. Make sure to re-visit them and recursively delete dead nodes.
869 for (const SDValue &Op : N->ops())
870 // For an operand generating multiple values, one of the values may
871 // become dead allowing further simplification (e.g. split index
872 // arithmetic from an indexed load).
873 if (Op->hasOneUse() || Op->getNumValues() > 1)
874 AddToWorklist(Op.getNode());
875
876 DAG.DeleteNode(N);
877}
878
879// APInts must be the same size for most operations; this helper
880// function zero extends the shorter of the pair so that they match.
881// We provide an Offset so that we can create bitwidths that won't overflow.
882static void zeroExtendToMatch(APInt &LHS, APInt &RHS, unsigned Offset = 0) {
883 unsigned Bits = Offset + std::max(LHS.getBitWidth(), RHS.getBitWidth());
884 LHS = LHS.zextOrSelf(Bits);
885 RHS = RHS.zextOrSelf(Bits);
886}
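
[Editor's note] A hedged worked example of the helper above. APInt asserts when binary operands have different widths, so the narrower value is widened first; the Offset parameter adds headroom on top of the wider width.

  APInt A(8, 0xFF), B(32, 0x100);
  zeroExtendToMatch(A, B); // A becomes the 32-bit value 0x000000FF
  APInt Sum = A + B;       // legal now: both operands are 32 bits wide
  // With Offset = 1 both would be widened to 33 bits, so even the carry
  // out of an addition remains representable.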
887
888// Return true if this node is a setcc, or is a select_cc
889// that selects between the target values used for true and false, making it
890// equivalent to a setcc. Also, set the incoming LHS, RHS, and CC references to
891// the appropriate nodes based on the type of node we are checking. This
892// simplifies life a bit for the callers.
893bool DAGCombiner::isSetCCEquivalent(SDValue N, SDValue &LHS, SDValue &RHS,
894 SDValue &CC, bool MatchStrict) const {
895 if (N.getOpcode() == ISD::SETCC) {
896 LHS = N.getOperand(0);
897 RHS = N.getOperand(1);
898 CC = N.getOperand(2);
899 return true;
900 }
901
902 if (MatchStrict &&
903 (N.getOpcode() == ISD::STRICT_FSETCC ||
904 N.getOpcode() == ISD::STRICT_FSETCCS)) {
905 LHS = N.getOperand(1);
906 RHS = N.getOperand(2);
907 CC = N.getOperand(3);
908 return true;
909 }
910
911 if (N.getOpcode() != ISD::SELECT_CC ||
912 !TLI.isConstTrueVal(N.getOperand(2).getNode()) ||
913 !TLI.isConstFalseVal(N.getOperand(3).getNode()))
914 return false;
915
916 if (TLI.getBooleanContents(N.getValueType()) ==
917 TargetLowering::UndefinedBooleanContent)
918 return false;
919
920 LHS = N.getOperand(0);
921 RHS = N.getOperand(1);
922 CC = N.getOperand(4);
923 return true;
924}
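
[Editor's note] A hedged illustration of the equivalence matched above, in SelectionDAG notation. A select_cc that yields the target's canonical true/false constants is a setcc in disguise, so both shapes expose the same (LHS, RHS, CC) triple:

  (setcc x, y, setlt)                            ; LHS = x, RHS = y, CC = setlt
  (select_cc x, y, TrueConst, FalseConst, setlt) ; same triple, but only when
                                                 ; TrueConst/FalseConst are the
                                                 ; target's boolean constants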
925
926/// Return true if this is a SetCC-equivalent operation with only one use.
927/// If this is true, it allows the users to invert the operation for free when
928/// it is profitable to do so.
929bool DAGCombiner::isOneUseSetCC(SDValue N) const {
930 SDValue N0, N1, N2;
931 if (isSetCCEquivalent(N, N0, N1, N2) && N.getNode()->hasOneUse())
932 return true;
933 return false;
934}
935
936static bool isConstantSplatVectorMaskForType(SDNode *N, EVT ScalarTy) {
937 if (!ScalarTy.isSimple())
938 return false;
939
940 uint64_t MaskForTy = 0ULL;
941 switch (ScalarTy.getSimpleVT().SimpleTy) {
942 case MVT::i8:
943 MaskForTy = 0xFFULL;
944 break;
945 case MVT::i16:
946 MaskForTy = 0xFFFFULL;
947 break;
948 case MVT::i32:
949 MaskForTy = 0xFFFFFFFFULL;
950 break;
951 default:
952 return false;
953 break;
954 }
955
956 APInt Val;
957 if (ISD::isConstantSplatVector(N, Val))
958 return Val.getLimitedValue() == MaskForTy;
959
960 return false;
961}
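
[Editor's note] A hedged example: for ScalarTy == MVT::i16 the helper above accepts any constant splat of 0xFFFF, e.g. a v4i32 splat of 0x0000FFFF, i.e. a mask that keeps exactly one i16's worth of bits in each lane.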
962
963// Determines if it is a constant integer or a splat/build vector of constant
964// integers (and undefs).
965// Do not permit build vector implicit truncation.
966static bool isConstantOrConstantVector(SDValue N, bool NoOpaques = false) {
967 if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(N))
968 return !(Const->isOpaque() && NoOpaques);
969 if (N.getOpcode() != ISD::BUILD_VECTOR && N.getOpcode() != ISD::SPLAT_VECTOR)
970 return false;
971 unsigned BitWidth = N.getScalarValueSizeInBits();
972 for (const SDValue &Op : N->op_values()) {
973 if (Op.isUndef())
974 continue;
975 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Op);
976 if (!Const || Const->getAPIntValue().getBitWidth() != BitWidth ||
977 (Const->isOpaque() && NoOpaques))
978 return false;
979 }
980 return true;
981}
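
[Editor's note] Hedged examples of what the predicate above accepts and rejects, in SelectionDAG notation:

  (i32 7)                             ; true: plain scalar constant
  (v4i32 build_vector 1, undef, 3, 4) ; true: constants mixed with undef
  (v4i8 build_vector (i32 1), ...)    ; false: operands wider than the i8
                                      ; element imply implicit truncation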
982
983// Determines if a BUILD_VECTOR is composed of all constants, possibly mixed
984// with undefs.
985static bool isAnyConstantBuildVector(SDValue V, bool NoOpaques = false) {
986 if (V.getOpcode() != ISD::BUILD_VECTOR)
987 return false;
988 return isConstantOrConstantVector(V, NoOpaques) ||
989 ISD::isBuildVectorOfConstantFPSDNodes(V.getNode());
990}
991
992// Determine whether an indexed load's index can be split off; splitting is not possible when the index is an opaque target constant.
993static bool canSplitIdx(LoadSDNode *LD) {
994 return MaySplitLoadIndex &&
995 (LD->getOperand(2).getOpcode() != ISD::TargetConstant ||
996 !cast<ConstantSDNode>(LD->getOperand(2))->isOpaque());
997}
998
999bool DAGCombiner::reassociationCanBreakAddressingModePattern(unsigned Opc,
1000 const SDLoc &DL,
1001 SDValue N0,
1002 SDValue N1) {
1003 // Currently this only tries to ensure we don't undo the GEP splits done by
1004 // CodeGenPrepare when shouldConsiderGEPOffsetSplit is true. To ensure this,
1005 // we check if the following transformation would be problematic:
1006 // (load/store (add, (add, x, offset1), offset2)) ->
1007 // (load/store (add, x, offset1+offset2)).
1008
1009 if (Opc != ISD::ADD || N0.getOpcode() != ISD::ADD)
1010 return false;
1011
1012 if (N0.hasOneUse())
1013 return false;
1014
1015 auto *C1 = dyn_cast<ConstantSDNode>(N0.getOperand(1));
1016 auto *C2 = dyn_cast<ConstantSDNode>(N1);
1017 if (!C1 || !C2)
1018 return false;
1019
1020 const APInt &C1APIntVal = C1->getAPIntValue();
1021 const APInt &C2APIntVal = C2->getAPIntValue();
1022 if (C1APIntVal.getBitWidth() > 64 || C2APIntVal.getBitWidth() > 64)
1023 return false;
1024
1025 const APInt CombinedValueIntVal = C1APIntVal + C2APIntVal;
1026 if (CombinedValueIntVal.getBitWidth() > 64)
1027 return false;
1028 const int64_t CombinedValue = CombinedValueIntVal.getSExtValue();
1029
1030 for (SDNode *Node : N0->uses()) {
1031 auto LoadStore = dyn_cast<MemSDNode>(Node);
1032 if (LoadStore) {
1033 // Is x[offset2] already not a legal addressing mode? If so then
1034 // reassociating the constants breaks nothing (we test offset2 because
1035 // that's the one we hope to fold into the load or store).
1036 TargetLoweringBase::AddrMode AM;
1037 AM.HasBaseReg = true;
1038 AM.BaseOffs = C2APIntVal.getSExtValue();
1039 EVT VT = LoadStore->getMemoryVT();
1040 unsigned AS = LoadStore->getAddressSpace();
1041 Type *AccessTy = VT.getTypeForEVT(*DAG.getContext());
1042 if (!TLI.isLegalAddressingMode(DAG.getDataLayout(), AM, AccessTy, AS))
1043 continue;
1044
1045 // Would x[offset1+offset2] still be a legal addressing mode?
1046 AM.BaseOffs = CombinedValue;
1047 if (!TLI.isLegalAddressingMode(DAG.getDataLayout(), AM, AccessTy, AS))
1048 return true;
1049 }
1050 }
1051
1052 return false;
1053}
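
[Editor's note] A hedged worked example, with offsets invented for illustration. Suppose CodeGenPrepare split a large GEP offset so several memory ops share (add x, 4096) as a base:

  (load (add (add x, 4096), 8))   ; offset 8 encodes in the addressing mode

Reassociating to (load (add x, 4104)) is harmless only if 4104 also encodes in the target's addressing mode; if not, the combined offset must live in a register and the split is undone. The loop above returns true exactly in that "was legal, would become illegal" case.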
1054
1055// Helper for DAGCombiner::reassociateOps. Try to reassociate an expression
1056// such as (Opc N0, N1), if \p N0 is the same kind of operation as \p Opc.
1057SDValue DAGCombiner::reassociateOpsCommutative(unsigned Opc, const SDLoc &DL,
1058 SDValue N0, SDValue N1) {
1059 EVT VT = N0.getValueType();
1060
1061 if (N0.getOpcode() != Opc)
1062 return SDValue();
1063
1064 if (DAG.isConstantIntBuildVectorOrConstantInt(N0.getOperand(1))) {
1065 if (DAG.isConstantIntBuildVectorOrConstantInt(N1)) {
1066 // Reassociate: (op (op x, c1), c2) -> (op x, (op c1, c2))
1067 if (SDValue OpNode =
1068 DAG.FoldConstantArithmetic(Opc, DL, VT, {N0.getOperand(1), N1}))
1069 return DAG.getNode(Opc, DL, VT, N0.getOperand(0), OpNode);
1070 return SDValue();
1071 }
1072 if (N0.hasOneUse()) {
1073 // Reassociate: (op (op x, c1), y) -> (op (op x, y), c1)
1074 // iff (op x, c1) has one use
1075 SDValue OpNode = DAG.getNode(Opc, SDLoc(N0), VT, N0.getOperand(0), N1);
1076 if (!OpNode.getNode())
1077 return SDValue();
1078 return DAG.getNode(Opc, DL, VT, OpNode, N0.getOperand(1));
1079 }
1080 }
1081 return SDValue();
1082}
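
[Editor's note] Hedged concrete instances of the two rewrites above, with add as the commutative opcode:

  (add (add x, 10), 20) -> (add x, 30)          ; c2 constant: fold c1+c2
  (add (add x, 10), y)  -> (add (add x, y), 10) ; sink c1 outward so it can
                                                ; fold with other constants
                                                ; later; requires the inner
                                                ; add to have a single use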
1083
1084// Try to reassociate commutative binops.
1085SDValue DAGCombiner::reassociateOps(unsigned Opc, const SDLoc &DL, SDValue N0,
1086 SDValue N1, SDNodeFlags Flags) {
1087 assert(TLI.isCommutativeBinOp(Opc) && "Operation not commutative.");
1088
1089 // Floating-point reassociation is not allowed without loose FP math.
1090 if (N0.getValueType().isFloatingPoint() ||
1091 N1.getValueType().isFloatingPoint())
1092 if (!Flags.hasAllowReassociation() || !Flags.hasNoSignedZeros())
1093 return SDValue();
1094
1095 if (SDValue Combined = reassociateOpsCommutative(Opc, DL, N0, N1))
1096 return Combined;
1097 if (SDValue Combined = reassociateOpsCommutative(Opc, DL, N1, N0))
1098 return Combined;
1099 return SDValue();
1100}
1101
1102SDValue DAGCombiner::CombineTo(SDNode *N, const SDValue *To, unsigned NumTo,
1103 bool AddTo) {
1104 assert(N->getNumValues() == NumTo && "Broken CombineTo call!");
1105 ++NodesCombined;
1106 LLVM_DEBUG(dbgs() << "\nReplacing.1 "; N->dump(&DAG); dbgs() << "\nWith: ";
1107 To[0].getNode()->dump(&DAG);
1108 dbgs() << " and " << NumTo - 1 << " other values\n");
1109 for (unsigned i = 0, e = NumTo; i != e; ++i)
1110 assert((!To[i].getNode() ||
1111 N->getValueType(i) == To[i].getValueType()) &&
1112 "Cannot combine value to value of different type!");
1113
1114 WorklistRemover DeadNodes(*this);
1115 DAG.ReplaceAllUsesWith(N, To);
1116 if (AddTo) {
1117 // Push the new nodes and any users onto the worklist
1118 for (unsigned i = 0, e = NumTo; i != e; ++i) {
1119 if (To[i].getNode()) {
1120 AddToWorklist(To[i].getNode());
1121 AddUsersToWorklist(To[i].getNode());
1122 }
1123 }
1124 }
1125
1126 // Finally, if the node is now dead, remove it from the graph. The node
1127 // may not be dead if the replacement process recursively simplified to
1128 // something else needing this node.
1129 if (N->use_empty())
1130 deleteAndRecombine(N);
1131 return SDValue(N, 0);
1132}
1133
1134void DAGCombiner::
1135CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO) {
1136 // Replace the old value with the new one.
1137 ++NodesCombined;
1138 LLVM_DEBUG(dbgs() << "\nReplacing.2 "; TLO.Old.getNode()->dump(&DAG);
1139 dbgs() << "\nWith: "; TLO.New.getNode()->dump(&DAG);
1140 dbgs() << '\n');
1141
1142 // Replace all uses. If any nodes become isomorphic to other nodes and
1143 // are deleted, make sure to remove them from our worklist.
1144 WorklistRemover DeadNodes(*this);
1145 DAG.ReplaceAllUsesOfValueWith(TLO.Old, TLO.New);
1146
1147 // Push the new node and any (possibly new) users onto the worklist.
1148 AddToWorklistWithUsers(TLO.New.getNode());
1149
1150 // Finally, if the node is now dead, remove it from the graph. The node
1151 // may not be dead if the replacement process recursively simplified to
1152 // something else needing this node.
1153 if (TLO.Old.getNode()->use_empty())
1154 deleteAndRecombine(TLO.Old.getNode());
1155}
1156
1157/// Check the specified integer node value to see if it can be simplified or if
1158/// things it uses can be simplified by bit propagation. If so, return true.
1159bool DAGCombiner::SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
1160 const APInt &DemandedElts,
1161 bool AssumeSingleUse) {
1162 TargetLowering::TargetLoweringOpt TLO(DAG, LegalTypes, LegalOperations);
1163 KnownBits Known;
1164 if (!TLI.SimplifyDemandedBits(Op, DemandedBits, DemandedElts, Known, TLO, 0,
1165 AssumeSingleUse))
1166 return false;
1167
1168 // Revisit the node.
1169 AddToWorklist(Op.getNode());
1170
1171 CommitTargetLoweringOpt(TLO);
1172 return true;
1173}
1174
1175/// Check the specified vector node value to see if it can be simplified or
1176/// if things it uses can be simplified as it only uses some of the elements.
1177/// If so, return true.
1178bool DAGCombiner::SimplifyDemandedVectorElts(SDValue Op,
1179 const APInt &DemandedElts,
1180 bool AssumeSingleUse) {
1181 TargetLowering::TargetLoweringOpt TLO(DAG, LegalTypes, LegalOperations);
1182 APInt KnownUndef, KnownZero;
1183 if (!TLI.SimplifyDemandedVectorElts(Op, DemandedElts, KnownUndef, KnownZero,
1184 TLO, 0, AssumeSingleUse))
1185 return false;
1186
1187 // Revisit the node.
1188 AddToWorklist(Op.getNode());
1189
1190 CommitTargetLoweringOpt(TLO);
1191 return true;
1192}
1193
1194void DAGCombiner::ReplaceLoadWithPromotedLoad(SDNode *Load, SDNode *ExtLoad) {
1195 SDLoc DL(Load);
1196 EVT VT = Load->getValueType(0);
1197 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, VT, SDValue(ExtLoad, 0));
1198
1199 LLVM_DEBUG(dbgs() << "\nReplacing.9 "; Load->dump(&DAG); dbgs() << "\nWith: ";
1200 Trunc.getNode()->dump(&DAG); dbgs() << '\n');
1201 WorklistRemover DeadNodes(*this);
1202 DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 0), Trunc);
1203 DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), SDValue(ExtLoad, 1));
1204 deleteAndRecombine(Load);
1205 AddToWorklist(Trunc.getNode());
1206}
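// Illustration (annotation, not part of the upstream file): both results of
// the old load are rewired here. Roughly:
//   t0: i16,ch = load chain, ptr        ; old node
//   t1: i32,ch = extload chain, ptr     ; ExtLoad
//   t2: i16    = truncate t1            ; Trunc
// Uses of the loaded value (Load, 0) are redirected to t2, and uses of the
// output chain (Load, 1) to t1's chain result.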
1207
1208SDValue DAGCombiner::PromoteOperand(SDValue Op, EVT PVT, bool &Replace) {
1209 Replace = false;
1210 SDLoc DL(Op);
1211 if (ISD::isUNINDEXEDLoad(Op.getNode())) {
1212 LoadSDNode *LD = cast<LoadSDNode>(Op);
1213 EVT MemVT = LD->getMemoryVT();
1214 ISD::LoadExtType ExtType = ISD::isNON_EXTLoad(LD) ? ISD::EXTLOAD
1215 : LD->getExtensionType();
1216 Replace = true;
1217 return DAG.getExtLoad(ExtType, DL, PVT,
1218 LD->getChain(), LD->getBasePtr(),
1219 MemVT, LD->getMemOperand());
1220 }
1221
1222 unsigned Opc = Op.getOpcode();
1223 switch (Opc) {
1224 default: break;
1225 case ISD::AssertSext:
1226 if (SDValue Op0 = SExtPromoteOperand(Op.getOperand(0), PVT))
1227 return DAG.getNode(ISD::AssertSext, DL, PVT, Op0, Op.getOperand(1));
1228 break;
1229 case ISD::AssertZext:
1230 if (SDValue Op0 = ZExtPromoteOperand(Op.getOperand(0), PVT))
1231 return DAG.getNode(ISD::AssertZext, DL, PVT, Op0, Op.getOperand(1));
1232 break;
1233 case ISD::Constant: {
1234 unsigned ExtOpc =
1235 Op.getValueType().isByteSized() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
1236 return DAG.getNode(ExtOpc, DL, PVT, Op);
1237 }
1238 }
1239
1240 if (!TLI.isOperationLegal(ISD::ANY_EXTEND, PVT))
1241 return SDValue();
1242 return DAG.getNode(ISD::ANY_EXTEND, DL, PVT, Op);
1243}
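// Illustration (annotation, not part of the upstream file): PromoteOperand
// handles three shapes. Unindexed loads are widened to extending loads (and
// flagged via Replace so the original load gets rewired); AssertSext and
// AssertZext are rebuilt on top of a promoted operand so the range assertion
// survives; anything else is any-extended, since the extra high bits will be
// truncated away by the caller anyway.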
1244
1245SDValue DAGCombiner::SExtPromoteOperand(SDValue Op, EVT PVT) {
1246 if (!TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG, PVT))
1247 return SDValue();
1248 EVT OldVT = Op.getValueType();
1249 SDLoc DL(Op);
1250 bool Replace = false;
1251 SDValue NewOp = PromoteOperand(Op, PVT, Replace);
1252 if (!NewOp.getNode())
1253 return SDValue();
1254 AddToWorklist(NewOp.getNode());
1255
1256 if (Replace)
1257 ReplaceLoadWithPromotedLoad(Op.getNode(), NewOp.getNode());
1258 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, NewOp.getValueType(), NewOp,
1259 DAG.getValueType(OldVT));
1260}
1261
1262SDValue DAGCombiner::ZExtPromoteOperand(SDValue Op, EVT PVT) {
1263 EVT OldVT = Op.getValueType();
1264 SDLoc DL(Op);
1265 bool Replace = false;
1266 SDValue NewOp = PromoteOperand(Op, PVT, Replace);
1267 if (!NewOp.getNode())
1268 return SDValue();
1269 AddToWorklist(NewOp.getNode());
1270
1271 if (Replace)
1272 ReplaceLoadWithPromotedLoad(Op.getNode(), NewOp.getNode());
1273 return DAG.getZeroExtendInReg(NewOp, DL, OldVT);
1274}
1275
1276/// Promote the specified integer binary operation if the target indicates it is
1277 /// beneficial. E.g., on x86 it's usually better to promote i16 operations to
1278/// i32 since i16 instructions are longer.
1279SDValue DAGCombiner::PromoteIntBinOp(SDValue Op) {
1280 if (!LegalOperations)
1281 return SDValue();
1282
1283 EVT VT = Op.getValueType();
1284 if (VT.isVector() || !VT.isInteger())
1285 return SDValue();
1286
1287 // If operation type is 'undesirable', e.g. i16 on x86, consider
1288 // promoting it.
1289 unsigned Opc = Op.getOpcode();
1290 if (TLI.isTypeDesirableForOp(Opc, VT))
1291 return SDValue();
1292
1293 EVT PVT = VT;
1294 // Consult target whether it is a good idea to promote this operation and
1295 // what's the right type to promote it to.
1296 if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
1297    assert(PVT != VT && "Don't know what type to promote to!");
1298
1299    LLVM_DEBUG(dbgs() << "\nPromoting "; Op.getNode()->dump(&DAG));
1300
1301 bool Replace0 = false;
1302 SDValue N0 = Op.getOperand(0);
1303 SDValue NN0 = PromoteOperand(N0, PVT, Replace0);
1304
1305 bool Replace1 = false;
1306 SDValue N1 = Op.getOperand(1);
1307 SDValue NN1 = PromoteOperand(N1, PVT, Replace1);
1308 SDLoc DL(Op);
1309
1310 SDValue RV =
1311 DAG.getNode(ISD::TRUNCATE, DL, VT, DAG.getNode(Opc, DL, PVT, NN0, NN1));
1312
1313 // We are always replacing N0/N1's use in N and only need additional
1314 // replacements if there are additional uses.
1315 // Note: We are checking uses of the *nodes* (SDNode) rather than values
1316 // (SDValue) here because the node may reference multiple values
1317 // (for example, the chain value of a load node).
1318 Replace0 &= !N0->hasOneUse();
1319 Replace1 &= (N0 != N1) && !N1->hasOneUse();
1320
1321 // Combine Op here so it is preserved past replacements.
1322 CombineTo(Op.getNode(), RV);
1323
1324 // If operands have a use ordering, make sure we deal with
1325 // predecessor first.
1326 if (Replace0 && Replace1 && N0.getNode()->isPredecessorOf(N1.getNode())) {
1327 std::swap(N0, N1);
1328 std::swap(NN0, NN1);
1329 }
1330
1331 if (Replace0) {
1332 AddToWorklist(NN0.getNode());
1333 ReplaceLoadWithPromotedLoad(N0.getNode(), NN0.getNode());
1334 }
1335 if (Replace1) {
1336 AddToWorklist(NN1.getNode());
1337 ReplaceLoadWithPromotedLoad(N1.getNode(), NN1.getNode());
1338 }
1339 return Op;
1340 }
1341 return SDValue();
1342}
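// Illustration (annotation, not part of the upstream file): for an
// 'undesirable' i16 add on x86 this builds, roughly,
//   t1: i32 = any_extend X     t2: i32 = any_extend Y
//   t3: i32 = add t1, t2       RV: i16 = truncate t3
// with the expectation that the extend/truncate pairs fold away during
// instruction selection, leaving a single 32-bit add.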
1343
1344/// Promote the specified integer shift operation if the target indicates it is
1345/// beneficial. e.g. On x86, it's usually better to promote i16 operations to
1346/// i32 since i16 instructions are longer.
1347SDValue DAGCombiner::PromoteIntShiftOp(SDValue Op) {
1348 if (!LegalOperations)
1349 return SDValue();
1350
1351 EVT VT = Op.getValueType();
1352 if (VT.isVector() || !VT.isInteger())
1353 return SDValue();
1354
1355 // If operation type is 'undesirable', e.g. i16 on x86, consider
1356 // promoting it.
1357 unsigned Opc = Op.getOpcode();
1358 if (TLI.isTypeDesirableForOp(Opc, VT))
1359 return SDValue();
1360
1361 EVT PVT = VT;
1362 // Consult target whether it is a good idea to promote this operation and
1363 // what's the right type to promote it to.
1364 if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
1365    assert(PVT != VT && "Don't know what type to promote to!");
1366
1367    LLVM_DEBUG(dbgs() << "\nPromoting "; Op.getNode()->dump(&DAG));
1368
1369 bool Replace = false;
1370 SDValue N0 = Op.getOperand(0);
1371 SDValue N1 = Op.getOperand(1);
1372 if (Opc == ISD::SRA)
1373 N0 = SExtPromoteOperand(N0, PVT);
1374 else if (Opc == ISD::SRL)
1375 N0 = ZExtPromoteOperand(N0, PVT);
1376 else
1377 N0 = PromoteOperand(N0, PVT, Replace);
1378
1379 if (!N0.getNode())
1380 return SDValue();
1381
1382 SDLoc DL(Op);
1383 SDValue RV =
1384 DAG.getNode(ISD::TRUNCATE, DL, VT, DAG.getNode(Opc, DL, PVT, N0, N1));
1385
1386 if (Replace)
1387 ReplaceLoadWithPromotedLoad(Op.getOperand(0).getNode(), N0.getNode());
1388
1389 // Deal with Op being deleted.
1390 if (Op && Op.getOpcode() != ISD::DELETED_NODE)
1391 return RV;
1392 }
1393 return SDValue();
1394}
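// Illustration (annotation, not part of the upstream file): the extension
// kind is dictated by the shift. For SRA the shifted value is sign-extended
// and for SRL zero-extended, so the bits shifted into the low part of the
// wide result match what the narrow shift would have produced; SHL can use a
// plain promote because its low result bits do not depend on the high bits.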
1395
1396SDValue DAGCombiner::PromoteExtend(SDValue Op) {
1397 if (!LegalOperations)
1398 return SDValue();
1399
1400 EVT VT = Op.getValueType();
1401 if (VT.isVector() || !VT.isInteger())
1402 return SDValue();
1403
1404 // If operation type is 'undesirable', e.g. i16 on x86, consider
1405 // promoting it.
1406 unsigned Opc = Op.getOpcode();
1407 if (TLI.isTypeDesirableForOp(Opc, VT))
1408 return SDValue();
1409
1410 EVT PVT = VT;
1411 // Consult target whether it is a good idea to promote this operation and
1412 // what's the right type to promote it to.
1413 if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
1414    assert(PVT != VT && "Don't know what type to promote to!");
1415 // fold (aext (aext x)) -> (aext x)
1416 // fold (aext (zext x)) -> (zext x)
1417 // fold (aext (sext x)) -> (sext x)
1418    LLVM_DEBUG(dbgs() << "\nPromoting "; Op.getNode()->dump(&DAG));
1419 return DAG.getNode(Op.getOpcode(), SDLoc(Op), VT, Op.getOperand(0));
1420 }
1421 return SDValue();
1422}
1423
1424bool DAGCombiner::PromoteLoad(SDValue Op) {
1425 if (!LegalOperations)
1426 return false;
1427
1428 if (!ISD::isUNINDEXEDLoad(Op.getNode()))
1429 return false;
1430
1431 EVT VT = Op.getValueType();
1432 if (VT.isVector() || !VT.isInteger())
1433 return false;
1434
1435 // If operation type is 'undesirable', e.g. i16 on x86, consider
1436 // promoting it.
1437 unsigned Opc = Op.getOpcode();
1438 if (TLI.isTypeDesirableForOp(Opc, VT))
1439 return false;
1440
1441 EVT PVT = VT;
1442 // Consult target whether it is a good idea to promote this operation and
1443 // what's the right type to promote it to.
1444 if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
1445    assert(PVT != VT && "Don't know what type to promote to!");
1446
1447 SDLoc DL(Op);
1448 SDNode *N = Op.getNode();
1449 LoadSDNode *LD = cast<LoadSDNode>(N);
1450 EVT MemVT = LD->getMemoryVT();
1451 ISD::LoadExtType ExtType = ISD::isNON_EXTLoad(LD) ? ISD::EXTLOAD
1452 : LD->getExtensionType();
1453 SDValue NewLD = DAG.getExtLoad(ExtType, DL, PVT,
1454 LD->getChain(), LD->getBasePtr(),
1455 MemVT, LD->getMemOperand());
1456 SDValue Result = DAG.getNode(ISD::TRUNCATE, DL, VT, NewLD);
1457
1458    LLVM_DEBUG(dbgs() << "\nPromoting "; N->dump(&DAG); dbgs() << "\nTo: ";
1459               Result.getNode()->dump(&DAG); dbgs() << '\n');
1460 WorklistRemover DeadNodes(*this);
1461 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
1462 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), NewLD.getValue(1));
1463 deleteAndRecombine(N);
1464 AddToWorklist(Result.getNode());
1465 return true;
1466 }
1467 return false;
1468}
1469
1470/// Recursively delete a node which has no uses and any operands for
1471/// which it is the only use.
1472///
1473/// Note that this both deletes the nodes and removes them from the worklist.
1474 /// It also adds any nodes that have had a user deleted to the worklist, as
1475 /// they may now have only one use and be subject to other combines.
1476bool DAGCombiner::recursivelyDeleteUnusedNodes(SDNode *N) {
1477 if (!N->use_empty())
1478 return false;
1479
1480 SmallSetVector<SDNode *, 16> Nodes;
1481 Nodes.insert(N);
1482 do {
1483 N = Nodes.pop_back_val();
1484 if (!N)
1485 continue;
1486
1487 if (N->use_empty()) {
1488 for (const SDValue &ChildN : N->op_values())
1489 Nodes.insert(ChildN.getNode());
1490
1491 removeFromWorklist(N);
1492 DAG.DeleteNode(N);
1493 } else {
1494 AddToWorklist(N);
1495 }
1496 } while (!Nodes.empty());
1497 return true;
1498}
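// Illustration (annotation, not part of the upstream file): deletion
// cascades bottom-up. If N = (add t1, t2) is dead and N was t1's only user,
// t1 is popped and deleted on a later iteration; operands that still have
// other users are re-added to the worklist instead, since losing a user can
// enable new combines.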
1499
1500//===----------------------------------------------------------------------===//
1501// Main DAG Combiner implementation
1502//===----------------------------------------------------------------------===//
1503
1504void DAGCombiner::Run(CombineLevel AtLevel) {
1505  // Set the instance variables so that the various visit routines may use them.
1506 Level = AtLevel;
1507 LegalDAG = Level >= AfterLegalizeDAG;
1508 LegalOperations = Level >= AfterLegalizeVectorOps;
1509 LegalTypes = Level >= AfterLegalizeTypes;
1510
1511 WorklistInserter AddNodes(*this);
1512
1513 // Add all the dag nodes to the worklist.
1514 for (SDNode &Node : DAG.allnodes())
1515 AddToWorklist(&Node);
1516
1517  // Create a dummy node (which is not added to allnodes) that adds a reference
1518 // to the root node, preventing it from being deleted, and tracking any
1519 // changes of the root.
1520 HandleSDNode Dummy(DAG.getRoot());
1521
1522 // While we have a valid worklist entry node, try to combine it.
1523 while (SDNode *N = getNextWorklistEntry()) {
1524 // If N has no uses, it is dead. Make sure to revisit all N's operands once
1525 // N is deleted from the DAG, since they too may now be dead or may have a
1526 // reduced number of uses, allowing other xforms.
1527 if (recursivelyDeleteUnusedNodes(N))
1528 continue;
1529
1530 WorklistRemover DeadNodes(*this);
1531
1532 // If this combine is running after legalizing the DAG, re-legalize any
1533 // nodes pulled off the worklist.
1534 if (LegalDAG) {
1535 SmallSetVector<SDNode *, 16> UpdatedNodes;
1536 bool NIsValid = DAG.LegalizeOp(N, UpdatedNodes);
1537
1538 for (SDNode *LN : UpdatedNodes)
1539 AddToWorklistWithUsers(LN);
1540
1541 if (!NIsValid)
1542 continue;
1543 }
1544
1545    LLVM_DEBUG(dbgs() << "\nCombining: "; N->dump(&DAG));
1546
1547 // Add any operands of the new node which have not yet been combined to the
1548 // worklist as well. Because the worklist uniques things already, this
1549 // won't repeatedly process the same operand.
1550 CombinedNodes.insert(N);
1551 for (const SDValue &ChildN : N->op_values())
1552 if (!CombinedNodes.count(ChildN.getNode()))
1553 AddToWorklist(ChildN.getNode());
1554
1555 SDValue RV = combine(N);
1556
1557 if (!RV.getNode())
1558 continue;
1559
1560 ++NodesCombined;
1561
1562 // If we get back the same node we passed in, rather than a new node or
1563 // zero, we know that the node must have defined multiple values and
1564 // CombineTo was used. Since CombineTo takes care of the worklist
1565 // mechanics for us, we have no work to do in this case.
1566 if (RV.getNode() == N)
1567 continue;
1568
1569    assert(N->getOpcode() != ISD::DELETED_NODE &&
1570           RV.getOpcode() != ISD::DELETED_NODE &&
1571           "Node was deleted but visit returned new node!");
1572
1573    LLVM_DEBUG(dbgs() << " ... into: "; RV.getNode()->dump(&DAG));
1574
1575 if (N->getNumValues() == RV.getNode()->getNumValues())
1576 DAG.ReplaceAllUsesWith(N, RV.getNode());
1577 else {
1578      assert(N->getValueType(0) == RV.getValueType() &&
1579             N->getNumValues() == 1 && "Type mismatch");
1580 DAG.ReplaceAllUsesWith(N, &RV);
1581 }
1582
1583 // Push the new node and any users onto the worklist. Omit this if the
1584 // new node is the EntryToken (e.g. if a store managed to get optimized
1585 // out), because re-visiting the EntryToken and its users will not uncover
1586 // any additional opportunities, but there may be a large number of such
1587 // users, potentially causing compile time explosion.
1588 if (RV.getOpcode() != ISD::EntryToken) {
1589 AddToWorklist(RV.getNode());
1590 AddUsersToWorklist(RV.getNode());
1591 }
1592
1593 // Finally, if the node is now dead, remove it from the graph. The node
1594 // may not be dead if the replacement process recursively simplified to
1595 // something else needing this node. This will also take care of adding any
1596 // operands which have lost a user to the worklist.
1597 recursivelyDeleteUnusedNodes(N);
1598 }
1599
1600  // If the root changed (e.g. it was a dead load), update the root.
1601 DAG.setRoot(Dummy.getValue());
1602 DAG.RemoveDeadNodes();
1603}
1604
1605SDValue DAGCombiner::visit(SDNode *N) {
1606 switch (N->getOpcode()) {
1607 default: break;
1608 case ISD::TokenFactor: return visitTokenFactor(N);
1609 case ISD::MERGE_VALUES: return visitMERGE_VALUES(N);
1610 case ISD::ADD: return visitADD(N);
1611 case ISD::SUB: return visitSUB(N);
1612 case ISD::SADDSAT:
1613 case ISD::UADDSAT: return visitADDSAT(N);
1614 case ISD::SSUBSAT:
1615 case ISD::USUBSAT: return visitSUBSAT(N);
1616 case ISD::ADDC: return visitADDC(N);
1617 case ISD::SADDO:
1618 case ISD::UADDO: return visitADDO(N);
1619 case ISD::SUBC: return visitSUBC(N);
1620 case ISD::SSUBO:
1621 case ISD::USUBO: return visitSUBO(N);
1622 case ISD::ADDE: return visitADDE(N);
1623 case ISD::ADDCARRY: return visitADDCARRY(N);
1624 case ISD::SADDO_CARRY: return visitSADDO_CARRY(N);
1625 case ISD::SUBE: return visitSUBE(N);
1626 case ISD::SUBCARRY: return visitSUBCARRY(N);
1627 case ISD::SSUBO_CARRY: return visitSSUBO_CARRY(N);
1628 case ISD::SMULFIX:
1629 case ISD::SMULFIXSAT:
1630 case ISD::UMULFIX:
1631 case ISD::UMULFIXSAT: return visitMULFIX(N);
1632 case ISD::MUL: return visitMUL(N);
1633 case ISD::SDIV: return visitSDIV(N);
1634 case ISD::UDIV: return visitUDIV(N);
1635 case ISD::SREM:
1636 case ISD::UREM: return visitREM(N);
1637 case ISD::MULHU: return visitMULHU(N);
1638 case ISD::MULHS: return visitMULHS(N);
1639 case ISD::SMUL_LOHI: return visitSMUL_LOHI(N);
1640 case ISD::UMUL_LOHI: return visitUMUL_LOHI(N);
1641 case ISD::SMULO:
1642 case ISD::UMULO: return visitMULO(N);
1643 case ISD::SMIN:
1644 case ISD::SMAX:
1645 case ISD::UMIN:
1646 case ISD::UMAX: return visitIMINMAX(N);
1647 case ISD::AND: return visitAND(N);
1648 case ISD::OR: return visitOR(N);
1649 case ISD::XOR: return visitXOR(N);
1650 case ISD::SHL: return visitSHL(N);
1651 case ISD::SRA: return visitSRA(N);
1652 case ISD::SRL: return visitSRL(N);
1653 case ISD::ROTR:
1654 case ISD::ROTL: return visitRotate(N);
1655 case ISD::FSHL:
1656 case ISD::FSHR: return visitFunnelShift(N);
1657 case ISD::ABS: return visitABS(N);
1658 case ISD::BSWAP: return visitBSWAP(N);
1659 case ISD::BITREVERSE: return visitBITREVERSE(N);
1660 case ISD::CTLZ: return visitCTLZ(N);
1661 case ISD::CTLZ_ZERO_UNDEF: return visitCTLZ_ZERO_UNDEF(N);
1662 case ISD::CTTZ: return visitCTTZ(N);
1663 case ISD::CTTZ_ZERO_UNDEF: return visitCTTZ_ZERO_UNDEF(N);
1664 case ISD::CTPOP: return visitCTPOP(N);
1665 case ISD::SELECT: return visitSELECT(N);
1666 case ISD::VSELECT: return visitVSELECT(N);
1667 case ISD::SELECT_CC: return visitSELECT_CC(N);
1668 case ISD::SETCC: return visitSETCC(N);
1669 case ISD::SETCCCARRY: return visitSETCCCARRY(N);
1670 case ISD::SIGN_EXTEND: return visitSIGN_EXTEND(N);
1671 case ISD::ZERO_EXTEND: return visitZERO_EXTEND(N);
1672 case ISD::ANY_EXTEND: return visitANY_EXTEND(N);
1673 case ISD::AssertSext:
1674 case ISD::AssertZext: return visitAssertExt(N);
1675 case ISD::AssertAlign: return visitAssertAlign(N);
1676 case ISD::SIGN_EXTEND_INREG: return visitSIGN_EXTEND_INREG(N);
1677 case ISD::SIGN_EXTEND_VECTOR_INREG: return visitSIGN_EXTEND_VECTOR_INREG(N);
1678 case ISD::ZERO_EXTEND_VECTOR_INREG: return visitZERO_EXTEND_VECTOR_INREG(N);
1679 case ISD::TRUNCATE: return visitTRUNCATE(N);
1680 case ISD::BITCAST: return visitBITCAST(N);
1681 case ISD::BUILD_PAIR: return visitBUILD_PAIR(N);
1682 case ISD::FADD: return visitFADD(N);
1683 case ISD::STRICT_FADD: return visitSTRICT_FADD(N);
1684 case ISD::FSUB: return visitFSUB(N);
1685 case ISD::FMUL: return visitFMUL(N);
1686 case ISD::FMA: return visitFMA(N);
1687 case ISD::FDIV: return visitFDIV(N);
1688 case ISD::FREM: return visitFREM(N);
1689 case ISD::FSQRT: return visitFSQRT(N);
1690 case ISD::FCOPYSIGN: return visitFCOPYSIGN(N);
1691 case ISD::FPOW: return visitFPOW(N);
1692 case ISD::SINT_TO_FP: return visitSINT_TO_FP(N);
1693 case ISD::UINT_TO_FP: return visitUINT_TO_FP(N);
1694 case ISD::FP_TO_SINT: return visitFP_TO_SINT(N);
1695 case ISD::FP_TO_UINT: return visitFP_TO_UINT(N);
1696 case ISD::FP_ROUND: return visitFP_ROUND(N);
1697 case ISD::FP_EXTEND: return visitFP_EXTEND(N);
1698 case ISD::FNEG: return visitFNEG(N);
1699 case ISD::FABS: return visitFABS(N);
1700 case ISD::FFLOOR: return visitFFLOOR(N);
1701 case ISD::FMINNUM: return visitFMINNUM(N);
1702 case ISD::FMAXNUM: return visitFMAXNUM(N);
1703 case ISD::FMINIMUM: return visitFMINIMUM(N);
1704 case ISD::FMAXIMUM: return visitFMAXIMUM(N);
1705 case ISD::FCEIL: return visitFCEIL(N);
1706 case ISD::FTRUNC: return visitFTRUNC(N);
1707 case ISD::BRCOND: return visitBRCOND(N);
1708 case ISD::BR_CC: return visitBR_CC(N);
1709 case ISD::LOAD: return visitLOAD(N);
1710 case ISD::STORE: return visitSTORE(N);
1711 case ISD::INSERT_VECTOR_ELT: return visitINSERT_VECTOR_ELT(N);
1712 case ISD::EXTRACT_VECTOR_ELT: return visitEXTRACT_VECTOR_ELT(N);
1713 case ISD::BUILD_VECTOR: return visitBUILD_VECTOR(N);
1714 case ISD::CONCAT_VECTORS: return visitCONCAT_VECTORS(N);
1715 case ISD::EXTRACT_SUBVECTOR: return visitEXTRACT_SUBVECTOR(N);
1716 case ISD::VECTOR_SHUFFLE: return visitVECTOR_SHUFFLE(N);
1717 case ISD::SCALAR_TO_VECTOR: return visitSCALAR_TO_VECTOR(N);
1718 case ISD::INSERT_SUBVECTOR: return visitINSERT_SUBVECTOR(N);
1719 case ISD::MGATHER: return visitMGATHER(N);
1720 case ISD::MLOAD: return visitMLOAD(N);
1721 case ISD::MSCATTER: return visitMSCATTER(N);
1722 case ISD::MSTORE: return visitMSTORE(N);
1723 case ISD::LIFETIME_END: return visitLIFETIME_END(N);
1724 case ISD::FP_TO_FP16: return visitFP_TO_FP16(N);
1725 case ISD::FP16_TO_FP: return visitFP16_TO_FP(N);
1726 case ISD::FREEZE: return visitFREEZE(N);
1727 case ISD::VECREDUCE_FADD:
1728 case ISD::VECREDUCE_FMUL:
1729 case ISD::VECREDUCE_ADD:
1730 case ISD::VECREDUCE_MUL:
1731 case ISD::VECREDUCE_AND:
1732 case ISD::VECREDUCE_OR:
1733 case ISD::VECREDUCE_XOR:
1734 case ISD::VECREDUCE_SMAX:
1735 case ISD::VECREDUCE_SMIN:
1736 case ISD::VECREDUCE_UMAX:
1737 case ISD::VECREDUCE_UMIN:
1738 case ISD::VECREDUCE_FMAX:
1739 case ISD::VECREDUCE_FMIN: return visitVECREDUCE(N);
1740 }
1741 return SDValue();
1742}
1743
1744SDValue DAGCombiner::combine(SDNode *N) {
1745 SDValue RV;
1746 if (!DisableGenericCombines)
1747 RV = visit(N);
1748
1749 // If nothing happened, try a target-specific DAG combine.
1750 if (!RV.getNode()) {
1751    assert(N->getOpcode() != ISD::DELETED_NODE &&
1752           "Node was deleted but visit returned NULL!");
1753
1754 if (N->getOpcode() >= ISD::BUILTIN_OP_END ||
1755 TLI.hasTargetDAGCombine((ISD::NodeType)N->getOpcode())) {
1756
1757 // Expose the DAG combiner to the target combiner impls.
1758 TargetLowering::DAGCombinerInfo
1759 DagCombineInfo(DAG, Level, false, this);
1760
1761 RV = TLI.PerformDAGCombine(N, DagCombineInfo);
1762 }
1763 }
1764
1765 // If nothing happened still, try promoting the operation.
1766 if (!RV.getNode()) {
1767 switch (N->getOpcode()) {
1768 default: break;
1769 case ISD::ADD:
1770 case ISD::SUB:
1771 case ISD::MUL:
1772 case ISD::AND:
1773 case ISD::OR:
1774 case ISD::XOR:
1775 RV = PromoteIntBinOp(SDValue(N, 0));
1776 break;
1777 case ISD::SHL:
1778 case ISD::SRA:
1779 case ISD::SRL:
1780 RV = PromoteIntShiftOp(SDValue(N, 0));
1781 break;
1782 case ISD::SIGN_EXTEND:
1783 case ISD::ZERO_EXTEND:
1784 case ISD::ANY_EXTEND:
1785 RV = PromoteExtend(SDValue(N, 0));
1786 break;
1787 case ISD::LOAD:
1788 if (PromoteLoad(SDValue(N, 0)))
1789 RV = SDValue(N, 0);
1790 break;
1791 }
1792 }
1793
1794 // If N is a commutative binary node, try to eliminate it if the commuted
1795 // version is already present in the DAG.
1796 if (!RV.getNode() && TLI.isCommutativeBinOp(N->getOpcode()) &&
1797 N->getNumValues() == 1) {
1798 SDValue N0 = N->getOperand(0);
1799 SDValue N1 = N->getOperand(1);
1800
1801 // Constant operands are canonicalized to RHS.
1802 if (N0 != N1 && (isa<ConstantSDNode>(N0) || !isa<ConstantSDNode>(N1))) {
1803 SDValue Ops[] = {N1, N0};
1804 SDNode *CSENode = DAG.getNodeIfExists(N->getOpcode(), N->getVTList(), Ops,
1805 N->getFlags());
1806 if (CSENode)
1807 return SDValue(CSENode, 0);
1808 }
1809 }
1810
1811 return RV;
1812}
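// Illustration (annotation, not part of the upstream file): the commuted-CSE
// step above lets (add Y, X) be replaced by an existing (add X, Y). The
// guard skips the lookup when N is already in canonical constant-on-RHS form
// (e.g. (add X, C)), because the commuted variant (add C, X) should not
// exist in a canonicalized DAG.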
1813
1814/// Given a node, return its input chain if it has one, otherwise return a null
1815/// sd operand.
1816static SDValue getInputChainForNode(SDNode *N) {
1817 if (unsigned NumOps = N->getNumOperands()) {
1818 if (N->getOperand(0).getValueType() == MVT::Other)
1819 return N->getOperand(0);
1820 if (N->getOperand(NumOps-1).getValueType() == MVT::Other)
1821 return N->getOperand(NumOps-1);
1822 for (unsigned i = 1; i < NumOps-1; ++i)
1823 if (N->getOperand(i).getValueType() == MVT::Other)
1824 return N->getOperand(i);
1825 }
1826 return SDValue();
1827}
1828
1829SDValue DAGCombiner::visitTokenFactor(SDNode *N) {
1830 // If N has two operands, where one has an input chain equal to the other,
1831 // the 'other' chain is redundant.
1832 if (N->getNumOperands() == 2) {
1833 if (getInputChainForNode(N->getOperand(0).getNode()) == N->getOperand(1))
1834 return N->getOperand(0);
1835 if (getInputChainForNode(N->getOperand(1).getNode()) == N->getOperand(0))
1836 return N->getOperand(1);
1837 }
1838
1839 // Don't simplify token factors if optnone.
1840 if (OptLevel == CodeGenOpt::None)
1841 return SDValue();
1842
1843 // Don't simplify the token factor if the node itself has too many operands.
1844 if (N->getNumOperands() > TokenFactorInlineLimit)
1845 return SDValue();
1846
1847 // If the sole user is a token factor, we should make sure we have a
1848 // chance to merge them together. This prevents TF chains from inhibiting
1849 // optimizations.
1850 if (N->hasOneUse() && N->use_begin()->getOpcode() == ISD::TokenFactor)
1851 AddToWorklist(*(N->use_begin()));
1852
1853 SmallVector<SDNode *, 8> TFs; // List of token factors to visit.
1854 SmallVector<SDValue, 8> Ops; // Ops for replacing token factor.
1855 SmallPtrSet<SDNode*, 16> SeenOps;
1856 bool Changed = false; // If we should replace this token factor.
1857
1858 // Start out with this token factor.
1859 TFs.push_back(N);
1860
1861  // Iterate through token factors. The TFs list grows when new token
1862  // factors are encountered.
1863 for (unsigned i = 0; i < TFs.size(); ++i) {
1864 // Limit number of nodes to inline, to avoid quadratic compile times.
1865 // We have to add the outstanding Token Factors to Ops, otherwise we might
1866 // drop Ops from the resulting Token Factors.
1867 if (Ops.size() > TokenFactorInlineLimit) {
1868 for (unsigned j = i; j < TFs.size(); j++)
1869 Ops.emplace_back(TFs[j], 0);
1870 // Drop unprocessed Token Factors from TFs, so we do not add them to the
1871 // combiner worklist later.
1872 TFs.resize(i);
1873 break;
1874 }
1875
1876 SDNode *TF = TFs[i];
1877 // Check each of the operands.
1878 for (const SDValue &Op : TF->op_values()) {
1879 switch (Op.getOpcode()) {
1880 case ISD::EntryToken:
1881 // Entry tokens don't need to be added to the list. They are
1882 // redundant.
1883 Changed = true;
1884 break;
1885
1886 case ISD::TokenFactor:
1887 if (Op.hasOneUse() && !is_contained(TFs, Op.getNode())) {
1888 // Queue up for processing.
1889 TFs.push_back(Op.getNode());
1890 Changed = true;
1891 break;
1892 }
1893        LLVM_FALLTHROUGH;
1894
1895 default:
1896 // Only add if it isn't already in the list.
1897 if (SeenOps.insert(Op.getNode()).second)
1898 Ops.push_back(Op);
1899 else
1900 Changed = true;
1901 break;
1902 }
1903 }
1904 }
1905
1906 // Re-visit inlined Token Factors, to clean them up in case they have been
1907 // removed. Skip the first Token Factor, as this is the current node.
1908 for (unsigned i = 1, e = TFs.size(); i < e; i++)
1909 AddToWorklist(TFs[i]);
1910
1911 // Remove Nodes that are chained to another node in the list. Do so
1912  // by walking up chains breadth-first, stopping when we've seen
1913 // another operand. In general we must climb to the EntryNode, but we can exit
1914 // early if we find all remaining work is associated with just one operand as
1915 // no further pruning is possible.
1916
1917 // List of nodes to search through and original Ops from which they originate.
1918 SmallVector<std::pair<SDNode *, unsigned>, 8> Worklist;
1919 SmallVector<unsigned, 8> OpWorkCount; // Count of work for each Op.
1920 SmallPtrSet<SDNode *, 16> SeenChains;
1921 bool DidPruneOps = false;
1922
1923 unsigned NumLeftToConsider = 0;
1924 for (const SDValue &Op : Ops) {
1925 Worklist.push_back(std::make_pair(Op.getNode(), NumLeftToConsider++));
1926 OpWorkCount.push_back(1);
1927 }
1928
1929 auto AddToWorklist = [&](unsigned CurIdx, SDNode *Op, unsigned OpNumber) {
1930    // If this is an Op, we can remove the op from the list. Re-mark any
1931    // search associated with it as from the current OpNumber.
1932 if (SeenOps.contains(Op)) {
1933 Changed = true;
1934 DidPruneOps = true;
1935 unsigned OrigOpNumber = 0;
1936 while (OrigOpNumber < Ops.size() && Ops[OrigOpNumber].getNode() != Op)
1937 OrigOpNumber++;
1938      assert((OrigOpNumber != Ops.size()) &&
1939             "expected to find TokenFactor Operand");
1940 // Re-mark worklist from OrigOpNumber to OpNumber
1941 for (unsigned i = CurIdx + 1; i < Worklist.size(); ++i) {
1942 if (Worklist[i].second == OrigOpNumber) {
1943 Worklist[i].second = OpNumber;
1944 }
1945 }
1946 OpWorkCount[OpNumber] += OpWorkCount[OrigOpNumber];
1947 OpWorkCount[OrigOpNumber] = 0;
1948 NumLeftToConsider--;
1949 }
1950 // Add if it's a new chain
1951 if (SeenChains.insert(Op).second) {
1952 OpWorkCount[OpNumber]++;
1953 Worklist.push_back(std::make_pair(Op, OpNumber));
1954 }
1955 };
1956
1957 for (unsigned i = 0; i < Worklist.size() && i < 1024; ++i) {
1958    // We need to consider at least 2 Ops to prune.
1959 if (NumLeftToConsider <= 1)
1960 break;
1961 auto CurNode = Worklist[i].first;
1962 auto CurOpNumber = Worklist[i].second;
1963    assert((OpWorkCount[CurOpNumber] > 0) &&
1964           "Node should not appear in worklist");
1965 switch (CurNode->getOpcode()) {
1966 case ISD::EntryToken:
1967      // Hitting EntryToken is the only way for the search to terminate
1968      // without hitting another operand's search. Bump the count so this
1969      // operand is not marked as fully considered when its work count
1970      // reaches zero below.
1971 NumLeftToConsider++;
1972 break;
1973 case ISD::TokenFactor:
1974 for (const SDValue &Op : CurNode->op_values())
1975 AddToWorklist(i, Op.getNode(), CurOpNumber);
1976 break;
1977 case ISD::LIFETIME_START:
1978 case ISD::LIFETIME_END:
1979 case ISD::CopyFromReg:
1980 case ISD::CopyToReg:
1981 AddToWorklist(i, CurNode->getOperand(0).getNode(), CurOpNumber);
1982 break;
1983 default:
1984 if (auto *MemNode = dyn_cast<MemSDNode>(CurNode))
1985 AddToWorklist(i, MemNode->getChain().getNode(), CurOpNumber);
1986 break;
1987 }
1988 OpWorkCount[CurOpNumber]--;
1989 if (OpWorkCount[CurOpNumber] == 0)
1990 NumLeftToConsider--;
1991 }
1992
1993 // If we've changed things around then replace token factor.
1994 if (Changed) {
1995 SDValue Result;
1996 if (Ops.empty()) {
1997 // The entry token is the only possible outcome.
1998 Result = DAG.getEntryNode();
1999 } else {
2000 if (DidPruneOps) {
2001 SmallVector<SDValue, 8> PrunedOps;
2002 //
2003 for (const SDValue &Op : Ops) {
2004 if (SeenChains.count(Op.getNode()) == 0)
2005 PrunedOps.push_back(Op);
2006 }
2007 Result = DAG.getTokenFactor(SDLoc(N), PrunedOps);
2008 } else {
2009 Result = DAG.getTokenFactor(SDLoc(N), Ops);
2010 }
2011 }
2012 return Result;
2013 }
2014 return SDValue();
2015}
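// Illustration (annotation, not part of the upstream file): flattening plus
// pruning. Given
//   t3 = TokenFactor t1, t2    ; single use
//   t5 = TokenFactor t3, t4
// t3 is inlined to give TokenFactor t1, t2, t4, and if t4's chain already
// passes through t1, the breadth-first walk discovers that and drops t1 as
// redundant.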
2016
2017/// MERGE_VALUES can always be eliminated.
2018SDValue DAGCombiner::visitMERGE_VALUES(SDNode *N) {
2019 WorklistRemover DeadNodes(*this);
2020 // Replacing results may cause a different MERGE_VALUES to suddenly
2021 // be CSE'd with N, and carry its uses with it. Iterate until no
2022 // uses remain, to ensure that the node can be safely deleted.
2023 // First add the users of this node to the work list so that they
2024 // can be tried again once they have new operands.
2025 AddUsersToWorklist(N);
2026 do {
2027 // Do as a single replacement to avoid rewalking use lists.
2028 SmallVector<SDValue, 8> Ops;
2029 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
2030 Ops.push_back(N->getOperand(i));
2031 DAG.ReplaceAllUsesWith(N, Ops.data());
2032 } while (!N->use_empty());
2033 deleteAndRecombine(N);
2034 return SDValue(N, 0); // Return N so it doesn't get rechecked!
2035}
2036
2037 /// If \p N is a ConstantSDNode with isOpaque() == false, return it cast to a
2038 /// ConstantSDNode pointer, else nullptr.
2039static ConstantSDNode *getAsNonOpaqueConstant(SDValue N) {
2040 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(N);
2041 return Const != nullptr && !Const->isOpaque() ? Const : nullptr;
2042}
2043
2044/// Return true if 'Use' is a load or a store that uses N as its base pointer
2045/// and that N may be folded in the load / store addressing mode.
2046static bool canFoldInAddressingMode(SDNode *N, SDNode *Use, SelectionDAG &DAG,
2047 const TargetLowering &TLI) {
2048 EVT VT;
2049 unsigned AS;
2050
2051 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Use)) {
2052 if (LD->isIndexed() || LD->getBasePtr().getNode() != N)
2053 return false;
2054 VT = LD->getMemoryVT();
2055 AS = LD->getAddressSpace();
2056 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(Use)) {
2057 if (ST->isIndexed() || ST->getBasePtr().getNode() != N)
2058 return false;
2059 VT = ST->getMemoryVT();
2060 AS = ST->getAddressSpace();
2061 } else if (MaskedLoadSDNode *LD = dyn_cast<MaskedLoadSDNode>(Use)) {
2062 if (LD->isIndexed() || LD->getBasePtr().getNode() != N)
2063 return false;
2064 VT = LD->getMemoryVT();
2065 AS = LD->getAddressSpace();
2066 } else if (MaskedStoreSDNode *ST = dyn_cast<MaskedStoreSDNode>(Use)) {
2067 if (ST->isIndexed() || ST->getBasePtr().getNode() != N)
2068 return false;
2069 VT = ST->getMemoryVT();
2070 AS = ST->getAddressSpace();
2071 } else
2072 return false;
2073
2074 TargetLowering::AddrMode AM;
2075 if (N->getOpcode() == ISD::ADD) {
2076 AM.HasBaseReg = true;
2077 ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
2078 if (Offset)
2079 // [reg +/- imm]
2080 AM.BaseOffs = Offset->getSExtValue();
2081 else
2082 // [reg +/- reg]
2083 AM.Scale = 1;
2084 } else if (N->getOpcode() == ISD::SUB) {
2085 AM.HasBaseReg = true;
2086 ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
2087 if (Offset)
2088 // [reg +/- imm]
2089 AM.BaseOffs = -Offset->getSExtValue();
2090 else
2091 // [reg +/- reg]
2092 AM.Scale = 1;
2093 } else
2094 return false;
2095
2096 return TLI.isLegalAddressingMode(DAG.getDataLayout(), AM,
2097 VT.getTypeForEVT(*DAG.getContext()), AS);
2098}
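// Illustration (annotation, not part of the upstream file): for
//   N = (add BasePtr, 16) used as the address of a load,
// the query is AM.HasBaseReg = true, AM.BaseOffs = 16, i.e. the [reg + imm]
// addressing mode. If the target reports that mode legal for the load's
// memory type and address space, the add can be folded into the memory
// operand instead of surviving as a separate node.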
2099
2100SDValue DAGCombiner::foldBinOpIntoSelect(SDNode *BO) {
2101    assert(TLI.isBinOp(BO->getOpcode()) && BO->getNumValues() == 1 &&
2102           "Unexpected binary operator");
2103
2104 // Don't do this unless the old select is going away. We want to eliminate the
2105 // binary operator, not replace a binop with a select.
2106 // TODO: Handle ISD::SELECT_CC.
2107 unsigned SelOpNo = 0;
2108 SDValue Sel = BO->getOperand(0);
2109 if (Sel.getOpcode() != ISD::SELECT || !Sel.hasOneUse()) {
2110 SelOpNo = 1;
2111 Sel = BO->getOperand(1);
2112 }
2113
2114 if (Sel.getOpcode() != ISD::SELECT || !Sel.hasOneUse())
2115 return SDValue();
2116
2117 SDValue CT = Sel.getOperand(1);
2118 if (!isConstantOrConstantVector(CT, true) &&
2119 !DAG.isConstantFPBuildVectorOrConstantFP(CT))
2120 return SDValue();
2121
2122 SDValue CF = Sel.getOperand(2);
2123 if (!isConstantOrConstantVector(CF, true) &&
2124 !DAG.isConstantFPBuildVectorOrConstantFP(CF))
2125 return SDValue();
2126
2127 // Bail out if any constants are opaque because we can't constant fold those.
2128  // The exception is "and" and "or" with either 0 or -1, in which case we can
2129  // propagate non-constant operands into the select. I.e.:
2130 // and (select Cond, 0, -1), X --> select Cond, 0, X
2131 // or X, (select Cond, -1, 0) --> select Cond, -1, X
2132 auto BinOpcode = BO->getOpcode();
2133 bool CanFoldNonConst =
2134 (BinOpcode == ISD::AND || BinOpcode == ISD::OR) &&
2135 (isNullOrNullSplat(CT) || isAllOnesOrAllOnesSplat(CT)) &&
2136 (isNullOrNullSplat(CF) || isAllOnesOrAllOnesSplat(CF));
2137
2138 SDValue CBO = BO->getOperand(SelOpNo ^ 1);
2139 if (!CanFoldNonConst &&
2140 !isConstantOrConstantVector(CBO, true) &&
2141 !DAG.isConstantFPBuildVectorOrConstantFP(CBO))
2142 return SDValue();
2143
2144 EVT VT = BO->getValueType(0);
2145
2146 // We have a select-of-constants followed by a binary operator with a
2147 // constant. Eliminate the binop by pulling the constant math into the select.
2148 // Example: add (select Cond, CT, CF), CBO --> select Cond, CT + CBO, CF + CBO
2149 SDLoc DL(Sel);
2150 SDValue NewCT = SelOpNo ? DAG.getNode(BinOpcode, DL, VT, CBO, CT)
2151 : DAG.getNode(BinOpcode, DL, VT, CT, CBO);
2152 if (!CanFoldNonConst && !NewCT.isUndef() &&
2153 !isConstantOrConstantVector(NewCT, true) &&
2154 !DAG.isConstantFPBuildVectorOrConstantFP(NewCT))
2155 return SDValue();
2156
2157 SDValue NewCF = SelOpNo ? DAG.getNode(BinOpcode, DL, VT, CBO, CF)
2158 : DAG.getNode(BinOpcode, DL, VT, CF, CBO);
2159 if (!CanFoldNonConst && !NewCF.isUndef() &&
2160 !isConstantOrConstantVector(NewCF, true) &&
2161 !DAG.isConstantFPBuildVectorOrConstantFP(NewCF))
2162 return SDValue();
2163
2164 SDValue SelectOp = DAG.getSelect(DL, VT, Sel.getOperand(0), NewCT, NewCF);
2165 SelectOp->setFlags(BO->getFlags());
2166 return SelectOp;
2167}
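// Illustration (annotation, not part of the upstream file): with
//   BO = add (select Cond, 2, 4), 1
// both arms fold to constants, producing (select Cond, 3, 5) and deleting
// the add. The one-use requirement on the select ensures the transform
// removes a binop rather than duplicating the select.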
2168
2169static SDValue foldAddSubBoolOfMaskedVal(SDNode *N, SelectionDAG &DAG) {
2170    assert((N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
2171           "Expecting add or sub");
2172
2173 // Match a constant operand and a zext operand for the math instruction:
2174 // add Z, C
2175 // sub C, Z
2176 bool IsAdd = N->getOpcode() == ISD::ADD;
2177 SDValue C = IsAdd ? N->getOperand(1) : N->getOperand(0);
2178 SDValue Z = IsAdd ? N->getOperand(0) : N->getOperand(1);
2179 auto *CN = dyn_cast<ConstantSDNode>(C);
2180 if (!CN || Z.getOpcode() != ISD::ZERO_EXTEND)
2181 return SDValue();
2182
2183 // Match the zext operand as a setcc of a boolean.
2184 if (Z.getOperand(0).getOpcode() != ISD::SETCC ||
2185 Z.getOperand(0).getValueType() != MVT::i1)
2186 return SDValue();
2187
2188 // Match the compare as: setcc (X & 1), 0, eq.
2189 SDValue SetCC = Z.getOperand(0);
2190 ISD::CondCode CC = cast<CondCodeSDNode>(SetCC->getOperand(2))->get();
2191 if (CC != ISD::SETEQ || !isNullConstant(SetCC.getOperand(1)) ||
2192 SetCC.getOperand(0).getOpcode() != ISD::AND ||
2193 !isOneConstant(SetCC.getOperand(0).getOperand(1)))
2194 return SDValue();
2195
2196 // We are adding/subtracting a constant and an inverted low bit. Turn that
2197 // into a subtract/add of the low bit with incremented/decremented constant:
2198 // add (zext i1 (seteq (X & 1), 0)), C --> sub C+1, (zext (X & 1))
2199 // sub C, (zext i1 (seteq (X & 1), 0)) --> add C-1, (zext (X & 1))
2200 EVT VT = C.getValueType();
2201 SDLoc DL(N);
2202 SDValue LowBit = DAG.getZExtOrTrunc(SetCC.getOperand(0), DL, VT);
2203 SDValue C1 = IsAdd ? DAG.getConstant(CN->getAPIntValue() + 1, DL, VT) :
2204 DAG.getConstant(CN->getAPIntValue() - 1, DL, VT);
2205 return DAG.getNode(IsAdd ? ISD::SUB : ISD::ADD, DL, VT, C1, LowBit);
2206}
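// Illustration (annotation, not part of the upstream file): a worked
// instance of the fold above. Since zext i1 (seteq (and X, 1), 0) equals
// 1 - (X & 1), we have
//   add (zext i1 (seteq (and X, 1), 0)), C  ==  (C + 1) - (X & 1)
// which is exactly the (sub C1, LowBit) node built here.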
2207
2208 /// Try to fold a 'not' of a shifted sign-bit, combined with an add/sub of a
2209 /// constant operand, into a shift and an add with a different constant.
2210static SDValue foldAddSubOfSignBit(SDNode *N, SelectionDAG &DAG) {
2211    assert((N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
2212           "Expecting add or sub");
2213
2214 // We need a constant operand for the add/sub, and the other operand is a
2215 // logical shift right: add (srl), C or sub C, (srl).
2216 bool IsAdd = N->getOpcode() == ISD::ADD;
2217 SDValue ConstantOp = IsAdd ? N->getOperand(1) : N->getOperand(0);
2218 SDValue ShiftOp = IsAdd ? N->getOperand(0) : N->getOperand(1);
2219 if (!DAG.isConstantIntBuildVectorOrConstantInt(ConstantOp) ||
2220 ShiftOp.getOpcode() != ISD::SRL)
2221 return SDValue();
2222
2223 // The shift must be of a 'not' value.
2224 SDValue Not = ShiftOp.getOperand(0);
2225 if (!Not.hasOneUse() || !isBitwiseNot(Not))
2226 return SDValue();
2227
2228 // The shift must be moving the sign bit to the least-significant-bit.
2229 EVT VT = ShiftOp.getValueType();
2230 SDValue ShAmt = ShiftOp.getOperand(1);
2231 ConstantSDNode *ShAmtC = isConstOrConstSplat(ShAmt);
2232 if (!ShAmtC || ShAmtC->getAPIntValue() != (VT.getScalarSizeInBits() - 1))
2233 return SDValue();
2234
2235 // Eliminate the 'not' by adjusting the shift and add/sub constant:
2236 // add (srl (not X), 31), C --> add (sra X, 31), (C + 1)
2237 // sub C, (srl (not X), 31) --> add (srl X, 31), (C - 1)
2238 SDLoc DL(N);
2239 auto ShOpcode = IsAdd ? ISD::SRA : ISD::SRL;
2240 SDValue NewShift = DAG.getNode(ShOpcode, DL, VT, Not.getOperand(0), ShAmt);
2241 if (SDValue NewC =
2242 DAG.FoldConstantArithmetic(IsAdd ? ISD::ADD : ISD::SUB, DL, VT,
2243 {ConstantOp, DAG.getConstant(1, DL, VT)}))
2244 return DAG.getNode(ISD::ADD, DL, VT, NewShift, NewC);
2245 return SDValue();
2246}
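// Illustration (annotation, not part of the upstream file): for i32, the
// shifted 'not' is the inverted sign bit: srl (not X), 31 == 1 - (srl X, 31).
// Hence add (srl (not X), 31), C == C + 1 - (srl X, 31), and because
// sra X, 31 is 0 or -1, that equals (sra X, 31) + (C + 1), matching the
// rewrite described in the comment above.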
2247
2248/// Try to fold a node that behaves like an ADD (note that N isn't necessarily
2249/// an ISD::ADD here, it could for example be an ISD::OR if we know that there
2250/// are no common bits set in the operands).
2251SDValue DAGCombiner::visitADDLike(SDNode *N) {
2252 SDValue N0 = N->getOperand(0);
2253 SDValue N1 = N->getOperand(1);
2254 EVT VT = N0.getValueType();
2255 SDLoc DL(N);
2256
2257 // fold vector ops
2258 if (VT.isVector()) {
2259 if (SDValue FoldedVOp = SimplifyVBinOp(N))
2260 return FoldedVOp;
2261
2262 // fold (add x, 0) -> x, vector edition
2263 if (ISD::isConstantSplatVectorAllZeros(N1.getNode()))
2264 return N0;
2265 if (ISD::isConstantSplatVectorAllZeros(N0.getNode()))
2266 return N1;
2267 }
2268
2269 // fold (add x, undef) -> undef
2270 if (N0.isUndef())
2271 return N0;
2272
2273 if (N1.isUndef())
2274 return N1;
2275
2276 if (DAG.isConstantIntBuildVectorOrConstantInt(N0)) {
2277 // canonicalize constant to RHS
2278 if (!DAG.isConstantIntBuildVectorOrConstantInt(N1))
2279 return DAG.getNode(ISD::ADD, DL, VT, N1, N0);
2280 // fold (add c1, c2) -> c1+c2
2281 return DAG.FoldConstantArithmetic(ISD::ADD, DL, VT, {N0, N1});
2282 }
2283
2284 // fold (add x, 0) -> x
2285 if (isNullConstant(N1))
2286 return N0;
2287
2288 if (isConstantOrConstantVector(N1, /* NoOpaque */ true)) {
2289 // fold ((A-c1)+c2) -> (A+(c2-c1))
2290 if (N0.getOpcode() == ISD::SUB &&
2291 isConstantOrConstantVector(N0.getOperand(1), /* NoOpaque */ true)) {
2292 SDValue Sub =
2293 DAG.FoldConstantArithmetic(ISD::SUB, DL, VT, {N1, N0.getOperand(1)});
2294      assert(Sub && "Constant folding failed");
2295 return DAG.getNode(ISD::ADD, DL, VT, N0.getOperand(0), Sub);
2296 }
2297
2298 // fold ((c1-A)+c2) -> (c1+c2)-A
2299 if (N0.getOpcode() == ISD::SUB &&
2300 isConstantOrConstantVector(N0.getOperand(0), /* NoOpaque */ true)) {
2301 SDValue Add =
2302 DAG.FoldConstantArithmetic(ISD::ADD, DL, VT, {N1, N0.getOperand(0)});
2303      assert(Add && "Constant folding failed");
2304 return DAG.getNode(ISD::SUB, DL, VT, Add, N0.getOperand(1));
2305 }
2306
2307 // add (sext i1 X), 1 -> zext (not i1 X)
2308 // We don't transform this pattern:
2309 // add (zext i1 X), -1 -> sext (not i1 X)
2310 // because most (?) targets generate better code for the zext form.
2311 if (N0.getOpcode() == ISD::SIGN_EXTEND && N0.hasOneUse() &&
2312 isOneOrOneSplat(N1)) {
2313 SDValue X = N0.getOperand(0);
2314 if ((!LegalOperations ||
2315 (TLI.isOperationLegal(ISD::XOR, X.getValueType()) &&
2316 TLI.isOperationLegal(ISD::ZERO_EXTEND, VT))) &&
2317 X.getScalarValueSizeInBits() == 1) {
2318 SDValue Not = DAG.getNOT(DL, X, X.getValueType());
2319 return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Not);
2320 }
2321 }
2322
2323 // Fold (add (or x, c0), c1) -> (add x, (c0 + c1)) if (or x, c0) is
2324 // equivalent to (add x, c0).
2325 if (N0.getOpcode() == ISD::OR &&
2326 isConstantOrConstantVector(N0.getOperand(1), /* NoOpaque */ true) &&
2327 DAG.haveNoCommonBitsSet(N0.getOperand(0), N0.getOperand(1))) {
2328 if (SDValue Add0 = DAG.FoldConstantArithmetic(ISD::ADD, DL, VT,
2329 {N1, N0.getOperand(1)}))
2330 return DAG.getNode(ISD::ADD, DL, VT, N0.getOperand(0), Add0);
2331 }
2332 }
2333
2334 if (SDValue NewSel = foldBinOpIntoSelect(N))
2335 return NewSel;
2336
2337 // reassociate add
2338 if (!reassociationCanBreakAddressingModePattern(ISD::ADD, DL, N0, N1)) {
2339 if (SDValue RADD = reassociateOps(ISD::ADD, DL, N0, N1, N->getFlags()))
2340 return RADD;
2341 }
2342 // fold ((0-A) + B) -> B-A
2343 if (N0.getOpcode() == ISD::SUB && isNullOrNullSplat(N0.getOperand(0)))
2344 return DAG.getNode(ISD::SUB, DL, VT, N1, N0.getOperand(1));
2345
2346 // fold (A + (0-B)) -> A-B
2347 if (N1.getOpcode() == ISD::SUB && isNullOrNullSplat(N1.getOperand(0)))
2348 return DAG.getNode(ISD::SUB, DL, VT, N0, N1.getOperand(1));
2349
2350 // fold (A+(B-A)) -> B
2351 if (N1.getOpcode() == ISD::SUB && N0 == N1.getOperand(1))
2352 return N1.getOperand(0);
2353
2354 // fold ((B-A)+A) -> B
2355 if (N0.getOpcode() == ISD::SUB && N1 == N0.getOperand(1))
2356 return N0.getOperand(0);
2357
2358 // fold ((A-B)+(C-A)) -> (C-B)
2359 if (N0.getOpcode() == ISD::SUB && N1.getOpcode() == ISD::SUB &&
2360 N0.getOperand(0) == N1.getOperand(1))
2361 return DAG.getNode(ISD::SUB, DL, VT, N1.getOperand(0),
2362 N0.getOperand(1));
2363
2364 // fold ((A-B)+(B-C)) -> (A-C)
2365 if (N0.getOpcode() == ISD::SUB && N1.getOpcode() == ISD::SUB &&
2366 N0.getOperand(1) == N1.getOperand(0))
2367 return DAG.getNode(ISD::SUB, DL, VT, N0.getOperand(0),
2368 N1.getOperand(1));
2369
2370 // fold (A+(B-(A+C))) to (B-C)
2371 if (N1.getOpcode() == ISD::SUB && N1.getOperand(1).getOpcode() == ISD::ADD &&
2372 N0 == N1.getOperand(1).getOperand(0))
2373 return DAG.getNode(ISD::SUB, DL, VT, N1.getOperand(0),
2374 N1.getOperand(1).getOperand(1));
2375
2376 // fold (A+(B-(C+A))) to (B-C)
2377 if (N1.getOpcode() == ISD::SUB && N1.getOperand(1).getOpcode() == ISD::ADD &&
2378 N0 == N1.getOperand(1).getOperand(1))
2379 return DAG.getNode(ISD::SUB, DL, VT, N1.getOperand(0),
2380 N1.getOperand(1).getOperand(0));
2381
2382 // fold (A+((B-A)+or-C)) to (B+or-C)
2383 if ((N1.getOpcode() == ISD::SUB || N1.getOpcode() == ISD::ADD) &&
2384 N1.getOperand(0).getOpcode() == ISD::SUB &&
2385 N0 == N1.getOperand(0).getOperand(1))
2386 return DAG.getNode(N1.getOpcode(), DL, VT, N1.getOperand(0).getOperand(0),
2387 N1.getOperand(1));
2388
2389 // fold (A-B)+(C-D) to (A+C)-(B+D) when A or C is constant
2390 if (N0.getOpcode() == ISD::SUB && N1.getOpcode() == ISD::SUB) {
2391 SDValue N00 = N0.getOperand(0);
2392 SDValue N01 = N0.getOperand(1);
2393 SDValue N10 = N1.getOperand(0);
2394 SDValue N11 = N1.getOperand(1);
2395
2396 if (isConstantOrConstantVector(N00) || isConstantOrConstantVector(N10))
2397 return DAG.getNode(ISD::SUB, DL, VT,
2398 DAG.getNode(ISD::ADD, SDLoc(N0), VT, N00, N10),
2399 DAG.getNode(ISD::ADD, SDLoc(N1), VT, N01, N11));
2400 }
2401
2402 // fold (add (umax X, C), -C) --> (usubsat X, C)
2403 if (N0.getOpcode() == ISD::UMAX && hasOperation(ISD::USUBSAT, VT)) {
2404 auto MatchUSUBSAT = [](ConstantSDNode *Max, ConstantSDNode *Op) {
2405 return (!Max && !Op) ||
2406 (Max && Op && Max->getAPIntValue() == (-Op->getAPIntValue()));
2407 };
2408 if (ISD::matchBinaryPredicate(N0.getOperand(1), N1, MatchUSUBSAT,
2409 /*AllowUndefs*/ true))
2410 return DAG.getNode(ISD::USUBSAT, DL, VT, N0.getOperand(0),
2411 N0.getOperand(1));
2412 }
2413
2414 if (SimplifyDemandedBits(SDValue(N, 0)))
2415 return SDValue(N, 0);
2416
2417 if (isOneOrOneSplat(N1)) {
2418 // fold (add (xor a, -1), 1) -> (sub 0, a)
2419 if (isBitwiseNot(N0))
2420 return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
2421 N0.getOperand(0));
2422
2423 // fold (add (add (xor a, -1), b), 1) -> (sub b, a)
2424 if (N0.getOpcode() == ISD::ADD ||
2425 N0.getOpcode() == ISD::UADDO ||
2426 N0.getOpcode() == ISD::SADDO) {
2427 SDValue A, Xor;
2428
2429 if (isBitwiseNot(N0.getOperand(0))) {
2430 A = N0.getOperand(1);
2431 Xor = N0.getOperand(0);
2432 } else if (isBitwiseNot(N0.getOperand(1))) {
2433 A = N0.getOperand(0);
2434 Xor = N0.getOperand(1);
2435 }
2436
2437 if (Xor)
2438 return DAG.getNode(ISD::SUB, DL, VT, A, Xor.getOperand(0));
2439 }
2440
2441 // Look for:
2442 // add (add x, y), 1
2443 // And if the target does not like this form then turn into:
2444 // sub y, (xor x, -1)
2445 if (!TLI.preferIncOfAddToSubOfNot(VT) && N0.hasOneUse() &&
2446 N0.getOpcode() == ISD::ADD) {
2447 SDValue Not = DAG.getNode(ISD::XOR, DL, VT, N0.getOperand(0),
2448 DAG.getAllOnesConstant(DL, VT));
2449 return DAG.getNode(ISD::SUB, DL, VT, N0.getOperand(1), Not);
2450 }
2451 }
2452
2453 // (x - y) + -1 -> add (xor y, -1), x
2454 if (N0.hasOneUse() && N0.getOpcode() == ISD::SUB &&
2455 isAllOnesOrAllOnesSplat(N1)) {
2456 SDValue Xor = DAG.getNode(ISD::XOR, DL, VT, N0.getOperand(1), N1);
2457 return DAG.getNode(ISD::ADD, DL, VT, Xor, N0.getOperand(0));
2458 }
2459
2460 if (SDValue Combined = visitADDLikeCommutative(N0, N1, N))
2461 return Combined;
2462
2463 if (SDValue Combined = visitADDLikeCommutative(N1, N0, N))
2464 return Combined;
2465
2466 return SDValue();
2467}
2468
2469SDValue DAGCombiner::visitADD(SDNode *N) {
2470 SDValue N0 = N->getOperand(0);
2471 SDValue N1 = N->getOperand(1);
2472 EVT VT = N0.getValueType();
2473 SDLoc DL(N);
2474
2475 if (SDValue Combined = visitADDLike(N))
2476 return Combined;
2477
2478 if (SDValue V = foldAddSubBoolOfMaskedVal(N, DAG))
2479 return V;
2480
2481 if (SDValue V = foldAddSubOfSignBit(N, DAG))
2482 return V;
2483
2484 // fold (a+b) -> (a|b) iff a and b share no bits.
2485 if ((!LegalOperations || TLI.isOperationLegal(ISD::OR, VT)) &&
2486 DAG.haveNoCommonBitsSet(N0, N1))
2487 return DAG.getNode(ISD::OR, DL, VT, N0, N1);
2488
2489 // Fold (add (vscale * C0), (vscale * C1)) to (vscale * (C0 + C1)).
2490 if (N0.getOpcode() == ISD::VSCALE && N1.getOpcode() == ISD::VSCALE) {
2491 const APInt &C0 = N0->getConstantOperandAPInt(0);
2492 const APInt &C1 = N1->getConstantOperandAPInt(0);
2493 return DAG.getVScale(DL, VT, C0 + C1);
2494 }
2495
2496 // fold a+vscale(c1)+vscale(c2) -> a+vscale(c1+c2)
2497 if ((N0.getOpcode() == ISD::ADD) &&
2498 (N0.getOperand(1).getOpcode() == ISD::VSCALE) &&
2499 (N1.getOpcode() == ISD::VSCALE)) {
2500 const APInt &VS0 = N0.getOperand(1)->getConstantOperandAPInt(0);
2501 const APInt &VS1 = N1->getConstantOperandAPInt(0);
2502 SDValue VS = DAG.getVScale(DL, VT, VS0 + VS1);
2503 return DAG.getNode(ISD::ADD, DL, VT, N0.getOperand(0), VS);
2504 }
2505
2506 // Fold (add step_vector(c1), step_vector(c2)) to step_vector(c1+c2)
2507 if (N0.getOpcode() == ISD::STEP_VECTOR &&
2508 N1.getOpcode() == ISD::STEP_VECTOR) {
2509 const APInt &C0 = N0->getConstantOperandAPInt(0);
2510 const APInt &C1 = N1->getConstantOperandAPInt(0);
2511 EVT SVT = N0.getOperand(0).getValueType();
2512 SDValue NewStep = DAG.getConstant(C0 + C1, DL, SVT);
2513 return DAG.getStepVector(DL, VT, NewStep);
2514 }
2515
2516 // Fold a + step_vector(c1) + step_vector(c2) to a + step_vector(c1+c2)
2517 if ((N0.getOpcode() == ISD::ADD) &&
2518 (N0.getOperand(1).getOpcode() == ISD::STEP_VECTOR) &&
2519 (N1.getOpcode() == ISD::STEP_VECTOR)) {
2520 const APInt &SV0 = N0.getOperand(1)->getConstantOperandAPInt(0);
2521 const APInt &SV1 = N1->getConstantOperandAPInt(0);
2522 EVT SVT = N1.getOperand(0).getValueType();
2523 assert(N1.getOperand(0).getValueType() ==
2524 N0.getOperand(1)->getOperand(0).getValueType() &&
2525 "Different operand types of STEP_VECTOR.");
2526 SDValue NewStep = DAG.getConstant(SV0 + SV1, DL, SVT);
2527 SDValue SV = DAG.getStepVector(DL, VT, NewStep);
2528 return DAG.getNode(ISD::ADD, DL, VT, N0.getOperand(0), SV);
2529 }
2530
2531 return SDValue();
2532}
2533
2534SDValue DAGCombiner::visitADDSAT(SDNode *N) {
2535 unsigned Opcode = N->getOpcode();
2536 SDValue N0 = N->getOperand(0);
2537 SDValue N1 = N->getOperand(1);
2538 EVT VT = N0.getValueType();
2539 SDLoc DL(N);
2540
2541 // fold vector ops
2542 if (VT.isVector()) {
2543 // TODO SimplifyVBinOp
2544
2545 // fold (add_sat x, 0) -> x, vector edition
2546 if (ISD::isConstantSplatVectorAllZeros(N1.getNode()))
2547 return N0;
2548 if (ISD::isConstantSplatVectorAllZeros(N0.getNode()))
2549 return N1;
2550 }
2551
2552 // fold (add_sat x, undef) -> -1
2553 if (N0.isUndef() || N1.isUndef())
2554 return DAG.getAllOnesConstant(DL, VT);
2555
2556 if (DAG.isConstantIntBuildVectorOrConstantInt(N0)) {
2557 // canonicalize constant to RHS
2558 if (!DAG.isConstantIntBuildVectorOrConstantInt(N1))
2559 return DAG.getNode(Opcode, DL, VT, N1, N0);
2560 // fold (add_sat c1, c2) -> c3
2561 return DAG.FoldConstantArithmetic(Opcode, DL, VT, {N0, N1});
2562 }
2563
2564 // fold (add_sat x, 0) -> x
2565 if (isNullConstant(N1))
2566 return N0;
2567
2568 // If it cannot overflow, transform into an add.
2569 if (Opcode == ISD::UADDSAT)
2570 if (DAG.computeOverflowKind(N0, N1) == SelectionDAG::OFK_Never)
2571 return DAG.getNode(ISD::ADD, DL, VT, N0, N1);
2572
2573 return SDValue();
2574}
2575
2576static SDValue getAsCarry(const TargetLowering &TLI, SDValue V) {
2577 bool Masked = false;
2578
2579 // First, peel away TRUNCATE/ZERO_EXTEND/AND nodes due to legalization.
2580 while (true) {
2581 if (V.getOpcode() == ISD::TRUNCATE || V.getOpcode() == ISD::ZERO_EXTEND) {
2582 V = V.getOperand(0);
2583 continue;
2584 }
2585
2586 if (V.getOpcode() == ISD::AND && isOneConstant(V.getOperand(1))) {
2587 Masked = true;
2588 V = V.getOperand(0);
2589 continue;
2590 }
2591
2592 break;
2593 }
2594
2595 // If this is not a carry, return.
2596 if (V.getResNo() != 1)
2597 return SDValue();
2598
2599 if (V.getOpcode() != ISD::ADDCARRY && V.getOpcode() != ISD::SUBCARRY &&
2600 V.getOpcode() != ISD::UADDO && V.getOpcode() != ISD::USUBO)
2601 return SDValue();
2602
2603 EVT VT = V.getNode()->getValueType(0);
2604 if (!TLI.isOperationLegalOrCustom(V.getOpcode(), VT))
2605 return SDValue();
2606
2607 // If the result is masked, we can return it regardless of the target's
2608 // boolean representation. If it isn't, we must make sure the boolean is
2609 // known to take only the values 0 and 1.
2610 if (Masked ||
2611 TLI.getBooleanContents(V.getValueType()) ==
2612 TargetLoweringBase::ZeroOrOneBooleanContent)
2613 return V;
2614
2615 return SDValue();
2616}
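// Example of the peeling above (a sketch; the DAG notation here is
// illustrative): for V = (and (zero_extend (uaddo A, B):1), 1), the loop
// strips the AND (setting Masked) and the ZERO_EXTEND, leaving the UADDO
// carry result (ResNo 1), which is then accepted regardless of the target's
// boolean contents because it was masked down to a single bit.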
2617
2618/// Given the operands of an add/sub operation, see if the 2nd operand is a
2619/// masked 0/1 whose source operand is actually known to be 0/-1. If so, invert
2620/// the opcode and bypass the mask operation.
2621static SDValue foldAddSubMasked1(bool IsAdd, SDValue N0, SDValue N1,
2622 SelectionDAG &DAG, const SDLoc &DL) {
2623 if (N1.getOpcode() != ISD::AND || !isOneOrOneSplat(N1->getOperand(1)))
2624 return SDValue();
2625
2626 EVT VT = N0.getValueType();
2627 if (DAG.ComputeNumSignBits(N1.getOperand(0)) != VT.getScalarSizeInBits())
2628 return SDValue();
2629
2630 // add N0, (and (AssertSext X, i1), 1) --> sub N0, X
2631 // sub N0, (and (AssertSext X, i1), 1) --> add N0, X
2632 return DAG.getNode(IsAdd ? ISD::SUB : ISD::ADD, DL, VT, N0, N1.getOperand(0));
2633}
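// Sanity check of the bypass above (an illustrative sketch): when X is known
// to be all sign bits, i.e. 0 or -1, the masked value (X & 1) equals -X, so
// N0 + (X & 1) == N0 - X and N0 - (X & 1) == N0 + X.
static_assert((-1 & 1) == -(-1) && (0 & 1) == -0,
              "(X & 1) == -X for X in {0, -1}");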
2634
2635/// Helper for doing combines based on N0 and N1 being added to each other.
2636SDValue DAGCombiner::visitADDLikeCommutative(SDValue N0, SDValue N1,
2637 SDNode *LocReference) {
2638 EVT VT = N0.getValueType();
2639 SDLoc DL(LocReference);
2640
2641 // fold (add x, shl(0 - y, n)) -> sub(x, shl(y, n))
2642 if (N1.getOpcode() == ISD::SHL && N1.getOperand(0).getOpcode() == ISD::SUB &&
2643 isNullOrNullSplat(N1.getOperand(0).getOperand(0)))
2644 return DAG.getNode(ISD::SUB, DL, VT, N0,
2645 DAG.getNode(ISD::SHL, DL, VT,
2646 N1.getOperand(0).getOperand(1),
2647 N1.getOperand(1)));
2648
2649 if (SDValue V = foldAddSubMasked1(true, N0, N1, DAG, DL))
2650 return V;
2651
2652 // Look for:
2653 // add (add x, 1), y
2654 // And if the target does not like this form then turn into:
2655 // sub y, (xor x, -1)
2656 if (!TLI.preferIncOfAddToSubOfNot(VT) && N0.hasOneUse() &&
2657 N0.getOpcode() == ISD::ADD && isOneOrOneSplat(N0.getOperand(1))) {
2658 SDValue Not = DAG.getNode(ISD::XOR, DL, VT, N0.getOperand(0),
2659 DAG.getAllOnesConstant(DL, VT));
2660 return DAG.getNode(ISD::SUB, DL, VT, N1, Not);
2661 }
2662
2663 // Hoist one-use subtraction by non-opaque constant:
2664 // (x - C) + y -> (x + y) - C
2665 // This is necessary because SUB(X,C) -> ADD(X,-C) doesn't work for vectors.
2666 if (N0.hasOneUse() && N0.getOpcode() == ISD::SUB &&
2667 isConstantOrConstantVector(N0.getOperand(1), /*NoOpaques=*/true)) {
2668 SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0.getOperand(0), N1);
2669 return DAG.getNode(ISD::SUB, DL, VT, Add, N0.getOperand(1));
2670 }
2671 // Hoist one-use subtraction from non-opaque constant:
2672 // (C - x) + y -> (y - x) + C
2673 if (N0.hasOneUse() && N0.getOpcode() == ISD::SUB &&
2674 isConstantOrConstantVector(N0.getOperand(0), /*NoOpaques=*/true)) {
2675 SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, N1, N0.getOperand(1));
2676 return DAG.getNode(ISD::ADD, DL, VT, Sub, N0.getOperand(0));
2677 }
2678
2679 // If the target's bool is represented as 0/1, prefer to make this 'sub 0/1'
2680 // rather than 'add 0/-1' (the zext should get folded).
2681 // add (sext i1 Y), X --> sub X, (zext i1 Y)
2682 if (N0.getOpcode() == ISD::SIGN_EXTEND &&
2683 N0.getOperand(0).getScalarValueSizeInBits() == 1 &&
2684 TLI.getBooleanContents(VT) == TargetLowering::ZeroOrOneBooleanContent) {
2685 SDValue ZExt = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0.getOperand(0));
2686 return DAG.getNode(ISD::SUB, DL, VT, N1, ZExt);
2687 }
2688
2689 // add X, (sextinreg Y i1) -> sub X, (and Y 1)
2690 if (N1.getOpcode() == ISD::SIGN_EXTEND_INREG) {
2691 VTSDNode *TN = cast<VTSDNode>(N1.getOperand(1));
2692 if (TN->getVT() == MVT::i1) {
2693 SDValue ZExt = DAG.getNode(ISD::AND, DL, VT, N1.getOperand(0),
2694 DAG.getConstant(1, DL, VT));
2695 return DAG.getNode(ISD::SUB, DL, VT, N0, ZExt);
2696 }
2697 }
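// The sext_inreg fold above holds because sext_inreg(Y, i1) is -(Y & 1):
// it is -1 when the low bit of Y is set and 0 otherwise, so
// X + sext_inreg(Y, i1) == X - (Y & 1).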
2698
2699 // (add X, (addcarry Y, 0, Carry)) -> (addcarry X, Y, Carry)
2700 if (N1.getOpcode() == ISD::ADDCARRY && isNullConstant(N1.getOperand(1)) &&
2701 N1.getResNo() == 0)
2702 return DAG.getNode(ISD::ADDCARRY, DL, N1->getVTList(),
2703 N0, N1.getOperand(0), N1.getOperand(2));
2704
2705 // (add X, Carry) -> (addcarry X, 0, Carry)
2706 if (TLI.isOperationLegalOrCustom(ISD::ADDCARRY, VT))
2707 if (SDValue Carry = getAsCarry(TLI, N1))
2708 return DAG.getNode(ISD::ADDCARRY, DL,
2709 DAG.getVTList(VT, Carry.getValueType()), N0,
2710 DAG.getConstant(0, DL, VT), Carry);
2711
2712 return SDValue();
2713}
2714
2715SDValue DAGCombiner::visitADDC(SDNode *N) {
2716 SDValue N0 = N->getOperand(0);
2717 SDValue N1 = N->getOperand(1);
2718 EVT VT = N0.getValueType();
2719 SDLoc DL(N);
2720
2721 // If the flag result is dead, turn this into an ADD.
2722 if (!N->hasAnyUseOfValue(1))
2723 return CombineTo(N, DAG.getNode(ISD::ADD, DL, VT, N0, N1),
2724 DAG.getNode(ISD::CARRY_FALSE, DL, MVT::Glue));
2725
2726 // canonicalize constant to RHS.
2727 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
2728 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
2729 if (N0C && !N1C)
2730 return DAG.getNode(ISD::ADDC, DL, N->getVTList(), N1, N0);
2731
2732 // fold (addc x, 0) -> x + no carry out
2733 if (isNullConstant(N1))
2734 return CombineTo(N, N0, DAG.getNode(ISD::CARRY_FALSE,
2735 DL, MVT::Glue));
2736
2737 // If it cannot overflow, transform into an add.
2738 if (DAG.computeOverflowKind(N0, N1) == SelectionDAG::OFK_Never)
2739 return CombineTo(N, DAG.getNode(ISD::ADD, DL, VT, N0, N1),
2740 DAG.getNode(ISD::CARRY_FALSE, DL, MVT::Glue));
2741
2742 return SDValue();
2743}
2744
2745/**
2746 * Flips a boolean if it is cheaper to compute. If the Force parameter is
2747 * set, the flip also occurs when computing the inverse has the same cost.
2748 * This function returns an empty SDValue in case it cannot flip the boolean
2749 * without increasing the cost of the computation. If you want to flip a boolean
2750 * no matter what, use DAG.getLogicalNOT.
2751 */
2752static SDValue extractBooleanFlip(SDValue V, SelectionDAG &DAG,
2753 const TargetLowering &TLI,
2754 bool Force) {
2755 if (Force && isa<ConstantSDNode>(V))
2756 return DAG.getLogicalNOT(SDLoc(V), V, V.getValueType());
2757
2758 if (V.getOpcode() != ISD::XOR)
2759 return SDValue();
2760
2761 ConstantSDNode *Const = isConstOrConstSplat(V.getOperand(1), false);
2762 if (!Const)
2763 return SDValue();
2764
2765 EVT VT = V.getValueType();
2766
2767 bool IsFlip = false;
2768 switch(TLI.getBooleanContents(VT)) {
2769 case TargetLowering::ZeroOrOneBooleanContent:
2770 IsFlip = Const->isOne();
2771 break;
2772 case TargetLowering::ZeroOrNegativeOneBooleanContent:
2773 IsFlip = Const->isAllOnesValue();
2774 break;
2775 case TargetLowering::UndefinedBooleanContent:
2776 IsFlip = (Const->getAPIntValue() & 0x01) == 1;
2777 break;
2778 }
2779
2780 if (IsFlip)
2781 return V.getOperand(0);
2782 if (Force)
2783 return DAG.getLogicalNOT(SDLoc(V), V, V.getValueType());
2784 return SDValue();
2785}
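// Illustrative example (a sketch): with ZeroOrOneBooleanContent a flipped
// boolean is computed as (xor b, 1), so for V == (xor b, 1) the function
// returns b, the un-flipped value; with ZeroOrNegativeOneBooleanContent the
// same applies to (xor b, -1).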
2786
2787SDValue DAGCombiner::visitADDO(SDNode *N) {
2788 SDValue N0 = N->getOperand(0);
2789 SDValue N1 = N->getOperand(1);
2790 EVT VT = N0.getValueType();
2791 bool IsSigned = (ISD::SADDO == N->getOpcode());
2792
2793 EVT CarryVT = N->getValueType(1);
2794 SDLoc DL(N);
2795
2796 // If the flag result is dead, turn this into an ADD.
2797 if (!N->hasAnyUseOfValue(1))
2798 return CombineTo(N, DAG.getNode(ISD::ADD, DL, VT, N0, N1),
2799 DAG.getUNDEF(CarryVT));
2800
2801 // canonicalize constant to RHS.
2802 if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
2803 !DAG.isConstantIntBuildVectorOrConstantInt(N1))
2804 return DAG.getNode(N->getOpcode(), DL, N->getVTList(), N1, N0);
2805
2806 // fold (addo x, 0) -> x + no carry out
2807 if (isNullOrNullSplat(N1))
2808 return CombineTo(N, N0, DAG.getConstant(0, DL, CarryVT));
2809
2810 if (!IsSigned) {
2811 // If it cannot overflow, transform into an add.
2812 if (DAG.computeOverflowKind(N0, N1) == SelectionDAG::OFK_Never)
2813 return CombineTo(N, DAG.getNode(ISD::ADD, DL, VT, N0, N1),
2814 DAG.getConstant(0, DL, CarryVT));
2815
2816 // fold (uaddo (xor a, -1), 1) -> (usub 0, a) and flip carry.
2817 if (isBitwiseNot(N0) && isOneOrOneSplat(N1)) {
2818 SDValue Sub = DAG.getNode(ISD::USUBO, DL, N->getVTList(),
2819 DAG.getConstant(0, DL, VT), N0.getOperand(0));
2820 return CombineTo(
2821 N, Sub, DAG.getLogicalNOT(DL, Sub.getValue(1), Sub->getValueType(1)));
2822 }
2823
2824 if (SDValue Combined = visitUADDOLike(N0, N1, N))
2825 return Combined;
2826
2827 if (SDValue Combined = visitUADDOLike(N1, N0, N))
2828 return Combined;
2829 }
2830
2831 return SDValue();
2832}
2833
2834SDValue DAGCombiner::visitUADDOLike(SDValue N0, SDValue N1, SDNode *N) {
2835 EVT VT = N0.getValueType();
2836 if (VT.isVector())
2837 return SDValue();
2838
2839 // (uaddo X, (addcarry Y, 0, Carry)) -> (addcarry X, Y, Carry)
2840 // If Y + 1 cannot overflow.
2841 if (N1.getOpcode() == ISD::ADDCARRY && isNullConstant(N1.getOperand(1))) {
2842 SDValue Y = N1.getOperand(0);
2843 SDValue One = DAG.getConstant(1, SDLoc(N), Y.getValueType());
2844 if (DAG.computeOverflowKind(Y, One) == SelectionDAG::OFK_Never)
2845 return DAG.getNode(ISD::ADDCARRY, SDLoc(N), N->getVTList(), N0, Y,
2846 N1.getOperand(2));
2847 }
2848
2849 // (uaddo X, Carry) -> (addcarry X, 0, Carry)
2850 if (TLI.isOperationLegalOrCustom(ISD::ADDCARRY, VT))
2851 if (SDValue Carry = getAsCarry(TLI, N1))
2852 return DAG.getNode(ISD::ADDCARRY, SDLoc(N), N->getVTList(), N0,
2853 DAG.getConstant(0, SDLoc(N), VT), Carry);
2854
2855 return SDValue();
2856}
2857
2858SDValue DAGCombiner::visitADDE(SDNode *N) {
2859 SDValue N0 = N->getOperand(0);
2860 SDValue N1 = N->getOperand(1);
2861 SDValue CarryIn = N->getOperand(2);
2862
2863 // canonicalize constant to RHS
2864 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
2865 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
2866 if (N0C && !N1C)
2867 return DAG.getNode(ISD::ADDE, SDLoc(N), N->getVTList(),
2868 N1, N0, CarryIn);
2869
2870 // fold (adde x, y, false) -> (addc x, y)
2871 if (CarryIn.getOpcode() == ISD::CARRY_FALSE)
2872 return DAG.getNode(ISD::ADDC, SDLoc(N), N->getVTList(), N0, N1);
2873
2874 return SDValue();
2875}
2876
2877SDValue DAGCombiner::visitADDCARRY(SDNode *N) {
2878 SDValue N0 = N->getOperand(0);
2879 SDValue N1 = N->getOperand(1);
2880 SDValue CarryIn = N->getOperand(2);
2881 SDLoc DL(N);
2882
2883 // canonicalize constant to RHS
2884 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
2885 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
2886 if (N0C && !N1C)
2887 return DAG.getNode(ISD::ADDCARRY, DL, N->getVTList(), N1, N0, CarryIn);
2888
2889 // fold (addcarry x, y, false) -> (uaddo x, y)
2890 if (isNullConstant(CarryIn)) {
2891 if (!LegalOperations ||
2892 TLI.isOperationLegalOrCustom(ISD::UADDO, N->getValueType(0)))
2893 return DAG.getNode(ISD::UADDO, DL, N->getVTList(), N0, N1);
2894 }
2895
2896 // fold (addcarry 0, 0, X) -> (and (ext/trunc X), 1) and no carry.
2897 if (isNullConstant(N0) && isNullConstant(N1)) {
2898 EVT VT = N0.getValueType();
2899 EVT CarryVT = CarryIn.getValueType();
2900 SDValue CarryExt = DAG.getBoolExtOrTrunc(CarryIn, DL, VT, CarryVT);
2901 AddToWorklist(CarryExt.getNode());
2902 return CombineTo(N, DAG.getNode(ISD::AND, DL, VT, CarryExt,
2903 DAG.getConstant(1, DL, VT)),
2904 DAG.getConstant(0, DL, CarryVT));
2905 }
2906
2907 if (SDValue Combined = visitADDCARRYLike(N0, N1, CarryIn, N))
2908 return Combined;
2909
2910 if (SDValue Combined = visitADDCARRYLike(N1, N0, CarryIn, N))
2911 return Combined;
2912
2913 return SDValue();
2914}
2915
2916SDValue DAGCombiner::visitSADDO_CARRY(SDNode *N) {
2917 SDValue N0 = N->getOperand(0);
2918 SDValue N1 = N->getOperand(1);
2919 SDValue CarryIn = N->getOperand(2);
2920 SDLoc DL(N);
2921
2922 // canonicalize constant to RHS
2923 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
2924 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
2925 if (N0C && !N1C)
2926 return DAG.getNode(ISD::SADDO_CARRY, DL, N->getVTList(), N1, N0, CarryIn);
2927
2928 // fold (saddo_carry x, y, false) -> (saddo x, y)
2929 if (isNullConstant(CarryIn)) {
2930 if (!LegalOperations ||
2931 TLI.isOperationLegalOrCustom(ISD::SADDO, N->getValueType(0)))
2932 return DAG.getNode(ISD::SADDO, DL, N->getVTList(), N0, N1);
2933 }
2934
2935 return SDValue();
2936}
2937
2938/**
2939 * If we are facing some sort of diamond carry propagation pattern, try to
2940 * break it up to generate something like:
2941 * (addcarry X, 0, (addcarry A, B, Z):Carry)
2942 *
2943 * The end result is usually an increase in the number of operations required, but because
2944 * the carry is now linearized, other transforms can kick in and optimize the DAG.
2945 *
2946 * Patterns typically look something like
2947 * (uaddo A, B)
2948 * / \
2949 * Carry Sum
2950 * | \
2951 * | (addcarry *, 0, Z)
2952 * | /
2953 * \ Carry
2954 * | /
2955 * (addcarry X, *, *)
2956 *
2957 * But numerous variations exist. Our goal is to identify A, B, X and Z and
2958 * produce a combine with a single path for carry propagation.
2959 */
2960static SDValue combineADDCARRYDiamond(DAGCombiner &Combiner, SelectionDAG &DAG,
2961 SDValue X, SDValue Carry0, SDValue Carry1,
2962 SDNode *N) {
2963 if (Carry1.getResNo() != 1 || Carry0.getResNo() != 1)
2964 return SDValue();
2965 if (Carry1.getOpcode() != ISD::UADDO)
2966 return SDValue();
2967
2968 SDValue Z;
2969
2970 /**
2971 * First look for a suitable Z. It will present itself in the form of
2972 * (addcarry Y, 0, Z) or its equivalent (uaddo Y, 1) for Z=true
2973 */
2974 if (Carry0.getOpcode() == ISD::ADDCARRY &&
2975 isNullConstant(Carry0.getOperand(1))) {
2976 Z = Carry0.getOperand(2);
2977 } else if (Carry0.getOpcode() == ISD::UADDO &&
2978 isOneConstant(Carry0.getOperand(1))) {
2979 EVT VT = Combiner.getSetCCResultType(Carry0.getValueType());
2980 Z = DAG.getConstant(1, SDLoc(Carry0.getOperand(1)), VT);
2981 } else {
2982 // We couldn't find a suitable Z.
2983 return SDValue();
2984 }
2985
2986
2987 auto cancelDiamond = [&](SDValue A,SDValue B) {
2988 SDLoc DL(N);
2989 SDValue NewY = DAG.getNode(ISD::ADDCARRY, DL, Carry0->getVTList(), A, B, Z);
2990 Combiner.AddToWorklist(NewY.getNode());
2991 return DAG.getNode(ISD::ADDCARRY, DL, N->getVTList(), X,
2992 DAG.getConstant(0, DL, X.getValueType()),
2993 NewY.getValue(1));
2994 };
2995
2996 /**
2997 * (uaddo A, B)
2998 * |
2999 * Sum
3000 * |
3001 * (addcarry *, 0, Z)
3002 */
3003 if (Carry0.getOperand(0) == Carry1.getValue(0)) {
3004 return cancelDiamond(Carry1.getOperand(0), Carry1.getOperand(1));
3005 }
3006
3007 /**
3008 * (addcarry A, 0, Z)
3009 * |
3010 * Sum
3011 * |
3012 * (uaddo *, B)
3013 */
3014 if (Carry1.getOperand(0) == Carry0.getValue(0)) {
3015 return cancelDiamond(Carry0.getOperand(0), Carry1.getOperand(1));
3016 }
3017
3018 if (Carry1.getOperand(1) == Carry0.getValue(0)) {
3019 return cancelDiamond(Carry1.getOperand(0), Carry0.getOperand(0));
3020 }
3021
3022 return SDValue();
3023}
3024
3025 // If we are facing some sort of diamond carry/borrow in/out pattern, try to
3026// match patterns like:
3027//
3028// (uaddo A, B) CarryIn
3029// | \ |
3030// | \ |
3031// PartialSum PartialCarryOutX /
3032// | | /
3033// | ____|____________/
3034// | / |
3035// (uaddo *, *) \________
3036// | \ \
3037// | \ |
3038// | PartialCarryOutY |
3039// | \ |
3040// | \ /
3041// AddCarrySum | ______/
3042// | /
3043// CarryOut = (or *, *)
3044//
3045// And generate ADDCARRY (or SUBCARRY) with two result values:
3046//
3047// {AddCarrySum, CarryOut} = (addcarry A, B, CarryIn)
3048//
3049// Our goal is to identify A, B, and CarryIn and produce ADDCARRY/SUBCARRY with
3050// a single path for carry/borrow out propagation:
3051static SDValue combineCarryDiamond(DAGCombiner &Combiner, SelectionDAG &DAG,
3052 const TargetLowering &TLI, SDValue Carry0,
3053 SDValue Carry1, SDNode *N) {
3054 if (Carry0.getResNo() != 1 || Carry1.getResNo() != 1)
3055 return SDValue();
3056 unsigned Opcode = Carry0.getOpcode();
3057 if (Opcode != Carry1.getOpcode())
3058 return SDValue();
3059 if (Opcode != ISD::UADDO && Opcode != ISD::USUBO)
3060 return SDValue();
3061
3062 // Canonicalize the add/sub of A and B as Carry0 and the add/sub of the
3063 // carry/borrow in as Carry1. (The top and middle uaddo nodes respectively in
3064 // the above ASCII art.)
3065 if (Carry1.getOperand(0) != Carry0.getValue(0) &&
3066 Carry1.getOperand(1) != Carry0.getValue(0))
3067 std::swap(Carry0, Carry1);
3068 if (Carry1.getOperand(0) != Carry0.getValue(0) &&
3069 Carry1.getOperand(1) != Carry0.getValue(0))
3070 return SDValue();
3071
3072 // The carry-in value must be on the right-hand side for subtraction.
3073 unsigned CarryInOperandNum =
3074 Carry1.getOperand(0) == Carry0.getValue(0) ? 1 : 0;
3075 if (Opcode == ISD::USUBO && CarryInOperandNum != 1)
3076 return SDValue();
3077 SDValue CarryIn = Carry1.getOperand(CarryInOperandNum);
3078
3079 unsigned NewOp = Opcode == ISD::UADDO ? ISD::ADDCARRY : ISD::SUBCARRY;
3080 if (!TLI.isOperationLegalOrCustom(NewOp, Carry0.getValue(0).getValueType()))
3081 return SDValue();
3082
3083 // Verify that the carry/borrow in is plausibly a carry/borrow bit.
3084 // TODO: make getAsCarry() aware of how partial carries are merged.
3085 if (CarryIn.getOpcode() != ISD::ZERO_EXTEND)
3086 return SDValue();
3087 CarryIn = CarryIn.getOperand(0);
3088 if (CarryIn.getValueType() != MVT::i1)
3089 return SDValue();
3090
3091 SDLoc DL(N);
3092 SDValue Merged =
3093 DAG.getNode(NewOp, DL, Carry1->getVTList(), Carry0.getOperand(0),
3094 Carry0.getOperand(1), CarryIn);
3095
3096 // Note that because we have proven that the result of the UADDO/USUBO of A
3097 // and B feeds into the UADDO/USUBO that consumes the carry/borrow in, we
3098 // can prove that if the first UADDO/USUBO overflows, the second
3099 // UADDO/USUBO cannot. For example, consider 8-bit numbers where 0xFF is the
3100 // maximum value.
3101 //
3102 // 0xFF + 0xFF == 0xFE with carry but 0xFE + 1 does not carry
3103 // 0x00 - 0xFF == 1 with a carry/borrow but 1 - 1 == 0 (no carry/borrow)
3104 //
3105 // This is important because it means that OR and XOR can be used to merge
3106 // carry flags; and that AND can return a constant zero.
3107 //
3108 // TODO: match other operations that can merge flags (ADD, etc)
3109 DAG.ReplaceAllUsesOfValueWith(Carry1.getValue(0), Merged.getValue(0));
3110 if (N->getOpcode() == ISD::AND)
3111 return DAG.getConstant(0, DL, MVT::i1);
3112 return Merged.getValue(1);
3113}
3114
3115SDValue DAGCombiner::visitADDCARRYLike(SDValue N0, SDValue N1, SDValue CarryIn,
3116 SDNode *N) {
3117 // fold (addcarry (xor a, -1), b, c) -> (subcarry b, a, !c) and flip carry.
3118 if (isBitwiseNot(N0))
3119 if (SDValue NotC = extractBooleanFlip(CarryIn, DAG, TLI, true)) {
3120 SDLoc DL(N);
3121 SDValue Sub = DAG.getNode(ISD::SUBCARRY, DL, N->getVTList(), N1,
3122 N0.getOperand(0), NotC);
3123 return CombineTo(
3124 N, Sub, DAG.getLogicalNOT(DL, Sub.getValue(1), Sub->getValueType(1)));
3125 }
3126
3127 // Iff the flag result is dead:
3128 // (addcarry (add|uaddo X, Y), 0, Carry) -> (addcarry X, Y, Carry)
3129 // Don't do this if the Carry comes from the uaddo. It won't remove the uaddo
3130 // or the dependency between the instructions.
3131 if ((N0.getOpcode() == ISD::ADD ||
3132 (N0.getOpcode() == ISD::UADDO && N0.getResNo() == 0 &&
3133 N0.getValue(1) != CarryIn)) &&
3134 isNullConstant(N1) && !N->hasAnyUseOfValue(1))
3135 return DAG.getNode(ISD::ADDCARRY, SDLoc(N), N->getVTList(),
3136 N0.getOperand(0), N0.getOperand(1), CarryIn);
3137
3138 /**
3139 * When one of the addcarry arguments is itself a carry, we may be facing
3140 * a diamond carry propagation, in which case we try to transform the DAG
3141 * to ensure linear carry propagation if that is possible.
3142 */
3143 if (auto Y = getAsCarry(TLI, N1)) {
3144 // Because both are carries, Y and Z can be swapped.
3145 if (auto R = combineADDCARRYDiamond(*this, DAG, N0, Y, CarryIn, N))
3146 return R;
3147 if (auto R = combineADDCARRYDiamond(*this, DAG, N0, CarryIn, Y, N))
3148 return R;
3149 }
3150
3151 return SDValue();
3152}
3153
3154// Attempt to create a USUBSAT(LHS, RHS) node with DstVT, performing a
3155// clamp/truncation if necessary.
3156static SDValue getTruncatedUSUBSAT(EVT DstVT, EVT SrcVT, SDValue LHS,
3157 SDValue RHS, SelectionDAG &DAG,
3158 const SDLoc &DL) {
3159 assert(DstVT.getScalarSizeInBits() <= SrcVT.getScalarSizeInBits() &&
3160 "Illegal truncation");
3161
3162 if (DstVT == SrcVT)
3163 return DAG.getNode(ISD::USUBSAT, DL, DstVT, LHS, RHS);
3164
3165 // If the LHS is zero-extended then we can perform the USUBSAT as DstVT by
3166 // clamping RHS.
3167 APInt UpperBits = APInt::getBitsSetFrom(SrcVT.getScalarSizeInBits(),
3168 DstVT.getScalarSizeInBits());
3169 if (!DAG.MaskedValueIsZero(LHS, UpperBits))
3170 return SDValue();
3171
3172 SDValue SatLimit =
3173 DAG.getConstant(APInt::getLowBitsSet(SrcVT.getScalarSizeInBits(),
3174 DstVT.getScalarSizeInBits()),
3175 DL, SrcVT);
3176 RHS = DAG.getNode(ISD::UMIN, DL, SrcVT, RHS, SatLimit);
3177 RHS = DAG.getNode(ISD::TRUNCATE, DL, DstVT, RHS);
3178 LHS = DAG.getNode(ISD::TRUNCATE, DL, DstVT, LHS);
3179 return DAG.getNode(ISD::USUBSAT, DL, DstVT, LHS, RHS);
3180}
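// Worked i16 -> i8 example of the clamp above (a sketch): SatLimit == 0xFF.
// With LHS = 0x00F0 (upper bits known zero) and RHS = 0x0123, the clamped
// and truncated RHS is 0xFF and usubsat.i8(0xF0, 0xFF) == 0, matching
// trunc(usubsat.i16(0x00F0, 0x0123)); with RHS = 0x0010,
// usubsat.i8(0xF0, 0x10) == 0xE0 matches the full-width result as well.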
3181
3182// Try to find umax(a,b) - b or a - umin(a,b) patterns that may be converted to
3183// usubsat(a,b), optionally as a truncated type.
3184SDValue DAGCombiner::foldSubToUSubSat(EVT DstVT, SDNode *N) {
3185 if (N->getOpcode() != ISD::SUB ||
3186 !(!LegalOperations || hasOperation(ISD::USUBSAT, DstVT)))
3187 return SDValue();
3188
3189 EVT SubVT = N->getValueType(0);
3190 SDValue Op0 = N->getOperand(0);
3191 SDValue Op1 = N->getOperand(1);
3192
3193 // Try to find umax(a,b) - b or a - umin(a,b) patterns;
3194 // they may be converted to usubsat(a,b).
3195 if (Op0.getOpcode() == ISD::UMAX) {
3196 SDValue MaxLHS = Op0.getOperand(0);
3197 SDValue MaxRHS = Op0.getOperand(1);
3198 if (MaxLHS == Op1)
3199 return getTruncatedUSUBSAT(DstVT, SubVT, MaxRHS, Op1, DAG, SDLoc(N));
3200 if (MaxRHS == Op1)
3201 return getTruncatedUSUBSAT(DstVT, SubVT, MaxLHS, Op1, DAG, SDLoc(N));
3202 }
3203
3204 if (Op1.getOpcode() == ISD::UMIN) {
3205 SDValue MinLHS = Op1.getOperand(0);
3206 SDValue MinRHS = Op1.getOperand(1);
3207 if (MinLHS == Op0)
3208 return getTruncatedUSUBSAT(DstVT, SubVT, Op0, MinRHS, DAG, SDLoc(N));
3209 if (MinRHS == Op0)
3210 return getTruncatedUSUBSAT(DstVT, SubVT, Op0, MinLHS, DAG, SDLoc(N));
3211 }
3212
3213 // sub(a,trunc(umin(zext(a),b))) -> usubsat(a,trunc(umin(b,SatLimit)))
3214 if (Op1.getOpcode() == ISD::TRUNCATE &&
3215 Op1.getOperand(0).getOpcode() == ISD::UMIN) {
3216 SDValue MinLHS = Op1.getOperand(0).getOperand(0);
3217 SDValue MinRHS = Op1.getOperand(0).getOperand(1);
3218 if (MinLHS.getOpcode() == ISD::ZERO_EXTEND && MinLHS.getOperand(0) == Op0)
3219 return getTruncatedUSUBSAT(DstVT, MinLHS.getValueType(), MinLHS, MinRHS,
3220 DAG, SDLoc(N));
3221 if (MinRHS.getOpcode() == ISD::ZERO_EXTEND && MinRHS.getOperand(0) == Op0)
3222 return getTruncatedUSUBSAT(DstVT, MinLHS.getValueType(), MinRHS, MinLHS,
3223 DAG, SDLoc(N));
3224 }
3225
3226 return SDValue();
3227}
3228
3229 // Since it may not be valid to emit a fold to zero for vector initializers,
3230 // check that we can before folding.
3231static SDValue tryFoldToZero(const SDLoc &DL, const TargetLowering &TLI, EVT VT,
3232 SelectionDAG &DAG, bool LegalOperations) {
3233 if (!VT.isVector())
3234 return DAG.getConstant(0, DL, VT);
3235 if (!LegalOperations || TLI.isOperationLegal(ISD::BUILD_VECTOR, VT))
3236 return DAG.getConstant(0, DL, VT);
3237 return SDValue();
3238}
3239
3240SDValue DAGCombiner::visitSUB(SDNode *N) {
3241 SDValue N0 = N->getOperand(0);
3242 SDValue N1 = N->getOperand(1);
3243 EVT VT = N0.getValueType();
3244 SDLoc DL(N);
3245
3246 // fold vector ops
3247 if (VT.isVector()) {
3248 if (SDValue FoldedVOp = SimplifyVBinOp(N))
3249 return FoldedVOp;
3250
3251 // fold (sub x, 0) -> x, vector edition
3252 if (ISD::isConstantSplatVectorAllZeros(N1.getNode()))
3253 return N0;
3254 }
3255
3256 // fold (sub x, x) -> 0
3257 // FIXME: Refactor this and xor and other similar operations together.
3258 if (N0 == N1)
3259 return tryFoldToZero(DL, TLI, VT, DAG, LegalOperations);
3260
3261 // fold (sub c1, c2) -> c3
3262 if (SDValue C = DAG.FoldConstantArithmetic(ISD::SUB, DL, VT, {N0, N1}))
3263 return C;
3264
3265 if (SDValue NewSel = foldBinOpIntoSelect(N))
3266 return NewSel;
3267
3268 ConstantSDNode *N1C = getAsNonOpaqueConstant(N1);
3269
3270 // fold (sub x, c) -> (add x, -c)
3271 if (N1C) {
3272 return DAG.getNode(ISD::ADD, DL, VT, N0,
3273 DAG.getConstant(-N1C->getAPIntValue(), DL, VT));
3274 }
3275
3276 if (isNullOrNullSplat(N0)) {
3277 unsigned BitWidth = VT.getScalarSizeInBits();
3278 // Right-shifting everything out but the sign bit followed by negation is
3279 // the same as flipping arithmetic/logical shift type without the negation:
3280 // -(X >>u 31) -> (X >>s 31)
3281 // -(X >>s 31) -> (X >>u 31)
3282 if (N1->getOpcode() == ISD::SRA || N1->getOpcode() == ISD::SRL) {
3283 ConstantSDNode *ShiftAmt = isConstOrConstSplat(N1.getOperand(1));
3284 if (ShiftAmt && ShiftAmt->getAPIntValue() == (BitWidth - 1)) {
3285 auto NewSh = N1->getOpcode() == ISD::SRA ? ISD::SRL : ISD::SRA;
3286 if (!LegalOperations || TLI.isOperationLegal(NewSh, VT))
3287 return DAG.getNode(NewSh, DL, VT, N1.getOperand(0), N1.getOperand(1));
3288 }
3289 }
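// Illustrative 32-bit example: (X >>u 31) is 0 or 1, so 0 - (X >>u 31) is
// 0 or -1, which is exactly (X >>s 31); the negation is thus absorbed by
// flipping the shift type (and symmetrically for the SRA case).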
3290
3291 // 0 - X --> 0 if the sub is NUW.
3292 if (N->getFlags().hasNoUnsignedWrap())
3293 return N0;
3294
3295 if (DAG.MaskedValueIsZero(N1, ~APInt::getSignMask(BitWidth))) {
3296 // N1 is either 0 or the minimum signed value. If the sub is NSW, then
3297 // N1 must be 0 because negating the minimum signed value is undefined.
3298 if (N->getFlags().hasNoSignedWrap())
3299 return N0;
3300
3301 // 0 - X --> X if X is 0 or the minimum signed value.
3302 return N1;
3303 }
3304
3305 // Convert 0 - abs(x).
3306 SDValue Result;
3307 if (N1->getOpcode() == ISD::ABS &&
3308 !TLI.isOperationLegalOrCustom(ISD::ABS, VT) &&
3309 TLI.expandABS(N1.getNode(), Result, DAG, true))
3310 return Result;
3311 }
3312
3313 // Canonicalize (sub -1, x) -> ~x, i.e. (xor x, -1)
3314 if (isAllOnesOrAllOnesSplat(N0))
3315 return DAG.getNode(ISD::XOR, DL, VT, N1, N0);
3316
3317 // fold (A - (0-B)) -> A+B
3318 if (N1.getOpcode() == ISD::SUB && isNullOrNullSplat(N1.getOperand(0)))
3319 return DAG.getNode(ISD::ADD, DL, VT, N0, N1.getOperand(1));
3320
3321 // fold A-(A-B) -> B
3322 if (N1.getOpcode() == ISD::SUB && N0 == N1.getOperand(0))
3323 return N1.getOperand(1);
3324
3325 // fold (A+B)-A -> B
3326 if (N0.getOpcode() == ISD::ADD && N0.getOperand(0) == N1)
3327 return N0.getOperand(1);
3328
3329 // fold (A+B)-B -> A
3330 if (N0.getOpcode() == ISD::ADD && N0.getOperand(1) == N1)
3331 return N0.getOperand(0);
3332
3333 // fold (A+C1)-C2 -> A+(C1-C2)
3334 if (N0.getOpcode() == ISD::ADD &&
3335 isConstantOrConstantVector(N1, /* NoOpaques */ true) &&
3336 isConstantOrConstantVector(N0.getOperand(1), /* NoOpaques */ true)) {
3337 SDValue NewC =
3338 DAG.FoldConstantArithmetic(ISD::SUB, DL, VT, {N0.getOperand(1), N1});
3339 assert(NewC && "Constant folding failed");
3340 return DAG.getNode(ISD::ADD, DL, VT, N0.getOperand(0), NewC);
3341 }
3342
3343 // fold C2-(A+C1) -> (C2-C1)-A
3344 if (N1.getOpcode() == ISD::ADD) {
3345 SDValue N11 = N1.getOperand(1);
3346 if (isConstantOrConstantVector(N0, /* NoOpaques */ true) &&
3347 isConstantOrConstantVector(N11, /* NoOpaques */ true)) {
3348 SDValue NewC = DAG.FoldConstantArithmetic(ISD::SUB, DL, VT, {N0, N11});
3349 assert(NewC && "Constant folding failed");
3350 return DAG.getNode(ISD::SUB, DL, VT, NewC, N1.getOperand(0));
3351 }
3352 }
3353
3354 // fold (A-C1)-C2 -> A-(C1+C2)
3355 if (N0.getOpcode() == ISD::SUB &&
3356 isConstantOrConstantVector(N1, /* NoOpaques */ true) &&
3357 isConstantOrConstantVector(N0.getOperand(1), /* NoOpaques */ true)) {
3358 SDValue NewC =
3359 DAG.FoldConstantArithmetic(ISD::ADD, DL, VT, {N0.getOperand(1), N1});
3360 assert(NewC && "Constant folding failed");
3361 return DAG.getNode(ISD::SUB, DL, VT, N0.getOperand(0), NewC);
3362 }
3363
3364 // fold (c1-A)-c2 -> (c1-c2)-A
3365 if (N0.getOpcode() == ISD::SUB &&
3366 isConstantOrConstantVector(N1, /* NoOpaques */ true) &&
3367 isConstantOrConstantVector(N0.getOperand(0), /* NoOpaques */ true)) {
3368 SDValue NewC =
3369 DAG.FoldConstantArithmetic(ISD::SUB, DL, VT, {N0.getOperand(0), N1});
3370 assert(NewC && "Constant folding failed");
3371 return DAG.getNode(ISD::SUB, DL, VT, NewC, N0.getOperand(1));
3372 }
3373
3374 // fold ((A+(B+or-C))-B) -> A+or-C
3375 if (N0.getOpcode() == ISD::ADD &&
3376 (N0.getOperand(1).getOpcode() == ISD::SUB ||
3377 N0.getOperand(1).getOpcode() == ISD::ADD) &&
3378 N0.getOperand(1).getOperand(0) == N1)
3379 return DAG.getNode(N0.getOperand(1).getOpcode(), DL, VT, N0.getOperand(0),
3380 N0.getOperand(1).getOperand(1));
3381
3382 // fold ((A+(C+B))-B) -> A+C
3383 if (N0.getOpcode() == ISD::ADD && N0.getOperand(1).getOpcode() == ISD::ADD &&
3384 N0.getOperand(1).getOperand(1) == N1)
3385 return DAG.getNode(ISD::ADD, DL, VT, N0.getOperand(0),
3386 N0.getOperand(1).getOperand(0));
3387
3388 // fold ((A-(B-C))-C) -> A-B
3389 if (N0.getOpcode() == ISD::SUB && N0.getOperand(1).getOpcode() == ISD::SUB &&
3390 N0.getOperand(1).getOperand(1) == N1)
3391 return DAG.getNode(ISD::SUB, DL, VT, N0.getOperand(0),
3392 N0.getOperand(1).getOperand(0));
3393
3394 // fold (A-(B-C)) -> A+(C-B)
3395 if (N1.getOpcode() == ISD::SUB && N1.hasOneUse())
3396 return DAG.getNode(ISD::ADD, DL, VT, N0,
3397 DAG.getNode(ISD::SUB, DL, VT, N1.getOperand(1),
3398 N1.getOperand(0)));
3399
3400 // A - (A & B) -> A & (~B)
3401 if (N1.getOpcode() == ISD::AND) {
3402 SDValue A = N1.getOperand(0);
3403 SDValue B = N1.getOperand(1);
3404 if (A != N0)
3405 std::swap(A, B);
3406 if (A == N0 &&
3407 (N1.hasOneUse() || isConstantOrConstantVector(B, /*NoOpaques=*/true))) {
3408 SDValue InvB =
3409 DAG.getNode(ISD::XOR, DL, VT, B, DAG.getAllOnesConstant(DL, VT));
3410 return DAG.getNode(ISD::AND, DL, VT, A, InvB);
3411 }
3412 }
3413
3414 // fold (X - (-Y * Z)) -> (X + (Y * Z))
3415 if (N1.getOpcode() == ISD::MUL && N1.hasOneUse()) {
3416 if (N1.getOperand(0).getOpcode() == ISD::SUB &&
3417 isNullOrNullSplat(N1.getOperand(0).getOperand(0))) {
3418 SDValue Mul = DAG.getNode(ISD::MUL, DL, VT,
3419 N1.getOperand(0).getOperand(1),
3420 N1.getOperand(1));
3421 return DAG.getNode(ISD::ADD, DL, VT, N0, Mul);
3422 }
3423 if (N1.getOperand(1).getOpcode() == ISD::SUB &&
3424 isNullOrNullSplat(N1.getOperand(1).getOperand(0))) {
3425 SDValue Mul = DAG.getNode(ISD::MUL, DL, VT,
3426 N1.getOperand(0),
3427 N1.getOperand(1).getOperand(1));
3428 return DAG.getNode(ISD::ADD, DL, VT, N0, Mul);
3429 }
3430 }
3431
3432 // If either operand of a sub is undef, the result is undef
3433 if (N0.isUndef())
3434 return N0;
3435 if (N1.isUndef())
3436 return N1;
3437
3438 if (SDValue V = foldAddSubBoolOfMaskedVal(N, DAG))
3439 return V;
3440
3441 if (SDValue V = foldAddSubOfSignBit(N, DAG))
3442 return V;
3443
3444 if (SDValue V = foldAddSubMasked1(false, N0, N1, DAG, SDLoc(N)))
3445 return V;
3446
3447 if (SDValue V = foldSubToUSubSat(VT, N))
3448 return V;
3449
3450 // (x - y) - 1 -> add (xor y, -1), x
3451 if (N0.hasOneUse() && N0.getOpcode() == ISD::SUB && isOneOrOneSplat(N1)) {
3452 SDValue Xor = DAG.getNode(ISD::XOR, DL, VT, N0.getOperand(1),
3453 DAG.getAllOnesConstant(DL, VT));
3454 return DAG.getNode(ISD::ADD, DL, VT, Xor, N0.getOperand(0));
3455 }
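// The fold above uses ~y == -y - 1, so (x - y) - 1 == x + ~y; e.g. for
// 8-bit x = 7, y = 3: (7 - 3) - 1 == 3 == (7 + 0xFC) mod 0x100.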
3456
3457 // Look for:
3458 // sub y, (xor x, -1)
3459 // And if the target does not like this form then turn into:
3460 // add (add x, y), 1
3461 if (TLI.preferIncOfAddToSubOfNot(VT) && N1.hasOneUse() && isBitwiseNot(N1)) {
3462 SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0, N1.getOperand(0));
3463 return DAG.getNode(ISD::ADD, DL, VT, Add, DAG.getConstant(1, DL, VT));
3464 }
3465
3466 // Hoist one-use addition by non-opaque constant:
3467 // (x + C) - y -> (x - y) + C
3468 if (N0.hasOneUse() && N0.getOpcode() == ISD::ADD &&
3469 isConstantOrConstantVector(N0.getOperand(1), /*NoOpaques=*/true)) {
3470 SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, N0.getOperand(0), N1);
3471 return DAG.getNode(ISD::ADD, DL, VT, Sub, N0.getOperand(1));
3472 }
3473 // y - (x + C) -> (y - x) - C
3474 if (N1.hasOneUse() && N1.getOpcode() == ISD::ADD &&
3475 isConstantOrConstantVector(N1.getOperand(1), /*NoOpaques=*/true)) {
3476 SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, N0, N1.getOperand(0));
3477 return DAG.getNode(ISD::SUB, DL, VT, Sub, N1.getOperand(1));
3478 }
3479 // (x - C) - y -> (x - y) - C
3480 // This is necessary because SUB(X,C) -> ADD(X,-C) doesn't work for vectors.
3481 if (N0.hasOneUse() && N0.getOpcode() == ISD::SUB &&
3482 isConstantOrConstantVector(N0.getOperand(1), /*NoOpaques=*/true)) {
3483 SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, N0.getOperand(0), N1);
3484 return DAG.getNode(ISD::SUB, DL, VT, Sub, N0.getOperand(1));
3485 }
3486 // (C - x) - y -> C - (x + y)
3487 if (N0.hasOneUse() && N0.getOpcode() == ISD::SUB &&
3488 isConstantOrConstantVector(N0.getOperand(0), /*NoOpaques=*/true)) {
3489 SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0.getOperand(1), N1);
3490 return DAG.getNode(ISD::SUB, DL, VT, N0.getOperand(0), Add);
3491 }
3492
3493 // If the target's bool is represented as 0/-1, prefer to make this 'add 0/-1'
3494 // rather than 'sub 0/1' (the sext should get folded).
3495 // sub X, (zext i1 Y) --> add X, (sext i1 Y)
3496 if (N1.getOpcode() == ISD::ZERO_EXTEND &&
3497 N1.getOperand(0).getScalarValueSizeInBits() == 1 &&
3498 TLI.getBooleanContents(VT) ==
3499 TargetLowering::ZeroOrNegativeOneBooleanContent) {
3500 SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, N1.getOperand(0));
3501 return DAG.getNode(ISD::ADD, DL, VT, N0, SExt);
3502 }
3503
3504 // fold Y = sra (X, size(X)-1); sub (xor (X, Y), Y) -> (abs X)
3505 if (TLI.isOperationLegalOrCustom(ISD::ABS, VT)) {
3506 if (N0.getOpcode() == ISD::XOR && N1.getOpcode() == ISD::SRA) {
3507 SDValue X0 = N0.getOperand(0), X1 = N0.getOperand(1);
3508 SDValue S0 = N1.getOperand(0);
3509 if ((X0 == S0 && X1 == N1) || (X0 == N1 && X1 == S0))
3510 if (ConstantSDNode *C = isConstOrConstSplat(N1.getOperand(1)))
3511 if (C->getAPIntValue() == (VT.getScalarSizeInBits() - 1))
3512 return DAG.getNode(ISD::ABS, SDLoc(N), VT, S0);
3513 }
3514 }
3515
3516 // If the relocation model supports it, consider symbol offsets.
3517 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N0))
3518 if (!LegalOperations && TLI.isOffsetFoldingLegal(GA)) {
3519 // fold (sub Sym, c) -> Sym-c
3520 if (N1C && GA->getOpcode() == ISD::GlobalAddress)
3521 return DAG.getGlobalAddress(GA->getGlobal(), SDLoc(N1C), VT,
3522 GA->getOffset() -
3523 (uint64_t)N1C->getSExtValue());
3524 // fold (sub Sym+c1, Sym+c2) -> c1-c2
3525 if (GlobalAddressSDNode *GB = dyn_cast<GlobalAddressSDNode>(N1))
3526 if (GA->getGlobal() == GB->getGlobal())
3527 return DAG.getConstant((uint64_t)GA->getOffset() - GB->getOffset(),
3528 DL, VT);
3529 }
3530
3531 // sub X, (sextinreg Y i1) -> add X, (and Y 1)
3532 if (N1.getOpcode() == ISD::SIGN_EXTEND_INREG) {
3533 VTSDNode *TN = cast<VTSDNode>(N1.getOperand(1));
3534 if (TN->getVT() == MVT::i1) {
3535 SDValue ZExt = DAG.getNode(ISD::AND, DL, VT, N1.getOperand(0),
3536 DAG.getConstant(1, DL, VT));
3537 return DAG.getNode(ISD::ADD, DL, VT, N0, ZExt);
3538 }
3539 }
3540
3541 // canonicalize (sub X, (vscale * C)) to (add X, (vscale * -C))
3542 if (N1.getOpcode() == ISD::VSCALE) {
3543 const APInt &IntVal = N1.getConstantOperandAPInt(0);
3544 return DAG.getNode(ISD::ADD, DL, VT, N0, DAG.getVScale(DL, VT, -IntVal));
3545 }
3546
3547 // canonicalize (sub X, step_vector(C)) to (add X, step_vector(-C))
3548 if (N1.getOpcode() == ISD::STEP_VECTOR && N1.hasOneUse()) {
3549 SDValue NewStep = DAG.getConstant(-N1.getConstantOperandAPInt(0), DL,
3550 N1.getOperand(0).getValueType());
3551 return DAG.getNode(ISD::ADD, DL, VT, N0,
3552 DAG.getStepVector(DL, VT, NewStep));
3553 }
3554
3555 // Prefer an add for more folding potential and possibly better codegen:
3556 // sub N0, (lshr N10, width-1) --> add N0, (ashr N10, width-1)
3557 if (!LegalOperations && N1.getOpcode() == ISD::SRL && N1.hasOneUse()) {
3558 SDValue ShAmt = N1.getOperand(1);
3559 ConstantSDNode *ShAmtC = isConstOrConstSplat(ShAmt);
3560 if (ShAmtC &&
3561 ShAmtC->getAPIntValue() == (N1.getScalarValueSizeInBits() - 1)) {
3562 SDValue SRA = DAG.getNode(ISD::SRA, DL, VT, N1.getOperand(0), ShAmt);
3563 return DAG.getNode(ISD::ADD, DL, VT, N0, SRA);
3564 }
3565 }
3566
3567 if (TLI.isOperationLegalOrCustom(ISD::ADDCARRY, VT)) {
3568 // (sub Carry, X) -> (addcarry (sub 0, X), 0, Carry)
3569 if (SDValue Carry = getAsCarry(TLI, N0)) {
3570 SDValue X = N1;
3571 SDValue Zero = DAG.getConstant(0, DL, VT);
3572 SDValue NegX = DAG.getNode(ISD::SUB, DL, VT, Zero, X);
3573 return DAG.getNode(ISD::ADDCARRY, DL,
3574 DAG.getVTList(VT, Carry.getValueType()), NegX, Zero,
3575 Carry);
3576 }
3577 }
3578
3579 return SDValue();
3580}
3581
3582SDValue DAGCombiner::visitSUBSAT(SDNode *N) {
3583 SDValue N0 = N->getOperand(0);
3584 SDValue N1 = N->getOperand(1);
3585 EVT VT = N0.getValueType();
3586 SDLoc DL(N);
3587
3588 // fold vector ops
3589 if (VT.isVector()) {
3590 // TODO SimplifyVBinOp
3591
3592 // fold (sub_sat x, 0) -> x, vector edition
3593 if (ISD::isConstantSplatVectorAllZeros(N1.getNode()))
3594 return N0;
3595 }
3596
3597 // fold (sub_sat x, undef) -> 0
3598 if (N0.isUndef() || N1.isUndef())
3599 return DAG.getConstant(0, DL, VT);
3600
3601 // fold (sub_sat x, x) -> 0
3602 if (N0 == N1)
3603 return DAG.getConstant(0, DL, VT);
3604
3605 // fold (sub_sat c1, c2) -> c3
3606 if (SDValue C = DAG.FoldConstantArithmetic(N->getOpcode(), DL, VT, {N0, N1}))
3607 return C;
3608
3609 // fold (sub_sat x, 0) -> x
3610 if (isNullConstant(N1))
3611 return N0;
3612
3613 return SDValue();
3614}
3615
3616SDValue DAGCombiner::visitSUBC(SDNode *N) {
3617 SDValue N0 = N->getOperand(0);
3618 SDValue N1 = N->getOperand(1);
3619 EVT VT = N0.getValueType();
3620 SDLoc DL(N);
3621
3622 // If the flag result is dead, turn this into a SUB.
3623 if (!N->hasAnyUseOfValue(1))
3624 return CombineTo(N, DAG.getNode(ISD::SUB, DL, VT, N0, N1),
3625 DAG.getNode(ISD::CARRY_FALSE, DL, MVT::Glue));
3626
3627 // fold (subc x, x) -> 0 + no borrow
3628 if (N0 == N1)
3629 return CombineTo(N, DAG.getConstant(0, DL, VT),
3630 DAG.getNode(ISD::CARRY_FALSE, DL, MVT::Glue));
3631
3632 // fold (subc x, 0) -> x + no borrow
3633 if (isNullConstant(N1))
3634 return CombineTo(N, N0, DAG.getNode(ISD::CARRY_FALSE, DL, MVT::Glue));
3635
3636 // Canonicalize (sub -1, x) -> ~x, i.e. (xor x, -1) + no borrow
3637 if (isAllOnesConstant(N0))
3638 return CombineTo(N, DAG.getNode(ISD::XOR, DL, VT, N1, N0),
3639 DAG.getNode(ISD::CARRY_FALSE, DL, MVT::Glue));
3640
3641 return SDValue();
3642}
3643
3644SDValue DAGCombiner::visitSUBO(SDNode *N) {
3645 SDValue N0 = N->getOperand(0);
3646 SDValue N1 = N->getOperand(1);
3647 EVT VT = N0.getValueType();
3648 bool IsSigned = (ISD::SSUBO == N->getOpcode());
3649
3650 EVT CarryVT = N->getValueType(1);
3651 SDLoc DL(N);
3652
3653 // If the flag result is dead, turn this into a SUB.
3654 if (!N->hasAnyUseOfValue(1))
3655 return CombineTo(N, DAG.getNode(ISD::SUB, DL, VT, N0, N1),
3656 DAG.getUNDEF(CarryVT));
3657
3658 // fold (subo x, x) -> 0 + no borrow
3659 if (N0 == N1)
3660 return CombineTo(N, DAG.getConstant(0, DL, VT),
3661 DAG.getConstant(0, DL, CarryVT));
3662
3663 ConstantSDNode *N1C = getAsNonOpaqueConstant(N1);
3664
3665 // fold (subo x, c) -> (addo x, -c)
3666 if (IsSigned && N1C && !N1C->getAPIntValue().isMinSignedValue()) {
3667 return DAG.getNode(ISD::SADDO, DL, N->getVTList(), N0,
3668 DAG.getConstant(-N1C->getAPIntValue(), DL, VT));
3669 }
3670
3671 // fold (subo x, 0) -> x + no borrow
3672 if (isNullOrNullSplat(N1))
3673 return CombineTo(N, N0, DAG.getConstant(0, DL, CarryVT));
3674
3675 // Canonicalize (usubo -1, x) -> ~x, i.e. (xor x, -1) + no borrow
3676 if (!IsSigned && isAllOnesOrAllOnesSplat(N0))
3677 return CombineTo(N, DAG.getNode(ISD::XOR, DL, VT, N1, N0),
3678 DAG.getConstant(0, DL, CarryVT));
3679
3680 return SDValue();
3681}
3682
3683SDValue DAGCombiner::visitSUBE(SDNode *N) {
3684 SDValue N0 = N->getOperand(0);
3685 SDValue N1 = N->getOperand(1);
3686 SDValue CarryIn = N->getOperand(2);
3687
3688 // fold (sube x, y, false) -> (subc x, y)
3689 if (CarryIn.getOpcode() == ISD::CARRY_FALSE)
3690 return DAG.getNode(ISD::SUBC, SDLoc(N), N->getVTList(), N0, N1);
3691
3692 return SDValue();
3693}
3694
3695SDValue DAGCombiner::visitSUBCARRY(SDNode *N) {
3696 SDValue N0 = N->getOperand(0);
3697 SDValue N1 = N->getOperand(1);
3698 SDValue CarryIn = N->getOperand(2);
3699
3700 // fold (subcarry x, y, false) -> (usubo x, y)
3701 if (isNullConstant(CarryIn)) {
3702 if (!LegalOperations ||
3703 TLI.isOperationLegalOrCustom(ISD::USUBO, N->getValueType(0)))
3704 return DAG.getNode(ISD::USUBO, SDLoc(N), N->getVTList(), N0, N1);
3705 }
3706
3707 return SDValue();
3708}
3709
3710SDValue DAGCombiner::visitSSUBO_CARRY(SDNode *N) {
3711 SDValue N0 = N->getOperand(0);
3712 SDValue N1 = N->getOperand(1);
3713 SDValue CarryIn = N->getOperand(2);
3714
3715 // fold (ssubo_carry x, y, false) -> (ssubo x, y)
3716 if (isNullConstant(CarryIn)) {
3717 if (!LegalOperations ||
3718 TLI.isOperationLegalOrCustom(ISD::SSUBO, N->getValueType(0)))
3719 return DAG.getNode(ISD::SSUBO, SDLoc(N), N->getVTList(), N0, N1);
3720 }
3721
3722 return SDValue();
3723}
3724
3725// Notice that "mulfix" can be any of SMULFIX, SMULFIXSAT, UMULFIX and
3726// UMULFIXSAT here.
3727SDValue DAGCombiner::visitMULFIX(SDNode *N) {
3728 SDValue N0 = N->getOperand(0);
3729 SDValue N1 = N->getOperand(1);
3730 SDValue Scale = N->getOperand(2);
3731 EVT VT = N0.getValueType();
3732
3733 // fold (mulfix x, undef, scale) -> 0
3734 if (N0.isUndef() || N1.isUndef())
3735 return DAG.getConstant(0, SDLoc(N), VT);
3736
3737 // Canonicalize constant to RHS (vector doesn't have to splat)
3738 if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
3739 !DAG.isConstantIntBuildVectorOrConstantInt(N1))
3740 return DAG.getNode(N->getOpcode(), SDLoc(N), VT, N1, N0, Scale);
3741
3742 // fold (mulfix x, 0, scale) -> 0
3743 if (isNullConstant(N1))
3744 return DAG.getConstant(0, SDLoc(N), VT);
3745
3746 return SDValue();
3747}
3748
3749SDValue DAGCombiner::visitMUL(SDNode *N) {
3750 SDValue N0 = N->getOperand(0);
3751 SDValue N1 = N->getOperand(1);
3752 EVT VT = N0.getValueType();
3753
3754 // fold (mul x, undef) -> 0
3755 if (N0.isUndef() || N1.isUndef())
3756 return DAG.getConstant(0, SDLoc(N), VT);
3757
3758 bool N1IsConst = false;
3759 bool N1IsOpaqueConst = false;
3760 APInt ConstValue1;
3761
3762 // fold vector ops
3763 if (VT.isVector()) {
3764 if (SDValue FoldedVOp = SimplifyVBinOp(N))
3765 return FoldedVOp;
3766
3767 N1IsConst = ISD::isConstantSplatVector(N1.getNode(), ConstValue1);
3768 assert((!N1IsConst ||
3769 ConstValue1.getBitWidth() == VT.getScalarSizeInBits()) &&
3770 "Splat APInt should be element width");
3771 } else {
3772 N1IsConst = isa<ConstantSDNode>(N1);
3773 if (N1IsConst) {
3774 ConstValue1 = cast<ConstantSDNode>(N1)->getAPIntValue();
3775 N1IsOpaqueConst = cast<ConstantSDNode>(N1)->isOpaque();
3776 }
3777 }
3778
3779 // fold (mul c1, c2) -> c1*c2
3780 if (SDValue C = DAG.FoldConstantArithmetic(ISD::MUL, SDLoc(N), VT, {N0, N1}))
3781 return C;
3782
3783 // canonicalize constant to RHS (vector doesn't have to splat)
3784 if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
3785 !DAG.isConstantIntBuildVectorOrConstantInt(N1))
3786 return DAG.getNode(ISD::MUL, SDLoc(N), VT, N1, N0);
3787
3788 // fold (mul x, 0) -> 0
3789 if (N1IsConst && ConstValue1.isNullValue())
3790 return N1;
3791
3792 // fold (mul x, 1) -> x
3793 if (N1IsConst && ConstValue1.isOneValue())
3794 return N0;
3795
3796 if (SDValue NewSel = foldBinOpIntoSelect(N))
3797 return NewSel;
3798
3799 // fold (mul x, -1) -> 0-x
3800 if (N1IsConst && ConstValue1.isAllOnesValue()) {
3801 SDLoc DL(N);
3802 return DAG.getNode(ISD::SUB, DL, VT,
3803 DAG.getConstant(0, DL, VT), N0);
3804 }
3805
3806 // fold (mul x, (1 << c)) -> x << c
3807 if (isConstantOrConstantVector(N1, /*NoOpaques*/ true) &&
3808 DAG.isKnownToBeAPowerOfTwo(N1) &&
3809 (!VT.isVector() || Level <= AfterLegalizeVectorOps)) {
3810 SDLoc DL(N);
3811 SDValue LogBase2 = BuildLogBase2(N1, DL);
3812 EVT ShiftVT = getShiftAmountTy(N0.getValueType());
3813 SDValue Trunc = DAG.getZExtOrTrunc(LogBase2, DL, ShiftVT);
3814 return DAG.getNode(ISD::SHL, DL, VT, N0, Trunc);
3815 }
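// Illustrative example: multiplying by a power of two becomes a shift by
// its log base 2, e.g. x * 8 == x << 3 (5 * 8 == 40 == 5 << 3).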
3816
3817 // fold (mul x, -(1 << c)) -> -(x << c) or (-x) << c
3818 if (N1IsConst && !N1IsOpaqueConst && (-ConstValue1).isPowerOf2()) {
3819 unsigned Log2Val = (-ConstValue1).logBase2();
3820 SDLoc DL(N);
3821 // FIXME: If the input is something that is easily negated (e.g. a
3822 // single-use add), we should put the negate there.
3823 return DAG.getNode(ISD::SUB, DL, VT,
3824 DAG.getConstant(0, DL, VT),
3825 DAG.getNode(ISD::SHL, DL, VT, N0,
3826 DAG.getConstant(Log2Val, DL,
3827 getShiftAmountTy(N0.getValueType()))));
3828 }
3829
3830 // Try to transform:
3831 // (1) multiply-by-(power-of-2 +/- 1) into shift and add/sub.
3832 // mul x, (2^N + 1) --> add (shl x, N), x
3833 // mul x, (2^N - 1) --> sub (shl x, N), x
3834 // Examples: x * 33 --> (x << 5) + x
3835 // x * 15 --> (x << 4) - x
3836 // x * -33 --> -((x << 5) + x)
3837 // x * -15 --> -((x << 4) - x) ; this reduces --> x - (x << 4)
3838 // (2) multiply-by-(power-of-2 +/- power-of-2) into shifts and add/sub.
3839 // mul x, (2^N + 2^M) --> (add (shl x, N), (shl x, M))
3840 // mul x, (2^N - 2^M) --> (sub (shl x, N), (shl x, M))
3841 // Examples: x * 0x8800 --> (x << 15) + (x << 11)
3842 // x * 0xf800 --> (x << 16) - (x << 11)
3843 // x * -0x8800 --> -((x << 15) + (x << 11))
3844 // x * -0xf800 --> -((x << 16) - (x << 11)) ; (x << 11) - (x << 16)
3845 if (N1IsConst && TLI.decomposeMulByConstant(*DAG.getContext(), VT, N1)) {
3846 // TODO: We could handle more general decomposition of any constant by
3847 // having the target set a limit on number of ops and making a
3848 // callback to determine that sequence (similar to sqrt expansion).
3849 unsigned MathOp = ISD::DELETED_NODE;
3850 APInt MulC = ConstValue1.abs();
3851 // The constant `2` should be treated as (2^0 + 1).
3852 unsigned TZeros = MulC == 2 ? 0 : MulC.countTrailingZeros();
3853 MulC.lshrInPlace(TZeros);
3854 if ((MulC - 1).isPowerOf2())
3855 MathOp = ISD::ADD;
3856 else if ((MulC + 1).isPowerOf2())
3857 MathOp = ISD::SUB;
3858
3859 if (MathOp != ISD::DELETED_NODE) {
3860 unsigned ShAmt =
3861 MathOp == ISD::ADD ? (MulC - 1).logBase2() : (MulC + 1).logBase2();
3862 ShAmt += TZeros;
3863 assert(ShAmt < VT.getScalarSizeInBits() &&
3864 "multiply-by-constant generated out of bounds shift");
3865 SDLoc DL(N);
3866 SDValue Shl =
3867 DAG.getNode(ISD::SHL, DL, VT, N0, DAG.getConstant(ShAmt, DL, VT));
3868 SDValue R =
3869 TZeros ? DAG.getNode(MathOp, DL, VT, Shl,
3870 DAG.getNode(ISD::SHL, DL, VT, N0,
3871 DAG.getConstant(TZeros, DL, VT)))
3872 : DAG.getNode(MathOp, DL, VT, Shl, N0);
3873 if (ConstValue1.isNegative())
3874 R = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), R);
3875 return R;
3876 }
3877 }
3878
3879 // (mul (shl X, c1), c2) -> (mul X, c2 << c1)
3880 if (N0.getOpcode() == ISD::SHL &&
3881 isConstantOrConstantVector(N1, /* NoOpaques */ true) &&
3882 isConstantOrConstantVector(N0.getOperand(1), /* NoOpaques */ true)) {
3883 SDValue C3 = DAG.getNode(ISD::SHL, SDLoc(N), VT, N1, N0.getOperand(1));
3884 if (isConstantOrConstantVector(C3))
3885 return DAG.getNode(ISD::MUL, SDLoc(N), VT, N0.getOperand(0), C3);
3886 }
3887
3888 // Change (mul (shl X, C), Y) -> (shl (mul X, Y), C) when the shift has one
3889 // use.
3890 {
3891 SDValue Sh(nullptr, 0), Y(nullptr, 0);
3892
3893 // Check for both (mul (shl X, C), Y) and (mul Y, (shl X, C)).
3894 if (N0.getOpcode() == ISD::SHL &&
3895 isConstantOrConstantVector(N0.getOperand(1)) &&
3896 N0.getNode()->hasOneUse()) {
3897 Sh = N0; Y = N1;
3898 } else if (N1.getOpcode() == ISD::SHL &&
3899 isConstantOrConstantVector(N1.getOperand(1)) &&
3900 N1.getNode()->hasOneUse()) {
3901 Sh = N1; Y = N0;
3902 }
3903
3904 if (Sh.getNode()) {
3905 SDValue Mul = DAG.getNode(ISD::MUL, SDLoc(N), VT, Sh.getOperand(0), Y);
3906 return DAG.getNode(ISD::SHL, SDLoc(N), VT, Mul, Sh.getOperand(1));
3907 }
3908 }
3909
3910 // fold (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2)
3911 if (DAG.isConstantIntBuildVectorOrConstantInt(N1) &&
3912 N0.getOpcode() == ISD::ADD &&
3913 DAG.isConstantIntBuildVectorOrConstantInt(N0.getOperand(1)) &&
3914 isMulAddWithConstProfitable(N, N0, N1))
3915 return DAG.getNode(ISD::ADD, SDLoc(N), VT,
3916 DAG.getNode(ISD::MUL, SDLoc(N0), VT,
3917 N0.getOperand(0), N1),
3918 DAG.getNode(ISD::MUL, SDLoc(N1), VT,
3919 N0.getOperand(1), N1));
3920
3921 // Fold (mul (vscale * C0), C1) to (vscale * (C0 * C1)).
3922 if (N0.getOpcode() == ISD::VSCALE)
3923 if (ConstantSDNode *NC1 = isConstOrConstSplat(N1)) {
3924 const APInt &C0 = N0.getConstantOperandAPInt(0);
3925 const APInt &C1 = NC1->getAPIntValue();
3926 return DAG.getVScale(SDLoc(N), VT, C0 * C1);
3927 }
3928
3929 // Fold (mul step_vector(C0), C1) to (step_vector(C0 * C1)).
3930 APInt MulVal;
3931 if (N0.getOpcode() == ISD::STEP_VECTOR)
3932 if (ISD::isConstantSplatVector(N1.getNode(), MulVal)) {
3933 const APInt &C0 = N0.getConstantOperandAPInt(0);
3934 EVT SVT = N0.getOperand(0).getValueType();
3935 SDValue NewStep = DAG.getConstant(
3936 C0 * MulVal.sextOrTrunc(SVT.getSizeInBits()), SDLoc(N), SVT);
3937 return DAG.getStepVector(SDLoc(N), VT, NewStep);
3938 }
3939
3940  // Fold (mul x, 0/undef) -> 0 and
3941  //      (mul x, 1) -> x
3942  // into and(x, mask).
3943  // We can replace vectors with '0' and '1' factors with a clearing mask.
3944 if (VT.isFixedLengthVector()) {
3945 unsigned NumElts = VT.getVectorNumElements();
3946 SmallBitVector ClearMask;
3947 ClearMask.reserve(NumElts);
3948 auto IsClearMask = [&ClearMask](ConstantSDNode *V) {
3949 if (!V || V->isNullValue()) {
3950 ClearMask.push_back(true);
3951 return true;
3952 }
3953 ClearMask.push_back(false);
3954 return V->isOne();
3955 };
3956 if ((!LegalOperations || TLI.isOperationLegalOrCustom(ISD::AND, VT)) &&
3957 ISD::matchUnaryPredicate(N1, IsClearMask, /*AllowUndefs*/ true)) {
3958      assert(N1.getOpcode() == ISD::BUILD_VECTOR && "Unknown constant vector");
3959 SDLoc DL(N);
3960 EVT LegalSVT = N1.getOperand(0).getValueType();
3961 SDValue Zero = DAG.getConstant(0, DL, LegalSVT);
3962 SDValue AllOnes = DAG.getAllOnesConstant(DL, LegalSVT);
3963 SmallVector<SDValue, 16> Mask(NumElts, AllOnes);
3964 for (unsigned I = 0; I != NumElts; ++I)
3965 if (ClearMask[I])
3966 Mask[I] = Zero;
3967 return DAG.getNode(ISD::AND, DL, VT, N0, DAG.getBuildVector(VT, DL, Mask));
3968 }
3969 }
3970
3971 // reassociate mul
3972 if (SDValue RMUL = reassociateOps(ISD::MUL, SDLoc(N), N0, N1, N->getFlags()))
3973 return RMUL;
3974
3975 return SDValue();
3976}
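
As a quick sanity check of the shift/add/sub decompositions above, the identities can be verified with ordinary wrapping unsigned arithmetic. A minimal standalone sketch (illustrative only, not part of DAGCombiner; the helper name checkMulDecomposition is hypothetical):

#include <cassert>
#include <cstdint>

// Multiplication and shifts both wrap modulo 2^32, so these identities hold
// for every uint32_t value, not just small ones.
static void checkMulDecomposition(uint32_t x) {
  assert(x * 33u == (x << 5) + x);              // 33 = 2^5 + 1
  assert(x * 15u == (x << 4) - x);              // 15 = 2^4 - 1
  assert(x * 0x8800u == (x << 15) + (x << 11)); // 0x8800 = 2^15 + 2^11
  assert(x * 0xf800u == (x << 16) - (x << 11)); // 0xf800 = 2^16 - 2^11
}

int main() {
  for (uint32_t x = 0; x < (1u << 16); ++x)
    checkMulDecomposition(x);
  return 0;
}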
3977
3978/// Return true if divmod libcall is available.
3979static bool isDivRemLibcallAvailable(SDNode *Node, bool isSigned,
3980 const TargetLowering &TLI) {
3981 RTLIB::Libcall LC;
3982 EVT NodeType = Node->getValueType(0);
3983 if (!NodeType.isSimple())
3984 return false;
3985 switch (NodeType.getSimpleVT().SimpleTy) {
3986 default: return false; // No libcall for vector types.
3987 case MVT::i8: LC= isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8; break;
3988 case MVT::i16: LC= isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break;
3989 case MVT::i32: LC= isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break;
3990 case MVT::i64: LC= isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break;
3991 case MVT::i128: LC= isSigned ? RTLIB::SDIVREM_I128:RTLIB::UDIVREM_I128; break;
3992 }
3993
3994 return TLI.getLibcallName(LC) != nullptr;
3995}
3996
3997/// Issue divrem if both quotient and remainder are needed.
3998SDValue DAGCombiner::useDivRem(SDNode *Node) {
3999 if (Node->use_empty())
4000 return SDValue(); // This is a dead node, leave it alone.
4001
4002 unsigned Opcode = Node->getOpcode();
4003 bool isSigned = (Opcode == ISD::SDIV) || (Opcode == ISD::SREM);
4004 unsigned DivRemOpc = isSigned ? ISD::SDIVREM : ISD::UDIVREM;
4005
4006  // DivMod libcalls can still work on non-legal types when lowered to lib calls.
4007 EVT VT = Node->getValueType(0);
4008 if (VT.isVector() || !VT.isInteger())
4009 return SDValue();
4010
4011 if (!TLI.isTypeLegal(VT) && !TLI.isOperationCustom(DivRemOpc, VT))
4012 return SDValue();
4013
4014 // If DIVREM is going to get expanded into a libcall,
4015 // but there is no libcall available, then don't combine.
4016 if (!TLI.isOperationLegalOrCustom(DivRemOpc, VT) &&
4017 !isDivRemLibcallAvailable(Node, isSigned, TLI))
4018 return SDValue();
4019
4020 // If div is legal, it's better to do the normal expansion
4021 unsigned OtherOpcode = 0;
4022 if ((Opcode == ISD::SDIV) || (Opcode == ISD::UDIV)) {
4023 OtherOpcode = isSigned ? ISD::SREM : ISD::UREM;
4024 if (TLI.isOperationLegalOrCustom(Opcode, VT))
4025 return SDValue();
4026 } else {
4027 OtherOpcode = isSigned ? ISD::SDIV : ISD::UDIV;
4028 if (TLI.isOperationLegalOrCustom(OtherOpcode, VT))
4029 return SDValue();
4030 }
4031
4032 SDValue Op0 = Node->getOperand(0);
4033 SDValue Op1 = Node->getOperand(1);
4034 SDValue combined;
4035 for (SDNode::use_iterator UI = Op0.getNode()->use_begin(),
4036 UE = Op0.getNode()->use_end(); UI != UE; ++UI) {
4037 SDNode *User = *UI;
4038 if (User == Node || User->getOpcode() == ISD::DELETED_NODE ||
4039 User->use_empty())
4040 continue;
4041 // Convert the other matching node(s), too;
4042 // otherwise, the DIVREM may get target-legalized into something
4043 // target-specific that we won't be able to recognize.
4044 unsigned UserOpc = User->getOpcode();
4045 if ((UserOpc == Opcode || UserOpc == OtherOpcode || UserOpc == DivRemOpc) &&
4046 User->getOperand(0) == Op0 &&
4047 User->getOperand(1) == Op1) {
4048 if (!combined) {
4049 if (UserOpc == OtherOpcode) {
4050 SDVTList VTs = DAG.getVTList(VT, VT);
4051 combined = DAG.getNode(DivRemOpc, SDLoc(Node), VTs, Op0, Op1);
4052 } else if (UserOpc == DivRemOpc) {
4053 combined = SDValue(User, 0);
4054 } else {
4055          assert(UserOpc == Opcode);
4056 continue;
4057 }
4058 }
4059 if (UserOpc == ISD::SDIV || UserOpc == ISD::UDIV)
4060 CombineTo(User, combined);
4061 else if (UserOpc == ISD::SREM || UserOpc == ISD::UREM)
4062 CombineTo(User, combined.getValue(1));
4063 }
4064 }
4065 return combined;
4066}
4067
4068static SDValue simplifyDivRem(SDNode *N, SelectionDAG &DAG) {
4069 SDValue N0 = N->getOperand(0);
4070 SDValue N1 = N->getOperand(1);
4071 EVT VT = N->getValueType(0);
4072 SDLoc DL(N);
4073
4074 unsigned Opc = N->getOpcode();
4075 bool IsDiv = (ISD::SDIV == Opc) || (ISD::UDIV == Opc);
4076 ConstantSDNode *N1C = isConstOrConstSplat(N1);
4077
4078 // X / undef -> undef
4079 // X % undef -> undef
4080 // X / 0 -> undef
4081 // X % 0 -> undef
4082 // NOTE: This includes vectors where any divisor element is zero/undef.
4083 if (DAG.isUndef(Opc, {N0, N1}))
4084 return DAG.getUNDEF(VT);
4085
4086 // undef / X -> 0
4087 // undef % X -> 0
4088 if (N0.isUndef())
4089 return DAG.getConstant(0, DL, VT);
4090
4091 // 0 / X -> 0
4092 // 0 % X -> 0
4093 ConstantSDNode *N0C = isConstOrConstSplat(N0);
4094 if (N0C && N0C->isNullValue())
4095 return N0;
4096
4097 // X / X -> 1
4098 // X % X -> 0
4099 if (N0 == N1)
4100 return DAG.getConstant(IsDiv ? 1 : 0, DL, VT);
4101
4102 // X / 1 -> X
4103 // X % 1 -> 0
4104 // If this is a boolean op (single-bit element type), we can't have
4105 // division-by-zero or remainder-by-zero, so assume the divisor is 1.
4106 // TODO: Similarly, if we're zero-extending a boolean divisor, then assume
4107 // it's a 1.
4108 if ((N1C && N1C->isOne()) || (VT.getScalarType() == MVT::i1))
4109 return IsDiv ? N0 : DAG.getConstant(0, DL, VT);
4110
4111 return SDValue();
4112}
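
The scalar identities that simplifyDivRem encodes can be spelled out directly with plain integers; a small illustrative sketch (not combiner code):

#include <cassert>

int main() {
  int X = 42;
  assert(0 / X == 0 && 0 % X == 0); // 0 / X -> 0,  0 % X -> 0
  assert(X / X == 1 && X % X == 0); // X / X -> 1,  X % X -> 0
  assert(X / 1 == X && X % 1 == 0); // X / 1 -> X,  X % 1 -> 0
  // For an i1 (boolean) element type, the only divisor that avoids
  // division-by-zero is 1, so div is the identity and rem is always 0.
  return 0;
}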
4113
4114SDValue DAGCombiner::visitSDIV(SDNode *N) {
4115 SDValue N0 = N->getOperand(0);
4116 SDValue N1 = N->getOperand(1);
4117 EVT VT = N->getValueType(0);
4118 EVT CCVT = getSetCCResultType(VT);
4119
4120 // fold vector ops
4121 if (VT.isVector())
4122 if (SDValue FoldedVOp = SimplifyVBinOp(N))
4123 return FoldedVOp;
4124
4125 SDLoc DL(N);
4126
4127 // fold (sdiv c1, c2) -> c1/c2
4128 ConstantSDNode *N1C = isConstOrConstSplat(N1);
4129 if (SDValue C = DAG.FoldConstantArithmetic(ISD::SDIV, DL, VT, {N0, N1}))
4130 return C;
4131
4132 // fold (sdiv X, -1) -> 0-X
4133 if (N1C && N1C->isAllOnesValue())
4134 return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), N0);
4135
4136 // fold (sdiv X, MIN_SIGNED) -> select(X == MIN_SIGNED, 1, 0)
4137 if (N1C && N1C->getAPIntValue().isMinSignedValue())
4138 return DAG.getSelect(DL, VT, DAG.getSetCC(DL, CCVT, N0, N1, ISD::SETEQ),
4139 DAG.getConstant(1, DL, VT),
4140 DAG.getConstant(0, DL, VT));
4141
4142 if (SDValue V = simplifyDivRem(N, DAG))
4143 return V;
4144
4145 if (SDValue NewSel = foldBinOpIntoSelect(N))
4146 return NewSel;
4147
4148 // If we know the sign bits of both operands are zero, strength reduce to a
4149 // udiv instead. Handles (X&15) /s 4 -> X&15 >> 2
4150 if (DAG.SignBitIsZero(N1) && DAG.SignBitIsZero(N0))
4151 return DAG.getNode(ISD::UDIV, DL, N1.getValueType(), N0, N1);
4152
4153 if (SDValue V = visitSDIVLike(N0, N1, N)) {
4154 // If the corresponding remainder node exists, update its users with
4155    // (Dividend - (Quotient * Divisor)).
4156 if (SDNode *RemNode = DAG.getNodeIfExists(ISD::SREM, N->getVTList(),
4157 { N0, N1 })) {
4158 SDValue Mul = DAG.getNode(ISD::MUL, DL, VT, V, N1);
4159 SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, N0, Mul);
4160 AddToWorklist(Mul.getNode());
4161 AddToWorklist(Sub.getNode());
4162 CombineTo(RemNode, Sub);
4163 }
4164 return V;
4165 }
4166
4167 // sdiv, srem -> sdivrem
4168 // If the divisor is constant, then return DIVREM only if isIntDivCheap() is
4169 // true. Otherwise, we break the simplification logic in visitREM().
4170 AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
4171 if (!N1C || TLI.isIntDivCheap(N->getValueType(0), Attr))
4172 if (SDValue DivRem = useDivRem(N))
4173 return DivRem;
4174
4175 return SDValue();
4176}
4177
4178SDValue DAGCombiner::visitSDIVLike(SDValue N0, SDValue N1, SDNode *N) {
4179 SDLoc DL(N);
4180 EVT VT = N->getValueType(0);
4181 EVT CCVT = getSetCCResultType(VT);
4182 unsigned BitWidth = VT.getScalarSizeInBits();
4183
4184  // Helper for determining whether a value is a power-of-2 constant scalar
4185  // (or its negation), or a vector of such elements.
4186 auto IsPowerOfTwo = [](ConstantSDNode *C) {
4187 if (C->isNullValue() || C->isOpaque())
4188 return false;
4189 if (C->getAPIntValue().isPowerOf2())
4190 return true;
4191 if ((-C->getAPIntValue()).isPowerOf2())
4192 return true;
4193 return false;
4194 };
4195
4196 // fold (sdiv X, pow2) -> simple ops after legalize
4197 // FIXME: We check for the exact bit here because the generic lowering gives
4198 // better results in that case. The target-specific lowering should learn how
4199 // to handle exact sdivs efficiently.
4200 if (!N->getFlags().hasExact() && ISD::matchUnaryPredicate(N1, IsPowerOfTwo)) {
4201 // Target-specific implementation of sdiv x, pow2.
4202 if (SDValue Res = BuildSDIVPow2(N))
4203 return Res;
4204
4205 // Create constants that are functions of the shift amount value.
4206 EVT ShiftAmtTy = getShiftAmountTy(N0.getValueType());
4207 SDValue Bits = DAG.getConstant(BitWidth, DL, ShiftAmtTy);
4208 SDValue C1 = DAG.getNode(ISD::CTTZ, DL, VT, N1);
4209 C1 = DAG.getZExtOrTrunc(C1, DL, ShiftAmtTy);
4210 SDValue Inexact = DAG.getNode(ISD::SUB, DL, ShiftAmtTy, Bits, C1);
4211 if (!isConstantOrConstantVector(Inexact))
4212 return SDValue();
4213
4214 // Splat the sign bit into the register
4215 SDValue Sign = DAG.getNode(ISD::SRA, DL, VT, N0,
4216 DAG.getConstant(BitWidth - 1, DL, ShiftAmtTy));
4217 AddToWorklist(Sign.getNode());
4218
4219 // Add (N0 < 0) ? abs2 - 1 : 0;
4220 SDValue Srl = DAG.getNode(ISD::SRL, DL, VT, Sign, Inexact);
4221 AddToWorklist(Srl.getNode());
4222 SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0, Srl);
4223 AddToWorklist(Add.getNode());
4224 SDValue Sra = DAG.getNode(ISD::SRA, DL, VT, Add, C1);
4225 AddToWorklist(Sra.getNode());
4226
4227 // Special case: (sdiv X, 1) -> X
4228 // Special Case: (sdiv X, -1) -> 0-X
4229 SDValue One = DAG.getConstant(1, DL, VT);
4230 SDValue AllOnes = DAG.getAllOnesConstant(DL, VT);
4231 SDValue IsOne = DAG.getSetCC(DL, CCVT, N1, One, ISD::SETEQ);
4232 SDValue IsAllOnes = DAG.getSetCC(DL, CCVT, N1, AllOnes, ISD::SETEQ);
4233 SDValue IsOneOrAllOnes = DAG.getNode(ISD::OR, DL, CCVT, IsOne, IsAllOnes);
4234 Sra = DAG.getSelect(DL, VT, IsOneOrAllOnes, N0, Sra);
4235
4236 // If dividing by a positive value, we're done. Otherwise, the result must
4237 // be negated.
4238 SDValue Zero = DAG.getConstant(0, DL, VT);
4239 SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, Zero, Sra);
4240
4241 // FIXME: Use SELECT_CC once we improve SELECT_CC constant-folding.
4242 SDValue IsNeg = DAG.getSetCC(DL, CCVT, N1, Zero, ISD::SETLT);
4243 SDValue Res = DAG.getSelect(DL, VT, IsNeg, Sub, Sra);
4244 return Res;
4245 }
4246
4247 // If integer divide is expensive and we satisfy the requirements, emit an
4248 // alternate sequence. Targets may check function attributes for size/speed
4249 // trade-offs.
4250 AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
4251 if (isConstantOrConstantVector(N1) &&
4252 !TLI.isIntDivCheap(N->getValueType(0), Attr))
4253 if (SDValue Op = BuildSDIV(N))
4254 return Op;
4255
4256 return SDValue();
4257}
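
The power-of-2 path above is the classic branch-free signed division: splat the sign, add a bias of pow2-1 only for negative dividends, then shift arithmetically. A worked 32-bit sketch checked against C's truncating division (illustrative; the name sdivByPow2 is hypothetical, and it assumes arithmetic right shift of signed values, which holds on all LLVM hosts and targets):

#include <cassert>
#include <cstdint>

// Valid for 1 <= log2 <= 31.
static int32_t sdivByPow2(int32_t x, unsigned log2) {
  int32_t sign = x >> 31;                        // SRA: splat the sign bit
  uint32_t bias = (uint32_t)sign >> (32 - log2); // SRL: (x < 0) ? 2^log2 - 1 : 0
  return (x + (int32_t)bias) >> log2;            // ADD + SRA, rounds toward zero
}

int main() {
  for (int32_t x = -1024; x <= 1024; ++x)
    assert(sdivByPow2(x, 3) == x / 8);
  return 0;
}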
4258
4259SDValue DAGCombiner::visitUDIV(SDNode *N) {
4260 SDValue N0 = N->getOperand(0);
4261 SDValue N1 = N->getOperand(1);
4262 EVT VT = N->getValueType(0);
4263 EVT CCVT = getSetCCResultType(VT);
4264
4265 // fold vector ops
4266 if (VT.isVector())
4267 if (SDValue FoldedVOp = SimplifyVBinOp(N))
4268 return FoldedVOp;
4269
4270 SDLoc DL(N);
4271
4272 // fold (udiv c1, c2) -> c1/c2
4273 ConstantSDNode *N1C = isConstOrConstSplat(N1);
4274 if (SDValue C = DAG.FoldConstantArithmetic(ISD::UDIV, DL, VT, {N0, N1}))
4275 return C;
4276
4277 // fold (udiv X, -1) -> select(X == -1, 1, 0)
4278 if (N1C && N1C->getAPIntValue().isAllOnesValue())
4279 return DAG.getSelect(DL, VT, DAG.getSetCC(DL, CCVT, N0, N1, ISD::SETEQ),
4280 DAG.getConstant(1, DL, VT),
4281 DAG.getConstant(0, DL, VT));
4282
4283 if (SDValue V = simplifyDivRem(N, DAG))
4284 return V;
4285
4286 if (SDValue NewSel = foldBinOpIntoSelect(N))
4287 return NewSel;
4288
4289 if (SDValue V = visitUDIVLike(N0, N1, N)) {
4290 // If the corresponding remainder node exists, update its users with
4291    // (Dividend - (Quotient * Divisor)).
4292 if (SDNode *RemNode = DAG.getNodeIfExists(ISD::UREM, N->getVTList(),
4293 { N0, N1 })) {
4294 SDValue Mul = DAG.getNode(ISD::MUL, DL, VT, V, N1);
4295 SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, N0, Mul);
4296 AddToWorklist(Mul.getNode());
4297 AddToWorklist(Sub.getNode());
4298 CombineTo(RemNode, Sub);
4299 }
4300 return V;
4301 }
4302
4303  // udiv, urem -> udivrem
4304 // If the divisor is constant, then return DIVREM only if isIntDivCheap() is
4305 // true. Otherwise, we break the simplification logic in visitREM().
4306 AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
4307 if (!N1C || TLI.isIntDivCheap(N->getValueType(0), Attr))
4308 if (SDValue DivRem = useDivRem(N))
4309 return DivRem;
4310
4311 return SDValue();
4312}
4313
4314SDValue DAGCombiner::visitUDIVLike(SDValue N0, SDValue N1, SDNode *N) {
4315 SDLoc DL(N);
4316 EVT VT = N->getValueType(0);
4317
4318 // fold (udiv x, (1 << c)) -> x >>u c
4319 if (isConstantOrConstantVector(N1, /*NoOpaques*/ true) &&
4320 DAG.isKnownToBeAPowerOfTwo(N1)) {
4321 SDValue LogBase2 = BuildLogBase2(N1, DL);
4322 AddToWorklist(LogBase2.getNode());
4323
4324 EVT ShiftVT = getShiftAmountTy(N0.getValueType());
4325 SDValue Trunc = DAG.getZExtOrTrunc(LogBase2, DL, ShiftVT);
4326 AddToWorklist(Trunc.getNode());
4327 return DAG.getNode(ISD::SRL, DL, VT, N0, Trunc);
4328 }
4329
4330 // fold (udiv x, (shl c, y)) -> x >>u (log2(c)+y) iff c is power of 2
4331 if (N1.getOpcode() == ISD::SHL) {
4332 SDValue N10 = N1.getOperand(0);
4333 if (isConstantOrConstantVector(N10, /*NoOpaques*/ true) &&
4334 DAG.isKnownToBeAPowerOfTwo(N10)) {
4335 SDValue LogBase2 = BuildLogBase2(N10, DL);
4336 AddToWorklist(LogBase2.getNode());
4337
4338 EVT ADDVT = N1.getOperand(1).getValueType();
4339 SDValue Trunc = DAG.getZExtOrTrunc(LogBase2, DL, ADDVT);
4340 AddToWorklist(Trunc.getNode());
4341 SDValue Add = DAG.getNode(ISD::ADD, DL, ADDVT, N1.getOperand(1), Trunc);
4342 AddToWorklist(Add.getNode());
4343 return DAG.getNode(ISD::SRL, DL, VT, N0, Add);
4344 }
4345 }
4346
4347 // fold (udiv x, c) -> alternate
4348 AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
4349 if (isConstantOrConstantVector(N1) &&
4350 !TLI.isIntDivCheap(N->getValueType(0), Attr))
4351 if (SDValue Op = BuildUDIV(N))
4352 return Op;
4353
4354 return SDValue();
4355}
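
Both udiv folds above collapse the divide into a single logical shift right; a brief illustrative sketch with 32-bit unsigned values:

#include <cassert>
#include <cstdint>

int main() {
  uint32_t x = 0xDEADBEEFu;
  // (udiv x, (1 << c)) -> x >>u c
  for (unsigned c = 0; c < 32; ++c)
    assert(x / (1u << c) == (x >> c));
  // (udiv x, (shl c, y)) -> x >>u (log2(c) + y), here with c = 8 (log2 = 3)
  for (unsigned y = 0; y < 28; ++y)
    assert(x / (8u << y) == (x >> (3 + y)));
  return 0;
}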
4356
4357// handles ISD::SREM and ISD::UREM
4358SDValue DAGCombiner::visitREM(SDNode *N) {
4359 unsigned Opcode = N->getOpcode();
4360 SDValue N0 = N->getOperand(0);
4361 SDValue N1 = N->getOperand(1);
4362 EVT VT = N->getValueType(0);
4363 EVT CCVT = getSetCCResultType(VT);
4364
4365 bool isSigned = (Opcode == ISD::SREM);
4366 SDLoc DL(N);
4367
4368 // fold (rem c1, c2) -> c1%c2
4369 ConstantSDNode *N1C = isConstOrConstSplat(N1);
4370 if (SDValue C = DAG.FoldConstantArithmetic(Opcode, DL, VT, {N0, N1}))
4371 return C;
4372
4373  // fold (urem X, -1) -> select(X == -1, 0, X)
4374 if (!isSigned && N1C && N1C->getAPIntValue().isAllOnesValue())
4375 return DAG.getSelect(DL, VT, DAG.getSetCC(DL, CCVT, N0, N1, ISD::SETEQ),
4376 DAG.getConstant(0, DL, VT), N0);
4377
4378 if (SDValue V = simplifyDivRem(N, DAG))
4379 return V;
4380
4381 if (SDValue NewSel = foldBinOpIntoSelect(N))
4382 return NewSel;
4383
4384 if (isSigned) {
4385 // If we know the sign bits of both operands are zero, strength reduce to a
4386 // urem instead. Handles (X & 0x0FFFFFFF) %s 16 -> X&15
4387 if (DAG.SignBitIsZero(N1) && DAG.SignBitIsZero(N0))
4388 return DAG.getNode(ISD::UREM, DL, VT, N0, N1);
4389 } else {
4390 if (DAG.isKnownToBeAPowerOfTwo(N1)) {
4391 // fold (urem x, pow2) -> (and x, pow2-1)
4392 SDValue NegOne = DAG.getAllOnesConstant(DL, VT);
4393 SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N1, NegOne);
4394 AddToWorklist(Add.getNode());
4395 return DAG.getNode(ISD::AND, DL, VT, N0, Add);
4396 }
4397 if (N1.getOpcode() == ISD::SHL &&
4398 DAG.isKnownToBeAPowerOfTwo(N1.getOperand(0))) {
4399 // fold (urem x, (shl pow2, y)) -> (and x, (add (shl pow2, y), -1))
4400 SDValue NegOne = DAG.getAllOnesConstant(DL, VT);
4401 SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N1, NegOne);
4402 AddToWorklist(Add.getNode());
4403 return DAG.getNode(ISD::AND, DL, VT, N0, Add);
4404 }
4405 }
4406
4407 AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
4408
4409 // If X/C can be simplified by the division-by-constant logic, lower
4410 // X%C to the equivalent of X-X/C*C.
4411 // Reuse the SDIVLike/UDIVLike combines - to avoid mangling nodes, the
4412 // speculative DIV must not cause a DIVREM conversion. We guard against this
4413 // by skipping the simplification if isIntDivCheap(). When div is not cheap,
4414 // combine will not return a DIVREM. Regardless, checking cheapness here
4415 // makes sense since the simplification results in fatter code.
4416 if (DAG.isKnownNeverZero(N1) && !TLI.isIntDivCheap(VT, Attr)) {
4417 SDValue OptimizedDiv =
4418 isSigned ? visitSDIVLike(N0, N1, N) : visitUDIVLike(N0, N1, N);
4419 if (OptimizedDiv.getNode()) {
4420 // If the equivalent Div node also exists, update its users.
4421 unsigned DivOpcode = isSigned ? ISD::SDIV : ISD::UDIV;
4422 if (SDNode *DivNode = DAG.getNodeIfExists(DivOpcode, N->getVTList(),
4423 { N0, N1 }))
4424 CombineTo(DivNode, OptimizedDiv);
4425 SDValue Mul = DAG.getNode(ISD::MUL, DL, VT, OptimizedDiv, N1);
4426 SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, N0, Mul);
4427 AddToWorklist(OptimizedDiv.getNode());
4428 AddToWorklist(Mul.getNode());
4429 return Sub;
4430 }
4431 }
4432
4433  // sdiv/udiv, srem/urem -> sdivrem/udivrem
4434 if (SDValue DivRem = useDivRem(N))
4435 return DivRem.getValue(1);
4436
4437 return SDValue();
4438}
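
The two main rem lowerings above check out with plain scalars: the remainder recomputed from the optimized quotient as X - (X / C) * C, and an unsigned remainder by a power of 2 as a mask. An illustrative sketch (not combiner code):

#include <cassert>
#include <cstdint>

int main() {
  // X % C == X - (X / C) * C, reusing the quotient from the optimized divide.
  for (int32_t x = -100; x <= 100; ++x)
    assert(x % 7 == x - (x / 7) * 7);
  // (urem x, pow2) -> (and x, pow2-1)
  for (uint32_t x = 0; x < 4096; ++x)
    assert(x % 16u == (x & 15u));
  return 0;
}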
4439
4440SDValue DAGCombiner::visitMULHS(SDNode *N) {
4441 SDValue N0 = N->getOperand(0);
4442 SDValue N1 = N->getOperand(1);
4443 EVT VT = N->getValueType(0);
4444 SDLoc DL(N);
4445
4446 if (VT.isVector()) {
4447 // fold (mulhs x, 0) -> 0
4448    // do not return N0/N1, because an undef node may exist.
4449 if (ISD::isConstantSplatVectorAllZeros(N0.getNode()) ||
4450 ISD::isConstantSplatVectorAllZeros(N1.getNode()))
4451 return DAG.getConstant(0, DL, VT);
4452 }
4453
4454 // fold (mulhs x, 0) -> 0
4455 if (isNullConstant(N1))
4456 return N1;
4457 // fold (mulhs x, 1) -> (sra x, size(x)-1)
4458 if (isOneConstant(N1))
4459 return DAG.getNode(ISD::SRA, DL, N0.getValueType(), N0,
4460 DAG.getConstant(N0.getScalarValueSizeInBits() - 1, DL,
4461 getShiftAmountTy(N0.getValueType())));
4462
4463 // fold (mulhs x, undef) -> 0
4464 if (N0.isUndef() || N1.isUndef())
4465 return DAG.getConstant(0, DL, VT);
4466
4467 // If the type twice as wide is legal, transform the mulhs to a wider multiply
4468 // plus a shift.
4469 if (!TLI.isOperationLegalOrCustom(ISD::MULHS, VT) && VT.isSimple() &&
4470 !VT.isVector()) {
4471 MVT Simple = VT.getSimpleVT();
4472 unsigned SimpleSize = Simple.getSizeInBits();
4473 EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2);
4474 if (TLI.isOperationLegal(ISD::MUL, NewVT)) {
4475 N0 = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N0);
4476 N1 = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N1);
4477 N1 = DAG.getNode(ISD::MUL, DL, NewVT, N0, N1);
4478 N1 = DAG.getNode(ISD::SRL, DL, NewVT, N1,
4479 DAG.getConstant(SimpleSize, DL,
4480 getShiftAmountTy(N1.getValueType())));
4481 return DAG.getNode(ISD::TRUNCATE, DL, VT, N1);
4482 }
4483 }
4484
4485 return SDValue();
4486}
4487
4488SDValue DAGCombiner::visitMULHU(SDNode *N) {
4489 SDValue N0 = N->getOperand(0);
4490 SDValue N1 = N->getOperand(1);
4491 EVT VT = N->getValueType(0);
4492 SDLoc DL(N);
4493
4494 if (VT.isVector()) {
4495 // fold (mulhu x, 0) -> 0
4496    // do not return N0/N1, because an undef node may exist.
4497 if (ISD::isConstantSplatVectorAllZeros(N0.getNode()) ||
4498 ISD::isConstantSplatVectorAllZeros(N1.getNode()))
4499 return DAG.getConstant(0, DL, VT);
4500 }
4501
4502 // fold (mulhu x, 0) -> 0
4503 if (isNullConstant(N1))
4504 return N1;
4505 // fold (mulhu x, 1) -> 0
4506 if (isOneConstant(N1))
4507 return DAG.getConstant(0, DL, N0.getValueType());
4508 // fold (mulhu x, undef) -> 0
4509 if (N0.isUndef() || N1.isUndef())
4510 return DAG.getConstant(0, DL, VT);
4511
4512 // fold (mulhu x, (1 << c)) -> x >> (bitwidth - c)
4513 if (isConstantOrConstantVector(N1, /*NoOpaques*/ true) &&
4514 DAG.isKnownToBeAPowerOfTwo(N1) && hasOperation(ISD::SRL, VT)) {
4515 unsigned NumEltBits = VT.getScalarSizeInBits();
4516 SDValue LogBase2 = BuildLogBase2(N1, DL);
4517 SDValue SRLAmt = DAG.getNode(
4518 ISD::SUB, DL, VT, DAG.getConstant(NumEltBits, DL, VT), LogBase2);
4519 EVT ShiftVT = getShiftAmountTy(N0.getValueType());
4520 SDValue Trunc = DAG.getZExtOrTrunc(SRLAmt, DL, ShiftVT);
4521 return DAG.getNode(ISD::SRL, DL, VT, N0, Trunc);
4522 }
4523
4524 // If the type twice as wide is legal, transform the mulhu to a wider multiply
4525 // plus a shift.
4526 if (!TLI.isOperationLegalOrCustom(ISD::MULHU, VT) && VT.isSimple() &&
4527 !VT.isVector()) {
4528 MVT Simple = VT.getSimpleVT();
4529 unsigned SimpleSize = Simple.getSizeInBits();
4530 EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2);
4531 if (TLI.isOperationLegal(ISD::MUL, NewVT)) {
4532 N0 = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N0);
4533 N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N1);
4534 N1 = DAG.getNode(ISD::MUL, DL, NewVT, N0, N1);
4535 N1 = DAG.getNode(ISD::SRL, DL, NewVT, N1,
4536 DAG.getConstant(SimpleSize, DL,
4537 getShiftAmountTy(N1.getValueType())));
4538 return DAG.getNode(ISD::TRUNCATE, DL, VT, N1);
4539 }
4540 }
4541
4542 return SDValue();
4543}
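
The widening expansion shared by visitMULHS and visitMULHU (extend, multiply in the double-width type, shift the high half down, truncate) looks like this for i32. An illustrative sketch; the helper names are hypothetical, and the signed variant assumes arithmetic right shift of negative int64_t values, as on all mainstream compilers:

#include <cassert>
#include <cstdint>

static int32_t mulhs32(int32_t a, int32_t b) {
  // sign_extend -> mul -> srl 32 -> truncate
  return (int32_t)(((int64_t)a * (int64_t)b) >> 32);
}
static uint32_t mulhu32(uint32_t a, uint32_t b) {
  // zero_extend -> mul -> srl 32 -> truncate
  return (uint32_t)(((uint64_t)a * (uint64_t)b) >> 32);
}

int main() {
  assert(mulhs32(-1, 1) == -1);           // high half of the 64-bit -1
  assert(mulhu32(0xFFFFFFFFu, 2u) == 1u); // (2^32 - 1) * 2 = 2^33 - 2
  assert(mulhu32(123u, 1u) == 0u);        // matches (mulhu x, 1) -> 0
  return 0;
}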
4544
4545/// Perform optimizations common to nodes that compute two values. LoOp and HiOp
4546/// give the opcodes for the two computations that are being performed. Return
4547/// the simplified value if a simplification was made.
4548SDValue DAGCombiner::SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp,
4549 unsigned HiOp) {
4550 // If the high half is not needed, just compute the low half.
4551 bool HiExists = N->hasAnyUseOfValue(1);
4552 if (!HiExists && (!LegalOperations ||
4553 TLI.isOperationLegalOrCustom(LoOp, N->getValueType(0)))) {
4554 SDValue Res = DAG.getNode(LoOp, SDLoc(N), N->getValueType(0), N->ops());
4555 return CombineTo(N, Res, Res);
4556 }
4557
4558 // If the low half is not needed, just compute the high half.
4559 bool LoExists = N->hasAnyUseOfValue(0);
4560 if (!LoExists && (!LegalOperations ||
4561 TLI.isOperationLegalOrCustom(HiOp, N->getValueType(1)))) {
4562 SDValue Res = DAG.getNode(HiOp, SDLoc(N), N->getValueType(1), N->ops());
4563 return CombineTo(N, Res, Res);
4564 }
4565
4566 // If both halves are used, return as it is.
4567 if (LoExists && HiExists)
4568 return SDValue();
4569
4570 // If the two computed results can be simplified separately, separate them.
4571 if (LoExists) {
4572 SDValue Lo = DAG.getNode(LoOp, SDLoc(N), N->getValueType(0), N->ops());
4573 AddToWorklist(Lo.getNode());
4574 SDValue LoOpt = combine(Lo.getNode());
4575 if (LoOpt.getNode() && LoOpt.getNode() != Lo.getNode() &&
4576 (!LegalOperations ||
4577 TLI.isOperationLegalOrCustom(LoOpt.getOpcode(), LoOpt.getValueType())))
4578 return CombineTo(N, LoOpt, LoOpt);
4579 }
4580
4581 if (HiExists) {
4582 SDValue Hi = DAG.getNode(HiOp, SDLoc(N), N->getValueType(1), N->ops());
4583 AddToWorklist(Hi.getNode());
4584 SDValue HiOpt = combine(Hi.getNode());
4585 if (HiOpt.getNode() && HiOpt != Hi &&
4586 (!LegalOperations ||
4587 TLI.isOperationLegalOrCustom(HiOpt.getOpcode(), HiOpt.getValueType())))
4588 return CombineTo(N, HiOpt, HiOpt);
4589 }
4590
4591 return SDValue();
4592}
4593
4594SDValue DAGCombiner::visitSMUL_LOHI(SDNode *N) {
4595 if (SDValue Res = SimplifyNodeWithTwoResults(N, ISD::MUL, ISD::MULHS))
4596 return Res;
4597
4598 EVT VT = N->getValueType(0);
4599 SDLoc DL(N);
4600
4601  // If the type twice as wide is legal, transform the smul_lohi into a wider
4602 // multiply plus a shift.
4603 if (VT.isSimple() && !VT.isVector()) {
4604 MVT Simple = VT.getSimpleVT();
4605 unsigned SimpleSize = Simple.getSizeInBits();
4606 EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2);
4607 if (TLI.isOperationLegal(ISD::MUL, NewVT)) {
4608 SDValue Lo = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N->getOperand(0));
4609 SDValue Hi = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N->getOperand(1));
4610 Lo = DAG.getNode(ISD::MUL, DL, NewVT, Lo, Hi);
4611 // Compute the high part as N1.
4612 Hi = DAG.getNode(ISD::SRL, DL, NewVT, Lo,
4613 DAG.getConstant(SimpleSize, DL,
4614 getShiftAmountTy(Lo.getValueType())));
4615 Hi = DAG.getNode(ISD::TRUNCATE, DL, VT, Hi);
4616 // Compute the low part as N0.
4617 Lo = DAG.getNode(ISD::TRUNCATE, DL, VT, Lo);
4618 return CombineTo(N, Lo, Hi);
4619 }
4620 }
4621
4622 return SDValue();
4623}
4624
4625SDValue DAGCombiner::visitUMUL_LOHI(SDNode *N) {
4626 if (SDValue Res = SimplifyNodeWithTwoResults(N, ISD::MUL, ISD::MULHU))
4627 return Res;
4628
4629 EVT VT = N->getValueType(0);
4630 SDLoc DL(N);
4631
4632 // (umul_lohi N0, 0) -> (0, 0)
4633 if (isNullConstant(N->getOperand(1))) {
4634 SDValue Zero = DAG.getConstant(0, DL, VT);
4635 return CombineTo(N, Zero, Zero);
4636 }
4637
4638 // (umul_lohi N0, 1) -> (N0, 0)
4639 if (isOneConstant(N->getOperand(1))) {
4640 SDValue Zero = DAG.getConstant(0, DL, VT);
4641 return CombineTo(N, N->getOperand(0), Zero);
4642 }
4643
4644  // If the type twice as wide is legal, transform the umul_lohi into a wider
4645 // multiply plus a shift.
4646 if (VT.isSimple() && !VT.isVector()) {
4647 MVT Simple = VT.getSimpleVT();
4648 unsigned SimpleSize = Simple.getSizeInBits();
4649 EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2);
4650 if (TLI.isOperationLegal(ISD::MUL, NewVT)) {
4651 SDValue Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N->getOperand(0));
4652 SDValue Hi = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N->getOperand(1));
4653 Lo = DAG.getNode(ISD::MUL, DL, NewVT, Lo, Hi);
4654 // Compute the high part as N1.
4655 Hi = DAG.getNode(ISD::SRL, DL, NewVT, Lo,
4656 DAG.getConstant(SimpleSize, DL,
4657 getShiftAmountTy(Lo.getValueType())));
4658 Hi = DAG.getNode(ISD::TRUNCATE, DL, VT, Hi);
4659 // Compute the low part as N0.
4660 Lo = DAG.getNode(ISD::TRUNCATE, DL, VT, Lo);
4661 return CombineTo(N, Lo, Hi);
4662 }
4663 }
4664
4665 return SDValue();
4666}
4667
4668SDValue DAGCombiner::visitMULO(SDNode *N) {
4669 SDValue N0 = N->getOperand(0);
4670 SDValue N1 = N->getOperand(1);
4671 EVT VT = N0.getValueType();
4672 bool IsSigned = (ISD::SMULO == N->getOpcode());
4673
4674 EVT CarryVT = N->getValueType(1);
4675 SDLoc DL(N);
4676
4677 ConstantSDNode *N0C = isConstOrConstSplat(N0);
4678 ConstantSDNode *N1C = isConstOrConstSplat(N1);
4679
4680 // fold operation with constant operands.
4681 // TODO: Move this to FoldConstantArithmetic when it supports nodes with
4682 // multiple results.
4683 if (N0C && N1C) {
4684 bool Overflow;
4685 APInt Result =
4686 IsSigned ? N0C->getAPIntValue().smul_ov(N1C->getAPIntValue(), Overflow)
4687 : N0C->getAPIntValue().umul_ov(N1C->getAPIntValue(), Overflow);
4688 return CombineTo(N, DAG.getConstant(Result, DL, VT),
4689 DAG.getBoolConstant(Overflow, DL, CarryVT, CarryVT));
4690 }
4691
4692 // canonicalize constant to RHS.
4693 if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
4694 !DAG.isConstantIntBuildVectorOrConstantInt(N1))
4695 return DAG.getNode(N->getOpcode(), DL, N->getVTList(), N1, N0);
4696
4697 // fold (mulo x, 0) -> 0 + no carry out
4698 if (isNullOrNullSplat(N1))
4699 return CombineTo(N, DAG.getConstant(0, DL, VT),
4700 DAG.getConstant(0, DL, CarryVT));
4701
4702 // (mulo x, 2) -> (addo x, x)
4703 if (N1C && N1C->getAPIntValue() == 2)
4704 return DAG.getNode(IsSigned ? ISD::SADDO : ISD::UADDO, DL,
4705 N->getVTList(), N0, N0);
4706
4707 if (IsSigned) {
4708 // A 1 bit SMULO overflows if both inputs are 1.
4709 if (VT.getScalarSizeInBits() == 1) {
4710 SDValue And = DAG.getNode(ISD::AND, DL, VT, N0, N1);
4711 return CombineTo(N, And,
4712 DAG.getSetCC(DL, CarryVT, And,
4713 DAG.getConstant(0, DL, VT), ISD::SETNE));
4714 }
4715
4716 // Multiplying n * m significant bits yields a result of n + m significant
4717 // bits. If the total number of significant bits does not exceed the
4718 // result bit width (minus 1), there is no overflow.
4719 unsigned SignBits = DAG.ComputeNumSignBits(N0);
4720 if (SignBits > 1)
4721 SignBits += DAG.ComputeNumSignBits(N1);
4722 if (SignBits > VT.getScalarSizeInBits() + 1)
4723 return CombineTo(N, DAG.getNode(ISD::MUL, DL, VT, N0, N1),
4724 DAG.getConstant(0, DL, CarryVT));
4725 } else {
4726 KnownBits N1Known = DAG.computeKnownBits(N1);
4727 KnownBits N0Known = DAG.computeKnownBits(N0);
4728 bool Overflow;
4729 (void)N0Known.getMaxValue().umul_ov(N1Known.getMaxValue(), Overflow);
4730 if (!Overflow)
4731 return CombineTo(N, DAG.getNode(ISD::MUL, DL, VT, N0, N1),
4732 DAG.getConstant(0, DL, CarryVT));
4733 }
4734
4735 return SDValue();
4736}
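
The sign-bit argument used for SMULO above can be checked exhaustively for small operands: i32 values that fit in 8 signed bits carry at least 25 sign bits each, and 25 + 25 = 50 > 32 + 1, so their product can never overflow. A brief illustrative sketch:

#include <cassert>
#include <cstdint>

int main() {
  for (int32_t a = -128; a <= 127; ++a)
    for (int32_t b = -128; b <= 127; ++b) {
      int64_t wide = (int64_t)a * (int64_t)b; // needs at most 16 value bits
      assert(wide == (int32_t)wide);          // so it always fits in i32
    }
  return 0;
}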
4737
4738SDValue DAGCombiner::visitIMINMAX(SDNode *N) {
4739 SDValue N0 = N->getOperand(0);
4740 SDValue N1 = N->getOperand(1);
4741 EVT VT = N0.getValueType();
4742 unsigned Opcode = N->getOpcode();
4743
4744 // fold vector ops
4745 if (VT.isVector())
4746 if (SDValue FoldedVOp = SimplifyVBinOp(N))
4747 return FoldedVOp;
4748
4749 // fold operation with constant operands.
4750 if (SDValue C = DAG.FoldConstantArithmetic(Opcode, SDLoc(N), VT, {N0, N1}))
4751 return C;
4752
4753 // canonicalize constant to RHS
4754 if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
4755 !DAG.isConstantIntBuildVectorOrConstantInt(N1))
4756 return DAG.getNode(N->getOpcode(), SDLoc(N), VT, N1, N0);
4757
4758  // If the sign bits are zero, flip between UMIN/UMAX and SMIN/SMAX.
4759 // Only do this if the current op isn't legal and the flipped is.
4760 if (!TLI.isOperationLegal(Opcode, VT) &&
4761 (N0.isUndef() || DAG.SignBitIsZero(N0)) &&
4762 (N1.isUndef() || DAG.SignBitIsZero(N1))) {
4763 unsigned AltOpcode;
4764 switch (Opcode) {
4765 case ISD::SMIN: AltOpcode = ISD::UMIN; break;
4766 case ISD::SMAX: AltOpcode = ISD::UMAX; break;
4767 case ISD::UMIN: AltOpcode = ISD::SMIN; break;
4768 case ISD::UMAX: AltOpcode = ISD::SMAX; break;
4769    default: llvm_unreachable("Unknown MINMAX opcode");
4770 }
4771 if (TLI.isOperationLegal(AltOpcode, VT))
4772 return DAG.getNode(AltOpcode, SDLoc(N), VT, N0, N1);
4773 }
4774
4775 // Simplify the operands using demanded-bits information.
4776 if (SimplifyDemandedBits(SDValue(N, 0)))
4777 return SDValue(N, 0);
4778
4779 return SDValue();
4780}
4781
4782/// If this is a bitwise logic instruction and both operands have the same
4783/// opcode, try to sink the other opcode after the logic instruction.
4784SDValue DAGCombiner::hoistLogicOpWithSameOpcodeHands(SDNode *N) {
4785 SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
4786 EVT VT = N0.getValueType();
4787 unsigned LogicOpcode = N->getOpcode();
4788 unsigned HandOpcode = N0.getOpcode();
4789  assert((LogicOpcode == ISD::AND || LogicOpcode == ISD::OR ||
4790          LogicOpcode == ISD::XOR) && "Expected logic opcode");
4791  assert(HandOpcode == N1.getOpcode() && "Bad input!");
4792
4793 // Bail early if none of these transforms apply.
4794 if (N0.getNumOperands() == 0)
4795 return SDValue();
4796
4797 // FIXME: We should check number of uses of the operands to not increase
4798 // the instruction count for all transforms.
4799
4800 // Handle size-changing casts.
4801 SDValue X = N0.getOperand(0);
4802 SDValue Y = N1.getOperand(0);
4803 EVT XVT = X.getValueType();
4804 SDLoc DL(N);
4805 if (HandOpcode == ISD::ANY_EXTEND || HandOpcode == ISD::ZERO_EXTEND ||
4806 HandOpcode == ISD::SIGN_EXTEND) {
4807 // If both operands have other uses, this transform would create extra
4808 // instructions without eliminating anything.
4809 if (!N0.hasOneUse() && !N1.hasOneUse())
4810 return SDValue();
4811 // We need matching integer source types.
4812 if (XVT != Y.getValueType())
4813 return SDValue();
4814 // Don't create an illegal op during or after legalization. Don't ever
4815 // create an unsupported vector op.
4816 if ((VT.isVector() || LegalOperations) &&
4817 !TLI.isOperationLegalOrCustom(LogicOpcode, XVT))
4818 return SDValue();
4819 // Avoid infinite looping with PromoteIntBinOp.
4820 // TODO: Should we apply desirable/legal constraints to all opcodes?
4821 if (HandOpcode == ISD::ANY_EXTEND && LegalTypes &&
4822 !TLI.isTypeDesirableForOp(LogicOpcode, XVT))
4823 return SDValue();
4824 // logic_op (hand_op X), (hand_op Y) --> hand_op (logic_op X, Y)
4825 SDValue Logic = DAG.getNode(LogicOpcode, DL, XVT, X, Y);
4826 return DAG.getNode(HandOpcode, DL, VT, Logic);
4827 }
4828
4829 // logic_op (truncate x), (truncate y) --> truncate (logic_op x, y)
4830 if (HandOpcode == ISD::TRUNCATE) {
4831 // If both operands have other uses, this transform would create extra
4832 // instructions without eliminating anything.
4833 if (!N0.hasOneUse() && !N1.hasOneUse())
4834 return SDValue();
4835 // We need matching source types.
4836 if (XVT != Y.getValueType())
4837 return SDValue();
4838 // Don't create an illegal op during or after legalization.
4839 if (LegalOperations && !TLI.isOperationLegal(LogicOpcode, XVT))
4840 return SDValue();
4841 // Be extra careful sinking truncate. If it's free, there's no benefit in
4842 // widening a binop. Also, don't create a logic op on an illegal type.
4843 if (TLI.isZExtFree(VT, XVT) && TLI.isTruncateFree(XVT, VT))
4844 return SDValue();
4845 if (!TLI.isTypeLegal(XVT))
4846 return SDValue();
4847 SDValue Logic = DAG.getNode(LogicOpcode, DL, XVT, X, Y);
4848 return DAG.getNode(HandOpcode, DL, VT, Logic);
4849 }
4850
4851 // For binops SHL/SRL/SRA/AND:
4852 // logic_op (OP x, z), (OP y, z) --> OP (logic_op x, y), z
4853 if ((HandOpcode == ISD::SHL || HandOpcode == ISD::SRL ||
4854 HandOpcode == ISD::SRA || HandOpcode == ISD::AND) &&
4855 N0.getOperand(1) == N1.getOperand(1)) {
4856 // If either operand has other uses, this transform is not an improvement.
4857 if (!N0.hasOneUse() || !N1.hasOneUse())
4858 return SDValue();
4859 SDValue Logic = DAG.getNode(LogicOpcode, DL, XVT, X, Y);
4860 return DAG.getNode(HandOpcode, DL, VT, Logic, N0.getOperand(1));
4861 }
4862
4863 // Unary ops: logic_op (bswap x), (bswap y) --> bswap (logic_op x, y)
4864 if (HandOpcode == ISD::BSWAP) {
4865 // If either operand has other uses, this transform is not an improvement.
4866 if (!N0.hasOneUse() || !N1.hasOneUse())
4867 return SDValue();
4868 SDValue Logic = DAG.getNode(LogicOpcode, DL, XVT, X, Y);
4869 return DAG.getNode(HandOpcode, DL, VT, Logic);
4870 }
4871
4872 // Simplify xor/and/or (bitcast(A), bitcast(B)) -> bitcast(op (A,B))
4873 // Only perform this optimization up until type legalization, before
4874  // LegalizeVectorOps. LegalizeVectorOps promotes vector operations by
4875 // adding bitcasts. For example (xor v4i32) is promoted to (v2i64), and
4876 // we don't want to undo this promotion.
4877 // We also handle SCALAR_TO_VECTOR because xor/or/and operations are cheaper
4878 // on scalars.
4879 if ((HandOpcode == ISD::BITCAST || HandOpcode == ISD::SCALAR_TO_VECTOR) &&
4880 Level <= AfterLegalizeTypes) {
4881 // Input types must be integer and the same.
4882 if (XVT.isInteger() && XVT == Y.getValueType() &&
4883 !(VT.isVector() && TLI.isTypeLegal(VT) &&
4884 !XVT.isVector() && !TLI.isTypeLegal(XVT))) {
4885 SDValue Logic = DAG.getNode(LogicOpcode, DL, XVT, X, Y);
4886 return DAG.getNode(HandOpcode, DL, VT, Logic);
4887 }
4888 }
4889
4890 // Xor/and/or are indifferent to the swizzle operation (shuffle of one value).
4891 // Simplify xor/and/or (shuff(A), shuff(B)) -> shuff(op (A,B))
4892 // If both shuffles use the same mask, and both shuffle within a single
4893 // vector, then it is worthwhile to move the swizzle after the operation.
4894 // The type-legalizer generates this pattern when loading illegal
4895 // vector types from memory. In many cases this allows additional shuffle
4896 // optimizations.
4897 // There are other cases where moving the shuffle after the xor/and/or
4898 // is profitable even if shuffles don't perform a swizzle.
4899 // If both shuffles use the same mask, and both shuffles have the same first
4900 // or second operand, then it might still be profitable to move the shuffle
4901 // after the xor/and/or operation.
4902 if (HandOpcode == ISD::VECTOR_SHUFFLE && Level < AfterLegalizeDAG) {
4903 auto *SVN0 = cast<ShuffleVectorSDNode>(N0);
4904 auto *SVN1 = cast<ShuffleVectorSDNode>(N1);
4905    assert(X.getValueType() == Y.getValueType() &&
4906           "Inputs to shuffles are not the same type");
4907
4908 // Check that both shuffles use the same mask. The masks are known to be of
4909 // the same length because the result vector type is the same.
4910 // Check also that shuffles have only one use to avoid introducing extra
4911 // instructions.
4912 if (!SVN0->hasOneUse() || !SVN1->hasOneUse() ||
4913 !SVN0->getMask().equals(SVN1->getMask()))
4914 return SDValue();
4915
4916 // Don't try to fold this node if it requires introducing a
4917 // build vector of all zeros that might be illegal at this stage.
4918 SDValue ShOp = N0.getOperand(1);
4919 if (LogicOpcode == ISD::XOR && !ShOp.isUndef())
4920 ShOp = tryFoldToZero(DL, TLI, VT, DAG, LegalOperations);
4921
4922 // (logic_op (shuf (A, C), shuf (B, C))) --> shuf (logic_op (A, B), C)
4923 if (N0.getOperand(1) == N1.getOperand(1) && ShOp.getNode()) {
4924 SDValue Logic = DAG.getNode(LogicOpcode, DL, VT,
4925 N0.getOperand(0), N1.getOperand(0));
4926 return DAG.getVectorShuffle(VT, DL, Logic, ShOp, SVN0->getMask());
4927 }
4928
4929 // Don't try to fold this node if it requires introducing a
4930 // build vector of all zeros that might be illegal at this stage.
4931 ShOp = N0.getOperand(0);
4932 if (LogicOpcode == ISD::XOR && !ShOp.isUndef())
4933 ShOp = tryFoldToZero(DL, TLI, VT, DAG, LegalOperations);
4934
4935 // (logic_op (shuf (C, A), shuf (C, B))) --> shuf (C, logic_op (A, B))
4936 if (N0.getOperand(0) == N1.getOperand(0) && ShOp.getNode()) {
4937 SDValue Logic = DAG.getNode(LogicOpcode, DL, VT, N0.getOperand(1),
4938 N1.getOperand(1));
4939 return DAG.getVectorShuffle(VT, DL, ShOp, Logic, SVN0->getMask());
4940 }
4941 }
4942
4943 return SDValue();
4944}
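
The hand-op hoisting above rests on algebraic identities like these, shown for i8 operands widened to i32 and for a shared shift amount (an illustrative sketch, not combiner code):

#include <cassert>
#include <cstdint>

int main() {
  uint8_t x = 0xA5, y = 0x3C;
  // logic_op (zext X), (zext Y) --> zext (logic_op X, Y)
  assert(((uint32_t)x & (uint32_t)y) == (uint32_t)(uint8_t)(x & y));
  assert(((uint32_t)x | (uint32_t)y) == (uint32_t)(uint8_t)(x | y));
  // logic_op (shl X, z), (shl Y, z) --> shl (logic_op X, Y), z
  uint32_t a = 0x0F0Fu, b = 0x3300u;
  assert(((a << 3) ^ (b << 3)) == ((a ^ b) << 3));
  return 0;
}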
4945
4946/// Try to make (and/or setcc (LL, LR), setcc (RL, RR)) more efficient.
4947SDValue DAGCombiner::foldLogicOfSetCCs(bool IsAnd, SDValue N0, SDValue N1,
4948 const SDLoc &DL) {
4949 SDValue LL, LR, RL, RR, N0CC, N1CC;
4950 if (!isSetCCEquivalent(N0, LL, LR, N0CC) ||
4951 !isSetCCEquivalent(N1, RL, RR, N1CC))
4952 return SDValue();
4953
4954  assert(N0.getValueType() == N1.getValueType() &&
4955         "Unexpected operand types for bitwise logic op");
4956  assert(LL.getValueType() == LR.getValueType() &&
4957         RL.getValueType() == RR.getValueType() &&
4958         "Unexpected operand types for setcc");
4959
4960 // If we're here post-legalization or the logic op type is not i1, the logic
4961 // op type must match a setcc result type. Also, all folds require new
4962 // operations on the left and right operands, so those types must match.
4963 EVT VT = N0.getValueType();
4964 EVT OpVT = LL.getValueType();
4965 if (LegalOperations || VT.getScalarType() != MVT::i1)
4966 if (VT != getSetCCResultType(OpVT))
4967 return SDValue();
4968 if (OpVT != RL.getValueType())
4969 return SDValue();
4970
4971 ISD::CondCode CC0 = cast<CondCodeSDNode>(N0CC)->get();
4972 ISD::CondCode CC1 = cast<CondCodeSDNode>(N1CC)->get();
4973 bool IsInteger = OpVT.isInteger();
4974 if (LR == RR && CC0 == CC1 && IsInteger) {
4975 bool IsZero = isNullOrNullSplat(LR);
4976 bool IsNeg1 = isAllOnesOrAllOnesSplat(LR);
4977
4978 // All bits clear?
4979 bool AndEqZero = IsAnd && CC1 == ISD::SETEQ && IsZero;
4980 // All sign bits clear?
4981 bool AndGtNeg1 = IsAnd && CC1 == ISD::SETGT && IsNeg1;
4982 // Any bits set?
4983 bool OrNeZero = !IsAnd && CC1 == ISD::SETNE && IsZero;
4984 // Any sign bits set?
4985 bool OrLtZero = !IsAnd && CC1 == ISD::SETLT && IsZero;
4986
4987 // (and (seteq X, 0), (seteq Y, 0)) --> (seteq (or X, Y), 0)
4988 // (and (setgt X, -1), (setgt Y, -1)) --> (setgt (or X, Y), -1)
4989 // (or (setne X, 0), (setne Y, 0)) --> (setne (or X, Y), 0)
4990 // (or (setlt X, 0), (setlt Y, 0)) --> (setlt (or X, Y), 0)
4991 if (AndEqZero || AndGtNeg1 || OrNeZero || OrLtZero) {
4992 SDValue Or = DAG.getNode(ISD::OR, SDLoc(N0), OpVT, LL, RL);
4993 AddToWorklist(Or.getNode());
4994 return DAG.getSetCC(DL, VT, Or, LR, CC1);
4995 }
4996
4997 // All bits set?
4998 bool AndEqNeg1 = IsAnd && CC1 == ISD::SETEQ && IsNeg1;
4999 // All sign bits set?
5000 bool AndLtZero = IsAnd && CC1 == ISD::SETLT && IsZero;
5001 // Any bits clear?
5002 bool OrNeNeg1 = !IsAnd && CC1 == ISD::SETNE && IsNeg1;
5003 // Any sign bits clear?
5004 bool OrGtNeg1 = !IsAnd && CC1 == ISD::SETGT && IsNeg1;
5005
5006 // (and (seteq X, -1), (seteq Y, -1)) --> (seteq (and X, Y), -1)
5007 // (and (setlt X, 0), (setlt Y, 0)) --> (setlt (and X, Y), 0)
5008 // (or (setne X, -1), (setne Y, -1)) --> (setne (and X, Y), -1)
5009    // (or (setgt X, -1), (setgt Y, -1)) --> (setgt (and X, Y), -1)
5010 if (AndEqNeg1 || AndLtZero || OrNeNeg1 || OrGtNeg1) {
5011 SDValue And = DAG.getNode(ISD::AND, SDLoc(N0), OpVT, LL, RL);
5012 AddToWorklist(And.getNode());
5013 return DAG.getSetCC(DL, VT, And, LR, CC1);
5014 }
5015 }
5016
5017 // TODO: What is the 'or' equivalent of this fold?
5018 // (and (setne X, 0), (setne X, -1)) --> (setuge (add X, 1), 2)
5019 if (IsAnd && LL == RL && CC0 == CC1 && OpVT.getScalarSizeInBits() > 1 &&
5020 IsInteger && CC0 == ISD::SETNE &&
5021 ((isNullConstant(LR) && isAllOnesConstant(RR)) ||
5022 (isAllOnesConstant(LR) && isNullConstant(RR)))) {
5023 SDValue One = DAG.getConstant(1, DL, OpVT);
5024 SDValue Two = DAG.getConstant(2, DL, OpVT);
5025 SDValue Add = DAG.getNode(ISD::ADD, SDLoc(N0), OpVT, LL, One);
5026 AddToWorklist(Add.getNode());
5027 return DAG.getSetCC(DL, VT, Add, Two, ISD::SETUGE);
5028 }
5029
5030 // Try more general transforms if the predicates match and the only user of
5031 // the compares is the 'and' or 'or'.
5032 if (IsInteger && TLI.convertSetCCLogicToBitwiseLogic(OpVT) && CC0 == CC1 &&
5033 N0.hasOneUse() && N1.hasOneUse()) {
5034 // and (seteq A, B), (seteq C, D) --> seteq (or (xor A, B), (xor C, D)), 0
5035 // or (setne A, B), (setne C, D) --> setne (or (xor A, B), (xor C, D)), 0
5036 if ((IsAnd && CC1 == ISD::SETEQ) || (!IsAnd && CC1 == ISD::SETNE)) {
5037 SDValue XorL = DAG.getNode(ISD::XOR, SDLoc(N0), OpVT, LL, LR);
5038 SDValue XorR = DAG.getNode(ISD::XOR, SDLoc(N1), OpVT, RL, RR);
5039 SDValue Or = DAG.getNode(ISD::OR, DL, OpVT, XorL, XorR);
5040 SDValue Zero = DAG.getConstant(0, DL, OpVT);
5041 return DAG.getSetCC(DL, VT, Or, Zero, CC1);
5042 }
5043
5044 // Turn compare of constants whose difference is 1 bit into add+and+setcc.
5045 // TODO - support non-uniform vector amounts.
5046 if ((IsAnd && CC1 == ISD::SETNE) || (!IsAnd && CC1 == ISD::SETEQ)) {
5047 // Match a shared variable operand and 2 non-opaque constant operands.
5048 ConstantSDNode *C0 = isConstOrConstSplat(LR);
5049 ConstantSDNode *C1 = isConstOrConstSplat(RR);
5050 if (LL == RL && C0 && C1 && !C0->isOpaque() && !C1->isOpaque()) {
5051 const APInt &CMax =
5052 APIntOps::umax(C0->getAPIntValue(), C1->getAPIntValue());
5053 const APInt &CMin =
5054 APIntOps::umin(C0->getAPIntValue(), C1->getAPIntValue());
5055 // The difference of the constants must be a single bit.
5056 if ((CMax - CMin).isPowerOf2()) {
5057 // and/or (setcc X, CMax, ne), (setcc X, CMin, ne/eq) -->
5058            // setcc (and (sub X, CMin), ~(CMax - CMin)), 0, ne/eq
5059 SDValue Max = DAG.getNode(ISD::UMAX, DL, OpVT, LR, RR);
5060 SDValue Min = DAG.getNode(ISD::UMIN, DL, OpVT, LR, RR);
5061 SDValue Offset = DAG.getNode(ISD::SUB, DL, OpVT, LL, Min);
5062 SDValue Diff = DAG.getNode(ISD::SUB, DL, OpVT, Max, Min);
5063 SDValue Mask = DAG.getNOT(DL, Diff, OpVT);
5064 SDValue And = DAG.getNode(ISD::AND, DL, OpVT, Offset, Mask);
5065 SDValue Zero = DAG.getConstant(0, DL, OpVT);
5066 return DAG.getSetCC(DL, VT, And, Zero, CC0);
5067 }
5068 }
5069 }
5070 }
5071
5072 // Canonicalize equivalent operands to LL == RL.
5073 if (LL == RR && LR == RL) {
5074 CC1 = ISD::getSetCCSwappedOperands(CC1);
5075 std::swap(RL, RR);
5076 }
5077
5078 // (and (setcc X, Y, CC0), (setcc X, Y, CC1)) --> (setcc X, Y, NewCC)
5079 // (or (setcc X, Y, CC0), (setcc X, Y, CC1)) --> (setcc X, Y, NewCC)
5080 if (LL == RL && LR == RR) {
5081 ISD::CondCode NewCC = IsAnd ? ISD::getSetCCAndOperation(CC0, CC1, OpVT)
5082 : ISD::getSetCCOrOperation(CC0, CC1, OpVT);
5083 if (NewCC != ISD::SETCC_INVALID &&
5084 (!LegalOperations ||
5085 (TLI.isCondCodeLegal(NewCC, LL.getSimpleValueType()) &&
5086 TLI.isOperationLegal(ISD::SETCC, OpVT))))
5087 return DAG.getSetCC(DL, VT, LL, LR, NewCC);
5088 }
5089
5090 return SDValue();
5091}
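
The setcc folds above are ordinary boolean algebra over integer comparisons; a compact exhaustive check on small values (illustrative sketch):

#include <cassert>
#include <cstdint>

int main() {
  for (int32_t x = -8; x <= 8; ++x)
    for (int32_t y = -8; y <= 8; ++y) {
      // (and (seteq X, 0), (seteq Y, 0)) --> (seteq (or X, Y), 0)
      assert(((x == 0) && (y == 0)) == ((x | y) == 0));
      // (or (setlt X, 0), (setlt Y, 0)) --> (setlt (or X, Y), 0)
      assert(((x < 0) || (y < 0)) == ((x | y) < 0));
      // and (seteq A, B), (seteq C, D) --> seteq (or (xor A, B), (xor C, D)), 0
      assert(((x == 3) && (y == -2)) == (((x ^ 3) | (y ^ -2)) == 0));
    }
  return 0;
}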
5092
5093/// This contains all DAGCombine rules which reduce two values combined by
5094/// an And operation to a single value. This makes them reusable in the context
5095/// of visitSELECT(). Rules involving constants are not included as
5096/// visitSELECT() already handles those cases.
5097SDValue DAGCombiner::visitANDLike(SDValue N0, SDValue N1, SDNode *N) {
5098 EVT VT = N1.getValueType();
5099 SDLoc DL(N);
5100
5101 // fold (and x, undef) -> 0
5102 if (N0.isUndef() || N1.isUndef())
5103 return DAG.getConstant(0, DL, VT);
5104
5105 if (SDValue V = foldLogicOfSetCCs(true, N0, N1, DL))
5106 return V;
5107
5108 if (N0.getOpcode() == ISD::ADD && N1.getOpcode() == ISD::SRL &&
5109 VT.getSizeInBits() <= 64) {
5110 if (ConstantSDNode *ADDI = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
5111 if (ConstantSDNode *SRLI = dyn_cast<ConstantSDNode>(N1.getOperand(1))) {
5112 // Look for (and (add x, c1), (lshr y, c2)). If C1 wasn't a legal
5113 // immediate for an add, but it is legal if its top c2 bits are set,
5114 // transform the ADD so the immediate doesn't need to be materialized
5115 // in a register.
5116 APInt ADDC = ADDI->getAPIntValue();
5117 APInt SRLC = SRLI->getAPIntValue();
5118 if (ADDC.getMinSignedBits() <= 64 &&
5119 SRLC.ult(VT.getSizeInBits()) &&
5120 !TLI.isLegalAddImmediate(ADDC.getSExtValue())) {
5121 APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
5122 SRLC.getZExtValue());
5123 if (DAG.MaskedValueIsZero(N0.getOperand(1), Mask)) {
5124 ADDC |= Mask;
5125 if (TLI.isLegalAddImmediate(ADDC.getSExtValue())) {
5126 SDLoc DL0(N0);
5127 SDValue NewAdd =
5128 DAG.getNode(ISD::ADD, DL0, VT,
5129 N0.getOperand(0), DAG.getConstant(ADDC, DL, VT));
5130 CombineTo(N0.getNode(), NewAdd);
5131 // Return N so it doesn't get rechecked!
5132 return SDValue(N, 0);
5133 }
5134 }
5135 }
5136 }
5137 }
5138 }
5139
5140 // Reduce bit extract of low half of an integer to the narrower type.
5141 // (and (srl i64:x, K), KMask) ->
5142 // (i64 zero_extend (and (srl (i32 (trunc i64:x)), K)), KMask)
5143 if (N0.getOpcode() == ISD::SRL && N0.hasOneUse()) {
5144 if (ConstantSDNode *CAnd = dyn_cast<ConstantSDNode>(N1)) {
5145 if (ConstantSDNode *CShift = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
5146 unsigned Size = VT.getSizeInBits();
5147 const APInt &AndMask = CAnd->getAPIntValue();
5148 unsigned ShiftBits = CShift->getZExtValue();
5149
5150 // Bail out, this node will probably disappear anyway.
5151 if (ShiftBits == 0)
5152 return SDValue();
5153
5154 unsigned MaskBits = AndMask.countTrailingOnes();
5155 EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), Size / 2);
5156
5157 if (AndMask.isMask() &&
5158 // Required bits must not span the two halves of the integer and
5159 // must fit in the half size type.
5160 (ShiftBits + MaskBits <= Size / 2) &&
5161 TLI.isNarrowingProfitable(VT, HalfVT) &&
5162 TLI.isTypeDesirableForOp(ISD::AND, HalfVT) &&
5163 TLI.isTypeDesirableForOp(ISD::SRL, HalfVT) &&
5164 TLI.isTruncateFree(VT, HalfVT) &&
5165 TLI.isZExtFree(HalfVT, VT)) {
5166 // The isNarrowingProfitable check is to avoid regressions on PPC and
5167 // AArch64, which match a few 64-bit bit-insert / bit-extract patterns
5168 // on downstream users of this node. Those patterns could probably be
5169 // extended to handle extensions mixed in.
5170
5171 SDValue SL(N0);
5172 assert(MaskBits <= Size);
5173
5174 // Extracting the highest bit of the low half.
5175 EVT ShiftVT = TLI.getShiftAmountTy(HalfVT, DAG.getDataLayout());
5176 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, HalfVT,
5177 N0.getOperand(0));
5178
5179 SDValue NewMask = DAG.getConstant(AndMask.trunc(Size / 2), SL, HalfVT);
5180 SDValue ShiftK = DAG.getConstant(ShiftBits, SL, ShiftVT);
5181 SDValue Shift = DAG.getNode(ISD::SRL, SL, HalfVT, Trunc, ShiftK);
5182 SDValue And = DAG.getNode(ISD::AND, SL, HalfVT, Shift, NewMask);
5183 return DAG.getNode(ISD::ZERO_EXTEND, SL, VT, And);
5184 }
5185 }
5186 }
5187 }
5188
5189 return SDValue();
5190}
5191
5192bool DAGCombiner::isAndLoadExtLoad(ConstantSDNode *AndC, LoadSDNode *LoadN,
5193 EVT LoadResultTy, EVT &ExtVT) {
5194 if (!AndC->getAPIntValue().isMask())
5195 return false;
5196
5197 unsigned ActiveBits = AndC->getAPIntValue().countTrailingOnes();
5198
5199 ExtVT = EVT::getIntegerVT(*DAG.getContext(), ActiveBits);
5200 EVT LoadedVT = LoadN->getMemoryVT();
5201
5202 if (ExtVT == LoadedVT &&
5203 (!LegalOperations ||
5204 TLI.isLoadExtLegal(ISD::ZEXTLOAD, LoadResultTy, ExtVT))) {
5205 // ZEXTLOAD will match without needing to change the size of the value being
5206 // loaded.
5207 return true;
5208 }
5209
5210 // Do not change the width of volatile or atomic loads.
5211 if (!LoadN->isSimple())
5212 return false;
5213
5214 // Do not generate loads of non-round integer types since these can
5215 // be expensive (and would be wrong if the type is not byte sized).
5216 if (!LoadedVT.bitsGT(ExtVT) || !ExtVT.isRound())
5217 return false;
5218
5219 if (LegalOperations &&
5220 !TLI.isLoadExtLegal(ISD::ZEXTLOAD, LoadResultTy, ExtVT))
5221 return false;
5222
5223 if (!TLI.shouldReduceLoadWidth(LoadN, ISD::ZEXTLOAD, ExtVT))
5224 return false;
5225
5226 return true;
5227}
5228
5229bool DAGCombiner::isLegalNarrowLdSt(LSBaseSDNode *LDST,
5230 ISD::LoadExtType ExtType, EVT &MemVT,
5231 unsigned ShAmt) {
5232 if (!LDST)
5233 return false;
5234 // Only allow byte offsets.
5235 if (ShAmt % 8)
5236 return false;
5237
5238 // Do not generate loads of non-round integer types since these can
5239 // be expensive (and would be wrong if the type is not byte sized).
5240 if (!MemVT.isRound())
5241 return false;
5242
5243 // Don't change the width of volatile or atomic loads.
5244 if (!LDST->isSimple())
5245 return false;
5246
5247 EVT LdStMemVT = LDST->getMemoryVT();
5248
5249 // Bail out when changing the scalable property, since we can't be sure that
5250 // we're actually narrowing here.
5251 if (LdStMemVT.isScalableVector() != MemVT.isScalableVector())
5252 return false;
5253
5254 // Verify that we are actually reducing a load width here.
5255 if (LdStMemVT.bitsLT(MemVT))
5256 return false;
5257
5258 // Ensure that this isn't going to produce an unsupported memory access.
5259 if (ShAmt) {
5260 assert(ShAmt % 8 == 0 && "ShAmt is byte offset");
5261 const unsigned ByteShAmt = ShAmt / 8;
5262 const Align LDSTAlign = LDST->getAlign();
5263 const Align NarrowAlign = commonAlignment(LDSTAlign, ByteShAmt);
5264 if (!TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
5265 LDST->getAddressSpace(), NarrowAlign,
5266 LDST->getMemOperand()->getFlags()))
5267 return false;
5268 }
5269
5270 // It's not possible to generate a constant of extended or untyped type.
5271 EVT PtrType = LDST->getBasePtr().getValueType();
5272 if (PtrType == MVT::Untyped || PtrType.isExtended())
5273 return false;
5274
5275 if (isa<LoadSDNode>(LDST)) {
5276 LoadSDNode *Load = cast<LoadSDNode>(LDST);
5277 // Don't transform a load with multiple uses; this would require adding a
5278 // new load.
5279 if (!SDValue(Load, 0).hasOneUse())
5280 return false;
5281
5282 if (LegalOperations &&
5283 !TLI.isLoadExtLegal(ExtType, Load->getValueType(0), MemVT))
5284 return false;
5285
5286 // For the transform to be legal, the load must produce only two values
5287 // (the value loaded and the chain). Don't transform a pre-increment
5288 // load, for example, which produces an extra value. Otherwise the
5289 // transformation is not equivalent, and the downstream logic to replace
5290 // uses gets things wrong.
5291 if (Load->getNumValues() > 2)
5292 return false;
5293
5294 // If the load that we're shrinking is an extload and we're not just
5295 // discarding the extension we can't simply shrink the load. Bail.
5296 // TODO: It would be possible to merge the extensions in some cases.
5297 if (Load->getExtensionType() != ISD::NON_EXTLOAD &&
5298 Load->getMemoryVT().getSizeInBits() < MemVT.getSizeInBits() + ShAmt)
5299 return false;
5300
5301 if (!TLI.shouldReduceLoadWidth(Load, ExtType, MemVT))
5302 return false;
5303 } else {
5304 assert(isa<StoreSDNode>(LDST) && "It is not a Load nor a Store SDNode");
5305 StoreSDNode *Store = cast<StoreSDNode>(LDST);
5306 // Can't write outside the original store
5307 if (Store->getMemoryVT().getSizeInBits() < MemVT.getSizeInBits() + ShAmt)
5308 return false;
5309
5310 if (LegalOperations &&
5311 !TLI.isTruncStoreLegal(Store->getValue().getValueType(), MemVT))
5312 return false;
5313 }
5314 return true;
5315}
5316
5317bool DAGCombiner::SearchForAndLoads(SDNode *N,
5318 SmallVectorImpl<LoadSDNode*> &Loads,
5319 SmallPtrSetImpl<SDNode*> &NodesWithConsts,
5320 ConstantSDNode *Mask,
5321 SDNode *&NodeToMask) {
5322 // Recursively search the operands, looking for loads which can be
5323 // narrowed.
5324 for (SDValue Op : N->op_values()) {
5325 if (Op.getValueType().isVector())
5326 return false;
5327
5328 // Some constants may need fixing up later if they are too large.
5329 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
5330 if ((N->getOpcode() == ISD::OR || N->getOpcode() == ISD::XOR) &&
5331 (Mask->getAPIntValue() & C->getAPIntValue()) != C->getAPIntValue())
5332 NodesWithConsts.insert(N);
5333 continue;
5334 }
5335
5336 if (!Op.hasOneUse())
5337 return false;
5338
5339 switch(Op.getOpcode()) {
5340 case ISD::LOAD: {
5341 auto *Load = cast<LoadSDNode>(Op);
5342 EVT ExtVT;
5343 if (isAndLoadExtLoad(Mask, Load, Load->getValueType(0), ExtVT) &&
5344 isLegalNarrowLdSt(Load, ISD::ZEXTLOAD, ExtVT)) {
5345
5346 // ZEXTLOAD is already small enough.
5347 if (Load->getExtensionType() == ISD::ZEXTLOAD &&
5348 ExtVT.bitsGE(Load->getMemoryVT()))
5349 continue;
5350
5351 // Use LE to convert equal sized loads to zext.
5352 if (ExtVT.bitsLE(Load->getMemoryVT()))
5353 Loads.push_back(Load);
5354
5355 continue;
5356 }
5357 return false;
5358 }
5359 case ISD::ZERO_EXTEND:
5360 case ISD::AssertZext: {
5361 unsigned ActiveBits = Mask->getAPIntValue().countTrailingOnes();
5362 EVT ExtVT = EVT::getIntegerVT(*DAG.getContext(), ActiveBits);
5363 EVT VT = Op.getOpcode() == ISD::AssertZext ?
5364 cast<VTSDNode>(Op.getOperand(1))->getVT() :
5365 Op.getOperand(0).getValueType();
5366
5367 // We can accept extending nodes if the mask is wider or an equal
5368 // width to the original type.
5369 if (ExtVT.bitsGE(VT))
5370 continue;
5371 break;
5372 }
5373 case ISD::OR:
5374 case ISD::XOR:
5375 case ISD::AND:
5376 if (!SearchForAndLoads(Op.getNode(), Loads, NodesWithConsts, Mask,
5377 NodeToMask))
5378 return false;
5379 continue;
5380 }
5381
5382 // Allow one node which will be masked along with any loads found.
5383 if (NodeToMask)
5384 return false;
5385
5386 // Also ensure that the node to be masked only produces one data result.
5387 NodeToMask = Op.getNode();
5388 if (NodeToMask->getNumValues() > 1) {
5389 bool HasValue = false;
5390 for (unsigned i = 0, e = NodeToMask->getNumValues(); i < e; ++i) {
5391 MVT VT = SDValue(NodeToMask, i).getSimpleValueType();
5392 if (VT != MVT::Glue && VT != MVT::Other) {
5393 if (HasValue) {
5394 NodeToMask = nullptr;
5395 return false;
5396 }
5397 HasValue = true;
5398 }
5399 }
5400 assert(HasValue && "Node to be masked has no data result?");
5401 }
5402 }
5403 return true;
5404}
5405
5406bool DAGCombiner::BackwardsPropagateMask(SDNode *N) {
5407 auto *Mask = dyn_cast<ConstantSDNode>(N->getOperand(1));
5408 if (!Mask)
5409 return false;
5410
5411 if (!Mask->getAPIntValue().isMask())
5412 return false;
5413
5414 // No need to do anything if the and directly uses a load.
5415 if (isa<LoadSDNode>(N->getOperand(0)))
5416 return false;
5417
5418 SmallVector<LoadSDNode*, 8> Loads;
5419 SmallPtrSet<SDNode*, 2> NodesWithConsts;
5420 SDNode *FixupNode = nullptr;
5421 if (SearchForAndLoads(N, Loads, NodesWithConsts, Mask, FixupNode)) {
5422 if (Loads.size() == 0)
5423 return false;
5424
5425 LLVM_DEBUG(dbgs() << "Backwards propagate AND: "; N->dump());
5426 SDValue MaskOp = N->getOperand(1);
5427
5428 // If it exists, fix up the single node we allow in the tree that needs
5429 // masking.
5430 if (FixupNode) {
5431 LLVM_DEBUG(dbgs() << "First, need to fix up: "; FixupNode->dump());
5432 SDValue And = DAG.getNode(ISD::AND, SDLoc(FixupNode),
5433 FixupNode->getValueType(0),
5434 SDValue(FixupNode, 0), MaskOp);
5435 DAG.ReplaceAllUsesOfValueWith(SDValue(FixupNode, 0), And);
5436 if (And.getOpcode() == ISD::AND)
5437 DAG.UpdateNodeOperands(And.getNode(), SDValue(FixupNode, 0), MaskOp);
5438 }
5439
5440 // Narrow any constants that need it.
5441 for (auto *LogicN : NodesWithConsts) {
5442 SDValue Op0 = LogicN->getOperand(0);
5443 SDValue Op1 = LogicN->getOperand(1);
5444
5445 if (isa<ConstantSDNode>(Op0))
5446 std::swap(Op0, Op1);
5447
5448 SDValue And = DAG.getNode(ISD::AND, SDLoc(Op1), Op1.getValueType(),
5449 Op1, MaskOp);
5450
5451 DAG.UpdateNodeOperands(LogicN, Op0, And);
5452 }
5453
5454 // Create narrow loads.
5455 for (auto *Load : Loads) {
5456 LLVM_DEBUG(dbgs() << "Propagate AND back to: "; Load->dump());
5457 SDValue And = DAG.getNode(ISD::AND, SDLoc(Load), Load->getValueType(0),
5458 SDValue(Load, 0), MaskOp);
5459 DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 0), And);
5460 if (And.getOpcode() == ISD::AND)
5461 And = SDValue(
5462 DAG.UpdateNodeOperands(And.getNode(), SDValue(Load, 0), MaskOp), 0);
5463 SDValue NewLoad = ReduceLoadWidth(And.getNode());
5464 assert(NewLoad &&
5465 "Shouldn't be masking the load if it can't be narrowed");
5466 CombineTo(Load, NewLoad, NewLoad.getValue(1));
5467 }
5468 DAG.ReplaceAllUsesWith(N, N->getOperand(0).getNode());
5469 return true;
5470 }
5471 return false;
5472}
5473
5474// Unfold
5475// x & (-1 'logical shift' y)
5476// To
5477// (x 'opposite logical shift' y) 'logical shift' y
5478// if it is better for performance.
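// Illustrative i8 instance: x = 0b10110110, y = 3.
//   x & (-1 << 3) = x & 0b11111000 = 0b10110000
//   (x >> 3) << 3 = 0b00010110 << 3 = 0b10110000
// The shift pair avoids materializing the variable mask (-1 << y).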
5479SDValue DAGCombiner::unfoldExtremeBitClearingToShifts(SDNode *N) {
5480 assert(N->getOpcode() == ISD::AND);
5481
5482 SDValue N0 = N->getOperand(0);
5483 SDValue N1 = N->getOperand(1);
5484
5485 // Do we actually prefer shifts over mask?
5486 if (!TLI.shouldFoldMaskToVariableShiftPair(N0))
5487 return SDValue();
5488
5489 // Try to match (-1 '[outer] logical shift' y)
5490 unsigned OuterShift;
5491 unsigned InnerShift; // The opposite direction to the OuterShift.
5492 SDValue Y; // Shift amount.
5493 auto matchMask = [&OuterShift, &InnerShift, &Y](SDValue M) -> bool {
5494 if (!M.hasOneUse())
5495 return false;
5496 OuterShift = M->getOpcode();
5497 if (OuterShift == ISD::SHL)
5498 InnerShift = ISD::SRL;
5499 else if (OuterShift == ISD::SRL)
5500 InnerShift = ISD::SHL;
5501 else
5502 return false;
5503 if (!isAllOnesConstant(M->getOperand(0)))
5504 return false;
5505 Y = M->getOperand(1);
5506 return true;
5507 };
5508
5509 SDValue X;
5510 if (matchMask(N1))
5511 X = N0;
5512 else if (matchMask(N0))
5513 X = N1;
5514 else
5515 return SDValue();
5516
5517 SDLoc DL(N);
5518 EVT VT = N->getValueType(0);
5519
5520 // tmp = x 'opposite logical shift' y
5521 SDValue T0 = DAG.getNode(InnerShift, DL, VT, X, Y);
5522 // ret = tmp 'logical shift' y
5523 SDValue T1 = DAG.getNode(OuterShift, DL, VT, T0, Y);
5524
5525 return T1;
5526}
5527
5528/// Try to replace shift/logic that tests if a bit is clear with mask + setcc.
5529/// For a target with a bit test, this is expected to become test + set and save
5530/// at least 1 instruction.
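/// Illustrative: with C = 3, (and (not (srl X, 3)), 1) tests whether bit 3
/// of X is clear, and becomes (and X, 1<<3) == 0, i.e. a single-bit test.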
5531static SDValue combineShiftAnd1ToBitTest(SDNode *And, SelectionDAG &DAG) {
5532 assert(And->getOpcode() == ISD::AND && "Expected an 'and' op");
5533
5534 // This is probably not worthwhile without a supported type.
5535 EVT VT = And->getValueType(0);
5536 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5537 if (!TLI.isTypeLegal(VT))
5538 return SDValue();
5539
5540 // Look through an optional extension and find a 'not'.
5541 // TODO: Should we favor test+set even without the 'not' op?
5542 SDValue Not = And->getOperand(0), And1 = And->getOperand(1);
5543 if (Not.getOpcode() == ISD::ANY_EXTEND)
5544 Not = Not.getOperand(0);
5545 if (!isBitwiseNot(Not) || !Not.hasOneUse() || !isOneConstant(And1))
5546 return SDValue();
5547
5548 // Look through an optional truncation. The source operand may not be the same
5549 // type as the original 'and', but that is ok because we are masking off
5550 // everything but the low bit.
5551 SDValue Srl = Not.getOperand(0);
5552 if (Srl.getOpcode() == ISD::TRUNCATE)
5553 Srl = Srl.getOperand(0);
5554
5555 // Match a shift-right by constant.
5556 if (Srl.getOpcode() != ISD::SRL || !Srl.hasOneUse() ||
5557 !isa<ConstantSDNode>(Srl.getOperand(1)))
5558 return SDValue();
5559
5560 // We might have looked through casts that make this transform invalid.
5561 // TODO: If the source type is wider than the result type, do the mask and
5562 // compare in the source type.
5563 const APInt &ShiftAmt = Srl.getConstantOperandAPInt(1);
5564 unsigned VTBitWidth = VT.getSizeInBits();
5565 if (ShiftAmt.uge(VTBitWidth))
5566 return SDValue();
5567
5568 // Turn this into a bit-test pattern using mask op + setcc:
5569 // and (not (srl X, C)), 1 --> (and X, 1<<C) == 0
5570 SDLoc DL(And);
5571 SDValue X = DAG.getZExtOrTrunc(Srl.getOperand(0), DL, VT);
5572 EVT CCVT = TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
5573 SDValue Mask = DAG.getConstant(
5574 APInt::getOneBitSet(VTBitWidth, ShiftAmt.getZExtValue()), DL, VT);
5575 SDValue NewAnd = DAG.getNode(ISD::AND, DL, VT, X, Mask);
5576 SDValue Zero = DAG.getConstant(0, DL, VT);
5577 SDValue Setcc = DAG.getSetCC(DL, CCVT, NewAnd, Zero, ISD::SETEQ);
5578 return DAG.getZExtOrTrunc(Setcc, DL, VT);
5579}
5580
5581SDValue DAGCombiner::visitAND(SDNode *N) {
5582 SDValue N0 = N->getOperand(0);
5583 SDValue N1 = N->getOperand(1);
5584 EVT VT = N1.getValueType();
5585
5586 // x & x --> x
5587 if (N0 == N1)
5588 return N0;
5589
5590 // fold vector ops
5591 if (VT.isVector()) {
5592 if (SDValue FoldedVOp = SimplifyVBinOp(N))
5593 return FoldedVOp;
5594
5595 // fold (and x, 0) -> 0, vector edition
5596 if (ISD::isConstantSplatVectorAllZeros(N0.getNode()))
5597 // do not return N0, because an undef node may exist in N0
5598 return DAG.getConstant(APInt::getNullValue(N0.getScalarValueSizeInBits()),
5599 SDLoc(N), N0.getValueType());
5600 if (ISD::isConstantSplatVectorAllZeros(N1.getNode()))
5601 // do not return N1, because an undef node may exist in N1
5602 return DAG.getConstant(APInt::getNullValue(N1.getScalarValueSizeInBits()),
5603 SDLoc(N), N1.getValueType());
5604
5605 // fold (and x, -1) -> x, vector edition
5606 if (ISD::isConstantSplatVectorAllOnes(N0.getNode()))
5607 return N1;
5608 if (ISD::isConstantSplatVectorAllOnes(N1.getNode()))
5609 return N0;
5610
5611 // fold (and (masked_load) (build_vec (x, ...))) to zext_masked_load
5612 auto *MLoad = dyn_cast<MaskedLoadSDNode>(N0);
5613 auto *BVec = dyn_cast<BuildVectorSDNode>(N1);
5614 if (MLoad && BVec && MLoad->getExtensionType() == ISD::EXTLOAD &&
5615 N0.hasOneUse() && N1.hasOneUse()) {
5616 EVT LoadVT = MLoad->getMemoryVT();
5617 EVT ExtVT = VT;
5618 if (TLI.isLoadExtLegal(ISD::ZEXTLOAD, ExtVT, LoadVT)) {
5619 // For this AND to be a zero extension of the masked load the elements
5620 // of the BuildVec must mask the bottom bits of the extended element
5621 // type
5622 if (ConstantSDNode *Splat = BVec->getConstantSplatNode()) {
5623 uint64_t ElementSize =
5624 LoadVT.getVectorElementType().getScalarSizeInBits();
5625 if (Splat->getAPIntValue().isMask(ElementSize)) {
5626 return DAG.getMaskedLoad(
5627 ExtVT, SDLoc(N), MLoad->getChain(), MLoad->getBasePtr(),
5628 MLoad->getOffset(), MLoad->getMask(), MLoad->getPassThru(),
5629 LoadVT, MLoad->getMemOperand(), MLoad->getAddressingMode(),
5630 ISD::ZEXTLOAD, MLoad->isExpandingLoad());
5631 }
5632 }
5633 }
5634 }
5635 }
5636
5637 // fold (and c1, c2) -> c1&c2
5638 ConstantSDNode *N1C = isConstOrConstSplat(N1);
5639 if (SDValue C = DAG.FoldConstantArithmetic(ISD::AND, SDLoc(N), VT, {N0, N1}))
5640 return C;
5641
5642 // canonicalize constant to RHS
5643 if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
5644 !DAG.isConstantIntBuildVectorOrConstantInt(N1))
5645 return DAG.getNode(ISD::AND, SDLoc(N), VT, N1, N0);
5646
5647 // fold (and x, -1) -> x
5648 if (isAllOnesConstant(N1))
5649 return N0;
5650
5651 // if (and x, c) is known to be zero, return 0
5652 unsigned BitWidth = VT.getScalarSizeInBits();
5653 if (N1C && DAG.MaskedValueIsZero(SDValue(N, 0),
5654 APInt::getAllOnesValue(BitWidth)))
5655 return DAG.getConstant(0, SDLoc(N), VT);
5656
5657 if (SDValue NewSel = foldBinOpIntoSelect(N))
5658 return NewSel;
5659
5660 // reassociate and
5661 if (SDValue RAND = reassociateOps(ISD::AND, SDLoc(N), N0, N1, N->getFlags()))
5662 return RAND;
5663
5664 // Try to convert a constant mask AND into a shuffle clear mask.
5665 if (VT.isVector())
5666 if (SDValue Shuffle = XformToShuffleWithZero(N))
5667 return Shuffle;
5668
5669 if (SDValue Combined = combineCarryDiamond(*this, DAG, TLI, N0, N1, N))
5670 return Combined;
5671
5672 // fold (and (or x, C), D) -> D if (C & D) == D
5673 auto MatchSubset = [](ConstantSDNode *LHS, ConstantSDNode *RHS) {
5674 return RHS->getAPIntValue().isSubsetOf(LHS->getAPIntValue());
5675 };
5676 if (N0.getOpcode() == ISD::OR &&
5677 ISD::matchBinaryPredicate(N0.getOperand(1), N1, MatchSubset))
5678 return N1;
5679 // fold (and (any_ext V), c) -> (zero_ext V) if 'and' only clears top bits.
5680 if (N1C && N0.getOpcode() == ISD::ANY_EXTEND) {
5681 SDValue N0Op0 = N0.getOperand(0);
5682 APInt Mask = ~N1C->getAPIntValue();
5683 Mask = Mask.trunc(N0Op0.getScalarValueSizeInBits());
5684 if (DAG.MaskedValueIsZero(N0Op0, Mask)) {
5685 SDValue Zext = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N),
5686 N0.getValueType(), N0Op0);
5687
5688 // Replace uses of the AND with uses of the Zero extend node.
5689 CombineTo(N, Zext);
5690
5691 // We actually want to replace all uses of the any_extend with the
5692 // zero_extend, to avoid duplicating things. This will later cause this
5693 // AND to be folded.
5694 CombineTo(N0.getNode(), Zext);
5695 return SDValue(N, 0); // Return N so it doesn't get rechecked!
5696 }
5697 }
5698
5699 // similarly fold (and (X (load ([non_ext|any_ext|zero_ext] V))), c) ->
5700 // (X (load ([non_ext|zero_ext] V))) if 'and' only clears top bits which must
5701 // already be zero by virtue of the width of the base type of the load.
5702 //
5703 // the 'X' node here can either be nothing or an extract_vector_elt to catch
5704 // more cases.
5705 if ((N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
5706 N0.getValueSizeInBits() == N0.getOperand(0).getScalarValueSizeInBits() &&
5707 N0.getOperand(0).getOpcode() == ISD::LOAD &&
5708 N0.getOperand(0).getResNo() == 0) ||
5709 (N0.getOpcode() == ISD::LOAD && N0.getResNo() == 0)) {
5710 LoadSDNode *Load = cast<LoadSDNode>( (N0.getOpcode() == ISD::LOAD) ?
5711 N0 : N0.getOperand(0) );
5712
5713 // Get the constant (if applicable) the zero'th operand is being ANDed with.
5714 // This can be a pure constant or a vector splat, in which case we treat the
5715 // vector as a scalar and use the splat value.
5716 APInt Constant = APInt::getNullValue(1);
5717 if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) {
5718 Constant = C->getAPIntValue();
5719 } else if (BuildVectorSDNode *Vector = dyn_cast<BuildVectorSDNode>(N1)) {
5720 APInt SplatValue, SplatUndef;
5721 unsigned SplatBitSize;
5722 bool HasAnyUndefs;
5723 bool IsSplat = Vector->isConstantSplat(SplatValue, SplatUndef,
5724 SplatBitSize, HasAnyUndefs);
5725 if (IsSplat) {
5726 // Undef bits can contribute to a possible optimisation if set, so
5727 // set them.
5728 SplatValue |= SplatUndef;
5729
5730 // The splat value may be something like "0x00FFFFFF", which means 0 for
5731 // the first vector value and FF for the rest, repeating. We need a mask
5732 // that will apply equally to all members of the vector, so AND all the
5733 // lanes of the constant together.
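 // E.g. (illustrative): a v4i8 splat reported as the 32-bit value
 // 0x00FFFFFF covers four i8 lanes {0xFF, 0xFF, 0xFF, 0x00}; ANDing the
 // lanes gives 0x00, correctly recording that not every lane is fully
 // masked.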
5734 unsigned EltBitWidth = Vector->getValueType(0).getScalarSizeInBits();
5735
5736 // If the splat value has been compressed to a bitlength lower
5737 // than the size of the vector lane, we need to re-expand it to
5738 // the lane size.
5739 if (EltBitWidth > SplatBitSize)
5740 for (SplatValue = SplatValue.zextOrTrunc(EltBitWidth);
5741 SplatBitSize < EltBitWidth; SplatBitSize = SplatBitSize * 2)
5742 SplatValue |= SplatValue.shl(SplatBitSize);
5743
5744 // Make sure that variable 'Constant' is only set if 'SplatBitSize' is a
5745 // multiple of 'EltBitWidth'. Otherwise, we could propagate a wrong value.
5746 if ((SplatBitSize % EltBitWidth) == 0) {
5747 Constant = APInt::getAllOnesValue(EltBitWidth);
5748 for (unsigned i = 0, n = (SplatBitSize / EltBitWidth); i < n; ++i)
5749 Constant &= SplatValue.extractBits(EltBitWidth, i * EltBitWidth);
5750 }
5751 }
5752 }
5753
5754 // If we want to change an EXTLOAD to a ZEXTLOAD, ensure a ZEXTLOAD is
5755 // actually legal and isn't going to get expanded, else this is a false
5756 // optimisation.
5757 bool CanZextLoadProfitably = TLI.isLoadExtLegal(ISD::ZEXTLOAD,
5758 Load->getValueType(0),
5759 Load->getMemoryVT());
5760
5761 // Resize the constant to the same size as the original memory access before
5762 // extension. If it is still the AllOnesValue then this AND is completely
5763 // unneeded.
5764 Constant = Constant.zextOrTrunc(Load->getMemoryVT().getScalarSizeInBits());
5765
5766 bool B;
5767 switch (Load->getExtensionType()) {
5768 default: B = false; break;
5769 case ISD::EXTLOAD: B = CanZextLoadProfitably; break;
5770 case ISD::ZEXTLOAD:
5771 case ISD::NON_EXTLOAD: B = true; break;
5772 }
5773
5774 if (B && Constant.isAllOnesValue()) {
5775 // If the load type was an EXTLOAD, convert to ZEXTLOAD in order to
5776 // preserve semantics once we get rid of the AND.
5777 SDValue NewLoad(Load, 0);
5778
5779 // Fold the AND away. NewLoad may get replaced immediately.
5780 CombineTo(N, (N0.getNode() == Load) ? NewLoad : N0);
5781
5782 if (Load->getExtensionType() == ISD::EXTLOAD) {
5783 NewLoad = DAG.getLoad(Load->getAddressingMode(), ISD::ZEXTLOAD,
5784 Load->getValueType(0), SDLoc(Load),
5785 Load->getChain(), Load->getBasePtr(),
5786 Load->getOffset(), Load->getMemoryVT(),
5787 Load->getMemOperand());
5788 // Replace uses of the EXTLOAD with the new ZEXTLOAD.
5789 if (Load->getNumValues() == 3) {
5790 // PRE/POST_INC loads have 3 values.
5791 SDValue To[] = { NewLoad.getValue(0), NewLoad.getValue(1),
5792 NewLoad.getValue(2) };
5793 CombineTo(Load, To, 3, true);
5794 } else {
5795 CombineTo(Load, NewLoad.getValue(0), NewLoad.getValue(1));
5796 }
5797 }
5798
5799 return SDValue(N, 0); // Return N so it doesn't get rechecked!
5800 }
5801 }
5802
5803 // fold (and (masked_gather x)) -> (zext_masked_gather x)
5804 if (auto *GN0 = dyn_cast<MaskedGatherSDNode>(N0)) {
5805 EVT MemVT = GN0->getMemoryVT();
5806 EVT ScalarVT = MemVT.getScalarType();
5807
5808 if (SDValue(GN0, 0).hasOneUse() &&
5809 isConstantSplatVectorMaskForType(N1.getNode(), ScalarVT) &&
5810 TLI.isVectorLoadExtDesirable(SDValue(GN0, 0))) {
5811 SDValue Ops[] = {GN0->getChain(), GN0->getPassThru(), GN0->getMask(),
5812 GN0->getBasePtr(), GN0->getIndex(), GN0->getScale()};
5813
5814 SDValue ZExtLoad = DAG.getMaskedGather(
5815 DAG.getVTList(VT, MVT::Other), MemVT, SDLoc(N), Ops,
5816 GN0->getMemOperand(), GN0->getIndexType(), ISD::ZEXTLOAD);
5817
5818 CombineTo(N, ZExtLoad);
5819 AddToWorklist(ZExtLoad.getNode());
5820 // Avoid recheck of N.
5821 return SDValue(N, 0);
5822 }
5823 }
5824
5825 // fold (and (load x), 255) -> (zextload x, i8)
5826 // fold (and (extload x, i16), 255) -> (zextload x, i8)
5827 // fold (and (any_ext (extload x, i16)), 255) -> (zextload x, i8)
5828 if (!VT.isVector() && N1C && (N0.getOpcode() == ISD::LOAD ||
5829 (N0.getOpcode() == ISD::ANY_EXTEND &&
5830 N0.getOperand(0).getOpcode() == ISD::LOAD))) {
5831 if (SDValue Res = ReduceLoadWidth(N)) {
5832 LoadSDNode *LN0 = N0->getOpcode() == ISD::ANY_EXTEND
5833 ? cast<LoadSDNode>(N0.getOperand(0)) : cast<LoadSDNode>(N0);
5834 AddToWorklist(N);
5835 DAG.ReplaceAllUsesOfValueWith(SDValue(LN0, 0), Res);
5836 return SDValue(N, 0);
5837 }
5838 }
5839
5840 if (LegalTypes) {
5841 // Attempt to propagate the AND back up to the leaves which, if they're
5842 // loads, can be combined to narrow loads and the AND node can be removed.
5843 // Perform after legalization so that extend nodes will already be
5844 // combined into the loads.
5845 if (BackwardsPropagateMask(N))
5846 return SDValue(N, 0);
5847 }
5848
5849 if (SDValue Combined = visitANDLike(N0, N1, N))
5850 return Combined;
5851
5852 // Simplify: (and (op x...), (op y...)) -> (op (and x, y))
5853 if (N0.getOpcode() == N1.getOpcode())
5854 if (SDValue V = hoistLogicOpWithSameOpcodeHands(N))
5855 return V;
5856
5857 // Masking the negated extension of a boolean is just the zero-extended
5858 // boolean:
5859 // and (sub 0, zext(bool X)), 1 --> zext(bool X)
5860 // and (sub 0, sext(bool X)), 1 --> zext(bool X)
5861 //
5862 // Note: the SimplifyDemandedBits fold below can make an information-losing
5863 // transform, and then we have no way to find this better fold.
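 // (Illustrative check: for X == 1, zext gives 1 and sub 0,1 = -1, so
 // (-1 & 1) = 1 = zext(X); sext gives -1 and sub 0,-1 = 1, so (1 & 1) = 1.
 // For X == 0 every term is 0.)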
5864 if (N1C && N1C->isOne() && N0.getOpcode() == ISD::SUB) {
5865 if (isNullOrNullSplat(N0.getOperand(0))) {
5866 SDValue SubRHS = N0.getOperand(1);
5867 if (SubRHS.getOpcode() == ISD::ZERO_EXTEND &&
5868 SubRHS.getOperand(0).getScalarValueSizeInBits() == 1)
5869 return SubRHS;
5870 if (SubRHS.getOpcode() == ISD::SIGN_EXTEND &&
5871 SubRHS.getOperand(0).getScalarValueSizeInBits() == 1)
5872 return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, SubRHS.getOperand(0));
5873 }
5874 }
5875
5876 // fold (and (sign_extend_inreg x, i16 to i32), 1) -> (and x, 1)
5877 // fold (and (sra)) -> (and (srl)) when possible.
5878 if (SimplifyDemandedBits(SDValue(N, 0)))
5879 return SDValue(N, 0);
5880
5881 // fold (zext_inreg (extload x)) -> (zextload x)
5882 // fold (zext_inreg (sextload x)) -> (zextload x) iff load has one use
5883 if (ISD::isUNINDEXEDLoad(N0.getNode()) &&
5884 (ISD::isEXTLoad(N0.getNode()) ||
5885 (ISD::isSEXTLoad(N0.getNode()) && N0.hasOneUse()))) {
5886 LoadSDNode *LN0 = cast<LoadSDNode>(N0);
5887 EVT MemVT = LN0->getMemoryVT();
5888 // If we zero all the possible extended bits, then we can turn this into
5889 // a zextload if we are running before legalize or the operation is legal.
5890 unsigned ExtBitSize = N1.getScalarValueSizeInBits();
5891 unsigned MemBitSize = MemVT.getScalarSizeInBits();
5892 APInt ExtBits = APInt::getHighBitsSet(ExtBitSize, ExtBitSize - MemBitSize);
5893 if (DAG.MaskedValueIsZero(N1, ExtBits) &&
5894 ((!LegalOperations && LN0->isSimple()) ||
5895 TLI.isLoadExtLegal(ISD::ZEXTLOAD, VT, MemVT))) {
5896 SDValue ExtLoad =
5897 DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(N0), VT, LN0->getChain(),
5898 LN0->getBasePtr(), MemVT, LN0->getMemOperand());
5899 AddToWorklist(N);
5900 CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1));
5901 return SDValue(N, 0); // Return N so it doesn't get rechecked!
5902 }
5903 }
5904
5905 // fold (and (or (srl N, 8), (shl N, 8)), 0xffff) -> (srl (bswap N), const)
5906 if (N1C && N1C->getAPIntValue() == 0xffff && N0.getOpcode() == ISD::OR) {
5907 if (SDValue BSwap = MatchBSwapHWordLow(N0.getNode(), N0.getOperand(0),
5908 N0.getOperand(1), false))
5909 return BSwap;
5910 }
5911
5912 if (SDValue Shifts = unfoldExtremeBitClearingToShifts(N))
5913 return Shifts;
5914
5915 if (TLI.hasBitTest(N0, N1))
5916 if (SDValue V = combineShiftAnd1ToBitTest(N, DAG))
5917 return V;
5918
5919 // Recognize the following pattern:
5920 //
5921 // AndVT = (and (sign_extend NarrowVT to AndVT) #bitmask)
5922 //
5923 // where bitmask is a mask that clears the upper bits of AndVT. The
5924 // number of bits in bitmask must be a power of two.
5925 auto IsAndZeroExtMask = [](SDValue LHS, SDValue RHS) {
5926 if (LHS->getOpcode() != ISD::SIGN_EXTEND)
5927 return false;
5928
5929 auto *C = dyn_cast<ConstantSDNode>(RHS);
5930 if (!C)
5931 return false;
5932
5933 if (!C->getAPIntValue().isMask(
5934 LHS.getOperand(0).getValueType().getFixedSizeInBits()))
5935 return false;
5936
5937 return true;
5938 };
5939
5940 // Replace (and (sign_extend ...) #bitmask) with (zero_extend ...).
5941 if (IsAndZeroExtMask(N0, N1))
5942 return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, N0.getOperand(0));
5943
5944 return SDValue();
5945}
5946
5947/// Match (a >> 8) | (a << 8) as (bswap a) >> 16.
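/// Illustrative for i32 a = 0xAABBCCDD:
///   ((a & 0xFF00) >> 8) | ((a & 0xFF) << 8) = 0x0000DDCC, and
///   (bswap a) >> 16 = 0xDDCCBBAA >> 16 = 0x0000DDCC.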
5948SDValue DAGCombiner::MatchBSwapHWordLow(SDNode *N, SDValue N0, SDValue N1,
5949 bool DemandHighBits) {
5950 if (!LegalOperations)
5951 return SDValue();
5952
5953 EVT VT = N->getValueType(0);
5954 if (VT != MVT::i64 && VT != MVT::i32 && VT != MVT::i16)
5955 return SDValue();
5956 if (!TLI.isOperationLegalOrCustom(ISD::BSWAP, VT))
5957 return SDValue();
5958
5959 // Recognize (and (shl a, 8), 0xff00), (and (srl a, 8), 0xff)
5960 bool LookPassAnd0 = false;
5961 bool LookPassAnd1 = false;
5962 if (N0.getOpcode() == ISD::AND && N0.getOperand(0).getOpcode() == ISD::SRL)
5963 std::swap(N0, N1);
5964 if (N1.getOpcode() == ISD::AND && N1.getOperand(0).getOpcode() == ISD::SHL)
5965 std::swap(N0, N1);
5966 if (N0.getOpcode() == ISD::AND) {
5967 if (!N0.getNode()->hasOneUse())
5968 return SDValue();
5969 ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
5970 // Also handle 0xffff since the LHS is guaranteed to have zeros there.
5971 // This is needed for X86.
5972 if (!N01C || (N01C->getZExtValue() != 0xFF00 &&
5973 N01C->getZExtValue() != 0xFFFF))
5974 return SDValue();
5975 N0 = N0.getOperand(0);
5976 LookPassAnd0 = true;
5977 }
5978
5979 if (N1.getOpcode() == ISD::AND) {
5980 if (!N1.getNode()->hasOneUse())
5981 return SDValue();
5982 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
5983 if (!N11C || N11C->getZExtValue() != 0xFF)
5984 return SDValue();
5985 N1 = N1.getOperand(0);
5986 LookPassAnd1 = true;
5987 }
5988
5989 if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
5990 std::swap(N0, N1);
5991 if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL)
5992 return SDValue();
5993 if (!N0.getNode()->hasOneUse() || !N1.getNode()->hasOneUse())
5994 return SDValue();
5995
5996 ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
5997 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
5998 if (!N01C || !N11C)
5999 return SDValue();
6000 if (N01C->getZExtValue() != 8 || N11C->getZExtValue() != 8)
6001 return SDValue();
6002
6003 // Look for (shl (and a, 0xff), 8), (srl (and a, 0xff00), 8)
6004 SDValue N00 = N0->getOperand(0);
6005 if (!LookPassAnd0 && N00.getOpcode() == ISD::AND) {
6006 if (!N00.getNode()->hasOneUse())
6007 return SDValue();
6008 ConstantSDNode *N001C = dyn_cast<ConstantSDNode>(N00.getOperand(1));
6009 if (!N001C || N001C->getZExtValue() != 0xFF)
6010 return SDValue();
6011 N00 = N00.getOperand(0);
6012 LookPassAnd0 = true;
6013 }
6014
6015 SDValue N10 = N1->getOperand(0);
6016 if (!LookPassAnd1 && N10.getOpcode() == ISD::AND) {
6017 if (!N10.getNode()->hasOneUse())
6018 return SDValue();
6019 ConstantSDNode *N101C = dyn_cast<ConstantSDNode>(N10.getOperand(1));
6020 // Also allow 0xFFFF since the bits will be shifted out. This is needed
6021 // for X86.
6022 if (!N101C || (N101C->getZExtValue() != 0xFF00 &&
6023 N101C->getZExtValue() != 0xFFFF))
6024 return SDValue();
6025 N10 = N10.getOperand(0);
6026 LookPassAnd1 = true;
6027 }
6028
6029 if (N00 != N10)
6030 return SDValue();
6031
6032 // Make sure everything beyond the low halfword gets set to zero, since the
6033 // SRL by 16 will clear the top bits.
6034 unsigned OpSizeInBits = VT.getSizeInBits();
6035 if (DemandHighBits && OpSizeInBits > 16) {
6036 // If the left-shift isn't masked out then the only way this is a bswap is
6037 // if all bits beyond the low 8 are 0. In that case the entire pattern
6038 // reduces to a left shift anyway: leave it for other parts of the combiner.
6039 if (!LookPassAnd0)
6040 return SDValue();
6041
6042 // However, if the right shift isn't masked out then it might be because
6043 // it's not needed. See if we can spot that too.
6044 if (!LookPassAnd1 &&
6045 !DAG.MaskedValueIsZero(
6046 N10, APInt::getHighBitsSet(OpSizeInBits, OpSizeInBits - 16)))
6047 return SDValue();
6048 }
6049
6050 SDValue Res = DAG.getNode(ISD::BSWAP, SDLoc(N), VT, N00);
6051 if (OpSizeInBits > 16) {
6052 SDLoc DL(N);
6053 Res = DAG.getNode(ISD::SRL, DL, VT, Res,
6054 DAG.getConstant(OpSizeInBits - 16, DL,
6055 getShiftAmountTy(VT)));
6056 }
6057 return Res;
6058}
6059
6060/// Return true if the specified node is an element that makes up a 32-bit
6061/// packed halfword byteswap.
6062/// ((x & 0x000000ff) << 8) |
6063/// ((x & 0x0000ff00) >> 8) |
6064/// ((x & 0x00ff0000) << 8) |
6065/// ((x & 0xff000000) >> 8)
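/// Illustrative: for x = 0xAABBCCDD the four terms are 0x0000DD00,
/// 0x000000CC, 0xBB000000 and 0x00AA0000, which OR to 0xBBAADDCC,
/// i.e. rotl(bswap(x), 16).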
6066static bool isBSwapHWordElement(SDValue N, MutableArrayRef<SDNode *> Parts) {
6067 if (!N.getNode()->hasOneUse())
6068 return false;
6069
6070 unsigned Opc = N.getOpcode();
6071 if (Opc != ISD::AND && Opc != ISD::SHL && Opc != ISD::SRL)
6072 return false;
6073
6074 SDValue N0 = N.getOperand(0);
6075 unsigned Opc0 = N0.getOpcode();
6076 if (Opc0 != ISD::AND && Opc0 != ISD::SHL && Opc0 != ISD::SRL)
6077 return false;
6078
6079 ConstantSDNode *N1C = nullptr;
6080 // SHL or SRL: look upstream for AND mask operand
6081 if (Opc == ISD::AND)
6082 N1C = dyn_cast<ConstantSDNode>(N.getOperand(1));
6083 else if (Opc0 == ISD::AND)
6084 N1C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
6085 if (!N1C)
6086 return false;
6087
6088 unsigned MaskByteOffset;
6089 switch (N1C->getZExtValue()) {
6090 default:
6091 return false;
6092 case 0xFF: MaskByteOffset = 0; break;
6093 case 0xFF00: MaskByteOffset = 1; break;
6094 case 0xFFFF:
6095 // In case demanded bits didn't clear the bits that will be shifted out.
6096 // This is needed for X86.
6097 if (Opc == ISD::SRL || (Opc == ISD::AND && Opc0 == ISD::SHL)) {
6098 MaskByteOffset = 1;
6099 break;
6100 }
6101 return false;
6102 case 0xFF0000: MaskByteOffset = 2; break;
6103 case 0xFF000000: MaskByteOffset = 3; break;
6104 }
6105
6106 // Look for (x & 0xff) << 8 as well as ((x << 8) & 0xff00).
6107 if (Opc == ISD::AND) {
6108 if (MaskByteOffset == 0 || MaskByteOffset == 2) {
6109 // (x >> 8) & 0xff
6110 // (x >> 8) & 0xff0000
6111 if (Opc0 != ISD::SRL)
6112 return false;
6113 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
6114 if (!C || C->getZExtValue() != 8)
6115 return false;
6116 } else {
6117 // (x << 8) & 0xff00
6118 // (x << 8) & 0xff000000
6119 if (Opc0 != ISD::SHL)
6120 return false;
6121 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
6122 if (!C || C->getZExtValue() != 8)
6123 return false;
6124 }
6125 } else if (Opc == ISD::SHL) {
6126 // (x & 0xff) << 8
6127 // (x & 0xff0000) << 8
6128 if (MaskByteOffset != 0 && MaskByteOffset != 2)
6129 return false;
6130 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
6131 if (!C || C->getZExtValue() != 8)
6132 return false;
6133 } else { // Opc == ISD::SRL
6134 // (x & 0xff00) >> 8
6135 // (x & 0xff000000) >> 8
6136 if (MaskByteOffset != 1 && MaskByteOffset != 3)
6137 return false;
6138 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
6139 if (!C || C->getZExtValue() != 8)
6140 return false;
6141 }
6142
6143 if (Parts[MaskByteOffset])
6144 return false;
6145
6146 Parts[MaskByteOffset] = N0.getOperand(0).getNode();
6147 return true;
6148}
6149
6150// Match 2 elements of a packed halfword bswap.
6151static bool isBSwapHWordPair(SDValue N, MutableArrayRef<SDNode *> Parts) {
6152 if (N.getOpcode() == ISD::OR)
6153 return isBSwapHWordElement(N.getOperand(0), Parts) &&
6154 isBSwapHWordElement(N.getOperand(1), Parts);
6155
6156 if (N.getOpcode() == ISD::SRL && N.getOperand(0).getOpcode() == ISD::BSWAP) {
6157 ConstantSDNode *C = isConstOrConstSplat(N.getOperand(1));
6158 if (!C || C->getAPIntValue() != 16)
6159 return false;
6160 Parts[0] = Parts[1] = N.getOperand(0).getOperand(0).getNode();
6161 return true;
6162 }
6163
6164 return false;
6165}
6166
6167// Match this pattern:
6168// (or (and (shl (A, 8)), 0xff00ff00), (and (srl (A, 8)), 0x00ff00ff))
6169// And rewrite this to:
6170// (rotr (bswap A), 16)
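// Illustrative: A = 0xAABBCCDD gives
//   (shl A, 8) & 0xff00ff00 = 0xBB00DD00
//   (srl A, 8) & 0x00ff00ff = 0x00AA00CC
// whose OR is 0xBBAADDCC == rotr(bswap(A), 16).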
6171static SDValue matchBSwapHWordOrAndAnd(const TargetLowering &TLI,
6172 SelectionDAG &DAG, SDNode *N, SDValue N0,
6173 SDValue N1, EVT VT, EVT ShiftAmountTy) {
6174 assert(N->getOpcode() == ISD::OR && VT == MVT::i32 &&
6175 "MatchBSwapHWordOrAndAnd: expecting i32");
6176 if (!TLI.isOperationLegalOrCustom(ISD::ROTR, VT))
6177 return SDValue();
6178 if (N0.getOpcode() != ISD::AND || N1.getOpcode() != ISD::AND)
6179 return SDValue();
6180 // TODO: this is too restrictive; lifting this restriction requires more tests
6181 if (!N0->hasOneUse() || !N1->hasOneUse())
6182 return SDValue();
6183 ConstantSDNode *Mask0 = isConstOrConstSplat(N0.getOperand(1));
6184 ConstantSDNode *Mask1 = isConstOrConstSplat(N1.getOperand(1));
6185 if (!Mask0 || !Mask1)
6186 return SDValue();
6187 if (Mask0->getAPIntValue() != 0xff00ff00 ||
6188 Mask1->getAPIntValue() != 0x00ff00ff)
6189 return SDValue();
6190 SDValue Shift0 = N0.getOperand(0);
6191 SDValue Shift1 = N1.getOperand(0);
6192 if (Shift0.getOpcode() != ISD::SHL || Shift1.getOpcode() != ISD::SRL)
6193 return SDValue();
6194 ConstantSDNode *ShiftAmt0 = isConstOrConstSplat(Shift0.getOperand(1));
6195 ConstantSDNode *ShiftAmt1 = isConstOrConstSplat(Shift1.getOperand(1));
6196 if (!ShiftAmt0 || !ShiftAmt1)
6197 return SDValue();
6198 if (ShiftAmt0->getAPIntValue() != 8 || ShiftAmt1->getAPIntValue() != 8)
6199 return SDValue();
6200 if (Shift0.getOperand(0) != Shift1.getOperand(0))
6201 return SDValue();
6202
6203 SDLoc DL(N);
6204 SDValue BSwap = DAG.getNode(ISD::BSWAP, DL, VT, Shift0.getOperand(0));
6205 SDValue ShAmt = DAG.getConstant(16, DL, ShiftAmountTy);
6206 return DAG.getNode(ISD::ROTR, DL, VT, BSwap, ShAmt);
6207}
6208
6209/// Match a 32-bit packed halfword bswap. That is
6210/// ((x & 0x000000ff) << 8) |
6211/// ((x & 0x0000ff00) >> 8) |
6212/// ((x & 0x00ff0000) << 8) |
6213/// ((x & 0xff000000) >> 8)
6214/// => (rotl (bswap x), 16)
6215SDValue DAGCombiner::MatchBSwapHWord(SDNode *N, SDValue N0, SDValue N1) {
6216 if (!LegalOperations)
6217 return SDValue();
6218
6219 EVT VT = N->getValueType(0);
6220 if (VT != MVT::i32)
6221 return SDValue();
6222 if (!TLI.isOperationLegalOrCustom(ISD::BSWAP, VT))
6223 return SDValue();
6224
6225 if (SDValue BSwap = matchBSwapHWordOrAndAnd(TLI, DAG, N, N0, N1, VT,
6226 getShiftAmountTy(VT)))
6227 return BSwap;
6228
6229 // Try again with commuted operands.
6230 if (SDValue BSwap = matchBSwapHWordOrAndAnd(TLI, DAG, N, N1, N0, VT,
6231 getShiftAmountTy(VT)))
6232 return BSwap;
6233
6234
6235 // Look for either
6236 // (or (bswaphpair), (bswaphpair))
6237 // (or (or (bswaphpair), (and)), (and))
6238 // (or (or (and), (bswaphpair)), (and))
6239 SDNode *Parts[4] = {};
6240
6241 if (isBSwapHWordPair(N0, Parts)) {
6242 // (or (or (and), (and)), (or (and), (and)))
6243 if (!isBSwapHWordPair(N1, Parts))
6244 return SDValue();
6245 } else if (N0.getOpcode() == ISD::OR) {
6246 // (or (or (or (and), (and)), (and)), (and))
6247 if (!isBSwapHWordElement(N1, Parts))
6248 return SDValue();
6249 SDValue N00 = N0.getOperand(0);
6250 SDValue N01 = N0.getOperand(1);
6251 if (!(isBSwapHWordElement(N01, Parts) && isBSwapHWordPair(N00, Parts)) &&
6252 !(isBSwapHWordElement(N00, Parts) && isBSwapHWordPair(N01, Parts)))
6253 return SDValue();
6254 } else
6255 return SDValue();
6256
6257 // Make sure the parts are all coming from the same node.
6258 if (Parts[0] != Parts[1] || Parts[0] != Parts[2] || Parts[0] != Parts[3])
6259 return SDValue();
6260
6261 SDLoc DL(N);
6262 SDValue BSwap = DAG.getNode(ISD::BSWAP, DL, VT,
6263 SDValue(Parts[0], 0));
6264
6265 // Result of the bswap should be rotated by 16. If it's not legal, then
6266 // do (x << 16) | (x >> 16).
6267 SDValue ShAmt = DAG.getConstant(16, DL, getShiftAmountTy(VT));
6268 if (TLI.isOperationLegalOrCustom(ISD::ROTL, VT))
6269 return DAG.getNode(ISD::ROTL, DL, VT, BSwap, ShAmt);
6270 if (TLI.isOperationLegalOrCustom(ISD::ROTR, VT))
6271 return DAG.getNode(ISD::ROTR, DL, VT, BSwap, ShAmt);
6272 return DAG.getNode(ISD::OR, DL, VT,
6273 DAG.getNode(ISD::SHL, DL, VT, BSwap, ShAmt),
6274 DAG.getNode(ISD::SRL, DL, VT, BSwap, ShAmt));
6275}
6276
6277/// This contains all DAGCombine rules which reduce two values combined by
6278 /// an Or operation to a single value; \see visitANDLike().
6279SDValue DAGCombiner::visitORLike(SDValue N0, SDValue N1, SDNode *N) {
6280 EVT VT = N1.getValueType();
6281 SDLoc DL(N);
6282
6283 // fold (or x, undef) -> -1
6284 if (!LegalOperations && (N0.isUndef() || N1.isUndef()))
6285 return DAG.getAllOnesConstant(DL, VT);
6286
6287 if (SDValue V = foldLogicOfSetCCs(false, N0, N1, DL))
6288 return V;
6289
6290 // (or (and X, C1), (and Y, C2)) -> (and (or X, Y), C3) if possible.
6291 if (N0.getOpcode() == ISD::AND && N1.getOpcode() == ISD::AND &&
6292 // Don't increase # computations.
6293 (N0.getNode()->hasOneUse() || N1.getNode()->hasOneUse())) {
6294 // We can only do this xform if we know that bits from X that are set in C2
6295 // but not in C1 are already zero. Likewise for Y.
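 // E.g. (illustrative): with C1 = 0x00FF, C2 = 0xFF00, X known zero in
 // its high byte and Y known zero in its low byte,
 //   (or (and X, 0x00FF), (and Y, 0xFF00)) == (and (or X, Y), 0xFFFF).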
6296 if (const ConstantSDNode *N0O1C =
6297 getAsNonOpaqueConstant(N0.getOperand(1))) {
6298 if (const ConstantSDNode *N1O1C =
6299 getAsNonOpaqueConstant(N1.getOperand(1))) {
6302 const APInt &LHSMask = N0O1C->getAPIntValue();
6303 const APInt &RHSMask = N1O1C->getAPIntValue();
6304
6305 if (DAG.MaskedValueIsZero(N0.getOperand(0), RHSMask&~LHSMask) &&
6306 DAG.MaskedValueIsZero(N1.getOperand(0), LHSMask&~RHSMask)) {
6307 SDValue X = DAG.getNode(ISD::OR, SDLoc(N0), VT,
6308 N0.getOperand(0), N1.getOperand(0));
6309 return DAG.getNode(ISD::AND, DL, VT, X,
6310 DAG.getConstant(LHSMask | RHSMask, DL, VT));
6311 }
6312 }
6313 }
6314 }
6315
6316 // (or (and X, M), (and X, N)) -> (and X, (or M, N))
6317 if (N0.getOpcode() == ISD::AND &&
6318 N1.getOpcode() == ISD::AND &&
6319 N0.getOperand(0) == N1.getOperand(0) &&
6320 // Don't increase # computations.
6321 (N0.getNode()->hasOneUse() || N1.getNode()->hasOneUse())) {
6322 SDValue X = DAG.getNode(ISD::OR, SDLoc(N0), VT,
6323 N0.getOperand(1), N1.getOperand(1));
6324 return DAG.getNode(ISD::AND, DL, VT, N0.getOperand(0), X);
6325 }
6326
6327 return SDValue();
6328}
6329
6330/// OR combines for which the commuted variant will be tried as well.
6331static SDValue visitORCommutative(
6332 SelectionDAG &DAG, SDValue N0, SDValue N1, SDNode *N) {
6333 EVT VT = N0.getValueType();
6334 if (N0.getOpcode() == ISD::AND) {
6335 // fold (or (and X, (xor Y, -1)), Y) -> (or X, Y)
6336 if (isBitwiseNot(N0.getOperand(1)) && N0.getOperand(1).getOperand(0) == N1)
6337 return DAG.getNode(ISD::OR, SDLoc(N), VT, N0.getOperand(0), N1);
6338
6339 // fold (or (and (xor Y, -1), X), Y) -> (or X, Y)
6340 if (isBitwiseNot(N0.getOperand(0)) && N0.getOperand(0).getOperand(0) == N1)
6341 return DAG.getNode(ISD::OR, SDLoc(N), VT, N0.getOperand(1), N1);
6342 }
6343
6344 return SDValue();
6345}
6346
6347SDValue DAGCombiner::visitOR(SDNode *N) {
6348 SDValue N0 = N->getOperand(0);
6349 SDValue N1 = N->getOperand(1);
6350 EVT VT = N1.getValueType();
6351
6352 // x | x --> x
6353 if (N0 == N1)
6354 return N0;
6355
6356 // fold vector ops
6357 if (VT.isVector()) {
6358 if (SDValue FoldedVOp = SimplifyVBinOp(N))
6359 return FoldedVOp;
6360
6361 // fold (or x, 0) -> x, vector edition
6362 if (ISD::isConstantSplatVectorAllZeros(N0.getNode()))
6363 return N1;
6364 if (ISD::isConstantSplatVectorAllZeros(N1.getNode()))
6365 return N0;
6366
6367 // fold (or x, -1) -> -1, vector edition
6368 if (ISD::isConstantSplatVectorAllOnes(N0.getNode()))
6369 // do not return N0, because an undef node may exist in N0
6370 return DAG.getAllOnesConstant(SDLoc(N), N0.getValueType());
6371 if (ISD::isConstantSplatVectorAllOnes(N1.getNode()))
6372 // do not return N1, because an undef node may exist in N1
6373 return DAG.getAllOnesConstant(SDLoc(N), N1.getValueType());
6374
6375 // fold (or (shuf A, V_0, MA), (shuf B, V_0, MB)) -> (shuf A, B, Mask)
6376 // Do this only if the resulting shuffle is legal.
6377 if (isa<ShuffleVectorSDNode>(N0) &&
6378 isa<ShuffleVectorSDNode>(N1) &&
6379 // Avoid folding a node with illegal type.
6380 TLI.isTypeLegal(VT)) {
6381 bool ZeroN00 = ISD::isBuildVectorAllZeros(N0.getOperand(0).getNode());
6382 bool ZeroN01 = ISD::isBuildVectorAllZeros(N0.getOperand(1).getNode());
6383 bool ZeroN10 = ISD::isBuildVectorAllZeros(N1.getOperand(0).getNode());
6384 bool ZeroN11 = ISD::isBuildVectorAllZeros(N1.getOperand(1).getNode());
6385 // Ensure both shuffles have a zero input.
6386 if ((ZeroN00 != ZeroN01) && (ZeroN10 != ZeroN11)) {
6387 assert((!ZeroN00 || !ZeroN01) && "Both inputs zero!");
6388 assert((!ZeroN10 || !ZeroN11) && "Both inputs zero!");
6389 const ShuffleVectorSDNode *SV0 = cast<ShuffleVectorSDNode>(N0);
6390 const ShuffleVectorSDNode *SV1 = cast<ShuffleVectorSDNode>(N1);
6391 bool CanFold = true;
6392 int NumElts = VT.getVectorNumElements();
6393 SmallVector<int, 4> Mask(NumElts);
6394
6395 for (int i = 0; i != NumElts; ++i) {
6396 int M0 = SV0->getMaskElt(i);
6397 int M1 = SV1->getMaskElt(i);
6398
6399 // Determine if either index is pointing to a zero vector.
6400 bool M0Zero = M0 < 0 || (ZeroN00 == (M0 < NumElts));
6401 bool M1Zero = M1 < 0 || (ZeroN10 == (M1 < NumElts));
6402
6403 // If one element is zero and the other side is undef, keep undef.
6404 // This also handles the case that both are undef.
6405 if ((M0Zero && M1 < 0) || (M1Zero && M0 < 0)) {
6406 Mask[i] = -1;
6407 continue;
6408 }
6409
6410 // Make sure only one of the elements is zero.
6411 if (M0Zero == M1Zero) {
6412 CanFold = false;
6413 break;
6414 }
6415
6416 assert((M0 >= 0 || M1 >= 0) && "Undef index!");
6417
6418 // We have a zero and a non-zero element. If the non-zero came from
6419 // SV0, make the index an LHS index; if it came from SV1, make it
6420 // an RHS index. We need to mod by NumElts because we don't care
6421 // which operand it came from in the original shuffles.
6422 Mask[i] = M1Zero ? M0 % NumElts : (M1 % NumElts) + NumElts;
6423 }
6424
6425 if (CanFold) {
6426 SDValue NewLHS = ZeroN00 ? N0.getOperand(1) : N0.getOperand(0);
6427 SDValue NewRHS = ZeroN10 ? N1.getOperand(1) : N1.getOperand(0);
6428
6429 SDValue LegalShuffle =
6430 TLI.buildLegalVectorShuffle(VT, SDLoc(N), NewLHS, NewRHS,
6431 Mask, DAG);
6432 if (LegalShuffle)
6433 return LegalShuffle;
6434 }
6435 }
6436 }
6437 }
6438
6439 // fold (or c1, c2) -> c1|c2
6440 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
6441 if (SDValue C = DAG.FoldConstantArithmetic(ISD::OR, SDLoc(N), VT, {N0, N1}))
6442 return C;
6443
6444 // canonicalize constant to RHS
6445 if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
6446 !DAG.isConstantIntBuildVectorOrConstantInt(N1))
6447 return DAG.getNode(ISD::OR, SDLoc(N), VT, N1, N0);
6448
6449 // fold (or x, 0) -> x
6450 if (isNullConstant(N1))
6451 return N0;
6452
6453 // fold (or x, -1) -> -1
6454 if (isAllOnesConstant(N1))
6455 return N1;
6456
6457 if (SDValue NewSel = foldBinOpIntoSelect(N))
6458 return NewSel;
6459
6460 // fold (or x, c) -> c iff (x & ~c) == 0
6461 if (N1C && DAG.MaskedValueIsZero(N0, ~N1C->getAPIntValue()))
6462 return N1;
6463
6464 if (SDValue Combined = visitORLike(N0, N1, N))
6465 return Combined;
6466
6467 if (SDValue Combined = combineCarryDiamond(*this, DAG, TLI, N0, N1, N))
6468 return Combined;
6469
6470 // Recognize halfword bswaps as (bswap + rotl 16) or (bswap + shl 16)
6471 if (SDValue BSwap = MatchBSwapHWord(N, N0, N1))
6472 return BSwap;
6473 if (SDValue BSwap = MatchBSwapHWordLow(N, N0, N1))
6474 return BSwap;
6475
6476 // reassociate or
6477 if (SDValue ROR = reassociateOps(ISD::OR, SDLoc(N), N0, N1, N->getFlags()))
6478 return ROR;
6479
6480 // Canonicalize (or (and X, c1), c2) -> (and (or X, c2), c1|c2)
6481 // iff (c1 & c2) != 0 or c1/c2 are undef.
6482 auto MatchIntersect = [](ConstantSDNode *C1, ConstantSDNode *C2) {
6483 return !C1 || !C2 || C1->getAPIntValue().intersects(C2->getAPIntValue());
6484 };
6485 if (N0.getOpcode() == ISD::AND && N0.getNode()->hasOneUse() &&
6486 ISD::matchBinaryPredicate(N0.getOperand(1), N1, MatchIntersect, true)) {
6487 if (SDValue COR = DAG.FoldConstantArithmetic(ISD::OR, SDLoc(N1), VT,
6488 {N1, N0.getOperand(1)})) {
6489 SDValue IOR = DAG.getNode(ISD::OR, SDLoc(N0), VT, N0.getOperand(0), N1);
6490 AddToWorklist(IOR.getNode());
6491 return DAG.getNode(ISD::AND, SDLoc(N), VT, COR, IOR);
6492 }
6493 }
6494
6495 if (SDValue Combined = visitORCommutative(DAG, N0, N1, N))
6496 return Combined;
6497 if (SDValue Combined = visitORCommutative(DAG, N1, N0, N))
6498 return Combined;
6499
6500 // Simplify: (or (op x...), (op y...)) -> (op (or x, y))
6501 if (N0.getOpcode() == N1.getOpcode())
6502 if (SDValue V = hoistLogicOpWithSameOpcodeHands(N))
6503 return V;
6504
6505 // See if this is some rotate idiom.
6506 if (SDValue Rot = MatchRotate(N0, N1, SDLoc(N)))
6507 return Rot;
6508
6509 if (SDValue Load = MatchLoadCombine(N))
6510 return Load;
6511
6512 // Simplify the operands using demanded-bits information.
6513 if (SimplifyDemandedBits(SDValue(N, 0)))
6514 return SDValue(N, 0);
6515
6516 // If OR can be rewritten into ADD, try combines based on ADD.
6517 if ((!LegalOperations || TLI.isOperationLegal(ISD::ADD, VT)) &&
6518 DAG.haveNoCommonBitsSet(N0, N1))
6519 if (SDValue Combined = visitADDLike(N))
6520 return Combined;
6521
6522 return SDValue();
6523}
6524
6525static SDValue stripConstantMask(SelectionDAG &DAG, SDValue Op, SDValue &Mask) {
6526 if (Op.getOpcode() == ISD::AND &&
6527 DAG.isConstantIntBuildVectorOrConstantInt(Op.getOperand(1))) {
6528 Mask = Op.getOperand(1);
6529 return Op.getOperand(0);
6530 }
6531 return Op;
6532}
6533
6534/// Match "(X shl/srl V1) & V2" where V2 may not be present.
6535static bool matchRotateHalf(SelectionDAG &DAG, SDValue Op, SDValue &Shift,
6536 SDValue &Mask) {
6537 Op = stripConstantMask(DAG, Op, Mask);
6538 if (Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SHL) {
6539 Shift = Op;
6540 return true;
6541 }
6542 return false;
6543}
6544
6545/// Helper function for visitOR to extract the needed side of a rotate idiom
6546/// from a shl/srl/mul/udiv. This is meant to handle cases where
6547/// InstCombine merged some outside op with one of the shifts from
6548/// the rotate pattern.
6549/// \returns An empty \c SDValue if the needed shift couldn't be extracted.
6550/// Otherwise, returns an expansion of \p ExtractFrom based on the following
6551/// patterns:
6552///
6553/// (or (add v v) (shrl v bitwidth-1)):
6554/// expands (add v v) -> (shl v 1)
6555///
6556/// (or (mul v c0) (shrl (mul v c1) c2)):
6557/// expands (mul v c0) -> (shl (mul v c1) c3)
6558///
6559/// (or (udiv v c0) (shl (udiv v c1) c2)):
6560/// expands (udiv v c0) -> (shrl (udiv v c1) c3)
6561///
6562/// (or (shl v c0) (shrl (shl v c1) c2)):
6563/// expands (shl v c0) -> (shl (shl v c1) c3)
6564///
6565/// (or (shrl v c0) (shl (shrl v c1) c2)):
6566/// expands (shrl v c0) -> (shrl (shrl v c1) c3)
6567///
6568/// Such that in all cases, c3+c2==bitwidth(op v c1).
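/// Illustrative i32 instance of the mul pattern:
///   (or (mul v, 16), (srl (mul v, 2), 29))
/// (mul v, 2) acts as (shl v, 1), and (mul v, 16) expands to
/// (shl (mul v, 2), 3); c3 + c2 = 3 + 29 = 32 == bitwidth, so the pair
/// forms a rotate of (mul v, 2).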
6569static SDValue extractShiftForRotate(SelectionDAG &DAG, SDValue OppShift,
6570 SDValue ExtractFrom, SDValue &Mask,
6571 const SDLoc &DL) {
6572 assert(OppShift && ExtractFrom && "Empty SDValue");
6573 assert(
6574 (OppShift.getOpcode() == ISD::SHL || OppShift.getOpcode() == ISD::SRL) &&
6575 "Existing shift must be valid as a rotate half");
6576
6577 ExtractFrom = stripConstantMask(DAG, ExtractFrom, Mask);
6578
6579 // Value and Type of the shift.
6580 SDValue OppShiftLHS = OppShift.getOperand(0);
6581 EVT ShiftedVT = OppShiftLHS.getValueType();
6582
6583 // Amount of the existing shift.
6584 ConstantSDNode *OppShiftCst = isConstOrConstSplat(OppShift.getOperand(1));
6585
6586 // (add v v) -> (shl v 1)
6587 // TODO: Should this be a general DAG canonicalization?
6588 if (OppShift.getOpcode() == ISD::SRL && OppShiftCst &&
6589 ExtractFrom.getOpcode() == ISD::ADD &&
6590 ExtractFrom.getOperand(0) == ExtractFrom.getOperand(1) &&
6591 ExtractFrom.getOperand(0) == OppShiftLHS &&
6592 OppShiftCst->getAPIntValue() == ShiftedVT.getScalarSizeInBits() - 1)
6593 return DAG.getNode(ISD::SHL, DL, ShiftedVT, OppShiftLHS,
6594 DAG.getShiftAmountConstant(1, ShiftedVT, DL));
6595
6596 // Preconditions:
6597 // (or (op0 v c0) (shiftl/r (op0 v c1) c2))
6598 //
6599 // Find opcode of the needed shift to be extracted from (op0 v c0).
6600 unsigned Opcode = ISD::DELETED_NODE;
6601 bool IsMulOrDiv = false;
6602 // Set Opcode and IsMulOrDiv if the extract opcode matches the needed shift
6603 // opcode or its arithmetic (mul or udiv) variant.
6604 auto SelectOpcode = [&](unsigned NeededShift, unsigned MulOrDivVariant) {
6605 IsMulOrDiv = ExtractFrom.getOpcode() == MulOrDivVariant;
6606 if (!IsMulOrDiv && ExtractFrom.getOpcode() != NeededShift)
6607 return false;
6608 Opcode = NeededShift;
6609 return true;
6610 };
6611 // op0 must be either the needed shift opcode or the mul/udiv equivalent
6612 // that the needed shift can be extracted from.
6613 if ((OppShift.getOpcode() != ISD::SRL || !SelectOpcode(ISD::SHL, ISD::MUL)) &&
6614 (OppShift.getOpcode() != ISD::SHL || !SelectOpcode(ISD::SRL, ISD::UDIV)))
6615 return SDValue();
6616
6617 // op0 must be the same opcode on both sides, have the same LHS argument,
6618 // and produce the same value type.
6619 if (OppShiftLHS.getOpcode() != ExtractFrom.getOpcode() ||
6620 OppShiftLHS.getOperand(0) != ExtractFrom.getOperand(0) ||
6621 ShiftedVT != ExtractFrom.getValueType())
6622 return SDValue();
6623
6624 // Constant mul/udiv/shift amount from the RHS of the shift's LHS op.
6625 ConstantSDNode *OppLHSCst = isConstOrConstSplat(OppShiftLHS.getOperand(1));
6626 // Constant mul/udiv/shift amount from the RHS of the ExtractFrom op.
6627 ConstantSDNode *ExtractFromCst =
6628 isConstOrConstSplat(ExtractFrom.getOperand(1));
6629 // TODO: We should be able to handle non-uniform constant vectors for these values
6630 // Check that we have constant values.
6631 if (!OppShiftCst || !OppShiftCst->getAPIntValue() ||
6632 !OppLHSCst || !OppLHSCst->getAPIntValue() ||
6633 !ExtractFromCst || !ExtractFromCst->getAPIntValue())
6634 return SDValue();
6635
6636 // Compute the shift amount we need to extract to complete the rotate.
6637 const unsigned VTWidth = ShiftedVT.getScalarSizeInBits();
6638 if (OppShiftCst->getAPIntValue().ugt(VTWidth))
6639 return SDValue();
6640 APInt NeededShiftAmt = VTWidth - OppShiftCst->getAPIntValue();
6641 // Normalize the bitwidth of the two mul/udiv/shift constant operands.
6642 APInt ExtractFromAmt = ExtractFromCst->getAPIntValue();
6643 APInt OppLHSAmt = OppLHSCst->getAPIntValue();
6644 zeroExtendToMatch(ExtractFromAmt, OppLHSAmt);
6645
6646 // Now try to extract the needed shift from the ExtractFrom op and see if the
6647 // result matches up with the existing shift's LHS op.
6648 if (IsMulOrDiv) {
6649 // Op to extract from is a mul or udiv by a constant.
6650 // Check:
6651 // c2 / (1 << (bitwidth(op0 v c0) - c1)) == c0
6652 // c2 % (1 << (bitwidth(op0 v c0) - c1)) == 0
6653 const APInt ExtractDiv = APInt::getOneBitSet(ExtractFromAmt.getBitWidth(),
6654 NeededShiftAmt.getZExtValue());
6655 APInt ResultAmt;
6656 APInt Rem;
6657 APInt::udivrem(ExtractFromAmt, ExtractDiv, ResultAmt, Rem);
6658 if (Rem != 0 || ResultAmt != OppLHSAmt)
6659 return SDValue();
6660 } else {
6661 // Op to extract from is a shift by a constant.
6662 // Check:
6663 // c2 - (bitwidth(op0 v c0) - c1) == c0
6664 if (OppLHSAmt != ExtractFromAmt - NeededShiftAmt.zextOrTrunc(
6665 ExtractFromAmt.getBitWidth()))
6666 return SDValue();
6667 }
6668
6669 // Return the expanded shift op that should allow a rotate to be formed.
6670 EVT ShiftVT = OppShift.getOperand(1).getValueType();
6671 EVT ResVT = ExtractFrom.getValueType();
6672 SDValue NewShiftNode = DAG.getConstant(NeededShiftAmt, DL, ShiftVT);
6673 return DAG.getNode(Opcode, DL, ResVT, OppShiftLHS, NewShiftNode);
6674}
6675
6676// Return true if we can prove that, whenever Neg and Pos are both in the
6677// range [0, EltSize), Neg == (Pos == 0 ? 0 : EltSize - Pos). This means that
6678// for two opposing shifts shift1 and shift2 and a value X with OpBits bits:
6679//
6680// (or (shift1 X, Neg), (shift2 X, Pos))
6681//
6682// reduces to a rotate in direction shift2 by Pos or (equivalently) a rotate
6683// in direction shift1 by Neg. The range [0, EltSize) means that we only need
6684// to consider shift amounts with defined behavior.
6685//
6686// The IsRotate flag should be set when the LHS of both shifts is the same.
6687// Otherwise if matching a general funnel shift, it should be clear.
6688static bool matchRotateSub(SDValue Pos, SDValue Neg, unsigned EltSize,
6689 SelectionDAG &DAG, bool IsRotate) {
6690 // If EltSize is a power of 2 then:
6691 //
6692 // (a) (Pos == 0 ? 0 : EltSize - Pos) == (EltSize - Pos) & (EltSize - 1)
6693 // (b) Neg == Neg & (EltSize - 1) whenever Neg is in [0, EltSize).
6694 //
6695 // So if EltSize is a power of 2 and Neg is (and Neg', EltSize-1), we check
6696 // for the stronger condition:
6697 //
6698 // Neg & (EltSize - 1) == (EltSize - Pos) & (EltSize - 1) [A]
6699 //
6700 // for all Neg and Pos. Since Neg & (EltSize - 1) == Neg' & (EltSize - 1)
6701 // we can just replace Neg with Neg' for the rest of the function.
6702 //
6703 // In other cases we check for the even stronger condition:
6704 //
6705 // Neg == EltSize - Pos [B]
6706 //
6707 // for all Neg and Pos. Note that the (or ...) then invokes undefined
6708 // behavior if Pos == 0 (and consequently Neg == EltSize).
6709 //
6710 // We could actually use [A] whenever EltSize is a power of 2, but the
6711 // only extra cases that it would match are those uninteresting ones
6712 // where Neg and Pos are never in range at the same time. E.g. for
6713 // EltSize == 32, using [A] would allow a Neg of the form (sub 64, Pos)
6714 // as well as (sub 32, Pos), but:
6715 //
6716 // (or (shift1 X, (sub 64, Pos)), (shift2 X, Pos))
6717 //
6718 // always invokes undefined behavior for 32-bit X.
6719 //
6720 // Below, Mask == EltSize - 1 when using [A] and is all-ones otherwise.
6721 //
6722 // NOTE: We can only do this when matching an AND and not a general
6723 // funnel shift.
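// Illustrative instance (assumed i32, EltSize == 32): with Pos == y and
// Neg == (and (sub 32, y), 31), condition [A] holds for every y in [0, 32),
// including y == 0, where the masked subtraction correctly yields 0.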
6724 unsigned MaskLoBits = 0;
6725 if (IsRotate && Neg.getOpcode() == ISD::AND && isPowerOf2_64(EltSize)) {
6726 if (ConstantSDNode *NegC = isConstOrConstSplat(Neg.getOperand(1))) {
6727 KnownBits Known = DAG.computeKnownBits(Neg.getOperand(0));
6728 unsigned Bits = Log2_64(EltSize);
6729 if (NegC->getAPIntValue().getActiveBits() <= Bits &&
6730 ((NegC->getAPIntValue() | Known.Zero).countTrailingOnes() >= Bits)) {
6731 Neg = Neg.getOperand(0);
6732 MaskLoBits = Bits;
6733 }
6734 }
6735 }
6736
6737 // Check whether Neg has the form (sub NegC, NegOp1) for some NegC and NegOp1.
6738 if (Neg.getOpcode() != ISD::SUB)
6739 return false;
6740 ConstantSDNode *NegC = isConstOrConstSplat(Neg.getOperand(0));
6741 if (!NegC)
6742 return false;
6743 SDValue NegOp1 = Neg.getOperand(1);
6744
6745 // On the RHS of [A], if Pos is Pos' & (EltSize - 1), just replace Pos with
6746 // Pos'. The truncation is redundant for the purpose of the equality.
6747 if (MaskLoBits && Pos.getOpcode() == ISD::AND) {
6748 if (ConstantSDNode *PosC = isConstOrConstSplat(Pos.getOperand(1))) {
6749 KnownBits Known = DAG.computeKnownBits(Pos.getOperand(0));
6750 if (PosC->getAPIntValue().getActiveBits() <= MaskLoBits &&
6751 ((PosC->getAPIntValue() | Known.Zero).countTrailingOnes() >=
6752 MaskLoBits))
6753 Pos = Pos.getOperand(0);
6754 }
6755 }
6756
6757 // The condition we need is now:
6758 //
6759 // (NegC - NegOp1) & Mask == (EltSize - Pos) & Mask
6760 //
6761 // If NegOp1 == Pos then we need:
6762 //
6763 // EltSize & Mask == NegC & Mask
6764 //
6765 // (because "x & Mask" is a truncation and distributes through subtraction).
6766 //
6767 // We also need to account for a potential truncation of NegOp1 if the amount
6768 // has already been legalized to a shift amount type.
6769 APInt Width;
6770 if ((Pos == NegOp1) ||
6771 (NegOp1.getOpcode() == ISD::TRUNCATE && Pos == NegOp1.getOperand(0)))
6772 Width = NegC->getAPIntValue();
6773
6774 // Check for cases where Pos has the form (add NegOp1, PosC) for some PosC.
6775 // Then the condition we want to prove becomes:
6776 //
6777 // (NegC - NegOp1) & Mask == (EltSize - (NegOp1 + PosC)) & Mask
6778 //
6779 // which, again because "x & Mask" is a truncation, becomes:
6780 //
6781 // NegC & Mask == (EltSize - PosC) & Mask
6782 // EltSize & Mask == (NegC + PosC) & Mask
6783 else if (Pos.getOpcode() == ISD::ADD && Pos.getOperand(0) == NegOp1) {
6784 if (ConstantSDNode *PosC = isConstOrConstSplat(Pos.getOperand(1)))
6785 Width = PosC->getAPIntValue() + NegC->getAPIntValue();
6786 else
6787 return false;
6788 } else
6789 return false;
6790
6791 // Now we just need to check that EltSize & Mask == Width & Mask.
6792 if (MaskLoBits)
6793 // EltSize & Mask is 0 since Mask is EltSize - 1.
6794 return Width.getLoBits(MaskLoBits) == 0;
6795 return Width == EltSize;
6796}
6797
6798// A subroutine of MatchRotate used once we have found an OR of two opposite
6799// shifts of Shifted. If Neg == <operand size> - Pos then the OR reduces
6800// to both (PosOpcode Shifted, Pos) and (NegOpcode Shifted, Neg), with the
6801// former being preferred if supported. InnerPos and InnerNeg are Pos and
6802// Neg with outer conversions stripped away.
6803SDValue DAGCombiner::MatchRotatePosNeg(SDValue Shifted, SDValue Pos,
6804 SDValue Neg, SDValue InnerPos,
6805 SDValue InnerNeg, unsigned PosOpcode,
6806 unsigned NegOpcode, const SDLoc &DL) {
6807 // fold (or (shl x, (*ext y)),
6808 // (srl x, (*ext (sub 32, y)))) ->
6809 // (rotl x, y) or (rotr x, (sub 32, y))
6810 //
6811 // fold (or (shl x, (*ext (sub 32, y))),
6812 // (srl x, (*ext y))) ->
6813 // (rotr x, y) or (rotl x, (sub 32, y))
6814 EVT VT = Shifted.getValueType();
6815 if (matchRotateSub(InnerPos, InnerNeg, VT.getScalarSizeInBits(), DAG,
6816 /*IsRotate*/ true)) {
6817 bool HasPos = TLI.isOperationLegalOrCustom(PosOpcode, VT);
6818 return DAG.getNode(HasPos ? PosOpcode : NegOpcode, DL, VT, Shifted,
6819 HasPos ? Pos : Neg);
6820 }
6821
6822 return SDValue();
6823}
6824
6825// A subroutine of MatchRotate used once we have found an OR of two opposite
6826// shifts of N0 + N1. If Neg == <operand size> - Pos then the OR reduces
6827// to both (PosOpcode N0, N1, Pos) and (NegOpcode N0, N1, Neg), with the
6828// former being preferred if supported. InnerPos and InnerNeg are Pos and
6829// Neg with outer conversions stripped away.
6830// TODO: Merge with MatchRotatePosNeg.
6831SDValue DAGCombiner::MatchFunnelPosNeg(SDValue N0, SDValue N1, SDValue Pos,
6832 SDValue Neg, SDValue InnerPos,
6833 SDValue InnerNeg, unsigned PosOpcode,
6834 unsigned NegOpcode, const SDLoc &DL) {
6835 EVT VT = N0.getValueType();
6836 unsigned EltBits = VT.getScalarSizeInBits();
6837
6838 // fold (or (shl x0, (*ext y)),
6839 // (srl x1, (*ext (sub 32, y)))) ->
6840 // (fshl x0, x1, y) or (fshr x0, x1, (sub 32, y))
6841 //
6842 // fold (or (shl x0, (*ext (sub 32, y))),
6843 // (srl x1, (*ext y))) ->
6844 // (fshr x0, x1, y) or (fshl x0, x1, (sub 32, y))
6845 if (matchRotateSub(InnerPos, InnerNeg, EltBits, DAG, /*IsRotate*/ N0 == N1)) {
6846 bool HasPos = TLI.isOperationLegalOrCustom(PosOpcode, VT);
6847 return DAG.getNode(HasPos ? PosOpcode : NegOpcode, DL, VT, N0, N1,
6848 HasPos ? Pos : Neg);
6849 }
6850
6851 // When matching the shift+xor cases, we can't easily use the xor'd shift
6852 // amount, so for now just use the PosOpcode case if it's legal.
6853 // TODO: When can we use the NegOpcode case?
6854 if (PosOpcode == ISD::FSHL && isPowerOf2_32(EltBits)) {
6855 auto IsBinOpImm = [](SDValue Op, unsigned BinOpc, unsigned Imm) {
6856 if (Op.getOpcode() != BinOpc)
6857 return false;
6858 ConstantSDNode *Cst = isConstOrConstSplat(Op.getOperand(1));
6859 return Cst && (Cst->getAPIntValue() == Imm);
6860 };
6861
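// Note (illustrative reasoning): for a power-of-2 EltBits, (xor y, EltBits-1)
// equals (EltBits - 1) - y whenever y is in [0, EltBits), so an extra one-bit
// shl/srl on the other operand recombines with it to form the full
// EltBits - y companion shift that a funnel shift requires.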
6862 // fold (or (shl x0, y), (srl (srl x1, 1), (xor y, 31)))
6863 // -> (fshl x0, x1, y)
6864 if (IsBinOpImm(N1, ISD::SRL, 1) &&
6865 IsBinOpImm(InnerNeg, ISD::XOR, EltBits - 1) &&
6866 InnerPos == InnerNeg.getOperand(0) &&
6867 TLI.isOperationLegalOrCustom(ISD::FSHL, VT)) {
6868 return DAG.getNode(ISD::FSHL, DL, VT, N0, N1.getOperand(0), Pos);
6869 }
6870
6871 // fold (or (shl (shl x0, 1), (xor y, 31)), (srl x1, y))
6872 // -> (fshr x0, x1, y)
6873 if (IsBinOpImm(N0, ISD::SHL, 1) &&
6874 IsBinOpImm(InnerPos, ISD::XOR, EltBits - 1) &&
6875 InnerNeg == InnerPos.getOperand(0) &&
6876 TLI.isOperationLegalOrCustom(ISD::FSHR, VT)) {
6877 return DAG.getNode(ISD::FSHR, DL, VT, N0.getOperand(0), N1, Neg);
6878 }
6879
6880 // fold (or (shl (add x0, x0), (xor y, 31)), (srl x1, y))
6881 // -> (fshr x0, x1, y)
6882 // TODO: Should add(x,x) -> shl(x,1) be a general DAG canonicalization?
6883 if (N0.getOpcode() == ISD::ADD && N0.getOperand(0) == N0.getOperand(1) &&
6884 IsBinOpImm(InnerPos, ISD::XOR, EltBits - 1) &&
6885 InnerNeg == InnerPos.getOperand(0) &&
6886 TLI.isOperationLegalOrCustom(ISD::FSHR, VT)) {
6887 return DAG.getNode(ISD::FSHR, DL, VT, N0.getOperand(0), N1, Neg);
6888 }
6889 }
6890
6891 return SDValue();
6892}
6893
6894// MatchRotate - Handle an 'or' of two operands. If this is one of the many
6895// idioms for rotate, and if the target supports rotation instructions, generate
6896// a rot[lr]. This also matches funnel shift patterns, similar to rotation but
6897// with different shifted sources.
6898SDValue DAGCombiner::MatchRotate(SDValue LHS, SDValue RHS, const SDLoc &DL) {
6899 // Must be a legal type. Expanded and promoted types won't work with rotates.
6900 EVT VT = LHS.getValueType();
6901 if (!TLI.isTypeLegal(VT))
6902 return SDValue();
6903
6904 // The target must have at least one rotate/funnel flavor.
6905 bool HasROTL = hasOperation(ISD::ROTL, VT);
6906 bool HasROTR = hasOperation(ISD::ROTR, VT);
6907 bool HasFSHL = hasOperation(ISD::FSHL, VT);
6908 bool HasFSHR = hasOperation(ISD::FSHR, VT);
6909 if (!HasROTL && !HasROTR && !HasFSHL && !HasFSHR)
6910 return SDValue();
6911
6912 // Check for truncated rotate.
6913 if (LHS.getOpcode() == ISD::TRUNCATE && RHS.getOpcode() == ISD::TRUNCATE &&
6914 LHS.getOperand(0).getValueType() == RHS.getOperand(0).getValueType()) {
6915 assert(LHS.getValueType() == RHS.getValueType());
6916 if (SDValue Rot = MatchRotate(LHS.getOperand(0), RHS.getOperand(0), DL)) {
6917 return DAG.getNode(ISD::TRUNCATE, SDLoc(LHS), LHS.getValueType(), Rot);
6918 }
6919 }
6920
6921 // Match "(X shl/srl V1) & V2" where V2 may not be present.
6922 SDValue LHSShift; // The shift.
6923 SDValue LHSMask; // AND value if any.
6924 matchRotateHalf(DAG, LHS, LHSShift, LHSMask);
6925
6926 SDValue RHSShift; // The shift.
6927 SDValue RHSMask; // AND value if any.
6928 matchRotateHalf(DAG, RHS, RHSShift, RHSMask);
6929
6930 // If neither side matched a rotate half, bail
6931 if (!LHSShift && !RHSShift)
6932 return SDValue();
6933
6934 // InstCombine may have combined a constant shl, srl, mul, or udiv with one
6935 // side of the rotate, so try to handle that here. In all cases we need to
6936 // pass the matched shift from the opposite side to compute the opcode and
6937 // needed shift amount to extract. We still want to do this if both sides
6938 // matched a rotate half because one half may be a potential overshift that
6939 // can be broken down (i.e. if InstCombine merged two shl or srl ops into a
6940 // single one).
6941
6942 // Have LHS side of the rotate, try to extract the needed shift from the RHS.
6943 if (LHSShift)
6944 if (SDValue NewRHSShift =
6945 extractShiftForRotate(DAG, LHSShift, RHS, RHSMask, DL))
6946 RHSShift = NewRHSShift;
6947 // Have RHS side of the rotate, try to extract the needed shift from the LHS.
6948 if (RHSShift)
6949 if (SDValue NewLHSShift =
6950 extractShiftForRotate(DAG, RHSShift, LHS, LHSMask, DL))
6951 LHSShift = NewLHSShift;
6952
6953 // If a side is still missing, nothing else we can do.
6954 if (!RHSShift || !LHSShift)
6955 return SDValue();
6956
6957 // At this point we've matched or extracted a shift op on each side.
6958
6959 if (LHSShift.getOpcode() == RHSShift.getOpcode())
6960 return SDValue(); // Shifts must disagree.
6961
6962 bool IsRotate = LHSShift.getOperand(0) == RHSShift.getOperand(0);
6963 if (!IsRotate && !(HasFSHL || HasFSHR))
6964 return SDValue(); // Requires funnel shift support.
6965
6966 // Canonicalize shl to left side in a shl/srl pair.
6967 if (RHSShift.getOpcode() == ISD::SHL) {
6968 std::swap(LHS, RHS);
6969 std::swap(LHSShift, RHSShift);
6970 std::swap(LHSMask, RHSMask);
6971 }
6972
6973 unsigned EltSizeInBits = VT.getScalarSizeInBits();
6974 SDValue LHSShiftArg = LHSShift.getOperand(0);
6975 SDValue LHSShiftAmt = LHSShift.getOperand(1);
6976 SDValue RHSShiftArg = RHSShift.getOperand(0);
6977 SDValue RHSShiftAmt = RHSShift.getOperand(1);
6978
6979 // fold (or (shl x, C1), (srl x, C2)) -> (rotl x, C1)
6980 // fold (or (shl x, C1), (srl x, C2)) -> (rotr x, C2)
6981 // fold (or (shl x, C1), (srl y, C2)) -> (fshl x, y, C1)
6982 // fold (or (shl x, C1), (srl y, C2)) -> (fshr x, y, C2)
6983 // iff C1+C2 == EltSizeInBits
6984 auto MatchRotateSum = [EltSizeInBits](ConstantSDNode *LHS,
6985 ConstantSDNode *RHS) {
6986 return (LHS->getAPIntValue() + RHS->getAPIntValue()) == EltSizeInBits;
6987 };
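// e.g. (illustrative, i32): C1 = 8 and C2 = 24 satisfy C1+C2 == 32, so
// (or (shl x, 8), (srl x, 24)) becomes (rotl x, 8) when ROTL is available.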
6988 if (ISD::matchBinaryPredicate(LHSShiftAmt, RHSShiftAmt, MatchRotateSum)) {
6989 SDValue Res;
6990 if (IsRotate && (HasROTL || HasROTR))
6991 Res = DAG.getNode(HasROTL ? ISD::ROTL : ISD::ROTR, DL, VT, LHSShiftArg,
6992 HasROTL ? LHSShiftAmt : RHSShiftAmt);
6993 else
6994 Res = DAG.getNode(HasFSHL ? ISD::FSHL : ISD::FSHR, DL, VT, LHSShiftArg,
6995 RHSShiftArg, HasFSHL ? LHSShiftAmt : RHSShiftAmt);
6996
6997 // If there is an AND of either shifted operand, apply it to the result.
6998 if (LHSMask.getNode() || RHSMask.getNode()) {
6999 SDValue AllOnes = DAG.getAllOnesConstant(DL, VT);
7000 SDValue Mask = AllOnes;
7001
7002 if (LHSMask.getNode()) {
7003 SDValue RHSBits = DAG.getNode(ISD::SRL, DL, VT, AllOnes, RHSShiftAmt);
7004 Mask = DAG.getNode(ISD::AND, DL, VT, Mask,
7005 DAG.getNode(ISD::OR, DL, VT, LHSMask, RHSBits));
7006 }
7007 if (RHSMask.getNode()) {
7008 SDValue LHSBits = DAG.getNode(ISD::SHL, DL, VT, AllOnes, LHSShiftAmt);
7009 Mask = DAG.getNode(ISD::AND, DL, VT, Mask,
7010 DAG.getNode(ISD::OR, DL, VT, RHSMask, LHSBits));
7011 }
7012
7013 Res = DAG.getNode(ISD::AND, DL, VT, Res, Mask);
7014 }
7015
7016 return Res;
7017 }
7018
7019 // If there is a mask here, and we have a variable shift, we can't be sure
7020 // that we're masking out the right stuff.
7021 if (LHSMask.getNode() || RHSMask.getNode())
7022 return SDValue();
7023
7024 // If the shift amount is sign/zext/any-extended just peel it off.
7025 SDValue LExtOp0 = LHSShiftAmt;
7026 SDValue RExtOp0 = RHSShiftAmt;
7027 if ((LHSShiftAmt.getOpcode() == ISD::SIGN_EXTEND ||
7028 LHSShiftAmt.getOpcode() == ISD::ZERO_EXTEND ||
7029 LHSShiftAmt.getOpcode() == ISD::ANY_EXTEND ||
7030 LHSShiftAmt.getOpcode() == ISD::TRUNCATE) &&
7031 (RHSShiftAmt.getOpcode() == ISD::SIGN_EXTEND ||
7032 RHSShiftAmt.getOpcode() == ISD::ZERO_EXTEND ||
7033 RHSShiftAmt.getOpcode() == ISD::ANY_EXTEND ||
7034 RHSShiftAmt.getOpcode() == ISD::TRUNCATE)) {
7035 LExtOp0 = LHSShiftAmt.getOperand(0);
7036 RExtOp0 = RHSShiftAmt.getOperand(0);
7037 }
7038
7039 if (IsRotate && (HasROTL || HasROTR)) {
7040 SDValue TryL =
7041 MatchRotatePosNeg(LHSShiftArg, LHSShiftAmt, RHSShiftAmt, LExtOp0,
7042 RExtOp0, ISD::ROTL, ISD::ROTR, DL);
7043 if (TryL)
7044 return TryL;
7045
7046 SDValue TryR =
7047 MatchRotatePosNeg(RHSShiftArg, RHSShiftAmt, LHSShiftAmt, RExtOp0,
7048 LExtOp0, ISD::ROTR, ISD::ROTL, DL);
7049 if (TryR)
7050 return TryR;
7051 }
7052
7053 SDValue TryL =
7054 MatchFunnelPosNeg(LHSShiftArg, RHSShiftArg, LHSShiftAmt, RHSShiftAmt,
7055 LExtOp0, RExtOp0, ISD::FSHL, ISD::FSHR, DL);
7056 if (TryL)
7057 return TryL;
7058
7059 SDValue TryR =
7060 MatchFunnelPosNeg(LHSShiftArg, RHSShiftArg, RHSShiftAmt, LHSShiftAmt,
7061 RExtOp0, LExtOp0, ISD::FSHR, ISD::FSHL, DL);
7062 if (TryR)
7063 return TryR;
7064
7065 return SDValue();
7066}
7067
7068namespace {
7069
7070 /// Represents the known origin of an individual byte in a load combine
7071 /// pattern. The value of the byte is either constant zero or comes from memory.
7072struct ByteProvider {
7073 // For constant zero providers Load is set to nullptr. For memory providers
7074 // Load represents the node which loads the byte from memory.
7075 // ByteOffset is the offset of the byte in the value produced by the load.
7076 LoadSDNode *Load = nullptr;
7077 unsigned ByteOffset = 0;
7078
7079 ByteProvider() = default;
7080
7081 static ByteProvider getMemory(LoadSDNode *Load, unsigned ByteOffset) {
7082 return ByteProvider(Load, ByteOffset);
7083 }
7084
7085 static ByteProvider getConstantZero() { return ByteProvider(nullptr, 0); }
7086
7087 bool isConstantZero() const { return !Load; }
7088 bool isMemory() const { return Load; }
7089
7090 bool operator==(const ByteProvider &Other) const {
7091 return Other.Load == Load && Other.ByteOffset == ByteOffset;
7092 }
7093
7094private:
7095 ByteProvider(LoadSDNode *Load, unsigned ByteOffset)
7096 : Load(Load), ByteOffset(ByteOffset) {}
7097};
7098
7099} // end anonymous namespace
7100
7101/// Recursively traverses the expression calculating the origin of the requested
7102/// byte of the given value. Returns None if the provider can't be calculated.
7103///
7104 /// For every value except the root of the expression, verifies that the value
7105 /// has exactly one use and returns None if it does not. This way, if the origin
7106 /// of the byte is returned, it's guaranteed that the values which contribute to
7107 /// the byte are not used outside of this expression.
7108///
7109/// Because the parts of the expression are not allowed to have more than one
7110/// use this function iterates over trees, not DAGs. So it never visits the same
7111/// node more than once.
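///
/// Worked example (illustrative, assumed i8 loads from p and p+1): for
/// i32 Val = (or (zext (load p)), (shl (zext (load p+1)), 8)), byte 0 of Val
/// resolves to getMemory(load p, 0), byte 1 to getMemory(load p+1, 0), and
/// bytes 2-3 to getConstantZero() through the zero-extensions.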
7112static const Optional<ByteProvider>
7113calculateByteProvider(SDValue Op, unsigned Index, unsigned Depth,
7114 bool Root = false) {
7115 // A typical i64-by-i8 pattern requires recursion up to a depth of 8 calls.
7116 if (Depth == 10)
7117 return None;
7118
7119 if (!Root && !Op.hasOneUse())
7120 return None;
7121
7122 assert(Op.getValueType().isScalarInteger() && "can't handle other types");
7123 unsigned BitWidth = Op.getValueSizeInBits();
7124 if (BitWidth % 8 != 0)
7125 return None;
7126 unsigned ByteWidth = BitWidth / 8;
7127 assert(Index < ByteWidth && "invalid index requested");
7128 (void) ByteWidth;
7129
7130 switch (Op.getOpcode()) {
7131 case ISD::OR: {
7132 auto LHS = calculateByteProvider(Op->getOperand(0), Index, Depth + 1);
7133 if (!LHS)
7134 return None;
7135 auto RHS = calculateByteProvider(Op->getOperand(1), Index, Depth + 1);
7136 if (!RHS)
7137 return None;
7138
7139 if (LHS->isConstantZero())
7140 return RHS;
7141 if (RHS->isConstantZero())
7142 return LHS;
7143 return None;
7144 }
7145 case ISD::SHL: {
7146 auto ShiftOp = dyn_cast<ConstantSDNode>(Op->getOperand(1));
7147 if (!ShiftOp)
7148 return None;
7149
7150 uint64_t BitShift = ShiftOp->getZExtValue();
7151 if (BitShift % 8 != 0)
7152 return None;
7153 uint64_t ByteShift = BitShift / 8;
7154
7155 return Index < ByteShift
7156 ? ByteProvider::getConstantZero()
7157 : calculateByteProvider(Op->getOperand(0), Index - ByteShift,
7158 Depth + 1);
7159 }
7160 case ISD::ANY_EXTEND:
7161 case ISD::SIGN_EXTEND:
7162 case ISD::ZERO_EXTEND: {
7163 SDValue NarrowOp = Op->getOperand(0);
7164 unsigned NarrowBitWidth = NarrowOp.getScalarValueSizeInBits();
7165 if (NarrowBitWidth % 8 != 0)
7166 return None;
7167 uint64_t NarrowByteWidth = NarrowBitWidth / 8;
7168
7169 if (Index >= NarrowByteWidth)
7170 return Op.getOpcode() == ISD::ZERO_EXTEND
7171 ? Optional<ByteProvider>(ByteProvider::getConstantZero())
7172 : None;
7173 return calculateByteProvider(NarrowOp, Index, Depth + 1);
7174 }
7175 case ISD::BSWAP:
7176 return calculateByteProvider(Op->getOperand(0), ByteWidth - Index - 1,
7177 Depth + 1);
7178 case ISD::LOAD: {
7179 auto L = cast<LoadSDNode>(Op.getNode());
7180 if (!L->isSimple() || L->isIndexed())
7181 return None;
7182
7183 unsigned NarrowBitWidth = L->getMemoryVT().getSizeInBits();
7184 if (NarrowBitWidth % 8 != 0)
7185 return None;
7186 uint64_t NarrowByteWidth = NarrowBitWidth / 8;
7187
7188 if (Index >= NarrowByteWidth)
7189 return L->getExtensionType() == ISD::ZEXTLOAD
7190 ? Optional<ByteProvider>(ByteProvider::getConstantZero())
7191 : None;
7192 return ByteProvider::getMemory(L, Index);
7193 }
7194 }
7195
7196 return None;
7197}
7198
7199static unsigned littleEndianByteAt(unsigned BW, unsigned i) {
7200 return i;
7201}
7202
7203static unsigned bigEndianByteAt(unsigned BW, unsigned i) {
7204 return BW - i - 1;
7205}
7206
7207 // Check if the byte offsets we are looking at match either a big- or
7208 // little-endian value load. Return true for big endian, false for little
7209 // endian, and None if the match failed.
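// E.g. (illustrative): relative offsets {0,1,2,3} match little endian
// (returns false), {3,2,1,0} match big endian (returns true), and a mixed
// order like {0,2,1,3} matches neither (returns None).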
7210static Optional<bool> isBigEndian(const ArrayRef<int64_t> ByteOffsets,
7211 int64_t FirstOffset) {
7212 // Endianness can only be determined when there are at least 2 bytes.
7213 unsigned Width = ByteOffsets.size();
7214 if (Width < 2)
7215 return None;
7216
7217 bool BigEndian = true, LittleEndian = true;
7218 for (unsigned i = 0; i < Width; i++) {
7219 int64_t CurrentByteOffset = ByteOffsets[i] - FirstOffset;
7220 LittleEndian &= CurrentByteOffset == littleEndianByteAt(Width, i);
7221 BigEndian &= CurrentByteOffset == bigEndianByteAt(Width, i);
7222 if (!BigEndian && !LittleEndian)
7223 return None;
7224 }
7225
7226 assert((BigEndian != LittleEndian) && "It should be either big endian or"
7227        "little endian");
7228 return BigEndian;
7229}
7230
7231static SDValue stripTruncAndExt(SDValue Value) {
7232 switch (Value.getOpcode()) {
7233 case ISD::TRUNCATE:
7234 case ISD::ZERO_EXTEND:
7235 case ISD::SIGN_EXTEND:
7236 case ISD::ANY_EXTEND:
7237 return stripTruncAndExt(Value.getOperand(0));
7238 }
7239 return Value;
7240}
7241
7242/// Match a pattern where a wide type scalar value is stored by several narrow
7243 /// stores. Fold it into a single store, or a BSWAP and a store, if the target
7244 /// supports it.
7245///
7246/// Assuming little endian target:
7247/// i8 *p = ...
7248/// i32 val = ...
7249/// p[0] = (val >> 0) & 0xFF;
7250/// p[1] = (val >> 8) & 0xFF;
7251/// p[2] = (val >> 16) & 0xFF;
7252/// p[3] = (val >> 24) & 0xFF;
7253/// =>
7254/// *((i32)p) = val;
7255///
7256/// i8 *p = ...
7257/// i32 val = ...
7258/// p[0] = (val >> 24) & 0xFF;
7259/// p[1] = (val >> 16) & 0xFF;
7260/// p[2] = (val >> 8) & 0xFF;
7261/// p[3] = (val >> 0) & 0xFF;
7262/// =>
7263/// *((i32)p) = BSWAP(val);
7264SDValue DAGCombiner::mergeTruncStores(StoreSDNode *N) {
7265 // The matching looks for "store (trunc x)" patterns that appear early but are
7266 // likely to be replaced by truncating store nodes during combining.
7267 // TODO: If there is evidence that running this later would help, this
7268 // limitation could be removed. Legality checks may need to be added
7269 // for the created store and optional bswap/rotate.
7270 if (LegalOperations)
1) Assuming field 'LegalOperations' is false
2) Taking false branch
7271 return SDValue();
7272
7273 // Collect all the stores in the chain.
7274 SDValue Chain;
7275 SmallVector<StoreSDNode *, 8> Stores;
7276 for (StoreSDNode *Store = N; Store; Store = dyn_cast<StoreSDNode>(Chain)) {
3) Assuming pointer value is null
4) Loop condition is false. Execution continues on line 7286
7277 // TODO: Allow unordered atomics when wider type is legal (see D66309)
7278 EVT MemVT = Store->getMemoryVT();
7279 if (!(MemVT == MVT::i8 || MemVT == MVT::i16 || MemVT == MVT::i32) ||
7280 !Store->isSimple() || Store->isIndexed())
7281 return SDValue();
7282 Stores.push_back(Store);
7283 Chain = Store->getChain();
7284 }
7285 // There is no reason to continue if we do not have at least a pair of stores.
7286 if (Stores.size() < 2)
5) Assuming the condition is false
6) Taking false branch
7287 return SDValue();
7288
7289 // Handle simple types only.
7290 LLVMContext &Context = *DAG.getContext();
7291 unsigned NumStores = Stores.size();
7292 unsigned NarrowNumBits = N->getMemoryVT().getScalarSizeInBits();
7) Called C++ object pointer is null
7293 unsigned WideNumBits = NumStores * NarrowNumBits;
7294 EVT WideVT = EVT::getIntegerVT(Context, WideNumBits);
7295 if (WideVT != MVT::i16 && WideVT != MVT::i32 && WideVT != MVT::i64)
7296 return SDValue();
7297
7298 // Check if all bytes of the source value that we are looking at are stored
7299 // to the same base address. Collect offsets from Base address into OffsetMap.
7300 SDValue SourceValue;
7301 SmallVector<int64_t, 8> OffsetMap(NumStores, INT64_MAX);
7302 int64_t FirstOffset = INT64_MAX;
7303 StoreSDNode *FirstStore = nullptr;
7304 Optional<BaseIndexOffset> Base;
7305 for (auto Store : Stores) {
7306 // All the stores store different parts of the CombinedValue. A truncate is
7307 // required to get the partial value.
7308 SDValue Trunc = Store->getValue();
7309 if (Trunc.getOpcode() != ISD::TRUNCATE)
7310 return SDValue();
7311 // Other than the first/last part, a shift operation is required to get the
7312 // offset.
7313 int64_t Offset = 0;
7314 SDValue WideVal = Trunc.getOperand(0);
7315 if ((WideVal.getOpcode() == ISD::SRL || WideVal.getOpcode() == ISD::SRA) &&
7316 isa<ConstantSDNode>(WideVal.getOperand(1))) {
7317 // The shift amount must be a constant multiple of the narrow type.
7318 // It is translated to the offset address in the wide source value "y".
7319 //
7320 // x = srl y, ShiftAmtC
7321 // i8 z = trunc x
7322 // store z, ...
7323 uint64_t ShiftAmtC = WideVal.getConstantOperandVal(1);
7324 if (ShiftAmtC % NarrowNumBits != 0)
7325 return SDValue();
7326
7327 Offset = ShiftAmtC / NarrowNumBits;
7328 WideVal = WideVal.getOperand(0);
7329 }
7330
7331 // Stores must share the same source value with different offsets.
7332 // Truncate and extends should be stripped to get the single source value.
7333 if (!SourceValue)
7334 SourceValue = WideVal;
7335 else if (stripTruncAndExt(SourceValue) != stripTruncAndExt(WideVal))
7336 return SDValue();
7337 else if (SourceValue.getValueType() != WideVT) {
7338 if (WideVal.getValueType() == WideVT ||
7339 WideVal.getScalarValueSizeInBits() >
7340 SourceValue.getScalarValueSizeInBits())
7341 SourceValue = WideVal;
7342 // Give up if the source value type is smaller than the store size.
7343 if (SourceValue.getScalarValueSizeInBits() < WideVT.getScalarSizeInBits())
7344 return SDValue();
7345 }
7346
7347 // Stores must share the same base address.
7348 BaseIndexOffset Ptr = BaseIndexOffset::match(Store, DAG);
7349 int64_t ByteOffsetFromBase = 0;
7350 if (!Base)
7351 Base = Ptr;
7352 else if (!Base->equalBaseIndex(Ptr, DAG, ByteOffsetFromBase))
7353 return SDValue();
7354
7355 // Remember the first store.
7356 if (ByteOffsetFromBase < FirstOffset) {
7357 FirstStore = Store;
7358 FirstOffset = ByteOffsetFromBase;
7359 }
7360 // Map the offset in the store and the offset in the combined value, and
7361 // early return if it has been set before.
7362 if (Offset < 0 || Offset >= NumStores || OffsetMap[Offset] != INT64_MAX)
7363 return SDValue();
7364 OffsetMap[Offset] = ByteOffsetFromBase;
7365 }
7366
7367 assert(FirstOffset != INT64_MAX && "First byte offset must be set");
7368 assert(FirstStore && "First store must be set");
7369
7370 // Check that a store of the wide type is both allowed and fast on the target
7371 const DataLayout &Layout = DAG.getDataLayout();
7372 bool Fast = false;
7373 bool Allowed = TLI.allowsMemoryAccess(Context, Layout, WideVT,
7374 *FirstStore->getMemOperand(), &Fast);
7375 if (!Allowed || !Fast)
7376 return SDValue();
7377
7378 // Check if the pieces of the value are going to the expected places in memory
7379 // to merge the stores.
7380 auto checkOffsets = [&](bool MatchLittleEndian) {
7381 if (MatchLittleEndian) {
7382 for (unsigned i = 0; i != NumStores; ++i)
7383 if (OffsetMap[i] != i * (NarrowNumBits / 8) + FirstOffset)
7384 return false;
7385 } else { // MatchBigEndian by reversing loop counter.
7386 for (unsigned i = 0, j = NumStores - 1; i != NumStores; ++i, --j)
7387 if (OffsetMap[j] != i * (NarrowNumBits / 8) + FirstOffset)
7388 return false;
7389 }
7390 return true;
7391 };
7392
7393 // Check if the offsets line up for the native data layout of this target.
7394 bool NeedBswap = false;
7395 bool NeedRotate = false;
7396 if (!checkOffsets(Layout.isLittleEndian())) {
7397 // Special-case: check if byte offsets line up for the opposite endian.
7398 if (NarrowNumBits == 8 && checkOffsets(Layout.isBigEndian()))
7399 NeedBswap = true;
7400 else if (NumStores == 2 && checkOffsets(Layout.isBigEndian()))
7401 NeedRotate = true;
7402 else
7403 return SDValue();
7404 }
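// E.g. (illustrative, little-endian target, i32 built from two i16 stores):
// if the high half lands at byte offset 0 and the low half at offset 2, the
// merge is still possible by storing (rotr val, 16).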
7405
7406 SDLoc DL(N);
7407 if (WideVT != SourceValue.getValueType()) {
7408 assert(SourceValue.getValueType().getScalarSizeInBits() > WideNumBits &&
7409        "Unexpected store value to merge");
7410 SourceValue = DAG.getNode(ISD::TRUNCATE, DL, WideVT, SourceValue);
7411 }
7412
7413 // Before legalize we can introduce illegal bswaps/rotates which will be later
7414 // converted to an explicit bswap sequence. This way we end up with a single
7415 // store and byte shuffling instead of several stores and byte shuffling.
7416 if (NeedBswap) {
7417 SourceValue = DAG.getNode(ISD::BSWAP, DL, WideVT, SourceValue);
7418 } else if (NeedRotate) {
7419 assert(WideNumBits % 2 == 0 && "Unexpected type for rotate");
7420 SDValue RotAmt = DAG.getConstant(WideNumBits / 2, DL, WideVT);
7421 SourceValue = DAG.getNode(ISD::ROTR, DL, WideVT, SourceValue, RotAmt);
7422 }
7423
7424 SDValue NewStore =
7425 DAG.getStore(Chain, DL, SourceValue, FirstStore->getBasePtr(),
7426 FirstStore->getPointerInfo(), FirstStore->getAlign());
7427
7428 // Rely on other DAG combine rules to remove the other individual stores.
7429 DAG.ReplaceAllUsesWith(N, NewStore.getNode());
7430 return NewStore;
7431}
7432
7433/// Match a pattern where a wide type scalar value is loaded by several narrow
7434 /// loads and combined by shifts and ors. Fold it into a single load, or a load
7435 /// and a BSWAP, if the target supports it.
7436///
7437/// Assuming little endian target:
7438/// i8 *a = ...
7439/// i32 val = a[0] | (a[1] << 8) | (a[2] << 16) | (a[3] << 24)
7440/// =>
7441/// i32 val = *((i32)a)
7442///
7443/// i8 *a = ...
7444/// i32 val = (a[0] << 24) | (a[1] << 16) | (a[2] << 8) | a[3]
7445/// =>
7446/// i32 val = BSWAP(*((i32)a))
7447///
7448/// TODO: This rule matches complex patterns with OR node roots and doesn't
7449/// interact well with the worklist mechanism. When a part of the pattern is
7450/// updated (e.g. one of the loads) its direct users are put into the worklist,
7451/// but the root node of the pattern which triggers the load combine is not
7452/// necessarily a direct user of the changed node. For example, once the address
7453/// of t28 load is reassociated load combine won't be triggered:
7454/// t25: i32 = add t4, Constant:i32<2>
7455/// t26: i64 = sign_extend t25
7456/// t27: i64 = add t2, t26
7457/// t28: i8,ch = load<LD1[%tmp9]> t0, t27, undef:i64
7458/// t29: i32 = zero_extend t28
7459/// t32: i32 = shl t29, Constant:i8<8>
7460/// t33: i32 = or t23, t32
7461/// As a possible fix visitLoad can check if the load can be a part of a load
7462/// combine pattern and add corresponding OR roots to the worklist.
7463SDValue DAGCombiner::MatchLoadCombine(SDNode *N) {
7464 assert(N->getOpcode() == ISD::OR &&
7465        "Can only match load combining against OR nodes");
7466
7467 // Handles simple types only
7468 EVT VT = N->getValueType(0);
7469 if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
7470 return SDValue();
7471 unsigned ByteWidth = VT.getSizeInBits() / 8;
7472
7473 bool IsBigEndianTarget = DAG.getDataLayout().isBigEndian();
7474 auto MemoryByteOffset = [&] (ByteProvider P) {
7475 assert(P.isMemory() && "Must be a memory byte provider");
7476 unsigned LoadBitWidth = P.Load->getMemoryVT().getSizeInBits();
7477 assert(LoadBitWidth % 8 == 0 &&
7478        "can only analyze providers for individual bytes not bit");
7479 unsigned LoadByteWidth = LoadBitWidth / 8;
7480 return IsBigEndianTarget
7481 ? bigEndianByteAt(LoadByteWidth, P.ByteOffset)
7482 : littleEndianByteAt(LoadByteWidth, P.ByteOffset);
7483 };
7484
7485 Optional<BaseIndexOffset> Base;
7486 SDValue Chain;
7487
7488 SmallPtrSet<LoadSDNode *, 8> Loads;
7489 Optional<ByteProvider> FirstByteProvider;
7490 int64_t FirstOffset = INT64_MAX;
7491
7492 // Check if all the bytes of the OR we are looking at are loaded from the same
7493 // base address. Collect byte offsets from the Base address in ByteOffsets.
7494 SmallVector<int64_t, 8> ByteOffsets(ByteWidth);
7495 unsigned ZeroExtendedBytes = 0;
7496 for (int i = ByteWidth - 1; i >= 0; --i) {
7497 auto P = calculateByteProvider(SDValue(N, 0), i, 0, /*Root=*/true);
7498 if (!P)
7499 return SDValue();
7500
7501 if (P->isConstantZero()) {
7502 // It's OK for the N most significant bytes to be 0, we can just
7503 // zero-extend the load.
7504 if (++ZeroExtendedBytes != (ByteWidth - static_cast<unsigned>(i)))
7505 return SDValue();
7506 continue;
7507 }
7508 assert(P->isMemory() && "provenance should either be memory or zero");
7509
7510 LoadSDNode *L = P->Load;
7511 assert(L->hasNUsesOfValue(1, 0) && L->isSimple() &&
7512        !L->isIndexed() &&
7513        "Must be enforced by calculateByteProvider");
7514 assert(L->getOffset().isUndef() && "Unindexed load must have undef offset");
7515
7516 // All loads must share the same chain
7517 SDValue LChain = L->getChain();
7518 if (!Chain)
7519 Chain = LChain;
7520 else if (Chain != LChain)
7521 return SDValue();
7522
7523 // Loads must share the same base address
7524 BaseIndexOffset Ptr = BaseIndexOffset::match(L, DAG);
7525 int64_t ByteOffsetFromBase = 0;
7526 if (!Base)
7527 Base = Ptr;
7528 else if (!Base->equalBaseIndex(Ptr, DAG, ByteOffsetFromBase))
7529 return SDValue();
7530
7531 // Calculate the offset of the current byte from the base address
7532 ByteOffsetFromBase += MemoryByteOffset(*P);
7533 ByteOffsets[i] = ByteOffsetFromBase;
7534
7535 // Remember the first byte load
7536 if (ByteOffsetFromBase < FirstOffset) {
7537 FirstByteProvider = P;
7538 FirstOffset = ByteOffsetFromBase;
7539 }
7540
7541 Loads.insert(L);
7542 }
7543 assert(!Loads.empty() && "All the bytes of the value must be loaded from "
7544        "memory, so there must be at least one load which produces the value");
7545 assert(Base && "Base address of the accessed memory location must be set");
7546 assert(FirstOffset != INT64_MAX && "First byte offset must be set");
7547
7548 bool NeedsZext = ZeroExtendedBytes > 0;
7549
7550 EVT MemVT =
7551 EVT::getIntegerVT(*DAG.getContext(), (ByteWidth - ZeroExtendedBytes) * 8);
7552
7553 if (!MemVT.isSimple())
7554 return SDValue();
7555
7556 // Before legalize we can introduce too wide illegal loads which will be later
7557 // split into legal sized loads. This enables us to combine i64 load by i8
7558 // patterns to a couple of i32 loads on 32 bit targets.
7559 if (LegalOperations &&
7560 !TLI.isOperationLegal(NeedsZext ? ISD::ZEXTLOAD : ISD::NON_EXTLOAD,
7561 MemVT))
7562 return SDValue();
7563
7564 // Check if the bytes of the OR we are looking at match either a big- or
7565 // little-endian value load.
7566 Optional<bool> IsBigEndian = isBigEndian(
7567 makeArrayRef(ByteOffsets).drop_back(ZeroExtendedBytes), FirstOffset);
7568 if (!IsBigEndian.hasValue())
7569 return SDValue();
7570
7571 assert(FirstByteProvider && "must be set");
7572
7573 // Ensure that the first byte is loaded from the zero offset of the first
7574 // load, so the combined value can be loaded from the first load's address.
7575 if (MemoryByteOffset(*FirstByteProvider) != 0)
7576 return SDValue();
7577 LoadSDNode *FirstLoad = FirstByteProvider->Load;
7578
7579 // The node we are looking at matches with the pattern, check if we can
7580 // replace it with a single (possibly zero-extended) load and bswap + shift if
7581 // needed.
7582
7583 // If the load needs byte swap check if the target supports it
7584 bool NeedsBswap = IsBigEndianTarget != *IsBigEndian;
7585
7586 // Before legalize we can introduce illegal bswaps which will be later
7587 // converted to an explicit bswap sequence. This way we end up with a single
7588 // load and byte shuffling instead of several loads and byte shuffling.
7589 // We do not introduce illegal bswaps when zero-extending as this tends to
7590 // introduce too many arithmetic instructions.
7591 if (NeedsBswap && (LegalOperations || NeedsZext) &&
7592 !TLI.isOperationLegal(ISD::BSWAP, VT))
7593 return SDValue();
7594
7595 // If we need to bswap and zero extend, we have to insert a shift. Check that
7596 // it is legal.
7597 if (NeedsBswap && NeedsZext && LegalOperations &&
7598 !TLI.isOperationLegal(ISD::SHL, VT))
7599 return SDValue();
7600
7601 // Check that a load of the wide type is both allowed and fast on the target
7602 bool Fast = false;
7603 bool Allowed =
7604 TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
7605 *FirstLoad->getMemOperand(), &Fast);
7606 if (!Allowed || !Fast)
7607 return SDValue();
7608
7609 SDValue NewLoad =
7610 DAG.getExtLoad(NeedsZext ? ISD::ZEXTLOAD : ISD::NON_EXTLOAD, SDLoc(N), VT,
7611 Chain, FirstLoad->getBasePtr(),
7612 FirstLoad->getPointerInfo(), MemVT, FirstLoad->getAlign());
7613
7614 // Transfer chain users from old loads to the new load.
7615 for (LoadSDNode *L : Loads)
7616 DAG.ReplaceAllUsesOfValueWith(SDValue(L, 1), SDValue(NewLoad.getNode(), 1));
7617
7618 if (!NeedsBswap)
7619 return NewLoad;
7620
7621 SDValue ShiftedLoad =
7622 NeedsZext
7623 ? DAG.getNode(ISD::SHL, SDLoc(N), VT, NewLoad,
7624 DAG.getShiftAmountConstant(ZeroExtendedBytes * 8, VT,
7625 SDLoc(N), LegalOperations))
7626 : NewLoad;
7627 return DAG.getNode(ISD::BSWAP, SDLoc(N), VT, ShiftedLoad);
7628}
7629
7630// If the target has andn, bsl, or a similar bit-select instruction,
7631// we want to unfold masked merge, with canonical pattern of:
7632 //   |     A     | |B|
7633 //   ((x ^ y) & m) ^ y
7634 //    |  D  |
7635// Into:
7636// (x & m) | (y & ~m)
7637// If y is a constant, and the 'andn' does not work with immediates,
7638// we unfold into a different pattern:
7639// ~(~x & m) & (m | y)
7640// NOTE: we don't unfold the pattern if 'xor' is actually a 'not', because at
7641// the very least that breaks andnpd / andnps patterns, and because those
7642// patterns are simplified in IR and shouldn't be created in the DAG
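// Bitwise sanity check (illustrative): wherever a mask bit m is 1,
// ((x ^ y) & m) ^ y == x ^ y ^ y == x; wherever m is 0 it is 0 ^ y == y,
// which is exactly the bit that (x & m) | (y & ~m) selects.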
7643SDValue DAGCombiner::unfoldMaskedMerge(SDNode *N) {
7644 assert(N->getOpcode() == ISD::XOR);
7645
7646 // Don't touch 'not' (i.e. where y = -1).
7647 if (isAllOnesOrAllOnesSplat(N->getOperand(1)))
7648 return SDValue();
7649
7650 EVT VT = N->getValueType(0);
7651
7652 // There are 3 commutable operators in the pattern,
7653 // so we have to deal with 8 possible variants of the basic pattern.
7654 SDValue X, Y, M;
7655 auto matchAndXor = [&X, &Y, &M](SDValue And, unsigned XorIdx, SDValue Other) {
7656 if (And.getOpcode() != ISD::AND || !And.hasOneUse())
7657 return false;
7658 SDValue Xor = And.getOperand(XorIdx);
7659 if (Xor.getOpcode() != ISD::XOR || !Xor.hasOneUse())
7660 return false;
7661 SDValue Xor0 = Xor.getOperand(0);
7662 SDValue Xor1 = Xor.getOperand(1);
7663 // Don't touch 'not' (i.e. where y = -1).
7664 if (isAllOnesOrAllOnesSplat(Xor1))
7665 return false;
7666 if (Other == Xor0)
7667 std::swap(Xor0, Xor1);
7668 if (Other != Xor1)
7669 return false;
7670 X = Xor0;
7671 Y = Xor1;
7672 M = And.getOperand(XorIdx ? 0 : 1);
7673 return true;
7674 };
7675
7676 SDValue N0 = N->getOperand(0);
7677 SDValue N1 = N->getOperand(1);
7678 if (!matchAndXor(N0, 0, N1) && !matchAndXor(N0, 1, N1) &&
7679 !matchAndXor(N1, 0, N0) && !matchAndXor(N1, 1, N0))
7680 return SDValue();
7681
7682 // Don't do anything if the mask is constant. This should not be reachable.
7683 // InstCombine should have already unfolded this pattern, and DAGCombiner
7684 // probably shouldn't produce it either.
7685 if (isa<ConstantSDNode>(M.getNode()))
7686 return SDValue();
7687
7688 // We can transform if the target has AndNot
7689 if (!TLI.hasAndNot(M))
7690 return SDValue();
7691
7692 SDLoc DL(N);
7693
7694 // If Y is a constant, check that 'andn' works with immediates.
7695 if (!TLI.hasAndNot(Y)) {
7696 assert(TLI.hasAndNot(X) && "Only mask is a variable? Unreachable.");
7697 // If not, we need to do a bit more work to make sure andn is still used.
7698 SDValue NotX = DAG.getNOT(DL, X, VT);
7699 SDValue LHS = DAG.getNode(ISD::AND, DL, VT, NotX, M);
7700 SDValue NotLHS = DAG.getNOT(DL, LHS, VT);
7701 SDValue RHS = DAG.getNode(ISD::OR, DL, VT, M, Y);
7702 return DAG.getNode(ISD::AND, DL, VT, NotLHS, RHS);
7703 }
7704
7705 SDValue LHS = DAG.getNode(ISD::AND, DL, VT, X, M);
7706 SDValue NotM = DAG.getNOT(DL, M, VT);
7707 SDValue RHS = DAG.getNode(ISD::AND, DL, VT, Y, NotM);
7708
7709 return DAG.getNode(ISD::OR, DL, VT, LHS, RHS);
7710}
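
A minimal standalone sketch (not part of DAGCombiner.cpp; the helper name and sample bit patterns are illustrative) verifying both unfolded forms against the canonical pattern on scalar values:

#include <cassert>
#include <cstdint>

static void checkMaskedMergeIdentity() {
  uint8_t x = 0xCA, y = 0x56, m = 0xF0;   // arbitrary sample values
  uint8_t folded   = ((x ^ y) & m) ^ y;   // canonical masked-merge pattern
  uint8_t unfolded = (x & m) | (y & ~m);  // generic unfolded form
  uint8_t immForm  = ~(~x & m) & (m | y); // form used when y is a constant
  assert(folded == unfolded && folded == immForm);
}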
7711
7712SDValue DAGCombiner::visitXOR(SDNode *N) {
7713 SDValue N0 = N->getOperand(0);
7714 SDValue N1 = N->getOperand(1);
7715 EVT VT = N0.getValueType();
7716
7717 // fold vector ops
7718 if (VT.isVector()) {
7719 if (SDValue FoldedVOp = SimplifyVBinOp(N))
7720 return FoldedVOp;
7721
7722 // fold (xor x, 0) -> x, vector edition
7723 if (ISD::isConstantSplatVectorAllZeros(N0.getNode()))
7724 return N1;
7725 if (ISD::isConstantSplatVectorAllZeros(N1.getNode()))
7726 return N0;
7727 }
7728
7729 // fold (xor undef, undef) -> 0. This is a common idiom (misuse).
7730 SDLoc DL(N);
7731 if (N0.isUndef() && N1.isUndef())
7732 return DAG.getConstant(0, DL, VT);
7733
7734 // fold (xor x, undef) -> undef
7735 if (N0.isUndef())
7736 return N0;
7737 if (N1.isUndef())
7738 return N1;
7739
7740 // fold (xor c1, c2) -> c1^c2
7741 if (SDValue C = DAG.FoldConstantArithmetic(ISD::XOR, DL, VT, {N0, N1}))
7742 return C;
7743
7744 // canonicalize constant to RHS
7745 if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
7746 !DAG.isConstantIntBuildVectorOrConstantInt(N1))
7747 return DAG.getNode(ISD::XOR, DL, VT, N1, N0);
7748
7749 // fold (xor x, 0) -> x
7750 if (isNullConstant(N1))
7751 return N0;
7752
7753 if (SDValue NewSel = foldBinOpIntoSelect(N))
7754 return NewSel;
7755
7756 // reassociate xor
7757 if (SDValue RXOR = reassociateOps(ISD::XOR, DL, N0, N1, N->getFlags()))
7758 return RXOR;
7759
7760 // fold !(x cc y) -> (x !cc y)
7761 unsigned N0Opcode = N0.getOpcode();
7762 SDValue LHS, RHS, CC;
7763 if (TLI.isConstTrueVal(N1.getNode()) &&
7764 isSetCCEquivalent(N0, LHS, RHS, CC, /*MatchStrict*/true)) {
7765 ISD::CondCode NotCC = ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
7766 LHS.getValueType());
7767 if (!LegalOperations ||
7768 TLI.isCondCodeLegal(NotCC, LHS.getSimpleValueType())) {
7769 switch (N0Opcode) {
7770 default:
7771 llvm_unreachable("Unhandled SetCC Equivalent!");
7772 case ISD::SETCC:
7773 return DAG.getSetCC(SDLoc(N0), VT, LHS, RHS, NotCC);
7774 case ISD::SELECT_CC:
7775 return DAG.getSelectCC(SDLoc(N0), LHS, RHS, N0.getOperand(2),
7776 N0.getOperand(3), NotCC);
7777 case ISD::STRICT_FSETCC:
7778 case ISD::STRICT_FSETCCS: {
7779 if (N0.hasOneUse()) {
7780 // FIXME Can we handle multiple uses? Could we token factor the chain
7781 // results from the new/old setcc?
7782 SDValue SetCC =
7783 DAG.getSetCC(SDLoc(N0), VT, LHS, RHS, NotCC,
7784 N0.getOperand(0), N0Opcode == ISD::STRICT_FSETCCS);
7785 CombineTo(N, SetCC);
7786 DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), SetCC.getValue(1));
7787 recursivelyDeleteUnusedNodes(N0.getNode());
7788 return SDValue(N, 0); // Return N so it doesn't get rechecked!
7789 }
7790 break;
7791 }
7792 }
7793 }
7794 }
7795
7796 // fold (not (zext (setcc x, y))) -> (zext (not (setcc x, y)))
7797 if (isOneConstant(N1) && N0Opcode == ISD::ZERO_EXTEND && N0.hasOneUse() &&
7798 isSetCCEquivalent(N0.getOperand(0), LHS, RHS, CC)){
7799 SDValue V = N0.getOperand(0);
7800 SDLoc DL0(N0);
7801 V = DAG.getNode(ISD::XOR, DL0, V.getValueType(), V,
7802 DAG.getConstant(1, DL0, V.getValueType()));
7803 AddToWorklist(V.getNode());
7804 return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, V);
7805 }
7806
7807 // fold (not (or x, y)) -> (and (not x), (not y)) iff x or y are setcc
7808 if (isOneConstant(N1) && VT == MVT::i1 && N0.hasOneUse() &&
7809 (N0Opcode == ISD::OR || N0Opcode == ISD::AND)) {
7810 SDValue N00 = N0.getOperand(0), N01 = N0.getOperand(1);
7811 if (isOneUseSetCC(N01) || isOneUseSetCC(N00)) {
7812 unsigned NewOpcode = N0Opcode == ISD::AND ? ISD::OR : ISD::AND;
7813 N00 = DAG.getNode(ISD::XOR, SDLoc(N00), VT, N00, N1); // N00 = ~N00
7814 N01 = DAG.getNode(ISD::XOR, SDLoc(N01), VT, N01, N1); // N01 = ~N01
7815 AddToWorklist(N00.getNode()); AddToWorklist(N01.getNode());
7816 return DAG.getNode(NewOpcode, DL, VT, N00, N01);
7817 }
7818 }
7819 // fold (not (or x, y)) -> (and (not x), (not y)) iff x or y are constants
7820 if (isAllOnesConstant(N1) && N0.hasOneUse() &&
7821 (N0Opcode == ISD::OR || N0Opcode == ISD::AND)) {
7822 SDValue N00 = N0.getOperand(0), N01 = N0.getOperand(1);
7823 if (isa<ConstantSDNode>(N01) || isa<ConstantSDNode>(N00)) {
7824 unsigned NewOpcode = N0Opcode == ISD::AND ? ISD::OR : ISD::AND;
7825 N00 = DAG.getNode(ISD::XOR, SDLoc(N00), VT, N00, N1); // N00 = ~N00
7826 N01 = DAG.getNode(ISD::XOR, SDLoc(N01), VT, N01, N1); // N01 = ~N01
7827 AddToWorklist(N00.getNode()); AddToWorklist(N01.getNode());
7828 return DAG.getNode(NewOpcode, DL, VT, N00, N01);
7829 }
7830 }
7831
7832 // fold (not (neg x)) -> (add X, -1)
7833 // FIXME: This can be generalized to (not (sub Y, X)) -> (add X, ~Y) if
7834 // Y is a constant or the subtract has a single use.
7835 if (isAllOnesConstant(N1) && N0.getOpcode() == ISD::SUB &&
7836 isNullConstant(N0.getOperand(0))) {
7837 return DAG.getNode(ISD::ADD, DL, VT, N0.getOperand(1),
7838 DAG.getAllOnesConstant(DL, VT));
7839 }
7840
7841 // fold (not (add X, -1)) -> (neg X)
7842 if (isAllOnesConstant(N1) && N0.getOpcode() == ISD::ADD &&
7843 isAllOnesOrAllOnesSplat(N0.getOperand(1))) {
7844 return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
7845 N0.getOperand(0));
7846 }
7847
7848 // fold (xor (and x, y), y) -> (and (not x), y)
7849 if (N0Opcode == ISD::AND && N0.hasOneUse() && N0->getOperand(1) == N1) {
7850 SDValue X = N0.getOperand(0);
7851 SDValue NotX = DAG.getNOT(SDLoc(X), X, VT);
7852 AddToWorklist(NotX.getNode());
7853 return DAG.getNode(ISD::AND, DL, VT, NotX, N1);
7854 }
7855
7856 if ((N0Opcode == ISD::SRL || N0Opcode == ISD::SHL) && N0.hasOneUse()) {
7857 ConstantSDNode *XorC = isConstOrConstSplat(N1);
7858 ConstantSDNode *ShiftC = isConstOrConstSplat(N0.getOperand(1));
7859 unsigned BitWidth = VT.getScalarSizeInBits();
7860 if (XorC && ShiftC) {
7861 // Don't crash on an oversized shift. We cannot guarantee that a bogus
7862 // shift has been simplified to undef.
7863 uint64_t ShiftAmt = ShiftC->getLimitedValue();
7864 if (ShiftAmt < BitWidth) {
7865 APInt Ones = APInt::getAllOnesValue(BitWidth);
7866 Ones = N0Opcode == ISD::SHL ? Ones.shl(ShiftAmt) : Ones.lshr(ShiftAmt);
7867 if (XorC->getAPIntValue() == Ones) {
7868 // If the xor constant is a shifted -1, do a 'not' before the shift:
7869 // xor (X << ShiftC), XorC --> (not X) << ShiftC
7870 // xor (X >> ShiftC), XorC --> (not X) >> ShiftC
7871 SDValue Not = DAG.getNOT(DL, N0.getOperand(0), VT);
7872 return DAG.getNode(N0Opcode, DL, VT, Not, N0.getOperand(1));
7873 }
7874 }
7875 }
7876 }
7877
7878 // fold Y = sra (X, size(X)-1); xor (add (X, Y), Y) -> (abs X)
7879 if (TLI.isOperationLegalOrCustom(ISD::ABS, VT)) {
7880 SDValue A = N0Opcode == ISD::ADD ? N0 : N1;
7881 SDValue S = N0Opcode == ISD::SRA ? N0 : N1;
7882 if (A.getOpcode() == ISD::ADD && S.getOpcode() == ISD::SRA) {
7883 SDValue A0 = A.getOperand(0), A1 = A.getOperand(1);
7884 SDValue S0 = S.getOperand(0);
7885 if ((A0 == S && A1 == S0) || (A1 == S && A0 == S0))
7886 if (ConstantSDNode *C = isConstOrConstSplat(S.getOperand(1)))
7887 if (C->getAPIntValue() == (VT.getScalarSizeInBits() - 1))
7888 return DAG.getNode(ISD::ABS, DL, VT, S0);
7889 }
7890 }
7891
7892 // fold (xor x, x) -> 0
7893 if (N0 == N1)
7894 return tryFoldToZero(DL, TLI, VT, DAG, LegalOperations);
7895
7896 // fold (xor (shl 1, x), -1) -> (rotl ~1, x)
7897 // Here is a concrete example of this equivalence:
7898 // i16 x == 14
7899 // i16 shl == 1 << 14 == 16384 == 0b0100000000000000
7900 // i16 xor == ~(1 << 14) == 49151 == 0b1011111111111111
7901 //
7902 // =>
7903 //
7904 // i16 ~1 == 0b1111111111111110
7905 // i16 rol(~1, 14) == 0b1011111111111111
7906 //
7907 // Some additional tips to help conceptualize this transform:
7908 // - Try to see the operation as placing a single zero in a value of all ones.
7909 // - There exists no value for x which would allow the result to contain zero.
7910 // - Values of x larger than the bitwidth are undefined and do not require a
7911 // consistent result.
7912// - Pushing the zero left requires shifting one-bits in from the right.
7913 // A rotate left of ~1 is a nice way of achieving the desired result.
7914 if (TLI.isOperationLegalOrCustom(ISD::ROTL, VT) && N0Opcode == ISD::SHL &&
7915 isAllOnesConstant(N1) && isOneConstant(N0.getOperand(0))) {
7916 return DAG.getNode(ISD::ROTL, DL, VT, DAG.getConstant(~1, DL, VT),
7917 N0.getOperand(1));
7918 }
7919
7920 // Simplify: xor (op x...), (op y...) -> (op (xor x, y))
7921 if (N0Opcode == N1.getOpcode())
7922 if (SDValue V = hoistLogicOpWithSameOpcodeHands(N))
7923 return V;
7924
7925 // Unfold ((x ^ y) & m) ^ y into (x & m) | (y & ~m) if profitable
7926 if (SDValue MM = unfoldMaskedMerge(N))
7927 return MM;
7928
7929 // Simplify the expression using non-local knowledge.
7930 if (SimplifyDemandedBits(SDValue(N, 0)))
7931 return SDValue(N, 0);
7932
7933 if (SDValue Combined = combineCarryDiamond(*this, DAG, TLI, N0, N1, N))
7934 return Combined;
7935
7936 return SDValue();
7937}
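
One of the simpler rewrites above, (xor (and x, y), y) -> (and (not x), y), can be checked in isolation. A standalone sketch with arbitrary sample values, not code from this file:

#include <cassert>
#include <cstdint>

static void checkXorOfAndFold() {
  uint32_t x = 0xCAFEBABE, y = 0x0F0F0F0F;
  // Where y is 0 both sides are 0; where y is 1 the xor flips x's bit,
  // which is exactly (not x) masked by y.
  assert(((x & y) ^ y) == (~x & y));
}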
7938
7939/// If we have a shift-by-constant of a bitwise logic op that itself has a
7940/// shift-by-constant operand with identical opcode, we may be able to convert
7941/// that into 2 independent shifts followed by the logic op. This is a
7942/// throughput improvement.
7943static SDValue combineShiftOfShiftedLogic(SDNode *Shift, SelectionDAG &DAG) {
7944 // Match a one-use bitwise logic op.
7945 SDValue LogicOp = Shift->getOperand(0);
7946 if (!LogicOp.hasOneUse())
7947 return SDValue();
7948
7949 unsigned LogicOpcode = LogicOp.getOpcode();
7950 if (LogicOpcode != ISD::AND && LogicOpcode != ISD::OR &&
7951 LogicOpcode != ISD::XOR)
7952 return SDValue();
7953
7954 // Find a matching one-use shift by constant.
7955 unsigned ShiftOpcode = Shift->getOpcode();
7956 SDValue C1 = Shift->getOperand(1);
7957 ConstantSDNode *C1Node = isConstOrConstSplat(C1);
7958 assert(C1Node && "Expected a shift with constant operand");
7959 const APInt &C1Val = C1Node->getAPIntValue();
7960 auto matchFirstShift = [&](SDValue V, SDValue &ShiftOp,
7961 const APInt *&ShiftAmtVal) {
7962 if (V.getOpcode() != ShiftOpcode || !V.hasOneUse())
7963 return false;
7964
7965 ConstantSDNode *ShiftCNode = isConstOrConstSplat(V.getOperand(1));
7966 if (!ShiftCNode)
7967 return false;
7968
7969 // Capture the shifted operand and shift amount value.
7970 ShiftOp = V.getOperand(0);
7971 ShiftAmtVal = &ShiftCNode->getAPIntValue();
7972
7973 // Shift amount types do not have to match their operand type, so check that
7974 // the constants are the same width.
7975 if (ShiftAmtVal->getBitWidth() != C1Val.getBitWidth())
7976 return false;
7977
7978 // The fold is not valid if the sum of the shift values exceeds bitwidth.
7979 if ((*ShiftAmtVal + C1Val).uge(V.getScalarValueSizeInBits()))
7980 return false;
7981
7982 return true;
7983 };
7984
7985 // Logic ops are commutative, so check each operand for a match.
7986 SDValue X, Y;
7987 const APInt *C0Val;
7988 if (matchFirstShift(LogicOp.getOperand(0), X, C0Val))
7989 Y = LogicOp.getOperand(1);
7990 else if (matchFirstShift(LogicOp.getOperand(1), X, C0Val))
7991 Y = LogicOp.getOperand(0);
7992 else
7993 return SDValue();
7994
7995 // shift (logic (shift X, C0), Y), C1 -> logic (shift X, C0+C1), (shift Y, C1)
7996 SDLoc DL(Shift);
7997 EVT VT = Shift->getValueType(0);
7998 EVT ShiftAmtVT = Shift->getOperand(1).getValueType();
7999 SDValue ShiftSumC = DAG.getConstant(*C0Val + C1Val, DL, ShiftAmtVT);
8000 SDValue NewShift1 = DAG.getNode(ShiftOpcode, DL, VT, X, ShiftSumC);
8001 SDValue NewShift2 = DAG.getNode(ShiftOpcode, DL, VT, Y, C1);
8002 return DAG.getNode(LogicOpcode, DL, VT, NewShift1, NewShift2);
8003}
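
The transform relies on constant shifts distributing over bitwise logic while the summed shift amount stays below the bit width. A standalone scalar sketch (names and constants are illustrative):

#include <cassert>
#include <cstdint>

static void checkShiftOfShiftedLogic() {
  uint32_t X = 0x12345678, Y = 0xF0F0F0F0;
  unsigned C0 = 3, C1 = 5; // C0 + C1 must stay below the 32-bit width
  // shift (logic (shift X, C0), Y), C1 == logic (shift X, C0+C1), (shift Y, C1)
  assert((((X << C0) & Y) << C1) == ((X << (C0 + C1)) & (Y << C1)));
  assert((((X << C0) ^ Y) << C1) == ((X << (C0 + C1)) ^ (Y << C1)));
}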
8004
8005/// Handle transforms common to the three shifts, when the shift amount is a
8006/// constant.
8007/// We are looking for: (shift being one of shl/sra/srl)
8008/// shift (binop X, C0), C1
8009/// And want to transform into:
8010/// binop (shift X, C1), (shift C0, C1)
8011SDValue DAGCombiner::visitShiftByConstant(SDNode *N) {
8012 assert(isConstOrConstSplat(N->getOperand(1)) && "Expected constant operand");
8013
8014 // Do not turn a 'not' into a regular xor.
8015 if (isBitwiseNot(N->getOperand(0)))
8016 return SDValue();
8017
8018 // The inner binop must be one-use, since we want to replace it.
8019 SDValue LHS = N->getOperand(0);
8020 if (!LHS.hasOneUse() || !TLI.isDesirableToCommuteWithShift(N, Level))
8021 return SDValue();
8022
8023 // TODO: This is limited to early combining because it may reveal regressions
8024 // otherwise. But since we just checked a target hook to see if this is
8025 // desirable, that should have filtered out cases where this interferes
8026 // with some other pattern matching.
8027 if (!LegalTypes)
8028 if (SDValue R = combineShiftOfShiftedLogic(N, DAG))
8029 return R;
8030
8031 // We want to pull some binops through shifts, so that we have (and (shift))
8032 // instead of (shift (and)), likewise for add, or, xor, etc. This sort of
8033 // thing happens with address calculations, so it's important to canonicalize
8034 // it.
8035 switch (LHS.getOpcode()) {
8036 default:
8037 return SDValue();
8038 case ISD::OR:
8039 case ISD::XOR:
8040 case ISD::AND:
8041 break;
8042 case ISD::ADD:
8043 if (N->getOpcode() != ISD::SHL)
8044 return SDValue(); // only shl(add) not sr[al](add).
8045 break;
8046 }
8047
8048 // We require the RHS of the binop to be a constant and not opaque as well.
8049 ConstantSDNode *BinOpCst = getAsNonOpaqueConstant(LHS.getOperand(1));
8050 if (!BinOpCst)
8051 return SDValue();
8052
8053 // FIXME: disable this unless the input to the binop is a shift by a constant
8054 // or is copy/select. Enable this in other cases when we figure out exactly
8055 // when it is profitable.
8056 SDValue BinOpLHSVal = LHS.getOperand(0);
8057 bool IsShiftByConstant = (BinOpLHSVal.getOpcode() == ISD::SHL ||
8058 BinOpLHSVal.getOpcode() == ISD::SRA ||
8059 BinOpLHSVal.getOpcode() == ISD::SRL) &&
8060 isa<ConstantSDNode>(BinOpLHSVal.getOperand(1));
8061 bool IsCopyOrSelect = BinOpLHSVal.getOpcode() == ISD::CopyFromReg ||
8062 BinOpLHSVal.getOpcode() == ISD::SELECT;
8063
8064 if (!IsShiftByConstant && !IsCopyOrSelect)
8065 return SDValue();
8066
8067 if (IsCopyOrSelect && N->hasOneUse())
8068 return SDValue();
8069
8070 // Fold the constants, shifting the binop RHS by the shift amount.
8071 SDLoc DL(N);
8072 EVT VT = N->getValueType(0);
8073 SDValue NewRHS = DAG.getNode(N->getOpcode(), DL, VT, LHS.getOperand(1),
8074 N->getOperand(1));
8075 assert(isa<ConstantSDNode>(NewRHS) && "Folding was not successful!");
8076
8077 SDValue NewShift = DAG.getNode(N->getOpcode(), DL, VT, LHS.getOperand(0),
8078 N->getOperand(1));
8079 return DAG.getNode(LHS.getOpcode(), DL, VT, NewShift, NewRHS);
8080}
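
The underlying identity is that these binops distribute through a shift by a constant. A minimal standalone check (sample values only, not LLVM code):

#include <cassert>
#include <cstdint>

static void checkBinopThroughShift() {
  uint32_t X = 0xDEADBEEF, C0 = 0x1234;
  unsigned C1 = 7;
  // shl distributes over add modulo 2^32; all three shifts distribute
  // over and/or/xor.
  assert(((X + C0) << C1) == ((X << C1) + (C0 << C1)));
  assert(((X | C0) >> C1) == ((X >> C1) | (C0 >> C1)));
}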
8081
8082SDValue DAGCombiner::distributeTruncateThroughAnd(SDNode *N) {
8083 assert(N->getOpcode() == ISD::TRUNCATE);
8084 assert(N->getOperand(0).getOpcode() == ISD::AND);
8085
8086 // (truncate:TruncVT (and N00, N01C)) -> (and (truncate:TruncVT N00), TruncC)
8087 EVT TruncVT = N->getValueType(0);
8088 if (N->hasOneUse() && N->getOperand(0).hasOneUse() &&
8089 TLI.isTypeDesirableForOp(ISD::AND, TruncVT)) {
8090 SDValue N01 = N->getOperand(0).getOperand(1);
8091 if (isConstantOrConstantVector(N01, /* NoOpaques */ true)) {
8092 SDLoc DL(N);
8093 SDValue N00 = N->getOperand(0).getOperand(0);
8094 SDValue Trunc00 = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, N00);
8095 SDValue Trunc01 = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, N01);
8096 AddToWorklist(Trunc00.getNode());
8097 AddToWorklist(Trunc01.getNode());
8098 return DAG.getNode(ISD::AND, DL, TruncVT, Trunc00, Trunc01);
8099 }
8100 }
8101
8102 return SDValue();
8103}
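
Truncation only drops high bits, so it commutes with AND. A standalone scalar sketch of the transform (names are illustrative):

#include <cassert>
#include <cstdint>

static void checkTruncThroughAnd() {
  uint32_t N00 = 0xABCD1234, N01C = 0x0000FF00;
  // (truncate (and N00, N01C)) == (and (truncate N00), (truncate N01C))
  assert((uint16_t)(N00 & N01C) == ((uint16_t)N00 & (uint16_t)N01C));
}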
8104
8105SDValue DAGCombiner::visitRotate(SDNode *N) {
8106 SDLoc dl(N);
8107 SDValue N0 = N->getOperand(0);
8108 SDValue N1 = N->getOperand(1);
8109 EVT VT = N->getValueType(0);
8110 unsigned Bitsize = VT.getScalarSizeInBits();
8111
8112 // fold (rot x, 0) -> x
8113 if (isNullOrNullSplat(N1))
8114 return N0;
8115
8116 // fold (rot x, c) -> x iff (c % BitSize) == 0
8117 if (isPowerOf2_32(Bitsize) && Bitsize > 1) {
8118 APInt ModuloMask(N1.getScalarValueSizeInBits(), Bitsize - 1);
8119 if (DAG.MaskedValueIsZero(N1, ModuloMask))
8120 return N0;
8121 }
8122
8123 // fold (rot x, c) -> (rot x, c % BitSize)
8124 bool OutOfRange = false;
8125 auto MatchOutOfRange = [Bitsize, &OutOfRange](ConstantSDNode *C) {
8126 OutOfRange |= C->getAPIntValue().uge(Bitsize);
8127 return true;
8128 };
8129 if (ISD::matchUnaryPredicate(N1, MatchOutOfRange) && OutOfRange) {
8130 EVT AmtVT = N1.getValueType();
8131 SDValue Bits = DAG.getConstant(Bitsize, dl, AmtVT);
8132 if (SDValue Amt =
8133 DAG.FoldConstantArithmetic(ISD::UREM, dl, AmtVT, {N1, Bits}))
8134 return DAG.getNode(N->getOpcode(), dl, VT, N0, Amt);
8135 }
8136
8137 // rot i16 X, 8 --> bswap X
8138 auto *RotAmtC = isConstOrConstSplat(N1);
8139 if (RotAmtC && RotAmtC->getAPIntValue() == 8 &&
8140 VT.getScalarSizeInBits() == 16 && hasOperation(ISD::BSWAP, VT))
8141 return DAG.getNode(ISD::BSWAP, dl, VT, N0);
8142
8143 // Simplify the operands using demanded-bits information.
8144 if (SimplifyDemandedBits(SDValue(N, 0)))
8145 return SDValue(N, 0);
8146
8147 // fold (rot* x, (trunc (and y, c))) -> (rot* x, (and (trunc y), (trunc c))).
8148 if (N1.getOpcode() == ISD::TRUNCATE &&
8149 N1.getOperand(0).getOpcode() == ISD::AND) {
8150 if (SDValue NewOp1 = distributeTruncateThroughAnd(N1.getNode()))
8151 return DAG.getNode(N->getOpcode(), dl, VT, N0, NewOp1);
8152 }
8153
8154 unsigned NextOp = N0.getOpcode();
8155 // fold (rot* (rot* x, c2), c1) -> (rot* x, c1 +- c2 % bitsize)
8156 if (NextOp == ISD::ROTL || NextOp == ISD::ROTR) {
8157 SDNode *C1 = DAG.isConstantIntBuildVectorOrConstantInt(N1);
8158 SDNode *C2 = DAG.isConstantIntBuildVectorOrConstantInt(N0.getOperand(1));
8159 if (C1 && C2 && C1->getValueType(0) == C2->getValueType(0)) {
8160 EVT ShiftVT = C1->getValueType(0);
8161 bool SameSide = (N->getOpcode() == NextOp);
8162 unsigned CombineOp = SameSide ? ISD::ADD : ISD::SUB;
8163 if (SDValue CombinedShift = DAG.FoldConstantArithmetic(
8164 CombineOp, dl, ShiftVT, {N1, N0.getOperand(1)})) {
8165 SDValue BitsizeC = DAG.getConstant(Bitsize, dl, ShiftVT);
8166 SDValue CombinedShiftNorm = DAG.FoldConstantArithmetic(
8167 ISD::SREM, dl, ShiftVT, {CombinedShift, BitsizeC});
8168 return DAG.getNode(N->getOpcode(), dl, VT, N0->getOperand(0),
8169 CombinedShiftNorm);
8170 }
8171 }
8172 }
8173 return SDValue();
8174}
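
A standalone illustration of two of the rotate folds above, using a hand-rolled 16-bit rotate; rotl16 is a stand-in for the ISD::ROTL node, not an LLVM API:

#include <cassert>
#include <cstdint>

static uint16_t rotl16(uint16_t v, unsigned s) {
  s &= 15; // the rotate amount is taken modulo the bit width
  return (uint16_t)((v << s) | (v >> ((16 - s) & 15)));
}

static void checkRotateFolds() {
  uint16_t x = 0xABCD;
  assert(rotl16(x, 16) == x); // (rot x, c) -> x iff c % BitSize == 0
  assert(rotl16(x, 8) == (uint16_t)((x >> 8) | (x << 8))); // rot i16 X, 8 == bswap
}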
8175
8176SDValue DAGCombiner::visitSHL(SDNode *N) {
8177 SDValue N0 = N->getOperand(0);
8178 SDValue N1 = N->getOperand(1);
8179 if (SDValue V = DAG.simplifyShift(N0, N1))
8180 return V;
8181
8182 EVT VT = N0.getValueType();
8183 EVT ShiftVT = N1.getValueType();
8184 unsigned OpSizeInBits = VT.getScalarSizeInBits();
8185
8186 // fold vector ops
8187 if (VT.isVector()) {
8188 if (SDValue FoldedVOp = SimplifyVBinOp(N))
8189 return FoldedVOp;
8190
8191 BuildVectorSDNode *N1CV = dyn_cast<BuildVectorSDNode>(N1);
8192 // If setcc produces all-one true value then:
8193 // (shl (and (setcc) N01CV) N1CV) -> (and (setcc) N01CV<<N1CV)
8194 if (N1CV && N1CV->isConstant()) {
8195 if (N0.getOpcode() == ISD::AND) {
8196 SDValue N00 = N0->getOperand(0);
8197 SDValue N01 = N0->getOperand(1);
8198 BuildVectorSDNode *N01CV = dyn_cast<BuildVectorSDNode>(N01);
8199
8200 if (N01CV && N01CV->isConstant() && N00.getOpcode() == ISD::SETCC &&
8201 TLI.getBooleanContents(N00.getOperand(0).getValueType()) ==
8202 TargetLowering::ZeroOrNegativeOneBooleanContent) {
8203 if (SDValue C =
8204 DAG.FoldConstantArithmetic(ISD::SHL, SDLoc(N), VT, {N01, N1}))
8205 return DAG.getNode(ISD::AND, SDLoc(N), VT, N00, C);
8206 }
8207 }
8208 }
8209 }
8210
8211 ConstantSDNode *N1C = isConstOrConstSplat(N1);
8212
8213 // fold (shl c1, c2) -> c1<<c2
8214 if (SDValue C = DAG.FoldConstantArithmetic(ISD::SHL, SDLoc(N), VT, {N0, N1}))
8215 return C;
8216
8217 if (SDValue NewSel = foldBinOpIntoSelect(N))
8218 return NewSel;
8219
8220 // if (shl x, c) is known to be zero, return 0
8221 if (DAG.MaskedValueIsZero(SDValue(N, 0),
8222 APInt::getAllOnesValue(OpSizeInBits)))
8223 return DAG.getConstant(0, SDLoc(N), VT);
8224
8225 // fold (shl x, (trunc (and y, c))) -> (shl x, (and (trunc y), (trunc c))).
8226 if (N1.getOpcode() == ISD::TRUNCATE &&
8227 N1.getOperand(0).getOpcode() == ISD::AND) {
8228 if (SDValue NewOp1 = distributeTruncateThroughAnd(N1.getNode()))
8229 return DAG.getNode(ISD::SHL, SDLoc(N), VT, N0, NewOp1);
8230 }
8231
8232 if (SimplifyDemandedBits(SDValue(N, 0)))
8233 return SDValue(N, 0);
8234
8235 // fold (shl (shl x, c1), c2) -> 0 or (shl x, (add c1, c2))
8236 if (N0.getOpcode() == ISD::SHL) {
8237 auto MatchOutOfRange = [OpSizeInBits](ConstantSDNode *LHS,
8238 ConstantSDNode *RHS) {
8239 APInt c1 = LHS->getAPIntValue();
8240 APInt c2 = RHS->getAPIntValue();
8241 zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */);
8242 return (c1 + c2).uge(OpSizeInBits);
8243 };
8244 if (ISD::matchBinaryPredicate(N1, N0.getOperand(1), MatchOutOfRange))
8245 return DAG.getConstant(0, SDLoc(N), VT);
8246
8247 auto MatchInRange = [OpSizeInBits](ConstantSDNode *LHS,
8248 ConstantSDNode *RHS) {
8249 APInt c1 = LHS->getAPIntValue();
8250 APInt c2 = RHS->getAPIntValue();
8251 zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */);
8252 return (c1 + c2).ult(OpSizeInBits);
8253 };
8254 if (ISD::matchBinaryPredicate(N1, N0.getOperand(1), MatchInRange)) {
8255 SDLoc DL(N);
8256 SDValue Sum = DAG.getNode(ISD::ADD, DL, ShiftVT, N1, N0.getOperand(1));
8257 return DAG.getNode(ISD::SHL, DL, VT, N0.getOperand(0), Sum);
8258 }
8259 }
8260
8261 // fold (shl (ext (shl x, c1)), c2) -> (shl (ext x), (add c1, c2))
8262 // For this to be valid, the second form must not preserve any of the bits
8263 // that are shifted out by the inner shift in the first form. This means
8264 // the outer shift size must be >= the number of bits added by the ext.
8265 // As a corollary, we don't care what kind of ext it is.
8266 if ((N0.getOpcode() == ISD::ZERO_EXTEND ||
8267 N0.getOpcode() == ISD::ANY_EXTEND ||
8268 N0.getOpcode() == ISD::SIGN_EXTEND) &&
8269 N0.getOperand(0).getOpcode() == ISD::SHL) {
8270 SDValue N0Op0 = N0.getOperand(0);
8271 SDValue InnerShiftAmt = N0Op0.getOperand(1);
8272 EVT InnerVT = N0Op0.getValueType();
8273 uint64_t InnerBitwidth = InnerVT.getScalarSizeInBits();
8274
8275 auto MatchOutOfRange = [OpSizeInBits, InnerBitwidth](ConstantSDNode *LHS,
8276 ConstantSDNode *RHS) {
8277 APInt c1 = LHS->getAPIntValue();
8278 APInt c2 = RHS->getAPIntValue();
8279 zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */);
8280 return c2.uge(OpSizeInBits - InnerBitwidth) &&
8281 (c1 + c2).uge(OpSizeInBits);
8282 };
8283 if (ISD::matchBinaryPredicate(InnerShiftAmt, N1, MatchOutOfRange,
8284 /*AllowUndefs*/ false,
8285 /*AllowTypeMismatch*/ true))
8286 return DAG.getConstant(0, SDLoc(N), VT);
8287
8288 auto MatchInRange = [OpSizeInBits, InnerBitwidth](ConstantSDNode *LHS,
8289 ConstantSDNode *RHS) {
8290 APInt c1 = LHS->getAPIntValue();
8291 APInt c2 = RHS->getAPIntValue();
8292 zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */);
8293 return c2.uge(OpSizeInBits - InnerBitwidth) &&
8294 (c1 + c2).ult(OpSizeInBits);
8295 };
8296 if (ISD::matchBinaryPredicate(InnerShiftAmt, N1, MatchInRange,
8297 /*AllowUndefs*/ false,
8298 /*AllowTypeMismatch*/ true)) {
8299 SDLoc DL(N);
8300 SDValue Ext = DAG.getNode(N0.getOpcode(), DL, VT, N0Op0.getOperand(0));
8301 SDValue Sum = DAG.getZExtOrTrunc(InnerShiftAmt, DL, ShiftVT);
8302 Sum = DAG.getNode(ISD::ADD, DL, ShiftVT, Sum, N1);
8303 return DAG.getNode(ISD::SHL, DL, VT, Ext, Sum);
8304 }
8305 }
8306
8307 // fold (shl (zext (srl x, C)), C) -> (zext (shl (srl x, C), C))
8308 // Only fold this if the inner zext has no other uses to avoid increasing
8309 // the total number of instructions.
8310 if (N0.getOpcode() == ISD::ZERO_EXTEND && N0.hasOneUse() &&
8311 N0.getOperand(0).getOpcode() == ISD::SRL) {
8312 SDValue N0Op0 = N0.getOperand(0);
8313 SDValue InnerShiftAmt = N0Op0.getOperand(1);
8314
8315 auto MatchEqual = [VT](ConstantSDNode *LHS, ConstantSDNode *RHS) {
8316 APInt c1 = LHS->getAPIntValue();
8317 APInt c2 = RHS->getAPIntValue();
8318 zeroExtendToMatch(c1, c2);
8319 return c1.ult(VT.getScalarSizeInBits()) && (c1 == c2);
8320 };
8321 if (ISD::matchBinaryPredicate(InnerShiftAmt, N1, MatchEqual,
8322 /*AllowUndefs*/ false,
8323 /*AllowTypeMismatch*/ true)) {
8324 SDLoc DL(N);
8325 EVT InnerShiftAmtVT = N0Op0.getOperand(1).getValueType();
8326 SDValue NewSHL = DAG.getZExtOrTrunc(N1, DL, InnerShiftAmtVT);
8327 NewSHL = DAG.getNode(ISD::SHL, DL, N0Op0.getValueType(), N0Op0, NewSHL);
8328 AddToWorklist(NewSHL.getNode());
8329 return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N0), VT, NewSHL);
8330 }
8331 }
8332
8333 // fold (shl (sr[la] exact X, C1), C2) -> (shl X, (C2-C1)) if C1 <= C2
8334 // fold (shl (sr[la] exact X, C1), C2) -> (sr[la] X, (C2-C1)) if C1 > C2
8335 // TODO - support non-uniform vector shift amounts.
8336 if (N1C && (N0.getOpcode() == ISD::SRL || N0.getOpcode() == ISD::SRA) &&
8337 N0->getFlags().hasExact()) {
8338 if (ConstantSDNode *N0C1 = isConstOrConstSplat(N0.getOperand(1))) {
8339 uint64_t C1 = N0C1->getZExtValue();
8340 uint64_t C2 = N1C->getZExtValue();
8341 SDLoc DL(N);
8342 if (C1 <= C2)
8343 return DAG.getNode(ISD::SHL, DL, VT, N0.getOperand(0),
8344 DAG.getConstant(C2 - C1, DL, ShiftVT));
8345 return DAG.getNode(N0.getOpcode(), DL, VT, N0.getOperand(0),
8346 DAG.getConstant(C1 - C2, DL, ShiftVT));
8347 }
8348 }
8349
8350 // fold (shl (srl x, c1), c2) -> (and (shl x, (sub c2, c1)), MASK) or
8351 // (and (srl x, (sub c1, c2)), MASK)
8352 // Only fold this if the inner shift has no other uses -- if it does, folding
8353 // this will increase the total number of instructions.
8354 // TODO - drop hasOneUse requirement if c1 == c2?
8355 // TODO - support non-uniform vector shift amounts.
8356 if (N1C && N0.getOpcode() == ISD::SRL && N0.hasOneUse() &&
8357 TLI.shouldFoldConstantShiftPairToMask(N, Level)) {
8358 if (ConstantSDNode *N0C1 = isConstOrConstSplat(N0.getOperand(1))) {
8359 if (N0C1->getAPIntValue().ult(OpSizeInBits)) {
8360 uint64_t c1 = N0C1->getZExtValue();
8361 uint64_t c2 = N1C->getZExtValue();
8362 APInt Mask = APInt::getHighBitsSet(OpSizeInBits, OpSizeInBits - c1);
8363 SDValue Shift;
8364 if (c2 > c1) {
8365 Mask <<= c2 - c1;
8366 SDLoc DL(N);
8367 Shift = DAG.getNode(ISD::SHL, DL, VT, N0.getOperand(0),
8368 DAG.getConstant(c2 - c1, DL, ShiftVT));
8369 } else {
8370 Mask.lshrInPlace(c1 - c2);
8371 SDLoc DL(N);
8372 Shift = DAG.getNode(ISD::SRL, DL, VT, N0.getOperand(0),
8373 DAG.getConstant(c1 - c2, DL, ShiftVT));
8374 }
8375 SDLoc DL(N0);
8376 return DAG.getNode(ISD::AND, DL, VT, Shift,
8377 DAG.getConstant(Mask, DL, VT));
8378 }
8379 }
8380 }
8381
8382 // fold (shl (sra x, c1), c1) -> (and x, (shl -1, c1))
8383 if (N0.getOpcode() == ISD::SRA && N1 == N0.getOperand(1) &&
8384 isConstantOrConstantVector(N1, /* No Opaques */ true)) {
8385 SDLoc DL(N);
8386 SDValue AllBits = DAG.getAllOnesConstant(DL, VT);
8387 SDValue HiBitsMask = DAG.getNode(ISD::SHL, DL, VT, AllBits, N1);
8388 return DAG.getNode(ISD::AND, DL, VT, N0.getOperand(0), HiBitsMask);
8389 }
8390
8391 // fold (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
8392 // fold (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
8393 // A variant of the version done on multiply, except that a mul by a power
8394 // of 2 is turned into a shift.
8395 if ((N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR) &&
8396 N0.getNode()->hasOneUse() &&
8397 isConstantOrConstantVector(N1, /* No Opaques */ true) &&
8398 isConstantOrConstantVector(N0.getOperand(1), /* No Opaques */ true) &&
8399 TLI.isDesirableToCommuteWithShift(N, Level)) {
8400 SDValue Shl0 = DAG.getNode(ISD::SHL, SDLoc(N0), VT, N0.getOperand(0), N1);
8401 SDValue Shl1 = DAG.getNode(ISD::SHL, SDLoc(N1), VT, N0.getOperand(1), N1);
8402 AddToWorklist(Shl0.getNode());
8403 AddToWorklist(Shl1.getNode());
8404 return DAG.getNode(N0.getOpcode(), SDLoc(N), VT, Shl0, Shl1);
8405 }
8406
8407 // fold (shl (mul x, c1), c2) -> (mul x, c1 << c2)
8408 if (N0.getOpcode() == ISD::MUL && N0.getNode()->hasOneUse() &&
8409 isConstantOrConstantVector(N1, /* No Opaques */ true) &&
8410 isConstantOrConstantVector(N0.getOperand(1), /* No Opaques */ true)) {
8411 SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(N1), VT, N0.getOperand(1), N1);
8412 if (isConstantOrConstantVector(Shl))
8413 return DAG.getNode(ISD::MUL, SDLoc(N), VT, N0.getOperand(0), Shl);
8414 }
8415
8416 if (N1C && !N1C->isOpaque())
8417 if (SDValue NewSHL = visitShiftByConstant(N))
8418 return NewSHL;
8419
8420 // Fold (shl (vscale * C0), C1) to (vscale * (C0 << C1)).
8421 if (N0.getOpcode() == ISD::VSCALE)
8422 if (ConstantSDNode *NC1 = isConstOrConstSplat(N->getOperand(1))) {
8423 const APInt &C0 = N0.getConstantOperandAPInt(0);
8424 const APInt &C1 = NC1->getAPIntValue();
8425 return DAG.getVScale(SDLoc(N), VT, C0 << C1);
8426 }
8427
8428 // Fold (shl step_vector(C0), C1) to (step_vector(C0 << C1)).
8429 APInt ShlVal;
8430 if (N0.getOpcode() == ISD::STEP_VECTOR)
8431 if (ISD::isConstantSplatVector(N1.getNode(), ShlVal)) {
8432 const APInt &C0 = N0.getConstantOperandAPInt(0);
8433 EVT SVT = N0.getOperand(0).getValueType();
8434 SDValue NewStep = DAG.getConstant(
8435 C0 << ShlVal.sextOrTrunc(SVT.getSizeInBits()), SDLoc(N), SVT);
8436 return DAG.getStepVector(SDLoc(N), VT, NewStep);
8437 }
8438
8439 return SDValue();
8440}
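
A scalar sketch of the shl-of-shl combine near the top of visitSHL (constants chosen for illustration): the in-range case merges into one shift, and the out-of-range case is folded to the constant 0 rather than an oversized shift:

#include <cassert>
#include <cstdint>

static void checkShlOfShl() {
  uint32_t x = 0x0000ABCD;
  unsigned c1 = 4, c2 = 8;
  // fold (shl (shl x, c1), c2) -> (shl x, (add c1, c2)) while c1 + c2 < 32;
  // once c1 + c2 >= 32 every bit is shifted out and the result is 0.
  assert(((x << c1) << c2) == (x << (c1 + c2)));
}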
8441
8442// Transform a right shift of a multiply into a multiply-high.
8443// Examples:
8444// (srl (mul (zext i32:$a to i64), (zext i32:$b to i64)), 32) -> (mulhu $a, $b)
8445// (sra (mul (sext i32:$a to i64), (sext i32:$b to i64)), 32) -> (mulhs $a, $b)
8446static SDValue combineShiftToMULH(SDNode *N, SelectionDAG &DAG,
8447 const TargetLowering &TLI) {
8448 assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) &&
8449        "SRL or SRA node is required here!");
8450
8451 // Check the shift amount. Proceed with the transformation if the shift
8452 // amount is constant.
8453 ConstantSDNode *ShiftAmtSrc = isConstOrConstSplat(N->getOperand(1));
8454 if (!ShiftAmtSrc)
8455 return SDValue();
8456
8457 SDLoc DL(N);
8458
8459 // The operation feeding into the shift must be a multiply.
8460 SDValue ShiftOperand = N->getOperand(0);
8461 if (ShiftOperand.getOpcode() != ISD::MUL)
8462 return SDValue();
8463
8464 // Both operands must be equivalent extend nodes.
8465 SDValue LeftOp = ShiftOperand.getOperand(0);
8466 SDValue RightOp = ShiftOperand.getOperand(1);
8467 bool IsSignExt = LeftOp.getOpcode() == ISD::SIGN_EXTEND;
8468 bool IsZeroExt = LeftOp.getOpcode() == ISD::ZERO_EXTEND;
8469
8470 if ((!(IsSignExt || IsZeroExt)) || LeftOp.getOpcode() != RightOp.getOpcode())
8471 return SDValue();
8472
8473 EVT WideVT1 = LeftOp.getValueType();
8474 EVT WideVT2 = RightOp.getValueType();
8475 (void)WideVT2;
8476 // Proceed with the transformation if the wide types match.
8477 assert((WideVT1 == WideVT2) &&
8478        "Cannot have a multiply node with two different operand types.");
8479
8480 EVT NarrowVT = LeftOp.getOperand(0).getValueType();
8481 // Check that the two extend nodes are the same type.
8482 if (NarrowVT != RightOp.getOperand(0).getValueType())
8483 return SDValue();
8484
8485 // Proceed with the transformation if the wide type is twice as large
8486 // as the narrow type.
8487 unsigned NarrowVTSize = NarrowVT.getScalarSizeInBits();
8488 if (WideVT1.getScalarSizeInBits() != 2 * NarrowVTSize)
8489 return SDValue();
8490
8491 // Check the shift amount with the narrow type size.
8492 // Proceed with the transformation if the shift amount is the width
8493 // of the narrow type.
8494 unsigned ShiftAmt = ShiftAmtSrc->getZExtValue();
8495 if (ShiftAmt != NarrowVTSize)
8496 return SDValue();
8497
8498 // If the operation feeding into the MUL is a sign extend (sext),
8499 // we use mulhs. Otherwise, zero extends (zext) use mulhu.
8500 unsigned MulhOpcode = IsSignExt ? ISD::MULHS : ISD::MULHU;
8501
8502 // Combine to mulh if mulh is legal/custom for the narrow type on the target.
8503 if (!TLI.isOperationLegalOrCustom(MulhOpcode, NarrowVT))
8504 return SDValue();
8505
8506 SDValue Result = DAG.getNode(MulhOpcode, DL, NarrowVT, LeftOp.getOperand(0),
8507 RightOp.getOperand(0));
8508 return (N->getOpcode() == ISD::SRA ? DAG.getSExtOrTrunc(Result, DL, WideVT1)
8509 : DAG.getZExtOrTrunc(Result, DL, WideVT1));
8510}
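
A standalone scalar model of the pattern this function matches; mulhu32 below is an illustrative stand-in for the ISD::MULHU node, not an LLVM API:

#include <cassert>
#include <cstdint>

static uint32_t mulhu32(uint32_t a, uint32_t b) {
  return (uint32_t)(((uint64_t)a * b) >> 32); // high half of the full product
}

static void checkShiftToMulh() {
  uint32_t a = 0x89ABCDEF, b = 0x12345678;
  // (srl (mul (zext a), (zext b)), 32) ...
  uint64_t pattern = ((uint64_t)a * (uint64_t)b) >> 32;
  // ... -> (zext (mulhu a, b)): the shifted wide product fits the narrow type.
  assert(pattern == (uint64_t)mulhu32(a, b));
}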
8511
8512SDValue DAGCombiner::visitSRA(SDNode *N) {
8513 SDValue N0 = N->getOperand(0);
8514 SDValue N1 = N->getOperand(1);
8515 if (SDValue V = DAG.simplifyShift(N0, N1))
8516 return V;
8517
8518 EVT VT = N0.getValueType();
8519 unsigned OpSizeInBits = VT.getScalarSizeInBits();
8520
8521 // Arithmetic shifting an all-sign-bit value is a no-op.
8522 // fold (sra 0, x) -> 0
8523 // fold (sra -1, x) -> -1
8524 if (DAG.ComputeNumSignBits(N0) == OpSizeInBits)
8525 return N0;
8526
8527 // fold vector ops
8528 if (VT.isVector())
8529 if (SDValue FoldedVOp = SimplifyVBinOp(N))
8530 return FoldedVOp;
8531
8532 ConstantSDNode *N1C = isConstOrConstSplat(N1);
8533
8534 // fold (sra c1, c2) -> c1 >>s c2
8535 if (SDValue C = DAG.FoldConstantArithmetic(ISD::SRA, SDLoc(N), VT, {N0, N1}))
8536 return C;
8537
8538 if (SDValue NewSel = foldBinOpIntoSelect(N))
8539 return NewSel;
8540
8541 // fold (sra (shl x, c1), c1) -> sext_inreg for some c1 and target supports
8542 // sext_inreg.
8543 if (N1C && N0.getOpcode() == ISD::SHL && N1 == N0.getOperand(1)) {
8544 unsigned LowBits = OpSizeInBits - (unsigned)N1C->getZExtValue();
8545 EVT ExtVT = EVT::getIntegerVT(*DAG.getContext(), LowBits);
8546 if (VT.isVector())
8547 ExtVT = EVT::getVectorVT(*DAG.getContext(), ExtVT,
8548 VT.getVectorElementCount());
8549 if (!LegalOperations ||
8550 TLI.getOperationAction(ISD::SIGN_EXTEND_INREG, ExtVT) ==
8551 TargetLowering::Legal)
8552 return DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), VT,
8553 N0.getOperand(0), DAG.getValueType(ExtVT));
8554 // Even if we can't convert to sext_inreg, we might be able to remove
8555 // this shift pair if the input is already sign extended.
8556 if (DAG.ComputeNumSignBits(N0.getOperand(0)) > N1C->getZExtValue())
8557 return N0.getOperand(0);
8558 }
8559
8560 // fold (sra (sra x, c1), c2) -> (sra x, (add c1, c2))
8561 // clamp (add c1, c2) to max shift.
8562 if (N0.getOpcode() == ISD::SRA) {
8563 SDLoc DL(N);
8564 EVT ShiftVT = N1.getValueType();
8565 EVT ShiftSVT = ShiftVT.getScalarType();
8566 SmallVector<SDValue, 16> ShiftValues;
8567
8568 auto SumOfShifts = [&](ConstantSDNode *LHS, ConstantSDNode *RHS) {
8569 APInt c1 = LHS->getAPIntValue();
8570 APInt c2 = RHS->getAPIntValue();
8571 zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */);
8572 APInt Sum = c1 + c2;
8573 unsigned ShiftSum =
8574 Sum.uge(OpSizeInBits) ? (OpSizeInBits - 1) : Sum.getZExtValue();
8575 ShiftValues.push_back(DAG.getConstant(ShiftSum, DL, ShiftSVT));
8576 return true;
8577 };
8578 if (ISD::matchBinaryPredicate(N1, N0.getOperand(1), SumOfShifts)) {
8579 SDValue ShiftValue;
8580 if (VT.isVector())
8581 ShiftValue = DAG.getBuildVector(ShiftVT, DL, ShiftValues);
8582 else
8583 ShiftValue = ShiftValues[0];
8584 return DAG.getNode(ISD::SRA, DL, VT, N0.getOperand(0), ShiftValue);
8585 }
8586 }
8587
8588 // fold (sra (shl X, m), (sub result_size, n))
8589 // -> (sign_extend (trunc (shl X, (sub (sub result_size, n), m)))) for
8590 // result_size - n != m.
8591 // If truncate is free for the target, sext(shl) is likely to result in better
8592 // code.
8593 if (N0.getOpcode() == ISD::SHL && N1C) {
8594 // Get the two constants of the shifts, CN0 = m, CN = n.
8595 const ConstantSDNode *N01C = isConstOrConstSplat(N0.getOperand(1));
8596 if (N01C) {
8597 LLVMContext &Ctx = *DAG.getContext();
8598 // Determine what the truncate's result bitsize and type would be.
8599 EVT TruncVT = EVT::getIntegerVT(Ctx, OpSizeInBits - N1C->getZExtValue());
8600
8601 if (VT.isVector())
8602 TruncVT = EVT::getVectorVT(Ctx, TruncVT, VT.getVectorElementCount());
8603
8604 // Determine the residual right-shift amount.
8605 int ShiftAmt = N1C->getZExtValue() - N01C->getZExtValue();
8606
8607 // If the shift is not a no-op (in which case this should be just a sign
8608 // extend already), the truncated-to type is legal, sign_extend is legal
8609 // on that type, and the truncate to that type is both legal and free,
8610 // perform the transform.
8611 if ((ShiftAmt > 0) &&
8612 TLI.isOperationLegalOrCustom(ISD::SIGN_EXTEND, TruncVT) &&
8613 TLI.isOperationLegalOrCustom(ISD::TRUNCATE, VT) &&
8614 TLI.isTruncateFree(VT, TruncVT)) {
8615 SDLoc DL(N);
8616 SDValue Amt = DAG.getConstant(ShiftAmt, DL,
8617 getShiftAmountTy(N0.getOperand(0).getValueType()));
8618 SDValue Shift = DAG.getNode(ISD::SRL, DL, VT,
8619 N0.getOperand(0), Amt);
8620 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, TruncVT,
8621 Shift);
8622 return DAG.getNode(ISD::SIGN_EXTEND, DL,
8623 N->getValueType(0), Trunc);
8624 }
8625 }
8626 }
8627
8628 // We convert trunc/ext to opposing shifts in IR, but casts may be cheaper.
8629 // sra (add (shl X, N1C), AddC), N1C -->
8630 // sext (add (trunc X to (width - N1C)), AddC')
8631 if (N0.getOpcode() == ISD::ADD && N0.hasOneUse() && N1C &&
8632 N0.getOperand(0).getOpcode() == ISD::SHL &&
8633 N0.getOperand(0).getOperand(1) == N1 && N0.getOperand(0).hasOneUse()) {
8634 if (ConstantSDNode *AddC = isConstOrConstSplat(N0.getOperand(1))) {
8635 SDValue Shl = N0.getOperand(0);
8636 // Determine what the truncate's type would be and ask the target if that
8637 // is a free operation.
8638 LLVMContext &Ctx = *DAG.getContext();
8639 unsigned ShiftAmt = N1C->getZExtValue();
8640 EVT TruncVT = EVT::getIntegerVT(Ctx, OpSizeInBits - ShiftAmt);
8641 if (VT.isVector())
8642 TruncVT = EVT::getVectorVT(Ctx, TruncVT, VT.getVectorElementCount());
8643
8644 // TODO: The simple type check probably belongs in the default hook
8645 // implementation and/or target-specific overrides (because
8646 // non-simple types likely require masking when legalized), but that
8647 // restriction may conflict with other transforms.
8648 if (TruncVT.isSimple() && isTypeLegal(TruncVT) &&
8649 TLI.isTruncateFree(VT, TruncVT)) {
8650 SDLoc DL(N);
8651 SDValue Trunc = DAG.getZExtOrTrunc(Shl.getOperand(0), DL, TruncVT);
8652 SDValue ShiftC = DAG.getConstant(AddC->getAPIntValue().lshr(ShiftAmt).
8653 trunc(TruncVT.getScalarSizeInBits()), DL, TruncVT);
8654 SDValue Add = DAG.getNode(ISD::ADD, DL, TruncVT, Trunc, ShiftC);
8655 return DAG.getSExtOrTrunc(Add, DL, VT);
8656 }
8657 }
8658 }
8659
8660 // fold (sra x, (trunc (and y, c))) -> (sra x, (and (trunc y), (trunc c))).
8661 if (N1.getOpcode() == ISD::TRUNCATE &&
8662 N1.getOperand(0).getOpcode() == ISD::AND) {
8663 if (SDValue NewOp1 = distributeTruncateThroughAnd(N1.getNode()))
8664 return DAG.getNode(ISD::SRA, SDLoc(N), VT, N0, NewOp1);
8665 }
8666
8667 // fold (sra (trunc (sra x, c1)), c2) -> (trunc (sra x, c1 + c2))
8668 // fold (sra (trunc (srl x, c1)), c2) -> (trunc (sra x, c1 + c2))
8669 // if c1 is equal to the number of bits the trunc removes
8670 // TODO - support non-uniform vector shift amounts.
8671 if (N0.getOpcode() == ISD::TRUNCATE &&
8672 (N0.getOperand(0).getOpcode() == ISD::SRL ||
8673 N0.getOperand(0).getOpcode() == ISD::SRA) &&
8674 N0.getOperand(0).hasOneUse() &&
8675 N0.getOperand(0).getOperand(1).hasOneUse() && N1C) {
8676 SDValue N0Op0 = N0.getOperand(0);
8677 if (ConstantSDNode *LargeShift = isConstOrConstSplat(N0Op0.getOperand(1))) {
8678 EVT LargeVT = N0Op0.getValueType();
8679 unsigned TruncBits = LargeVT.getScalarSizeInBits() - OpSizeInBits;
8680 if (LargeShift->getAPIntValue() == TruncBits) {
8681 SDLoc DL(N);
8682 SDValue Amt = DAG.getConstant(N1C->getZExtValue() + TruncBits, DL,
8683 getShiftAmountTy(LargeVT));
8684 SDValue SRA =
8685 DAG.getNode(ISD::SRA, DL, LargeVT, N0Op0.getOperand(0), Amt);
8686 return DAG.getNode(ISD::TRUNCATE, DL, VT, SRA);
8687 }
8688 }
8689 }
8690
8691 // Simplify, based on bits shifted out of the LHS.
8692 if (SimplifyDemandedBits(SDValue(N, 0)))
8693 return SDValue(N, 0);
8694
8695 // If the sign bit is known to be zero, switch this to a SRL.
8696 if (DAG.SignBitIsZero(N0))
8697 return DAG.getNode(ISD::SRL, SDLoc(N), VT, N0, N1);
8698
8699 if (N1C && !N1C->isOpaque())
8700 if (SDValue NewSRA = visitShiftByConstant(N))
8701 return NewSRA;
8702
8703 // Try to transform this shift into a multiply-high if
8704 // it matches the appropriate pattern detected in combineShiftToMULH.
8705 if (SDValue MULH = combineShiftToMULH(N, DAG, TLI))
8706 return MULH;
8707
8708 return SDValue();
8709}
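
The sext_inreg fold near the top of visitSRA corresponds to the familiar shl/sra sign-extension idiom. A standalone check, assuming arithmetic right shift on signed types as on all mainstream targets:

#include <cassert>
#include <cstdint>

static void checkShlSraIsSignExtendInReg() {
  uint32_t v = 0xABCDEF80;
  // (sra (shl x, 24), 24) sign-extends the low 8 bits of x in place,
  // which is sign_extend_inreg with an i8 extension type.
  int32_t viaShifts = (int32_t)(v << 24) >> 24;
  assert(viaShifts == (int32_t)(int8_t)(uint8_t)v);
}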
8710
8711SDValue DAGCombiner::visitSRL(SDNode *N) {
8712 SDValue N0 = N->getOperand(0);
8713 SDValue N1 = N->getOperand(1);
8714 if (SDValue V = DAG.simplifyShift(N0, N1))
8715 return V;
8716
8717 EVT VT = N0.getValueType();
8718 unsigned OpSizeInBits = VT.getScalarSizeInBits();
8719
8720 // fold vector ops
8721 if (VT.isVector())
8722 if (SDValue FoldedVOp = SimplifyVBinOp(N))
8723 return FoldedVOp;
8724
8725 ConstantSDNode *N1C = isConstOrConstSplat(N1);
8726
8727 // fold (srl c1, c2) -> c1 >>u c2
8728 if (SDValue C = DAG.FoldConstantArithmetic(ISD::SRL, SDLoc(N), VT, {N0, N1}))
8729 return C;
8730
8731 if (SDValue NewSel = foldBinOpIntoSelect(N))
8732 return NewSel;
8733
8734 // if (srl x, c) is known to be zero, return 0
8735 if (N1C && DAG.MaskedValueIsZero(SDValue(N, 0),
8736 APInt::getAllOnesValue(OpSizeInBits)))
8737 return DAG.getConstant(0, SDLoc(N), VT);
8738
8739 // fold (srl (srl x, c1), c2) -> 0 or (srl x, (add c1, c2))
8740 if (N0.getOpcode() == ISD::SRL) {
8741 auto MatchOutOfRange = [OpSizeInBits](ConstantSDNode *LHS,
8742 ConstantSDNode *RHS) {
8743 APInt c1 = LHS->getAPIntValue();
8744 APInt c2 = RHS->getAPIntValue();
8745 zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */);
8746 return (c1 + c2).uge(OpSizeInBits);
8747 };
8748 if (ISD::matchBinaryPredicate(N1, N0.getOperand(1), MatchOutOfRange))
8749 return DAG.getConstant(0, SDLoc(N), VT);
8750
8751 auto MatchInRange = [OpSizeInBits](ConstantSDNode *LHS,
8752 ConstantSDNode *RHS) {
8753 APInt c1 = LHS->getAPIntValue();
8754 APInt c2 = RHS->getAPIntValue();
8755 zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */);
8756 return (c1 + c2).ult(OpSizeInBits);
8757 };
8758 if (ISD::matchBinaryPredicate(N1, N0.getOperand(1), MatchInRange)) {
8759 SDLoc DL(N);
8760 EVT ShiftVT = N1.getValueType();
8761 SDValue Sum = DAG.getNode(ISD::ADD, DL, ShiftVT, N1, N0.getOperand(1));
8762 return DAG.getNode(ISD::SRL, DL, VT, N0.getOperand(0), Sum);
8763 }
8764 }
8765
8766 if (N1C && N0.getOpcode() == ISD::TRUNCATE &&
8767 N0.getOperand(0).getOpcode() == ISD::SRL) {
8768 SDValue InnerShift = N0.getOperand(0);
8769 // TODO - support non-uniform vector shift amounts.
8770 if (auto *N001C = isConstOrConstSplat(InnerShift.getOperand(1))) {
8771 uint64_t c1 = N001C->getZExtValue();
8772 uint64_t c2 = N1C->getZExtValue();
8773 EVT InnerShiftVT = InnerShift.getValueType();
8774 EVT ShiftAmtVT = InnerShift.getOperand(1).getValueType();
8775 uint64_t InnerShiftSize = InnerShiftVT.getScalarSizeInBits();
8776 // srl (trunc (srl x, c1)), c2 --> 0 or (trunc (srl x, (add c1, c2)))
8777 // This is only valid if the OpSizeInBits + c1 = size of inner shift.
8778 if (c1 + OpSizeInBits == InnerShiftSize) {
8779 SDLoc DL(N);
8780 if (c1 + c2 >= InnerShiftSize)
8781 return DAG.getConstant(0, DL, VT);
8782 SDValue NewShiftAmt = DAG.getConstant(c1 + c2, DL, ShiftAmtVT);
8783 SDValue NewShift = DAG.getNode(ISD::SRL, DL, InnerShiftVT,
8784 InnerShift.getOperand(0), NewShiftAmt);
8785 return DAG.getNode(ISD::TRUNCATE, DL, VT, NewShift);
8786 }
8787 // In the more general case, we can clear the high bits after the shift:
8788 // srl (trunc (srl x, c1)), c2 --> trunc (and (srl x, (c1+c2)), Mask)
8789 if (N0.hasOneUse() && InnerShift.hasOneUse() &&
8790 c1 + c2 < InnerShiftSize) {
8791 SDLoc DL(N);
8792 SDValue NewShiftAmt = DAG.getConstant(c1 + c2, DL, ShiftAmtVT);
8793 SDValue NewShift = DAG.getNode(ISD::SRL, DL, InnerShiftVT,
8794 InnerShift.getOperand(0), NewShiftAmt);
8795 SDValue Mask = DAG.getConstant(APInt::getLowBitsSet(InnerShiftSize,
8796 OpSizeInBits - c2),
8797 DL, InnerShiftVT);
8798 SDValue And = DAG.getNode(ISD::AND, DL, InnerShiftVT, NewShift, Mask);
8799 return DAG.getNode(ISD::TRUNCATE, DL, VT, And);
8800 }
8801 }
8802 }
8803
8804 // fold (srl (shl x, c), c) -> (and x, cst2)
8805 // TODO - (srl (shl x, c1), c2).
8806 if (N0.getOpcode() == ISD::SHL && N0.getOperand(1) == N1 &&
8807 isConstantOrConstantVector(N1, /* NoOpaques */ true)) {
8808 SDLoc DL(N);
8809 SDValue Mask =
8810 DAG.getNode(ISD::SRL, DL, VT, DAG.getAllOnesConstant(DL, VT), N1);
8811 AddToWorklist(Mask.getNode());
8812 return DAG.getNode(ISD::AND, DL, VT, N0.getOperand(0), Mask);
8813 }
8814
8815 // fold (srl (anyextend x), c) -> (and (anyextend (srl x, c)), mask)
8816 // TODO - support non-uniform vector shift amounts.
8817 if (N1C && N0.getOpcode() == ISD::ANY_EXTEND) {
8818 // Shifting in all undef bits?
8819 EVT SmallVT = N0.getOperand(0).getValueType();
8820 unsigned BitSize = SmallVT.getScalarSizeInBits();
8821 if (N1C->getAPIntValue().uge(BitSize))
8822 return DAG.getUNDEF(VT);
8823
8824 if (!LegalTypes || TLI.isTypeDesirableForOp(ISD::SRL, SmallVT)) {
8825 uint64_t ShiftAmt = N1C->getZExtValue();
8826 SDLoc DL0(N0);
8827 SDValue SmallShift = DAG.getNode(ISD::SRL, DL0, SmallVT,
8828 N0.getOperand(0),
8829 DAG.getConstant(ShiftAmt, DL0,
8830 getShiftAmountTy(SmallVT)));
8831 AddToWorklist(SmallShift.getNode());
8832 APInt Mask = APInt::getLowBitsSet(OpSizeInBits, OpSizeInBits - ShiftAmt);
8833 SDLoc DL(N);
8834 return DAG.getNode(ISD::AND, DL, VT,
8835 DAG.getNode(ISD::ANY_EXTEND, DL, VT, SmallShift),
8836 DAG.getConstant(Mask, DL, VT));
8837 }
8838 }
8839
8840 // fold (srl (sra X, Y), 31) -> (srl X, 31). This srl only looks at the sign
8841 // bit, which is unmodified by sra.
8842 if (N1C && N1C->getAPIntValue() == (OpSizeInBits - 1)) {
8843 if (N0.getOpcode() == ISD::SRA)
8844 return DAG.getNode(ISD::SRL, SDLoc(N), VT, N0.getOperand(0), N1);
8845 }
8846
8847 // fold (srl (ctlz x), "5") -> x iff x has one bit set (the low bit).
8848 if (N1C && N0.getOpcode() == ISD::CTLZ &&
8849 N1C->getAPIntValue() == Log2_32(OpSizeInBits)) {
8850 KnownBits Known = DAG.computeKnownBits(N0.getOperand(0));
8851
8852 // If any of the input bits are KnownOne, then the input couldn't be all
8853 // zeros, thus the result of the srl will always be zero.
8854 if (Known.One.getBoolValue()) return DAG.getConstant(0, SDLoc(N0), VT);
8855
8856 // If all of the bits input to the ctlz node are known to be zero, then
8857 // the result of the ctlz is "32" and the result of the shift is one.
8858 APInt UnknownBits = ~Known.Zero;
8859 if (UnknownBits == 0) return DAG.getConstant(1, SDLoc(N0), VT);
8860
8861 // Otherwise, check to see if there is exactly one bit input to the ctlz.
8862 if (UnknownBits.isPowerOf2()) {
8863 // Okay, we know that only the single bit specified by UnknownBits
8864 // could be set on input to the CTLZ node. If this bit is set, the SRL
8865 // will return 0, if it is clear, it returns 1. Change the CTLZ/SRL pair
8866 // to an SRL/XOR pair, which is likely to simplify more.
8867 unsigned ShAmt = UnknownBits.countTrailingZeros();
8868 SDValue Op = N0.getOperand(0);
8869
8870 if (ShAmt) {
8871 SDLoc DL(N0);
8872 Op = DAG.getNode(ISD::SRL, DL, VT, Op,
8873 DAG.getConstant(ShAmt, DL,
8874 getShiftAmountTy(Op.getValueType())));
8875 AddToWorklist(Op.getNode());
8876 }
8877
8878 SDLoc DL(N);
8879 return DAG.getNode(ISD::XOR, DL, VT,
8880 Op, DAG.getConstant(1, DL, VT));
8881 }
8882 }
8883
8884 // fold (srl x, (trunc (and y, c))) -> (srl x, (and (trunc y), (trunc c))).
8885 if (N1.getOpcode() == ISD::TRUNCATE &&
8886 N1.getOperand(0).getOpcode() == ISD::AND) {
8887 if (SDValue NewOp1 = distributeTruncateThroughAnd(N1.getNode()))
8888 return DAG.getNode(ISD::SRL, SDLoc(N), VT, N0, NewOp1);
8889 }
8890
8891 // fold operands of srl based on knowledge that the low bits are not
8892 // demanded.
8893 if (SimplifyDemandedBits(SDValue(N, 0)))
8894 return SDValue(N, 0);
8895
8896 if (N1C && !N1C->isOpaque())
8897 if (SDValue NewSRL = visitShiftByConstant(N))
8898 return NewSRL;
8899
8900 // Attempt to convert a srl of a load into a narrower zero-extending load.
8901 if (SDValue NarrowLoad = ReduceLoadWidth(N))
8902 return NarrowLoad;
8903
8904 // Here is a common situation. We want to optimize:
8905 //
8906 // %a = ...
8907 // %b = and i32 %a, 2
8908 // %c = srl i32 %b, 1
8909 // brcond i32 %c ...
8910 //
8911 // into
8912 //
8913 // %a = ...
8914 // %b = and %a, 2
8915 // %c = setcc eq %b, 0
8916 // brcond %c ...
8917 //
8918 // However, after the source operand of the SRL is optimized into an AND, the
8919 // SRL itself may not be optimized further. Look for it and add the BRCOND to
8920 // the worklist.
8921 if (N->hasOneUse()) {
8922 SDNode *Use = *N->use_begin();
8923 if (Use->getOpcode() == ISD::BRCOND)
8924 AddToWorklist(Use);
8925 else if (Use->getOpcode() == ISD::TRUNCATE && Use->hasOneUse()) {
8926 // Also look past the truncate.
8927 Use = *Use->use_begin();
8928 if (Use->getOpcode() == ISD::BRCOND)
8929 AddToWorklist(Use);
8930 }
8931 }
8932
8933 // Try to transform this shift into a multiply-high if
8934 // it matches the appropriate pattern detected in combineShiftToMULH.
8935 if (SDValue MULH = combineShiftToMULH(N, DAG, TLI))
8936 return MULH;
8937
8938 return SDValue();
8939}
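
The CTLZ/SRL-to-SRL/XOR rewrite above is easy to check when only one input bit can possibly be set. A standalone sketch; __builtin_clz is a compiler builtin standing in for ISD::CTLZ, and the zero case is handled separately since the builtin is undefined there:

#include <cassert>
#include <cstdint>
#include <initializer_list>

static void checkCtlzSrlTrick() {
  const unsigned k = 3; // the single bit position that may be set
  for (uint32_t x : {0u, 1u << k}) {
    uint32_t viaCtlz = (x ? (uint32_t)__builtin_clz(x) : 32u) >> 5;
    uint32_t viaXor = (x >> k) ^ 1; // the simplified SRL/XOR pair
    assert(viaCtlz == viaXor);
  }
}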
8940
8941SDValue DAGCombiner::visitFunnelShift(SDNode *N) {
8942 EVT VT = N->getValueType(0);
8943 SDValue N0 = N->getOperand(0);
8944 SDValue N1 = N->getOperand(1);
8945 SDValue N2 = N->getOperand(2);
8946 bool IsFSHL = N->getOpcode() == ISD::FSHL;
8947 unsigned BitWidth = VT.getScalarSizeInBits();
8948
8949 // fold (fshl N0, N1, 0) -> N0
8950 // fold (fshr N0, N1, 0) -> N1
8951 if (isPowerOf2_32(BitWidth))
8952 if (DAG.MaskedValueIsZero(
8953 N2, APInt(N2.getScalarValueSizeInBits(), BitWidth - 1)))
8954 return IsFSHL ? N0 : N1;
8955
8956 auto IsUndefOrZero = [](SDValue V) {
8957 return V.isUndef() || isNullOrNullSplat(V, /*AllowUndefs*/ true);
8958 };
8959
8960 // TODO - support non-uniform vector shift amounts.
8961 if (ConstantSDNode *Cst = isConstOrConstSplat(N2)) {
8962 EVT ShAmtTy = N2.getValueType();
8963
8964 // fold (fsh* N0, N1, c) -> (fsh* N0, N1, c % BitWidth)
8965 if (Cst->getAPIntValue().uge(BitWidth)) {
8966 uint64_t RotAmt = Cst->getAPIntValue().urem(BitWidth);
8967 return DAG.getNode(N->getOpcode(), SDLoc(N), VT, N0, N1,
8968 DAG.getConstant(RotAmt, SDLoc(N), ShAmtTy));
8969 }
8970
8971 unsigned ShAmt = Cst->getZExtValue();
8972 if (ShAmt == 0)
8973 return IsFSHL ? N0 : N1;
8974
8975 // fold fshl(undef_or_zero, N1, C) -> lshr(N1, BW-C)
8976 // fold fshr(undef_or_zero, N1, C) -> lshr(N1, C)
8977 // fold fshl(N0, undef_or_zero, C) -> shl(N0, C)
8978 // fold fshr(N0, undef_or_zero, C) -> shl(N0, BW-C)
8979 if (IsUndefOrZero(N0))
8980 return DAG.getNode(ISD::SRL, SDLoc(N), VT, N1,
8981 DAG.getConstant(IsFSHL ? BitWidth - ShAmt : ShAmt,
8982 SDLoc(N), ShAmtTy));
8983 if (IsUndefOrZero(N1))
8984 return DAG.getNode(ISD::SHL, SDLoc(N), VT, N0,
8985 DAG.getConstant(IsFSHL ? ShAmt : BitWidth - ShAmt,
8986 SDLoc(N), ShAmtTy));
8987
8988 // fold (fshl ld1, ld0, c) -> (ld0[ofs]) iff ld0 and ld1 are consecutive.
8989 // fold (fshr ld1, ld0, c) -> (ld0[ofs]) iff ld0 and ld1 are consecutive.
8990 // TODO - bigendian support once we have test coverage.
8991 // TODO - can we merge this with CombineConsecutiveLoads/MatchLoadCombine?
8992 // TODO - permit LHS EXTLOAD if extensions are shifted out.
8993 if ((BitWidth % 8) == 0 && (ShAmt % 8) == 0 && !VT.isVector() &&
8994 !DAG.getDataLayout().isBigEndian()) {
8995 auto *LHS = dyn_cast<LoadSDNode>(N0);
8996 auto *RHS = dyn_cast<LoadSDNode>(N1);
8997 if (LHS && RHS && LHS->isSimple() && RHS->isSimple() &&
8998 LHS->getAddressSpace() == RHS->getAddressSpace() &&
8999 (LHS->hasOneUse() || RHS->hasOneUse()) && ISD::isNON_EXTLoad(RHS) &&
9000 ISD::isNON_EXTLoad(LHS)) {
9001 if (DAG.areNonVolatileConsecutiveLoads(LHS, RHS, BitWidth / 8, 1)) {
9002 SDLoc DL(RHS);
9003 uint64_t PtrOff =
9004 IsFSHL ? (((BitWidth - ShAmt) % BitWidth) / 8) : (ShAmt / 8);
9005 Align NewAlign = commonAlignment(RHS->getAlign(), PtrOff);
9006 bool Fast = false;
9007 if (TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
9008 RHS->getAddressSpace(), NewAlign,
9009 RHS->getMemOperand()->getFlags(), &Fast) &&
9010 Fast) {
9011 SDValue NewPtr = DAG.getMemBasePlusOffset(
9012 RHS->getBasePtr(), TypeSize::Fixed(PtrOff), DL);
9013 AddToWorklist(NewPtr.getNode());
9014 SDValue Load = DAG.getLoad(
9015 VT, DL, RHS->getChain(), NewPtr,
9016 RHS->getPointerInfo().getWithOffset(PtrOff), NewAlign,
9017 RHS->getMemOperand()->getFlags(), RHS->getAAInfo());
9018 // Replace the old load's chain with the new load's chain.
9019 WorklistRemover DeadNodes(*this);
9020 DAG.ReplaceAllUsesOfValueWith(N1.getValue(1), Load.getValue(1));
9021 return Load;
9022 }
9023 }
9024 }
9025 }
9026 }
9027
9028 // fold fshr(undef_or_zero, N1, N2) -> lshr(N1, N2)
9029 // fold fshl(N0, undef_or_zero, N2) -> shl(N0, N2)
9030 // iff we know the shift amount is in range.
9031 // TODO: when is it worth doing SUB(BW, N2) as well?
9032 if (isPowerOf2_32(BitWidth)) {
9033 APInt ModuloBits(N2.getScalarValueSizeInBits(), BitWidth - 1);
9034 if (IsUndefOrZero(N0) && !IsFSHL && DAG.MaskedValueIsZero(N2, ~ModuloBits))
9035 return DAG.getNode(ISD::SRL, SDLoc(N), VT, N1, N2);
9036 if (IsUndefOrZero(N1) && IsFSHL && DAG.MaskedValueIsZero(N2, ~ModuloBits))
9037 return DAG.getNode(ISD::SHL, SDLoc(N), VT, N0, N2);
9038 }
9039
9040 // fold (fshl N0, N0, N2) -> (rotl N0, N2)
9041 // fold (fshr N0, N0, N2) -> (rotr N0, N2)
9042  // TODO: Investigate flipping this rotate if only one is legal; if the funnel
9043  // shift is legal as well, we might be better off avoiding the non-constant (BW - N2).
9044 unsigned RotOpc = IsFSHL ? ISD::ROTL : ISD::ROTR;
9045 if (N0 == N1 && hasOperation(RotOpc, VT))
9046 return DAG.getNode(RotOpc, SDLoc(N), VT, N0, N2);
9047
9048 // Simplify, based on bits shifted out of N0/N1.
9049 if (SimplifyDemandedBits(SDValue(N, 0)))
9050 return SDValue(N, 0);
9051
9052 return SDValue();
9053}
9054
9055SDValue DAGCombiner::visitABS(SDNode *N) {
9056 SDValue N0 = N->getOperand(0);
9057 EVT VT = N->getValueType(0);
9058
9059 // fold (abs c1) -> c2
9060 if (DAG.isConstantIntBuildVectorOrConstantInt(N0))
9061 return DAG.getNode(ISD::ABS, SDLoc(N), VT, N0);
9062 // fold (abs (abs x)) -> (abs x)
9063 if (N0.getOpcode() == ISD::ABS)
9064 return N0;
9065  // fold (abs x) -> x iff non-negative
9066 if (DAG.SignBitIsZero(N0))
9067 return N0;
9068 return SDValue();
9069}
9070
9071SDValue DAGCombiner::visitBSWAP(SDNode *N) {
9072 SDValue N0 = N->getOperand(0);
9073 EVT VT = N->getValueType(0);
9074
9075 // fold (bswap c1) -> c2
9076 if (DAG.isConstantIntBuildVectorOrConstantInt(N0))
9077 return DAG.getNode(ISD::BSWAP, SDLoc(N), VT, N0);
9078 // fold (bswap (bswap x)) -> x
9079 if (N0.getOpcode() == ISD::BSWAP)
9080 return N0->getOperand(0);
9081 return SDValue();
9082}
9083
9084SDValue DAGCombiner::visitBITREVERSE(SDNode *N) {
9085 SDValue N0 = N->getOperand(0);
9086 EVT VT = N->getValueType(0);
9087
9088 // fold (bitreverse c1) -> c2
9089 if (DAG.isConstantIntBuildVectorOrConstantInt(N0))
9090 return DAG.getNode(ISD::BITREVERSE, SDLoc(N), VT, N0);
9091 // fold (bitreverse (bitreverse x)) -> x
9092 if (N0.getOpcode() == ISD::BITREVERSE)
9093 return N0.getOperand(0);
9094 return SDValue();
9095}
9096
9097SDValue DAGCombiner::visitCTLZ(SDNode *N) {
9098 SDValue N0 = N->getOperand(0);
9099 EVT VT = N->getValueType(0);
9100
9101 // fold (ctlz c1) -> c2
9102 if (DAG.isConstantIntBuildVectorOrConstantInt(N0))
9103 return DAG.getNode(ISD::CTLZ, SDLoc(N), VT, N0);
9104
9105 // If the value is known never to be zero, switch to the undef version.
9106 if (!LegalOperations || TLI.isOperationLegal(ISD::CTLZ_ZERO_UNDEF, VT)) {
9107 if (DAG.isKnownNeverZero(N0))
9108 return DAG.getNode(ISD::CTLZ_ZERO_UNDEF, SDLoc(N), VT, N0);
9109 }
9110
9111 return SDValue();
9112}
9113
9114SDValue DAGCombiner::visitCTLZ_ZERO_UNDEF(SDNode *N) {
9115 SDValue N0 = N->getOperand(0);
9116 EVT VT = N->getValueType(0);
9117
9118 // fold (ctlz_zero_undef c1) -> c2
9119 if (DAG.isConstantIntBuildVectorOrConstantInt(N0))
9120 return DAG.getNode(ISD::CTLZ_ZERO_UNDEF, SDLoc(N), VT, N0);
9121 return SDValue();
9122}
9123
9124SDValue DAGCombiner::visitCTTZ(SDNode *N) {
9125 SDValue N0 = N->getOperand(0);
9126 EVT VT = N->getValueType(0);
9127
9128 // fold (cttz c1) -> c2
9129 if (DAG.isConstantIntBuildVectorOrConstantInt(N0))
9130 return DAG.getNode(ISD::CTTZ, SDLoc(N), VT, N0);
9131
9132 // If the value is known never to be zero, switch to the undef version.
9133 if (!LegalOperations || TLI.isOperationLegal(ISD::CTTZ_ZERO_UNDEF, VT)) {
9134 if (DAG.isKnownNeverZero(N0))
9135 return DAG.getNode(ISD::CTTZ_ZERO_UNDEF, SDLoc(N), VT, N0);
9136 }
9137
9138 return SDValue();
9139}
9140
9141SDValue DAGCombiner::visitCTTZ_ZERO_UNDEF(SDNode *N) {
9142 SDValue N0 = N->getOperand(0);
9143 EVT VT = N->getValueType(0);
9144
9145 // fold (cttz_zero_undef c1) -> c2
9146 if (DAG.isConstantIntBuildVectorOrConstantInt(N0))
9147 return DAG.getNode(ISD::CTTZ_ZERO_UNDEF, SDLoc(N), VT, N0);
9148 return SDValue();
9149}
9150
9151SDValue DAGCombiner::visitCTPOP(SDNode *N) {
9152 SDValue N0 = N->getOperand(0);
9153 EVT VT = N->getValueType(0);
9154
9155 // fold (ctpop c1) -> c2
9156 if (DAG.isConstantIntBuildVectorOrConstantInt(N0))
9157 return DAG.getNode(ISD::CTPOP, SDLoc(N), VT, N0);
9158 return SDValue();
9159}
9160
9161// FIXME: This should be checking for no signed zeros on individual operands, as
9162// well as no NaNs.
9163static bool isLegalToCombineMinNumMaxNum(SelectionDAG &DAG, SDValue LHS,
9164 SDValue RHS,
9165 const TargetLowering &TLI) {
9166 const TargetOptions &Options = DAG.getTarget().Options;
9167 EVT VT = LHS.getValueType();
9168
9169 return Options.NoSignedZerosFPMath && VT.isFloatingPoint() &&
9170 TLI.isProfitableToCombineMinNumMaxNum(VT) &&
9171 DAG.isKnownNeverNaN(LHS) && DAG.isKnownNeverNaN(RHS);
9172}
9173
9174/// Generate Min/Max node
9175static SDValue combineMinNumMaxNum(const SDLoc &DL, EVT VT, SDValue LHS,
9176 SDValue RHS, SDValue True, SDValue False,
9177 ISD::CondCode CC, const TargetLowering &TLI,
9178 SelectionDAG &DAG) {
9179 if (!(LHS == True && RHS == False) && !(LHS == False && RHS == True))
9180 return SDValue();
9181
9182 EVT TransformVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
9183 switch (CC) {
9184 case ISD::SETOLT:
9185 case ISD::SETOLE:
9186 case ISD::SETLT:
9187 case ISD::SETLE:
9188 case ISD::SETULT:
9189 case ISD::SETULE: {
9190      // Since the operands are known never to be NaN here, either fminnum or
9191      // fminnum_ieee is OK. Try the IEEE version first, since fminnum is
9192      // expanded in terms of it.
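      // e.g. (select (setolt x, y), x, y) --> (fminnum x, y); with the arms
      // swapped, (select (setolt x, y), y, x) --> (fmaxnum x, y).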
9193 unsigned IEEEOpcode = (LHS == True) ? ISD::FMINNUM_IEEE : ISD::FMAXNUM_IEEE;
9194 if (TLI.isOperationLegalOrCustom(IEEEOpcode, VT))
9195 return DAG.getNode(IEEEOpcode, DL, VT, LHS, RHS);
9196
9197 unsigned Opcode = (LHS == True) ? ISD::FMINNUM : ISD::FMAXNUM;
9198 if (TLI.isOperationLegalOrCustom(Opcode, TransformVT))
9199 return DAG.getNode(Opcode, DL, VT, LHS, RHS);
9200 return SDValue();
9201 }
9202 case ISD::SETOGT:
9203 case ISD::SETOGE:
9204 case ISD::SETGT:
9205 case ISD::SETGE:
9206 case ISD::SETUGT:
9207 case ISD::SETUGE: {
9208 unsigned IEEEOpcode = (LHS == True) ? ISD::FMAXNUM_IEEE : ISD::FMINNUM_IEEE;
9209 if (TLI.isOperationLegalOrCustom(IEEEOpcode, VT))
9210 return DAG.getNode(IEEEOpcode, DL, VT, LHS, RHS);
9211
9212 unsigned Opcode = (LHS == True) ? ISD::FMAXNUM : ISD::FMINNUM;
9213 if (TLI.isOperationLegalOrCustom(Opcode, TransformVT))
9214 return DAG.getNode(Opcode, DL, VT, LHS, RHS);
9215 return SDValue();
9216 }
9217 default:
9218 return SDValue();
9219 }
9220}
9221
9222/// If a (v)select has a condition value that is a sign-bit test, try to smear
9223/// the condition operand sign-bit across the value width and use it as a mask.
9224static SDValue foldSelectOfConstantsUsingSra(SDNode *N, SelectionDAG &DAG) {
9225 SDValue Cond = N->getOperand(0);
9226 SDValue C1 = N->getOperand(1);
9227 SDValue C2 = N->getOperand(2);
9228  assert(isConstantOrConstantVector(C1) && isConstantOrConstantVector(C2) &&
9229         "Expected select-of-constants");
9230
9231 EVT VT = N->getValueType(0);
9232 if (Cond.getOpcode() != ISD::SETCC || !Cond.hasOneUse() ||
9233 VT != Cond.getOperand(0).getValueType())
9234 return SDValue();
9235
9236 // The inverted-condition + commuted-select variants of these patterns are
9237 // canonicalized to these forms in IR.
9238 SDValue X = Cond.getOperand(0);
9239 SDValue CondC = Cond.getOperand(1);
9240 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
9241 if (CC == ISD::SETGT && isAllOnesOrAllOnesSplat(CondC) &&
9242 isAllOnesOrAllOnesSplat(C2)) {
9243 // i32 X > -1 ? C1 : -1 --> (X >>s 31) | C1
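    // (X >>s 31) is 0 when X > -1 and all-ones otherwise, so the OR yields
    // C1 or -1 respectively, matching the select.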
9244 SDLoc DL(N);
9245 SDValue ShAmtC = DAG.getConstant(X.getScalarValueSizeInBits() - 1, DL, VT);
9246 SDValue Sra = DAG.getNode(ISD::SRA, DL, VT, X, ShAmtC);
9247 return DAG.getNode(ISD::OR, DL, VT, Sra, C1);
9248 }
9249 if (CC == ISD::SETLT && isNullOrNullSplat(CondC) && isNullOrNullSplat(C2)) {
9250 // i8 X < 0 ? C1 : 0 --> (X >>s 7) & C1
9251 SDLoc DL(N);
9252 SDValue ShAmtC = DAG.getConstant(X.getScalarValueSizeInBits() - 1, DL, VT);
9253 SDValue Sra = DAG.getNode(ISD::SRA, DL, VT, X, ShAmtC);
9254 return DAG.getNode(ISD::AND, DL, VT, Sra, C1);
9255 }
9256 return SDValue();
9257}
9258
9259SDValue DAGCombiner::foldSelectOfConstants(SDNode *N) {
9260 SDValue Cond = N->getOperand(0);
9261 SDValue N1 = N->getOperand(1);
9262 SDValue N2 = N->getOperand(2);
9263 EVT VT = N->getValueType(0);
9264 EVT CondVT = Cond.getValueType();
9265 SDLoc DL(N);
9266
9267 if (!VT.isInteger())
9268 return SDValue();
9269
9270 auto *C1 = dyn_cast<ConstantSDNode>(N1);
9271 auto *C2 = dyn_cast<ConstantSDNode>(N2);
9272 if (!C1 || !C2)
9273 return SDValue();
9274
9275 // Only do this before legalization to avoid conflicting with target-specific
9276 // transforms in the other direction (create a select from a zext/sext). There
9277 // is also a target-independent combine here in DAGCombiner in the other
9278 // direction for (select Cond, -1, 0) when the condition is not i1.
9279 if (CondVT == MVT::i1 && !LegalOperations) {
9280 if (C1->isNullValue() && C2->isOne()) {
9281 // select Cond, 0, 1 --> zext (!Cond)
9282 SDValue NotCond = DAG.getNOT(DL, Cond, MVT::i1);
9283 if (VT != MVT::i1)
9284 NotCond = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, NotCond);
9285 return NotCond;
9286 }
9287 if (C1->isNullValue() && C2->isAllOnesValue()) {
9288 // select Cond, 0, -1 --> sext (!Cond)
9289 SDValue NotCond = DAG.getNOT(DL, Cond, MVT::i1);
9290 if (VT != MVT::i1)
9291 NotCond = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, NotCond);
9292 return NotCond;
9293 }
9294 if (C1->isOne() && C2->isNullValue()) {
9295 // select Cond, 1, 0 --> zext (Cond)
9296 if (VT != MVT::i1)
9297 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Cond);
9298 return Cond;
9299 }
9300 if (C1->isAllOnesValue() && C2->isNullValue()) {
9301 // select Cond, -1, 0 --> sext (Cond)
9302 if (VT != MVT::i1)
9303 Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Cond);
9304 return Cond;
9305 }
9306
9307 // Use a target hook because some targets may prefer to transform in the
9308 // other direction.
9309 if (TLI.convertSelectOfConstantsToMath(VT)) {
9310 // For any constants that differ by 1, we can transform the select into an
9311 // extend and add.
9312 const APInt &C1Val = C1->getAPIntValue();
9313 const APInt &C2Val = C2->getAPIntValue();
9314 if (C1Val - 1 == C2Val) {
9315 // select Cond, C1, C1-1 --> add (zext Cond), C1-1
9316 if (VT != MVT::i1)
9317 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Cond);
9318 return DAG.getNode(ISD::ADD, DL, VT, Cond, N2);
9319 }
9320 if (C1Val + 1 == C2Val) {
9321 // select Cond, C1, C1+1 --> add (sext Cond), C1+1
9322 if (VT != MVT::i1)
9323 Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Cond);
9324 return DAG.getNode(ISD::ADD, DL, VT, Cond, N2);
9325 }
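      // e.g. (select Cond, 7, 6) --> (add (zext Cond), 6) and
      // (select Cond, 5, 6) --> (add (sext Cond), 6).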
9326
9327 // select Cond, Pow2, 0 --> (zext Cond) << log2(Pow2)
9328 if (C1Val.isPowerOf2() && C2Val.isNullValue()) {
9329 if (VT != MVT::i1)
9330 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Cond);
9331 SDValue ShAmtC = DAG.getConstant(C1Val.exactLogBase2(), DL, VT);
9332 return DAG.getNode(ISD::SHL, DL, VT, Cond, ShAmtC);
9333 }
9334
9335 if (SDValue V = foldSelectOfConstantsUsingSra(N, DAG))
9336 return V;
9337 }
9338
9339 return SDValue();
9340 }
9341
9342 // fold (select Cond, 0, 1) -> (xor Cond, 1)
9343  // We can't do this reliably if integer-based booleans have different contents
9344  // from floating-point-based booleans. This is because we can't tell whether we
9345 // have an integer-based boolean or a floating-point-based boolean unless we
9346 // can find the SETCC that produced it and inspect its operands. This is
9347 // fairly easy if C is the SETCC node, but it can potentially be
9348 // undiscoverable (or not reasonably discoverable). For example, it could be
9349 // in another basic block or it could require searching a complicated
9350 // expression.
9351 if (CondVT.isInteger() &&
9352 TLI.getBooleanContents(/*isVec*/false, /*isFloat*/true) ==
9353 TargetLowering::ZeroOrOneBooleanContent &&
9354 TLI.getBooleanContents(/*isVec*/false, /*isFloat*/false) ==
9355 TargetLowering::ZeroOrOneBooleanContent &&
9356 C1->isNullValue() && C2->isOne()) {
9357 SDValue NotCond =
9358 DAG.getNode(ISD::XOR, DL, CondVT, Cond, DAG.getConstant(1, DL, CondVT));
9359 if (VT.bitsEq(CondVT))
9360 return NotCond;
9361 return DAG.getZExtOrTrunc(NotCond, DL, VT);
9362 }
9363
9364 return SDValue();
9365}
9366
9367static SDValue foldBoolSelectToLogic(SDNode *N, SelectionDAG &DAG) {
9368  assert((N->getOpcode() == ISD::SELECT || N->getOpcode() == ISD::VSELECT) &&
9369         "Expected a (v)select");
9370 SDValue Cond = N->getOperand(0);
9371 SDValue T = N->getOperand(1), F = N->getOperand(2);
9372 EVT VT = N->getValueType(0);
9373 if (VT != Cond.getValueType() || VT.getScalarSizeInBits() != 1)
9374 return SDValue();
9375
9376 // select Cond, Cond, F --> or Cond, F
9377 // select Cond, 1, F --> or Cond, F
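  // Since Cond is i1, Cond == 1 selects T (which is 1, or Cond itself) and the
  // OR also yields 1; Cond == 0 selects F, which the OR passes through.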
9378 if (Cond == T || isOneOrOneSplat(T, /* AllowUndefs */ true))
9379 return DAG.getNode(ISD::OR, SDLoc(N), VT, Cond, F);
9380
9381 // select Cond, T, Cond --> and Cond, T
9382 // select Cond, T, 0 --> and Cond, T
9383 if (Cond == F || isNullOrNullSplat(F, /* AllowUndefs */ true))
9384 return DAG.getNode(ISD::AND, SDLoc(N), VT, Cond, T);
9385
9386 // select Cond, T, 1 --> or (not Cond), T
9387 if (isOneOrOneSplat(F, /* AllowUndefs */ true)) {
9388 SDValue NotCond = DAG.getNOT(SDLoc(N), Cond, VT);
9389 return DAG.getNode(ISD::OR, SDLoc(N), VT, NotCond, T);
9390 }
9391
9392 // select Cond, 0, F --> and (not Cond), F
9393 if (isNullOrNullSplat(T, /* AllowUndefs */ true)) {
9394 SDValue NotCond = DAG.getNOT(SDLoc(N), Cond, VT);
9395 return DAG.getNode(ISD::AND, SDLoc(N), VT, NotCond, F);
9396 }
9397
9398 return SDValue();
9399}
9400
9401SDValue DAGCombiner::visitSELECT(SDNode *N) {
9402 SDValue N0 = N->getOperand(0);
9403 SDValue N1 = N->getOperand(1);
9404 SDValue N2 = N->getOperand(2);
9405 EVT VT = N->getValueType(0);
9406 EVT VT0 = N0.getValueType();
9407 SDLoc DL(N);
9408 SDNodeFlags Flags = N->getFlags();
9409
9410 if (SDValue V = DAG.simplifySelect(N0, N1, N2))
9411 return V;
9412
9413 if (SDValue V = foldSelectOfConstants(N))
9414 return V;
9415
9416 if (SDValue V = foldBoolSelectToLogic(N, DAG))
9417 return V;
9418
9419 // If we can fold this based on the true/false value, do so.
9420 if (SimplifySelectOps(N, N1, N2))
9421 return SDValue(N, 0); // Don't revisit N.
9422
9423 if (VT0 == MVT::i1) {
9424 // The code in this block deals with the following 2 equivalences:
9425 // select(C0|C1, x, y) <=> select(C0, x, select(C1, x, y))
9426 // select(C0&C1, x, y) <=> select(C0, select(C1, x, y), y)
9427 // The target can specify its preferred form with the
9428    // shouldNormalizeToSelectSequence() callback. However, we always transform
9429    // to the right-hand form if the inner select already exists in the DAG,
9430    // and we always transform to the left-hand form if we know that we can
9431    // further optimize the combination of the conditions.
9432 bool normalizeToSequence =
9433 TLI.shouldNormalizeToSelectSequence(*DAG.getContext(), VT);
9434 // select (and Cond0, Cond1), X, Y
9435 // -> select Cond0, (select Cond1, X, Y), Y
9436 if (N0->getOpcode() == ISD::AND && N0->hasOneUse()) {
9437 SDValue Cond0 = N0->getOperand(0);
9438 SDValue Cond1 = N0->getOperand(1);
9439 SDValue InnerSelect =
9440 DAG.getNode(ISD::SELECT, DL, N1.getValueType(), Cond1, N1, N2, Flags);
9441 if (normalizeToSequence || !InnerSelect.use_empty())
9442 return DAG.getNode(ISD::SELECT, DL, N1.getValueType(), Cond0,
9443 InnerSelect, N2, Flags);
9444 // Cleanup on failure.
9445 if (InnerSelect.use_empty())
9446 recursivelyDeleteUnusedNodes(InnerSelect.getNode());
9447 }
9448 // select (or Cond0, Cond1), X, Y -> select Cond0, X, (select Cond1, X, Y)
9449 if (N0->getOpcode() == ISD::OR && N0->hasOneUse()) {
9450 SDValue Cond0 = N0->getOperand(0);
9451 SDValue Cond1 = N0->getOperand(1);
9452 SDValue InnerSelect = DAG.getNode(ISD::SELECT, DL, N1.getValueType(),
9453 Cond1, N1, N2, Flags);
9454 if (normalizeToSequence || !InnerSelect.use_empty())
9455 return DAG.getNode(ISD::SELECT, DL, N1.getValueType(), Cond0, N1,
9456 InnerSelect, Flags);
9457 // Cleanup on failure.
9458 if (InnerSelect.use_empty())
9459 recursivelyDeleteUnusedNodes(InnerSelect.getNode());
9460 }
9461
9462 // select Cond0, (select Cond1, X, Y), Y -> select (and Cond0, Cond1), X, Y
9463 if (N1->getOpcode() == ISD::SELECT && N1->hasOneUse()) {
9464 SDValue N1_0 = N1->getOperand(0);
9465 SDValue N1_1 = N1->getOperand(1);
9466 SDValue N1_2 = N1->getOperand(2);
9467 if (N1_2 == N2 && N0.getValueType() == N1_0.getValueType()) {
9468 // Create the actual and node if we can generate good code for it.
9469 if (!normalizeToSequence) {
9470 SDValue And = DAG.getNode(ISD::AND, DL, N0.getValueType(), N0, N1_0);
9471 return DAG.getNode(ISD::SELECT, DL, N1.getValueType(), And, N1_1,
9472 N2, Flags);
9473 }
9474 // Otherwise see if we can optimize the "and" to a better pattern.
9475 if (SDValue Combined = visitANDLike(N0, N1_0, N)) {
9476 return DAG.getNode(ISD::SELECT, DL, N1.getValueType(), Combined, N1_1,
9477 N2, Flags);
9478 }
9479 }
9480 }
9481 // select Cond0, X, (select Cond1, X, Y) -> select (or Cond0, Cond1), X, Y
9482 if (N2->getOpcode() == ISD::SELECT && N2->hasOneUse()) {
9483 SDValue N2_0 = N2->getOperand(0);
9484 SDValue N2_1 = N2->getOperand(1);
9485 SDValue N2_2 = N2->getOperand(2);
9486 if (N2_1 == N1 && N0.getValueType() == N2_0.getValueType()) {
9487 // Create the actual or node if we can generate good code for it.
9488 if (!normalizeToSequence) {
9489 SDValue Or = DAG.getNode(ISD::OR, DL, N0.getValueType(), N0, N2_0);
9490 return DAG.getNode(ISD::SELECT, DL, N1.getValueType(), Or, N1,
9491 N2_2, Flags);
9492 }
9493 // Otherwise see if we can optimize to a better pattern.
9494 if (SDValue Combined = visitORLike(N0, N2_0, N))
9495 return DAG.getNode(ISD::SELECT, DL, N1.getValueType(), Combined, N1,
9496 N2_2, Flags);
9497 }
9498 }
9499 }
9500
9501 // select (not Cond), N1, N2 -> select Cond, N2, N1
9502 if (SDValue F = extractBooleanFlip(N0, DAG, TLI, false)) {
9503 SDValue SelectOp = DAG.getSelect(DL, VT, F, N2, N1);
9504 SelectOp->setFlags(Flags);
9505 return SelectOp;
9506 }
9507
9508 // Fold selects based on a setcc into other things, such as min/max/abs.
9509 if (N0.getOpcode() == ISD::SETCC) {
9510 SDValue Cond0 = N0.getOperand(0), Cond1 = N0.getOperand(1);
9511 ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
9512
9513 // select (fcmp lt x, y), x, y -> fminnum x, y
9514 // select (fcmp gt x, y), x, y -> fmaxnum x, y
9515 //
9516 // This is OK if we don't care what happens if either operand is a NaN.
9517 if (N0.hasOneUse() && isLegalToCombineMinNumMaxNum(DAG, N1, N2, TLI))
9518 if (SDValue FMinMax = combineMinNumMaxNum(DL, VT, Cond0, Cond1, N1, N2,
9519 CC, TLI, DAG))
9520 return FMinMax;
9521
9522 // Use 'unsigned add with overflow' to optimize an unsigned saturating add.
9523 // This is conservatively limited to pre-legal-operations to give targets
9524 // a chance to reverse the transform if they want to do that. Also, it is
9525 // unlikely that the pattern would be formed late, so it's probably not
9526 // worth going through the other checks.
9527 if (!LegalOperations && TLI.isOperationLegalOrCustom(ISD::UADDO, VT) &&
9528 CC == ISD::SETUGT && N0.hasOneUse() && isAllOnesConstant(N1) &&
9529 N2.getOpcode() == ISD::ADD && Cond0 == N2.getOperand(0)) {
9530 auto *C = dyn_cast<ConstantSDNode>(N2.getOperand(1));
9531 auto *NotC = dyn_cast<ConstantSDNode>(Cond1);
9532 if (C && NotC && C->getAPIntValue() == ~NotC->getAPIntValue()) {
9533 // select (setcc Cond0, ~C, ugt), -1, (add Cond0, C) -->
9534 // uaddo Cond0, C; select uaddo.1, -1, uaddo.0
9535 //
9536 // The IR equivalent of this transform would have this form:
9537 // %a = add %x, C
9538 // %c = icmp ugt %x, ~C
9539 // %r = select %c, -1, %a
9540 // =>
9541 // %u = call {iN,i1} llvm.uadd.with.overflow(%x, C)
9542 // %u0 = extractvalue %u, 0
9543 // %u1 = extractvalue %u, 1
9544 // %r = select %u1, -1, %u0
9545 SDVTList VTs = DAG.getVTList(VT, VT0);
9546 SDValue UAO = DAG.getNode(ISD::UADDO, DL, VTs, Cond0, N2.getOperand(1));
9547 return DAG.getSelect(DL, VT, UAO.getValue(1), N1, UAO.getValue(0));
9548 }
9549 }
9550
9551 if (TLI.isOperationLegal(ISD::SELECT_CC, VT) ||
9552 (!LegalOperations &&
9553 TLI.isOperationLegalOrCustom(ISD::SELECT_CC, VT))) {
9554 // Any flags available in a select/setcc fold will be on the setcc as they
9555 // migrated from fcmp
9556 Flags = N0.getNode()->getFlags();
9557 SDValue SelectNode = DAG.getNode(ISD::SELECT_CC, DL, VT, Cond0, Cond1, N1,
9558 N2, N0.getOperand(2));
9559 SelectNode->setFlags(Flags);
9560 return SelectNode;
9561 }
9562
9563 return SimplifySelect(DL, N0, N1, N2);
9564 }
9565
9566 return SDValue();
9567}
9568
9569// This function assumes all the vselect's arguments are CONCAT_VECTORS
9570// nodes and that the condition is a BV of ConstantSDNodes (or undefs).
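// e.g. (vselect <1,1,0,0>, (concat A, B), (concat C, D)) --> (concat A, D):
// the bottom half of the condition selects from the LHS concat operand and the
// top half from the RHS.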
9571static SDValue ConvertSelectToConcatVector(SDNode *N, SelectionDAG &DAG) {
9572 SDLoc DL(N);
9573 SDValue Cond = N->getOperand(0);
9574 SDValue LHS = N->getOperand(1);
9575 SDValue RHS = N->getOperand(2);
9576 EVT VT = N->getValueType(0);
9577 int NumElems = VT.getVectorNumElements();
9578  assert(LHS.getOpcode() == ISD::CONCAT_VECTORS &&
9579         RHS.getOpcode() == ISD::CONCAT_VECTORS &&
9580         Cond.getOpcode() == ISD::BUILD_VECTOR);
9581
9582  // CONCAT_VECTORS can take an arbitrary number of arguments. We only care about
9583 // binary ones here.
9584 if (LHS->getNumOperands() != 2 || RHS->getNumOperands() != 2)
9585 return SDValue();
9586
9587 // We're sure we have an even number of elements due to the
9588 // concat_vectors we have as arguments to vselect.
9589  // Skip BV elements until we find one that's not an UNDEF.
9590  // After we find a non-UNDEF element, keep looping until we get to half the
9591  // length of the BV and check that all the non-undef nodes are the same.
9592 ConstantSDNode *BottomHalf = nullptr;
9593 for (int i = 0; i < NumElems / 2; ++i) {
9594 if (Cond->getOperand(i)->isUndef())
9595 continue;
9596
9597 if (BottomHalf == nullptr)
9598 BottomHalf = cast<ConstantSDNode>(Cond.getOperand(i));
9599 else if (Cond->getOperand(i).getNode() != BottomHalf)
9600 return SDValue();
9601 }
9602
9603 // Do the same for the second half of the BuildVector
9604 ConstantSDNode *TopHalf = nullptr;
9605 for (int i = NumElems / 2; i < NumElems; ++i) {
9606 if (Cond->getOperand(i)->isUndef())
9607 continue;
9608
9609 if (TopHalf == nullptr)
9610 TopHalf = cast<ConstantSDNode>(Cond.getOperand(i));
9611 else if (Cond->getOperand(i).getNode() != TopHalf)
9612 return SDValue();
9613 }
9614
9615  assert(TopHalf && BottomHalf &&
9616         "One half of the selector was all UNDEFs and the other was all the "
9617         "same value. This should have been addressed before this function.");
9618 return DAG.getNode(
9619 ISD::CONCAT_VECTORS, DL, VT,
9620 BottomHalf->isNullValue() ? RHS->getOperand(0) : LHS->getOperand(0),
9621 TopHalf->isNullValue() ? RHS->getOperand(1) : LHS->getOperand(1));
9622}
9623
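// Given a null base pointer and an index of the form (add (splat X), Y), use X
// as the uniform base and Y as the index. e.g. a gather with BasePtr 0 and
// Index (add (splat %p), %offs) becomes a gather with BasePtr %p and Index %offs.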
9624bool refineUniformBase(SDValue &BasePtr, SDValue &Index, SelectionDAG &DAG) {
9625 if (!isNullConstant(BasePtr) || Index.getOpcode() != ISD::ADD)
9626 return false;
9627
9628 // For now we check only the LHS of the add.
9629 SDValue LHS = Index.getOperand(0);
9630 SDValue SplatVal = DAG.getSplatValue(LHS);
9631 if (!SplatVal)
9632 return false;
9633
9634 BasePtr = SplatVal;
9635 Index = Index.getOperand(1);
9636 return true;
9637}
9638
9639// Fold sext/zext of index into index type.
9640bool refineIndexType(MaskedGatherScatterSDNode *MGS, SDValue &Index,
9641 bool Scaled, SelectionDAG &DAG) {
9642 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9643
9644 if (Index.getOpcode() == ISD::ZERO_EXTEND) {
9645 SDValue Op = Index.getOperand(0);
9646 MGS->setIndexType(Scaled ? ISD::UNSIGNED_SCALED : ISD::UNSIGNED_UNSCALED);
9647 if (TLI.shouldRemoveExtendFromGSIndex(Op.getValueType())) {
9648 Index = Op;
9649 return true;
9650 }
9651 }
9652
9653 if (Index.getOpcode() == ISD::SIGN_EXTEND) {
9654 SDValue Op = Index.getOperand(0);
9655 MGS->setIndexType(Scaled ? ISD::SIGNED_SCALED : ISD::SIGNED_UNSCALED);
9656 if (TLI.shouldRemoveExtendFromGSIndex(Op.getValueType())) {
9657 Index = Op;
9658 return true;
9659 }
9660 }
9661
9662 return false;
9663}
9664
9665SDValue DAGCombiner::visitMSCATTER(SDNode *N) {
9666 MaskedScatterSDNode *MSC = cast<MaskedScatterSDNode>(N);
9667 SDValue Mask = MSC->getMask();
9668 SDValue Chain = MSC->getChain();
9669 SDValue Index = MSC->getIndex();
9670 SDValue Scale = MSC->getScale();
9671 SDValue StoreVal = MSC->getValue();
9672 SDValue BasePtr = MSC->getBasePtr();
9673 SDLoc DL(N);
9674
9675 // Zap scatters with a zero mask.
9676 if (ISD::isConstantSplatVectorAllZeros(Mask.getNode()))
9677 return Chain;
9678
9679 if (refineUniformBase(BasePtr, Index, DAG)) {
9680 SDValue Ops[] = {Chain, StoreVal, Mask, BasePtr, Index, Scale};
9681 return DAG.getMaskedScatter(
9682 DAG.getVTList(MVT::Other), StoreVal.getValueType(), DL, Ops,
9683 MSC->getMemOperand(), MSC->getIndexType(), MSC->isTruncatingStore());
9684 }
9685
9686 if (refineIndexType(MSC, Index, MSC->isIndexScaled(), DAG)) {
9687 SDValue Ops[] = {Chain, StoreVal, Mask, BasePtr, Index, Scale};
9688 return DAG.getMaskedScatter(
9689 DAG.getVTList(MVT::Other), StoreVal.getValueType(), DL, Ops,
9690 MSC->getMemOperand(), MSC->getIndexType(), MSC->isTruncatingStore());
9691 }
9692
9693 return SDValue();
9694}
9695
9696SDValue DAGCombiner::visitMSTORE(SDNode *N) {
9697 MaskedStoreSDNode *MST = cast<MaskedStoreSDNode>(N);
9698 SDValue Mask = MST->getMask();
9699 SDValue Chain = MST->getChain();
9700 SDLoc DL(N);
9701
9702 // Zap masked stores with a zero mask.
9703 if (ISD::isConstantSplatVectorAllZeros(Mask.getNode()))
9704 return Chain;
9705
9706  // If this is a masked store with an all-ones mask, we can use an unmasked store.
9707 // FIXME: Can we do this for indexed, compressing, or truncating stores?
9708 if (ISD::isConstantSplatVectorAllOnes(Mask.getNode()) &&
9709 MST->isUnindexed() && !MST->isCompressingStore() &&
9710 !MST->isTruncatingStore())
9711 return DAG.getStore(MST->getChain(), SDLoc(N), MST->getValue(),
9712 MST->getBasePtr(), MST->getMemOperand());
9713
9714 // Try transforming N to an indexed store.
9715 if (CombineToPreIndexedLoadStore(N) || CombineToPostIndexedLoadStore(N))
9716 return SDValue(N, 0);
9717
9718 return SDValue();
9719}
9720
9721SDValue DAGCombiner::visitMGATHER(SDNode *N) {
9722 MaskedGatherSDNode *MGT = cast<MaskedGatherSDNode>(N);
9723 SDValue Mask = MGT->getMask();
9724 SDValue Chain = MGT->getChain();
9725 SDValue Index = MGT->getIndex();
9726 SDValue Scale = MGT->getScale();
9727 SDValue PassThru = MGT->getPassThru();
9728 SDValue BasePtr = MGT->getBasePtr();
9729 SDLoc DL(N);
9730
9731 // Zap gathers with a zero mask.
9732 if (ISD::isConstantSplatVectorAllZeros(Mask.getNode()))
9733 return CombineTo(N, PassThru, MGT->getChain());
9734
9735 if (refineUniformBase(BasePtr, Index, DAG)) {
9736 SDValue Ops[] = {Chain, PassThru, Mask, BasePtr, Index, Scale};
9737 return DAG.getMaskedGather(DAG.getVTList(N->getValueType(0), MVT::Other),
9738 PassThru.getValueType(), DL, Ops,
9739 MGT->getMemOperand(), MGT->getIndexType(),
9740 MGT->getExtensionType());
9741 }
9742
9743 if (refineIndexType(MGT, Index, MGT->isIndexScaled(), DAG)) {
9744 SDValue Ops[] = {Chain, PassThru, Mask, BasePtr, Index, Scale};
9745 return DAG.getMaskedGather(DAG.getVTList(N->getValueType(0), MVT::Other),
9746 PassThru.getValueType(), DL, Ops,
9747 MGT->getMemOperand(), MGT->getIndexType(),
9748 MGT->getExtensionType());
9749 }
9750
9751 return SDValue();
9752}
9753
9754SDValue DAGCombiner::visitMLOAD(SDNode *N) {
9755 MaskedLoadSDNode *MLD = cast<MaskedLoadSDNode>(N);
9756 SDValue Mask = MLD->getMask();
9757 SDLoc DL(N);
9758
9759 // Zap masked loads with a zero mask.
9760 if (ISD::isConstantSplatVectorAllZeros(Mask.getNode()))
9761 return CombineTo(N, MLD->getPassThru(), MLD->getChain());
9762
9763  // If this is a masked load with an all-ones mask, we can use an unmasked load.
9764 // FIXME: Can we do this for indexed, expanding, or extending loads?
9765 if (ISD::isConstantSplatVectorAllOnes(Mask.getNode()) &&
9766 MLD->isUnindexed() && !MLD->isExpandingLoad() &&
9767 MLD->getExtensionType() == ISD::NON_EXTLOAD) {
9768 SDValue NewLd = DAG.getLoad(N->getValueType(0), SDLoc(N), MLD->getChain(),
9769 MLD->getBasePtr(), MLD->getMemOperand());
9770 return CombineTo(N, NewLd, NewLd.getValue(1));
9771 }
9772
9773 // Try transforming N to an indexed load.
9774 if (CombineToPreIndexedLoadStore(N) || CombineToPostIndexedLoadStore(N))
9775 return SDValue(N, 0);
9776
9777 return SDValue();
9778}
9779
9780/// A vector select of 2 constant vectors can be simplified to math/logic to
9781/// avoid a variable select instruction and possibly avoid constant loads.
9782SDValue DAGCombiner::foldVSelectOfConstants(SDNode *N) {
9783 SDValue Cond = N->getOperand(0);
9784 SDValue N1 = N->getOperand(1);
9785 SDValue N2 = N->getOperand(2);
9786 EVT VT = N->getValueType(0);
9787 if (!Cond.hasOneUse() || Cond.getScalarValueSizeInBits() != 1 ||
9788 !TLI.convertSelectOfConstantsToMath(VT) ||
9789 !ISD::isBuildVectorOfConstantSDNodes(N1.getNode()) ||
9790 !ISD::isBuildVectorOfConstantSDNodes(N2.getNode()))
9791 return SDValue();
9792
9793 // Check if we can use the condition value to increment/decrement a single
9794 // constant value. This simplifies a select to an add and removes a constant
9795 // load/materialization from the general case.
9796 bool AllAddOne = true;
9797 bool AllSubOne = true;
9798 unsigned Elts = VT.getVectorNumElements();
9799 for (unsigned i = 0; i != Elts; ++i) {
9800 SDValue N1Elt = N1.getOperand(i);
9801 SDValue N2Elt = N2.getOperand(i);
9802 if (N1Elt.isUndef() || N2Elt.isUndef())
9803 continue;
9804 if (N1Elt.getValueType() != N2Elt.getValueType())
9805 continue;
9806
9807 const APInt &C1 = cast<ConstantSDNode>(N1Elt)->getAPIntValue();
9808 const APInt &C2 = cast<ConstantSDNode>(N2Elt)->getAPIntValue();
9809 if (C1 != C2 + 1)
9810 AllAddOne = false;
9811 if (C1 != C2 - 1)
9812 AllSubOne = false;
9813 }
9814
9815 // Further simplifications for the extra-special cases where the constants are
9816 // all 0 or all -1 should be implemented as folds of these patterns.
9817 SDLoc DL(N);
9818 if (AllAddOne || AllSubOne) {
9819 // vselect <N x i1> Cond, C+1, C --> add (zext Cond), C
9820 // vselect <N x i1> Cond, C-1, C --> add (sext Cond), C
9821 auto ExtendOpcode = AllAddOne ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
9822 SDValue ExtendedCond = DAG.getNode(ExtendOpcode, DL, VT, Cond);
9823 return DAG.getNode(ISD::ADD, DL, VT, ExtendedCond, N2);
9824 }
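  // e.g. (vselect Cond, <5, 9>, <4, 8>) --> (add (zext Cond), <4, 8>), since
  // each true-arm constant is one more than the matching false-arm constant.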
9825
9826 // select Cond, Pow2C, 0 --> (zext Cond) << log2(Pow2C)
9827 APInt Pow2C;
9828 if (ISD::isConstantSplatVector(N1.getNode(), Pow2C) && Pow2C.isPowerOf2() &&
9829 isNullOrNullSplat(N2)) {
9830 SDValue ZextCond = DAG.getZExtOrTrunc(Cond, DL, VT);
9831 SDValue ShAmtC = DAG.getConstant(Pow2C.exactLogBase2(), DL, VT);
9832 return DAG.getNode(ISD::SHL, DL, VT, ZextCond, ShAmtC);
9833 }
9834
9835 if (SDValue V = foldSelectOfConstantsUsingSra(N, DAG))
9836 return V;
9837
9838 // The general case for select-of-constants:
9839 // vselect <N x i1> Cond, C1, C2 --> xor (and (sext Cond), (C1^C2)), C2
9840 // ...but that only makes sense if a vselect is slower than 2 logic ops, so
9841 // leave that to a machine-specific pass.
9842 return SDValue();
9843}
9844
9845SDValue DAGCombiner::visitVSELECT(SDNode *N) {
9846 SDValue N0 = N->getOperand(0);
9847 SDValue N1 = N->getOperand(1);
9848 SDValue N2 = N->getOperand(2);
9849 EVT VT = N->getValueType(0);
9850 SDLoc DL(N);
9851
9852 if (SDValue V = DAG.simplifySelect(N0, N1, N2))
9853 return V;
9854
9855 if (SDValue V = foldBoolSelectToLogic(N, DAG))
9856 return V;
9857
9858 // vselect (not Cond), N1, N2 -> vselect Cond, N2, N1
9859 if (SDValue F = extractBooleanFlip(N0, DAG, TLI, false))
9860 return DAG.getSelect(DL, VT, F, N2, N1);
9861
9862 // Canonicalize integer abs.
9863 // vselect (setg[te] X, 0), X, -X ->
9864 // vselect (setgt X, -1), X, -X ->
9865 // vselect (setl[te] X, 0), -X, X ->
9866 // Y = sra (X, size(X)-1); xor (add (X, Y), Y)
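  // For X < 0, Y is all-ones, so (X + Y) ^ Y == (X - 1) ^ -1 == -X;
  // for X >= 0, Y is 0 and the expression reduces to X.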
9867 if (N0.getOpcode() == ISD::SETCC) {
9868 SDValue LHS = N0.getOperand(0), RHS = N0.getOperand(1);
9869 ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
9870 bool isAbs = false;
9871 bool RHSIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());
9872
9873 if (((RHSIsAllZeros && (CC == ISD::SETGT || CC == ISD::SETGE)) ||
9874 (ISD::isBuildVectorAllOnes(RHS.getNode()) && CC == ISD::SETGT)) &&
9875 N1 == LHS && N2.getOpcode() == ISD::SUB && N1 == N2.getOperand(1))
9876 isAbs = ISD::isBuildVectorAllZeros(N2.getOperand(0).getNode());
9877 else if ((RHSIsAllZeros && (CC == ISD::SETLT || CC == ISD::SETLE)) &&
9878 N2 == LHS && N1.getOpcode() == ISD::SUB && N2 == N1.getOperand(1))
9879 isAbs = ISD::isBuildVectorAllZeros(N1.getOperand(0).getNode());
9880
9881 if (isAbs) {
9882 if (TLI.isOperationLegalOrCustom(ISD::ABS, VT))
9883 return DAG.getNode(ISD::ABS, DL, VT, LHS);
9884
9885 SDValue Shift = DAG.getNode(ISD::SRA, DL, VT, LHS,
9886 DAG.getConstant(VT.getScalarSizeInBits() - 1,
9887 DL, getShiftAmountTy(VT)));
9888 SDValue Add = DAG.getNode(ISD::ADD, DL, VT, LHS, Shift);
9889 AddToWorklist(Shift.getNode());
9890 AddToWorklist(Add.getNode());
9891 return DAG.getNode(ISD::XOR, DL, VT, Add, Shift);
9892 }
9893
9894 // vselect x, y (fcmp lt x, y) -> fminnum x, y
9895 // vselect x, y (fcmp gt x, y) -> fmaxnum x, y
9896 //
9897 // This is OK if we don't care about what happens if either operand is a
9898 // NaN.
9899 //
9900 if (N0.hasOneUse() && isLegalToCombineMinNumMaxNum(DAG, LHS, RHS, TLI)) {
9901 if (SDValue FMinMax =
9902 combineMinNumMaxNum(DL, VT, LHS, RHS, N1, N2, CC, TLI, DAG))
9903 return FMinMax;
9904 }
9905
9906 // If this select has a condition (setcc) with narrower operands than the
9907 // select, try to widen the compare to match the select width.
9908 // TODO: This should be extended to handle any constant.
9909 // TODO: This could be extended to handle non-loading patterns, but that
9910 // requires thorough testing to avoid regressions.
9911 if (isNullOrNullSplat(RHS)) {
9912 EVT NarrowVT = LHS.getValueType();
9913 EVT WideVT = N1.getValueType().changeVectorElementTypeToInteger();
9914 EVT SetCCVT = getSetCCResultType(LHS.getValueType());
9915 unsigned SetCCWidth = SetCCVT.getScalarSizeInBits();
9916 unsigned WideWidth = WideVT.getScalarSizeInBits();
9917 bool IsSigned = isSignedIntSetCC(CC);
9918 auto LoadExtOpcode = IsSigned ? ISD::SEXTLOAD : ISD::ZEXTLOAD;
9919 if (LHS.getOpcode() == ISD::LOAD && LHS.hasOneUse() &&
9920 SetCCWidth != 1 && SetCCWidth < WideWidth &&
9921 TLI.isLoadExtLegalOrCustom(LoadExtOpcode, WideVT, NarrowVT) &&
9922 TLI.isOperationLegalOrCustom(ISD::SETCC, WideVT)) {
9923 // Both compare operands can be widened for free. The LHS can use an
9924 // extended load, and the RHS is a constant:
9925 // vselect (ext (setcc load(X), C)), N1, N2 -->
9926 // vselect (setcc extload(X), C'), N1, N2
9927 auto ExtOpcode = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
9928 SDValue WideLHS = DAG.getNode(ExtOpcode, DL, WideVT, LHS);
9929 SDValue WideRHS = DAG.getNode(ExtOpcode, DL, WideVT, RHS);
9930 EVT WideSetCCVT = getSetCCResultType(WideVT);
9931 SDValue WideSetCC = DAG.getSetCC(DL, WideSetCCVT, WideLHS, WideRHS, CC);
9932 return DAG.getSelect(DL, N1.getValueType(), WideSetCC, N1, N2);
9933 }
9934 }
9935
9936 // Match VSELECTs into add with unsigned saturation.
9937 if (hasOperation(ISD::UADDSAT, VT)) {
9938    // Check if one of the arms of the VSELECT is a vector with all bits set.
9939    // If it's on the left side, invert the predicate to simplify the logic below.
9940 SDValue Other;
9941 ISD::CondCode SatCC = CC;
9942 if (ISD::isBuildVectorAllOnes(N1.getNode())) {
9943 Other = N2;
9944 SatCC = ISD::getSetCCInverse(SatCC, VT.getScalarType());
9945 } else if (ISD::isBuildVectorAllOnes(N2.getNode())) {
9946 Other = N1;
9947 }
9948
9949 if (Other && Other.getOpcode() == ISD::ADD) {
9950 SDValue CondLHS = LHS, CondRHS = RHS;
9951 SDValue OpLHS = Other.getOperand(0), OpRHS = Other.getOperand(1);
9952
9953 // Canonicalize condition operands.
9954 if (SatCC == ISD::SETUGE) {
9955 std::swap(CondLHS, CondRHS);
9956 SatCC = ISD::SETULE;
9957 }
9958
9959 // We can test against either of the addition operands.
9960 // x <= x+y ? x+y : ~0 --> uaddsat x, y
9961 // x+y >= x ? x+y : ~0 --> uaddsat x, y
9962 if (SatCC == ISD::SETULE && Other == CondRHS &&
9963 (OpLHS == CondLHS || OpRHS == CondLHS))
9964 return DAG.getNode(ISD::UADDSAT, DL, VT, OpLHS, OpRHS);
9965
9966 if (isa<BuildVectorSDNode>(OpRHS) && isa<BuildVectorSDNode>(CondRHS) &&
9967 CondLHS == OpLHS) {
9968 // If the RHS is a constant we have to reverse the const
9969 // canonicalization.
9970 // x >= ~C ? x+C : ~0 --> uaddsat x, C
9971 auto MatchUADDSAT = [](ConstantSDNode *Op, ConstantSDNode *Cond) {
9972 return Cond->getAPIntValue() == ~Op->getAPIntValue();
9973 };
9974 if (SatCC == ISD::SETULE &&
9975 ISD::matchBinaryPredicate(OpRHS, CondRHS, MatchUADDSAT))
9976 return DAG.getNode(ISD::UADDSAT, DL, VT, OpLHS, OpRHS);
9977 }
9978 }
9979 }
9980
9981 // Match VSELECTs into sub with unsigned saturation.
9982 if (hasOperation(ISD::USUBSAT, VT)) {
9983    // Check if one of the arms of the VSELECT is a zero vector. If it's on
9984    // the left side, invert the predicate to simplify the logic below.
9985 SDValue Other;
9986 ISD::CondCode SatCC = CC;
9987 if (ISD::isBuildVectorAllZeros(N1.getNode())) {
9988 Other = N2;
9989 SatCC = ISD::getSetCCInverse(SatCC, VT.getScalarType());
9990 } else if (ISD::isBuildVectorAllZeros(N2.getNode())) {
9991 Other = N1;
9992 }
9993
9994 if (Other && Other.getNumOperands() == 2) {
9995 SDValue CondRHS = RHS;
9996 SDValue OpLHS = Other.getOperand(0), OpRHS = Other.getOperand(1);
9997
9998 if (Other.getOpcode() == ISD::SUB &&
9999 LHS.getOpcode() == ISD::ZERO_EXTEND && LHS.getOperand(0) == OpLHS &&
10000 OpRHS.getOpcode() == ISD::TRUNCATE && OpRHS.getOperand(0) == RHS) {
10001 // Look for a general sub with unsigned saturation first.
10002 // zext(x) >= y ? x - trunc(y) : 0
10003 // --> usubsat(x,trunc(umin(y,SatLimit)))
10004 // zext(x) > y ? x - trunc(y) : 0
10005 // --> usubsat(x,trunc(umin(y,SatLimit)))
10006 if (SatCC == ISD::SETUGE || SatCC == ISD::SETUGT)
10007 return getTruncatedUSUBSAT(VT, LHS.getValueType(), LHS, RHS, DAG,
10008 DL);
10009 }
10010
10011 if (OpLHS == LHS) {
10012 // Look for a general sub with unsigned saturation first.
10013 // x >= y ? x-y : 0 --> usubsat x, y
10014 // x > y ? x-y : 0 --> usubsat x, y
10015 if ((SatCC == ISD::SETUGE || SatCC == ISD::SETUGT) &&
10016 Other.getOpcode() == ISD::SUB && OpRHS == CondRHS)
10017 return DAG.getNode(ISD::USUBSAT, DL, VT, OpLHS, OpRHS);
10018
10019 if (auto *OpRHSBV = dyn_cast<BuildVectorSDNode>(OpRHS)) {
10020 if (isa<BuildVectorSDNode>(CondRHS)) {
10021 // If the RHS is a constant we have to reverse the const
10022 // canonicalization.
10023            // x > C-1 ? x+(-C) : 0 --> usubsat x, C
10024 auto MatchUSUBSAT = [](ConstantSDNode *Op, ConstantSDNode *Cond) {
10025 return (!Op && !Cond) ||
10026 (Op && Cond &&
10027 Cond->getAPIntValue() == (-Op->getAPIntValue() - 1));
10028 };
10029 if (SatCC == ISD::SETUGT && Other.getOpcode() == ISD::ADD &&
10030 ISD::matchBinaryPredicate(OpRHS, CondRHS, MatchUSUBSAT,
10031 /*AllowUndefs*/ true)) {
10032 OpRHS = DAG.getNode(ISD::SUB, DL, VT,
10033 DAG.getConstant(0, DL, VT), OpRHS);
10034 return DAG.getNode(ISD::USUBSAT, DL, VT, OpLHS, OpRHS);
10035 }
10036
10037 // Another special case: If C was a sign bit, the sub has been
10038 // canonicalized into a xor.
10039 // FIXME: Would it be better to use computeKnownBits to determine
10040 // whether it's safe to decanonicalize the xor?
10041 // x s< 0 ? x^C : 0 --> usubsat x, C
10042 if (auto *OpRHSConst = OpRHSBV->getConstantSplatNode()) {
10043 if (SatCC == ISD::SETLT && Other.getOpcode() == ISD::XOR &&
10044 ISD::isBuildVectorAllZeros(CondRHS.getNode()) &&
10045 OpRHSConst->getAPIntValue().isSignMask()) {
10046 // Note that we have to rebuild the RHS constant here to
10047 // ensure we don't rely on particular values of undef lanes.
10048 OpRHS = DAG.getConstant(OpRHSConst->getAPIntValue(), DL, VT);
10049 return DAG.getNode(ISD::USUBSAT, DL, VT, OpLHS, OpRHS);
10050 }
10051 }
10052 }
10053 }
10054 }
10055 }
10056 }
10057 }
10058
10059 if (SimplifySelectOps(N, N1, N2))
10060 return SDValue(N, 0); // Don't revisit N.
10061
10062 // Fold (vselect all_ones, N1, N2) -> N1
10063 if (ISD::isConstantSplatVectorAllOnes(N0.getNode()))
10064 return N1;
10065 // Fold (vselect all_zeros, N1, N2) -> N2
10066 if (ISD::isConstantSplatVectorAllZeros(N0.getNode()))
10067 return N2;
10068
10069  // The ConvertSelectToConcatVector function assumes both of the above
10070  // checks for (vselect (build_vector all{ones,zeros}) ...) have been made
10071  // and addressed.
10072 if (N1.getOpcode() == ISD::CONCAT_VECTORS &&
10073 N2.getOpcode() == ISD::CONCAT_VECTORS &&
10074 ISD::isBuildVectorOfConstantSDNodes(N0.getNode())) {
10075 if (SDValue CV = ConvertSelectToConcatVector(N, DAG))
10076 return CV;
10077 }
10078
10079 if (SDValue V = foldVSelectOfConstants(N))
10080 return V;
10081
10082 return SDValue();
10083}
10084
10085SDValue DAGCombiner::visitSELECT_CC(SDNode *N) {
10086 SDValue N0 = N->getOperand(0);
10087 SDValue N1 = N->getOperand(1);
10088 SDValue N2 = N->getOperand(2);
10089 SDValue N3 = N->getOperand(3);
10090 SDValue N4 = N->getOperand(4);
10091 ISD::CondCode CC = cast<CondCodeSDNode>(N4)->get();
10092
10093 // fold select_cc lhs, rhs, x, x, cc -> x
10094 if (N2 == N3)
10095 return N2;
10096
10097 // Determine if the condition we're dealing with is constant
10098 if (SDValue SCC = SimplifySetCC(getSetCCResultType(N0.getValueType()), N0, N1,
10099 CC, SDLoc(N), false)) {
10100 AddToWorklist(SCC.getNode());
10101
10102 if (ConstantSDNode *SCCC = dyn_cast<ConstantSDNode>(SCC.getNode())) {
10103 if (!SCCC->isNullValue())
10104 return N2; // cond always true -> true val
10105 else
10106 return N3; // cond always false -> false val
10107 } else if (SCC->isUndef()) {
10108      // When the condition is UNDEF, just return the first operand. This is
10109      // coherent with DAG creation; no setcc node is created in this case.
10110 return N2;
10111 } else if (SCC.getOpcode() == ISD::SETCC) {
10112 // Fold to a simpler select_cc
10113 SDValue SelectOp = DAG.getNode(
10114 ISD::SELECT_CC, SDLoc(N), N2.getValueType(), SCC.getOperand(0),
10115 SCC.getOperand(1), N2, N3, SCC.getOperand(2));
10116 SelectOp->setFlags(SCC->getFlags());
10117 return SelectOp;
10118 }
10119 }
10120
10121 // If we can fold this based on the true/false value, do so.
10122 if (SimplifySelectOps(N, N2, N3))
10123 return SDValue(N, 0); // Don't revisit N.
10124
10125 // fold select_cc into other things, such as min/max/abs
10126 return SimplifySelectCC(SDLoc(N), N0, N1, N2, N3, CC);
10127}
10128
10129SDValue DAGCombiner::visitSETCC(SDNode *N) {
10130 // setcc is very commonly used as an argument to brcond. This pattern
10131  // also lends itself to numerous combines and, as a result, it is desirable
10132  // that we keep the argument to a brcond as a setcc as much as possible.
10133 bool PreferSetCC =
10134 N->hasOneUse() && N->use_begin()->getOpcode() == ISD::BRCOND;
10135
10136 SDValue Combined = SimplifySetCC(
10137 N->getValueType(0), N->getOperand(0), N->getOperand(1),
10138 cast<CondCodeSDNode>(N->getOperand(2))->get(), SDLoc(N), !PreferSetCC);
10139
10140 if (!Combined)
10141 return SDValue();
10142
10143 // If we prefer to have a setcc, and we don't, we'll try our best to
10144 // recreate one using rebuildSetCC.
10145 if (PreferSetCC && Combined.getOpcode() != ISD::SETCC) {
10146 SDValue NewSetCC = rebuildSetCC(Combined);
10147
10148 // We don't have anything interesting to combine to.
10149 if (NewSetCC.getNode() == N)
10150 return SDValue();
10151
10152 if (NewSetCC)
10153 return NewSetCC;
10154 }
10155
10156 return Combined;
10157}
10158
10159SDValue DAGCombiner::visitSETCCCARRY(SDNode *N) {
10160 SDValue LHS = N->getOperand(0);
10161 SDValue RHS = N->getOperand(1);
10162 SDValue Carry = N->getOperand(2);
10163 SDValue Cond = N->getOperand(3);
10164
10165 // If Carry is false, fold to a regular SETCC.
10166 if (isNullConstant(Carry))
10167 return DAG.getNode(ISD::SETCC, SDLoc(N), N->getVTList(), LHS, RHS, Cond);
10168
10169 return SDValue();
10170}
10171
10172/// Check if N satisfies:
10173/// N is used once.
10174/// N is a Load.
10175/// The load is compatible with ExtOpcode, meaning:
10176/// if the load has an explicit zero/sign extension, ExtOpcode must have the
10177/// same extension;
10178/// otherwise, any ExtOpcode is compatible.
10179static bool isCompatibleLoad(SDValue N, unsigned ExtOpcode) {
10180 if (!N.hasOneUse())
10181 return false;
10182
10183 if (!isa<LoadSDNode>(N))
10184 return false;
10185
10186 LoadSDNode *Load = cast<LoadSDNode>(N);
10187 ISD::LoadExtType LoadExt = Load->getExtensionType();
10188 if (LoadExt == ISD::NON_EXTLOAD || LoadExt == ISD::EXTLOAD)
10189 return true;
10190
10191  // Now LoadExt is either SEXTLOAD or ZEXTLOAD; ExtOpcode must have the same
10192  // extension.
10193 if ((LoadExt == ISD::SEXTLOAD && ExtOpcode != ISD::SIGN_EXTEND) ||
10194 (LoadExt == ISD::ZEXTLOAD && ExtOpcode != ISD::ZERO_EXTEND))
10195 return false;
10196
10197 return true;
10198}
10199
10200/// Fold
10201/// (sext (select c, load x, load y)) -> (select c, sextload x, sextload y)
10202/// (zext (select c, load x, load y)) -> (select c, zextload x, zextload y)
10203/// (aext (select c, load x, load y)) -> (select c, extload x, extload y)
10204/// This function is called by the DAGCombiner when visiting sext/zext/aext
10205/// dag nodes (see for example method DAGCombiner::visitSIGN_EXTEND).
10206static SDValue tryToFoldExtendSelectLoad(SDNode *N, const TargetLowering &TLI,
10207 SelectionDAG &DAG) {
10208 unsigned Opcode = N->getOpcode();
10209 SDValue N0 = N->getOperand(0);
10210 EVT VT = N->getValueType(0);
10211 SDLoc DL(N);
10212
10213  assert((Opcode == ISD::SIGN_EXTEND || Opcode == ISD::ZERO_EXTEND ||
10214          Opcode == ISD::ANY_EXTEND) &&
10215         "Expected EXTEND dag node in input!");
10216
10217 if (!(N0->getOpcode() == ISD::SELECT || N0->getOpcode() == ISD::VSELECT) ||
10218 !N0.hasOneUse())
10219 return SDValue();
10220
10221 SDValue Op1 = N0->getOperand(1);
10222 SDValue Op2 = N0->getOperand(2);
10223 if (!isCompatibleLoad(Op1, Opcode) || !isCompatibleLoad(Op2, Opcode))
10224 return SDValue();
10225
10226 auto ExtLoadOpcode = ISD::EXTLOAD;
10227 if (Opcode == ISD::SIGN_EXTEND)
10228 ExtLoadOpcode = ISD::SEXTLOAD;
10229 else if (Opcode == ISD::ZERO_EXTEND)
10230 ExtLoadOpcode = ISD::ZEXTLOAD;
10231
10232 LoadSDNode *Load1 = cast<LoadSDNode>(Op1);
10233 LoadSDNode *Load2 = cast<LoadSDNode>(Op2);
10234 if (!TLI.isLoadExtLegal(ExtLoadOpcode, VT, Load1->getMemoryVT()) ||
10235 !TLI.isLoadExtLegal(ExtLoadOpcode, VT, Load2->getMemoryVT()))
10236 return SDValue();
10237
10238 SDValue Ext1 = DAG.getNode(Opcode, DL, VT, Op1);
10239 SDValue Ext2 = DAG.getNode(Opcode, DL, VT, Op2);
10240 return DAG.getSelect(DL, VT, N0->getOperand(0), Ext1, Ext2);
10241}
10242
10243/// Try to fold a sext/zext/aext dag node into a ConstantSDNode or
10244/// a build_vector of constants.
10245/// This function is called by the DAGCombiner when visiting sext/zext/aext
10246/// dag nodes (see for example method DAGCombiner::visitSIGN_EXTEND).
10247/// Vector extends are not folded if operations are legal; this is to
10248/// avoid introducing illegal build_vector dag nodes.
10249static SDValue tryToFoldExtendOfConstant(SDNode *N, const TargetLowering &TLI,
10250 SelectionDAG &DAG, bool LegalTypes) {
10251 unsigned Opcode = N->getOpcode();
10252 SDValue N0 = N->getOperand(0);
10253 EVT VT = N->getValueType(0);
10254 SDLoc DL(N);
10255
10256  assert((Opcode == ISD::SIGN_EXTEND || Opcode == ISD::ZERO_EXTEND ||
10257          Opcode == ISD::ANY_EXTEND || Opcode == ISD::SIGN_EXTEND_VECTOR_INREG ||
10258          Opcode == ISD::ZERO_EXTEND_VECTOR_INREG)
10259         && "Expected EXTEND dag node in input!");
10260
10261 // fold (sext c1) -> c1
10262 // fold (zext c1) -> c1
10263 // fold (aext c1) -> c1
10264 if (isa<ConstantSDNode>(N0))
10265 return DAG.getNode(Opcode, DL, VT, N0);
10266
10267 // fold (sext (select cond, c1, c2)) -> (select cond, sext c1, sext c2)
10268 // fold (zext (select cond, c1, c2)) -> (select cond, zext c1, zext c2)
10269 // fold (aext (select cond, c1, c2)) -> (select cond, sext c1, sext c2)
10270 if (N0->getOpcode() == ISD::SELECT) {
10271 SDValue Op1 = N0->getOperand(1);
10272 SDValue Op2 = N0->getOperand(2);
10273 if (isa<ConstantSDNode>(Op1) && isa<ConstantSDNode>(Op2) &&
10274 (Opcode != ISD::ZERO_EXTEND || !TLI.isZExtFree(N0.getValueType(), VT))) {
10275 // For any_extend, choose sign extension of the constants to allow a
10276 // possible further transform to sign_extend_inreg, i.e.:
10277 //
10278 // t1: i8 = select t0, Constant:i8<-1>, Constant:i8<0>
10279 // t2: i64 = any_extend t1
10280 // -->
10281 // t3: i64 = select t0, Constant:i64<-1>, Constant:i64<0>
10282 // -->
10283 // t4: i64 = sign_extend_inreg t3
10284 unsigned FoldOpc = Opcode;
10285 if (FoldOpc == ISD::ANY_EXTEND)
10286 FoldOpc = ISD::SIGN_EXTEND;
10287 return DAG.getSelect(DL, VT, N0->getOperand(0),
10288 DAG.getNode(FoldOpc, DL, VT, Op1),
10289 DAG.getNode(FoldOpc, DL, VT, Op2));
10290 }
10291 }
10292
10293 // fold (sext (build_vector AllConstants)) -> (build_vector AllConstants)
10294 // fold (zext (build_vector AllConstants)) -> (build_vector AllConstants)
10295 // fold (aext (build_vector AllConstants)) -> (build_vector AllConstants)
10296 EVT SVT = VT.getScalarType();
10297 if (!(VT.isVector() && (!LegalTypes || TLI.isTypeLegal(SVT)) &&
10298 ISD::isBuildVectorOfConstantSDNodes(N0.getNode())))
10299 return SDValue();
10300
10301 // We can fold this node into a build_vector.
10302 unsigned VTBits = SVT.getSizeInBits();
10303 unsigned EVTBits = N0->getValueType(0).getScalarSizeInBits();
10304 SmallVector<SDValue, 8> Elts;
10305 unsigned NumElts = VT.getVectorNumElements();
10306
10307 // For zero-extensions, UNDEF elements are still guaranteed to have the
10308 // upper bits set to zero.
10309 bool IsZext =
10310 Opcode == ISD::ZERO_EXTEND || Opcode == ISD::ZERO_EXTEND_VECTOR_INREG;
10311
10312 for (unsigned i = 0; i != NumElts; ++i) {
10313 SDValue Op = N0.getOperand(i);
10314 if (Op.isUndef()) {
10315 Elts.push_back(IsZext ? DAG.getConstant(0, DL, SVT) : DAG.getUNDEF(SVT));
10316 continue;
10317 }
10318
10319 SDLoc DL(Op);
10320 // Get the constant value and, if needed, truncate it to the size of the type.
10321 // Nodes like build_vector might have constants wider than the scalar type.
10322 APInt C = cast<ConstantSDNode>(Op)->getAPIntValue().zextOrTrunc(EVTBits);
10323 if (Opcode == ISD::SIGN_EXTEND || Opcode == ISD::SIGN_EXTEND_VECTOR_INREG)
10324 Elts.push_back(DAG.getConstant(C.sext(VTBits), DL, SVT));
10325 else
10326 Elts.push_back(DAG.getConstant(C.zext(VTBits), DL, SVT));
10327 }
10328
10329 return DAG.getBuildVector(VT, DL, Elts);
10330}
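// Illustrative instance of the build_vector fold above (hypothetical lanes,
// not from the analyzed unit):
//   (v4i32 (zext (v4i8 (build_vector C0, C1, undef, C3))))
//     --> (v4i32 (build_vector zext(C0), zext(C1), 0, zext(C3)))
// For zext the undef lane becomes the constant 0 (its upper bits must be
// zero); for sext/aext it would stay undef.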
10331
10332// ExtendUsesToFormExtLoad - Try to extend the uses of a load to enable this:
10333// "fold ({s|z|a}ext (load x)) -> ({s|z|a}ext (truncate ({s|z|a}extload x)))"
10334// transformation. Returns true if the extensions are possible and the
10335// above-mentioned transformation is profitable.
10336static bool ExtendUsesToFormExtLoad(EVT VT, SDNode *N, SDValue N0,
10337 unsigned ExtOpc,
10338 SmallVectorImpl<SDNode *> &ExtendNodes,
10339 const TargetLowering &TLI) {
10340 bool HasCopyToRegUses = false;
10341 bool isTruncFree = TLI.isTruncateFree(VT, N0.getValueType());
10342 for (SDNode::use_iterator UI = N0.getNode()->use_begin(),
10343 UE = N0.getNode()->use_end();
10344 UI != UE; ++UI) {
10345 SDNode *User = *UI;
10346 if (User == N)
10347 continue;
10348 if (UI.getUse().getResNo() != N0.getResNo())
10349 continue;
10350 // FIXME: Only extend SETCC N, N and SETCC N, c for now.
10351 if (ExtOpc != ISD::ANY_EXTEND && User->getOpcode() == ISD::SETCC) {
10352 ISD::CondCode CC = cast<CondCodeSDNode>(User->getOperand(2))->get();
10353 if (ExtOpc == ISD::ZERO_EXTEND && ISD::isSignedIntSetCC(CC))
10354 // Sign bits will be lost after a zext.
10355 return false;
10356 bool Add = false;
10357 for (unsigned i = 0; i != 2; ++i) {
10358 SDValue UseOp = User->getOperand(i);
10359 if (UseOp == N0)
10360 continue;
10361 if (!isa<ConstantSDNode>(UseOp))
10362 return false;
10363 Add = true;
10364 }
10365 if (Add)
10366 ExtendNodes.push_back(User);
10367 continue;
10368 }
10369 // If truncates aren't free and there are users we can't
10370 // extend, it isn't worthwhile.
10371 if (!isTruncFree)
10372 return false;
10373 // Remember if this value is live-out.
10374 if (User->getOpcode() == ISD::CopyToReg)
10375 HasCopyToRegUses = true;
10376 }
10377
10378 if (HasCopyToRegUses) {
10379 bool BothLiveOut = false;
10380 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
10381 UI != UE; ++UI) {
10382 SDUse &Use = UI.getUse();
10383 if (Use.getResNo() == 0 && Use.getUser()->getOpcode() == ISD::CopyToReg) {
10384 BothLiveOut = true;
10385 break;
10386 }
10387 }
10388 if (BothLiveOut)
10389 // Both unextended and extended values are live out. There had better be
10390 // a good reason for the transformation.
10391 return ExtendNodes.size();
10392 }
10393 return true;
10394}
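// Illustrative walk-through (hypothetical DAG):
//   t0: i8  = load x
//   t1: i32 = sign_extend t0              (this is N)
//   t2: i1  = setcc t0, Constant:i8<5>, setlt
// The setcc user compares the load against a constant, so it is recorded in
// ExtendNodes; every other user must tolerate a free truncate of the widened
// value, otherwise the ext-load transform is rejected as unprofitable.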
10395
10396void DAGCombiner::ExtendSetCCUses(const SmallVectorImpl<SDNode *> &SetCCs,
10397 SDValue OrigLoad, SDValue ExtLoad,
10398 ISD::NodeType ExtType) {
10399 // Extend SetCC uses if necessary.
10400 SDLoc DL(ExtLoad);
10401 for (SDNode *SetCC : SetCCs) {
10402 SmallVector<SDValue, 4> Ops;
10403
10404 for (unsigned j = 0; j != 2; ++j) {
10405 SDValue SOp = SetCC->getOperand(j);
10406 if (SOp == OrigLoad)
10407 Ops.push_back(ExtLoad);
10408 else
10409 Ops.push_back(DAG.getNode(ExtType, DL, ExtLoad->getValueType(0), SOp));
10410 }
10411
10412 Ops.push_back(SetCC->getOperand(2));
10413 CombineTo(SetCC, DAG.getNode(ISD::SETCC, DL, SetCC->getValueType(0), Ops));
10414 }
10415}
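// Illustrative rewrite (hypothetical DAG): with an i8 load widened to an
// i32 zextload, a recorded user
//   t2: i1 = setcc t0, Constant:i8<7>, seteq
// is rebuilt as
//   t2: i1 = setcc ExtLoad, zero_extend(Constant:i8<7>), seteq
// i.e. the load operand is swapped for ExtLoad and every other operand is
// re-extended with ExtType so both sides have the wide type.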
10416
10417// FIXME: Bring more similar combines here, common to sext/zext (maybe aext?).
10418SDValue DAGCombiner::CombineExtLoad(SDNode *N) {
10419 SDValue N0 = N->getOperand(0);
10420 EVT DstVT = N->getValueType(0);
10421 EVT SrcVT = N0.getValueType();
10422
10423 assert((N->getOpcode() == ISD::SIGN_EXTEND ||
10424         N->getOpcode() == ISD::ZERO_EXTEND) &&
10425        "Unexpected node type (not an extend)!");
10426
10427 // fold (sext (load x)) to multiple smaller sextloads; same for zext.
10428 // For example, on a target with legal v4i32, but illegal v8i32, turn:
10429 // (v8i32 (sext (v8i16 (load x))))
10430 // into:
10431 // (v8i32 (concat_vectors (v4i32 (sextload x)),
10432 // (v4i32 (sextload (x + 16)))))
10433 // Where uses of the original load, i.e.:
10434 // (v8i16 (load x))
10435 // are replaced with:
10436 // (v8i16 (truncate
10437 // (v8i32 (concat_vectors (v4i32 (sextload x)),
10438 // (v4i32 (sextload (x + 16)))))))
10439 //
10440 // This combine is only applicable to illegal, but splittable, vectors.
10441 // All legal types, and illegal non-vector types, are handled elsewhere.
10442 // This combine is controlled by TargetLowering::isVectorLoadExtDesirable.
10443 //
10444 if (N0->getOpcode() != ISD::LOAD)
10445 return SDValue();
10446
10447 LoadSDNode *LN0 = cast<LoadSDNode>(N0);
10448
10449 if (!ISD::isNON_EXTLoad(LN0) || !ISD::isUNINDEXEDLoad(LN0) ||
10450 !N0.hasOneUse() || !LN0->isSimple() ||
10451 !DstVT.isVector() || !DstVT.isPow2VectorType() ||
10452 !TLI.isVectorLoadExtDesirable(SDValue(N, 0)))
10453 return SDValue();
10454
10455 SmallVector<SDNode *, 4> SetCCs;
10456 if (!ExtendUsesToFormExtLoad(DstVT, N, N0, N->getOpcode(), SetCCs, TLI))
10457 return SDValue();
10458
10459 ISD::LoadExtType ExtType =
10460 N->getOpcode() == ISD::SIGN_EXTEND ? ISD::SEXTLOAD : ISD::ZEXTLOAD;
10461
10462 // Try to split the vector types to get down to legal types.
10463 EVT SplitSrcVT = SrcVT;
10464 EVT SplitDstVT = DstVT;
10465 while (!TLI.isLoadExtLegalOrCustom(ExtType, SplitDstVT, SplitSrcVT) &&
10466 SplitSrcVT.getVectorNumElements() > 1) {
10467 SplitDstVT = DAG.GetSplitDestVTs(SplitDstVT).first;
10468 SplitSrcVT = DAG.GetSplitDestVTs(SplitSrcVT).first;
10469 }
10470
10471 if (!TLI.isLoadExtLegalOrCustom(ExtType, SplitDstVT, SplitSrcVT))
10472 return SDValue();
10473
10474 assert(!DstVT.isScalableVector() && "Unexpected scalable vector type");
10475
10476 SDLoc DL(N);
10477 const unsigned NumSplits =
10478 DstVT.getVectorNumElements() / SplitDstVT.getVectorNumElements();
10479 const unsigned Stride = SplitSrcVT.getStoreSize();
10480 SmallVector<SDValue, 4> Loads;
10481 SmallVector<SDValue, 4> Chains;
10482
10483 SDValue BasePtr = LN0->getBasePtr();
10484 for (unsigned Idx = 0; Idx < NumSplits; Idx++) {
10485 const unsigned Offset = Idx * Stride;
10486 const Align Align = commonAlignment(LN0->getAlign(), Offset);
10487
10488 SDValue SplitLoad = DAG.getExtLoad(
10489 ExtType, SDLoc(LN0), SplitDstVT, LN0->getChain(), BasePtr,
10490 LN0->getPointerInfo().getWithOffset(Offset), SplitSrcVT, Align,
10491 LN0->getMemOperand()->getFlags(), LN0->getAAInfo());
10492
10493 BasePtr = DAG.getMemBasePlusOffset(BasePtr, TypeSize::Fixed(Stride), DL);
10494
10495 Loads.push_back(SplitLoad.getValue(0));
10496 Chains.push_back(SplitLoad.getValue(1));
10497 }
10498
10499 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
10500 SDValue NewValue = DAG.getNode(ISD::CONCAT_VECTORS, DL, DstVT, Loads);
10501
10502 // Simplify the new TokenFactor chain.
10503 AddToWorklist(NewChain.getNode());
10504
10505 CombineTo(N, NewValue);
10506
10507 // Replace uses of the original load (before extension)
10508 // with a truncate of the concatenated sextloaded vectors.
10509 SDValue Trunc =
10510 DAG.getNode(ISD::TRUNCATE, SDLoc(N0), N0.getValueType(), NewValue);
10511 ExtendSetCCUses(SetCCs, N0, NewValue, (ISD::NodeType)N->getOpcode());
10512 CombineTo(N0.getNode(), Trunc, NewChain);
10513 return SDValue(N, 0); // Return N so it doesn't get rechecked!
10514}
10515
10516// fold (zext (and/or/xor (shl/shr (load x), cst), cst)) ->
10517// (and/or/xor (shl/shr (zextload x), (zext cst)), (zext cst))
10518SDValue DAGCombiner::CombineZExtLogicopShiftLoad(SDNode *N) {
10519 assert(N->getOpcode() == ISD::ZERO_EXTEND);
10520 EVT VT = N->getValueType(0);
10521 EVT OrigVT = N->getOperand(0).getValueType();
10522 if (TLI.isZExtFree(OrigVT, VT))
10523 return SDValue();
10524
10525 // and/or/xor
10526 SDValue N0 = N->getOperand(0);
10527 if (!(N0.getOpcode() == ISD::AND || N0.getOpcode() == ISD::OR ||
10528 N0.getOpcode() == ISD::XOR) ||
10529 N0.getOperand(1).getOpcode() != ISD::Constant ||
10530 (LegalOperations && !TLI.isOperationLegal(N0.getOpcode(), VT)))
10531 return SDValue();
10532
10533 // shl/shr
10534 SDValue N1 = N0->getOperand(0);
10535 if (!(N1.getOpcode() == ISD::SHL || N1.getOpcode() == ISD::SRL) ||
10536 N1.getOperand(1).getOpcode() != ISD::Constant ||
10537 (LegalOperations && !TLI.isOperationLegal(N1.getOpcode(), VT)))
10538 return SDValue();
10539
10540 // load
10541 if (!isa<LoadSDNode>(N1.getOperand(0)))
10542 return SDValue();
10543 LoadSDNode *Load = cast<LoadSDNode>(N1.getOperand(0));
10544 EVT MemVT = Load->getMemoryVT();
10545 if (!TLI.isLoadExtLegal(ISD::ZEXTLOAD, VT, MemVT) ||
10546 Load->getExtensionType() == ISD::SEXTLOAD || Load->isIndexed())
10547 return SDValue();
10548
10549
10550 // If the shift op is SHL, the logic op must be AND, otherwise the result
10551 // will be wrong.
10552 if (N1.getOpcode() == ISD::SHL && N0.getOpcode() != ISD::AND)
10553 return SDValue();
10554
10555 if (!N0.hasOneUse() || !N1.hasOneUse())
10556 return SDValue();
10557
10558 SmallVector<SDNode*, 4> SetCCs;
10559 if (!ExtendUsesToFormExtLoad(VT, N1.getNode(), N1.getOperand(0),
10560 ISD::ZERO_EXTEND, SetCCs, TLI))
10561 return SDValue();
10562
10563 // Actually do the transformation.
10564 SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(Load), VT,
10565 Load->getChain(), Load->getBasePtr(),
10566 Load->getMemoryVT(), Load->getMemOperand());
10567
10568 SDLoc DL1(N1);
10569 SDValue Shift = DAG.getNode(N1.getOpcode(), DL1, VT, ExtLoad,
10570 N1.getOperand(1));
10571
10572 APInt Mask = N0.getConstantOperandAPInt(1).zext(VT.getSizeInBits());
10573 SDLoc DL0(N0);
10574 SDValue And = DAG.getNode(N0.getOpcode(), DL0, VT, Shift,
10575 DAG.getConstant(Mask, DL0, VT));
10576
10577 ExtendSetCCUses(SetCCs, N1.getOperand(0), ExtLoad, ISD::ZERO_EXTEND);
10578 CombineTo(N, And);
10579 if (SDValue(Load, 0).hasOneUse()) {
10580 DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), ExtLoad.getValue(1));
10581 } else {
10582 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(Load),
10583 Load->getValueType(0), ExtLoad);
10584 CombineTo(Load, Trunc, ExtLoad.getValue(1));
10585 }
10586
10587 // N0 is dead at this point.
10588 recursivelyDeleteUnusedNodes(N0.getNode());
10589
10590 return SDValue(N,0); // Return N so it doesn't get rechecked!
10591}
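// Illustrative instance of the fold above (hypothetical types):
//   (i64 (zext (and (srl (i32 (load x)), 4), 255)))
//     --> (and (srl (i64 (zextload x)), 4), 255)
// The i32 load becomes an i64 zextload, the shift is rebuilt at i64 with the
// same amount, and the AND mask is zero-extended to i64. For SHL the logic
// op must be AND: only the zero-extended AND mask clears the bits that the
// wide shift moves above the original width.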
10592
10593/// If we're narrowing or widening the result of a vector select and the final
10594/// size is the same size as a setcc (compare) feeding the select, then try to
10595/// apply the cast operation to the select's operands because matching vector
10596/// sizes for a select condition and other operands should be more efficient.
10597SDValue DAGCombiner::matchVSelectOpSizesWithSetCC(SDNode *Cast) {
10598 unsigned CastOpcode = Cast->getOpcode();
10599 assert((CastOpcode == ISD::SIGN_EXTEND || CastOpcode == ISD::ZERO_EXTEND ||
10600         CastOpcode == ISD::TRUNCATE || CastOpcode == ISD::FP_EXTEND ||
10601         CastOpcode == ISD::FP_ROUND) &&
10602        "Unexpected opcode for vector select narrowing/widening");
10603
10604 // We only do this transform before legal ops because the pattern may be
10605 // obfuscated by target-specific operations after legalization. Do not create
10606 // an illegal select op, however, because that may be difficult to lower.
10607 EVT VT = Cast->getValueType(0);
10608 if (LegalOperations || !TLI.isOperationLegalOrCustom(ISD::VSELECT, VT))
10609 return SDValue();
10610
10611 SDValue VSel = Cast->getOperand(0);
10612 if (VSel.getOpcode() != ISD::VSELECT || !VSel.hasOneUse() ||
10613 VSel.getOperand(0).getOpcode() != ISD::SETCC)
10614 return SDValue();
10615
10616 // Does the setcc have the same vector size as the casted select?
10617 SDValue SetCC = VSel.getOperand(0);
10618 EVT SetCCVT = getSetCCResultType(SetCC.getOperand(0).getValueType());
10619 if (SetCCVT.getSizeInBits() != VT.getSizeInBits())
10620 return SDValue();
10621
10622 // cast (vsel (setcc X), A, B) --> vsel (setcc X), (cast A), (cast B)
10623 SDValue A = VSel.getOperand(1);
10624 SDValue B = VSel.getOperand(2);
10625 SDValue CastA, CastB;
10626 SDLoc DL(Cast);
10627 if (CastOpcode == ISD::FP_ROUND) {
10628 // FP_ROUND (fptrunc) has an extra flag operand to pass along.
10629 CastA = DAG.getNode(CastOpcode, DL, VT, A, Cast->getOperand(1));
10630 CastB = DAG.getNode(CastOpcode, DL, VT, B, Cast->getOperand(1));
10631 } else {
10632 CastA = DAG.getNode(CastOpcode, DL, VT, A);
10633 CastB = DAG.getNode(CastOpcode, DL, VT, B);
10634 }
10635 return DAG.getNode(ISD::VSELECT, DL, VT, SetCC, CastA, CastB);
10636}
10637
10638// fold ([s|z]ext ([s|z]extload x)) -> ([s|z]ext (truncate ([s|z]extload x)))
10639// fold ([s|z]ext ( extload x)) -> ([s|z]ext (truncate ([s|z]extload x)))
10640static SDValue tryToFoldExtOfExtload(SelectionDAG &DAG, DAGCombiner &Combiner,
10641 const TargetLowering &TLI, EVT VT,
10642 bool LegalOperations, SDNode *N,
10643 SDValue N0, ISD::LoadExtType ExtLoadType) {
10644 SDNode *N0Node = N0.getNode();
10645 bool isAExtLoad = (ExtLoadType == ISD::SEXTLOAD) ? ISD::isSEXTLoad(N0Node)
10646 : ISD::isZEXTLoad(N0Node);
10647 if ((!isAExtLoad && !ISD::isEXTLoad(N0Node)) ||
10648 !ISD::isUNINDEXEDLoad(N0Node) || !N0.hasOneUse())
10649 return SDValue();
10650
10651 LoadSDNode *LN0 = cast<LoadSDNode>(N0);
10652 EVT MemVT = LN0->getMemoryVT();
10653 if ((LegalOperations || !LN0->isSimple() ||
10654 VT.isVector()) &&
10655 !TLI.isLoadExtLegal(ExtLoadType, VT, MemVT))
10656 return SDValue();
10657
10658 SDValue ExtLoad =
10659 DAG.getExtLoad(ExtLoadType, SDLoc(LN0), VT, LN0->getChain(),
10660 LN0->getBasePtr(), MemVT, LN0->getMemOperand());
10661 Combiner.CombineTo(N, ExtLoad);
10662 DAG.ReplaceAllUsesOfValueWith(SDValue(LN0, 1), ExtLoad.getValue(1));
10663 if (LN0->use_empty())
10664 Combiner.recursivelyDeleteUnusedNodes(LN0);
10665 return SDValue(N, 0); // Return N so it doesn't get rechecked!
10666}
10667
10668// fold ([s|z]ext (load x)) -> ([s|z]ext (truncate ([s|z]extload x)))
10669// Only generate vector extloads when 1) they're legal, and 2) they are
10670// deemed desirable by the target.
10671static SDValue tryToFoldExtOfLoad(SelectionDAG &DAG, DAGCombiner &Combiner,
10672 const TargetLowering &TLI, EVT VT,
10673 bool LegalOperations, SDNode *N, SDValue N0,
10674 ISD::LoadExtType ExtLoadType,
10675 ISD::NodeType ExtOpc) {
10676 if (!ISD::isNON_EXTLoad(N0.getNode()) ||
10677 !ISD::isUNINDEXEDLoad(N0.getNode()) ||
10678 ((LegalOperations || VT.isVector() ||
10679 !cast<LoadSDNode>(N0)->isSimple()) &&
10680 !TLI.isLoadExtLegal(ExtLoadType, VT, N0.getValueType())))
10681 return {};
10682
10683 bool DoXform = true;
10684 SmallVector<SDNode *, 4> SetCCs;
10685 if (!N0.hasOneUse())
10686 DoXform = ExtendUsesToFormExtLoad(VT, N, N0, ExtOpc, SetCCs, TLI);
10687 if (VT.isVector())
10688 DoXform &= TLI.isVectorLoadExtDesirable(SDValue(N, 0));
10689 if (!DoXform)
10690 return {};
10691
10692 LoadSDNode *LN0 = cast<LoadSDNode>(N0);
10693 SDValue ExtLoad = DAG.getExtLoad(ExtLoadType, SDLoc(LN0), VT, LN0->getChain(),
10694 LN0->getBasePtr(), N0.getValueType(),
10695 LN0->getMemOperand());
10696 Combiner.ExtendSetCCUses(SetCCs, N0, ExtLoad, ExtOpc);
10697 // If the load value is used only by N, replace it via CombineTo N.
10698 bool NoReplaceTrunc = SDValue(LN0, 0).hasOneUse();
10699 Combiner.CombineTo(N, ExtLoad);
10700 if (NoReplaceTrunc) {
10701 DAG.ReplaceAllUsesOfValueWith(SDValue(LN0, 1), ExtLoad.getValue(1));
10702 Combiner.recursivelyDeleteUnusedNodes(LN0);
10703 } else {
10704 SDValue Trunc =
10705 DAG.getNode(ISD::TRUNCATE, SDLoc(N0), N0.getValueType(), ExtLoad);
10706 Combiner.CombineTo(LN0, Trunc, ExtLoad.getValue(1));
10707 }
10708 return SDValue(N, 0); // Return N so it doesn't get rechecked!
10709}
10710
10711static SDValue tryToFoldExtOfMaskedLoad(SelectionDAG &DAG,
10712 const TargetLowering &TLI, EVT VT,
10713 SDNode *N, SDValue N0,
10714 ISD::LoadExtType ExtLoadType,
10715 ISD::NodeType ExtOpc) {
10716 if (!N0.hasOneUse())
10717 return SDValue();
10718
10719 MaskedLoadSDNode *Ld = dyn_cast<MaskedLoadSDNode>(N0);
10720 if (!Ld || Ld->getExtensionType() != ISD::NON_EXTLOAD)
10721 return SDValue();
10722
10723 if (!TLI.isLoadExtLegal(ExtLoadType, VT, Ld->getValueType(0)))
10724 return SDValue();
10725
10726 if (!TLI.isVectorLoadExtDesirable(SDValue(N, 0)))
10727 return SDValue();
10728
10729 SDLoc dl(Ld);
10730 SDValue PassThru = DAG.getNode(ExtOpc, dl, VT, Ld->getPassThru());
10731 SDValue NewLoad = DAG.getMaskedLoad(
10732 VT, dl, Ld->getChain(), Ld->getBasePtr(), Ld->getOffset(), Ld->getMask(),
10733 PassThru, Ld->getMemoryVT(), Ld->getMemOperand(), Ld->getAddressingMode(),
10734 ExtLoadType, Ld->isExpandingLoad());
10735 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), SDValue(NewLoad.getNode(), 1));
10736 return NewLoad;
10737}
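// Sketch of the masked-load case above (hypothetical types):
//   (v4i32 (zext (v4i16 (masked_load p, off, mask, passthru))))
//     --> (v4i32 (masked_load p, off, mask, (zext passthru)))   as ZEXTLOAD
// The pass-through value is extended the same way so that disabled lanes
// yield the same wide result as before.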
10738
10739static SDValue foldExtendedSignBitTest(SDNode *N, SelectionDAG &DAG,
10740 bool LegalOperations) {
10741 assert((N->getOpcode() == ISD::SIGN_EXTEND ||
10742         N->getOpcode() == ISD::ZERO_EXTEND) && "Expected sext or zext");
10743
10744 SDValue SetCC = N->getOperand(0);
10745 if (LegalOperations || SetCC.getOpcode() != ISD::SETCC ||
10746 !SetCC.hasOneUse() || SetCC.getValueType() != MVT::i1)
10747 return SDValue();
10748
10749 SDValue X = SetCC.getOperand(0);
10750 SDValue Ones = SetCC.getOperand(1);
10751 ISD::CondCode CC = cast<CondCodeSDNode>(SetCC.getOperand(2))->get();
10752 EVT VT = N->getValueType(0);
10753 EVT XVT = X.getValueType();
10754 // setge X, C is canonicalized to setgt, so we do not need to match that
10755 // pattern. The setlt sibling is folded in SimplifySelectCC() because it does
10756 // not require the 'not' op.
10757 if (CC == ISD::SETGT && isAllOnesConstant(Ones) && VT == XVT) {
10758 // Invert and smear/shift the sign bit:
10759 // sext i1 (setgt iN X, -1) --> sra (not X), (N - 1)
10760 // zext i1 (setgt iN X, -1) --> srl (not X), (N - 1)
10761 SDLoc DL(N);
10762 unsigned ShCt = VT.getSizeInBits() - 1;
10763 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10764 if (!TLI.shouldAvoidTransformToShift(VT, ShCt)) {
10765 SDValue NotX = DAG.getNOT(DL, X, VT);
10766 SDValue ShiftAmount = DAG.getConstant(ShCt, DL, VT);
10767 auto ShiftOpcode =
10768 N->getOpcode() == ISD::SIGN_EXTEND ? ISD::SRA : ISD::SRL;
10769 return DAG.getNode(ShiftOpcode, DL, VT, NotX, ShiftAmount);
10770 }
10771 }
10772 return SDValue();
10773}
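// Worked example of the sign-bit smear above (hypothetical i32 input):
//   sext i1 (setgt i32 X, -1) --> sra (not X), 31
// If X >= 0, (not X) has its sign bit set and the arithmetic shift produces
// all ones (-1); if X < 0 it produces 0, matching the sext of the compare.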
10774
10775SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
10776 SDValue N0 = N->getOperand(0);
10777 EVT VT = N->getValueType(0);
10778 SDLoc DL(N);
10779
10780 if (SDValue Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes))
10781 return Res;
10782
10783 // fold (sext (sext x)) -> (sext x)
10784 // fold (sext (aext x)) -> (sext x)
10785 if (N0.getOpcode() == ISD::SIGN_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND)
10786 return DAG.getNode(ISD::SIGN_EXTEND, DL, VT, N0.getOperand(0));
10787
10788 if (N0.getOpcode() == ISD::TRUNCATE) {
10789 // fold (sext (truncate (load x))) -> (sext (smaller load x))
10790 // fold (sext (truncate (srl (load x), c))) -> (sext (smaller load (x+c/n)))
10791 if (SDValue NarrowLoad = ReduceLoadWidth(N0.getNode())) {
10792 SDNode *oye = N0.getOperand(0).getNode();
10793 if (NarrowLoad.getNode() != N0.getNode()) {
10794 CombineTo(N0.getNode(), NarrowLoad);
10795 // CombineTo deleted the truncate, if needed, but not what's under it.
10796 AddToWorklist(oye);
10797 }
10798 return SDValue(N, 0); // Return N so it doesn't get rechecked!
10799 }
10800
10801 // See if the value being truncated is already sign extended. If so, just
10802 // eliminate the trunc/sext pair.
10803 SDValue Op = N0.getOperand(0);
10804 unsigned OpBits = Op.getScalarValueSizeInBits();
10805 unsigned MidBits = N0.getScalarValueSizeInBits();
10806 unsigned DestBits = VT.getScalarSizeInBits();
10807 unsigned NumSignBits = DAG.ComputeNumSignBits(Op);
10808
10809 if (OpBits == DestBits) {
10810 // Op is i32, Mid is i8, and Dest is i32. If Op has more than 24 sign
10811 // bits, it is already sign-extended as required and can be returned as-is.
10812 if (NumSignBits > DestBits-MidBits)
10813 return Op;
10814 } else if (OpBits < DestBits) {
10815 // Op is i32, Mid is i8, and Dest is i64. If Op has more than 24 sign
10816 // bits, just sext from i32.
10817 if (NumSignBits > OpBits-MidBits)
10818 return DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Op);
10819 } else {
10820 // Op is i64, Mid is i8, and Dest is i32. If Op has more than 56 sign
10821 // bits, just truncate to i32.
10822 if (NumSignBits > OpBits-MidBits)
10823 return DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
10824 }
10825
10826 // fold (sext (truncate x)) -> (sextinreg x).
10827 if (!LegalOperations || TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG,
10828 N0.getValueType())) {
10829 if (OpBits < DestBits)
10830 Op = DAG.getNode(ISD::ANY_EXTEND, SDLoc(N0), VT, Op);
10831 else if (OpBits > DestBits)
10832 Op = DAG.getNode(ISD::TRUNCATE, SDLoc(N0), VT, Op);
10833 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Op,
10834 DAG.getValueType(N0.getValueType()));
10835 }
10836 }
10837
10838 // Try to simplify (sext (load x)).
10839 if (SDValue foldedExt =
10840 tryToFoldExtOfLoad(DAG, *this, TLI, VT, LegalOperations, N, N0,
10841 ISD::SEXTLOAD, ISD::SIGN_EXTEND))
10842 return foldedExt;
10843
10844 if (SDValue foldedExt =
10845 tryToFoldExtOfMaskedLoad(DAG, TLI, VT, N, N0, ISD::SEXTLOAD,
10846 ISD::SIGN_EXTEND))
10847 return foldedExt;
10848
10849 // fold (sext (load x)) to multiple smaller sextloads.
10850 // Only on illegal but splittable vectors.
10851 if (SDValue ExtLoad = CombineExtLoad(N))
10852 return ExtLoad;
10853
10854 // Try to simplify (sext (sextload x)).
10855 if (SDValue foldedExt = tryToFoldExtOfExtload(
10856 DAG, *this, TLI, VT, LegalOperations, N, N0, ISD::SEXTLOAD))
10857 return foldedExt;
10858
10859 // fold (sext (and/or/xor (load x), cst)) ->
10860 // (and/or/xor (sextload x), (sext cst))
10861 if ((N0.getOpcode() == ISD::AND || N0.getOpcode() == ISD::OR ||
10862 N0.getOpcode() == ISD::XOR) &&
10863 isa<LoadSDNode>(N0.getOperand(0)) &&
10864 N0.getOperand(1).getOpcode() == ISD::Constant &&
10865 (!LegalOperations && TLI.isOperationLegal(N0.getOpcode(), VT))) {
10866 LoadSDNode *LN00 = cast<LoadSDNode>(N0.getOperand(0));
10867 EVT MemVT = LN00->getMemoryVT();
10868 if (TLI.isLoadExtLegal(ISD::SEXTLOAD, VT, MemVT) &&
10869 LN00->getExtensionType() != ISD::ZEXTLOAD && LN00->isUnindexed()) {
10870 SmallVector<SDNode*, 4> SetCCs;
10871 bool DoXform = ExtendUsesToFormExtLoad(VT, N0.getNode(), N0.getOperand(0),
10872 ISD::SIGN_EXTEND, SetCCs, TLI);
10873 if (DoXform) {
10874 SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, SDLoc(LN00), VT,
10875 LN00->getChain(), LN00->getBasePtr(),
10876 LN00->getMemoryVT(),
10877 LN00->getMemOperand());
10878 APInt Mask = N0.getConstantOperandAPInt(1).sext(VT.getSizeInBits());
10879 SDValue And = DAG.getNode(N0.getOpcode(), DL, VT,
10880 ExtLoad, DAG.getConstant(Mask, DL, VT));
10881 ExtendSetCCUses(SetCCs, N0.getOperand(0), ExtLoad, ISD::SIGN_EXTEND);
10882 bool NoReplaceTruncAnd = !N0.hasOneUse();
10883 bool NoReplaceTrunc = SDValue(LN00, 0).hasOneUse();
10884 CombineTo(N, And);
10885 // If N0 has multiple uses, change other uses as well.
10886 if (NoReplaceTruncAnd) {
10887 SDValue TruncAnd =
10888 DAG.getNode(ISD::TRUNCATE, DL, N0.getValueType(), And);
10889 CombineTo(N0.getNode(), TruncAnd);
10890 }
10891 if (NoReplaceTrunc) {
10892 DAG.ReplaceAllUsesOfValueWith(SDValue(LN00, 1), ExtLoad.getValue(1));
10893 } else {
10894 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(LN00),
10895 LN00->getValueType(0), ExtLoad);
10896 CombineTo(LN00, Trunc, ExtLoad.getValue(1));
10897 }
10898 return SDValue(N,0); // Return N so it doesn't get rechecked!
10899 }
10900 }
10901 }
10902
10903 if (SDValue V = foldExtendedSignBitTest(N, DAG, LegalOperations))
10904 return V;
10905
10906 if (N0.getOpcode() == ISD::SETCC) {
10907 SDValue N00 = N0.getOperand(0);
10908 SDValue N01 = N0.getOperand(1);
10909 ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
10910 EVT N00VT = N00.getValueType();
10911
10912 // sext(setcc) -> sext_in_reg(vsetcc) for vectors.
10913 // Only do this before legalize for now.
10914 if (VT.isVector() && !LegalOperations &&
10915 TLI.getBooleanContents(N00VT) ==
10916 TargetLowering::ZeroOrNegativeOneBooleanContent) {
10917 // On some architectures (such as SSE/NEON/etc) the SETCC result type is
10918 // of the same size as the compared operands. Only optimize sext(setcc())
10919 // if this is the case.
10920 EVT SVT = getSetCCResultType(N00VT);
10921
10922 // If we already have the desired type, don't change it.
10923 if (SVT != N0.getValueType()) {
10924 // We know that the # elements of the results is the same as the
10925 // # elements of the compare (and the # elements of the compare result
10926 // for that matter). Check to see that they are the same size. If so,
10927 // we know that the element size of the sext'd result matches the
10928 // element size of the compare operands.
10929 if (VT.getSizeInBits() == SVT.getSizeInBits())
10930 return DAG.getSetCC(DL, VT, N00, N01, CC);
10931
10932 // If the desired elements are smaller or larger than the source
10933 // elements, we can use a matching integer vector type and then
10934 // truncate/sign extend.
10935 EVT MatchingVecType = N00VT.changeVectorElementTypeToInteger();
10936 if (SVT == MatchingVecType) {
10937 SDValue VsetCC = DAG.getSetCC(DL, MatchingVecType, N00, N01, CC);
10938 return DAG.getSExtOrTrunc(VsetCC, DL, VT);
10939 }
10940 }
10941 }
10942
10943 // sext(setcc x, y, cc) -> (select (setcc x, y, cc), T, 0)
10944 // Here, T can be 1 or -1, depending on the type of the setcc and
10945 // getBooleanContents().
10946 unsigned SetCCWidth = N0.getScalarValueSizeInBits();
10947
10948 // To determine the "true" side of the select, we need to know the high bit
10949 // of the value returned by the setcc if it evaluates to true.
10950 // If the type of the setcc is i1, then the true case of the select is just
10951 // sext(i1 1), that is, -1.
10952 // If the type of the setcc is larger (say, i8) then the value of the high
10953 // bit depends on getBooleanContents(), so ask TLI for a real "true" value
10954 // of the appropriate width.
10955 SDValue ExtTrueVal = (SetCCWidth == 1)
10956 ? DAG.getAllOnesConstant(DL, VT)
10957 : DAG.getBoolConstant(true, DL, VT, N00VT);
10958 SDValue Zero = DAG.getConstant(0, DL, VT);
10959 if (SDValue SCC =
10960 SimplifySelectCC(DL, N00, N01, ExtTrueVal, Zero, CC, true))
10961 return SCC;
10962
10963 if (!VT.isVector() && !TLI.convertSelectOfConstantsToMath(VT)) {
10964 EVT SetCCVT = getSetCCResultType(N00VT);
10965 // Don't do this transform for i1 because there's a select transform
10966 // that would reverse it.
10967 // TODO: We should not do this transform at all without a target hook
10968 // because a sext is likely cheaper than a select?
10969 if (SetCCVT.getScalarSizeInBits() != 1 &&
10970 (!LegalOperations || TLI.isOperationLegal(ISD::SETCC, N00VT))) {
10971 SDValue SetCC = DAG.getSetCC(DL, SetCCVT, N00, N01, CC);
10972 return DAG.getSelect(DL, VT, SetCC, ExtTrueVal, Zero);
10973 }
10974 }
10975 }
10976
10977 // fold (sext x) -> (zext x) if the sign bit is known zero.
10978 if ((!LegalOperations || TLI.isOperationLegal(ISD::ZERO_EXTEND, VT)) &&
10979 DAG.SignBitIsZero(N0))
10980 return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0);
10981
10982 if (SDValue NewVSel = matchVSelectOpSizesWithSetCC(N))
10983 return NewVSel;
10984
10985 // Eliminate this sign extend by doing a negation in the destination type:
10986 // sext i32 (0 - (zext i8 X to i32)) to i64 --> 0 - (zext i8 X to i64)
10987 if (N0.getOpcode() == ISD::SUB && N0.hasOneUse() &&
10988 isNullOrNullSplat(N0.getOperand(0)) &&
10989 N0.getOperand(1).getOpcode() == ISD::ZERO_EXTEND &&
10990 TLI.isOperationLegalOrCustom(ISD::SUB, VT)) {
10991 SDValue Zext = DAG.getZExtOrTrunc(N0.getOperand(1).getOperand(0), DL, VT);
10992 return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Zext);
10993 }
10994 // Eliminate this sign extend by doing a decrement in the destination type:
10995 // sext i32 ((zext i8 X to i32) + (-1)) to i64 --> (zext i8 X to i64) + (-1)
10996 if (N0.getOpcode() == ISD::ADD && N0.hasOneUse() &&
10997 isAllOnesOrAllOnesSplat(N0.getOperand(1)) &&
10998 N0.getOperand(0).getOpcode() == ISD::ZERO_EXTEND &&
10999 TLI.isOperationLegalOrCustom(ISD::ADD, VT)) {
11000 SDValue Zext = DAG.getZExtOrTrunc(N0.getOperand(0).getOperand(0), DL, VT);
11001 return DAG.getNode(ISD::ADD, DL, VT, Zext, DAG.getAllOnesConstant(DL, VT));
11002 }
11003
11004 // fold sext (not i1 X) -> add (zext i1 X), -1
11005 // TODO: This could be extended to handle bool vectors.
11006 if (N0.getValueType() == MVT::i1 && isBitwiseNot(N0) && N0.hasOneUse() &&
11007 (!LegalOperations || (TLI.isOperationLegal(ISD::ZERO_EXTEND, VT) &&
11008 TLI.isOperationLegal(ISD::ADD, VT)))) {
11009 // If we can eliminate the 'not', the sext form should be better
11010 if (SDValue NewXor = visitXOR(N0.getNode())) {
11011 // Returning N0 is a form of in-visit replacement that may have
11012 // invalidated N0.
11013 if (NewXor.getNode() == N0.getNode()) {
11014 // Return SDValue here as the xor should have already been replaced in
11015 // this sext.
11016 return SDValue();
11017 } else {
11018 // Return a new sext with the new xor.
11019 return DAG.getNode(ISD::SIGN_EXTEND, DL, VT, NewXor);
11020 }
11021 }
11022
11023 SDValue Zext = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0.getOperand(0));
11024 return DAG.getNode(ISD::ADD, DL, VT, Zext, DAG.getAllOnesConstant(DL, VT));
11025 }
11026
11027 if (SDValue Res = tryToFoldExtendSelectLoad(N, TLI, DAG))
11028 return Res;
11029
11030 return SDValue();
11031}
11032
11033// isTruncateOf - If N is a truncate of some other value, return true and
11034// record the value being truncated in Op and which of Op's bits are known
11035// zero/one in Known. This function computes KnownBits itself to avoid a
11036// duplicated call to computeKnownBits in the caller.
11037static bool isTruncateOf(SelectionDAG &DAG, SDValue N, SDValue &Op,
11038 KnownBits &Known) {
11039 if (N->getOpcode() == ISD::TRUNCATE) {
11040 Op = N->getOperand(0);
11041 Known = DAG.computeKnownBits(Op);
11042 return true;
11043 }
11044
11045 if (N.getOpcode() != ISD::SETCC ||
11046 N.getValueType().getScalarType() != MVT::i1 ||
11047 cast<CondCodeSDNode>(N.getOperand(2))->get() != ISD::SETNE)
11048 return false;
11049
11050 SDValue Op0 = N->getOperand(0);
11051 SDValue Op1 = N->getOperand(1);
11052 assert(Op0.getValueType() == Op1.getValueType());
11053
11054 if (isNullOrNullSplat(Op0))
11055 Op = Op1;
11056 else if (isNullOrNullSplat(Op1))
11057 Op = Op0;
11058 else
11059 return false;
11060
11061 Known = DAG.computeKnownBits(Op);
11062
11063 return (Known.Zero | 1).isAllOnesValue();
11064}
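// Illustrative case for the setcc form above (hypothetical): if X is known
// to be 0 or 1 (all bits above bit 0 known zero), then
//   t1: i1 = setcc X, Constant<0>, setne
// behaves exactly like (truncate X to i1), so Op = X is reported together
// with its KnownBits and the caller can fold zext(t1) directly into a
// zero-extension or truncation of X.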
11065
11066/// Given an extending node with a pop-count operand, if the target does not
11067/// support a pop-count in the narrow source type but does support it in the
11068/// destination type, widen the pop-count to the destination type.
11069static SDValue widenCtPop(SDNode *Extend, SelectionDAG &DAG) {
11070 assert((Extend->getOpcode() == ISD::ZERO_EXTEND ||
11071         Extend->getOpcode() == ISD::ANY_EXTEND) && "Expected extend op");
11072
11073 SDValue CtPop = Extend->getOperand(0);
11074 if (CtPop.getOpcode() != ISD::CTPOP || !CtPop.hasOneUse())
11075 return SDValue();
11076
11077 EVT VT = Extend->getValueType(0);
11078 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11079 if (TLI.isOperationLegalOrCustom(ISD::CTPOP, CtPop.getValueType()) ||
11080 !TLI.isOperationLegalOrCustom(ISD::CTPOP, VT))
11081 return SDValue();
11082
11083 // zext (ctpop X) --> ctpop (zext X)
11084 SDLoc DL(Extend);
11085 SDValue NewZext = DAG.getZExtOrTrunc(CtPop.getOperand(0), DL, VT);
11086 return DAG.getNode(ISD::CTPOP, DL, VT, NewZext);
11087}
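// Illustrative instance (hypothetical legality): with CTPOP illegal on i16
// but legal on i32,
//   (i32 (zext (ctpop i16 X))) --> (ctpop (i32 (zext X)))
// Zero extension only adds zero bits, so the population count is unchanged.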
11088
11089SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
11090 SDValue N0 = N->getOperand(0);
11091 EVT VT = N->getValueType(0);
11092
11093 if (SDValue Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes))
11094 return Res;
11095
11096 // fold (zext (zext x)) -> (zext x)
11097 // fold (zext (aext x)) -> (zext x)
11098 if (N0.getOpcode() == ISD::ZERO_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND)
11099 return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT,
11100 N0.getOperand(0));
11101
11102 // fold (zext (truncate x)) -> (zext x) or
11103 // (zext (truncate x)) -> (truncate x)
11104 // This is valid when the truncated bits of x are already zero.
11105 SDValue Op;
11106 KnownBits Known;
11107 if (isTruncateOf(DAG, N0, Op, Known)) {
11108 APInt TruncatedBits =
11109 (Op.getScalarValueSizeInBits() == N0.getScalarValueSizeInBits()) ?
11110 APInt(Op.getScalarValueSizeInBits(), 0) :
11111 APInt::getBitsSet(Op.getScalarValueSizeInBits(),
11112 N0.getScalarValueSizeInBits(),
11113 std::min(Op.getScalarValueSizeInBits(),
11114 VT.getScalarSizeInBits()));
11115 if (TruncatedBits.isSubsetOf(Known.Zero))
11116 return DAG.getZExtOrTrunc(Op, SDLoc(N), VT);
11117 }
11118
11119 // fold (zext (truncate x)) -> (and x, mask)
11120 if (N0.getOpcode() == ISD::TRUNCATE) {
11121 // fold (zext (truncate (load x))) -> (zext (smaller load x))
11122 // fold (zext (truncate (srl (load x), c))) -> (zext (smaller load (x+c/n)))
11123 if (SDValue NarrowLoad = ReduceLoadWidth(N0.getNode())) {
11124 SDNode *oye = N0.getOperand(0).getNode();
11125 if (NarrowLoad.getNode() != N0.getNode()) {
11126 CombineTo(N0.getNode(), NarrowLoad);
11127 // CombineTo deleted the truncate, if needed, but not what's under it.
11128 AddToWorklist(oye);
11129 }
11130 return SDValue(N, 0); // Return N so it doesn't get rechecked!
11131 }
11132
11133 EVT SrcVT = N0.getOperand(0).getValueType();
11134 EVT MinVT = N0.getValueType();
11135
11136 // Try to mask before the extension to avoid having to generate a larger mask,
11137 // possibly over several sub-vectors.
11138 if (SrcVT.bitsLT(VT) && VT.isVector()) {
11139 if (!LegalOperations || (TLI.isOperationLegal(ISD::AND, SrcVT) &&
11140 TLI.isOperationLegal(ISD::ZERO_EXTEND, VT))) {
11141 SDValue Op = N0.getOperand(0);
11142 Op = DAG.getZeroExtendInReg(Op, SDLoc(N), MinVT);
11143 AddToWorklist(Op.getNode());
11144 SDValue ZExtOrTrunc = DAG.getZExtOrTrunc(Op, SDLoc(N), VT);
11145 // Transfer the debug info; the new node is equivalent to N0.
11146 DAG.transferDbgValues(N0, ZExtOrTrunc);
11147 return ZExtOrTrunc;
11148 }
11149 }
11150
11151 if (!LegalOperations || TLI.isOperationLegal(ISD::AND, VT)) {
11152 SDValue Op = DAG.getAnyExtOrTrunc(N0.getOperand(0), SDLoc(N), VT);
11153 AddToWorklist(Op.getNode());
11154 SDValue And = DAG.getZeroExtendInReg(Op, SDLoc(N), MinVT);
11155 // We may safely transfer the debug info describing the truncate node over
11156 // to the equivalent and operation.
11157 DAG.transferDbgValues(N0, And);
11158 return And;
11159 }
11160 }
11161
11162 // Fold (zext (and (trunc x), cst)) -> (and x, cst),
11163 // if either of the casts is not free.
11164 if (N0.getOpcode() == ISD::AND &&
11165 N0.getOperand(0).getOpcode() == ISD::TRUNCATE &&
11166 N0.getOperand(1).getOpcode() == ISD::Constant &&
11167 (!TLI.isTruncateFree(N0.getOperand(0).getOperand(0).getValueType(),
11168 N0.getValueType()) ||
11169 !TLI.isZExtFree(N0.getValueType(), VT))) {
11170 SDValue X = N0.getOperand(0).getOperand(0);
11171 X = DAG.getAnyExtOrTrunc(X, SDLoc(X), VT);
11172 APInt Mask = N0.getConstantOperandAPInt(1).zext(VT.getSizeInBits());
11173 SDLoc DL(N);
11174 return DAG.getNode(ISD::AND, DL, VT,
11175 X, DAG.getConstant(Mask, DL, VT));
11176 }
11177
11178 // Try to simplify (zext (load x)).
11179 if (SDValue foldedExt =
11180 tryToFoldExtOfLoad(DAG, *this, TLI, VT, LegalOperations, N, N0,
11181 ISD::ZEXTLOAD, ISD::ZERO_EXTEND))
11182 return foldedExt;
11183
11184 if (SDValue foldedExt =
11185 tryToFoldExtOfMaskedLoad(DAG, TLI, VT, N, N0, ISD::ZEXTLOAD,
11186 ISD::ZERO_EXTEND))
11187 return foldedExt;
11188
11189 // fold (zext (load x)) to multiple smaller zextloads.
11190 // Only on illegal but splittable vectors.
11191 if (SDValue ExtLoad = CombineExtLoad(N))
11192 return ExtLoad;
11193
11194 // fold (zext (and/or/xor (load x), cst)) ->
11195 // (and/or/xor (zextload x), (zext cst))
11196 // Unless (and (load x) cst) will match as a zextload already and has
11197 // additional users.
11198 if ((N0.getOpcode() == ISD::AND || N0.getOpcode() == ISD::OR ||
11199 N0.getOpcode() == ISD::XOR) &&
11200 isa<LoadSDNode>(N0.getOperand(0)) &&
11201 N0.getOperand(1).getOpcode() == ISD::Constant &&
11202 (!LegalOperations && TLI.isOperationLegal(N0.getOpcode(), VT))) {
11203 LoadSDNode *LN00 = cast<LoadSDNode>(N0.getOperand(0));
11204 EVT MemVT = LN00->getMemoryVT();
11205 if (TLI.isLoadExtLegal(ISD::ZEXTLOAD, VT, MemVT) &&
11206 LN00->getExtensionType() != ISD::SEXTLOAD && LN00->isUnindexed()) {
11207 bool DoXform = true;
11208 SmallVector<SDNode*, 4> SetCCs;
11209 if (!N0.hasOneUse()) {
11210 if (N0.getOpcode() == ISD::AND) {
11211 auto *AndC = cast<ConstantSDNode>(N0.getOperand(1));
11212 EVT LoadResultTy = AndC->getValueType(0);
11213 EVT ExtVT;
11214 if (isAndLoadExtLoad(AndC, LN00, LoadResultTy, ExtVT))
11215 DoXform = false;
11216 }
11217 }
11218 if (DoXform)
11219 DoXform = ExtendUsesToFormExtLoad(VT, N0.getNode(), N0.getOperand(0),
11220 ISD::ZERO_EXTEND, SetCCs, TLI);
11221 if (DoXform) {
11222 SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(LN00), VT,
11223 LN00->getChain(), LN00->getBasePtr(),
11224 LN00->getMemoryVT(),
11225 LN00->getMemOperand());
11226 APInt Mask = N0.getConstantOperandAPInt(1).zext(VT.getSizeInBits());
11227 SDLoc DL(N);
11228 SDValue And = DAG.getNode(N0.getOpcode(), DL, VT,
11229 ExtLoad, DAG.getConstant(Mask, DL, VT));
11230 ExtendSetCCUses(SetCCs, N0.getOperand(0), ExtLoad, ISD::ZERO_EXTEND);
11231 bool NoReplaceTruncAnd = !N0.hasOneUse();
11232 bool NoReplaceTrunc = SDValue(LN00, 0).hasOneUse();
11233 CombineTo(N, And);
11234 // If N0 has multiple uses, change other uses as well.
11235 if (NoReplaceTruncAnd) {
11236 SDValue TruncAnd =
11237 DAG.getNode(ISD::TRUNCATE, DL, N0.getValueType(), And);
11238 CombineTo(N0.getNode(), TruncAnd);
11239 }
11240 if (NoReplaceTrunc) {
11241 DAG.ReplaceAllUsesOfValueWith(SDValue(LN00, 1), ExtLoad.getValue(1));
11242 } else {
11243 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(LN00),
11244 LN00->getValueType(0), ExtLoad);
11245 CombineTo(LN00, Trunc, ExtLoad.getValue(1));
11246 }
11247 return SDValue(N,0); // Return N so it doesn't get rechecked!
11248 }
11249 }
11250 }
11251
11252 // fold (zext (and/or/xor (shl/shr (load x), cst), cst)) ->
11253 // (and/or/xor (shl/shr (zextload x), (zext cst)), (zext cst))
11254 if (SDValue ZExtLoad = CombineZExtLogicopShiftLoad(N))
11255 return ZExtLoad;
11256
11257 // Try to simplify (zext (zextload x)).
11258 if (SDValue foldedExt = tryToFoldExtOfExtload(
11259 DAG, *this, TLI, VT, LegalOperations, N, N0, ISD::ZEXTLOAD))
11260 return foldedExt;
11261
11262 if (SDValue V = foldExtendedSignBitTest(N, DAG, LegalOperations))
11263 return V;
11264
11265 if (N0.getOpcode() == ISD::SETCC) {
11266 // Only do this before legalize for now.
11267 if (!LegalOperations && VT.isVector() &&
11268 N0.getValueType().getVectorElementType() == MVT::i1) {
11269 EVT N00VT = N0.getOperand(0).getValueType();
11270 if (getSetCCResultType(N00VT) == N0.getValueType())
11271 return SDValue();
11272
11273 // We know that the # elements of the results is the same as the #
11274 // elements of the compare (and the # elements of the compare result for
11275 // that matter). Check to see that they are the same size. If so, we know
11276 // that the element size of the zext'd result matches the element size of
11277 // the compare operands.
11278 SDLoc DL(N);
11279 if (VT.getSizeInBits() == N00VT.getSizeInBits()) {
11280 // zext(setcc) -> zext_in_reg(vsetcc) for vectors.
11281 SDValue VSetCC = DAG.getNode(ISD::SETCC, DL, VT, N0.getOperand(0),
11282 N0.getOperand(1), N0.getOperand(2));
11283 return DAG.getZeroExtendInReg(VSetCC, DL, N0.getValueType());
11284 }
11285
11286 // If the desired elements are smaller or larger than the source
11287 // elements we can use a matching integer vector type and then
11288 // truncate/any extend followed by zext_in_reg.
11289 EVT MatchingVectorType = N00VT.changeVectorElementTypeToInteger();
11290 SDValue VsetCC =
11291 DAG.getNode(ISD::SETCC, DL, MatchingVectorType, N0.getOperand(0),
11292 N0.getOperand(1), N0.getOperand(2));
11293 return DAG.getZeroExtendInReg(DAG.getAnyExtOrTrunc(VsetCC, DL, VT), DL,
11294 N0.getValueType());
11295 }
11296
11297 // zext(setcc x,y,cc) -> zext(select x, y, true, false, cc)
11298 SDLoc DL(N);
11299 EVT N0VT = N0.getValueType();
11300 EVT N00VT = N0.getOperand(0).getValueType();
11301 if (SDValue SCC = SimplifySelectCC(
11302 DL, N0.getOperand(0), N0.getOperand(1),
11303 DAG.getBoolConstant(true, DL, N0VT, N00VT),
11304 DAG.getBoolConstant(false, DL, N0VT, N00VT),
11305 cast<CondCodeSDNode>(N0.getOperand(2))->get(), true))
11306 return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, SCC);
11307 }
11308
11309 // (zext (shl/srl (zext x), cst)) -> (shl/srl (zext x), cst)
11310 if ((N0.getOpcode() == ISD::SHL || N0.getOpcode() == ISD::SRL) &&
11311 isa<ConstantSDNode>(N0.getOperand(1)) &&
11312 N0.getOperand(0).getOpcode() == ISD::ZERO_EXTEND &&
11313 N0.hasOneUse()) {
11314 SDValue ShAmt = N0.getOperand(1);
11315 if (N0.getOpcode() == ISD::SHL) {
11316 SDValue InnerZExt = N0.getOperand(0);
11317 // If the original shl may be shifting out bits, do not perform this
11318 // transformation.
11319 unsigned KnownZeroBits = InnerZExt.getValueSizeInBits() -
11320 InnerZExt.getOperand(0).getValueSizeInBits();
11321 if (cast<ConstantSDNode>(ShAmt)->getAPIntValue().ugt(KnownZeroBits))
11322 return SDValue();
11323 }
11324
11325 SDLoc DL(N);
11326
11327 // Ensure that the shift amount is wide enough for the shifted value.
11328 if (Log2_32_Ceil(VT.getSizeInBits()) > ShAmt.getValueSizeInBits())
11329 ShAmt = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, ShAmt);
11330
11331 return DAG.getNode(N0.getOpcode(), DL, VT,
11332 DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0.getOperand(0)),
11333 ShAmt);
11334 }
11335
11336 if (SDValue NewVSel = matchVSelectOpSizesWithSetCC(N))
11337 return NewVSel;
11338
11339 if (SDValue NewCtPop = widenCtPop(N, DAG))
11340 return NewCtPop;
11341
11342 if (SDValue Res = tryToFoldExtendSelectLoad(N, TLI, DAG))
11343 return Res;
11344
11345 return SDValue();
11346}
11347
11348SDValue DAGCombiner::visitANY_EXTEND(SDNode *N) {
11349 SDValue N0 = N->getOperand(0);
11350 EVT VT = N->getValueType(0);
11351
11352 if (SDValue Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes))
11353 return Res;
11354
11355 // fold (aext (aext x)) -> (aext x)
11356 // fold (aext (zext x)) -> (zext x)
11357 // fold (aext (sext x)) -> (sext x)
11358 if (N0.getOpcode() == ISD::ANY_EXTEND ||
11359 N0.getOpcode() == ISD::ZERO_EXTEND ||
11360 N0.getOpcode() == ISD::SIGN_EXTEND)
11361 return DAG.getNode(N0.getOpcode(), SDLoc(N), VT, N0.getOperand(0));
11362
11363 // fold (aext (truncate (load x))) -> (aext (smaller load x))
11364 // fold (aext (truncate (srl (load x), c))) -> (aext (smaller load (x+c/n)))
11365 if (N0.getOpcode() == ISD::TRUNCATE) {
11366 if (SDValue NarrowLoad = ReduceLoadWidth(N0.getNode())) {
11367 SDNode *oye = N0.getOperand(0).getNode();
11368 if (NarrowLoad.getNode() != N0.getNode()) {
11369 CombineTo(N0.getNode(), NarrowLoad);
11370 // CombineTo deleted the truncate, if needed, but not what's under it.
11371 AddToWorklist(oye);
11372 }
11373 return SDValue(N, 0); // Return N so it doesn't get rechecked!
11374 }
11375 }
11376
11377 // fold (aext (truncate x))
11378 if (N0.getOpcode() == ISD::TRUNCATE)
11379 return DAG.getAnyExtOrTrunc(N0.getOperand(0), SDLoc(N), VT);
11380
11381 // Fold (aext (and (trunc x), cst)) -> (and x, cst)
11382 // if the trunc is not free.
11383 if (N0.getOpcode() == ISD::AND &&
11384 N0.getOperand(0).getOpcode() == ISD::TRUNCATE &&
11385 N0.getOperand(1).getOpcode() == ISD::Constant &&
11386 !TLI.isTruncateFree(N0.getOperand(0).getOperand(0).getValueType(),
11387 N0.getValueType())) {
11388 SDLoc DL(N);
11389 SDValue X = N0.getOperand(0).getOperand(0);
11390 X = DAG.getAnyExtOrTrunc(X, DL, VT);
11391 APInt Mask = N0.getConstantOperandAPInt(1).zext(VT.getSizeInBits());
11392 return DAG.getNode(ISD::AND, DL, VT,
11393 X, DAG.getConstant(Mask, DL, VT));
11394 }
11395
11396 // fold (aext (load x)) -> (aext (truncate (extload x)))
11397 // None of the supported targets knows how to perform load and any_ext
11398 // on vectors in one instruction, so attempt to fold to zext instead.
11399 if (VT.isVector()) {
11400 // Try to simplify (zext (load x)).
11401 if (SDValue foldedExt =
11402 tryToFoldExtOfLoad(DAG, *this, TLI, VT, LegalOperations, N, N0,
11403 ISD::ZEXTLOAD, ISD::ZERO_EXTEND))
11404 return foldedExt;
11405 } else if (ISD::isNON_EXTLoad(N0.getNode()) &&
11406 ISD::isUNINDEXEDLoad(N0.getNode()) &&
11407 TLI.isLoadExtLegal(ISD::EXTLOAD, VT, N0.getValueType())) {
11408 bool DoXform = true;
11409 SmallVector<SDNode *, 4> SetCCs;
11410 if (!N0.hasOneUse())
11411 DoXform =
11412 ExtendUsesToFormExtLoad(VT, N, N0, ISD::ANY_EXTEND, SetCCs, TLI);
11413 if (DoXform) {
11414 LoadSDNode *LN0 = cast<LoadSDNode>(N0);
11415 SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, SDLoc(N), VT,
11416 LN0->getChain(), LN0->getBasePtr(),
11417 N0.getValueType(), LN0->getMemOperand());
11418 ExtendSetCCUses(SetCCs, N0, ExtLoad, ISD::ANY_EXTEND);
11419 // If the load value is used only by N, replace it via CombineTo N.
11420 bool NoReplaceTrunc = N0.hasOneUse();
11421 CombineTo(N, ExtLoad);
11422 if (NoReplaceTrunc) {
11423 DAG.ReplaceAllUsesOfValueWith(SDValue(LN0, 1), ExtLoad.getValue(1));
11424 recursivelyDeleteUnusedNodes(LN0);
11425 } else {
11426 SDValue Trunc =
11427 DAG.getNode(ISD::TRUNCATE, SDLoc(N0), N0.getValueType(), ExtLoad);
11428 CombineTo(LN0, Trunc, ExtLoad.getValue(1));
11429 }
11430 return SDValue(N, 0); // Return N so it doesn't get rechecked!
11431 }
11432 }
11433
11434 // fold (aext (zextload x)) -> (aext (truncate (zextload x)))
11435 // fold (aext (sextload x)) -> (aext (truncate (sextload x)))
11436 // fold (aext ( extload x)) -> (aext (truncate (extload x)))
11437 if (N0.getOpcode() == ISD::LOAD && !ISD::isNON_EXTLoad(N0.getNode()) &&
11438 ISD::isUNINDEXEDLoad(N0.getNode()) && N0.hasOneUse()) {
11439 LoadSDNode *LN0 = cast<LoadSDNode>(N0);
11440 ISD::LoadExtType ExtType = LN0->getExtensionType();
11441 EVT MemVT = LN0->getMemoryVT();
11442 if (!LegalOperations || TLI.isLoadExtLegal(ExtType, VT, MemVT)) {
11443 SDValue ExtLoad = DAG.getExtLoad(ExtType, SDLoc(N),
11444 VT, LN0->getChain(), LN0->getBasePtr(),
11445 MemVT, LN0->getMemOperand());
11446 CombineTo(N, ExtLoad);
11447 DAG.ReplaceAllUsesOfValueWith(SDValue(LN0, 1), ExtLoad.getValue(1));
11448 recursivelyDeleteUnusedNodes(LN0);
11449 return SDValue(N, 0); // Return N so it doesn't get rechecked!
11450 }
11451 }
11452
11453 if (N0.getOpcode() == ISD::SETCC) {
11454 // For vectors:
11455 // aext(setcc) -> vsetcc
11456 // aext(setcc) -> truncate(vsetcc)
11457 // aext(setcc) -> aext(vsetcc)
11458 // Only do this before legalize for now.
11459 if (VT.isVector() && !LegalOperations) {
11460 EVT N00VT = N0.getOperand(0).getValueType();
11461 if (getSetCCResultType(N00VT) == N0.getValueType())
11462 return SDValue();
11463
11464 // We know that the # elements of the results is the same as the
11465 // # elements of the compare (and the # elements of the compare result
11466 // for that matter). Check to see that they are the same size. If so,
11467 // we know that the element size of the extended result matches the
11468 // element size of the compare operands.
11469 if (VT.getSizeInBits() == N00VT.getSizeInBits())
11470 return DAG.getSetCC(SDLoc(N), VT, N0.getOperand(0),
11471 N0.getOperand(1),
11472 cast<CondCodeSDNode>(N0.getOperand(2))->get());
11473
11474 // If the desired elements are smaller or larger than the source
11475 // elements we can use a matching integer vector type and then
11476 // truncate/any extend
11477 EVT MatchingVectorType = N00VT.changeVectorElementTypeToInteger();
11478 SDValue VsetCC =
11479 DAG.getSetCC(SDLoc(N), MatchingVectorType, N0.getOperand(0),
11480 N0.getOperand(1),
11481 cast<CondCodeSDNode>(N0.getOperand(2))->get());
11482 return DAG.getAnyExtOrTrunc(VsetCC, SDLoc(N), VT);
11483 }
11484
11485 // aext(setcc x,y,cc) -> select_cc x, y, 1, 0, cc
11486 SDLoc DL(N);
11487 if (SDValue SCC = SimplifySelectCC(
11488 DL, N0.getOperand(0), N0.getOperand(1), DAG.getConstant(1, DL, VT),
11489 DAG.getConstant(0, DL, VT),
11490 cast<CondCodeSDNode>(N0.getOperand(2))->get(), true))
11491 return SCC;
11492 }
11493
11494 if (SDValue NewCtPop = widenCtPop(N, DAG))
11495 return NewCtPop;
11496
11497 if (SDValue Res = tryToFoldExtendSelectLoad(N, TLI, DAG))
11498 return Res;
11499
11500 return SDValue();
11501}
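// Illustrative example for the setcc fold above (scalar types assumed): the
// combine can turn (i32 any_extend (i1 setcc a, b, setlt)) into
// (i32 select_cc a, b, 1, 0, setlt), materializing the extended boolean
// directly as 1/0 in the wide type.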
11502
11503SDValue DAGCombiner::visitAssertExt(SDNode *N) {
11504 unsigned Opcode = N->getOpcode();
11505 SDValue N0 = N->getOperand(0);
11506 SDValue N1 = N->getOperand(1);
11507 EVT AssertVT = cast<VTSDNode>(N1)->getVT();
11508
11509 // fold (assert?ext (assert?ext x, vt), vt) -> (assert?ext x, vt)
11510 if (N0.getOpcode() == Opcode &&
11511 AssertVT == cast<VTSDNode>(N0.getOperand(1))->getVT())
11512 return N0;
11513
11514 if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse() &&
11515 N0.getOperand(0).getOpcode() == Opcode) {
11516      // We have an assert, truncate, assert sandwich. Make one stronger assert
11517      // by applying the smallest asserted type to the larger source type.
11518      // This eliminates the later assert:
11519 // assert (trunc (assert X, i8) to iN), i1 --> trunc (assert X, i1) to iN
11520 // assert (trunc (assert X, i1) to iN), i8 --> trunc (assert X, i1) to iN
11521 SDValue BigA = N0.getOperand(0);
11522 EVT BigA_AssertVT = cast<VTSDNode>(BigA.getOperand(1))->getVT();
11523    assert(BigA_AssertVT.bitsLE(N0.getValueType()) &&
11524           "Asserting zero/sign-extended bits to a type larger than the "
11525           "truncated destination does not provide information");
11526
11527 SDLoc DL(N);
11528 EVT MinAssertVT = AssertVT.bitsLT(BigA_AssertVT) ? AssertVT : BigA_AssertVT;
11529 SDValue MinAssertVTVal = DAG.getValueType(MinAssertVT);
11530 SDValue NewAssert = DAG.getNode(Opcode, DL, BigA.getValueType(),
11531 BigA.getOperand(0), MinAssertVTVal);
11532 return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewAssert);
11533 }
11534
11535  // If we have (AssertZext (truncate (AssertSext X, iX)), iY) and Y is smaller
11536  // than X, just move the AssertZext in front of the truncate and drop the
11537  // AssertSext.
11538 if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse() &&
11539 N0.getOperand(0).getOpcode() == ISD::AssertSext &&
11540 Opcode == ISD::AssertZext) {
11541 SDValue BigA = N0.getOperand(0);
11542 EVT BigA_AssertVT = cast<VTSDNode>(BigA.getOperand(1))->getVT();
11543    assert(BigA_AssertVT.bitsLE(N0.getValueType()) &&
11544           "Asserting zero/sign-extended bits to a type larger than the "
11545           "truncated destination does not provide information");
11546
11547 if (AssertVT.bitsLT(BigA_AssertVT)) {
11548 SDLoc DL(N);
11549 SDValue NewAssert = DAG.getNode(Opcode, DL, BigA.getValueType(),
11550 BigA.getOperand(0), N1);
11551 return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewAssert);
11552 }
11553 }
11554
11555 return SDValue();
11556}
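// Illustrative example for the AssertZext-over-AssertSext case (widths
// assumed):
//   (AssertZext (trunc (AssertSext X:i64, i16) to i32), i8)
//     --> (trunc (AssertZext X, i8) to i32)
// Since i8 is narrower than i16, the zero-extension assertion is the stronger
// fact and can be applied before the truncate, dropping the AssertSext.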
11557
11558SDValue DAGCombiner::visitAssertAlign(SDNode *N) {
11559 SDLoc DL(N);
11560
11561 Align AL = cast<AssertAlignSDNode>(N)->getAlign();
11562 SDValue N0 = N->getOperand(0);
11563
11564 // Fold (assertalign (assertalign x, AL0), AL1) ->
11565 // (assertalign x, max(AL0, AL1))
11566 if (auto *AAN = dyn_cast<AssertAlignSDNode>(N0))
11567 return DAG.getAssertAlign(DL, N0.getOperand(0),
11568 std::max(AL, AAN->getAlign()));
11569
11570 // In rare cases, there are trivial arithmetic ops in source operands. Sink
11571  // this assert down to source operands so that those arithmetic ops can be
11572  // exposed to DAG combining.
11573 switch (N0.getOpcode()) {
11574 default:
11575 break;
11576 case ISD::ADD:
11577 case ISD::SUB: {
11578 unsigned AlignShift = Log2(AL);
11579 SDValue LHS = N0.getOperand(0);
11580 SDValue RHS = N0.getOperand(1);
11581 unsigned LHSAlignShift = DAG.computeKnownBits(LHS).countMinTrailingZeros();
11582 unsigned RHSAlignShift = DAG.computeKnownBits(RHS).countMinTrailingZeros();
11583 if (LHSAlignShift >= AlignShift || RHSAlignShift >= AlignShift) {
11584 if (LHSAlignShift < AlignShift)
11585 LHS = DAG.getAssertAlign(DL, LHS, AL);
11586 if (RHSAlignShift < AlignShift)
11587 RHS = DAG.getAssertAlign(DL, RHS, AL);
11588 return DAG.getNode(N0.getOpcode(), DL, N0.getValueType(), LHS, RHS);
11589 }
11590 break;
11591 }
11592 }
11593
11594 return SDValue();
11595}
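// Illustrative example for the sinking case above (constants assumed): for
// (assertalign (add X, 16), align 8), AlignShift is 3 and the constant 16 has
// four known trailing zero bits, so the combine emits
// (add (assertalign X, align 8), 16), exposing the add to pointer-arithmetic
// combines.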
11596
11597/// If the result of a wider load is shifted right by N bits and then
11598/// truncated to a narrower type, where N is a multiple of the number of bits
11599/// of the narrower type, transform it to a narrower load from address + N /
11600/// (num of bits of the new type). Also narrow the load if the result is
11601/// masked with an AND to effectively produce a smaller type. If the result is
11602/// to be extended, also fold the extension to form an extending load.
11603SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
11604 unsigned Opc = N->getOpcode();
11605
11606 ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
11607 SDValue N0 = N->getOperand(0);
11608 EVT VT = N->getValueType(0);
11609 EVT ExtVT = VT;
11610
11611 // This transformation isn't valid for vector loads.
11612 if (VT.isVector())
11613 return SDValue();
11614
11615 unsigned ShAmt = 0;
11616 bool HasShiftedOffset = false;
11617 // Special case: SIGN_EXTEND_INREG is basically truncating to ExtVT then
11618  // extending to VT.
11619 if (Opc == ISD::SIGN_EXTEND_INREG) {
11620 ExtType = ISD::SEXTLOAD;
11621 ExtVT = cast<VTSDNode>(N->getOperand(1))->getVT();
11622 } else if (Opc == ISD::SRL) {
11623 // Another special-case: SRL is basically zero-extending a narrower value,
11624    // or it may be shifting a higher subword, half or byte into the lowest
11625 // bits.
11626 ExtType = ISD::ZEXTLOAD;
11627 N0 = SDValue(N, 0);
11628
11629 auto *LN0 = dyn_cast<LoadSDNode>(N0.getOperand(0));
11630 auto *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1));
11631 if (!N01 || !LN0)
11632 return SDValue();
11633
11634 uint64_t ShiftAmt = N01->getZExtValue();
11635 uint64_t MemoryWidth = LN0->getMemoryVT().getScalarSizeInBits();
11636 if (LN0->getExtensionType() != ISD::SEXTLOAD && MemoryWidth > ShiftAmt)
11637 ExtVT = EVT::getIntegerVT(*DAG.getContext(), MemoryWidth - ShiftAmt);
11638 else
11639 ExtVT = EVT::getIntegerVT(*DAG.getContext(),
11640 VT.getScalarSizeInBits() - ShiftAmt);
11641 } else if (Opc == ISD::AND) {
11642 // An AND with a constant mask is the same as a truncate + zero-extend.
11643 auto AndC = dyn_cast<ConstantSDNode>(N->getOperand(1));
11644 if (!AndC)
11645 return SDValue();
11646
11647 const APInt &Mask = AndC->getAPIntValue();
11648 unsigned ActiveBits = 0;
11649 if (Mask.isMask()) {
11650 ActiveBits = Mask.countTrailingOnes();
11651 } else if (Mask.isShiftedMask()) {
11652 ShAmt = Mask.countTrailingZeros();
11653 APInt ShiftedMask = Mask.lshr(ShAmt);
11654 ActiveBits = ShiftedMask.countTrailingOnes();
11655 HasShiftedOffset = true;
11656 } else
11657 return SDValue();
11658
11659 ExtType = ISD::ZEXTLOAD;
11660 ExtVT = EVT::getIntegerVT(*DAG.getContext(), ActiveBits);
11661 }
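    // Illustrative example (little endian, legality assumed): in
    // (and (i32 load [p]), 0xFF00) the mask is a shifted mask, so ShAmt = 8
    // and ExtVT = i8; the rewrite below ultimately produces
    // (shl (i32 zextload [p+1], mem i8), 8) to put the byte back at bit 8.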
11662
11663 if (N0.getOpcode() == ISD::SRL && N0.hasOneUse()) {
11664 SDValue SRL = N0;
11665 if (auto *ConstShift = dyn_cast<ConstantSDNode>(SRL.getOperand(1))) {
11666 ShAmt = ConstShift->getZExtValue();
11667 unsigned EVTBits = ExtVT.getScalarSizeInBits();
11668      // Is the shift amount a multiple of the size of ExtVT?
11669 if ((ShAmt & (EVTBits-1)) == 0) {
11670 N0 = N0.getOperand(0);
11671        // Is the load width a multiple of the size of ExtVT?
11672 if ((N0.getScalarValueSizeInBits() & (EVTBits - 1)) != 0)
11673 return SDValue();
11674 }
11675
11676 // At this point, we must have a load or else we can't do the transform.
11677 auto *LN0 = dyn_cast<LoadSDNode>(N0);
11678 if (!LN0) return SDValue();
11679
11680 // Because a SRL must be assumed to *need* to zero-extend the high bits
11681 // (as opposed to anyext the high bits), we can't combine the zextload
11682 // lowering of SRL and an sextload.
11683 if (LN0->getExtensionType() == ISD::SEXTLOAD)
11684 return SDValue();
11685
11686 // If the shift amount is larger than the input type then we're not
11687 // accessing any of the loaded bytes. If the load was a zextload/extload
11688 // then the result of the shift+trunc is zero/undef (handled elsewhere).
11689 if (ShAmt >= LN0->getMemoryVT().getSizeInBits())
11690 return SDValue();
11691
11692 // If the SRL is only used by a masking AND, we may be able to adjust
11693 // the ExtVT to make the AND redundant.
11694 SDNode *Mask = *(SRL->use_begin());
11695 if (Mask->getOpcode() == ISD::AND &&
11696 isa<ConstantSDNode>(Mask->getOperand(1))) {
11697 const APInt& ShiftMask = Mask->getConstantOperandAPInt(1);
11698 if (ShiftMask.isMask()) {
11699 EVT MaskedVT = EVT::getIntegerVT(*DAG.getContext(),
11700 ShiftMask.countTrailingOnes());
11701 // If the mask is smaller, recompute the type.
11702 if ((ExtVT.getScalarSizeInBits() > MaskedVT.getScalarSizeInBits()) &&
11703 TLI.isLoadExtLegal(ExtType, N0.getValueType(), MaskedVT))
11704 ExtVT = MaskedVT;
11705 }
11706 }
11707 }
11708 }
11709
11710 // If the load is shifted left (and the result isn't shifted back right),
11711 // we can fold the truncate through the shift.
11712 unsigned ShLeftAmt = 0;
11713 if (ShAmt == 0 && N0.getOpcode() == ISD::SHL && N0.hasOneUse() &&
11714 ExtVT == VT && TLI.isNarrowingProfitable(N0.getValueType(), VT)) {
11715 if (ConstantSDNode *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
11716 ShLeftAmt = N01->getZExtValue();
11717 N0 = N0.getOperand(0);
11718 }
11719 }
11720
11721 // If we haven't found a load, we can't narrow it.
11722 if (!isa<LoadSDNode>(N0))
11723 return SDValue();
11724
11725 LoadSDNode *LN0 = cast<LoadSDNode>(N0);
11726 // Reducing the width of a volatile load is illegal. For atomics, we may be
11727 // able to reduce the width provided we never widen again. (see D66309)
11728 if (!LN0->isSimple() ||
11729 !isLegalNarrowLdSt(LN0, ExtType, ExtVT, ShAmt))
11730 return SDValue();
11731
11732 auto AdjustBigEndianShift = [&](unsigned ShAmt) {
11733 unsigned LVTStoreBits =
11734 LN0->getMemoryVT().getStoreSizeInBits().getFixedSize();
11735 unsigned EVTStoreBits = ExtVT.getStoreSizeInBits().getFixedSize();
11736 return LVTStoreBits - EVTStoreBits - ShAmt;
11737 };
11738
11739 // For big endian targets, we need to adjust the offset to the pointer to
11740 // load the correct bytes.
11741 if (DAG.getDataLayout().isBigEndian())
11742 ShAmt = AdjustBigEndianShift(ShAmt);
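  // Worked example of the adjustment (i32 load narrowed to i8, ShAmt = 0
  // assumed): on a big-endian target the value's low byte sits at the highest
  // address, so the adjusted shift is 32 - 8 - 0 = 24 bits, giving PtrOff = 3
  // instead of 0.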
11743
11744 uint64_t PtrOff = ShAmt / 8;
11745 Align NewAlign = commonAlignment(LN0->getAlign(), PtrOff);
11746 SDLoc DL(LN0);
11747 // The original load itself didn't wrap, so an offset within it doesn't.
11748 SDNodeFlags Flags;
11749 Flags.setNoUnsignedWrap(true);
11750 SDValue NewPtr = DAG.getMemBasePlusOffset(LN0->getBasePtr(),
11751 TypeSize::Fixed(PtrOff), DL, Flags);
11752 AddToWorklist(NewPtr.getNode());
11753
11754 SDValue Load;
11755 if (ExtType == ISD::NON_EXTLOAD)
11756 Load = DAG.getLoad(VT, DL, LN0->getChain(), NewPtr,
11757 LN0->getPointerInfo().getWithOffset(PtrOff), NewAlign,
11758 LN0->getMemOperand()->getFlags(), LN0->getAAInfo());
11759 else
11760 Load = DAG.getExtLoad(ExtType, DL, VT, LN0->getChain(), NewPtr,
11761 LN0->getPointerInfo().getWithOffset(PtrOff), ExtVT,
11762 NewAlign, LN0->getMemOperand()->getFlags(),
11763 LN0->getAAInfo());
11764
11765 // Replace the old load's chain with the new load's chain.
11766 WorklistRemover DeadNodes(*this);
11767 DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), Load.getValue(1));
11768
11769 // Shift the result left, if we've swallowed a left shift.
11770 SDValue Result = Load;
11771 if (ShLeftAmt != 0) {
11772 EVT ShImmTy = getShiftAmountTy(Result.getValueType());
11773 if (!isUIntN(ShImmTy.getScalarSizeInBits(), ShLeftAmt))
11774 ShImmTy = VT;
11775 // If the shift amount is as large as the result size (but, presumably,
11776 // no larger than the source) then the useful bits of the result are
11777 // zero; we can't simply return the shortened shift, because the result
11778 // of that operation is undefined.
11779 if (ShLeftAmt >= VT.getScalarSizeInBits())
11780 Result = DAG.getConstant(0, DL, VT);
11781 else
11782 Result = DAG.getNode(ISD::SHL, DL, VT,
11783 Result, DAG.getConstant(ShLeftAmt, DL, ShImmTy));
11784 }
11785
11786 if (HasShiftedOffset) {
11787 // Recalculate the shift amount after it has been altered to calculate
11788 // the offset.
11789 if (DAG.getDataLayout().isBigEndian())
11790 ShAmt = AdjustBigEndianShift(ShAmt);
11791
11792    // We're using a shifted mask, so the load now has an offset. This means
11793    // the data has been loaded into lower bytes than it would have been
11794    // before, so we need to shl the loaded data into the correct position
11795    // in the register.
11796 SDValue ShiftC = DAG.getConstant(ShAmt, DL, VT);
11797 Result = DAG.getNode(ISD::SHL, DL, VT, Result, ShiftC);
11798 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
11799 }
11800
11801 // Return the new loaded value.
11802 return Result;
11803}
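// Illustrative end-to-end example (little endian, legality assumed): for
// (i32 trunc (srl (i64 load [p]), 32)), the shift amount is a multiple of the
// narrow width, so PtrOff = 32 / 8 = 4 and the whole expression becomes a
// plain (i32 load [p+4]) with no shift at all.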
11804
11805SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) {
11806 SDValue N0 = N->getOperand(0);
11807 SDValue N1 = N->getOperand(1);
11808 EVT VT = N->getValueType(0);
11809 EVT ExtVT = cast<VTSDNode>(N1)->getVT();
11810 unsigned VTBits = VT.getScalarSizeInBits();
11811 unsigned ExtVTBits = ExtVT.getScalarSizeInBits();
11812
11813  // sext_in_reg(undef) = 0 because the top bits will all be the same.
11814 if (N0.isUndef())
11815 return DAG.getConstant(0, SDLoc(N), VT);
11816
11817 // fold (sext_in_reg c1) -> c1
11818 if (DAG.isConstantIntBuildVectorOrConstantInt(N0))
11819 return DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), VT, N0, N1);
11820
11821 // If the input is already sign extended, just drop the extension.
11822 if (DAG.ComputeNumSignBits(N0) >= (VTBits - ExtVTBits + 1))
11823 return N0;
11824
11825 // fold (sext_in_reg (sext_in_reg x, VT2), VT1) -> (sext_in_reg x, minVT) pt2
11826 if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG &&
11827 ExtVT.bitsLT(cast<VTSDNode>(N0.getOperand(1))->getVT()))
11828 return DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), VT, N0.getOperand(0),
11829 N1);
11830
11831 // fold (sext_in_reg (sext x)) -> (sext x)
11832 // fold (sext_in_reg (aext x)) -> (sext x)
11833 // if x is small enough or if we know that x has more than 1 sign bit and the
11834 // sign_extend_inreg is extending from one of them.
11835 if (N0.getOpcode() == ISD::SIGN_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND) {
11836 SDValue N00 = N0.getOperand(0);
11837 unsigned N00Bits = N00.getScalarValueSizeInBits();
11838 if ((N00Bits <= ExtVTBits ||
11839 (N00Bits - DAG.ComputeNumSignBits(N00)) < ExtVTBits) &&
11840 (!LegalOperations || TLI.isOperationLegal(ISD::SIGN_EXTEND, VT)))
11841 return DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), VT, N00);
11842 }
11843
11844 // fold (sext_in_reg (*_extend_vector_inreg x)) -> (sext_vector_inreg x)
11845 // if x is small enough or if we know that x has more than 1 sign bit and the
11846 // sign_extend_inreg is extending from one of them.
11847 if (N0.getOpcode() == ISD::ANY_EXTEND_VECTOR_INREG ||
11848 N0.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG ||
11849 N0.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG) {
11850 SDValue N00 = N0.getOperand(0);
11851 unsigned N00Bits = N00.getScalarValueSizeInBits();
11852 unsigned DstElts = N0.getValueType().getVectorMinNumElements();
11853 unsigned SrcElts = N00.getValueType().getVectorMinNumElements();
11854 bool IsZext = N0.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG;
11855 APInt DemandedSrcElts = APInt::getLowBitsSet(SrcElts, DstElts);
11856 if ((N00Bits == ExtVTBits ||
11857 (!IsZext && (N00Bits < ExtVTBits ||
11858 (N00Bits - DAG.ComputeNumSignBits(N00, DemandedSrcElts)) <
11859 ExtVTBits))) &&
11860 (!LegalOperations ||
11861 TLI.isOperationLegal(ISD::SIGN_EXTEND_VECTOR_INREG, VT)))
11862 return DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, SDLoc(N), VT, N00);
11863 }
11864
11865 // fold (sext_in_reg (zext x)) -> (sext x)
11866 // iff we are extending the source sign bit.
11867 if (N0.getOpcode() == ISD::ZERO_EXTEND) {
11868 SDValue N00 = N0.getOperand(0);
11869 if (N00.getScalarValueSizeInBits() == ExtVTBits &&
11870 (!LegalOperations || TLI.isOperationLegal(ISD::SIGN_EXTEND, VT)))
11871 return DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), VT, N00, N1);
11872 }
11873
11874 // fold (sext_in_reg x) -> (zext_in_reg x) if the sign bit is known zero.
11875 if (DAG.MaskedValueIsZero(N0, APInt::getOneBitSet(VTBits, ExtVTBits - 1)))
11876 return DAG.getZeroExtendInReg(N0, SDLoc(N), ExtVT);
11877
11878 // fold operands of sext_in_reg based on knowledge that the top bits are not
11879 // demanded.
11880 if (SimplifyDemandedBits(SDValue(N, 0)))
11881 return SDValue(N, 0);
11882
11883 // fold (sext_in_reg (load x)) -> (smaller sextload x)
11884 // fold (sext_in_reg (srl (load x), c)) -> (smaller sextload (x+c/evtbits))
11885 if (SDValue NarrowLoad = ReduceLoadWidth(N))
11886 return NarrowLoad;
11887
11888 // fold (sext_in_reg (srl X, 24), i8) -> (sra X, 24)
11889 // fold (sext_in_reg (srl X, 23), i8) -> (sra X, 23) iff possible.
11890 // We already fold "(sext_in_reg (srl X, 25), i8) -> srl X, 25" above.
11891 if (N0.getOpcode() == ISD::SRL) {
11892 if (auto *ShAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1)))
11893 if (ShAmt->getAPIntValue().ule(VTBits - ExtVTBits)) {
11894 // We can turn this into an SRA iff the input to the SRL is already sign
11895 // extended enough.
11896 unsigned InSignBits = DAG.ComputeNumSignBits(N0.getOperand(0));
11897 if (((VTBits - ExtVTBits) - ShAmt->getZExtValue()) < InSignBits)
11898 return DAG.getNode(ISD::SRA, SDLoc(N), VT, N0.getOperand(0),
11899 N0.getOperand(1));
11900 }
11901 }
11902
11903 // fold (sext_inreg (extload x)) -> (sextload x)
11904 // If sextload is not supported by target, we can only do the combine when
11905 // load has one use. Doing otherwise can block folding the extload with other
11906 // extends that the target does support.
11907 if (ISD::isEXTLoad(N0.getNode()) &&
11908 ISD::isUNINDEXEDLoad(N0.getNode()) &&
11909 ExtVT == cast<LoadSDNode>(N0)->getMemoryVT() &&
11910 ((!LegalOperations && cast<LoadSDNode>(N0)->isSimple() &&
11911 N0.hasOneUse()) ||
11912 TLI.isLoadExtLegal(ISD::SEXTLOAD, VT, ExtVT))) {
11913 LoadSDNode *LN0 = cast<LoadSDNode>(N0);
11914 SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, SDLoc(N), VT,
11915 LN0->getChain(),
11916 LN0->getBasePtr(), ExtVT,
11917 LN0->getMemOperand());
11918 CombineTo(N, ExtLoad);
11919 CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1));
11920 AddToWorklist(ExtLoad.getNode());
11921 return SDValue(N, 0); // Return N so it doesn't get rechecked!
11922 }
11923 // fold (sext_inreg (zextload x)) -> (sextload x) iff load has one use
11924 if (ISD::isZEXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) &&
11925 N0.hasOneUse() &&
11926 ExtVT == cast<LoadSDNode>(N0)->getMemoryVT() &&
11927 ((!LegalOperations && cast<LoadSDNode>(N0)->isSimple()) &&
11928 TLI.isLoadExtLegal(ISD::SEXTLOAD, VT, ExtVT))) {
11929 LoadSDNode *LN0 = cast<LoadSDNode>(N0);
11930 SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, SDLoc(N), VT,
11931 LN0->getChain(),
11932 LN0->getBasePtr(), ExtVT,
11933 LN0->getMemOperand());
11934 CombineTo(N, ExtLoad);
11935 CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1));
11936 return SDValue(N, 0); // Return N so it doesn't get rechecked!
11937 }
11938
11939 // fold (sext_inreg (masked_load x)) -> (sext_masked_load x)
11940 // ignore it if the masked load is already sign extended
11941 if (MaskedLoadSDNode *Ld = dyn_cast<MaskedLoadSDNode>(N0)) {
11942 if (ExtVT == Ld->getMemoryVT() && N0.hasOneUse() &&
11943 Ld->getExtensionType() != ISD::LoadExtType::NON_EXTLOAD &&
11944 TLI.isLoadExtLegal(ISD::SEXTLOAD, VT, ExtVT)) {
11945 SDValue ExtMaskedLoad = DAG.getMaskedLoad(
11946 VT, SDLoc(N), Ld->getChain(), Ld->getBasePtr(), Ld->getOffset(),
11947 Ld->getMask(), Ld->getPassThru(), ExtVT, Ld->getMemOperand(),
11948 Ld->getAddressingMode(), ISD::SEXTLOAD, Ld->isExpandingLoad());
11949 CombineTo(N, ExtMaskedLoad);
11950 CombineTo(N0.getNode(), ExtMaskedLoad, ExtMaskedLoad.getValue(1));
11951 return SDValue(N, 0); // Return N so it doesn't get rechecked!
11952 }
11953 }
11954
11955 // fold (sext_inreg (masked_gather x)) -> (sext_masked_gather x)
11956 if (auto *GN0 = dyn_cast<MaskedGatherSDNode>(N0)) {
11957 if (SDValue(GN0, 0).hasOneUse() &&
11958 ExtVT == GN0->getMemoryVT() &&
11959 TLI.isVectorLoadExtDesirable(SDValue(SDValue(GN0, 0)))) {
11960 SDValue Ops[] = {GN0->getChain(), GN0->getPassThru(), GN0->getMask(),
11961 GN0->getBasePtr(), GN0->getIndex(), GN0->getScale()};
11962
11963 SDValue ExtLoad = DAG.getMaskedGather(
11964 DAG.getVTList(VT, MVT::Other), ExtVT, SDLoc(N), Ops,
11965 GN0->getMemOperand(), GN0->getIndexType(), ISD::SEXTLOAD);
11966
11967 CombineTo(N, ExtLoad);
11968 CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1));
11969 AddToWorklist(ExtLoad.getNode());
11970 return SDValue(N, 0); // Return N so it doesn't get rechecked!
11971 }
11972 }
11973
11974 // Form (sext_inreg (bswap >> 16)) or (sext_inreg (rotl (bswap) 16))
11975 if (ExtVTBits <= 16 && N0.getOpcode() == ISD::OR) {
11976 if (SDValue BSwap = MatchBSwapHWordLow(N0.getNode(), N0.getOperand(0),
11977 N0.getOperand(1), false))
11978 return DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), VT, BSwap, N1);
11979 }
11980
11981 return SDValue();
11982}
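// Illustrative arithmetic for the "already sign extended" early exit above
// (widths assumed): with VT = i32 and ExtVT = i8 the extension is a no-op
// whenever ComputeNumSignBits(N0) >= 32 - 8 + 1 = 25, i.e. the top 25 bits
// are already copies of bit 7.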
11983
11984SDValue DAGCombiner::visitSIGN_EXTEND_VECTOR_INREG(SDNode *N) {
11985 SDValue N0 = N->getOperand(0);
11986 EVT VT = N->getValueType(0);
11987
11988  // sext_vector_inreg(undef) = 0 because the top bits will all be the same.
11989 if (N0.isUndef())
11990 return DAG.getConstant(0, SDLoc(N), VT);
11991
11992 if (SDValue Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes))
11993 return Res;
11994
11995 if (SimplifyDemandedVectorElts(SDValue(N, 0)))
11996 return SDValue(N, 0);
11997
11998 return SDValue();
11999}
12000
12001SDValue DAGCombiner::visitZERO_EXTEND_VECTOR_INREG(SDNode *N) {
12002 SDValue N0 = N->getOperand(0);
12003 EVT VT = N->getValueType(0);
12004
12005 // zext_vector_inreg(undef) = 0 because the top bits will be zero.
12006 if (N0.isUndef())
12007 return DAG.getConstant(0, SDLoc(N), VT);
12008
12009 if (SDValue Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes))
12010 return Res;
12011
12012 if (SimplifyDemandedVectorElts(SDValue(N, 0)))
12013 return SDValue(N, 0);
12014
12015 return SDValue();
12016}
12017
12018SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
12019 SDValue N0 = N->getOperand(0);
12020 EVT VT = N->getValueType(0);
12021 EVT SrcVT = N0.getValueType();
12022 bool isLE = DAG.getDataLayout().isLittleEndian();
12023
12024 // noop truncate
12025 if (SrcVT == VT)
12026 return N0;
12027
12028 // fold (truncate (truncate x)) -> (truncate x)
12029 if (N0.getOpcode() == ISD::TRUNCATE)
12030 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, N0.getOperand(0));
12031
12032 // fold (truncate c1) -> c1
12033 if (DAG.isConstantIntBuildVectorOrConstantInt(N0)) {
12034 SDValue C = DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, N0);
12035 if (C.getNode() != N)
12036 return C;
12037 }
12038
12039 // fold (truncate (ext x)) -> (ext x) or (truncate x) or x
12040 if (N0.getOpcode() == ISD::ZERO_EXTEND ||
12041 N0.getOpcode() == ISD::SIGN_EXTEND ||
12042 N0.getOpcode() == ISD::ANY_EXTEND) {
12043 // if the source is smaller than the dest, we still need an extend.
12044 if (N0.getOperand(0).getValueType().bitsLT(VT))
12045 return DAG.getNode(N0.getOpcode(), SDLoc(N), VT, N0.getOperand(0));
12046    // if the source is larger than the dest, then we just need the truncate.
12047 if (N0.getOperand(0).getValueType().bitsGT(VT))
12048 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, N0.getOperand(0));
12049 // if the source and dest are the same type, we can drop both the extend
12050 // and the truncate.
12051 return N0.getOperand(0);
12052 }
12053
12054 // If this is anyext(trunc), don't fold it, allow ourselves to be folded.
12055 if (N->hasOneUse() && (N->use_begin()->getOpcode() == ISD::ANY_EXTEND))
12056 return SDValue();
12057
12058 // Fold extract-and-trunc into a narrow extract. For example:
12059 // i64 x = EXTRACT_VECTOR_ELT(v2i64 val, i32 1)
12060 // i32 y = TRUNCATE(i64 x)
12061 // -- becomes --
12062 // v16i8 b = BITCAST (v2i64 val)
12063 // i8 x = EXTRACT_VECTOR_ELT(v16i8 b, i32 8)
12064 //
12065 // Note: We only run this optimization after type legalization (which often
12066 // creates this pattern) and before operation legalization after which
12067 // we need to be more careful about the vector instructions that we generate.
12068 if (N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
12069 LegalTypes && !LegalOperations && N0->hasOneUse() && VT != MVT::i1) {
12070 EVT VecTy = N0.getOperand(0).getValueType();
12071 EVT ExTy = N0.getValueType();
12072 EVT TrTy = N->getValueType(0);
12073
12074 auto EltCnt = VecTy.getVectorElementCount();
12075 unsigned SizeRatio = ExTy.getSizeInBits()/TrTy.getSizeInBits();
12076 auto NewEltCnt = EltCnt * SizeRatio;
12077
12078 EVT NVT = EVT::getVectorVT(*DAG.getContext(), TrTy, NewEltCnt);
12079    assert(NVT.getSizeInBits() == VecTy.getSizeInBits() && "Invalid Size");
12080
12081 SDValue EltNo = N0->getOperand(1);
12082 if (isa<ConstantSDNode>(EltNo) && isTypeLegal(NVT)) {
12083 int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
12084 int Index = isLE ? (Elt*SizeRatio) : (Elt*SizeRatio + (SizeRatio-1));
12085
12086 SDLoc DL(N);
12087 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, TrTy,
12088 DAG.getBitcast(NVT, N0.getOperand(0)),
12089 DAG.getVectorIdxConstant(Index, DL));
12090 }
12091 }
12092
12093 // trunc (select c, a, b) -> select c, (trunc a), (trunc b)
12094 if (N0.getOpcode() == ISD::SELECT && N0.hasOneUse()) {
12095 if ((!LegalOperations || TLI.isOperationLegal(ISD::SELECT, SrcVT)) &&
12096 TLI.isTruncateFree(SrcVT, VT)) {
12097 SDLoc SL(N0);
12098 SDValue Cond = N0.getOperand(0);
12099 SDValue TruncOp0 = DAG.getNode(ISD::TRUNCATE, SL, VT, N0.getOperand(1));
12100 SDValue TruncOp1 = DAG.getNode(ISD::TRUNCATE, SL, VT, N0.getOperand(2));
12101 return DAG.getNode(ISD::SELECT, SDLoc(N), VT, Cond, TruncOp0, TruncOp1);
12102 }
12103 }
12104
12105 // trunc (shl x, K) -> shl (trunc x), K => K < VT.getScalarSizeInBits()
12106 if (N0.getOpcode() == ISD::SHL && N0.hasOneUse() &&
12107 (!LegalOperations || TLI.isOperationLegal(ISD::SHL, VT)) &&
12108 TLI.isTypeDesirableForOp(ISD::SHL, VT)) {
12109 SDValue Amt = N0.getOperand(1);
12110 KnownBits Known = DAG.computeKnownBits(Amt);
12111 unsigned Size = VT.getScalarSizeInBits();
12112 if (Known.getBitWidth() - Known.countMinLeadingZeros() <= Log2_32(Size)) {
12113 SDLoc SL(N);
12114 EVT AmtVT = TLI.getShiftAmountTy(VT, DAG.getDataLayout());
12115
12116 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, VT, N0.getOperand(0));
12117 if (AmtVT != Amt.getValueType()) {
12118 Amt = DAG.getZExtOrTrunc(Amt, SL, AmtVT);
12119 AddToWorklist(Amt.getNode());
12120 }
12121 return DAG.getNode(ISD::SHL, SL, VT, Trunc, Amt);
12122 }
12123 }
12124
12125 if (SDValue V = foldSubToUSubSat(VT, N0.getNode()))
12126 return V;
12127
12128 // Attempt to pre-truncate BUILD_VECTOR sources.
12129 if (N0.getOpcode() == ISD::BUILD_VECTOR && !LegalOperations &&
12130 TLI.isTruncateFree(SrcVT.getScalarType(), VT.getScalarType()) &&
12131 // Avoid creating illegal types if running after type legalizer.
12132 (!LegalTypes || TLI.isTypeLegal(VT.getScalarType()))) {
12133 SDLoc DL(N);
12134 EVT SVT = VT.getScalarType();
12135 SmallVector<SDValue, 8> TruncOps;
12136 for (const SDValue &Op : N0->op_values()) {
12137 SDValue TruncOp = DAG.getNode(ISD::TRUNCATE, DL, SVT, Op);
12138 TruncOps.push_back(TruncOp);
12139 }
12140 return DAG.getBuildVector(VT, DL, TruncOps);
12141 }
12142
12143 // Fold a series of buildvector, bitcast, and truncate if possible.
12144 // For example fold
12145 // (2xi32 trunc (bitcast ((4xi32)buildvector x, x, y, y) 2xi64)) to
12146 // (2xi32 (buildvector x, y)).
12147 if (Level == AfterLegalizeVectorOps && VT.isVector() &&
12148 N0.getOpcode() == ISD::BITCAST && N0.hasOneUse() &&
12149 N0.getOperand(0).getOpcode() == ISD::BUILD_VECTOR &&
12150 N0.getOperand(0).hasOneUse()) {
12151 SDValue BuildVect = N0.getOperand(0);
12152 EVT BuildVectEltTy = BuildVect.getValueType().getVectorElementType();
12153 EVT TruncVecEltTy = VT.getVectorElementType();
12154
12155 // Check that the element types match.
12156 if (BuildVectEltTy == TruncVecEltTy) {
12157 // Now we only need to compute the offset of the truncated elements.
12158 unsigned BuildVecNumElts = BuildVect.getNumOperands();
12159 unsigned TruncVecNumElts = VT.getVectorNumElements();
12160 unsigned TruncEltOffset = BuildVecNumElts / TruncVecNumElts;
12161
12162      assert((BuildVecNumElts % TruncVecNumElts) == 0 &&
12163             "Invalid number of elements");
12164
12165 SmallVector<SDValue, 8> Opnds;
12166 for (unsigned i = 0, e = BuildVecNumElts; i != e; i += TruncEltOffset)
12167 Opnds.push_back(BuildVect.getOperand(i));
12168
12169 return DAG.getBuildVector(VT, SDLoc(N), Opnds);
12170 }
12171 }
12172
12173 // See if we can simplify the input to this truncate through knowledge that
12174 // only the low bits are being used.
12175 // For example "trunc (or (shl x, 8), y)" // -> trunc y
12176 // Currently we only perform this optimization on scalars because vectors
12177 // may have different active low bits.
12178 if (!VT.isVector()) {
12179 APInt Mask =
12180 APInt::getLowBitsSet(N0.getValueSizeInBits(), VT.getSizeInBits());
12181 if (SDValue Shorter = DAG.GetDemandedBits(N0, Mask))
12182 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Shorter);
12183 }
12184
12185 // fold (truncate (load x)) -> (smaller load x)
12186 // fold (truncate (srl (load x), c)) -> (smaller load (x+c/evtbits))
12187 if (!LegalTypes || TLI.isTypeDesirableForOp(N0.getOpcode(), VT)) {
12188 if (SDValue Reduced = ReduceLoadWidth(N))
12189 return Reduced;
12190
12191 // Handle the case where the load remains an extending load even
12192 // after truncation.
12193 if (N0.hasOneUse() && ISD::isUNINDEXEDLoad(N0.getNode())) {
12194 LoadSDNode *LN0 = cast<LoadSDNode>(N0);
12195 if (LN0->isSimple() && LN0->getMemoryVT().bitsLT(VT)) {
12196 SDValue NewLoad = DAG.getExtLoad(LN0->getExtensionType(), SDLoc(LN0),
12197 VT, LN0->getChain(), LN0->getBasePtr(),
12198 LN0->getMemoryVT(),
12199 LN0->getMemOperand());
12200 DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), NewLoad.getValue(1));
12201 return NewLoad;
12202 }
12203 }
12204 }
12205
12206 // fold (trunc (concat ... x ...)) -> (concat ..., (trunc x), ...)),
12207 // where ... are all 'undef'.
12208 if (N0.getOpcode() == ISD::CONCAT_VECTORS && !LegalTypes) {
12209 SmallVector<EVT, 8> VTs;
12210 SDValue V;
12211 unsigned Idx = 0;
12212 unsigned NumDefs = 0;
12213
12214 for (unsigned i = 0, e = N0.getNumOperands(); i != e; ++i) {
12215 SDValue X = N0.getOperand(i);
12216 if (!X.isUndef()) {
12217 V = X;
12218 Idx = i;
12219 NumDefs++;
12220 }
12221      // Stop if more than one member is non-undef.
12222 if (NumDefs > 1)
12223 break;
12224
12225 VTs.push_back(EVT::getVectorVT(*DAG.getContext(),
12226 VT.getVectorElementType(),
12227 X.getValueType().getVectorElementCount()));
12228 }
12229
12230 if (NumDefs == 0)
12231 return DAG.getUNDEF(VT);
12232
12233 if (NumDefs == 1) {
12234      assert(V.getNode() && "The single defined operand is empty!");
12235 SmallVector<SDValue, 8> Opnds;
12236 for (unsigned i = 0, e = VTs.size(); i != e; ++i) {
12237 if (i != Idx) {
12238 Opnds.push_back(DAG.getUNDEF(VTs[i]));
12239 continue;
12240 }
12241 SDValue NV = DAG.getNode(ISD::TRUNCATE, SDLoc(V), VTs[i], V);
12242 AddToWorklist(NV.getNode());
12243 Opnds.push_back(NV);
12244 }
12245 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, Opnds);
12246 }
12247 }
12248
12249 // Fold truncate of a bitcast of a vector to an extract of the low vector
12250 // element.
12251 //
12252 // e.g. trunc (i64 (bitcast v2i32:x)) -> extract_vector_elt v2i32:x, idx
12253 if (N0.getOpcode() == ISD::BITCAST && !VT.isVector()) {
12254 SDValue VecSrc = N0.getOperand(0);
12255 EVT VecSrcVT = VecSrc.getValueType();
12256 if (VecSrcVT.isVector() && VecSrcVT.getScalarType() == VT &&
12257 (!LegalOperations ||
12258 TLI.isOperationLegal(ISD::EXTRACT_VECTOR_ELT, VecSrcVT))) {
12259 SDLoc SL(N);
12260
12261 unsigned Idx = isLE ? 0 : VecSrcVT.getVectorNumElements() - 1;
12262 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, VT, VecSrc,
12263 DAG.getVectorIdxConstant(Idx, SL));
12264 }
12265 }
12266
12267 // Simplify the operands using demanded-bits information.
12268 if (SimplifyDemandedBits(SDValue(N, 0)))
12269 return SDValue(N, 0);
12270
12271 // (trunc adde(X, Y, Carry)) -> (adde trunc(X), trunc(Y), Carry)
12272 // (trunc addcarry(X, Y, Carry)) -> (addcarry trunc(X), trunc(Y), Carry)
12273 // When the adde's carry is not used.
12274 if ((N0.getOpcode() == ISD::ADDE || N0.getOpcode() == ISD::ADDCARRY) &&
12275 N0.hasOneUse() && !N0.getNode()->hasAnyUseOfValue(1) &&
12276      // We only do this for addcarry before operation legalization
12277 ((!LegalOperations && N0.getOpcode() == ISD::ADDCARRY) ||
12278 TLI.isOperationLegal(N0.getOpcode(), VT))) {
12279 SDLoc SL(N);
12280 auto X = DAG.getNode(ISD::TRUNCATE, SL, VT, N0.getOperand(0));
12281 auto Y = DAG.getNode(ISD::TRUNCATE, SL, VT, N0.getOperand(1));
12282 auto VTs = DAG.getVTList(VT, N0->getValueType(1));
12283 return DAG.getNode(N0.getOpcode(), SL, VTs, X, Y, N0.getOperand(2));
12284 }
12285
12286 // fold (truncate (extract_subvector(ext x))) ->
12287 // (extract_subvector x)
12288 // TODO: This can be generalized to cover cases where the truncate and extract
12289 // do not fully cancel each other out.
12290 if (!LegalTypes && N0.getOpcode() == ISD::EXTRACT_SUBVECTOR) {
12291 SDValue N00 = N0.getOperand(0);
12292 if (N00.getOpcode() == ISD::SIGN_EXTEND ||
12293 N00.getOpcode() == ISD::ZERO_EXTEND ||
12294 N00.getOpcode() == ISD::ANY_EXTEND) {
12295 if (N00.getOperand(0)->getValueType(0).getVectorElementType() ==
12296 VT.getVectorElementType())
12297 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N0->getOperand(0)), VT,
12298 N00.getOperand(0), N0.getOperand(1));
12299 }
12300 }
12301
12302 if (SDValue NewVSel = matchVSelectOpSizesWithSetCC(N))
12303 return NewVSel;
12304
12305 // Narrow a suitable binary operation with a non-opaque constant operand by
12306 // moving it ahead of the truncate. This is limited to pre-legalization
12307 // because targets may prefer a wider type during later combines and invert
12308 // this transform.
12309 switch (N0.getOpcode()) {
12310 case ISD::ADD:
12311 case ISD::SUB:
12312 case ISD::MUL:
12313 case ISD::AND:
12314 case ISD::OR:
12315 case ISD::XOR:
12316 if (!LegalOperations && N0.hasOneUse() &&
12317 (isConstantOrConstantVector(N0.getOperand(0), true) ||
12318 isConstantOrConstantVector(N0.getOperand(1), true))) {
12319 // TODO: We already restricted this to pre-legalization, but for vectors
12320 // we are extra cautious to not create an unsupported operation.
12321 // Target-specific changes are likely needed to avoid regressions here.
12322 if (VT.isScalarInteger() || TLI.isOperationLegal(N0.getOpcode(), VT)) {
12323 SDLoc DL(N);
12324 SDValue NarrowL = DAG.getNode(ISD::TRUNCATE, DL, VT, N0.getOperand(0));
12325 SDValue NarrowR = DAG.getNode(ISD::TRUNCATE, DL, VT, N0.getOperand(1));
12326 return DAG.getNode(N0.getOpcode(), DL, VT, NarrowL, NarrowR);
12327 }
12328 }
12329 break;
12330 case ISD::USUBSAT:
12331    // Truncate the USUBSAT only if LHS is a known zero-extension; it's not
12332    // enough to know that the upper bits are zero, we must ensure that we
12333    // don't introduce an extra truncate.
12334 if (!LegalOperations && N0.hasOneUse() &&
12335 N0.getOperand(0).getOpcode() == ISD::ZERO_EXTEND &&
12336 N0.getOperand(0).getOperand(0).getScalarValueSizeInBits() <=
12337 VT.getScalarSizeInBits() &&
12338 hasOperation(N0.getOpcode(), VT)) {
12339 return getTruncatedUSUBSAT(VT, SrcVT, N0.getOperand(0), N0.getOperand(1),
12340 DAG, SDLoc(N));
12341 }
12342 break;
12343 }
12344
12345 return SDValue();
12346}
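// Illustrative example for the constant-operand narrowing above (types
// assumed, pre-legalization): (i8 trunc (i32 add X, 1)) becomes
// (add (i8 trunc X), 1), since truncation distributes over the add and the
// constant is truncated for free.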
12347
12348static SDNode *getBuildPairElt(SDNode *N, unsigned i) {
12349 SDValue Elt = N->getOperand(i);
12350 if (Elt.getOpcode() != ISD::MERGE_VALUES)
12351 return Elt.getNode();
12352 return Elt.getOperand(Elt.getResNo()).getNode();
12353}
12354
12355/// build_pair (load, load) -> load
12356/// if load locations are consecutive.
12357SDValue DAGCombiner::CombineConsecutiveLoads(SDNode *N, EVT VT) {
12358  assert(N->getOpcode() == ISD::BUILD_PAIR);
12359
12360 LoadSDNode *LD1 = dyn_cast<LoadSDNode>(getBuildPairElt(N, 0));
12361 LoadSDNode *LD2 = dyn_cast<LoadSDNode>(getBuildPairElt(N, 1));
12362
12363  // A BUILD_PAIR always has the least significant part in elt 0 and the
12364  // most significant part in elt 1, so when combining into one large load we
12365  // need to consider the endianness.
12366 if (DAG.getDataLayout().isBigEndian())
12367 std::swap(LD1, LD2);
12368
12369 if (!LD1 || !LD2 || !ISD::isNON_EXTLoad(LD1) || !LD1->hasOneUse() ||
12370 LD1->getAddressSpace() != LD2->getAddressSpace())
12371 return SDValue();
12372 EVT LD1VT = LD1->getValueType(0);
12373 unsigned LD1Bytes = LD1VT.getStoreSize();
12374 if (ISD::isNON_EXTLoad(LD2) && LD2->hasOneUse() &&
12375 DAG.areNonVolatileConsecutiveLoads(LD2, LD1, LD1Bytes, 1)) {
12376 Align Alignment = LD1->getAlign();
12377 Align NewAlign = DAG.getDataLayout().getABITypeAlign(
12378 VT.getTypeForEVT(*DAG.getContext()));
12379
12380 if (NewAlign <= Alignment &&
12381 (!LegalOperations || TLI.isOperationLegal(ISD::LOAD, VT)))
12382 return DAG.getLoad(VT, SDLoc(N), LD1->getChain(), LD1->getBasePtr(),
12383 LD1->getPointerInfo(), Alignment);
12384 }
12385
12386 return SDValue();
12387}
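// Illustrative example (little endian, alignment assumed): (build_pair
// (i32 load [p]), (i32 load [p+4])) combines into a single (i64 load [p])
// once the loads are proven consecutive, non-volatile, and single-use, and
// the wide type needs no stricter ABI alignment than the first load provides.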
12388
12389static unsigned getPPCf128HiElementSelector(const SelectionDAG &DAG) {
12390 // On little-endian machines, bitcasting from ppcf128 to i128 does swap the Hi
12391 // and Lo parts; on big-endian machines it doesn't.
12392 return DAG.getDataLayout().isBigEndian() ? 1 : 0;
12393}
12394
12395static SDValue foldBitcastedFPLogic(SDNode *N, SelectionDAG &DAG,
12396 const TargetLowering &TLI) {
12397 // If this is not a bitcast to an FP type or if the target doesn't have
12398 // IEEE754-compliant FP logic, we're done.
12399 EVT VT = N->getValueType(0);
12400 if (!VT.isFloatingPoint() || !TLI.hasBitPreservingFPLogic(VT))
12401 return SDValue();
12402
12403 // TODO: Handle cases where the integer constant is a different scalar
12404 // bitwidth to the FP.
12405 SDValue N0 = N->getOperand(0);
12406 EVT SourceVT = N0.getValueType();
12407 if (VT.getScalarSizeInBits() != SourceVT.getScalarSizeInBits())
12408 return SDValue();
12409
12410 unsigned FPOpcode;
12411 APInt SignMask;
12412 switch (N0.getOpcode()) {
12413 case ISD::AND:
12414 FPOpcode = ISD::FABS;
12415 SignMask = ~APInt::getSignMask(SourceVT.getScalarSizeInBits());
12416 break;
12417 case ISD::XOR:
12418 FPOpcode = ISD::FNEG;
12419 SignMask = APInt::getSignMask(SourceVT.getScalarSizeInBits());
12420 break;
12421 case ISD::OR:
12422 FPOpcode = ISD::FABS;
12423 SignMask = APInt::getSignMask(SourceVT.getScalarSizeInBits());
12424 break;
12425 default:
12426 return SDValue();
12427 }
12428
12429 // Fold (bitcast int (and (bitcast fp X to int), 0x7fff...) to fp) -> fabs X
12430 // Fold (bitcast int (xor (bitcast fp X to int), 0x8000...) to fp) -> fneg X
12431 // Fold (bitcast int (or (bitcast fp X to int), 0x8000...) to fp) ->
12432 // fneg (fabs X)
12433 SDValue LogicOp0 = N0.getOperand(0);
12434 ConstantSDNode *LogicOp1 = isConstOrConstSplat(N0.getOperand(1), true);
12435 if (LogicOp1 && LogicOp1->getAPIntValue() == SignMask &&
12436 LogicOp0.getOpcode() == ISD::BITCAST &&
12437 LogicOp0.getOperand(0).getValueType() == VT) {
12438 SDValue FPOp = DAG.getNode(FPOpcode, SDLoc(N), VT, LogicOp0.getOperand(0));
12439 NumFPLogicOpsConv++;
12440 if (N0.getOpcode() == ISD::OR)
12441 return DAG.getNode(ISD::FNEG, SDLoc(N), VT, FPOp);
12442 return FPOp;
12443 }
12444
12445 return SDValue();
12446}
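// Illustrative example (f32, IEEE754-style target assumed):
//   (f32 bitcast (xor (i32 bitcast X), 0x80000000)) --> (fneg X)
//   (f32 bitcast (and (i32 bitcast X), 0x7FFFFFFF)) --> (fabs X)
//   (f32 bitcast (or  (i32 bitcast X), 0x80000000)) --> (fneg (fabs X))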
12447
12448SDValue DAGCombiner::visitBITCAST(SDNode *N) {
12449 SDValue N0 = N->getOperand(0);
12450 EVT VT = N->getValueType(0);
12451
12452 if (N0.isUndef())
12453 return DAG.getUNDEF(VT);
12454
12455 // If the input is a BUILD_VECTOR with all constant elements, fold this now.
12456 // Only do this before legalize types, unless both types are integer and the
12457 // scalar type is legal. Only do this before legalize ops, since the target
12458  // may be depending on the bitcast.
12459 // First check to see if this is all constant.
12460 // TODO: Support FP bitcasts after legalize types.
12461 if (VT.isVector() &&
12462 (!LegalTypes ||
12463 (!LegalOperations && VT.isInteger() && N0.getValueType().isInteger() &&
12464 TLI.isTypeLegal(VT.getVectorElementType()))) &&
12465 N0.getOpcode() == ISD::BUILD_VECTOR && N0.getNode()->hasOneUse() &&
12466 cast<BuildVectorSDNode>(N0)->isConstant())
12467 return ConstantFoldBITCASTofBUILD_VECTOR(N0.getNode(),
12468 VT.getVectorElementType());
12469
12470 // If the input is a constant, let getNode fold it.
12471 if (isIntOrFPConstant(N0)) {
12472 // If we can't allow illegal operations, we need to check that this is just
12473    // an fp -> int or int -> fp conversion and that the resulting operation will
12474 // be legal.
12475 if (!LegalOperations ||
12476 (isa<ConstantSDNode>(N0) && VT.isFloatingPoint() && !VT.isVector() &&
12477 TLI.isOperationLegal(ISD::ConstantFP, VT)) ||
12478 (isa<ConstantFPSDNode>(N0) && VT.isInteger() && !VT.isVector() &&
12479 TLI.isOperationLegal(ISD::Constant, VT))) {
12480 SDValue C = DAG.getBitcast(VT, N0);
12481 if (C.getNode() != N)
12482 return C;
12483 }
12484 }
12485
12486 // (conv (conv x, t1), t2) -> (conv x, t2)
12487 if (N0.getOpcode() == ISD::BITCAST)
12488 return DAG.getBitcast(VT, N0.getOperand(0));
12489
12490 // fold (conv (load x)) -> (load (conv*)x)
12491 // If the resultant load doesn't need a higher alignment than the original!
12492 if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() &&
12493 // Do not remove the cast if the types differ in endian layout.
12494 TLI.hasBigEndianPartOrdering(N0.getValueType(), DAG.getDataLayout()) ==
12495 TLI.hasBigEndianPartOrdering(VT, DAG.getDataLayout()) &&
12496 // If the load is volatile, we only want to change the load type if the
12497 // resulting load is legal. Otherwise we might increase the number of
12498 // memory accesses. We don't care if the original type was legal or not
12499 // as we assume software couldn't rely on the number of accesses of an
12500 // illegal type.
12501 ((!LegalOperations && cast<LoadSDNode>(N0)->isSimple()) ||
12502 TLI.isOperationLegal(ISD::LOAD, VT))) {
12503 LoadSDNode *LN0 = cast<LoadSDNode>(N0);
12504
12505 if (TLI.isLoadBitCastBeneficial(N0.getValueType(), VT, DAG,
12506 *LN0->getMemOperand())) {
12507 SDValue Load =
12508 DAG.getLoad(VT, SDLoc(N), LN0->getChain(), LN0->getBasePtr(),
12509 LN0->getPointerInfo(), LN0->getAlign(),
12510 LN0->getMemOperand()->getFlags(), LN0->getAAInfo());
12511 DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), Load.getValue(1));
12512 return Load;
12513 }
12514 }
12515
12516 if (SDValue V = foldBitcastedFPLogic(N, DAG, TLI))
12517 return V;
12518
12519 // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
12520 // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
12521 //
12522 // For ppc_fp128:
12523 // fold (bitcast (fneg x)) ->
12524 // flipbit = signbit
12525 // (xor (bitcast x) (build_pair flipbit, flipbit))
12526 //
12527 // fold (bitcast (fabs x)) ->
12528 // flipbit = (and (extract_element (bitcast x), 0), signbit)
12529 // (xor (bitcast x) (build_pair flipbit, flipbit))
12530 // This often reduces constant pool loads.
12531 if (((N0.getOpcode() == ISD::FNEG && !TLI.isFNegFree(N0.getValueType())) ||
12532 (N0.getOpcode() == ISD::FABS && !TLI.isFAbsFree(N0.getValueType()))) &&
12533 N0.getNode()->hasOneUse() && VT.isInteger() &&
12534 !VT.isVector() && !N0.getValueType().isVector()) {
12535 SDValue NewConv = DAG.getBitcast(VT, N0.getOperand(0));
12536 AddToWorklist(NewConv.getNode());
12537
12538 SDLoc DL(N);
12539 if (N0.getValueType() == MVT::ppcf128 && !LegalTypes) {
12540      assert(VT.getSizeInBits() == 128);
12541 SDValue SignBit = DAG.getConstant(
12542 APInt::getSignMask(VT.getSizeInBits() / 2), SDLoc(N0), MVT::i64);
12543 SDValue FlipBit;
12544 if (N0.getOpcode() == ISD::FNEG) {
12545 FlipBit = SignBit;
12546 AddToWorklist(FlipBit.getNode());
12547 } else {
12548        assert(N0.getOpcode() == ISD::FABS);
12549 SDValue Hi =
12550 DAG.getNode(ISD::EXTRACT_ELEMENT, SDLoc(NewConv), MVT::i64, NewConv,
12551 DAG.getIntPtrConstant(getPPCf128HiElementSelector(DAG),
12552 SDLoc(NewConv)));
12553 AddToWorklist(Hi.getNode());
12554 FlipBit = DAG.getNode(ISD::AND, SDLoc(N0), MVT::i64, Hi, SignBit);
12555 AddToWorklist(FlipBit.getNode());
12556 }
12557 SDValue FlipBits =
12558 DAG.getNode(ISD::BUILD_PAIR, SDLoc(N0), VT, FlipBit, FlipBit);
12559 AddToWorklist(FlipBits.getNode());
12560 return DAG.getNode(ISD::XOR, DL, VT, NewConv, FlipBits);
12561 }
12562 APInt SignBit = APInt::getSignMask(VT.getSizeInBits());
12563 if (N0.getOpcode() == ISD::FNEG)
12564 return DAG.getNode(ISD::XOR, DL, VT,
12565 NewConv, DAG.getConstant(SignBit, DL, VT));
12566    assert(N0.getOpcode() == ISD::FABS);
12567 return DAG.getNode(ISD::AND, DL, VT,
12568 NewConv, DAG.getConstant(~SignBit, DL, VT));
12569 }
12570
12571 // fold (bitconvert (fcopysign cst, x)) ->
12572 // (or (and (bitconvert x), sign), (and cst, (not sign)))
12573 // Note that we don't handle (copysign x, cst) because this can always be
12574 // folded to an fneg or fabs.
12575 //
12576 // For ppc_fp128:
12577 // fold (bitcast (fcopysign cst, x)) ->
12578 // flipbit = (and (extract_element
12579 // (xor (bitcast cst), (bitcast x)), 0),
12580 // signbit)
12581 // (xor (bitcast cst) (build_pair flipbit, flipbit))
12582 if (N0.getOpcode() == ISD::FCOPYSIGN && N0.getNode()->hasOneUse() &&
12583 isa<ConstantFPSDNode>(N0.getOperand(0)) &&
12584 VT.isInteger() && !VT.isVector()) {
12585 unsigned OrigXWidth = N0.getOperand(1).getValueSizeInBits();
12586 EVT IntXVT = EVT::getIntegerVT(*DAG.getContext(), OrigXWidth);
12587 if (isTypeLegal(IntXVT)) {
12588 SDValue X = DAG.getBitcast(IntXVT, N0.getOperand(1));
12589 AddToWorklist(X.getNode());
12590
12591 // If X has a different width than the result/lhs, sext it or truncate it.
12592 unsigned VTWidth = VT.getSizeInBits();
12593 if (OrigXWidth < VTWidth) {
12594 X = DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), VT, X);
12595 AddToWorklist(X.getNode());
12596 } else if (OrigXWidth > VTWidth) {
12597 // To get the sign bit in the right place, we have to shift it right
12598 // before truncating.
12599 SDLoc DL(X);
12600 X = DAG.getNode(ISD::SRL, DL,
12601 X.getValueType(), X,
12602 DAG.getConstant(OrigXWidth-VTWidth, DL,
12603 X.getValueType()));
12604 AddToWorklist(X.getNode());
12605 X = DAG.getNode(ISD::TRUNCATE, SDLoc(X), VT, X);
12606 AddToWorklist(X.getNode());
12607 }
12608
12609 if (N0.getValueType() == MVT::ppcf128 && !LegalTypes) {
12610 APInt SignBit = APInt::getSignMask(VT.getSizeInBits() / 2);
12611 SDValue Cst = DAG.getBitcast(VT, N0.getOperand(0));
12612 AddToWorklist(Cst.getNode());
12613 SDValue X = DAG.getBitcast(VT, N0.getOperand(1));
12614 AddToWorklist(X.getNode());
12615 SDValue XorResult = DAG.getNode(ISD::XOR, SDLoc(N0), VT, Cst, X);
12616 AddToWorklist(XorResult.getNode());
12617 SDValue XorResult64 = DAG.getNode(
12618 ISD::EXTRACT_ELEMENT, SDLoc(XorResult), MVT::i64, XorResult,
12619 DAG.getIntPtrConstant(getPPCf128HiElementSelector(DAG),
12620 SDLoc(XorResult)));
12621 AddToWorklist(XorResult64.getNode());
12622 SDValue FlipBit =
12623 DAG.getNode(ISD::AND, SDLoc(XorResult64), MVT::i64, XorResult64,
12624 DAG.getConstant(SignBit, SDLoc(XorResult64), MVT::i64));
12625 AddToWorklist(FlipBit.getNode());
12626 SDValue FlipBits =
12627 DAG.getNode(ISD::BUILD_PAIR, SDLoc(N0), VT, FlipBit, FlipBit);
12628 AddToWorklist(FlipBits.getNode());
12629 return DAG.getNode(ISD::XOR, SDLoc(N), VT, Cst, FlipBits);
12630 }
12631 APInt SignBit = APInt::getSignMask(VT.getSizeInBits());
12632 X = DAG.getNode(ISD::AND, SDLoc(X), VT,
12633 X, DAG.getConstant(SignBit, SDLoc(X), VT));
12634 AddToWorklist(X.getNode());
12635
12636 SDValue Cst = DAG.getBitcast(VT, N0.getOperand(0));
12637 Cst = DAG.getNode(ISD::AND, SDLoc(Cst), VT,
12638 Cst, DAG.getConstant(~SignBit, SDLoc(Cst), VT));
12639 AddToWorklist(Cst.getNode());
12640
12641 return DAG.getNode(ISD::OR, SDLoc(N), VT, X, Cst);
12642 }
12643 }
12644
12645 // bitconvert(build_pair(ld, ld)) -> ld iff load locations are consecutive.
12646 if (N0.getOpcode() == ISD::BUILD_PAIR)
12647 if (SDValue CombineLD = CombineConsecutiveLoads(N0.getNode(), VT))
12648 return CombineLD;
12649
12650 // Remove double bitcasts from shuffles - this is often a legacy of
12651 // XformToShuffleWithZero being used to combine bitmaskings (of
12652 // float vectors bitcast to integer vectors) into shuffles.
12653 // bitcast(shuffle(bitcast(s0),bitcast(s1))) -> shuffle(s0,s1)
12654 if (Level < AfterLegalizeDAG && TLI.isTypeLegal(VT) && VT.isVector() &&
12655 N0->getOpcode() == ISD::VECTOR_SHUFFLE && N0.hasOneUse() &&
12656 VT.getVectorNumElements() >= N0.getValueType().getVectorNumElements() &&
12657 !(VT.getVectorNumElements() % N0.getValueType().getVectorNumElements())) {
12658 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N0);
12659
12660 // If operands are a bitcast, peek through if it casts the original VT.
12661 // If operands are a constant, just bitcast back to original VT.
12662 auto PeekThroughBitcast = [&](SDValue Op) {
12663 if (Op.getOpcode() == ISD::BITCAST &&
12664 Op.getOperand(0).getValueType() == VT)
12665 return SDValue(Op.getOperand(0));
12666 if (Op.isUndef() || ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) ||
12667 ISD::isBuildVectorOfConstantFPSDNodes(Op.getNode()))
12668 return DAG.getBitcast(VT, Op);
12669 return SDValue();
12670 };
12671
12672 // FIXME: If either input vector is bitcast, try to convert the shuffle to
12673 // the result type of this bitcast. This would eliminate at least one
12674 // bitcast. See the transform in InstCombine.
12675 SDValue SV0 = PeekThroughBitcast(N0->getOperand(0));
12676 SDValue SV1 = PeekThroughBitcast(N0->getOperand(1));
12677 if (!(SV0 && SV1))
12678 return SDValue();
12679
12680 int MaskScale =
12681 VT.getVectorNumElements() / N0.getValueType().getVectorNumElements();
12682 SmallVector<int, 8> NewMask;
12683 for (int M : SVN->getMask())
12684 for (int i = 0; i != MaskScale; ++i)
12685 NewMask.push_back(M < 0 ? -1 : M * MaskScale + i);
12686
12687 SDValue LegalShuffle =
12688 TLI.buildLegalVectorShuffle(VT, SDLoc(N), SV0, SV1, NewMask, DAG);
12689 if (LegalShuffle)
12690 return LegalShuffle;
12691 }
12692
12693 return SDValue();
12694}
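// Illustrative example for the shuffle case above (legal types assumed):
// bitcasting (v2i64 shuffle <1,0> of two bitcast v4i32 sources) back to v4i32
// rebuilds the shuffle on the original vectors with MaskScale = 2, turning
// mask <1,0> into <2,3,0,1>.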
12695
12696SDValue DAGCombiner::visitBUILD_PAIR(SDNode *N) {
12697 EVT VT = N->getValueType(0);
12698 return CombineConsecutiveLoads(N, VT);
12699}
12700
12701SDValue DAGCombiner::visitFREEZE(SDNode *N) {
12702 SDValue N0 = N->getOperand(0);
12703
12704 // (freeze (freeze x)) -> (freeze x)
12705 if (N0.getOpcode() == ISD::FREEZE)
12706 return N0;
12707
12708 // If the input is a constant, return it.
12709 if (isIntOrFPConstant(N0))
12710 return N0;
12711
12712 return SDValue();
12713}
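  // [Editorial note] FREEZE pins an undef/poison input to one arbitrary but
  // fixed value, so a second freeze adds nothing and freezing a constant is
  // a no-op -- exactly the two folds above.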
12714
12715/// We know that BV is a build_vector node with Constant, ConstantFP or Undef
12716/// operands. DstEltVT indicates the destination element value type.
12717SDValue DAGCombiner::
12718ConstantFoldBITCASTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
12719 EVT SrcEltVT = BV->getValueType(0).getVectorElementType();
12720
12721 // If this is already the right type, we're done.
12722 if (SrcEltVT == DstEltVT) return SDValue(BV, 0);
12723
12724 unsigned SrcBitSize = SrcEltVT.getSizeInBits();
12725 unsigned DstBitSize = DstEltVT.getSizeInBits();
12726
12727 // If this is a conversion of N elements of one type to N elements of another
12728 // type, convert each element. This handles FP<->INT cases.
12729 if (SrcBitSize == DstBitSize) {
12730 SmallVector<SDValue, 8> Ops;
12731 for (SDValue Op : BV->op_values()) {
12732 // If the vector element type is not legal, the BUILD_VECTOR operands
12733 // are promoted and implicitly truncated. Make that explicit here.
12734 if (Op.getValueType() != SrcEltVT)
12735 Op = DAG.getNode(ISD::TRUNCATE, SDLoc(BV), SrcEltVT, Op);
12736 Ops.push_back(DAG.getBitcast(DstEltVT, Op));
12737 AddToWorklist(Ops.back().getNode());
12738 }
12739 EVT VT = EVT::getVectorVT(*DAG.getContext(), DstEltVT,
12740 BV->getValueType(0).getVectorNumElements());
12741 return DAG.getBuildVector(VT, SDLoc(BV), Ops);
12742 }
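  // [Editorial example, not part of the LLVM source] For a same-width cast
  // such as v4f32 -> v4i32, the loop above rewrites each element in place,
  // e.g. build_vector(1.0f, 2.0f, ...) becomes
  // build_vector(0x3F800000, 0x40000000, ...).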
12743
12744 // Otherwise, we're growing or shrinking the elements. To avoid having to
12745 // handle annoying details of growing/shrinking FP values, we convert them to
12746 // int first.
12747 if (SrcEltVT.isFloatingPoint()) {
12748    // Convert the input float vector to an int vector whose elements have
12749    // the same size.
12750 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), SrcEltVT.getSizeInBits());
12751 BV = ConstantFoldBITCASTofBUILD_VECTOR(BV, IntVT).getNode();
12752 SrcEltVT = IntVT;
12753 }
12754
12755 // Now we know the input is an integer vector. If the output is a FP type,
12756 // convert to integer first, then to FP of the right size.
12757 if (DstEltVT.isFloatingPoint()) {
12758 EVT TmpVT = EVT::getIntegerVT(*DAG.getContext(), DstEltVT.getSizeInBits());
12759 SDNode *Tmp = ConstantFoldBITCASTofBUILD_VECTOR(BV, TmpVT).getNode();
12760
12761 // Next, convert to FP elements of the same size.
12762 return ConstantFoldBITCASTofBUILD_VECTOR(Tmp, DstEltVT);
12763 }
12764
12765 SDLoc DL(BV);
12766
12767 // Okay, we know the src/dst types are both integers of differing types.
12768  // Handle the growing case first.
12769  assert(SrcEltVT.isInteger() && DstEltVT.isInteger());
12770 if (SrcBitSize < DstBitSize) {
12771 unsigned NumInputsPerOutput = DstBitSize/SrcBitSize;
12772
12773 SmallVector<SDValue, 8> Ops;
12774 for (unsigned i = 0, e = BV->getNumOperands(); i != e;
12775 i += NumInputsPerOutput) {
12776 bool isLE = DAG.getDataLayout().isLittleEndian();
12777 APInt NewBits = APInt(DstBitSize, 0);
12778 bool EltIsUndef = true;
12779 for (unsigned j = 0; j != NumInputsPerOutput; ++j) {
12780 // Shift the previously computed bits over.
12781 NewBits <<= SrcBitSize;
12782 SDValue Op = BV->getOperand(i+ (isLE ? (NumInputsPerOutput-j-1) : j));
12783 if (Op.isUndef()) continue;
12784 EltIsUndef = false;
12785
12786 NewBits |= cast<ConstantSDNode>(Op)->getAPIntValue().
12787 zextOrTrunc(SrcBitSize).zext(DstBitSize);
12788 }
12789
12790 if (EltIsUndef)
12791 Ops.push_back(DAG.getUNDEF(DstEltVT));
12792 else
12793 Ops.push_back(DAG.getConstant(NewBits, DL, DstEltVT));
12794 }
12795
12796 EVT VT = EVT::getVectorVT(*DAG.getContext(), DstEltVT, Ops.size());
12797 return DAG.getBuildVector(VT, DL, Ops);
12798 }
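  // [Editorial example, not part of the LLVM source] Growing case: for
  // v4i16 {0x1111, 0x2222, 0x3333, 0x4444} -> v2i32, NumInputsPerOutput is
  // 2 and each output is assembled high part first, so a little-endian
  // target yields {0x22221111, 0x44443333} and a big-endian target
  // {0x11112222, 0x33334444}.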
12799
12800 // Finally, this must be the case where we are shrinking elements: each input
12801 // turns into multiple outputs.
12802 unsigned NumOutputsPerInput = SrcBitSize/DstBitSize;
12803 EVT VT = EVT::getVectorVT(*DAG.getContext(), DstEltVT,
12804 NumOutputsPerInput*BV->getNumOperands());
12805 SmallVector<SDValue, 8> Ops;
12806
12807 for (const SDValue &Op : BV->op_values()) {
12808 if (Op.isUndef()) {
12809 Ops.append(NumOutputsPerInput, DAG.getUNDEF(DstEltVT));
12810 continue;
12811 }
12812
12813 APInt OpVal = cast<ConstantSDNode>(Op)->
12814 getAPIntValue().zextOrTrunc(SrcBitSize);
12815
12816 for (unsigned j = 0; j != NumOutputsPerInput; ++j) {
12817 APInt ThisVal = OpVal.trunc(DstBitSize);
12818 Ops.push_back(DAG.getConstant(ThisVal, DL, DstEltVT));
12819 OpVal.lshrInPlace(DstBitSize);
12820 }
12821
12822 // For big endian targets, swap the order of the pieces of each element.
12823 if (DAG.getDataLayout().isBigEndian())
12824 std::reverse(Ops.end()-NumOutputsPerInput, Ops.end());
12825 }
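  // [Editorial example, not part of the LLVM source] Shrinking case, the
  // mirror image: v2i32 {0x22221111, 0x44443333} -> v4i16 emits the low
  // piece of each element first, giving {0x1111, 0x2222, 0x3333, 0x4444};
  // on big-endian targets the std::reverse above swaps the pieces back.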
12826
12827 return DAG.getBuildVector(VT, DL, Ops);
12828}
12829
12830static bool isContractable(SDNode *N) {
12831 SDNodeFlags F = N->getFlags();
12832 return F.hasAllowContract() || F.hasAllowReassociation();
12833}
12834
12835/// Try to perform FMA combining on a given FADD node.
12836SDValue DAGCombiner::visitFADDForFMACombine(SDNode *N) {
12837 SDValue N0 = N->getOperand(0);
12838 SDValue N1 = N->getOperand(1);
12839 EVT VT = N->getValueType(0);
12840 SDLoc SL(N);
12841
12842 const TargetOptions &Options = DAG.getTarget().Options;
12843
12844 // Floating-point multiply-add with intermediate rounding.
12845 bool HasFMAD = (LegalOperations && TLI.isFMADLegal(DAG, N));
12846
12847 // Floating-point multiply-add without intermediate rounding.
12848 bool HasFMA =
12849 TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT) &&
12850 (!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FMA, VT));
12851
12852 // No valid opcode, do not combine.
12853 if (!HasFMAD && !HasFMA)
12854 return SDValue();
12855
12856 bool CanFuse = Options.UnsafeFPMath || isContractable(N);
12857 bool CanReassociate =
12858 Options.UnsafeFPMath || N->getFlags().hasAllowReassociation();
12859 bool AllowFusionGlobally = (Options.AllowFPOpFusion == FPOpFusion::Fast ||
12860 CanFuse || HasFMAD);
12861 // If the addition is not contractable, do not combine.
12862 if (!AllowFusionGlobally && !isContractable(N))
12863 return SDValue();
12864
12865 if (TLI.generateFMAsInMachineCombiner(VT, OptLevel))
12866 return SDValue();
12867
12868 // Always prefer FMAD to FMA for precision.
12869 unsigned PreferredFusedOpcode = HasFMAD ? ISD::FMAD : ISD::FMA;
12870 bool Aggressive = TLI.enableAggressiveFMAFusion(VT);
12871
12872  // Returns true if the node is an FMUL that is contractable, either due to
12873  // global flags or to its own SDNodeFlags.
12874 auto isContractableFMUL = [AllowFusionGlobally](SDValue N) {
12875 if (N.getOpcode() != ISD::FMUL)
12876 return false;
12877 return AllowFusionGlobally || isContractable(N.getNode());
12878 };
12879 // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)),
12880 // prefer to fold the multiply with fewer uses.
12881 if (Aggressive && isContractableFMUL(N0) && isContractableFMUL(N1)) {
12882 if (N0.getNode()->use_size() > N1.getNode()->use_size())
12883 std::swap(N0, N1);
12884 }
12885
12886 // fold (fadd (fmul x, y), z) -> (fma x, y, z)
12887 if (isContractableFMUL(N0) && (Aggressive || N0->hasOneUse())) {
12888 return DAG.getNode(PreferredFusedOpcode, SL, VT, N0.getOperand(0),
12889 N0.getOperand(1), N1);
12890 }
12891
12892 // fold (fadd x, (fmul y, z)) -> (fma y, z, x)
12893 // Note: Commutes FADD operands.
12894 if (isContractableFMUL(N1) && (Aggressive || N1->hasOneUse())) {
12895 return DAG.getNode(PreferredFusedOpcode, SL, VT, N1.getOperand(0),
12896 N1.getOperand(1), N0);
12897 }
12898
12899 // fadd (fma A, B, (fmul C, D)), E --> fma A, B, (fma C, D, E)
12900 // fadd E, (fma A, B, (fmul C, D)) --> fma A, B, (fma C, D, E)
12901 // This requires reassociation because it changes the order of operations.
12902 SDValue FMA, E;
12903 if (CanReassociate && N0.getOpcode() == PreferredFusedOpcode &&
12904 N0.getOperand(2).getOpcode() == ISD::FMUL && N0.hasOneUse() &&
12905 N0.getOperand(2).hasOneUse()) {
12906 FMA = N0;
12907 E = N1;
12908 } else if (CanReassociate && N1.getOpcode() == PreferredFusedOpcode &&
12909 N1.getOperand(2).getOpcode() == ISD::FMUL && N1.hasOneUse() &&
12910 N1.getOperand(2).hasOneUse()) {
12911 FMA = N1;
12912 E = N0;
12913 }
12914 if (FMA && E) {
12915 SDValue A = FMA.getOperand(0);
12916 SDValue B = FMA.getOperand(1);
12917 SDValue C = FMA.getOperand(2).getOperand(0);
12918 SDValue D = FMA.getOperand(2).getOperand(1);
12919 SDValue CDE = DAG.getNode(PreferredFusedOpcode, SL, VT, C, D, E);
12920 return DAG.getNode(PreferredFusedOpcode, SL, VT, A, B, CDE);
12921 }
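  // [Editorial example, not part of the LLVM source] With reassociation
  // allowed, fadd (fma a, b, (fmul c, d)), e becomes fma a, b, (fma c, d, e):
  // still a*b + c*d + e, but the trailing add is folded into a second fma.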
12922
12923 // Look through FP_EXTEND nodes to do more combining.
12924
12925 // fold (fadd (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), z)
12926 if (N0.getOpcode() == ISD::FP_EXTEND) {
12927 SDValue N00 = N0.getOperand(0);
12928 if (isContractableFMUL(N00) &&
12929 TLI.isFPExtFoldable(DAG, PreferredFusedOpcode, VT,
12930 N00.getValueType())) {
12931 return DAG.getNode(PreferredFusedOpcode, SL, VT,
12932 DAG.getNode(ISD::FP_EXTEND, SL, VT, N00.getOperand(0)),
12933 DAG.getNode(ISD::FP_EXTEND, SL, VT, N00.getOperand(1)),
12934 N1);
12935 }
12936 }
12937
12938 // fold (fadd x, (fpext (fmul y, z))) -> (fma (fpext y), (fpext z), x)
12939 // Note: Commutes FADD operands.
12940 if (N1.getOpcode() == ISD::FP_EXTEND) {
12941 SDValue N10 = N1.getOperand(0);
12942 if (isContractableFMUL(N10) &&
12943 TLI.isFPExtFoldable(DAG, PreferredFusedOpcode, VT,
12944 N10.getValueType())) {
12945 return DAG.getNode(PreferredFusedOpcode, SL, VT,
12946 DAG.getNode(ISD::FP_EXTEND, SL, VT, N10.getOperand(0)),
12947 DAG.getNode(ISD::FP_EXTEND, SL, VT, N10.getOperand(1)),
12948 N0);
12949 }
12950 }
12951
12952 // More folding opportunities when target permits.
12953 if (Aggressive) {
12954 // fold (fadd (fma x, y, (fpext (fmul u, v))), z)
12955 // -> (fma x, y, (fma (fpext u), (fpext v), z))
12956 auto FoldFAddFMAFPExtFMul = [&](SDValue X, SDValue Y, SDValue U, SDValue V,
12957 SDValue Z) {
12958 return DAG.getNode(PreferredFusedOpcode, SL, VT, X, Y,
12959 DAG.getNode(PreferredFusedOpcode, SL, VT,
12960 DAG.getNode(ISD::FP_EXTEND, SL, VT, U),
12961 DAG.getNode(ISD::FP_EXTEND, SL, VT, V),
12962 Z));
12963 };
12964 if (N0.getOpcode() == PreferredFusedOpcode) {
12965 SDValue N02 = N0.getOperand(2);
12966 if (N02.getOpcode() == ISD::FP_EXTEND) {
12967 SDValue N020 = N02.getOperand(0);
12968 if (isContractableFMUL(N020) &&
12969 TLI.isFPExtFoldable(DAG, PreferredFusedOpcode, VT,
12970 N020.getValueType())) {
12971 return FoldFAddFMAFPExtFMul(N0.getOperand(0), N0.getOperand(1),
12972 N020.getOperand(0), N020.getOperand(1),
12973 N1);
12974 }
12975 }
12976 }
12977
12978 // fold (fadd (fpext (fma x, y, (fmul u, v))), z)
12979 // -> (fma (fpext x), (fpext y), (fma (fpext u), (fpext v), z))
12980 // FIXME: This turns two single-precision and one double-precision
12981 // operation into two double-precision operations, which might not be
12982 // interesting for all targets, especially GPUs.
12983 auto FoldFAddFPExtFMAFMul = [&](SDValue X, SDValue Y, SDValue U, SDValue V,
12984 SDValue Z) {
12985 return DAG.getNode(
12986 PreferredFusedOpcode, SL, VT, DAG.getNode(ISD::FP_EXTEND, SL, VT, X),
12987 DAG.getNode(ISD::FP_EXTEND, SL, VT, Y),
12988 DAG.getNode(PreferredFusedOpcode, SL, VT,
12989 DAG.getNode(ISD::FP_EXTEND, SL, VT, U),
12990 DAG.getNode(ISD::FP_EXTEND, SL, VT, V), Z));
12991 };
12992 if (N0.getOpcode() == ISD::FP_EXTEND) {
12993 SDValue N00 = N0.getOperand(0);
12994 if (N00.getOpcode() == PreferredFusedOpcode) {
12995 SDValue N002 = N00.getOperand(2);
12996 if (isContractableFMUL(N002) &&
12997 TLI.isFPExtFoldable(DAG, PreferredFusedOpcode, VT,
12998 N00.getValueType())) {
12999 return FoldFAddFPExtFMAFMul(N00.getOperand(0), N00.getOperand(1),
13000 N002.getOperand(0), N002.getOperand(1),
13001 N1);
13002 }
13003 }
13004 }
13005
13006      // fold (fadd x, (fma y, z, (fpext (fmul u, v))))
13007 // -> (fma y, z, (fma (fpext u), (fpext v), x))
13008 if (N1.getOpcode() == PreferredFusedOpcode) {
13009 SDValue N12 = N1.getOperand(2);
13010 if (N12.getOpcode() == ISD::FP_EXTEND) {
13011 SDValue N120 = N12.getOperand(0);
13012 if (isContractableFMUL(N120) &&
13013 TLI.isFPExtFoldable(DAG, PreferredFusedOpcode, VT,
13014 N120.getValueType())) {
13015 return FoldFAddFMAFPExtFMul(N1.getOperand(0), N1.getOperand(1),
13016 N120.getOperand(0), N120.getOperand(1),
13017 N0);
13018 }
13019 }
13020 }
13021
13022      // fold (fadd x, (fpext (fma y, z, (fmul u, v))))
13023 // -> (fma (fpext y), (fpext z), (fma (fpext u), (fpext v), x))
13024 // FIXME: This turns two single-precision and one double-precision
13025 // operation into two double-precision operations, which might not be
13026 // interesting for all targets, especially GPUs.
13027 if (N1.getOpcode() == ISD::FP_EXTEND) {
13028 SDValue N10 = N1.getOperand(0);
13029 if (N10.getOpcode() == PreferredFusedOpcode) {
13030 SDValue N102 = N10.getOperand(2);
13031 if (isContractableFMUL(N102) &&
13032 TLI.isFPExtFoldable(DAG, PreferredFusedOpcode, VT,
13033 N10.getValueType())) {
13034 return FoldFAddFPExtFMAFMul(N10.getOperand(0), N10.getOperand(1),
13035 N102.getOperand(0), N102.getOperand(1),
13036 N0);
13037 }
13038 }
13039 }
13040 }
13041
13042 return SDValue();
13043}
13044
13045/// Try to perform FMA combining on a given FSUB node.
13046SDValue DAGCombiner::visitFSUBForFMACombine(SDNode *N) {
13047 SDValue N0 = N->getOperand(0);
13048 SDValue N1 = N->getOperand(1);
13049 EVT VT = N->getValueType(0);
13050 SDLoc SL(N);
13051
13052 const TargetOptions &Options = DAG.getTarget().Options;
13053 // Floating-point multiply-add with intermediate rounding.
13054 bool HasFMAD = (LegalOperations && TLI.isFMADLegal(DAG, N));
13055
13056 // Floating-point multiply-add without intermediate rounding.
13057 bool HasFMA =
13058 TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT) &&
13059 (!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FMA, VT));
13060
13061 // No valid opcode, do not combine.
13062 if (!HasFMAD && !HasFMA)
13063 return SDValue();
13064
13065 const SDNodeFlags Flags = N->getFlags();
13066 bool CanFuse = Options.UnsafeFPMath || isContractable(N);
13067 bool AllowFusionGlobally = (Options.AllowFPOpFusion == FPOpFusion::Fast ||
13068 CanFuse || HasFMAD);
13069
13070 // If the subtraction is not contractable, do not combine.
13071 if (!AllowFusionGlobally && !isContractable(N))
13072 return SDValue();
13073
13074 if (TLI.generateFMAsInMachineCombiner(VT, OptLevel))
13075 return SDValue();
13076
13077 // Always prefer FMAD to FMA for precision.
13078 unsigned PreferredFusedOpcode = HasFMAD ? ISD::FMAD : ISD::FMA;
13079 bool Aggressive = TLI.enableAggressiveFMAFusion(VT);
13080 bool NoSignedZero = Options.NoSignedZerosFPMath || Flags.hasNoSignedZeros();
13081
13082  // Returns true if the node is an FMUL that is contractable, either due to
13083  // global flags or to its own SDNodeFlags.
13084 auto isContractableFMUL = [AllowFusionGlobally](SDValue N) {
13085 if (N.getOpcode() != ISD::FMUL)
13086 return false;
13087 return AllowFusionGlobally || isContractable(N.getNode());
13088 };
13089
13090 // fold (fsub (fmul x, y), z) -> (fma x, y, (fneg z))
13091 auto tryToFoldXYSubZ = [&](SDValue XY, SDValue Z) {
13092 if (isContractableFMUL(XY) && (Aggressive || XY->hasOneUse())) {
13093 return DAG.getNode(PreferredFusedOpcode, SL, VT, XY.getOperand(0),
13094 XY.getOperand(1), DAG.getNode(ISD::FNEG, SL, VT, Z));
13095 }
13096 return SDValue();
13097 };
13098
13099 // fold (fsub x, (fmul y, z)) -> (fma (fneg y), z, x)
13100 // Note: Commutes FSUB operands.
13101 auto tryToFoldXSubYZ = [&](SDValue X, SDValue YZ) {
13102 if (isContractableFMUL(YZ) && (Aggressive || YZ->hasOneUse())) {
13103 return DAG.getNode(PreferredFusedOpcode, SL, VT,
13104 DAG.getNode(ISD::FNEG, SL, VT, YZ.getOperand(0)),
13105 YZ.getOperand(1), X);
13106 }
13107 return SDValue();
13108 };
13109
13110 // If we have two choices trying to fold (fsub (fmul u, v), (fmul x, y)),
13111 // prefer to fold the multiply with fewer uses.
13112 if (isContractableFMUL(N0) && isContractableFMUL(N1) &&
13113 (N0.getNode()->use_size() > N1.getNode()->use_size())) {
13114 // fold (fsub (fmul a, b), (fmul c, d)) -> (fma (fneg c), d, (fmul a, b))
13115 if (SDValue V = tryToFoldXSubYZ(N0, N1))
13116 return V;
13117 // fold (fsub (fmul a, b), (fmul c, d)) -> (fma a, b, (fneg (fmul c, d)))
13118 if (SDValue V = tryToFoldXYSubZ(N0, N1))
13119 return V;
13120 } else {
13121 // fold (fsub (fmul x, y), z) -> (fma x, y, (fneg z))
13122 if (SDValue V = tryToFoldXYSubZ(N0, N1))
13123 return V;
13124 // fold (fsub x, (fmul y, z)) -> (fma (fneg y), z, x)
13125 if (SDValue V = tryToFoldXSubYZ(N0, N1))
13126 return V;
13127 }
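  // [Editorial example, not part of the LLVM source] If t = fmul(a, b) has
  // several users, fsub(t, fmul(c, d)) is folded as fma(fneg(c), d, t) so
  // the multi-use multiply t survives instead of being duplicated into a
  // fused node.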
13128
13129  // fold (fsub (fneg (fmul x, y)), z) -> (fma (fneg x), y, (fneg z))
13130 if (N0.getOpcode() == ISD::FNEG && isContractableFMUL(N0.getOperand(0)) &&
13131 (Aggressive || (N0->hasOneUse() && N0.getOperand(0).hasOneUse()))) {
13132 SDValue N00 = N0.getOperand(0).getOperand(0);
13133 SDValue N01 = N0.getOperand(0).getOperand(1);
13134 return DAG.getNode(PreferredFusedOpcode, SL, VT,
13135 DAG.getNode(ISD::FNEG, SL, VT, N00), N01,
13136 DAG.getNode(ISD::FNEG, SL, VT, N1));
13137 }
13138
13139 // Look through FP_EXTEND nodes to do more combining.
13140
13141 // fold (fsub (fpext (fmul x, y)), z)
13142 // -> (fma (fpext x), (fpext y), (fneg z))
13143 if (N0.getOpcode() == ISD::FP_EXTEND) {
13144 SDValue N00 = N0.getOperand(0);
13145 if (isContractableFMUL(N00) &&
13146 TLI.isFPExtFoldable(DAG, PreferredFusedOpcode, VT,
13147 N00.getValueType())) {
13148 return DAG.getNode(PreferredFusedOpcode, SL, VT,
13149 DAG.getNode(ISD::FP_EXTEND, SL, VT, N00.getOperand(0)),
13150 DAG.getNode(ISD::FP_EXTEND, SL, VT, N00.getOperand(1)),
13151 DAG.getNode(ISD::FNEG, SL, VT, N1));
13152 }
13153 }
13154
13155 // fold (fsub x, (fpext (fmul y, z)))
13156 // -> (fma (fneg (fpext y)), (fpext z), x)
13157 // Note: Commutes FSUB operands.
13158 if (N1.getOpcode() == ISD::FP_EXTEND) {
13159 SDValue N10 = N1.getOperand(0);
13160 if (isContractableFMUL(N10) &&
13161 TLI.isFPExtFoldable(DAG, PreferredFusedOpcode, VT,
13162 N10.getValueType())) {
13163 return DAG.getNode(
13164 PreferredFusedOpcode, SL, VT,
13165 DAG.getNode(ISD::FNEG, SL, VT,
13166 DAG.getNode(ISD::FP_EXTEND, SL, VT, N10.getOperand(0))),
13167 DAG.getNode(ISD::FP_EXTEND, SL, VT, N10.getOperand(1)), N0);
13168 }
13169 }
13170
13171  // fold (fsub (fpext (fneg (fmul x, y))), z)
13172 // -> (fneg (fma (fpext x), (fpext y), z))
13173 // Note: This could be removed with appropriate canonicalization of the
13174  // input expression into (fneg (fadd (fpext (fmul x, y)), z)). However, the
13175  // orthogonal flags -fp-contract=fast and -enable-unsafe-fp-math prevent us
13176  // from implementing the canonicalization in visitFSUB.
13177 if (N0.getOpcode() == ISD::FP_EXTEND) {
13178 SDValue N00 = N0.getOperand(0);
13179 if (N00.getOpcode() == ISD::FNEG) {
13180 SDValue N000 = N00.getOperand(0);
13181 if (isContractableFMUL(N000) &&
13182 TLI.isFPExtFoldable(DAG, PreferredFusedOpcode, VT,
13183 N00.getValueType())) {
13184 return DAG.getNode(
13185 ISD::FNEG, SL, VT,
13186 DAG.getNode(PreferredFusedOpcode, SL, VT,
13187 DAG.getNode(ISD::FP_EXTEND, SL, VT, N000.getOperand(0)),
13188 DAG.getNode(ISD::FP_EXTEND, SL, VT, N000.getOperand(1)),
13189 N1));
13190 }
13191 }
13192 }
13193
13194  // fold (fsub (fneg (fpext (fmul x, y))), z)
13195  // -> (fneg (fma (fpext x), (fpext y), z))
13196  // Note: This could be removed with appropriate canonicalization of the
13197  // input expression into (fneg (fadd (fpext (fmul x, y)), z)). However, the
13198  // orthogonal flags -fp-contract=fast and -enable-unsafe-fp-math prevent us
13199  // from implementing the canonicalization in visitFSUB.
13200 if (N0.getOpcode() == ISD::FNEG) {
13201 SDValue N00 = N0.getOperand(0);
13202 if (N00.getOpcode() == ISD::FP_EXTEND) {
13203 SDValue N000 = N00.getOperand(0);
13204 if (isContractableFMUL(N000) &&
13205 TLI.isFPExtFoldable(DAG, PreferredFusedOpcode, VT,
13206 N000.getValueType())) {
13207 return DAG.getNode(
13208 ISD::FNEG, SL, VT,
13209 DAG.getNode(PreferredFusedOpcode, SL, VT,
13210 DAG.getNode(ISD::FP_EXTEND, SL, VT, N000.getOperand(0)),
13211 DAG.getNode(ISD::FP_EXTEND, SL, VT, N000.getOperand(1)),
13212 N1));
13213 }
13214 }
13215 }
13216
13217 // More folding opportunities when target permits.
13218 if (Aggressive) {
13219 // fold (fsub (fma x, y, (fmul u, v)), z)
13220    // -> (fma x, y, (fma u, v, (fneg z)))
13221 if (CanFuse && N0.getOpcode() == PreferredFusedOpcode &&
13222 isContractableFMUL(N0.getOperand(2)) && N0->hasOneUse() &&
13223 N0.getOperand(2)->hasOneUse()) {
13224 return DAG.getNode(PreferredFusedOpcode, SL, VT, N0.getOperand(0),
13225 N0.getOperand(1),
13226 DAG.getNode(PreferredFusedOpcode, SL, VT,
13227 N0.getOperand(2).getOperand(0),
13228 N0.getOperand(2).getOperand(1),
13229 DAG.getNode(ISD::FNEG, SL, VT, N1)));
13230 }
13231
13232 // fold (fsub x, (fma y, z, (fmul u, v)))
13233 // -> (fma (fneg y), z, (fma (fneg u), v, x))
13234 if (CanFuse && N1.getOpcode() == PreferredFusedOpcode &&
13235 isContractableFMUL(N1.getOperand(2)) &&
13236 N1->hasOneUse() && NoSignedZero) {
13237 SDValue N20 = N1.getOperand(2).getOperand(0);
13238 SDValue N21 = N1.getOperand(2).getOperand(1);
13239 return DAG.getNode(
13240 PreferredFusedOpcode, SL, VT,
13241 DAG.getNode(ISD::FNEG, SL, VT, N1.getOperand(0)), N1.getOperand(1),
13242 DAG.getNode(PreferredFusedOpcode, SL, VT,
13243 DAG.getNode(ISD::FNEG, SL, VT, N20), N21, N0));
13244 }
13245
13246
13247 // fold (fsub (fma x, y, (fpext (fmul u, v))), z)
13248    // -> (fma x, y, (fma (fpext u), (fpext v), (fneg z)))
13249 if (N0.getOpcode() == PreferredFusedOpcode &&
13250 N0->hasOneUse()) {
13251 SDValue N02 = N0.getOperand(2);
13252 if (N02.getOpcode() == ISD::FP_EXTEND) {
13253 SDValue N020 = N02.getOperand(0);
13254 if (isContractableFMUL(N020) &&
13255 TLI.isFPExtFoldable(DAG, PreferredFusedOpcode, VT,
13256 N020.getValueType())) {
13257 return DAG.getNode(
13258 PreferredFusedOpcode, SL, VT, N0.getOperand(0), N0.getOperand(1),
13259 DAG.getNode(
13260 PreferredFusedOpcode, SL, VT,
13261 DAG.getNode(ISD::FP_EXTEND, SL, VT, N020.getOperand(0)),
13262 DAG.getNode(ISD::FP_EXTEND, SL, VT, N020.getOperand(1)),
13263 DAG.getNode(ISD::FNEG, SL, VT, N1)));
13264 }
13265 }
13266 }
13267
13268 // fold (fsub (fpext (fma x, y, (fmul u, v))), z)
13269 // -> (fma (fpext x), (fpext y),
13270 // (fma (fpext u), (fpext v), (fneg z)))
13271 // FIXME: This turns two single-precision and one double-precision
13272 // operation into two double-precision operations, which might not be
13273 // interesting for all targets, especially GPUs.
13274 if (N0.getOpcode() == ISD::FP_EXTEND) {
13275 SDValue N00 = N0.getOperand(0);
13276 if (N00.getOpcode() == PreferredFusedOpcode) {
13277 SDValue N002 = N00.getOperand(2);
13278 if (isContractableFMUL(N002) &&
13279 TLI.isFPExtFoldable(DAG, PreferredFusedOpcode, VT,
13280 N00.getValueType())) {
13281 return DAG.getNode(
13282 PreferredFusedOpcode, SL, VT,
13283 DAG.getNode(ISD::FP_EXTEND, SL, VT, N00.getOperand(0)),
13284 DAG.getNode(ISD::FP_EXTEND, SL, VT, N00.getOperand(1)),
13285 DAG.getNode(
13286 PreferredFusedOpcode, SL, VT,
13287 DAG.getNode(ISD::FP_EXTEND, SL, VT, N002.getOperand(0)),
13288 DAG.getNode(ISD::FP_EXTEND, SL, VT, N002.getOperand(1)),
13289 DAG.getNode(ISD::FNEG, SL, VT, N1)));
13290 }
13291 }
13292 }
13293
13294 // fold (fsub x, (fma y, z, (fpext (fmul u, v))))
13295 // -> (fma (fneg y), z, (fma (fneg (fpext u)), (fpext v), x))
13296 if (N1.getOpcode() == PreferredFusedOpcode &&
13297 N1.getOperand(2).getOpcode() == ISD::FP_EXTEND &&
13298 N1->hasOneUse()) {
13299 SDValue N120 = N1.getOperand(2).getOperand(0);
13300 if (isContractableFMUL(N120) &&
13301 TLI.isFPExtFoldable(DAG, PreferredFusedOpcode, VT,
13302 N120.getValueType())) {
13303 SDValue N1200 = N120.getOperand(0);
13304 SDValue N1201 = N120.getOperand(1);
13305 return DAG.getNode(
13306 PreferredFusedOpcode, SL, VT,
13307 DAG.getNode(ISD::FNEG, SL, VT, N1.getOperand(0)), N1.getOperand(1),
13308 DAG.getNode(PreferredFusedOpcode, SL, VT,
13309 DAG.getNode(ISD::FNEG, SL, VT,
13310 DAG.getNode(ISD::FP_EXTEND, SL, VT, N1200)),
13311 DAG.getNode(ISD::FP_EXTEND, SL, VT, N1201), N0));
13312 }
13313 }
13314
13315 // fold (fsub x, (fpext (fma y, z, (fmul u, v))))
13316 // -> (fma (fneg (fpext y)), (fpext z),
13317 // (fma (fneg (fpext u)), (fpext v), x))
13318 // FIXME: This turns two single-precision and one double-precision
13319 // operation into two double-precision operations, which might not be
13320 // interesting for all targets, especially GPUs.
13321 if (N1.getOpcode() == ISD::FP_EXTEND &&
13322 N1.getOperand(0).getOpcode() == PreferredFusedOpcode) {
13323 SDValue CvtSrc = N1.getOperand(0);
13324 SDValue N100 = CvtSrc.getOperand(0);
13325 SDValue N101 = CvtSrc.getOperand(1);
13326 SDValue N102 = CvtSrc.getOperand(2);
13327 if (isContractableFMUL(N102) &&
13328 TLI.isFPExtFoldable(DAG, PreferredFusedOpcode, VT,
13329 CvtSrc.getValueType())) {
13330 SDValue N1020 = N102.getOperand(0);
13331 SDValue N1021 = N102.getOperand(1);
13332 return DAG.getNode(
13333 PreferredFusedOpcode, SL, VT,
13334 DAG.getNode(ISD::FNEG, SL, VT,
13335 DAG.getNode(ISD::FP_EXTEND, SL, VT, N100)),
13336 DAG.getNode(ISD::FP_EXTEND, SL, VT, N101),
13337 DAG.getNode(PreferredFusedOpcode, SL, VT,
13338 DAG.getNode(ISD::FNEG, SL, VT,
13339 DAG.getNode(ISD::FP_EXTEND, SL, VT, N1020)),
13340 DAG.getNode(ISD::FP_EXTEND, SL, VT, N1021), N0));
13341 }
13342 }
13343 }
13344
13345 return SDValue();
13346}
13347
13348/// Try to perform FMA combining on a given FMUL node based on the distributive
13349/// law x * (y + 1) = x * y + x and variants thereof (commuted versions,
13350/// subtraction instead of addition).
13351SDValue DAGCombiner::visitFMULForFMADistributiveCombine(SDNode *N) {
13352 SDValue N0 = N->getOperand(0);
13353 SDValue N1 = N->getOperand(1);
13354 EVT VT = N->getValueType(0);
13355 SDLoc SL(N);
13356
13357  assert(N->getOpcode() == ISD::FMUL && "Expected FMUL Operation");
13358
13359 const TargetOptions &Options = DAG.getTarget().Options;
13360
13361 // The transforms below are incorrect when x == 0 and y == inf, because the
13362 // intermediate multiplication produces a nan.
13363 if (!Options.NoInfsFPMath)
13364 return SDValue();
13365
13366 // Floating-point multiply-add without intermediate rounding.
13367 bool HasFMA =
13368 (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath) &&
13369 TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT) &&
13370 (!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FMA, VT));
13371
13372 // Floating-point multiply-add with intermediate rounding. This can result
13373 // in a less precise result due to the changed rounding order.
13374 bool HasFMAD = Options.UnsafeFPMath &&
13375 (LegalOperations && TLI.isFMADLegal(DAG, N));
13376
13377 // No valid opcode, do not combine.
13378 if (!HasFMAD && !HasFMA)
13379 return SDValue();
13380
13381 // Always prefer FMAD to FMA for precision.
13382 unsigned PreferredFusedOpcode = HasFMAD ? ISD::FMAD : ISD::FMA;
13383 bool Aggressive = TLI.enableAggressiveFMAFusion(VT);
13384
13385 // fold (fmul (fadd x0, +1.0), y) -> (fma x0, y, y)
13386 // fold (fmul (fadd x0, -1.0), y) -> (fma x0, y, (fneg y))
13387 auto FuseFADD = [&](SDValue X, SDValue Y) {
13388 if (X.getOpcode() == ISD::FADD && (Aggressive || X->hasOneUse())) {
13389 if (auto *C = isConstOrConstSplatFP(X.getOperand(1), true)) {
13390 if (C->isExactlyValue(+1.0))
13391 return DAG.getNode(PreferredFusedOpcode, SL, VT, X.getOperand(0), Y,
13392 Y);
13393 if (C->isExactlyValue(-1.0))
13394 return DAG.getNode(PreferredFusedOpcode, SL, VT, X.getOperand(0), Y,
13395 DAG.getNode(ISD::FNEG, SL, VT, Y));
13396 }
13397 }
13398 return SDValue();
13399 };
13400
13401 if (SDValue FMA = FuseFADD(N0, N1))
13402 return FMA;
13403 if (SDValue FMA = FuseFADD(N1, N0))
13404 return FMA;
13405
13406 // fold (fmul (fsub +1.0, x1), y) -> (fma (fneg x1), y, y)
13407 // fold (fmul (fsub -1.0, x1), y) -> (fma (fneg x1), y, (fneg y))
13408 // fold (fmul (fsub x0, +1.0), y) -> (fma x0, y, (fneg y))
13409 // fold (fmul (fsub x0, -1.0), y) -> (fma x0, y, y)
13410 auto FuseFSUB = [&](SDValue X, SDValue Y) {
13411 if (X.getOpcode() == ISD::FSUB && (Aggressive || X->hasOneUse())) {
13412 if (auto *C0 = isConstOrConstSplatFP(X.getOperand(0), true)) {
13413 if (C0->isExactlyValue(+1.0))
13414 return DAG.getNode(PreferredFusedOpcode, SL, VT,
13415 DAG.getNode(ISD::FNEG, SL, VT, X.getOperand(1)), Y,
13416 Y);
13417 if (C0->isExactlyValue(-1.0))
13418 return DAG.getNode(PreferredFusedOpcode, SL, VT,
13419 DAG.getNode(ISD::FNEG, SL, VT, X.getOperand(1)), Y,
13420 DAG.getNode(ISD::FNEG, SL, VT, Y));
13421 }
13422 if (auto *C1 = isConstOrConstSplatFP(X.getOperand(1), true)) {
13423 if (C1->isExactlyValue(+1.0))
13424 return DAG.getNode(PreferredFusedOpcode, SL, VT, X.getOperand(0), Y,
13425 DAG.getNode(ISD::FNEG, SL, VT, Y));
13426 if (C1->isExactlyValue(-1.0))
13427 return DAG.getNode(PreferredFusedOpcode, SL, VT, X.getOperand(0), Y,
13428 Y);
13429 }
13430 }
13431 return SDValue();
13432 };
13433
13434 if (SDValue FMA = FuseFSUB(N0, N1))
13435 return FMA;
13436 if (SDValue FMA = FuseFSUB(N1, N0))
13437 return FMA;
13438
13439 return SDValue();
13440}
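  // [Editorial example, not part of the LLVM source] These distributive
  // folds turn (x + 1.0) * y into fma(x, y, y), since x*y + 1.0*y == x*y + y,
  // and (1.0 - x) * y into fma(fneg(x), y, y) == y - x*y.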
13441
13442SDValue DAGCombiner::visitFADD(SDNode *N) {
13443 SDValue N0 = N->getOperand(0);
13444 SDValue N1 = N->getOperand(1);
13445 bool N0CFP = DAG.isConstantFPBuildVectorOrConstantFP(N0);
13446 bool N1CFP = DAG.isConstantFPBuildVectorOrConstantFP(N1);
13447 EVT VT = N->getValueType(0);
13448 SDLoc DL(N);
13449 const TargetOptions &Options = DAG.getTarget().Options;
13450 SDNodeFlags Flags = N->getFlags();
13451 SelectionDAG::FlagInserter FlagsInserter(DAG, N);
13452
13453 if (SDValue R = DAG.simplifyFPBinop(N->getOpcode(), N0, N1, Flags))
13454 return R;
13455
13456 // fold vector ops
13457 if (VT.isVector())
13458 if (SDValue FoldedVOp = SimplifyVBinOp(N))
13459 return FoldedVOp;
13460
13461 // fold (fadd c1, c2) -> c1 + c2
13462 if (N0CFP && N1CFP)
13463 return DAG.getNode(ISD::FADD, DL, VT, N0, N1);
13464
13465 // canonicalize constant to RHS
13466 if (N0CFP && !N1CFP)
13467 return DAG.getNode(ISD::FADD, DL, VT, N1, N0);
13468
13469 // N0 + -0.0 --> N0 (also allowed with +0.0 and fast-math)
13470 ConstantFPSDNode *N1C = isConstOrConstSplatFP(N1, true);
13471 if (N1C && N1C->isZero())
13472 if (N1C->isNegative() || Options.NoSignedZerosFPMath || Flags.hasNoSignedZeros())
13473 return N0;
13474
13475 if (SDValue NewSel = foldBinOpIntoSelect(N))
13476 return NewSel;
13477
13478 // fold (fadd A, (fneg B)) -> (fsub A, B)
13479 if (!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FSUB, VT))
13480 if (SDValue NegN1 = TLI.getCheaperNegatedExpression(
13481 N1, DAG, LegalOperations, ForCodeSize))
13482 return DAG.getNode(ISD::FSUB, DL, VT, N0, NegN1);
13483
13484 // fold (fadd (fneg A), B) -> (fsub B, A)
13485 if (!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FSUB, VT))
13486 if (SDValue NegN0 = TLI.getCheaperNegatedExpression(
13487 N0, DAG, LegalOperations, ForCodeSize))
13488 return DAG.getNode(ISD::FSUB, DL, VT, N1, NegN0);
13489
13490 auto isFMulNegTwo = [](SDValue FMul) {
13491 if (!FMul.hasOneUse() || FMul.getOpcode() != ISD::FMUL)
13492 return false;
13493 auto *C = isConstOrConstSplatFP(FMul.getOperand(1), true);
13494 return C && C->isExactlyValue(-2.0);
13495 };
13496
13497 // fadd (fmul B, -2.0), A --> fsub A, (fadd B, B)
13498 if (isFMulNegTwo(N0)) {
13499 SDValue B = N0.getOperand(0);
13500 SDValue Add = DAG.getNode(ISD::FADD, DL, VT, B, B);
13501 return DAG.getNode(ISD::FSUB, DL, VT, N1, Add);
13502 }
13503 // fadd A, (fmul B, -2.0) --> fsub A, (fadd B, B)
13504 if (isFMulNegTwo(N1)) {
13505 SDValue B = N1.getOperand(0);
13506 SDValue Add = DAG.getNode(ISD::FADD, DL, VT, B, B);
13507 return DAG.getNode(ISD::FSUB, DL, VT, N0, Add);
13508 }
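  // [Editorial example, not part of the LLVM source] The -2.0 folds rely on
  // A + B*(-2.0) == A - (B + B); e.g. with B == 3.0, A - 6.0 == A - (3.0 +
  // 3.0), replacing a multiply by a constant with an add of a value to itself.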
13509
13510 // No FP constant should be created after legalization as Instruction
13511 // Selection pass has a hard time dealing with FP constants.
13512 bool AllowNewConst = (Level < AfterLegalizeDAG);
13513
13514 // If nnan is enabled, fold lots of things.
13515 if ((Options.NoNaNsFPMath || Flags.hasNoNaNs()) && AllowNewConst) {
13516 // If allowed, fold (fadd (fneg x), x) -> 0.0
13517 if (N0.getOpcode() == ISD::FNEG && N0.getOperand(0) == N1)
13518 return DAG.getConstantFP(0.0, DL, VT);
13519
13520 // If allowed, fold (fadd x, (fneg x)) -> 0.0
13521 if (N1.getOpcode() == ISD::FNEG && N1.getOperand(0) == N0)
13522 return DAG.getConstantFP(0.0, DL, VT);
13523 }
13524
13525 // If 'unsafe math' or reassoc and nsz, fold lots of things.
13526 // TODO: break out portions of the transformations below for which Unsafe is
13527 // considered and which do not require both nsz and reassoc
13528 if (((Options.UnsafeFPMath && Options.NoSignedZerosFPMath) ||
13529 (Flags.hasAllowReassociation() && Flags.hasNoSignedZeros())) &&
13530 AllowNewConst) {
13531 // fadd (fadd x, c1), c2 -> fadd x, c1 + c2
13532 if (N1CFP && N0.getOpcode() == ISD::FADD &&
13533 DAG.isConstantFPBuildVectorOrConstantFP(N0.getOperand(1))) {
13534 SDValue NewC = DAG.getNode(ISD::FADD, DL, VT, N0.getOperand(1), N1);
13535 return DAG.getNode(ISD::FADD, DL, VT, N0.getOperand(0), NewC);
13536 }
13537
13538 // We can fold chains of FADD's of the same value into multiplications.
13539 // This transform is not safe in general because we are reducing the number
13540 // of rounding steps.
13541 if (TLI.isOperationLegalOrCustom(ISD::FMUL, VT) && !N0CFP && !N1CFP) {
13542 if (N0.getOpcode() == ISD::FMUL) {
13543 bool CFP00 = DAG.isConstantFPBuildVectorOrConstantFP(N0.getOperand(0));
13544 bool CFP01 = DAG.isConstantFPBuildVectorOrConstantFP(N0.getOperand(1));
13545
13546 // (fadd (fmul x, c), x) -> (fmul x, c+1)
13547 if (CFP01 && !CFP00 && N0.getOperand(0) == N1) {
13548 SDValue NewCFP = DAG.getNode(ISD::FADD, DL, VT, N0.getOperand(1),
13549 DAG.getConstantFP(1.0, DL, VT));
13550 return DAG.getNode(ISD::FMUL, DL, VT, N1, NewCFP);
13551 }
13552
13553 // (fadd (fmul x, c), (fadd x, x)) -> (fmul x, c+2)
13554 if (CFP01 && !CFP00 && N1.getOpcode() == ISD::FADD &&
13555 N1.getOperand(0) == N1.getOperand(1) &&
13556 N0.getOperand(0) == N1.getOperand(0)) {
13557 SDValue NewCFP = DAG.getNode(ISD::FADD, DL, VT, N0.getOperand(1),
13558 DAG.getConstantFP(2.0, DL, VT));
13559 return DAG.getNode(ISD::FMUL, DL, VT, N0.getOperand(0), NewCFP);
13560 }
13561 }
13562
13563 if (N1.getOpcode() == ISD::FMUL) {
13564 bool CFP10 = DAG.isConstantFPBuildVectorOrConstantFP(N1.getOperand(0));
13565 bool CFP11 = DAG.isConstantFPBuildVectorOrConstantFP(N1.getOperand(1));
13566
13567 // (fadd x, (fmul x, c)) -> (fmul x, c+1)
13568 if (CFP11 && !CFP10 && N1.getOperand(0) == N0) {
13569 SDValue NewCFP = DAG.getNode(ISD::FADD, DL, VT, N1.getOperand(1),
13570 DAG.getConstantFP(1.0, DL, VT));
13571 return DAG.getNode(ISD::FMUL, DL, VT, N0, NewCFP);
13572 }
13573
13574 // (fadd (fadd x, x), (fmul x, c)) -> (fmul x, c+2)
13575 if (CFP11 && !CFP10 && N0.getOpcode() == ISD::FADD &&
13576 N0.getOperand(0) == N0.getOperand(1) &&
13577 N1.getOperand(0) == N0.getOperand(0)) {
13578 SDValue NewCFP = DAG.getNode(ISD::FADD, DL, VT, N1.getOperand(1),
13579 DAG.getConstantFP(2.0, DL, VT));
13580 return DAG.getNode(ISD::FMUL, DL, VT, N1.getOperand(0), NewCFP);
13581 }
13582 }
13583
13584 if (N0.getOpcode() == ISD::FADD) {
13585 bool CFP00 = DAG.isConstantFPBuildVectorOrConstantFP(N0.getOperand(0));
13586 // (fadd (fadd x, x), x) -> (fmul x, 3.0)
13587 if (!CFP00 && N0.getOperand(0) == N0.getOperand(1) &&
13588 (N0.getOperand(0) == N1)) {
13589 return DAG.getNode(ISD::FMUL, DL, VT, N1,
13590 DAG.getConstantFP(3.0, DL, VT));
13591 }
13592 }
13593
13594 if (N1.getOpcode() == ISD::FADD) {
13595 bool CFP10 = DAG.isConstantFPBuildVectorOrConstantFP(N1.getOperand(0));
13596 // (fadd x, (fadd x, x)) -> (fmul x, 3.0)
13597 if (!CFP10 && N1.getOperand(0) == N1.getOperand(1) &&
13598 N1.getOperand(0) == N0) {
13599 return DAG.getNode(ISD::FMUL, DL, VT, N0,
13600 DAG.getConstantFP(3.0, DL, VT));
13601 }
13602 }
13603
13604 // (fadd (fadd x, x), (fadd x, x)) -> (fmul x, 4.0)
13605 if (N0.getOpcode() == ISD::FADD && N1.getOpcode() == ISD::FADD &&
13606 N0.getOperand(0) == N0.getOperand(1) &&
13607 N1.getOperand(0) == N1.getOperand(1) &&
13608 N0.getOperand(0) == N1.getOperand(0)) {
13609 return DAG.getNode(ISD::FMUL, DL, VT, N0.getOperand(0),
13610 DAG.getConstantFP(4.0, DL, VT));
13611 }
13612 }
13613 } // enable-unsafe-fp-math
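  // [Editorial example, not part of the LLVM source] The chain folds above
  // collapse repeated additions of one value: (x + x) + (x + x) -> x * 4.0,
  // and (fmul x, c) + (x + x) -> x * (c + 2.0), trading several fadds for a
  // single fmul.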
13614
13615 // FADD -> FMA combines:
13616 if (SDValue Fused = visitFADDForFMACombine(N)) {
13617 AddToWorklist(Fused.getNode());
13618 return Fused;
13619 }
13620 return SDValue();
13621}
13622
13623SDValue DAGCombiner::visitSTRICT_FADD(SDNode *N) {
13624 SDValue Chain = N->getOperand(0);
13625 SDValue N0 = N->getOperand(1);
13626 SDValue N1 = N->getOperand(2);
13627 EVT VT = N->getValueType(0);
13628 EVT ChainVT = N->getValueType(1);
13629 SDLoc DL(N);
13630 SelectionDAG::FlagInserter FlagsInserter(DAG, N);
13631
13632 // fold (strict_fadd A, (fneg B)) -> (strict_fsub A, B)
13633 if (!LegalOperations || TLI.isOperationLegalOrCustom(ISD::STRICT_FSUB, VT))
13634 if (SDValue NegN1 = TLI.getCheaperNegatedExpression(
13635 N1, DAG, LegalOperations, ForCodeSize)) {
13636 return DAG.getNode(ISD::STRICT_FSUB, DL, DAG.getVTList(VT, ChainVT),
13637 {Chain, N0, NegN1});
13638 }
13639
13640 // fold (strict_fadd (fneg A), B) -> (strict_fsub B, A)
13641 if (!LegalOperations || TLI.isOperationLegalOrCustom(ISD::STRICT_FSUB, VT))
13642 if (SDValue NegN0 = TLI.getCheaperNegatedExpression(
13643 N0, DAG, LegalOperations, ForCodeSize)) {
13644 return DAG.getNode(ISD::STRICT_FSUB, DL, DAG.getVTList(VT, ChainVT),
13645 {Chain, N1, NegN0});
13646 }
13647 return SDValue();
13648}
13649
13650SDValue DAGCombiner::visitFSUB(SDNode *N) {
13651 SDValue N0 = N->getOperand(0);
13652 SDValue N1 = N->getOperand(1);
13653 ConstantFPSDNode *N0CFP = isConstOrConstSplatFP(N0, true);
13654 ConstantFPSDNode *N1CFP = isConstOrConstSplatFP(N1, true);
13655 EVT VT = N->getValueType(0);
13656 SDLoc DL(N);
13657 const TargetOptions &Options = DAG.getTarget().Options;
13658 const SDNodeFlags Flags = N->getFlags();
13659 SelectionDAG::FlagInserter FlagsInserter(DAG, N);
13660
13661 if (SDValue R = DAG.simplifyFPBinop(N->getOpcode(), N0, N1, Flags))
13662 return R;
13663
13664 // fold vector ops
13665 if (VT.isVector())
13666 if (SDValue FoldedVOp = SimplifyVBinOp(N))
13667 return FoldedVOp;
13668
13669 // fold (fsub c1, c2) -> c1-c2
13670 if (N0CFP && N1CFP)
13671 return DAG.getNode(ISD::FSUB, DL, VT, N0, N1);
13672
13673 if (SDValue NewSel = foldBinOpIntoSelect(N))
13674 return NewSel;
13675
13676 // (fsub A, 0) -> A
13677 if (N1CFP && N1CFP->isZero()) {
13678 if (!N1CFP->isNegative() || Options.NoSignedZerosFPMath ||
13679 Flags.hasNoSignedZeros()) {
13680 return N0;
13681 }
13682 }
13683
13684 if (N0 == N1) {
13685 // (fsub x, x) -> 0.0
13686 if (Options.NoNaNsFPMath || Flags.hasNoNaNs())
13687 return DAG.getConstantFP(0.0f, DL, VT);
13688 }
13689
13690 // (fsub -0.0, N1) -> -N1
13691 if (N0CFP && N0CFP->isZero()) {
13692 if (N0CFP->isNegative() ||
13693 (Options.NoSignedZerosFPMath || Flags.hasNoSignedZeros())) {
13694 // We cannot replace an FSUB(+-0.0,X) with FNEG(X) when denormals are
13695 // flushed to zero, unless all users treat denorms as zero (DAZ).
13696 // FIXME: This transform will change the sign of a NaN and the behavior
13697 // of a signaling NaN. It is only valid when a NoNaN flag is present.
13698 DenormalMode DenormMode = DAG.getDenormalMode(VT);
13699 if (DenormMode == DenormalMode::getIEEE()) {
13700 if (SDValue NegN1 =
13701 TLI.getNegatedExpression(N1, DAG, LegalOperations, ForCodeSize))
13702 return NegN1;
13703 if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))
13704 return DAG.getNode(ISD::FNEG, DL, VT, N1);
13705 }
13706 }
13707 }
13708
13709 if (((Options.UnsafeFPMath && Options.NoSignedZerosFPMath) ||
13710 (Flags.hasAllowReassociation() && Flags.hasNoSignedZeros())) &&
13711 N1.getOpcode() == ISD::FADD) {
13712 // X - (X + Y) -> -Y
13713 if (N0 == N1->getOperand(0))
13714 return DAG.getNode(ISD::FNEG, DL, VT, N1->getOperand(1));
13715 // X - (Y + X) -> -Y
13716 if (N0 == N1->getOperand(1))
13717 return DAG.getNode(ISD::FNEG, DL, VT, N1->getOperand(0));
13718 }
13719
13720 // fold (fsub A, (fneg B)) -> (fadd A, B)
13721 if (SDValue NegN1 =
13722 TLI.getNegatedExpression(N1, DAG, LegalOperations, ForCodeSize))
13723 return DAG.getNode(ISD::FADD, DL, VT, N0, NegN1);
13724
13725 // FSUB -> FMA combines:
13726 if (SDValue Fused = visitFSUBForFMACombine(N)) {
13727 AddToWorklist(Fused.getNode());
13728 return Fused;
13729 }
13730
13731 return SDValue();
13732}
13733
13734SDValue DAGCombiner::visitFMUL(SDNode *N) {
13735 SDValue N0 = N->getOperand(0);
13736 SDValue N1 = N->getOperand(1);
13737 ConstantFPSDNode *N0CFP = isConstOrConstSplatFP(N0, true);
13738 ConstantFPSDNode *N1CFP = isConstOrConstSplatFP(N1, true);
13739 EVT VT = N->getValueType(0);
13740 SDLoc DL(N);
13741 const TargetOptions &Options = DAG.getTarget().Options;
13742 const SDNodeFlags Flags = N->getFlags();
13743 SelectionDAG::FlagInserter FlagsInserter(DAG, N);
13744
13745 if (SDValue R = DAG.simplifyFPBinop(N->getOpcode(), N0, N1, Flags))
13746 return R;
13747
13748 // fold vector ops
13749 if (VT.isVector()) {
13750 // This just handles C1 * C2 for vectors. Other vector folds are below.
13751 if (SDValue FoldedVOp = SimplifyVBinOp(N))
13752 return FoldedVOp;
13753 }
13754
13755 // fold (fmul c1, c2) -> c1*c2
13756 if (N0CFP && N1CFP)
13757 return DAG.getNode(ISD::FMUL, DL, VT, N0, N1);
13758
13759 // canonicalize constant to RHS
13760 if (DAG.isConstantFPBuildVectorOrConstantFP(N0) &&
13761 !DAG.isConstantFPBuildVectorOrConstantFP(N1))
13762 return DAG.getNode(ISD::FMUL, DL, VT, N1, N0);
13763
13764 if (SDValue NewSel = foldBinOpIntoSelect(N))
13765 return NewSel;
13766
13767 if (Options.UnsafeFPMath || Flags.hasAllowReassociation()) {
13768 // fmul (fmul X, C1), C2 -> fmul X, C1 * C2
13769 if (DAG.isConstantFPBuildVectorOrConstantFP(N1) &&
13770 N0.getOpcode() == ISD::FMUL) {
13771 SDValue N00 = N0.getOperand(0);
13772 SDValue N01 = N0.getOperand(1);
13773 // Avoid an infinite loop by making sure that N00 is not a constant
13774 // (the inner multiply has not been constant folded yet).
13775 if (DAG.isConstantFPBuildVectorOrConstantFP(N01) &&
13776 !DAG.isConstantFPBuildVectorOrConstantFP(N00)) {
13777 SDValue MulConsts = DAG.getNode(ISD::FMUL, DL, VT, N01, N1);
13778 return DAG.getNode(ISD::FMUL, DL, VT, N00, MulConsts);
13779 }
13780 }
13781
13782 // Match a special-case: we convert X * 2.0 into fadd.
13783 // fmul (fadd X, X), C -> fmul X, 2.0 * C
13784 if (N0.getOpcode() == ISD::FADD && N0.hasOneUse() &&
13785 N0.getOperand(0) == N0.getOperand(1)) {
13786 const SDValue Two = DAG.getConstantFP(2.0, DL, VT);
13787 SDValue MulConsts = DAG.getNode(ISD::FMUL, DL, VT, Two, N1);
13788 return DAG.getNode(ISD::FMUL, DL, VT, N0.getOperand(0), MulConsts);
13789 }
13790 }
13791
13792 // fold (fmul X, 2.0) -> (fadd X, X)
13793 if (N1CFP && N1CFP->isExactlyValue(+2.0))
13794 return DAG.getNode(ISD::FADD, DL, VT, N0, N0);
13795
13796 // fold (fmul X, -1.0) -> (fneg X)
13797 if (N1CFP && N1CFP->isExactlyValue(-1.0))
13798 if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))
13799 return DAG.getNode(ISD::FNEG, DL, VT, N0);
13800
13801 // -N0 * -N1 --> N0 * N1
13802 TargetLowering::NegatibleCost CostN0 =
13803 TargetLowering::NegatibleCost::Expensive;
13804 TargetLowering::NegatibleCost CostN1 =
13805 TargetLowering::NegatibleCost::Expensive;
13806 SDValue NegN0 =
13807 TLI.getNegatedExpression(N0, DAG, LegalOperations, ForCodeSize, CostN0);
13808 SDValue NegN1 =
13809 TLI.getNegatedExpression(N1, DAG, LegalOperations, ForCodeSize, CostN1);
13810 if (NegN0 && NegN1 &&
13811 (CostN0 == TargetLowering::NegatibleCost::Cheaper ||
13812 CostN1 == TargetLowering::NegatibleCost::Cheaper))
13813 return DAG.getNode(ISD::FMUL, DL, VT, NegN0, NegN1);
13814
13815 // fold (fmul X, (select (fcmp X > 0.0), -1.0, 1.0)) -> (fneg (fabs X))
13816 // fold (fmul X, (select (fcmp X > 0.0), 1.0, -1.0)) -> (fabs X)
13817 if (Flags.hasNoNaNs() && Flags.hasNoSignedZeros() &&
13818 (N0.getOpcode() == ISD::SELECT || N1.getOpcode() == ISD::SELECT) &&
13819 TLI.isOperationLegal(ISD::FABS, VT)) {
13820 SDValue Select = N0, X = N1;
13821 if (Select.getOpcode() != ISD::SELECT)
13822 std::swap(Select, X);
13823
13824 SDValue Cond = Select.getOperand(0);
13825 auto TrueOpnd = dyn_cast<ConstantFPSDNode>(Select.getOperand(1));
13826 auto FalseOpnd = dyn_cast<ConstantFPSDNode>(Select.getOperand(2));
13827
13828 if (TrueOpnd && FalseOpnd &&
13829 Cond.getOpcode() == ISD::SETCC && Cond.getOperand(0) == X &&
13830 isa<ConstantFPSDNode>(Cond.getOperand(1)) &&
13831 cast<ConstantFPSDNode>(Cond.getOperand(1))->isExactlyValue(0.0)) {
13832 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
13833 switch (CC) {
13834 default: break;
13835 case ISD::SETOLT:
13836 case ISD::SETULT:
13837 case ISD::SETOLE:
13838 case ISD::SETULE:
13839 case ISD::SETLT:
13840 case ISD::SETLE:
13841 std::swap(TrueOpnd, FalseOpnd);
13842        LLVM_FALLTHROUGH;
13843 case ISD::SETOGT:
13844 case ISD::SETUGT:
13845 case ISD::SETOGE:
13846 case ISD::SETUGE:
13847 case ISD::SETGT:
13848 case ISD::SETGE:
13849 if (TrueOpnd->isExactlyValue(-1.0) && FalseOpnd->isExactlyValue(1.0) &&
13850 TLI.isOperationLegal(ISD::FNEG, VT))
13851 return DAG.getNode(ISD::FNEG, DL, VT,
13852 DAG.getNode(ISD::FABS, DL, VT, X));
13853 if (TrueOpnd->isExactlyValue(1.0) && FalseOpnd->isExactlyValue(-1.0))
13854 return DAG.getNode(ISD::FABS, DL, VT, X);
13855
13856 break;
13857 }
13858 }
13859 }
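  // [Editorial example, not part of the LLVM source] The select fold above
  // recognizes a branchless sign copy: with nnan and nsz,
  // x * ((x > 0.0) ? 1.0 : -1.0) is fabs(x), and the -1.0/1.0 variant is
  // fneg(fabs(x)).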
13860
13861 // FMUL -> FMA combines:
13862 if (SDValue Fused = visitFMULForFMADistributiveCombine(N)) {
13863 AddToWorklist(Fused.getNode());
13864 return Fused;
13865 }
13866
13867 return SDValue();
13868}
13869
13870SDValue DAGCombiner::visitFMA(SDNode *N) {
13871 SDValue N0 = N->getOperand(0);
13872 SDValue N1 = N->getOperand(1);
13873 SDValue N2 = N->getOperand(2);
13874 ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
13875 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
13876 EVT VT = N->getValueType(0);
13877 SDLoc DL(N);
13878 const TargetOptions &Options = DAG.getTarget().Options;
13879 // FMA nodes have flags that propagate to the created nodes.
13880 SelectionDAG::FlagInserter FlagsInserter(DAG, N);
13881
13882 bool UnsafeFPMath =
13883 Options.UnsafeFPMath || N->getFlags().hasAllowReassociation();
13884
13885 // Constant fold FMA.
13886 if (isa<ConstantFPSDNode>(N0) &&
13887 isa<ConstantFPSDNode>(N1) &&
13888 isa<ConstantFPSDNode>(N2)) {
13889 return DAG.getNode(ISD::FMA, DL, VT, N0, N1, N2);
13890 }
13891
13892 // (-N0 * -N1) + N2 --> (N0 * N1) + N2
13893 TargetLowering::NegatibleCost CostN0 =
13894 TargetLowering::NegatibleCost::Expensive;
13895 TargetLowering::NegatibleCost CostN1 =
13896 TargetLowering::NegatibleCost::Expensive;
13897 SDValue NegN0 =
13898 TLI.getNegatedExpression(N0, DAG, LegalOperations, ForCodeSize, CostN0);
13899 SDValue NegN1 =
13900 TLI.getNegatedExpression(N1, DAG, LegalOperations, ForCodeSize, CostN1);
13901 if (NegN0 && NegN1 &&
13902 (CostN0 == TargetLowering::NegatibleCost::Cheaper ||
13903 CostN1 == TargetLowering::NegatibleCost::Cheaper))
13904 return DAG.getNode(ISD::FMA, DL, VT, NegN0, NegN1, N2);
13905
13906 if (UnsafeFPMath) {
13907 if (N0CFP && N0CFP->isZero())
13908 return N2;
13909 if (N1CFP && N1CFP->isZero())
13910 return N2;
13911 }
13912
13913 if (N0CFP && N0CFP->isExactlyValue(1.0))
13914 return DAG.getNode(ISD::FADD, SDLoc(N), VT, N1, N2);
13915 if (N1CFP && N1CFP->isExactlyValue(1.0))
13916 return DAG.getNode(ISD::FADD, SDLoc(N), VT, N0, N2);
13917
13918 // Canonicalize (fma c, x, y) -> (fma x, c, y)
13919 if (DAG.isConstantFPBuildVectorOrConstantFP(N0) &&
13920 !DAG.isConstantFPBuildVectorOrConstantFP(N1))
13921 return DAG.getNode(ISD::FMA, SDLoc(N), VT, N1, N0, N2);
13922
13923 if (UnsafeFPMath) {
13924 // (fma x, c1, (fmul x, c2)) -> (fmul x, c1+c2)
13925 if (N2.getOpcode() == ISD::FMUL && N0 == N2.getOperand(0) &&
13926 DAG.isConstantFPBuildVectorOrConstantFP(N1) &&
13927 DAG.isConstantFPBuildVectorOrConstantFP(N2.getOperand(1))) {
13928 return DAG.getNode(ISD::FMUL, DL, VT, N0,
13929 DAG.getNode(ISD::FADD, DL, VT, N1, N2.getOperand(1)));
13930 }
13931
13932 // (fma (fmul x, c1), c2, y) -> (fma x, c1*c2, y)
13933 if (N0.getOpcode() == ISD::FMUL &&
13934 DAG.isConstantFPBuildVectorOrConstantFP(N1) &&
13935 DAG.isConstantFPBuildVectorOrConstantFP(N0.getOperand(1))) {
13936 return DAG.getNode(ISD::FMA, DL, VT, N0.getOperand(0),
13937 DAG.getNode(ISD::FMUL, DL, VT, N1, N0.getOperand(1)),
13938 N2);
13939 }
13940 }
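  // [Editorial example, not part of the LLVM source] The unsafe folds above
  // rely on distribution: fma(x, c1, fmul(x, c2)) == x*c1 + x*c2
  // == x*(c1 + c2), which is only value-preserving when reassociation is
  // permitted.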
13941
13942  // (fma x, 1, y) -> (fadd x, y); (fma x, -1, y) -> (fadd (fneg x), y)
13943 if (N1CFP) {
13944 if (N1CFP->isExactlyValue(1.0))
13945 return DAG.getNode(ISD::FADD, DL, VT, N0, N2);
13946
13947 if (N1CFP->isExactlyValue(-1.0) &&
13948 (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))) {
13949 SDValue RHSNeg = DAG.getNode(ISD::FNEG, DL, VT, N0);
13950 AddToWorklist(RHSNeg.getNode());
13951 return DAG.getNode(ISD::FADD, DL, VT, N2, RHSNeg);
13952 }
13953
13954      // fma (fneg x), K, y -> fma x, -K, y
13955 if (N0.getOpcode() == ISD::FNEG &&
13956 (TLI.isOperationLegal(ISD::ConstantFP, VT) ||
13957 (N1.hasOneUse() && !TLI.isFPImmLegal(N1CFP->getValueAPF(), VT,
13958 ForCodeSize)))) {
13959 return DAG.getNode(ISD::FMA, DL, VT, N0.getOperand(0),
13960 DAG.getNode(ISD::FNEG, DL, VT, N1), N2);
13961 }
13962 }
13963
13964 if (UnsafeFPMath) {
13965 // (fma x, c, x) -> (fmul x, (c+1))
13966 if (N1CFP && N0 == N2) {
13967 return DAG.getNode(
13968 ISD::FMUL, DL, VT, N0,
13969 DAG.getNode(ISD::FADD, DL, VT, N1, DAG.getConstantFP(1.0, DL, VT)));
13970 }
13971
13972 // (fma x, c, (fneg x)) -> (fmul x, (c-1))
13973 if (N1CFP && N2.getOpcode() == ISD::FNEG && N2.getOperand(0) == N0) {
13974 return DAG.getNode(
13975 ISD::FMUL, DL, VT, N0,
13976 DAG.getNode(ISD::FADD, DL, VT, N1, DAG.getConstantFP(-1.0, DL, VT)));
13977 }
13978 }
13979
13980 // fold ((fma (fneg X), Y, (fneg Z)) -> fneg (fma X, Y, Z))
13981 // fold ((fma X, (fneg Y), (fneg Z)) -> fneg (fma X, Y, Z))
13982 if (!TLI.isFNegFree(VT))
13983 if (SDValue Neg = TLI.getCheaperNegatedExpression(
13984 SDValue(N, 0), DAG, LegalOperations, ForCodeSize))
13985 return DAG.getNode(ISD::FNEG, DL, VT, Neg);
13986 return SDValue();
13987}
13988
13989// Combine multiple FDIVs with the same divisor into multiple FMULs by the
13990// reciprocal.
13991// E.g., (a / D; b / D;) -> (recip = 1.0 / D; a * recip; b * recip)
13992// Notice that this is not always beneficial. One reason is different targets
13993// may have different costs for FDIV and FMUL, so sometimes the cost of two
13994// FDIVs may be lower than the cost of one FDIV and two FMULs. Another reason
13995// is the critical path is increased from "one FDIV" to "one FDIV + one FMUL".
13996SDValue DAGCombiner::combineRepeatedFPDivisors(SDNode *N) {
13997 // TODO: Limit this transform based on optsize/minsize - it always creates at
13998 // least 1 extra instruction. But the perf win may be substantial enough
13999 // that only minsize should restrict this.
14000 bool UnsafeMath = DAG.getTarget().Options.UnsafeFPMath;
14001 const SDNodeFlags Flags = N->getFlags();
14002 if (LegalDAG || (!UnsafeMath && !Flags.hasAllowReciprocal()))
14003 return SDValue();
14004
14005 // Skip if current node is a reciprocal/fneg-reciprocal.
14006 SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
14007 ConstantFPSDNode *N0CFP = isConstOrConstSplatFP(N0, /* AllowUndefs */ true);
14008 if (N0CFP && (N0CFP->isExactlyValue(1.0) || N0CFP->isExactlyValue(-1.0)))
14009 return SDValue();
14010
14011 // Exit early if the target does not want this transform or if there can't
14012 // possibly be enough uses of the divisor to make the transform worthwhile.
14013 unsigned MinUses = TLI.combineRepeatedFPDivisors();
14014
14015 // For splat vectors, scale the number of uses by the splat factor. If we can
14016 // convert the division into a scalar op, that will likely be much faster.
14017 unsigned NumElts = 1;
14018 EVT VT = N->getValueType(0);
14019 if (VT.isVector() && DAG.isSplatValue(N1))
14020 NumElts = VT.getVectorNumElements();
14021
14022 if (!MinUses || (N1->use_size() * NumElts) < MinUses)
14023 return SDValue();
14024
14025 // Find all FDIV users of the same divisor.
14026 // Use a set because duplicates may be present in the user list.
14027 SetVector<SDNode *> Users;
14028 for (auto *U : N1->uses()) {
14029 if (U->getOpcode() == ISD::FDIV && U->getOperand(1) == N1) {
14030 // Skip X/sqrt(X) that has not been simplified to sqrt(X) yet.
14031 if (U->getOperand(1).getOpcode() == ISD::FSQRT &&
14032 U->getOperand(0) == U->getOperand(1).getOperand(0) &&
14033 U->getFlags().hasAllowReassociation() &&
14034 U->getFlags().hasNoSignedZeros())
14035 continue;
14036
14037 // This division is eligible for optimization only if global unsafe math
14038 // is enabled or if this division allows reciprocal formation.
14039 if (UnsafeMath || U->getFlags().hasAllowReciprocal())
14040 Users.insert(U);
14041 }
14042 }
14043
14044 // Now that we have the actual number of divisor uses, make sure it meets
14045 // the minimum threshold specified by the target.
14046 if ((Users.size() * NumElts) < MinUses)
14047 return SDValue();
14048
14049 SDLoc DL(N);
14050 SDValue FPOne = DAG.getConstantFP(1.0, DL, VT);
14051 SDValue Reciprocal = DAG.getNode(ISD::FDIV, DL, VT, FPOne, N1, Flags);
14052
14053 // Dividend / Divisor -> Dividend * Reciprocal
14054 for (auto *U : Users) {
14055 SDValue Dividend = U->getOperand(0);
14056 if (Dividend != FPOne) {
14057 SDValue NewNode = DAG.getNode(ISD::FMUL, SDLoc(U), VT, Dividend,
14058 Reciprocal, Flags);
14059 CombineTo(U, NewNode);
14060 } else if (U != Reciprocal.getNode()) {
14061 // In the absence of fast-math-flags, this user node is always the
14062 // same node as Reciprocal, but with FMF they may be different nodes.
14063 CombineTo(U, Reciprocal);
14064 }
14065 }
14066 return SDValue(N, 0); // N was replaced.
14067}
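  // [Editorial example, not part of the LLVM source] With divides a/d, b/d,
  // c/d and a target MinUses <= 3, the loop above rewrites them as
  // r = 1.0/d; a*r; b*r; c*r -- one FDIV plus three cheaper FMULs.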
14068
14069SDValue DAGCombiner::visitFDIV(SDNode *N) {
14070 SDValue N0 = N->getOperand(0);
14071 SDValue N1 = N->getOperand(1);
14072 ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
14073 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
14074 EVT VT = N->getValueType(0);
14075 SDLoc DL(N);
14076 const TargetOptions &Options = DAG.getTarget().Options;
14077 SDNodeFlags Flags = N->getFlags();
14078 SelectionDAG::FlagInserter FlagsInserter(DAG, N);
14079
14080 if (SDValue R = DAG.simplifyFPBinop(N->getOpcode(), N0, N1, Flags))
14081 return R;
14082
14083 // fold vector ops
14084 if (VT.isVector())
14085 if (SDValue FoldedVOp = SimplifyVBinOp(N))
14086 return FoldedVOp;
14087
14088 // fold (fdiv c1, c2) -> c1/c2
14089 if (N0CFP && N1CFP)
14090 return DAG.getNode(ISD::FDIV, SDLoc(N), VT, N0, N1);
14091
14092 if (SDValue NewSel = foldBinOpIntoSelect(N))
14093 return NewSel;
14094
14095 if (SDValue V = combineRepeatedFPDivisors(N))
14096 return V;
14097
14098 if (Options.UnsafeFPMath || Flags.hasAllowReciprocal()) {
14099 // fold (fdiv X, c2) -> fmul X, 1/c2 if losing precision is acceptable.
14100 if (N1CFP) {
14101 // Compute the reciprocal 1.0 / c2.
14102 const APFloat &N1APF = N1CFP->getValueAPF();
14103 APFloat Recip(N1APF.getSemantics(), 1); // 1.0
14104 APFloat::opStatus st = Recip.divide(N1APF, APFloat::rmNearestTiesToEven);
14105 // Only do the transform if the reciprocal is a legal fp immediate that
14106 // isn't too nasty (eg NaN, denormal, ...).
14107 if ((st == APFloat::opOK || st == APFloat::opInexact) && // Not too nasty
14108 (!LegalOperations ||
14109 // FIXME: custom lowering of ConstantFP might fail (see e.g. ARM
14110 // backend)... we should handle this gracefully after Legalize.
14111 // TLI.isOperationLegalOrCustom(ISD::ConstantFP, VT) ||
14112 TLI.isOperationLegal(ISD::ConstantFP, VT) ||
14113 TLI.isFPImmLegal(Recip, VT, ForCodeSize)))
14114 return DAG.getNode(ISD::FMUL, DL, VT, N0,
14115 DAG.getConstantFP(Recip, DL, VT));
14116 }
14117
14118 // If this FDIV is part of a reciprocal square root, it may be folded
14119 // into a target-specific square root estimate instruction.
14120 if (N1.getOpcode() == ISD::FSQRT) {
14121 if (SDValue RV = buildRsqrtEstimate(N1.getOperand(0), Flags))
14122 return DAG.getNode(ISD::FMUL, DL, VT, N0, RV);
14123 } else if (N1.getOpcode() == ISD::FP_EXTEND &&
14124 N1.getOperand(0).getOpcode() == ISD::FSQRT) {
14125 if (SDValue RV =
14126 buildRsqrtEstimate(N1.getOperand(0).getOperand(0), Flags)) {
14127 RV = DAG.getNode(ISD::FP_EXTEND, SDLoc(N1), VT, RV);
14128 AddToWorklist(RV.getNode());
14129 return DAG.getNode(ISD::FMUL, DL, VT, N0, RV);
14130 }
14131 } else if (N1.getOpcode() == ISD::FP_ROUND &&
14132 N1.getOperand(0).getOpcode() == ISD::FSQRT) {
14133 if (SDValue RV =
14134 buildRsqrtEstimate(N1.getOperand(0).getOperand(0), Flags)) {
14135 RV = DAG.getNode(ISD::FP_ROUND, SDLoc(N1), VT, RV, N1.getOperand(1));
14136 AddToWorklist(RV.getNode());
14137 return DAG.getNode(ISD::FMUL, DL, VT, N0, RV);
14138 }
14139 } else if (N1.getOpcode() == ISD::FMUL) {
14140 // Look through an FMUL. Even though this won't remove the FDIV directly,
14141 // it's still worthwhile to get rid of the FSQRT if possible.
14142 SDValue Sqrt, Y;
14143 if (N1.getOperand(0).getOpcode() == ISD::FSQRT) {
14144 Sqrt = N1.getOperand(0);
14145 Y = N1.getOperand(1);
14146 } else if (N1.getOperand(1).getOpcode() == ISD::FSQRT) {
14147 Sqrt = N1.getOperand(1);
14148 Y = N1.getOperand(0);
14149 }
14150 if (Sqrt.getNode()) {
14151 // If the other multiply operand is known positive, pull it into the
14152 // sqrt. That will eliminate the division if we convert to an estimate.
14153 if (Flags.hasAllowReassociation() && N1.hasOneUse() &&
14154 N1->getFlags().hasAllowReassociation() && Sqrt.hasOneUse()) {
14155 SDValue A;
14156 if (Y.getOpcode() == ISD::FABS && Y.hasOneUse())
14157 A = Y.getOperand(0);
14158 else if (Y == Sqrt.getOperand(0))
14159 A = Y;
14160 if (A) {
14161 // X / (fabs(A) * sqrt(Z)) --> X / sqrt(A*A*Z) --> X * rsqrt(A*A*Z)
14162 // X / (A * sqrt(A)) --> X / sqrt(A*A*A) --> X * rsqrt(A*A*A)
14163 SDValue AA = DAG.getNode(ISD::FMUL, DL, VT, A, A);
14164 SDValue AAZ =
14165 DAG.getNode(ISD::FMUL, DL, VT, AA, Sqrt.getOperand(0));
14166 if (SDValue Rsqrt = buildRsqrtEstimate(AAZ, Flags))
14167 return DAG.getNode(ISD::FMUL, DL, VT, N0, Rsqrt);
14168
14169 // Estimate creation failed. Clean up speculatively created nodes.
14170 recursivelyDeleteUnusedNodes(AAZ.getNode());
14171 }
14172 }
14173
14174 // We found a FSQRT, so try to make this fold:
14175 // X / (Y * sqrt(Z)) -> X * (rsqrt(Z) / Y)
14176 if (SDValue Rsqrt = buildRsqrtEstimate(Sqrt.getOperand(0), Flags)) {
14177 SDValue Div = DAG.getNode(ISD::FDIV, SDLoc(N1), VT, Rsqrt, Y);
14178 AddToWorklist(Div.getNode());
14179 return DAG.getNode(ISD::FMUL, DL, VT, N0, Div);
14180 }
14181 }
14182 }
14183
14184 // Fold into a reciprocal estimate and multiply instead of a real divide.
14185 if (Options.NoInfsFPMath || Flags.hasNoInfs())
14186 if (SDValue RV = BuildDivEstimate(N0, N1, Flags))
14187 return RV;
14188 }
14189
14190 // Fold X/Sqrt(X) -> Sqrt(X)
14191 if ((Options.NoSignedZerosFPMath || Flags.hasNoSignedZeros()) &&
14192 (Options.UnsafeFPMath || Flags.hasAllowReassociation()))
14193 if (N1.getOpcode() == ISD::FSQRT && N0 == N1.getOperand(0))
14194 return N1;
14195
14196 // (fdiv (fneg X), (fneg Y)) -> (fdiv X, Y)
14197 TargetLowering::NegatibleCost CostN0 =
14198 TargetLowering::NegatibleCost::Expensive;
14199 TargetLowering::NegatibleCost CostN1 =
14200 TargetLowering::NegatibleCost::Expensive;
14201 SDValue NegN0 =
14202 TLI.getNegatedExpression(N0, DAG, LegalOperations, ForCodeSize, CostN0);
14203 SDValue NegN1 =
14204 TLI.getNegatedExpression(N1, DAG, LegalOperations, ForCodeSize, CostN1);
14205 if (NegN0 && NegN1 &&
14206 (CostN0 == TargetLowering::NegatibleCost::Cheaper ||
14207 CostN1 == TargetLowering::NegatibleCost::Cheaper))
14208 return DAG.getNode(ISD::FDIV, SDLoc(N), VT, NegN0, NegN1);
14209
14210 return SDValue();
14211}
14212
14213SDValue DAGCombiner::visitFREM(SDNode *N) {
14214 SDValue N0 = N->getOperand(0);
14215 SDValue N1 = N->getOperand(1);
14216 ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
14217 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
14218 EVT VT = N->getValueType(0);
14219 SDNodeFlags Flags = N->getFlags();
14220 SelectionDAG::FlagInserter FlagsInserter(DAG, N);
14221
14222 if (SDValue R = DAG.simplifyFPBinop(N->getOpcode(), N0, N1, Flags))
14223 return R;
14224
14225 // fold (frem c1, c2) -> fmod(c1,c2)
14226 if (N0CFP && N1CFP)
14227 return DAG.getNode(ISD::FREM, SDLoc(N), VT, N0, N1);
14228
14229 if (SDValue NewSel = foldBinOpIntoSelect(N))
14230 return NewSel;
14231
14232 return SDValue();
14233}
14234
14235SDValue DAGCombiner::visitFSQRT(SDNode *N) {
14236 SDNodeFlags Flags = N->getFlags();
14237 const TargetOptions &Options = DAG.getTarget().Options;
14238
14239 // Require 'ninf' flag since sqrt(+Inf) = +Inf, but the estimation goes as:
14240 // sqrt(+Inf) == rsqrt(+Inf) * +Inf = 0 * +Inf = NaN
14241 if (!Flags.hasApproximateFuncs() ||
14242 (!Options.NoInfsFPMath && !Flags.hasNoInfs()))
14243 return SDValue();
14244
14245 SDValue N0 = N->getOperand(0);
14246 if (TLI.isFsqrtCheap(N0, DAG))
14247 return SDValue();
14248
14249 // FSQRT nodes have flags that propagate to the created nodes.
14250 // TODO: If this is N0/sqrt(N0), and we reach this node before trying to
14251 // transform the fdiv, we may produce a sub-optimal estimate sequence
14252 // because the reciprocal calculation may not have to filter out a
14253 // 0.0 input.
14254 return buildSqrtEstimate(N0, Flags);
14255}
14256
14257/// copysign(x, fp_extend(y)) -> copysign(x, y)
14258/// copysign(x, fp_round(y)) -> copysign(x, y)
14259static inline bool CanCombineFCOPYSIGN_EXTEND_ROUND(SDNode *N) {
14260 SDValue N1 = N->getOperand(1);
14261 if ((N1.getOpcode() == ISD::FP_EXTEND ||
14262 N1.getOpcode() == ISD::FP_ROUND)) {
14263 EVT N1VT = N1->getValueType(0);
14264 EVT N1Op0VT = N1->getOperand(0).getValueType();
14265
14266 // Always fold no-op FP casts.
14267 if (N1VT == N1Op0VT)
14268 return true;
14269
14270 // Do not optimize out type conversion of f128 type yet.
14271 // For some targets like x86_64, configuration is changed to keep one f128
14272 // value in one SSE register, but instruction selection cannot handle
14273 // FCOPYSIGN on SSE registers yet.
14274 if (N1Op0VT == MVT::f128)
14275 return false;
14276
14277 // Avoid mismatched vector operand types, for better instruction selection.
14278 if (N1Op0VT.isVector())
14279 return false;
14280
14281 return true;
14282 }
14283 return false;
14284}
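// Example of the guarded fold (operand types chosen for illustration): in
//   copysign(f64 X, fp_extend(f32 Y))
// only the sign bit of Y is consumed, so the cast can be dropped to give
//   copysign(f64 X, f32 Y)
// unless Y's pre-cast type is f128 or a vector, where the bail-outs above
// apply because instruction selection may not handle the mismatched types.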
14285
14286SDValue DAGCombiner::visitFCOPYSIGN(SDNode *N) {
14287 SDValue N0 = N->getOperand(0);
14288 SDValue N1 = N->getOperand(1);
14289 bool N0CFP = DAG.isConstantFPBuildVectorOrConstantFP(N0);
14290 bool N1CFP = DAG.isConstantFPBuildVectorOrConstantFP(N1);
14291 EVT VT = N->getValueType(0);
14292
14293 if (N0CFP && N1CFP) // Constant fold
14294 return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT, N0, N1);
14295
14296 if (ConstantFPSDNode *N1C = isConstOrConstSplatFP(N->getOperand(1))) {
14297 const APFloat &V = N1C->getValueAPF();
14298 // copysign(x, c1) -> fabs(x) iff ispos(c1)
14299 // copysign(x, c1) -> fneg(fabs(x)) iff isneg(c1)
14300 if (!V.isNegative()) {
14301 if (!LegalOperations || TLI.isOperationLegal(ISD::FABS, VT))
14302 return DAG.getNode(ISD::FABS, SDLoc(N), VT, N0);
14303 } else {
14304 if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))
14305 return DAG.getNode(ISD::FNEG, SDLoc(N), VT,
14306 DAG.getNode(ISD::FABS, SDLoc(N0), VT, N0));
14307 }
14308 }
14309
14310 // copysign(fabs(x), y) -> copysign(x, y)
14311 // copysign(fneg(x), y) -> copysign(x, y)
14312 // copysign(copysign(x,z), y) -> copysign(x, y)
14313 if (N0.getOpcode() == ISD::FABS || N0.getOpcode() == ISD::FNEG ||
14314 N0.getOpcode() == ISD::FCOPYSIGN)
14315 return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT, N0.getOperand(0), N1);
14316
14317 // copysign(x, abs(y)) -> abs(x)
14318 if (N1.getOpcode() == ISD::FABS)
14319 return DAG.getNode(ISD::FABS, SDLoc(N), VT, N0);
14320
14321 // copysign(x, copysign(y,z)) -> copysign(x, z)
14322 if (N1.getOpcode() == ISD::FCOPYSIGN)
14323 return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT, N0, N1.getOperand(1));
14324
14325 // copysign(x, fp_extend(y)) -> copysign(x, y)
14326 // copysign(x, fp_round(y)) -> copysign(x, y)
14327 if (CanCombineFCOPYSIGN_EXTEND_ROUND(N))
14328 return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT, N0, N1.getOperand(0));
14329
14330 return SDValue();
14331}
14332
14333SDValue DAGCombiner::visitFPOW(SDNode *N) {
14334 ConstantFPSDNode *ExponentC = isConstOrConstSplatFP(N->getOperand(1));
14335 if (!ExponentC)
14336 return SDValue();
14337 SelectionDAG::FlagInserter FlagsInserter(DAG, N);
14338
14339 // Try to convert x ** (1/3) into cube root.
14340 // TODO: Handle the various flavors of long double.
14341 // TODO: Since we're approximating, we don't need an exact 1/3 exponent.
14342 // Some range near 1/3 should be fine.
14343 EVT VT = N->getValueType(0);
14344 if ((VT == MVT::f32 && ExponentC->getValueAPF().isExactlyValue(1.0f/3.0f)) ||
14345 (VT == MVT::f64 && ExponentC->getValueAPF().isExactlyValue(1.0/3.0))) {
14346 // pow(-0.0, 1/3) = +0.0; cbrt(-0.0) = -0.0.
14347 // pow(-inf, 1/3) = +inf; cbrt(-inf) = -inf.
14348 // pow(-val, 1/3) = nan; cbrt(-val) = -cbrt(val).
14349 // For regular numbers, rounding may cause the results to differ.
14350 // Therefore, we require { nsz ninf nnan afn } for this transform.
14351 // TODO: We could select out the special cases if we don't have nsz/ninf.
14352 SDNodeFlags Flags = N->getFlags();
14353 if (!Flags.hasNoSignedZeros() || !Flags.hasNoInfs() || !Flags.hasNoNaNs() ||
14354 !Flags.hasApproximateFuncs())
14355 return SDValue();
14356
14357 // Do not create a cbrt() libcall if the target does not have it, and do not
14358 // turn a pow that has lowering support into a cbrt() libcall.
14359 if (!DAG.getLibInfo().has(LibFunc_cbrt) ||
14360 (!DAG.getTargetLoweringInfo().isOperationExpand(ISD::FPOW, VT) &&
14361 DAG.getTargetLoweringInfo().isOperationExpand(ISD::FCBRT, VT)))
14362 return SDValue();
14363
14364 return DAG.getNode(ISD::FCBRT, SDLoc(N), VT, N->getOperand(0));
14365 }
14366
14367 // Try to convert x ** (1/4) and x ** (3/4) into square roots.
14368 // x ** (1/2) is canonicalized to sqrt, so we do not bother with that case.
14369 // TODO: This could be extended (using a target hook) to handle smaller
14370 // power-of-2 fractional exponents.
14371 bool ExponentIs025 = ExponentC->getValueAPF().isExactlyValue(0.25);
14372 bool ExponentIs075 = ExponentC->getValueAPF().isExactlyValue(0.75);
14373 if (ExponentIs025 || ExponentIs075) {
14374 // pow(-0.0, 0.25) = +0.0; sqrt(sqrt(-0.0)) = -0.0.
14375 // pow(-inf, 0.25) = +inf; sqrt(sqrt(-inf)) = NaN.
14376 // pow(-0.0, 0.75) = +0.0; sqrt(-0.0) * sqrt(sqrt(-0.0)) = +0.0.
14377 // pow(-inf, 0.75) = +inf; sqrt(-inf) * sqrt(sqrt(-inf)) = NaN.
14378 // For regular numbers, rounding may cause the results to differ.
14379 // Therefore, we require { nsz ninf afn } for this transform.
14380 // TODO: We could select out the special cases if we don't have nsz/ninf.
14381 SDNodeFlags Flags = N->getFlags();
14382
14383 // We only need no signed zeros for the 0.25 case.
14384 if ((!Flags.hasNoSignedZeros() && ExponentIs025) || !Flags.hasNoInfs() ||
14385 !Flags.hasApproximateFuncs())
14386 return SDValue();
14387
14388 // Don't double the number of libcalls. We are trying to inline fast code.
14389 if (!DAG.getTargetLoweringInfo().isOperationLegalOrCustom(ISD::FSQRT, VT))
14390 return SDValue();
14391
14392 // Assume that libcalls are the smallest code.
14393 // TODO: This restriction should probably be lifted for vectors.
14394 if (ForCodeSize)
14395 return SDValue();
14396
14397 // pow(X, 0.25) --> sqrt(sqrt(X))
14398 SDLoc DL(N);
14399 SDValue Sqrt = DAG.getNode(ISD::FSQRT, DL, VT, N->getOperand(0));
14400 SDValue SqrtSqrt = DAG.getNode(ISD::FSQRT, DL, VT, Sqrt);
14401 if (ExponentIs025)
14402 return SqrtSqrt;
14403 // pow(X, 0.75) --> sqrt(X) * sqrt(sqrt(X))
14404 return DAG.getNode(ISD::FMUL, DL, VT, Sqrt, SqrtSqrt);
14405 }
14406
14407 return SDValue();
14408}
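// Sanity check of the fractional-exponent folds with a concrete value
// (X = 16, chosen for exposition):
//   pow(16, 0.25) = 2  and  sqrt(sqrt(16)) = sqrt(4) = 2
//   pow(16, 0.75) = 8  and  sqrt(16) * sqrt(sqrt(16)) = 4 * 2 = 8
// For arbitrary inputs rounding may make the two forms differ, which is why
// the nsz/ninf/afn flags are required before the rewrite fires.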
14409
14410static SDValue foldFPToIntToFP(SDNode *N, SelectionDAG &DAG,
14411 const TargetLowering &TLI) {
14412 // This optimization is guarded by a function attribute because it may produce
14413 // unexpected results. I.e., programs may be relying on the platform-specific
14414 // undefined behavior when the float-to-int conversion overflows.
14415 const Function &F = DAG.getMachineFunction().getFunction();
14416 Attribute StrictOverflow = F.getFnAttribute("strict-float-cast-overflow");
14417 if (StrictOverflow.getValueAsString().equals("false"))
14418 return SDValue();
14419
14420 // We only do this if the target has legal ftrunc. Otherwise, we'd likely be
14421 // replacing casts with a libcall. We also must be allowed to ignore -0.0
14422 // because FTRUNC will return -0.0 for (-1.0, -0.0), but using integer
14423 // conversions would return +0.0.
14424 // FIXME: We should be able to use node-level FMF here.
14425 // TODO: If strict math, should we use FABS (+ range check for signed cast)?
14426 EVT VT = N->getValueType(0);
14427 if (!TLI.isOperationLegal(ISD::FTRUNC, VT) ||
14428 !DAG.getTarget().Options.NoSignedZerosFPMath)
14429 return SDValue();
14430
14431 // fptosi/fptoui round towards zero, so converting from FP to integer and
14432 // back is the same as an 'ftrunc': [us]itofp (fpto[us]i X) --> ftrunc X
14433 SDValue N0 = N->getOperand(0);
14434 if (N->getOpcode() == ISD::SINT_TO_FP && N0.getOpcode() == ISD::FP_TO_SINT &&
14435 N0.getOperand(0).getValueType() == VT)
14436 return DAG.getNode(ISD::FTRUNC, SDLoc(N), VT, N0.getOperand(0));
14437
14438 if (N->getOpcode() == ISD::UINT_TO_FP && N0.getOpcode() == ISD::FP_TO_UINT &&
14439 N0.getOperand(0).getValueType() == VT)
14440 return DAG.getNode(ISD::FTRUNC, SDLoc(N), VT, N0.getOperand(0));
14441
14442 return SDValue();
14443}
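// Illustrative behavior of the round-trip fold above (assuming f64 with
// legal FTRUNC and NoSignedZerosFPMath): sitofp(fptosi 2.7) computes
// 2 -> 2.0, and ftrunc(2.7) = 2.0 as well. The mismatch is inputs in
// (-1.0, -0.0], where ftrunc yields -0.0 but the integer round trip yields
// +0.0 -- exactly the case the no-signed-zeros requirement tolerates.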
14444
14445SDValue DAGCombiner::visitSINT_TO_FP(SDNode *N) {
14446 SDValue N0 = N->getOperand(0);
14447 EVT VT = N->getValueType(0);
14448 EVT OpVT = N0.getValueType();
14449
14450 // [us]itofp(undef) = 0, because the result value is bounded.
14451 if (N0.isUndef())
14452 return DAG.getConstantFP(0.0, SDLoc(N), VT);
14453
14454 // fold (sint_to_fp c1) -> c1fp
14455 if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
14456 // ...but only if the target supports immediate floating-point values
14457 (!LegalOperations ||
14458 TLI.isOperationLegalOrCustom(ISD::ConstantFP, VT)))
14459 return DAG.getNode(ISD::SINT_TO_FP, SDLoc(N), VT, N0);
14460
14461 // If the input is a legal type, and SINT_TO_FP is not legal on this target,
14462 // but UINT_TO_FP is legal on this target, try to convert.
14463 if (!hasOperation(ISD::SINT_TO_FP, OpVT) &&
14464 hasOperation(ISD::UINT_TO_FP, OpVT)) {
14465 // If the sign bit is known to be zero, we can change this to UINT_TO_FP.
14466 if (DAG.SignBitIsZero(N0))
14467 return DAG.getNode(ISD::UINT_TO_FP, SDLoc(N), VT, N0);
14468 }
14469
14470 // The next optimizations are desirable only if SELECT_CC can be lowered.
14471 // fold (sint_to_fp (setcc x, y, cc)) -> (select (setcc x, y, cc), -1.0, 0.0)
14472 if (N0.getOpcode() == ISD::SETCC && N0.getValueType() == MVT::i1 &&
14473 !VT.isVector() &&
14474 (!LegalOperations || TLI.isOperationLegalOrCustom(ISD::ConstantFP, VT))) {
14475 SDLoc DL(N);
14476 return DAG.getSelect(DL, VT, N0, DAG.getConstantFP(-1.0, DL, VT),
14477 DAG.getConstantFP(0.0, DL, VT));
14478 }
14479
14480 // fold (sint_to_fp (zext (setcc x, y, cc))) ->
14481 // (select (setcc x, y, cc), 1.0, 0.0)
14482 if (N0.getOpcode() == ISD::ZERO_EXTEND &&
14483 N0.getOperand(0).getOpcode() == ISD::SETCC && !VT.isVector() &&
14484 (!LegalOperations || TLI.isOperationLegalOrCustom(ISD::ConstantFP, VT))) {
14485 SDLoc DL(N);
14486 return DAG.getSelect(DL, VT, N0.getOperand(0),
14487 DAG.getConstantFP(1.0, DL, VT),
14488 DAG.getConstantFP(0.0, DL, VT));
14489 }
14490
14491 if (SDValue FTrunc = foldFPToIntToFP(N, DAG, TLI))
14492 return FTrunc;
14493
14494 return SDValue();
14495}
14496
14497SDValue DAGCombiner::visitUINT_TO_FP(SDNode *N) {
14498 SDValue N0 = N->getOperand(0);
14499 EVT VT = N->getValueType(0);
14500 EVT OpVT = N0.getValueType();
14501
14502 // [us]itofp(undef) = 0, because the result value is bounded.
14503 if (N0.isUndef())
14504 return DAG.getConstantFP(0.0, SDLoc(N), VT);
14505
14506 // fold (uint_to_fp c1) -> c1fp
14507 if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
14508 // ...but only if the target supports immediate floating-point values
14509 (!LegalOperations ||
14510 TLI.isOperationLegalOrCustom(ISD::ConstantFP, VT)))
14511 return DAG.getNode(ISD::UINT_TO_FP, SDLoc(N), VT, N0);
14512
14513 // If the input is a legal type, and UINT_TO_FP is not legal on this target,
14514 // but SINT_TO_FP is legal on this target, try to convert.
14515 if (!hasOperation(ISD::UINT_TO_FP, OpVT) &&
14516 hasOperation(ISD::SINT_TO_FP, OpVT)) {
14517 // If the sign bit is known to be zero, we can change this to SINT_TO_FP.
14518 if (DAG.SignBitIsZero(N0))
14519 return DAG.getNode(ISD::SINT_TO_FP, SDLoc(N), VT, N0);
14520 }
14521
14522 // fold (uint_to_fp (setcc x, y, cc)) -> (select (setcc x, y, cc), 1.0, 0.0)
14523 if (N0.getOpcode() == ISD::SETCC && !VT.isVector() &&
14524 (!LegalOperations || TLI.isOperationLegalOrCustom(ISD::ConstantFP, VT))) {
14525 SDLoc DL(N);
14526 return DAG.getSelect(DL, VT, N0, DAG.getConstantFP(1.0, DL, VT),
14527 DAG.getConstantFP(0.0, DL, VT));
14528 }
14529
14530 if (SDValue FTrunc = foldFPToIntToFP(N, DAG, TLI))
14531 return FTrunc;
14532
14533 return SDValue();
14534}
14535
14536 // Fold (fp_to_{s/u}int ({s/u}int_to_fp x)) -> zext x, sext x, trunc x, or x
14537static SDValue FoldIntToFPToInt(SDNode *N, SelectionDAG &DAG) {
14538 SDValue N0 = N->getOperand(0);
14539 EVT VT = N->getValueType(0);
14540
14541 if (N0.getOpcode() != ISD::UINT_TO_FP && N0.getOpcode() != ISD::SINT_TO_FP)
14542 return SDValue();
14543
14544 SDValue Src = N0.getOperand(0);
14545 EVT SrcVT = Src.getValueType();
14546 bool IsInputSigned = N0.getOpcode() == ISD::SINT_TO_FP;
14547 bool IsOutputSigned = N->getOpcode() == ISD::FP_TO_SINT;
14548
14549 // We can safely assume the conversion won't overflow the output range,
14550 // because (for example) (uint8_t)18293.f is undefined behavior.
14551
14552 // Since we can assume the conversion won't overflow, our decision as to
14553 // whether the input will fit in the float should depend on the minimum
14554 // of the input range and output range.
14555
14556 // This means this is also safe for a signed input and unsigned output, since
14557 // a negative input would lead to undefined behavior.
14558 unsigned InputSize = (int)SrcVT.getScalarSizeInBits() - IsInputSigned;
14559 unsigned OutputSize = (int)VT.getScalarSizeInBits() - IsOutputSigned;
14560 unsigned ActualSize = std::min(InputSize, OutputSize);
14561 const fltSemantics &sem = DAG.EVTToAPFloatSemantics(N0.getValueType());
14562
14563 // We can only fold away the float conversion if the input range can be
14564 // represented exactly in the float range.
14565 if (APFloat::semanticsPrecision(sem) >= ActualSize) {
14566 if (VT.getScalarSizeInBits() > SrcVT.getScalarSizeInBits()) {
14567 unsigned ExtOp = IsInputSigned && IsOutputSigned ? ISD::SIGN_EXTEND
14568 : ISD::ZERO_EXTEND;
14569 return DAG.getNode(ExtOp, SDLoc(N), VT, Src);
14570 }
14571 if (VT.getScalarSizeInBits() < SrcVT.getScalarSizeInBits())
14572 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Src);
14573 return DAG.getBitcast(VT, Src);
14574 }
14575 return SDValue();
14576}
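// Worked instance of the precision check above (types chosen for
// illustration): for i16 -> f32 -> i32 with both casts signed,
//   InputSize = 16 - 1 = 15, OutputSize = 32 - 1 = 31, ActualSize = 15,
// and f32 carries 24 bits of precision, so 24 >= 15 and the pair folds to a
// plain sign_extend of the i16 source. For i64 -> f32 -> i64 the check is
// 24 >= 63, which fails, so the conversion cannot be folded away.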
14577
14578SDValue DAGCombiner::visitFP_TO_SINT(SDNode *N) {
14579 SDValue N0 = N->getOperand(0);
14580 EVT VT = N->getValueType(0);
14581
14582 // fold (fp_to_sint undef) -> undef
14583 if (N0.isUndef())
14584 return DAG.getUNDEF(VT);
14585
14586 // fold (fp_to_sint c1fp) -> c1
14587 if (DAG.isConstantFPBuildVectorOrConstantFP(N0))
14588 return DAG.getNode(ISD::FP_TO_SINT, SDLoc(N), VT, N0);
14589
14590 return FoldIntToFPToInt(N, DAG);
14591}
14592
14593SDValue DAGCombiner::visitFP_TO_UINT(SDNode *N) {
14594 SDValue N0 = N->getOperand(0);
14595 EVT VT = N->getValueType(0);
14596
14597 // fold (fp_to_uint undef) -> undef
14598 if (N0.isUndef())
14599 return DAG.getUNDEF(VT);
14600
14601 // fold (fp_to_uint c1fp) -> c1
14602 if (DAG.isConstantFPBuildVectorOrConstantFP(N0))
14603 return DAG.getNode(ISD::FP_TO_UINT, SDLoc(N), VT, N0);
14604
14605 return FoldIntToFPToInt(N, DAG);
14606}
14607
14608SDValue DAGCombiner::visitFP_ROUND(SDNode *N) {
14609 SDValue N0 = N->getOperand(0);
14610 SDValue N1 = N->getOperand(1);
14611 ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
14612 EVT VT = N->getValueType(0);
14613
14614 // fold (fp_round c1fp) -> c1fp
14615 if (N0CFP)
14616 return DAG.getNode(ISD::FP_ROUND, SDLoc(N), VT, N0, N1);
14617
14618 // fold (fp_round (fp_extend x)) -> x
14619 if (N0.getOpcode() == ISD::FP_EXTEND && VT == N0.getOperand(0).getValueType())
14620 return N0.getOperand(0);
14621
14622 // fold (fp_round (fp_round x)) -> (fp_round x)
14623 if (N0.getOpcode() == ISD::FP_ROUND) {
14624 const bool NIsTrunc = N->getConstantOperandVal(1) == 1;
14625 const bool N0IsTrunc = N0.getConstantOperandVal(1) == 1;
14626
14627 // Skip this folding if it results in an fp_round from f80 to f16.
14628 //
14629 // f80 to f16 always generates an expensive (and as yet, unimplemented)
14630 // libcall to __truncxfhf2 instead of selecting native f16 conversion
14631 // instructions from f32 or f64. Moreover, the first (value-preserving)
14632 // fp_round from f80 to either f32 or f64 may become a NOP on platforms like
14633 // x86.
14634 if (N0.getOperand(0).getValueType() == MVT::f80 && VT == MVT::f16)
14635 return SDValue();
14636
14637 // If the first fp_round isn't a value preserving truncation, it might
14638 // introduce a tie in the second fp_round, that wouldn't occur in the
14639 // single-step fp_round we want to fold to.
14640 // In other words, double rounding isn't the same as rounding.
14641 // Also, this is a value preserving truncation iff both fp_round's are.
14642 if (DAG.getTarget().Options.UnsafeFPMath || N0IsTrunc) {
14643 SDLoc DL(N);
14644 return DAG.getNode(ISD::FP_ROUND, DL, VT, N0.getOperand(0),
14645 DAG.getIntPtrConstant(NIsTrunc && N0IsTrunc, DL));
14646 }
14647 }
14648
14649 // fold (fp_round (copysign X, Y)) -> (copysign (fp_round X), Y)
14650 if (N0.getOpcode() == ISD::FCOPYSIGN && N0.getNode()->hasOneUse()) {
14651 SDValue Tmp = DAG.getNode(ISD::FP_ROUND, SDLoc(N0), VT,
14652 N0.getOperand(0), N1);
14653 AddToWorklist(Tmp.getNode());
14654 return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT,
14655 Tmp, N0.getOperand(1));
14656 }
14657
14658 if (SDValue NewVSel = matchVSelectOpSizesWithSetCC(N))
14659 return NewVSel;
14660
14661 return SDValue();
14662}
14663
14664SDValue DAGCombiner::visitFP_EXTEND(SDNode *N) {
14665 SDValue N0 = N->getOperand(0);
14666 EVT VT = N->getValueType(0);
14667
14668 // If this is fp_round(fpextend), don't fold it; allow ourselves to be folded.
14669 if (N->hasOneUse() &&
14670 N->use_begin()->getOpcode() == ISD::FP_ROUND)
14671 return SDValue();
14672
14673 // fold (fp_extend c1fp) -> c1fp
14674 if (DAG.isConstantFPBuildVectorOrConstantFP(N0))
14675 return DAG.getNode(ISD::FP_EXTEND, SDLoc(N), VT, N0);
14676
14677 // fold (fp_extend (fp16_to_fp op)) -> (fp16_to_fp op)
14678 if (N0.getOpcode() == ISD::FP16_TO_FP &&
14679 TLI.getOperationAction(ISD::FP16_TO_FP, VT) == TargetLowering::Legal)
14680 return DAG.getNode(ISD::FP16_TO_FP, SDLoc(N), VT, N0.getOperand(0));
14681
14682 // Turn fp_extend(fp_round(X, 1)) -> X since the fp_round doesn't affect the
14683 // value of X.
14684 if (N0.getOpcode() == ISD::FP_ROUND
14685 && N0.getConstantOperandVal(1) == 1) {
14686 SDValue In = N0.getOperand(0);
14687 if (In.getValueType() == VT) return In;
14688 if (VT.bitsLT(In.getValueType()))
14689 return DAG.getNode(ISD::FP_ROUND, SDLoc(N), VT,
14690 In, N0.getOperand(1));
14691 return DAG.getNode(ISD::FP_EXTEND, SDLoc(N), VT, In);
14692 }
14693
14694 // fold (fpext (load x)) -> (fpext (fptrunc (extload x)))
14695 if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() &&
14696 TLI.isLoadExtLegal(ISD::EXTLOAD, VT, N0.getValueType())) {
14697 LoadSDNode *LN0 = cast<LoadSDNode>(N0);
14698 SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, SDLoc(N), VT,
14699 LN0->getChain(),
14700 LN0->getBasePtr(), N0.getValueType(),
14701 LN0->getMemOperand());
14702 CombineTo(N, ExtLoad);
14703 CombineTo(N0.getNode(),
14704 DAG.getNode(ISD::FP_ROUND, SDLoc(N0),
14705 N0.getValueType(), ExtLoad,
14706 DAG.getIntPtrConstant(1, SDLoc(N0))),
14707 ExtLoad.getValue(1));
14708 return SDValue(N, 0); // Return N so it doesn't get rechecked!
14709 }
14710
14711 if (SDValue NewVSel = matchVSelectOpSizesWithSetCC(N))
14712 return NewVSel;
14713
14714 return SDValue();
14715}
14716
14717SDValue DAGCombiner::visitFCEIL(SDNode *N) {
14718 SDValue N0 = N->getOperand(0);
14719 EVT VT = N->getValueType(0);
14720
14721 // fold (fceil c1) -> fceil(c1)
14722 if (DAG.isConstantFPBuildVectorOrConstantFP(N0))
14723 return DAG.getNode(ISD::FCEIL, SDLoc(N), VT, N0);
14724
14725 return SDValue();
14726}
14727
14728SDValue DAGCombiner::visitFTRUNC(SDNode *N) {
14729 SDValue N0 = N->getOperand(0);
14730 EVT VT = N->getValueType(0);
14731
14732 // fold (ftrunc c1) -> ftrunc(c1)
14733 if (DAG.isConstantFPBuildVectorOrConstantFP(N0))
14734 return DAG.getNode(ISD::FTRUNC, SDLoc(N), VT, N0);
14735
14736 // fold ftrunc (known rounded int x) -> x
14737 // ftrunc is a part of fptosi/fptoui expansion on some targets, so this is
14738 // likely to be generated to extract an integer from a rounded floating value.
14739 switch (N0.getOpcode()) {
14740 default: break;
14741 case ISD::FRINT:
14742 case ISD::FTRUNC:
14743 case ISD::FNEARBYINT:
14744 case ISD::FFLOOR:
14745 case ISD::FCEIL:
14746 return N0;
14747 }
14748
14749 return SDValue();
14750}
14751
14752SDValue DAGCombiner::visitFFLOOR(SDNode *N) {
14753 SDValue N0 = N->getOperand(0);
14754 EVT VT = N->getValueType(0);
14755
14756 // fold (ffloor c1) -> ffloor(c1)
14757 if (DAG.isConstantFPBuildVectorOrConstantFP(N0))
14758 return DAG.getNode(ISD::FFLOOR, SDLoc(N), VT, N0);
14759
14760 return SDValue();
14761}
14762
14763SDValue DAGCombiner::visitFNEG(SDNode *N) {
14764 SDValue N0 = N->getOperand(0);
14765 EVT VT = N->getValueType(0);
14766 SelectionDAG::FlagInserter FlagsInserter(DAG, N);
14767
14768 // Constant fold FNEG.
14769 if (DAG.isConstantFPBuildVectorOrConstantFP(N0))
14770 return DAG.getNode(ISD::FNEG, SDLoc(N), VT, N0);
14771
14772 if (SDValue NegN0 =
14773 TLI.getNegatedExpression(N0, DAG, LegalOperations, ForCodeSize))
14774 return NegN0;
14775
14776 // -(X-Y) -> (Y-X) is unsafe because when X==Y, -0.0 != +0.0
14777 // FIXME: This is duplicated in getNegatibleCost, but getNegatibleCost doesn't
14778 // know it was called from a context with a nsz flag if the input fsub does
14779 // not.
14780 if (N0.getOpcode() == ISD::FSUB &&
14781 (DAG.getTarget().Options.NoSignedZerosFPMath ||
14782 N->getFlags().hasNoSignedZeros()) && N0.hasOneUse()) {
14783 return DAG.getNode(ISD::FSUB, SDLoc(N), VT, N0.getOperand(1),
14784 N0.getOperand(0));
14785 }
14786
14787 if (SDValue Cast = foldSignChangeInBitcast(N))
14788 return Cast;
14789
14790 return SDValue();
14791}
14792
14793static SDValue visitFMinMax(SelectionDAG &DAG, SDNode *N,
14794 APFloat (*Op)(const APFloat &, const APFloat &)) {
14795 SDValue N0 = N->getOperand(0);
14796 SDValue N1 = N->getOperand(1);
14797 EVT VT = N->getValueType(0);
14798 const ConstantFPSDNode *N0CFP = isConstOrConstSplatFP(N0);
14799 const ConstantFPSDNode *N1CFP = isConstOrConstSplatFP(N1);
14800 const SDNodeFlags Flags = N->getFlags();
14801 unsigned Opc = N->getOpcode();
14802 bool PropagatesNaN = Opc == ISD::FMINIMUM || Opc == ISD::FMAXIMUM;
14803 bool IsMin = Opc == ISD::FMINNUM || Opc == ISD::FMINIMUM;
14804 SelectionDAG::FlagInserter FlagsInserter(DAG, N);
14805
14806 if (N0CFP && N1CFP) {
14807 const APFloat &C0 = N0CFP->getValueAPF();
14808 const APFloat &C1 = N1CFP->getValueAPF();
14809 return DAG.getConstantFP(Op(C0, C1), SDLoc(N), VT);
14810 }
14811
14812 // Canonicalize to constant on RHS.
14813 if (DAG.isConstantFPBuildVectorOrConstantFP(N0) &&
14814 !DAG.isConstantFPBuildVectorOrConstantFP(N1))
14815 return DAG.getNode(N->getOpcode(), SDLoc(N), VT, N1, N0);
14816
14817 if (N1CFP) {
14818 const APFloat &AF = N1CFP->getValueAPF();
14819
14820 // minnum(X, nan) -> X
14821 // maxnum(X, nan) -> X
14822 // minimum(X, nan) -> nan
14823 // maximum(X, nan) -> nan
14824 if (AF.isNaN())
14825 return PropagatesNaN ? N->getOperand(1) : N->getOperand(0);
14826
14827 // In the following folds, inf can be replaced with the largest finite
14828 // float, if the ninf flag is set.
14829 if (AF.isInfinity() || (Flags.hasNoInfs() && AF.isLargest())) {
14830 // minnum(X, -inf) -> -inf
14831 // maxnum(X, +inf) -> +inf
14832 // minimum(X, -inf) -> -inf if nnan
14833 // maximum(X, +inf) -> +inf if nnan
14834 if (IsMin == AF.isNegative() && (!PropagatesNaN || Flags.hasNoNaNs()))
14835 return N->getOperand(1);
14836
14837 // minnum(X, +inf) -> X if nnan
14838 // maxnum(X, -inf) -> X if nnan
14839 // minimum(X, +inf) -> X
14840 // maximum(X, -inf) -> X
14841 if (IsMin != AF.isNegative() && (PropagatesNaN || Flags.hasNoNaNs()))
14842 return N->getOperand(0);
14843 }
14844 }
14845
14846 return SDValue();
14847}
14848
14849SDValue DAGCombiner::visitFMINNUM(SDNode *N) {
14850 return visitFMinMax(DAG, N, minnum);
14851}
14852
14853SDValue DAGCombiner::visitFMAXNUM(SDNode *N) {
14854 return visitFMinMax(DAG, N, maxnum);
14855}
14856
14857SDValue DAGCombiner::visitFMINIMUM(SDNode *N) {
14858 return visitFMinMax(DAG, N, minimum);
14859}
14860
14861SDValue DAGCombiner::visitFMAXIMUM(SDNode *N) {
14862 return visitFMinMax(DAG, N, maximum);
14863}
14864
14865SDValue DAGCombiner::visitFABS(SDNode *N) {
14866 SDValue N0 = N->getOperand(0);
14867 EVT VT = N->getValueType(0);
14868
14869 // fold (fabs c1) -> fabs(c1)
14870 if (DAG.isConstantFPBuildVectorOrConstantFP(N0))
14871 return DAG.getNode(ISD::FABS, SDLoc(N), VT, N0);
14872
14873 // fold (fabs (fabs x)) -> (fabs x)
14874 if (N0.getOpcode() == ISD::FABS)
14875 return N->getOperand(0);
14876
14877 // fold (fabs (fneg x)) -> (fabs x)
14878 // fold (fabs (fcopysign x, y)) -> (fabs x)
14879 if (N0.getOpcode() == ISD::FNEG || N0.getOpcode() == ISD::FCOPYSIGN)
14880 return DAG.getNode(ISD::FABS, SDLoc(N), VT, N0.getOperand(0));
14881
14882 if (SDValue Cast = foldSignChangeInBitcast(N))
14883 return Cast;
14884
14885 return SDValue();
14886}
14887
14888SDValue DAGCombiner::visitBRCOND(SDNode *N) {
14889 SDValue Chain = N->getOperand(0);
14890 SDValue N1 = N->getOperand(1);
14891 SDValue N2 = N->getOperand(2);
14892
14893 // BRCOND(FREEZE(cond)) is equivalent to BRCOND(cond) (both are
14894 // nondeterministic jumps).
14895 if (N1->getOpcode() == ISD::FREEZE && N1.hasOneUse()) {
14896 return DAG.getNode(ISD::BRCOND, SDLoc(N), MVT::Other, Chain,
14897 N1->getOperand(0), N2);
14898 }
14899
14900 // If N is a constant we could fold this into a fallthrough or unconditional
14901 // branch. However that doesn't happen very often in normal code, because
14902 // Instcombine/SimplifyCFG should have handled the available opportunities.
14903 // If we did this folding here, it would be necessary to update the
14904 // MachineBasicBlock CFG, which is awkward.
14905
14906 // fold a brcond with a setcc condition into a BR_CC node if BR_CC is legal
14907 // on the target.
14908 if (N1.getOpcode() == ISD::SETCC &&
14909 TLI.isOperationLegalOrCustom(ISD::BR_CC,
14910 N1.getOperand(0).getValueType())) {
14911 return DAG.getNode(ISD::BR_CC, SDLoc(N), MVT::Other,
14912 Chain, N1.getOperand(2),
14913 N1.getOperand(0), N1.getOperand(1), N2);
14914 }
14915
14916 if (N1.hasOneUse()) {
14917 // rebuildSetCC calls visitXor which may change the Chain when there is a
14918 // STRICT_FSETCC/STRICT_FSETCCS involved. Use a handle to track changes.
14919 HandleSDNode ChainHandle(Chain);
14920 if (SDValue NewN1 = rebuildSetCC(N1))
14921 return DAG.getNode(ISD::BRCOND, SDLoc(N), MVT::Other,
14922 ChainHandle.getValue(), NewN1, N2);
14923 }
14924
14925 return SDValue();
14926}
14927
14928SDValue DAGCombiner::rebuildSetCC(SDValue N) {
14929 if (N.getOpcode() == ISD::SRL ||
14930 (N.getOpcode() == ISD::TRUNCATE &&
14931 (N.getOperand(0).hasOneUse() &&
14932 N.getOperand(0).getOpcode() == ISD::SRL))) {
14933 // Look past the truncate.
14934 if (N.getOpcode() == ISD::TRUNCATE)
14935 N = N.getOperand(0);
14936
14937 // Match this pattern so that we can generate simpler code:
14938 //
14939 // %a = ...
14940 // %b = and i32 %a, 2
14941 // %c = srl i32 %b, 1
14942 // brcond i32 %c ...
14943 //
14944 // into
14945 //
14946 // %a = ...
14947 // %b = and i32 %a, 2
14948 // %c = setcc eq %b, 0
14949 // brcond %c ...
14950 //
14951 // This applies only when the AND constant value has one bit set and the
14952 // SRL constant is equal to the log2 of the AND constant. The back-end is
14953 // smart enough to convert the result into a TEST/JMP sequence.
14954 SDValue Op0 = N.getOperand(0);
14955 SDValue Op1 = N.getOperand(1);
14956
14957 if (Op0.getOpcode() == ISD::AND && Op1.getOpcode() == ISD::Constant) {
14958 SDValue AndOp1 = Op0.getOperand(1);
14959
14960 if (AndOp1.getOpcode() == ISD::Constant) {
14961 const APInt &AndConst = cast<ConstantSDNode>(AndOp1)->getAPIntValue();
14962
14963 if (AndConst.isPowerOf2() &&
14964 cast<ConstantSDNode>(Op1)->getAPIntValue() == AndConst.logBase2()) {
14965 SDLoc DL(N);
14966 return DAG.getSetCC(DL, getSetCCResultType(Op0.getValueType()),
14967 Op0, DAG.getConstant(0, DL, Op0.getValueType()),
14968 ISD::SETNE);
14969 }
14970 }
14971 }
14972 }
14973
14974 // Transform (brcond (xor x, y)) -> (brcond (setcc, x, y, ne))
14975 // Transform (brcond (xor (xor x, y), -1)) -> (brcond (setcc, x, y, eq))
14976 if (N.getOpcode() == ISD::XOR) {
14977 // Because we may call this on a speculatively constructed
14978 // SimplifiedSetCC Node, we need to simplify this node first.
14979 // Ideally this should be folded into SimplifySetCC and not
14980 // here. For now, grab a handle to N so we don't lose it from
14981 // replacements internal to the visit.
14982 HandleSDNode XORHandle(N);
14983 while (N.getOpcode() == ISD::XOR) {
14984 SDValue Tmp = visitXOR(N.getNode());
14985 // No simplification done.
14986 if (!Tmp.getNode())
14987 break;
14988 // Returning N is a form of in-visit replacement that may invalidate
14989 // N. Grab the value from the handle.
14990 if (Tmp.getNode() == N.getNode())
14991 N = XORHandle.getValue();
14992 else // Node simplified. Try simplifying again.
14993 N = Tmp;
14994 }
14995
14996 if (N.getOpcode() != ISD::XOR)
14997 return N;
14998
14999 SDValue Op0 = N->getOperand(0);
15000 SDValue Op1 = N->getOperand(1);
15001
15002 if (Op0.getOpcode() != ISD::SETCC && Op1.getOpcode() != ISD::SETCC) {
15003 bool Equal = false;
15004 // (brcond (xor (xor x, y), -1)) -> (brcond (setcc x, y, eq))
15005 if (isBitwiseNot(N) && Op0.hasOneUse() && Op0.getOpcode() == ISD::XOR &&
15006 Op0.getValueType() == MVT::i1) {
15007 N = Op0;
15008 Op0 = N->getOperand(0);
15009 Op1 = N->getOperand(1);
15010 Equal = true;
15011 }
15012
15013 EVT SetCCVT = N.getValueType();
15014 if (LegalTypes)
15015 SetCCVT = getSetCCResultType(SetCCVT);
15016 // Replace the uses of XOR with SETCC
15017 return DAG.getSetCC(SDLoc(N), SetCCVT, Op0, Op1,
15018 Equal ? ISD::SETEQ : ISD::SETNE);
15019 }
15020 }
15021
15022 return SDValue();
15023}
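// Concrete instance of the AND/SRL rewrite above: for %b = and i32 %a, 2
// and %c = srl i32 %b, 1, the AND constant is a power of two and
// log2(2) = 1 matches the shift amount, so %c is nonzero exactly when
// (and %a, 2) is nonzero, and the branch condition becomes
// setcc ne (and %a, 2), 0, which the back-end turns into a TEST/JMP pair.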
15024
15025// Operand List for BR_CC: Chain, CondCC, CondLHS, CondRHS, DestBB.
15026//
15027SDValue DAGCombiner::visitBR_CC(SDNode *N) {
15028 CondCodeSDNode *CC = cast<CondCodeSDNode>(N->getOperand(1));
15029 SDValue CondLHS = N->getOperand(2), CondRHS = N->getOperand(3);
15030
15031 // If N is a constant we could fold this into a fallthrough or unconditional
15032 // branch. However that doesn't happen very often in normal code, because
15033 // Instcombine/SimplifyCFG should have handled the available opportunities.
15034 // If we did this folding here, it would be necessary to update the
15035 // MachineBasicBlock CFG, which is awkward.
15036
15037 // Use SimplifySetCC to simplify SETCC's.
15038 SDValue Simp = SimplifySetCC(getSetCCResultType(CondLHS.getValueType()),
15039 CondLHS, CondRHS, CC->get(), SDLoc(N),
15040 false);
15041 if (Simp.getNode()) AddToWorklist(Simp.getNode());
15042
15043 // fold to a simpler setcc
15044 if (Simp.getNode() && Simp.getOpcode() == ISD::SETCC)
15045 return DAG.getNode(ISD::BR_CC, SDLoc(N), MVT::Other,
15046 N->getOperand(0), Simp.getOperand(2),
15047 Simp.getOperand(0), Simp.getOperand(1),
15048 N->getOperand(4));
15049
15050 return SDValue();
15051}
15052
15053static bool getCombineLoadStoreParts(SDNode *N, unsigned Inc, unsigned Dec,
15054 bool &IsLoad, bool &IsMasked, SDValue &Ptr,
15055 const TargetLowering &TLI) {
15056 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
15057 if (LD->isIndexed())
15058 return false;
15059 EVT VT = LD->getMemoryVT();
15060 if (!TLI.isIndexedLoadLegal(Inc, VT) && !TLI.isIndexedLoadLegal(Dec, VT))
15061 return false;
15062 Ptr = LD->getBasePtr();
15063 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
15064 if (ST->isIndexed())
15065 return false;
15066 EVT VT = ST->getMemoryVT();
15067 if (!TLI.isIndexedStoreLegal(Inc, VT) && !TLI.isIndexedStoreLegal(Dec, VT))
15068 return false;
15069 Ptr = ST->getBasePtr();
15070 IsLoad = false;
15071 } else if (MaskedLoadSDNode *LD = dyn_cast<MaskedLoadSDNode>(N)) {
15072 if (LD->isIndexed())
15073 return false;
15074 EVT VT = LD->getMemoryVT();
15075 if (!TLI.isIndexedMaskedLoadLegal(Inc, VT) &&
15076 !TLI.isIndexedMaskedLoadLegal(Dec, VT))
15077 return false;
15078 Ptr = LD->getBasePtr();
15079 IsMasked = true;
15080 } else if (MaskedStoreSDNode *ST = dyn_cast<MaskedStoreSDNode>(N)) {
15081 if (ST->isIndexed())
15082 return false;
15083 EVT VT = ST->getMemoryVT();
15084 if (!TLI.isIndexedMaskedStoreLegal(Inc, VT) &&
15085 !TLI.isIndexedMaskedStoreLegal(Dec, VT))
15086 return false;
15087 Ptr = ST->getBasePtr();
15088 IsLoad = false;
15089 IsMasked = true;
15090 } else {
15091 return false;
15092 }
15093 return true;
15094}
15095
15096/// Try turning a load/store into a pre-indexed load/store when the base
15097/// pointer is an add or subtract and it has other uses besides the load/store.
15098/// After the transformation, the new indexed load/store has effectively folded
15099/// the add/subtract in and all of its other uses are redirected to the
15100/// new load/store.
15101bool DAGCombiner::CombineToPreIndexedLoadStore(SDNode *N) {
15102 if (Level < AfterLegalizeDAG)
15103 return false;
15104
15105 bool IsLoad = true;
15106 bool IsMasked = false;
15107 SDValue Ptr;
15108 if (!getCombineLoadStoreParts(N, ISD::PRE_INC, ISD::PRE_DEC, IsLoad, IsMasked,
15109 Ptr, TLI))
15110 return false;
15111
15112 // If the pointer is not an add/sub, or if it doesn't have multiple uses, bail
15113 // out. There is no reason to make this a preinc/predec.
15114 if ((Ptr.getOpcode() != ISD::ADD && Ptr.getOpcode() != ISD::SUB) ||
15115 Ptr.getNode()->hasOneUse())
15116 return false;
15117
15118 // Ask the target to do addressing mode selection.
15119 SDValue BasePtr;
15120 SDValue Offset;
15121 ISD::MemIndexedMode AM = ISD::UNINDEXED;
15122 if (!TLI.getPreIndexedAddressParts(N, BasePtr, Offset, AM, DAG))
15123 return false;
15124
15125 // Backends without true r+i pre-indexed forms may need to pass a
15126 // constant base with a variable offset so that constant coercion
15127 // will work with the patterns in canonical form.
15128 bool Swapped = false;
15129 if (isa<ConstantSDNode>(BasePtr)) {
15130 std::swap(BasePtr, Offset);
15131 Swapped = true;
15132 }
15133
15134 // Don't create an indexed load / store with zero offset.
15135 if (isNullConstant(Offset))
15136 return false;
15137
15138 // Try turning it into a pre-indexed load / store except when:
15139 // 1) The new base ptr is a frame index.
15140 // 2) If N is a store and the new base ptr is either the same as or is a
15141 // predecessor of the value being stored.
15142 // 3) Another use of old base ptr is a predecessor of N. If ptr is folded
15143 // that would create a cycle.
15144 // 4) All uses are load / store ops that use it as old base ptr.
15145
15146 // Check #1. Preinc'ing a frame index would require copying the stack pointer
15147 // (plus the implicit offset) to a register to preinc anyway.
15148 if (isa<FrameIndexSDNode>(BasePtr) || isa<RegisterSDNode>(BasePtr))
15149 return false;
15150
15151 // Check #2.
15152 if (!IsLoad) {
15153 SDValue Val = IsMasked ? cast<MaskedStoreSDNode>(N)->getValue()
15154 : cast<StoreSDNode>(N)->getValue();
15155
15156 // Would require a copy.
15157 if (Val == BasePtr)
15158 return false;
15159
15160 // Would create a cycle.
15161 if (Val == Ptr || Ptr->isPredecessorOf(Val.getNode()))
15162 return false;
15163 }
15164
15165 // Caches for hasPredecessorHelper.
15166 SmallPtrSet<const SDNode *, 32> Visited;
15167 SmallVector<const SDNode *, 16> Worklist;
15168 Worklist.push_back(N);
15169
15170 // If the offset is a constant, there may be other adds of constants that
15171 // can be folded with this one. We should do this to avoid having to keep
15172 // a copy of the original base pointer.
15173 SmallVector<SDNode *, 16> OtherUses;
15174 if (isa<ConstantSDNode>(Offset))
15175 for (SDNode::use_iterator UI = BasePtr.getNode()->use_begin(),
15176 UE = BasePtr.getNode()->use_end();
15177 UI != UE; ++UI) {
15178 SDUse &Use = UI.getUse();
15179 // Skip the use that is Ptr and uses of other results from BasePtr's
15180 // node (important for nodes that return multiple results).
15181 if (Use.getUser() == Ptr.getNode() || Use != BasePtr)
15182 continue;
15183
15184 if (SDNode::hasPredecessorHelper(Use.getUser(), Visited, Worklist))
15185 continue;
15186
15187 if (Use.getUser()->getOpcode() != ISD::ADD &&
15188 Use.getUser()->getOpcode() != ISD::SUB) {
15189 OtherUses.clear();
15190 break;
15191 }
15192
15193 SDValue Op1 = Use.getUser()->getOperand((UI.getOperandNo() + 1) & 1);
15194 if (!isa<ConstantSDNode>(Op1)) {
15195 OtherUses.clear();
15196 break;
15197 }
15198
15199 // FIXME: In some cases, we can be smarter about this.
15200 if (Op1.getValueType() != Offset.getValueType()) {
15201 OtherUses.clear();
15202 break;
15203 }
15204
15205 OtherUses.push_back(Use.getUser());
15206 }
15207
15208 if (Swapped)
15209 std::swap(BasePtr, Offset);
15210
15211 // Now check for #3 and #4.
15212 bool RealUse = false;
15213
15214 for (SDNode *Use : Ptr.getNode()->uses()) {
15215 if (Use == N)
15216 continue;
15217 if (SDNode::hasPredecessorHelper(Use, Visited, Worklist))
15218 return false;
15219
15220 // If Ptr may be folded in addressing mode of other use, then it's
15221 // not profitable to do this transformation.
15222 if (!canFoldInAddressingMode(Ptr.getNode(), Use, DAG, TLI))
15223 RealUse = true;
15224 }
15225
15226 if (!RealUse)
15227 return false;
15228
15229 SDValue Result;
15230 if (!IsMasked) {
15231 if (IsLoad)
15232 Result = DAG.getIndexedLoad(SDValue(N, 0), SDLoc(N), BasePtr, Offset, AM);
15233 else
15234 Result =
15235 DAG.getIndexedStore(SDValue(N, 0), SDLoc(N), BasePtr, Offset, AM);
15236 } else {
15237 if (IsLoad)
15238 Result = DAG.getIndexedMaskedLoad(SDValue(N, 0), SDLoc(N), BasePtr,
15239 Offset, AM);
15240 else
15241 Result = DAG.getIndexedMaskedStore(SDValue(N, 0), SDLoc(N), BasePtr,
15242 Offset, AM);
15243 }
15244 ++PreIndexedNodes;
15245 ++NodesCombined;
15246  LLVM_DEBUG(dbgs() << "\nReplacing.4 "; N->dump(&DAG); dbgs() << "\nWith: ";
15247             Result.getNode()->dump(&DAG); dbgs() << '\n');
15248 WorklistRemover DeadNodes(*this);
15249 if (IsLoad) {
15250 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(0));
15251 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Result.getValue(2));
15252 } else {
15253 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(1));
15254 }
15255
15256 // Finally, since the node is now dead, remove it from the graph.
15257 deleteAndRecombine(N);
15258
15259 if (Swapped)
15260 std::swap(BasePtr, Offset);
15261
15262 // Replace other uses of BasePtr that can be updated to use Ptr
15263 for (unsigned i = 0, e = OtherUses.size(); i != e; ++i) {
15264 unsigned OffsetIdx = 1;
15265 if (OtherUses[i]->getOperand(OffsetIdx).getNode() == BasePtr.getNode())
15266 OffsetIdx = 0;
15267    assert(OtherUses[i]->getOperand(!OffsetIdx).getNode() ==
15268               BasePtr.getNode() && "Expected BasePtr operand");
15269
15270 // We need to replace ptr0 in the following expression:
15271 // x0 * offset0 + y0 * ptr0 = t0
15272 // knowing that
15273 // x1 * offset1 + y1 * ptr0 = t1 (the indexed load/store)
15274 //
15275 // where x0, x1, y0 and y1 in {-1, 1} are given by the types of the
15276 // indexed load/store and the expression that needs to be re-written.
15277 //
15278 // Therefore, we have:
15279 //   t0 = (x0 * offset0 - x1 * y0 * y1 * offset1) + (y0 * y1) * t1
15280
15281 auto *CN = cast<ConstantSDNode>(OtherUses[i]->getOperand(OffsetIdx));
15282 const APInt &Offset0 = CN->getAPIntValue();
15283 const APInt &Offset1 = cast<ConstantSDNode>(Offset)->getAPIntValue();
15284 int X0 = (OtherUses[i]->getOpcode() == ISD::SUB && OffsetIdx == 1) ? -1 : 1;
15285 int Y0 = (OtherUses[i]->getOpcode() == ISD::SUB && OffsetIdx == 0) ? -1 : 1;
15286 int X1 = (AM == ISD::PRE_DEC && !Swapped) ? -1 : 1;
15287 int Y1 = (AM == ISD::PRE_DEC && Swapped) ? -1 : 1;
15288
15289 unsigned Opcode = (Y0 * Y1 < 0) ? ISD::SUB : ISD::ADD;
15290
15291 APInt CNV = Offset0;
15292 if (X0 < 0) CNV = -CNV;
15293 if (X1 * Y0 * Y1 < 0) CNV = CNV + Offset1;
15294 else CNV = CNV - Offset1;
15295
15296 SDLoc DL(OtherUses[i]);
15297
15298 // We can now generate the new expression.
15299 SDValue NewOp1 = DAG.getConstant(CNV, DL, CN->getValueType(0));
15300 SDValue NewOp2 = Result.getValue(IsLoad ? 1 : 0);
15301
15302 SDValue NewUse = DAG.getNode(Opcode,
15303 DL,
15304 OtherUses[i]->getValueType(0), NewOp1, NewOp2);
15305 DAG.ReplaceAllUsesOfValueWith(SDValue(OtherUses[i], 0), NewUse);
15306 deleteAndRecombine(OtherUses[i]);
15307 }
15308
15309 // Replace the uses of Ptr with uses of the updated base value.
15310 DAG.ReplaceAllUsesOfValueWith(Ptr, Result.getValue(IsLoad ? 1 : 0));
15311 deleteAndRecombine(Ptr.getNode());
15312 AddToWorklist(Result.getNode());
15313
15314 return true;
15315}
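// Numeric check of the rewrite formula above (offsets chosen for
// exposition): for a plain add t0 = ptr0 + offset0 (x0 = y0 = 1) rewritten
// against a pre-increment t1 = ptr0 + offset1 (x1 = y1 = 1), the formula
// reduces to t0 = (offset0 - offset1) + t1; with offset0 = 12 and
// offset1 = 4 this gives t0 = t1 + 8, so no copy of the original base
// pointer needs to stay live.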
15316
15317static bool shouldCombineToPostInc(SDNode *N, SDValue Ptr, SDNode *PtrUse,
15318 SDValue &BasePtr, SDValue &Offset,
15319 ISD::MemIndexedMode &AM,
15320 SelectionDAG &DAG,
15321 const TargetLowering &TLI) {
15322 if (PtrUse == N ||
15323 (PtrUse->getOpcode() != ISD::ADD && PtrUse->getOpcode() != ISD::SUB))
15324 return false;
15325
15326 if (!TLI.getPostIndexedAddressParts(N, PtrUse, BasePtr, Offset, AM, DAG))
15327 return false;
15328
15329 // Don't create an indexed load / store with zero offset.
15330 if (isNullConstant(Offset))
15331 return false;
15332
15333 if (isa<FrameIndexSDNode>(BasePtr) || isa<RegisterSDNode>(BasePtr))
15334 return false;
15335
15336 SmallPtrSet<const SDNode *, 32> Visited;
15337 for (SDNode *Use : BasePtr.getNode()->uses()) {
15338 if (Use == Ptr.getNode())
15339 continue;
15340
15341 // Bail out if there's a later user which could perform the index instead.
15342 if (isa<MemSDNode>(Use)) {
15343 bool IsLoad = true;
15344 bool IsMasked = false;
15345 SDValue OtherPtr;
15346 if (getCombineLoadStoreParts(Use, ISD::POST_INC, ISD::POST_DEC, IsLoad,
15347 IsMasked, OtherPtr, TLI)) {
15348 SmallVector<const SDNode *, 2> Worklist;
15349 Worklist.push_back(Use);
15350 if (SDNode::hasPredecessorHelper(N, Visited, Worklist))
15351 return false;
15352 }
15353 }
15354
15355 // If all the uses are load / store addresses, then don't do the
15356 // transformation.
15357 if (Use->getOpcode() == ISD::ADD || Use->getOpcode() == ISD::SUB) {
15358 for (SDNode *UseUse : Use->uses())
15359 if (canFoldInAddressingMode(Use, UseUse, DAG, TLI))
15360 return false;
15361 }
15362 }
15363 return true;
15364}
15365
15366static SDNode *getPostIndexedLoadStoreOp(SDNode *N, bool &IsLoad,
15367 bool &IsMasked, SDValue &Ptr,
15368 SDValue &BasePtr, SDValue &Offset,
15369 ISD::MemIndexedMode &AM,
15370 SelectionDAG &DAG,
15371 const TargetLowering &TLI) {
15372 if (!getCombineLoadStoreParts(N, ISD::POST_INC, ISD::POST_DEC, IsLoad,
15373 IsMasked, Ptr, TLI) ||
15374 Ptr.getNode()->hasOneUse())
15375 return nullptr;
15376
15377 // Try turning it into a post-indexed load / store except when
15378 // 1) All uses are load / store ops that use it as base ptr (and
15379 // it may be folded as addressing mode).
15380 // 2) Op must be independent of N, i.e. Op is neither a predecessor
15381 // nor a successor of N. Otherwise, if Op is folded that would
15382 // create a cycle.
15383 for (SDNode *Op : Ptr->uses()) {
15384 // Check for #1.
15385 if (!shouldCombineToPostInc(N, Ptr, Op, BasePtr, Offset, AM, DAG, TLI))
15386 continue;
15387
15388 // Check for #2.
15389 SmallPtrSet<const SDNode *, 32> Visited;
15390 SmallVector<const SDNode *, 8> Worklist;
15391 // Ptr is predecessor to both N and Op.
15392 Visited.insert(Ptr.getNode());
15393 Worklist.push_back(N);
15394 Worklist.push_back(Op);
15395 if (!SDNode::hasPredecessorHelper(N, Visited, Worklist) &&
15396 !SDNode::hasPredecessorHelper(Op, Visited, Worklist))
15397 return Op;
15398 }
15399 return nullptr;
15400}
15401
15402/// Try to combine a load/store with an add/sub of the base pointer node into
15403/// a post-indexed load/store. The transformation effectively folds the
15404/// add/subtract into the new indexed load/store, and all other uses of the
15405/// add/subtract are redirected to the new load/store.
15406bool DAGCombiner::CombineToPostIndexedLoadStore(SDNode *N) {
15407 if (Level < AfterLegalizeDAG)
15408 return false;
15409
15410 bool IsLoad = true;
15411 bool IsMasked = false;
15412 SDValue Ptr;
15413 SDValue BasePtr;
15414 SDValue Offset;
15415 ISD::MemIndexedMode AM = ISD::UNINDEXED;
15416 SDNode *Op = getPostIndexedLoadStoreOp(N, IsLoad, IsMasked, Ptr, BasePtr,
15417 Offset, AM, DAG, TLI);
15418 if (!Op)
15419 return false;
15420
15421 SDValue Result;
15422 if (!IsMasked)
15423 Result = IsLoad ? DAG.getIndexedLoad(SDValue(N, 0), SDLoc(N), BasePtr,
15424 Offset, AM)
15425 : DAG.getIndexedStore(SDValue(N, 0), SDLoc(N),
15426 BasePtr, Offset, AM);
15427 else
15428 Result = IsLoad ? DAG.getIndexedMaskedLoad(SDValue(N, 0), SDLoc(N),
15429 BasePtr, Offset, AM)
15430 : DAG.getIndexedMaskedStore(SDValue(N, 0), SDLoc(N),
15431 BasePtr, Offset, AM);
15432 ++PostIndexedNodes;
15433 ++NodesCombined;
15434  LLVM_DEBUG(dbgs() << "\nReplacing.5 "; N->dump(&DAG);
15435             dbgs() << "\nWith: "; Result.getNode()->dump(&DAG);
15436             dbgs() << '\n');
15437 WorklistRemover DeadNodes(*this);
15438 if (IsLoad) {
15439 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(0));
15440 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Result.getValue(2));
15441 } else {
15442 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(1));
15443 }
15444
15445 // Finally, since the node is now dead, remove it from the graph.
15446 deleteAndRecombine(N);
15447
15448 // Replace the uses of Use with uses of the updated base value.
15449 DAG.ReplaceAllUsesOfValueWith(SDValue(Op, 0),
15450 Result.getValue(IsLoad ? 1 : 0));
15451 deleteAndRecombine(Op);
15452 return true;
15453}
15454
15455/// Return the base-pointer arithmetic from an indexed \p LD.
15456SDValue DAGCombiner::SplitIndexingFromLoad(LoadSDNode *LD) {
15457 ISD::MemIndexedMode AM = LD->getAddressingMode();
15458  assert(AM != ISD::UNINDEXED);
15459 SDValue BP = LD->getOperand(1);
15460 SDValue Inc = LD->getOperand(2);
15461
15462 // Some backends use TargetConstants for load offsets, but don't expect
15463 // TargetConstants in general ADD nodes. We can convert these constants into
15464 // regular Constants (if the constant is not opaque).
15465  assert((Inc.getOpcode() != ISD::TargetConstant ||
15466          !cast<ConstantSDNode>(Inc)->isOpaque()) &&
15467         "Cannot split out indexing using opaque target constants");
15468 if (Inc.getOpcode() == ISD::TargetConstant) {
15469 ConstantSDNode *ConstInc = cast<ConstantSDNode>(Inc);
15470 Inc = DAG.getConstant(*ConstInc->getConstantIntValue(), SDLoc(Inc),
15471 ConstInc->getValueType(0));
15472 }
15473
15474 unsigned Opc =
15475 (AM == ISD::PRE_INC || AM == ISD::POST_INC ? ISD::ADD : ISD::SUB);
15476 return DAG.getNode(Opc, SDLoc(LD), BP.getSimpleValueType(), BP, Inc);
15477}
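// For example (values chosen for illustration): a PRE_INC or POST_INC load
// with base BP and increment 8 splits into BP' = add BP, 8, while the
// *_DEC modes select ISD::SUB instead, matching the Opc computation above.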
15478
15479static inline ElementCount numVectorEltsOrZero(EVT T) {
15480 return T.isVector() ? T.getVectorElementCount() : ElementCount::getFixed(0);
15481}
15482
15483bool DAGCombiner::getTruncatedStoreValue(StoreSDNode *ST, SDValue &Val) {
15484 Val = ST->getValue();
15485 EVT STType = Val.getValueType();
15486 EVT STMemType = ST->getMemoryVT();
15487 if (STType == STMemType)
15488 return true;
15489 if (isTypeLegal(STMemType))
15490 return false; // fail.
15491 if (STType.isFloatingPoint() && STMemType.isFloatingPoint() &&
15492 TLI.isOperationLegal(ISD::FTRUNC, STMemType)) {
15493 Val = DAG.getNode(ISD::FTRUNC, SDLoc(ST), STMemType, Val);
15494 return true;
15495 }
15496 if (numVectorEltsOrZero(STType) == numVectorEltsOrZero(STMemType) &&
15497 STType.isInteger() && STMemType.isInteger()) {
15498 Val = DAG.getNode(ISD::TRUNCATE, SDLoc(ST), STMemType, Val);
15499 return true;
15500 }
15501 if (STType.getSizeInBits() == STMemType.getSizeInBits()) {
15502 Val = DAG.getBitcast(STMemType, Val);
15503 return true;
15504 }
15505 return false; // fail.
15506}
15507
15508bool DAGCombiner::extendLoadedValueToExtension(LoadSDNode *LD, SDValue &Val) {
15509 EVT LDMemType = LD->getMemoryVT();
15510 EVT LDType = LD->getValueType(0);
15511 assert(Val.getValueType() == LDMemType &&
15512 "Attempting to extend value of non-matching type");
15513 if (LDType == LDMemType)
15514 return true;
15515 if (LDMemType.isInteger() && LDType.isInteger()) {
15516 switch (LD->getExtensionType()) {
15517 case ISD::NON_EXTLOAD:
15518 Val = DAG.getBitcast(LDType, Val);
15519 return true;
15520 case ISD::EXTLOAD:
15521 Val = DAG.getNode(ISD::ANY_EXTEND, SDLoc(LD), LDType, Val);
15522 return true;
15523 case ISD::SEXTLOAD:
15524 Val = DAG.getNode(ISD::SIGN_EXTEND, SDLoc(LD), LDType, Val);
15525 return true;
15526 case ISD::ZEXTLOAD:
15527 Val = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(LD), LDType, Val);
15528 return true;
15529 }
15530 }
15531 return false;
15532}
15533
15534SDValue DAGCombiner::ForwardStoreValueToDirectLoad(LoadSDNode *LD) {
15535 if (OptLevel == CodeGenOpt::None || !LD->isSimple())
15536 return SDValue();
15537 SDValue Chain = LD->getOperand(0);
15538 StoreSDNode *ST = dyn_cast<StoreSDNode>(Chain.getNode());
15539 // TODO: Relax this restriction for unordered atomics (see D66309)
15540 if (!ST || !ST->isSimple())
15541 return SDValue();
15542
15543 EVT LDType = LD->getValueType(0);
15544 EVT LDMemType = LD->getMemoryVT();
15545 EVT STMemType = ST->getMemoryVT();
15546 EVT STType = ST->getValue().getValueType();
15547
15548 // There are two cases to consider here:
15549 // 1. The store is fixed width and the load is scalable. In this case we
15550 // don't know at compile time if the store completely envelops the load
15551 // so we abandon the optimisation.
15552 // 2. The store is scalable and the load is fixed width. We could
15553 // potentially support a limited number of cases here, but there has been
15554 // no cost-benefit analysis to prove it's worth it.
15555 bool LdStScalable = LDMemType.isScalableVector();
15556 if (LdStScalable != STMemType.isScalableVector())
15557 return SDValue();
15558
15559 // If we are dealing with scalable vectors on a big endian platform the
15560 // calculation of offsets below becomes trickier, since we do not know at
15561 // compile time the absolute size of the vector. Until we've done more
15562 // analysis on big-endian platforms it seems better to bail out for now.
15563 if (LdStScalable && DAG.getDataLayout().isBigEndian())
15564 return SDValue();
15565
15566 BaseIndexOffset BasePtrLD = BaseIndexOffset::match(LD, DAG);
15567 BaseIndexOffset BasePtrST = BaseIndexOffset::match(ST, DAG);
15568 int64_t Offset;
15569 if (!BasePtrST.equalBaseIndex(BasePtrLD, DAG, Offset))
15570 return SDValue();
15571
15572 // Normalize for Endianness. After this Offset=0 will denote that the least
15573 // significant bit in the loaded value maps to the least significant bit in
15574 // the stored value. With Offset=n (for n > 0) the loaded value starts at the
15575 // n-th least significant byte of the stored value.
15576 if (DAG.getDataLayout().isBigEndian())
15577 Offset = ((int64_t)STMemType.getStoreSizeInBits().getFixedSize() -
15578 (int64_t)LDMemType.getStoreSizeInBits().getFixedSize()) /
15579 8 -
15580 Offset;
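// [Editor's note] Hedged worked example of the normalization above, an
// editor's addition: on a big-endian target, a 4-byte store and a 1-byte
// load at byte-address offset 3 yield (4 - 1) - 3 == 0, i.e. the load reads
// the least significant byte of the stored value, which is exactly what
// Offset == 0 means in the comment above.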
15581
15582 // Check that the stored value covers all bits that are loaded.
15583 bool STCoversLD;
15584
15585 TypeSize LdMemSize = LDMemType.getSizeInBits();
15586 TypeSize StMemSize = STMemType.getSizeInBits();
15587 if (LdStScalable)
15588 STCoversLD = (Offset == 0) && LdMemSize == StMemSize;
15589 else
15590 STCoversLD = (Offset >= 0) && (Offset * 8 + LdMemSize.getFixedSize() <=
15591 StMemSize.getFixedSize());
15592
15593 auto ReplaceLd = [&](LoadSDNode *LD, SDValue Val, SDValue Chain) -> SDValue {
15594 if (LD->isIndexed()) {
15595 // Cannot handle opaque target constants and we must respect the user's
15596 // request not to split indexes from loads.
15597 if (!canSplitIdx(LD))
15598 return SDValue();
15599 SDValue Idx = SplitIndexingFromLoad(LD);
15600 SDValue Ops[] = {Val, Idx, Chain};
15601 return CombineTo(LD, Ops, 3);
15602 }
15603 return CombineTo(LD, Val, Chain);
15604 };
15605
15606 if (!STCoversLD)
15607 return SDValue();
15608
15609 // Memory as copy space (potentially masked).
15610 if (Offset == 0 && LDType == STType && STMemType == LDMemType) {
15611 // Simple case: Direct non-truncating forwarding
15612 if (LDType.getSizeInBits() == LdMemSize)
15613 return ReplaceLd(LD, ST->getValue(), Chain);
15614 // Can we model the truncate and extension with an and mask?
15615 if (STType.isInteger() && LDMemType.isInteger() && !STType.isVector() &&
15616 !LDMemType.isVector() && LD->getExtensionType() != ISD::SEXTLOAD) {
15617 // Mask to size of LDMemType
15618 auto Mask =
15619 DAG.getConstant(APInt::getLowBitsSet(STType.getFixedSizeInBits(),
15620 StMemSize.getFixedSize()),
15621 SDLoc(ST), STType);
15622 auto Val = DAG.getNode(ISD::AND, SDLoc(LD), LDType, ST->getValue(), Mask);
15623 return ReplaceLd(LD, Val, Chain);
15624 }
15625 }
15626
15627 // TODO: Deal with nonzero offset.
15628 if (LD->getBasePtr().isUndef() || Offset != 0)
15629 return SDValue();
15630 // Model necessary truncations / extensions.
15631 SDValue Val;
15632 // Truncate the value to the stored memory size.
15633 do {
15634 if (!getTruncatedStoreValue(ST, Val))
15635 continue;
15636 if (!isTypeLegal(LDMemType))
15637 continue;
15638 if (STMemType != LDMemType) {
15639 // TODO: Support vectors? This requires extract_subvector/bitcast.
15640 if (!STMemType.isVector() && !LDMemType.isVector() &&
15641 STMemType.isInteger() && LDMemType.isInteger())
15642 Val = DAG.getNode(ISD::TRUNCATE, SDLoc(LD), LDMemType, Val);
15643 else
15644 continue;
15645 }
15646 if (!extendLoadedValueToExtension(LD, Val))
15647 continue;
15648 return ReplaceLd(LD, Val, Chain);
15649 } while (false);
15650
15651 // On failure, clean up dead nodes we may have created.
15652 if (Val->use_empty())
15653 deleteAndRecombine(Val.getNode());
15654 return SDValue();
15655}
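// [Editor's note] A hedged before/after sketch of the simplest forwarding
// case above, an editor's addition with illustrative node names:
//   ch1 = store<(store 4)> ch0, t0, %p   ; t0 : i32
//   v   = load<(load 4)> ch1, %p         ; same base, Offset == 0
// becomes
//   v   = t0                             ; chain uses of the load get ch1
// The masked variant instead rewrites v as (and t0, low-bits mask) when the
// memory type is narrower than the register type and the load does not
// sign-extend.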
15656
15657SDValue DAGCombiner::visitLOAD(SDNode *N) {
15658 LoadSDNode *LD = cast<LoadSDNode>(N);
15659 SDValue Chain = LD->getChain();
15660 SDValue Ptr = LD->getBasePtr();
15661
15662 // If load is not volatile and there are no uses of the loaded value (and
15663 // the updated indexed value in case of indexed loads), change uses of the
15664 // chain value into uses of the chain input (i.e. delete the dead load).
15665 // TODO: Allow this for unordered atomics (see D66309)
15666 if (LD->isSimple()) {
15667 if (N->getValueType(1) == MVT::Other) {
15668 // Unindexed loads.
15669 if (!N->hasAnyUseOfValue(0)) {
15670 // It's not safe to use the two value CombineTo variant here. e.g.
15671 // v1, chain2 = load chain1, loc
15672 // v2, chain3 = load chain2, loc
15673 // v3 = add v2, c
15674 // Now we replace use of chain2 with chain1. This makes the second load
15675 // isomorphic to the one we are deleting, and thus makes this load live.
15676 LLVM_DEBUG(dbgs() << "\nReplacing.6 "; N->dump(&DAG);
15677 dbgs() << "\nWith chain: "; Chain.getNode()->dump(&DAG);
15678 dbgs() << "\n");
15679 WorklistRemover DeadNodes(*this);
15680 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Chain);
15681 AddUsersToWorklist(Chain.getNode());
15682 if (N->use_empty())
15683 deleteAndRecombine(N);
15684
15685 return SDValue(N, 0); // Return N so it doesn't get rechecked!
15686 }
15687 } else {
15688 // Indexed loads.
15689 assert(N->getValueType(2) == MVT::Other && "Malformed indexed loads?");
15690
15691 // If this load has an opaque TargetConstant offset, then we cannot split
15692 // the indexing into an add/sub directly (that TargetConstant may not be
15693 // valid for a different type of node, and we cannot convert an opaque
15694 // target constant into a regular constant).
15695 bool CanSplitIdx = canSplitIdx(LD);
15696
15697 if (!N->hasAnyUseOfValue(0) && (CanSplitIdx || !N->hasAnyUseOfValue(1))) {
15698 SDValue Undef = DAG.getUNDEF(N->getValueType(0));
15699 SDValue Index;
15700 if (N->hasAnyUseOfValue(1) && CanSplitIdx) {
15701 Index = SplitIndexingFromLoad(LD);
15702 // Try to fold the base pointer arithmetic into subsequent loads and
15703 // stores.
15704 AddUsersToWorklist(N);
15705 } else
15706 Index = DAG.getUNDEF(N->getValueType(1));
15707 LLVM_DEBUG(dbgs() << "\nReplacing.7 "; N->dump(&DAG);
15708 dbgs() << "\nWith: "; Undef.getNode()->dump(&DAG);
15709 dbgs() << " and 2 other values\n");
15710 WorklistRemover DeadNodes(*this);
15711 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Undef);
15712 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Index);
15713 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 2), Chain);
15714 deleteAndRecombine(N);
15715 return SDValue(N, 0); // Return N so it doesn't get rechecked!
15716 }
15717 }
15718 }
15719
15720 // If this load is directly stored, replace the load value with the stored
15721 // value.
15722 if (auto V = ForwardStoreValueToDirectLoad(LD))
15723 return V;
15724
15725 // Try to infer better alignment information than the load already has.
15726 if (OptLevel != CodeGenOpt::None && LD->isUnindexed() && !LD->isAtomic()) {
15727 if (MaybeAlign Alignment = DAG.InferPtrAlign(Ptr)) {
15728 if (*Alignment > LD->getAlign() &&
15729 isAligned(*Alignment, LD->getSrcValueOffset())) {
15730 SDValue NewLoad = DAG.getExtLoad(
15731 LD->getExtensionType(), SDLoc(N), LD->getValueType(0), Chain, Ptr,
15732 LD->getPointerInfo(), LD->getMemoryVT(), *Alignment,
15733 LD->getMemOperand()->getFlags(), LD->getAAInfo());
15734 // NewLoad will always be N as we are only refining the alignment
15735 assert(NewLoad.getNode() == N);
15736 (void)NewLoad;
15737 }
15738 }
15739 }
15740
15741 if (LD->isUnindexed()) {
15742 // Walk up chain skipping non-aliasing memory nodes.
15743 SDValue BetterChain = FindBetterChain(LD, Chain);
15744
15745 // If there is a better chain.
15746 if (Chain != BetterChain) {
15747 SDValue ReplLoad;
15748
15749 // Replace the chain to void the dependency.
15750 if (LD->getExtensionType() == ISD::NON_EXTLOAD) {
15751 ReplLoad = DAG.getLoad(N->getValueType(0), SDLoc(LD),
15752 BetterChain, Ptr, LD->getMemOperand());
15753 } else {
15754 ReplLoad = DAG.getExtLoad(LD->getExtensionType(), SDLoc(LD),
15755 LD->getValueType(0),
15756 BetterChain, Ptr, LD->getMemoryVT(),
15757 LD->getMemOperand());
15758 }
15759
15760 // Create token factor to keep old chain connected.
15761 SDValue Token = DAG.getNode(ISD::TokenFactor, SDLoc(N),
15762 MVT::Other, Chain, ReplLoad.getValue(1));
15763
15764 // Replace uses with load result and token factor
15765 return CombineTo(N, ReplLoad.getValue(0), Token);
15766 }
15767 }
15768
15769 // Try transforming N to an indexed load.
15770 if (CombineToPreIndexedLoadStore(N) || CombineToPostIndexedLoadStore(N))
15771 return SDValue(N, 0);
15772
15773 // Try to slice up N to more direct loads if the slices are mapped to
15774 // different register banks or pairing can take place.
15775 if (SliceUpLoad(N))
15776 return SDValue(N, 0);
15777
15778 return SDValue();
15779}
15780
15781namespace {
15782
15783/// Helper structure used to slice a load in smaller loads.
15784/// Basically a slice is obtained from the following sequence:
15785/// Origin = load Ty1, Base
15786/// Shift = srl Ty1 Origin, CstTy Amount
15787/// Inst = trunc Shift to Ty2
15788///
15789/// Then, it will be rewritten into:
15790/// Slice = load SliceTy, Base + SliceOffset
15791/// [Inst = zext Slice to Ty2], only if SliceTy <> Ty2
15792///
15793/// SliceTy is deduced from the number of bits that are actually used to
15794/// build Inst.
15795struct LoadedSlice {
15796 /// Helper structure used to compute the cost of a slice.
15797 struct Cost {
15798 /// Are we optimizing for code size.
15799 bool ForCodeSize = false;
15800
15801 /// Various costs.
15802 unsigned Loads = 0;
15803 unsigned Truncates = 0;
15804 unsigned CrossRegisterBanksCopies = 0;
15805 unsigned ZExts = 0;
15806 unsigned Shift = 0;
15807
15808 explicit Cost(bool ForCodeSize) : ForCodeSize(ForCodeSize) {}
15809
15810 /// Get the cost of one isolated slice.
15811 Cost(const LoadedSlice &LS, bool ForCodeSize)
15812 : ForCodeSize(ForCodeSize), Loads(1) {
15813 EVT TruncType = LS.Inst->getValueType(0);
15814 EVT LoadedType = LS.getLoadedType();
15815 if (TruncType != LoadedType &&
15816 !LS.DAG->getTargetLoweringInfo().isZExtFree(LoadedType, TruncType))
15817 ZExts = 1;
15818 }
15819
15820 /// Account for slicing gain in the current cost.
15821 /// Slicing provides a few gains, like removing a shift or a
15822 /// truncate. This method allows growing the cost of the original
15823 /// load with the gain from this slice.
15824 void addSliceGain(const LoadedSlice &LS) {
15825 // Each slice saves a truncate.
15826 const TargetLowering &TLI = LS.DAG->getTargetLoweringInfo();
15827 if (!TLI.isTruncateFree(LS.Inst->getOperand(0).getValueType(),
15828 LS.Inst->getValueType(0)))
15829 ++Truncates;
15830 // If there is a shift amount, this slice gets rid of it.
15831 if (LS.Shift)
15832 ++Shift;
15833 // If this slice can merge a cross register bank copy, account for it.
15834 if (LS.canMergeExpensiveCrossRegisterBankCopy())
15835 ++CrossRegisterBanksCopies;
15836 }
15837
15838 Cost &operator+=(const Cost &RHS) {
15839 Loads += RHS.Loads;
15840 Truncates += RHS.Truncates;
15841 CrossRegisterBanksCopies += RHS.CrossRegisterBanksCopies;
15842 ZExts += RHS.ZExts;
15843 Shift += RHS.Shift;
15844 return *this;
15845 }
15846
15847 bool operator==(const Cost &RHS) const {
15848 return Loads == RHS.Loads && Truncates == RHS.Truncates &&
15849 CrossRegisterBanksCopies == RHS.CrossRegisterBanksCopies &&
15850 ZExts == RHS.ZExts && Shift == RHS.Shift;
15851 }
15852
15853 bool operator!=(const Cost &RHS) const { return !(*this == RHS); }
15854
15855 bool operator<(const Cost &RHS) const {
15856 // Assume cross register banks copies are as expensive as loads.
15857 // FIXME: Do we want some more target hooks?
15858 unsigned ExpensiveOpsLHS = Loads + CrossRegisterBanksCopies;
15859 unsigned ExpensiveOpsRHS = RHS.Loads + RHS.CrossRegisterBanksCopies;
15860 // Unless we are optimizing for code size, consider the
15861 // expensive operation first.
15862 if (!ForCodeSize && ExpensiveOpsLHS != ExpensiveOpsRHS)
15863 return ExpensiveOpsLHS < ExpensiveOpsRHS;
15864 return (Truncates + ZExts + Shift + ExpensiveOpsLHS) <
15865 (RHS.Truncates + RHS.ZExts + RHS.Shift + ExpensiveOpsRHS);
15866 }
15867
15868 bool operator>(const Cost &RHS) const { return RHS < *this; }
15869
15870 bool operator<=(const Cost &RHS) const { return !(RHS < *this); }
15871
15872 bool operator>=(const Cost &RHS) const { return !(*this < RHS); }
15873 };
15874
15875 // The last instruction that represents the slice. This should be a
15876 // truncate instruction.
15877 SDNode *Inst;
15878
15879 // The original load instruction.
15880 LoadSDNode *Origin;
15881
15882 // The right shift amount in bits from the original load.
15883 unsigned Shift;
15884
15885 // The DAG from which Origin came.
15886 // This is used to get some contextual information about legal types, etc.
15887 SelectionDAG *DAG;
15888
15889 LoadedSlice(SDNode *Inst = nullptr, LoadSDNode *Origin = nullptr,
15890 unsigned Shift = 0, SelectionDAG *DAG = nullptr)
15891 : Inst(Inst), Origin(Origin), Shift(Shift), DAG(DAG) {}
15892
15893 /// Get the bits used in a chunk of bits \p BitWidth large.
15894 /// \return Result is \p BitWidth bits wide and has used bits set to 1 and
15895 /// not used bits set to 0.
15896 APInt getUsedBits() const {
15897 // Reproduce the trunc(lshr) sequence:
15898 // - Start from the truncated value.
15899 // - Zero extend to the desired bit width.
15900 // - Shift left.
15901 assert(Origin && "No original load to compare against.");
15902 unsigned BitWidth = Origin->getValueSizeInBits(0);
15903 assert(Inst && "This slice is not bound to an instruction");
15904 assert(Inst->getValueSizeInBits(0) <= BitWidth &&
15905 "Extracted slice is bigger than the whole type!");
15906 APInt UsedBits(Inst->getValueSizeInBits(0), 0);
15907 UsedBits.setAllBits();
15908 UsedBits = UsedBits.zext(BitWidth);
15909 UsedBits <<= Shift;
15910 return UsedBits;
15911 }
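// [Editor's note] Worked example, an editor's addition: for the slice
//   Inst = trunc i32 (srl i32 Origin, 16) to i8     (Shift == 16)
// getUsedBits() starts from an i8 of all ones (0xFF), zero-extends it to
// the 32 bits of Origin (0x000000FF), then shifts left by 16, giving
// 0x00FF0000: the slice uses exactly the third byte of the loaded value.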
15912
15913 /// Get the size of the slice to be loaded in bytes.
15914 unsigned getLoadedSize() const {
15915 unsigned SliceSize = getUsedBits().countPopulation();
15916 assert(!(SliceSize & 0x7) && "Size is not a multiple of a byte.");
15917 return SliceSize / 8;
15918 }
15919
15920 /// Get the type that will be loaded for this slice.
15921 /// Note: This may not be the final type for the slice.
15922 EVT getLoadedType() const {
15923 assert(DAG && "Missing context");
15924 LLVMContext &Ctxt = *DAG->getContext();
15925 return EVT::getIntegerVT(Ctxt, getLoadedSize() * 8);
15926 }
15927
15928 /// Get the alignment of the load used for this slice.
15929 Align getAlign() const {
15930 Align Alignment = Origin->getAlign();
15931 uint64_t Offset = getOffsetFromBase();
15932 if (Offset != 0)
15933 Alignment = commonAlignment(Alignment, Alignment.value() + Offset);
15934 return Alignment;
15935 }
15936
15937 /// Check if this slice can be rewritten with legal operations.
15938 bool isLegal() const {
15939 // An invalid slice is not legal.
15940 if (!Origin || !Inst || !DAG)
15941 return false;
15942
15943 // Offsets are for indexed loads only; we do not handle that.
15944 if (!Origin->getOffset().isUndef())
15945 return false;
15946
15947 const TargetLowering &TLI = DAG->getTargetLoweringInfo();
15948
15949 // Check that the type is legal.
15950 EVT SliceType = getLoadedType();
15951 if (!TLI.isTypeLegal(SliceType))
15952 return false;
15953
15954 // Check that the load is legal for this type.
15955 if (!TLI.isOperationLegal(ISD::LOAD, SliceType))
15956 return false;
15957
15958 // Check that the offset can be computed.
15959 // 1. Check its type.
15960 EVT PtrType = Origin->getBasePtr().getValueType();
15961 if (PtrType == MVT::Untyped || PtrType.isExtended())
15962 return false;
15963
15964 // 2. Check that it fits in the immediate.
15965 if (!TLI.isLegalAddImmediate(getOffsetFromBase()))
15966 return false;
15967
15968 // 3. Check that the computation is legal.
15969 if (!TLI.isOperationLegal(ISD::ADD, PtrType))
15970 return false;
15971
15972 // Check that the zext is legal if it needs one.
15973 EVT TruncateType = Inst->getValueType(0);
15974 if (TruncateType != SliceType &&
15975 !TLI.isOperationLegal(ISD::ZERO_EXTEND, TruncateType))
15976 return false;
15977
15978 return true;
15979 }
15980
15981 /// Get the offset in bytes of this slice in the original chunk of
15982 /// bits.
15983 /// \pre DAG != nullptr.
15984 uint64_t getOffsetFromBase() const {
15985 assert(DAG && "Missing context.");
15986 bool IsBigEndian = DAG->getDataLayout().isBigEndian();
15987 assert(!(Shift & 0x7) && "Shifts not aligned on Bytes are not supported.");
15988 uint64_t Offset = Shift / 8;
15989 unsigned TySizeInBytes = Origin->getValueSizeInBits(0) / 8;
15990 assert(!(Origin->getValueSizeInBits(0) & 0x7) &&
15991 "The size of the original loaded type is not a multiple of a"
15992 " byte.");
15993 // If Offset is bigger than TySizeInBytes, it means we are loading all
15994 // zeros. This should have been optimized before in the process.
15995 assert(TySizeInBytes > Offset &&
15996 "Invalid shift amount for given loaded size");
15997 if (IsBigEndian)
15998 Offset = TySizeInBytes - Offset - getLoadedSize();
15999 return Offset;
16000 }
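// [Editor's note] Worked example, an editor's addition: with an i32 Origin,
// Shift == 16 and a one-byte slice, the little-endian offset is 16/8 == 2.
// On a big-endian target the result is 4 - 2 - 1 == 1, because there the
// byte holding bits [16,24) lives one byte after the base.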
16001
16002 /// Generate the sequence of instructions to load the slice
16003 /// represented by this object and redirect the uses of this slice to
16004 /// this new sequence of instructions.
16005 /// \pre this->Inst && this->Origin are valid Instructions and this
16006 /// object passed the legal check: LoadedSlice::isLegal returned true.
16007 /// \return The last instruction of the sequence used to load the slice.
16008 SDValue loadSlice() const {
16009 assert(Inst && Origin && "Unable to replace a non-existing slice.");
16010 const SDValue &OldBaseAddr = Origin->getBasePtr();
16011 SDValue BaseAddr = OldBaseAddr;
16012 // Get the offset in that chunk of bytes w.r.t. the endianness.
16013 int64_t Offset = static_cast<int64_t>(getOffsetFromBase());
16014 assert(Offset >= 0 && "Offset too big to fit in int64_t!");
16015 if (Offset) {
16016 // BaseAddr = BaseAddr + Offset.
16017 EVT ArithType = BaseAddr.getValueType();
16018 SDLoc DL(Origin);
16019 BaseAddr = DAG->getNode(ISD::ADD, DL, ArithType, BaseAddr,
16020 DAG->getConstant(Offset, DL, ArithType));
16021 }
16022
16023 // Create the type of the loaded slice according to its size.
16024 EVT SliceType = getLoadedType();
16025
16026 // Create the load for the slice.
16027 SDValue LastInst =
16028 DAG->getLoad(SliceType, SDLoc(Origin), Origin->getChain(), BaseAddr,
16029 Origin->getPointerInfo().getWithOffset(Offset), getAlign(),
16030 Origin->getMemOperand()->getFlags());
16031 // If the final type is not the same as the loaded type, this means that
16032 // we have to pad with zero. Create a zero extend for that.
16033 EVT FinalType = Inst->getValueType(0);
16034 if (SliceType != FinalType)
16035 LastInst =
16036 DAG->getNode(ISD::ZERO_EXTEND, SDLoc(LastInst), FinalType, LastInst);
16037 return LastInst;
16038 }
16039
16040 /// Check if this slice can be merged with an expensive cross register
16041 /// bank copy. E.g.,
16042 /// i = load i32
16043 /// f = bitcast i32 i to float
16044 bool canMergeExpensiveCrossRegisterBankCopy() const {
16045 if (!Inst || !Inst->hasOneUse())
16046 return false;
16047 SDNode *Use = *Inst->use_begin();
16048 if (Use->getOpcode() != ISD::BITCAST)
16049 return false;
16050 assert(DAG && "Missing context");
16051 const TargetLowering &TLI = DAG->getTargetLoweringInfo();
16052 EVT ResVT = Use->getValueType(0);
16053 const TargetRegisterClass *ResRC =
16054 TLI.getRegClassFor(ResVT.getSimpleVT(), Use->isDivergent());
16055 const TargetRegisterClass *ArgRC =
16056 TLI.getRegClassFor(Use->getOperand(0).getValueType().getSimpleVT(),
16057 Use->getOperand(0)->isDivergent());
16058 if (ArgRC == ResRC || !TLI.isOperationLegal(ISD::LOAD, ResVT))
16059 return false;
16060
16061 // At this point, we know that we perform a cross-register-bank copy.
16062 // Check if it is expensive.
16063 const TargetRegisterInfo *TRI = DAG->getSubtarget().getRegisterInfo();
16064 // Assume bitcasts are cheap, unless the two register classes do not
16065 // explicitly share a common subclass.
16066 if (!TRI || TRI->getCommonSubClass(ArgRC, ResRC))
16067 return false;
16068
16069 // Check if it will be merged with the load.
16070 // 1. Check the alignment constraint.
16071 Align RequiredAlignment = DAG->getDataLayout().getABITypeAlign(
16072 ResVT.getTypeForEVT(*DAG->getContext()));
16073
16074 if (RequiredAlignment > getAlign())
16075 return false;
16076
16077 // 2. Check that the load is a legal operation for that type.
16078 if (!TLI.isOperationLegal(ISD::LOAD, ResVT))
16079 return false;
16080
16081 // 3. Check that we do not have a zext in the way.
16082 if (Inst->getValueType(0) != getLoadedType())
16083 return false;
16084
16085 return true;
16086 }
16087};
16088
16089} // end anonymous namespace
16090
16091/// Check that all bits set in \p UsedBits form a dense region, i.e.,
16092/// \p UsedBits looks like 0..0 1..1 0..0.
16093static bool areUsedBitsDense(const APInt &UsedBits) {
16094 // If all the bits are one, this is dense!
16095 if (UsedBits.isAllOnesValue())
16096 return true;
16097
16098 // Get rid of the unused bits on the right.
16099 APInt NarrowedUsedBits = UsedBits.lshr(UsedBits.countTrailingZeros());
16100 // Get rid of the unused bits on the left.
16101 if (NarrowedUsedBits.countLeadingZeros())
16102 NarrowedUsedBits = NarrowedUsedBits.trunc(NarrowedUsedBits.getActiveBits());
16103 // Check that the chunk of bits is completely used.
16104 return NarrowedUsedBits.isAllOnesValue();
16105}
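// [Editor's note] Worked example, an editor's addition: UsedBits ==
// 0x00FFFF00 is dense: shifting out the 8 trailing zeros gives 0x0000FFFF,
// and truncating to the 16 active bits leaves all ones, so this returns
// true. UsedBits == 0x00FF00F0 keeps an internal zero hole after the same
// normalization and is rejected.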
16106
16107/// Check whether or not \p First and \p Second are next to each other
16108/// in memory. This means that there is no hole between the bits loaded
16109/// by \p First and the bits loaded by \p Second.
16110static bool areSlicesNextToEachOther(const LoadedSlice &First,
16111 const LoadedSlice &Second) {
16112 assert(First.Origin == Second.Origin && First.Origin &&
16113 "Unable to match different memory origins.");
16114 APInt UsedBits = First.getUsedBits();
16115 assert((UsedBits & Second.getUsedBits()) == 0 &&
16116 "Slices are not supposed to overlap.");
16117 UsedBits |= Second.getUsedBits();
16118 return areUsedBitsDense(UsedBits);
16119}
16120
16121/// Adjust the \p GlobalLSCost according to the target
16122 /// pairing capabilities and the layout of the slices.
16123/// \pre \p GlobalLSCost should account for at least as many loads as
16124/// there is in the slices in \p LoadedSlices.
16125static void adjustCostForPairing(SmallVectorImpl<LoadedSlice> &LoadedSlices,
16126 LoadedSlice::Cost &GlobalLSCost) {
16127 unsigned NumberOfSlices = LoadedSlices.size();
16128 // If there are fewer than 2 elements, no pairing is possible.
16129 if (NumberOfSlices < 2)
16130 return;
16131
16132 // Sort the slices so that elements that are likely to be next to each
16133 // other in memory are next to each other in the list.
16134 llvm::sort(LoadedSlices, [](const LoadedSlice &LHS, const LoadedSlice &RHS) {
16135 assert(LHS.Origin == RHS.Origin && "Different bases not implemented.");
16136 return LHS.getOffsetFromBase() < RHS.getOffsetFromBase();
16137 });
16138 const TargetLowering &TLI = LoadedSlices[0].DAG->getTargetLoweringInfo();
16139 // First (resp. Second) is the first (resp. second) potential candidate
16140 // to be placed in a paired load.
16141 const LoadedSlice *First = nullptr;
16142 const LoadedSlice *Second = nullptr;
16143 for (unsigned CurrSlice = 0; CurrSlice < NumberOfSlices; ++CurrSlice,
16144 // Set the beginning of the pair.
16145 First = Second) {
16146 Second = &LoadedSlices[CurrSlice];
16147
16148 // If First is NULL, it means we start a new pair.
16149 // Get to the next slice.
16150 if (!First)
16151 continue;
16152
16153 EVT LoadedType = First->getLoadedType();
16154
16155 // If the types of the slices are different, we cannot pair them.
16156 if (LoadedType != Second->getLoadedType())
16157 continue;
16158
16159 // Check if the target supplies paired loads for this type.
16160 Align RequiredAlignment;
16161 if (!TLI.hasPairedLoad(LoadedType, RequiredAlignment)) {
16162 // Move to the next pair; this type is hopeless.
16163 Second = nullptr;
16164 continue;
16165 }
16166 // Check if we meet the alignment requirement.
16167 if (First->getAlign() < RequiredAlignment)
16168 continue;
16169
16170 // Check that both loads are next to each other in memory.
16171 if (!areSlicesNextToEachOther(*First, *Second))
16172 continue;
16173
16174 assert(GlobalLSCost.Loads > 0 && "We save more loads than we created!");
16175 --GlobalLSCost.Loads;
16176 // Move to the next pair.
16177 Second = nullptr;
16178 }
16179}
16180
16181/// Check the profitability of all involved LoadedSlice.
16182 /// Currently, it is considered profitable if there are exactly two
16183/// involved slices (1) which are (2) next to each other in memory, and
16184/// whose cost (\see LoadedSlice::Cost) is smaller than the original load (3).
16185///
16186/// Note: The order of the elements in \p LoadedSlices may be modified, but not
16187/// the elements themselves.
16188///
16189 /// FIXME: When the cost model is mature enough, we can relax
16190/// constraints (1) and (2).
16191static bool isSlicingProfitable(SmallVectorImpl<LoadedSlice> &LoadedSlices,
16192 const APInt &UsedBits, bool ForCodeSize) {
16193 unsigned NumberOfSlices = LoadedSlices.size();
16194 if (StressLoadSlicing)
16195 return NumberOfSlices > 1;
16196
16197 // Check (1).
16198 if (NumberOfSlices != 2)
16199 return false;
16200
16201 // Check (2).
16202 if (!areUsedBitsDense(UsedBits))
16203 return false;
16204
16205 // Check (3).
16206 LoadedSlice::Cost OrigCost(ForCodeSize), GlobalSlicingCost(ForCodeSize);
16207 // The original code has one big load.
16208 OrigCost.Loads = 1;
16209 for (unsigned CurrSlice = 0; CurrSlice < NumberOfSlices; ++CurrSlice) {
16210 const LoadedSlice &LS = LoadedSlices[CurrSlice];
16211 // Accumulate the cost of all the slices.
16212 LoadedSlice::Cost SliceCost(LS, ForCodeSize);
16213 GlobalSlicingCost += SliceCost;
16214
16215 // Account the gain obtained with the current slices as a cost in the
16216 // original configuration.
16217 OrigCost.addSliceGain(LS);
16218 }
16219
16220 // If the target supports paired load, adjust the cost accordingly.
16221 adjustCostForPairing(LoadedSlices, GlobalSlicingCost);
16222 return OrigCost > GlobalSlicingCost;
16223}
16224
16225/// If the given load, \p LI, is used only by trunc or trunc(lshr)
16226/// operations, split it in the various pieces being extracted.
16227///
16228/// This sort of thing is introduced by SROA.
16229/// This slicing takes care not to insert overlapping loads.
16230/// \pre LI is a simple load (i.e., not an atomic or volatile load).
16231bool DAGCombiner::SliceUpLoad(SDNode *N) {
16232 if (Level < AfterLegalizeDAG)
16233 return false;
16234
16235 LoadSDNode *LD = cast<LoadSDNode>(N);
16236 if (!LD->isSimple() || !ISD::isNormalLoad(LD) ||
16237 !LD->getValueType(0).isInteger())
16238 return false;
16239
16240 // The algorithm to split up a load of a scalable vector into individual
16241 // elements currently requires knowing the length of the loaded type,
16242 // so will need adjusting to work on scalable vectors.
16243 if (LD->getValueType(0).isScalableVector())
16244 return false;
16245
16246 // Keep track of already used bits to detect overlapping values.
16247 // In that case, we will just abort the transformation.
16248 APInt UsedBits(LD->getValueSizeInBits(0), 0);
16249
16250 SmallVector<LoadedSlice, 4> LoadedSlices;
16251
16252 // Check if this load is used as several smaller chunks of bits.
16253 // Basically, look for uses in trunc or trunc(lshr) and record a new chain
16254 // of computation for each trunc.
16255 for (SDNode::use_iterator UI = LD->use_begin(), UIEnd = LD->use_end();
16256 UI != UIEnd; ++UI) {
16257 // Skip the uses of the chain.
16258 if (UI.getUse().getResNo() != 0)
16259 continue;
16260
16261 SDNode *User = *UI;
16262 unsigned Shift = 0;
16263
16264 // Check if this is a trunc(lshr).
16265 if (User->getOpcode() == ISD::SRL && User->hasOneUse() &&
16266 isa<ConstantSDNode>(User->getOperand(1))) {
16267 Shift = User->getConstantOperandVal(1);
16268 User = *User->use_begin();
16269 }
16270
16271 // At this point, User is a TRUNCATE iff we encountered trunc or
16272 // trunc(lshr).
16273 if (User->getOpcode() != ISD::TRUNCATE)
16274 return false;
16275
16276 // The width of the type must be a power of 2 and at least 8 bits.
16277 // Otherwise the load cannot be represented in LLVM IR.
16278 // Moreover, if we shifted by an amount that is not a multiple of 8, the
16279 // slice would span several bytes. We do not support that.
16280 unsigned Width = User->getValueSizeInBits(0);
16281 if (Width < 8 || !isPowerOf2_32(Width) || (Shift & 0x7))
16282 return false;
16283
16284 // Build the slice for this chain of computations.
16285 LoadedSlice LS(User, LD, Shift, &DAG);
16286 APInt CurrentUsedBits = LS.getUsedBits();
16287
16288 // Check if this slice overlaps with another.
16289 if ((CurrentUsedBits & UsedBits) != 0)
16290 return false;
16291 // Update the bits used globally.
16292 UsedBits |= CurrentUsedBits;
16293
16294 // Check if the new slice would be legal.
16295 if (!LS.isLegal())
16296 return false;
16297
16298 // Record the slice.
16299 LoadedSlices.push_back(LS);
16300 }
16301
16302 // Abort slicing if it does not seem to be profitable.
16303 if (!isSlicingProfitable(LoadedSlices, UsedBits, ForCodeSize))
16304 return false;
16305
16306 ++SlicedLoads;
16307
16308 // Rewrite each chain to use an independent load.
16309 // By construction, each chain can be represented by a unique load.
16310
16311 // Prepare the argument for the new token factor for all the slices.
16312 SmallVector<SDValue, 8> ArgChains;
16313 for (const LoadedSlice &LS : LoadedSlices) {
16314 SDValue SliceInst = LS.loadSlice();
16315 CombineTo(LS.Inst, SliceInst, true);
16316 if (SliceInst.getOpcode() != ISD::LOAD)
16317 SliceInst = SliceInst.getOperand(0);
16318 assert(SliceInst->getOpcode() == ISD::LOAD &&
16319 "It takes more than a zext to get to the loaded slice!!");
16320 ArgChains.push_back(SliceInst.getValue(1));
16321 }
16322
16323 SDValue Chain = DAG.getNode(ISD::TokenFactor, SDLoc(LD), MVT::Other,
16324 ArgChains);
16325 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Chain);
16326 AddToWorklist(Chain.getNode());
16327 return true;
16328}
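// [Editor's note] A hedged before/after sketch of the slicing above, an
// editor's addition assuming a little-endian target and illustrative names:
//   v  = load i64 %p
//   lo = trunc v to i32
//   hi = trunc (srl v, 32) to i32
// is rewritten as two independent loads,
//   lo = load i32 %p
//   hi = load i32 (%p + 4)
// with a TokenFactor of the two new chains replacing the old load's chain.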
16329
16330 /// Check to see if V is (and (load ptr), imm), where the load has
16331 /// specific bytes cleared out. If so, return the byte size being masked out
16332/// and the shift amount.
16333static std::pair<unsigned, unsigned>
16334CheckForMaskedLoad(SDValue V, SDValue Ptr, SDValue Chain) {
16335 std::pair<unsigned, unsigned> Result(0, 0);
16336
16337 // Check for the structure we're looking for.
16338 if (V->getOpcode() != ISD::AND ||
16339 !isa<ConstantSDNode>(V->getOperand(1)) ||
16340 !ISD::isNormalLoad(V->getOperand(0).getNode()))
16341 return Result;
16342
16343 // Check the chain and pointer.
16344 LoadSDNode *LD = cast<LoadSDNode>(V->getOperand(0));
16345 if (LD->getBasePtr() != Ptr) return Result; // Not from same pointer.
16346
16347 // This only handles simple types.
16348 if (V.getValueType() != MVT::i16 &&
16349 V.getValueType() != MVT::i32 &&
16350 V.getValueType() != MVT::i64)
16351 return Result;
16352
16353 // Check the constant mask. Invert it so that the bits being masked out are
16354 // 0 and the bits being kept are 1. Use getSExtValue so that leading bits
16355 // follow the sign bit for uniformity.
16356 uint64_t NotMask = ~cast<ConstantSDNode>(V->getOperand(1))->getSExtValue();
16357 unsigned NotMaskLZ = countLeadingZeros(NotMask);
16358 if (NotMaskLZ & 7) return Result; // Must be multiple of a byte.
16359 unsigned NotMaskTZ = countTrailingZeros(NotMask);
16360 if (NotMaskTZ & 7) return Result; // Must be multiple of a byte.
16361 if (NotMaskLZ == 64) return Result; // All zero mask.
16362
16363 // See if we have a continuous run of bits. If so, we have 0*1+0*
16364 if (countTrailingOnes(NotMask >> NotMaskTZ) + NotMaskTZ + NotMaskLZ != 64)
16365 return Result;
16366
16367 // Adjust NotMaskLZ down to be from the actual size of the int instead of i64.
16368 if (V.getValueType() != MVT::i64 && NotMaskLZ)
16369 NotMaskLZ -= 64-V.getValueSizeInBits();
16370
16371 unsigned MaskedBytes = (V.getValueSizeInBits()-NotMaskLZ-NotMaskTZ)/8;
16372 switch (MaskedBytes) {
16373 case 1:
16374 case 2:
16375 case 4: break;
16376 default: return Result; // All one mask, or 5-byte mask.
16377 }
16378
16379 // Verify that the first bit starts at a multiple of the mask width so that
16380 // the access has the same alignment as the access width.
16381 if (NotMaskTZ && NotMaskTZ/8 % MaskedBytes) return Result;
16382
16383 // For narrowing to be valid, it must be the case that the load is the
16384 // memory operation immediately preceding the store.
16385 if (LD == Chain.getNode())
16386 ; // ok.
16387 else if (Chain->getOpcode() == ISD::TokenFactor &&
16388 SDValue(LD, 1).hasOneUse()) {
16389 // LD has only one chain use, so there are no indirect dependencies.
16390 if (!LD->isOperandOf(Chain.getNode()))
16391 return Result;
16392 } else
16393 return Result; // Fail.
16394
16395 Result.first = MaskedBytes;
16396 Result.second = NotMaskTZ/8;
16397 return Result;
16398}
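// [Editor's note] Worked example, an editor's addition: for
//   V = and (load i32 %p), 0xFFFF00FF
// the inverted (sign-extended) mask is NotMask == 0x0000FF00, so
// NotMaskTZ == 8 and, after rescaling from i64 down to i32, NotMaskLZ == 16.
// That is one masked byte starting one byte in, so the function returns
// {MaskedBytes, ByteShift} == {1, 1}.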
16399
16400/// Check to see if IVal is something that provides a value as specified by
16401/// MaskInfo. If so, replace the specified store with a narrower store of
16402/// truncated IVal.
16403static SDValue
16404ShrinkLoadReplaceStoreWithStore(const std::pair<unsigned, unsigned> &MaskInfo,
16405 SDValue IVal, StoreSDNode *St,
16406 DAGCombiner *DC) {
16407 unsigned NumBytes = MaskInfo.first;
16408 unsigned ByteShift = MaskInfo.second;
16409 SelectionDAG &DAG = DC->getDAG();
16410
16411 // Check to see if IVal is all zeros in the part being masked in by the 'or'
16412 // that uses this. If not, this is not a replacement.
16413 APInt Mask = ~APInt::getBitsSet(IVal.getValueSizeInBits(),
16414 ByteShift*8, (ByteShift+NumBytes)*8);
16415 if (!DAG.MaskedValueIsZero(IVal, Mask)) return SDValue();
16416
16417 // Check that it is legal on the target to do this. It is legal if the new
16418 // VT we're shrinking to (i8/i16/i32) is legal or we're still before type
16419 // legalization (and the target doesn't explicitly think this is a bad idea).
16420 MVT VT = MVT::getIntegerVT(NumBytes * 8);
16421 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16422 if (!DC->isTypeLegal(VT))
16423 return SDValue();
16424 if (St->getMemOperand() &&
16425 !TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
16426 *St->getMemOperand()))
16427 return SDValue();
16428
16429 // Okay, we can do this! Replace the 'St' store with a store of IVal that is
16430 // shifted by ByteShift and truncated down to NumBytes.
16431 if (ByteShift) {
16432 SDLoc DL(IVal);
16433 IVal = DAG.getNode(ISD::SRL, DL, IVal.getValueType(), IVal,
16434 DAG.getConstant(ByteShift*8, DL,
16435 DC->getShiftAmountTy(IVal.getValueType())));
16436 }
16437
16438 // Figure out the offset for the store and the alignment of the access.
16439 unsigned StOffset;
16440 if (DAG.getDataLayout().isLittleEndian())
16441 StOffset = ByteShift;
16442 else
16443 StOffset = IVal.getValueType().getStoreSize() - ByteShift - NumBytes;
16444
16445 SDValue Ptr = St->getBasePtr();
16446 if (StOffset) {
16447 SDLoc DL(IVal);
16448 Ptr = DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(StOffset), DL);
16449 }
16450
16451 // Truncate down to the new size.
16452 IVal = DAG.getNode(ISD::TRUNCATE, SDLoc(IVal), VT, IVal);
16453
16454 ++OpsNarrowed;
16455 return DAG
16456 .getStore(St->getChain(), SDLoc(St), IVal, Ptr,
16457 St->getPointerInfo().getWithOffset(StOffset),
16458 St->getOriginalAlign());
16459}
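// [Editor's note] Continuing the {1, 1} example above, an editor's addition:
// for
//   store (or (and (load i32 %p), 0xFFFF00FF), IVal), %p
// with IVal known to be zero outside bits [8,16), the sequence shrinks to a
// single i8 store of (trunc (srl IVal, 8)) at %p + 1 on a little-endian
// target, leaving the wide load and the or dead.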
16460
16461/// Look for sequence of load / op / store where op is one of 'or', 'xor', and
16462/// 'and' of immediates. If 'op' is only touching some of the loaded bits, try
16463/// narrowing the load and store if it would end up being a win for performance
16464/// or code size.
16465SDValue DAGCombiner::ReduceLoadOpStoreWidth(SDNode *N) {
16466 StoreSDNode *ST = cast<StoreSDNode>(N);
16467 if (!ST->isSimple())
16468 return SDValue();
16469
16470 SDValue Chain = ST->getChain();
16471 SDValue Value = ST->getValue();
16472 SDValue Ptr = ST->getBasePtr();
16473 EVT VT = Value.getValueType();
16474
16475 if (ST->isTruncatingStore() || VT.isVector() || !Value.hasOneUse())
16476 return SDValue();
16477
16478 unsigned Opc = Value.getOpcode();
16479
16480 // If this is "store (or X, Y), P" and X is "(and (load P), cst)", where cst
16481 // is a byte mask indicating a consecutive number of bytes, check to see if
16482 // Y is known to provide just those bytes. If so, we try to replace the
16483 // load + replace + store sequence with a single (narrower) store, which makes
16484 // the load dead.
16485 if (Opc == ISD::OR && EnableShrinkLoadReplaceStoreWithStore) {
16486 std::pair<unsigned, unsigned> MaskedLoad;
16487 MaskedLoad = CheckForMaskedLoad(Value.getOperand(0), Ptr, Chain);
16488 if (MaskedLoad.first)
16489 if (SDValue NewST = ShrinkLoadReplaceStoreWithStore(MaskedLoad,
16490 Value.getOperand(1), ST,this))
16491 return NewST;
16492
16493 // Or is commutative, so try swapping X and Y.
16494 MaskedLoad = CheckForMaskedLoad(Value.getOperand(1), Ptr, Chain);
16495 if (MaskedLoad.first)
16496 if (SDValue NewST = ShrinkLoadReplaceStoreWithStore(MaskedLoad,
16497 Value.getOperand(0), ST,this))
16498 return NewST;
16499 }
16500
16501 if (!EnableReduceLoadOpStoreWidth)
16502 return SDValue();
16503
16504 if ((Opc != ISD::OR && Opc != ISD::XOR && Opc != ISD::AND) ||
16505 Value.getOperand(1).getOpcode() != ISD::Constant)
16506 return SDValue();
16507
16508 SDValue N0 = Value.getOperand(0);
16509 if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() &&
16510 Chain == SDValue(N0.getNode(), 1)) {
16511 LoadSDNode *LD = cast<LoadSDNode>(N0);
16512 if (LD->getBasePtr() != Ptr ||
16513 LD->getPointerInfo().getAddrSpace() !=
16514 ST->getPointerInfo().getAddrSpace())
16515 return SDValue();
16516
16517 // Find the type to narrow the load / op / store to.
16518 SDValue N1 = Value.getOperand(1);
16519 unsigned BitWidth = N1.getValueSizeInBits();
16520 APInt Imm = cast<ConstantSDNode>(N1)->getAPIntValue();
16521 if (Opc == ISD::AND)
16522 Imm ^= APInt::getAllOnesValue(BitWidth);
16523 if (Imm == 0 || Imm.isAllOnesValue())
16524 return SDValue();
16525 unsigned ShAmt = Imm.countTrailingZeros();
16526 unsigned MSB = BitWidth - Imm.countLeadingZeros() - 1;
16527 unsigned NewBW = NextPowerOf2(MSB - ShAmt);
16528 EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), NewBW);
16529 // The narrowing should be profitable, the load/store operation should be
16530 // legal (or custom) and the store size should be equal to the NewVT width.
16531 while (NewBW < BitWidth &&
16532 (NewVT.getStoreSizeInBits() != NewBW ||
16533 !TLI.isOperationLegalOrCustom(Opc, NewVT) ||
16534 !TLI.isNarrowingProfitable(VT, NewVT))) {
16535 NewBW = NextPowerOf2(NewBW);
16536 NewVT = EVT::getIntegerVT(*DAG.getContext(), NewBW);
16537 }
16538 if (NewBW >= BitWidth)
16539 return SDValue();
16540
16541 // If the lowest changed bit does not start at a boundary of the new type
16542 // width, start at the previous boundary.
16543 if (ShAmt % NewBW)
16544 ShAmt = (((ShAmt + NewBW - 1) / NewBW) * NewBW) - NewBW;
16545 APInt Mask = APInt::getBitsSet(BitWidth, ShAmt,
16546 std::min(BitWidth, ShAmt + NewBW));
16547 if ((Imm & Mask) == Imm) {
16548 APInt NewImm = (Imm & Mask).lshr(ShAmt).trunc(NewBW);
16549 if (Opc == ISD::AND)
16550 NewImm ^= APInt::getAllOnesValue(NewBW);
16551 uint64_t PtrOff = ShAmt / 8;
16552 // For big endian targets, we need to adjust the offset to the pointer to
16553 // load the correct bytes.
16554 if (DAG.getDataLayout().isBigEndian())
16555 PtrOff = (BitWidth + 7 - NewBW) / 8 - PtrOff;
16556
16557 Align NewAlign = commonAlignment(LD->getAlign(), PtrOff);
16558 Type *NewVTTy = NewVT.getTypeForEVT(*DAG.getContext());
16559 if (NewAlign < DAG.getDataLayout().getABITypeAlign(NewVTTy))
16560 return SDValue();
16561
16562 SDValue NewPtr =
16563 DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(PtrOff), SDLoc(LD));
16564 SDValue NewLD =
16565 DAG.getLoad(NewVT, SDLoc(N0), LD->getChain(), NewPtr,
16566 LD->getPointerInfo().getWithOffset(PtrOff), NewAlign,
16567 LD->getMemOperand()->getFlags(), LD->getAAInfo());
16568 SDValue NewVal = DAG.getNode(Opc, SDLoc(Value), NewVT, NewLD,
16569 DAG.getConstant(NewImm, SDLoc(Value),
16570 NewVT));
16571 SDValue NewST =
16572 DAG.getStore(Chain, SDLoc(N), NewVal, NewPtr,
16573 ST->getPointerInfo().getWithOffset(PtrOff), NewAlign);
16574
16575 AddToWorklist(NewPtr.getNode());
16576 AddToWorklist(NewLD.getNode());
16577 AddToWorklist(NewVal.getNode());
16578 WorklistRemover DeadNodes(*this);
16579 DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), NewLD.getValue(1));
16580 ++OpsNarrowed;
16581 return NewST;
16582 }
16583 }
16584
16585 return SDValue();
16586}
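// [Editor's note] Worked example for the non-OR path above, an editor's
// addition (target legality hooks permitting): in
//   store (xor (load i32 %p), 0x00FF0000), %p
// Imm == 0x00FF0000 gives ShAmt == 16 and MSB == 23, so NewBW == 8 and only
// one byte is touched:
//   x = load i8 (%p + 2); store (xor x, 0xFF), (%p + 2)
// with PtrOff mirrored for big-endian targets as computed above.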
16587
16588/// For a given floating point load / store pair, if the load value isn't used
16589/// by any other operations, then consider transforming the pair to integer
16590/// load / store operations if the target deems the transformation profitable.
16591SDValue DAGCombiner::TransformFPLoadStorePair(SDNode *N) {
16592 StoreSDNode *ST = cast<StoreSDNode>(N);
16593 SDValue Value = ST->getValue();
16594 if (ISD::isNormalStore(ST) && ISD::isNormalLoad(Value.getNode()) &&
16595 Value.hasOneUse()) {
16596 LoadSDNode *LD = cast<LoadSDNode>(Value);
16597 EVT VT = LD->getMemoryVT();
16598 if (!VT.isFloatingPoint() ||
16599 VT != ST->getMemoryVT() ||
16600 LD->isNonTemporal() ||
16601 ST->isNonTemporal() ||
16602 LD->getPointerInfo().getAddrSpace() != 0 ||
16603 ST->getPointerInfo().getAddrSpace() != 0)
16604 return SDValue();
16605
16606 TypeSize VTSize = VT.getSizeInBits();
16607
16608 // We don't know the size of scalable types at compile time so we cannot
16609 // create an integer of the equivalent size.
16610 if (VTSize.isScalable())
16611 return SDValue();
16612
16613 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VTSize.getFixedSize());
16614 if (!TLI.isOperationLegal(ISD::LOAD, IntVT) ||
16615 !TLI.isOperationLegal(ISD::STORE, IntVT) ||
16616 !TLI.isDesirableToTransformToIntegerOp(ISD::LOAD, VT) ||
16617 !TLI.isDesirableToTransformToIntegerOp(ISD::STORE, VT))
16618 return SDValue();
16619
16620 Align LDAlign = LD->getAlign();
16621 Align STAlign = ST->getAlign();
16622 Type *IntVTTy = IntVT.getTypeForEVT(*DAG.getContext());
16623 Align ABIAlign = DAG.getDataLayout().getABITypeAlign(IntVTTy);
16624 if (LDAlign < ABIAlign || STAlign < ABIAlign)
16625 return SDValue();
16626
16627 SDValue NewLD =
16628 DAG.getLoad(IntVT, SDLoc(Value), LD->getChain(), LD->getBasePtr(),
16629 LD->getPointerInfo(), LDAlign);
16630
16631 SDValue NewST =
16632 DAG.getStore(ST->getChain(), SDLoc(N), NewLD, ST->getBasePtr(),
16633 ST->getPointerInfo(), STAlign);
16634
16635 AddToWorklist(NewLD.getNode());
16636 AddToWorklist(NewST.getNode());
16637 WorklistRemover DeadNodes(*this);
16638 DAG.ReplaceAllUsesOfValueWith(Value.getValue(1), NewLD.getValue(1));
16639 ++LdStFP2Int;
16640 return NewST;
16641 }
16642
16643 return SDValue();
16644}
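
At the source level, this transform amounts to moving the raw bytes through an integer register instead of an FP register. A rough hand-written C++ analogue (assuming a 4-byte float and a target that prefers the integer form; this is a sketch, not the DAG representation):

#include <cstdint>
#include <cstring>

// Before: the value round-trips through an FP register.
void copyViaFloat(const float *Src, float *Dst) {
  float V = *Src;  // fp load
  *Dst = V;        // fp store
}

// After: the same 4 bytes move through an integer register instead.
void copyViaInt(const float *Src, float *Dst) {
  uint32_t V;
  std::memcpy(&V, Src, sizeof V);  // i32 load of the same bytes
  std::memcpy(Dst, &V, sizeof V);  // i32 store
}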
16645
16646// This is a helper function for visitMUL to check the profitability
16647// of folding (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2).
16648// MulNode is the original multiply, AddNode is (add x, c1),
16649// and ConstNode is c2.
16650//
16651// If the (add x, c1) has multiple uses, we could increase
16652// the number of adds if we make this transformation.
16653// It would only be worth doing this if we can remove a
16654// multiply in the process. Check for that here.
16655// To illustrate:
16656// (A + c1) * c3
16657// (A + c2) * c3
16658// We're checking for cases where we have common "c3 * A" expressions.
16659bool DAGCombiner::isMulAddWithConstProfitable(SDNode *MulNode,
16660 SDValue &AddNode,
16661 SDValue &ConstNode) {
16662 APInt Val;
16663
16664 // If the add only has one use, this would be OK to do.
16665 if (AddNode.getNode()->hasOneUse())
16666 return true;
16667
16668 // Walk all the users of the constant with which we're multiplying.
16669 for (SDNode *Use : ConstNode->uses()) {
16670 if (Use == MulNode) // This use is the one we're on right now. Skip it.
16671 continue;
16672
16673 if (Use->getOpcode() == ISD::MUL) { // We have another multiply use.
16674 SDNode *OtherOp;
16675 SDNode *MulVar = AddNode.getOperand(0).getNode();
16676
16677 // OtherOp is what we're multiplying against the constant.
16678 if (Use->getOperand(0) == ConstNode)
16679 OtherOp = Use->getOperand(1).getNode();
16680 else
16681 OtherOp = Use->getOperand(0).getNode();
16682
16683 // Check to see if multiply is with the same operand of our "add".
16684 //
16685 // ConstNode = CONST
16686 // Use = ConstNode * A <-- visiting Use. OtherOp is A.
16687 // ...
16688 // AddNode = (A + c1) <-- MulVar is A.
16689 // = AddNode * ConstNode <-- current visiting instruction.
16690 //
16691 // If we make this transformation, we will have a common
16692 // multiply (ConstNode * A) that we can save.
16693 if (OtherOp == MulVar)
16694 return true;
16695
16696 // Now check to see if a future expansion will give us a common
16697 // multiply.
16698 //
16699 // ConstNode = CONST
16700 // AddNode = (A + c1)
16701 // ... = AddNode * ConstNode <-- current visiting instruction.
16702 // ...
16703 // OtherOp = (A + c2)
16704 // Use = OtherOp * ConstNode <-- visiting Use.
16705 //
16706 // If we make this transformation, we will have a common
16707 // multiply (CONST * A) after we also do the same transformation
16708 // to the "Use" instruction.
16709 if (OtherOp->getOpcode() == ISD::ADD &&
16710 DAG.isConstantIntBuildVectorOrConstantInt(OtherOp->getOperand(1)) &&
16711 OtherOp->getOperand(0).getNode() == MulVar)
16712 return true;
16713 }
16714 }
16715
16716 // Didn't find a case where this would be profitable.
16717 return false;
16718}
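
A worked illustration of the second case above (the constants are arbitrary): distributing both multiplies leaves one shared multiply, so the transform does not increase the multiply count even though the add gets duplicated.

#include <cassert>

int main() {
  int A = 7;
  // Before: two multiplies that both use the constant 4.
  int T1 = (A + 10) * 4;
  int T2 = (A + 20) * 4;
  // After distributing both: one shared multiply, constants folded.
  int M = A * 4;
  assert(T1 == M + 40);  // 10 * 4 == 40
  assert(T2 == M + 80);  // 20 * 4 == 80
  return 0;
}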
16719
16720SDValue DAGCombiner::getMergeStoreChains(SmallVectorImpl<MemOpLink> &StoreNodes,
16721 unsigned NumStores) {
16722 SmallVector<SDValue, 8> Chains;
16723 SmallPtrSet<const SDNode *, 8> Visited;
16724 SDLoc StoreDL(StoreNodes[0].MemNode);
16725
16726 for (unsigned i = 0; i < NumStores; ++i) {
16727 Visited.insert(StoreNodes[i].MemNode);
16728 }
16729
16730 // Don't include chains that are candidate stores themselves or repeated nodes.
16731 for (unsigned i = 0; i < NumStores; ++i) {
16732 if (Visited.insert(StoreNodes[i].MemNode->getChain().getNode()).second)
16733 Chains.push_back(StoreNodes[i].MemNode->getChain());
16734 }
16735
16736 assert(Chains.size() > 0 && "Chain should have generated a chain");
16737 return DAG.getTokenFactor(StoreDL, Chains);
16738}
16739
16740bool DAGCombiner::mergeStoresOfConstantsOrVecElts(
16741 SmallVectorImpl<MemOpLink> &StoreNodes, EVT MemVT, unsigned NumStores,
16742 bool IsConstantSrc, bool UseVector, bool UseTrunc) {
16743 // Make sure we have something to merge.
16744 if (NumStores < 2)
16745 return false;
16746
16747 // The latest Node in the DAG.
16748 SDLoc DL(StoreNodes[0].MemNode);
16749
16750 TypeSize ElementSizeBits = MemVT.getStoreSizeInBits();
16751 unsigned SizeInBits = NumStores * ElementSizeBits;
16752 unsigned NumMemElts = MemVT.isVector() ? MemVT.getVectorNumElements() : 1;
16753
16754 EVT StoreTy;
16755 if (UseVector) {
16756 unsigned Elts = NumStores * NumMemElts;
16757 // Get the type for the merged vector store.
16758 StoreTy = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(), Elts);
16759 } else
16760 StoreTy = EVT::getIntegerVT(*DAG.getContext(), SizeInBits);
16761
16762 SDValue StoredVal;
16763 if (UseVector) {
16764 if (IsConstantSrc) {
16765 SmallVector<SDValue, 8> BuildVector;
16766 for (unsigned I = 0; I != NumStores; ++I) {
16767 StoreSDNode *St = cast<StoreSDNode>(StoreNodes[I].MemNode);
16768 SDValue Val = St->getValue();
16769 // If constant is of the wrong type, convert it now.
16770 if (MemVT != Val.getValueType()) {
16771 Val = peekThroughBitcasts(Val);
16772 // Deal with constants of wrong size.
16773 if (ElementSizeBits != Val.getValueSizeInBits()) {
16774 EVT IntMemVT =
16775 EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits());
16776 if (isa<ConstantFPSDNode>(Val)) {
16777 // Not clear how to truncate FP values.
16778 return false;
16779 } else if (auto *C = dyn_cast<ConstantSDNode>(Val))
16780 Val = DAG.getConstant(C->getAPIntValue()
16781 .zextOrTrunc(Val.getValueSizeInBits())
16782 .zextOrTrunc(ElementSizeBits),
16783 SDLoc(C), IntMemVT);
16784 }
16785 // Bitcast the now correctly sized value to the memory type.
16786 Val = DAG.getBitcast(MemVT, Val);
16787 }
16788 BuildVector.push_back(Val);
16789 }
16790 StoredVal = DAG.getNode(MemVT.isVector() ? ISD::CONCAT_VECTORS
16791 : ISD::BUILD_VECTOR,
16792 DL, StoreTy, BuildVector);
16793 } else {
16794 SmallVector<SDValue, 8> Ops;
16795 for (unsigned i = 0; i < NumStores; ++i) {
16796 StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode);
16797 SDValue Val = peekThroughBitcasts(St->getValue());
16798 // All operands of BUILD_VECTOR / CONCAT_VECTOR must be of
16799 // type MemVT. If the underlying value is not the correct
16800 // type, but it is an extraction of an appropriate vector we
16801 // can recast Val to be of the correct type. This may require
16802 // converting between EXTRACT_VECTOR_ELT and
16803 // EXTRACT_SUBVECTOR.
16804 if ((MemVT != Val.getValueType()) &&
16805 (Val.getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
16806 Val.getOpcode() == ISD::EXTRACT_SUBVECTOR)) {
16807 EVT MemVTScalarTy = MemVT.getScalarType();
16808 // We may need to add a bitcast here to get types to line up.
16809 if (MemVTScalarTy != Val.getValueType().getScalarType()) {
16810 Val = DAG.getBitcast(MemVT, Val);
16811 } else {
16812 unsigned OpC = MemVT.isVector() ? ISD::EXTRACT_SUBVECTOR
16813 : ISD::EXTRACT_VECTOR_ELT;
16814 SDValue Vec = Val.getOperand(0);
16815 SDValue Idx = Val.getOperand(1);
16816 Val = DAG.getNode(OpC, SDLoc(Val), MemVT, Vec, Idx);
16817 }
16818 }
16819 Ops.push_back(Val);
16820 }
16821
16822 // Build the extracted vector elements back into a vector.
16823 StoredVal = DAG.getNode(MemVT.isVector() ? ISD::CONCAT_VECTORS
16824 : ISD::BUILD_VECTOR,
16825 DL, StoreTy, Ops);
16826 }
16827 } else {
16828 // We should always use a vector store when merging extracted vector
16829 // elements, so this path implies a store of constants.
16830 assert(IsConstantSrc && "Merged vector elements should use vector store");
16831
16832 APInt StoreInt(SizeInBits, 0);
16833
16834 // Construct a single integer constant which is made of the smaller
16835 // constant inputs.
16836 bool IsLE = DAG.getDataLayout().isLittleEndian();
16837 for (unsigned i = 0; i < NumStores; ++i) {
16838 unsigned Idx = IsLE ? (NumStores - 1 - i) : i;
16839 StoreSDNode *St = cast<StoreSDNode>(StoreNodes[Idx].MemNode);
16840
16841 SDValue Val = St->getValue();
16842 Val = peekThroughBitcasts(Val);
16843 StoreInt <<= ElementSizeBits;
16844 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val)) {
16845 StoreInt |= C->getAPIntValue()
16846 .zextOrTrunc(ElementSizeBits)
16847 .zextOrTrunc(SizeInBits);
16848 } else if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Val)) {
16849 StoreInt |= C->getValueAPF()
16850 .bitcastToAPInt()
16851 .zextOrTrunc(ElementSizeBits)
16852 .zextOrTrunc(SizeInBits);
16853 // If fp truncation is necessary, give up for now.
16854 if (MemVT.getSizeInBits() != ElementSizeBits)
16855 return false;
16856 } else {
16857 llvm_unreachable("Invalid constant element type");
16858 }
16859 }
16860
16861 // Create the new Load and Store operations.
16862 StoredVal = DAG.getConstant(StoreInt, DL, StoreTy);
16863 }
16864
16865 LSBaseSDNode *FirstInChain = StoreNodes[0].MemNode;
16866 SDValue NewChain = getMergeStoreChains(StoreNodes, NumStores);
16867
16868 // Make sure we use a trunc store if it's necessary for the store to be legal.
16869 SDValue NewStore;
16870 if (!UseTrunc) {
16871 NewStore =
16872 DAG.getStore(NewChain, DL, StoredVal, FirstInChain->getBasePtr(),
16873 FirstInChain->getPointerInfo(), FirstInChain->getAlign());
16874 } else { // Must be realized as a trunc store
16875 EVT LegalizedStoredValTy =
16876 TLI.getTypeToTransformTo(*DAG.getContext(), StoredVal.getValueType());
16877 unsigned LegalizedStoreSize = LegalizedStoredValTy.getSizeInBits();
16878 ConstantSDNode *C = cast<ConstantSDNode>(StoredVal);
16879 SDValue ExtendedStoreVal =
16880 DAG.getConstant(C->getAPIntValue().zextOrTrunc(LegalizedStoreSize), DL,
16881 LegalizedStoredValTy);
16882 NewStore = DAG.getTruncStore(
16883 NewChain, DL, ExtendedStoreVal, FirstInChain->getBasePtr(),
16884 FirstInChain->getPointerInfo(), StoredVal.getValueType() /*TVT*/,
16885 FirstInChain->getAlign(), FirstInChain->getMemOperand()->getFlags());
16886 }
16887
16888 // Replace all merged stores with the new store.
16889 for (unsigned i = 0; i < NumStores; ++i)
16890 CombineTo(StoreNodes[i].MemNode, NewStore);
16891
16892 AddToWorklist(NewChain.getNode());
16893 return true;
16894}
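
The integer-constant path above packs the individual values by shifting from the highest-addressed store down, so that on little-endian targets the lowest-addressed store lands in the low bits. A standalone sketch (plain C++, not the APInt API) with four i8 stores of {0x11, 0x22, 0x33, 0x44} in address order:

#include <cassert>
#include <cstdint>

// Pack four one-byte constants (given in address order) into one i32,
// mirroring the shift/or loop above.
uint32_t packConstants(const uint8_t Vals[4], bool IsLE) {
  uint32_t StoreInt = 0;
  for (unsigned I = 0; I != 4; ++I) {
    unsigned Idx = IsLE ? (4 - 1 - I) : I;  // Highest address first on LE.
    StoreInt <<= 8;
    StoreInt |= Vals[Idx];
  }
  return StoreInt;
}

int main() {
  const uint8_t Vals[4] = {0x11, 0x22, 0x33, 0x44};
  assert(packConstants(Vals, /*IsLE=*/true) == 0x44332211);   // byte 0 in low bits
  assert(packConstants(Vals, /*IsLE=*/false) == 0x11223344);  // byte 0 in high bits
  return 0;
}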
16895
16896void DAGCombiner::getStoreMergeCandidates(
16897 StoreSDNode *St, SmallVectorImpl<MemOpLink> &StoreNodes,
16898 SDNode *&RootNode) {
16899 // This holds the base pointer, index, and the offset in bytes from the base
16900 // pointer. We must have a base and an offset. Do not handle stores to undef
16901 // base pointers.
16902 BaseIndexOffset BasePtr = BaseIndexOffset::match(St, DAG);
16903 if (!BasePtr.getBase().getNode() || BasePtr.getBase().isUndef())
16904 return;
16905
16906 SDValue Val = peekThroughBitcasts(St->getValue());
16907 StoreSource StoreSrc = getStoreSource(Val);
16908 assert(StoreSrc != StoreSource::Unknown && "Expected known source for store");
16909
16910 // Match on loadbaseptr if relevant.
16911 EVT MemVT = St->getMemoryVT();
16912 BaseIndexOffset LBasePtr;
16913 EVT LoadVT;
16914 if (StoreSrc == StoreSource::Load) {
16915 auto *Ld = cast<LoadSDNode>(Val);
16916 LBasePtr = BaseIndexOffset::match(Ld, DAG);
16917 LoadVT = Ld->getMemoryVT();
16918 // Load and store should be the same type.
16919 if (MemVT != LoadVT)
16920 return;
16921 // Loads must only have one use.
16922 if (!Ld->hasNUsesOfValue(1, 0))
16923 return;
16924 // The memory operands must not be volatile/indexed/atomic.
16925 // TODO: May be able to relax for unordered atomics (see D66309)
16926 if (!Ld->isSimple() || Ld->isIndexed())
16927 return;
16928 }
16929 auto CandidateMatch = [&](StoreSDNode *Other, BaseIndexOffset &Ptr,
16930 int64_t &Offset) -> bool {
16931 // The memory operands must not be volatile/indexed/atomic.
16932 // TODO: May be able to relax for unordered atomics (see D66309)
16933 if (!Other->isSimple() || Other->isIndexed())
16934 return false;
16935 // Don't mix temporal stores with non-temporal stores.
16936 if (St->isNonTemporal() != Other->isNonTemporal())
16937 return false;
16938 SDValue OtherBC = peekThroughBitcasts(Other->getValue());
16939 // Allow merging constants of different types as integers.
16940 bool NoTypeMatch = (MemVT.isInteger()) ? !MemVT.bitsEq(Other->getMemoryVT())
16941 : Other->getMemoryVT() != MemVT;
16942 switch (StoreSrc) {
16943 case StoreSource::Load: {
16944 if (NoTypeMatch)
16945 return false;
16946 // The Load's Base Ptr must also match.
16947 auto *OtherLd = dyn_cast<LoadSDNode>(OtherBC);
16948 if (!OtherLd)
16949 return false;
16950 BaseIndexOffset LPtr = BaseIndexOffset::match(OtherLd, DAG);
16951 if (LoadVT != OtherLd->getMemoryVT())
16952 return false;
16953 // Loads must only have one use.
16954 if (!OtherLd->hasNUsesOfValue(1, 0))
16955 return false;
16956 // The memory operands must not be volatile/indexed/atomic.
16957 // TODO: May be able to relax for unordered atomics (see D66309)
16958 if (!OtherLd->isSimple() || OtherLd->isIndexed())
16959 return false;
16960 // Don't mix temporal loads with non-temporal loads.
16961 if (cast<LoadSDNode>(Val)->isNonTemporal() != OtherLd->isNonTemporal())
16962 return false;
16963 if (!(LBasePtr.equalBaseIndex(LPtr, DAG)))
16964 return false;
16965 break;
16966 }
16967 case StoreSource::Constant:
16968 if (NoTypeMatch)
16969 return false;
16970 if (!isIntOrFPConstant(OtherBC))
16971 return false;
16972 break;
16973 case StoreSource::Extract:
16974 // Do not merge truncated stores here.
16975 if (Other->isTruncatingStore())
16976 return false;
16977 if (!MemVT.bitsEq(OtherBC.getValueType()))
16978 return false;
16979 if (OtherBC.getOpcode() != ISD::EXTRACT_VECTOR_ELT &&
16980 OtherBC.getOpcode() != ISD::EXTRACT_SUBVECTOR)
16981 return false;
16982 break;
16983 default:
16984 llvm_unreachable("Unhandled store source for merging");
16985 }
16986 Ptr = BaseIndexOffset::match(Other, DAG);
16987 return (BasePtr.equalBaseIndex(Ptr, DAG, Offset));
16988 };
16989
16990 // Check if the pair of StoreNode and the RootNode already bail out many
16991 // times which is over the limit in dependence check.
16992 auto OverLimitInDependenceCheck = [&](SDNode *StoreNode,
16993 SDNode *RootNode) -> bool {
16994 auto RootCount = StoreRootCountMap.find(StoreNode);
16995 return RootCount != StoreRootCountMap.end() &&
16996 RootCount->second.first == RootNode &&
16997 RootCount->second.second > StoreMergeDependenceLimit;
16998 };
16999
17000 auto TryToAddCandidate = [&](SDNode::use_iterator UseIter) {
17001 // This must be a chain use.
17002 if (UseIter.getOperandNo() != 0)
17003 return;
17004 if (auto *OtherStore = dyn_cast<StoreSDNode>(*UseIter)) {
17005 BaseIndexOffset Ptr;
17006 int64_t PtrDiff;
17007 if (CandidateMatch(OtherStore, Ptr, PtrDiff) &&
17008 !OverLimitInDependenceCheck(OtherStore, RootNode))
17009 StoreNodes.push_back(MemOpLink(OtherStore, PtrDiff));
17010 }
17011 };
17012
17013 // We are looking for a root node that is an ancestor of all mergeable
17014 // stores. We search up through a load, to our root, and then down
17015 // through all children. For instance, we will find Store{1,2,3} if
17016 // St is Store1, Store2, or Store3 where the root is not a load,
17017 // which is always true for nonvolatile ops. TODO: Expand
17018 // the search to find all valid candidates through multiple layers of loads.
17019 //
17020 // Root
17021 // |-------|-------|
17022 // Load Load Store3
17023 // | |
17024 // Store1 Store2
17025 //
17026 // FIXME: We should be able to climb and
17027 // descend TokenFactors to find candidates as well.
17028
17029 RootNode = St->getChain().getNode();
17030
17031 unsigned NumNodesExplored = 0;
17032 const unsigned MaxSearchNodes = 1024;
17033 if (auto *Ldn = dyn_cast<LoadSDNode>(RootNode)) {
17034 RootNode = Ldn->getChain().getNode();
17035 for (auto I = RootNode->use_begin(), E = RootNode->use_end();
17036 I != E && NumNodesExplored < MaxSearchNodes; ++I, ++NumNodesExplored) {
17037 if (I.getOperandNo() == 0 && isa<LoadSDNode>(*I)) { // walk down chain
17038 for (auto I2 = (*I)->use_begin(), E2 = (*I)->use_end(); I2 != E2; ++I2)
17039 TryToAddCandidate(I2);
17040 }
17041 }
17042 } else {
17043 for (auto I = RootNode->use_begin(), E = RootNode->use_end();
17044 I != E && NumNodesExplored < MaxSearchNodes; ++I, ++NumNodesExplored)
17045 TryToAddCandidate(I);
17046 }
17047}
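
The CandidateMatch predicate above ultimately reduces to an equal-base test plus a constant byte delta. A toy standalone analogue of that final check (plain C++, not the BaseIndexOffset API; the struct is a deliberate simplification):

#include <cassert>
#include <cstdint>

// Two addresses are merge candidates only when they share a base and
// differ by a compile-time-constant byte offset.
struct Addr {
  const void *Base;
  int64_t Offset;
};

bool candidateMatch(const Addr &A, const Addr &B, int64_t &Delta) {
  if (A.Base != B.Base)
    return false;  // Analogue of equalBaseIndex failing: relation unknown.
  Delta = B.Offset - A.Offset;
  return true;
}

int main() {
  char Buf[16];
  int64_t Delta = 0;
  assert(candidateMatch({Buf, 0}, {Buf, 4}, Delta) && Delta == 4);
  return 0;
}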
17048
17049// We need to check that merging these stores does not cause a loop in
17050// the DAG. Any store candidate may depend on another candidate
17051// indirectly through its operand (we already consider dependencies
17052// through the chain). Check in parallel by searching up from
17053// non-chain operands of candidates.
17054bool DAGCombiner::checkMergeStoreCandidatesForDependencies(
17055 SmallVectorImpl<MemOpLink> &StoreNodes, unsigned NumStores,
17056 SDNode *RootNode) {
17057 // FIXME: We should be able to truncate a full search of
17058 // predecessors by doing a BFS and keeping tabs on which originating
17059 // stores the worklist nodes come from, in a similar way to
17060 // TokenFactor simplification.
17061
17062 SmallPtrSet<const SDNode *, 32> Visited;
17063 SmallVector<const SDNode *, 8> Worklist;
17064
17065 // RootNode is a predecessor to all candidates so we need not search
17066 // past it. Add RootNode (peeking through TokenFactors). Do not count
17067 // these towards the size check.
17068
17069 Worklist.push_back(RootNode);
17070 while (!Worklist.empty()) {
17071 auto N = Worklist.pop_back_val();
17072 if (!Visited.insert(N).second)
17073 continue; // Already present in Visited.
17074 if (N->getOpcode() == ISD::TokenFactor) {
17075 for (SDValue Op : N->ops())
17076 Worklist.push_back(Op.getNode());
17077 }
17078 }
17079
17080 // Don't count pruning nodes towards max.
17081 unsigned int Max = 1024 + Visited.size();
17082 // Search Ops of store candidates.
17083 for (unsigned i = 0; i < NumStores; ++i) {
17084 SDNode *N = StoreNodes[i].MemNode;
17085 // Of the 4 Store Operands:
17086 // * Chain (Op 0) -> We have already considered these
17087 // in candidate selection and can be
17088 // safely ignored
17089 // * Value (Op 1) -> Cycles may happen (e.g. through load chains)
17090 // * Address (Op 2) -> Merged addresses may only vary by a fixed constant,
17091 // but aren't necessarily from the same base node, so
17092 // cycles are possible (e.g. via an indexed store).
17093 // * (Op 3) -> Represents the pre or post-indexing offset (or undef for
17094 // non-indexed stores). Not constant on all targets (e.g. ARM)
17095 // and so can participate in a cycle.
17096 for (unsigned j = 1; j < N->getNumOperands(); ++j)
17097 Worklist.push_back(N->getOperand(j).getNode());
17098 }
17099 // Search through DAG. We can stop early if we find a store node.
17100 for (unsigned i = 0; i < NumStores; ++i)
17101 if (SDNode::hasPredecessorHelper(StoreNodes[i].MemNode, Visited, Worklist,
17102 Max)) {
17103 // If the search bails out, record the StoreNode and RootNode in the
17104 // StoreRootCountMap. If we have seen this pair more times than the limit,
17105 // we won't add the StoreNode into the StoreNodes set again.
17106 if (Visited.size() >= Max) {
17107 auto &RootCount = StoreRootCountMap[StoreNodes[i].MemNode];
17108 if (RootCount.first == RootNode)
17109 RootCount.second++;
17110 else
17111 RootCount = {RootNode, 1};
17112 }
17113 return false;
17114 }
17115 return true;
17116}
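
The check above is essentially a bounded reachability query: starting from the candidates' non-chain operands, if any candidate store is reachable, merging would make the new store its own ancestor. A minimal standalone analogue over a generic DAG (plain C++, not the SDNode API, and without the node budget the real code enforces):

#include <set>
#include <vector>

struct Node {
  std::vector<Node *> Ops;  // Operand edges, pointing toward predecessors.
};

// Depth-first walk from N; returns true if any node in Targets is reached.
bool reachesAny(Node *N, const std::set<Node *> &Targets,
                std::set<Node *> &Visited) {
  if (Targets.count(N))
    return true;
  if (!Visited.insert(N).second)
    return false;  // Already explored and known not to reach a target.
  for (Node *Op : N->Ops)
    if (reachesAny(Op, Targets, Visited))
      return true;
  return false;
}

// Merging is safe only if no candidate store is a predecessor of another
// through a non-chain operand (chain edges were already vetted).
bool safeToMerge(const std::vector<Node *> &Stores) {
  std::set<Node *> Targets(Stores.begin(), Stores.end());
  std::set<Node *> Visited;
  for (Node *St : Stores)
    for (Node *Op : St->Ops)  // The real code skips operand 0, the chain.
      if (reachesAny(Op, Targets, Visited))
        return false;
  return true;
}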
17117
17118unsigned
17119DAGCombiner::getConsecutiveStores(SmallVectorImpl<MemOpLink> &StoreNodes,
17120 int64_t ElementSizeBytes) const {
17121 while (true) {
17122 // Find a store past the width of the first store.
17123 size_t StartIdx = 0;
17124 while ((StartIdx + 1 < StoreNodes.size()) &&
17125 StoreNodes[StartIdx].OffsetFromBase + ElementSizeBytes !=
17126 StoreNodes[StartIdx + 1].OffsetFromBase)
17127 ++StartIdx;
17128
17129 // Bail if we don't have enough candidates to merge.
17130 if (StartIdx + 1 >= StoreNodes.size())
17131 return 0;
17132
17133 // Trim stores that overlapped with the first store.
17134 if (StartIdx)
17135 StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + StartIdx);
17136
17137 // Scan the memory operations on the chain and find the first
17138 // non-consecutive store memory address.
17139 unsigned NumConsecutiveStores = 1;
17140 int64_t StartAddress = StoreNodes[0].OffsetFromBase;
17141 // Check that the addresses are consecutive starting from the second
17142 // element in the list of stores.
17143 for (unsigned i = 1, e = StoreNodes.size(); i < e; ++i) {
17144 int64_t CurrAddress = StoreNodes[i].OffsetFromBase;
17145 if (CurrAddress - StartAddress != (ElementSizeBytes * i))
17146 break;
17147 NumConsecutiveStores = i + 1;
17148 }
17149 if (NumConsecutiveStores > 1)
17150 return NumConsecutiveStores;
17151
17152 // There are no consecutive stores at the start of the list.
17153 // Remove the first store and try again.
17154 StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + 1);
17155 }
17156}
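
For example, with 8-byte elements and sorted offsets {0, 8, 16, 40}, the scan above returns a run of three stores; offset 40 breaks the sequence because 40 - 0 != 8 * 3. A compact sketch of that inner scan:

#include <cassert>
#include <cstdint>
#include <vector>

// Length of the leading run of consecutive offsets (sorted ascending).
unsigned consecutiveRun(const std::vector<int64_t> &Offsets,
                        int64_t ElementSizeBytes) {
  unsigned NumConsecutive = 1;
  for (unsigned I = 1, E = Offsets.size(); I != E; ++I) {
    if (Offsets[I] - Offsets[0] != ElementSizeBytes * (int64_t)I)
      break;
    NumConsecutive = I + 1;
  }
  return NumConsecutive;
}

int main() {
  assert(consecutiveRun({0, 8, 16, 40}, 8) == 3);
  assert(consecutiveRun({0, 16}, 8) == 1);  // Gap right away: no run.
  return 0;
}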
17157
17158bool DAGCombiner::tryStoreMergeOfConstants(
17159 SmallVectorImpl<MemOpLink> &StoreNodes, unsigned NumConsecutiveStores,
17160 EVT MemVT, SDNode *RootNode, bool AllowVectors) {
17161 LLVMContext &Context = *DAG.getContext();
17162 const DataLayout &DL = DAG.getDataLayout();
17163 int64_t ElementSizeBytes = MemVT.getStoreSize();
17164 unsigned NumMemElts = MemVT.isVector() ? MemVT.getVectorNumElements() : 1;
17165 bool MadeChange = false;
17166
17167 // Store the constants into memory as one consecutive store.
17168 while (NumConsecutiveStores >= 2) {
17169 LSBaseSDNode *FirstInChain = StoreNodes[0].MemNode;
17170 unsigned FirstStoreAS = FirstInChain->getAddressSpace();
17171 unsigned FirstStoreAlign = FirstInChain->getAlignment();
17172 unsigned LastLegalType = 1;
17173 unsigned LastLegalVectorType = 1;
17174 bool LastIntegerTrunc = false;
17175 bool NonZero = false;
17176 unsigned FirstZeroAfterNonZero = NumConsecutiveStores;
17177 for (unsigned i = 0; i < NumConsecutiveStores; ++i) {
17178 StoreSDNode *ST = cast<StoreSDNode>(StoreNodes[i].MemNode);
17179 SDValue StoredVal = ST->getValue();
17180 bool IsElementZero = false;
17181 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(StoredVal))
17182 IsElementZero = C->isNullValue();
17183 else if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(StoredVal))
17184 IsElementZero = C->getConstantFPValue()->isNullValue();
17185 if (IsElementZero) {
17186 if (NonZero && FirstZeroAfterNonZero == NumConsecutiveStores)
17187 FirstZeroAfterNonZero = i;
17188 }
17189 NonZero |= !IsElementZero;
17190
17191 // Find a legal type for the constant store.
17192 unsigned SizeInBits = (i + 1) * ElementSizeBytes * 8;
17193 EVT StoreTy = EVT::getIntegerVT(Context, SizeInBits);
17194 bool IsFast = false;
17195
17196 // Break early when size is too large to be legal.
17197 if (StoreTy.getSizeInBits() > MaximumLegalStoreInBits)
17198 break;
17199
17200 if (TLI.isTypeLegal(StoreTy) &&
17201 TLI.canMergeStoresTo(FirstStoreAS, StoreTy, DAG) &&
17202 TLI.allowsMemoryAccess(Context, DL, StoreTy,
17203 *FirstInChain->getMemOperand(), &IsFast) &&
17204 IsFast) {
17205 LastIntegerTrunc = false;
17206 LastLegalType = i + 1;
17207 // Or check whether a truncstore is legal.
17208 } else if (TLI.getTypeAction(Context, StoreTy) ==
17209 TargetLowering::TypePromoteInteger) {
17210 EVT LegalizedStoredValTy =
17211 TLI.getTypeToTransformTo(Context, StoredVal.getValueType());
17212 if (TLI.isTruncStoreLegal(LegalizedStoredValTy, StoreTy) &&
17213 TLI.canMergeStoresTo(FirstStoreAS, LegalizedStoredValTy, DAG) &&
17214 TLI.allowsMemoryAccess(Context, DL, StoreTy,
17215 *FirstInChain->getMemOperand(), &IsFast) &&
17216 IsFast) {
17217 LastIntegerTrunc = true;
17218 LastLegalType = i + 1;
17219 }
17220 }
17221
17222 // We only use vectors if the constant is known to be zero or the
17223 // target allows it and the function is not marked with the
17224 // noimplicitfloat attribute.
17225 if ((!NonZero ||
17226 TLI.storeOfVectorConstantIsCheap(MemVT, i + 1, FirstStoreAS)) &&
17227 AllowVectors) {
17228 // Find a legal type for the vector store.
17229 unsigned Elts = (i + 1) * NumMemElts;
17230 EVT Ty = EVT::getVectorVT(Context, MemVT.getScalarType(), Elts);
17231 if (TLI.isTypeLegal(Ty) && TLI.isTypeLegal(MemVT) &&
17232 TLI.canMergeStoresTo(FirstStoreAS, Ty, DAG) &&
17233 TLI.allowsMemoryAccess(Context, DL, Ty,
17234 *FirstInChain->getMemOperand(), &IsFast) &&
17235 IsFast)
17236 LastLegalVectorType = i + 1;
17237 }
17238 }
17239
17240 bool UseVector = (LastLegalVectorType > LastLegalType) && AllowVectors;
17241 unsigned NumElem = (UseVector) ? LastLegalVectorType : LastLegalType;
17242
17243 // Check if we found a legal integer type that creates a meaningful
17244 // merge.
17245 if (NumElem < 2) {
17246 // We know that candidate stores are in order and of correct
17247 // shape. While there is no mergeable sequence from the
17248 // beginning, one may start later in the sequence. The only
17249 // reason a merge of size N could have failed where another of
17250 // the same size would not have, is if the alignment has
17251 // improved or we've dropped a non-zero value. Drop as many
17252 // candidates as we can here.
17253 unsigned NumSkip = 1;
17254 while ((NumSkip < NumConsecutiveStores) &&
17255 (NumSkip < FirstZeroAfterNonZero) &&
17256 (StoreNodes[NumSkip].MemNode->getAlignment() <= FirstStoreAlign))
17257 NumSkip++;
17258
17259 StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + NumSkip);
17260 NumConsecutiveStores -= NumSkip;
17261 continue;
17262 }
17263
17264 // Check that we can merge these candidates without causing a cycle.
17265 if (!checkMergeStoreCandidatesForDependencies(StoreNodes, NumElem,
17266 RootNode)) {
17267 StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + NumElem);
17268 NumConsecutiveStores -= NumElem;
17269 continue;
17270 }
17271
17272 MadeChange |= mergeStoresOfConstantsOrVecElts(
17273 StoreNodes, MemVT, NumElem, true, UseVector, LastIntegerTrunc);
17274
17275 // Remove merged stores for next iteration.
17276 StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + NumElem);
17277 NumConsecutiveStores -= NumElem;
17278 }
17279 return MadeChange;
17280}
17281
17282bool DAGCombiner::tryStoreMergeOfExtracts(
17283 SmallVectorImpl<MemOpLink> &StoreNodes, unsigned NumConsecutiveStores,
17284 EVT MemVT, SDNode *RootNode) {
17285 LLVMContext &Context = *DAG.getContext();
17286 const DataLayout &DL = DAG.getDataLayout();
17287 unsigned NumMemElts = MemVT.isVector() ? MemVT.getVectorNumElements() : 1;
17288 bool MadeChange = false;
17289
17290 // Loop over the consecutive stores while merging succeeds.
17291 while (NumConsecutiveStores >= 2) {
17292 LSBaseSDNode *FirstInChain = StoreNodes[0].MemNode;
17293 unsigned FirstStoreAS = FirstInChain->getAddressSpace();
17294 unsigned FirstStoreAlign = FirstInChain->getAlignment();
17295 unsigned NumStoresToMerge = 1;
17296 for (unsigned i = 0; i < NumConsecutiveStores; ++i) {
17297 // Find a legal type for the vector store.
17298 unsigned Elts = (i + 1) * NumMemElts;
17299 EVT Ty = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(), Elts);
17300 bool IsFast = false;
17301
17302 // Break early when size is too large to be legal.
17303 if (Ty.getSizeInBits() > MaximumLegalStoreInBits)
17304 break;
17305
17306 if (TLI.isTypeLegal(Ty) && TLI.canMergeStoresTo(FirstStoreAS, Ty, DAG) &&
17307 TLI.allowsMemoryAccess(Context, DL, Ty,
17308 *FirstInChain->getMemOperand(), &IsFast) &&
17309 IsFast)
17310 NumStoresToMerge = i + 1;
17311 }
17312
17313 // Check if we found a legal vector type that creates a meaningful
17314 // merge.
17315 if (NumStoresToMerge < 2) {
17316 // We know that candidate stores are in order and of correct
17317 // shape. While there is no mergeable sequence from the
17318 // beginning, one may start later in the sequence. The only
17319 // reason a merge of size N could have failed where another of
17320 // the same size would not have, is if the alignment has
17321 // improved. Drop as many candidates as we can here.
17322 unsigned NumSkip = 1;
17323 while ((NumSkip < NumConsecutiveStores) &&
17324 (StoreNodes[NumSkip].MemNode->getAlignment() <= FirstStoreAlign))
17325 NumSkip++;
17326
17327 StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + NumSkip);
17328 NumConsecutiveStores -= NumSkip;
17329 continue;
17330 }
17331
17332 // Check that we can merge these candidates without causing a cycle.
17333 if (!checkMergeStoreCandidatesForDependencies(StoreNodes, NumStoresToMerge,
17334 RootNode)) {
17335 StoreNodes.erase(StoreNodes.begin(),
17336 StoreNodes.begin() + NumStoresToMerge);
17337 NumConsecutiveStores -= NumStoresToMerge;
17338 continue;
17339 }
17340
17341 MadeChange |= mergeStoresOfConstantsOrVecElts(
17342 StoreNodes, MemVT, NumStoresToMerge, false, true, false);
17343
17344 StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + NumStoresToMerge);
17345 NumConsecutiveStores -= NumStoresToMerge;
17346 }
17347 return MadeChange;
17348}
17349
17350bool DAGCombiner::tryStoreMergeOfLoads(SmallVectorImpl<MemOpLink> &StoreNodes,
17351 unsigned NumConsecutiveStores, EVT MemVT,
17352 SDNode *RootNode, bool AllowVectors,
17353 bool IsNonTemporalStore,
17354 bool IsNonTemporalLoad) {
17355 LLVMContext &Context = *DAG.getContext();
17356 const DataLayout &DL = DAG.getDataLayout();
17357 int64_t ElementSizeBytes = MemVT.getStoreSize();
17358 unsigned NumMemElts = MemVT.isVector() ? MemVT.getVectorNumElements() : 1;
17359 bool MadeChange = false;
17360
17361 // Look for load nodes which are used by the stored values.
17362 SmallVector<MemOpLink, 8> LoadNodes;
17363
17364 // Find acceptable loads. Loads need to have the same chain (token factor),
17365 // must not be zext, volatile, or indexed, and they must be consecutive.
17366 BaseIndexOffset LdBasePtr;
17367
17368 for (unsigned i = 0; i < NumConsecutiveStores; ++i) {
17369 StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode);
17370 SDValue Val = peekThroughBitcasts(St->getValue());
17371 LoadSDNode *Ld = cast<LoadSDNode>(Val);
17372
17373 BaseIndexOffset LdPtr = BaseIndexOffset::match(Ld, DAG);
17374 // If this is not the first ptr that we check.
17375 int64_t LdOffset = 0;
17376 if (LdBasePtr.getBase().getNode()) {
17377 // The base ptr must be the same.
17378 if (!LdBasePtr.equalBaseIndex(LdPtr, DAG, LdOffset))
17379 break;
17380 } else {
17381 // Check that all other base pointers are the same as this one.
17382 LdBasePtr = LdPtr;
17383 }
17384
17385 // We found a potential memory operand to merge.
17386 LoadNodes.push_back(MemOpLink(Ld, LdOffset));
17387 }
17388
17389 while (NumConsecutiveStores >= 2 && LoadNodes.size() >= 2) {
17390 Align RequiredAlignment;
17391 bool NeedRotate = false;
17392 if (LoadNodes.size() == 2) {
17393 // If we have load/store pair instructions and we only have two values,
17394 // don't bother merging.
17395 if (TLI.hasPairedLoad(MemVT, RequiredAlignment) &&
17396 StoreNodes[0].MemNode->getAlign() >= RequiredAlignment) {
17397 StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + 2);
17398 LoadNodes.erase(LoadNodes.begin(), LoadNodes.begin() + 2);
17399 break;
17400 }
17401 // If the loads are reversed, see if we can rotate the halves into place.
17402 int64_t Offset0 = LoadNodes[0].OffsetFromBase;
17403 int64_t Offset1 = LoadNodes[1].OffsetFromBase;
17404 EVT PairVT = EVT::getIntegerVT(Context, ElementSizeBytes * 8 * 2);
17405 if (Offset0 - Offset1 == ElementSizeBytes &&
17406 (hasOperation(ISD::ROTL, PairVT) ||
17407 hasOperation(ISD::ROTR, PairVT))) {
17408 std::swap(LoadNodes[0], LoadNodes[1]);
17409 NeedRotate = true;
17410 }
17411 }
17412 LSBaseSDNode *FirstInChain = StoreNodes[0].MemNode;
17413 unsigned FirstStoreAS = FirstInChain->getAddressSpace();
17414 Align FirstStoreAlign = FirstInChain->getAlign();
17415 LoadSDNode *FirstLoad = cast<LoadSDNode>(LoadNodes[0].MemNode);
17416
17417 // Scan the memory operations on the chain and find the first
17418 // non-consecutive load memory address. These variables hold the index in
17419 // the store node array.
17420
17421 unsigned LastConsecutiveLoad = 1;
17422
17423 // These variables refer to a size, not an index in the array.
17424 unsigned LastLegalVectorType = 1;
17425 unsigned LastLegalIntegerType = 1;
17426 bool isDereferenceable = true;
17427 bool DoIntegerTruncate = false;
17428 int64_t StartAddress = LoadNodes[0].OffsetFromBase;
17429 SDValue LoadChain = FirstLoad->getChain();
17430 for (unsigned i = 1; i < LoadNodes.size(); ++i) {
17431 // All loads must share the same chain.
17432 if (LoadNodes[i].MemNode->getChain() != LoadChain)
17433 break;
17434
17435 int64_t CurrAddress = LoadNodes[i].OffsetFromBase;
17436 if (CurrAddress - StartAddress != (ElementSizeBytes * i))
17437 break;
17438 LastConsecutiveLoad = i;
17439
17440 if (isDereferenceable && !LoadNodes[i].MemNode->isDereferenceable())
17441 isDereferenceable = false;
17442
17443 // Find a legal type for the vector store.
17444 unsigned Elts = (i + 1) * NumMemElts;
17445 EVT StoreTy = EVT::getVectorVT(Context, MemVT.getScalarType(), Elts);
17446
17447 // Break early when size is too large to be legal.
17448 if (StoreTy.getSizeInBits() > MaximumLegalStoreInBits)
17449 break;
17450
17451 bool IsFastSt = false;
17452 bool IsFastLd = false;
17453 if (TLI.isTypeLegal(StoreTy) &&
17454 TLI.canMergeStoresTo(FirstStoreAS, StoreTy, DAG) &&
17455 TLI.allowsMemoryAccess(Context, DL, StoreTy,
17456 *FirstInChain->getMemOperand(), &IsFastSt) &&
17457 IsFastSt &&
17458 TLI.allowsMemoryAccess(Context, DL, StoreTy,
17459 *FirstLoad->getMemOperand(), &IsFastLd) &&
17460 IsFastLd) {
17461 LastLegalVectorType = i + 1;
17462 }
17463
17464 // Find a legal type for the integer store.
17465 unsigned SizeInBits = (i + 1) * ElementSizeBytes * 8;
17466 StoreTy = EVT::getIntegerVT(Context, SizeInBits);
17467 if (TLI.isTypeLegal(StoreTy) &&
17468 TLI.canMergeStoresTo(FirstStoreAS, StoreTy, DAG) &&
17469 TLI.allowsMemoryAccess(Context, DL, StoreTy,
17470 *FirstInChain->getMemOperand(), &IsFastSt) &&
17471 IsFastSt &&
17472 TLI.allowsMemoryAccess(Context, DL, StoreTy,
17473 *FirstLoad->getMemOperand(), &IsFastLd) &&
17474 IsFastLd) {
17475 LastLegalIntegerType = i + 1;
17476 DoIntegerTruncate = false;
17477 // Or check whether a truncstore and extload is legal.
17478 } else if (TLI.getTypeAction(Context, StoreTy) ==
17479 TargetLowering::TypePromoteInteger) {
17480 EVT LegalizedStoredValTy = TLI.getTypeToTransformTo(Context, StoreTy);
17481 if (TLI.isTruncStoreLegal(LegalizedStoredValTy, StoreTy) &&
17482 TLI.canMergeStoresTo(FirstStoreAS, LegalizedStoredValTy, DAG) &&
17483 TLI.isLoadExtLegal(ISD::ZEXTLOAD, LegalizedStoredValTy, StoreTy) &&
17484 TLI.isLoadExtLegal(ISD::SEXTLOAD, LegalizedStoredValTy, StoreTy) &&
17485 TLI.isLoadExtLegal(ISD::EXTLOAD, LegalizedStoredValTy, StoreTy) &&
17486 TLI.allowsMemoryAccess(Context, DL, StoreTy,
17487 *FirstInChain->getMemOperand(), &IsFastSt) &&
17488 IsFastSt &&
17489 TLI.allowsMemoryAccess(Context, DL, StoreTy,
17490 *FirstLoad->getMemOperand(), &IsFastLd) &&
17491 IsFastLd) {
17492 LastLegalIntegerType = i + 1;
17493 DoIntegerTruncate = true;
17494 }
17495 }
17496 }
17497
17498 // Only use vector types if the vector type is larger than the integer
17499 // type. If they are the same, use integers.
17500 bool UseVectorTy =
17501 LastLegalVectorType > LastLegalIntegerType && AllowVectors;
17502 unsigned LastLegalType =
17503 std::max(LastLegalVectorType, LastLegalIntegerType);
17504
17505 // We add +1 here because the LastXXX variables refer to the last index
17506 // in the array while NumElem refers to the number of elements.
17507 unsigned NumElem = std::min(NumConsecutiveStores, LastConsecutiveLoad + 1);
17508 NumElem = std::min(LastLegalType, NumElem);
17509 Align FirstLoadAlign = FirstLoad->getAlign();
17510
17511 if (NumElem < 2) {
17512 // We know that candidate stores are in order and of correct
17513 // shape. While there is no mergeable sequence from the
17514 // beginning, one may start later in the sequence. The only
17515 // reason a merge of size N could have failed where another of
17516 // the same size would not have is if the alignment of either
17517 // the load or the store has improved. Drop as many candidates as we
17518 // can here.
17519 unsigned NumSkip = 1;
17520 while ((NumSkip < LoadNodes.size()) &&
17521 (LoadNodes[NumSkip].MemNode->getAlign() <= FirstLoadAlign) &&
17522 (StoreNodes[NumSkip].MemNode->getAlign() <= FirstStoreAlign))
17523 NumSkip++;
17524 StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + NumSkip);
17525 LoadNodes.erase(LoadNodes.begin(), LoadNodes.begin() + NumSkip);
17526 NumConsecutiveStores -= NumSkip;
17527 continue;
17528 }
17529
17530 // Check that we can merge these candidates without causing a cycle.
17531 if (!checkMergeStoreCandidatesForDependencies(StoreNodes, NumElem,
17532 RootNode)) {
17533 StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + NumElem);
17534 LoadNodes.erase(LoadNodes.begin(), LoadNodes.begin() + NumElem);
17535 NumConsecutiveStores -= NumElem;
17536 continue;
17537 }
17538
17539 // Find if it is better to use vectors or integers to load and store
17540 // to memory.
17541 EVT JointMemOpVT;
17542 if (UseVectorTy) {
17543 // Find a legal type for the vector store.
17544 unsigned Elts = NumElem * NumMemElts;
17545 JointMemOpVT = EVT::getVectorVT(Context, MemVT.getScalarType(), Elts);
17546 } else {
17547 unsigned SizeInBits = NumElem * ElementSizeBytes * 8;
17548 JointMemOpVT = EVT::getIntegerVT(Context, SizeInBits);
17549 }
17550
17551 SDLoc LoadDL(LoadNodes[0].MemNode);
17552 SDLoc StoreDL(StoreNodes[0].MemNode);
17553
17554 // The merged loads are required to have the same incoming chain, so
17555 // using the first's chain is acceptable.
17556
17557 SDValue NewStoreChain = getMergeStoreChains(StoreNodes, NumElem);
17558 AddToWorklist(NewStoreChain.getNode());
17559
17560 MachineMemOperand::Flags LdMMOFlags =
17561 isDereferenceable ? MachineMemOperand::MODereferenceable
17562 : MachineMemOperand::MONone;
17563 if (IsNonTemporalLoad)
17564 LdMMOFlags |= MachineMemOperand::MONonTemporal;
17565
17566 MachineMemOperand::Flags StMMOFlags = IsNonTemporalStore
17567 ? MachineMemOperand::MONonTemporal
17568 : MachineMemOperand::MONone;
17569
17570 SDValue NewLoad, NewStore;
17571 if (UseVectorTy || !DoIntegerTruncate) {
17572 NewLoad = DAG.getLoad(
17573 JointMemOpVT, LoadDL, FirstLoad->getChain(), FirstLoad->getBasePtr(),
17574 FirstLoad->getPointerInfo(), FirstLoadAlign, LdMMOFlags);
17575 SDValue StoreOp = NewLoad;
17576 if (NeedRotate) {
17577 unsigned LoadWidth = ElementSizeBytes * 8 * 2;
17578 assert(JointMemOpVT == EVT::getIntegerVT(Context, LoadWidth) &&
17579 "Unexpected type for rotate-able load pair");
17580 SDValue RotAmt =
17581 DAG.getShiftAmountConstant(LoadWidth / 2, JointMemOpVT, LoadDL);
17582 // Target can convert to the identical ROTR if it does not have ROTL.
17583 StoreOp = DAG.getNode(ISD::ROTL, LoadDL, JointMemOpVT, NewLoad, RotAmt);
17584 }
17585 NewStore = DAG.getStore(
17586 NewStoreChain, StoreDL, StoreOp, FirstInChain->getBasePtr(),
17587 FirstInChain->getPointerInfo(), FirstStoreAlign, StMMOFlags);
17588 } else { // This must be the truncstore/extload case
17589 EVT ExtendedTy =
17590 TLI.getTypeToTransformTo(*DAG.getContext(), JointMemOpVT);
17591 NewLoad = DAG.getExtLoad(ISD::EXTLOAD, LoadDL, ExtendedTy,
17592 FirstLoad->getChain(), FirstLoad->getBasePtr(),
17593 FirstLoad->getPointerInfo(), JointMemOpVT,
17594 FirstLoadAlign, LdMMOFlags);
17595 NewStore = DAG.getTruncStore(
17596 NewStoreChain, StoreDL, NewLoad, FirstInChain->getBasePtr(),
17597 FirstInChain->getPointerInfo(), JointMemOpVT,
17598 FirstInChain->getAlign(), FirstInChain->getMemOperand()->getFlags());
17599 }
17600
17601 // Transfer chain users from old loads to the new load.
17602 for (unsigned i = 0; i < NumElem; ++i) {
17603 LoadSDNode *Ld = cast<LoadSDNode>(LoadNodes[i].MemNode);
17604 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1),
17605 SDValue(NewLoad.getNode(), 1));
17606 }
17607
17608 // Replace all stores with the new store. Recursively remove corresponding
17609 // values if they are no longer used.
17610 for (unsigned i = 0; i < NumElem; ++i) {
17611 SDValue Val = StoreNodes[i].MemNode->getOperand(1);
17612 CombineTo(StoreNodes[i].MemNode, NewStore);
17613 if (Val.getNode()->use_empty())
17614 recursivelyDeleteUnusedNodes(Val.getNode());
17615 }
17616
17617 MadeChange = true;
17618 StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + NumElem);
17619 LoadNodes.erase(LoadNodes.begin(), LoadNodes.begin() + NumElem);
17620 NumConsecutiveStores -= NumElem;
17621 }
17622 return MadeChange;
17623}
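
The rotate trick above handles the case where the two loads appear in the opposite order from the stores: one wide load plus a rotate by half the width swaps the halves, avoiding two narrow loads. A standalone illustration with i32 halves of an i64 (plain C++, not the DAG form):

#include <cassert>
#include <cstdint>

uint64_t rotl64(uint64_t V, unsigned N) {
  return (V << N) | (V >> (64 - N));  // N must be in (0, 64).
}

int main() {
  // The wide load sees [high half | low half] in memory order...
  uint64_t Loaded = 0xAAAAAAAA11111111ULL;
  // ...and a rotate by half the width swaps the halves for the store.
  assert(rotl64(Loaded, 32) == 0x11111111AAAAAAAAULL);
  return 0;
}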
17624
17625bool DAGCombiner::mergeConsecutiveStores(StoreSDNode *St) {
17626 if (OptLevel == CodeGenOpt::None || !EnableStoreMerging)
17627 return false;
17628
17629 // TODO: Extend this function to merge stores of scalable vectors.
17630 // (i.e. two <vscale x 8 x i8> stores can be merged to one <vscale x 16 x i8>
17631 // store since we know <vscale x 16 x i8> is exactly twice as large as
17632 // <vscale x 8 x i8>). Until then, bail out for scalable vectors.
17633 EVT MemVT = St->getMemoryVT();
17634 if (MemVT.isScalableVector())
17635 return false;
17636 if (!MemVT.isSimple() || MemVT.getSizeInBits() * 2 > MaximumLegalStoreInBits)
17637 return false;
17638
17639 // This function cannot currently deal with non-byte-sized memory sizes.
17640 int64_t ElementSizeBytes = MemVT.getStoreSize();
17641 if (ElementSizeBytes * 8 != (int64_t)MemVT.getSizeInBits())
17642 return false;
17643
17644 // Do not bother looking at stored values that are not constants, loads, or
17645 // extracted vector elements.
17646 SDValue StoredVal = peekThroughBitcasts(St->getValue());
17647 const StoreSource StoreSrc = getStoreSource(StoredVal);
17648 if (StoreSrc == StoreSource::Unknown)
17649 return false;
17650
17651 SmallVector<MemOpLink, 8> StoreNodes;
17652 SDNode *RootNode;
17653 // Find potential store merge candidates by searching through chain sub-DAG
17654 getStoreMergeCandidates(St, StoreNodes, RootNode);
17655
17656 // Check if there is anything to merge.
17657 if (StoreNodes.size() < 2)
17658 return false;
17659
17660 // Sort the memory operands according to their distance from the
17661 // base pointer.
17662 llvm::sort(StoreNodes, [](MemOpLink LHS, MemOpLink RHS) {
17663 return LHS.OffsetFromBase < RHS.OffsetFromBase;
17664 });
17665
17666 bool AllowVectors = !DAG.getMachineFunction().getFunction().hasFnAttribute(
17667 Attribute::NoImplicitFloat);
17668 bool IsNonTemporalStore = St->isNonTemporal();
17669 bool IsNonTemporalLoad = StoreSrc == StoreSource::Load &&
17670 cast<LoadSDNode>(StoredVal)->isNonTemporal();
17671
17672 // Store merging attempts to merge the lowest stores first. This generally
17673 // works out, since on success the remaining stores are checked
17674 // after the first collection of stores is merged. However, in the
17675 // case that a non-mergeable store is found first, e.g., {p[-2],
17676 // p[0], p[1], p[2], p[3]}, we would fail and miss the subsequent
17677 // mergeable cases. To prevent this, we prune such stores from the
17678 // front of StoreNodes here.
17679 bool MadeChange = false;
17680 while (StoreNodes.size() > 1) {
17681 unsigned NumConsecutiveStores =
17682 getConsecutiveStores(StoreNodes, ElementSizeBytes);
17683 // There are no more stores in the list to examine.
17684 if (NumConsecutiveStores == 0)
17685 return MadeChange;
17686
17687 // We have at least 2 consecutive stores. Try to merge them.
17688 assert(NumConsecutiveStores >= 2 && "Expected at least 2 stores");
17689 switch (StoreSrc) {
17690 case StoreSource::Constant:
17691 MadeChange |= tryStoreMergeOfConstants(StoreNodes, NumConsecutiveStores,
17692 MemVT, RootNode, AllowVectors);
17693 break;
17694
17695 case StoreSource::Extract:
17696 MadeChange |= tryStoreMergeOfExtracts(StoreNodes, NumConsecutiveStores,
17697 MemVT, RootNode);
17698 break;
17699
17700 case StoreSource::Load:
17701 MadeChange |= tryStoreMergeOfLoads(StoreNodes, NumConsecutiveStores,
17702 MemVT, RootNode, AllowVectors,
17703 IsNonTemporalStore, IsNonTemporalLoad);
17704 break;
17705
17706 default:
17707 llvm_unreachable("Unhandled store source type");
17708 }
17709 }
17710 return MadeChange;
17711}
17712
17713SDValue DAGCombiner::replaceStoreChain(StoreSDNode *ST, SDValue BetterChain) {
17714 SDLoc SL(ST);
17715 SDValue ReplStore;
17716
17717 // Replace the chain to avoid dependency.
17718 if (ST->isTruncatingStore()) {
17719 ReplStore = DAG.getTruncStore(BetterChain, SL, ST->getValue(),
17720 ST->getBasePtr(), ST->getMemoryVT(),
17721 ST->getMemOperand());
17722 } else {
17723 ReplStore = DAG.getStore(BetterChain, SL, ST->getValue(), ST->getBasePtr(),
17724 ST->getMemOperand());
17725 }
17726
17727 // Create token to keep both nodes around.
17728 SDValue Token = DAG.getNode(ISD::TokenFactor, SL,
17729 MVT::Other, ST->getChain(), ReplStore);
17730
17731 // Make sure the new and old chains are cleaned up.
17732 AddToWorklist(Token.getNode());
17733
17734 // Don't add users to work list.
17735 return CombineTo(ST, Token, false);
17736}
17737
17738SDValue DAGCombiner::replaceStoreOfFPConstant(StoreSDNode *ST) {
17739 SDValue Value = ST->getValue();
17740 if (Value.getOpcode() == ISD::TargetConstantFP)
17741 return SDValue();
17742
17743 if (!ISD::isNormalStore(ST))
17744 return SDValue();
17745
17746 SDLoc DL(ST);
17747
17748 SDValue Chain = ST->getChain();
17749 SDValue Ptr = ST->getBasePtr();
17750
17751 const ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Value);
17752
17753 // NOTE: If the original store is volatile, this transform must not increase
17754 // the number of stores. For example, on x86-32 an f64 can be stored in one
17755 // processor operation but an i64 (which is not legal) requires two. So the
17756 // transform should not be done in this case.
17757
17758 SDValue Tmp;
17759 switch (CFP->getSimpleValueType(0).SimpleTy) {
17760 default:
17761 llvm_unreachable("Unknown FP type");
17762 case MVT::f16: // We don't do this for these yet.
17763 case MVT::f80:
17764 case MVT::f128:
17765 case MVT::ppcf128:
17766 return SDValue();
17767 case MVT::f32:
17768 if ((isTypeLegal(MVT::i32) && !LegalOperations && ST->isSimple()) ||
17769 TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i32)) {
17770
17771 Tmp = DAG.getConstant((uint32_t)CFP->getValueAPF().
17772 bitcastToAPInt().getZExtValue(), SDLoc(CFP),
17773 MVT::i32);
17774 return DAG.getStore(Chain, DL, Tmp, Ptr, ST->getMemOperand());
17775 }
17776
17777 return SDValue();
17778 case MVT::f64:
17779 if ((TLI.isTypeLegal(MVT::i64) && !LegalOperations &&
17780 ST->isSimple()) ||
17781 TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i64)) {
17782
17783 Tmp = DAG.getConstant(CFP->getValueAPF().bitcastToAPInt().
17784 getZExtValue(), SDLoc(CFP), MVT::i64);
17785 return DAG.getStore(Chain, DL, Tmp,
17786 Ptr, ST->getMemOperand());
17787 }
17788
17789 if (ST->isSimple() &&
17790 TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i32)) {
17791 // Many FP stores are not made apparent until after legalize, e.g. for
17792 // argument passing. Since this is so common, custom legalize the
17793 // 64-bit integer store into two 32-bit stores.
17794 uint64_t Val = CFP->getValueAPF().bitcastToAPInt().getZExtValue();
17795 SDValue Lo = DAG.getConstant(Val & 0xFFFFFFFF, SDLoc(CFP), MVT::i32);
17796 SDValue Hi = DAG.getConstant(Val >> 32, SDLoc(CFP), MVT::i32);
17797 if (DAG.getDataLayout().isBigEndian())
17798 std::swap(Lo, Hi);
17799
17800 MachineMemOperand::Flags MMOFlags = ST->getMemOperand()->getFlags();
17801 AAMDNodes AAInfo = ST->getAAInfo();
17802
17803 SDValue St0 = DAG.getStore(Chain, DL, Lo, Ptr, ST->getPointerInfo(),
17804 ST->getOriginalAlign(), MMOFlags, AAInfo);
17805 Ptr = DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(4), DL);
17806 SDValue St1 = DAG.getStore(Chain, DL, Hi, Ptr,
17807 ST->getPointerInfo().getWithOffset(4),
17808 ST->getOriginalAlign(), MMOFlags, AAInfo);
17809 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
17810 St0, St1);
17811 }
17812
17813 return SDValue();
17814 }
17815}
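
As a concrete instance of the f64 split above: storing the constant 1.0 bitcasts to the i64 0x3FF0000000000000, which becomes Lo = 0x00000000 and Hi = 0x3FF00000 (the halves are swapped on big-endian targets). A standalone check of that arithmetic:

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  double D = 1.0;
  uint64_t Val;
  std::memcpy(&Val, &D, sizeof Val);  // Analogue of bitcastToAPInt().
  uint32_t Lo = (uint32_t)(Val & 0xFFFFFFFF);
  uint32_t Hi = (uint32_t)(Val >> 32);
  assert(Val == 0x3FF0000000000000ULL);
  assert(Lo == 0x00000000u && Hi == 0x3FF00000u);
  return 0;
}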
17816
17817SDValue DAGCombiner::visitSTORE(SDNode *N) {
17818 StoreSDNode *ST = cast<StoreSDNode>(N);
17819 SDValue Chain = ST->getChain();
17820 SDValue Value = ST->getValue();
17821 SDValue Ptr = ST->getBasePtr();
17822
17823 // If this is a store of a bit convert, store the input value if the
17824 // resultant store does not need a higher alignment than the original.
17825 if (Value.getOpcode() == ISD::BITCAST && !ST->isTruncatingStore() &&
17826 ST->isUnindexed()) {
17827 EVT SVT = Value.getOperand(0).getValueType();
17828 // If the store is volatile, we only want to change the store type if the
17829 // resulting store is legal. Otherwise we might increase the number of
17830 // memory accesses. We don't care if the original type was legal or not
17831 // as we assume software couldn't rely on the number of accesses of an
17832 // illegal type.
17833 // TODO: May be able to relax for unordered atomics (see D66309)
17834 if (((!LegalOperations && ST->isSimple()) ||
17835 TLI.isOperationLegal(ISD::STORE, SVT)) &&
17836 TLI.isStoreBitCastBeneficial(Value.getValueType(), SVT,
17837 DAG, *ST->getMemOperand())) {
17838 return DAG.getStore(Chain, SDLoc(N), Value.getOperand(0), Ptr,
17839 ST->getMemOperand());
17840 }
17841 }
17842
17843 // Turn 'store undef, Ptr' -> nothing.
17844 if (Value.isUndef() && ST->isUnindexed())
17845 return Chain;
17846
17847 // Try to infer better alignment information than the store already has.
17848 if (OptLevel != CodeGenOpt::None && ST->isUnindexed() && !ST->isAtomic()) {
17849 if (MaybeAlign Alignment = DAG.InferPtrAlign(Ptr)) {
17850 if (*Alignment > ST->getAlign() &&
17851 isAligned(*Alignment, ST->getSrcValueOffset())) {
17852 SDValue NewStore =
17853 DAG.getTruncStore(Chain, SDLoc(N), Value, Ptr, ST->getPointerInfo(),
17854 ST->getMemoryVT(), *Alignment,
17855 ST->getMemOperand()->getFlags(), ST->getAAInfo());
17856 // NewStore will always be N, as we are only refining the alignment.
17857 assert(NewStore.getNode() == N);
17858 (void)NewStore;
17859 }
17860 }
17861 }
17862
17863 // Try transforming a pair floating point load / store ops to integer
17864 // load / store ops.
17865 if (SDValue NewST = TransformFPLoadStorePair(N))
17866 return NewST;
17867
17868 // Try transforming several stores into STORE (BSWAP).
17869 if (SDValue Store = mergeTruncStores(ST))
17870 return Store;
17871
17872 if (ST->isUnindexed()) {
17873 // Walk up chain skipping non-aliasing memory nodes, on this store and any
17874 // adjacent stores.
17875 if (findBetterNeighborChains(ST)) {
17876 // replaceStoreChain uses CombineTo, which handles all of the worklist
17877 // manipulation. Return the original node so nothing else is done.
17878 return SDValue(ST, 0);
17879 }
17880 Chain = ST->getChain();
17881 }
17882
17883 // FIXME: is there such a thing as a truncating indexed store?
17884 if (ST->isTruncatingStore() && ST->isUnindexed() &&
17885 Value.getValueType().isInteger() &&
17886 (!isa<ConstantSDNode>(Value) ||
17887 !cast<ConstantSDNode>(Value)->isOpaque())) {
17888 APInt TruncDemandedBits =
17889 APInt::getLowBitsSet(Value.getScalarValueSizeInBits(),
17890 ST->getMemoryVT().getScalarSizeInBits());
17891
17892 // See if we can simplify the input to this truncstore with knowledge that
17893 // only the low bits are being used. For example:
17894 // "truncstore (or (shl x, 8), y), i8" -> "truncstore y, i8"
17895 AddToWorklist(Value.getNode());
17896 if (SDValue Shorter = DAG.GetDemandedBits(Value, TruncDemandedBits))
17897 return DAG.getTruncStore(Chain, SDLoc(N), Shorter, Ptr, ST->getMemoryVT(),
17898 ST->getMemOperand());
17899
17900 // Otherwise, see if we can simplify the operation with
17901 // SimplifyDemandedBits, which only works if the value has a single use.
17902 if (SimplifyDemandedBits(Value, TruncDemandedBits)) {
17903 // Re-visit the store if anything changed and the store hasn't been merged
17904 // with another node (N is deleted). SimplifyDemandedBits will add Value's
17905 // node back to the worklist if necessary, but we also need to re-visit
17906 // the Store node itself.
17907 if (N->getOpcode() != ISD::DELETED_NODE)
17908 AddToWorklist(N);
17909 return SDValue(N, 0);
17910 }
17911 }
17912
17913 // If this is a load followed by a store to the same location, then the store
17914 // is dead/noop.
17915 // TODO: Can relax for unordered atomics (see D66309)
17916 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Value)) {
17917 if (Ld->getBasePtr() == Ptr && ST->getMemoryVT() == Ld->getMemoryVT() &&
17918 ST->isUnindexed() && ST->isSimple() &&
17919 // There can't be any side effects between the load and store, such as
17920 // a call or store.
17921 Chain.reachesChainWithoutSideEffects(SDValue(Ld, 1))) {
17922 // The store is dead, remove it.
17923 return Chain;
17924 }
17925 }
17926
17927 // TODO: Can relax for unordered atomics (see D66309)
17928 if (StoreSDNode *ST1 = dyn_cast<StoreSDNode>(Chain)) {
17929 if (ST->isUnindexed() && ST->isSimple() &&
17930 ST1->isUnindexed() && ST1->isSimple()) {
17931 if (ST1->getBasePtr() == Ptr && ST1->getValue() == Value &&
17932 ST->getMemoryVT() == ST1->getMemoryVT()) {
17933 // If this is a store followed by a store with the same value to the
17934 // same location, then the store is dead/noop.
17935 return Chain;
17936 }
17937
17938 if (OptLevel != CodeGenOpt::None && ST1->hasOneUse() &&
17939 !ST1->getBasePtr().isUndef() &&
17940 // BaseIndexOffset and the code below require knowing the size
17941 // of a vector, so bail out if MemoryVT is scalable.
17942 !ST->getMemoryVT().isScalableVector() &&
17943 !ST1->getMemoryVT().isScalableVector()) {
17944 const BaseIndexOffset STBase = BaseIndexOffset::match(ST, DAG);
17945 const BaseIndexOffset ChainBase = BaseIndexOffset::match(ST1, DAG);
17946 unsigned STBitSize = ST->getMemoryVT().getFixedSizeInBits();
17947 unsigned ChainBitSize = ST1->getMemoryVT().getFixedSizeInBits();
17948 // If the preceding store writes to a subset of the current store's
17949 // location and no other node is chained to it, the preceding store is
17950 // fully overwritten and can be dropped. Do not remove stores to undef
17951 // as they may be used as data sinks.
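// Illustrative case: ST1 stores i16 to [p, p+2) and ST stores i32 to
// [p, p+4) on the same chain, with ST as ST1's only user; ST1 is fully
// overwritten, so replacing it with its input chain drops it.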
17952 if (STBase.contains(DAG, STBitSize, ChainBase, ChainBitSize)) {
17953 CombineTo(ST1, ST1->getChain());
17954 return SDValue();
17955 }
17956 }
17957 }
17958 }
17959
17960 // If this is an FP_ROUND or TRUNC followed by a store, fold this into a
17961 // truncating store. We can do this even if this is already a truncstore.
17962 if ((Value.getOpcode() == ISD::FP_ROUND || Value.getOpcode() == ISD::TRUNCATE)
17963 && Value.getNode()->hasOneUse() && ST->isUnindexed() &&
17964 TLI.isTruncStoreLegal(Value.getOperand(0).getValueType(),
17965 ST->getMemoryVT())) {
17966 return DAG.getTruncStore(Chain, SDLoc(N), Value.getOperand(0),
17967 Ptr, ST->getMemoryVT(), ST->getMemOperand());
17968 }
17969
17970 // Always perform this optimization before types are legal. If the target
17971 // prefers, also try this after legalization to catch stores that were created
17972 // by intrinsics or other nodes.
17973 if (!LegalTypes || (TLI.mergeStoresAfterLegalization(ST->getMemoryVT()))) {
17974 while (true) {
17975 // There can be multiple store sequences on the same chain.
17976 // Keep trying to merge store sequences until we are unable to do so
17977 // or until we merge the last store on the chain.
17978 bool Changed = mergeConsecutiveStores(ST);
17979 if (!Changed) break;
17980 // Return N, as merging only uses CombineTo and no worklist
17981 // cleanup is necessary.
17982 if (N->getOpcode() == ISD::DELETED_NODE || !isa<StoreSDNode>(N))
17983 return SDValue(N, 0);
17984 }
17985 }
17986
17987 // Try transforming N to an indexed store.
17988 if (CombineToPreIndexedLoadStore(N) || CombineToPostIndexedLoadStore(N))
17989 return SDValue(N, 0);
17990
17991 // Turn 'store float 1.0, Ptr' -> 'store int 0x12345678, Ptr'
17992 //
17993 // Make sure to do this only after attempting to merge stores in order to
17994 // avoid changing the types of some subset of stores due to visit order,
17995 // preventing their merging.
17996 if (isa<ConstantFPSDNode>(ST->getValue())) {
17997 if (SDValue NewSt = replaceStoreOfFPConstant(ST))
17998 return NewSt;
17999 }
18000
18001 if (SDValue NewSt = splitMergedValStore(ST))
18002 return NewSt;
18003
18004 return ReduceLoadOpStoreWidth(N);
18005}
18006
18007SDValue DAGCombiner::visitLIFETIME_END(SDNode *N) {
18008 const auto *LifetimeEnd = cast<LifetimeSDNode>(N);
18009 if (!LifetimeEnd->hasOffset())
18010 return SDValue();
18011
18012 const BaseIndexOffset LifetimeEndBase(N->getOperand(1), SDValue(),
18013 LifetimeEnd->getOffset(), false);
18014
18015 // We walk up the chains to find stores.
18016 SmallVector<SDValue, 8> Chains = {N->getOperand(0)};
18017 while (!Chains.empty()) {
18018 SDValue Chain = Chains.pop_back_val();
18019 if (!Chain.hasOneUse())
18020 continue;
18021 switch (Chain.getOpcode()) {
18022 case ISD::TokenFactor:
18023 for (unsigned Nops = Chain.getNumOperands(); Nops;)
18024 Chains.push_back(Chain.getOperand(--Nops));
18025 break;
18026 case ISD::LIFETIME_START:
18027 case ISD::LIFETIME_END:
18028 // We can forward past any lifetime start/end that can be proven not to
18029 // alias the node.
18030 if (!isAlias(Chain.getNode(), N))
18031 Chains.push_back(Chain.getOperand(0));
18032 break;
18033 case ISD::STORE: {
18034 StoreSDNode *ST = dyn_cast<StoreSDNode>(Chain);
18035 // TODO: Can relax for unordered atomics (see D66309)
18036 if (!ST->isSimple() || ST->isIndexed())
18037 continue;
18038 const TypeSize StoreSize = ST->getMemoryVT().getStoreSize();
18039 // The bounds of a scalable store are not known until runtime, so this
18040 // store cannot be elided.
18041 if (StoreSize.isScalable())
18042 continue;
18043 const BaseIndexOffset StoreBase = BaseIndexOffset::match(ST, DAG);
18044 // If we store purely within object bounds just before its lifetime ends,
18045 // we can remove the store.
18046 if (LifetimeEndBase.contains(DAG, LifetimeEnd->getSize() * 8, StoreBase,
18047 StoreSize.getFixedSize() * 8)) {
18048 LLVM_DEBUG(dbgs() << "\nRemoving store:"; StoreBase.dump();
18049 dbgs() << "\nwithin LIFETIME_END of : ";
18050 LifetimeEndBase.dump(); dbgs() << "\n");
18051 CombineTo(ST, ST->getChain());
18052 return SDValue(N, 0);
18053 }
18054 }
18055 }
18056 }
18057 return SDValue();
18058}
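// Illustrative walk for visitLIFETIME_END (a sketch): given
//   st = store <4 x i32> %v, %alloca        ; 16 bytes
//   lifetime.end 16, %alloca                ; chained after st
// the store lands entirely within the object whose lifetime is ending, so
// it can never be observed and is replaced by its input chain.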
18059
18060/// In the store instruction sequence below, the F and I values
18061/// are bundled together as an i64 value before being stored into memory.
18062/// Sometimes it is more efficient to generate separate stores for F and I,
18063/// which can remove the bitwise instructions or sink them to colder places.
18064///
18065/// (store (or (zext (bitcast F to i32) to i64),
18066/// (shl (zext I to i64), 32)), addr) -->
18067/// (store F, addr) and (store I, addr+4)
18068///
18069/// Similarly, splitting for other merged stores can also be beneficial, like:
18070/// For pair of {i32, i32}, i64 store --> two i32 stores.
18071/// For pair of {i32, i16}, i64 store --> two i32 stores.
18072/// For pair of {i16, i16}, i32 store --> two i16 stores.
18073/// For pair of {i16, i8}, i32 store --> two i16 stores.
18074/// For pair of {i8, i8}, i16 store --> two i8 stores.
18075///
18076/// We allow each target to determine specifically which kind of splitting is
18077/// supported.
18078///
18079/// The store patterns are commonly seen from the simple code snippet below
18080/// if only std::make_pair(...) is SROA-transformed before being inlined into hoo.
18081/// void goo(const std::pair<int, float> &);
18082/// hoo() {
18083/// ...
18084/// goo(std::make_pair(tmp, ftmp));
18085/// ...
18086/// }
18087///
18088SDValue DAGCombiner::splitMergedValStore(StoreSDNode *ST) {
18089 if (OptLevel == CodeGenOpt::None)
18090 return SDValue();
18091
18092 // Can't change the number of memory accesses for a volatile store or break
18093 // atomicity for an atomic one.
18094 if (!ST->isSimple())
18095 return SDValue();
18096
18097 SDValue Val = ST->getValue();
18098 SDLoc DL(ST);
18099
18100 // Match OR operand.
18101 if (!Val.getValueType().isScalarInteger() || Val.getOpcode() != ISD::OR)
18102 return SDValue();
18103
18104 // Match SHL operand and get Lower and Higher parts of Val.
18105 SDValue Op1 = Val.getOperand(0);
18106 SDValue Op2 = Val.getOperand(1);
18107 SDValue Lo, Hi;
18108 if (Op1.getOpcode() != ISD::SHL) {
18109 std::swap(Op1, Op2);
18110 if (Op1.getOpcode() != ISD::SHL)
18111 return SDValue();
18112 }
18113 Lo = Op2;
18114 Hi = Op1.getOperand(0);
18115 if (!Op1.hasOneUse())
18116 return SDValue();
18117
18118 // Match shift amount to HalfValBitSize.
18119 unsigned HalfValBitSize = Val.getValueSizeInBits() / 2;
18120 ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(Op1.getOperand(1));
18121 if (!ShAmt || ShAmt->getAPIntValue() != HalfValBitSize)
18122 return SDValue();
18123
18124 // Lo and Hi must be zero-extended from integer types no wider than
18125 // HalfValBitSize.
18126 if (Lo.getOpcode() != ISD::ZERO_EXTEND || !Lo.hasOneUse() ||
18127 !Lo.getOperand(0).getValueType().isScalarInteger() ||
18128 Lo.getOperand(0).getValueSizeInBits() > HalfValBitSize ||
18129 Hi.getOpcode() != ISD::ZERO_EXTEND || !Hi.hasOneUse() ||
18130 !Hi.getOperand(0).getValueType().isScalarInteger() ||
18131 Hi.getOperand(0).getValueSizeInBits() > HalfValBitSize)
18132 return SDValue();
18133
18134 // Use the EVT of the low and high parts before the bitcast as the
18135 // inputs to the target query.
18136 EVT LowTy = (Lo.getOperand(0).getOpcode() == ISD::BITCAST)
18137 ? Lo.getOperand(0).getValueType()
18138 : Lo.getValueType();
18139 EVT HighTy = (Hi.getOperand(0).getOpcode() == ISD::BITCAST)
18140 ? Hi.getOperand(0).getValueType()
18141 : Hi.getValueType();
18142 if (!TLI.isMultiStoresCheaperThanBitsMerge(LowTy, HighTy))
18143 return SDValue();
18144
18145 // Start to split store.
18146 MachineMemOperand::Flags MMOFlags = ST->getMemOperand()->getFlags();
18147 AAMDNodes AAInfo = ST->getAAInfo();
18148
18149 // Change the sizes of Lo and Hi's value types to HalfValBitSize.
18150 EVT VT = EVT::getIntegerVT(*DAG.getContext(), HalfValBitSize);
18151 Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Lo.getOperand(0));
18152 Hi = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Hi.getOperand(0));
18153
18154 SDValue Chain = ST->getChain();
18155 SDValue Ptr = ST->getBasePtr();
18156 // Lower value store.
18157 SDValue St0 = DAG.getStore(Chain, DL, Lo, Ptr, ST->getPointerInfo(),
18158 ST->getOriginalAlign(), MMOFlags, AAInfo);
18159 Ptr = DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(HalfValBitSize / 8), DL);
18160 // Higher value store.
18161 SDValue St1 = DAG.getStore(
18162 St0, DL, Hi, Ptr, ST->getPointerInfo().getWithOffset(HalfValBitSize / 8),
18163 ST->getOriginalAlign(), MMOFlags, AAInfo);
18164 return St1;
18165}
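// Illustrative splitMergedValStore result, assuming the target's
// isMultiStoresCheaperThanBitsMerge hook returns true for {f32, i32}:
//   (store i64 (or (zext (bitcast f32 %f to i32) to i64),
//                  (shl (zext i32 %i to i64), 32)), %p)
// becomes
//   st0 = store i32 (bitcast f32 %f to i32), %p
//   st1 = store i32 %i, %p + 4              ; chained on st0
// A target could opt in along these lines (a hypothetical sketch):
//   bool MyTargetLowering::isMultiStoresCheaperThanBitsMerge(
//       EVT LTy, EVT HTy) const {
//     // Two narrow stores avoid the bit-merging (zext/shl/or) sequence
//     // when one half originates in an FP register.
//     return LTy.isFloatingPoint() || HTy.isFloatingPoint();
//   }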
18166
18167/// Convert a disguised subvector insertion into a shuffle:
18168SDValue DAGCombiner::combineInsertEltToShuffle(SDNode *N, unsigned InsIndex) {
18169 assert(N->getOpcode() == ISD::INSERT_VECTOR_ELT &&
18170 "Expected insert_vector_elt");
18171 SDValue InsertVal = N->getOperand(1);
18172 SDValue Vec = N->getOperand(0);
18173
18174 // (insert_vector_elt (vector_shuffle X, Y), (extract_vector_elt X, N),
18175 // InsIndex)
18176 // --> (vector_shuffle X, Y) and variations where shuffle operands may be
18177 // CONCAT_VECTORS.
18178 if (Vec.getOpcode() == ISD::VECTOR_SHUFFLE && Vec.hasOneUse() &&
18179 InsertVal.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
18180 isa<ConstantSDNode>(InsertVal.getOperand(1))) {
18181 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Vec.getNode());
18182 ArrayRef<int> Mask = SVN->getMask();
18183
18184 SDValue X = Vec.getOperand(0);
18185 SDValue Y = Vec.getOperand(1);
18186
18187 // Vec's operand 0 is using indices from 0 to N-1 and
18188 // operand 1 from N to 2N - 1, where N is the number of
18189 // elements in the vectors.
18190 SDValue InsertVal0 = InsertVal.getOperand(0);
18191 int ElementOffset = -1;
18192
18193 // We explore the inputs of the shuffle in order to see if we find the
18194 // source of the extract_vector_elt. If so, we can use it to modify the
18195 // shuffle rather than perform an insert_vector_elt.
18196 SmallVector<std::pair<int, SDValue>, 8> ArgWorkList;
18197 ArgWorkList.emplace_back(Mask.size(), Y);
18198 ArgWorkList.emplace_back(0, X);
18199
18200 while (!ArgWorkList.empty()) {
18201 int ArgOffset;
18202 SDValue ArgVal;
18203 std::tie(ArgOffset, ArgVal) = ArgWorkList.pop_back_val();
18204
18205 if (ArgVal == InsertVal0) {
18206 ElementOffset = ArgOffset;
18207 break;
18208 }
18209
18210 // Peek through concat_vector.
18211 if (ArgVal.getOpcode() == ISD::CONCAT_VECTORS) {
18212 int CurrentArgOffset =
18213 ArgOffset + ArgVal.getValueType().getVectorNumElements();
18214 int Step = ArgVal.getOperand(0).getValueType().getVectorNumElements();
18215 for (SDValue Op : reverse(ArgVal->ops())) {
18216 CurrentArgOffset -= Step;
18217 ArgWorkList.emplace_back(CurrentArgOffset, Op);
18218 }
18219
18220 // Make sure we went through all the elements and did not screw up index
18221 // computation.
18222 assert(CurrentArgOffset == ArgOffset);
18223 }
18224 }
18225
18226 if (ElementOffset != -1) {
18227 SmallVector<int, 16> NewMask(Mask.begin(), Mask.end());
18228
18229 auto *ExtrIndex = cast<ConstantSDNode>(InsertVal.getOperand(1));
18230 NewMask[InsIndex] = ElementOffset + ExtrIndex->getZExtValue();
18231 assert(NewMask[InsIndex] <
18232 (int)(2 * Vec.getValueType().getVectorNumElements()) &&
18233 NewMask[InsIndex] >= 0 && "NewMask[InsIndex] is out of bound");
18234
18235 SDValue LegalShuffle =
18236 TLI.buildLegalVectorShuffle(Vec.getValueType(), SDLoc(N), X,
18237 Y, NewMask, DAG);
18238 if (LegalShuffle)
18239 return LegalShuffle;
18240 }
18241 }
18242
18243 // insert_vector_elt V, (bitcast X from vector type), IdxC -->
18244 // bitcast(shuffle (bitcast V), (extended X), Mask)
18245 // Note: We do not use an insert_subvector node because that requires a
18246 // legal subvector type.
18247 if (InsertVal.getOpcode() != ISD::BITCAST || !InsertVal.hasOneUse() ||
18248 !InsertVal.getOperand(0).getValueType().isVector())
18249 return SDValue();
18250
18251 SDValue SubVec = InsertVal.getOperand(0);
18252 SDValue DestVec = N->getOperand(0);
18253 EVT SubVecVT = SubVec.getValueType();
18254 EVT VT = DestVec.getValueType();
18255 unsigned NumSrcElts = SubVecVT.getVectorNumElements();
18256 // If the source has only a single vector element, the cost of creating and
18257 // adding it to a vector is likely to exceed the cost of an insert_vector_elt.
18258 if (NumSrcElts == 1)
18259 return SDValue();
18260 unsigned ExtendRatio = VT.getSizeInBits() / SubVecVT.getSizeInBits();
18261 unsigned NumMaskVals = ExtendRatio * NumSrcElts;
18262
18263 // Step 1: Create a shuffle mask that implements this insert operation. The
18264 // vector that we are inserting into will be operand 0 of the shuffle, so
18265 // those elements are just 'i'. The inserted subvector is in the first
18266 // positions of operand 1 of the shuffle. Example:
18267 // insert v4i32 V, (v2i16 X), 2 --> shuffle v8i16 V', X', {0,1,2,3,8,9,6,7}
18268 SmallVector<int, 16> Mask(NumMaskVals);
18269 for (unsigned i = 0; i != NumMaskVals; ++i) {
18270 if (i / NumSrcElts == InsIndex)
18271 Mask[i] = (i % NumSrcElts) + NumMaskVals;
18272 else
18273 Mask[i] = i;
18274 }
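// Trace of the loop above for the example mask (illustrative): with
// NumSrcElts = 2, InsIndex = 2 and NumMaskVals = 8, only i = 4 and i = 5
// satisfy i / NumSrcElts == InsIndex, giving Mask[4] = 0 + 8 = 8 and
// Mask[5] = 1 + 8 = 9; every other lane keeps Mask[i] = i, producing
// {0,1,2,3,8,9,6,7} as in the example.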
18275
18276 // Bail out if the target cannot handle the shuffle we want to create.
18277 EVT SubVecEltVT = SubVecVT.getVectorElementType();
18278 EVT ShufVT = EVT::getVectorVT(*DAG.getContext(), SubVecEltVT, NumMaskVals);
18279 if (!TLI.isShuffleMaskLegal(Mask, ShufVT))
18280 return SDValue();
18281
18282 // Step 2: Create a wide vector from the inserted source vector by appending
18283 // undefined elements. This is the same size as our destination vector.
18284 SDLoc DL(N);
18285 SmallVector<SDValue, 8> ConcatOps(ExtendRatio, DAG.getUNDEF(SubVecVT));
18286 ConcatOps[0] = SubVec;
18287 SDValue PaddedSubV = DAG.getNode(ISD::CONCAT_VECTORS, DL, ShufVT, ConcatOps);
18288
18289 // Step 3: Shuffle in the padded subvector.
18290 SDValue DestVecBC = DAG.getBitcast(ShufVT, DestVec);
18291 SDValue Shuf = DAG.getVectorShuffle(ShufVT, DL, DestVecBC, PaddedSubV, Mask);
18292 AddToWorklist(PaddedSubV.getNode());
18293 AddToWorklist(DestVecBC.getNode());
18294 AddToWorklist(Shuf.getNode());
18295 return DAG.getBitcast(VT, Shuf);
18296}
18297
18298SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) {
18299 SDValue InVec = N->getOperand(0);
18300 SDValue InVal = N->getOperand(1);
18301 SDValue EltNo = N->getOperand(2);
18302 SDLoc DL(N);
18303
18304 EVT VT = InVec.getValueType();
18305 auto *IndexC = dyn_cast<ConstantSDNode>(EltNo);
18306
18307 // Insert into out-of-bounds element is undefined.
18308 if (IndexC && VT.isFixedLengthVector() &&
18309 IndexC->getZExtValue() >= VT.getVectorNumElements())
18310 return DAG.getUNDEF(VT);
18311
18312 // Remove redundant insertions:
18313 // (insert_vector_elt x (extract_vector_elt x idx) idx) -> x
18314 if (InVal.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
18315 InVec == InVal.getOperand(0) && EltNo == InVal.getOperand(1))
18316 return InVec;
18317
18318 if (!IndexC) {
18319 // If this is a variable insert into an undef vector, it might be better to splat:
18320 // inselt undef, InVal, EltNo --> build_vector < InVal, InVal, ... >
18321 if (InVec.isUndef() && TLI.shouldSplatInsEltVarIndex(VT)) {
18322 if (VT.isScalableVector())
18323 return DAG.getSplatVector(VT, DL, InVal);
18324 else {
18325 SmallVector<SDValue, 8> Ops(VT.getVectorNumElements(), InVal);
18326 return DAG.getBuildVector(VT, DL, Ops);
18327 }
18328 }
18329 return SDValue();
18330 }
18331
18332 if (VT.isScalableVector())
18333 return SDValue();
18334
18335 unsigned NumElts = VT.getVectorNumElements();
18336
18337 // We must know which element is being inserted for folds below here.
18338 unsigned Elt = IndexC->getZExtValue();
18339 if (SDValue Shuf = combineInsertEltToShuffle(N, Elt))
18340 return Shuf;
18341
18342 // Canonicalize insert_vector_elt dag nodes.
18343 // Example:
18344 // (insert_vector_elt (insert_vector_elt A, Idx0), Idx1)
18345 // -> (insert_vector_elt (insert_vector_elt A, Idx1), Idx0)
18346 //
18347 // Do this only if the child insert_vector node has one use; also
18348 // do this only if indices are both constants and Idx1 < Idx0.
18349 if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT && InVec.hasOneUse()
18350 && isa<ConstantSDNode>(InVec.getOperand(2))) {
18351 unsigned OtherElt = InVec.getConstantOperandVal(2);
18352 if (Elt < OtherElt) {
18353 // Swap nodes.
18354 SDValue NewOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT,
18355 InVec.getOperand(0), InVal, EltNo);
18356 AddToWorklist(NewOp.getNode());
18357 return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(InVec.getNode()),
18358 VT, NewOp, InVec.getOperand(1), InVec.getOperand(2));
18359 }
18360 }
18361
18362 // If we can't generate a legal BUILD_VECTOR, exit
18363 if (LegalOperations && !TLI.isOperationLegal(ISD::BUILD_VECTOR, VT))
18364 return SDValue();
18365
18366 // Check that the operand is a BUILD_VECTOR (or UNDEF, which can essentially
18367 // be converted to a BUILD_VECTOR). Fill in the Ops vector with the
18368 // vector elements.
18369 SmallVector<SDValue, 8> Ops;
18370 // Do not combine these two vectors if the output vector will not replace
18371 // the input vector.
18372 if (InVec.getOpcode() == ISD::BUILD_VECTOR && InVec.hasOneUse()) {
18373 Ops.append(InVec.getNode()->op_begin(),
18374 InVec.getNode()->op_end());
18375 } else if (InVec.isUndef()) {
18376 Ops.append(NumElts, DAG.getUNDEF(InVal.getValueType()));
18377 } else {
18378 return SDValue();
18379 }
18380 assert(Ops.size() == NumElts && "Unexpected vector size");
18381
18382 // Insert the element
18383 if (Elt < Ops.size()) {
18384 // All the operands of BUILD_VECTOR must have the same type;
18385 // we enforce that here.
18386 EVT OpVT = Ops[0].getValueType();
18387 Ops[Elt] = OpVT.isInteger() ? DAG.getAnyExtOrTrunc(InVal, DL, OpVT) : InVal;
18388 }
18389
18390 // Return the new vector
18391 return DAG.getBuildVector(VT, DL, Ops);
18392}
18393
18394SDValue DAGCombiner::scalarizeExtractedVectorLoad(SDNode *EVE, EVT InVecVT,
18395 SDValue EltNo,
18396 LoadSDNode *OriginalLoad) {
18397 assert(OriginalLoad->isSimple());
18398
18399 EVT ResultVT = EVE->getValueType(0);
18400 EVT VecEltVT = InVecVT.getVectorElementType();
18401
18402 // If the vector element type is not a multiple of a byte then we are unable
18403 // to correctly compute an address to load only the extracted element as a
18404 // scalar.
18405 if (!VecEltVT.isByteSized())
18406 return SDValue();
18407
18408 Align Alignment = OriginalLoad->getAlign();
18409 Align NewAlign = DAG.getDataLayout().getABITypeAlign(
18410 VecEltVT.getTypeForEVT(*DAG.getContext()));
18411
18412 if (NewAlign > Alignment ||
18413 !TLI.isOperationLegalOrCustom(ISD::LOAD, VecEltVT))
18414 return SDValue();
18415
18416 ISD::LoadExtType ExtTy = ResultVT.bitsGT(VecEltVT) ?
18417 ISD::NON_EXTLOAD : ISD::EXTLOAD;
18418 if (!TLI.shouldReduceLoadWidth(OriginalLoad, ExtTy, VecEltVT))
18419 return SDValue();
18420
18421 Alignment = NewAlign;
18422
18423 SDValue NewPtr = OriginalLoad->getBasePtr();
18424 SDValue Offset;
18425 EVT PtrType = NewPtr.getValueType();
18426 MachinePointerInfo MPI;
18427 SDLoc DL(EVE);
18428 if (auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo)) {
18429 int Elt = ConstEltNo->getZExtValue();
18430 unsigned PtrOff = VecEltVT.getSizeInBits() * Elt / 8;
18431 Offset = DAG.getConstant(PtrOff, DL, PtrType);
18432 MPI = OriginalLoad->getPointerInfo().getWithOffset(PtrOff);
18433 } else {
18434 Offset = DAG.getZExtOrTrunc(EltNo, DL, PtrType);
18435 Offset = DAG.getNode(
18436 ISD::MUL, DL, PtrType, Offset,
18437 DAG.getConstant(VecEltVT.getStoreSize(), DL, PtrType));
18438 // Discard the pointer info except the address space because the memory
18439 // operand can't represent this new access since the offset is variable.
18440 MPI = MachinePointerInfo(OriginalLoad->getPointerInfo().getAddrSpace());
18441 }
18442 NewPtr = DAG.getMemBasePlusOffset(NewPtr, Offset, DL);
18443
18444 // The replacement we need to do here is a little tricky: we need to
18445 // replace an extractelement of a load with a load.
18446 // Use ReplaceAllUsesOfValuesWith to do the replacement.
18447 // Note that this replacement assumes that the extractvalue is the only
18448 // use of the load; that's okay because we don't want to perform this
18449 // transformation in other cases anyway.
18450 SDValue Load;
18451 SDValue Chain;
18452 if (ResultVT.bitsGT(VecEltVT)) {
18453 // If the result type of vextract is wider than the load, then issue an
18454 // extending load instead.
18455 ISD::LoadExtType ExtType = TLI.isLoadExtLegal(ISD::ZEXTLOAD, ResultVT,
18456 VecEltVT)
18457 ? ISD::ZEXTLOAD
18458 : ISD::EXTLOAD;
18459 Load = DAG.getExtLoad(ExtType, SDLoc(EVE), ResultVT,
18460 OriginalLoad->getChain(), NewPtr, MPI, VecEltVT,
18461 Alignment, OriginalLoad->getMemOperand()->getFlags(),
18462 OriginalLoad->getAAInfo());
18463 Chain = Load.getValue(1);
18464 } else {
18465 Load = DAG.getLoad(
18466 VecEltVT, SDLoc(EVE), OriginalLoad->getChain(), NewPtr, MPI, Alignment,
18467 OriginalLoad->getMemOperand()->getFlags(), OriginalLoad->getAAInfo());
18468 Chain = Load.getValue(1);
18469 if (ResultVT.bitsLT(VecEltVT))
18470 Load = DAG.getNode(ISD::TRUNCATE, SDLoc(EVE), ResultVT, Load);
18471 else
18472 Load = DAG.getBitcast(ResultVT, Load);
18473 }
18474 WorklistRemover DeadNodes(*this);
18475 SDValue From[] = { SDValue(EVE, 0), SDValue(OriginalLoad, 1) };
18476 SDValue To[] = { Load, Chain };
18477 DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
18478 // Make sure to revisit this node to clean it up; it will usually be dead.
18479 AddToWorklist(EVE);
18480 // Since we're explicitly calling ReplaceAllUses, add the new node to the
18481 // worklist explicitly as well.
18482 AddToWorklistWithUsers(Load.getNode());
18483 ++OpsNarrowed;
18484 return SDValue(EVE, 0);
18485}
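// Illustrative result (assuming a byte-sized element type, sufficient
// alignment, and the extract being the load's only user):
//   extract_vector_elt (load v4i32, %p), 2  -->  load i32, %p + 8
// Both the extract and the original load's chain are replaced in a single
// ReplaceAllUsesOfValuesWith call, as described above.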
18486
18487/// Transform a vector binary operation into a scalar binary operation by moving
18488/// the math/logic after an extract element of a vector.
18489static SDValue scalarizeExtractedBinop(SDNode *ExtElt, SelectionDAG &DAG,
18490 bool LegalOperations) {
18491 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
18492 SDValue Vec = ExtElt->getOperand(0);
18493 SDValue Index = ExtElt->getOperand(1);
18494 auto *IndexC = dyn_cast<ConstantSDNode>(Index);
18495 if (!IndexC || !TLI.isBinOp(Vec.getOpcode()) || !Vec.hasOneUse() ||
18496 Vec.getNode()->getNumValues() != 1)
18497 return SDValue();
18498
18499 // Targets may want to avoid this to prevent an expensive register transfer.
18500 if (!TLI.shouldScalarizeBinop(Vec))
18501 return SDValue();
18502
18503 // Extracting an element of a vector constant is constant-folded, so this
18504 // transform is just replacing a vector op with a scalar op while moving the
18505 // extract.
18506 SDValue Op0 = Vec.getOperand(0);
18507 SDValue Op1 = Vec.getOperand(1);
18508 if (isAnyConstantBuildVector(Op0, true) ||
18509 isAnyConstantBuildVector(Op1, true)) {
18510 // extractelt (binop X, C), IndexC --> binop (extractelt X, IndexC), C'
18511 // extractelt (binop C, X), IndexC --> binop C', (extractelt X, IndexC)
18512 SDLoc DL(ExtElt);
18513 EVT VT = ExtElt->getValueType(0);
18514 SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Op0, Index);
18515 SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Op1, Index);
18516 return DAG.getNode(Vec.getOpcode(), DL, VT, Ext0, Ext1);
18517 }
18518
18519 return SDValue();
18520}
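// Illustrative result of the transform above:
//   extract_vector_elt (add v4i32 %x, <11,12,13,14>), 2
//   --> add i32 (extract_vector_elt %x, 2), 13
// The extract of the constant operand constant-folds, so only one scalar
// extract survives.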
18521
18522SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
18523 SDValue VecOp = N->getOperand(0);
18524 SDValue Index = N->getOperand(1);
18525 EVT ScalarVT = N->getValueType(0);
18526 EVT VecVT = VecOp.getValueType();
18527 if (VecOp.isUndef())
18528 return DAG.getUNDEF(ScalarVT);
18529
18530 // extract_vector_elt (insert_vector_elt vec, val, idx), idx) -> val
18531 //
18532 // This only really matters if the index is non-constant since other combines
18533 // on the constant elements already work.
18534 SDLoc DL(N);
18535 if (VecOp.getOpcode() == ISD::INSERT_VECTOR_ELT &&
18536 Index == VecOp.getOperand(2)) {
18537 SDValue Elt = VecOp.getOperand(1);
18538 return VecVT.isInteger() ? DAG.getAnyExtOrTrunc(Elt, DL, ScalarVT) : Elt;
18539 }
18540
18541 // (vextract (scalar_to_vector val), 0) -> val
18542 if (VecOp.getOpcode() == ISD::SCALAR_TO_VECTOR) {
18543 // Only 0'th element of SCALAR_TO_VECTOR is defined.
18544 if (DAG.isKnownNeverZero(Index))
18545 return DAG.getUNDEF(ScalarVT);
18546
18547 // Check if the result type doesn't match the inserted element type. A
18548 // SCALAR_TO_VECTOR may truncate the inserted element and the
18549 // EXTRACT_VECTOR_ELT may widen the extracted vector.
18550 SDValue InOp = VecOp.getOperand(0);
18551 if (InOp.getValueType() != ScalarVT) {
18552 assert(InOp.getValueType().isInteger() && ScalarVT.isInteger());
18553 return DAG.getSExtOrTrunc(InOp, DL, ScalarVT);
18554 }
18555 return InOp;
18556 }
18557
18558 // extract_vector_elt of out-of-bounds element -> UNDEF
18559 auto *IndexC = dyn_cast<ConstantSDNode>(Index);
18560 if (IndexC && VecVT.isFixedLengthVector() &&
18561 IndexC->getAPIntValue().uge(VecVT.getVectorNumElements()))
18562 return DAG.getUNDEF(ScalarVT);
18563
18564 // extract_vector_elt (build_vector x, y), 1 -> y
18565 if (((IndexC && VecOp.getOpcode() == ISD::BUILD_VECTOR) ||
18566 VecOp.getOpcode() == ISD::SPLAT_VECTOR) &&
18567 TLI.isTypeLegal(VecVT) &&
18568 (VecOp.hasOneUse() || TLI.aggressivelyPreferBuildVectorSources(VecVT))) {
18569 assert((VecOp.getOpcode() != ISD::BUILD_VECTOR ||
18570 VecVT.isFixedLengthVector()) &&
18571 "BUILD_VECTOR used for scalable vectors");
18572 unsigned IndexVal =
18573 VecOp.getOpcode() == ISD::BUILD_VECTOR ? IndexC->getZExtValue() : 0;
18574 SDValue Elt = VecOp.getOperand(IndexVal);
18575 EVT InEltVT = Elt.getValueType();
18576
18577 // Sometimes build_vector's scalar input types do not match result type.
18578 if (ScalarVT == InEltVT)
18579 return Elt;
18580
18581 // TODO: It may be useful to truncate (when truncation is free) if the
18582 // build_vector implicitly converts.
18583 }
18584
18585 if (VecVT.isScalableVector())
18586 return SDValue();
18587
18588 // All the code from this point onwards assumes fixed width vectors, but it's
18589 // possible that some of the combinations could be made to work for scalable
18590 // vectors too.
18591 unsigned NumElts = VecVT.getVectorNumElements();
18592 unsigned VecEltBitWidth = VecVT.getScalarSizeInBits();
18593
18594 // TODO: These transforms should not require the 'hasOneUse' restriction, but
18595 // there are regressions on multiple targets without it. We can end up with a
18596 // mess of scalar and vector code if we reduce only part of the DAG to scalar.
18597 if (IndexC && VecOp.getOpcode() == ISD::BITCAST && VecVT.isInteger() &&
18598 VecOp.hasOneUse()) {
18600 // The vector index of the LSBs of the source depends on the endianness.
18600 bool IsLE = DAG.getDataLayout().isLittleEndian();
18601 unsigned ExtractIndex = IndexC->getZExtValue();
18602 // extract_elt (v2i32 (bitcast i64:x)), BCTruncElt -> i32 (trunc i64:x)
18603 unsigned BCTruncElt = IsLE ? 0 : NumElts - 1;
18604 SDValue BCSrc = VecOp.getOperand(0);
18605 if (ExtractIndex == BCTruncElt && BCSrc.getValueType().isScalarInteger())
18606 return DAG.getNode(ISD::TRUNCATE, DL, ScalarVT, BCSrc);
18607
18608 if (LegalTypes && BCSrc.getValueType().isInteger() &&
18609 BCSrc.getOpcode() == ISD::SCALAR_TO_VECTOR) {
18610 // ext_elt (bitcast (scalar_to_vec i64 X to v2i64) to v4i32), TruncElt -->
18611 // trunc i64 X to i32
18612 SDValue X = BCSrc.getOperand(0);
18613 assert(X.getValueType().isScalarInteger() && ScalarVT.isScalarInteger() &&
18614 "Extract element and scalar to vector can't change element type "
18615 "from FP to integer.");
18616 unsigned XBitWidth = X.getValueSizeInBits();
18617 BCTruncElt = IsLE ? 0 : XBitWidth / VecEltBitWidth - 1;
18618
18619 // An extract element return value type can be wider than its vector
18620 // operand element type. In that case, the high bits are undefined, so
18621 // it's possible that we may need to extend rather than truncate.
18622 if (ExtractIndex == BCTruncElt && XBitWidth > VecEltBitWidth) {
18623 assert(XBitWidth % VecEltBitWidth == 0 &&
18624 "Scalar bitwidth must be a multiple of vector element bitwidth");
18625 return DAG.getAnyExtOrTrunc(X, DL, ScalarVT);
18626 }
18627 }
18628 }
18629
18630 if (SDValue BO = scalarizeExtractedBinop(N, DAG, LegalOperations))
18631 return BO;
18632
18633 // Transform: (EXTRACT_VECTOR_ELT( VECTOR_SHUFFLE )) -> EXTRACT_VECTOR_ELT.
18634 // We only perform this optimization before the op legalization phase because
18635 // we may introduce new vector instructions which are not backed by TD
18636 // patterns. For example, on AVX, extracting an element from a wide vector
18637 // without using extract_subvector has no TD pattern. However, if we can find
18638 // an underlying scalar value, then we can always use that.
18639 if (IndexC && VecOp.getOpcode() == ISD::VECTOR_SHUFFLE) {
18640 auto *Shuf = cast<ShuffleVectorSDNode>(VecOp);
18641 // Find the new index to extract from.
18642 int OrigElt = Shuf->getMaskElt(IndexC->getZExtValue());
18643
18644 // Extracting an undef index is undef.
18645 if (OrigElt == -1)
18646 return DAG.getUNDEF(ScalarVT);
18647
18648 // Select the right vector half to extract from.
18649 SDValue SVInVec;
18650 if (OrigElt < (int)NumElts) {
18651 SVInVec = VecOp.getOperand(0);
18652 } else {
18653 SVInVec = VecOp.getOperand(1);
18654 OrigElt -= NumElts;
18655 }
18656
18657 if (SVInVec.getOpcode() == ISD::BUILD_VECTOR) {
18658 SDValue InOp = SVInVec.getOperand(OrigElt);
18659 if (InOp.getValueType() != ScalarVT) {
18660 assert(InOp.getValueType().isInteger() && ScalarVT.isInteger());
18661 InOp = DAG.getSExtOrTrunc(InOp, DL, ScalarVT);
18662 }
18663
18664 return InOp;
18665 }
18666
18667 // FIXME: We should handle recursing on other vector shuffles and
18668 // scalar_to_vector here as well.
18669
18670 if (!LegalOperations ||
18671 // FIXME: Should really be just isOperationLegalOrCustom.
18672 TLI.isOperationLegal(ISD::EXTRACT_VECTOR_ELT, VecVT) ||
18673 TLI.isOperationExpand(ISD::VECTOR_SHUFFLE, VecVT)) {
18674 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ScalarVT, SVInVec,
18675 DAG.getVectorIdxConstant(OrigElt, DL));
18676 }
18677 }
18678
18679 // If only EXTRACT_VECTOR_ELT nodes use the source vector we can
18680 // simplify it based on the (valid) extraction indices.
18681 if (llvm::all_of(VecOp->uses(), [&](SDNode *Use) {
18682 return Use->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
18683 Use->getOperand(0) == VecOp &&
18684 isa<ConstantSDNode>(Use->getOperand(1));
18685 })) {
18686 APInt DemandedElts = APInt::getNullValue(NumElts);
18687 for (SDNode *Use : VecOp->uses()) {
18688 auto *CstElt = cast<ConstantSDNode>(Use->getOperand(1));
18689 if (CstElt->getAPIntValue().ult(NumElts))
18690 DemandedElts.setBit(CstElt->getZExtValue());
18691 }
18692 if (SimplifyDemandedVectorElts(VecOp, DemandedElts, true)) {
18693 // We simplified the vector operand of this extract element. If this
18694 // extract is not dead, visit it again so it is folded properly.
18695 if (N->getOpcode() != ISD::DELETED_NODE)
18696 AddToWorklist(N);
18697 return SDValue(N, 0);
18698 }
18699 APInt DemandedBits = APInt::getAllOnesValue(VecEltBitWidth);
18700 if (SimplifyDemandedBits(VecOp, DemandedBits, DemandedElts, true)) {
18701 // We simplified the vector operand of this extract element. If this
18702 // extract is not dead, visit it again so it is folded properly.
18703 if (N->getOpcode() != ISD::DELETED_NODE)
18704 AddToWorklist(N);
18705 return SDValue(N, 0);
18706 }
18707 }
18708
18709 // Everything under here is trying to match an extract of a loaded value.
18710 // If the result of the load has to be truncated, then it's not necessarily
18711 // profitable.
18712 bool BCNumEltsChanged = false;
18713 EVT ExtVT = VecVT.getVectorElementType();
18714 EVT LVT = ExtVT;
18715 if (ScalarVT.bitsLT(LVT) && !TLI.isTruncateFree(LVT, ScalarVT))
18716 return SDValue();
18717
18718 if (VecOp.getOpcode() == ISD::BITCAST) {
18719 // Don't duplicate a load with other uses.
18720 if (!VecOp.hasOneUse())
18721 return SDValue();
18722
18723 EVT BCVT = VecOp.getOperand(0).getValueType();
18724 if (!BCVT.isVector() || ExtVT.bitsGT(BCVT.getVectorElementType()))
18725 return SDValue();
18726 if (NumElts != BCVT.getVectorNumElements())
18727 BCNumEltsChanged = true;
18728 VecOp = VecOp.getOperand(0);
18729 ExtVT = BCVT.getVectorElementType();
18730 }
18731
18732 // extract (vector load $addr), i --> load $addr + i * size
18733 if (!LegalOperations && !IndexC && VecOp.hasOneUse() &&
18734 ISD::isNormalLoad(VecOp.getNode()) &&
18735 !Index->hasPredecessor(VecOp.getNode())) {
18736 auto *VecLoad = dyn_cast<LoadSDNode>(VecOp);
18737 if (VecLoad && VecLoad->isSimple())
18738 return scalarizeExtractedVectorLoad(N, VecVT, Index, VecLoad);
18739 }
18740
18741 // Perform only after legalization to ensure build_vector / vector_shuffle
18742 // optimizations have already been done.
18743 if (!LegalOperations || !IndexC)
18744 return SDValue();
18745
18746 // (vextract (v4f32 load $addr), c) -> (f32 load $addr+c*size)
18747 // (vextract (v4f32 s2v (f32 load $addr)), c) -> (f32 load $addr+c*size)
18748 // (vextract (v4f32 shuffle (load $addr), <1,u,u,u>), 0) -> (f32 load $addr)
18749 int Elt = IndexC->getZExtValue();
18750 LoadSDNode *LN0 = nullptr;
18751 if (ISD::isNormalLoad(VecOp.getNode())) {
18752 LN0 = cast<LoadSDNode>(VecOp);
18753 } else if (VecOp.getOpcode() == ISD::SCALAR_TO_VECTOR &&
18754 VecOp.getOperand(0).getValueType() == ExtVT &&
18755 ISD::isNormalLoad(VecOp.getOperand(0).getNode())) {
18756 // Don't duplicate a load with other uses.
18757 if (!VecOp.hasOneUse())
18758 return SDValue();
18759
18760 LN0 = cast<LoadSDNode>(VecOp.getOperand(0));
18761 }
18762 if (auto *Shuf = dyn_cast<ShuffleVectorSDNode>(VecOp)) {
18763 // (vextract (vector_shuffle (load $addr), v2, <1, u, u, u>), 1)
18764 // =>
18765 // (load $addr+1*size)
18766
18767 // Don't duplicate a load with other uses.
18768 if (!VecOp.hasOneUse())
18769 return SDValue();
18770
18771 // If the bit convert changed the number of elements, it is unsafe
18772 // to examine the mask.
18773 if (BCNumEltsChanged)
18774 return SDValue();
18775
18776 // Select the input vector, guarding against an out-of-range extract index.
18777 int Idx = (Elt > (int)NumElts) ? -1 : Shuf->getMaskElt(Elt);
18778 VecOp = (Idx < (int)NumElts) ? VecOp.getOperand(0) : VecOp.getOperand(1);
18779
18780 if (VecOp.getOpcode() == ISD::BITCAST) {
18781 // Don't duplicate a load with other uses.
18782 if (!VecOp.hasOneUse())
18783 return SDValue();
18784
18785 VecOp = VecOp.getOperand(0);
18786 }
18787 if (ISD::isNormalLoad(VecOp.getNode())) {
18788 LN0 = cast<LoadSDNode>(VecOp);
18789 Elt = (Idx < (int)NumElts) ? Idx : Idx - (int)NumElts;
18790 Index = DAG.getConstant(Elt, DL, Index.getValueType());
18791 }
18792 } else if (VecOp.getOpcode() == ISD::CONCAT_VECTORS && !BCNumEltsChanged &&
18793 VecVT.getVectorElementType() == ScalarVT &&
18794 (!LegalTypes ||
18795 TLI.isTypeLegal(
18796 VecOp.getOperand(0).getValueType().getVectorElementType()))) {
18797 // extract_vector_elt (concat_vectors v2i16:a, v2i16:b), 0
18798 // -> extract_vector_elt a, 0
18799 // extract_vector_elt (concat_vectors v2i16:a, v2i16:b), 1
18800 // -> extract_vector_elt a, 1
18801 // extract_vector_elt (concat_vectors v2i16:a, v2i16:b), 2
18802 // -> extract_vector_elt b, 0
18803 // extract_vector_elt (concat_vectors v2i16:a, v2i16:b), 3
18804 // -> extract_vector_elt b, 1
18805 SDLoc SL(N);
18806 EVT ConcatVT = VecOp.getOperand(0).getValueType();
18807 unsigned ConcatNumElts = ConcatVT.getVectorNumElements();
18808 SDValue NewIdx = DAG.getConstant(Elt % ConcatNumElts, SL,
18809 Index.getValueType());
18810
18811 SDValue ConcatOp = VecOp.getOperand(Elt / ConcatNumElts);
18812 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL,
18813 ConcatVT.getVectorElementType(),
18814 ConcatOp, NewIdx);
18815 return DAG.getNode(ISD::BITCAST, SL, ScalarVT, Elt);
18816 }
18817
18818 // Make sure we found a simple (non-volatile, non-atomic) load and that the
18819 // extractelement is its only use.
18820 if (!LN0 || !LN0->hasNUsesOfValue(1,0) || !LN0->isSimple())
18821 return SDValue();
18822
18823 // If Idx was -1 above, Elt is going to be -1, so just return undef.
18824 if (Elt == -1)
18825 return DAG.getUNDEF(LVT);
18826
18827 return scalarizeExtractedVectorLoad(N, VecVT, Index, LN0);
18828}
18829
18830// Simplify (build_vec (ext )) to (bitcast (build_vec ))
18831SDValue DAGCombiner::reduceBuildVecExtToExtBuildVec(SDNode *N) {
18832 // We perform this optimization post type-legalization because
18833 // the type-legalizer often scalarizes integer-promoted vectors.
18834 // Performing this optimization before may create bit-casts which
18835 // will be type-legalized to complex code sequences.
18836 // We perform this optimization only before the operation legalizer because we
18837 // may introduce illegal operations.
18838 if (Level != AfterLegalizeVectorOps && Level != AfterLegalizeTypes)
18839 return SDValue();
18840
18841 unsigned NumInScalars = N->getNumOperands();
18842 SDLoc DL(N);
18843 EVT VT = N->getValueType(0);
18844
18845 // Check to see if this is a BUILD_VECTOR of a bunch of values
18846 // which come from any_extend or zero_extend nodes. If so, we can create
18847 // a new BUILD_VECTOR using bit-casts which may enable other BUILD_VECTOR
18848 // optimizations. We do not handle sign-extend because we can't fill the sign
18849 // using shuffles.
18850 EVT SourceType = MVT::Other;
18851 bool AllAnyExt = true;
18852
18853 for (unsigned i = 0; i != NumInScalars; ++i) {
18854 SDValue In = N->getOperand(i);
18855 // Ignore undef inputs.
18856 if (In.isUndef()) continue;
18857
18858 bool AnyExt = In.getOpcode() == ISD::ANY_EXTEND;
18859 bool ZeroExt = In.getOpcode() == ISD::ZERO_EXTEND;
18860
18861 // Abort if the element is not an extension.
18862 if (!ZeroExt && !AnyExt) {
18863 SourceType = MVT::Other;
18864 break;
18865 }
18866
18867 // The input is a ZeroExt or AnyExt. Check the original type.
18868 EVT InTy = In.getOperand(0).getValueType();
18869
18870 // Check that all of the widened source types are the same.
18871 if (SourceType == MVT::Other)
18872 // First time.
18873 SourceType = InTy;
18874 else if (InTy != SourceType) {
18875 // Multiple incoming types. Abort.
18876 SourceType = MVT::Other;
18877 break;
18878 }
18879
18880 // Check if all of the extends are ANY_EXTENDs.
18881 AllAnyExt &= AnyExt;
18882 }
18883
18884 // In order to have valid types, all of the inputs must be extended from the
18885 // same source type and all of the inputs must be any or zero extend.
18886 // Scalar sizes must be a power of two.
18887 EVT OutScalarTy = VT.getScalarType();
18888 bool ValidTypes = SourceType != MVT::Other &&
18889 isPowerOf2_32(OutScalarTy.getSizeInBits()) &&
18890 isPowerOf2_32(SourceType.getSizeInBits());
18891
18892 // Create a new simpler BUILD_VECTOR sequence which other optimizations can
18893 // turn into a single shuffle instruction.
18894 if (!ValidTypes)
18895 return SDValue();
18896
18897 // If we already have a splat buildvector, then don't fold it if it means
18898 // introducing zeros.
18899 if (!AllAnyExt && DAG.isSplatValue(SDValue(N, 0), /*AllowUndefs*/ true))
18900 return SDValue();
18901
18902 bool isLE = DAG.getDataLayout().isLittleEndian();
18903 unsigned ElemRatio = OutScalarTy.getSizeInBits()/SourceType.getSizeInBits();
18904 assert(ElemRatio > 1 && "Invalid element size ratio");
18905 SDValue Filler = AllAnyExt ? DAG.getUNDEF(SourceType):
18906 DAG.getConstant(0, DL, SourceType);
18907
18908 unsigned NewBVElems = ElemRatio * VT.getVectorNumElements();
18909 SmallVector<SDValue, 8> Ops(NewBVElems, Filler);
18910
18911 // Populate the new build_vector
18912 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
18913 SDValue Cast = N->getOperand(i);
18914 assert((Cast.getOpcode() == ISD::ANY_EXTEND ||
18915 Cast.getOpcode() == ISD::ZERO_EXTEND ||
18916 Cast.isUndef()) && "Invalid cast opcode");
18917 SDValue In;
18918 if (Cast.isUndef())
18919 In = DAG.getUNDEF(SourceType);
18920 else
18921 In = Cast->getOperand(0);
18922 unsigned Index = isLE ? (i * ElemRatio) :
18923 (i * ElemRatio + (ElemRatio - 1));
18924
18925 assert(Index < Ops.size() && "Invalid index");
18926 Ops[Index] = In;
18927 }
18928
18929 // The type of the new BUILD_VECTOR node.
18930 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), SourceType, NewBVElems);
18931 assert(VecVT.getSizeInBits() == VT.getSizeInBits() &&
18932 "Invalid vector size");
18933 // Check if the new vector type is legal.
18934 if (!isTypeLegal(VecVT) ||
18935 (!TLI.isOperationLegal(ISD::BUILD_VECTOR, VecVT) &&
18936 TLI.isOperationLegal(ISD::BUILD_VECTOR, VT)))
18937 return SDValue();
18938
18939 // Make the new BUILD_VECTOR.
18940 SDValue BV = DAG.getBuildVector(VecVT, DL, Ops);
18941
18942 // The new BUILD_VECTOR node has the potential to be further optimized.
18943 AddToWorklist(BV.getNode());
18944 // Bitcast to the desired type.
18945 return DAG.getBitcast(VT, BV);
18946}
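// Illustrative (little-endian) instance of the combine above:
//   (v2i16 build_vector (zext i8 %a to i16), (zext i8 %b to i16))
// has ElemRatio = 2 and a zero Filler (the extends are not all any_ext),
// so it becomes
//   (bitcast (v4i8 build_vector %a, 0, %b, 0) to v2i16)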
18947
18948// Simplify (build_vec (trunc $1)
18949// (trunc (srl $1 half-width))
18950// (trunc (srl $1 (2 * half-width))) …)
18951// to (bitcast $1)
18952SDValue DAGCombiner::reduceBuildVecTruncToBitCast(SDNode *N) {
18953 assert(N->getOpcode() == ISD::BUILD_VECTOR && "Expected build vector");
18954
18955 // Only for little endian
18956 if (!DAG.getDataLayout().isLittleEndian())
18957 return SDValue();
18958
18959 SDLoc DL(N);
18960 EVT VT = N->getValueType(0);
18961 EVT OutScalarTy = VT.getScalarType();
18962 uint64_t ScalarTypeBitsize = OutScalarTy.getSizeInBits();
18963
18964 // Only handle power-of-two type sizes, so the bitcast is guaranteed to work.
18965 if (!isPowerOf2_64(ScalarTypeBitsize))
18966 return SDValue();
18967
18968 unsigned NumInScalars = N->getNumOperands();
18969
18970 // Look through bitcasts
18971 auto PeekThroughBitcast = [](SDValue Op) {
18972 if (Op.getOpcode() == ISD::BITCAST)
18973 return Op.getOperand(0);
18974 return Op;
18975 };
18976
18977 // The source value where all the parts are extracted.
18978 SDValue Src;
18979 for (unsigned i = 0; i != NumInScalars; ++i) {
18980 SDValue In = PeekThroughBitcast(N->getOperand(i));
18981 // Ignore undef inputs.
18982 if (In.isUndef()) continue;
18983
18984 if (In.getOpcode() != ISD::TRUNCATE)
18985 return SDValue();
18986
18987 In = PeekThroughBitcast(In.getOperand(0));
18988
18989 if (In.getOpcode() != ISD::SRL) {
18990 // For now we only handle build_vec without shuffling; shifts may be
18991 // handled here in the future.
18992 if (i != 0)
18993 return SDValue();
18994
18995 Src = In;
18996 } else {
18997 // In is SRL
18998 SDValue part = PeekThroughBitcast(In.getOperand(0));
18999
19000 if (!Src) {
19001 Src = part;
19002 } else if (Src != part) {
19003 // Vector parts do not stem from the same variable
19004 return SDValue();
19005 }
19006
19007 SDValue ShiftAmtVal = In.getOperand(1);
19008 if (!isa<ConstantSDNode>(ShiftAmtVal))
19009 return SDValue();
19010
19011 uint64_t ShiftAmt = In.getNode()->getConstantOperandVal(1);
19012
19013 // The extracted value is not extracted at the right position
19014 if (ShiftAmt != i * ScalarTypeBitsize)
19015 return SDValue();
19016 }
19017 }
19018
19019 // Only cast if the size is the same
19020 if (Src.getValueType().getSizeInBits() != VT.getSizeInBits())
19021 return SDValue();
19022
19023 return DAG.getBitcast(VT, Src);
19024}
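// Illustrative (little-endian) instance:
//   (v2i32 build_vector (trunc i64 %x to i32),
//                       (trunc (srl %x, 32) to i32))
// extracts the two halves of %x at their natural positions, so it folds to
//   (bitcast i64 %x to v2i32)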
19025
19026SDValue DAGCombiner::createBuildVecShuffle(const SDLoc &DL, SDNode *N,
19027 ArrayRef<int> VectorMask,
19028 SDValue VecIn1, SDValue VecIn2,
19029 unsigned LeftIdx, bool DidSplitVec) {
19030 SDValue ZeroIdx = DAG.getVectorIdxConstant(0, DL);
19031
19032 EVT VT = N->getValueType(0);
19033 EVT InVT1 = VecIn1.getValueType();
19034 EVT InVT2 = VecIn2.getNode() ? VecIn2.getValueType() : InVT1;
19035
19036 unsigned NumElems = VT.getVectorNumElements();
19037 unsigned ShuffleNumElems = NumElems;
19038
19039 // If we artificially split a vector in two already, then the offsets in the
19040 // operands will all be based off of VecIn1, even those in VecIn2.
19041 unsigned Vec2Offset = DidSplitVec ? 0 : InVT1.getVectorNumElements();
19042
19043 uint64_t VTSize = VT.getFixedSizeInBits();
19044 uint64_t InVT1Size = InVT1.getFixedSizeInBits();
19045 uint64_t InVT2Size = InVT2.getFixedSizeInBits();
19046
19047 // We can't generate a shuffle node with mismatched input and output types.
19048 // Try to make the types match the type of the output.
19049 if (InVT1 != VT || InVT2 != VT) {
19050 if ((VTSize % InVT1Size == 0) && InVT1 == InVT2) {
19051 // If the output vector length is a multiple of both input lengths,
19052 // we can concatenate them and pad the rest with undefs.
19053 unsigned NumConcats = VTSize / InVT1Size;
19054 assert(NumConcats >= 2 && "Concat needs at least two inputs!");
19055 SmallVector<SDValue, 2> ConcatOps(NumConcats, DAG.getUNDEF(InVT1));
19056 ConcatOps[0] = VecIn1;
19057 ConcatOps[1] = VecIn2 ? VecIn2 : DAG.getUNDEF(InVT1);
19058 VecIn1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps);
19059 VecIn2 = SDValue();
19060 } else if (InVT1Size == VTSize * 2) {
19061 if (!TLI.isExtractSubvectorCheap(VT, InVT1, NumElems))
19062 return SDValue();
19063
19064 if (!VecIn2.getNode()) {
19065 // If we only have one input vector, and it's twice the size of the
19066 // output, split it in two.
19067 VecIn2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, VecIn1,
19068 DAG.getVectorIdxConstant(NumElems, DL));
19069 VecIn1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, VecIn1, ZeroIdx);
19070 // Since we now have shorter input vectors, adjust the offset of the
19071 // second vector's start.
19072 Vec2Offset = NumElems;
19073 } else if (InVT2Size <= InVT1Size) {
19074 // VecIn1 is wider than the output, and we have another, possibly
19075 // smaller input. Pad the smaller input with undefs, shuffle at the
19076 // input vector width, and extract the output.
19077 // The shuffle type is different than VT, so check legality again.
19078 if (LegalOperations &&
19079 !TLI.isOperationLegal(ISD::VECTOR_SHUFFLE, InVT1))
19080 return SDValue();
19081
19082 // Legalizing INSERT_SUBVECTOR is tricky - you basically have to
19083 // lower it back into a BUILD_VECTOR. So if the inserted type is
19084 // illegal, don't even try.
19085 if (InVT1 != InVT2) {
19086 if (!TLI.isTypeLegal(InVT2))
19087 return SDValue();
19088 VecIn2 = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InVT1,
19089 DAG.getUNDEF(InVT1), VecIn2, ZeroIdx);
19090 }
19091 ShuffleNumElems = NumElems * 2;
19092 } else {
19093 // Both VecIn1 and VecIn2 are wider than the output, and VecIn2 is wider
19094 // than VecIn1. We can't handle this for now - this case will disappear
19095 // when we start sorting the vectors by type.
19096 return SDValue();
19097 }
19098 } else if (InVT2Size * 2 == VTSize && InVT1Size == VTSize) {
19099 SmallVector<SDValue, 2> ConcatOps(2, DAG.getUNDEF(InVT2));
19100 ConcatOps[0] = VecIn2;
19101 VecIn2 = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps);
19102 } else {
19103 // TODO: Support cases where the length mismatch isn't exactly by a
19104 // factor of 2.
19105 // TODO: Move this check upwards, so that if we have bad type
19106 // mismatches, we don't create any DAG nodes.
19107 return SDValue();
19108 }
19109 }
19110
19111 // Initialize mask to undef.
19112 SmallVector<int, 8> Mask(ShuffleNumElems, -1);
19113
19114 // Only need to run up to the number of elements actually used, not the
19115 // total number of elements in the shuffle - if we are shuffling a wider
19116 // vector, the high lanes should be set to undef.
19117 for (unsigned i = 0; i != NumElems; ++i) {
19118 if (VectorMask[i] <= 0)
19119 continue;
19120
19121 unsigned ExtIndex = N->getOperand(i).getConstantOperandVal(1);
19122 if (VectorMask[i] == (int)LeftIdx) {
19123 Mask[i] = ExtIndex;
19124 } else if (VectorMask[i] == (int)LeftIdx + 1) {
19125 Mask[i] = Vec2Offset + ExtIndex;
19126 }
19127 }
19128
19129 // The types of the input vectors may have changed above.
19130 InVT1 = VecIn1.getValueType();
19131
19132 // If we already have a VecIn2, it should have the same type as VecIn1.
19133 // If we don't, get an undef/zero vector of the appropriate type.
19134 VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(InVT1);
19135 assert(InVT1 == VecIn2.getValueType() && "Unexpected second input type.");
19136
19137 SDValue Shuffle = DAG.getVectorShuffle(InVT1, DL, VecIn1, VecIn2, Mask);
19138 if (ShuffleNumElems > NumElems)
19139 Shuffle = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Shuffle, ZeroIdx);
19140
19141 return Shuffle;
19142}
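
// A minimal scalar model of the shuffle-mask convention used above
// (hypothetical lane values; not LLVM code): mask entries in [0, N) select
// from the first input, entries in [N, 2N) select from the second, and -1
// marks an undef lane.
#include <cassert>
#include <vector>

static std::vector<int> Shuffle(const std::vector<int> &V1,
                                const std::vector<int> &V2,
                                const std::vector<int> &Mask) {
  std::vector<int> R;
  for (int M : Mask)
    R.push_back(M < 0 ? 0 /* undef lane, value arbitrary */
                : M < (int)V1.size() ? V1[M]
                                     : V2[M - V1.size()]);
  return R;
}

int main() {
  std::vector<int> A = {10, 11, 12, 13}, B = {20, 21, 22, 23};
  // vector_shuffle<0,4,u,1> A, B: lane 1 comes from B via Vec2Offset.
  std::vector<int> R = Shuffle(A, B, {0, 4, -1, 1});
  assert(R[0] == 10 && R[1] == 20 && R[3] == 11);
  return 0;
}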
19143
19144static SDValue reduceBuildVecToShuffleWithZero(SDNode *BV, SelectionDAG &DAG) {
19145 assert(BV->getOpcode() == ISD::BUILD_VECTOR && "Expected build vector");
19146
19147 // First, determine where the build vector is not undef.
19148 // TODO: We could extend this to handle zero elements as well as undefs.
19149 int NumBVOps = BV->getNumOperands();
19150 int ZextElt = -1;
19151 for (int i = 0; i != NumBVOps; ++i) {
19152 SDValue Op = BV->getOperand(i);
19153 if (Op.isUndef())
19154 continue;
19155 if (ZextElt == -1)
19156 ZextElt = i;
19157 else
19158 return SDValue();
19159 }
19160 // Bail out if there's no non-undef element.
19161 if (ZextElt == -1)
19162 return SDValue();
19163
19164 // The build vector contains some number of undef elements and exactly
19165 // one other element. That other element must be a zero-extended scalar
19166 // extracted from a vector at a constant index to turn this into a shuffle.
19167 // Also, require that the build vector does not implicitly truncate/extend
19168 // its elements.
19169 // TODO: This could be enhanced to allow ANY_EXTEND as well as ZERO_EXTEND.
19170 EVT VT = BV->getValueType(0);
19171 SDValue Zext = BV->getOperand(ZextElt);
19172 if (Zext.getOpcode() != ISD::ZERO_EXTEND || !Zext.hasOneUse() ||
19173 Zext.getOperand(0).getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
19174 !isa<ConstantSDNode>(Zext.getOperand(0).getOperand(1)) ||
19175 Zext.getValueSizeInBits() != VT.getScalarSizeInBits())
19176 return SDValue();
19177
19178 // The zero-extend must be a multiple of the source size, and we must be
19179 // building a vector of the same size as the source of the extract element.
19180 SDValue Extract = Zext.getOperand(0);
19181 unsigned DestSize = Zext.getValueSizeInBits();
19182 unsigned SrcSize = Extract.getValueSizeInBits();
19183 if (DestSize % SrcSize != 0 ||
19184 Extract.getOperand(0).getValueSizeInBits() != VT.getSizeInBits())
19185 return SDValue();
19186
19187 // Create a shuffle mask that will combine the extracted element with zeros
19188 // and undefs.
19189 int ZextRatio = DestSize / SrcSize;
19190 int NumMaskElts = NumBVOps * ZextRatio;
19191 SmallVector<int, 32> ShufMask(NumMaskElts, -1);
19192 for (int i = 0; i != NumMaskElts; ++i) {
19193 if (i / ZextRatio == ZextElt) {
19194 // The low bits of the (potentially translated) extracted element map to
19195 // the source vector. The high bits map to zero. We will use a zero vector
19196 // as the 2nd source operand of the shuffle, so use the 1st element of
19197 // that vector (mask value is number-of-elements) for the high bits.
19198 if (i % ZextRatio == 0)
19199 ShufMask[i] = Extract.getConstantOperandVal(1);
19200 else
19201 ShufMask[i] = NumMaskElts;
19202 }
19203
19204 // Undef elements of the build vector remain undef because we initialize
19205 // the shuffle mask with -1.
19206 }
19207
19208 // buildvec undef, ..., (zext (extractelt V, IndexC)), undef... -->
19209 // bitcast (shuffle V, ZeroVec, VectorMask)
19210 SDLoc DL(BV);
19211 EVT VecVT = Extract.getOperand(0).getValueType();
19212 SDValue ZeroVec = DAG.getConstant(0, DL, VecVT);
19213 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
19214 SDValue Shuf = TLI.buildLegalVectorShuffle(VecVT, DL, Extract.getOperand(0),
19215 ZeroVec, ShufMask, DAG);
19216 if (!Shuf)
19217 return SDValue();
19218 return DAG.getBitcast(VT, Shuf);
19219}
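
// A standalone re-run of the mask loop above with hypothetical values: four
// build_vector operands, a zext that doubles the element width (ZextRatio of
// 2), the one non-undef element at position 1, and an extract index of 3. The
// low half of that lane reads source element 3; the high half reads the
// all-zero second shuffle operand (mask value NumMaskElts == 8).
#include <cassert>
#include <vector>

int main() {
  int NumBVOps = 4, ZextRatio = 2, ZextElt = 1, ExtractIdx = 3;
  int NumMaskElts = NumBVOps * ZextRatio;
  std::vector<int> ShufMask(NumMaskElts, -1);
  for (int i = 0; i != NumMaskElts; ++i)
    if (i / ZextRatio == ZextElt)
      ShufMask[i] = (i % ZextRatio == 0) ? ExtractIdx : NumMaskElts;
  assert((ShufMask == std::vector<int>{-1, -1, 3, 8, -1, -1, -1, -1}));
  return 0;
}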
19220
19221// Check to see if this is a BUILD_VECTOR of a bunch of EXTRACT_VECTOR_ELT
19222// operations. If the types of the vectors we're extracting from allow it,
19223// turn this into a vector_shuffle node.
19224SDValue DAGCombiner::reduceBuildVecToShuffle(SDNode *N) {
19225 SDLoc DL(N);
19226 EVT VT = N->getValueType(0);
19227
19228 // Only type-legal BUILD_VECTOR nodes are converted to shuffle nodes.
19229 if (!isTypeLegal(VT))
19230 return SDValue();
19231
19232 if (SDValue V = reduceBuildVecToShuffleWithZero(N, DAG))
19233 return V;
19234
19235 // May only combine to shuffle after legalize if shuffle is legal.
19236 if (LegalOperations && !TLI.isOperationLegal(ISD::VECTOR_SHUFFLE, VT))
19237 return SDValue();
19238
19239 bool UsesZeroVector = false;
19240 unsigned NumElems = N->getNumOperands();
19241
19242 // Record, for each element of the newly built vector, which input vector
19243 // that element comes from. -1 stands for undef, 0 for the zero vector,
19244 // and positive values for the input vectors.
19245 // VectorMask maps each element to its vector number, and VecIn maps vector
19246 // numbers to their initial SDValues.
19247
19248 SmallVector<int, 8> VectorMask(NumElems, -1);
19249 SmallVector<SDValue, 8> VecIn;
19250 VecIn.push_back(SDValue());
19251
19252 for (unsigned i = 0; i != NumElems; ++i) {
19253 SDValue Op = N->getOperand(i);
19254
19255 if (Op.isUndef())
19256 continue;
19257
19258 // See if we can use a blend with a zero vector.
19259 // TODO: Should we generalize this to a blend with an arbitrary constant
19260 // vector?
19261 if (isNullConstant(Op) || isNullFPConstant(Op)) {
19262 UsesZeroVector = true;
19263 VectorMask[i] = 0;
19264 continue;
19265 }
19266
19267 // Not an undef or zero. If the input is something other than an
19268 // EXTRACT_VECTOR_ELT with an in-range constant index, bail out.
19269 if (Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
19270 !isa<ConstantSDNode>(Op.getOperand(1)))
19271 return SDValue();
19272 SDValue ExtractedFromVec = Op.getOperand(0);
19273
19274 if (ExtractedFromVec.getValueType().isScalableVector())
19275 return SDValue();
19276
19277 const APInt &ExtractIdx = Op.getConstantOperandAPInt(1);
19278 if (ExtractIdx.uge(ExtractedFromVec.getValueType().getVectorNumElements()))
19279 return SDValue();
19280
19281 // All inputs must have the same element type as the output.
19282 if (VT.getVectorElementType() !=
19283 ExtractedFromVec.getValueType().getVectorElementType())
19284 return SDValue();
19285
19286 // Have we seen this input vector before?
19287 // The vectors are expected to be tiny (usually 1 or 2 elements), so using
19288 // a map back from SDValues to numbers isn't worth it.
19289 unsigned Idx = std::distance(VecIn.begin(), find(VecIn, ExtractedFromVec));
19290 if (Idx == VecIn.size())
19291 VecIn.push_back(ExtractedFromVec);
19292
19293 VectorMask[i] = Idx;
19294 }
19295
19296 // If we didn't find at least one input vector, bail out.
19297 if (VecIn.size() < 2)
19298 return SDValue();
19299
19300 // If all the operands of the BUILD_VECTOR extract from the same
19301 // vector, then split the vector efficiently based on the maximum
19302 // vector access index and adjust the VectorMask and
19303 // VecIn accordingly.
19304 bool DidSplitVec = false;
19305 if (VecIn.size() == 2) {
19306 unsigned MaxIndex = 0;
19307 unsigned NearestPow2 = 0;
19308 SDValue Vec = VecIn.back();
19309 EVT InVT = Vec.getValueType();
19310 SmallVector<unsigned, 8> IndexVec(NumElems, 0);
19311
19312 for (unsigned i = 0; i < NumElems; i++) {
19313 if (VectorMask[i] <= 0)
19314 continue;
19315 unsigned Index = N->getOperand(i).getConstantOperandVal(1);
19316 IndexVec[i] = Index;
19317 MaxIndex = std::max(MaxIndex, Index);
19318 }
19319
19320 NearestPow2 = PowerOf2Ceil(MaxIndex);
19321 if (InVT.isSimple() && NearestPow2 > 2 && MaxIndex < NearestPow2 &&
19322 NumElems * 2 < NearestPow2) {
19323 unsigned SplitSize = NearestPow2 / 2;
19324 EVT SplitVT = EVT::getVectorVT(*DAG.getContext(),
19325 InVT.getVectorElementType(), SplitSize);
19326 if (TLI.isTypeLegal(SplitVT)) {
19327 SDValue VecIn2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, Vec,
19328 DAG.getVectorIdxConstant(SplitSize, DL));
19329 SDValue VecIn1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, Vec,
19330 DAG.getVectorIdxConstant(0, DL));
19331 VecIn.pop_back();
19332 VecIn.push_back(VecIn1);
19333 VecIn.push_back(VecIn2);
19334 DidSplitVec = true;
19335
19336 for (unsigned i = 0; i < NumElems; i++) {
19337 if (VectorMask[i] <= 0)
19338 continue;
19339 VectorMask[i] = (IndexVec[i] < SplitSize) ? 1 : 2;
19340 }
19341 }
19342 }
19343 }
19344
19345 // TODO: We want to sort the vectors by descending length, so that adjacent
19346 // pairs have similar length, and the longer vector is always first in the
19347 // pair.
19348
19349 // TODO: Should this fire if some of the input vectors have illegal types (like
19350 // it does now), or should we let legalization run its course first?
19351
19352 // Shuffle phase:
19353 // Take pairs of vectors, and shuffle them so that the result has elements
19354 // from these vectors in the correct places.
19355 // For example, given:
19356 // t10: i32 = extract_vector_elt t1, Constant:i64<0>
19357 // t11: i32 = extract_vector_elt t2, Constant:i64<0>
19358 // t12: i32 = extract_vector_elt t3, Constant:i64<0>
19359 // t13: i32 = extract_vector_elt t1, Constant:i64<1>
19360 // t14: v4i32 = BUILD_VECTOR t10, t11, t12, t13
19361 // We will generate:
19362 // t20: v4i32 = vector_shuffle<0,4,u,1> t1, t2
19363 // t21: v4i32 = vector_shuffle<u,u,0,u> t3, undef
19364 SmallVector<SDValue, 4> Shuffles;
19365 for (unsigned In = 0, Len = (VecIn.size() / 2); In < Len; ++In) {
19366 unsigned LeftIdx = 2 * In + 1;
19367 SDValue VecLeft = VecIn[LeftIdx];
19368 SDValue VecRight =
19369 (LeftIdx + 1) < VecIn.size() ? VecIn[LeftIdx + 1] : SDValue();
19370
19371 if (SDValue Shuffle = createBuildVecShuffle(DL, N, VectorMask, VecLeft,
19372 VecRight, LeftIdx, DidSplitVec))
19373 Shuffles.push_back(Shuffle);
19374 else
19375 return SDValue();
19376 }
19377
19378 // If we need the zero vector as an "ingredient" in the blend tree, add it
19379 // to the list of shuffles.
19380 if (UsesZeroVector)
19381 Shuffles.push_back(VT.isInteger() ? DAG.getConstant(0, DL, VT)
19382 : DAG.getConstantFP(0.0, DL, VT));
19383
19384 // If we only have one shuffle, we're done.
19385 if (Shuffles.size() == 1)
19386 return Shuffles[0];
19387
19388 // Update the vector mask to point to the post-shuffle vectors.
19389 for (int &Vec : VectorMask)
19390 if (Vec == 0)
19391 Vec = Shuffles.size() - 1;
19392 else
19393 Vec = (Vec - 1) / 2;
19394
19395 // More than one shuffle. Generate a binary tree of blends, e.g. if from
19396 // the previous step we got the set of shuffles t10, t11, t12, t13, we will
19397 // generate:
19398 // t10: v8i32 = vector_shuffle<0,8,u,u,u,u,u,u> t1, t2
19399 // t11: v8i32 = vector_shuffle<u,u,0,8,u,u,u,u> t3, t4
19400 // t12: v8i32 = vector_shuffle<u,u,u,u,0,8,u,u> t5, t6
19401 // t13: v8i32 = vector_shuffle<u,u,u,u,u,u,0,8> t7, t8
19402 // t20: v8i32 = vector_shuffle<0,1,10,11,u,u,u,u> t10, t11
19403 // t21: v8i32 = vector_shuffle<u,u,u,u,4,5,14,15> t12, t13
19404 // t30: v8i32 = vector_shuffle<0,1,2,3,12,13,14,15> t20, t21
19405
19406 // Make sure the initial size of the shuffle list is even.
19407 if (Shuffles.size() % 2)
19408 Shuffles.push_back(DAG.getUNDEF(VT));
19409
19410 for (unsigned CurSize = Shuffles.size(); CurSize > 1; CurSize /= 2) {
19411 if (CurSize % 2) {
19412 Shuffles[CurSize] = DAG.getUNDEF(VT);
19413 CurSize++;
19414 }
19415 for (unsigned In = 0, Len = CurSize / 2; In < Len; ++In) {
19416 int Left = 2 * In;
19417 int Right = 2 * In + 1;
19418 SmallVector<int, 8> Mask(NumElems, -1);
19419 for (unsigned i = 0; i != NumElems; ++i) {
19420 if (VectorMask[i] == Left) {
19421 Mask[i] = i;
19422 VectorMask[i] = In;
19423 } else if (VectorMask[i] == Right) {
19424 Mask[i] = i + NumElems;
19425 VectorMask[i] = In;
19426 }
19427 }
19428
19429 Shuffles[In] =
19430 DAG.getVectorShuffle(VT, DL, Shuffles[Left], Shuffles[Right], Mask);
19431 }
19432 }
19433 return Shuffles[0];
19434}
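
// A standalone re-run of the VectorMask renumbering above (hypothetical
// values): input-vector numbers 1..4 map onto pair shuffles 0..1 via
// (Vec - 1) / 2, the zero vector (0) maps to the last shuffle, and undef
// (-1) stays -1 because -2 / 2 == -1 in C++.
#include <cassert>
#include <vector>

int main() {
  std::vector<int> VectorMask = {1, 2, 3, 4, 0, -1};
  int NumShuffles = 3; // two pair shuffles plus the zero vector
  for (int &Vec : VectorMask)
    if (Vec == 0)
      Vec = NumShuffles - 1;
    else
      Vec = (Vec - 1) / 2;
  assert((VectorMask == std::vector<int>{0, 0, 1, 1, 2, -1}));
  return 0;
}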
19435
19436 // Try to turn a build vector of zero extends of extract vector elts into a
19437 // vector zero extend and possibly an extract subvector.
19438// TODO: Support sign extend?
19439// TODO: Allow undef elements?
19440SDValue DAGCombiner::convertBuildVecZextToZext(SDNode *N) {
19441 if (LegalOperations)
19442 return SDValue();
19443
19444 EVT VT = N->getValueType(0);
19445
19446 bool FoundZeroExtend = false;
19447 SDValue Op0 = N->getOperand(0);
19448 auto checkElem = [&](SDValue Op) -> int64_t {
19449 unsigned Opc = Op.getOpcode();
19450 FoundZeroExtend |= (Opc == ISD::ZERO_EXTEND);
19451 if ((Opc == ISD::ZERO_EXTEND || Opc == ISD::ANY_EXTEND) &&
19452 Op.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
19453 Op0.getOperand(0).getOperand(0) == Op.getOperand(0).getOperand(0))
19454 if (auto *C = dyn_cast<ConstantSDNode>(Op.getOperand(0).getOperand(1)))
19455 return C->getZExtValue();
19456 return -1;
19457 };
19458
19459 // Make sure the first element matches
19460 // (zext (extract_vector_elt X, C))
19461 int64_t Offset = checkElem(Op0);
19462 if (Offset < 0)
19463 return SDValue();
19464
19465 unsigned NumElems = N->getNumOperands();
19466 SDValue In = Op0.getOperand(0).getOperand(0);
19467 EVT InSVT = In.getValueType().getScalarType();
19468 EVT InVT = EVT::getVectorVT(*DAG.getContext(), InSVT, NumElems);
19469
19470 // Don't create an illegal input type after type legalization.
19471 if (LegalTypes && !TLI.isTypeLegal(InVT))
19472 return SDValue();
19473
19474 // Ensure all the elements come from the same vector and are adjacent.
19475 for (unsigned i = 1; i != NumElems; ++i) {
19476 if ((Offset + i) != checkElem(N->getOperand(i)))
19477 return SDValue();
19478 }
19479
19480 SDLoc DL(N);
19481 In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InVT, In,
19482 Op0.getOperand(0).getOperand(1));
19483 return DAG.getNode(FoundZeroExtend ? ISD::ZERO_EXTEND : ISD::ANY_EXTEND, DL,
19484 VT, In);
19485}
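
// A minimal scalar model of the fold above (hypothetical values): zero
// extending four adjacent u16 lanes starting at offset 2 equals extracting
// the length-4 subvector at offset 2 and then widening every lane at once.
#include <cassert>
#include <cstdint>

int main() {
  uint16_t In[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  unsigned Offset = 2;
  uint32_t BuildVec[4];
  for (unsigned i = 0; i != 4; ++i) // zext (extract_vector_elt In, Offset + i)
    BuildVec[i] = (uint32_t)In[Offset + i];
  uint16_t Sub[4];
  for (unsigned i = 0; i != 4; ++i) // extract_subvector In, Offset
    Sub[i] = In[Offset + i];
  for (unsigned i = 0; i != 4; ++i) // zext v4i16 -> v4i32
    assert(BuildVec[i] == (uint32_t)Sub[i]);
  return 0;
}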
19486
19487SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) {
19488 EVT VT = N->getValueType(0);
19489
19490 // A vector built entirely of undefs is undef.
19491 if (ISD::allOperandsUndef(N))
19492 return DAG.getUNDEF(VT);
19493
19494 // If this is a splat of a bitcast from another vector, change to a
19495 // concat_vector.
19496 // For example:
19497 // (build_vector (i64 (bitcast (v2i32 X))), (i64 (bitcast (v2i32 X)))) ->
19498 // (v2i64 (bitcast (concat_vectors (v2i32 X), (v2i32 X))))
19499 //
19500 // If X is a build_vector itself, the concat can become a larger build_vector.
19501 // TODO: Maybe this is useful for non-splat too?
19502 if (!LegalOperations) {
19503 if (SDValue Splat = cast<BuildVectorSDNode>(N)->getSplatValue()) {
19504 Splat = peekThroughBitcasts(Splat);
19505 EVT SrcVT = Splat.getValueType();
19506 if (SrcVT.isVector()) {
19507 unsigned NumElts = N->getNumOperands() * SrcVT.getVectorNumElements();
19508 EVT NewVT = EVT::getVectorVT(*DAG.getContext(),
19509 SrcVT.getVectorElementType(), NumElts);
19510 if (!LegalTypes || TLI.isTypeLegal(NewVT)) {
19511 SmallVector<SDValue, 8> Ops(N->getNumOperands(), Splat);
19512 SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N),
19513 NewVT, Ops);
19514 return DAG.getBitcast(VT, Concat);
19515 }
19516 }
19517 }
19518 }
19519
19520 // Check if we can express the BUILD_VECTOR via a subvector extract.
19521 if (!LegalTypes && (N->getNumOperands() > 1)) {
19522 SDValue Op0 = N->getOperand(0);
19523 auto checkElem = [&](SDValue Op) -> uint64_t {
19524 if ((Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT) &&
19525 (Op0.getOperand(0) == Op.getOperand(0)))
19526 if (auto CNode = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
19527 return CNode->getZExtValue();
19528 return -1;
19529 };
19530
19531 int Offset = checkElem(Op0);
19532 for (unsigned i = 0; i < N->getNumOperands(); ++i) {
19533 if (Offset + i != checkElem(N->getOperand(i))) {
19534 Offset = -1;
19535 break;
19536 }
19537 }
19538
19539 if ((Offset == 0) &&
19540 (Op0.getOperand(0).getValueType() == N->getValueType(0)))
19541 return Op0.getOperand(0);
19542 if ((Offset != -1) &&
19543 ((Offset % N->getValueType(0).getVectorNumElements()) ==
19544 0)) // IDX must be multiple of output size.
19545 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N), N->getValueType(0),
19546 Op0.getOperand(0), Op0.getOperand(1));
19547 }
19548
19549 if (SDValue V = convertBuildVecZextToZext(N))
19550 return V;
19551
19552 if (SDValue V = reduceBuildVecExtToExtBuildVec(N))
19553 return V;
19554
19555 if (SDValue V = reduceBuildVecTruncToBitCast(N))
19556 return V;
19557
19558 if (SDValue V = reduceBuildVecToShuffle(N))
19559 return V;
19560
19561 // A splat of a single element is a SPLAT_VECTOR if supported on the target.
19562 // Do this late as some of the above may replace the splat.
19563 if (TLI.getOperationAction(ISD::SPLAT_VECTOR, VT) != TargetLowering::Expand)
19564 if (SDValue V = cast<BuildVectorSDNode>(N)->getSplatValue()) {
19565 assert(!V.isUndef() && "Splat of undef should have been handled earlier");
19566 return DAG.getNode(ISD::SPLAT_VECTOR, SDLoc(N), VT, V);
19567 }
19568
19569 return SDValue();
19570}
19571
19572static SDValue combineConcatVectorOfScalars(SDNode *N, SelectionDAG &DAG) {
19573 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
19574 EVT OpVT = N->getOperand(0).getValueType();
19575
19576 // If the operands are legal vectors, leave them alone.
19577 if (TLI.isTypeLegal(OpVT))
19578 return SDValue();
19579
19580 SDLoc DL(N);
19581 EVT VT = N->getValueType(0);
19582 SmallVector<SDValue, 8> Ops;
19583
19584 EVT SVT = EVT::getIntegerVT(*DAG.getContext(), OpVT.getSizeInBits());
19585 SDValue ScalarUndef = DAG.getNode(ISD::UNDEF, DL, SVT);
19586
19587 // Keep track of what we encounter.
19588 bool AnyInteger = false;
19589 bool AnyFP = false;
19590 for (const SDValue &Op : N->ops()) {
19591 if (ISD::BITCAST == Op.getOpcode() &&
19592 !Op.getOperand(0).getValueType().isVector())
19593 Ops.push_back(Op.getOperand(0));
19594 else if (ISD::UNDEF == Op.getOpcode())
19595 Ops.push_back(ScalarUndef);
19596 else
19597 return SDValue();
19598
19599 // Note whether we encounter an integer or floating point scalar.
19600 // If it's neither, bail out, it could be something weird like x86mmx.
19601 EVT LastOpVT = Ops.back().getValueType();
19602 if (LastOpVT.isFloatingPoint())
19603 AnyFP = true;
19604 else if (LastOpVT.isInteger())
19605 AnyInteger = true;
19606 else
19607 return SDValue();
19608 }
19609
19610 // If any of the operands is a floating point scalar bitcast to a vector,
19611 // use floating point types throughout, and bitcast everything.
19612 // Replace UNDEFs by another scalar UNDEF node, of the final desired type.
19613 if (AnyFP) {
19614 SVT = EVT::getFloatingPointVT(OpVT.getSizeInBits());
19615 ScalarUndef = DAG.getNode(ISD::UNDEF, DL, SVT);
19616 if (AnyInteger) {
19617 for (SDValue &Op : Ops) {
19618 if (Op.getValueType() == SVT)
19619 continue;
19620 if (Op.isUndef())
19621 Op = ScalarUndef;
19622 else
19623 Op = DAG.getBitcast(SVT, Op);
19624 }
19625 }
19626 }
19627
19628 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), SVT,
19629 VT.getSizeInBits() / SVT.getSizeInBits());
19630 return DAG.getBitcast(VT, DAG.getBuildVector(VecVT, DL, Ops));
19631}
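
// A standalone sketch of the fold above (assumption: a little-endian host;
// hypothetical scalar values): concatenating two vectors that are really
// bitcast double scalars produces the same bytes as bitcasting the
// two-element build_vector of those scalars.
#include <cassert>
#include <cstring>

int main() {
  double A = 1.5, B = -2.25;   // the scalars behind the bitcast operands
  float Concat[4];             // concat_vectors (bitcast A), (bitcast B)
  std::memcpy(Concat + 0, &A, sizeof(A));
  std::memcpy(Concat + 2, &B, sizeof(B));
  double BuildVec[2] = {A, B}; // build_vector A, B
  float Cast[4];               // bitcast v2f64 -> v4f32
  std::memcpy(Cast, BuildVec, sizeof(Cast));
  assert(std::memcmp(Concat, Cast, sizeof(Cast)) == 0);
  return 0;
}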
19632
19633// Check to see if this is a CONCAT_VECTORS of a bunch of EXTRACT_SUBVECTOR
19634// operations. If so, and if the EXTRACT_SUBVECTOR vector inputs come from at
19635// most two distinct vectors the same size as the result, attempt to turn this
19636// into a legal shuffle.
19637static SDValue combineConcatVectorOfExtracts(SDNode *N, SelectionDAG &DAG) {
19638 EVT VT = N->getValueType(0);
19639 EVT OpVT = N->getOperand(0).getValueType();
19640
19641 // We currently can't generate an appropriate shuffle for a scalable vector.
19642 if (VT.isScalableVector())
19643 return SDValue();
19644
19645 int NumElts = VT.getVectorNumElements();
19646 int NumOpElts = OpVT.getVectorNumElements();
19647
19648 SDValue SV0 = DAG.getUNDEF(VT), SV1 = DAG.getUNDEF(VT);
19649 SmallVector<int, 8> Mask;
19650
19651 for (SDValue Op : N->ops()) {
19652 Op = peekThroughBitcasts(Op);
19653
19654 // UNDEF nodes convert to UNDEF shuffle mask values.
19655 if (Op.isUndef()) {
19656 Mask.append((unsigned)NumOpElts, -1);
19657 continue;
19658 }
19659
19660 if (Op.getOpcode() != ISD::EXTRACT_SUBVECTOR)
19661 return SDValue();
19662
19663 // What vector are we extracting the subvector from and at what index?
19664 SDValue ExtVec = Op.getOperand(0);
19665 int ExtIdx = Op.getConstantOperandVal(1);
19666
19667 // We want the EVT of the original extraction to correctly scale the
19668 // extraction index.
19669 EVT ExtVT = ExtVec.getValueType();
19670 ExtVec = peekThroughBitcasts(ExtVec);
19671
19672 // UNDEF nodes convert to UNDEF shuffle mask values.
19673 if (ExtVec.isUndef()) {
19674 Mask.append((unsigned)NumOpElts, -1);
19675 continue;
19676 }
19677
19678 // Ensure that we are extracting a subvector from a vector the same
19679 // size as the result.
19680 if (ExtVT.getSizeInBits() != VT.getSizeInBits())
19681 return SDValue();
19682
19683 // Scale the subvector index to account for any bitcast.
19684 int NumExtElts = ExtVT.getVectorNumElements();
19685 if (0 == (NumExtElts % NumElts))
19686 ExtIdx /= (NumExtElts / NumElts);
19687 else if (0 == (NumElts % NumExtElts))
19688 ExtIdx *= (NumElts / NumExtElts);
19689 else
19690 return SDValue();
19691
19692 // At most we can reference 2 inputs in the final shuffle.
19693 if (SV0.isUndef() || SV0 == ExtVec) {
19694 SV0 = ExtVec;
19695 for (int i = 0; i != NumOpElts; ++i)
19696 Mask.push_back(i + ExtIdx);
19697 } else if (SV1.isUndef() || SV1 == ExtVec) {
19698 SV1 = ExtVec;
19699 for (int i = 0; i != NumOpElts; ++i)
19700 Mask.push_back(i + ExtIdx + NumElts);
19701 } else {
19702 return SDValue();
19703 }
19704 }
19705
19706 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
19707 return TLI.buildLegalVectorShuffle(VT, SDLoc(N), DAG.getBitcast(VT, SV0),
19708 DAG.getBitcast(VT, SV1), Mask, DAG);
19709}
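
// A standalone re-run of the mask construction above (hypothetical values):
// a v4i32 concat of (extract_subvector X, 0) and (extract_subvector Y, 2)
// becomes a two-input shuffle with mask {0, 1, 6, 7}, where the second
// source's lanes are offset by NumElts (4).
#include <cassert>
#include <vector>

int main() {
  int NumElts = 4, NumOpElts = 2;
  int ExtIdx0 = 0, ExtIdx1 = 2;
  std::vector<int> Mask;
  for (int i = 0; i != NumOpElts; ++i) // first operand reads SV0 (X)
    Mask.push_back(i + ExtIdx0);
  for (int i = 0; i != NumOpElts; ++i) // second operand reads SV1 (Y)
    Mask.push_back(i + ExtIdx1 + NumElts);
  assert((Mask == std::vector<int>{0, 1, 6, 7}));
  return 0;
}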
19710
19711static SDValue combineConcatVectorOfCasts(SDNode *N, SelectionDAG &DAG) {
19712 unsigned CastOpcode = N->getOperand(0).getOpcode();
19713 switch (CastOpcode) {
19714 case ISD::SINT_TO_FP:
19715 case ISD::UINT_TO_FP:
19716 case ISD::FP_TO_SINT:
19717 case ISD::FP_TO_UINT:
19718 // TODO: Allow more opcodes?
19719 // case ISD::BITCAST:
19720 // case ISD::TRUNCATE:
19721 // case ISD::ZERO_EXTEND:
19722 // case ISD::SIGN_EXTEND:
19723 // case ISD::FP_EXTEND:
19724 break;
19725 default:
19726 return SDValue();
19727 }
19728
19729 EVT SrcVT = N->getOperand(0).getOperand(0).getValueType();
19730 if (!SrcVT.isVector())
19731 return SDValue();
19732
19733 // All operands of the concat must be the same kind of cast from the same
19734 // source type.
19735 SmallVector<SDValue, 4> SrcOps;
19736 for (SDValue Op : N->ops()) {
19737 if (Op.getOpcode() != CastOpcode || !Op.hasOneUse() ||
19738 Op.getOperand(0).getValueType() != SrcVT)
19739 return SDValue();
19740 SrcOps.push_back(Op.getOperand(0));
19741 }
19742
19743 // The wider cast must be supported by the target. This is unusual because
19744 // the operation support type parameter depends on the opcode. In addition,
19745 // check the other type in the cast to make sure this is really legal.
19746 EVT VT = N->getValueType(0);
19747 EVT SrcEltVT = SrcVT.getVectorElementType();
19748 ElementCount NumElts = SrcVT.getVectorElementCount() * N->getNumOperands();
19749 EVT ConcatSrcVT = EVT::getVectorVT(*DAG.getContext(), SrcEltVT, NumElts);
19750 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
19751 switch (CastOpcode) {
19752 case ISD::SINT_TO_FP:
19753 case ISD::UINT_TO_FP:
19754 if (!TLI.isOperationLegalOrCustom(CastOpcode, ConcatSrcVT) ||
19755 !TLI.isTypeLegal(VT))
19756 return SDValue();
19757 break;
19758 case ISD::FP_TO_SINT:
19759 case ISD::FP_TO_UINT:
19760 if (!TLI.isOperationLegalOrCustom(CastOpcode, VT) ||
19761 !TLI.isTypeLegal(ConcatSrcVT))
19762 return SDValue();
19763 break;
19764 default:
19765 llvm_unreachable("Unexpected cast opcode");
19766 }
19767
19768 // concat (cast X), (cast Y)... -> cast (concat X, Y...)
19769 SDLoc DL(N);
19770 SDValue NewConcat = DAG.getNode(ISD::CONCAT_VECTORS, DL, ConcatSrcVT, SrcOps);
19771 return DAG.getNode(CastOpcode, DL, VT, NewConcat);
19772}
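
// A minimal scalar model of the fold above (hypothetical v2i32 halves): an
// elementwise cast commutes with concatenation, so casting each half and
// concatenating equals concatenating first and casting once.
#include <cassert>

int main() {
  int X[2] = {-1, 7}, Y[2] = {42, -8};
  // concat (sint_to_fp X), (sint_to_fp Y)
  float CastThenConcat[4] = {(float)X[0], (float)X[1], (float)Y[0], (float)Y[1]};
  // sint_to_fp (concat X, Y)
  int Concat[4] = {X[0], X[1], Y[0], Y[1]};
  for (int i = 0; i != 4; ++i)
    assert(CastThenConcat[i] == (float)Concat[i]);
  return 0;
}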
19773
19774SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) {
19775 // If we only have one input vector, we don't need to do any concatenation.
19776 if (N->getNumOperands() == 1)
19777 return N->getOperand(0);
19778
19779 // Check if all of the operands are undefs.
19780 EVT VT = N->getValueType(0);
19781 if (ISD::allOperandsUndef(N))
19782 return DAG.getUNDEF(VT);
19783
19784 // Optimize concat_vectors where all but the first of the vectors are undef.
19785 if (all_of(drop_begin(N->ops()),
19786 [](const SDValue &Op) { return Op.isUndef(); })) {
19787 SDValue In = N->getOperand(0);
19788 assert(In.getValueType().isVector() && "Must concat vectors");
19789
19790 // If the input is a concat_vectors, just make a larger concat by padding
19791 // with smaller undefs.
19792 if (In.getOpcode() == ISD::CONCAT_VECTORS && In.hasOneUse()) {
19793 unsigned NumOps = N->getNumOperands() * In.getNumOperands();
19794 SmallVector<SDValue, 4> Ops(In->op_begin(), In->op_end());
19795 Ops.resize(NumOps, DAG.getUNDEF(Ops[0].getValueType()));
19796 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, Ops);
19797 }
19798
19799 SDValue Scalar = peekThroughOneUseBitcasts(In);
19800
19801 // concat_vectors(scalar_to_vector(scalar), undef) ->
19802 // scalar_to_vector(scalar)
19803 if (!LegalOperations && Scalar.getOpcode() == ISD::SCALAR_TO_VECTOR &&
19804 Scalar.hasOneUse()) {
19805 EVT SVT = Scalar.getValueType().getVectorElementType();
19806 if (SVT == Scalar.getOperand(0).getValueType())
19807 Scalar = Scalar.getOperand(0);
19808 }
19809
19810 // concat_vectors(scalar, undef) -> scalar_to_vector(scalar)
19811 if (!Scalar.getValueType().isVector()) {
19812 // If the bitcast type isn't legal, it might be a trunc of a legal type;
19813 // look through the trunc so we can still do the transform:
19814 // concat_vectors(trunc(scalar), undef) -> scalar_to_vector(scalar)
19815 if (Scalar->getOpcode() == ISD::TRUNCATE &&
19816 !TLI.isTypeLegal(Scalar.getValueType()) &&
19817 TLI.isTypeLegal(Scalar->getOperand(0).getValueType()))
19818 Scalar = Scalar->getOperand(0);
19819
19820 EVT SclTy = Scalar.getValueType();
19821
19822 if (!SclTy.isFloatingPoint() && !SclTy.isInteger())
19823 return SDValue();
19824
19825 // Bail out if the vector size is not a multiple of the scalar size.
19826 if (VT.getSizeInBits() % SclTy.getSizeInBits())
19827 return SDValue();
19828
19829 unsigned VNTNumElms = VT.getSizeInBits() / SclTy.getSizeInBits();
19830 if (VNTNumElms < 2)
19831 return SDValue();
19832
19833 EVT NVT = EVT::getVectorVT(*DAG.getContext(), SclTy, VNTNumElms);
19834 if (!TLI.isTypeLegal(NVT) || !TLI.isTypeLegal(Scalar.getValueType()))
19835 return SDValue();
19836
19837 SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), NVT, Scalar);
19838 return DAG.getBitcast(VT, Res);
19839 }
19840 }
19841
19842 // Fold any combination of BUILD_VECTOR or UNDEF nodes into one BUILD_VECTOR.
19843 // We have already tested above for an UNDEF only concatenation.
19844 // fold (concat_vectors (BUILD_VECTOR A, B, ...), (BUILD_VECTOR C, D, ...))
19845 // -> (BUILD_VECTOR A, B, ..., C, D, ...)
19846 auto IsBuildVectorOrUndef = [](const SDValue &Op) {
19847 return ISD::UNDEF == Op.getOpcode() || ISD::BUILD_VECTOR == Op.getOpcode();
19848 };
19849 if (llvm::all_of(N->ops(), IsBuildVectorOrUndef)) {
19850 SmallVector<SDValue, 8> Opnds;
19851 EVT SVT = VT.getScalarType();
19852
19853 EVT MinVT = SVT;
19854 if (!SVT.isFloatingPoint()) {
19855 // If the BUILD_VECTOR nodes are built from integers, they may have different
19856 // operand types. Get the smallest type and truncate all operands to it.
19857 bool FoundMinVT = false;
19858 for (const SDValue &Op : N->ops())
19859 if (ISD::BUILD_VECTOR == Op.getOpcode()) {
19860 EVT OpSVT = Op.getOperand(0).getValueType();
19861 MinVT = (!FoundMinVT || OpSVT.bitsLE(MinVT)) ? OpSVT : MinVT;
19862 FoundMinVT = true;
19863 }
19864 assert(FoundMinVT && "Concat vector type mismatch");
19865 }
19866
19867 for (const SDValue &Op : N->ops()) {
19868 EVT OpVT = Op.getValueType();
19869 unsigned NumElts = OpVT.getVectorNumElements();
19870
19871 if (ISD::UNDEF == Op.getOpcode())
19872 Opnds.append(NumElts, DAG.getUNDEF(MinVT));
19873
19874 if (ISD::BUILD_VECTOR == Op.getOpcode()) {
19875 if (SVT.isFloatingPoint()) {
19876 assert(SVT == OpVT.getScalarType() && "Concat vector type mismatch");
19877 Opnds.append(Op->op_begin(), Op->op_begin() + NumElts);
19878 } else {
19879 for (unsigned i = 0; i != NumElts; ++i)
19880 Opnds.push_back(
19881 DAG.getNode(ISD::TRUNCATE, SDLoc(N), MinVT, Op.getOperand(i)));
19882 }
19883 }
19884 }
19885
19886 assert(VT.getVectorNumElements() == Opnds.size() &&
19887        "Concat vector type mismatch");
19888 return DAG.getBuildVector(VT, SDLoc(N), Opnds);
19889 }
19890
19891 // Fold CONCAT_VECTORS of only bitcast scalars (or undef) to BUILD_VECTOR.
19892 if (SDValue V = combineConcatVectorOfScalars(N, DAG))
19893 return V;
19894
19895 // Fold CONCAT_VECTORS of EXTRACT_SUBVECTOR (or undef) to VECTOR_SHUFFLE.
19896 if (Level < AfterLegalizeVectorOps && TLI.isTypeLegal(VT))
19897 if (SDValue V = combineConcatVectorOfExtracts(N, DAG))
19898 return V;
19899
19900 if (SDValue V = combineConcatVectorOfCasts(N, DAG))
19901 return V;
19902
19903 // Type legalization of vectors and DAG canonicalization of SHUFFLE_VECTOR
19904 // nodes often generate nop CONCAT_VECTOR nodes. Scan the CONCAT_VECTOR
19905 // operands and look for CONCAT operations that place the incoming vectors
19906 // at the exact same location.
19907 //
19908 // For scalable vectors, EXTRACT_SUBVECTOR indexes are implicitly scaled.
19909 SDValue SingleSource = SDValue();
19910 unsigned PartNumElem =
19911 N->getOperand(0).getValueType().getVectorMinNumElements();
19912
19913 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
19914 SDValue Op = N->getOperand(i);
19915
19916 if (Op.isUndef())
19917 continue;
19918
19919 // Check if this is the identity extract:
19920 if (Op.getOpcode() != ISD::EXTRACT_SUBVECTOR)
19921 return SDValue();
19922
19923 // Find the single incoming vector for the extract_subvector.
19924 if (SingleSource.getNode()) {
19925 if (Op.getOperand(0) != SingleSource)
19926 return SDValue();
19927 } else {
19928 SingleSource = Op.getOperand(0);
19929
19930 // Check the source type is the same as the type of the result.
19931 // If not, this concat may extend the vector, so we can not
19932 // optimize it away.
19933 if (SingleSource.getValueType() != N->getValueType(0))
19934 return SDValue();
19935 }
19936
19937 // Check that we are reading from the identity index.
19938 unsigned IdentityIndex = i * PartNumElem;
19939 if (Op.getConstantOperandAPInt(1) != IdentityIndex)
19940 return SDValue();
19941 }
19942
19943 if (SingleSource.getNode())
19944 return SingleSource;
19945
19946 return SDValue();
19947}
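
// A standalone sketch of the identity-concat check above (hypothetical
// values): concatenating extracts of one source taken at indices
// i * PartNumElem reproduces the source exactly, so the whole concat folds
// to SingleSource.
#include <cassert>

int main() {
  int Single[4] = {5, 6, 7, 8};
  int PartNumElem = 2;
  int Concat[4];
  for (int i = 0; i != 2; ++i) { // two extract_subvector operands
    int IdentityIndex = i * PartNumElem;
    for (int j = 0; j != PartNumElem; ++j)
      Concat[i * PartNumElem + j] = Single[IdentityIndex + j];
  }
  for (int i = 0; i != 4; ++i)
    assert(Concat[i] == Single[i]);
  return 0;
}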
19948
19949// Helper that peeks through INSERT_SUBVECTOR/CONCAT_VECTORS to find
19950// if the subvector can be sourced for free.
19951static SDValue getSubVectorSrc(SDValue V, SDValue Index, EVT SubVT) {
19952 if (V.getOpcode() == ISD::INSERT_SUBVECTOR &&
19953 V.getOperand(1).getValueType() == SubVT && V.getOperand(2) == Index) {
19954 return V.getOperand(1);
19955 }
19956 auto *IndexC = dyn_cast<ConstantSDNode>(Index);
19957 if (IndexC && V.getOpcode() == ISD::CONCAT_VECTORS &&
19958 V.getOperand(0).getValueType() == SubVT &&
19959 (IndexC->getZExtValue() % SubVT.getVectorMinNumElements()) == 0) {
19960 uint64_t SubIdx = IndexC->getZExtValue() / SubVT.getVectorMinNumElements();
19961 return V.getOperand(SubIdx);
19962 }
19963 return SDValue();
19964}
19965
19966static SDValue narrowInsertExtractVectorBinOp(SDNode *Extract,
19967 SelectionDAG &DAG,
19968 bool LegalOperations) {
19969 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
19970 SDValue BinOp = Extract->getOperand(0);
19971 unsigned BinOpcode = BinOp.getOpcode();
19972 if (!TLI.isBinOp(BinOpcode) || BinOp.getNode()->getNumValues() != 1)
19973 return SDValue();
19974
19975 EVT VecVT = BinOp.getValueType();
19976 SDValue Bop0 = BinOp.getOperand(0), Bop1 = BinOp.getOperand(1);
19977 if (VecVT != Bop0.getValueType() || VecVT != Bop1.getValueType())
19978 return SDValue();
19979
19980 SDValue Index = Extract->getOperand(1);
19981 EVT SubVT = Extract->getValueType(0);
19982 if (!TLI.isOperationLegalOrCustom(BinOpcode, SubVT, LegalOperations))
19983 return SDValue();
19984
19985 SDValue Sub0 = getSubVectorSrc(Bop0, Index, SubVT);
19986 SDValue Sub1 = getSubVectorSrc(Bop1, Index, SubVT);
19987
19988 // TODO: We could handle the case where only 1 operand is being inserted by
19989 // creating an extract of the other operand, but that requires checking
19990 // number of uses and/or costs.
19991 if (!Sub0 || !Sub1)
19992 return SDValue();
19993
19994 // We are inserting both operands of the wide binop only to extract back
19995 // to the narrow vector size. Eliminate all of the insert/extract:
19996 // ext (binop (ins ?, X, Index), (ins ?, Y, Index)), Index --> binop X, Y
19997 return DAG.getNode(BinOpcode, SDLoc(Extract), SubVT, Sub0, Sub1,
19998 BinOp->getFlags());
19999}
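
// A minimal scalar model of the insert/extract elimination above
// (hypothetical v2i32 subvectors inserted at index 2 of v4i32 operands):
// adding the two wide vectors and extracting at the shared index recovers
// exactly X + Y, lane by lane.
#include <cassert>

int main() {
  int Op0[4] = {7, 7, 7, 7}, Op1[4] = {9, 9, 9, 9}; // the '?' base vectors
  int X[2] = {1, 2}, Y[2] = {10, 20};
  int Index = 2;
  for (int i = 0; i != 2; ++i) { // ins Op0, X, Index and ins Op1, Y, Index
    Op0[Index + i] = X[i];
    Op1[Index + i] = Y[i];
  }
  int Wide[4], Narrow[2];
  for (int i = 0; i != 4; ++i)   // binop (ins ...), (ins ...)
    Wide[i] = Op0[i] + Op1[i];
  for (int i = 0; i != 2; ++i)   // ext ..., Index
    Narrow[i] = Wide[Index + i];
  for (int i = 0; i != 2; ++i)   // --> binop X, Y
    assert(Narrow[i] == X[i] + Y[i]);
  return 0;
}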
20000
20001/// If we are extracting a subvector produced by a wide binary operator try
20002/// to use a narrow binary operator and/or avoid concatenation and extraction.
20003static SDValue narrowExtractedVectorBinOp(SDNode *Extract, SelectionDAG &DAG,
20004 bool LegalOperations) {
20005 // TODO: Refactor with the caller (visitEXTRACT_SUBVECTOR), so we can share
20006 // some of these bailouts with other transforms.
20007
20008 if (SDValue V = narrowInsertExtractVectorBinOp(Extract, DAG, LegalOperations))
20009 return V;
20010
20011 // The extract index must be a constant, so we can map it to a concat operand.
20012 auto *ExtractIndexC = dyn_cast<ConstantSDNode>(Extract->getOperand(1));
20013 if (!ExtractIndexC)
20014 return SDValue();
20015
20016 // We are looking for an optionally bitcasted wide vector binary operator
20017 // feeding an extract subvector.
20018 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
20019 SDValue BinOp = peekThroughBitcasts(Extract->getOperand(0));
20020 unsigned BOpcode = BinOp.getOpcode();
20021 if (!TLI.isBinOp(BOpcode) || BinOp.getNode()->getNumValues() != 1)
20022 return SDValue();
20023
20024 // Exclude the fake form of fneg (fsub -0.0, x) because that is likely to be
20025 // reduced to the unary fneg when it is visited, and we probably want to deal
20026 // with fneg in a target-specific way.
20027 if (BOpcode == ISD::FSUB) {
20028 auto *C = isConstOrConstSplatFP(BinOp.getOperand(0), /*AllowUndefs*/ true);
20029 if (C && C->getValueAPF().isNegZero())
20030 return SDValue();
20031 }
20032
20033 // The binop must be a vector type, so we can extract some fraction of it.
20034 EVT WideBVT = BinOp.getValueType();
20035 // The optimisations below currently assume we are dealing with fixed length
20036 // vectors. It is possible to add support for scalable vectors, but at the
20037 // moment we've done no analysis to prove whether they are profitable or not.
20038 if (!WideBVT.isFixedLengthVector())
20039 return SDValue();
20040
20041 EVT VT = Extract->getValueType(0);
20042 unsigned ExtractIndex = ExtractIndexC->getZExtValue();
20043 assert(ExtractIndex % VT.getVectorNumElements() == 0 &&
20044        "Extract index is not a multiple of the vector length.");
20045
20046 // Bail out if this is not a proper multiple width extraction.
20047 unsigned WideWidth = WideBVT.getSizeInBits();
20048 unsigned NarrowWidth = VT.getSizeInBits();
20049 if (WideWidth % NarrowWidth != 0)
20050 return SDValue();
20051
20052 // Bail out if we are extracting a fraction of a single operation. This can
20053 // occur because we potentially looked through a bitcast of the binop.
20054 unsigned NarrowingRatio = WideWidth / NarrowWidth;
20055 unsigned WideNumElts = WideBVT.getVectorNumElements();
20056 if (WideNumElts % NarrowingRatio != 0)
20057 return SDValue();
20058
20059 // Bail out if the target does not support a narrower version of the binop.
20060 EVT NarrowBVT = EVT::getVectorVT(*DAG.getContext(), WideBVT.getScalarType(),
20061 WideNumElts / NarrowingRatio);
20062 if (!TLI.isOperationLegalOrCustomOrPromote(BOpcode, NarrowBVT))
20063 return SDValue();
20064
20065 // If extraction is cheap, we don't need to look at the binop operands
20066 // for concat ops. The narrow binop alone makes this transform profitable.
20067 // We can't just reuse the original extract index operand because we may have
20068 // bitcasted.
20069 unsigned ConcatOpNum = ExtractIndex / VT.getVectorNumElements();
20070 unsigned ExtBOIdx = ConcatOpNum * NarrowBVT.getVectorNumElements();
20071 if (TLI.isExtractSubvectorCheap(NarrowBVT, WideBVT, ExtBOIdx) &&
20072 BinOp.hasOneUse() && Extract->getOperand(0)->hasOneUse()) {
20073 // extract (binop B0, B1), N --> binop (extract B0, N), (extract B1, N)
20074 SDLoc DL(Extract);
20075 SDValue NewExtIndex = DAG.getVectorIdxConstant(ExtBOIdx, DL);
20076 SDValue X = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, NarrowBVT,
20077 BinOp.getOperand(0), NewExtIndex);
20078 SDValue Y = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, NarrowBVT,
20079 BinOp.getOperand(1), NewExtIndex);
20080 SDValue NarrowBinOp = DAG.getNode(BOpcode, DL, NarrowBVT, X, Y,
20081 BinOp.getNode()->getFlags());
20082 return DAG.getBitcast(VT, NarrowBinOp);
20083 }
20084
20085 // Only handle the case where we are doubling and then halving. A larger ratio
20086 // may require more than two narrow binops to replace the wide binop.
20087 if (NarrowingRatio != 2)
20088 return SDValue();
20089
20090 // TODO: The motivating case for this transform is an x86 AVX1 target. That
20091 // target has temptingly almost legal versions of bitwise logic ops in 256-bit
20092 // flavors, but no other 256-bit integer support. This could be extended to
20093 // handle any binop, but that may require fixing/adding other folds to avoid
20094 // codegen regressions.
20095 if (BOpcode != ISD::AND && BOpcode != ISD::OR && BOpcode != ISD::XOR)
20096 return SDValue();
20097
20098 // We need at least one concatenation operation of a binop operand to make
20099 // this transform worthwhile. The concat must double the input vector sizes.
20100 auto GetSubVector = [ConcatOpNum](SDValue V) -> SDValue {
20101 if (V.getOpcode() == ISD::CONCAT_VECTORS && V.getNumOperands() == 2)
20102 return V.getOperand(ConcatOpNum);
20103 return SDValue();
20104 };
20105 SDValue SubVecL = GetSubVector(peekThroughBitcasts(BinOp.getOperand(0)));
20106 SDValue SubVecR = GetSubVector(peekThroughBitcasts(BinOp.getOperand(1)));
20107
20108 if (SubVecL || SubVecR) {
20109 // If a binop operand was not the result of a concat, we must extract a
20110 // half-sized operand for our new narrow binop:
20111 // extract (binop (concat X1, X2), (concat Y1, Y2)), N --> binop XN, YN
20112 // extract (binop (concat X1, X2), Y), N --> binop XN, (extract Y, IndexC)
20113 // extract (binop X, (concat Y1, Y2)), N --> binop (extract X, IndexC), YN
20114 SDLoc DL(Extract);
20115 SDValue IndexC = DAG.getVectorIdxConstant(ExtBOIdx, DL);
20116 SDValue X = SubVecL ? DAG.getBitcast(NarrowBVT, SubVecL)
20117 : DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, NarrowBVT,
20118 BinOp.getOperand(0), IndexC);
20119
20120 SDValue Y = SubVecR ? DAG.getBitcast(NarrowBVT, SubVecR)
20121 : DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, NarrowBVT,
20122 BinOp.getOperand(1), IndexC);
20123
20124 SDValue NarrowBinOp = DAG.getNode(BOpcode, DL, NarrowBVT, X, Y);
20125 return DAG.getBitcast(VT, NarrowBinOp);
20126 }
20127
20128 return SDValue();
20129}
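
// A minimal scalar model of the concat-operand case above (hypothetical
// v2i32 halves): ANDing two concatenated vectors and then extracting the
// upper half equals ANDing the upper halves directly.
#include <cassert>
#include <cstdint>

int main() {
  uint32_t X1[2] = {0xF0F0F0F0u, 0x12345678u}, X2[2] = {0xFFFF0000u, 0xDEADBEEFu};
  uint32_t Y1[2] = {0x0F0F0F0Fu, 0x87654321u}, Y2[2] = {0x0000FFFFu, 0xCAFEBABEu};
  uint32_t CX[4], CY[4], Wide[4];
  for (int i = 0; i != 2; ++i) { // concat X1, X2 and concat Y1, Y2
    CX[i] = X1[i]; CX[2 + i] = X2[i];
    CY[i] = Y1[i]; CY[2 + i] = Y2[i];
  }
  for (int i = 0; i != 4; ++i)   // binop (concat ...), (concat ...)
    Wide[i] = CX[i] & CY[i];
  for (int i = 0; i != 2; ++i)   // extract upper half --> binop X2, Y2
    assert(Wide[2 + i] == (X2[i] & Y2[i]));
  return 0;
}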
20130
20131/// If we are extracting a subvector from a wide vector load, convert to a
20132/// narrow load to eliminate the extraction:
20133/// (extract_subvector (load wide vector)) --> (load narrow vector)
20134static SDValue narrowExtractedVectorLoad(SDNode *Extract, SelectionDAG &DAG) {
20135 // TODO: Add support for big-endian. The offset calculation must be adjusted.
20136 if (DAG.getDataLayout().isBigEndian())
20137 return SDValue();
20138
20139 auto *Ld = dyn_cast<LoadSDNode>(Extract->getOperand(0));
20140 auto *ExtIdx = dyn_cast<ConstantSDNode>(Extract->getOperand(1));
20141 if (!Ld || Ld->getExtensionType() || !Ld->isSimple() ||
20142 !ExtIdx)
20143 return SDValue();
20144
20145 // Allow targets to opt-out.
20146 EVT VT = Extract->getValueType(0);
20147
20148 // We can only create byte sized loads.
20149 if (!VT.isByteSized())
20150 return SDValue();
20151
20152 unsigned Index = ExtIdx->getZExtValue();
20153 unsigned NumElts = VT.getVectorMinNumElements();
20154
20155 // The definition of EXTRACT_SUBVECTOR states that the index must be a
20156 // multiple of the minimum number of elements in the result type.
20157 assert(Index % NumElts == 0 && "The extract subvector index is not a "
20158        "multiple of the result's element count");
20159
20160 // It's fine to use TypeSize here as we know the offset will not be negative.
20161 TypeSize Offset = VT.getStoreSize() * (Index / NumElts);
20162
20163 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
20164 if (!TLI.shouldReduceLoadWidth(Ld, Ld->getExtensionType(), VT))
20165 return SDValue();
20166
20167 // The narrow load will be offset from the base address of the old load if
20168 // we are extracting from something besides index 0 (little-endian).
20169 SDLoc DL(Extract);
20170
20171 // TODO: Use "BaseIndexOffset" to make this more effective.
20172 SDValue NewAddr = DAG.getMemBasePlusOffset(Ld->getBasePtr(), Offset, DL);
20173
20174 uint64_t StoreSize = MemoryLocation::getSizeOrUnknown(VT.getStoreSize());
20175 MachineFunction &MF = DAG.getMachineFunction();
20176 MachineMemOperand *MMO;
20177 if (Offset.isScalable()) {
20178 MachinePointerInfo MPI =
20179 MachinePointerInfo(Ld->getPointerInfo().getAddrSpace());
20180 MMO = MF.getMachineMemOperand(Ld->getMemOperand(), MPI, StoreSize);
20181 } else
20182 MMO = MF.getMachineMemOperand(Ld->getMemOperand(), Offset.getFixedSize(),
20183 StoreSize);
20184
20185 SDValue NewLd = DAG.getLoad(VT, DL, Ld->getChain(), NewAddr, MMO);
20186 DAG.makeEquivalentMemoryOrdering(Ld, NewLd);
20187 return NewLd;
20188}
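
// A standalone re-run of the offset computation above (hypothetical values;
// little-endian only, as the code bails out on big-endian targets):
// extracting a v4i32 (16-byte store size) subvector at element index 8 puts
// the narrow load StoreSize * (Index / NumElts) == 32 bytes past the wide
// load's base address.
#include <cassert>

int main() {
  unsigned Index = 8;           // constant extract index, in elements
  unsigned NumElts = 4;         // elements in the narrow result type
  unsigned StoreSizeBytes = 16; // store size of the narrow v4i32 type
  unsigned OffsetBytes = StoreSizeBytes * (Index / NumElts);
  assert(OffsetBytes == 32);
  return 0;
}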
20189
20190SDValue DAGCombiner::visitEXTRACT_SUBVECTOR(SDNode *N) {
20191 EVT NVT = N->getValueType(0);
20192 SDValue V = N->getOperand(0);
20193 uint64_t ExtIdx = N->getConstantOperandVal(1);
20194
20195 // Extract from UNDEF is UNDEF.
20196 if (V.isUndef())
20197 return DAG.getUNDEF(NVT);
20198
20199 if (TLI.isOperationLegalOrCustomOrPromote(ISD::LOAD, NVT))
20200 if (SDValue NarrowLoad = narrowExtractedVectorLoad(N, DAG))
20201 return NarrowLoad;
20202
20203 // Combine an extract of an extract into a single extract_subvector.
20204 // ext (ext X, C), 0 --> ext X, C
20205 if (ExtIdx == 0 && V.getOpcode() == ISD::EXTRACT_SUBVECTOR && V.hasOneUse()) {
20206 if (TLI.isExtractSubvectorCheap(NVT, V.getOperand(0).getValueType(),
20207 V.getConstantOperandVal(1)) &&
20208 TLI.isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, NVT)) {
20209 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N), NVT, V.getOperand(0),
20210 V.getOperand(1));
20211 }
20212 }
20213
20214 // Try to move vector bitcast after extract_subv by scaling extraction index:
20215 // extract_subv (bitcast X), Index --> bitcast (extract_subv X, Index')
20216 if (V.getOpcode() == ISD::BITCAST &&
20217 V.getOperand(0).getValueType().isVector()) {
20218 SDValue SrcOp = V.getOperand(0);
20219 EVT SrcVT = SrcOp.getValueType();
20220 unsigned SrcNumElts = SrcVT.getVectorMinNumElements();
20221 unsigned DestNumElts = V.getValueType().getVectorMinNumElements();
20222 if ((SrcNumElts % DestNumElts) == 0) {
20223 unsigned SrcDestRatio = SrcNumElts / DestNumElts;
20224 ElementCount NewExtEC = NVT.getVectorElementCount() * SrcDestRatio;
20225 EVT NewExtVT = EVT::getVectorVT(*DAG.getContext(), SrcVT.getScalarType(),
20226 NewExtEC);
20227 if (TLI.isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, NewExtVT)) {
20228 SDLoc DL(N);
20229 SDValue NewIndex = DAG.getVectorIdxConstant(ExtIdx * SrcDestRatio, DL);
20230 SDValue NewExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, NewExtVT,
20231 V.getOperand(0), NewIndex);
20232 return DAG.getBitcast(NVT, NewExtract);
20233 }
20234 }
20235 if ((DestNumElts % SrcNumElts) == 0) {
20236 unsigned DestSrcRatio = DestNumElts / SrcNumElts;
20237 if (NVT.getVectorElementCount().isKnownMultipleOf(DestSrcRatio)) {
20238 ElementCount NewExtEC =
20239 NVT.getVectorElementCount().divideCoefficientBy(DestSrcRatio);
20240 EVT ScalarVT = SrcVT.getScalarType();
20241 if ((ExtIdx % DestSrcRatio) == 0) {
20242 SDLoc DL(N);
20243 unsigned IndexValScaled = ExtIdx / DestSrcRatio;
20244 EVT NewExtVT =
20245 EVT::getVectorVT(*DAG.getContext(), ScalarVT, NewExtEC);
20246 if (TLI.isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, NewExtVT)) {
20247 SDValue NewIndex = DAG.getVectorIdxConstant(IndexValScaled, DL);
20248 SDValue NewExtract =
20249 DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, NewExtVT,
20250 V.getOperand(0), NewIndex);
20251 return DAG.getBitcast(NVT, NewExtract);
20252 }
20253 if (NewExtEC.isScalar() &&
20254 TLI.isOperationLegalOrCustom(ISD::EXTRACT_VECTOR_ELT, ScalarVT)) {
20255 SDValue NewIndex = DAG.getVectorIdxConstant(IndexValScaled, DL);
20256 SDValue NewExtract =
20257 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ScalarVT,
20258 V.getOperand(0), NewIndex);
20259 return DAG.getBitcast(NVT, NewExtract);
20260 }
20261 }
20262 }
20263 }
20264 }
20265
20266 if (V.getOpcode() == ISD::CONCAT_VECTORS) {
20267 unsigned ExtNumElts = NVT.getVectorMinNumElements();
20268 EVT ConcatSrcVT = V.getOperand(0).getValueType();
20269 assert(ConcatSrcVT.getVectorElementType() == NVT.getVectorElementType() &&
20270        "Concat and extract subvector do not change element type");
20271 assert((ExtIdx % ExtNumElts) == 0 &&
20272        "Extract index is not a multiple of the input vector length.");
20273
20274 unsigned ConcatSrcNumElts = ConcatSrcVT.getVectorMinNumElements();
20275 unsigned ConcatOpIdx = ExtIdx / ConcatSrcNumElts;
20276
20277 // If the concatenated source types match this extract, it's a direct
20278 // simplification:
20279 // extract_subvec (concat V1, V2, ...), i --> Vi
20280 if (ConcatSrcNumElts == ExtNumElts)
20281 return V.getOperand(ConcatOpIdx);
20282
20283 // If the concatenated source vectors are a multiple length of this extract,
20284 // then extract a fraction of one of those source vectors directly from a
20285 // concat operand. Example:
20286 //   v2i8 extract_subvec (v16i8 concat (v8i8 X), (v8i8 Y)), 14 -->
20287 // v2i8 extract_subvec v8i8 Y, 6
20288 if (NVT.isFixedLengthVector() && ConcatSrcNumElts % ExtNumElts == 0) {
20289 SDLoc DL(N);
20290 unsigned NewExtIdx = ExtIdx - ConcatOpIdx * ConcatSrcNumElts;
20291 assert(NewExtIdx + ExtNumElts <= ConcatSrcNumElts &&
20292        "Trying to extract from >1 concat operand?");
20293 assert(NewExtIdx % ExtNumElts == 0 &&
20294 "Extract index is not a multiple of the input vector length.");
20295 SDValue NewIndexC = DAG.getVectorIdxConstant(NewExtIdx, DL);
20296 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, NVT,
20297 V.getOperand(ConcatOpIdx), NewIndexC);
20298 }
20299 }
20300
20301 V = peekThroughBitcasts(V);
20302
20303 // If the input is a build vector, try to make a smaller build vector.
20304 if (V.getOpcode() == ISD::BUILD_VECTOR) {
20305 EVT InVT = V.getValueType();
20306 unsigned ExtractSize = NVT.getSizeInBits();
20307 unsigned EltSize = InVT.getScalarSizeInBits();
20308 // Only do this if we won't split any elements.
20309 if (ExtractSize % EltSize == 0) {
20310 unsigned NumElems = ExtractSize / EltSize;
20311 EVT EltVT = InVT.getVectorElementType();
20312 EVT ExtractVT =
20313 NumElems == 1 ? EltVT
20314 : EVT::getVectorVT(*DAG.getContext(), EltVT, NumElems);
20315 if ((Level < AfterLegalizeDAG ||
20316 (NumElems == 1 ||
20317 TLI.isOperationLegal(ISD::BUILD_VECTOR, ExtractVT))) &&
20318 (!LegalTypes || TLI.isTypeLegal(ExtractVT))) {
20319 unsigned IdxVal = (ExtIdx * NVT.getScalarSizeInBits()) / EltSize;
20320
20321 if (NumElems == 1) {
20322 SDValue Src = V->getOperand(IdxVal);
20323 if (EltVT != Src.getValueType())
20324 Src = DAG.getNode(ISD::TRUNCATE, SDLoc(N), InVT, Src);
20325 return DAG.getBitcast(NVT, Src);
20326 }
20327
20328 // Extract the pieces from the original build_vector.
20329 SDValue BuildVec = DAG.getBuildVector(ExtractVT, SDLoc(N),
20330 V->ops().slice(IdxVal, NumElems));
20331 return DAG.getBitcast(NVT, BuildVec);
20332 }
20333 }
20334 }
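// For illustration of the build_vector narrowing above, assuming no
// intervening bitcast: extracting v2i32 at index 4 from
// (v8i32 build_vector a0, ..., a7) gives ExtractSize = 64, EltSize = 32,
// NumElems = 2 and IdxVal = 4, i.e. (v2i32 build_vector a4, a5).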
20335
20336 if (V.getOpcode() == ISD::INSERT_SUBVECTOR) {
20337 // Handle only the simple case where the vector being inserted and the
20338 // vector being extracted are of the same size.
20339 EVT SmallVT = V.getOperand(1).getValueType();
20340 if (!NVT.bitsEq(SmallVT))
20341 return SDValue();
20342
20343 // Combine:
20344 // (extract_subvec (insert_subvec V1, V2, InsIdx), ExtIdx)
20345 // Into:
20346 // indices are equal or bit offsets are equal => V1
20347 // otherwise => (extract_subvec V1, ExtIdx)
20348 uint64_t InsIdx = V.getConstantOperandVal(2);
20349 if (InsIdx * SmallVT.getScalarSizeInBits() ==
20350 ExtIdx * NVT.getScalarSizeInBits())
20351 return DAG.getBitcast(NVT, V.getOperand(1));
20352 return DAG.getNode(
20353 ISD::EXTRACT_SUBVECTOR, SDLoc(N), NVT,
20354 DAG.getBitcast(N->getOperand(0).getValueType(), V.getOperand(0)),
20355 N->getOperand(1));
20356 }
20357
20358 if (SDValue NarrowBOp = narrowExtractedVectorBinOp(N, DAG, LegalOperations))
20359 return NarrowBOp;
20360
20361 if (SimplifyDemandedVectorElts(SDValue(N, 0)))
20362 return SDValue(N, 0);
20363
20364 return SDValue();
20365}
20366
20367/// Try to convert a wide shuffle of concatenated vectors into 2 narrow shuffles
20368/// followed by concatenation. Narrow vector ops may have better performance
20369/// than wide ops, and this can unlock further narrowing of other vector ops.
20370/// Targets can invert this transform later if it is not profitable.
20371static SDValue foldShuffleOfConcatUndefs(ShuffleVectorSDNode *Shuf,
20372 SelectionDAG &DAG) {
20373 SDValue N0 = Shuf->getOperand(0), N1 = Shuf->getOperand(1);
20374 if (N0.getOpcode() != ISD::CONCAT_VECTORS || N0.getNumOperands() != 2 ||
20375 N1.getOpcode() != ISD::CONCAT_VECTORS || N1.getNumOperands() != 2 ||
20376 !N0.getOperand(1).isUndef() || !N1.getOperand(1).isUndef())
20377 return SDValue();
20378
20379 // Split the wide shuffle mask into halves. Any mask element that is accessing
20380 // operand 1 is offset down to account for narrowing of the vectors.
20381 ArrayRef<int> Mask = Shuf->getMask();
20382 EVT VT = Shuf->getValueType(0);
20383 unsigned NumElts = VT.getVectorNumElements();
20384 unsigned HalfNumElts = NumElts / 2;
20385 SmallVector<int, 16> Mask0(HalfNumElts, -1);
20386 SmallVector<int, 16> Mask1(HalfNumElts, -1);
20387 for (unsigned i = 0; i != NumElts; ++i) {
20388 if (Mask[i] == -1)
20389 continue;
20390 int M = Mask[i] < (int)NumElts ? Mask[i] : Mask[i] - (int)HalfNumElts;
20391 if (i < HalfNumElts)
20392 Mask0[i] = M;
20393 else
20394 Mask1[i - HalfNumElts] = M;
20395 }
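// For illustration, assuming v8 wide types with HalfNumElts = 4 and
// Mask = <0,8,1,9,2,10,3,11> (an interleave of X and Y): the split yields
// Mask0 = <0,4,1,5> and Mask1 = <2,6,3,7>, so the result becomes
// concat (shuffle X, Y, <0,4,1,5>), (shuffle X, Y, <2,6,3,7>).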
20396
20397 // Ask the target if this is a valid transform.
20398 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
20399 EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(),
20400 HalfNumElts);
20401 if (!TLI.isShuffleMaskLegal(Mask0, HalfVT) ||
20402 !TLI.isShuffleMaskLegal(Mask1, HalfVT))
20403 return SDValue();
20404
20405 // shuffle (concat X, undef), (concat Y, undef), Mask -->
20406 // concat (shuffle X, Y, Mask0), (shuffle X, Y, Mask1)
20407 SDValue X = N0.getOperand(0), Y = N1.getOperand(0);
20408 SDLoc DL(Shuf);
20409 SDValue Shuf0 = DAG.getVectorShuffle(HalfVT, DL, X, Y, Mask0);
20410 SDValue Shuf1 = DAG.getVectorShuffle(HalfVT, DL, X, Y, Mask1);
20411 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Shuf0, Shuf1);
20412}
20413
20414// Tries to turn a shuffle of two CONCAT_VECTORS into a single concat,
20415 // or turn a shuffle of a single concat into a simpler shuffle followed by a concat.
20416static SDValue partitionShuffleOfConcats(SDNode *N, SelectionDAG &DAG) {
20417 EVT VT = N->getValueType(0);
20418 unsigned NumElts = VT.getVectorNumElements();
20419
20420 SDValue N0 = N->getOperand(0);
20421 SDValue N1 = N->getOperand(1);
20422 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
20423 ArrayRef<int> Mask = SVN->getMask();
20424
20425 SmallVector<SDValue, 4> Ops;
20426 EVT ConcatVT = N0.getOperand(0).getValueType();
20427 unsigned NumElemsPerConcat = ConcatVT.getVectorNumElements();
20428 unsigned NumConcats = NumElts / NumElemsPerConcat;
20429
20430 auto IsUndefMaskElt = [](int i) { return i == -1; };
20431
20432 // Special case: shuffle(concat(A,B)) can be more efficiently represented
20433 // as concat(shuffle(A,B),UNDEF) if the shuffle doesn't set any of the high
20434 // half vector elements.
20435 if (NumElemsPerConcat * 2 == NumElts && N1.isUndef() &&
20436 llvm::all_of(Mask.slice(NumElemsPerConcat, NumElemsPerConcat),
20437 IsUndefMaskElt)) {
20438 N0 = DAG.getVectorShuffle(ConcatVT, SDLoc(N), N0.getOperand(0),
20439 N0.getOperand(1),
20440 Mask.slice(0, NumElemsPerConcat));
20441 N1 = DAG.getUNDEF(ConcatVT);
20442 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, N0, N1);
20443 }
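// For illustration of the special case above, assuming v4 operands A and B
// with NumElemsPerConcat = 4:
//   shuffle (concat A, B), undef, <1,4,3,6,u,u,u,u> -->
//   concat (shuffle A, B, <1,4,3,6>), undef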
20444
20445 // Look at every vector that's inserted. We're looking for exact
20446 // subvector-sized copies from a concatenated vector.
20447 for (unsigned I = 0; I != NumConcats; ++I) {
20448 unsigned Begin = I * NumElemsPerConcat;
20449 ArrayRef<int> SubMask = Mask.slice(Begin, NumElemsPerConcat);
20450
20451 // Make sure we're dealing with a copy.
20452 if (llvm::all_of(SubMask, IsUndefMaskElt)) {
20453 Ops.push_back(DAG.getUNDEF(ConcatVT));
20454 continue;
20455 }
20456
20457 int OpIdx = -1;
20458 for (int i = 0; i != (int)NumElemsPerConcat; ++i) {
20459 if (IsUndefMaskElt(SubMask[i]))
20460 continue;
20461 if ((SubMask[i] % (int)NumElemsPerConcat) != i)
20462 return SDValue();
20463 int EltOpIdx = SubMask[i] / NumElemsPerConcat;
20464 if (0 <= OpIdx && EltOpIdx != OpIdx)
20465 return SDValue();
20466 OpIdx = EltOpIdx;
20467 }
20468 assert(0 <= OpIdx && "Unknown concat_vectors op");
20469
20470 if (OpIdx < (int)N0.getNumOperands())
20471 Ops.push_back(N0.getOperand(OpIdx));
20472 else
20473 Ops.push_back(N1.getOperand(OpIdx - N0.getNumOperands()));
20474 }
20475
20476 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, Ops);
20477}
20478
20479// Attempt to combine a shuffle of 2 inputs of 'scalar sources' -
20480// BUILD_VECTOR or SCALAR_TO_VECTOR into a single BUILD_VECTOR.
20481//
20482// SHUFFLE(BUILD_VECTOR(), BUILD_VECTOR()) -> BUILD_VECTOR() is always
20483// a simplification in some sense, but it isn't appropriate in general: some
20484// BUILD_VECTORs are substantially cheaper than others. The general case
20485// of a BUILD_VECTOR requires inserting each element individually (or
20486// performing the equivalent in a temporary stack variable). A BUILD_VECTOR of
20487// all constants is a single constant pool load. A BUILD_VECTOR where each
20488// element is identical is a splat. A BUILD_VECTOR where most of the operands
20489// are undef lowers to a small number of element insertions.
20490//
20491// To deal with this, we currently use a bunch of mostly arbitrary heuristics.
20492// We don't fold shuffles where one side is a non-zero constant, and we don't
20493// fold shuffles if the resulting (non-splat) BUILD_VECTOR would have duplicate
20494// non-constant operands. This seems to work out reasonably well in practice.
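// For illustration, assuming v4 operands with all-distinct scalars:
//   shuffle (build_vector a,b,c,d), (build_vector e,f,g,h), <0,5,2,7>
//     --> build_vector a,f,c,h
// which passes the duplicate-operand heuristic because no scalar repeats.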
20495static SDValue combineShuffleOfScalars(ShuffleVectorSDNode *SVN,
20496 SelectionDAG &DAG,
20497 const TargetLowering &TLI) {
20498 EVT VT = SVN->getValueType(0);
20499 unsigned NumElts = VT.getVectorNumElements();
20500 SDValue N0 = SVN->getOperand(0);
20501 SDValue N1 = SVN->getOperand(1);
20502
20503 if (!N0->hasOneUse())
20504 return SDValue();
20505
20506 // If only one of N0,N1 is constant, bail out if it is not ALL_ZEROS as
20507 // discussed above.
20508 if (!N1.isUndef()) {
20509 if (!N1->hasOneUse())
20510 return SDValue();
20511
20512 bool N0AnyConst = isAnyConstantBuildVector(N0);
20513 bool N1AnyConst = isAnyConstantBuildVector(N1);
20514 if (N0AnyConst && !N1AnyConst && !ISD::isBuildVectorAllZeros(N0.getNode()))
20515 return SDValue();
20516 if (!N0AnyConst && N1AnyConst && !ISD::isBuildVectorAllZeros(N1.getNode()))
20517 return SDValue();
20518 }
20519
20520 // If both inputs are splats of the same value then we can safely merge this
20521 // to a single BUILD_VECTOR with undef elements based on the shuffle mask.
20522 bool IsSplat = false;
20523 auto *BV0 = dyn_cast<BuildVectorSDNode>(N0);
20524 auto *BV1 = dyn_cast<BuildVectorSDNode>(N1);
20525 if (BV0 && BV1)
20526 if (SDValue Splat0 = BV0->getSplatValue())
20527 IsSplat = (Splat0 == BV1->getSplatValue());
20528
20529 SmallVector<SDValue, 8> Ops;
20530 SmallSet<SDValue, 16> DuplicateOps;
20531 for (int M : SVN->getMask()) {
20532 SDValue Op = DAG.getUNDEF(VT.getScalarType());
20533 if (M >= 0) {
20534 int Idx = M < (int)NumElts ? M : M - NumElts;
20535 SDValue &S = (M < (int)NumElts ? N0 : N1);
20536 if (S.getOpcode() == ISD::BUILD_VECTOR) {
20537 Op = S.getOperand(Idx);
20538 } else if (S.getOpcode() == ISD::SCALAR_TO_VECTOR) {
20539 SDValue Op0 = S.getOperand(0);
20540 Op = Idx == 0 ? Op0 : DAG.getUNDEF(Op0.getValueType());
20541 } else {
20542 // Operand can't be combined - bail out.
20543 return SDValue();
20544 }
20545 }
20546
20547 // Don't duplicate a non-constant BUILD_VECTOR operand unless we're
20548 // generating a splat; semantically, this is fine, but it's likely to
20549 // generate low-quality code if the target can't reconstruct an appropriate
20550 // shuffle.
20551 if (!Op.isUndef() && !isIntOrFPConstant(Op))
20552 if (!IsSplat && !DuplicateOps.insert(Op).second)
20553 return SDValue();
20554
20555 Ops.push_back(Op);
20556 }
20557
20558 // BUILD_VECTOR requires all inputs to be of the same type, find the
20559 // maximum type and extend them all.
20560 EVT SVT = VT.getScalarType();
20561 if (SVT.isInteger())
20562 for (SDValue &Op : Ops)
20563 SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT);
20564 if (SVT != VT.getScalarType())
20565 for (SDValue &Op : Ops)
20566 Op = TLI.isZExtFree(Op.getValueType(), SVT)
20567 ? DAG.getZExtOrTrunc(Op, SDLoc(SVN), SVT)
20568 : DAG.getSExtOrTrunc(Op, SDLoc(SVN), SVT);
20569 return DAG.getBuildVector(VT, SDLoc(SVN), Ops);
20570}
20571
20572// Match shuffles that can be converted to any_vector_extend_in_reg.
20573// This is often generated during legalization.
20574// e.g. v4i32 <0,u,1,u> -> (v2i64 any_vector_extend_in_reg(v4i32 src))
20575// TODO Add support for ZERO_EXTEND_VECTOR_INREG when we have a test case.
20576static SDValue combineShuffleToVectorExtend(ShuffleVectorSDNode *SVN,
20577 SelectionDAG &DAG,
20578 const TargetLowering &TLI,
20579 bool LegalOperations) {
20580 EVT VT = SVN->getValueType(0);
20581 bool IsBigEndian = DAG.getDataLayout().isBigEndian();
20582
20583 // TODO Add support for big-endian when we have a test case.
20584 if (!VT.isInteger() || IsBigEndian)
20585 return SDValue();
20586
20587 unsigned NumElts = VT.getVectorNumElements();
20588 unsigned EltSizeInBits = VT.getScalarSizeInBits();
20589 ArrayRef<int> Mask = SVN->getMask();
20590 SDValue N0 = SVN->getOperand(0);
20591
20592 // shuffle<0,-1,1,-1> == (v2i64 anyextend_vector_inreg(v4i32))
20593 auto isAnyExtend = [&Mask, &NumElts](unsigned Scale) {
20594 for (unsigned i = 0; i != NumElts; ++i) {
20595 if (Mask[i] < 0)
20596 continue;
20597 if ((i % Scale) == 0 && Mask[i] == (int)(i / Scale))
20598 continue;
20599 return false;
20600 }
20601 return true;
20602 };
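// For illustration, isAnyExtend(2) on the v4i32 mask <0,u,1,u> above checks
// element 0 (0 % 2 == 0 and Mask[0] == 0) and element 2 (Mask[2] == 1 == 2/2),
// skips the undef elements, and succeeds, yielding the v2i64 extension.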
20603
20604 // Attempt to match a '*_extend_vector_inreg' shuffle; we just search for
20605 // power-of-2 extensions as they are the most likely.
20606 for (unsigned Scale = 2; Scale < NumElts; Scale *= 2) {
20607 // Check for non-power-of-2 vector sizes.
20608 if (NumElts % Scale != 0)
20609 continue;
20610 if (!isAnyExtend(Scale))
20611 continue;
20612
20613 EVT OutSVT = EVT::getIntegerVT(*DAG.getContext(), EltSizeInBits * Scale);
20614 EVT OutVT = EVT::getVectorVT(*DAG.getContext(), OutSVT, NumElts / Scale);
20615 // Never create an illegal type. Only create unsupported operations if we
20616 // are pre-legalization.
20617 if (TLI.isTypeLegal(OutVT))
20618 if (!LegalOperations ||
20619 TLI.isOperationLegalOrCustom(ISD::ANY_EXTEND_VECTOR_INREG, OutVT))
20620 return DAG.getBitcast(VT,
20621 DAG.getNode(ISD::ANY_EXTEND_VECTOR_INREG,
20622 SDLoc(SVN), OutVT, N0));
20623 }
20624
20625 return SDValue();
20626}
20627
20628// Detect 'truncate_vector_inreg' style shuffles that pack the lower parts of
20629// each source element of a large type into the lowest elements of a smaller
20630// destination type. This is often generated during legalization.
20631// If the source node itself was a '*_extend_vector_inreg' node then we should
20632// then be able to remove it.
20633static SDValue combineTruncationShuffle(ShuffleVectorSDNode *SVN,
20634 SelectionDAG &DAG) {
20635 EVT VT = SVN->getValueType(0);
20636 bool IsBigEndian = DAG.getDataLayout().isBigEndian();
20637
20638 // TODO Add support for big-endian when we have a test case.
20639 if (!VT.isInteger() || IsBigEndian)
20640 return SDValue();
20641
20642 SDValue N0 = peekThroughBitcasts(SVN->getOperand(0));
20643
20644 unsigned Opcode = N0.getOpcode();
20645 if (Opcode != ISD::ANY_EXTEND_VECTOR_INREG &&
20646 Opcode != ISD::SIGN_EXTEND_VECTOR_INREG &&
20647 Opcode != ISD::ZERO_EXTEND_VECTOR_INREG)
20648 return SDValue();
20649
20650 SDValue N00 = N0.getOperand(0);
20651 ArrayRef<int> Mask = SVN->getMask();
20652 unsigned NumElts = VT.getVectorNumElements();
20653 unsigned EltSizeInBits = VT.getScalarSizeInBits();
20654 unsigned ExtSrcSizeInBits = N00.getScalarValueSizeInBits();
20655 unsigned ExtDstSizeInBits = N0.getScalarValueSizeInBits();
20656
20657 if (ExtDstSizeInBits % ExtSrcSizeInBits != 0)
20658 return SDValue();
20659 unsigned ExtScale = ExtDstSizeInBits / ExtSrcSizeInBits;
20660
20661 // (v4i32 truncate_vector_inreg(v2i64)) == shuffle<0,2,-1,-1>
20662 // (v8i16 truncate_vector_inreg(v4i32)) == shuffle<0,2,4,6,-1,-1,-1,-1>
20663 // (v8i16 truncate_vector_inreg(v2i64)) == shuffle<0,4,-1,-1,-1,-1,-1,-1>
20664 auto isTruncate = [&Mask, &NumElts](unsigned Scale) {
20665 for (unsigned i = 0; i != NumElts; ++i) {
20666 if (Mask[i] < 0)
20667 continue;
20668 if ((i * Scale) < NumElts && Mask[i] == (int)(i * Scale))
20669 continue;
20670 return false;
20671 }
20672 return true;
20673 };
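// For illustration, for the v8i16 <- v4i32 case above, ExtScale = 32 / 16 = 2
// and isTruncate(2) accepts mask <0,2,4,6,u,u,u,u> since Mask[i] == i * 2 for
// every defined element, so the shuffle folds to a bitcast of the un-extended
// source.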
20674
20675 // At the moment we just handle the case where we've truncated back to the
20676 // same size as before the extension.
20677 // TODO: handle more extension/truncation cases as cases arise.
20678 if (EltSizeInBits != ExtSrcSizeInBits)
20679 return SDValue();
20680
20681 // We can remove *extend_vector_inreg only if the truncation happens at
20682 // the same scale as the extension.
20683 if (isTruncate(ExtScale))
20684 return DAG.getBitcast(VT, N00);
20685
20686 return SDValue();
20687}
20688
20689// Combine shuffles of splat-shuffles of the form:
20690// shuffle (shuffle V, undef, splat-mask), undef, M
20691// If splat-mask contains undef elements, we need to be careful about
20692 // introducing undefs in the folded mask that are not the result of composing
20693// the masks of the shuffles.
20694static SDValue combineShuffleOfSplatVal(ShuffleVectorSDNode *Shuf,
20695 SelectionDAG &DAG) {
20696 if (!Shuf->getOperand(1).isUndef())
20697 return SDValue();
20698 auto *Splat = dyn_cast<ShuffleVectorSDNode>(Shuf->getOperand(0));
20699 if (!Splat || !Splat->isSplat())
20700 return SDValue();
20701
20702 ArrayRef<int> ShufMask = Shuf->getMask();
20703 ArrayRef<int> SplatMask = Splat->getMask();
20704 assert(ShufMask.size() == SplatMask.size() && "Mask length mismatch");
20705
20706 // Prefer simplifying to the splat-shuffle, if possible. This is legal if
20707 // every undef mask element in the splat-shuffle has a corresponding undef
20708 // element in the user-shuffle's mask or if the composition of mask elements
20709 // would result in undef.
20710 // Examples for (shuffle (shuffle v, undef, SplatMask), undef, UserMask):
20711 // * UserMask=[0,2,u,u], SplatMask=[2,u,2,u] -> [2,2,u,u]
20712 // In this case it is not legal to simplify to the splat-shuffle because we
20713 // may be exposing to the users of the shuffle an undef element at index 1
20714 // which was not there before the combine.
20715 // * UserMask=[0,u,2,u], SplatMask=[2,u,2,u] -> [2,u,2,u]
20716 // In this case the composition of masks yields SplatMask, so it's ok to
20717 // simplify to the splat-shuffle.
20718 // * UserMask=[3,u,2,u], SplatMask=[2,u,2,u] -> [u,u,2,u]
20719 // In this case the composed mask includes all undef elements of SplatMask
20720 // and in addition sets element zero to undef. It is safe to simplify to
20721 // the splat-shuffle.
20722 auto CanSimplifyToExistingSplat = [](ArrayRef<int> UserMask,
20723 ArrayRef<int> SplatMask) {
20724 for (unsigned i = 0, e = UserMask.size(); i != e; ++i)
20725 if (UserMask[i] != -1 && SplatMask[i] == -1 &&
20726 SplatMask[UserMask[i]] != -1)
20727 return false;
20728 return true;
20729 };
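// Tracing the first example above through this check: with UserMask=[0,2,u,u]
// and SplatMask=[2,u,2,u], at i == 1 we have UserMask[1] == 2, SplatMask[1]
// == -1, and SplatMask[2] == 2 != -1, so the lambda returns false and we fall
// through to composing the masks instead.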
20730 if (CanSimplifyToExistingSplat(ShufMask, SplatMask))
20731 return Shuf->getOperand(0);
20732
20733 // Create a new shuffle with a mask that is composed of the two shuffles'
20734 // masks.
20735 SmallVector<int, 32> NewMask;
20736 for (int Idx : ShufMask)
20737 NewMask.push_back(Idx == -1 ? -1 : SplatMask[Idx]);
20738
20739 return DAG.getVectorShuffle(Splat->getValueType(0), SDLoc(Splat),
20740 Splat->getOperand(0), Splat->getOperand(1),
20741 NewMask);
20742}
20743
20744/// Combine shuffle of shuffle of the form:
20745/// shuf (shuf X, undef, InnerMask), undef, OuterMask --> splat X
20746static SDValue formSplatFromShuffles(ShuffleVectorSDNode *OuterShuf,
20747 SelectionDAG &DAG) {
20748 if (!OuterShuf->getOperand(1).isUndef())
20749 return SDValue();
20750 auto *InnerShuf = dyn_cast<ShuffleVectorSDNode>(OuterShuf->getOperand(0));
20751 if (!InnerShuf || !InnerShuf->getOperand(1).isUndef())
20752 return SDValue();
20753
20754 ArrayRef<int> OuterMask = OuterShuf->getMask();
20755 ArrayRef<int> InnerMask = InnerShuf->getMask();
20756 unsigned NumElts = OuterMask.size();
20757 assert(NumElts == InnerMask.size() && "Mask length mismatch");
20758 SmallVector<int, 32> CombinedMask(NumElts, -1);
20759 int SplatIndex = -1;
20760 for (unsigned i = 0; i != NumElts; ++i) {
20761 // Undef lanes remain undef.
20762 int OuterMaskElt = OuterMask[i];
20763 if (OuterMaskElt == -1)
20764 continue;
20765
20766 // Peek through the shuffle masks to get the underlying source element.
20767 int InnerMaskElt = InnerMask[OuterMaskElt];
20768 if (InnerMaskElt == -1)
20769 continue;
20770
20771 // Initialize the splatted element.
20772 if (SplatIndex == -1)
20773 SplatIndex = InnerMaskElt;
20774
20775 // Non-matching index - this is not a splat.
20776 if (SplatIndex != InnerMaskElt)
20777 return SDValue();
20778
20779 CombinedMask[i] = InnerMaskElt;
20780 }
20781 assert((all_of(CombinedMask, [](int M) { return M == -1; }) ||
20782 getSplatIndex(CombinedMask) != -1) &&
20783 "Expected a splat mask");
20784
20785 // TODO: The transform may be a win even if the mask is not legal.
20786 EVT VT = OuterShuf->getValueType(0);
20787 assert(VT == InnerShuf->getValueType(0) && "Expected matching shuffle types");
20788 if (!DAG.getTargetLoweringInfo().isShuffleMaskLegal(CombinedMask, VT))
20789 return SDValue();
20790
20791 return DAG.getVectorShuffle(VT, SDLoc(OuterShuf), InnerShuf->getOperand(0),
20792 InnerShuf->getOperand(1), CombinedMask);
20793}
20794
20795/// If the shuffle mask is taking exactly one element from the first vector
20796/// operand and passing through all other elements from the second vector
20797/// operand, return the index of the mask element that is choosing an element
20798/// from the first operand. Otherwise, return -1.
20799static int getShuffleMaskIndexOfOneElementFromOp0IntoOp1(ArrayRef<int> Mask) {
20800 int MaskSize = Mask.size();
20801 int EltFromOp0 = -1;
20802 // TODO: This does not match if there are undef elements in the shuffle mask.
20803 // Should we ignore undefs in the shuffle mask instead? The trade-off is
20804 // removing an instruction (a shuffle), but losing the knowledge that some
20805 // vector lanes are not needed.
20806 for (int i = 0; i != MaskSize; ++i) {
20807 if (Mask[i] >= 0 && Mask[i] < MaskSize) {
20808 // We're looking for a shuffle of exactly one element from operand 0.
20809 if (EltFromOp0 != -1)
20810 return -1;
20811 EltFromOp0 = i;
20812 } else if (Mask[i] != i + MaskSize) {
20813 // Nothing from operand 1 can change lanes.
20814 return -1;
20815 }
20816 }
20817 return EltFromOp0;
20818}
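// For illustration, with MaskSize = 4 and Mask = <4,5,1,7> (values assumed):
// elements 0, 1 and 3 pass through from operand 1 in place (Mask[i] == i + 4),
// and element 2 chooses lane 1 of operand 0, so the function returns 2.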
20819
20820/// If a shuffle inserts exactly one element from a source vector operand into
20821/// another vector operand and we can access the specified element as a scalar,
20822/// then we can eliminate the shuffle.
20823static SDValue replaceShuffleOfInsert(ShuffleVectorSDNode *Shuf,
20824 SelectionDAG &DAG) {
20825 // First, check if we are taking one element of a vector and shuffling that
20826 // element into another vector.
20827 ArrayRef<int> Mask = Shuf->getMask();
20828 SmallVector<int, 16> CommutedMask(Mask.begin(), Mask.end());
20829 SDValue Op0 = Shuf->getOperand(0);
20830 SDValue Op1 = Shuf->getOperand(1);
20831 int ShufOp0Index = getShuffleMaskIndexOfOneElementFromOp0IntoOp1(Mask);
20832 if (ShufOp0Index == -1) {
20833 // Commute mask and check again.
20834 ShuffleVectorSDNode::commuteMask(CommutedMask);
20835 ShufOp0Index = getShuffleMaskIndexOfOneElementFromOp0IntoOp1(CommutedMask);
20836 if (ShufOp0Index == -1)
20837 return SDValue();
20838 // Commute operands to match the commuted shuffle mask.
20839 std::swap(Op0, Op1);
20840 Mask = CommutedMask;
20841 }
20842
20843 // The shuffle inserts exactly one element from operand 0 into operand 1.
20844 // Now see if we can access that element as a scalar via a real insert element
20845 // instruction.
20846 // TODO: We can try harder to locate the element as a scalar. Examples: it
20847 // could be an operand of SCALAR_TO_VECTOR, BUILD_VECTOR, or a constant.
20848 assert(Mask[ShufOp0Index] >= 0 && Mask[ShufOp0Index] < (int)Mask.size() &&
20849 "Shuffle mask value must be from operand 0");
20850 if (Op0.getOpcode() != ISD::INSERT_VECTOR_ELT)
20851 return SDValue();
20852
20853 auto *InsIndexC = dyn_cast<ConstantSDNode>(Op0.getOperand(2));
20854 if (!InsIndexC || InsIndexC->getSExtValue() != Mask[ShufOp0Index])
20855 return SDValue();
20856
20857 // There's an existing insertelement with constant insertion index, so we
20858 // don't need to check the legality/profitability of a replacement operation
20859 // that differs at most in the constant value. The target should be able to
20860 // lower any of those in a similar way. If not, legalization will expand this
20861 // to a scalar-to-vector plus shuffle.
20862 //
20863 // Note that the shuffle may move the scalar from the position that the insert
20864 // element used. Therefore, our new insert element occurs at the shuffle's
20865 // mask index value, not the insert's index value.
20866 // shuffle (insertelt v1, x, C), v2, mask --> insertelt v2, x, C'
20867 SDValue NewInsIndex = DAG.getVectorIdxConstant(ShufOp0Index, SDLoc(Shuf));
20868 return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(Shuf), Op0.getValueType(),
20869 Op1, Op0.getOperand(1), NewInsIndex);
20870}
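// For illustration, assuming v4 operands:
//   shuffle (insertelt V1, x, 0), V2, <4,5,0,7>
// inserts exactly one element (x) into lane 2 of the pass-through V2, so it
// becomes: insertelt V2, x, 2.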
20871
20872/// If we have a unary shuffle of a shuffle, see if it can be folded away
20873/// completely. This has the potential to lose undef knowledge because the first
20874/// shuffle may not have an undef mask element where the second one does. So
20875/// only call this after doing simplifications based on demanded elements.
20876static SDValue simplifyShuffleOfShuffle(ShuffleVectorSDNode *Shuf) {
20877 // shuf (shuf0 X, Y, Mask0), undef, Mask
20878 auto *Shuf0 = dyn_cast<ShuffleVectorSDNode>(Shuf->getOperand(0));
20879 if (!Shuf0 || !Shuf->getOperand(1).isUndef())
20880 return SDValue();
20881
20882 ArrayRef<int> Mask = Shuf->getMask();
20883 ArrayRef<int> Mask0 = Shuf0->getMask();
20884 for (int i = 0, e = (int)Mask.size(); i != e; ++i) {
20885 // Ignore undef elements.
20886 if (Mask[i] == -1)
20887 continue;
20888 assert(Mask[i] >= 0 && Mask[i] < e && "Unexpected shuffle mask value");
20889
20890 // Is the element of the shuffle operand chosen by this shuffle the same as
20891 // the element chosen by the shuffle operand itself?
20892 if (Mask0[Mask[i]] != Mask0[i])
20893 return SDValue();
20894 }
20895 // Every element of this shuffle is identical to the result of the previous
20896 // shuffle, so we can replace this value.
20897 return Shuf->getOperand(0);
20898}
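// For illustration, assuming Mask0 = <0,0,2,2> and Mask = <1,0,3,2>: every
// lane satisfies Mask0[Mask[i]] == Mask0[i] (swapping within equal pairs is a
// no-op), so the outer shuffle is replaced by the inner one.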
20899
20900SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
20901 EVT VT = N->getValueType(0);
20902 unsigned NumElts = VT.getVectorNumElements();
20903
20904 SDValue N0 = N->getOperand(0);
20905 SDValue N1 = N->getOperand(1);
20906
20907 assert(N0.getValueType() == VT && "Vector shuffle must be normalized in DAG");
20908
20909 // Canonicalize shuffle undef, undef -> undef
20910 if (N0.isUndef() && N1.isUndef())
20911 return DAG.getUNDEF(VT);
20912
20913 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
20914
20915 // Canonicalize shuffle v, v -> v, undef
20916 if (N0 == N1) {
20917 SmallVector<int, 8> NewMask;
20918 for (unsigned i = 0; i != NumElts; ++i) {
20919 int Idx = SVN->getMaskElt(i);
20920 if (Idx >= (int)NumElts) Idx -= NumElts;
20921 NewMask.push_back(Idx);
20922 }
20923 return DAG.getVectorShuffle(VT, SDLoc(N), N0, DAG.getUNDEF(VT), NewMask);
20924 }
20925
20926 // Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask.
20927 if (N0.isUndef())
20928 return DAG.getCommutedVectorShuffle(*SVN);
20929
20930 // Remove references to rhs if it is undef
20931 if (N1.isUndef()) {
20932 bool Changed = false;
20933 SmallVector<int, 8> NewMask;
20934 for (unsigned i = 0; i != NumElts; ++i) {
20935 int Idx = SVN->getMaskElt(i);
20936 if (Idx >= (int)NumElts) {
20937 Idx = -1;
20938 Changed = true;
20939 }
20940 NewMask.push_back(Idx);
20941 }
20942 if (Changed)
20943 return DAG.getVectorShuffle(VT, SDLoc(N), N0, N1, NewMask);
20944 }
20945
20946 if (SDValue InsElt = replaceShuffleOfInsert(SVN, DAG))
20947 return InsElt;
20948
20949 // A shuffle of a single vector that is a splatted value can always be folded.
20950 if (SDValue V = combineShuffleOfSplatVal(SVN, DAG))
20951 return V;
20952
20953 if (SDValue V = formSplatFromShuffles(SVN, DAG))
20954 return V;
20955
20956 // If it is a splat, check if the argument vector is another splat or a
20957 // build_vector.
20958 if (SVN->isSplat() && SVN->getSplatIndex() < (int)NumElts) {
20959 int SplatIndex = SVN->getSplatIndex();
20960 if (N0.hasOneUse() && TLI.isExtractVecEltCheap(VT, SplatIndex) &&
20961 TLI.isBinOp(N0.getOpcode()) && N0.getNode()->getNumValues() == 1) {
20962 // splat (vector_bo L, R), Index -->
20963 // splat (scalar_bo (extelt L, Index), (extelt R, Index))
20964 SDValue L = N0.getOperand(0), R = N0.getOperand(1);
20965 SDLoc DL(N);
20966 EVT EltVT = VT.getScalarType();
20967 SDValue Index = DAG.getVectorIdxConstant(SplatIndex, DL);
20968 SDValue ExtL = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, L, Index);
20969 SDValue ExtR = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, R, Index);
20970 SDValue NewBO = DAG.getNode(N0.getOpcode(), DL, EltVT, ExtL, ExtR,
20971 N0.getNode()->getFlags());
20972 SDValue Insert = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, NewBO);
20973 SmallVector<int, 16> ZeroMask(VT.getVectorNumElements(), 0);
20974 return DAG.getVectorShuffle(VT, DL, Insert, DAG.getUNDEF(VT), ZeroMask);
20975 }
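// For illustration, assuming VT = v4i32 and a splat of lane 0 of (add L, R):
//   splat (add L, R), 0 -->
//   splat (scalar_to_vector (add (extelt L, 0), (extelt R, 0))), 0
// i.e. one scalar add instead of a vector add feeding a splat.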
20976
20977 // If this is a bit convert that changes the element type of the vector but
20978 // not the number of vector elements, look through it. Be careful not to
20979 // look through conversions that change things like v4f32 to v2f64.
20980 SDNode *V = N0.getNode();
20981 if (V->getOpcode() == ISD::BITCAST) {
20982 SDValue ConvInput = V->getOperand(0);
20983 if (ConvInput.getValueType().isVector() &&
20984 ConvInput.getValueType().getVectorNumElements() == NumElts)
20985 V = ConvInput.getNode();
20986 }
20987
20988 if (V->getOpcode() == ISD::BUILD_VECTOR) {
20989 assert(V->getNumOperands() == NumElts &&
20990 "BUILD_VECTOR has wrong number of operands");
20991 SDValue Base;
20992 bool AllSame = true;
20993 for (unsigned i = 0; i != NumElts; ++i) {
20994 if (!V->getOperand(i).isUndef()) {
20995 Base = V->getOperand(i);
20996 break;
20997 }
20998 }
20999 // Splat of <u, u, u, u>, return <u, u, u, u>
21000 if (!Base.getNode())
21001 return N0;
21002 for (unsigned i = 0; i != NumElts; ++i) {
21003 if (V->getOperand(i) != Base) {
21004 AllSame = false;
21005 break;
21006 }
21007 }
21008 // Splat of <x, x, x, x>, return <x, x, x, x>
21009 if (AllSame)
21010 return N0;
21011
21012 // Canonicalize any other splat as a build_vector.
21013 SDValue Splatted = V->getOperand(SplatIndex);
21014 SmallVector<SDValue, 8> Ops(NumElts, Splatted);
21015 SDValue NewBV = DAG.getBuildVector(V->getValueType(0), SDLoc(N), Ops);
21016
21017 // We may have jumped through bitcasts, so the type of the
21018 // BUILD_VECTOR may not match the type of the shuffle.
21019 if (V->getValueType(0) != VT)
21020 NewBV = DAG.getBitcast(VT, NewBV);
21021 return NewBV;
21022 }
21023 }
21024
21025 // Simplify source operands based on shuffle mask.
21026 if (SimplifyDemandedVectorElts(SDValue(N, 0)))
21027 return SDValue(N, 0);
21028
21029 // This is intentionally placed after demanded elements simplification because
21030 // it could eliminate knowledge of undef elements created by this shuffle.
21031 if (SDValue ShufOp = simplifyShuffleOfShuffle(SVN))
21032 return ShufOp;
21033
21034 // Match shuffles that can be converted to any_vector_extend_in_reg.
21035 if (SDValue V = combineShuffleToVectorExtend(SVN, DAG, TLI, LegalOperations))
21036 return V;
21037
21038 // Combine "truncate_vector_in_reg" style shuffles.
21039 if (SDValue V = combineTruncationShuffle(SVN, DAG))
21040 return V;
21041
21042 if (N0.getOpcode() == ISD::CONCAT_VECTORS &&
21043 Level < AfterLegalizeVectorOps &&
21044 (N1.isUndef() ||
21045 (N1.getOpcode() == ISD::CONCAT_VECTORS &&
21046 N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType()))) {
21047 if (SDValue V = partitionShuffleOfConcats(N, DAG))
21048 return V;
21049 }
21050
21051 // A shuffle of a concat of the same narrow vector can be reduced to use
21052 // only low-half elements of a concat with undef:
21053 // shuf (concat X, X), undef, Mask --> shuf (concat X, undef), undef, Mask'
21054 if (N0.getOpcode() == ISD::CONCAT_VECTORS && N1.isUndef() &&
21055 N0.getNumOperands() == 2 &&
21056 N0.getOperand(0) == N0.getOperand(1)) {
21057 int HalfNumElts = (int)NumElts / 2;
21058 SmallVector<int, 8> NewMask;
21059 for (unsigned i = 0; i != NumElts; ++i) {
21060 int Idx = SVN->getMaskElt(i);
21061 if (Idx >= HalfNumElts) {
21062 assert(Idx < (int)NumElts && "Shuffle mask chooses undef op");
21063 Idx -= HalfNumElts;
21064 }
21065 NewMask.push_back(Idx);
21066 }
21067 if (TLI.isShuffleMaskLegal(NewMask, VT)) {
21068 SDValue UndefVec = DAG.getUNDEF(N0.getOperand(0).getValueType());
21069 SDValue NewCat = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT,
21070 N0.getOperand(0), UndefVec);
21071 return DAG.getVectorShuffle(VT, SDLoc(N), NewCat, N1, NewMask);
21072 }
21073 }
21074
21075 // Attempt to combine a shuffle of 2 inputs of 'scalar sources' -
21076 // BUILD_VECTOR or SCALAR_TO_VECTOR into a single BUILD_VECTOR.
21077 if (Level < AfterLegalizeDAG && TLI.isTypeLegal(VT))
21078 if (SDValue Res = combineShuffleOfScalars(SVN, DAG, TLI))
21079 return Res;
21080
21081 // If this shuffle only has a single input that is a bitcasted shuffle,
21082 // attempt to merge the 2 shuffles and suitably bitcast the inputs/output
21083 // back to their original types.
21084 if (N0.getOpcode() == ISD::BITCAST && N0.hasOneUse() &&
21085 N1.isUndef() && Level < AfterLegalizeVectorOps &&
21086 TLI.isTypeLegal(VT)) {
21087
21088 SDValue BC0 = peekThroughOneUseBitcasts(N0);
21089 if (BC0.getOpcode() == ISD::VECTOR_SHUFFLE && BC0.hasOneUse()) {
21090 EVT SVT = VT.getScalarType();
21091 EVT InnerVT = BC0->getValueType(0);
21092 EVT InnerSVT = InnerVT.getScalarType();
21093
21094 // Determine which shuffle works with the smaller scalar type.
21095 EVT ScaleVT = SVT.bitsLT(InnerSVT) ? VT : InnerVT;
21096 EVT ScaleSVT = ScaleVT.getScalarType();
21097
21098 if (TLI.isTypeLegal(ScaleVT) &&
21099 0 == (InnerSVT.getSizeInBits() % ScaleSVT.getSizeInBits()) &&
21100 0 == (SVT.getSizeInBits() % ScaleSVT.getSizeInBits())) {
21101 int InnerScale = InnerSVT.getSizeInBits() / ScaleSVT.getSizeInBits();
21102 int OuterScale = SVT.getSizeInBits() / ScaleSVT.getSizeInBits();
21103
21104 // Scale the shuffle masks to the smaller scalar type.
21105 ShuffleVectorSDNode *InnerSVN = cast<ShuffleVectorSDNode>(BC0);
21106 SmallVector<int, 8> InnerMask;
21107 SmallVector<int, 8> OuterMask;
21108 narrowShuffleMaskElts(InnerScale, InnerSVN->getMask(), InnerMask);
21109 narrowShuffleMaskElts(OuterScale, SVN->getMask(), OuterMask);
21110
21111 // Merge the shuffle masks.
21112 SmallVector<int, 8> NewMask;
21113 for (int M : OuterMask)
21114 NewMask.push_back(M < 0 ? -1 : InnerMask[M]);
21115
21116 // Test for shuffle mask legality over both commutations.
21117 SDValue SV0 = BC0->getOperand(0);
21118 SDValue SV1 = BC0->getOperand(1);
21119 bool LegalMask = TLI.isShuffleMaskLegal(NewMask, ScaleVT);
21120 if (!LegalMask) {
21121 std::swap(SV0, SV1);
21122 ShuffleVectorSDNode::commuteMask(NewMask);
21123 LegalMask = TLI.isShuffleMaskLegal(NewMask, ScaleVT);
21124 }
21125
21126 if (LegalMask) {
21127 SV0 = DAG.getBitcast(ScaleVT, SV0);
21128 SV1 = DAG.getBitcast(ScaleVT, SV1);
21129 return DAG.getBitcast(
21130 VT, DAG.getVectorShuffle(ScaleVT, SDLoc(N), SV0, SV1, NewMask));
21131 }
21132 }
21133 }
21134 }
21135
21136 // Compute the combined shuffle mask for a shuffle with SV0 as the first
21137 // operand, and SV1 as the second operand.
21138 // i.e. Merge SVN(OtherSVN, N1) -> shuffle(SV0, SV1, Mask) iff Commute = false
21139 // Merge SVN(N1, OtherSVN) -> shuffle(SV0, SV1, Mask') iff Commute = true
21140 auto MergeInnerShuffle =
21141 [NumElts, &VT](bool Commute, ShuffleVectorSDNode *SVN,
21142 ShuffleVectorSDNode *OtherSVN, SDValue N1,
21143 const TargetLowering &TLI, SDValue &SV0, SDValue &SV1,
21144 SmallVectorImpl<int> &Mask) -> bool {
21145 // Don't try to fold splats; they're likely to simplify somehow, or they
21146 // might be free.
21147 if (OtherSVN->isSplat())
21148 return false;
21149
21150 SV0 = SV1 = SDValue();
21151 Mask.clear();
21152
21153 for (unsigned i = 0; i != NumElts; ++i) {
21154 int Idx = SVN->getMaskElt(i);
21155 if (Idx < 0) {
21156 // Propagate Undef.
21157 Mask.push_back(Idx);
21158 continue;
21159 }
21160
21161 if (Commute)
21162 Idx = (Idx < (int)NumElts) ? (Idx + NumElts) : (Idx - NumElts);
21163
21164 SDValue CurrentVec;
21165 if (Idx < (int)NumElts) {
21166 // This shuffle index refers to the inner shuffle N0. Lookup the inner
21167 // shuffle mask to identify which vector is actually referenced.
21168 Idx = OtherSVN->getMaskElt(Idx);
21169 if (Idx < 0) {
21170 // Propagate Undef.
21171 Mask.push_back(Idx);
21172 continue;
21173 }
21174 CurrentVec = (Idx < (int)NumElts) ? OtherSVN->getOperand(0)
21175 : OtherSVN->getOperand(1);
21176 } else {
21177 // This shuffle index references an element within N1.
21178 CurrentVec = N1;
21179 }
21180
21181 // Simple case where 'CurrentVec' is UNDEF.
21182 if (CurrentVec.isUndef()) {
21183 Mask.push_back(-1);
21184 continue;
21185 }
21186
21187 // Canonicalize the shuffle index. We don't know yet if CurrentVec
21188 // will be the first or second operand of the combined shuffle.
21189 Idx = Idx % NumElts;
21190 if (!SV0.getNode() || SV0 == CurrentVec) {
21191 // Ok. CurrentVec is the left hand side.
21192 // Update the mask accordingly.
21193 SV0 = CurrentVec;
21194 Mask.push_back(Idx);
21195 continue;
21196 }
21197 if (!SV1.getNode() || SV1 == CurrentVec) {
21198 // Ok. CurrentVec is the right hand side.
21199 // Update the mask accordingly.
21200 SV1 = CurrentVec;
21201 Mask.push_back(Idx + NumElts);
21202 continue;
21203 }
21204
21205 // Last chance - see if the vector is another shuffle and if it
21206 // uses one of the existing candidate shuffle ops.
21207 if (auto *CurrentSVN = dyn_cast<ShuffleVectorSDNode>(CurrentVec)) {
21208 int InnerIdx = CurrentSVN->getMaskElt(Idx);
21209 if (InnerIdx < 0) {
21210 Mask.push_back(-1);
21211 continue;
21212 }
21213 SDValue InnerVec = (InnerIdx < (int)NumElts)
21214 ? CurrentSVN->getOperand(0)
21215 : CurrentSVN->getOperand(1);
21216 if (InnerVec.isUndef()) {
21217 Mask.push_back(-1);
21218 continue;
21219 }
21220 InnerIdx %= NumElts;
21221 if (InnerVec == SV0) {
21222 Mask.push_back(InnerIdx);
21223 continue;
21224 }
21225 if (InnerVec == SV1) {
21226 Mask.push_back(InnerIdx + NumElts);
21227 continue;
21228 }
21229 }
21230
21231 // Bail out if we cannot convert the shuffle pair into a single shuffle.
21232 return false;
21233 }
21234
21235 if (llvm::all_of(Mask, [](int M) { return M < 0; }))
21236 return true;
21237
21238 // Avoid introducing shuffles with illegal mask.
21239 // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(A, B, M2)
21240 // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(A, C, M2)
21241 // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(B, C, M2)
21242 // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(B, A, M2)
21243 // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(C, A, M2)
21244 // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(C, B, M2)
21245 if (TLI.isShuffleMaskLegal(Mask, VT))
21246 return true;
21247
21248 std::swap(SV0, SV1);
21249 ShuffleVectorSDNode::commuteMask(Mask);
21250 return TLI.isShuffleMaskLegal(Mask, VT);
21251 };
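// For illustration of MergeInnerShuffle, assuming v4 operands and Commute =
// false: merging SVN = shuffle (shuffle A, B, <0,4,1,5>), C, <0,1,0,1>
// resolves every mask element through the inner shuffle, giving SV0 = A,
// SV1 = B and Mask = <0,4,0,4>, so C drops out entirely.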
21252
21253 if (Level < AfterLegalizeDAG && TLI.isTypeLegal(VT)) {
21254 // Canonicalize shuffles according to rules:
21255 // shuffle(A, shuffle(A, B)) -> shuffle(shuffle(A,B), A)
21256 // shuffle(B, shuffle(A, B)) -> shuffle(shuffle(A,B), B)
21257 // shuffle(B, shuffle(A, Undef)) -> shuffle(shuffle(A, Undef), B)
21258 if (N1.getOpcode() == ISD::VECTOR_SHUFFLE &&
21259 N0.getOpcode() != ISD::VECTOR_SHUFFLE) {
21260 // The incoming shuffle must be of the same type as the result of the
21261 // current shuffle.
21262 assert(N1->getOperand(0).getValueType() == VT &&
21263 "Shuffle types don't match");
21264
21265 SDValue SV0 = N1->getOperand(0);
21266 SDValue SV1 = N1->getOperand(1);
21267 bool HasSameOp0 = N0 == SV0;
21268 bool IsSV1Undef = SV1.isUndef();
21269 if (HasSameOp0 || IsSV1Undef || N0 == SV1)
21270 // Commute the operands of this shuffle so merging below will trigger.
21271 return DAG.getCommutedVectorShuffle(*SVN);
21272 }
21273
21274 // Canonicalize splat shuffles to the RHS to improve merging below.
21275 // shuffle(splat(A,u), shuffle(C,D)) -> shuffle'(shuffle(C,D), splat(A,u))
21276 if (N0.getOpcode() == ISD::VECTOR_SHUFFLE &&
21277 N1.getOpcode() == ISD::VECTOR_SHUFFLE &&
21278 cast<ShuffleVectorSDNode>(N0)->isSplat() &&
21279 !cast<ShuffleVectorSDNode>(N1)->isSplat()) {
21280 return DAG.getCommutedVectorShuffle(*SVN);
21281 }
21282
21283 // Try to fold according to rules:
21284 // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(A, B, M2)
21285 // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(A, C, M2)
21286 // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(B, C, M2)
21287 // Don't try to fold shuffles with illegal type.
21288 // Only fold if this shuffle is the only user of the other shuffle.
21289 // Try matching shuffle(C,shuffle(A,B)) commuted patterns as well.
21290 for (int i = 0; i != 2; ++i) {
21291 if (N->getOperand(i).getOpcode() == ISD::VECTOR_SHUFFLE &&
21292 N->isOnlyUserOf(N->getOperand(i).getNode())) {
21293 // The incoming shuffle must be of the same type as the result of the
21294 // current shuffle.
21295 auto *OtherSV = cast<ShuffleVectorSDNode>(N->getOperand(i));
21296 assert(OtherSV->getOperand(0).getValueType() == VT &&
21297 "Shuffle types don't match");
21298
21299 SDValue SV0, SV1;
21300 SmallVector<int, 4> Mask;
21301 if (MergeInnerShuffle(i != 0, SVN, OtherSV, N->getOperand(1 - i), TLI,
21302 SV0, SV1, Mask)) {
21303 // Check if all indices in Mask are Undef. In case, propagate Undef.
21304 if (llvm::all_of(Mask, [](int M) { return M < 0; }))
21305 return DAG.getUNDEF(VT);
21306
21307 return DAG.getVectorShuffle(VT, SDLoc(N),
21308 SV0 ? SV0 : DAG.getUNDEF(VT),
21309 SV1 ? SV1 : DAG.getUNDEF(VT), Mask);
21310 }
21311 }
21312 }
21313
21314 // Merge shuffles through binops if we are able to merge them with at least
21315 // one other shuffle.
21316 // shuffle(bop(shuffle(x,y),shuffle(z,w)),undef)
21317 // shuffle(bop(shuffle(x,y),shuffle(z,w)),bop(shuffle(a,b),shuffle(c,d)))
21318 unsigned SrcOpcode = N0.getOpcode();
21319 if (TLI.isBinOp(SrcOpcode) && N->isOnlyUserOf(N0.getNode()) &&
21320 (N1.isUndef() ||
21321 (SrcOpcode == N1.getOpcode() && N->isOnlyUserOf(N1.getNode())))) {
21322 // Get binop source ops, or just pass on the undef.
21323 SDValue Op00 = N0.getOperand(0);
21324 SDValue Op01 = N0.getOperand(1);
21325 SDValue Op10 = N1.isUndef() ? N1 : N1.getOperand(0);
21326 SDValue Op11 = N1.isUndef() ? N1 : N1.getOperand(1);
21327 // TODO: We might be able to relax the VT check but we don't currently
21328 // have any isBinOp() that has different result/ops VTs so play safe until
21329 // we have test coverage.
21330 if (Op00.getValueType() == VT && Op10.getValueType() == VT &&
21331 Op01.getValueType() == VT && Op11.getValueType() == VT &&
21332 (Op00.getOpcode() == ISD::VECTOR_SHUFFLE ||
21333 Op10.getOpcode() == ISD::VECTOR_SHUFFLE ||
21334 Op01.getOpcode() == ISD::VECTOR_SHUFFLE ||
21335 Op11.getOpcode() == ISD::VECTOR_SHUFFLE)) {
21336 auto CanMergeInnerShuffle = [&](SDValue &SV0, SDValue &SV1,
21337 SmallVectorImpl<int> &Mask, bool LeftOp,
21338 bool Commute) {
21339 SDValue InnerN = Commute ? N1 : N0;
21340 SDValue Op0 = LeftOp ? Op00 : Op01;
21341 SDValue Op1 = LeftOp ? Op10 : Op11;
21342 if (Commute)
21343 std::swap(Op0, Op1);
21344 // Only accept the merged shuffle if we don't introduce undef elements,
21345 // or the inner shuffle already contained undef elements.
21346 auto *SVN0 = dyn_cast<ShuffleVectorSDNode>(Op0);
21347 return SVN0 && InnerN->isOnlyUserOf(SVN0) &&
21348 MergeInnerShuffle(Commute, SVN, SVN0, Op1, TLI, SV0, SV1,
21349 Mask) &&
21350 (llvm::any_of(SVN0->getMask(), [](int M) { return M < 0; }) ||
21351 llvm::none_of(Mask, [](int M) { return M < 0; }));
21352 };
21353
21354 // Ensure we don't increase the number of shuffles - we must merge a
21355 // shuffle from at least one of the LHS and RHS ops.
21356 bool MergedLeft = false;
21357 SDValue LeftSV0, LeftSV1;
21358 SmallVector<int, 4> LeftMask;
21359 if (CanMergeInnerShuffle(LeftSV0, LeftSV1, LeftMask, true, false) ||
21360 CanMergeInnerShuffle(LeftSV0, LeftSV1, LeftMask, true, true)) {
21361 MergedLeft = true;
21362 } else {
21363 LeftMask.assign(SVN->getMask().begin(), SVN->getMask().end());
21364 LeftSV0 = Op00, LeftSV1 = Op10;
21365 }
21366
21367 bool MergedRight = false;
21368 SDValue RightSV0, RightSV1;
21369 SmallVector<int, 4> RightMask;
21370 if (CanMergeInnerShuffle(RightSV0, RightSV1, RightMask, false, false) ||
21371 CanMergeInnerShuffle(RightSV0, RightSV1, RightMask, false, true)) {
21372 MergedRight = true;
21373 } else {
21374 RightMask.assign(SVN->getMask().begin(), SVN->getMask().end());
21375 RightSV0 = Op01, RightSV1 = Op11;
21376 }
21377
21378 if (MergedLeft || MergedRight) {
21379 SDLoc DL(N);
21380 SDValue LHS = DAG.getVectorShuffle(
21381 VT, DL, LeftSV0 ? LeftSV0 : DAG.getUNDEF(VT),
21382 LeftSV1 ? LeftSV1 : DAG.getUNDEF(VT), LeftMask);
21383 SDValue RHS = DAG.getVectorShuffle(
21384 VT, DL, RightSV0 ? RightSV0 : DAG.getUNDEF(VT),
21385 RightSV1 ? RightSV1 : DAG.getUNDEF(VT), RightMask);
21386 return DAG.getNode(SrcOpcode, DL, VT, LHS, RHS);
21387 }
21388 }
21389 }
21390 }
21391
21392 if (SDValue V = foldShuffleOfConcatUndefs(SVN, DAG))
21393 return V;
21394
21395 return SDValue();
21396}
21397
21398SDValue DAGCombiner::visitSCALAR_TO_VECTOR(SDNode *N) {
21399 SDValue InVal = N->getOperand(0);
21400 EVT VT = N->getValueType(0);
21401
21402 // Replace a SCALAR_TO_VECTOR(EXTRACT_VECTOR_ELT(V,C0)) pattern
21403 // with a VECTOR_SHUFFLE and possible truncate.
21404 if (InVal.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
21405 VT.isFixedLengthVector() &&
21406 InVal->getOperand(0).getValueType().isFixedLengthVector()) {
21407 SDValue InVec = InVal->getOperand(0);
21408 SDValue EltNo = InVal->getOperand(1);
21409 auto InVecT = InVec.getValueType();
21410 if (ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(EltNo)) {
21411 SmallVector<int, 8> NewMask(InVecT.getVectorNumElements(), -1);
21412 int Elt = C0->getZExtValue();
21413 NewMask[0] = Elt;
21414 // If we have an implicit truncate, do the truncate here as long as it's
21415 // legal; if it's not legal, skip this transform.
21416 if (VT.getScalarType() != InVal.getValueType() &&
21417 InVal.getValueType().isScalarInteger() &&
21418 isTypeLegal(VT.getScalarType())) {
21419 SDValue Val =
21420 DAG.getNode(ISD::TRUNCATE, SDLoc(InVal), VT.getScalarType(), InVal);
21421 return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), VT, Val);
21422 }
21423 if (VT.getScalarType() == InVecT.getScalarType() &&
21424 VT.getVectorNumElements() <= InVecT.getVectorNumElements()) {
21425 SDValue LegalShuffle =
21426 TLI.buildLegalVectorShuffle(InVecT, SDLoc(N), InVec,
21427 DAG.getUNDEF(InVecT), NewMask, DAG);
21428 if (LegalShuffle) {
21429 // If the initial vector is the correct size this shuffle is a
21430 // valid result.
21431 if (VT == InVecT)
21432 return LegalShuffle;
21433 // If not we must truncate the vector.
21434 if (VT.getVectorNumElements() != InVecT.getVectorNumElements()) {
21435 SDValue ZeroIdx = DAG.getVectorIdxConstant(0, SDLoc(N));
21436 EVT SubVT = EVT::getVectorVT(*DAG.getContext(),
21437 InVecT.getVectorElementType(),
21438 VT.getVectorNumElements());
21439 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N), SubVT,
21440 LegalShuffle, ZeroIdx);
21441 }
21442 }
21443 }
21444 }
21445 }
21446
21447 return SDValue();
21448}
21449
21450SDValue DAGCombiner::visitINSERT_SUBVECTOR(SDNode *N) {
21451 EVT VT = N->getValueType(0);
21452 SDValue N0 = N->getOperand(0);
21453 SDValue N1 = N->getOperand(1);
21454 SDValue N2 = N->getOperand(2);
21455 uint64_t InsIdx = N->getConstantOperandVal(2);
21456
21457 // If inserting an UNDEF, just return the original vector.
21458 if (N1.isUndef())
21459 return N0;
21460
21461 // If this is an insert of an extracted vector into an undef vector, we can
21462 // just use the input to the extract.
21463 if (N0.isUndef() && N1.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
21464 N1.getOperand(1) == N2 && N1.getOperand(0).getValueType() == VT)
21465 return N1.getOperand(0);
21466
21467 // If we are inserting a bitcast value into an undef, with the same
21468 // number of elements, just use the bitcast input of the extract.
21469 // i.e. INSERT_SUBVECTOR UNDEF (BITCAST N1) N2 ->
21470 // BITCAST (INSERT_SUBVECTOR UNDEF N1 N2)
21471 if (N0.isUndef() && N1.getOpcode() == ISD::BITCAST &&
21472 N1.getOperand(0).getOpcode() == ISD::EXTRACT_SUBVECTOR &&
21473 N1.getOperand(0).getOperand(1) == N2 &&
21474 N1.getOperand(0).getOperand(0).getValueType().getVectorElementCount() ==
21475 VT.getVectorElementCount() &&
21476 N1.getOperand(0).getOperand(0).getValueType().getSizeInBits() ==
21477 VT.getSizeInBits()) {
21478 return DAG.getBitcast(VT, N1.getOperand(0).getOperand(0));
21479 }
21480
21481 // If both N0 and N1 are bitcast values on which insert_subvector
21482 // would make sense, pull the bitcast through.
21483 // i.e. INSERT_SUBVECTOR (BITCAST N0) (BITCAST N1) N2 ->
21484 // BITCAST (INSERT_SUBVECTOR N0 N1 N2)
21485 if (N0.getOpcode() == ISD::BITCAST && N1.getOpcode() == ISD::BITCAST) {
21486 SDValue CN0 = N0.getOperand(0);
21487 SDValue CN1 = N1.getOperand(0);
21488 EVT CN0VT = CN0.getValueType();
21489 EVT CN1VT = CN1.getValueType();
21490 if (CN0VT.isVector() && CN1VT.isVector() &&
21491 CN0VT.getVectorElementType() == CN1VT.getVectorElementType() &&
21492 CN0VT.getVectorElementCount() == VT.getVectorElementCount()) {
21493 SDValue NewINSERT = DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N),
21494 CN0.getValueType(), CN0, CN1, N2);
21495 return DAG.getBitcast(VT, NewINSERT);
21496 }
21497 }
21498
21499 // Combine INSERT_SUBVECTORs where we are inserting to the same index.
21500 // INSERT_SUBVECTOR( INSERT_SUBVECTOR( Vec, SubOld, Idx ), SubNew, Idx )
21501 // --> INSERT_SUBVECTOR( Vec, SubNew, Idx )
21502 if (N0.getOpcode() == ISD::INSERT_SUBVECTOR &&
21503 N0.getOperand(1).getValueType() == N1.getValueType() &&
21504 N0.getOperand(2) == N2)
21505 return DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N), VT, N0.getOperand(0),
21506 N1, N2);
21507
21508 // Eliminate an intermediate insert into an undef vector:
21509 // insert_subvector undef, (insert_subvector undef, X, 0), N2 -->
21510 // insert_subvector undef, X, N2
21511 if (N0.isUndef() && N1.getOpcode() == ISD::INSERT_SUBVECTOR &&
21512 N1.getOperand(0).isUndef() && isNullConstant(N1.getOperand(2)))
21513 return DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N), VT, N0,
21514 N1.getOperand(1), N2);
21515
21516 // Push subvector bitcasts to the output, adjusting the index as we go.
21517 // insert_subvector(bitcast(v), bitcast(s), c1)
21518 // -> bitcast(insert_subvector(v, s, c2))
21519 if ((N0.isUndef() || N0.getOpcode() == ISD::BITCAST) &&
21520 N1.getOpcode() == ISD::BITCAST) {
21521 SDValue N0Src = peekThroughBitcasts(N0);
21522 SDValue N1Src = peekThroughBitcasts(N1);
21523 EVT N0SrcSVT = N0Src.getValueType().getScalarType();
21524 EVT N1SrcSVT = N1Src.getValueType().getScalarType();
21525 if ((N0.isUndef() || N0SrcSVT == N1SrcSVT) &&
21526 N0Src.getValueType().isVector() && N1Src.getValueType().isVector()) {
21527 EVT NewVT;
21528 SDLoc DL(N);
21529 SDValue NewIdx;
21530 LLVMContext &Ctx = *DAG.getContext();
21531 ElementCount NumElts = VT.getVectorElementCount();
21532 unsigned EltSizeInBits = VT.getScalarSizeInBits();
21533 if ((EltSizeInBits % N1SrcSVT.getSizeInBits()) == 0) {
21534 unsigned Scale = EltSizeInBits / N1SrcSVT.getSizeInBits();
21535 NewVT = EVT::getVectorVT(Ctx, N1SrcSVT, NumElts * Scale);
21536 NewIdx = DAG.getVectorIdxConstant(InsIdx * Scale, DL);
21537 } else if ((N1SrcSVT.getSizeInBits() % EltSizeInBits) == 0) {
21538 unsigned Scale = N1SrcSVT.getSizeInBits() / EltSizeInBits;
21539 if (NumElts.isKnownMultipleOf(Scale) && (InsIdx % Scale) == 0) {
21540 NewVT = EVT::getVectorVT(Ctx, N1SrcSVT,
21541 NumElts.divideCoefficientBy(Scale));
21542 NewIdx = DAG.getVectorIdxConstant(InsIdx / Scale, DL);
21543 }
21544 }
21545 if (NewIdx && hasOperation(ISD::INSERT_SUBVECTOR, NewVT)) {
21546 SDValue Res = DAG.getBitcast(NewVT, N0Src);
21547 Res = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, NewVT, Res, N1Src, NewIdx);
21548 return DAG.getBitcast(VT, Res);
21549 }
21550 }
21551 }
21552
21553 // Canonicalize insert_subvector dag nodes.
21554 // Example:
21555 // (insert_subvector (insert_subvector A, Idx0), Idx1)
21556 // -> (insert_subvector (insert_subvector A, Idx1), Idx0)
21557 if (N0.getOpcode() == ISD::INSERT_SUBVECTOR && N0.hasOneUse() &&
21558 N1.getValueType() == N0.getOperand(1).getValueType()) {
21559 unsigned OtherIdx = N0.getConstantOperandVal(2);
21560 if (InsIdx < OtherIdx) {
21561 // Swap nodes.
21562 SDValue NewOp = DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N), VT,
21563 N0.getOperand(0), N1, N2);
21564 AddToWorklist(NewOp.getNode());
21565 return DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N0.getNode()),
21566 VT, NewOp, N0.getOperand(1), N0.getOperand(2));
21567 }
21568 }
21569
21570 // If the input vector is a concatenation, and the insert replaces
21571 // one of the pieces, we can optimize into a single concat_vectors.
21572 if (N0.getOpcode() == ISD::CONCAT_VECTORS && N0.hasOneUse() &&
21573 N0.getOperand(0).getValueType() == N1.getValueType() &&
21574 N0.getOperand(0).getValueType().isScalableVector() ==
21575 N1.getValueType().isScalableVector()) {
21576 unsigned Factor = N1.getValueType().getVectorMinNumElements();
21577 SmallVector<SDValue, 8> Ops(N0->op_begin(), N0->op_end());
21578 Ops[InsIdx / Factor] = N1;
21579 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, Ops);
21580 }
21581
21582 // Simplify source operands based on insertion.
21583 if (SimplifyDemandedVectorElts(SDValue(N, 0)))
21584 return SDValue(N, 0);
21585
21586 return SDValue();
21587}
21588
21589SDValue DAGCombiner::visitFP_TO_FP16(SDNode *N) {
21590 SDValue N0 = N->getOperand(0);
21591
21592 // fold (fp_to_fp16 (fp16_to_fp op)) -> op
21593 if (N0->getOpcode() == ISD::FP16_TO_FP)
21594 return N0->getOperand(0);
21595
21596 return SDValue();
21597}
21598
21599SDValue DAGCombiner::visitFP16_TO_FP(SDNode *N) {
21600 SDValue N0 = N->getOperand(0);
21601
21602 // fold fp16_to_fp(op & 0xffff) -> fp16_to_fp(op)
21603 if (!TLI.shouldKeepZExtForFP16Conv() && N0->getOpcode() == ISD::AND) {
21604 ConstantSDNode *AndConst = getAsNonOpaqueConstant(N0.getOperand(1));
21605 if (AndConst && AndConst->getAPIntValue() == 0xffff) {
21606 return DAG.getNode(ISD::FP16_TO_FP, SDLoc(N), N->getValueType(0),
21607 N0.getOperand(0));
21608 }
21609 }
21610
21611 return SDValue();
21612}
21613
21614SDValue DAGCombiner::visitVECREDUCE(SDNode *N) {
21615 SDValue N0 = N->getOperand(0);
21616 EVT VT = N0.getValueType();
21617 unsigned Opcode = N->getOpcode();
21618
21619 // VECREDUCE over 1-element vector is just an extract.
21620 if (VT.getVectorElementCount().isScalar()) {
21621 SDLoc dl(N);
21622 SDValue Res =
21623 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT.getVectorElementType(), N0,
21624 DAG.getVectorIdxConstant(0, dl));
21625 if (Res.getValueType() != N->getValueType(0))
21626 Res = DAG.getNode(ISD::ANY_EXTEND, dl, N->getValueType(0), Res);
21627 return Res;
21628 }
21629
21630  // On a boolean vector an and/or reduction is the same as a umin/umax
21631  // reduction. Convert them if the latter is legal while the former isn't.
21632 if (Opcode == ISD::VECREDUCE_AND || Opcode == ISD::VECREDUCE_OR) {
21633 unsigned NewOpcode = Opcode == ISD::VECREDUCE_AND
21634 ? ISD::VECREDUCE_UMIN : ISD::VECREDUCE_UMAX;
21635 if (!TLI.isOperationLegalOrCustom(Opcode, VT) &&
21636 TLI.isOperationLegalOrCustom(NewOpcode, VT) &&
21637 DAG.ComputeNumSignBits(N0) == VT.getScalarSizeInBits())
21638 return DAG.getNode(NewOpcode, SDLoc(N), N->getValueType(0), N0);
21639 }
21640
21641 return SDValue();
21642}
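// Illustrative sketch (not part of this file) of why the rewrite above is
// sound: once ComputeNumSignBits proves every lane is 0x00 or 0xff (for i8),
// AND coincides with UMIN and, symmetrically, OR with UMAX.
static unsigned char andViaUMinSketch(unsigned char A, unsigned char B) {
  // For A, B in {0x00, 0xff}: (A & B) == (A < B ? A : B).
  return A < B ? A : B;
}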
21643
21644/// Returns a vector_shuffle if it is able to transform an AND to a
21645/// vector_shuffle with the destination vector and a zero vector.
21646/// e.g. AND V, <0xffffffff, 0, 0xffffffff, 0> ==>
21647/// vector_shuffle V, Zero, <0, 4, 2, 4>
21648SDValue DAGCombiner::XformToShuffleWithZero(SDNode *N) {
21649  assert(N->getOpcode() == ISD::AND && "Unexpected opcode!");
21650
21651 EVT VT = N->getValueType(0);
21652 SDValue LHS = N->getOperand(0);
21653 SDValue RHS = peekThroughBitcasts(N->getOperand(1));
21654 SDLoc DL(N);
21655
21656 // Make sure we're not running after operation legalization where it
21657 // may have custom lowered the vector shuffles.
21658 if (LegalOperations)
21659 return SDValue();
21660
21661 if (RHS.getOpcode() != ISD::BUILD_VECTOR)
21662 return SDValue();
21663
21664 EVT RVT = RHS.getValueType();
21665 unsigned NumElts = RHS.getNumOperands();
21666
21667  // Attempt to create a valid clear mask, splitting the mask into
21668  // sub-elements and checking to see if each is all zeros or all ones,
21669  // suitable for shuffle masking.
21670 auto BuildClearMask = [&](int Split) {
21671 int NumSubElts = NumElts * Split;
21672 int NumSubBits = RVT.getScalarSizeInBits() / Split;
21673
21674 SmallVector<int, 8> Indices;
21675 for (int i = 0; i != NumSubElts; ++i) {
21676 int EltIdx = i / Split;
21677 int SubIdx = i % Split;
21678 SDValue Elt = RHS.getOperand(EltIdx);
21679 // X & undef --> 0 (not undef). So this lane must be converted to choose
21680 // from the zero constant vector (same as if the element had all 0-bits).
21681 if (Elt.isUndef()) {
21682 Indices.push_back(i + NumSubElts);
21683 continue;
21684 }
21685
21686 APInt Bits;
21687 if (isa<ConstantSDNode>(Elt))
21688 Bits = cast<ConstantSDNode>(Elt)->getAPIntValue();
21689 else if (isa<ConstantFPSDNode>(Elt))
21690 Bits = cast<ConstantFPSDNode>(Elt)->getValueAPF().bitcastToAPInt();
21691 else
21692 return SDValue();
21693
21694 // Extract the sub element from the constant bit mask.
21695 if (DAG.getDataLayout().isBigEndian())
21696 Bits = Bits.extractBits(NumSubBits, (Split - SubIdx - 1) * NumSubBits);
21697 else
21698 Bits = Bits.extractBits(NumSubBits, SubIdx * NumSubBits);
21699
21700 if (Bits.isAllOnesValue())
21701 Indices.push_back(i);
21702 else if (Bits == 0)
21703 Indices.push_back(i + NumSubElts);
21704 else
21705 return SDValue();
21706 }
21707
21708 // Let's see if the target supports this vector_shuffle.
21709 EVT ClearSVT = EVT::getIntegerVT(*DAG.getContext(), NumSubBits);
21710 EVT ClearVT = EVT::getVectorVT(*DAG.getContext(), ClearSVT, NumSubElts);
21711 if (!TLI.isVectorClearMaskLegal(Indices, ClearVT))
21712 return SDValue();
21713
21714 SDValue Zero = DAG.getConstant(0, DL, ClearVT);
21715 return DAG.getBitcast(VT, DAG.getVectorShuffle(ClearVT, DL,
21716 DAG.getBitcast(ClearVT, LHS),
21717 Zero, Indices));
21718 };
21719
21720 // Determine maximum split level (byte level masking).
21721 int MaxSplit = 1;
21722 if (RVT.getScalarSizeInBits() % 8 == 0)
21723 MaxSplit = RVT.getScalarSizeInBits() / 8;
21724
21725 for (int Split = 1; Split <= MaxSplit; ++Split)
21726 if (RVT.getScalarSizeInBits() % Split == 0)
21727 if (SDValue S = BuildClearMask(Split))
21728 return S;
21729
21730 return SDValue();
21731}
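// Illustrative sketch of the BuildClearMask classification at byte
// granularity (Split == 4 over a 32-bit element), assuming little-endian
// layout; not part of DAGCombiner. Each byte of the AND mask must be
// all-ones (keep the lane) or all-zeros (take the lane from the zero
// vector); a mixed byte defeats the transform.
static bool classifyByteLanesSketch(unsigned Mask, bool KeepLane[4]) {
  for (int i = 0; i != 4; ++i) {
    unsigned Byte = (Mask >> (8 * i)) & 0xffu;
    if (Byte != 0 && Byte != 0xffu)
      return false; // mixed bits: not a valid clear mask
    KeepLane[i] = (Byte == 0xffu);
  }
  return true;
}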
21732
21733/// If a vector binop is performed on splat values, it may be profitable to
21734/// extract, scalarize, and insert/splat.
21735static SDValue scalarizeBinOpOfSplats(SDNode *N, SelectionDAG &DAG) {
21736 SDValue N0 = N->getOperand(0);
21737 SDValue N1 = N->getOperand(1);
21738 unsigned Opcode = N->getOpcode();
21739 EVT VT = N->getValueType(0);
21740 EVT EltVT = VT.getVectorElementType();
21741 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
21742
21743 // TODO: Remove/replace the extract cost check? If the elements are available
21744 // as scalars, then there may be no extract cost. Should we ask if
21745 // inserting a scalar back into a vector is cheap instead?
21746 int Index0, Index1;
21747 SDValue Src0 = DAG.getSplatSourceVector(N0, Index0);
21748 SDValue Src1 = DAG.getSplatSourceVector(N1, Index1);
21749 if (!Src0 || !Src1 || Index0 != Index1 ||
21750 Src0.getValueType().getVectorElementType() != EltVT ||
21751 Src1.getValueType().getVectorElementType() != EltVT ||
21752 !TLI.isExtractVecEltCheap(VT, Index0) ||
21753 !TLI.isOperationLegalOrCustom(Opcode, EltVT))
21754 return SDValue();
21755
21756 SDLoc DL(N);
21757 SDValue IndexC = DAG.getVectorIdxConstant(Index0, DL);
21758 SDValue X = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Src0, IndexC);
21759 SDValue Y = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Src1, IndexC);
21760 SDValue ScalarBO = DAG.getNode(Opcode, DL, EltVT, X, Y, N->getFlags());
21761
21762 // If all lanes but 1 are undefined, no need to splat the scalar result.
21763 // TODO: Keep track of undefs and use that info in the general case.
21764 if (N0.getOpcode() == ISD::BUILD_VECTOR && N0.getOpcode() == N1.getOpcode() &&
21765 count_if(N0->ops(), [](SDValue V) { return !V.isUndef(); }) == 1 &&
21766 count_if(N1->ops(), [](SDValue V) { return !V.isUndef(); }) == 1) {
21767 // bo (build_vec ..undef, X, undef...), (build_vec ..undef, Y, undef...) -->
21768 // build_vec ..undef, (bo X, Y), undef...
21769 SmallVector<SDValue, 8> Ops(VT.getVectorNumElements(), DAG.getUNDEF(EltVT));
21770 Ops[Index0] = ScalarBO;
21771 return DAG.getBuildVector(VT, DL, Ops);
21772 }
21773
21774 // bo (splat X, Index), (splat Y, Index) --> splat (bo X, Y), Index
21775 SmallVector<SDValue, 8> Ops(VT.getVectorNumElements(), ScalarBO);
21776 return DAG.getBuildVector(VT, DL, Ops);
21777}
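// Illustrative sketch of the splat rewrite above in scalar form for a v4i32
// add (not part of this file): one scalar op replaces the vector op, and the
// result is re-splatted.
static void splatBinOpSketch(int X, int Y, int Out[4]) {
  int S = X + Y; // bo (splat X), (splat Y) --> splat (bo X, Y)
  Out[0] = Out[1] = Out[2] = Out[3] = S;
}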
21778
21779/// Visit a binary vector operation, like ADD.
21780SDValue DAGCombiner::SimplifyVBinOp(SDNode *N) {
21781  assert(N->getValueType(0).isVector() &&
21782         "SimplifyVBinOp only works on vectors!");
21783
21784 SDValue LHS = N->getOperand(0);
21785 SDValue RHS = N->getOperand(1);
21786 SDValue Ops[] = {LHS, RHS};
21787 EVT VT = N->getValueType(0);
21788 unsigned Opcode = N->getOpcode();
21789 SDNodeFlags Flags = N->getFlags();
21790
21791 // See if we can constant fold the vector operation.
21792 if (SDValue Fold = DAG.FoldConstantVectorArithmetic(
21793 Opcode, SDLoc(LHS), LHS.getValueType(), Ops, N->getFlags()))
21794 return Fold;
21795
21796 // Move unary shuffles with identical masks after a vector binop:
21797 // VBinOp (shuffle A, Undef, Mask), (shuffle B, Undef, Mask))
21798 // --> shuffle (VBinOp A, B), Undef, Mask
21799 // This does not require type legality checks because we are creating the
21800 // same types of operations that are in the original sequence. We do have to
21801 // restrict ops like integer div that have immediate UB (eg, div-by-zero)
21802 // though. This code is adapted from the identical transform in instcombine.
21803 if (Opcode != ISD::UDIV && Opcode != ISD::SDIV &&
21804 Opcode != ISD::UREM && Opcode != ISD::SREM &&
21805 Opcode != ISD::UDIVREM && Opcode != ISD::SDIVREM) {
21806 auto *Shuf0 = dyn_cast<ShuffleVectorSDNode>(LHS);
21807 auto *Shuf1 = dyn_cast<ShuffleVectorSDNode>(RHS);
21808 if (Shuf0 && Shuf1 && Shuf0->getMask().equals(Shuf1->getMask()) &&
21809 LHS.getOperand(1).isUndef() && RHS.getOperand(1).isUndef() &&
21810 (LHS.hasOneUse() || RHS.hasOneUse() || LHS == RHS)) {
21811 SDLoc DL(N);
21812 SDValue NewBinOp = DAG.getNode(Opcode, DL, VT, LHS.getOperand(0),
21813 RHS.getOperand(0), Flags);
21814 SDValue UndefV = LHS.getOperand(1);
21815 return DAG.getVectorShuffle(VT, DL, NewBinOp, UndefV, Shuf0->getMask());
21816 }
21817
21818 // Try to sink a splat shuffle after a binop with a uniform constant.
21819 // This is limited to cases where neither the shuffle nor the constant have
21820 // undefined elements because that could be poison-unsafe or inhibit
21821 // demanded elements analysis. It is further limited to not change a splat
21822 // of an inserted scalar because that may be optimized better by
21823 // load-folding or other target-specific behaviors.
21824 if (isConstOrConstSplat(RHS) && Shuf0 && is_splat(Shuf0->getMask()) &&
21825 Shuf0->hasOneUse() && Shuf0->getOperand(1).isUndef() &&
21826 Shuf0->getOperand(0).getOpcode() != ISD::INSERT_VECTOR_ELT) {
21827 // binop (splat X), (splat C) --> splat (binop X, C)
21828 SDLoc DL(N);
21829 SDValue X = Shuf0->getOperand(0);
21830 SDValue NewBinOp = DAG.getNode(Opcode, DL, VT, X, RHS, Flags);
21831 return DAG.getVectorShuffle(VT, DL, NewBinOp, DAG.getUNDEF(VT),
21832 Shuf0->getMask());
21833 }
21834 if (isConstOrConstSplat(LHS) && Shuf1 && is_splat(Shuf1->getMask()) &&
21835 Shuf1->hasOneUse() && Shuf1->getOperand(1).isUndef() &&
21836 Shuf1->getOperand(0).getOpcode() != ISD::INSERT_VECTOR_ELT) {
21837 // binop (splat C), (splat X) --> splat (binop C, X)
21838 SDLoc DL(N);
21839 SDValue X = Shuf1->getOperand(0);
21840 SDValue NewBinOp = DAG.getNode(Opcode, DL, VT, LHS, X, Flags);
21841 return DAG.getVectorShuffle(VT, DL, NewBinOp, DAG.getUNDEF(VT),
21842 Shuf1->getMask());
21843 }
21844 }
21845
21846 // The following pattern is likely to emerge with vector reduction ops. Moving
21847 // the binary operation ahead of insertion may allow using a narrower vector
21848 // instruction that has better performance than the wide version of the op:
21849 // VBinOp (ins undef, X, Z), (ins undef, Y, Z) --> ins VecC, (VBinOp X, Y), Z
21850 if (LHS.getOpcode() == ISD::INSERT_SUBVECTOR && LHS.getOperand(0).isUndef() &&
21851 RHS.getOpcode() == ISD::INSERT_SUBVECTOR && RHS.getOperand(0).isUndef() &&
21852 LHS.getOperand(2) == RHS.getOperand(2) &&
21853 (LHS.hasOneUse() || RHS.hasOneUse())) {
21854 SDValue X = LHS.getOperand(1);
21855 SDValue Y = RHS.getOperand(1);
21856 SDValue Z = LHS.getOperand(2);
21857 EVT NarrowVT = X.getValueType();
21858 if (NarrowVT == Y.getValueType() &&
21859 TLI.isOperationLegalOrCustomOrPromote(Opcode, NarrowVT,
21860 LegalOperations)) {
21861 // (binop undef, undef) may not return undef, so compute that result.
21862 SDLoc DL(N);
21863 SDValue VecC =
21864 DAG.getNode(Opcode, DL, VT, DAG.getUNDEF(VT), DAG.getUNDEF(VT));
21865 SDValue NarrowBO = DAG.getNode(Opcode, DL, NarrowVT, X, Y);
21866 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, VecC, NarrowBO, Z);
21867 }
21868 }
21869
21870 // Make sure all but the first op are undef or constant.
21871 auto ConcatWithConstantOrUndef = [](SDValue Concat) {
21872 return Concat.getOpcode() == ISD::CONCAT_VECTORS &&
21873 all_of(drop_begin(Concat->ops()), [](const SDValue &Op) {
21874 return Op.isUndef() ||
21875 ISD::isBuildVectorOfConstantSDNodes(Op.getNode());
21876 });
21877 };
21878
21879 // The following pattern is likely to emerge with vector reduction ops. Moving
21880 // the binary operation ahead of the concat may allow using a narrower vector
21881 // instruction that has better performance than the wide version of the op:
21882 // VBinOp (concat X, undef/constant), (concat Y, undef/constant) -->
21883 // concat (VBinOp X, Y), VecC
21884 if (ConcatWithConstantOrUndef(LHS) && ConcatWithConstantOrUndef(RHS) &&
21885 (LHS.hasOneUse() || RHS.hasOneUse())) {
21886 EVT NarrowVT = LHS.getOperand(0).getValueType();
21887 if (NarrowVT == RHS.getOperand(0).getValueType() &&
21888 TLI.isOperationLegalOrCustomOrPromote(Opcode, NarrowVT)) {
21889 SDLoc DL(N);
21890 unsigned NumOperands = LHS.getNumOperands();
21891 SmallVector<SDValue, 4> ConcatOps;
21892 for (unsigned i = 0; i != NumOperands; ++i) {
21893        // This constant folds for operands 1 and up.
21894 ConcatOps.push_back(DAG.getNode(Opcode, DL, NarrowVT, LHS.getOperand(i),
21895 RHS.getOperand(i)));
21896 }
21897
21898 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps);
21899 }
21900 }
21901
21902 if (SDValue V = scalarizeBinOpOfSplats(N, DAG))
21903 return V;
21904
21905 return SDValue();
21906}
21907
21908SDValue DAGCombiner::SimplifySelect(const SDLoc &DL, SDValue N0, SDValue N1,
21909 SDValue N2) {
21910  assert(N0.getOpcode() == ISD::SETCC && "First argument must be a SetCC node!");
21911
21912 SDValue SCC = SimplifySelectCC(DL, N0.getOperand(0), N0.getOperand(1), N1, N2,
21913 cast<CondCodeSDNode>(N0.getOperand(2))->get());
21914
21915 // If we got a simplified select_cc node back from SimplifySelectCC, then
21916 // break it down into a new SETCC node, and a new SELECT node, and then return
21917 // the SELECT node, since we were called with a SELECT node.
21918 if (SCC.getNode()) {
21919 // Check to see if we got a select_cc back (to turn into setcc/select).
21920 // Otherwise, just return whatever node we got back, like fabs.
21921 if (SCC.getOpcode() == ISD::SELECT_CC) {
21922 const SDNodeFlags Flags = N0.getNode()->getFlags();
21923 SDValue SETCC = DAG.getNode(ISD::SETCC, SDLoc(N0),
21924 N0.getValueType(),
21925 SCC.getOperand(0), SCC.getOperand(1),
21926 SCC.getOperand(4), Flags);
21927 AddToWorklist(SETCC.getNode());
21928 SDValue SelectNode = DAG.getSelect(SDLoc(SCC), SCC.getValueType(), SETCC,
21929 SCC.getOperand(2), SCC.getOperand(3));
21930 SelectNode->setFlags(Flags);
21931 return SelectNode;
21932 }
21933
21934 return SCC;
21935 }
21936 return SDValue();
21937}
21938
21939/// Given a SELECT or a SELECT_CC node, where LHS and RHS are the two values
21940/// being selected between, see if we can simplify the select. Callers of this
21941/// should assume that TheSelect is deleted if this returns true. As such, they
21942/// should return the appropriate thing (e.g. the node) back to the top-level of
21943/// the DAG combiner loop to avoid it being looked at.
21944bool DAGCombiner::SimplifySelectOps(SDNode *TheSelect, SDValue LHS,
21945 SDValue RHS) {
21946 // fold (select (setcc x, [+-]0.0, *lt), NaN, (fsqrt x))
21947 // The select + setcc is redundant, because fsqrt returns NaN for X < 0.
21948 if (const ConstantFPSDNode *NaN = isConstOrConstSplatFP(LHS)) {
21949 if (NaN->isNaN() && RHS.getOpcode() == ISD::FSQRT) {
21950 // We have: (select (setcc ?, ?, ?), NaN, (fsqrt ?))
21951 SDValue Sqrt = RHS;
21952 ISD::CondCode CC;
21953 SDValue CmpLHS;
21954 const ConstantFPSDNode *Zero = nullptr;
21955
21956 if (TheSelect->getOpcode() == ISD::SELECT_CC) {
21957 CC = cast<CondCodeSDNode>(TheSelect->getOperand(4))->get();
21958 CmpLHS = TheSelect->getOperand(0);
21959 Zero = isConstOrConstSplatFP(TheSelect->getOperand(1));
21960 } else {
21961 // SELECT or VSELECT
21962 SDValue Cmp = TheSelect->getOperand(0);
21963 if (Cmp.getOpcode() == ISD::SETCC) {
21964 CC = cast<CondCodeSDNode>(Cmp.getOperand(2))->get();
21965 CmpLHS = Cmp.getOperand(0);
21966 Zero = isConstOrConstSplatFP(Cmp.getOperand(1));
21967 }
21968 }
21969 if (Zero && Zero->isZero() &&
21970 Sqrt.getOperand(0) == CmpLHS && (CC == ISD::SETOLT ||
21971 CC == ISD::SETULT || CC == ISD::SETLT)) {
21972 // We have: (select (setcc x, [+-]0.0, *lt), NaN, (fsqrt x))
21973 CombineTo(TheSelect, Sqrt);
21974 return true;
21975 }
21976 }
21977 }
21978  // Cannot simplify a select with a vector condition.
21979 if (TheSelect->getOperand(0).getValueType().isVector()) return false;
21980
21981 // If this is a select from two identical things, try to pull the operation
21982 // through the select.
21983 if (LHS.getOpcode() != RHS.getOpcode() ||
21984 !LHS.hasOneUse() || !RHS.hasOneUse())
21985 return false;
21986
21987 // If this is a load and the token chain is identical, replace the select
21988 // of two loads with a load through a select of the address to load from.
21989 // This triggers in things like "select bool X, 10.0, 123.0" after the FP
21990 // constants have been dropped into the constant pool.
21991 if (LHS.getOpcode() == ISD::LOAD) {
21992 LoadSDNode *LLD = cast<LoadSDNode>(LHS);
21993 LoadSDNode *RLD = cast<LoadSDNode>(RHS);
21994
21995 // Token chains must be identical.
21996 if (LHS.getOperand(0) != RHS.getOperand(0) ||
21997 // Do not let this transformation reduce the number of volatile loads.
21998 // Be conservative for atomics for the moment
21999 // TODO: This does appear to be legal for unordered atomics (see D66309)
22000 !LLD->isSimple() || !RLD->isSimple() ||
22001 // FIXME: If either is a pre/post inc/dec load,
22002 // we'd need to split out the address adjustment.
22003 LLD->isIndexed() || RLD->isIndexed() ||
22004 // If this is an EXTLOAD, the VT's must match.
22005 LLD->getMemoryVT() != RLD->getMemoryVT() ||
22006 // If this is an EXTLOAD, the kind of extension must match.
22007 (LLD->getExtensionType() != RLD->getExtensionType() &&
22008 // The only exception is if one of the extensions is anyext.
22009 LLD->getExtensionType() != ISD::EXTLOAD &&
22010 RLD->getExtensionType() != ISD::EXTLOAD) ||
22011 // FIXME: this discards src value information. This is
22012 // over-conservative. It would be beneficial to be able to remember
22013 // both potential memory locations. Since we are discarding
22014 // src value info, don't do the transformation if the memory
22015 // locations are not in the default address space.
22016 LLD->getPointerInfo().getAddrSpace() != 0 ||
22017 RLD->getPointerInfo().getAddrSpace() != 0 ||
22018 // We can't produce a CMOV of a TargetFrameIndex since we won't
22019 // generate the address generation required.
22020 LLD->getBasePtr().getOpcode() == ISD::TargetFrameIndex ||
22021 RLD->getBasePtr().getOpcode() == ISD::TargetFrameIndex ||
22022 !TLI.isOperationLegalOrCustom(TheSelect->getOpcode(),
22023 LLD->getBasePtr().getValueType()))
22024 return false;
22025
22026 // The loads must not depend on one another.
22027 if (LLD->isPredecessorOf(RLD) || RLD->isPredecessorOf(LLD))
22028 return false;
22029
22030 // Check that the select condition doesn't reach either load. If so,
22031 // folding this will induce a cycle into the DAG. If not, this is safe to
22032 // xform, so create a select of the addresses.
22033
22034 SmallPtrSet<const SDNode *, 32> Visited;
22035 SmallVector<const SDNode *, 16> Worklist;
22036
22037 // Always fail if LLD and RLD are not independent. TheSelect is a
22038 // predecessor to all Nodes in question so we need not search past it.
22039
22040 Visited.insert(TheSelect);
22041 Worklist.push_back(LLD);
22042 Worklist.push_back(RLD);
22043
22044 if (SDNode::hasPredecessorHelper(LLD, Visited, Worklist) ||
22045 SDNode::hasPredecessorHelper(RLD, Visited, Worklist))
22046 return false;
22047
22048 SDValue Addr;
22049 if (TheSelect->getOpcode() == ISD::SELECT) {
22050 // We cannot do this optimization if any pair of {RLD, LLD} is a
22051 // predecessor to {RLD, LLD, CondNode}. As we've already compared the
22052 // Loads, we only need to check if CondNode is a successor to one of the
22053 // loads. We can further avoid this if there's no use of their chain
22054 // value.
22055 SDNode *CondNode = TheSelect->getOperand(0).getNode();
22056 Worklist.push_back(CondNode);
22057
22058 if ((LLD->hasAnyUseOfValue(1) &&
22059 SDNode::hasPredecessorHelper(LLD, Visited, Worklist)) ||
22060 (RLD->hasAnyUseOfValue(1) &&
22061 SDNode::hasPredecessorHelper(RLD, Visited, Worklist)))
22062 return false;
22063
22064 Addr = DAG.getSelect(SDLoc(TheSelect),
22065 LLD->getBasePtr().getValueType(),
22066 TheSelect->getOperand(0), LLD->getBasePtr(),
22067 RLD->getBasePtr());
22068 } else { // Otherwise SELECT_CC
22069 // We cannot do this optimization if any pair of {RLD, LLD} is a
22070 // predecessor to {RLD, LLD, CondLHS, CondRHS}. As we've already compared
22071 // the Loads, we only need to check if CondLHS/CondRHS is a successor to
22072 // one of the loads. We can further avoid this if there's no use of their
22073 // chain value.
22074
22075 SDNode *CondLHS = TheSelect->getOperand(0).getNode();
22076 SDNode *CondRHS = TheSelect->getOperand(1).getNode();
22077 Worklist.push_back(CondLHS);
22078 Worklist.push_back(CondRHS);
22079
22080 if ((LLD->hasAnyUseOfValue(1) &&
22081 SDNode::hasPredecessorHelper(LLD, Visited, Worklist)) ||
22082 (RLD->hasAnyUseOfValue(1) &&
22083 SDNode::hasPredecessorHelper(RLD, Visited, Worklist)))
22084 return false;
22085
22086 Addr = DAG.getNode(ISD::SELECT_CC, SDLoc(TheSelect),
22087 LLD->getBasePtr().getValueType(),
22088 TheSelect->getOperand(0),
22089 TheSelect->getOperand(1),
22090 LLD->getBasePtr(), RLD->getBasePtr(),
22091 TheSelect->getOperand(4));
22092 }
22093
22094 SDValue Load;
22095 // It is safe to replace the two loads if they have different alignments,
22096 // but the new load must be the minimum (most restrictive) alignment of the
22097 // inputs.
22098 Align Alignment = std::min(LLD->getAlign(), RLD->getAlign());
22099 MachineMemOperand::Flags MMOFlags = LLD->getMemOperand()->getFlags();
22100 if (!RLD->isInvariant())
22101 MMOFlags &= ~MachineMemOperand::MOInvariant;
22102 if (!RLD->isDereferenceable())
22103 MMOFlags &= ~MachineMemOperand::MODereferenceable;
22104 if (LLD->getExtensionType() == ISD::NON_EXTLOAD) {
22105 // FIXME: Discards pointer and AA info.
22106 Load = DAG.getLoad(TheSelect->getValueType(0), SDLoc(TheSelect),
22107 LLD->getChain(), Addr, MachinePointerInfo(), Alignment,
22108 MMOFlags);
22109 } else {
22110 // FIXME: Discards pointer and AA info.
22111 Load = DAG.getExtLoad(
22112 LLD->getExtensionType() == ISD::EXTLOAD ? RLD->getExtensionType()
22113 : LLD->getExtensionType(),
22114 SDLoc(TheSelect), TheSelect->getValueType(0), LLD->getChain(), Addr,
22115 MachinePointerInfo(), LLD->getMemoryVT(), Alignment, MMOFlags);
22116 }
22117
22118 // Users of the select now use the result of the load.
22119 CombineTo(TheSelect, Load);
22120
22121 // Users of the old loads now use the new load's chain. We know the
22122 // old-load value is dead now.
22123 CombineTo(LHS.getNode(), Load.getValue(0), Load.getValue(1));
22124 CombineTo(RHS.getNode(), Load.getValue(0), Load.getValue(1));
22125 return true;
22126 }
22127
22128 return false;
22129}
22130
22131/// Try to fold an expression of the form (N0 cond N1) ? N2 : N3 to a shift and
22132/// bitwise 'and'.
22133SDValue DAGCombiner::foldSelectCCToShiftAnd(const SDLoc &DL, SDValue N0,
22134 SDValue N1, SDValue N2, SDValue N3,
22135 ISD::CondCode CC) {
22136 // If this is a select where the false operand is zero and the compare is a
22137 // check of the sign bit, see if we can perform the "gzip trick":
22138 // select_cc setlt X, 0, A, 0 -> and (sra X, size(X)-1), A
22139 // select_cc setgt X, 0, A, 0 -> and (not (sra X, size(X)-1)), A
22140 EVT XType = N0.getValueType();
22141 EVT AType = N2.getValueType();
22142 if (!isNullConstant(N3) || !XType.bitsGE(AType))
22143 return SDValue();
22144
22145 // If the comparison is testing for a positive value, we have to invert
22146 // the sign bit mask, so only do that transform if the target has a bitwise
22147 // 'and not' instruction (the invert is free).
22148 if (CC == ISD::SETGT && TLI.hasAndNot(N2)) {
22149 // (X > -1) ? A : 0
22150 // (X > 0) ? X : 0 <-- This is canonical signed max.
22151 if (!(isAllOnesConstant(N1) || (isNullConstant(N1) && N0 == N2)))
22152 return SDValue();
22153 } else if (CC == ISD::SETLT) {
22154 // (X < 0) ? A : 0
22155 // (X < 1) ? X : 0 <-- This is un-canonicalized signed min.
22156 if (!(isNullConstant(N1) || (isOneConstant(N1) && N0 == N2)))
22157 return SDValue();
22158 } else {
22159 return SDValue();
22160 }
22161
22162 // and (sra X, size(X)-1), A -> "and (srl X, C2), A" iff A is a single-bit
22163 // constant.
22164 EVT ShiftAmtTy = getShiftAmountTy(N0.getValueType());
22165 auto *N2C = dyn_cast<ConstantSDNode>(N2.getNode());
22166 if (N2C && ((N2C->getAPIntValue() & (N2C->getAPIntValue() - 1)) == 0)) {
22167 unsigned ShCt = XType.getSizeInBits() - N2C->getAPIntValue().logBase2() - 1;
22168 if (!TLI.shouldAvoidTransformToShift(XType, ShCt)) {
22169 SDValue ShiftAmt = DAG.getConstant(ShCt, DL, ShiftAmtTy);
22170 SDValue Shift = DAG.getNode(ISD::SRL, DL, XType, N0, ShiftAmt);
22171 AddToWorklist(Shift.getNode());
22172
22173 if (XType.bitsGT(AType)) {
22174 Shift = DAG.getNode(ISD::TRUNCATE, DL, AType, Shift);
22175 AddToWorklist(Shift.getNode());
22176 }
22177
22178 if (CC == ISD::SETGT)
22179 Shift = DAG.getNOT(DL, Shift, AType);
22180
22181 return DAG.getNode(ISD::AND, DL, AType, Shift, N2);
22182 }
22183 }
22184
22185 unsigned ShCt = XType.getSizeInBits() - 1;
22186 if (TLI.shouldAvoidTransformToShift(XType, ShCt))
22187 return SDValue();
22188
22189 SDValue ShiftAmt = DAG.getConstant(ShCt, DL, ShiftAmtTy);
22190 SDValue Shift = DAG.getNode(ISD::SRA, DL, XType, N0, ShiftAmt);
22191 AddToWorklist(Shift.getNode());
22192
22193 if (XType.bitsGT(AType)) {
22194 Shift = DAG.getNode(ISD::TRUNCATE, DL, AType, Shift);
22195 AddToWorklist(Shift.getNode());
22196 }
22197
22198 if (CC == ISD::SETGT)
22199 Shift = DAG.getNOT(DL, Shift, AType);
22200
22201 return DAG.getNode(ISD::AND, DL, AType, Shift, N2);
22202}
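// Illustrative sketch of the "gzip trick" above for i32 (not part of this
// file; assumes arithmetic right shift): shifting by size(X)-1 materializes
// the (X < 0) condition as an all-ones or all-zeros mask.
static int gzipTrickSketch(int X, int A) {
  return (X >> 31) & A; // equals (X < 0) ? A : 0
}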
22203
22204// Transform (fneg/fabs (bitconvert x)) to avoid loading constant pool values.
22205SDValue DAGCombiner::foldSignChangeInBitcast(SDNode *N) {
22206 SDValue N0 = N->getOperand(0);
22207 EVT VT = N->getValueType(0);
22208 bool IsFabs = N->getOpcode() == ISD::FABS;
22209 bool IsFree = IsFabs ? TLI.isFAbsFree(VT) : TLI.isFNegFree(VT);
22210
22211 if (IsFree || N0.getOpcode() != ISD::BITCAST || !N0.hasOneUse())
22212 return SDValue();
22213
22214 SDValue Int = N0.getOperand(0);
22215 EVT IntVT = Int.getValueType();
22216
22217 // The operand to cast should be integer.
22218 if (!IntVT.isInteger() || IntVT.isVector())
22219 return SDValue();
22220
22221 // (fneg (bitconvert x)) -> (bitconvert (xor x sign))
22222 // (fabs (bitconvert x)) -> (bitconvert (and x ~sign))
22223 APInt SignMask;
22224 if (N0.getValueType().isVector()) {
22225 // For vector, create a sign mask (0x80...) or its inverse (for fabs,
22226 // 0x7f...) per element and splat it.
22227 SignMask = APInt::getSignMask(N0.getScalarValueSizeInBits());
22228 if (IsFabs)
22229 SignMask = ~SignMask;
22230 SignMask = APInt::getSplat(IntVT.getSizeInBits(), SignMask);
22231 } else {
22232 // For scalar, just use the sign mask (0x80... or the inverse, 0x7f...)
22233 SignMask = APInt::getSignMask(IntVT.getSizeInBits());
22234 if (IsFabs)
22235 SignMask = ~SignMask;
22236 }
22237 SDLoc DL(N0);
22238 Int = DAG.getNode(IsFabs ? ISD::AND : ISD::XOR, DL, IntVT, Int,
22239 DAG.getConstant(SignMask, DL, IntVT));
22240 AddToWorklist(Int.getNode());
22241 return DAG.getBitcast(VT, Int);
22242}
22243
22244/// Turn "(a cond b) ? 1.0f : 2.0f" into "load (tmp + ((a cond b) ? 0 : 4))"
22245/// where "tmp" is a constant pool entry containing an array with 1.0 and 2.0
22246/// in it. This may be a win when the constant is not otherwise available
22247/// because it replaces two constant pool loads with one.
22248SDValue DAGCombiner::convertSelectOfFPConstantsToLoadOffset(
22249 const SDLoc &DL, SDValue N0, SDValue N1, SDValue N2, SDValue N3,
22250 ISD::CondCode CC) {
22251 if (!TLI.reduceSelectOfFPConstantLoads(N0.getValueType()))
22252 return SDValue();
22253
22254 // If we are before legalize types, we want the other legalization to happen
22255 // first (for example, to avoid messing with soft float).
22256 auto *TV = dyn_cast<ConstantFPSDNode>(N2);
22257 auto *FV = dyn_cast<ConstantFPSDNode>(N3);
22258 EVT VT = N2.getValueType();
22259 if (!TV || !FV || !TLI.isTypeLegal(VT))
22260 return SDValue();
22261
22262 // If a constant can be materialized without loads, this does not make sense.
22263 if (TLI.getOperationAction(ISD::ConstantFP, VT) == TargetLowering::Legal ||
22264 TLI.isFPImmLegal(TV->getValueAPF(), TV->getValueType(0), ForCodeSize) ||
22265 TLI.isFPImmLegal(FV->getValueAPF(), FV->getValueType(0), ForCodeSize))
22266 return SDValue();
22267
22268 // If both constants have multiple uses, then we won't need to do an extra
22269 // load. The values are likely around in registers for other users.
22270 if (!TV->hasOneUse() && !FV->hasOneUse())
22271 return SDValue();
22272
22273 Constant *Elts[] = { const_cast<ConstantFP*>(FV->getConstantFPValue()),
22274 const_cast<ConstantFP*>(TV->getConstantFPValue()) };
22275 Type *FPTy = Elts[0]->getType();
22276 const DataLayout &TD = DAG.getDataLayout();
22277
22278 // Create a ConstantArray of the two constants.
22279 Constant *CA = ConstantArray::get(ArrayType::get(FPTy, 2), Elts);
22280 SDValue CPIdx = DAG.getConstantPool(CA, TLI.getPointerTy(DAG.getDataLayout()),
22281 TD.getPrefTypeAlign(FPTy));
22282 Align Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlign();
22283
22284 // Get offsets to the 0 and 1 elements of the array, so we can select between
22285 // them.
22286 SDValue Zero = DAG.getIntPtrConstant(0, DL);
22287 unsigned EltSize = (unsigned)TD.getTypeAllocSize(Elts[0]->getType());
22288 SDValue One = DAG.getIntPtrConstant(EltSize, SDLoc(FV));
22289 SDValue Cond =
22290 DAG.getSetCC(DL, getSetCCResultType(N0.getValueType()), N0, N1, CC);
22291 AddToWorklist(Cond.getNode());
22292 SDValue CstOffset = DAG.getSelect(DL, Zero.getValueType(), Cond, One, Zero);
22293 AddToWorklist(CstOffset.getNode());
22294 CPIdx = DAG.getNode(ISD::ADD, DL, CPIdx.getValueType(), CPIdx, CstOffset);
22295 AddToWorklist(CPIdx.getNode());
22296 return DAG.getLoad(TV->getValueType(0), DL, DAG.getEntryNode(), CPIdx,
22297 MachinePointerInfo::getConstantPool(
22298 DAG.getMachineFunction()), Alignment);
22299}
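// Illustrative sketch of the resulting access pattern (not part of this
// file): both constants share one constant-pool array laid out as {FV, TV},
// and the select collapses to a single indexed load.
static double selectOfFPConstantsSketch(bool Cond, const double Pool[2]) {
  return Pool[Cond ? 1 : 0]; // one load replaces two constant-pool loads
}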
22300
22301/// Simplify an expression of the form (N0 cond N1) ? N2 : N3
22302/// where 'cond' is the comparison specified by CC.
22303SDValue DAGCombiner::SimplifySelectCC(const SDLoc &DL, SDValue N0, SDValue N1,
22304 SDValue N2, SDValue N3, ISD::CondCode CC,
22305 bool NotExtCompare) {
22306 // (x ? y : y) -> y.
22307 if (N2 == N3) return N2;
22308
22309 EVT CmpOpVT = N0.getValueType();
22310 EVT CmpResVT = getSetCCResultType(CmpOpVT);
22311 EVT VT = N2.getValueType();
22312 auto *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
22313 auto *N2C = dyn_cast<ConstantSDNode>(N2.getNode());
22314 auto *N3C = dyn_cast<ConstantSDNode>(N3.getNode());
22315
22316 // Determine if the condition we're dealing with is constant.
22317 if (SDValue SCC = DAG.FoldSetCC(CmpResVT, N0, N1, CC, DL)) {
22318 AddToWorklist(SCC.getNode());
22319 if (auto *SCCC = dyn_cast<ConstantSDNode>(SCC)) {
22320 // fold select_cc true, x, y -> x
22321 // fold select_cc false, x, y -> y
22322 return !(SCCC->isNullValue()) ? N2 : N3;
22323 }
22324 }
22325
22326 if (SDValue V =
22327 convertSelectOfFPConstantsToLoadOffset(DL, N0, N1, N2, N3, CC))
22328 return V;
22329
22330 if (SDValue V = foldSelectCCToShiftAnd(DL, N0, N1, N2, N3, CC))
22331 return V;
22332
22333  // fold (select_cc seteq (and x, y), 0, 0, A) -> (and (shr (shl x)) A)
22334  // where y has a single bit set.
22335  // In plain terms, we can turn the SELECT_CC into an AND
22336  // when the condition can be materialized as an all-ones register. Any
22337  // single bit-test can be materialized as an all-ones register with
22338  // shift-left and shift-right-arith.
22339 if (CC == ISD::SETEQ && N0->getOpcode() == ISD::AND &&
22340 N0->getValueType(0) == VT && isNullConstant(N1) && isNullConstant(N2)) {
22341 SDValue AndLHS = N0->getOperand(0);
22342 auto *ConstAndRHS = dyn_cast<ConstantSDNode>(N0->getOperand(1));
22343 if (ConstAndRHS && ConstAndRHS->getAPIntValue().countPopulation() == 1) {
22344 // Shift the tested bit over the sign bit.
22345 const APInt &AndMask = ConstAndRHS->getAPIntValue();
22346 unsigned ShCt = AndMask.getBitWidth() - 1;
22347 if (!TLI.shouldAvoidTransformToShift(VT, ShCt)) {
22348 SDValue ShlAmt =
22349 DAG.getConstant(AndMask.countLeadingZeros(), SDLoc(AndLHS),
22350 getShiftAmountTy(AndLHS.getValueType()));
22351 SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(N0), VT, AndLHS, ShlAmt);
22352
22353 // Now arithmetic right shift it all the way over, so the result is
22354 // either all-ones, or zero.
22355 SDValue ShrAmt =
22356 DAG.getConstant(ShCt, SDLoc(Shl),
22357 getShiftAmountTy(Shl.getValueType()));
22358 SDValue Shr = DAG.getNode(ISD::SRA, SDLoc(N0), VT, Shl, ShrAmt);
22359
22360 return DAG.getNode(ISD::AND, DL, VT, Shr, N3);
22361 }
22362 }
22363 }
22364
22365 // fold select C, 16, 0 -> shl C, 4
22366 bool Fold = N2C && isNullConstant(N3) && N2C->getAPIntValue().isPowerOf2();
22367 bool Swap = N3C && isNullConstant(N2) && N3C->getAPIntValue().isPowerOf2();
22368
22369 if ((Fold || Swap) &&
22370 TLI.getBooleanContents(CmpOpVT) ==
22371 TargetLowering::ZeroOrOneBooleanContent &&
22372 (!LegalOperations || TLI.isOperationLegal(ISD::SETCC, CmpOpVT))) {
22373
22374 if (Swap) {
22375 CC = ISD::getSetCCInverse(CC, CmpOpVT);
22376 std::swap(N2C, N3C);
22377 }
22378
22379 // If the caller doesn't want us to simplify this into a zext of a compare,
22380 // don't do it.
22381 if (NotExtCompare && N2C->isOne())
22382 return SDValue();
22383
22384 SDValue Temp, SCC;
22385 // zext (setcc n0, n1)
22386 if (LegalTypes) {
22387 SCC = DAG.getSetCC(DL, CmpResVT, N0, N1, CC);
22388 if (VT.bitsLT(SCC.getValueType()))
22389 Temp = DAG.getZeroExtendInReg(SCC, SDLoc(N2), VT);
22390 else
22391 Temp = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N2), VT, SCC);
22392 } else {
22393 SCC = DAG.getSetCC(SDLoc(N0), MVT::i1, N0, N1, CC);
22394 Temp = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N2), VT, SCC);
22395 }
22396
22397 AddToWorklist(SCC.getNode());
22398 AddToWorklist(Temp.getNode());
22399
22400 if (N2C->isOne())
22401 return Temp;
22402
22403 unsigned ShCt = N2C->getAPIntValue().logBase2();
22404 if (TLI.shouldAvoidTransformToShift(VT, ShCt))
22405 return SDValue();
22406
22407 // shl setcc result by log2 n2c
22408 return DAG.getNode(ISD::SHL, DL, N2.getValueType(), Temp,
22409 DAG.getConstant(ShCt, SDLoc(Temp),
22410 getShiftAmountTy(Temp.getValueType())));
22411 }
22412
22413 // select_cc seteq X, 0, sizeof(X), ctlz(X) -> ctlz(X)
22414 // select_cc seteq X, 0, sizeof(X), ctlz_zero_undef(X) -> ctlz(X)
22415 // select_cc seteq X, 0, sizeof(X), cttz(X) -> cttz(X)
22416 // select_cc seteq X, 0, sizeof(X), cttz_zero_undef(X) -> cttz(X)
22417 // select_cc setne X, 0, ctlz(X), sizeof(X) -> ctlz(X)
22418 // select_cc setne X, 0, ctlz_zero_undef(X), sizeof(X) -> ctlz(X)
22419 // select_cc setne X, 0, cttz(X), sizeof(X) -> cttz(X)
22420 // select_cc setne X, 0, cttz_zero_undef(X), sizeof(X) -> cttz(X)
22421 if (N1C && N1C->isNullValue() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
22422 SDValue ValueOnZero = N2;
22423 SDValue Count = N3;
22424    // If the condition is NE instead of EQ, swap the operands.
22425 if (CC == ISD::SETNE)
22426 std::swap(ValueOnZero, Count);
22427 // Check if the value on zero is a constant equal to the bits in the type.
22428 if (auto *ValueOnZeroC = dyn_cast<ConstantSDNode>(ValueOnZero)) {
22429 if (ValueOnZeroC->getAPIntValue() == VT.getSizeInBits()) {
22430 // If the other operand is cttz/cttz_zero_undef of N0, and cttz is
22431 // legal, combine to just cttz.
22432 if ((Count.getOpcode() == ISD::CTTZ ||
22433 Count.getOpcode() == ISD::CTTZ_ZERO_UNDEF) &&
22434 N0 == Count.getOperand(0) &&
22435 (!LegalOperations || TLI.isOperationLegal(ISD::CTTZ, VT)))
22436 return DAG.getNode(ISD::CTTZ, DL, VT, N0);
22437 // If the other operand is ctlz/ctlz_zero_undef of N0, and ctlz is
22438 // legal, combine to just ctlz.
22439 if ((Count.getOpcode() == ISD::CTLZ ||
22440 Count.getOpcode() == ISD::CTLZ_ZERO_UNDEF) &&
22441 N0 == Count.getOperand(0) &&
22442 (!LegalOperations || TLI.isOperationLegal(ISD::CTLZ, VT)))
22443 return DAG.getNode(ISD::CTLZ, DL, VT, N0);
22444 }
22445 }
22446 }
22447
22448 return SDValue();
22449}
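// Illustrative sketch of the power-of-two select fold handled above (not
// part of this file): with a zero-or-one boolean, select C, 16, 0 is just
// zext(C) shifted left by log2(16).
static unsigned selectPow2Sketch(bool C) {
  return (unsigned)C << 4; // equals C ? 16 : 0
}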
22450
22451/// This is a stub for TargetLowering::SimplifySetCC.
22452SDValue DAGCombiner::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
22453 ISD::CondCode Cond, const SDLoc &DL,
22454 bool foldBooleans) {
22455 TargetLowering::DAGCombinerInfo
22456 DagCombineInfo(DAG, Level, false, this);
22457 return TLI.SimplifySetCC(VT, N0, N1, Cond, foldBooleans, DagCombineInfo, DL);
22458}
22459
22460/// Given an ISD::SDIV node expressing a divide by constant, return
22461/// a DAG expression to select that will generate the same value by multiplying
22462/// by a magic number.
22463/// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
22464SDValue DAGCombiner::BuildSDIV(SDNode *N) {
22465  // When optimising for minimum size, we don't want to expand a div to a mul
22466  // and a shift.
22467 if (DAG.getMachineFunction().getFunction().hasMinSize())
22468 return SDValue();
22469
22470 SmallVector<SDNode *, 8> Built;
22471 if (SDValue S = TLI.BuildSDIV(N, DAG, LegalOperations, Built)) {
22472 for (SDNode *N : Built)
22473 AddToWorklist(N);
22474 return S;
22475 }
22476
22477 return SDValue();
22478}
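// Illustrative sketch of the magic-number idea for a signed divide by 3 on
// i32, after Hacker's Delight (not part of this file; TLI.BuildSDIV derives
// the constants per target). The multiplier 0x55555556 is (2^32 + 2) / 3.
static int sdivBy3Sketch(int N) {
  long long Prod = (long long)N * 0x55555556LL;
  int Q = (int)(Prod >> 32);           // high half of the 64-bit product
  return Q + (int)((unsigned)N >> 31); // add one when N is negative
}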
22479
22480/// Given an ISD::SDIV node expressing a divide by constant power of 2, return a
22481/// DAG expression that will generate the same value by right shifting.
22482SDValue DAGCombiner::BuildSDIVPow2(SDNode *N) {
22483 ConstantSDNode *C = isConstOrConstSplat(N->getOperand(1));
22484 if (!C)
22485 return SDValue();
22486
22487 // Avoid division by zero.
22488 if (C->isNullValue())
22489 return SDValue();
22490
22491 SmallVector<SDNode *, 8> Built;
22492 if (SDValue S = TLI.BuildSDIVPow2(N, C->getAPIntValue(), DAG, Built)) {
22493 for (SDNode *N : Built)
22494 AddToWorklist(N);
22495 return S;
22496 }
22497
22498 return SDValue();
22499}
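// Illustrative sketch of the power-of-two expansion for i32 X / 4 (not part
// of this file): bias negative values by divisor-1 so the arithmetic shift
// rounds toward zero instead of toward negative infinity.
static int sdivPow2Sketch(int X) {
  int Sign = X >> 31;                     // all-ones if X is negative
  int Bias = (int)((unsigned)Sign >> 30); // 3 if negative, else 0
  return (X + Bias) >> 2;
}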
22500
22501/// Given an ISD::UDIV node expressing a divide by constant, return a DAG
22502/// expression that will generate the same value by multiplying by a magic
22503/// number.
22504/// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
22505SDValue DAGCombiner::BuildUDIV(SDNode *N) {
22506  // When optimising for minimum size, we don't want to expand a div to a mul
22507  // and a shift.
22508 if (DAG.getMachineFunction().getFunction().hasMinSize())
22509 return SDValue();
22510
22511 SmallVector<SDNode *, 8> Built;
22512 if (SDValue S = TLI.BuildUDIV(N, DAG, LegalOperations, Built)) {
22513 for (SDNode *N : Built)
22514 AddToWorklist(N);
22515 return S;
22516 }
22517
22518 return SDValue();
22519}
22520
22521/// Determines the LogBase2 value for a non-zero input value using the
22522/// transform: LogBase2(V) = (EltBits - 1) - ctlz(V).
22523SDValue DAGCombiner::BuildLogBase2(SDValue V, const SDLoc &DL) {
22524 EVT VT = V.getValueType();
22525 SDValue Ctlz = DAG.getNode(ISD::CTLZ, DL, VT, V);
22526 SDValue Base = DAG.getConstant(VT.getScalarSizeInBits() - 1, DL, VT);
22527 SDValue LogBase2 = DAG.getNode(ISD::SUB, DL, VT, Base, Ctlz);
22528 return LogBase2;
22529}
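// Illustrative sketch of the identity above on a 32-bit scalar, assuming
// V != 0 and a GCC/Clang builtin for ctlz (not part of this file). For
// example, V = 16 gives ctlz = 27, so LogBase2 = 31 - 27 = 4.
static unsigned logBase2Sketch(unsigned V) {
  return 31u - (unsigned)__builtin_clz(V); // (EltBits - 1) - ctlz(V)
}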
22530
22531/// Newton iteration for a function: F(X) is X_{i+1} = X_i - F(X_i)/F'(X_i)
22532/// For the reciprocal, we need to find the zero of the function:
22533/// F(X) = A X - 1 [which has a zero at X = 1/A]
22534/// =>
22535/// X_{i+1} = X_i (2 - A X_i) = X_i + X_i (1 - A X_i) [this second form
22536/// does not require additional intermediate precision]
22537/// For the last iteration, put numerator N into it to gain more precision:
22538/// Result = N X_i + X_i (N - N A X_i)
22539SDValue DAGCombiner::BuildDivEstimate(SDValue N, SDValue Op,
22540 SDNodeFlags Flags) {
22541 if (LegalDAG)
22542 return SDValue();
22543
22544 // TODO: Handle half and/or extended types?
22545 EVT VT = Op.getValueType();
22546 if (VT.getScalarType() != MVT::f32 && VT.getScalarType() != MVT::f64)
22547 return SDValue();
22548
22549 // If estimates are explicitly disabled for this function, we're done.
22550 MachineFunction &MF = DAG.getMachineFunction();
22551 int Enabled = TLI.getRecipEstimateDivEnabled(VT, MF);
22552 if (Enabled == TLI.ReciprocalEstimate::Disabled)
22553 return SDValue();
22554
22555 // Estimates may be explicitly enabled for this type with a custom number of
22556 // refinement steps.
22557 int Iterations = TLI.getDivRefinementSteps(VT, MF);
22558 if (SDValue Est = TLI.getRecipEstimate(Op, DAG, Enabled, Iterations)) {
22559 AddToWorklist(Est.getNode());
22560
22561 SDLoc DL(Op);
22562 if (Iterations) {
22563 SDValue FPOne = DAG.getConstantFP(1.0, DL, VT);
22564
22565 // Newton iterations: Est = Est + Est (N - Arg * Est)
22566 // If this is the last iteration, also multiply by the numerator.
22567 for (int i = 0; i < Iterations; ++i) {
22568 SDValue MulEst = Est;
22569
22570 if (i == Iterations - 1) {
22571 MulEst = DAG.getNode(ISD::FMUL, DL, VT, N, Est, Flags);
22572 AddToWorklist(MulEst.getNode());
22573 }
22574
22575 SDValue NewEst = DAG.getNode(ISD::FMUL, DL, VT, Op, MulEst, Flags);
22576 AddToWorklist(NewEst.getNode());
22577
22578 NewEst = DAG.getNode(ISD::FSUB, DL, VT,
22579 (i == Iterations - 1 ? N : FPOne), NewEst, Flags);
22580 AddToWorklist(NewEst.getNode());
22581
22582 NewEst = DAG.getNode(ISD::FMUL, DL, VT, Est, NewEst, Flags);
22583 AddToWorklist(NewEst.getNode());
22584
22585 Est = DAG.getNode(ISD::FADD, DL, VT, MulEst, NewEst, Flags);
22586 AddToWorklist(Est.getNode());
22587 }
22588 } else {
22589 // If no iterations are available, multiply with N.
22590 Est = DAG.getNode(ISD::FMUL, DL, VT, Est, N, Flags);
22591 AddToWorklist(Est.getNode());
22592 }
22593
22594 return Est;
22595 }
22596
22597 return SDValue();
22598}
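// Illustrative sketch of one refinement step from the recurrence above (not
// part of this file), written in the second form, which needs no additional
// intermediate precision.
static float recipNewtonStepSketch(float A, float X) {
  return X + X * (1.0f - A * X); // X_{i+1} = X_i + X_i (1 - A X_i)
}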
22599
22600/// Newton iteration for a function: F(X) is X_{i+1} = X_i - F(X_i)/F'(X_i)
22601/// For the reciprocal sqrt, we need to find the zero of the function:
22602/// F(X) = 1/X^2 - A [which has a zero at X = 1/sqrt(A)]
22603/// =>
22604/// X_{i+1} = X_i (1.5 - A X_i^2 / 2)
22605/// As a result, we precompute A/2 prior to the iteration loop.
22606SDValue DAGCombiner::buildSqrtNROneConst(SDValue Arg, SDValue Est,
22607 unsigned Iterations,
22608 SDNodeFlags Flags, bool Reciprocal) {
22609 EVT VT = Arg.getValueType();
22610 SDLoc DL(Arg);
22611 SDValue ThreeHalves = DAG.getConstantFP(1.5, DL, VT);
22612
22613 // We now need 0.5 * Arg which we can write as (1.5 * Arg - Arg) so that
22614 // this entire sequence requires only one FP constant.
22615 SDValue HalfArg = DAG.getNode(ISD::FMUL, DL, VT, ThreeHalves, Arg, Flags);
22616 HalfArg = DAG.getNode(ISD::FSUB, DL, VT, HalfArg, Arg, Flags);
22617
22618 // Newton iterations: Est = Est * (1.5 - HalfArg * Est * Est)
22619 for (unsigned i = 0; i < Iterations; ++i) {
22620 SDValue NewEst = DAG.getNode(ISD::FMUL, DL, VT, Est, Est, Flags);
22621 NewEst = DAG.getNode(ISD::FMUL, DL, VT, HalfArg, NewEst, Flags);
22622 NewEst = DAG.getNode(ISD::FSUB, DL, VT, ThreeHalves, NewEst, Flags);
22623 Est = DAG.getNode(ISD::FMUL, DL, VT, Est, NewEst, Flags);
22624 }
22625
22626 // If non-reciprocal square root is requested, multiply the result by Arg.
22627 if (!Reciprocal)
22628 Est = DAG.getNode(ISD::FMUL, DL, VT, Est, Arg, Flags);
22629
22630 return Est;
22631}
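// Illustrative sketch of one iteration of the single-constant form above,
// including the (1.5 * Arg - Arg) trick that synthesizes Arg / 2 without a
// second FP constant (not part of this file).
static float rsqrtOneConstStepSketch(float Arg, float Est) {
  float HalfArg = 1.5f * Arg - Arg; // 0.5 * Arg using only the 1.5 constant
  return Est * (1.5f - HalfArg * Est * Est);
}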
22632
22633/// Newton iteration for a function: F(X) is X_{i+1} = X_i - F(X_i)/F'(X_i)
22634/// For the reciprocal sqrt, we need to find the zero of the function:
22635/// F(X) = 1/X^2 - A [which has a zero at X = 1/sqrt(A)]
22636/// =>
22637/// X_{i+1} = (-0.5 * X_i) * (A * X_i * X_i + (-3.0))
22638SDValue DAGCombiner::buildSqrtNRTwoConst(SDValue Arg, SDValue Est,
22639 unsigned Iterations,
22640 SDNodeFlags Flags, bool Reciprocal) {
22641 EVT VT = Arg.getValueType();
22642 SDLoc DL(Arg);
22643 SDValue MinusThree = DAG.getConstantFP(-3.0, DL, VT);
22644 SDValue MinusHalf = DAG.getConstantFP(-0.5, DL, VT);
22645
22646 // This routine must enter the loop below to work correctly
22647 // when (Reciprocal == false).
22648  assert(Iterations > 0);
22649
22650 // Newton iterations for reciprocal square root:
22651 // E = (E * -0.5) * ((A * E) * E + -3.0)
22652 for (unsigned i = 0; i < Iterations; ++i) {
22653 SDValue AE = DAG.getNode(ISD::FMUL, DL, VT, Arg, Est, Flags);
22654 SDValue AEE = DAG.getNode(ISD::FMUL, DL, VT, AE, Est, Flags);
22655 SDValue RHS = DAG.getNode(ISD::FADD, DL, VT, AEE, MinusThree, Flags);
22656
22657 // When calculating a square root at the last iteration build:
22658 // S = ((A * E) * -0.5) * ((A * E) * E + -3.0)
22659 // (notice a common subexpression)
22660 SDValue LHS;
22661 if (Reciprocal || (i + 1) < Iterations) {
22662 // RSQRT: LHS = (E * -0.5)
22663 LHS = DAG.getNode(ISD::FMUL, DL, VT, Est, MinusHalf, Flags);
22664 } else {
22665 // SQRT: LHS = (A * E) * -0.5
22666 LHS = DAG.getNode(ISD::FMUL, DL, VT, AE, MinusHalf, Flags);
22667 }
22668
22669 Est = DAG.getNode(ISD::FMUL, DL, VT, LHS, RHS, Flags);
22670 }
22671
22672 return Est;
22673}
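// Illustrative sketch of one reciprocal-form iteration of the two-constant
// recurrence above (not part of this file).
static float rsqrtTwoConstStepSketch(float A, float E) {
  float AE = A * E;                      // common subexpression
  return (E * -0.5f) * (AE * E + -3.0f); // E = (E * -0.5) * ((A*E)*E + -3.0)
}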
22674
22675/// Build code to calculate either rsqrt(Op) or sqrt(Op). In the latter case
22676/// Op*rsqrt(Op) is actually computed, so additional postprocessing is needed if
22677/// Op can be zero.
22678SDValue DAGCombiner::buildSqrtEstimateImpl(SDValue Op, SDNodeFlags Flags,
22679 bool Reciprocal) {
22680 if (LegalDAG)
22681 return SDValue();
22682
22683 // TODO: Handle half and/or extended types?
22684 EVT VT = Op.getValueType();
22685 if (VT.getScalarType() != MVT::f32 && VT.getScalarType() != MVT::f64)
22686 return SDValue();
22687
22688 // If estimates are explicitly disabled for this function, we're done.
22689 MachineFunction &MF = DAG.getMachineFunction();
22690 int Enabled = TLI.getRecipEstimateSqrtEnabled(VT, MF);
22691 if (Enabled == TLI.ReciprocalEstimate::Disabled)
22692 return SDValue();
22693
22694 // Estimates may be explicitly enabled for this type with a custom number of
22695 // refinement steps.
22696 int Iterations = TLI.getSqrtRefinementSteps(VT, MF);
22697
22698 bool UseOneConstNR = false;
22699 if (SDValue Est =
22700 TLI.getSqrtEstimate(Op, DAG, Enabled, Iterations, UseOneConstNR,
22701 Reciprocal)) {
22702 AddToWorklist(Est.getNode());
22703
22704 if (Iterations)
22705 Est = UseOneConstNR
22706 ? buildSqrtNROneConst(Op, Est, Iterations, Flags, Reciprocal)
22707 : buildSqrtNRTwoConst(Op, Est, Iterations, Flags, Reciprocal);
22708 if (!Reciprocal) {
22709 SDLoc DL(Op);
22710 // Try the target specific test first.
22711 SDValue Test = TLI.getSqrtInputTest(Op, DAG, DAG.getDenormalMode(VT));
22712
22713      // The estimate is now completely wrong if the input was exactly 0.0 or
22714      // possibly a denormal. Force the answer to 0.0 or the value provided by
22715      // the target for those cases.
22716 Est = DAG.getNode(
22717 Test.getValueType().isVector() ? ISD::VSELECT : ISD::SELECT, DL, VT,
22718 Test, TLI.getSqrtResultForDenormInput(Op, DAG), Est);
22719 }
22720 return Est;
22721 }
22722
22723 return SDValue();
22724}
22725
22726SDValue DAGCombiner::buildRsqrtEstimate(SDValue Op, SDNodeFlags Flags) {
22727 return buildSqrtEstimateImpl(Op, Flags, true);
22728}
22729
22730SDValue DAGCombiner::buildSqrtEstimate(SDValue Op, SDNodeFlags Flags) {
22731 return buildSqrtEstimateImpl(Op, Flags, false);
22732}
22733
22734/// Return true if there is any possibility that the two addresses overlap.
22735bool DAGCombiner::isAlias(SDNode *Op0, SDNode *Op1) const {
22736
22737 struct MemUseCharacteristics {
22738 bool IsVolatile;
22739 bool IsAtomic;
22740 SDValue BasePtr;
22741 int64_t Offset;
22742 Optional<int64_t> NumBytes;
22743 MachineMemOperand *MMO;
22744 };
22745
22746 auto getCharacteristics = [](SDNode *N) -> MemUseCharacteristics {
22747 if (const auto *LSN = dyn_cast<LSBaseSDNode>(N)) {
22748 int64_t Offset = 0;
22749 if (auto *C = dyn_cast<ConstantSDNode>(LSN->getOffset()))
22750 Offset = (LSN->getAddressingMode() == ISD::PRE_INC)
22751 ? C->getSExtValue()
22752 : (LSN->getAddressingMode() == ISD::PRE_DEC)
22753 ? -1 * C->getSExtValue()
22754 : 0;
22755 uint64_t Size =
22756 MemoryLocation::getSizeOrUnknown(LSN->getMemoryVT().getStoreSize());
22757 return {LSN->isVolatile(), LSN->isAtomic(), LSN->getBasePtr(),
22758 Offset /*base offset*/,
22759 Optional<int64_t>(Size),
22760 LSN->getMemOperand()};
22761 }
22762    if (const auto *LN = dyn_cast<LifetimeSDNode>(N))
22763 return {false /*isVolatile*/, /*isAtomic*/ false, LN->getOperand(1),
22764 (LN->hasOffset()) ? LN->getOffset() : 0,
22765 (LN->hasOffset()) ? Optional<int64_t>(LN->getSize())
22766 : Optional<int64_t>(),
22767 (MachineMemOperand *)nullptr};
22768 // Default.
22769 return {false /*isvolatile*/, /*isAtomic*/ false, SDValue(),
22770 (int64_t)0 /*offset*/,
22771 Optional<int64_t>() /*size*/, (MachineMemOperand *)nullptr};
22772 };
22773
22774 MemUseCharacteristics MUC0 = getCharacteristics(Op0),
22775 MUC1 = getCharacteristics(Op1);
22776
22777 // If they are to the same address, then they must be aliases.
22778 if (MUC0.BasePtr.getNode() && MUC0.BasePtr == MUC1.BasePtr &&
22779 MUC0.Offset == MUC1.Offset)
22780 return true;
22781
22782 // If they are both volatile then they cannot be reordered.
22783 if (MUC0.IsVolatile && MUC1.IsVolatile)
22784 return true;
22785
22786 // Be conservative about atomics for the moment
22787 // TODO: This is way overconservative for unordered atomics (see D66309)
22788 if (MUC0.IsAtomic && MUC1.IsAtomic)
22789 return true;
22790
22791 if (MUC0.MMO && MUC1.MMO) {
22792 if ((MUC0.MMO->isInvariant() && MUC1.MMO->isStore()) ||
22793 (MUC1.MMO->isInvariant() && MUC0.MMO->isStore()))
22794 return false;
22795 }
22796
22797 // Try to prove that there is aliasing, or that there is no aliasing. Either
22798 // way, we can return now. If nothing can be proved, proceed with more tests.
22799 bool IsAlias;
22800 if (BaseIndexOffset::computeAliasing(Op0, MUC0.NumBytes, Op1, MUC1.NumBytes,
22801 DAG, IsAlias))
22802 return IsAlias;
22803
22804 // The following all rely on MMO0 and MMO1 being valid. Fail conservatively if
22805  // either is not known.
22806 if (!MUC0.MMO || !MUC1.MMO)
22807 return true;
22808
22809  // If one operation reads from invariant memory and the other may store, they
22810  // cannot alias. This should really check the equivalent of mayWrite, but that
22811  // only matters for memory nodes other than load/store.
22812 if ((MUC0.MMO->isInvariant() && MUC1.MMO->isStore()) ||
22813 (MUC1.MMO->isInvariant() && MUC0.MMO->isStore()))
22814 return false;
22815
22816  // If SrcValue1 and SrcValue2 are known to have relatively large alignment
22817  // compared to the size and offset of the accesses, we may be able to prove
22818  // that they do not alias. This check is conservative for now; it catches
22819  // cases created by splitting vector types and only works when the offsets
22820  // are multiples of the size of the data.
22821 int64_t SrcValOffset0 = MUC0.MMO->getOffset();
22822 int64_t SrcValOffset1 = MUC1.MMO->getOffset();
22823 Align OrigAlignment0 = MUC0.MMO->getBaseAlign();
22824 Align OrigAlignment1 = MUC1.MMO->getBaseAlign();
22825 auto &Size0 = MUC0.NumBytes;
22826 auto &Size1 = MUC1.NumBytes;
22827 if (OrigAlignment0 == OrigAlignment1 && SrcValOffset0 != SrcValOffset1 &&
22828 Size0.hasValue() && Size1.hasValue() && *Size0 == *Size1 &&
22829 OrigAlignment0 > *Size0 && SrcValOffset0 % *Size0 == 0 &&
22830 SrcValOffset1 % *Size1 == 0) {
22831 int64_t OffAlign0 = SrcValOffset0 % OrigAlignment0.value();
22832 int64_t OffAlign1 = SrcValOffset1 % OrigAlignment1.value();
22833
22834 // There is no overlap between these relatively aligned accesses of
22835 // similar size. Return no alias.
22836 if ((OffAlign0 + *Size0) <= OffAlign1 || (OffAlign1 + *Size1) <= OffAlign0)
22837 return false;
22838 }
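// A self-contained sketch of the modular test above (hypothetical helper,
// assuming <cstdint>): equally sized accesses whose offsets are multiples
// of the size, under a common base alignment larger than the size, occupy
// fixed slots inside every alignment window, so disjoint slots imply no
// overlap anywhere in memory.
static bool disjointByAlignment(int64_t Off0, int64_t Off1, int64_t Size,
                                int64_t Alignment) {
  int64_t Slot0 = Off0 % Alignment; // position within the alignment window
  int64_t Slot1 = Off1 % Alignment;
  return (Slot0 + Size) <= Slot1 || (Slot1 + Size) <= Slot0;
}
// E.g. Size = 4, Alignment = 16, Off0 = 16, Off1 = 4 give slots [0,4) and
// [4,8), which are disjoint in every 16-byte window, so the code above
// returns "no alias".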
22839
22840 bool UseAA = CombinerGlobalAA.getNumOccurrences() > 0
22841 ? CombinerGlobalAA
22842 : DAG.getSubtarget().useAA();
22843#ifndef NDEBUG
22844 if (CombinerAAOnlyFunc.getNumOccurrences() &&
22845 CombinerAAOnlyFunc != DAG.getMachineFunction().getName())
22846 UseAA = false;
22847#endif
22848
22849 if (UseAA && AA && MUC0.MMO->getValue() && MUC1.MMO->getValue() &&
22850 Size0.hasValue() && Size1.hasValue()) {
22851 // Use alias analysis information.
22852 int64_t MinOffset = std::min(SrcValOffset0, SrcValOffset1);
22853 int64_t Overlap0 = *Size0 + SrcValOffset0 - MinOffset;
22854 int64_t Overlap1 = *Size1 + SrcValOffset1 - MinOffset;
22855 if (AA->isNoAlias(
22856 MemoryLocation(MUC0.MMO->getValue(), Overlap0,
22857 UseTBAA ? MUC0.MMO->getAAInfo() : AAMDNodes()),
22858 MemoryLocation(MUC1.MMO->getValue(), Overlap1,
22859 UseTBAA ? MUC1.MMO->getAAInfo() : AAMDNodes())))
22860 return false;
22861 }
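// Illustrative arithmetic for the Overlap values above: both locations are
// rebased to the smaller source offset so alias analysis sees ranges that
// share a common start. With Size0 = 4 at SrcValOffset0 = 8 and Size1 = 4
// at SrcValOffset1 = 0: MinOffset = 0, Overlap0 = 4 + 8 - 0 = 12, and
// Overlap1 = 4 + 0 - 0 = 4, so location 0 is queried as bytes [0,12) of its
// IR value, conservatively covering the gap down to the common start.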
22862
22863 // Otherwise we have to assume they alias.
22864 return true;
22865}
22866
22867/// Walk up chain skipping non-aliasing memory nodes,
22868/// looking for aliasing nodes and adding them to the Aliases vector.
22869void DAGCombiner::GatherAllAliases(SDNode *N, SDValue OriginalChain,
22870 SmallVectorImpl<SDValue> &Aliases) {
22871 SmallVector<SDValue, 8> Chains; // List of chains to visit.
22872 SmallPtrSet<SDNode *, 16> Visited; // Visited node set.
22873
22874 // Get alias information for node.
22875 // TODO: relax aliasing for unordered atomics (see D66309)
22876 const bool IsLoad = isa<LoadSDNode>(N) && cast<LoadSDNode>(N)->isSimple();
22877
22878 // Starting off.
22879 Chains.push_back(OriginalChain);
22880 unsigned Depth = 0;
22881
22882  // Attempt to improve the chain by a single step.
22883 std::function<bool(SDValue &)> ImproveChain = [&](SDValue &C) -> bool {
22884 switch (C.getOpcode()) {
22885 case ISD::EntryToken:
22886 // No need to mark EntryToken.
22887 C = SDValue();
22888 return true;
22889 case ISD::LOAD:
22890 case ISD::STORE: {
22891 // Get alias information for C.
22892 // TODO: Relax aliasing for unordered atomics (see D66309)
22893 bool IsOpLoad = isa<LoadSDNode>(C.getNode()) &&
22894 cast<LSBaseSDNode>(C.getNode())->isSimple();
22895 if ((IsLoad && IsOpLoad) || !isAlias(N, C.getNode())) {
22896 // Look further up the chain.
22897 C = C.getOperand(0);
22898 return true;
22899 }
22900 // Alias, so stop here.
22901 return false;
22902 }
22903
22904 case ISD::CopyFromReg:
22905      // Always forward past CopyFromReg.
22906 C = C.getOperand(0);
22907 return true;
22908
22909 case ISD::LIFETIME_START:
22910 case ISD::LIFETIME_END: {
22911 // We can forward past any lifetime start/end that can be proven not to
22912 // alias the memory access.
22913 if (!isAlias(N, C.getNode())) {
22914 // Look further up the chain.
22915 C = C.getOperand(0);
22916 return true;
22917 }
22918 return false;
22919 }
22920 default:
22921 return false;
22922 }
22923 };
22924
22925 // Look at each chain and determine if it is an alias. If so, add it to the
22926 // aliases list. If not, then continue up the chain looking for the next
22927 // candidate.
22928 while (!Chains.empty()) {
22929 SDValue Chain = Chains.pop_back_val();
22930
22931 // Don't bother if we've seen Chain before.
22932 if (!Visited.insert(Chain.getNode()).second)
22933 continue;
22934
22935 // For TokenFactor nodes, look at each operand and only continue up the
22936 // chain until we reach the depth limit.
22937 //
22938 // FIXME: The depth check could be made to return the last non-aliasing
22939 // chain we found before we hit a tokenfactor rather than the original
22940 // chain.
22941 if (Depth > TLI.getGatherAllAliasesMaxDepth()) {
22942 Aliases.clear();
22943 Aliases.push_back(OriginalChain);
22944 return;
22945 }
22946
22947 if (Chain.getOpcode() == ISD::TokenFactor) {
22948 // We have to check each of the operands of the token factor for "small"
22949 // token factors, so we queue them up. Adding the operands to the queue
22950 // (stack) in reverse order maintains the original order and increases the
22951      // likelihood that getNode will find a matching token factor (CSE).
22952 if (Chain.getNumOperands() > 16) {
22953 Aliases.push_back(Chain);
22954 continue;
22955 }
22956 for (unsigned n = Chain.getNumOperands(); n;)
22957 Chains.push_back(Chain.getOperand(--n));
22958 ++Depth;
22959 continue;
22960 }
22961 // Everything else
22962 if (ImproveChain(Chain)) {
22963      // Updated chain found; consider the new chain if one exists.
22964 if (Chain.getNode())
22965 Chains.push_back(Chain);
22966 ++Depth;
22967 continue;
22968 }
22969    // No improved chain is possible, so treat this as an alias.
22970 Aliases.push_back(Chain);
22971 }
22972}
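// A compact sketch of the traversal above using toy types (all names here
// are hypothetical, and std::vector/std::set are assumed available): an
// explicit stack plus a visited set walks up the chain, TokenFactors fan
// out into their operands, improvable nodes step to their chain operand,
// and everything else is recorded as a potential alias.
struct ToyNode {
  std::vector<ToyNode *> ChainOps; // for chained nodes, ChainOps[0] is the parent
  bool IsTokenFactor = false;
};
static void gatherToyAliases(ToyNode *OriginalChain, unsigned MaxDepth,
                             bool (*Improve)(ToyNode *&),
                             std::vector<ToyNode *> &Aliases) {
  std::vector<ToyNode *> Work{OriginalChain};
  std::set<ToyNode *> Visited;
  unsigned Depth = 0;
  while (!Work.empty()) {
    ToyNode *C = Work.back();
    Work.pop_back();
    if (!C || !Visited.insert(C).second)
      continue; // cleared by Improve, or already seen
    if (Depth > MaxDepth) { // too deep: fall back to the original chain
      Aliases.assign(1, OriginalChain);
      return;
    }
    if (C->IsTokenFactor) { // fan out; reverse order keeps original order
      for (size_t N = C->ChainOps.size(); N;)
        Work.push_back(C->ChainOps[--N]);
      ++Depth;
      continue;
    }
    if (Improve(C)) { // stepped past a provably non-aliasing node
      Work.push_back(C); // may be null (entry token); skipped above
      ++Depth;
      continue;
    }
    Aliases.push_back(C); // possible alias: callers must stay ordered after it
  }
}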
22973
22974/// Walk up chain skipping non-aliasing memory nodes, looking for a better chain
22975/// (aliasing node).
22976SDValue DAGCombiner::FindBetterChain(SDNode *N, SDValue OldChain) {
22977 if (OptLevel == CodeGenOpt::None)
22978 return OldChain;
22979
22980 // Ops for replacing token factor.
22981 SmallVector<SDValue, 8> Aliases;
22982
22983 // Accumulate all the aliases to this node.
22984 GatherAllAliases(N, OldChain, Aliases);
22985
22986 // If no operands then chain to entry token.
22987 if (Aliases.size() == 0)
22988 return DAG.getEntryNode();
22989
22990 // If a single operand then chain to it. We don't need to revisit it.
22991 if (Aliases.size() == 1)
22992 return Aliases[0];
22993
22994 // Construct a custom tailored token factor.
22995 return DAG.getTokenFactor(SDLoc(N), Aliases);
22996}
22997
22998namespace {
22999// TODO: Replace with std::monostate when we move to C++17.
23000struct UnitT { } Unit;
23001bool operator==(const UnitT &, const UnitT &) { return true; }
23002bool operator!=(const UnitT &, const UnitT &) { return false; }
23003} // namespace
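// A sketch of that TODO under C++17 (illustrative only, not committed
// code): std::monostate from <variant> is an empty, equality-comparable
// type, so it can replace UnitT as the IntervalMap value unchanged.
#if __cplusplus >= 201703L
#include <variant>
using IMap17 = llvm::IntervalMap<int64_t, std::monostate, 8,
                                 llvm::IntervalMapHalfOpenInfo<int64_t>>;
// Usage: IMap17::Allocator A; IMap17 M(A); M.insert(0, 8, std::monostate{});
#endif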
23004
23005// This function tries to collect a bunch of potentially interesting
23006// nodes to improve the chains of, all at once. This might seem
23007// redundant, as this function gets called when visiting every store
23008// node, so why not let the work be done on each store as it's visited?
23009//
23010// I believe this is mainly important because mergeConsecutiveStores
23011// is unable to deal with merging stores of different sizes, so unless
23012// we improve the chains of all the potential candidates up-front
23013// before running mergeConsecutiveStores, it might only see some of
23014// the nodes that will eventually be candidates, and then not be able
23015// to go from a partially-merged state to the desired final
23016// fully-merged state.
23017
23018bool DAGCombiner::parallelizeChainedStores(StoreSDNode *St) {
23019 SmallVector<StoreSDNode *, 8> ChainedStores;
23020 StoreSDNode *STChain = St;
23021  // Intervals records which offsets from BaseIndex have been covered. In
23022  // the common case, every store writes to the immediately preceding address
23023  // range and is thus merged with the previous interval at insertion time.
23024
23025 using IMap =
23026 llvm::IntervalMap<int64_t, UnitT, 8, IntervalMapHalfOpenInfo<int64_t>>;
23027 IMap::Allocator A;
23028 IMap Intervals(A);
23029
23030 // This holds the base pointer, index, and the offset in bytes from the base
23031 // pointer.
23032 const BaseIndexOffset BasePtr = BaseIndexOffset::match(St, DAG);
23033
23034 // We must have a base and an offset.
23035 if (!BasePtr.getBase().getNode())
23036 return false;
23037
23038 // Do not handle stores to undef base pointers.
23039 if (BasePtr.getBase().isUndef())
23040 return false;
23041
23042 // BaseIndexOffset assumes that offsets are fixed-size, which
23043 // is not valid for scalable vectors where the offsets are
23044 // scaled by `vscale`, so bail out early.
23045 if (St->getMemoryVT().isScalableVector())
23046 return false;
23047
23048 // Add ST's interval.
23049 Intervals.insert(0, (St->getMemoryVT().getSizeInBits() + 7) / 8, Unit);
23050
23051 while (StoreSDNode *Chain = dyn_cast<StoreSDNode>(STChain->getChain())) {
23052 if (Chain->getMemoryVT().isScalableVector())
23053 return false;
23054
23055 // If the chain has more than one use, then we can't reorder the mem ops.
23056 if (!SDValue(Chain, 0)->hasOneUse())
23057 break;
23058 // TODO: Relax for unordered atomics (see D66309)
23059 if (!Chain->isSimple() || Chain->isIndexed())
23060 break;
23061
23062 // Find the base pointer and offset for this memory node.
23063 const BaseIndexOffset Ptr = BaseIndexOffset::match(Chain, DAG);
23064 // Check that the base pointer is the same as the original one.
23065 int64_t Offset;
23066 if (!BasePtr.equalBaseIndex(Ptr, DAG, Offset))
23067 break;
23068 int64_t Length = (Chain->getMemoryVT().getSizeInBits() + 7) / 8;
23069 // Make sure we don't overlap with other intervals by checking the ones to
23070 // the left or right before inserting.
23071 auto I = Intervals.find(Offset);
23072 // If there's a next interval, we should end before it.
23073 if (I != Intervals.end() && I.start() < (Offset + Length))
23074 break;
23075 // If there's a previous interval, we should start after it.
23076 if (I != Intervals.begin() && (--I).stop() <= Offset)
23077 break;
23078 Intervals.insert(Offset, Offset + Length, Unit);
23079
23080 ChainedStores.push_back(Chain);
23081 STChain = Chain;
23082 }
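// Illustrative trace of the interval bookkeeping above (made-up offsets):
// St itself covers [0,4). A chained store with Offset = -4 and Length = 4
// finds [0,4) as its right neighbor, passes the first check because
// 0 < (-4 + 4) is false, has no interval to its left, and inserts [-4,0).
// A store that re-covers byte 0 instead fails the first check and ends the
// collection, so Intervals always holds disjoint half-open ranges.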
23083
23084 // If we didn't find a chained store, exit.
23085 if (ChainedStores.size() == 0)
23086 return false;
23087
23088 // Improve all chained stores (St and ChainedStores members) starting from
23089  // where the store chain ended and return a single TokenFactor.
23090 SDValue NewChain = STChain->getChain();
23091 SmallVector<SDValue, 8> TFOps;
23092 for (unsigned I = ChainedStores.size(); I;) {
23093 StoreSDNode *S = ChainedStores[--I];
23094 SDValue BetterChain = FindBetterChain(S, NewChain);
23095 S = cast<StoreSDNode>(DAG.UpdateNodeOperands(
23096 S, BetterChain, S->getOperand(1), S->getOperand(2), S->getOperand(3)));
23097 TFOps.push_back(SDValue(S, 0));
23098 ChainedStores[I] = S;
23099 }
23100
23101 // Improve St's chain. Use a new node to avoid creating a loop from CombineTo.
23102 SDValue BetterChain = FindBetterChain(St, NewChain);
23103 SDValue NewST;
23104 if (St->isTruncatingStore())
23105 NewST = DAG.getTruncStore(BetterChain, SDLoc(St), St->getValue(),
23106 St->getBasePtr(), St->getMemoryVT(),
23107 St->getMemOperand());
23108 else
23109 NewST = DAG.getStore(BetterChain, SDLoc(St), St->getValue(),
23110 St->getBasePtr(), St->getMemOperand());
23111
23112 TFOps.push_back(NewST);
23113
23114 // If we improved every element of TFOps, then we've lost the dependence on
23115 // NewChain to successors of St and we need to add it back to TFOps. Do so at
23116  // the beginning to keep relative order consistent with FindBetterChain.
23117 auto hasImprovedChain = [&](SDValue ST) -> bool {
23118 return ST->getOperand(0) != NewChain;
23119 };
23120 bool AddNewChain = llvm::all_of(TFOps, hasImprovedChain);
23121 if (AddNewChain)
23122 TFOps.insert(TFOps.begin(), NewChain);
23123
23124 SDValue TF = DAG.getTokenFactor(SDLoc(STChain), TFOps);
23125 CombineTo(St, TF);
23126
23127 // Add TF and its operands to the worklist.
23128 AddToWorklist(TF.getNode());
23129 for (const SDValue &Op : TF->ops())
23130 AddToWorklist(Op.getNode());
23131 AddToWorklist(STChain);
23132 return true;
23133}
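// Shape of the rewrite above (illustrative): a serial chain
//   Entry -> S2 -> S1 -> St
// of pairwise-disjoint stores becomes
//   NewChain -> { S2', S1', St' } -> TokenFactor
// where each store now hangs off its own improved chain and the single
// TokenFactor re-establishes one chain value for St's users.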
23134
23135bool DAGCombiner::findBetterNeighborChains(StoreSDNode *St) {
23136 if (OptLevel == CodeGenOpt::None)
23137 return false;
23138
23139 const BaseIndexOffset BasePtr = BaseIndexOffset::match(St, DAG);
23140
23141 // We must have a base and an offset.
23142 if (!BasePtr.getBase().getNode())
23143 return false;
23144
23145 // Do not handle stores to undef base pointers.
23146 if (BasePtr.getBase().isUndef())
23147 return false;
23148
23149 // Directly improve a chain of disjoint stores starting at St.
23150 if (parallelizeChainedStores(St))
23151 return true;
23152
23153  // Improve St's chain.
23154 SDValue BetterChain = FindBetterChain(St, St->getChain());
23155 if (St->getChain() != BetterChain) {
23156 replaceStoreChain(St, BetterChain);
23157 return true;
23158 }
23159 return false;
23160}
23161
23162/// This is the entry point for the file.
23163void SelectionDAG::Combine(CombineLevel Level, AliasAnalysis *AA,
23164 CodeGenOpt::Level OptLevel) {
23165 /// This is the main entry point to this class.
23166 DAGCombiner(*this, AA, OptLevel).Run(Level);
23167}