Bug Summary

File: build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
Warning: line 1134, column 42
The result of the right shift is undefined due to shifting by '32', which is greater or equal to the width of type 'uint32_t'
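
For context on the diagnostic: countTrailingZeros() returns the full bit width (32) when passed 0, so if the constant AND mask on the analyzed path is 0, the expression 'Mask >> TZ' at line 1134 shifts a uint32_t by 32, which C++ leaves undefined. The fragment below is a minimal sketch of one possible guard, using the variable names from ppAddrRewriteAndSrl; the early 'continue' for an all-zero mask is an assumption for illustration, not the upstream fix.

    uint32_t Mask = MN->getZExtValue();
    if (Mask == 0)                               // hypothetical guard: an all-zero mask
      continue;                                  // has no ones, so it cannot match anyway
    uint32_t TZ = countTrailingZeros(Mask);      // Mask != 0, so TZ < 32
    uint32_t M1 = countTrailingOnes(Mask >> TZ); // shift amount now in range
    uint32_t LZ = countLeadingZeros(Mask);

Checking 'TZ == 32' after the call, before the shift, would avoid the undefined shift equally well without changing which masks are accepted.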

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name HexagonISelDAGToDAG.cpp -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/build-llvm/tools/clang/stage2-bins -resource-dir /usr/lib/llvm-16/lib/clang/16.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/Target/Hexagon -I /build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/llvm/lib/Target/Hexagon -I include -I /build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-16/lib/clang/16.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fmacro-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/= -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/= -O2 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -Wno-misleading-indentation -std=c++17 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/= -ferror-limit 19 -fvisibility=hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o 
/tmp/scan-build-2022-10-03-140002-15933-1 -x c++ /build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp

/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp

1//===-- HexagonISelDAGToDAG.cpp - A dag to dag inst selector for Hexagon --===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines an instruction selector for the Hexagon target.
10//
11//===----------------------------------------------------------------------===//
12
13#include "HexagonISelDAGToDAG.h"
14#include "Hexagon.h"
15#include "HexagonISelLowering.h"
16#include "HexagonMachineFunctionInfo.h"
17#include "HexagonTargetMachine.h"
18#include "llvm/CodeGen/FunctionLoweringInfo.h"
19#include "llvm/CodeGen/MachineInstrBuilder.h"
20#include "llvm/CodeGen/SelectionDAGISel.h"
21#include "llvm/IR/Intrinsics.h"
22#include "llvm/IR/IntrinsicsHexagon.h"
23#include "llvm/Support/CommandLine.h"
24#include "llvm/Support/Debug.h"
25using namespace llvm;
26
27#define DEBUG_TYPE "hexagon-isel"
28
29static
30cl::opt<bool>
31EnableAddressRebalancing("isel-rebalance-addr", cl::Hidden, cl::init(true),
32 cl::desc("Rebalance address calculation trees to improve "
33 "instruction selection"));
34
35// Rebalance only if this allows e.g. combining a GA with an offset or
36// factoring out a shift.
37static
38cl::opt<bool>
39RebalanceOnlyForOptimizations("rebalance-only-opt", cl::Hidden, cl::init(false),
40 cl::desc("Rebalance address tree only if this allows optimizations"));
41
42static
43cl::opt<bool>
44RebalanceOnlyImbalancedTrees("rebalance-only-imbal", cl::Hidden,
45 cl::init(false), cl::desc("Rebalance address tree only if it is imbalanced"));
46
47static cl::opt<bool> CheckSingleUse("hexagon-isel-su", cl::Hidden,
48 cl::init(true), cl::desc("Enable checking of SDNode's single-use status"));
49
50//===----------------------------------------------------------------------===//
51// Instruction Selector Implementation
52//===----------------------------------------------------------------------===//
53
54#define GET_DAGISEL_BODY HexagonDAGToDAGISel
55#include "HexagonGenDAGISel.inc"
56
57namespace llvm {
58/// createHexagonISelDag - This pass converts a legalized DAG into a
59/// Hexagon-specific DAG, ready for instruction scheduling.
60FunctionPass *createHexagonISelDag(HexagonTargetMachine &TM,
61 CodeGenOpt::Level OptLevel) {
62 return new HexagonDAGToDAGISel(TM, OptLevel);
63}
64}
65
66void HexagonDAGToDAGISel::SelectIndexedLoad(LoadSDNode *LD, const SDLoc &dl) {
67 SDValue Chain = LD->getChain();
68 SDValue Base = LD->getBasePtr();
69 SDValue Offset = LD->getOffset();
70 int32_t Inc = cast<ConstantSDNode>(Offset.getNode())->getSExtValue();
71 EVT LoadedVT = LD->getMemoryVT();
72 unsigned Opcode = 0;
73
74 // Check for zero extended loads. Treat any-extend loads as zero extended
75 // loads.
76 ISD::LoadExtType ExtType = LD->getExtensionType();
77 bool IsZeroExt = (ExtType == ISD::ZEXTLOAD || ExtType == ISD::EXTLOAD);
78 bool IsValidInc = HII->isValidAutoIncImm(LoadedVT, Inc);
79
80 assert(LoadedVT.isSimple());
81 switch (LoadedVT.getSimpleVT().SimpleTy) {
82 case MVT::i8:
83 if (IsZeroExt)
84 Opcode = IsValidInc ? Hexagon::L2_loadrub_pi : Hexagon::L2_loadrub_io;
85 else
86 Opcode = IsValidInc ? Hexagon::L2_loadrb_pi : Hexagon::L2_loadrb_io;
87 break;
88 case MVT::i16:
89 if (IsZeroExt)
90 Opcode = IsValidInc ? Hexagon::L2_loadruh_pi : Hexagon::L2_loadruh_io;
91 else
92 Opcode = IsValidInc ? Hexagon::L2_loadrh_pi : Hexagon::L2_loadrh_io;
93 break;
94 case MVT::i32:
95 case MVT::f32:
96 case MVT::v2i16:
97 case MVT::v4i8:
98 Opcode = IsValidInc ? Hexagon::L2_loadri_pi : Hexagon::L2_loadri_io;
99 break;
100 case MVT::i64:
101 case MVT::f64:
102 case MVT::v2i32:
103 case MVT::v4i16:
104 case MVT::v8i8:
105 Opcode = IsValidInc ? Hexagon::L2_loadrd_pi : Hexagon::L2_loadrd_io;
106 break;
107 case MVT::v64i8:
108 case MVT::v32i16:
109 case MVT::v16i32:
110 case MVT::v8i64:
111 case MVT::v128i8:
112 case MVT::v64i16:
113 case MVT::v32i32:
114 case MVT::v16i64:
115 if (isAlignedMemNode(LD)) {
116 if (LD->isNonTemporal())
117 Opcode = IsValidInc ? Hexagon::V6_vL32b_nt_pi : Hexagon::V6_vL32b_nt_ai;
118 else
119 Opcode = IsValidInc ? Hexagon::V6_vL32b_pi : Hexagon::V6_vL32b_ai;
120 } else {
121 Opcode = IsValidInc ? Hexagon::V6_vL32Ub_pi : Hexagon::V6_vL32Ub_ai;
122 }
123 break;
124 default:
125 llvm_unreachable("Unexpected memory type in indexed load");
126 }
127
128 SDValue IncV = CurDAG->getTargetConstant(Inc, dl, MVT::i32);
129 MachineMemOperand *MemOp = LD->getMemOperand();
130
131 auto getExt64 = [this,ExtType] (MachineSDNode *N, const SDLoc &dl)
132 -> MachineSDNode* {
133 if (ExtType == ISD::ZEXTLOAD || ExtType == ISD::EXTLOAD) {
134 SDValue Zero = CurDAG->getTargetConstant(0, dl, MVT::i32);
135 return CurDAG->getMachineNode(Hexagon::A4_combineir, dl, MVT::i64,
136 Zero, SDValue(N, 0));
137 }
138 if (ExtType == ISD::SEXTLOAD)
139 return CurDAG->getMachineNode(Hexagon::A2_sxtw, dl, MVT::i64,
140 SDValue(N, 0));
141 return N;
142 };
143
144 // Loaded value Next address Chain
145 SDValue From[3] = { SDValue(LD,0), SDValue(LD,1), SDValue(LD,2) };
146 SDValue To[3];
147
148 EVT ValueVT = LD->getValueType(0);
149 if (ValueVT == MVT::i64 && ExtType != ISD::NON_EXTLOAD) {
150 // A load extending to i64 will actually produce i32, which will then
151 // need to be extended to i64.
152 assert(LoadedVT.getSizeInBits() <= 32);
153 ValueVT = MVT::i32;
154 }
155
156 if (IsValidInc) {
157 MachineSDNode *L = CurDAG->getMachineNode(Opcode, dl, ValueVT,
158 MVT::i32, MVT::Other, Base,
159 IncV, Chain);
160 CurDAG->setNodeMemRefs(L, {MemOp});
161 To[1] = SDValue(L, 1); // Next address.
162 To[2] = SDValue(L, 2); // Chain.
163 // Handle special case for extension to i64.
164 if (LD->getValueType(0) == MVT::i64)
165 L = getExt64(L, dl);
166 To[0] = SDValue(L, 0); // Loaded (extended) value.
167 } else {
168 SDValue Zero = CurDAG->getTargetConstant(0, dl, MVT::i32);
169 MachineSDNode *L = CurDAG->getMachineNode(Opcode, dl, ValueVT, MVT::Other,
170 Base, Zero, Chain);
171 CurDAG->setNodeMemRefs(L, {MemOp});
172 To[2] = SDValue(L, 1); // Chain.
173 MachineSDNode *A = CurDAG->getMachineNode(Hexagon::A2_addi, dl, MVT::i32,
174 Base, IncV);
175 To[1] = SDValue(A, 0); // Next address.
176 // Handle special case for extension to i64.
177 if (LD->getValueType(0) == MVT::i64)
178 L = getExt64(L, dl);
179 To[0] = SDValue(L, 0); // Loaded (extended) value.
180 }
181 ReplaceUses(From, To, 3);
182 CurDAG->RemoveDeadNode(LD);
183}
184
185MachineSDNode *HexagonDAGToDAGISel::LoadInstrForLoadIntrinsic(SDNode *IntN) {
186 if (IntN->getOpcode() != ISD::INTRINSIC_W_CHAIN)
187 return nullptr;
188
189 SDLoc dl(IntN);
190 unsigned IntNo = cast<ConstantSDNode>(IntN->getOperand(1))->getZExtValue();
191
192 static std::map<unsigned,unsigned> LoadPciMap = {
193 { Intrinsic::hexagon_circ_ldb, Hexagon::L2_loadrb_pci },
194 { Intrinsic::hexagon_circ_ldub, Hexagon::L2_loadrub_pci },
195 { Intrinsic::hexagon_circ_ldh, Hexagon::L2_loadrh_pci },
196 { Intrinsic::hexagon_circ_lduh, Hexagon::L2_loadruh_pci },
197 { Intrinsic::hexagon_circ_ldw, Hexagon::L2_loadri_pci },
198 { Intrinsic::hexagon_circ_ldd, Hexagon::L2_loadrd_pci },
199 };
200 auto FLC = LoadPciMap.find(IntNo);
201 if (FLC != LoadPciMap.end()) {
202 EVT ValTy = (IntNo == Intrinsic::hexagon_circ_ldd) ? MVT::i64 : MVT::i32;
203 EVT RTys[] = { ValTy, MVT::i32, MVT::Other };
204 // Operands: { Base, Increment, Modifier, Chain }
205 auto Inc = cast<ConstantSDNode>(IntN->getOperand(5));
206 SDValue I = CurDAG->getTargetConstant(Inc->getSExtValue(), dl, MVT::i32);
207 MachineSDNode *Res = CurDAG->getMachineNode(FLC->second, dl, RTys,
208 { IntN->getOperand(2), I, IntN->getOperand(4),
209 IntN->getOperand(0) });
210 return Res;
211 }
212
213 return nullptr;
214}
215
216SDNode *HexagonDAGToDAGISel::StoreInstrForLoadIntrinsic(MachineSDNode *LoadN,
217 SDNode *IntN) {
218 // The "LoadN" is just a machine load instruction. The intrinsic also
219 // involves storing it. Generate an appropriate store to the location
220 // given in the intrinsic's operand(3).
221 uint64_t F = HII->get(LoadN->getMachineOpcode()).TSFlags;
222 unsigned SizeBits = (F >> HexagonII::MemAccessSizePos) &
223 HexagonII::MemAccesSizeMask;
224 unsigned Size = 1U << (SizeBits-1);
225
226 SDLoc dl(IntN);
227 MachinePointerInfo PI;
228 SDValue TS;
229 SDValue Loc = IntN->getOperand(3);
230
231 if (Size >= 4)
232 TS = CurDAG->getStore(SDValue(LoadN, 2), dl, SDValue(LoadN, 0), Loc, PI,
233 Align(Size));
234 else
235 TS = CurDAG->getTruncStore(SDValue(LoadN, 2), dl, SDValue(LoadN, 0), Loc,
236 PI, MVT::getIntegerVT(Size * 8), Align(Size));
237
238 SDNode *StoreN;
239 {
240 HandleSDNode Handle(TS);
241 SelectStore(TS.getNode());
242 StoreN = Handle.getValue().getNode();
243 }
244
245 // Load's results are { Loaded value, Updated pointer, Chain }
246 ReplaceUses(SDValue(IntN, 0), SDValue(LoadN, 1));
247 ReplaceUses(SDValue(IntN, 1), SDValue(StoreN, 0));
248 return StoreN;
249}
250
251bool HexagonDAGToDAGISel::tryLoadOfLoadIntrinsic(LoadSDNode *N) {
252 // The intrinsics for load circ/brev perform two operations:
253 // 1. Load a value V from the specified location, using the addressing
254 // mode corresponding to the intrinsic.
255 // 2. Store V into a specified location. This location is typically a
256 // local, temporary object.
257 // In many cases, the program using these intrinsics will immediately
258 // load V again from the local object. In those cases, when certain
259 // conditions are met, the last load can be removed.
260 // This function identifies and optimizes this pattern. If the pattern
261 // cannot be optimized, it returns false, which will cause the load
262 // to be selected separately from the intrinsic (which will be handled
263 // in SelectIntrinsicWChain).
264
265 SDValue Ch = N->getOperand(0);
266 SDValue Loc = N->getOperand(1);
267
268 // Assume that the load and the intrinsic are connected directly with a
269 // chain:
270 // t1: i32,ch = int.load ..., ..., ..., Loc, ... // <-- C
271 // t2: i32,ch = load t1:1, Loc, ...
272 SDNode *C = Ch.getNode();
273
274 if (C->getOpcode() != ISD::INTRINSIC_W_CHAIN)
275 return false;
276
277 // The second load can only be eliminated if its extension type matches
278 // that of the load instruction corresponding to the intrinsic. The user
279 // can provide an address of an unsigned variable to store the result of
280 // a sign-extending intrinsic into (or the other way around).
281 ISD::LoadExtType IntExt;
282 switch (cast<ConstantSDNode>(C->getOperand(1))->getZExtValue()) {
283 case Intrinsic::hexagon_circ_ldub:
284 case Intrinsic::hexagon_circ_lduh:
285 IntExt = ISD::ZEXTLOAD;
286 break;
287 case Intrinsic::hexagon_circ_ldw:
288 case Intrinsic::hexagon_circ_ldd:
289 IntExt = ISD::NON_EXTLOAD;
290 break;
291 default:
292 IntExt = ISD::SEXTLOAD;
293 break;
294 }
295 if (N->getExtensionType() != IntExt)
296 return false;
297
298 // Make sure the target location for the loaded value in the load intrinsic
299 // is the location from which LD (or N) is loading.
300 if (C->getNumOperands() < 4 || Loc.getNode() != C->getOperand(3).getNode())
301 return false;
302
303 if (MachineSDNode *L = LoadInstrForLoadIntrinsic(C)) {
304 SDNode *S = StoreInstrForLoadIntrinsic(L, C);
305 SDValue F[] = { SDValue(N,0), SDValue(N,1), SDValue(C,0), SDValue(C,1) };
306 SDValue T[] = { SDValue(L,0), SDValue(S,0), SDValue(L,1), SDValue(S,0) };
307 ReplaceUses(F, T, std::size(T));
308 // This transformation will leave the intrinsic dead. If it remains in
309 // the DAG, the selection code will see it again, but without the load,
310 // and it will generate a store that is normally required for it.
311 CurDAG->RemoveDeadNode(C);
312 return true;
313 }
314 return false;
315}
316
317// Convert the bit-reverse load intrinsic to appropriate target instruction.
318bool HexagonDAGToDAGISel::SelectBrevLdIntrinsic(SDNode *IntN) {
319 if (IntN->getOpcode() != ISD::INTRINSIC_W_CHAIN)
320 return false;
321
322 const SDLoc &dl(IntN);
323 unsigned IntNo = cast<ConstantSDNode>(IntN->getOperand(1))->getZExtValue();
324
325 static const std::map<unsigned, unsigned> LoadBrevMap = {
326 { Intrinsic::hexagon_L2_loadrb_pbr, Hexagon::L2_loadrb_pbr },
327 { Intrinsic::hexagon_L2_loadrub_pbr, Hexagon::L2_loadrub_pbr },
328 { Intrinsic::hexagon_L2_loadrh_pbr, Hexagon::L2_loadrh_pbr },
329 { Intrinsic::hexagon_L2_loadruh_pbr, Hexagon::L2_loadruh_pbr },
330 { Intrinsic::hexagon_L2_loadri_pbr, Hexagon::L2_loadri_pbr },
331 { Intrinsic::hexagon_L2_loadrd_pbr, Hexagon::L2_loadrd_pbr }
332 };
333 auto FLI = LoadBrevMap.find(IntNo);
334 if (FLI != LoadBrevMap.end()) {
335 EVT ValTy =
336 (IntNo == Intrinsic::hexagon_L2_loadrd_pbr) ? MVT::i64 : MVT::i32;
337 EVT RTys[] = { ValTy, MVT::i32, MVT::Other };
338 // Operands of Intrinsic: {chain, enum ID of intrinsic, baseptr,
339 // modifier}.
340 // Operands of target instruction: { Base, Modifier, Chain }.
341 MachineSDNode *Res = CurDAG->getMachineNode(
342 FLI->second, dl, RTys,
343 {IntN->getOperand(2), IntN->getOperand(3), IntN->getOperand(0)});
344
345 MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(IntN)->getMemOperand();
346 CurDAG->setNodeMemRefs(Res, {MemOp});
347
348 ReplaceUses(SDValue(IntN, 0), SDValue(Res, 0));
349 ReplaceUses(SDValue(IntN, 1), SDValue(Res, 1));
350 ReplaceUses(SDValue(IntN, 2), SDValue(Res, 2));
351 CurDAG->RemoveDeadNode(IntN);
352 return true;
353 }
354 return false;
355}
356
357/// Generate a machine instruction node for the new circular buffer intrinsics.
358/// The new versions use a CSx register instead of the K field.
359bool HexagonDAGToDAGISel::SelectNewCircIntrinsic(SDNode *IntN) {
360 if (IntN->getOpcode() != ISD::INTRINSIC_W_CHAIN)
361 return false;
362
363 SDLoc DL(IntN);
364 unsigned IntNo = cast<ConstantSDNode>(IntN->getOperand(1))->getZExtValue();
365 SmallVector<SDValue, 7> Ops;
366
367 static std::map<unsigned,unsigned> LoadNPcMap = {
368 { Intrinsic::hexagon_L2_loadrub_pci, Hexagon::PS_loadrub_pci },
369 { Intrinsic::hexagon_L2_loadrb_pci, Hexagon::PS_loadrb_pci },
370 { Intrinsic::hexagon_L2_loadruh_pci, Hexagon::PS_loadruh_pci },
371 { Intrinsic::hexagon_L2_loadrh_pci, Hexagon::PS_loadrh_pci },
372 { Intrinsic::hexagon_L2_loadri_pci, Hexagon::PS_loadri_pci },
373 { Intrinsic::hexagon_L2_loadrd_pci, Hexagon::PS_loadrd_pci },
374 { Intrinsic::hexagon_L2_loadrub_pcr, Hexagon::PS_loadrub_pcr },
375 { Intrinsic::hexagon_L2_loadrb_pcr, Hexagon::PS_loadrb_pcr },
376 { Intrinsic::hexagon_L2_loadruh_pcr, Hexagon::PS_loadruh_pcr },
377 { Intrinsic::hexagon_L2_loadrh_pcr, Hexagon::PS_loadrh_pcr },
378 { Intrinsic::hexagon_L2_loadri_pcr, Hexagon::PS_loadri_pcr },
379 { Intrinsic::hexagon_L2_loadrd_pcr, Hexagon::PS_loadrd_pcr }
380 };
381 auto FLI = LoadNPcMap.find (IntNo);
382 if (FLI != LoadNPcMap.end()) {
383 EVT ValTy = MVT::i32;
384 if (IntNo == Intrinsic::hexagon_L2_loadrd_pci ||
385 IntNo == Intrinsic::hexagon_L2_loadrd_pcr)
386 ValTy = MVT::i64;
387 EVT RTys[] = { ValTy, MVT::i32, MVT::Other };
388 // Handle load.*_pci case which has 6 operands.
389 if (IntN->getNumOperands() == 6) {
390 auto Inc = cast<ConstantSDNode>(IntN->getOperand(3));
391 SDValue I = CurDAG->getTargetConstant(Inc->getSExtValue(), DL, MVT::i32);
392 // Operands: { Base, Increment, Modifier, Start, Chain }.
393 Ops = { IntN->getOperand(2), I, IntN->getOperand(4), IntN->getOperand(5),
394 IntN->getOperand(0) };
395 } else
396 // Handle load.*_pcr case which has 5 operands.
397 // Operands: { Base, Modifier, Start, Chain }.
398 Ops = { IntN->getOperand(2), IntN->getOperand(3), IntN->getOperand(4),
399 IntN->getOperand(0) };
400 MachineSDNode *Res = CurDAG->getMachineNode(FLI->second, DL, RTys, Ops);
401 ReplaceUses(SDValue(IntN, 0), SDValue(Res, 0));
402 ReplaceUses(SDValue(IntN, 1), SDValue(Res, 1));
403 ReplaceUses(SDValue(IntN, 2), SDValue(Res, 2));
404 CurDAG->RemoveDeadNode(IntN);
405 return true;
406 }
407
408 static std::map<unsigned,unsigned> StoreNPcMap = {
409 { Intrinsic::hexagon_S2_storerb_pci, Hexagon::PS_storerb_pci },
410 { Intrinsic::hexagon_S2_storerh_pci, Hexagon::PS_storerh_pci },
411 { Intrinsic::hexagon_S2_storerf_pci, Hexagon::PS_storerf_pci },
412 { Intrinsic::hexagon_S2_storeri_pci, Hexagon::PS_storeri_pci },
413 { Intrinsic::hexagon_S2_storerd_pci, Hexagon::PS_storerd_pci },
414 { Intrinsic::hexagon_S2_storerb_pcr, Hexagon::PS_storerb_pcr },
415 { Intrinsic::hexagon_S2_storerh_pcr, Hexagon::PS_storerh_pcr },
416 { Intrinsic::hexagon_S2_storerf_pcr, Hexagon::PS_storerf_pcr },
417 { Intrinsic::hexagon_S2_storeri_pcr, Hexagon::PS_storeri_pcr },
418 { Intrinsic::hexagon_S2_storerd_pcr, Hexagon::PS_storerd_pcr }
419 };
420 auto FSI = StoreNPcMap.find (IntNo);
421 if (FSI != StoreNPcMap.end()) {
422 EVT RTys[] = { MVT::i32, MVT::Other };
423 // Handle store.*_pci case which has 7 operands.
424 if (IntN->getNumOperands() == 7) {
425 auto Inc = cast<ConstantSDNode>(IntN->getOperand(3));
426 SDValue I = CurDAG->getTargetConstant(Inc->getSExtValue(), DL, MVT::i32);
427 // Operands: { Base, Increment, Modifier, Value, Start, Chain }.
428 Ops = { IntN->getOperand(2), I, IntN->getOperand(4), IntN->getOperand(5),
429 IntN->getOperand(6), IntN->getOperand(0) };
430 } else
431 // Handle store.*_pcr case which has 6 operands.
432 // Operands: { Base, Modifier, Value, Start, Chain }.
433 Ops = { IntN->getOperand(2), IntN->getOperand(3), IntN->getOperand(4),
434 IntN->getOperand(5), IntN->getOperand(0) };
435 MachineSDNode *Res = CurDAG->getMachineNode(FSI->second, DL, RTys, Ops);
436 ReplaceUses(SDValue(IntN, 0), SDValue(Res, 0));
437 ReplaceUses(SDValue(IntN, 1), SDValue(Res, 1));
438 CurDAG->RemoveDeadNode(IntN);
439 return true;
440 }
441
442 return false;
443}
444
445void HexagonDAGToDAGISel::SelectLoad(SDNode *N) {
446 SDLoc dl(N);
447 LoadSDNode *LD = cast<LoadSDNode>(N);
448
449 // Handle indexed loads.
450 ISD::MemIndexedMode AM = LD->getAddressingMode();
451 if (AM != ISD::UNINDEXED) {
452 SelectIndexedLoad(LD, dl);
453 return;
454 }
455
456 // Handle patterns using circ/brev load intrinsics.
457 if (tryLoadOfLoadIntrinsic(LD))
458 return;
459
460 SelectCode(LD);
461}
462
463void HexagonDAGToDAGISel::SelectIndexedStore(StoreSDNode *ST, const SDLoc &dl) {
464 SDValue Chain = ST->getChain();
465 SDValue Base = ST->getBasePtr();
466 SDValue Offset = ST->getOffset();
467 SDValue Value = ST->getValue();
468 // Get the constant value.
469 int32_t Inc = cast<ConstantSDNode>(Offset.getNode())->getSExtValue();
470 EVT StoredVT = ST->getMemoryVT();
471 EVT ValueVT = Value.getValueType();
472
473 bool IsValidInc = HII->isValidAutoIncImm(StoredVT, Inc);
474 unsigned Opcode = 0;
475
476 assert(StoredVT.isSimple());
477 switch (StoredVT.getSimpleVT().SimpleTy) {
478 case MVT::i8:
479 Opcode = IsValidInc ? Hexagon::S2_storerb_pi : Hexagon::S2_storerb_io;
480 break;
481 case MVT::i16:
482 Opcode = IsValidInc ? Hexagon::S2_storerh_pi : Hexagon::S2_storerh_io;
483 break;
484 case MVT::i32:
485 case MVT::f32:
486 case MVT::v2i16:
487 case MVT::v4i8:
488 Opcode = IsValidInc ? Hexagon::S2_storeri_pi : Hexagon::S2_storeri_io;
489 break;
490 case MVT::i64:
491 case MVT::f64:
492 case MVT::v2i32:
493 case MVT::v4i16:
494 case MVT::v8i8:
495 Opcode = IsValidInc ? Hexagon::S2_storerd_pi : Hexagon::S2_storerd_io;
496 break;
497 case MVT::v64i8:
498 case MVT::v32i16:
499 case MVT::v16i32:
500 case MVT::v8i64:
501 case MVT::v128i8:
502 case MVT::v64i16:
503 case MVT::v32i32:
504 case MVT::v16i64:
505 if (isAlignedMemNode(ST)) {
506 if (ST->isNonTemporal())
507 Opcode = IsValidInc ? Hexagon::V6_vS32b_nt_pi : Hexagon::V6_vS32b_nt_ai;
508 else
509 Opcode = IsValidInc ? Hexagon::V6_vS32b_pi : Hexagon::V6_vS32b_ai;
510 } else {
511 Opcode = IsValidInc ? Hexagon::V6_vS32Ub_pi : Hexagon::V6_vS32Ub_ai;
512 }
513 break;
514 default:
515 llvm_unreachable("Unexpected memory type in indexed store");
516 }
517
518 if (ST->isTruncatingStore() && ValueVT.getSizeInBits() == 64) {
519 assert(StoredVT.getSizeInBits() < 64 && "Not a truncating store");
520 Value = CurDAG->getTargetExtractSubreg(Hexagon::isub_lo,
521 dl, MVT::i32, Value);
522 }
523
524 SDValue IncV = CurDAG->getTargetConstant(Inc, dl, MVT::i32);
525 MachineMemOperand *MemOp = ST->getMemOperand();
526
527 // Next address Chain
528 SDValue From[2] = { SDValue(ST,0), SDValue(ST,1) };
529 SDValue To[2];
530
531 if (IsValidInc) {
532 // Build post increment store.
533 SDValue Ops[] = { Base, IncV, Value, Chain };
534 MachineSDNode *S = CurDAG->getMachineNode(Opcode, dl, MVT::i32, MVT::Other,
535 Ops);
536 CurDAG->setNodeMemRefs(S, {MemOp});
537 To[0] = SDValue(S, 0);
538 To[1] = SDValue(S, 1);
539 } else {
540 SDValue Zero = CurDAG->getTargetConstant(0, dl, MVT::i32);
541 SDValue Ops[] = { Base, Zero, Value, Chain };
542 MachineSDNode *S = CurDAG->getMachineNode(Opcode, dl, MVT::Other, Ops);
543 CurDAG->setNodeMemRefs(S, {MemOp});
544 To[1] = SDValue(S, 0);
545 MachineSDNode *A = CurDAG->getMachineNode(Hexagon::A2_addi, dl, MVT::i32,
546 Base, IncV);
547 To[0] = SDValue(A, 0);
548 }
549
550 ReplaceUses(From, To, 2);
551 CurDAG->RemoveDeadNode(ST);
552}
553
554void HexagonDAGToDAGISel::SelectStore(SDNode *N) {
555 SDLoc dl(N);
556 StoreSDNode *ST = cast<StoreSDNode>(N);
557
558 // Handle indexed stores.
559 ISD::MemIndexedMode AM = ST->getAddressingMode();
560 if (AM != ISD::UNINDEXED) {
561 SelectIndexedStore(ST, dl);
562 return;
563 }
564
565 SelectCode(ST);
566}
567
568void HexagonDAGToDAGISel::SelectSHL(SDNode *N) {
569 SDLoc dl(N);
570 SDValue Shl_0 = N->getOperand(0);
571 SDValue Shl_1 = N->getOperand(1);
572
573 auto Default = [this,N] () -> void { SelectCode(N); };
574
575 if (N->getValueType(0) != MVT::i32 || Shl_1.getOpcode() != ISD::Constant)
576 return Default();
577
578 // RHS is const.
579 int32_t ShlConst = cast<ConstantSDNode>(Shl_1)->getSExtValue();
580
581 if (Shl_0.getOpcode() == ISD::MUL) {
582 SDValue Mul_0 = Shl_0.getOperand(0); // Val
583 SDValue Mul_1 = Shl_0.getOperand(1); // Const
584 // RHS of mul is const.
585 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Mul_1)) {
586 int32_t ValConst = C->getSExtValue() << ShlConst;
587 if (isInt<9>(ValConst)) {
588 SDValue Val = CurDAG->getTargetConstant(ValConst, dl, MVT::i32);
589 SDNode *Result = CurDAG->getMachineNode(Hexagon::M2_mpysmi, dl,
590 MVT::i32, Mul_0, Val);
591 ReplaceNode(N, Result);
592 return;
593 }
594 }
595 return Default();
596 }
597
598 if (Shl_0.getOpcode() == ISD::SUB) {
599 SDValue Sub_0 = Shl_0.getOperand(0); // Const 0
600 SDValue Sub_1 = Shl_0.getOperand(1); // Val
601 if (ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(Sub_0)) {
602 if (C1->getSExtValue() != 0 || Sub_1.getOpcode() != ISD::SHL)
603 return Default();
604 SDValue Shl2_0 = Sub_1.getOperand(0); // Val
605 SDValue Shl2_1 = Sub_1.getOperand(1); // Const
606 if (ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(Shl2_1)) {
607 int32_t ValConst = 1 << (ShlConst + C2->getSExtValue());
608 if (isInt<9>(-ValConst)) {
609 SDValue Val = CurDAG->getTargetConstant(-ValConst, dl, MVT::i32);
610 SDNode *Result = CurDAG->getMachineNode(Hexagon::M2_mpysmi, dl,
611 MVT::i32, Shl2_0, Val);
612 ReplaceNode(N, Result);
613 return;
614 }
615 }
616 }
617 }
618
619 return Default();
620}
621
622//
623// Handling intrinsics for circular load and bitreverse load.
624//
625void HexagonDAGToDAGISel::SelectIntrinsicWChain(SDNode *N) {
626 if (MachineSDNode *L = LoadInstrForLoadIntrinsic(N)) {
627 StoreInstrForLoadIntrinsic(L, N);
628 CurDAG->RemoveDeadNode(N);
629 return;
630 }
631
632 // Handle bit-reverse load intrinsics.
633 if (SelectBrevLdIntrinsic(N))
634 return;
635
636 if (SelectNewCircIntrinsic(N))
637 return;
638
639 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
640 if (IntNo == Intrinsic::hexagon_V6_vgathermw ||
641 IntNo == Intrinsic::hexagon_V6_vgathermw_128B ||
642 IntNo == Intrinsic::hexagon_V6_vgathermh ||
643 IntNo == Intrinsic::hexagon_V6_vgathermh_128B ||
644 IntNo == Intrinsic::hexagon_V6_vgathermhw ||
645 IntNo == Intrinsic::hexagon_V6_vgathermhw_128B) {
646 SelectV65Gather(N);
647 return;
648 }
649 if (IntNo == Intrinsic::hexagon_V6_vgathermwq ||
650 IntNo == Intrinsic::hexagon_V6_vgathermwq_128B ||
651 IntNo == Intrinsic::hexagon_V6_vgathermhq ||
652 IntNo == Intrinsic::hexagon_V6_vgathermhq_128B ||
653 IntNo == Intrinsic::hexagon_V6_vgathermhwq ||
654 IntNo == Intrinsic::hexagon_V6_vgathermhwq_128B) {
655 SelectV65GatherPred(N);
656 return;
657 }
658
659 SelectCode(N);
660}
661
662void HexagonDAGToDAGISel::SelectIntrinsicWOChain(SDNode *N) {
663 unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
664 unsigned Bits;
665 switch (IID) {
666 case Intrinsic::hexagon_S2_vsplatrb:
667 Bits = 8;
668 break;
669 case Intrinsic::hexagon_S2_vsplatrh:
670 Bits = 16;
671 break;
672 case Intrinsic::hexagon_V6_vaddcarry:
673 case Intrinsic::hexagon_V6_vaddcarry_128B:
674 case Intrinsic::hexagon_V6_vsubcarry:
675 case Intrinsic::hexagon_V6_vsubcarry_128B:
676 SelectHVXDualOutput(N);
677 return;
678 default:
679 SelectCode(N);
680 return;
681 }
682
683 SDValue V = N->getOperand(1);
684 SDValue U;
685 if (keepsLowBits(V, Bits, U)) {
686 SDValue R = CurDAG->getNode(N->getOpcode(), SDLoc(N), N->getValueType(0),
687 N->getOperand(0), U);
688 ReplaceNode(N, R.getNode());
689 SelectCode(R.getNode());
690 return;
691 }
692 SelectCode(N);
693}
694
695//
696// Map floating point constant values.
697//
698void HexagonDAGToDAGISel::SelectConstantFP(SDNode *N) {
699 SDLoc dl(N);
700 auto *CN = cast<ConstantFPSDNode>(N);
701 APInt A = CN->getValueAPF().bitcastToAPInt();
702 if (N->getValueType(0) == MVT::f32) {
703 SDValue V = CurDAG->getTargetConstant(A.getZExtValue(), dl, MVT::i32);
704 ReplaceNode(N, CurDAG->getMachineNode(Hexagon::A2_tfrsi, dl, MVT::f32, V));
705 return;
706 }
707 if (N->getValueType(0) == MVT::f64) {
708 SDValue V = CurDAG->getTargetConstant(A.getZExtValue(), dl, MVT::i64);
709 ReplaceNode(N, CurDAG->getMachineNode(Hexagon::CONST64, dl, MVT::f64, V));
710 return;
711 }
712
713 SelectCode(N);
714}
715
716//
717// Map boolean values.
718//
719void HexagonDAGToDAGISel::SelectConstant(SDNode *N) {
720 if (N->getValueType(0) == MVT::i1) {
721 assert(!(cast<ConstantSDNode>(N)->getZExtValue() >> 1));
722 unsigned Opc = (cast<ConstantSDNode>(N)->getSExtValue() != 0)
723 ? Hexagon::PS_true
724 : Hexagon::PS_false;
725 ReplaceNode(N, CurDAG->getMachineNode(Opc, SDLoc(N), MVT::i1));
726 return;
727 }
728
729 SelectCode(N);
730}
731
732void HexagonDAGToDAGISel::SelectFrameIndex(SDNode *N) {
733 MachineFrameInfo &MFI = MF->getFrameInfo();
734 const HexagonFrameLowering *HFI = HST->getFrameLowering();
735 int FX = cast<FrameIndexSDNode>(N)->getIndex();
736 Align StkA = HFI->getStackAlign();
737 Align MaxA = MFI.getMaxAlign();
738 SDValue FI = CurDAG->getTargetFrameIndex(FX, MVT::i32);
739 SDLoc DL(N);
740 SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);
741 SDNode *R = nullptr;
742
743 // Use PS_fi when:
744 // - the object is fixed, or
745 // - there are no objects with higher-than-default alignment, or
746 // - there are no dynamically allocated objects.
747 // Otherwise, use PS_fia.
748 if (FX < 0 || MaxA <= StkA || !MFI.hasVarSizedObjects()) {
749 R = CurDAG->getMachineNode(Hexagon::PS_fi, DL, MVT::i32, FI, Zero);
750 } else {
751 auto &HMFI = *MF->getInfo<HexagonMachineFunctionInfo>();
752 unsigned AR = HMFI.getStackAlignBaseVReg();
753 SDValue CH = CurDAG->getEntryNode();
754 SDValue Ops[] = { CurDAG->getCopyFromReg(CH, DL, AR, MVT::i32), FI, Zero };
755 R = CurDAG->getMachineNode(Hexagon::PS_fia, DL, MVT::i32, Ops);
756 }
757
758 ReplaceNode(N, R);
759}
760
761void HexagonDAGToDAGISel::SelectAddSubCarry(SDNode *N) {
762 unsigned OpcCarry = N->getOpcode() == HexagonISD::ADDC ? Hexagon::A4_addp_c
763 : Hexagon::A4_subp_c;
764 SDNode *C = CurDAG->getMachineNode(OpcCarry, SDLoc(N), N->getVTList(),
765 { N->getOperand(0), N->getOperand(1),
766 N->getOperand(2) });
767 ReplaceNode(N, C);
768}
769
770void HexagonDAGToDAGISel::SelectVAlign(SDNode *N) {
771 MVT ResTy = N->getValueType(0).getSimpleVT();
772 if (HST->isHVXVectorType(ResTy, true))
773 return SelectHvxVAlign(N);
774
775 const SDLoc &dl(N);
776 unsigned VecLen = ResTy.getSizeInBits();
777 if (VecLen == 32) {
778 SDValue Ops[] = {
779 CurDAG->getTargetConstant(Hexagon::DoubleRegsRegClassID, dl, MVT::i32),
780 N->getOperand(0),
781 CurDAG->getTargetConstant(Hexagon::isub_hi, dl, MVT::i32),
782 N->getOperand(1),
783 CurDAG->getTargetConstant(Hexagon::isub_lo, dl, MVT::i32)
784 };
785 SDNode *R = CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl,
786 MVT::i64, Ops);
787
788 // Shift right by "(Addr & 0x3) * 8" bytes.
789 SDNode *C;
790 SDValue M0 = CurDAG->getTargetConstant(0x18, dl, MVT::i32);
791 SDValue M1 = CurDAG->getTargetConstant(0x03, dl, MVT::i32);
792 if (HST->useCompound()) {
793 C = CurDAG->getMachineNode(Hexagon::S4_andi_asl_ri, dl, MVT::i32,
794 M0, N->getOperand(2), M1);
795 } else {
796 SDNode *T = CurDAG->getMachineNode(Hexagon::S2_asl_i_r, dl, MVT::i32,
797 N->getOperand(2), M1);
798 C = CurDAG->getMachineNode(Hexagon::A2_andir, dl, MVT::i32,
799 SDValue(T, 0), M0);
800 }
801 SDNode *S = CurDAG->getMachineNode(Hexagon::S2_lsr_r_p, dl, MVT::i64,
802 SDValue(R, 0), SDValue(C, 0));
803 SDValue E = CurDAG->getTargetExtractSubreg(Hexagon::isub_lo, dl, ResTy,
804 SDValue(S, 0));
805 ReplaceNode(N, E.getNode());
806 } else {
807 assert(VecLen == 64);
808 SDNode *Pu = CurDAG->getMachineNode(Hexagon::C2_tfrrp, dl, MVT::v8i1,
809 N->getOperand(2));
810 SDNode *VA = CurDAG->getMachineNode(Hexagon::S2_valignrb, dl, ResTy,
811 N->getOperand(0), N->getOperand(1),
812 SDValue(Pu,0));
813 ReplaceNode(N, VA);
814 }
815}
816
817void HexagonDAGToDAGISel::SelectVAlignAddr(SDNode *N) {
818 const SDLoc &dl(N);
819 SDValue A = N->getOperand(1);
820 int Mask = -cast<ConstantSDNode>(A.getNode())->getSExtValue();
821 assert(isPowerOf2_32(-Mask));
822
823 SDValue M = CurDAG->getTargetConstant(Mask, dl, MVT::i32);
824 SDNode *AA = CurDAG->getMachineNode(Hexagon::A2_andir, dl, MVT::i32,
825 N->getOperand(0), M);
826 ReplaceNode(N, AA);
827}
828
829// Handle these nodes here to avoid having to write patterns for all
830// combinations of input/output types. In all cases, the resulting
831// instruction is the same.
832void HexagonDAGToDAGISel::SelectTypecast(SDNode *N) {
833 SDValue Op = N->getOperand(0);
834 MVT OpTy = Op.getValueType().getSimpleVT();
835 SDNode *T = CurDAG->MorphNodeTo(N, N->getOpcode(),
836 CurDAG->getVTList(OpTy), {Op});
837 ReplaceNode(T, Op.getNode());
838}
839
840void HexagonDAGToDAGISel::SelectP2D(SDNode *N) {
841 MVT ResTy = N->getValueType(0).getSimpleVT();
842 SDNode *T = CurDAG->getMachineNode(Hexagon::C2_mask, SDLoc(N), ResTy,
843 N->getOperand(0));
844 ReplaceNode(N, T);
845}
846
847void HexagonDAGToDAGISel::SelectD2P(SDNode *N) {
848 const SDLoc &dl(N);
849 MVT ResTy = N->getValueType(0).getSimpleVT();
850 SDValue Zero = CurDAG->getTargetConstant(0, dl, MVT::i32);
851 SDNode *T = CurDAG->getMachineNode(Hexagon::A4_vcmpbgtui, dl, ResTy,
852 N->getOperand(0), Zero);
853 ReplaceNode(N, T);
854}
855
856void HexagonDAGToDAGISel::SelectV2Q(SDNode *N) {
857 const SDLoc &dl(N);
858 MVT ResTy = N->getValueType(0).getSimpleVT();
859 // The argument to V2Q should be a single vector.
860 MVT OpTy = N->getOperand(0).getValueType().getSimpleVT(); (void)OpTy;
861 assert(HST->getVectorLength() * 8 == OpTy.getSizeInBits());
862
863 SDValue C = CurDAG->getTargetConstant(-1, dl, MVT::i32);
864 SDNode *R = CurDAG->getMachineNode(Hexagon::A2_tfrsi, dl, MVT::i32, C);
865 SDNode *T = CurDAG->getMachineNode(Hexagon::V6_vandvrt, dl, ResTy,
866 N->getOperand(0), SDValue(R,0));
867 ReplaceNode(N, T);
868}
869
870void HexagonDAGToDAGISel::SelectQ2V(SDNode *N) {
871 const SDLoc &dl(N);
872 MVT ResTy = N->getValueType(0).getSimpleVT();
873 // The result of Q2V should be a single vector.
874 assert(HST->getVectorLength() * 8 == ResTy.getSizeInBits());
875
876 SDValue C = CurDAG->getTargetConstant(-1, dl, MVT::i32);
877 SDNode *R = CurDAG->getMachineNode(Hexagon::A2_tfrsi, dl, MVT::i32, C);
878 SDNode *T = CurDAG->getMachineNode(Hexagon::V6_vandqrt, dl, ResTy,
879 N->getOperand(0), SDValue(R,0));
880 ReplaceNode(N, T);
881}
882
883void HexagonDAGToDAGISel::Select(SDNode *N) {
884 if (N->isMachineOpcode())
885 return N->setNodeId(-1); // Already selected.
886
887 switch (N->getOpcode()) {
888 case ISD::Constant: return SelectConstant(N);
889 case ISD::ConstantFP: return SelectConstantFP(N);
890 case ISD::FrameIndex: return SelectFrameIndex(N);
891 case ISD::SHL: return SelectSHL(N);
892 case ISD::LOAD: return SelectLoad(N);
893 case ISD::STORE: return SelectStore(N);
894 case ISD::INTRINSIC_W_CHAIN: return SelectIntrinsicWChain(N);
895 case ISD::INTRINSIC_WO_CHAIN: return SelectIntrinsicWOChain(N);
896
897 case HexagonISD::ADDC:
898 case HexagonISD::SUBC: return SelectAddSubCarry(N);
899 case HexagonISD::VALIGN: return SelectVAlign(N);
900 case HexagonISD::VALIGNADDR: return SelectVAlignAddr(N);
901 case HexagonISD::TYPECAST: return SelectTypecast(N);
902 case HexagonISD::P2D: return SelectP2D(N);
903 case HexagonISD::D2P: return SelectD2P(N);
904 case HexagonISD::Q2V: return SelectQ2V(N);
905 case HexagonISD::V2Q: return SelectV2Q(N);
906 }
907
908 if (HST->useHVXOps()) {
909 switch (N->getOpcode()) {
910 case ISD::VECTOR_SHUFFLE: return SelectHvxShuffle(N);
911 case HexagonISD::VROR: return SelectHvxRor(N);
912 }
913 }
914
915 SelectCode(N);
916}
917
918bool HexagonDAGToDAGISel::
919SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID,
920 std::vector<SDValue> &OutOps) {
921 SDValue Inp = Op, Res;
922
923 switch (ConstraintID) {
924 default:
925 return true;
926 case InlineAsm::Constraint_o: // Offsetable.
927 case InlineAsm::Constraint_v: // Not offsetable.
928 case InlineAsm::Constraint_m: // Memory.
929 if (SelectAddrFI(Inp, Res))
930 OutOps.push_back(Res);
931 else
932 OutOps.push_back(Inp);
933 break;
934 }
935
936 OutOps.push_back(CurDAG->getTargetConstant(0, SDLoc(Op), MVT::i32));
937 return false;
938}
939
940
941static bool isMemOPCandidate(SDNode *I, SDNode *U) {
942 // I is an operand of U. Check if U is an arithmetic (binary) operation
943 // usable in a memop, where the other operand is a loaded value, and the
944 // result of U is stored in the same location.
945
946 if (!U->hasOneUse())
947 return false;
948 unsigned Opc = U->getOpcode();
949 switch (Opc) {
950 case ISD::ADD:
951 case ISD::SUB:
952 case ISD::AND:
953 case ISD::OR:
954 break;
955 default:
956 return false;
957 }
958
959 SDValue S0 = U->getOperand(0);
960 SDValue S1 = U->getOperand(1);
961 SDValue SY = (S0.getNode() == I) ? S1 : S0;
962
963 SDNode *UUse = *U->use_begin();
964 if (UUse->getNumValues() != 1)
965 return false;
966
967 // Check if one of the inputs to U is a load instruction and the output
968 // is used by a store instruction. If so and they also have the same
969 // base pointer, then don't preprocess this node sequence as it
970 // can be matched to a memop.
971 SDNode *SYNode = SY.getNode();
972 if (UUse->getOpcode() == ISD::STORE && SYNode->getOpcode() == ISD::LOAD) {
973 SDValue LDBasePtr = cast<MemSDNode>(SYNode)->getBasePtr();
974 SDValue STBasePtr = cast<MemSDNode>(UUse)->getBasePtr();
975 if (LDBasePtr == STBasePtr)
976 return true;
977 }
978 return false;
979}
980
981
982// Transform: (or (select c x 0) z) -> (select c (or x z) z)
983// (or (select c 0 y) z) -> (select c z (or y z))
984void HexagonDAGToDAGISel::ppSimplifyOrSelect0(std::vector<SDNode*> &&Nodes) {
985 SelectionDAG &DAG = *CurDAG;
986
987 for (auto *I : Nodes) {
988 if (I->getOpcode() != ISD::OR)
989 continue;
990
991 auto IsZero = [] (const SDValue &V) -> bool {
992 if (ConstantSDNode *SC = dyn_cast<ConstantSDNode>(V.getNode()))
993 return SC->isZero();
994 return false;
995 };
996 auto IsSelect0 = [IsZero] (const SDValue &Op) -> bool {
997 if (Op.getOpcode() != ISD::SELECT)
998 return false;
999 return IsZero(Op.getOperand(1)) || IsZero(Op.getOperand(2));
1000 };
1001
1002 SDValue N0 = I->getOperand(0), N1 = I->getOperand(1);
1003 EVT VT = I->getValueType(0);
1004 bool SelN0 = IsSelect0(N0);
1005 SDValue SOp = SelN0 ? N0 : N1;
1006 SDValue VOp = SelN0 ? N1 : N0;
1007
1008 if (SOp.getOpcode() == ISD::SELECT && SOp.getNode()->hasOneUse()) {
1009 SDValue SC = SOp.getOperand(0);
1010 SDValue SX = SOp.getOperand(1);
1011 SDValue SY = SOp.getOperand(2);
1012 SDLoc DLS = SOp;
1013 if (IsZero(SY)) {
1014 SDValue NewOr = DAG.getNode(ISD::OR, DLS, VT, SX, VOp);
1015 SDValue NewSel = DAG.getNode(ISD::SELECT, DLS, VT, SC, NewOr, VOp);
1016 DAG.ReplaceAllUsesWith(I, NewSel.getNode());
1017 } else if (IsZero(SX)) {
1018 SDValue NewOr = DAG.getNode(ISD::OR, DLS, VT, SY, VOp);
1019 SDValue NewSel = DAG.getNode(ISD::SELECT, DLS, VT, SC, VOp, NewOr);
1020 DAG.ReplaceAllUsesWith(I, NewSel.getNode());
1021 }
1022 }
1023 }
1024}
1025
1026// Transform: (store ch val (add x (add (shl y c) e)))
1027// to: (store ch val (add x (shl (add y d) c))),
1028// where e = (shl d c) for some integer d.
1029// The purpose of this is to enable generation of loads/stores with
1030// shifted addressing mode, i.e. mem(x+y<<#c). For that, the shift
1031// value c must be 0, 1 or 2.
1032void HexagonDAGToDAGISel::ppAddrReorderAddShl(std::vector<SDNode*> &&Nodes) {
1033 SelectionDAG &DAG = *CurDAG;
1034
1035 for (auto *I : Nodes) {
1036 if (I->getOpcode() != ISD::STORE)
1037 continue;
1038
1039 // I matched: (store ch val Off)
1040 SDValue Off = I->getOperand(2);
1041 // Off needs to match: (add x (add (shl y c) (shl d c))))
1042 if (Off.getOpcode() != ISD::ADD)
1043 continue;
1044 // Off matched: (add x T0)
1045 SDValue T0 = Off.getOperand(1);
1046 // T0 needs to match: (add T1 T2):
1047 if (T0.getOpcode() != ISD::ADD)
1048 continue;
1049 // T0 matched: (add T1 T2)
1050 SDValue T1 = T0.getOperand(0);
1051 SDValue T2 = T0.getOperand(1);
1052 // T1 needs to match: (shl y c)
1053 if (T1.getOpcode() != ISD::SHL)
1054 continue;
1055 SDValue C = T1.getOperand(1);
1056 ConstantSDNode *CN = dyn_cast<ConstantSDNode>(C.getNode());
1057 if (CN == nullptr)
1058 continue;
1059 unsigned CV = CN->getZExtValue();
1060 if (CV > 2)
1061 continue;
1062 // T2 needs to match e, where e = (shl d c) for some d.
1063 ConstantSDNode *EN = dyn_cast<ConstantSDNode>(T2.getNode());
1064 if (EN == nullptr)
1065 continue;
1066 unsigned EV = EN->getZExtValue();
1067 if (EV % (1 << CV) != 0)
1068 continue;
1069 unsigned DV = EV / (1 << CV);
1070
1071 // Replace T0 with: (shl (add y d) c)
1072 SDLoc DL = SDLoc(I);
1073 EVT VT = T0.getValueType();
1074 SDValue D = DAG.getConstant(DV, DL, VT);
1075 // NewAdd = (add y d)
1076 SDValue NewAdd = DAG.getNode(ISD::ADD, DL, VT, T1.getOperand(0), D);
1077 // NewShl = (shl NewAdd c)
1078 SDValue NewShl = DAG.getNode(ISD::SHL, DL, VT, NewAdd, C);
1079 ReplaceNode(T0.getNode(), NewShl.getNode());
1080 }
1081}
1082
1083// Transform: (load ch (add x (and (srl y c) Mask)))
1084// to: (load ch (add x (shl (srl y d) d-c)))
1085// where
1086// Mask = 00..0 111..1 0.0
1087// | | +-- d-c 0s, and d-c is 0, 1 or 2.
1088// | +-------- 1s
1089// +-------------- at most c 0s
1090// Motivating example:
1091// DAG combiner optimizes (add x (shl (srl y 5) 2))
1092// to (add x (and (srl y 3) 1FFFFFFC))
1093// which results in a constant-extended and(##...,lsr). This transformation
1094// undoes this simplification for cases where the shl can be folded into
1095// an addressing mode.
1096void HexagonDAGToDAGISel::ppAddrRewriteAndSrl(std::vector<SDNode*> &&Nodes) {
1097 SelectionDAG &DAG = *CurDAG;
1098
1099 for (SDNode *N : Nodes) {
1100 unsigned Opc = N->getOpcode();
1101 if (Opc != ISD::LOAD && Opc != ISD::STORE)
2
Assuming 'Opc' is equal to LOAD
1102 continue;
1103 SDValue Addr = Opc == ISD::LOAD ? N->getOperand(1) : N->getOperand(2);
2.1
'Opc' is equal to LOAD
3
'?' condition is true
1104 // Addr must match: (add x T0)
1105 if (Addr.getOpcode() != ISD::ADD)
4
Assuming the condition is false
5
Taking false branch
1106 continue;
1107 SDValue T0 = Addr.getOperand(1);
1108 // T0 must match: (and T1 Mask)
1109 if (T0.getOpcode() != ISD::AND)
6
Assuming the condition is false
7
Taking false branch
1110 continue;
1111
1112 // We have an AND.
1113 //
1114 // Check the first operand. It must be: (srl y c).
1115 SDValue S = T0.getOperand(0);
1116 if (S.getOpcode() != ISD::SRL)
8
Assuming the condition is false
9
Taking false branch
1117 continue;
1118 ConstantSDNode *SN = dyn_cast<ConstantSDNode>(S.getOperand(1).getNode());
10
Assuming the object is a 'CastReturnType'
1119 if (SN == nullptr)
11
Taking false branch
1120 continue;
1121 if (SN->getAPIntValue().getBitWidth() != 32)
12
Assuming the condition is false
13
Taking false branch
1122 continue;
1123 uint32_t CV = SN->getZExtValue();
1124
1125 // Check the second operand: the supposed mask.
1126 ConstantSDNode *MN = dyn_cast<ConstantSDNode>(T0.getOperand(1).getNode());
14
Assuming the object is a 'CastReturnType'
1127 if (MN == nullptr)
15
Taking false branch
1128 continue;
1129 if (MN->getAPIntValue().getBitWidth() != 32)
16
Assuming the condition is false
17
Taking false branch
1130 continue;
1131 uint32_t Mask = MN->getZExtValue();
1132 // Examine the mask.
1133 uint32_t TZ = countTrailingZeros(Mask);
18
Calling 'countTrailingZeros<unsigned int>'
25
Returning from 'countTrailingZeros<unsigned int>'
26
'TZ' initialized to 32
1134 uint32_t M1 = countTrailingOnes(Mask >> TZ);
27
The result of the right shift is undefined due to shifting by '32', which is greater or equal to the width of type 'uint32_t'
1135 uint32_t LZ = countLeadingZeros(Mask);
1136 // Trailing zeros + middle ones + leading zeros must equal the width.
1137 if (TZ + M1 + LZ != 32)
1138 continue;
1139 // The number of trailing zeros will be encoded in the addressing mode.
1140 if (TZ > 2)
1141 continue;
1142 // The number of leading zeros must be at most c.
1143 if (LZ > CV)
1144 continue;
1145
1146 // All looks good.
1147 SDValue Y = S.getOperand(0);
1148 EVT VT = Addr.getValueType();
1149 SDLoc dl(S);
1150 // TZ = D-C, so D = TZ+C.
1151 SDValue D = DAG.getConstant(TZ+CV, dl, VT);
1152 SDValue DC = DAG.getConstant(TZ, dl, VT);
1153 SDValue NewSrl = DAG.getNode(ISD::SRL, dl, VT, Y, D);
1154 SDValue NewShl = DAG.getNode(ISD::SHL, dl, VT, NewSrl, DC);
1155 ReplaceNode(T0.getNode(), NewShl.getNode());
1156 }
1157}
1158
1159// Transform: (op ... (zext i1 c) ...) -> (select c (op ... 0 ...)
1160// (op ... 1 ...))
1161void HexagonDAGToDAGISel::ppHoistZextI1(std::vector<SDNode*> &&Nodes) {
1162 SelectionDAG &DAG = *CurDAG;
1163
1164 for (SDNode *N : Nodes) {
1165 unsigned Opc = N->getOpcode();
1166 if (Opc != ISD::ZERO_EXTEND)
1167 continue;
1168 SDValue OpI1 = N->getOperand(0);
1169 EVT OpVT = OpI1.getValueType();
1170 if (!OpVT.isSimple() || OpVT.getSimpleVT() != MVT::i1)
1171 continue;
1172 for (auto I = N->use_begin(), E = N->use_end(); I != E; ++I) {
1173 SDNode *U = *I;
1174 if (U->getNumValues() != 1)
1175 continue;
1176 EVT UVT = U->getValueType(0);
1177 if (!UVT.isSimple() || !UVT.isInteger() || UVT.getSimpleVT() == MVT::i1)
1178 continue;
1179 // Do not generate select for all i1 vector type.
1180 if (UVT.isVector() && UVT.getVectorElementType() == MVT::i1)
1181 continue;
1182 if (isMemOPCandidate(N, U))
1183 continue;
1184
1185 // Potentially simplifiable operation.
1186 unsigned I1N = I.getOperandNo();
1187 SmallVector<SDValue,2> Ops(U->getNumOperands());
1188 for (unsigned i = 0, n = U->getNumOperands(); i != n; ++i)
1189 Ops[i] = U->getOperand(i);
1190 EVT BVT = Ops[I1N].getValueType();
1191
1192 const SDLoc &dl(U);
1193 SDValue C0 = DAG.getConstant(0, dl, BVT);
1194 SDValue C1 = DAG.getConstant(1, dl, BVT);
1195 SDValue If0, If1;
1196
1197 if (isa<MachineSDNode>(U)) {
1198 unsigned UseOpc = U->getMachineOpcode();
1199 Ops[I1N] = C0;
1200 If0 = SDValue(DAG.getMachineNode(UseOpc, dl, UVT, Ops), 0);
1201 Ops[I1N] = C1;
1202 If1 = SDValue(DAG.getMachineNode(UseOpc, dl, UVT, Ops), 0);
1203 } else {
1204 unsigned UseOpc = U->getOpcode();
1205 Ops[I1N] = C0;
1206 If0 = DAG.getNode(UseOpc, dl, UVT, Ops);
1207 Ops[I1N] = C1;
1208 If1 = DAG.getNode(UseOpc, dl, UVT, Ops);
1209 }
1210 // We're generating a SELECT way after legalization, so keep the types
1211 // simple.
1212 unsigned UW = UVT.getSizeInBits();
1213 EVT SVT = (UW == 32 || UW == 64) ? MVT::getIntegerVT(UW) : UVT;
1214 SDValue Sel = DAG.getNode(ISD::SELECT, dl, SVT, OpI1,
1215 DAG.getBitcast(SVT, If1),
1216 DAG.getBitcast(SVT, If0));
1217 SDValue Ret = DAG.getBitcast(UVT, Sel);
1218 DAG.ReplaceAllUsesWith(U, Ret.getNode());
1219 }
1220 }
1221}
1222
1223void HexagonDAGToDAGISel::PreprocessISelDAG() {
1224 // Repack all nodes before calling each preprocessing function,
1225 // because each of them can modify the set of nodes.
1226 auto getNodes = [this] () -> std::vector<SDNode*> {
1227 std::vector<SDNode*> T;
1228 T.reserve(CurDAG->allnodes_size());
1229 for (SDNode &N : CurDAG->allnodes())
1230 T.push_back(&N);
1231 return T;
1232 };
1233
1234 // Transform: (or (select c x 0) z) -> (select c (or x z) z)
1235 // (or (select c 0 y) z) -> (select c z (or y z))
1236 ppSimplifyOrSelect0(getNodes());
1237
1238 // Transform: (store ch val (add x (add (shl y c) e)))
1239 // to: (store ch val (add x (shl (add y d) c))),
1240 // where e = (shl d c) for some integer d.
1241 // The purpose of this is to enable generation of loads/stores with
1242 // shifted addressing mode, i.e. mem(x+y<<#c). For that, the shift
1243 // value c must be 0, 1 or 2.
1244 ppAddrReorderAddShl(getNodes());
1245
1246 // Transform: (load ch (add x (and (srl y c) Mask)))
1247 // to: (load ch (add x (shl (srl y d) d-c)))
1248 // where
1249 // Mask = 00..0 111..1 0.0
1250 // | | +-- d-c 0s, and d-c is 0, 1 or 2.
1251 // | +-------- 1s
1252 // +-------------- at most c 0s
1253 // Motivating example:
1254 // DAG combiner optimizes (add x (shl (srl y 5) 2))
1255 // to (add x (and (srl y 3) 1FFFFFFC))
1256 // which results in a constant-extended and(##...,lsr). This transformation
1257 // undoes this simplification for cases where the shl can be folded into
1258 // an addressing mode.
1259 ppAddrRewriteAndSrl(getNodes());
1
Calling 'HexagonDAGToDAGISel::ppAddrRewriteAndSrl'
1260
1261 // Transform: (op ... (zext i1 c) ...) -> (select c (op ... 0 ...)
1262 // (op ... 1 ...))
1263 ppHoistZextI1(getNodes());
1264
1265 DEBUG_WITH_TYPE("isel", {
1266 dbgs() << "Preprocessed (Hexagon) selection DAG:";
1267 CurDAG->dump();
1268 });
1269
1270 if (EnableAddressRebalancing) {
1271 rebalanceAddressTrees();
1272
1273 DEBUG_WITH_TYPE("isel", {
1274 dbgs() << "Address tree balanced selection DAG:";
1275 CurDAG->dump();
1276 });
1277 }
1278}
1279
1280void HexagonDAGToDAGISel::emitFunctionEntryCode() {
1281 auto &HST = MF->getSubtarget<HexagonSubtarget>();
1282 auto &HFI = *HST.getFrameLowering();
1283 if (!HFI.needsAligna(*MF))
1284 return;
1285
1286 MachineFrameInfo &MFI = MF->getFrameInfo();
1287 MachineBasicBlock *EntryBB = &MF->front();
1288 Register AR = FuncInfo->CreateReg(MVT::i32);
1289 Align EntryMaxA = MFI.getMaxAlign();
1290 BuildMI(EntryBB, DebugLoc(), HII->get(Hexagon::PS_aligna), AR)
1291 .addImm(EntryMaxA.value());
1292 MF->getInfo<HexagonMachineFunctionInfo>()->setStackAlignBaseVReg(AR);
1293}
1294
1295void HexagonDAGToDAGISel::updateAligna() {
1296 auto &HFI = *MF->getSubtarget<HexagonSubtarget>().getFrameLowering();
1297 if (!HFI.needsAligna(*MF))
1298 return;
1299 auto *AlignaI = const_cast<MachineInstr*>(HFI.getAlignaInstr(*MF));
1300 assert(AlignaI != nullptr);
1301 unsigned MaxA = MF->getFrameInfo().getMaxAlign().value();
1302 if (AlignaI->getOperand(1).getImm() < MaxA)
1303 AlignaI->getOperand(1).setImm(MaxA);
1304}
1305
1306// Match a frame index that can be used in an addressing mode.
1307bool HexagonDAGToDAGISel::SelectAddrFI(SDValue &N, SDValue &R) {
1308 if (N.getOpcode() != ISD::FrameIndex)
1309 return false;
1310 auto &HFI = *HST->getFrameLowering();
1311 MachineFrameInfo &MFI = MF->getFrameInfo();
1312 int FX = cast<FrameIndexSDNode>(N)->getIndex();
1313 if (!MFI.isFixedObjectIndex(FX) && HFI.needsAligna(*MF))
1314 return false;
1315 R = CurDAG->getTargetFrameIndex(FX, MVT::i32);
1316 return true;
1317}
1318
1319inline bool HexagonDAGToDAGISel::SelectAddrGA(SDValue &N, SDValue &R) {
1320 return SelectGlobalAddress(N, R, false, Align(1));
1321}
1322
1323inline bool HexagonDAGToDAGISel::SelectAddrGP(SDValue &N, SDValue &R) {
1324 return SelectGlobalAddress(N, R, true, Align(1));
1325}
1326
1327inline bool HexagonDAGToDAGISel::SelectAnyImm(SDValue &N, SDValue &R) {
1328 return SelectAnyImmediate(N, R, Align(1));
1329}
1330
1331inline bool HexagonDAGToDAGISel::SelectAnyImm0(SDValue &N, SDValue &R) {
1332 return SelectAnyImmediate(N, R, Align(1));
1333}
1334inline bool HexagonDAGToDAGISel::SelectAnyImm1(SDValue &N, SDValue &R) {
1335 return SelectAnyImmediate(N, R, Align(2));
1336}
1337inline bool HexagonDAGToDAGISel::SelectAnyImm2(SDValue &N, SDValue &R) {
1338 return SelectAnyImmediate(N, R, Align(4));
1339}
1340inline bool HexagonDAGToDAGISel::SelectAnyImm3(SDValue &N, SDValue &R) {
1341 return SelectAnyImmediate(N, R, Align(8));
1342}
1343
1344inline bool HexagonDAGToDAGISel::SelectAnyInt(SDValue &N, SDValue &R) {
1345 EVT T = N.getValueType();
1346 if (!T.isInteger() || T.getSizeInBits() != 32 || !isa<ConstantSDNode>(N))
1347 return false;
1348 int32_t V = cast<const ConstantSDNode>(N)->getZExtValue();
1349 R = CurDAG->getTargetConstant(V, SDLoc(N), N.getValueType());
1350 return true;
1351}
1352
1353bool HexagonDAGToDAGISel::SelectAnyImmediate(SDValue &N, SDValue &R,
1354 Align Alignment) {
1355 switch (N.getOpcode()) {
1356 case ISD::Constant: {
1357 if (N.getValueType() != MVT::i32)
1358 return false;
1359 int32_t V = cast<const ConstantSDNode>(N)->getZExtValue();
1360 if (!isAligned(Alignment, V))
1361 return false;
1362 R = CurDAG->getTargetConstant(V, SDLoc(N), N.getValueType());
1363 return true;
1364 }
1365 case HexagonISD::JT:
1366 case HexagonISD::CP:
1367 // These are assumed to always be aligned at least 8-byte boundary.
1368 if (Alignment > Align(8))
1369 return false;
1370 R = N.getOperand(0);
1371 return true;
1372 case ISD::ExternalSymbol:
1373 // Symbols may be aligned at any boundary.
1374 if (Alignment > Align(1))
1375 return false;
1376 R = N;
1377 return true;
1378 case ISD::BlockAddress:
1379 // Block address is always aligned at least 4-byte boundary.
1380 if (Alignment > Align(4) ||
1381 !isAligned(Alignment, cast<BlockAddressSDNode>(N)->getOffset()))
1382 return false;
1383 R = N;
1384 return true;
1385 }
1386
1387 if (SelectGlobalAddress(N, R, false, Alignment) ||
1388 SelectGlobalAddress(N, R, true, Alignment))
1389 return true;
1390
1391 return false;
1392}
1393
1394bool HexagonDAGToDAGISel::SelectGlobalAddress(SDValue &N, SDValue &R,
1395 bool UseGP, Align Alignment) {
1396 switch (N.getOpcode()) {
1397 case ISD::ADD: {
1398 SDValue N0 = N.getOperand(0);
1399 SDValue N1 = N.getOperand(1);
1400 unsigned GAOpc = N0.getOpcode();
1401 if (UseGP && GAOpc != HexagonISD::CONST32_GP)
1402 return false;
1403 if (!UseGP && GAOpc != HexagonISD::CONST32)
1404 return false;
1405 if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(N1)) {
1406 if (!isAligned(Alignment, Const->getZExtValue()))
1407 return false;
1408 SDValue Addr = N0.getOperand(0);
1409 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Addr)) {
1410 if (GA->getOpcode() == ISD::TargetGlobalAddress) {
1411 uint64_t NewOff = GA->getOffset() + (uint64_t)Const->getSExtValue();
1412 R = CurDAG->getTargetGlobalAddress(GA->getGlobal(), SDLoc(Const),
1413 N.getValueType(), NewOff);
1414 return true;
1415 }
1416 }
1417 }
1418 break;
1419 }
1420 case HexagonISD::CP:
1421 case HexagonISD::JT:
1422 case HexagonISD::CONST32:
1423 // The operand(0) of CONST32 is TargetGlobalAddress, which is what we
1424 // want in the instruction.
1425 if (!UseGP)
1426 R = N.getOperand(0);
1427 return !UseGP;
1428 case HexagonISD::CONST32_GP:
1429 if (UseGP)
1430 R = N.getOperand(0);
1431 return UseGP;
1432 default:
1433 return false;
1434 }
1435
1436 return false;
1437}
1438
1439bool HexagonDAGToDAGISel::DetectUseSxtw(SDValue &N, SDValue &R) {
1440 // This (complex pattern) function is meant to detect a sign-extension
1441 // i32->i64 on a per-operand basis. This would allow writing single
1442 // patterns that would cover a number of combinations of different ways
1443 // a sign-extension could be written. For example:
1444 // (mul (DetectUseSxtw x) (DetectUseSxtw y)) -> (M2_dpmpyss_s0 x y)
1445 // could match either one of these:
1446 // (mul (sext x) (sext_inreg y))
1447 // (mul (sext-load *p) (sext_inreg y))
1448 // (mul (sext_inreg x) (sext y))
1449 // etc.
1450 //
1451 // The returned value will have type i64 and its low word will
1452 // contain the value being extended. The high bits are not specified.
1453 // The returned type is i64 because the original type of N was i64,
1454 // but the users of this function should only use the low-word of the
1455 // result, e.g.
1456 // (mul sxtw:x, sxtw:y) -> (M2_dpmpyss_s0 (LoReg sxtw:x), (LoReg sxtw:y))
1457
1458 if (N.getValueType() != MVT::i64)
1459 return false;
1460 unsigned Opc = N.getOpcode();
1461 switch (Opc) {
1462 case ISD::SIGN_EXTEND:
1463 case ISD::SIGN_EXTEND_INREG: {
1464 // sext_inreg has the source type as a separate operand.
1465 EVT T = Opc == ISD::SIGN_EXTEND
1466 ? N.getOperand(0).getValueType()
1467 : cast<VTSDNode>(N.getOperand(1))->getVT();
1468 unsigned SW = T.getSizeInBits();
1469 if (SW == 32)
1470 R = N.getOperand(0);
1471 else if (SW < 32)
1472 R = N;
1473 else
1474 return false;
1475 break;
1476 }
1477 case ISD::LOAD: {
1478 LoadSDNode *L = cast<LoadSDNode>(N);
1479 if (L->getExtensionType() != ISD::SEXTLOAD)
1480 return false;
1481 // All extending loads extend to i32, so even if the value in
1482 // memory is shorter than 32 bits, it will be i32 after the load.
1483 if (L->getMemoryVT().getSizeInBits() > 32)
1484 return false;
1485 R = N;
1486 break;
1487 }
1488 case ISD::SRA: {
1489 auto *S = dyn_cast<ConstantSDNode>(N.getOperand(1));
1490 if (!S || S->getZExtValue() != 32)
1491 return false;
1492 R = N;
1493 break;
1494 }
1495 default:
1496 return false;
1497 }
1498 EVT RT = R.getValueType();
1499 if (RT == MVT::i64)
1500 return true;
1501 assert(RT == MVT::i32);
1502 // This is only to produce a value of type i64. Do not rely on the
1503 // high bits produced by this.
1504 const SDLoc &dl(N);
1505 SDValue Ops[] = {
1506 CurDAG->getTargetConstant(Hexagon::DoubleRegsRegClassID, dl, MVT::i32),
1507 R, CurDAG->getTargetConstant(Hexagon::isub_hi, dl, MVT::i32),
1508 R, CurDAG->getTargetConstant(Hexagon::isub_lo, dl, MVT::i32)
1509 };
1510 SDNode *T = CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl,
1511 MVT::i64, Ops);
1512 R = SDValue(T, 0);
1513 return true;
1514}
1515
1516bool HexagonDAGToDAGISel::keepsLowBits(const SDValue &Val, unsigned NumBits,
1517 SDValue &Src) {
1518 unsigned Opc = Val.getOpcode();
1519 switch (Opc) {
1520 case ISD::SIGN_EXTEND:
1521 case ISD::ZERO_EXTEND:
1522 case ISD::ANY_EXTEND: {
1523 const SDValue &Op0 = Val.getOperand(0);
1524 EVT T = Op0.getValueType();
1525 if (T.isInteger() && T.getSizeInBits() == NumBits) {
1526 Src = Op0;
1527 return true;
1528 }
1529 break;
1530 }
1531 case ISD::SIGN_EXTEND_INREG:
1532 case ISD::AssertSext:
1533 case ISD::AssertZext:
1534 if (Val.getOperand(0).getValueType().isInteger()) {
1535 VTSDNode *T = cast<VTSDNode>(Val.getOperand(1));
1536 if (T->getVT().getSizeInBits() == NumBits) {
1537 Src = Val.getOperand(0);
1538 return true;
1539 }
1540 }
1541 break;
1542 case ISD::AND: {
1543 // Check if this is an AND with NumBits of lower bits set to 1.
1544 uint64_t Mask = (1ULL << NumBits) - 1;
1545 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val.getOperand(0))) {
1546 if (C->getZExtValue() == Mask) {
1547 Src = Val.getOperand(1);
1548 return true;
1549 }
1550 }
1551 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val.getOperand(1))) {
1552 if (C->getZExtValue() == Mask) {
1553 Src = Val.getOperand(0);
1554 return true;
1555 }
1556 }
1557 break;
1558 }
1559 case ISD::OR:
1560 case ISD::XOR: {
1561 // OR/XOR with the lower NumBits bits set to 0.
1562 uint64_t Mask = (1ULL << NumBits) - 1;
1563 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val.getOperand(0))) {
1564 if ((C->getZExtValue() & Mask) == 0) {
1565 Src = Val.getOperand(1);
1566 return true;
1567 }
1568 }
1569 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val.getOperand(1))) {
1570 if ((C->getZExtValue() & Mask) == 0) {
1571 Src = Val.getOperand(0);
1572 return true;
1573 }
1574 }
1575 break;
1576 }
1577 default:
1578 break;
1579 }
1580 return false;
1581}
1582
1583bool HexagonDAGToDAGISel::isAlignedMemNode(const MemSDNode *N) const {
1584 return N->getAlign().value() >= N->getMemoryVT().getStoreSize();
1585}
1586
1587bool HexagonDAGToDAGISel::isSmallStackStore(const StoreSDNode *N) const {
1588 unsigned StackSize = MF->getFrameInfo().estimateStackSize(*MF);
1589 switch (N->getMemoryVT().getStoreSize()) {
1590 case 1:
1591 return StackSize <= 56; // 1*2^6 - 8
1592 case 2:
1593 return StackSize <= 120; // 2*2^6 - 8
1594 case 4:
1595 return StackSize <= 248; // 4*2^6 - 8
1596 default:
1597 return false;
1598 }
1599}
1600
1601// Return true when the given node fits in a positive half word.
1602bool HexagonDAGToDAGISel::isPositiveHalfWord(const SDNode *N) const {
1603 if (const ConstantSDNode *CN = dyn_cast<const ConstantSDNode>(N)) {
1604 int64_t V = CN->getSExtValue();
1605 return V > 0 && isInt<16>(V);
1606 }
1607 if (N->getOpcode() == ISD::SIGN_EXTEND_INREG) {
1608 const VTSDNode *VN = dyn_cast<const VTSDNode>(N->getOperand(1));
1609 return VN->getVT().getSizeInBits() <= 16;
1610 }
1611 return false;
1612}
1613
1614bool HexagonDAGToDAGISel::hasOneUse(const SDNode *N) const {
1615 return !CheckSingleUse || N->hasOneUse();
1616}
1617
1618////////////////////////////////////////////////////////////////////////////////
1619// Rebalancing of address calculation trees
1620
1621static bool isOpcodeHandled(const SDNode *N) {
1622 switch (N->getOpcode()) {
1623 case ISD::ADD:
1624 case ISD::MUL:
1625 return true;
1626 case ISD::SHL:
1627 // We only handle constant shifts because these can be easily flattened
1628 // into multiplications by 2^Op1.
1629 return isa<ConstantSDNode>(N->getOperand(1).getNode());
1630 default:
1631 return false;
1632 }
1633}
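
The SHL case above leans on the identity x << c == x * 2^c for a constant c, which is what getMultiplierForSHL materializes later in this file. A minimal standalone sketch of that identity (illustrative only, not part of the analyzed source):

  #include <cstdint>
  // A constant left shift has the same value as a multiply by a power of two,
  // which is why the rebalancer can treat constant SHL nodes as MUL nodes.
  uint64_t shiftAsMultiply(uint64_t x, unsigned c) {
    uint64_t Factor = UINT64_C(1) << c; // assumes c < 64, like 1ull << Op1 below
    return x * Factor;                  // equal to x << c
  }
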
1634
1635/// Return the weight of an SDNode
1636int HexagonDAGToDAGISel::getWeight(SDNode *N) {
1637 if (!isOpcodeHandled(N))
1638 return 1;
1639 assert(RootWeights.count(N) && "Cannot get weight of unseen root!");
1640 assert(RootWeights[N] != -1 && "Cannot get weight of unvisited root!");
1641 assert(RootWeights[N] != -2 && "Cannot get weight of RAUW'd root!");
1642 return RootWeights[N];
1643}
1644
1645int HexagonDAGToDAGISel::getHeight(SDNode *N) {
1646 if (!isOpcodeHandled(N))
1647 return 0;
1648 assert(RootWeights.count(N) && RootWeights[N] >= 0 &&
1649        "Cannot query height of unvisited/RAUW'd node!");
1650 return RootHeights[N];
1651}
1652
1653namespace {
1654struct WeightedLeaf {
1655 SDValue Value;
1656 int Weight;
1657 int InsertionOrder;
1658
1659 WeightedLeaf() {}
1660
1661 WeightedLeaf(SDValue Value, int Weight, int InsertionOrder) :
1662 Value(Value), Weight(Weight), InsertionOrder(InsertionOrder) {
1663 assert(Weight >= 0 && "Weight must be >= 0");
1664 }
1665
1666 static bool Compare(const WeightedLeaf &A, const WeightedLeaf &B) {
1667 assert(A.Value.getNode() && B.Value.getNode());
1668 return A.Weight == B.Weight ?
1669 (A.InsertionOrder > B.InsertionOrder) :
1670 (A.Weight > B.Weight);
1671 }
1672};
1673
1674/// A specialized priority queue for WeightedLeaves. It automatically folds
1675/// constants and allows removal of non-top elements while maintaining the
1676/// priority order.
1677class LeafPrioQueue {
1678 SmallVector<WeightedLeaf, 8> Q;
1679 bool HaveConst;
1680 WeightedLeaf ConstElt;
1681 unsigned Opcode;
1682
1683public:
1684 bool empty() {
1685 return (!HaveConst && Q.empty());
1686 }
1687
1688 size_t size() {
1689 return Q.size() + HaveConst;
1690 }
1691
1692 bool hasConst() {
1693 return HaveConst;
1694 }
1695
1696 const WeightedLeaf &top() {
1697 if (HaveConst)
1698 return ConstElt;
1699 return Q.front();
1700 }
1701
1702 WeightedLeaf pop() {
1703 if (HaveConst) {
1704 HaveConst = false;
1705 return ConstElt;
1706 }
1707 std::pop_heap(Q.begin(), Q.end(), WeightedLeaf::Compare);
1708 return Q.pop_back_val();
1709 }
1710
1711 void push(WeightedLeaf L, bool SeparateConst=true) {
1712 if (!HaveConst && SeparateConst && isa<ConstantSDNode>(L.Value)) {
1713 if (Opcode == ISD::MUL &&
1714 cast<ConstantSDNode>(L.Value)->getSExtValue() == 1)
1715 return;
1716 if (Opcode == ISD::ADD &&
1717 cast<ConstantSDNode>(L.Value)->getSExtValue() == 0)
1718 return;
1719
1720 HaveConst = true;
1721 ConstElt = L;
1722 } else {
1723 Q.push_back(L);
1724 std::push_heap(Q.begin(), Q.end(), WeightedLeaf::Compare);
1725 }
1726 }
1727
1728 /// Push L to the bottom of the queue regardless of its weight. If L is
1729 /// constant, it will not be folded with other constants in the queue.
1730 void pushToBottom(WeightedLeaf L) {
1731 L.Weight = 1000;
1732 push(L, false);
1733 }
1734
1735 /// Search for a SHL(x, [<=MaxAmount]) subtree in the queue, return the one of
1736 /// lowest weight and remove it from the queue.
1737 WeightedLeaf findSHL(uint64_t MaxAmount);
1738
1739 WeightedLeaf findMULbyConst();
1740
1741 LeafPrioQueue(unsigned Opcode) :
1742 HaveConst(false), Opcode(Opcode) { }
1743};
1744} // end anonymous namespace
1745
1746WeightedLeaf LeafPrioQueue::findSHL(uint64_t MaxAmount) {
1747 int ResultPos;
1748 WeightedLeaf Result;
1749
1750 for (int Pos = 0, End = Q.size(); Pos != End; ++Pos) {
1751 const WeightedLeaf &L = Q[Pos];
1752 const SDValue &Val = L.Value;
1753 if (Val.getOpcode() != ISD::SHL ||
1754 !isa<ConstantSDNode>(Val.getOperand(1)) ||
1755 Val.getConstantOperandVal(1) > MaxAmount)
1756 continue;
1757 if (!Result.Value.getNode() || Result.Weight > L.Weight ||
1758 (Result.Weight == L.Weight && Result.InsertionOrder > L.InsertionOrder))
1759 {
1760 Result = L;
1761 ResultPos = Pos;
1762 }
1763 }
1764
1765 if (Result.Value.getNode()) {
1766 Q.erase(&Q[ResultPos]);
1767 std::make_heap(Q.begin(), Q.end(), WeightedLeaf::Compare);
1768 }
1769
1770 return Result;
1771}
1772
1773WeightedLeaf LeafPrioQueue::findMULbyConst() {
1774 int ResultPos;
1775 WeightedLeaf Result;
1776
1777 for (int Pos = 0, End = Q.size(); Pos != End; ++Pos) {
1778 const WeightedLeaf &L = Q[Pos];
1779 const SDValue &Val = L.Value;
1780 if (Val.getOpcode() != ISD::MUL ||
1781 !isa<ConstantSDNode>(Val.getOperand(1)) ||
1782 Val.getConstantOperandVal(1) > 127)
1783 continue;
1784 if (!Result.Value.getNode() || Result.Weight > L.Weight ||
1785 (Result.Weight == L.Weight && Result.InsertionOrder > L.InsertionOrder))
1786 {
1787 Result = L;
1788 ResultPos = Pos;
1789 }
1790 }
1791
1792 if (Result.Value.getNode()) {
1793 Q.erase(&Q[ResultPos]);
1794 std::make_heap(Q.begin(), Q.end(), WeightedLeaf::Compare);
1795 }
1796
1797 return Result;
1798}
1799
1800SDValue HexagonDAGToDAGISel::getMultiplierForSHL(SDNode *N) {
1801 uint64_t MulFactor = 1ull << N->getConstantOperandVal(1);
1802 return CurDAG->getConstant(MulFactor, SDLoc(N),
1803 N->getOperand(1).getValueType());
1804}
1805
1806/// @returns the value x for which 2^x is a factor of Val
1807static unsigned getPowerOf2Factor(SDValue Val) {
1808 if (Val.getOpcode() == ISD::MUL) {
1809 unsigned MaxFactor = 0;
1810 for (int i = 0; i < 2; ++i) {
1811 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val.getOperand(i));
1812 if (!C)
1813 continue;
1814 const APInt &CInt = C->getAPIntValue();
1815 if (CInt.getBoolValue())
1816 MaxFactor = CInt.countTrailingZeros();
1817 }
1818 return MaxFactor;
1819 }
1820 if (Val.getOpcode() == ISD::SHL) {
1821 if (!isa<ConstantSDNode>(Val.getOperand(1).getNode()))
1822 return 0;
1823 return (unsigned) Val.getConstantOperandVal(1);
1824 }
1825
1826 return 0;
1827}
1828
1829/// @returns true if V>>Amount will eliminate V's operation on its child
1830static bool willShiftRightEliminate(SDValue V, unsigned Amount) {
1831 if (V.getOpcode() == ISD::MUL) {
1832 SDValue Ops[] = { V.getOperand(0), V.getOperand(1) };
1833 for (int i = 0; i < 2; ++i)
1834 if (isa<ConstantSDNode>(Ops[i].getNode()) &&
1835 V.getConstantOperandVal(i) % (1ULL << Amount) == 0) {
1836 uint64_t NewConst = V.getConstantOperandVal(i) >> Amount;
1837 return (NewConst == 1);
1838 }
1839 } else if (V.getOpcode() == ISD::SHL) {
1840 return (Amount == V.getConstantOperandVal(1));
1841 }
1842
1843 return false;
1844}
1845
1846SDValue HexagonDAGToDAGISel::factorOutPowerOf2(SDValue V, unsigned Power) {
1847 SDValue Ops[] = { V.getOperand(0), V.getOperand(1) };
1848 if (V.getOpcode() == ISD::MUL) {
1849 for (int i=0; i < 2; ++i) {
1850 if (isa<ConstantSDNode>(Ops[i].getNode()) &&
1851 V.getConstantOperandVal(i) % ((uint64_t)1 << Power) == 0) {
1852 uint64_t NewConst = V.getConstantOperandVal(i) >> Power;
1853 if (NewConst == 1)
1854 return Ops[!i];
1855 Ops[i] = CurDAG->getConstant(NewConst,
1856 SDLoc(V), V.getValueType());
1857 break;
1858 }
1859 }
1860 } else if (V.getOpcode() == ISD::SHL) {
1861 uint64_t ShiftAmount = V.getConstantOperandVal(1);
1862 if (ShiftAmount == Power)
1863 return Ops[0];
1864 Ops[1] = CurDAG->getConstant(ShiftAmount - Power,
1865 SDLoc(V), V.getValueType());
1866 }
1867
1868 return CurDAG->getNode(V.getOpcode(), SDLoc(V), V.getValueType(), Ops);
1869}
1870
1871static bool isTargetConstant(const SDValue &V) {
1872 return V.getOpcode() == HexagonISD::CONST32 ||
1873 V.getOpcode() == HexagonISD::CONST32_GP;
1874}
1875
1876unsigned HexagonDAGToDAGISel::getUsesInFunction(const Value *V) {
1877 if (GAUsesInFunction.count(V))
1878 return GAUsesInFunction[V];
1879
1880 unsigned Result = 0;
1881 const Function &CurF = CurDAG->getMachineFunction().getFunction();
1882 for (const User *U : V->users()) {
1883 if (isa<Instruction>(U) &&
1884 cast<Instruction>(U)->getParent()->getParent() == &CurF)
1885 ++Result;
1886 }
1887
1888 GAUsesInFunction[V] = Result;
1889
1890 return Result;
1891}
1892
1893/// Note - After calling this, N may be dead. It may have been replaced by a
1894/// new node, so always use the returned value in place of N.
1895///
1896/// @returns The SDValue taking the place of N (which could be N if it is
1897/// unchanged)
1898SDValue HexagonDAGToDAGISel::balanceSubTree(SDNode *N, bool TopLevel) {
1899 assert(RootWeights.count(N) && "Cannot balance non-root node.");
1900 assert(RootWeights[N] != -2 && "This node was RAUW'd!");
1901 assert(!TopLevel || N->getOpcode() == ISD::ADD);
1902
1903 // Return early if this node was already visited
1904 if (RootWeights[N] != -1)
1905 return SDValue(N, 0);
1906
1907 assert(isOpcodeHandled(N));
1908
1909 SDValue Op0 = N->getOperand(0);
1910 SDValue Op1 = N->getOperand(1);
1911
1912 // Return early if the operands will remain unchanged or are all roots
1913 if ((!isOpcodeHandled(Op0.getNode()) || RootWeights.count(Op0.getNode())) &&
1914 (!isOpcodeHandled(Op1.getNode()) || RootWeights.count(Op1.getNode()))) {
1915 SDNode *Op0N = Op0.getNode();
1916 int Weight;
1917 if (isOpcodeHandled(Op0N) && RootWeights[Op0N] == -1) {
1918 Weight = getWeight(balanceSubTree(Op0N).getNode());
1919 // Weight = calculateWeight(Op0N);
1920 } else
1921 Weight = getWeight(Op0N);
1922
1923 SDNode *Op1N = N->getOperand(1).getNode(); // Op1 may have been RAUWd
1924 if (isOpcodeHandled(Op1N) && RootWeights[Op1N] == -1) {
1925 Weight += getWeight(balanceSubTree(Op1N).getNode());
1926 // Weight += calculateWeight(Op1N);
1927 } else
1928 Weight += getWeight(Op1N);
1929
1930 RootWeights[N] = Weight;
1931 RootHeights[N] = std::max(getHeight(N->getOperand(0).getNode()),
1932 getHeight(N->getOperand(1).getNode())) + 1;
1933
1934 LLVM_DEBUG(dbgs() << "--> No need to balance root (Weight=" << Weight
1935                   << " Height=" << RootHeights[N] << "): ");
1936 LLVM_DEBUG(N->dump(CurDAG));
1937
1938 return SDValue(N, 0);
1939 }
1940
1941 LLVM_DEBUG(dbgs() << "** Balancing root node: ");
1942 LLVM_DEBUG(N->dump(CurDAG));
1943
1944 unsigned NOpcode = N->getOpcode();
1945
1946 LeafPrioQueue Leaves(NOpcode);
1947 SmallVector<SDValue, 4> Worklist;
1948 Worklist.push_back(SDValue(N, 0));
1949
1950 // SHL nodes will be converted to MUL nodes
1951 if (NOpcode == ISD::SHL)
1952 NOpcode = ISD::MUL;
1953
1954 bool CanFactorize = false;
1955 WeightedLeaf Mul1, Mul2;
1956 unsigned MaxPowerOf2 = 0;
1957 WeightedLeaf GA;
1958
1959 // Do not try to factor out a shift if there is already a shift at the tip of
1960 // the tree.
1961 bool HaveTopLevelShift = false;
1962 if (TopLevel &&
1963 ((isOpcodeHandled(Op0.getNode()) && Op0.getOpcode() == ISD::SHL &&
1964 Op0.getConstantOperandVal(1) < 4) ||
1965 (isOpcodeHandled(Op1.getNode()) && Op1.getOpcode() == ISD::SHL &&
1966 Op1.getConstantOperandVal(1) < 4)))
1967 HaveTopLevelShift = true;
1968
1969 // Flatten the subtree into an ordered list of leaves; at the same time
1970 // determine whether the tree is already balanced.
1971 int InsertionOrder = 0;
1972 SmallDenseMap<SDValue, int> NodeHeights;
1973 bool Imbalanced = false;
1974 int CurrentWeight = 0;
1975 while (!Worklist.empty()) {
1976 SDValue Child = Worklist.pop_back_val();
1977
1978 if (Child.getNode() != N && RootWeights.count(Child.getNode())) {
1979 // CASE 1: Child is a root node
1980
1981 int Weight = RootWeights[Child.getNode()];
1982 if (Weight == -1) {
1983 Child = balanceSubTree(Child.getNode());
1984 // calculateWeight(Child.getNode());
1985 Weight = getWeight(Child.getNode());
1986 } else if (Weight == -2) {
1987 // Whoops, this node was RAUWd by one of the balanceSubTree calls we
1988 // made. Our worklist isn't up to date anymore.
1989 // Restart the whole process.
1990 LLVM_DEBUG(dbgs() << "--> Subtree was RAUWd. Restarting...\n");
1991 return balanceSubTree(N, TopLevel);
1992 }
1993
1994 NodeHeights[Child] = 1;
1995 CurrentWeight += Weight;
1996
1997 unsigned PowerOf2;
1998 if (TopLevel && !CanFactorize && !HaveTopLevelShift &&
1999 (Child.getOpcode() == ISD::MUL || Child.getOpcode() == ISD::SHL) &&
2000 Child.hasOneUse() && (PowerOf2 = getPowerOf2Factor(Child))) {
2001 // Try to identify two factorizable MUL/SHL children greedily. Leave
2002 // them out of the priority queue for now so we can deal with them
2003 // after.
2004 if (!Mul1.Value.getNode()) {
2005 Mul1 = WeightedLeaf(Child, Weight, InsertionOrder++);
2006 MaxPowerOf2 = PowerOf2;
2007 } else {
2008 Mul2 = WeightedLeaf(Child, Weight, InsertionOrder++);
2009 MaxPowerOf2 = std::min(MaxPowerOf2, PowerOf2);
2010
2011 // Our addressing modes can only shift by a maximum of 3
2012 if (MaxPowerOf2 > 3)
2013 MaxPowerOf2 = 3;
2014
2015 CanFactorize = true;
2016 }
2017 } else
2018 Leaves.push(WeightedLeaf(Child, Weight, InsertionOrder++));
2019 } else if (!isOpcodeHandled(Child.getNode())) {
2020 // CASE 2: Child is an unhandled kind of node (e.g. constant)
2021 int Weight = getWeight(Child.getNode());
2022
2023 NodeHeights[Child] = getHeight(Child.getNode());
2024 CurrentWeight += Weight;
2025
2026 if (isTargetConstant(Child) && !GA.Value.getNode())
2027 GA = WeightedLeaf(Child, Weight, InsertionOrder++);
2028 else
2029 Leaves.push(WeightedLeaf(Child, Weight, InsertionOrder++));
2030 } else {
2031 // CASE 3: Child is a subtree of same opcode
2032 // Visit children first, then flatten.
2033 unsigned ChildOpcode = Child.getOpcode();
2034 assert(ChildOpcode == NOpcode ||
2035        (NOpcode == ISD::MUL && ChildOpcode == ISD::SHL));
2036
2037 // Convert SHL to MUL
2038 SDValue Op1;
2039 if (ChildOpcode == ISD::SHL)
2040 Op1 = getMultiplierForSHL(Child.getNode());
2041 else
2042 Op1 = Child->getOperand(1);
2043
2044 if (!NodeHeights.count(Op1) || !NodeHeights.count(Child->getOperand(0))) {
2045 assert(!NodeHeights.count(Child) && "Parent visited before children?");
2046 // Visit children first, then re-visit this node
2047 Worklist.push_back(Child);
2048 Worklist.push_back(Op1);
2049 Worklist.push_back(Child->getOperand(0));
2050 } else {
2051 // Back at this node after visiting the children
2052 if (std::abs(NodeHeights[Op1] - NodeHeights[Child->getOperand(0)]) > 1)
2053 Imbalanced = true;
2054
2055 NodeHeights[Child] = std::max(NodeHeights[Op1],
2056 NodeHeights[Child->getOperand(0)]) + 1;
2057 }
2058 }
2059 }
2060
2061 LLVM_DEBUG(dbgs() << "--> Current height=" << NodeHeights[SDValue(N, 0)]
2062                   << " weight=" << CurrentWeight
2063                   << " imbalanced=" << Imbalanced << "\n");
2064
2065 // Transform MUL(x, C * 2^Y) + SHL(z, Y) -> SHL(ADD(MUL(x, C), z), Y)
2066 // This factors out a shift in order to match memw(a<<Y+b).
2067 if (CanFactorize && (willShiftRightEliminate(Mul1.Value, MaxPowerOf2) ||
2068 willShiftRightEliminate(Mul2.Value, MaxPowerOf2))) {
2069 LLVM_DEBUG(dbgs() << "--> Found common factor for two MUL children!\n");
2070 int Weight = Mul1.Weight + Mul2.Weight;
2071 int Height = std::max(NodeHeights[Mul1.Value], NodeHeights[Mul2.Value]) + 1;
2072 SDValue Mul1Factored = factorOutPowerOf2(Mul1.Value, MaxPowerOf2);
2073 SDValue Mul2Factored = factorOutPowerOf2(Mul2.Value, MaxPowerOf2);
2074 SDValue Sum = CurDAG->getNode(ISD::ADD, SDLoc(N), Mul1.Value.getValueType(),
2075 Mul1Factored, Mul2Factored);
2076 SDValue Const = CurDAG->getConstant(MaxPowerOf2, SDLoc(N),
2077 Mul1.Value.getValueType());
2078 SDValue New = CurDAG->getNode(ISD::SHL, SDLoc(N), Mul1.Value.getValueType(),
2079 Sum, Const);
2080 NodeHeights[New] = Height;
2081 Leaves.push(WeightedLeaf(New, Weight, Mul1.InsertionOrder));
2082 } else if (Mul1.Value.getNode()) {
2083 // We failed to factorize two MULs, so now the Muls are left outside the
2084 // queue... add them back.
2085 Leaves.push(Mul1);
2086 if (Mul2.Value.getNode())
2087 Leaves.push(Mul2);
2088 CanFactorize = false;
2089 }
2090
2091 // Combine GA + Constant -> GA+Offset, but only if GA is not used elsewhere
2092 // and the root node itself is not used more than twice. This reduces the
2093 // amount of additional constant extenders introduced by this optimization.
2094 bool CombinedGA = false;
2095 if (NOpcode == ISD::ADD && GA.Value.getNode() && Leaves.hasConst() &&
2096 GA.Value.hasOneUse() && N->use_size() < 3) {
2097 GlobalAddressSDNode *GANode =
2098 cast<GlobalAddressSDNode>(GA.Value.getOperand(0));
2099 ConstantSDNode *Offset = cast<ConstantSDNode>(Leaves.top().Value);
2100
2101 if (getUsesInFunction(GANode->getGlobal()) == 1 && Offset->hasOneUse() &&
2102 getTargetLowering()->isOffsetFoldingLegal(GANode)) {
2103 LLVM_DEBUG(dbgs() << "--> Combining GA and offset ("
2104                   << Offset->getSExtValue() << "): ");
2105 LLVM_DEBUG(GANode->dump(CurDAG));
2106
2107 SDValue NewTGA =
2108 CurDAG->getTargetGlobalAddress(GANode->getGlobal(), SDLoc(GA.Value),
2109 GANode->getValueType(0),
2110 GANode->getOffset() + (uint64_t)Offset->getSExtValue());
2111 GA.Value = CurDAG->getNode(GA.Value.getOpcode(), SDLoc(GA.Value),
2112 GA.Value.getValueType(), NewTGA);
2113 GA.Weight += Leaves.top().Weight;
2114
2115 NodeHeights[GA.Value] = getHeight(GA.Value.getNode());
2116 CombinedGA = true;
2117
2118 Leaves.pop(); // Remove the offset constant from the queue
2119 }
2120 }
2121
2122 if ((RebalanceOnlyForOptimizations && !CanFactorize && !CombinedGA) ||
2123 (RebalanceOnlyImbalancedTrees && !Imbalanced)) {
2124 RootWeights[N] = CurrentWeight;
2125 RootHeights[N] = NodeHeights[SDValue(N, 0)];
2126
2127 return SDValue(N, 0);
2128 }
2129
2130 // Combine GA + SHL(x, C<=31) so we will match Rx=add(#u8,asl(Rx,#U5))
2131 if (NOpcode == ISD::ADD && GA.Value.getNode()) {
2132 WeightedLeaf SHL = Leaves.findSHL(31);
2133 if (SHL.Value.getNode()) {
2134 int Height = std::max(NodeHeights[GA.Value], NodeHeights[SHL.Value]) + 1;
2135 GA.Value = CurDAG->getNode(ISD::ADD, SDLoc(GA.Value),
2136 GA.Value.getValueType(),
2137 GA.Value, SHL.Value);
2138 GA.Weight = SHL.Weight; // Specifically ignore the GA weight here
2139 NodeHeights[GA.Value] = Height;
2140 }
2141 }
2142
2143 if (GA.Value.getNode())
2144 Leaves.push(GA);
2145
2146 // If this is the top level and we haven't factored out a shift, we should try
2147 // to move a constant to the bottom to match addressing modes like memw(rX+C)
2148 if (TopLevel && !CanFactorize && Leaves.hasConst()) {
2149 LLVM_DEBUG(dbgs() << "--> Pushing constant to tip of tree.");
2150 Leaves.pushToBottom(Leaves.pop());
2151 }
2152
2153 const DataLayout &DL = CurDAG->getDataLayout();
2154 const TargetLowering &TLI = *getTargetLowering();
2155
2156 // Rebuild the tree using Huffman's algorithm
2157 while (Leaves.size() > 1) {
2158 WeightedLeaf L0 = Leaves.pop();
2159
2160 // See whether we can grab a MUL to form an add(Rx,mpyi(Ry,#u6)),
2161 // otherwise just get the next leaf
2162 WeightedLeaf L1 = Leaves.findMULbyConst();
2163 if (!L1.Value.getNode())
2164 L1 = Leaves.pop();
2165
2166 assert(L0.Weight <= L1.Weight && "Priority queue is broken!");
2167
2168 SDValue V0 = L0.Value;
2169 int V0Weight = L0.Weight;
2170 SDValue V1 = L1.Value;
2171 int V1Weight = L1.Weight;
2172
2173 // Make sure that none of these nodes have been RAUW'd
2174 if ((RootWeights.count(V0.getNode()) && RootWeights[V0.getNode()] == -2) ||
2175 (RootWeights.count(V1.getNode()) && RootWeights[V1.getNode()] == -2)) {
2176 LLVM_DEBUG(dbgs() << "--> Subtree was RAUWd. Restarting...\n");
2177 return balanceSubTree(N, TopLevel);
2178 }
2179
2180 ConstantSDNode *V0C = dyn_cast<ConstantSDNode>(V0);
2181 ConstantSDNode *V1C = dyn_cast<ConstantSDNode>(V1);
2182 EVT VT = N->getValueType(0);
2183 SDValue NewNode;
2184
2185 if (V0C && !V1C) {
2186 std::swap(V0, V1);
2187 std::swap(V0C, V1C);
2188 }
2189
2190 // Calculate height of this node
2191 assert(NodeHeights.count(V0) && NodeHeights.count(V1) &&
2192        "Children must have been visited before re-combining them!");
2193 int Height = std::max(NodeHeights[V0], NodeHeights[V1]) + 1;
2194
2195 // Rebuild this node (and restore SHL from MUL if needed)
2196 if (V1C && NOpcode == ISD::MUL && V1C->getAPIntValue().isPowerOf2())
2197 NewNode = CurDAG->getNode(
2198 ISD::SHL, SDLoc(V0), VT, V0,
2199 CurDAG->getConstant(
2200 V1C->getAPIntValue().logBase2(), SDLoc(N),
2201 TLI.getScalarShiftAmountTy(DL, V0.getValueType())));
2202 else
2203 NewNode = CurDAG->getNode(NOpcode, SDLoc(N), VT, V0, V1);
2204
2205 NodeHeights[NewNode] = Height;
2206
2207 int Weight = V0Weight + V1Weight;
2208 Leaves.push(WeightedLeaf(NewNode, Weight, L0.InsertionOrder));
2209
2210 LLVM_DEBUG(dbgs() << "--> Built new node (Weight=" << Weight
2211                   << ",Height=" << Height << "):\n");
2212 LLVM_DEBUG(NewNode.dump());
2213 }
2214
2215 assert(Leaves.size() == 1);
2216 SDValue NewRoot = Leaves.top().Value;
2217
2218 assert(NodeHeights.count(NewRoot));
2219 int Height = NodeHeights[NewRoot];
2220
2221 // Restore SHL if we earlier converted it to a MUL
2222 if (NewRoot.getOpcode() == ISD::MUL) {
2223 ConstantSDNode *V1C = dyn_cast<ConstantSDNode>(NewRoot.getOperand(1));
2224 if (V1C && V1C->getAPIntValue().isPowerOf2()) {
2225 EVT VT = NewRoot.getValueType();
2226 SDValue V0 = NewRoot.getOperand(0);
2227 NewRoot = CurDAG->getNode(
2228 ISD::SHL, SDLoc(NewRoot), VT, V0,
2229 CurDAG->getConstant(
2230 V1C->getAPIntValue().logBase2(), SDLoc(NewRoot),
2231 TLI.getScalarShiftAmountTy(DL, V0.getValueType())));
2232 }
2233 }
2234
2235 if (N != NewRoot.getNode()) {
2236 LLVM_DEBUG(dbgs() << "--> Root is now: ");
2237 LLVM_DEBUG(NewRoot.dump());
2238
2239 // Replace all uses of old root by new root
2240 CurDAG->ReplaceAllUsesWith(N, NewRoot.getNode());
2241 // Mark that we have RAUW'd N
2242 RootWeights[N] = -2;
2243 } else {
2244 LLVM_DEBUG(dbgs() << "--> Root unchanged.\n");
2245 }
2246
2247 RootWeights[NewRoot.getNode()] = Leaves.top().Weight;
2248 RootHeights[NewRoot.getNode()] = Height;
2249
2250 return NewRoot;
2251}
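
The rebuild loop above ("Rebuild the tree using Huffman's algorithm") repeatedly pops the two lightest leaves and pushes back a combined node whose weight is their sum, so heavier subtrees end up nearer the root and the weighted height stays small. A minimal sketch of that combination rule on bare weights (illustrative only, not part of the analyzed source):

  #include <functional>
  #include <queue>
  #include <vector>
  // Combine leaf weights Huffman-style and return the weight of the final root.
  int combineHuffmanStyle(std::vector<int> Weights) {
    std::priority_queue<int, std::vector<int>, std::greater<int>> Q(
        Weights.begin(), Weights.end());          // min-heap: lightest on top
    int Root = Q.empty() ? 0 : Q.top();
    while (Q.size() > 1) {
      int A = Q.top(); Q.pop();                   // two lightest leaves
      int B = Q.top(); Q.pop();
      Root = A + B;                               // weight of the new interior node
      Q.push(Root);
    }
    return Root;
  }
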
2252
2253void HexagonDAGToDAGISel::rebalanceAddressTrees() {
2254 for (SDNode &Node : llvm::make_early_inc_range(CurDAG->allnodes())) {
2255 SDNode *N = &Node;
2256 if (N->getOpcode() != ISD::LOAD && N->getOpcode() != ISD::STORE)
2257 continue;
2258
2259 SDValue BasePtr = cast<MemSDNode>(N)->getBasePtr();
2260 if (BasePtr.getOpcode() != ISD::ADD)
2261 continue;
2262
2263 // We've already processed this node
2264 if (RootWeights.count(BasePtr.getNode()))
2265 continue;
2266
2267 LLVM_DEBUG(dbgs() << "** Rebalancing address calculation in node: ");
2268 LLVM_DEBUG(N->dump(CurDAG));
2269
2270 // FindRoots
2271 SmallVector<SDNode *, 4> Worklist;
2272
2273 Worklist.push_back(BasePtr.getOperand(0).getNode());
2274 Worklist.push_back(BasePtr.getOperand(1).getNode());
2275
2276 while (!Worklist.empty()) {
2277 SDNode *N = Worklist.pop_back_val();
2278 unsigned Opcode = N->getOpcode();
2279
2280 if (!isOpcodeHandled(N))
2281 continue;
2282
2283 Worklist.push_back(N->getOperand(0).getNode());
2284 Worklist.push_back(N->getOperand(1).getNode());
2285
2286 // Not a root if it has only one use and same opcode as its parent
2287 if (N->hasOneUse() && Opcode == N->use_begin()->getOpcode())
2288 continue;
2289
2290 // This root node has already been processed
2291 if (RootWeights.count(N))
2292 continue;
2293
2294 RootWeights[N] = -1;
2295 }
2296
2297 // Balance node itself
2298 RootWeights[BasePtr.getNode()] = -1;
2299 SDValue NewBasePtr = balanceSubTree(BasePtr.getNode(), /*TopLevel=*/ true);
2300
2301 if (N->getOpcode() == ISD::LOAD)
2302 N = CurDAG->UpdateNodeOperands(N, N->getOperand(0),
2303 NewBasePtr, N->getOperand(2));
2304 else
2305 N = CurDAG->UpdateNodeOperands(N, N->getOperand(0), N->getOperand(1),
2306 NewBasePtr, N->getOperand(3));
2307
2308 LLVM_DEBUG(dbgs() << "--> Final node: ");
2309 LLVM_DEBUG(N->dump(CurDAG));
2310 }
2311
2312 CurDAG->RemoveDeadNodes();
2313 GAUsesInFunction.clear();
2314 RootHeights.clear();
2315 RootWeights.clear();
2316}

/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/llvm/include/llvm/Support/MathExtras.h

1//===-- llvm/Support/MathExtras.h - Useful math functions -------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains some functions that are useful for math stuff.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_SUPPORT_MATHEXTRAS_H
14#define LLVM_SUPPORT_MATHEXTRAS_H
15
16#include "llvm/ADT/bit.h"
17#include "llvm/Support/Compiler.h"
18#include <cassert>
19#include <climits>
20#include <cstdint>
21#include <cstring>
22#include <limits>
23#include <type_traits>
24
25#ifdef _MSC_VER
26// Declare these intrinsics manually rather including intrin.h. It's very
27// expensive, and MathExtras.h is popular.
28// #include <intrin.h>
29extern "C" {
30unsigned char _BitScanForward(unsigned long *_Index, unsigned long _Mask);
31unsigned char _BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask);
32unsigned char _BitScanReverse(unsigned long *_Index, unsigned long _Mask);
33unsigned char _BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask);
34}
35#endif
36
37namespace llvm {
38
39/// The behavior an operation has on an input of 0.
40enum ZeroBehavior {
41 /// The returned value is undefined.
42 ZB_Undefined,
43 /// The returned value is numeric_limits<T>::max()
44 ZB_Max,
45 /// The returned value is numeric_limits<T>::digits
46 ZB_Width
47};
48
49/// Mathematical constants.
50namespace numbers {
51// TODO: Track C++20 std::numbers.
52// TODO: Favor using the hexadecimal FP constants (requires C++17).
53constexpr double e = 2.7182818284590452354, // (0x1.5bf0a8b145749P+1) https://oeis.org/A001113
54 egamma = .57721566490153286061, // (0x1.2788cfc6fb619P-1) https://oeis.org/A001620
55 ln2 = .69314718055994530942, // (0x1.62e42fefa39efP-1) https://oeis.org/A002162
56 ln10 = 2.3025850929940456840, // (0x1.24bb1bbb55516P+1) https://oeis.org/A002392
57 log2e = 1.4426950408889634074, // (0x1.71547652b82feP+0)
58 log10e = .43429448190325182765, // (0x1.bcb7b1526e50eP-2)
59 pi = 3.1415926535897932385, // (0x1.921fb54442d18P+1) https://oeis.org/A000796
60 inv_pi = .31830988618379067154, // (0x1.45f306bc9c883P-2) https://oeis.org/A049541
61 sqrtpi = 1.7724538509055160273, // (0x1.c5bf891b4ef6bP+0) https://oeis.org/A002161
62 inv_sqrtpi = .56418958354775628695, // (0x1.20dd750429b6dP-1) https://oeis.org/A087197
63 sqrt2 = 1.4142135623730950488, // (0x1.6a09e667f3bcdP+0) https://oeis.org/A002193
64 inv_sqrt2 = .70710678118654752440, // (0x1.6a09e667f3bcdP-1)
65 sqrt3 = 1.7320508075688772935, // (0x1.bb67ae8584caaP+0) https://oeis.org/A002194
66 inv_sqrt3 = .57735026918962576451, // (0x1.279a74590331cP-1)
67 phi = 1.6180339887498948482; // (0x1.9e3779b97f4a8P+0) https://oeis.org/A001622
68constexpr float ef = 2.71828183F, // (0x1.5bf0a8P+1) https://oeis.org/A001113
69 egammaf = .577215665F, // (0x1.2788d0P-1) https://oeis.org/A001620
70 ln2f = .693147181F, // (0x1.62e430P-1) https://oeis.org/A002162
71 ln10f = 2.30258509F, // (0x1.26bb1cP+1) https://oeis.org/A002392
72 log2ef = 1.44269504F, // (0x1.715476P+0)
73 log10ef = .434294482F, // (0x1.bcb7b2P-2)
74 pif = 3.14159265F, // (0x1.921fb6P+1) https://oeis.org/A000796
75 inv_pif = .318309886F, // (0x1.45f306P-2) https://oeis.org/A049541
76 sqrtpif = 1.77245385F, // (0x1.c5bf8aP+0) https://oeis.org/A002161
77 inv_sqrtpif = .564189584F, // (0x1.20dd76P-1) https://oeis.org/A087197
78 sqrt2f = 1.41421356F, // (0x1.6a09e6P+0) https://oeis.org/A002193
79 inv_sqrt2f = .707106781F, // (0x1.6a09e6P-1)
80 sqrt3f = 1.73205081F, // (0x1.bb67aeP+0) https://oeis.org/A002194
81 inv_sqrt3f = .577350269F, // (0x1.279a74P-1)
82 phif = 1.61803399F; // (0x1.9e377aP+0) https://oeis.org/A001622
83} // namespace numbers
84
85namespace detail {
86template <typename T, std::size_t SizeOfT> struct TrailingZerosCounter {
87 static unsigned count(T Val, ZeroBehavior) {
88 if (!Val)
89 return std::numeric_limits<T>::digits;
90 if (Val & 0x1)
91 return 0;
92
93 // Bisection method.
94 unsigned ZeroBits = 0;
95 T Shift = std::numeric_limits<T>::digits >> 1;
96 T Mask = std::numeric_limits<T>::max() >> Shift;
97 while (Shift) {
98 if ((Val & Mask) == 0) {
99 Val >>= Shift;
100 ZeroBits |= Shift;
101 }
102 Shift >>= 1;
103 Mask >>= Shift;
104 }
105 return ZeroBits;
106 }
107};
108
109#if defined(__GNUC__) || defined(_MSC_VER)
110template <typename T> struct TrailingZerosCounter<T, 4> {
111 static unsigned count(T Val, ZeroBehavior ZB) {
112 if (ZB != ZB_Undefined && Val == 0)
    19.1: 'ZB' is not equal to ZB_Undefined
    20: Assuming 'Val' is equal to 0
    21: Taking true branch
113 return 32;
    22: Returning the value 32
114
115#if __has_builtin(__builtin_ctz) || defined(__GNUC__)
116 return __builtin_ctz(Val);
117#elif defined(_MSC_VER)
118 unsigned long Index;
119 _BitScanForward(&Index, Val);
120 return Index;
121#endif
122 }
123};
124
125#if !defined(_MSC_VER) || defined(_M_X64)
126template <typename T> struct TrailingZerosCounter<T, 8> {
127 static unsigned count(T Val, ZeroBehavior ZB) {
128 if (ZB != ZB_Undefined && Val == 0)
129 return 64;
130
131#if __has_builtin(__builtin_ctzll) || defined(__GNUC__)
132 return __builtin_ctzll(Val);
133#elif defined(_MSC_VER)
134 unsigned long Index;
135 _BitScanForward64(&Index, Val);
136 return Index;
137#endif
138 }
139};
140#endif
141#endif
142} // namespace detail
143
144/// Count number of 0's from the least significant bit to the most
145/// stopping at the first 1.
146///
147/// Only unsigned integral types are allowed.
148///
149/// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are
150/// valid arguments.
151template <typename T>
152unsigned countTrailingZeros(T Val, ZeroBehavior ZB = ZB_Width) {
153 static_assert(std::is_unsigned_v<T>,
154 "Only unsigned integral types are allowed.");
155 return llvm::detail::TrailingZerosCounter<T, sizeof(T)>::count(Val, ZB);
    19: Calling 'TrailingZerosCounter::count'
    23: Returning from 'TrailingZerosCounter::count'
    24: Returning the value 32
156}
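
This is the helper on the bug path shown by the events above: with the default ZB_Width behavior the 32-bit specialization returns 32 for a zero input, and a caller that feeds that count straight into a shift of a uint32_t ends up with a shift amount equal to the type's width, which C++ leaves undefined. A self-contained sketch of that hazard (names assumed, not the LLVM helpers):

  #include <cstdint>
  // Mirrors the ZB_Width case: a zero input yields the full bit width (32).
  unsigned ctz32(uint32_t V) {
    if (V == 0)
      return 32;                       // same effect as "return 32;" at line 113
    unsigned N = 0;
    while ((V & 1u) == 0) { V >>= 1; ++N; }
    return N;
  }
  uint32_t shiftByCount(uint32_t V) {
    unsigned Sh = ctz32(V);            // 32 when V == 0
    return V >> Sh;                    // undefined behavior when Sh == 32
  }
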
157
158namespace detail {
159template <typename T, std::size_t SizeOfT> struct LeadingZerosCounter {
160 static unsigned count(T Val, ZeroBehavior) {
161 if (!Val)
162 return std::numeric_limits<T>::digits;
163
164 // Bisection method.
165 unsigned ZeroBits = 0;
166 for (T Shift = std::numeric_limits<T>::digits >> 1; Shift; Shift >>= 1) {
167 T Tmp = Val >> Shift;
168 if (Tmp)
169 Val = Tmp;
170 else
171 ZeroBits |= Shift;
172 }
173 return ZeroBits;
174 }
175};
176
177#if defined(__GNUC__) || defined(_MSC_VER)
178template <typename T> struct LeadingZerosCounter<T, 4> {
179 static unsigned count(T Val, ZeroBehavior ZB) {
180 if (ZB != ZB_Undefined && Val == 0)
181 return 32;
182
183#if __has_builtin(__builtin_clz) || defined(__GNUC__)
184 return __builtin_clz(Val);
185#elif defined(_MSC_VER)
186 unsigned long Index;
187 _BitScanReverse(&Index, Val);
188 return Index ^ 31;
189#endif
190 }
191};
192
193#if !defined(_MSC_VER) || defined(_M_X64)
194template <typename T> struct LeadingZerosCounter<T, 8> {
195 static unsigned count(T Val, ZeroBehavior ZB) {
196 if (ZB != ZB_Undefined && Val == 0)
197 return 64;
198
199#if __has_builtin(__builtin_clzll) || defined(__GNUC__)
200 return __builtin_clzll(Val);
201#elif defined(_MSC_VER)
202 unsigned long Index;
203 _BitScanReverse64(&Index, Val);
204 return Index ^ 63;
205#endif
206 }
207};
208#endif
209#endif
210} // namespace detail
211
212/// Count number of 0's from the most significant bit to the least
213/// stopping at the first 1.
214///
215/// Only unsigned integral types are allowed.
216///
217/// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are
218/// valid arguments.
219template <typename T>
220unsigned countLeadingZeros(T Val, ZeroBehavior ZB = ZB_Width) {
221 static_assert(std::is_unsigned_v<T>,
222 "Only unsigned integral types are allowed.");
223 return llvm::detail::LeadingZerosCounter<T, sizeof(T)>::count(Val, ZB);
224}
225
226/// Get the index of the first set bit starting from the least
227/// significant bit.
228///
229/// Only unsigned integral types are allowed.
230///
231/// \param ZB the behavior on an input of 0. Only ZB_Max and ZB_Undefined are
232/// valid arguments.
233template <typename T> T findFirstSet(T Val, ZeroBehavior ZB = ZB_Max) {
234 if (ZB == ZB_Max && Val == 0)
235 return std::numeric_limits<T>::max();
236
237 return countTrailingZeros(Val, ZB_Undefined);
238}
239
240/// Create a bitmask with the N right-most bits set to 1, and all other
241/// bits set to 0. Only unsigned types are allowed.
242template <typename T> T maskTrailingOnes(unsigned N) {
243 static_assert(std::is_unsigned<T>::value, "Invalid type!");
244 const unsigned Bits = CHAR_BIT * sizeof(T);
245 assert(N <= Bits && "Invalid bit index");
246 return N == 0 ? 0 : (T(-1) >> (Bits - N));
247}
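
Note the N == 0 guard in the return above: without it the expression would shift T(-1) right by the full bit width, the same class of undefined shift that makes a count of 32 dangerous elsewhere in this report. A few worked values, assuming T = uint32_t:

  maskTrailingOnes<uint32_t>(4)  == 0x0000000F   // T(-1) >> (32 - 4)
  maskTrailingOnes<uint32_t>(32) == 0xFFFFFFFF   // T(-1) >> 0
  maskTrailingOnes<uint32_t>(0)  == 0            // guarded: T(-1) >> 32 would be undefined
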
248
249/// Create a bitmask with the N left-most bits set to 1, and all other
250/// bits set to 0. Only unsigned types are allowed.
251template <typename T> T maskLeadingOnes(unsigned N) {
252 return ~maskTrailingOnes<T>(CHAR_BIT * sizeof(T) - N);
253}
254
255/// Create a bitmask with the N right-most bits set to 0, and all other
256/// bits set to 1. Only unsigned types are allowed.
257template <typename T> T maskTrailingZeros(unsigned N) {
258 return maskLeadingOnes<T>(CHAR_BIT * sizeof(T) - N);
259}
260
261/// Create a bitmask with the N left-most bits set to 0, and all other
262/// bits set to 1. Only unsigned types are allowed.
263template <typename T> T maskLeadingZeros(unsigned N) {
264 return maskTrailingOnes<T>(CHAR_BIT * sizeof(T) - N);
265}
266
267/// Get the index of the last set bit starting from the least
268/// significant bit.
269///
270/// Only unsigned integral types are allowed.
271///
272/// \param ZB the behavior on an input of 0. Only ZB_Max and ZB_Undefined are
273/// valid arguments.
274template <typename T> T findLastSet(T Val, ZeroBehavior ZB = ZB_Max) {
275 if (ZB == ZB_Max && Val == 0)
276 return std::numeric_limits<T>::max();
277
278 // Use ^ instead of - because both gcc and llvm can remove the associated ^
279 // in the __builtin_clz intrinsic on x86.
280 return countLeadingZeros(Val, ZB_Undefined) ^
281 (std::numeric_limits<T>::digits - 1);
282}
283
284/// Macro compressed bit reversal table for 256 bits.
285///
286/// http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
287static const unsigned char BitReverseTable256[256] = {
288#define R2(n) n, n + 2 * 64, n + 1 * 64, n + 3 * 64
289#define R4(n) R2(n), R2(n + 2 * 16), R2(n + 1 * 16), R2(n + 3 * 16)
290#define R6(n) R4(n), R4(n + 2 * 4), R4(n + 1 * 4), R4(n + 3 * 4)
291 R6(0), R6(2), R6(1), R6(3)
292#undef R2
293#undef R4
294#undef R6
295};
296
297/// Reverse the bits in \p Val.
298template <typename T> T reverseBits(T Val) {
299#if __has_builtin(__builtin_bitreverse8)
300 if constexpr (std::is_same_v<T, uint8_t>)
301 return __builtin_bitreverse8(Val);
302#endif
303#if __has_builtin(__builtin_bitreverse16)
304 if constexpr (std::is_same_v<T, uint16_t>)
305 return __builtin_bitreverse16(Val);
306#endif
307#if __has_builtin(__builtin_bitreverse32)
308 if constexpr (std::is_same_v<T, uint32_t>)
309 return __builtin_bitreverse32(Val);
310#endif
311#if __has_builtin(__builtin_bitreverse64)
312 if constexpr (std::is_same_v<T, uint64_t>)
313 return __builtin_bitreverse64(Val);
314#endif
315
316 unsigned char in[sizeof(Val)];
317 unsigned char out[sizeof(Val)];
318 std::memcpy(in, &Val, sizeof(Val));
319 for (unsigned i = 0; i < sizeof(Val); ++i)
320 out[(sizeof(Val) - i) - 1] = BitReverseTable256[in[i]];
321 std::memcpy(&Val, out, sizeof(Val));
322 return Val;
323}
324
325// NOTE: The following support functions use the _32/_64 extensions instead of
326// type overloading so that signed and unsigned integers can be used without
327// ambiguity.
328
329/// Return the high 32 bits of a 64 bit value.
330constexpr inline uint32_t Hi_32(uint64_t Value) {
331 return static_cast<uint32_t>(Value >> 32);
332}
333
334/// Return the low 32 bits of a 64 bit value.
335constexpr inline uint32_t Lo_32(uint64_t Value) {
336 return static_cast<uint32_t>(Value);
337}
338
339/// Make a 64-bit integer from a high / low pair of 32-bit integers.
340constexpr inline uint64_t Make_64(uint32_t High, uint32_t Low) {
341 return ((uint64_t)High << 32) | (uint64_t)Low;
342}
343
344/// Checks if an integer fits into the given bit width.
345template <unsigned N> constexpr inline bool isInt(int64_t x) {
346 if constexpr (N == 8)
347 return static_cast<int8_t>(x) == x;
348 if constexpr (N == 16)
349 return static_cast<int16_t>(x) == x;
350 if constexpr (N == 32)
351 return static_cast<int32_t>(x) == x;
352 if constexpr (N < 64)
353 return -(INT64_C(1) << (N - 1)) <= x && x < (INT64_C(1) << (N - 1));
354 (void)x; // MSVC v19.25 warns that x is unused.
355 return true;
356}
357
358/// Checks if a signed integer is an N bit number shifted left by S.
359template <unsigned N, unsigned S>
360constexpr inline bool isShiftedInt(int64_t x) {
361 static_assert(
362 N > 0, "isShiftedInt<0> doesn't make sense (refers to a 0-bit number.");
363 static_assert(N + S <= 64, "isShiftedInt<N, S> with N + S > 64 is too wide.");
364 return isInt<N + S>(x) && (x % (UINT64_C(1) << S) == 0);
365}
366
367/// Checks if an unsigned integer fits into the given bit width.
368template <unsigned N> constexpr inline bool isUInt(uint64_t x) {
369 static_assert(N > 0, "isUInt<0> doesn't make sense");
370 if constexpr (N == 8)
371 return static_cast<uint8_t>(x) == x;
372 if constexpr (N == 16)
373 return static_cast<uint16_t>(x) == x;
374 if constexpr (N == 32)
375 return static_cast<uint32_t>(x) == x;
376 if constexpr (N < 64)
377 return x < (UINT64_C(1) << (N));
378 (void)x; // MSVC v19.25 warns that x is unused.
379 return true;
380}
381
382/// Checks if a unsigned integer is an N bit number shifted left by S.
383template <unsigned N, unsigned S>
384constexpr inline bool isShiftedUInt(uint64_t x) {
385 static_assert(
386 N > 0, "isShiftedUInt<0> doesn't make sense (refers to a 0-bit number)");
387 static_assert(N + S <= 64,
388 "isShiftedUInt<N, S> with N + S > 64 is too wide.");
389 // Per the two static_asserts above, S must be strictly less than 64. So
390 // 1 << S is not undefined behavior.
391 return isUInt<N + S>(x) && (x % (UINT64_C(1) << S) == 0);
392}
393
394/// Gets the maximum value for a N-bit unsigned integer.
395inline uint64_t maxUIntN(uint64_t N) {
396 assert(N > 0 && N <= 64 && "integer width out of range");
397
398 // uint64_t(1) << 64 is undefined behavior, so we can't do
399 // (uint64_t(1) << N) - 1
400 // without checking first that N != 64. But this works and doesn't have a
401 // branch.
402 return UINT64_MAX >> (64 - N);
403}
404
405/// Gets the minimum value for a N-bit signed integer.
406inline int64_t minIntN(int64_t N) {
407 assert(N > 0 && N <= 64 && "integer width out of range");
408
409 return UINT64_C(1) + ~(UINT64_C(1) << (N - 1));
410}
411
412/// Gets the maximum value for a N-bit signed integer.
413inline int64_t maxIntN(int64_t N) {
414 assert(N > 0 && N <= 64 && "integer width out of range");
415
416 // This relies on two's complement wraparound when N == 64, so we convert to
417 // int64_t only at the very end to avoid UB.
418 return (UINT64_C(1) << (N - 1)) - 1;
419}
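
// Illustrative usage (added for this report, not part of MathExtras.h): the
// helper name is hypothetical; it records the expected extremes for an 8-bit
// width and for the full 64-bit width (the 64-bit minimum relies on the same
// two's complement conversion the header itself depends on).
inline void exampleIntNBounds() {
  assert(maxUIntN(8) == 255 && maxUIntN(64) == UINT64_MAX);
  assert(minIntN(8) == -128 && maxIntN(8) == 127);
  assert(minIntN(64) == INT64_MIN && maxIntN(64) == INT64_MAX);
}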
420
421/// Checks if an unsigned integer fits into the given (dynamic) bit width.
422inline bool isUIntN(unsigned N, uint64_t x) {
423 return N >= 64 || x <= maxUIntN(N);
424}
425
426/// Checks if a signed integer fits into the given (dynamic) bit width.
427inline bool isIntN(unsigned N, int64_t x) {
428 return N >= 64 || (minIntN(N) <= x && x <= maxIntN(N));
429}
430
431/// Return true if the argument is a non-empty sequence of ones starting at the
432/// least significant bit with the remainder zero (32 bit version).
433/// Ex. isMask_32(0x0000FFFFU) == true.
434constexpr inline bool isMask_32(uint32_t Value) {
435 return Value && ((Value + 1) & Value) == 0;
436}
437
438/// Return true if the argument is a non-empty sequence of ones starting at the
439/// least significant bit with the remainder zero (64 bit version).
440constexpr inline bool isMask_64(uint64_t Value) {
441 return Value && ((Value + 1) & Value) == 0;
442}
443
444/// Return true if the argument contains a non-empty sequence of ones with the
445/// remainder zero (32 bit version.) Ex. isShiftedMask_32(0x0000FF00U) == true.
446constexpr inline bool isShiftedMask_32(uint32_t Value) {
447 return Value && isMask_32((Value - 1) | Value);
448}
449
450/// Return true if the argument contains a non-empty sequence of ones with the
451/// remainder zero (64 bit version.)
452constexpr inline bool isShiftedMask_64(uint64_t Value) {
453 return Value && isMask_64((Value - 1) | Value);
454}
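
// Illustrative usage (added for this report, not part of MathExtras.h): a
// mask must be a contiguous run of ones anchored at bit 0, while a shifted
// mask may start at any bit, but the run must still be contiguous.
static_assert(isMask_32(0x0000FFFFu) && !isMask_32(0x0000FF00u), "anchored run");
static_assert(isShiftedMask_32(0x0000FF00u), "contiguous run, shifted");
static_assert(!isShiftedMask_32(0x00F00F00u), "two separate runs");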
455
456/// Return true if the argument is a power of two > 0.
457/// Ex. isPowerOf2_32(0x00100000U) == true (32 bit edition.)
458constexpr inline bool isPowerOf2_32(uint32_t Value) {
459 return llvm::has_single_bit(Value);
460}
461
462/// Return true if the argument is a power of two > 0 (64 bit edition.)
463constexpr inline bool isPowerOf2_64(uint64_t Value) {
464 return llvm::has_single_bit(Value);
465}
466
467/// Count the number of ones from the most significant bit to the first
468/// zero bit.
469///
470/// Ex. countLeadingOnes(0xFF0FFF00) == 8.
471/// Only unsigned integral types are allowed.
472///
473/// \param ZB the behavior on an input of all ones. Only ZB_Width and
474/// ZB_Undefined are valid arguments.
475template <typename T>
476unsigned countLeadingOnes(T Value, ZeroBehavior ZB = ZB_Width) {
477 static_assert(std::is_unsigned_v<T>,
478 "Only unsigned integral types are allowed.");
479 return countLeadingZeros<T>(~Value, ZB);
480}
481
482/// Count the number of ones from the least significant bit to the first
483/// zero bit.
484///
485/// Ex. countTrailingOnes(0x00FF00FF) == 8.
486/// Only unsigned integral types are allowed.
487///
488/// \param ZB the behavior on an input of all ones. Only ZB_Width and
489/// ZB_Undefined are valid arguments.
490template <typename T>
491unsigned countTrailingOnes(T Value, ZeroBehavior ZB = ZB_Width) {
492 static_assert(std::is_unsigned_v<T>,
493 "Only unsigned integral types are allowed.");
494 return countTrailingZeros<T>(~Value, ZB);
495}
496
497/// Count the number of set bits in a value.
498/// Ex. countPopulation(0xF000F000) = 8
499/// Returns 0 if the word is zero.
500template <typename T>
501inline unsigned countPopulation(T Value) {
502 static_assert(std::is_unsigned_v<T>,
503 "Only unsigned integral types are allowed.");
504 return (unsigned)llvm::popcount(Value);
505}
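
// Illustrative usage (added for this report, not part of MathExtras.h): the
// helper name is hypothetical; the expected counts restate the examples given
// in the doc comments above.
inline void exampleBitCounts() {
  assert(countLeadingOnes(0xFF0FFF00u) == 8);
  assert(countTrailingOnes(0x00FF00FFu) == 8);
  assert(countPopulation(0xF000F000u) == 8);
}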
506
507/// Return true if the argument contains a non-empty sequence of ones with the
508/// remainder zero (32 bit version.) Ex. isShiftedMask_32(0x0000FF00U) == true.
509/// If true, \p MaskIdx will specify the index of the lowest set bit and \p
510/// MaskLen is updated to specify the length of the mask, else neither are
511/// updated.
512inline bool isShiftedMask_32(uint32_t Value, unsigned &MaskIdx,
513 unsigned &MaskLen) {
514 if (!isShiftedMask_32(Value))
515 return false;
516 MaskIdx = countTrailingZeros(Value);
517 MaskLen = countPopulation(Value);
518 return true;
519}
520
521/// Return true if the argument contains a non-empty sequence of ones with the
522/// remainder zero (64 bit version.) If true, \p MaskIdx will specify the index
523/// of the lowest set bit and \p MaskLen is updated to specify the length of the
524/// mask, else neither are updated.
525inline bool isShiftedMask_64(uint64_t Value, unsigned &MaskIdx,
526 unsigned &MaskLen) {
527 if (!isShiftedMask_64(Value))
528 return false;
529 MaskIdx = countTrailingZeros(Value);
530 MaskLen = countPopulation(Value);
531 return true;
532}
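
// Illustrative usage (added for this report, not part of MathExtras.h): for a
// shifted mask the two out-parameters recover where the run of ones starts and
// how long it is. Helper name and values are for illustration only.
inline void exampleShiftedMaskDecompose() {
  unsigned Idx = 0, Len = 0;
  assert(isShiftedMask_32(0x0000FF00u, Idx, Len) && Idx == 8 && Len == 8);
  assert(!isShiftedMask_32(0x00F00F00u, Idx, Len)); // Idx/Len left untouched.
}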
533
534/// Compile time Log2.
535/// Valid only for positive powers of two.
536template <size_t kValue> constexpr inline size_t CTLog2() {
537 static_assert(kValue > 0 && llvm::isPowerOf2_64(kValue),
538 "Value is not a valid power of 2");
539 return 1 + CTLog2<kValue / 2>();
540}
541
542template <> constexpr inline size_t CTLog2<1>() { return 0; }
543
544/// Return the floor log base 2 of the specified value, -1 if the value is zero.
545/// (32 bit edition.)
546/// Ex. Log2_32(32) == 5, Log2_32(1) == 0, Log2_32(0) == -1, Log2_32(6) == 2
547inline unsigned Log2_32(uint32_t Value) {
548 return 31 - countLeadingZeros(Value);
549}
550
551/// Return the floor log base 2 of the specified value, -1 if the value is zero.
552/// (64 bit edition.)
553inline unsigned Log2_64(uint64_t Value) {
554 return 63 - countLeadingZeros(Value);
555}
556
557/// Return the ceil log base 2 of the specified value, 32 if the value is zero.
558/// (32 bit edition).
559/// Ex. Log2_32_Ceil(32) == 5, Log2_32_Ceil(1) == 0, Log2_32_Ceil(6) == 3
560inline unsigned Log2_32_Ceil(uint32_t Value) {
561 return 32 - countLeadingZeros(Value - 1);
562}
563
564/// Return the ceil log base 2 of the specified value, 64 if the value is zero.
565/// (64 bit edition.)
566inline unsigned Log2_64_Ceil(uint64_t Value) {
567 return 64 - countLeadingZeros(Value - 1);
568}
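
// Illustrative usage (added for this report, not part of MathExtras.h): the
// floor and ceiling variants agree on exact powers of two and differ by one
// otherwise. Helper name is hypothetical.
inline void exampleLog2() {
  assert(Log2_32(32) == 5 && Log2_32_Ceil(32) == 5);
  assert(Log2_32(6) == 2 && Log2_32_Ceil(6) == 3);
  assert(Log2_64(1) == 0 && Log2_64_Ceil(1) == 0);
}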
569
570/// This function takes a 64-bit integer and returns the bit equivalent double.
571inline double BitsToDouble(uint64_t Bits) {
572 static_assert(sizeof(uint64_t) == sizeof(double), "Unexpected type sizes");
573 return llvm::bit_cast<double>(Bits);
574}
575
576/// This function takes a 32-bit integer and returns the bit equivalent float.
577inline float BitsToFloat(uint32_t Bits) {
578 static_assert(sizeof(uint32_t) == sizeof(float), "Unexpected type sizes");
579 return llvm::bit_cast<float>(Bits);
580}
581
582/// This function takes a double and returns the bit equivalent 64-bit integer.
583/// Note that copying doubles around changes the bits of NaNs on some hosts,
584/// notably x86, so this routine cannot be used if these bits are needed.
585inline uint64_t DoubleToBits(double Double) {
586 static_assert(sizeof(uint64_t) == sizeof(double), "Unexpected type sizes");
587 return llvm::bit_cast<uint64_t>(Double);
588}
589
590/// This function takes a float and returns the bit equivalent 32-bit integer.
591/// Note that copying floats around changes the bits of NaNs on some hosts,
592/// notably x86, so this routine cannot be used if these bits are needed.
593inline uint32_t FloatToBits(float Float) {
594 static_assert(sizeof(uint32_t) == sizeof(float), "Unexpected type sizes");
595 return llvm::bit_cast<uint32_t>(Float);
596}
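
// Illustrative usage (added for this report, not part of MathExtras.h): the
// conversions are plain bit casts, so they round-trip exactly; 0x3F800000 is
// the IEEE-754 single-precision encoding of 1.0f. Helper name is hypothetical.
inline void exampleFloatBits() {
  assert(FloatToBits(1.0f) == 0x3F800000u);
  assert(BitsToFloat(0x3F800000u) == 1.0f);
  assert(BitsToDouble(DoubleToBits(1.5)) == 1.5);
}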
597
598/// A and B are either alignments or offsets. Return the minimum alignment that
599/// may be assumed after adding the two together.
600constexpr inline uint64_t MinAlign(uint64_t A, uint64_t B) {
601 // The largest power of 2 that divides both A and B.
602 //
603 // Replace "-Value" by "1+~Value" in the following commented code to avoid
604 // MSVC warning C4146
605 // return (A | B) & -(A | B);
606 return (A | B) & (1 + ~(A | B));
607}
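
// Illustrative usage (added for this report, not part of MathExtras.h):
// MinAlign(A, B) is the largest power of two dividing both arguments, i.e.
// the lowest set bit of A | B.
static_assert(MinAlign(8, 12) == 4, "largest common power-of-two factor");
static_assert(MinAlign(16, 0) == 16, "a zero offset does not reduce alignment");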
608
609/// Returns the next power of two (in 64-bits) that is strictly greater than A.
610/// Returns zero on overflow.
611constexpr inline uint64_t NextPowerOf2(uint64_t A) {
612 A |= (A >> 1);
613 A |= (A >> 2);
614 A |= (A >> 4);
615 A |= (A >> 8);
616 A |= (A >> 16);
617 A |= (A >> 32);
618 return A + 1;
619}
620
621/// Returns the power of two which is less than or equal to the given value.
622/// Essentially, it is a floor operation across the domain of powers of two.
623inline uint64_t PowerOf2Floor(uint64_t A) {
624 if (!A) return 0;
625 return 1ull << (63 - countLeadingZeros(A, ZB_Undefined));
626}
627
628/// Returns the power of two which is greater than or equal to the given value.
629/// Essentially, it is a ceil operation across the domain of powers of two.
630inline uint64_t PowerOf2Ceil(uint64_t A) {
631 if (!A)
632 return 0;
633 return NextPowerOf2(A - 1);
634}
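
// Illustrative usage (added for this report, not part of MathExtras.h):
// NextPowerOf2 is strictly greater than its argument, while PowerOf2Ceil and
// PowerOf2Floor are the inclusive round-up/round-down. Helper name is
// hypothetical.
inline void examplePowersOfTwo() {
  assert(NextPowerOf2(15) == 16 && NextPowerOf2(16) == 32);
  assert(PowerOf2Ceil(16) == 16 && PowerOf2Ceil(17) == 32);
  assert(PowerOf2Floor(17) == 16 && PowerOf2Floor(0) == 0);
}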
635
636/// Returns the next integer (mod 2**64) that is greater than or equal to
637/// \p Value and is a multiple of \p Align. \p Align must be non-zero.
638///
639/// Examples:
640/// \code
641/// alignTo(5, 8) = 8
642/// alignTo(17, 8) = 24
643/// alignTo(~0LL, 8) = 0
644/// alignTo(321, 255) = 510
645/// \endcode
646inline uint64_t alignTo(uint64_t Value, uint64_t Align) {
647 assert(Align != 0u && "Align can't be 0.");
648 return (Value + Align - 1) / Align * Align;
649}
650
651inline uint64_t alignToPowerOf2(uint64_t Value, uint64_t Align) {
652 assert(Align != 0 && (Align & (Align - 1)) == 0 &&
653 "Align must be a power of 2");
654 return (Value + Align - 1) & -Align;
655}
656
657/// If non-zero \p Skew is specified, the return value will be a minimal integer
658/// that is greater than or equal to \p Value and equal to \p Align * N + \p Skew
659/// for some integer N. If \p Skew is larger than \p Align, its value is adjusted
660/// to '\p Skew mod \p Align'. \p Align must be non-zero.
661///
662/// Examples:
663/// \code
664/// alignTo(5, 8, 7) = 7
665/// alignTo(17, 8, 1) = 17
666/// alignTo(~0LL, 8, 3) = 3
667/// alignTo(321, 255, 42) = 552
668/// \endcode
669inline uint64_t alignTo(uint64_t Value, uint64_t Align, uint64_t Skew) {
670 assert(Align != 0u && "Align can't be 0.");
671 Skew %= Align;
672 return alignTo(Value - Skew, Align) + Skew;
673}
674
675/// Returns the next integer (mod 2**64) that is greater than or equal to
676/// \p Value and is a multiple of \c Align. \c Align must be non-zero.
677template <uint64_t Align> constexpr inline uint64_t alignTo(uint64_t Value) {
678 static_assert(Align != 0u, "Align must be non-zero");
679 return (Value + Align - 1) / Align * Align;
680}
681
682/// Returns the integer ceil(Numerator / Denominator).
683inline uint64_t divideCeil(uint64_t Numerator, uint64_t Denominator) {
684 return alignTo(Numerator, Denominator) / Denominator;
685}
686
687/// Returns the integer nearest(Numerator / Denominator).
688inline uint64_t divideNearest(uint64_t Numerator, uint64_t Denominator) {
689 return (Numerator + (Denominator / 2)) / Denominator;
690}
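
// Illustrative usage (added for this report, not part of MathExtras.h):
// divideCeil always rounds up, divideNearest rounds to the nearest integer
// with halves rounding up (everything here is unsigned). Helper name is
// hypothetical.
inline void exampleIntegerDivision() {
  assert(divideCeil(7, 2) == 4 && divideCeil(6, 2) == 3);
  assert(divideNearest(7, 2) == 4 && divideNearest(5, 3) == 2);
}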
691
692/// Returns the largest uint64_t less than or equal to \p Value and is
693/// \p Skew mod \p Align. \p Align must be non-zero
694inline uint64_t alignDown(uint64_t Value, uint64_t Align, uint64_t Skew = 0) {
695 assert(Align != 0u && "Align can't be 0.");
696 Skew %= Align;
697 return (Value - Skew) / Align * Align + Skew;
698}
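
// Illustrative usage (added for this report, not part of MathExtras.h):
// alignDown is the mirror of alignTo; with a skew it returns the largest
// value <= Value that is congruent to Skew modulo Align. Helper name is
// hypothetical.
inline void exampleAlignDown() {
  assert(alignDown(17, 8) == 16);
  assert(alignDown(17, 8, 3) == 11); // 11 == 1 * 8 + 3
}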
699
700/// Sign-extend the number in the bottom B bits of X to a 32-bit integer.
701/// Requires 0 < B <= 32.
702template <unsigned B> constexpr inline int32_t SignExtend32(uint32_t X) {
703 static_assert(B > 0, "Bit width can't be 0.");
704 static_assert(B <= 32, "Bit width out of range.");
705 return int32_t(X << (32 - B)) >> (32 - B);
706}
707
708/// Sign-extend the number in the bottom B bits of X to a 32-bit integer.
709/// Requires 0 < B <= 32.
710inline int32_t SignExtend32(uint32_t X, unsigned B) {
711 assert(B > 0 && "Bit width can't be 0.");
712 assert(B <= 32 && "Bit width out of range.");
713 return int32_t(X << (32 - B)) >> (32 - B);
714}
715
716/// Sign-extend the number in the bottom B bits of X to a 64-bit integer.
717/// Requires 0 < B <= 64.
718template <unsigned B> constexpr inline int64_t SignExtend64(uint64_t x) {
719 static_assert(B > 0, "Bit width can't be 0.");
720 static_assert(B <= 64, "Bit width out of range.");
721 return int64_t(x << (64 - B)) >> (64 - B);
722}
723
724/// Sign-extend the number in the bottom B bits of X to a 64-bit integer.
725/// Requires 0 < B <= 64.
726inline int64_t SignExtend64(uint64_t X, unsigned B) {
727 assert(B > 0 && "Bit width can't be 0.");
728 assert(B <= 64 && "Bit width out of range.");
729 return int64_t(X << (64 - B)) >> (64 - B);
730}
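
// Illustrative usage (added for this report, not part of MathExtras.h): the
// templated forms are constexpr, so the expected sign extensions can be
// checked at compile time (assuming the usual arithmetic right shift on signed
// values, which these helpers already rely on).
static_assert(SignExtend32<8>(0xFFu) == -1, "all ones in 8 bits is -1");
static_assert(SignExtend32<8>(0x7Fu) == 127, "sign bit clear stays positive");
static_assert(SignExtend64<32>(0xFFFFFFFFull) == -1, "64-bit variant");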
731
732/// Subtract two unsigned integers, X and Y, of type T and return the absolute
733/// value of the result.
734template <typename T>
735std::enable_if_t<std::is_unsigned<T>::value, T> AbsoluteDifference(T X, T Y) {
736 return X > Y ? (X - Y) : (Y - X);
737}
738
739/// Add two unsigned integers, X and Y, of type T. Clamp the result to the
740/// maximum representable value of T on overflow. ResultOverflowed indicates if
741/// the result is larger than the maximum representable value of type T.
742template <typename T>
743std::enable_if_t<std::is_unsigned<T>::value, T>
744SaturatingAdd(T X, T Y, bool *ResultOverflowed = nullptr) {
745 bool Dummy;
746 bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;
747 // Hacker's Delight, p. 29
748 T Z = X + Y;
749 Overflowed = (Z < X || Z < Y);
750 if (Overflowed)
751 return std::numeric_limits<T>::max();
752 else
753 return Z;
754}
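
// Illustrative usage (added for this report, not part of MathExtras.h): on
// overflow the sum clamps to the type's maximum and the optional flag is set.
// Helper name and values are for illustration only.
inline void exampleSaturatingAdd() {
  bool Overflowed = false;
  assert(SaturatingAdd<uint8_t>(100, 100, &Overflowed) == 200 && !Overflowed);
  assert(SaturatingAdd<uint8_t>(200, 100, &Overflowed) == 255 && Overflowed);
}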
755
756/// Multiply two unsigned integers, X and Y, of type T. Clamp the result to the
757/// maximum representable value of T on overflow. ResultOverflowed indicates if
758/// the result is larger than the maximum representable value of type T.
759template <typename T>
760std::enable_if_t<std::is_unsigned<T>::value, T>
761SaturatingMultiply(T X, T Y, bool *ResultOverflowed = nullptr) {
762 bool Dummy;
763 bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;
764
765 // Hacker's Delight, p. 30 has a different algorithm, but we don't use that
766 // because it fails for uint16_t (where multiplication can have undefined
767 // behavior due to promotion to int), and requires a division in addition
768 // to the multiplication.
769
770 Overflowed = false;
771
772 // Log2(Z) would be either Log2Z or Log2Z + 1.
773 // Special case: if X or Y is 0, Log2_64 gives -1, and Log2Z
774 // will necessarily be less than Log2Max as desired.
775 int Log2Z = Log2_64(X) + Log2_64(Y);
776 const T Max = std::numeric_limits<T>::max();
777 int Log2Max = Log2_64(Max);
778 if (Log2Z < Log2Max) {
779 return X * Y;
780 }
781 if (Log2Z > Log2Max) {
782 Overflowed = true;
783 return Max;
784 }
785
786 // We're going to use the top bit, and maybe overflow one
787 // bit past it. Multiply all but the bottom bit then add
788 // that on at the end.
789 T Z = (X >> 1) * Y;
790 if (Z & ~(Max >> 1)) {
791 Overflowed = true;
792 return Max;
793 }
794 Z <<= 1;
795 if (X & 1)
796 return SaturatingAdd(Z, Y, ResultOverflowed);
797
798 return Z;
799}
800
801/// Multiply two unsigned integers, X and Y, and add the unsigned integer, A to
802/// the product. Clamp the result to the maximum representable value of T on
803/// overflow. ResultOverflowed indicates if the result is larger than the
804/// maximum representable value of type T.
805template <typename T>
806std::enable_if_t<std::is_unsigned<T>::value, T>
807SaturatingMultiplyAdd(T X, T Y, T A, bool *ResultOverflowed = nullptr) {
808 bool Dummy;
809 bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;
810
811 T Product = SaturatingMultiply(X, Y, &Overflowed);
812 if (Overflowed)
813 return Product;
814
815 return SaturatingAdd(A, Product, &Overflowed);
816}
817
818/// Use this rather than HUGE_VALF; the latter causes warnings on MSVC.
819extern const float huge_valf;
820
821
822/// Add two signed integers, computing the two's complement truncated result,
823/// returning true if overflow occurred.
824template <typename T>
825std::enable_if_t<std::is_signed<T>::value, T> AddOverflow(T X, T Y, T &Result) {
826#if __has_builtin(__builtin_add_overflow)
827 return __builtin_add_overflow(X, Y, &Result);
828#else
829 // Perform the unsigned addition.
830 using U = std::make_unsigned_t<T>;
831 const U UX = static_cast<U>(X);
832 const U UY = static_cast<U>(Y);
833 const U UResult = UX + UY;
834
835 // Convert to signed.
836 Result = static_cast<T>(UResult);
837
838 // Adding two positive numbers should result in a positive number.
839 if (X > 0 && Y > 0)
840 return Result <= 0;
841 // Adding two negatives should result in a negative number.
842 if (X < 0 && Y < 0)
843 return Result >= 0;
844 return false;
845#endif
846}
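
// Illustrative usage (added for this report, not part of MathExtras.h): unlike
// the saturating helpers, AddOverflow stores the two's complement truncated
// sum and its return value reports whether the true sum fit. Helper name and
// values are for illustration only (values assume a two's complement target,
// as the surrounding code does).
inline void exampleAddOverflow() {
  int8_t Result = 0;
  assert(!AddOverflow<int8_t>(50, 50, Result) && Result == 100);
  assert(AddOverflow<int8_t>(100, 100, Result) && Result == -56); // 200 - 256
}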
847
848/// Subtract two signed integers, computing the two's complement truncated
849/// result, returning true if an overflow occurred.
850template <typename T>
851std::enable_if_t<std::is_signed<T>::value, T> SubOverflow(T X, T Y, T &Result) {
852#if __has_builtin(__builtin_sub_overflow)
853 return __builtin_sub_overflow(X, Y, &Result);
854#else
855 // Perform the unsigned addition.
856 using U = std::make_unsigned_t<T>;
857 const U UX = static_cast<U>(X);
858 const U UY = static_cast<U>(Y);
859 const U UResult = UX - UY;
860
861 // Convert to signed.
862 Result = static_cast<T>(UResult);
863
864 // Subtracting a positive number from a negative results in a negative number.
865 if (X <= 0 && Y > 0)
866 return Result >= 0;
867 // Subtracting a negative number from a positive results in a positive number.
868 if (X >= 0 && Y < 0)
869 return Result <= 0;
870 return false;
871#endif
872}
873
874/// Multiply two signed integers, computing the two's complement truncated
875/// result, returning true if an overflow occurred.
876template <typename T>
877std::enable_if_t<std::is_signed<T>::value, T> MulOverflow(T X, T Y, T &Result) {
878 // Perform the unsigned multiplication on absolute values.
879 using U = std::make_unsigned_t<T>;
880 const U UX = X < 0 ? (0 - static_cast<U>(X)) : static_cast<U>(X);
881 const U UY = Y < 0 ? (0 - static_cast<U>(Y)) : static_cast<U>(Y);
882 const U UResult = UX * UY;
883
884 // Convert to signed.
885 const bool IsNegative = (X < 0) ^ (Y < 0);
886 Result = IsNegative ? (0 - UResult) : UResult;
887
888 // If any of the args was 0, result is 0 and no overflow occurs.
889 if (UX == 0 || UY == 0)
890 return false;
891
892 // UX and UY are in [1, 2^n], where n is the number of digits.
893 // Check how the max allowed absolute value (2^n for negative, 2^(n-1) for
894 // positive) divided by an argument compares to the other.
895 if (IsNegative)
896 return UX > (static_cast<U>(std::numeric_limits<T>::max()) + U(1)) / UY;
897 else
898 return UX > (static_cast<U>(std::numeric_limits<T>::max())) / UY;
899}
900
901} // End llvm namespace
902
903#endif