Bug Summary

File: llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
Warning: line 1134, column 42
The result of the right shift is undefined due to shifting by '32', which is greater or equal to the width of type 'uint32_t'
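The warning arises because llvm::countTrailingZeros returns the full bit width (32) for a zero input, so the subsequent Mask >> TZ at line 1134 shifts a uint32_t by 32, which C++ leaves undefined. A minimal stand-alone sketch of the failure mode and one possible guard (illustrative only, not the upstream fix):

    #include <cstdint>

    // Stand-in for llvm::countTrailingZeros(0u), which is specified to
    // return the bit width (32) for a zero input.
    static unsigned ctz32(uint32_t V) { return V == 0 ? 32u : __builtin_ctz(V); }

    void examineMask(uint32_t Mask) {
      uint32_t TZ = ctz32(Mask);        // TZ == 32 when Mask == 0
      // uint32_t M1 = countTrailingOnes(Mask >> TZ);  // UB when TZ == 32
      if (TZ >= 32)                     // possible guard: skip an all-zero mask
        return;
      uint32_t Shifted = Mask >> TZ;    // well defined: 0 <= TZ <= 31
      (void)Shifted;
    }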

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name HexagonISelDAGToDAG.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/build-llvm/tools/clang/stage2-bins -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/Target/Hexagon -I /build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/llvm/lib/Target/Hexagon -I include -I /build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fmacro-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/= -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/= -O3 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/= -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 
-o /tmp/scan-build-2022-01-16-232930-107970-1 -x c++ /build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp

/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp

1//===-- HexagonISelDAGToDAG.cpp - A dag to dag inst selector for Hexagon --===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines an instruction selector for the Hexagon target.
10//
11//===----------------------------------------------------------------------===//
12
13#include "HexagonISelDAGToDAG.h"
14#include "Hexagon.h"
15#include "HexagonISelLowering.h"
16#include "HexagonMachineFunctionInfo.h"
17#include "HexagonTargetMachine.h"
18#include "llvm/CodeGen/FunctionLoweringInfo.h"
19#include "llvm/CodeGen/MachineInstrBuilder.h"
20#include "llvm/CodeGen/SelectionDAGISel.h"
21#include "llvm/IR/Intrinsics.h"
22#include "llvm/IR/IntrinsicsHexagon.h"
23#include "llvm/Support/CommandLine.h"
24#include "llvm/Support/Debug.h"
25using namespace llvm;
26
27#define DEBUG_TYPE "hexagon-isel"
28
29static
30cl::opt<bool>
31EnableAddressRebalancing("isel-rebalance-addr", cl::Hidden, cl::init(true),
32 cl::desc("Rebalance address calculation trees to improve "
33 "instruction selection"));
34
35// Rebalance only if this allows e.g. combining a GA with an offset or
36// factoring out a shift.
37static
38cl::opt<bool>
39RebalanceOnlyForOptimizations("rebalance-only-opt", cl::Hidden, cl::init(false),
40 cl::desc("Rebalance address tree only if this allows optimizations"));
41
42static
43cl::opt<bool>
44RebalanceOnlyImbalancedTrees("rebalance-only-imbal", cl::Hidden,
45 cl::init(false), cl::desc("Rebalance address tree only if it is imbalanced"));
46
47static cl::opt<bool> CheckSingleUse("hexagon-isel-su", cl::Hidden,
48 cl::init(true), cl::desc("Enable checking of SDNode's single-use status"));
49
50//===----------------------------------------------------------------------===//
51// Instruction Selector Implementation
52//===----------------------------------------------------------------------===//
53
54#define GET_DAGISEL_BODY HexagonDAGToDAGISel
55#include "HexagonGenDAGISel.inc"
56
57namespace llvm {
58/// createHexagonISelDag - This pass converts a legalized DAG into a
59/// Hexagon-specific DAG, ready for instruction scheduling.
60FunctionPass *createHexagonISelDag(HexagonTargetMachine &TM,
61 CodeGenOpt::Level OptLevel) {
62 return new HexagonDAGToDAGISel(TM, OptLevel);
63}
64}
65
66void HexagonDAGToDAGISel::SelectIndexedLoad(LoadSDNode *LD, const SDLoc &dl) {
67 SDValue Chain = LD->getChain();
68 SDValue Base = LD->getBasePtr();
69 SDValue Offset = LD->getOffset();
70 int32_t Inc = cast<ConstantSDNode>(Offset.getNode())->getSExtValue();
71 EVT LoadedVT = LD->getMemoryVT();
72 unsigned Opcode = 0;
73
74 // Check for zero extended loads. Treat any-extend loads as zero extended
75 // loads.
76 ISD::LoadExtType ExtType = LD->getExtensionType();
77 bool IsZeroExt = (ExtType == ISD::ZEXTLOAD || ExtType == ISD::EXTLOAD);
78 bool IsValidInc = HII->isValidAutoIncImm(LoadedVT, Inc);
79
80 assert(LoadedVT.isSimple());
81 switch (LoadedVT.getSimpleVT().SimpleTy) {
82 case MVT::i8:
83 if (IsZeroExt)
84 Opcode = IsValidInc ? Hexagon::L2_loadrub_pi : Hexagon::L2_loadrub_io;
85 else
86 Opcode = IsValidInc ? Hexagon::L2_loadrb_pi : Hexagon::L2_loadrb_io;
87 break;
88 case MVT::i16:
89 if (IsZeroExt)
90 Opcode = IsValidInc ? Hexagon::L2_loadruh_pi : Hexagon::L2_loadruh_io;
91 else
92 Opcode = IsValidInc ? Hexagon::L2_loadrh_pi : Hexagon::L2_loadrh_io;
93 break;
94 case MVT::i32:
95 case MVT::f32:
96 case MVT::v2i16:
97 case MVT::v4i8:
98 Opcode = IsValidInc ? Hexagon::L2_loadri_pi : Hexagon::L2_loadri_io;
99 break;
100 case MVT::i64:
101 case MVT::f64:
102 case MVT::v2i32:
103 case MVT::v4i16:
104 case MVT::v8i8:
105 Opcode = IsValidInc ? Hexagon::L2_loadrd_pi : Hexagon::L2_loadrd_io;
106 break;
107 case MVT::v64i8:
108 case MVT::v32i16:
109 case MVT::v16i32:
110 case MVT::v8i64:
111 case MVT::v128i8:
112 case MVT::v64i16:
113 case MVT::v32i32:
114 case MVT::v16i64:
115 if (isAlignedMemNode(LD)) {
116 if (LD->isNonTemporal())
117 Opcode = IsValidInc ? Hexagon::V6_vL32b_nt_pi : Hexagon::V6_vL32b_nt_ai;
118 else
119 Opcode = IsValidInc ? Hexagon::V6_vL32b_pi : Hexagon::V6_vL32b_ai;
120 } else {
121 Opcode = IsValidInc ? Hexagon::V6_vL32Ub_pi : Hexagon::V6_vL32Ub_ai;
122 }
123 break;
124 default:
125 llvm_unreachable("Unexpected memory type in indexed load");
126 }
127
128 SDValue IncV = CurDAG->getTargetConstant(Inc, dl, MVT::i32);
129 MachineMemOperand *MemOp = LD->getMemOperand();
130
131 auto getExt64 = [this,ExtType] (MachineSDNode *N, const SDLoc &dl)
132 -> MachineSDNode* {
133 if (ExtType == ISD::ZEXTLOAD || ExtType == ISD::EXTLOAD) {
134 SDValue Zero = CurDAG->getTargetConstant(0, dl, MVT::i32);
135 return CurDAG->getMachineNode(Hexagon::A4_combineir, dl, MVT::i64,
136 Zero, SDValue(N, 0));
137 }
138 if (ExtType == ISD::SEXTLOAD)
139 return CurDAG->getMachineNode(Hexagon::A2_sxtw, dl, MVT::i64,
140 SDValue(N, 0));
141 return N;
142 };
143
144 // Loaded value Next address Chain
145 SDValue From[3] = { SDValue(LD,0), SDValue(LD,1), SDValue(LD,2) };
146 SDValue To[3];
147
148 EVT ValueVT = LD->getValueType(0);
149 if (ValueVT == MVT::i64 && ExtType != ISD::NON_EXTLOAD) {
150 // A load extending to i64 will actually produce i32, which will then
151 // need to be extended to i64.
152 assert(LoadedVT.getSizeInBits() <= 32);
153 ValueVT = MVT::i32;
154 }
155
156 if (IsValidInc) {
157 MachineSDNode *L = CurDAG->getMachineNode(Opcode, dl, ValueVT,
158 MVT::i32, MVT::Other, Base,
159 IncV, Chain);
160 CurDAG->setNodeMemRefs(L, {MemOp});
161 To[1] = SDValue(L, 1); // Next address.
162 To[2] = SDValue(L, 2); // Chain.
163 // Handle special case for extension to i64.
164 if (LD->getValueType(0) == MVT::i64)
165 L = getExt64(L, dl);
166 To[0] = SDValue(L, 0); // Loaded (extended) value.
167 } else {
168 SDValue Zero = CurDAG->getTargetConstant(0, dl, MVT::i32);
169 MachineSDNode *L = CurDAG->getMachineNode(Opcode, dl, ValueVT, MVT::Other,
170 Base, Zero, Chain);
171 CurDAG->setNodeMemRefs(L, {MemOp});
172 To[2] = SDValue(L, 1); // Chain.
173 MachineSDNode *A = CurDAG->getMachineNode(Hexagon::A2_addi, dl, MVT::i32,
174 Base, IncV);
175 To[1] = SDValue(A, 0); // Next address.
176 // Handle special case for extension to i64.
177 if (LD->getValueType(0) == MVT::i64)
178 L = getExt64(L, dl);
179 To[0] = SDValue(L, 0); // Loaded (extended) value.
180 }
181 ReplaceUses(From, To, 3);
182 CurDAG->RemoveDeadNode(LD);
183}
184
185MachineSDNode *HexagonDAGToDAGISel::LoadInstrForLoadIntrinsic(SDNode *IntN) {
186 if (IntN->getOpcode() != ISD::INTRINSIC_W_CHAIN)
187 return nullptr;
188
189 SDLoc dl(IntN);
190 unsigned IntNo = cast<ConstantSDNode>(IntN->getOperand(1))->getZExtValue();
191
192 static std::map<unsigned,unsigned> LoadPciMap = {
193 { Intrinsic::hexagon_circ_ldb, Hexagon::L2_loadrb_pci },
194 { Intrinsic::hexagon_circ_ldub, Hexagon::L2_loadrub_pci },
195 { Intrinsic::hexagon_circ_ldh, Hexagon::L2_loadrh_pci },
196 { Intrinsic::hexagon_circ_lduh, Hexagon::L2_loadruh_pci },
197 { Intrinsic::hexagon_circ_ldw, Hexagon::L2_loadri_pci },
198 { Intrinsic::hexagon_circ_ldd, Hexagon::L2_loadrd_pci },
199 };
200 auto FLC = LoadPciMap.find(IntNo);
201 if (FLC != LoadPciMap.end()) {
202 EVT ValTy = (IntNo == Intrinsic::hexagon_circ_ldd) ? MVT::i64 : MVT::i32;
203 EVT RTys[] = { ValTy, MVT::i32, MVT::Other };
204 // Operands: { Base, Increment, Modifier, Chain }
205 auto Inc = cast<ConstantSDNode>(IntN->getOperand(5));
206 SDValue I = CurDAG->getTargetConstant(Inc->getSExtValue(), dl, MVT::i32);
207 MachineSDNode *Res = CurDAG->getMachineNode(FLC->second, dl, RTys,
208 { IntN->getOperand(2), I, IntN->getOperand(4),
209 IntN->getOperand(0) });
210 return Res;
211 }
212
213 return nullptr;
214}
215
216SDNode *HexagonDAGToDAGISel::StoreInstrForLoadIntrinsic(MachineSDNode *LoadN,
217 SDNode *IntN) {
218 // The "LoadN" is just a machine load instruction. The intrinsic also
219 // involves storing it. Generate an appropriate store to the location
220 // given in the intrinsic's operand(3).
221 uint64_t F = HII->get(LoadN->getMachineOpcode()).TSFlags;
222 unsigned SizeBits = (F >> HexagonII::MemAccessSizePos) &
223 HexagonII::MemAccesSizeMask;
224 unsigned Size = 1U << (SizeBits-1);
225
226 SDLoc dl(IntN);
227 MachinePointerInfo PI;
228 SDValue TS;
229 SDValue Loc = IntN->getOperand(3);
230
231 if (Size >= 4)
232 TS = CurDAG->getStore(SDValue(LoadN, 2), dl, SDValue(LoadN, 0), Loc, PI,
233 Align(Size));
234 else
235 TS = CurDAG->getTruncStore(SDValue(LoadN, 2), dl, SDValue(LoadN, 0), Loc,
236 PI, MVT::getIntegerVT(Size * 8), Align(Size));
237
238 SDNode *StoreN;
239 {
240 HandleSDNode Handle(TS);
241 SelectStore(TS.getNode());
242 StoreN = Handle.getValue().getNode();
243 }
244
245 // Load's results are { Loaded value, Updated pointer, Chain }
246 ReplaceUses(SDValue(IntN, 0), SDValue(LoadN, 1));
247 ReplaceUses(SDValue(IntN, 1), SDValue(StoreN, 0));
248 return StoreN;
249}
250
251bool HexagonDAGToDAGISel::tryLoadOfLoadIntrinsic(LoadSDNode *N) {
252 // The intrinsics for load circ/brev perform two operations:
253 // 1. Load a value V from the specified location, using the addressing
254 // mode corresponding to the intrinsic.
255 // 2. Store V into a specified location. This location is typically a
256 // local, temporary object.
257 // In many cases, the program using these intrinsics will immediately
258 // load V again from the local object. In those cases, when certain
259 // conditions are met, the last load can be removed.
260 // This function identifies and optimizes this pattern. If the pattern
261// cannot be optimized, it returns false, which will cause the load
262 // to be selected separately from the intrinsic (which will be handled
263 // in SelectIntrinsicWChain).
264
265 SDValue Ch = N->getOperand(0);
266 SDValue Loc = N->getOperand(1);
267
268 // Assume that the load and the intrinsic are connected directly with a
269 // chain:
270 // t1: i32,ch = int.load ..., ..., ..., Loc, ... // <-- C
271 // t2: i32,ch = load t1:1, Loc, ...
272 SDNode *C = Ch.getNode();
273
274 if (C->getOpcode() != ISD::INTRINSIC_W_CHAIN)
275 return false;
276
277 // The second load can only be eliminated if its extension type matches
278 // that of the load instruction corresponding to the intrinsic. The user
279 // can provide an address of an unsigned variable to store the result of
280 // a sign-extending intrinsic into (or the other way around).
281 ISD::LoadExtType IntExt;
282 switch (cast<ConstantSDNode>(C->getOperand(1))->getZExtValue()) {
283 case Intrinsic::hexagon_circ_ldub:
284 case Intrinsic::hexagon_circ_lduh:
285 IntExt = ISD::ZEXTLOAD;
286 break;
287 case Intrinsic::hexagon_circ_ldw:
288 case Intrinsic::hexagon_circ_ldd:
289 IntExt = ISD::NON_EXTLOAD;
290 break;
291 default:
292 IntExt = ISD::SEXTLOAD;
293 break;
294 }
295 if (N->getExtensionType() != IntExt)
296 return false;
297
298 // Make sure the target location for the loaded value in the load intrinsic
299 // is the location from which LD (or N) is loading.
300 if (C->getNumOperands() < 4 || Loc.getNode() != C->getOperand(3).getNode())
301 return false;
302
303 if (MachineSDNode *L = LoadInstrForLoadIntrinsic(C)) {
304 SDNode *S = StoreInstrForLoadIntrinsic(L, C);
305 SDValue F[] = { SDValue(N,0), SDValue(N,1), SDValue(C,0), SDValue(C,1) };
306 SDValue T[] = { SDValue(L,0), SDValue(S,0), SDValue(L,1), SDValue(S,0) };
307 ReplaceUses(F, T, array_lengthof(T));
308 // This transformation will leave the intrinsic dead. If it remains in
309 // the DAG, the selection code will see it again, but without the load,
310 // and it will generate a store that is normally required for it.
311 CurDAG->RemoveDeadNode(C);
312 return true;
313 }
314 return false;
315}
316
317// Convert the bit-reverse load intrinsic to appropriate target instruction.
318bool HexagonDAGToDAGISel::SelectBrevLdIntrinsic(SDNode *IntN) {
319 if (IntN->getOpcode() != ISD::INTRINSIC_W_CHAIN)
320 return false;
321
322 const SDLoc &dl(IntN);
323 unsigned IntNo = cast<ConstantSDNode>(IntN->getOperand(1))->getZExtValue();
324
325 static const std::map<unsigned, unsigned> LoadBrevMap = {
326 { Intrinsic::hexagon_L2_loadrb_pbr, Hexagon::L2_loadrb_pbr },
327 { Intrinsic::hexagon_L2_loadrub_pbr, Hexagon::L2_loadrub_pbr },
328 { Intrinsic::hexagon_L2_loadrh_pbr, Hexagon::L2_loadrh_pbr },
329 { Intrinsic::hexagon_L2_loadruh_pbr, Hexagon::L2_loadruh_pbr },
330 { Intrinsic::hexagon_L2_loadri_pbr, Hexagon::L2_loadri_pbr },
331 { Intrinsic::hexagon_L2_loadrd_pbr, Hexagon::L2_loadrd_pbr }
332 };
333 auto FLI = LoadBrevMap.find(IntNo);
334 if (FLI != LoadBrevMap.end()) {
335 EVT ValTy =
336 (IntNo == Intrinsic::hexagon_L2_loadrd_pbr) ? MVT::i64 : MVT::i32;
337 EVT RTys[] = { ValTy, MVT::i32, MVT::Other };
338 // Operands of Intrinsic: {chain, enum ID of intrinsic, baseptr,
339 // modifier}.
340 // Operands of target instruction: { Base, Modifier, Chain }.
341 MachineSDNode *Res = CurDAG->getMachineNode(
342 FLI->second, dl, RTys,
343 {IntN->getOperand(2), IntN->getOperand(3), IntN->getOperand(0)});
344
345 MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(IntN)->getMemOperand();
346 CurDAG->setNodeMemRefs(Res, {MemOp});
347
348 ReplaceUses(SDValue(IntN, 0), SDValue(Res, 0));
349 ReplaceUses(SDValue(IntN, 1), SDValue(Res, 1));
350 ReplaceUses(SDValue(IntN, 2), SDValue(Res, 2));
351 CurDAG->RemoveDeadNode(IntN);
352 return true;
353 }
354 return false;
355}
356
357/// Generate a machine instruction node for the new circular buffer intrinsics.
358/// The new versions use a CSx register instead of the K field.
359bool HexagonDAGToDAGISel::SelectNewCircIntrinsic(SDNode *IntN) {
360 if (IntN->getOpcode() != ISD::INTRINSIC_W_CHAIN)
361 return false;
362
363 SDLoc DL(IntN);
364 unsigned IntNo = cast<ConstantSDNode>(IntN->getOperand(1))->getZExtValue();
365 SmallVector<SDValue, 7> Ops;
366
367 static std::map<unsigned,unsigned> LoadNPcMap = {
368 { Intrinsic::hexagon_L2_loadrub_pci, Hexagon::PS_loadrub_pci },
369 { Intrinsic::hexagon_L2_loadrb_pci, Hexagon::PS_loadrb_pci },
370 { Intrinsic::hexagon_L2_loadruh_pci, Hexagon::PS_loadruh_pci },
371 { Intrinsic::hexagon_L2_loadrh_pci, Hexagon::PS_loadrh_pci },
372 { Intrinsic::hexagon_L2_loadri_pci, Hexagon::PS_loadri_pci },
373 { Intrinsic::hexagon_L2_loadrd_pci, Hexagon::PS_loadrd_pci },
374 { Intrinsic::hexagon_L2_loadrub_pcr, Hexagon::PS_loadrub_pcr },
375 { Intrinsic::hexagon_L2_loadrb_pcr, Hexagon::PS_loadrb_pcr },
376 { Intrinsic::hexagon_L2_loadruh_pcr, Hexagon::PS_loadruh_pcr },
377 { Intrinsic::hexagon_L2_loadrh_pcr, Hexagon::PS_loadrh_pcr },
378 { Intrinsic::hexagon_L2_loadri_pcr, Hexagon::PS_loadri_pcr },
379 { Intrinsic::hexagon_L2_loadrd_pcr, Hexagon::PS_loadrd_pcr }
380 };
381 auto FLI = LoadNPcMap.find (IntNo);
382 if (FLI != LoadNPcMap.end()) {
383 EVT ValTy = MVT::i32;
384 if (IntNo == Intrinsic::hexagon_L2_loadrd_pci ||
385 IntNo == Intrinsic::hexagon_L2_loadrd_pcr)
386 ValTy = MVT::i64;
387 EVT RTys[] = { ValTy, MVT::i32, MVT::Other };
388 // Handle load.*_pci case which has 6 operands.
389 if (IntN->getNumOperands() == 6) {
390 auto Inc = cast<ConstantSDNode>(IntN->getOperand(3));
391 SDValue I = CurDAG->getTargetConstant(Inc->getSExtValue(), DL, MVT::i32);
392 // Operands: { Base, Increment, Modifier, Start, Chain }.
393 Ops = { IntN->getOperand(2), I, IntN->getOperand(4), IntN->getOperand(5),
394 IntN->getOperand(0) };
395 } else
396 // Handle load.*_pcr case which has 5 operands.
397 // Operands: { Base, Modifier, Start, Chain }.
398 Ops = { IntN->getOperand(2), IntN->getOperand(3), IntN->getOperand(4),
399 IntN->getOperand(0) };
400 MachineSDNode *Res = CurDAG->getMachineNode(FLI->second, DL, RTys, Ops);
401 ReplaceUses(SDValue(IntN, 0), SDValue(Res, 0));
402 ReplaceUses(SDValue(IntN, 1), SDValue(Res, 1));
403 ReplaceUses(SDValue(IntN, 2), SDValue(Res, 2));
404 CurDAG->RemoveDeadNode(IntN);
405 return true;
406 }
407
408 static std::map<unsigned,unsigned> StoreNPcMap = {
409 { Intrinsic::hexagon_S2_storerb_pci, Hexagon::PS_storerb_pci },
410 { Intrinsic::hexagon_S2_storerh_pci, Hexagon::PS_storerh_pci },
411 { Intrinsic::hexagon_S2_storerf_pci, Hexagon::PS_storerf_pci },
412 { Intrinsic::hexagon_S2_storeri_pci, Hexagon::PS_storeri_pci },
413 { Intrinsic::hexagon_S2_storerd_pci, Hexagon::PS_storerd_pci },
414 { Intrinsic::hexagon_S2_storerb_pcr, Hexagon::PS_storerb_pcr },
415 { Intrinsic::hexagon_S2_storerh_pcr, Hexagon::PS_storerh_pcr },
416 { Intrinsic::hexagon_S2_storerf_pcr, Hexagon::PS_storerf_pcr },
417 { Intrinsic::hexagon_S2_storeri_pcr, Hexagon::PS_storeri_pcr },
418 { Intrinsic::hexagon_S2_storerd_pcr, Hexagon::PS_storerd_pcr }
419 };
420 auto FSI = StoreNPcMap.find (IntNo);
421 if (FSI != StoreNPcMap.end()) {
422 EVT RTys[] = { MVT::i32, MVT::Other };
423 // Handle store.*_pci case which has 7 operands.
424 if (IntN->getNumOperands() == 7) {
425 auto Inc = cast<ConstantSDNode>(IntN->getOperand(3));
426 SDValue I = CurDAG->getTargetConstant(Inc->getSExtValue(), DL, MVT::i32);
427 // Operands: { Base, Increment, Modifier, Value, Start, Chain }.
428 Ops = { IntN->getOperand(2), I, IntN->getOperand(4), IntN->getOperand(5),
429 IntN->getOperand(6), IntN->getOperand(0) };
430 } else
431 // Handle store.*_pcr case which has 6 operands.
432 // Operands: { Base, Modifier, Value, Start, Chain }.
433 Ops = { IntN->getOperand(2), IntN->getOperand(3), IntN->getOperand(4),
434 IntN->getOperand(5), IntN->getOperand(0) };
435 MachineSDNode *Res = CurDAG->getMachineNode(FSI->second, DL, RTys, Ops);
436 ReplaceUses(SDValue(IntN, 0), SDValue(Res, 0));
437 ReplaceUses(SDValue(IntN, 1), SDValue(Res, 1));
438 CurDAG->RemoveDeadNode(IntN);
439 return true;
440 }
441
442 return false;
443}
444
445void HexagonDAGToDAGISel::SelectLoad(SDNode *N) {
446 SDLoc dl(N);
447 LoadSDNode *LD = cast<LoadSDNode>(N);
448
449 // Handle indexed loads.
450 ISD::MemIndexedMode AM = LD->getAddressingMode();
451 if (AM != ISD::UNINDEXED) {
452 SelectIndexedLoad(LD, dl);
453 return;
454 }
455
456 // Handle patterns using circ/brev load intrinsics.
457 if (tryLoadOfLoadIntrinsic(LD))
458 return;
459
460 SelectCode(LD);
461}
462
463void HexagonDAGToDAGISel::SelectIndexedStore(StoreSDNode *ST, const SDLoc &dl) {
464 SDValue Chain = ST->getChain();
465 SDValue Base = ST->getBasePtr();
466 SDValue Offset = ST->getOffset();
467 SDValue Value = ST->getValue();
468 // Get the constant value.
469 int32_t Inc = cast<ConstantSDNode>(Offset.getNode())->getSExtValue();
470 EVT StoredVT = ST->getMemoryVT();
471 EVT ValueVT = Value.getValueType();
472
473 bool IsValidInc = HII->isValidAutoIncImm(StoredVT, Inc);
474 unsigned Opcode = 0;
475
476 assert(StoredVT.isSimple());
477 switch (StoredVT.getSimpleVT().SimpleTy) {
478 case MVT::i8:
479 Opcode = IsValidInc ? Hexagon::S2_storerb_pi : Hexagon::S2_storerb_io;
480 break;
481 case MVT::i16:
482 Opcode = IsValidInc ? Hexagon::S2_storerh_pi : Hexagon::S2_storerh_io;
483 break;
484 case MVT::i32:
485 case MVT::f32:
486 case MVT::v2i16:
487 case MVT::v4i8:
488 Opcode = IsValidInc ? Hexagon::S2_storeri_pi : Hexagon::S2_storeri_io;
489 break;
490 case MVT::i64:
491 case MVT::f64:
492 case MVT::v2i32:
493 case MVT::v4i16:
494 case MVT::v8i8:
495 Opcode = IsValidInc ? Hexagon::S2_storerd_pi : Hexagon::S2_storerd_io;
496 break;
497 case MVT::v64i8:
498 case MVT::v32i16:
499 case MVT::v16i32:
500 case MVT::v8i64:
501 case MVT::v128i8:
502 case MVT::v64i16:
503 case MVT::v32i32:
504 case MVT::v16i64:
505 if (isAlignedMemNode(ST)) {
506 if (ST->isNonTemporal())
507 Opcode = IsValidInc ? Hexagon::V6_vS32b_nt_pi : Hexagon::V6_vS32b_nt_ai;
508 else
509 Opcode = IsValidInc ? Hexagon::V6_vS32b_pi : Hexagon::V6_vS32b_ai;
510 } else {
511 Opcode = IsValidInc ? Hexagon::V6_vS32Ub_pi : Hexagon::V6_vS32Ub_ai;
512 }
513 break;
514 default:
515 llvm_unreachable("Unexpected memory type in indexed store");
516 }
517
518 if (ST->isTruncatingStore() && ValueVT.getSizeInBits() == 64) {
519 assert(StoredVT.getSizeInBits() < 64 && "Not a truncating store");
520 Value = CurDAG->getTargetExtractSubreg(Hexagon::isub_lo,
521 dl, MVT::i32, Value);
522 }
523
524 SDValue IncV = CurDAG->getTargetConstant(Inc, dl, MVT::i32);
525 MachineMemOperand *MemOp = ST->getMemOperand();
526
527 // Next address Chain
528 SDValue From[2] = { SDValue(ST,0), SDValue(ST,1) };
529 SDValue To[2];
530
531 if (IsValidInc) {
532 // Build post increment store.
533 SDValue Ops[] = { Base, IncV, Value, Chain };
534 MachineSDNode *S = CurDAG->getMachineNode(Opcode, dl, MVT::i32, MVT::Other,
535 Ops);
536 CurDAG->setNodeMemRefs(S, {MemOp});
537 To[0] = SDValue(S, 0);
538 To[1] = SDValue(S, 1);
539 } else {
540 SDValue Zero = CurDAG->getTargetConstant(0, dl, MVT::i32);
541 SDValue Ops[] = { Base, Zero, Value, Chain };
542 MachineSDNode *S = CurDAG->getMachineNode(Opcode, dl, MVT::Other, Ops);
543 CurDAG->setNodeMemRefs(S, {MemOp});
544 To[1] = SDValue(S, 0);
545 MachineSDNode *A = CurDAG->getMachineNode(Hexagon::A2_addi, dl, MVT::i32,
546 Base, IncV);
547 To[0] = SDValue(A, 0);
548 }
549
550 ReplaceUses(From, To, 2);
551 CurDAG->RemoveDeadNode(ST);
552}
553
554void HexagonDAGToDAGISel::SelectStore(SDNode *N) {
555 SDLoc dl(N);
556 StoreSDNode *ST = cast<StoreSDNode>(N);
557
558 // Handle indexed stores.
559 ISD::MemIndexedMode AM = ST->getAddressingMode();
560 if (AM != ISD::UNINDEXED) {
561 SelectIndexedStore(ST, dl);
562 return;
563 }
564
565 SelectCode(ST);
566}
567
568void HexagonDAGToDAGISel::SelectSHL(SDNode *N) {
569 SDLoc dl(N);
570 SDValue Shl_0 = N->getOperand(0);
571 SDValue Shl_1 = N->getOperand(1);
572
573 auto Default = [this,N] () -> void { SelectCode(N); };
574
575 if (N->getValueType(0) != MVT::i32 || Shl_1.getOpcode() != ISD::Constant)
576 return Default();
577
578 // RHS is const.
579 int32_t ShlConst = cast<ConstantSDNode>(Shl_1)->getSExtValue();
580
581 if (Shl_0.getOpcode() == ISD::MUL) {
582 SDValue Mul_0 = Shl_0.getOperand(0); // Val
583 SDValue Mul_1 = Shl_0.getOperand(1); // Const
584 // RHS of mul is const.
585 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Mul_1)) {
586 int32_t ValConst = C->getSExtValue() << ShlConst;
587 if (isInt<9>(ValConst)) {
588 SDValue Val = CurDAG->getTargetConstant(ValConst, dl, MVT::i32);
589 SDNode *Result = CurDAG->getMachineNode(Hexagon::M2_mpysmi, dl,
590 MVT::i32, Mul_0, Val);
591 ReplaceNode(N, Result);
592 return;
593 }
594 }
595 return Default();
596 }
597
598 if (Shl_0.getOpcode() == ISD::SUB) {
599 SDValue Sub_0 = Shl_0.getOperand(0); // Const 0
600 SDValue Sub_1 = Shl_0.getOperand(1); // Val
601 if (ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(Sub_0)) {
602 if (C1->getSExtValue() != 0 || Sub_1.getOpcode() != ISD::SHL)
603 return Default();
604 SDValue Shl2_0 = Sub_1.getOperand(0); // Val
605 SDValue Shl2_1 = Sub_1.getOperand(1); // Const
606 if (ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(Shl2_1)) {
607 int32_t ValConst = 1 << (ShlConst + C2->getSExtValue());
608 if (isInt<9>(-ValConst)) {
609 SDValue Val = CurDAG->getTargetConstant(-ValConst, dl, MVT::i32);
610 SDNode *Result = CurDAG->getMachineNode(Hexagon::M2_mpysmi, dl,
611 MVT::i32, Shl2_0, Val);
612 ReplaceNode(N, Result);
613 return;
614 }
615 }
616 }
617 }
618
619 return Default();
620}
621
622//
623// Handling intrinsics for circular load and bitreverse load.
624//
625void HexagonDAGToDAGISel::SelectIntrinsicWChain(SDNode *N) {
626 if (MachineSDNode *L = LoadInstrForLoadIntrinsic(N)) {
627 StoreInstrForLoadIntrinsic(L, N);
628 CurDAG->RemoveDeadNode(N);
629 return;
630 }
631
632 // Handle bit-reverse load intrinsics.
633 if (SelectBrevLdIntrinsic(N))
634 return;
635
636 if (SelectNewCircIntrinsic(N))
637 return;
638
639 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
640 if (IntNo == Intrinsic::hexagon_V6_vgathermw ||
641 IntNo == Intrinsic::hexagon_V6_vgathermw_128B ||
642 IntNo == Intrinsic::hexagon_V6_vgathermh ||
643 IntNo == Intrinsic::hexagon_V6_vgathermh_128B ||
644 IntNo == Intrinsic::hexagon_V6_vgathermhw ||
645 IntNo == Intrinsic::hexagon_V6_vgathermhw_128B) {
646 SelectV65Gather(N);
647 return;
648 }
649 if (IntNo == Intrinsic::hexagon_V6_vgathermwq ||
650 IntNo == Intrinsic::hexagon_V6_vgathermwq_128B ||
651 IntNo == Intrinsic::hexagon_V6_vgathermhq ||
652 IntNo == Intrinsic::hexagon_V6_vgathermhq_128B ||
653 IntNo == Intrinsic::hexagon_V6_vgathermhwq ||
654 IntNo == Intrinsic::hexagon_V6_vgathermhwq_128B) {
655 SelectV65GatherPred(N);
656 return;
657 }
658
659 SelectCode(N);
660}
661
662void HexagonDAGToDAGISel::SelectIntrinsicWOChain(SDNode *N) {
663 unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
664 unsigned Bits;
665 switch (IID) {
666 case Intrinsic::hexagon_S2_vsplatrb:
667 Bits = 8;
668 break;
669 case Intrinsic::hexagon_S2_vsplatrh:
670 Bits = 16;
671 break;
672 case Intrinsic::hexagon_V6_vaddcarry:
673 case Intrinsic::hexagon_V6_vaddcarry_128B:
674 case Intrinsic::hexagon_V6_vsubcarry:
675 case Intrinsic::hexagon_V6_vsubcarry_128B:
676 SelectHVXDualOutput(N);
677 return;
678 default:
679 SelectCode(N);
680 return;
681 }
682
683 SDValue V = N->getOperand(1);
684 SDValue U;
685 if (keepsLowBits(V, Bits, U)) {
686 SDValue R = CurDAG->getNode(N->getOpcode(), SDLoc(N), N->getValueType(0),
687 N->getOperand(0), U);
688 ReplaceNode(N, R.getNode());
689 SelectCode(R.getNode());
690 return;
691 }
692 SelectCode(N);
693}
694
695//
696// Map floating point constant values.
697//
698void HexagonDAGToDAGISel::SelectConstantFP(SDNode *N) {
699 SDLoc dl(N);
700 auto *CN = cast<ConstantFPSDNode>(N);
701 APInt A = CN->getValueAPF().bitcastToAPInt();
702 if (N->getValueType(0) == MVT::f32) {
703 SDValue V = CurDAG->getTargetConstant(A.getZExtValue(), dl, MVT::i32);
704 ReplaceNode(N, CurDAG->getMachineNode(Hexagon::A2_tfrsi, dl, MVT::f32, V));
705 return;
706 }
707 if (N->getValueType(0) == MVT::f64) {
708 SDValue V = CurDAG->getTargetConstant(A.getZExtValue(), dl, MVT::i64);
709 ReplaceNode(N, CurDAG->getMachineNode(Hexagon::CONST64, dl, MVT::f64, V));
710 return;
711 }
712
713 SelectCode(N);
714}
715
716//
717// Map boolean values.
718//
719void HexagonDAGToDAGISel::SelectConstant(SDNode *N) {
720 if (N->getValueType(0) == MVT::i1) {
721 assert(!(cast<ConstantSDNode>(N)->getZExtValue() >> 1));
722 unsigned Opc = (cast<ConstantSDNode>(N)->getSExtValue() != 0)
723 ? Hexagon::PS_true
724 : Hexagon::PS_false;
725 ReplaceNode(N, CurDAG->getMachineNode(Opc, SDLoc(N), MVT::i1));
726 return;
727 }
728
729 SelectCode(N);
730}
731
732void HexagonDAGToDAGISel::SelectFrameIndex(SDNode *N) {
733 MachineFrameInfo &MFI = MF->getFrameInfo();
734 const HexagonFrameLowering *HFI = HST->getFrameLowering();
735 int FX = cast<FrameIndexSDNode>(N)->getIndex();
736 Align StkA = HFI->getStackAlign();
737 Align MaxA = MFI.getMaxAlign();
738 SDValue FI = CurDAG->getTargetFrameIndex(FX, MVT::i32);
739 SDLoc DL(N);
740 SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);
741 SDNode *R = nullptr;
742
743 // Use PS_fi when:
744 // - the object is fixed, or
745 // - there are no objects with higher-than-default alignment, or
746 // - there are no dynamically allocated objects.
747 // Otherwise, use PS_fia.
748 if (FX < 0 || MaxA <= StkA || !MFI.hasVarSizedObjects()) {
749 R = CurDAG->getMachineNode(Hexagon::PS_fi, DL, MVT::i32, FI, Zero);
750 } else {
751 auto &HMFI = *MF->getInfo<HexagonMachineFunctionInfo>();
752 unsigned AR = HMFI.getStackAlignBaseVReg();
753 SDValue CH = CurDAG->getEntryNode();
754 SDValue Ops[] = { CurDAG->getCopyFromReg(CH, DL, AR, MVT::i32), FI, Zero };
755 R = CurDAG->getMachineNode(Hexagon::PS_fia, DL, MVT::i32, Ops);
756 }
757
758 ReplaceNode(N, R);
759}
760
761void HexagonDAGToDAGISel::SelectAddSubCarry(SDNode *N) {
762 unsigned OpcCarry = N->getOpcode() == HexagonISD::ADDC ? Hexagon::A4_addp_c
763 : Hexagon::A4_subp_c;
764 SDNode *C = CurDAG->getMachineNode(OpcCarry, SDLoc(N), N->getVTList(),
765 { N->getOperand(0), N->getOperand(1),
766 N->getOperand(2) });
767 ReplaceNode(N, C);
768}
769
770void HexagonDAGToDAGISel::SelectVAlign(SDNode *N) {
771 MVT ResTy = N->getValueType(0).getSimpleVT();
772 if (HST->isHVXVectorType(ResTy, true))
773 return SelectHvxVAlign(N);
774
775 const SDLoc &dl(N);
776 unsigned VecLen = ResTy.getSizeInBits();
777 if (VecLen == 32) {
778 SDValue Ops[] = {
779 CurDAG->getTargetConstant(Hexagon::DoubleRegsRegClassID, dl, MVT::i32),
780 N->getOperand(0),
781 CurDAG->getTargetConstant(Hexagon::isub_hi, dl, MVT::i32),
782 N->getOperand(1),
783 CurDAG->getTargetConstant(Hexagon::isub_lo, dl, MVT::i32)
784 };
785 SDNode *R = CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl,
786 MVT::i64, Ops);
787
788 // Shift right by "(Addr & 0x3) * 8" bytes.
789 SDNode *C;
790 SDValue M0 = CurDAG->getTargetConstant(0x18, dl, MVT::i32);
791 SDValue M1 = CurDAG->getTargetConstant(0x03, dl, MVT::i32);
792 if (HST->useCompound()) {
793 C = CurDAG->getMachineNode(Hexagon::S4_andi_asl_ri, dl, MVT::i32,
794 M0, N->getOperand(2), M1);
795 } else {
796 SDNode *T = CurDAG->getMachineNode(Hexagon::S2_asl_i_r, dl, MVT::i32,
797 N->getOperand(2), M1);
798 C = CurDAG->getMachineNode(Hexagon::A2_andir, dl, MVT::i32,
799 SDValue(T, 0), M0);
800 }
801 SDNode *S = CurDAG->getMachineNode(Hexagon::S2_lsr_r_p, dl, MVT::i64,
802 SDValue(R, 0), SDValue(C, 0));
803 SDValue E = CurDAG->getTargetExtractSubreg(Hexagon::isub_lo, dl, ResTy,
804 SDValue(S, 0));
805 ReplaceNode(N, E.getNode());
806 } else {
807 assert(VecLen == 64);
808 SDNode *Pu = CurDAG->getMachineNode(Hexagon::C2_tfrrp, dl, MVT::v8i1,
809 N->getOperand(2));
810 SDNode *VA = CurDAG->getMachineNode(Hexagon::S2_valignrb, dl, ResTy,
811 N->getOperand(0), N->getOperand(1),
812 SDValue(Pu,0));
813 ReplaceNode(N, VA);
814 }
815}
816
817void HexagonDAGToDAGISel::SelectVAlignAddr(SDNode *N) {
818 const SDLoc &dl(N);
819 SDValue A = N->getOperand(1);
820 int Mask = -cast<ConstantSDNode>(A.getNode())->getSExtValue();
821 assert(isPowerOf2_32(-Mask));
822
823 SDValue M = CurDAG->getTargetConstant(Mask, dl, MVT::i32);
824 SDNode *AA = CurDAG->getMachineNode(Hexagon::A2_andir, dl, MVT::i32,
825 N->getOperand(0), M);
826 ReplaceNode(N, AA);
827}
828
829// Handle these nodes here to avoid having to write patterns for all
830// combinations of input/output types. In all cases, the resulting
831// instruction is the same.
832void HexagonDAGToDAGISel::SelectTypecast(SDNode *N) {
833 SDValue Op = N->getOperand(0);
834 MVT OpTy = Op.getValueType().getSimpleVT();
835 SDNode *T = CurDAG->MorphNodeTo(N, N->getOpcode(),
836 CurDAG->getVTList(OpTy), {Op});
837 ReplaceNode(T, Op.getNode());
838}
839
840void HexagonDAGToDAGISel::SelectP2D(SDNode *N) {
841 MVT ResTy = N->getValueType(0).getSimpleVT();
842 SDNode *T = CurDAG->getMachineNode(Hexagon::C2_mask, SDLoc(N), ResTy,
843 N->getOperand(0));
844 ReplaceNode(N, T);
845}
846
847void HexagonDAGToDAGISel::SelectD2P(SDNode *N) {
848 const SDLoc &dl(N);
849 MVT ResTy = N->getValueType(0).getSimpleVT();
850 SDValue Zero = CurDAG->getTargetConstant(0, dl, MVT::i32);
851 SDNode *T = CurDAG->getMachineNode(Hexagon::A4_vcmpbgtui, dl, ResTy,
852 N->getOperand(0), Zero);
853 ReplaceNode(N, T);
854}
855
856void HexagonDAGToDAGISel::SelectV2Q(SDNode *N) {
857 const SDLoc &dl(N);
858 MVT ResTy = N->getValueType(0).getSimpleVT();
859 // The argument to V2Q should be a single vector.
860 MVT OpTy = N->getOperand(0).getValueType().getSimpleVT(); (void)OpTy;
861 assert(HST->getVectorLength() * 8 == OpTy.getSizeInBits());
862
863 SDValue C = CurDAG->getTargetConstant(-1, dl, MVT::i32);
864 SDNode *R = CurDAG->getMachineNode(Hexagon::A2_tfrsi, dl, MVT::i32, C);
865 SDNode *T = CurDAG->getMachineNode(Hexagon::V6_vandvrt, dl, ResTy,
866 N->getOperand(0), SDValue(R,0));
867 ReplaceNode(N, T);
868}
869
870void HexagonDAGToDAGISel::SelectQ2V(SDNode *N) {
871 const SDLoc &dl(N);
872 MVT ResTy = N->getValueType(0).getSimpleVT();
873 // The result of V2Q should be a single vector.
874 assert(HST->getVectorLength() * 8 == ResTy.getSizeInBits());
875
876 SDValue C = CurDAG->getTargetConstant(-1, dl, MVT::i32);
877 SDNode *R = CurDAG->getMachineNode(Hexagon::A2_tfrsi, dl, MVT::i32, C);
878 SDNode *T = CurDAG->getMachineNode(Hexagon::V6_vandqrt, dl, ResTy,
879 N->getOperand(0), SDValue(R,0));
880 ReplaceNode(N, T);
881}
882
883void HexagonDAGToDAGISel::Select(SDNode *N) {
884 if (N->isMachineOpcode())
885 return N->setNodeId(-1); // Already selected.
886
887 switch (N->getOpcode()) {
888 case ISD::Constant: return SelectConstant(N);
889 case ISD::ConstantFP: return SelectConstantFP(N);
890 case ISD::FrameIndex: return SelectFrameIndex(N);
891 case ISD::SHL: return SelectSHL(N);
892 case ISD::LOAD: return SelectLoad(N);
893 case ISD::STORE: return SelectStore(N);
894 case ISD::INTRINSIC_W_CHAIN: return SelectIntrinsicWChain(N);
895 case ISD::INTRINSIC_WO_CHAIN: return SelectIntrinsicWOChain(N);
896
897 case HexagonISD::ADDC:
898 case HexagonISD::SUBC: return SelectAddSubCarry(N);
899 case HexagonISD::VALIGN: return SelectVAlign(N);
900 case HexagonISD::VALIGNADDR: return SelectVAlignAddr(N);
901 case HexagonISD::TYPECAST: return SelectTypecast(N);
902 case HexagonISD::P2D: return SelectP2D(N);
903 case HexagonISD::D2P: return SelectD2P(N);
904 case HexagonISD::Q2V: return SelectQ2V(N);
905 case HexagonISD::V2Q: return SelectV2Q(N);
906 }
907
908 if (HST->useHVXOps()) {
909 switch (N->getOpcode()) {
910 case ISD::VECTOR_SHUFFLE: return SelectHvxShuffle(N);
911 case HexagonISD::VROR: return SelectHvxRor(N);
912 }
913 }
914
915 SelectCode(N);
916}
917
918bool HexagonDAGToDAGISel::
919SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID,
920 std::vector<SDValue> &OutOps) {
921 SDValue Inp = Op, Res;
922
923 switch (ConstraintID) {
924 default:
925 return true;
926 case InlineAsm::Constraint_o: // Offsetable.
927 case InlineAsm::Constraint_v: // Not offsetable.
928 case InlineAsm::Constraint_m: // Memory.
929 if (SelectAddrFI(Inp, Res))
930 OutOps.push_back(Res);
931 else
932 OutOps.push_back(Inp);
933 break;
934 }
935
936 OutOps.push_back(CurDAG->getTargetConstant(0, SDLoc(Op), MVT::i32));
937 return false;
938}
939
940
941static bool isMemOPCandidate(SDNode *I, SDNode *U) {
942 // I is an operand of U. Check if U is an arithmetic (binary) operation
943 // usable in a memop, where the other operand is a loaded value, and the
944 // result of U is stored in the same location.
945
946 if (!U->hasOneUse())
947 return false;
948 unsigned Opc = U->getOpcode();
949 switch (Opc) {
950 case ISD::ADD:
951 case ISD::SUB:
952 case ISD::AND:
953 case ISD::OR:
954 break;
955 default:
956 return false;
957 }
958
959 SDValue S0 = U->getOperand(0);
960 SDValue S1 = U->getOperand(1);
961 SDValue SY = (S0.getNode() == I) ? S1 : S0;
962
963 SDNode *UUse = *U->use_begin();
964 if (UUse->getNumValues() != 1)
965 return false;
966
967 // Check if one of the inputs to U is a load instruction and the output
968 // is used by a store instruction. If so and they also have the same
969// base pointer, then don't preprocess this node sequence as it
970 // can be matched to a memop.
971 SDNode *SYNode = SY.getNode();
972 if (UUse->getOpcode() == ISD::STORE && SYNode->getOpcode() == ISD::LOAD) {
973 SDValue LDBasePtr = cast<MemSDNode>(SYNode)->getBasePtr();
974 SDValue STBasePtr = cast<MemSDNode>(UUse)->getBasePtr();
975 if (LDBasePtr == STBasePtr)
976 return true;
977 }
978 return false;
979}
980
981
982// Transform: (or (select c x 0) z) -> (select c (or x z) z)
983// (or (select c 0 y) z) -> (select c z (or y z))
984void HexagonDAGToDAGISel::ppSimplifyOrSelect0(std::vector<SDNode*> &&Nodes) {
985 SelectionDAG &DAG = *CurDAG;
986
987 for (auto I : Nodes) {
988 if (I->getOpcode() != ISD::OR)
989 continue;
990
991 auto IsZero = [] (const SDValue &V) -> bool {
992 if (ConstantSDNode *SC = dyn_cast<ConstantSDNode>(V.getNode()))
993 return SC->isZero();
994 return false;
995 };
996 auto IsSelect0 = [IsZero] (const SDValue &Op) -> bool {
997 if (Op.getOpcode() != ISD::SELECT)
998 return false;
999 return IsZero(Op.getOperand(1)) || IsZero(Op.getOperand(2));
1000 };
1001
1002 SDValue N0 = I->getOperand(0), N1 = I->getOperand(1);
1003 EVT VT = I->getValueType(0);
1004 bool SelN0 = IsSelect0(N0);
1005 SDValue SOp = SelN0 ? N0 : N1;
1006 SDValue VOp = SelN0 ? N1 : N0;
1007
1008 if (SOp.getOpcode() == ISD::SELECT && SOp.getNode()->hasOneUse()) {
1009 SDValue SC = SOp.getOperand(0);
1010 SDValue SX = SOp.getOperand(1);
1011 SDValue SY = SOp.getOperand(2);
1012 SDLoc DLS = SOp;
1013 if (IsZero(SY)) {
1014 SDValue NewOr = DAG.getNode(ISD::OR, DLS, VT, SX, VOp);
1015 SDValue NewSel = DAG.getNode(ISD::SELECT, DLS, VT, SC, NewOr, VOp);
1016 DAG.ReplaceAllUsesWith(I, NewSel.getNode());
1017 } else if (IsZero(SX)) {
1018 SDValue NewOr = DAG.getNode(ISD::OR, DLS, VT, SY, VOp);
1019 SDValue NewSel = DAG.getNode(ISD::SELECT, DLS, VT, SC, VOp, NewOr);
1020 DAG.ReplaceAllUsesWith(I, NewSel.getNode());
1021 }
1022 }
1023 }
1024}
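The rewrite above rests on a simple identity: if c is true both sides reduce to (or x z), and if c is false the left side is (or 0 z), which is z. A tiny stand-alone check of that identity on plain integers (illustrative only; the real transformation operates on SelectionDAG nodes):

    #include <cassert>
    #include <cstdint>

    // (or (select c x 0) z)  vs.  (select c (or x z) z)
    static uint32_t beforeRewrite(bool c, uint32_t x, uint32_t z) {
      return (c ? x : 0u) | z;
    }
    static uint32_t afterRewrite(bool c, uint32_t x, uint32_t z) {
      return c ? (x | z) : z;
    }

    int main() {
      for (uint32_t x : {0u, 1u, 0xF0u})
        for (uint32_t z : {0u, 3u, 0xFFu})
          for (bool c : {false, true})
            assert(beforeRewrite(c, x, z) == afterRewrite(c, x, z));
    }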
1025
1026// Transform: (store ch val (add x (add (shl y c) e)))
1027// to: (store ch val (add x (shl (add y d) c))),
1028// where e = (shl d c) for some integer d.
1029// The purpose of this is to enable generation of loads/stores with
1030// shifted addressing mode, i.e. mem(x+y<<#c). For that, the shift
1031// value c must be 0, 1 or 2.
1032void HexagonDAGToDAGISel::ppAddrReorderAddShl(std::vector<SDNode*> &&Nodes) {
1033 SelectionDAG &DAG = *CurDAG;
1034
1035 for (auto I : Nodes) {
1036 if (I->getOpcode() != ISD::STORE)
1037 continue;
1038
1039 // I matched: (store ch val Off)
1040 SDValue Off = I->getOperand(2);
1041 // Off needs to match: (add x (add (shl y c) (shl d c))))
1042 if (Off.getOpcode() != ISD::ADD)
1043 continue;
1044 // Off matched: (add x T0)
1045 SDValue T0 = Off.getOperand(1);
1046 // T0 needs to match: (add T1 T2):
1047 if (T0.getOpcode() != ISD::ADD)
1048 continue;
1049 // T0 matched: (add T1 T2)
1050 SDValue T1 = T0.getOperand(0);
1051 SDValue T2 = T0.getOperand(1);
1052 // T1 needs to match: (shl y c)
1053 if (T1.getOpcode() != ISD::SHL)
1054 continue;
1055 SDValue C = T1.getOperand(1);
1056 ConstantSDNode *CN = dyn_cast<ConstantSDNode>(C.getNode());
1057 if (CN == nullptr)
1058 continue;
1059 unsigned CV = CN->getZExtValue();
1060 if (CV > 2)
1061 continue;
1062 // T2 needs to match e, where e = (shl d c) for some d.
1063 ConstantSDNode *EN = dyn_cast<ConstantSDNode>(T2.getNode());
1064 if (EN == nullptr)
1065 continue;
1066 unsigned EV = EN->getZExtValue();
1067 if (EV % (1 << CV) != 0)
1068 continue;
1069 unsigned DV = EV / (1 << CV);
1070
1071 // Replace T0 with: (shl (add y d) c)
1072 SDLoc DL = SDLoc(I);
1073 EVT VT = T0.getValueType();
1074 SDValue D = DAG.getConstant(DV, DL, VT);
1075 // NewAdd = (add y d)
1076 SDValue NewAdd = DAG.getNode(ISD::ADD, DL, VT, T1.getOperand(0), D);
1077 // NewShl = (shl NewAdd c)
1078 SDValue NewShl = DAG.getNode(ISD::SHL, DL, VT, NewAdd, C);
1079 ReplaceNode(T0.getNode(), NewShl.getNode());
1080 }
1081}
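The transformation above is valid because e is required to be a multiple of 1 << c, so with d = e >> c the distributive identity (y << c) + (d << c) == (y + d) << c holds (modulo 2^32 for unsigned arithmetic). A small stand-alone check with assumed example values (illustrative only):

    #include <cassert>
    #include <cstdint>

    int main() {
      // Example: c = 2, e = 8, so d = e >> c = 2.
      uint32_t x = 100, y = 5, c = 2, e = 8;
      uint32_t d = e >> c;
      // Address before the rewrite: x + ((y << c) + e)
      // Address after the rewrite:  x + ((y + d) << c)
      assert(x + ((y << c) + e) == x + ((y + d) << c));
    }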
1082
1083// Transform: (load ch (add x (and (srl y c) Mask)))
1084// to: (load ch (add x (shl (srl y d) d-c)))
1085// where
1086// Mask = 00..0 111..1 0.0
1087//          |     |    +-- d-c 0s, and d-c is 0, 1 or 2.
1088//          |     +------- 1s
1089//          +------------- at most c 0s
1090// Motivating example:
1091// DAG combiner optimizes (add x (shl (srl y 5) 2))
1092// to (add x (and (srl y 3) 1FFFFFFC))
1093// which results in a constant-extended and(##...,lsr). This transformation
1094// undoes this simplification for cases where the shl can be folded into
1095// an addressing mode.
1096void HexagonDAGToDAGISel::ppAddrRewriteAndSrl(std::vector<SDNode*> &&Nodes) {
1097 SelectionDAG &DAG = *CurDAG;
1098
1099 for (SDNode *N : Nodes) {
1100 unsigned Opc = N->getOpcode();
1101 if (Opc != ISD::LOAD && Opc != ISD::STORE)
2
Assuming 'Opc' is equal to LOAD
1102 continue;
1103 SDValue Addr = Opc == ISD::LOAD ? N->getOperand(1) : N->getOperand(2);
2.1
'Opc' is equal to LOAD
3
'?' condition is true
1104 // Addr must match: (add x T0)
1105 if (Addr.getOpcode() != ISD::ADD)
4
Assuming the condition is false
5
Taking false branch
1106 continue;
1107 SDValue T0 = Addr.getOperand(1);
1108 // T0 must match: (and T1 Mask)
1109 if (T0.getOpcode() != ISD::AND)
6
Assuming the condition is false
7
Taking false branch
1110 continue;
1111
1112 // We have an AND.
1113 //
1114 // Check the first operand. It must be: (srl y c).
1115 SDValue S = T0.getOperand(0);
1116 if (S.getOpcode() != ISD::SRL)
8
Assuming the condition is false
9
Taking false branch
1117 continue;
1118 ConstantSDNode *SN = dyn_cast<ConstantSDNode>(S.getOperand(1).getNode());
10
Assuming the object is a 'ConstantSDNode'
1119 if (SN == nullptr)
11
Taking false branch
1120 continue;
1121 if (SN->getAPIntValue().getBitWidth() != 32)
12
Assuming the condition is false
13
Taking false branch
1122 continue;
1123 uint32_t CV = SN->getZExtValue();
1124
1125 // Check the second operand: the supposed mask.
1126 ConstantSDNode *MN = dyn_cast<ConstantSDNode>(T0.getOperand(1).getNode());
14
Assuming the object is a 'ConstantSDNode'
1127 if (MN == nullptr)
15
Taking false branch
1128 continue;
1129 if (MN->getAPIntValue().getBitWidth() != 32)
16
Assuming the condition is false
17
Taking false branch
1130 continue;
1131 uint32_t Mask = MN->getZExtValue();
1132 // Examine the mask.
1133 uint32_t TZ = countTrailingZeros(Mask);
18
Calling 'countTrailingZeros<unsigned int>'
25
Returning from 'countTrailingZeros<unsigned int>'
26
'TZ' initialized to 32
1134 uint32_t M1 = countTrailingOnes(Mask >> TZ);
27
The result of the right shift is undefined due to shifting by '32', which is greater or equal to the width of type 'uint32_t'
1135 uint32_t LZ = countLeadingZeros(Mask);
1136 // Trailing zeros + middle ones + leading zeros must equal the width.
1137 if (TZ + M1 + LZ != 32)
1138 continue;
1139 // The number of trailing zeros will be encoded in the addressing mode.
1140 if (TZ > 2)
1141 continue;
1142 // The number of leading zeros must be at most c.
1143 if (LZ > CV)
1144 continue;
1145
1146 // All looks good.
1147 SDValue Y = S.getOperand(0);
1148 EVT VT = Addr.getValueType();
1149 SDLoc dl(S);
1150 // TZ = D-C, so D = TZ+C.
1151 SDValue D = DAG.getConstant(TZ+CV, dl, VT);
1152 SDValue DC = DAG.getConstant(TZ, dl, VT);
1153 SDValue NewSrl = DAG.getNode(ISD::SRL, dl, VT, Y, D);
1154 SDValue NewShl = DAG.getNode(ISD::SHL, dl, VT, NewSrl, DC);
1155 ReplaceNode(T0.getNode(), NewShl.getNode());
1156 }
1157}
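The defect reported at line 1134 sits in this function: when MN->getZExtValue() is 0, countTrailingZeros(Mask) returns 32 and Mask >> TZ is undefined. Sketched below is a self-contained model of the mask check with one possible guard; it is illustrative only and may not match the eventual upstream fix.

    #include <cstdint>

    // Portable stand-ins for the LLVM helpers used at lines 1133-1135.
    static unsigned ctz32(uint32_t V) { return V == 0 ? 32u : __builtin_ctz(V); }
    static unsigned clz32(uint32_t V) { return V == 0 ? 32u : __builtin_clz(V); }
    static unsigned cto32(uint32_t V) { return ctz32(~V); }  // countTrailingOnes

    // Returns true if Mask has the shape "at most CV leading 0s, then 1s,
    // then at most two trailing 0s" (the condition checked at lines 1137-1144).
    bool maskIsShiftableOnes(uint32_t Mask, uint32_t CV, uint32_t &TZ) {
      if (Mask == 0)                  // guard: avoids the undefined Mask >> 32
        return false;
      TZ = ctz32(Mask);               // now 0 <= TZ <= 31
      uint32_t M1 = cto32(Mask >> TZ);
      uint32_t LZ = clz32(Mask);
      return TZ + M1 + LZ == 32 && TZ <= 2 && LZ <= CV;
    }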
1158
1159// Transform: (op ... (zext i1 c) ...) -> (select c (op ... 0 ...)
1160// (op ... 1 ...))
1161void HexagonDAGToDAGISel::ppHoistZextI1(std::vector<SDNode*> &&Nodes) {
1162 SelectionDAG &DAG = *CurDAG;
1163
1164 for (SDNode *N : Nodes) {
1165 unsigned Opc = N->getOpcode();
1166 if (Opc != ISD::ZERO_EXTEND)
1167 continue;
1168 SDValue OpI1 = N->getOperand(0);
1169 EVT OpVT = OpI1.getValueType();
1170 if (!OpVT.isSimple() || OpVT.getSimpleVT() != MVT::i1)
1171 continue;
1172 for (auto I = N->use_begin(), E = N->use_end(); I != E; ++I) {
1173 SDNode *U = *I;
1174 if (U->getNumValues() != 1)
1175 continue;
1176 EVT UVT = U->getValueType(0);
1177 if (!UVT.isSimple() || !UVT.isInteger() || UVT.getSimpleVT() == MVT::i1)
1178 continue;
1179 // Do not generate select for all i1 vector type.
1180 if (UVT.isVector() && UVT.getVectorElementType() == MVT::i1)
1181 continue;
1182 if (isMemOPCandidate(N, U))
1183 continue;
1184
1185 // Potentially simplifiable operation.
1186 unsigned I1N = I.getOperandNo();
1187 SmallVector<SDValue,2> Ops(U->getNumOperands());
1188 for (unsigned i = 0, n = U->getNumOperands(); i != n; ++i)
1189 Ops[i] = U->getOperand(i);
1190 EVT BVT = Ops[I1N].getValueType();
1191
1192 const SDLoc &dl(U);
1193 SDValue C0 = DAG.getConstant(0, dl, BVT);
1194 SDValue C1 = DAG.getConstant(1, dl, BVT);
1195 SDValue If0, If1;
1196
1197 if (isa<MachineSDNode>(U)) {
1198 unsigned UseOpc = U->getMachineOpcode();
1199 Ops[I1N] = C0;
1200 If0 = SDValue(DAG.getMachineNode(UseOpc, dl, UVT, Ops), 0);
1201 Ops[I1N] = C1;
1202 If1 = SDValue(DAG.getMachineNode(UseOpc, dl, UVT, Ops), 0);
1203 } else {
1204 unsigned UseOpc = U->getOpcode();
1205 Ops[I1N] = C0;
1206 If0 = DAG.getNode(UseOpc, dl, UVT, Ops);
1207 Ops[I1N] = C1;
1208 If1 = DAG.getNode(UseOpc, dl, UVT, Ops);
1209 }
1210 // We're generating a SELECT way after legalization, so keep the types
1211 // simple.
1212 unsigned UW = UVT.getSizeInBits();
1213 EVT SVT = (UW == 32 || UW == 64) ? MVT::getIntegerVT(UW) : UVT;
1214 SDValue Sel = DAG.getNode(ISD::SELECT, dl, SVT, OpI1,
1215 DAG.getBitcast(SVT, If1),
1216 DAG.getBitcast(SVT, If0));
1217 SDValue Ret = DAG.getBitcast(UVT, Sel);
1218 DAG.ReplaceAllUsesWith(U, Ret.getNode());
1219 }
1220 }
1221}
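The hoist above relies on (zext i1 c) being either 0 or 1, so the original operation equals a select between the two specialized operations. A tiny stand-alone check for an add (illustrative only; the real code rebuilds arbitrary user nodes):

    #include <cassert>
    #include <cstdint>

    // Models (add x (zext i1 c)) -> (select c (add x 1) (add x 0)).
    int main() {
      for (uint32_t x : {0u, 7u, 0xFFFFFFFFu})
        for (bool c : {false, true}) {
          uint32_t direct  = x + (c ? 1u : 0u);        // operand is zext i1 c
          uint32_t hoisted = c ? (x + 1u) : (x + 0u);  // select of two adds
          assert(direct == hoisted);
        }
    }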
1222
1223void HexagonDAGToDAGISel::PreprocessISelDAG() {
1224 // Repack all nodes before calling each preprocessing function,
1225 // because each of them can modify the set of nodes.
1226 auto getNodes = [this] () -> std::vector<SDNode*> {
1227 std::vector<SDNode*> T;
1228 T.reserve(CurDAG->allnodes_size());
1229 for (SDNode &N : CurDAG->allnodes())
1230 T.push_back(&N);
1231 return T;
1232 };
1233
1234 // Transform: (or (select c x 0) z) -> (select c (or x z) z)
1235 // (or (select c 0 y) z) -> (select c z (or y z))
1236 ppSimplifyOrSelect0(getNodes());
1237
1238 // Transform: (store ch val (add x (add (shl y c) e)))
1239 // to: (store ch val (add x (shl (add y d) c))),
1240 // where e = (shl d c) for some integer d.
1241 // The purpose of this is to enable generation of loads/stores with
1242 // shifted addressing mode, i.e. mem(x+y<<#c). For that, the shift
1243 // value c must be 0, 1 or 2.
1244 ppAddrReorderAddShl(getNodes());
1245
1246 // Transform: (load ch (add x (and (srl y c) Mask)))
1247 // to: (load ch (add x (shl (srl y d) d-c)))
1248 // where
1249 // Mask = 00..0 111..1 0.0
1250 //          |     |    +-- d-c 0s, and d-c is 0, 1 or 2.
1251 //          |     +------- 1s
1252 //          +------------- at most c 0s
1253 // Motivating example:
1254 // DAG combiner optimizes (add x (shl (srl y 5) 2))
1255 // to (add x (and (srl y 3) 1FFFFFFC))
1256 // which results in a constant-extended and(##...,lsr). This transformation
1257 // undoes this simplification for cases where the shl can be folded into
1258 // an addressing mode.
1259 ppAddrRewriteAndSrl(getNodes());
1
Calling 'HexagonDAGToDAGISel::ppAddrRewriteAndSrl'
1260
1261 // Transform: (op ... (zext i1 c) ...) -> (select c (op ... 0 ...)
1262 // (op ... 1 ...))
1263 ppHoistZextI1(getNodes());
1264
1265 DEBUG_WITH_TYPE("isel", {
1266 dbgs() << "Preprocessed (Hexagon) selection DAG:";
1267 CurDAG->dump();
1268 });
1269
1270 if (EnableAddressRebalancing) {
1271 rebalanceAddressTrees();
1272
1273 DEBUG_WITH_TYPE("isel", {
1274 dbgs() << "Address tree balanced selection DAG:";
1275 CurDAG->dump();
1276 });
1277 }
1278}
1279
1280void HexagonDAGToDAGISel::emitFunctionEntryCode() {
1281 auto &HST = MF->getSubtarget<HexagonSubtarget>();
1282 auto &HFI = *HST.getFrameLowering();
1283 if (!HFI.needsAligna(*MF))
1284 return;
1285
1286 MachineFrameInfo &MFI = MF->getFrameInfo();
1287 MachineBasicBlock *EntryBB = &MF->front();
1288 unsigned AR = FuncInfo->CreateReg(MVT::i32);
1289 Align EntryMaxA = MFI.getMaxAlign();
1290 BuildMI(EntryBB, DebugLoc(), HII->get(Hexagon::PS_aligna), AR)
1291 .addImm(EntryMaxA.value());
1292 MF->getInfo<HexagonMachineFunctionInfo>()->setStackAlignBaseVReg(AR);
1293}
1294
1295void HexagonDAGToDAGISel::updateAligna() {
1296 auto &HFI = *MF->getSubtarget<HexagonSubtarget>().getFrameLowering();
1297 if (!HFI.needsAligna(*MF))
1298 return;
1299 auto *AlignaI = const_cast<MachineInstr*>(HFI.getAlignaInstr(*MF));
1300 assert(AlignaI != nullptr);
1301 unsigned MaxA = MF->getFrameInfo().getMaxAlign().value();
1302 if (AlignaI->getOperand(1).getImm() < MaxA)
1303 AlignaI->getOperand(1).setImm(MaxA);
1304}
1305
1306// Match a frame index that can be used in an addressing mode.
1307bool HexagonDAGToDAGISel::SelectAddrFI(SDValue &N, SDValue &R) {
1308 if (N.getOpcode() != ISD::FrameIndex)
1309 return false;
1310 auto &HFI = *HST->getFrameLowering();
1311 MachineFrameInfo &MFI = MF->getFrameInfo();
1312 int FX = cast<FrameIndexSDNode>(N)->getIndex();
1313 if (!MFI.isFixedObjectIndex(FX) && HFI.needsAligna(*MF))
1314 return false;
1315 R = CurDAG->getTargetFrameIndex(FX, MVT::i32);
1316 return true;
1317}
1318
1319inline bool HexagonDAGToDAGISel::SelectAddrGA(SDValue &N, SDValue &R) {
1320 return SelectGlobalAddress(N, R, false, Align(1));
1321}
1322
1323inline bool HexagonDAGToDAGISel::SelectAddrGP(SDValue &N, SDValue &R) {
1324 return SelectGlobalAddress(N, R, true, Align(1));
1325}
1326
1327inline bool HexagonDAGToDAGISel::SelectAnyImm(SDValue &N, SDValue &R) {
1328 return SelectAnyImmediate(N, R, Align(1));
1329}
1330
1331inline bool HexagonDAGToDAGISel::SelectAnyImm0(SDValue &N, SDValue &R) {
1332 return SelectAnyImmediate(N, R, Align(1));
1333}
1334inline bool HexagonDAGToDAGISel::SelectAnyImm1(SDValue &N, SDValue &R) {
1335 return SelectAnyImmediate(N, R, Align(2));
1336}
1337inline bool HexagonDAGToDAGISel::SelectAnyImm2(SDValue &N, SDValue &R) {
1338 return SelectAnyImmediate(N, R, Align(4));
1339}
1340inline bool HexagonDAGToDAGISel::SelectAnyImm3(SDValue &N, SDValue &R) {
1341 return SelectAnyImmediate(N, R, Align(8));
1342}
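The four AnyImmN helpers above differ only in the required alignment: an AnyImmN operand must be a multiple of 2^N bytes (1, 2, 4 or 8). A minimal standalone check with the same meaning, shown for illustration only (isMultipleOfPow2 is not part of this file):

    #include <cstdint>
    // Equivalent of the Align(1 << N) requirement used by SelectAnyImmN:
    // the low N bits of the constant must be zero.
    static bool isMultipleOfPow2(uint32_t V, unsigned LogAlign) {
      return (V & ((1u << LogAlign) - 1)) == 0;   // LogAlign = 2 -> multiple of 4
    }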
1343
1344inline bool HexagonDAGToDAGISel::SelectAnyInt(SDValue &N, SDValue &R) {
1345 EVT T = N.getValueType();
1346 if (!T.isInteger() || T.getSizeInBits() != 32 || !isa<ConstantSDNode>(N))
1347 return false;
1348 R = N;
1349 return true;
1350}
1351
1352bool HexagonDAGToDAGISel::SelectAnyImmediate(SDValue &N, SDValue &R,
1353 Align Alignment) {
1354 switch (N.getOpcode()) {
1355 case ISD::Constant: {
1356 if (N.getValueType() != MVT::i32)
1357 return false;
1358 int32_t V = cast<const ConstantSDNode>(N)->getZExtValue();
1359 if (!isAligned(Alignment, V))
1360 return false;
1361 R = CurDAG->getTargetConstant(V, SDLoc(N), N.getValueType());
1362 return true;
1363 }
1364 case HexagonISD::JT:
1365 case HexagonISD::CP:
1366 // These are assumed to always be aligned to at least an 8-byte boundary.
1367 if (Alignment > Align(8))
1368 return false;
1369 R = N.getOperand(0);
1370 return true;
1371 case ISD::ExternalSymbol:
1372 // Symbols may be aligned at any boundary.
1373 if (Alignment > Align(1))
1374 return false;
1375 R = N;
1376 return true;
1377 case ISD::BlockAddress:
1378 // A block address is always aligned to at least a 4-byte boundary.
1379 if (Alignment > Align(4) ||
1380 !isAligned(Alignment, cast<BlockAddressSDNode>(N)->getOffset()))
1381 return false;
1382 R = N;
1383 return true;
1384 }
1385
1386 if (SelectGlobalAddress(N, R, false, Alignment) ||
1387 SelectGlobalAddress(N, R, true, Alignment))
1388 return true;
1389
1390 return false;
1391}
1392
1393bool HexagonDAGToDAGISel::SelectGlobalAddress(SDValue &N, SDValue &R,
1394 bool UseGP, Align Alignment) {
1395 switch (N.getOpcode()) {
1396 case ISD::ADD: {
1397 SDValue N0 = N.getOperand(0);
1398 SDValue N1 = N.getOperand(1);
1399 unsigned GAOpc = N0.getOpcode();
1400 if (UseGP && GAOpc != HexagonISD::CONST32_GP)
1401 return false;
1402 if (!UseGP && GAOpc != HexagonISD::CONST32)
1403 return false;
1404 if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(N1)) {
1405 if (!isAligned(Alignment, Const->getZExtValue()))
1406 return false;
1407 SDValue Addr = N0.getOperand(0);
1408 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Addr)) {
1409 if (GA->getOpcode() == ISD::TargetGlobalAddress) {
1410 uint64_t NewOff = GA->getOffset() + (uint64_t)Const->getSExtValue();
1411 R = CurDAG->getTargetGlobalAddress(GA->getGlobal(), SDLoc(Const),
1412 N.getValueType(), NewOff);
1413 return true;
1414 }
1415 }
1416 }
1417 break;
1418 }
1419 case HexagonISD::CP:
1420 case HexagonISD::JT:
1421 case HexagonISD::CONST32:
1422 // The operand(0) of CONST32 is TargetGlobalAddress, which is what we
1423 // want in the instruction.
1424 if (!UseGP)
1425 R = N.getOperand(0);
1426 return !UseGP;
1427 case HexagonISD::CONST32_GP:
1428 if (UseGP)
1429 R = N.getOperand(0);
1430 return UseGP;
1431 default:
1432 return false;
1433 }
1434
1435 return false;
1436}
1437
1438bool HexagonDAGToDAGISel::DetectUseSxtw(SDValue &N, SDValue &R) {
1439 // This (complex pattern) function is meant to detect a sign-extension
1440 // i32->i64 on a per-operand basis. This would allow writing single
1441 // patterns that would cover a number of combinations of different ways
1442 // a sign-extension could be written. For example:
1443 // (mul (DetectUseSxtw x) (DetectUseSxtw y)) -> (M2_dpmpyss_s0 x y)
1444 // could match either one of these:
1445 // (mul (sext x) (sext_inreg y))
1446 // (mul (sext-load *p) (sext_inreg y))
1447 // (mul (sext_inreg x) (sext y))
1448 // etc.
1449 //
1450 // The returned value will have type i64 and its low word will
1451 // contain the value being extended. The high bits are not specified.
1452 // The returned type is i64 because the original type of N was i64,
1453 // but the users of this function should only use the low-word of the
1454 // result, e.g.
1455 // (mul sxtw:x, sxtw:y) -> (M2_dpmpyss_s0 (LoReg sxtw:x), (LoReg sxtw:y))
1456
1457 if (N.getValueType() != MVT::i64)
1458 return false;
1459 unsigned Opc = N.getOpcode();
1460 switch (Opc) {
1461 case ISD::SIGN_EXTEND:
1462 case ISD::SIGN_EXTEND_INREG: {
1463 // sext_inreg has the source type as a separate operand.
1464 EVT T = Opc == ISD::SIGN_EXTEND
1465 ? N.getOperand(0).getValueType()
1466 : cast<VTSDNode>(N.getOperand(1))->getVT();
1467 unsigned SW = T.getSizeInBits();
1468 if (SW == 32)
1469 R = N.getOperand(0);
1470 else if (SW < 32)
1471 R = N;
1472 else
1473 return false;
1474 break;
1475 }
1476 case ISD::LOAD: {
1477 LoadSDNode *L = cast<LoadSDNode>(N);
1478 if (L->getExtensionType() != ISD::SEXTLOAD)
1479 return false;
1480 // All extending loads extend to i32, so even if the value in
1481 // memory is shorter than 32 bits, it will be i32 after the load.
1482 if (L->getMemoryVT().getSizeInBits() > 32)
1483 return false;
1484 R = N;
1485 break;
1486 }
1487 case ISD::SRA: {
1488 auto *S = dyn_cast<ConstantSDNode>(N.getOperand(1));
1489 if (!S || S->getZExtValue() != 32)
1490 return false;
1491 R = N;
1492 break;
1493 }
1494 default:
1495 return false;
1496 }
1497 EVT RT = R.getValueType();
1498 if (RT == MVT::i64)
1499 return true;
1500 assert(RT == MVT::i32);
1501 // This is only to produce a value of type i64. Do not rely on the
1502 // high bits produced by this.
1503 const SDLoc &dl(N);
1504 SDValue Ops[] = {
1505 CurDAG->getTargetConstant(Hexagon::DoubleRegsRegClassID, dl, MVT::i32),
1506 R, CurDAG->getTargetConstant(Hexagon::isub_hi, dl, MVT::i32),
1507 R, CurDAG->getTargetConstant(Hexagon::isub_lo, dl, MVT::i32)
1508 };
1509 SDNode *T = CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl,
1510 MVT::i64, Ops);
1511 R = SDValue(T, 0);
1512 return true;
1513}
1514
1515bool HexagonDAGToDAGISel::keepsLowBits(const SDValue &Val, unsigned NumBits,
1516 SDValue &Src) {
1517 unsigned Opc = Val.getOpcode();
1518 switch (Opc) {
1519 case ISD::SIGN_EXTEND:
1520 case ISD::ZERO_EXTEND:
1521 case ISD::ANY_EXTEND: {
1522 const SDValue &Op0 = Val.getOperand(0);
1523 EVT T = Op0.getValueType();
1524 if (T.isInteger() && T.getSizeInBits() == NumBits) {
1525 Src = Op0;
1526 return true;
1527 }
1528 break;
1529 }
1530 case ISD::SIGN_EXTEND_INREG:
1531 case ISD::AssertSext:
1532 case ISD::AssertZext:
1533 if (Val.getOperand(0).getValueType().isInteger()) {
1534 VTSDNode *T = cast<VTSDNode>(Val.getOperand(1));
1535 if (T->getVT().getSizeInBits() == NumBits) {
1536 Src = Val.getOperand(0);
1537 return true;
1538 }
1539 }
1540 break;
1541 case ISD::AND: {
1542 // Check if this is an AND with NumBits of lower bits set to 1.
1543 uint64_t Mask = (1 << NumBits) - 1;
1544 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val.getOperand(0))) {
1545 if (C->getZExtValue() == Mask) {
1546 Src = Val.getOperand(1);
1547 return true;
1548 }
1549 }
1550 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val.getOperand(1))) {
1551 if (C->getZExtValue() == Mask) {
1552 Src = Val.getOperand(0);
1553 return true;
1554 }
1555 }
1556 break;
1557 }
1558 case ISD::OR:
1559 case ISD::XOR: {
1560 // OR/XOR with the lower NumBits bits set to 0.
1561 uint64_t Mask = (1 << NumBits) - 1;
1562 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val.getOperand(0))) {
1563 if ((C->getZExtValue() & Mask) == 0) {
1564 Src = Val.getOperand(1);
1565 return true;
1566 }
1567 }
1568 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val.getOperand(1))) {
1569 if ((C->getZExtValue() & Mask) == 0) {
1570 Src = Val.getOperand(0);
1571 return true;
1572 }
1573 }
1574 break;
1575 }
1576 default:
1577 break;
1578 }
1579 return false;
1580}
1581
1582bool HexagonDAGToDAGISel::isAlignedMemNode(const MemSDNode *N) const {
1583 return N->getAlignment() >= N->getMemoryVT().getStoreSize();
1584}
1585
1586bool HexagonDAGToDAGISel::isSmallStackStore(const StoreSDNode *N) const {
1587 unsigned StackSize = MF->getFrameInfo().estimateStackSize(*MF);
1588 switch (N->getMemoryVT().getStoreSize()) {
1589 case 1:
1590 return StackSize <= 56; // 1*2^6 - 8
1591 case 2:
1592 return StackSize <= 120; // 2*2^6 - 8
1593 case 4:
1594 return StackSize <= 248; // 4*2^6 - 8
1595 default:
1596 return false;
1597 }
1598}
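The cut-offs above follow the pattern spelled out in the comments, StoreSize * 2^6 - 8. A small sketch of that formula (smallStackStoreLimit is illustrative and not part of the pass):

    // StoreSize in bytes -> largest estimated stack size still treated as "small".
    static unsigned smallStackStoreLimit(unsigned StoreSize) {
      return StoreSize * 64 - 8;   // 1 -> 56, 2 -> 120, 4 -> 248
    }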
1599
1600// Return true when the given node fits in a positive half word.
1601bool HexagonDAGToDAGISel::isPositiveHalfWord(const SDNode *N) const {
1602 if (const ConstantSDNode *CN = dyn_cast<const ConstantSDNode>(N)) {
1603 int64_t V = CN->getSExtValue();
1604 return V > 0 && isInt<16>(V);
1605 }
1606 if (N->getOpcode() == ISD::SIGN_EXTEND_INREG) {
1607 const VTSDNode *VN = dyn_cast<const VTSDNode>(N->getOperand(1));
1608 return VN->getVT().getSizeInBits() <= 16;
1609 }
1610 return false;
1611}
1612
1613bool HexagonDAGToDAGISel::hasOneUse(const SDNode *N) const {
1614 return !CheckSingleUse || N->hasOneUse();
1615}
1616
1617////////////////////////////////////////////////////////////////////////////////
1618// Rebalancing of address calculation trees
1619
1620static bool isOpcodeHandled(const SDNode *N) {
1621 switch (N->getOpcode()) {
1622 case ISD::ADD:
1623 case ISD::MUL:
1624 return true;
1625 case ISD::SHL:
1626 // We only handle constant shifts because these can be easily flattened
1627 // into multiplications by 2^Op1.
1628 return isa<ConstantSDNode>(N->getOperand(1).getNode());
1629 default:
1630 return false;
1631 }
1632}
1633
1634/// Return the weight of an SDNode
1635int HexagonDAGToDAGISel::getWeight(SDNode *N) {
1636 if (!isOpcodeHandled(N))
1637 return 1;
1638 assert(RootWeights.count(N) && "Cannot get weight of unseen root!");
1639 assert(RootWeights[N] != -1 && "Cannot get weight of unvisited root!");
1640 assert(RootWeights[N] != -2 && "Cannot get weight of RAWU'd root!");
1641 return RootWeights[N];
1642}
1643
1644int HexagonDAGToDAGISel::getHeight(SDNode *N) {
1645 if (!isOpcodeHandled(N))
1646 return 0;
1647 assert(RootWeights.count(N) && RootWeights[N] >= 0 &&
1648 "Cannot query height of unvisited/RAUW'd node!");
1649 return RootHeights[N];
1650}
1651
1652namespace {
1653struct WeightedLeaf {
1654 SDValue Value;
1655 int Weight;
1656 int InsertionOrder;
1657
1658 WeightedLeaf() : Value(SDValue()) { }
1659
1660 WeightedLeaf(SDValue Value, int Weight, int InsertionOrder) :
1661 Value(Value), Weight(Weight), InsertionOrder(InsertionOrder) {
1662 assert(Weight >= 0 && "Weight must be >= 0");
1663 }
1664
1665 static bool Compare(const WeightedLeaf &A, const WeightedLeaf &B) {
1666 assert(A.Value.getNode() && B.Value.getNode());
1667 return A.Weight == B.Weight ?
1668 (A.InsertionOrder > B.InsertionOrder) :
1669 (A.Weight > B.Weight);
1670 }
1671};
1672
1673/// A specialized priority queue for WeightedLeaves. It automatically folds
1674/// constants and allows removal of non-top elements while maintaining the
1675/// priority order.
1676class LeafPrioQueue {
1677 SmallVector<WeightedLeaf, 8> Q;
1678 bool HaveConst;
1679 WeightedLeaf ConstElt;
1680 unsigned Opcode;
1681
1682public:
1683 bool empty() {
1684 return (!HaveConst && Q.empty());
1685 }
1686
1687 size_t size() {
1688 return Q.size() + HaveConst;
1689 }
1690
1691 bool hasConst() {
1692 return HaveConst;
1693 }
1694
1695 const WeightedLeaf &top() {
1696 if (HaveConst)
1697 return ConstElt;
1698 return Q.front();
1699 }
1700
1701 WeightedLeaf pop() {
1702 if (HaveConst) {
1703 HaveConst = false;
1704 return ConstElt;
1705 }
1706 std::pop_heap(Q.begin(), Q.end(), WeightedLeaf::Compare);
1707 return Q.pop_back_val();
1708 }
1709
1710 void push(WeightedLeaf L, bool SeparateConst=true) {
1711 if (!HaveConst && SeparateConst && isa<ConstantSDNode>(L.Value)) {
1712 if (Opcode == ISD::MUL &&
1713 cast<ConstantSDNode>(L.Value)->getSExtValue() == 1)
1714 return;
1715 if (Opcode == ISD::ADD &&
1716 cast<ConstantSDNode>(L.Value)->getSExtValue() == 0)
1717 return;
1718
1719 HaveConst = true;
1720 ConstElt = L;
1721 } else {
1722 Q.push_back(L);
1723 std::push_heap(Q.begin(), Q.end(), WeightedLeaf::Compare);
1724 }
1725 }
1726
1727 /// Push L to the bottom of the queue regardless of its weight. If L is
1728 /// constant, it will not be folded with other constants in the queue.
1729 void pushToBottom(WeightedLeaf L) {
1730 L.Weight = 1000;
1731 push(L, false);
1732 }
1733
1734 /// Search for a SHL(x, [<=MaxAmount]) subtree in the queue, return the one of
1735 /// lowest weight and remove it from the queue.
1736 WeightedLeaf findSHL(uint64_t MaxAmount);
1737
1738 WeightedLeaf findMULbyConst();
1739
1740 LeafPrioQueue(unsigned Opcode) :
1741 HaveConst(false), Opcode(Opcode) { }
1742};
1743} // end anonymous namespace
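WeightedLeaf::Compare above deliberately uses operator> so that the std:: heap helpers keep the lowest-weight leaf at the front of Q. A self-contained illustration of that convention (not taken from the pass):

    #include <algorithm>
    #include <vector>
    int minHeapTop() {
      std::vector<int> Q{5, 1, 3};
      auto GreaterThan = [](int A, int B) { return A > B; };
      std::make_heap(Q.begin(), Q.end(), GreaterThan);  // greater-than comparator => min-heap
      return Q.front();                                 // 1, the smallest "weight"
    }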
1744
1745WeightedLeaf LeafPrioQueue::findSHL(uint64_t MaxAmount) {
1746 int ResultPos;
1747 WeightedLeaf Result;
1748
1749 for (int Pos = 0, End = Q.size(); Pos != End; ++Pos) {
1750 const WeightedLeaf &L = Q[Pos];
1751 const SDValue &Val = L.Value;
1752 if (Val.getOpcode() != ISD::SHL ||
1753 !isa<ConstantSDNode>(Val.getOperand(1)) ||
1754 Val.getConstantOperandVal(1) > MaxAmount)
1755 continue;
1756 if (!Result.Value.getNode() || Result.Weight > L.Weight ||
1757 (Result.Weight == L.Weight && Result.InsertionOrder > L.InsertionOrder))
1758 {
1759 Result = L;
1760 ResultPos = Pos;
1761 }
1762 }
1763
1764 if (Result.Value.getNode()) {
1765 Q.erase(&Q[ResultPos]);
1766 std::make_heap(Q.begin(), Q.end(), WeightedLeaf::Compare);
1767 }
1768
1769 return Result;
1770}
1771
1772WeightedLeaf LeafPrioQueue::findMULbyConst() {
1773 int ResultPos;
1774 WeightedLeaf Result;
1775
1776 for (int Pos = 0, End = Q.size(); Pos != End; ++Pos) {
1777 const WeightedLeaf &L = Q[Pos];
1778 const SDValue &Val = L.Value;
1779 if (Val.getOpcode() != ISD::MUL ||
1780 !isa<ConstantSDNode>(Val.getOperand(1)) ||
1781 Val.getConstantOperandVal(1) > 127)
1782 continue;
1783 if (!Result.Value.getNode() || Result.Weight > L.Weight ||
1784 (Result.Weight == L.Weight && Result.InsertionOrder > L.InsertionOrder))
1785 {
1786 Result = L;
1787 ResultPos = Pos;
1788 }
1789 }
1790
1791 if (Result.Value.getNode()) {
1792 Q.erase(&Q[ResultPos]);
1793 std::make_heap(Q.begin(), Q.end(), WeightedLeaf::Compare);
1794 }
1795
1796 return Result;
1797}
1798
1799SDValue HexagonDAGToDAGISel::getMultiplierForSHL(SDNode *N) {
1800 uint64_t MulFactor = 1ull << N->getConstantOperandVal(1);
1801 return CurDAG->getConstant(MulFactor, SDLoc(N),
1802 N->getOperand(1).getValueType());
1803}
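getMultiplierForSHL relies on the identity that a left shift by a constant C is a multiplication by 2^C. A tiny sketch of that identity (illustrative only; shlAsMul is not part of the pass):

    #include <cstdint>
    // For unsigned values and C < 64, X << C and X * (1 << C) are the same.
    static uint64_t shlAsMul(uint64_t X, unsigned C) {
      return X * (1ull << C);   // e.g. C = 3 turns the shift into a multiply by 8
    }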
1804
1805/// @returns the value x for which 2^x is a factor of Val
1806static unsigned getPowerOf2Factor(SDValue Val) {
1807 if (Val.getOpcode() == ISD::MUL) {
1808 unsigned MaxFactor = 0;
1809 for (int i = 0; i < 2; ++i) {
1810 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val.getOperand(i));
1811 if (!C)
1812 continue;
1813 const APInt &CInt = C->getAPIntValue();
1814 if (CInt.getBoolValue())
1815 MaxFactor = CInt.countTrailingZeros();
1816 }
1817 return MaxFactor;
1818 }
1819 if (Val.getOpcode() == ISD::SHL) {
1820 if (!isa<ConstantSDNode>(Val.getOperand(1).getNode()))
1821 return 0;
1822 return (unsigned) Val.getConstantOperandVal(1);
1823 }
1824
1825 return 0;
1826}
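For a constant operand, the quantity computed above is simply the number of trailing zero bits of that constant (e.g. 24 = 0b11000 gives a factor of 2^3). A minimal equivalent, shown for illustration only:

    #include <cstdint>
    static unsigned powerOf2Factor(uint64_t C) {
      unsigned X = 0;
      while (C != 0 && (C & 1) == 0) {   // count trailing zero bits
        C >>= 1;
        ++X;
      }
      return X;                          // largest X with 2^X dividing C (0 for C == 0)
    }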
1827
1828/// @returns true if V>>Amount will eliminate V's operation on its child
1829static bool willShiftRightEliminate(SDValue V, unsigned Amount) {
1830 if (V.getOpcode() == ISD::MUL) {
1831 SDValue Ops[] = { V.getOperand(0), V.getOperand(1) };
1832 for (int i = 0; i < 2; ++i)
1833 if (isa<ConstantSDNode>(Ops[i].getNode()) &&
1834 V.getConstantOperandVal(i) % (1ULL << Amount) == 0) {
1835 uint64_t NewConst = V.getConstantOperandVal(i) >> Amount;
1836 return (NewConst == 1);
1837 }
1838 } else if (V.getOpcode() == ISD::SHL) {
1839 return (Amount == V.getConstantOperandVal(1));
1840 }
1841
1842 return false;
1843}
1844
1845SDValue HexagonDAGToDAGISel::factorOutPowerOf2(SDValue V, unsigned Power) {
1846 SDValue Ops[] = { V.getOperand(0), V.getOperand(1) };
1847 if (V.getOpcode() == ISD::MUL) {
1848 for (int i=0; i < 2; ++i) {
1849 if (isa<ConstantSDNode>(Ops[i].getNode()) &&
1850 V.getConstantOperandVal(i) % ((uint64_t)1 << Power) == 0) {
1851 uint64_t NewConst = V.getConstantOperandVal(i) >> Power;
1852 if (NewConst == 1)
1853 return Ops[!i];
1854 Ops[i] = CurDAG->getConstant(NewConst,
1855 SDLoc(V), V.getValueType());
1856 break;
1857 }
1858 }
1859 } else if (V.getOpcode() == ISD::SHL) {
1860 uint64_t ShiftAmount = V.getConstantOperandVal(1);
1861 if (ShiftAmount == Power)
1862 return Ops[0];
1863 Ops[1] = CurDAG->getConstant(ShiftAmount - Power,
1864 SDLoc(V), V.getValueType());
1865 }
1866
1867 return CurDAG->getNode(V.getOpcode(), SDLoc(V), V.getValueType(), Ops);
1868}
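Worked example of the factoring above: with Power = 3, (mul x, 24) becomes (mul x, 3) since 24 >> 3 == 3, (mul x, 8) collapses to just x since 8 >> 3 == 1, and (shl x, 5) becomes (shl x, 2) because the shift amount is reduced by 3.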
1869
1870static bool isTargetConstant(const SDValue &V) {
1871 return V.getOpcode() == HexagonISD::CONST32 ||
1872 V.getOpcode() == HexagonISD::CONST32_GP;
1873}
1874
1875unsigned HexagonDAGToDAGISel::getUsesInFunction(const Value *V) {
1876 if (GAUsesInFunction.count(V))
1877 return GAUsesInFunction[V];
1878
1879 unsigned Result = 0;
1880 const Function &CurF = CurDAG->getMachineFunction().getFunction();
1881 for (const User *U : V->users()) {
1882 if (isa<Instruction>(U) &&
1883 cast<Instruction>(U)->getParent()->getParent() == &CurF)
1884 ++Result;
1885 }
1886
1887 GAUsesInFunction[V] = Result;
1888
1889 return Result;
1890}
1891
1892/// Note - After calling this, N may be dead. It may have been replaced by a
1893/// new node, so always use the returned value in place of N.
1894///
1895/// @returns The SDValue taking the place of N (which could be N if it is
1896/// unchanged)
1897SDValue HexagonDAGToDAGISel::balanceSubTree(SDNode *N, bool TopLevel) {
1898 assert(RootWeights.count(N) && "Cannot balance non-root node.");
1899 assert(RootWeights[N] != -2 && "This node was RAUW'd!");
1900 assert(!TopLevel || N->getOpcode() == ISD::ADD);
1901
1902 // Return early if this node was already visited
1903 if (RootWeights[N] != -1)
1904 return SDValue(N, 0);
1905
1906 assert(isOpcodeHandled(N));
1907
1908 SDValue Op0 = N->getOperand(0);
1909 SDValue Op1 = N->getOperand(1);
1910
1911 // Return early if the operands will remain unchanged or are all roots
1912 if ((!isOpcodeHandled(Op0.getNode()) || RootWeights.count(Op0.getNode())) &&
1913 (!isOpcodeHandled(Op1.getNode()) || RootWeights.count(Op1.getNode()))) {
1914 SDNode *Op0N = Op0.getNode();
1915 int Weight;
1916 if (isOpcodeHandled(Op0N) && RootWeights[Op0N] == -1) {
1917 Weight = getWeight(balanceSubTree(Op0N).getNode());
1918 // Weight = calculateWeight(Op0N);
1919 } else
1920 Weight = getWeight(Op0N);
1921
1922 SDNode *Op1N = N->getOperand(1).getNode(); // Op1 may have been RAUWd
1923 if (isOpcodeHandled(Op1N) && RootWeights[Op1N] == -1) {
1924 Weight += getWeight(balanceSubTree(Op1N).getNode());
1925 // Weight += calculateWeight(Op1N);
1926 } else
1927 Weight += getWeight(Op1N);
1928
1929 RootWeights[N] = Weight;
1930 RootHeights[N] = std::max(getHeight(N->getOperand(0).getNode()),
1931 getHeight(N->getOperand(1).getNode())) + 1;
1932
1933 LLVM_DEBUG(dbgs() << "--> No need to balance root (Weight=" << Weight
1934 << " Height=" << RootHeights[N] << "): ");
1935 LLVM_DEBUG(N->dump(CurDAG));
1936
1937 return SDValue(N, 0);
1938 }
1939
1940 LLVM_DEBUG(dbgs() << "** Balancing root node: ");
1941 LLVM_DEBUG(N->dump(CurDAG));
1942
1943 unsigned NOpcode = N->getOpcode();
1944
1945 LeafPrioQueue Leaves(NOpcode);
1946 SmallVector<SDValue, 4> Worklist;
1947 Worklist.push_back(SDValue(N, 0));
1948
1949 // SHL nodes will be converted to MUL nodes
1950 if (NOpcode == ISD::SHL)
1951 NOpcode = ISD::MUL;
1952
1953 bool CanFactorize = false;
1954 WeightedLeaf Mul1, Mul2;
1955 unsigned MaxPowerOf2 = 0;
1956 WeightedLeaf GA;
1957
1958 // Do not try to factor out a shift if there is already a shift at the tip of
1959 // the tree.
1960 bool HaveTopLevelShift = false;
1961 if (TopLevel &&
1962 ((isOpcodeHandled(Op0.getNode()) && Op0.getOpcode() == ISD::SHL &&
1963 Op0.getConstantOperandVal(1) < 4) ||
1964 (isOpcodeHandled(Op1.getNode()) && Op1.getOpcode() == ISD::SHL &&
1965 Op1.getConstantOperandVal(1) < 4)))
1966 HaveTopLevelShift = true;
1967
1968 // Flatten the subtree into an ordered list of leaves; at the same time
1969 // determine whether the tree is already balanced.
1970 int InsertionOrder = 0;
1971 SmallDenseMap<SDValue, int> NodeHeights;
1972 bool Imbalanced = false;
1973 int CurrentWeight = 0;
1974 while (!Worklist.empty()) {
1975 SDValue Child = Worklist.pop_back_val();
1976
1977 if (Child.getNode() != N && RootWeights.count(Child.getNode())) {
1978 // CASE 1: Child is a root node
1979
1980 int Weight = RootWeights[Child.getNode()];
1981 if (Weight == -1) {
1982 Child = balanceSubTree(Child.getNode());
1983 // calculateWeight(Child.getNode());
1984 Weight = getWeight(Child.getNode());
1985 } else if (Weight == -2) {
1986 // Whoops, this node was RAUWd by one of the balanceSubTree calls we
1987 // made. Our worklist isn't up to date anymore.
1988 // Restart the whole process.
1989 LLVM_DEBUG(dbgs() << "--> Subtree was RAUWd. Restarting...\n");
1990 return balanceSubTree(N, TopLevel);
1991 }
1992
1993 NodeHeights[Child] = 1;
1994 CurrentWeight += Weight;
1995
1996 unsigned PowerOf2;
1997 if (TopLevel && !CanFactorize && !HaveTopLevelShift &&
1998 (Child.getOpcode() == ISD::MUL || Child.getOpcode() == ISD::SHL) &&
1999 Child.hasOneUse() && (PowerOf2 = getPowerOf2Factor(Child))) {
2000 // Try to identify two factorizable MUL/SHL children greedily. Leave
2001 // them out of the priority queue for now so we can deal with them
2002 // after.
2003 if (!Mul1.Value.getNode()) {
2004 Mul1 = WeightedLeaf(Child, Weight, InsertionOrder++);
2005 MaxPowerOf2 = PowerOf2;
2006 } else {
2007 Mul2 = WeightedLeaf(Child, Weight, InsertionOrder++);
2008 MaxPowerOf2 = std::min(MaxPowerOf2, PowerOf2);
2009
2010 // Our addressing modes can only shift by a maximum of 3
2011 if (MaxPowerOf2 > 3)
2012 MaxPowerOf2 = 3;
2013
2014 CanFactorize = true;
2015 }
2016 } else
2017 Leaves.push(WeightedLeaf(Child, Weight, InsertionOrder++));
2018 } else if (!isOpcodeHandled(Child.getNode())) {
2019 // CASE 2: Child is an unhandled kind of node (e.g. constant)
2020 int Weight = getWeight(Child.getNode());
2021
2022 NodeHeights[Child] = getHeight(Child.getNode());
2023 CurrentWeight += Weight;
2024
2025 if (isTargetConstant(Child) && !GA.Value.getNode())
2026 GA = WeightedLeaf(Child, Weight, InsertionOrder++);
2027 else
2028 Leaves.push(WeightedLeaf(Child, Weight, InsertionOrder++));
2029 } else {
2030 // CASE 3: Child is a subtree of same opcode
2031 // Visit children first, then flatten.
2032 unsigned ChildOpcode = Child.getOpcode();
2033 assert(ChildOpcode == NOpcode ||
2034 (NOpcode == ISD::MUL && ChildOpcode == ISD::SHL));
2035
2036 // Convert SHL to MUL
2037 SDValue Op1;
2038 if (ChildOpcode == ISD::SHL)
2039 Op1 = getMultiplierForSHL(Child.getNode());
2040 else
2041 Op1 = Child->getOperand(1);
2042
2043 if (!NodeHeights.count(Op1) || !NodeHeights.count(Child->getOperand(0))) {
2044 assert(!NodeHeights.count(Child) && "Parent visited before children?");
2045 // Visit children first, then re-visit this node
2046 Worklist.push_back(Child);
2047 Worklist.push_back(Op1);
2048 Worklist.push_back(Child->getOperand(0));
2049 } else {
2050 // Back at this node after visiting the children
2051 if (std::abs(NodeHeights[Op1] - NodeHeights[Child->getOperand(0)]) > 1)
2052 Imbalanced = true;
2053
2054 NodeHeights[Child] = std::max(NodeHeights[Op1],
2055 NodeHeights[Child->getOperand(0)]) + 1;
2056 }
2057 }
2058 }
2059
2060 LLVM_DEBUG(dbgs() << "--> Current height=" << NodeHeights[SDValue(N, 0)]
2061 << " weight=" << CurrentWeight
2062 << " imbalanced=" << Imbalanced << "\n");
2063
2064 // Transform MUL(x, C * 2^Y) + SHL(z, Y) -> SHL(ADD(MUL(x, C), z), Y)
2065 // This factors out a shift in order to match memw(a<<Y+b).
2066 if (CanFactorize && (willShiftRightEliminate(Mul1.Value, MaxPowerOf2) ||
2067 willShiftRightEliminate(Mul2.Value, MaxPowerOf2))) {
2068 LLVM_DEBUG(dbgs() << "--> Found common factor for two MUL children!\n");
2069 int Weight = Mul1.Weight + Mul2.Weight;
2070 int Height = std::max(NodeHeights[Mul1.Value], NodeHeights[Mul2.Value]) + 1;
2071 SDValue Mul1Factored = factorOutPowerOf2(Mul1.Value, MaxPowerOf2);
2072 SDValue Mul2Factored = factorOutPowerOf2(Mul2.Value, MaxPowerOf2);
2073 SDValue Sum = CurDAG->getNode(ISD::ADD, SDLoc(N), Mul1.Value.getValueType(),
2074 Mul1Factored, Mul2Factored);
2075 SDValue Const = CurDAG->getConstant(MaxPowerOf2, SDLoc(N),
2076 Mul1.Value.getValueType());
2077 SDValue New = CurDAG->getNode(ISD::SHL, SDLoc(N), Mul1.Value.getValueType(),
2078 Sum, Const);
2079 NodeHeights[New] = Height;
2080 Leaves.push(WeightedLeaf(New, Weight, Mul1.InsertionOrder));
2081 } else if (Mul1.Value.getNode()) {
2082 // We failed to factorize two MULs, so now the Muls are left outside the
2083 // queue... add them back.
2084 Leaves.push(Mul1);
2085 if (Mul2.Value.getNode())
2086 Leaves.push(Mul2);
2087 CanFactorize = false;
2088 }
2089
2090 // Combine GA + Constant -> GA+Offset, but only if GA is not used elsewhere
2091 // and the root node itself is not used more than twice. This reduces the
2092 // amount of additional constant extenders introduced by this optimization.
2093 bool CombinedGA = false;
2094 if (NOpcode == ISD::ADD && GA.Value.getNode() && Leaves.hasConst() &&
2095 GA.Value.hasOneUse() && N->use_size() < 3) {
2096 GlobalAddressSDNode *GANode =
2097 cast<GlobalAddressSDNode>(GA.Value.getOperand(0));
2098 ConstantSDNode *Offset = cast<ConstantSDNode>(Leaves.top().Value);
2099
2100 if (getUsesInFunction(GANode->getGlobal()) == 1 && Offset->hasOneUse() &&
2101 getTargetLowering()->isOffsetFoldingLegal(GANode)) {
2102 LLVM_DEBUG(dbgs() << "--> Combining GA and offset ("
2103 << Offset->getSExtValue() << "): ");
2104 LLVM_DEBUG(GANode->dump(CurDAG));
2105
2106 SDValue NewTGA =
2107 CurDAG->getTargetGlobalAddress(GANode->getGlobal(), SDLoc(GA.Value),
2108 GANode->getValueType(0),
2109 GANode->getOffset() + (uint64_t)Offset->getSExtValue());
2110 GA.Value = CurDAG->getNode(GA.Value.getOpcode(), SDLoc(GA.Value),
2111 GA.Value.getValueType(), NewTGA);
2112 GA.Weight += Leaves.top().Weight;
2113
2114 NodeHeights[GA.Value] = getHeight(GA.Value.getNode());
2115 CombinedGA = true;
2116
2117 Leaves.pop(); // Remove the offset constant from the queue
2118 }
2119 }
2120
2121 if ((RebalanceOnlyForOptimizations && !CanFactorize && !CombinedGA) ||
2122 (RebalanceOnlyImbalancedTrees && !Imbalanced)) {
2123 RootWeights[N] = CurrentWeight;
2124 RootHeights[N] = NodeHeights[SDValue(N, 0)];
2125
2126 return SDValue(N, 0);
2127 }
2128
2129 // Combine GA + SHL(x, C<=31) so we will match Rx=add(#u8,asl(Rx,#U5))
2130 if (NOpcode == ISD::ADD && GA.Value.getNode()) {
2131 WeightedLeaf SHL = Leaves.findSHL(31);
2132 if (SHL.Value.getNode()) {
2133 int Height = std::max(NodeHeights[GA.Value], NodeHeights[SHL.Value]) + 1;
2134 GA.Value = CurDAG->getNode(ISD::ADD, SDLoc(GA.Value),
2135 GA.Value.getValueType(),
2136 GA.Value, SHL.Value);
2137 GA.Weight = SHL.Weight; // Specifically ignore the GA weight here
2138 NodeHeights[GA.Value] = Height;
2139 }
2140 }
2141
2142 if (GA.Value.getNode())
2143 Leaves.push(GA);
2144
2145 // If this is the top level and we haven't factored out a shift, we should try
2146 // to move a constant to the bottom to match addressing modes like memw(rX+C)
2147 if (TopLevel && !CanFactorize && Leaves.hasConst()) {
2148 LLVM_DEBUG(dbgs() << "--> Pushing constant to tip of tree.");
2149 Leaves.pushToBottom(Leaves.pop());
2150 }
2151
2152 const DataLayout &DL = CurDAG->getDataLayout();
2153 const TargetLowering &TLI = *getTargetLowering();
2154
2155 // Rebuild the tree using Huffman's algorithm
2156 while (Leaves.size() > 1) {
2157 WeightedLeaf L0 = Leaves.pop();
2158
2159 // See whether we can grab a MUL to form an add(Rx,mpyi(Ry,#u6)),
2160 // otherwise just get the next leaf
2161 WeightedLeaf L1 = Leaves.findMULbyConst();
2162 if (!L1.Value.getNode())
2163 L1 = Leaves.pop();
2164
2165 assert(L0.Weight <= L1.Weight && "Priority queue is broken!");
2166
2167 SDValue V0 = L0.Value;
2168 int V0Weight = L0.Weight;
2169 SDValue V1 = L1.Value;
2170 int V1Weight = L1.Weight;
2171
2172 // Make sure that none of these nodes have been RAUW'd
2173 if ((RootWeights.count(V0.getNode()) && RootWeights[V0.getNode()] == -2) ||
2174 (RootWeights.count(V1.getNode()) && RootWeights[V1.getNode()] == -2)) {
2175 LLVM_DEBUG(dbgs() << "--> Subtree was RAUWd. Restarting...\n");
2176 return balanceSubTree(N, TopLevel);
2177 }
2178
2179 ConstantSDNode *V0C = dyn_cast<ConstantSDNode>(V0);
2180 ConstantSDNode *V1C = dyn_cast<ConstantSDNode>(V1);
2181 EVT VT = N->getValueType(0);
2182 SDValue NewNode;
2183
2184 if (V0C && !V1C) {
2185 std::swap(V0, V1);
2186 std::swap(V0C, V1C);
2187 }
2188
2189 // Calculate height of this node
2190 assert(NodeHeights.count(V0) && NodeHeights.count(V1) &&
2191 "Children must have been visited before re-combining them!");
2192 int Height = std::max(NodeHeights[V0], NodeHeights[V1]) + 1;
2193
2194 // Rebuild this node (and restore SHL from MUL if needed)
2195 if (V1C && NOpcode == ISD::MUL && V1C->getAPIntValue().isPowerOf2())
2196 NewNode = CurDAG->getNode(
2197 ISD::SHL, SDLoc(V0), VT, V0,
2198 CurDAG->getConstant(
2199 V1C->getAPIntValue().logBase2(), SDLoc(N),
2200 TLI.getScalarShiftAmountTy(DL, V0.getValueType())));
2201 else
2202 NewNode = CurDAG->getNode(NOpcode, SDLoc(N), VT, V0, V1);
2203
2204 NodeHeights[NewNode] = Height;
2205
2206 int Weight = V0Weight + V1Weight;
2207 Leaves.push(WeightedLeaf(NewNode, Weight, L0.InsertionOrder));
2208
2209 LLVM_DEBUG(dbgs() << "--> Built new node (Weight=" << Weight
2210 << ",Height=" << Height << "):\n");
2211 LLVM_DEBUG(NewNode.dump());
2212 }
2213
2214 assert(Leaves.size() == 1);
2215 SDValue NewRoot = Leaves.top().Value;
2216
2217 assert(NodeHeights.count(NewRoot));
2218 int Height = NodeHeights[NewRoot];
2219
2220 // Restore SHL if we earlier converted it to a MUL
2221 if (NewRoot.getOpcode() == ISD::MUL) {
2222 ConstantSDNode *V1C = dyn_cast<ConstantSDNode>(NewRoot.getOperand(1));
2223 if (V1C && V1C->getAPIntValue().isPowerOf2()) {
2224 EVT VT = NewRoot.getValueType();
2225 SDValue V0 = NewRoot.getOperand(0);
2226 NewRoot = CurDAG->getNode(
2227 ISD::SHL, SDLoc(NewRoot), VT, V0,
2228 CurDAG->getConstant(
2229 V1C->getAPIntValue().logBase2(), SDLoc(NewRoot),
2230 TLI.getScalarShiftAmountTy(DL, V0.getValueType())));
2231 }
2232 }
2233
2234 if (N != NewRoot.getNode()) {
2235 LLVM_DEBUG(dbgs() << "--> Root is now: ");
2236 LLVM_DEBUG(NewRoot.dump());
2237
2238 // Replace all uses of old root by new root
2239 CurDAG->ReplaceAllUsesWith(N, NewRoot.getNode());
2240 // Mark that we have RAUW'd N
2241 RootWeights[N] = -2;
2242 } else {
2243 LLVM_DEBUG(dbgs() << "--> Root unchanged.\n");
2244 }
2245
2246 RootWeights[NewRoot.getNode()] = Leaves.top().Weight;
2247 RootHeights[NewRoot.getNode()] = Height;
2248
2249 return NewRoot;
2250}
2251
2252void HexagonDAGToDAGISel::rebalanceAddressTrees() {
2253 for (SDNode &Node : llvm::make_early_inc_range(CurDAG->allnodes())) {
2254 SDNode *N = &Node;
2255 if (N->getOpcode() != ISD::LOAD && N->getOpcode() != ISD::STORE)
2256 continue;
2257
2258 SDValue BasePtr = cast<MemSDNode>(N)->getBasePtr();
2259 if (BasePtr.getOpcode() != ISD::ADD)
2260 continue;
2261
2262 // We've already processed this node
2263 if (RootWeights.count(BasePtr.getNode()))
2264 continue;
2265
2266 LLVM_DEBUG(dbgs() << "** Rebalancing address calculation in node: ");
2267 LLVM_DEBUG(N->dump(CurDAG));
2268
2269 // FindRoots
2270 SmallVector<SDNode *, 4> Worklist;
2271
2272 Worklist.push_back(BasePtr.getOperand(0).getNode());
2273 Worklist.push_back(BasePtr.getOperand(1).getNode());
2274
2275 while (!Worklist.empty()) {
2276 SDNode *N = Worklist.pop_back_val();
2277 unsigned Opcode = N->getOpcode();
2278
2279 if (!isOpcodeHandled(N))
2280 continue;
2281
2282 Worklist.push_back(N->getOperand(0).getNode());
2283 Worklist.push_back(N->getOperand(1).getNode());
2284
2285 // Not a root if it has only one use and same opcode as its parent
2286 if (N->hasOneUse() && Opcode == N->use_begin()->getOpcode())
2287 continue;
2288
2289 // This root node has already been processed
2290 if (RootWeights.count(N))
2291 continue;
2292
2293 RootWeights[N] = -1;
2294 }
2295
2296 // Balance node itself
2297 RootWeights[BasePtr.getNode()] = -1;
2298 SDValue NewBasePtr = balanceSubTree(BasePtr.getNode(), /*TopLevel=*/ true);
2299
2300 if (N->getOpcode() == ISD::LOAD)
2301 N = CurDAG->UpdateNodeOperands(N, N->getOperand(0),
2302 NewBasePtr, N->getOperand(2));
2303 else
2304 N = CurDAG->UpdateNodeOperands(N, N->getOperand(0), N->getOperand(1),
2305 NewBasePtr, N->getOperand(3));
2306
2307 LLVM_DEBUG(dbgs() << "--> Final node: ");
2308 LLVM_DEBUG(N->dump(CurDAG));
2309 }
2310
2311 CurDAG->RemoveDeadNodes();
2312 GAUsesInFunction.clear();
2313 RootHeights.clear();
2314 RootWeights.clear();
2315}

/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/llvm/include/llvm/Support/MathExtras.h

1//===-- llvm/Support/MathExtras.h - Useful math functions -------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains some functions that are useful for math stuff.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_SUPPORT_MATHEXTRAS_H
14#define LLVM_SUPPORT_MATHEXTRAS_H
15
16#include "llvm/Support/Compiler.h"
17#include <cassert>
18#include <climits>
19#include <cmath>
20#include <cstdint>
21#include <cstring>
22#include <limits>
23#include <type_traits>
24
25#ifdef __ANDROID_NDK__
26#include <android/api-level.h>
27#endif
28
29#ifdef _MSC_VER
30// Declare these intrinsics manually rather including intrin.h. It's very
31// expensive, and MathExtras.h is popular.
32// #include <intrin.h>
33extern "C" {
34unsigned char _BitScanForward(unsigned long *_Index, unsigned long _Mask);
35unsigned char _BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask);
36unsigned char _BitScanReverse(unsigned long *_Index, unsigned long _Mask);
37unsigned char _BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask);
38}
39#endif
40
41namespace llvm {
42
43/// The behavior an operation has on an input of 0.
44enum ZeroBehavior {
45 /// The returned value is undefined.
46 ZB_Undefined,
47 /// The returned value is numeric_limits<T>::max()
48 ZB_Max,
49 /// The returned value is numeric_limits<T>::digits
50 ZB_Width
51};
52
53/// Mathematical constants.
54namespace numbers {
55// TODO: Track C++20 std::numbers.
56// TODO: Favor using the hexadecimal FP constants (requires C++17).
57constexpr double e = 2.7182818284590452354, // (0x1.5bf0a8b145749P+1) https://oeis.org/A001113
58 egamma = .57721566490153286061, // (0x1.2788cfc6fb619P-1) https://oeis.org/A001620
59 ln2 = .69314718055994530942, // (0x1.62e42fefa39efP-1) https://oeis.org/A002162
60 ln10 = 2.3025850929940456840, // (0x1.24bb1bbb55516P+1) https://oeis.org/A002392
61 log2e = 1.4426950408889634074, // (0x1.71547652b82feP+0)
62 log10e = .43429448190325182765, // (0x1.bcb7b1526e50eP-2)
63 pi = 3.1415926535897932385, // (0x1.921fb54442d18P+1) https://oeis.org/A000796
64 inv_pi = .31830988618379067154, // (0x1.45f306bc9c883P-2) https://oeis.org/A049541
65 sqrtpi = 1.7724538509055160273, // (0x1.c5bf891b4ef6bP+0) https://oeis.org/A002161
66 inv_sqrtpi = .56418958354775628695, // (0x1.20dd750429b6dP-1) https://oeis.org/A087197
67 sqrt2 = 1.4142135623730950488, // (0x1.6a09e667f3bcdP+0) https://oeis.org/A00219
68 inv_sqrt2 = .70710678118654752440, // (0x1.6a09e667f3bcdP-1)
69 sqrt3 = 1.7320508075688772935, // (0x1.bb67ae8584caaP+0) https://oeis.org/A002194
70 inv_sqrt3 = .57735026918962576451, // (0x1.279a74590331cP-1)
71 phi = 1.6180339887498948482; // (0x1.9e3779b97f4a8P+0) https://oeis.org/A001622
72constexpr float ef = 2.71828183F, // (0x1.5bf0a8P+1) https://oeis.org/A001113
73 egammaf = .577215665F, // (0x1.2788d0P-1) https://oeis.org/A001620
74 ln2f = .693147181F, // (0x1.62e430P-1) https://oeis.org/A002162
75 ln10f = 2.30258509F, // (0x1.26bb1cP+1) https://oeis.org/A002392
76 log2ef = 1.44269504F, // (0x1.715476P+0)
77 log10ef = .434294482F, // (0x1.bcb7b2P-2)
78 pif = 3.14159265F, // (0x1.921fb6P+1) https://oeis.org/A000796
79 inv_pif = .318309886F, // (0x1.45f306P-2) https://oeis.org/A049541
80 sqrtpif = 1.77245385F, // (0x1.c5bf8aP+0) https://oeis.org/A002161
81 inv_sqrtpif = .564189584F, // (0x1.20dd76P-1) https://oeis.org/A087197
82 sqrt2f = 1.41421356F, // (0x1.6a09e6P+0) https://oeis.org/A002193
83 inv_sqrt2f = .707106781F, // (0x1.6a09e6P-1)
84 sqrt3f = 1.73205081F, // (0x1.bb67aeP+0) https://oeis.org/A002194
85 inv_sqrt3f = .577350269F, // (0x1.279a74P-1)
86 phif = 1.61803399F; // (0x1.9e377aP+0) https://oeis.org/A001622
87} // namespace numbers
88
89namespace detail {
90template <typename T, std::size_t SizeOfT> struct TrailingZerosCounter {
91 static unsigned count(T Val, ZeroBehavior) {
92 if (!Val)
93 return std::numeric_limits<T>::digits;
94 if (Val & 0x1)
95 return 0;
96
97 // Bisection method.
98 unsigned ZeroBits = 0;
99 T Shift = std::numeric_limits<T>::digits >> 1;
100 T Mask = std::numeric_limits<T>::max() >> Shift;
101 while (Shift) {
102 if ((Val & Mask) == 0) {
103 Val >>= Shift;
104 ZeroBits |= Shift;
105 }
106 Shift >>= 1;
107 Mask >>= Shift;
108 }
109 return ZeroBits;
110 }
111};
112
113#if defined(__GNUC__) || defined(_MSC_VER)
114template <typename T> struct TrailingZerosCounter<T, 4> {
115 static unsigned count(T Val, ZeroBehavior ZB) {
116 if (ZB
19.1
'ZB' is not equal to ZB_Undefined
19.1
'ZB' is not equal to ZB_Undefined
!= ZB_Undefined && Val == 0)
20
Assuming 'Val' is equal to 0
21
Taking true branch
117 return 32;
22
Returning the value 32
118
119#if __has_builtin(__builtin_ctz)1 || defined(__GNUC__4)
120 return __builtin_ctz(Val);
121#elif defined(_MSC_VER)
122 unsigned long Index;
123 _BitScanForward(&Index, Val);
124 return Index;
125#endif
126 }
127};
128
129#if !defined(_MSC_VER) || defined(_M_X64)
130template <typename T> struct TrailingZerosCounter<T, 8> {
131 static unsigned count(T Val, ZeroBehavior ZB) {
132 if (ZB != ZB_Undefined && Val == 0)
133 return 64;
134
135#if __has_builtin(__builtin_ctzll) || defined(__GNUC__)
136 return __builtin_ctzll(Val);
137#elif defined(_MSC_VER)
138 unsigned long Index;
139 _BitScanForward64(&Index, Val);
140 return Index;
141#endif
142 }
143};
144#endif
145#endif
146} // namespace detail
147
148/// Count number of 0's from the least significant bit to the most
149/// stopping at the first 1.
150///
151/// Only unsigned integral types are allowed.
152///
153/// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are
154/// valid arguments.
155template <typename T>
156unsigned countTrailingZeros(T Val, ZeroBehavior ZB = ZB_Width) {
157 static_assert(std::numeric_limits<T>::is_integer &&
158 !std::numeric_limits<T>::is_signed,
159 "Only unsigned integral types are allowed.");
160 return llvm::detail::TrailingZerosCounter<T, sizeof(T)>::count(Val, ZB);
  19: Calling 'TrailingZerosCounter::count'
  23: Returning from 'TrailingZerosCounter::count'
  24: Returning the value 32
161}
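As the path annotations above show (steps 19-24), with the default ZB_Width behavior a zero 32-bit input makes countTrailingZeros return 32. Shifting a 32-bit value by an amount equal to or larger than its width is undefined behaviour in C++, so a caller that feeds the count straight into a shift must guard the zero case. A hypothetical caller-side guard (not from the analyzed sources):

    // shiftRightByCTZ is illustrative only; the guard avoids using a count of
    // 32 as a shift amount on a 32-bit value.
    inline uint32_t shiftRightByCTZ(uint32_t V, uint32_t Bits) {
      unsigned N = countTrailingZeros(Bits);   // 32 when Bits == 0 (ZB_Width)
      return N >= 32 ? 0 : V >> N;             // never shift a uint32_t by 32
    }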
162
163namespace detail {
164template <typename T, std::size_t SizeOfT> struct LeadingZerosCounter {
165 static unsigned count(T Val, ZeroBehavior) {
166 if (!Val)
167 return std::numeric_limits<T>::digits;
168
169 // Bisection method.
170 unsigned ZeroBits = 0;
171 for (T Shift = std::numeric_limits<T>::digits >> 1; Shift; Shift >>= 1) {
172 T Tmp = Val >> Shift;
173 if (Tmp)
174 Val = Tmp;
175 else
176 ZeroBits |= Shift;
177 }
178 return ZeroBits;
179 }
180};
181
182#if defined(__GNUC__) || defined(_MSC_VER)
183template <typename T> struct LeadingZerosCounter<T, 4> {
184 static unsigned count(T Val, ZeroBehavior ZB) {
185 if (ZB != ZB_Undefined && Val == 0)
186 return 32;
187
188#if __has_builtin(__builtin_clz) || defined(__GNUC__)
189 return __builtin_clz(Val);
190#elif defined(_MSC_VER)
191 unsigned long Index;
192 _BitScanReverse(&Index, Val);
193 return Index ^ 31;
194#endif
195 }
196};
197
198#if !defined(_MSC_VER) || defined(_M_X64)
199template <typename T> struct LeadingZerosCounter<T, 8> {
200 static unsigned count(T Val, ZeroBehavior ZB) {
201 if (ZB != ZB_Undefined && Val == 0)
202 return 64;
203
204#if __has_builtin(__builtin_clzll) || defined(__GNUC__)
205 return __builtin_clzll(Val);
206#elif defined(_MSC_VER)
207 unsigned long Index;
208 _BitScanReverse64(&Index, Val);
209 return Index ^ 63;
210#endif
211 }
212};
213#endif
214#endif
215} // namespace detail
216
217/// Count number of 0's from the most significant bit to the least
218/// stopping at the first 1.
219///
220/// Only unsigned integral types are allowed.
221///
222/// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are
223/// valid arguments.
224template <typename T>
225unsigned countLeadingZeros(T Val, ZeroBehavior ZB = ZB_Width) {
226 static_assert(std::numeric_limits<T>::is_integer &&
227 !std::numeric_limits<T>::is_signed,
228 "Only unsigned integral types are allowed.");
229 return llvm::detail::LeadingZerosCounter<T, sizeof(T)>::count(Val, ZB);
230}
231
232/// Get the index of the first set bit starting from the least
233/// significant bit.
234///
235/// Only unsigned integral types are allowed.
236///
237/// \param ZB the behavior on an input of 0. Only ZB_Max and ZB_Undefined are
238/// valid arguments.
239template <typename T> T findFirstSet(T Val, ZeroBehavior ZB = ZB_Max) {
240 if (ZB == ZB_Max && Val == 0)
241 return std::numeric_limits<T>::max();
242
243 return countTrailingZeros(Val, ZB_Undefined);
244}
245
246/// Create a bitmask with the N right-most bits set to 1, and all other
247/// bits set to 0. Only unsigned types are allowed.
248template <typename T> T maskTrailingOnes(unsigned N) {
249 static_assert(std::is_unsigned<T>::value, "Invalid type!");
250 const unsigned Bits = CHAR_BIT * sizeof(T);
251 assert(N <= Bits && "Invalid bit index");
252 return N == 0 ? 0 : (T(-1) >> (Bits - N));
253}
254
255/// Create a bitmask with the N left-most bits set to 1, and all other
256/// bits set to 0. Only unsigned types are allowed.
257template <typename T> T maskLeadingOnes(unsigned N) {
258 return ~maskTrailingOnes<T>(CHAR_BIT * sizeof(T) - N);
259}
260
261/// Create a bitmask with the N right-most bits set to 0, and all other
262/// bits set to 1. Only unsigned types are allowed.
263template <typename T> T maskTrailingZeros(unsigned N) {
264 return maskLeadingOnes<T>(CHAR_BIT * sizeof(T) - N);
265}
266
267/// Create a bitmask with the N left-most bits set to 0, and all other
268/// bits set to 1. Only unsigned types are allowed.
269template <typename T> T maskLeadingZeros(unsigned N) {
270 return maskTrailingOnes<T>(CHAR_BIT * sizeof(T) - N);
271}
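Worked values for the four mask helpers above, taking T = uint32_t and N = 8:
    maskTrailingOnes<uint32_t>(8)  == 0x000000FF   (low 8 bits set)
    maskLeadingOnes<uint32_t>(8)   == 0xFF000000   (high 8 bits set)
    maskTrailingZeros<uint32_t>(8) == 0xFFFFFF00   (low 8 bits clear)
    maskLeadingZeros<uint32_t>(8)  == 0x00FFFFFF   (high 8 bits clear)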
272
273/// Get the index of the last set bit starting from the least
274/// significant bit.
275///
276/// Only unsigned integral types are allowed.
277///
278/// \param ZB the behavior on an input of 0. Only ZB_Max and ZB_Undefined are
279/// valid arguments.
280template <typename T> T findLastSet(T Val, ZeroBehavior ZB = ZB_Max) {
281 if (ZB == ZB_Max && Val == 0)
282 return std::numeric_limits<T>::max();
283
284 // Use ^ instead of - because both gcc and llvm can remove the associated ^
285 // in the __builtin_clz intrinsic on x86.
286 return countLeadingZeros(Val, ZB_Undefined) ^
287 (std::numeric_limits<T>::digits - 1);
288}
289
290/// Macro compressed bit reversal table for 256 bits.
291///
292/// http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
293static const unsigned char BitReverseTable256[256] = {
294#define R2(n) n, n + 2 * 64, n + 1 * 64, n + 3 * 64
295#define R4(n) R2(n), R2(n + 2 * 16), R2(n + 1 * 16), R2(n + 3 * 16)
296#define R6(n) R4(n), R4(n + 2 * 4), R4(n + 1 * 4), R4(n + 3 * 4)
297 R6(0), R6(2), R6(1), R6(3)
298#undef R2
299#undef R4
300#undef R6
301};
302
303/// Reverse the bits in \p Val.
304template <typename T>
305T reverseBits(T Val) {
306 unsigned char in[sizeof(Val)];
307 unsigned char out[sizeof(Val)];
308 std::memcpy(in, &Val, sizeof(Val));
309 for (unsigned i = 0; i < sizeof(Val); ++i)
310 out[(sizeof(Val) - i) - 1] = BitReverseTable256[in[i]];
311 std::memcpy(&Val, out, sizeof(Val));
312 return Val;
313}
314
315#if __has_builtin(__builtin_bitreverse8)
316template<>
317inline uint8_t reverseBits<uint8_t>(uint8_t Val) {
318 return __builtin_bitreverse8(Val);
319}
320#endif
321
322#if __has_builtin(__builtin_bitreverse16)
323template<>
324inline uint16_t reverseBits<uint16_t>(uint16_t Val) {
325 return __builtin_bitreverse16(Val);
326}
327#endif
328
329#if __has_builtin(__builtin_bitreverse32)
330template<>
331inline uint32_t reverseBits<uint32_t>(uint32_t Val) {
332 return __builtin_bitreverse32(Val);
333}
334#endif
335
336#if __has_builtin(__builtin_bitreverse64)
337template<>
338inline uint64_t reverseBits<uint64_t>(uint64_t Val) {
339 return __builtin_bitreverse64(Val);
340}
341#endif
342
343// NOTE: The following support functions use the _32/_64 extensions instead of
344// type overloading so that signed and unsigned integers can be used without
345// ambiguity.
346
347/// Return the high 32 bits of a 64 bit value.
348constexpr inline uint32_t Hi_32(uint64_t Value) {
349 return static_cast<uint32_t>(Value >> 32);
350}
351
352/// Return the low 32 bits of a 64 bit value.
353constexpr inline uint32_t Lo_32(uint64_t Value) {
354 return static_cast<uint32_t>(Value);
355}
356
357/// Make a 64-bit integer from a high / low pair of 32-bit integers.
358constexpr inline uint64_t Make_64(uint32_t High, uint32_t Low) {
359 return ((uint64_t)High << 32) | (uint64_t)Low;
360}
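A quick round-trip check of the three constexpr helpers above (illustrative asserts, not part of the header):

    static_assert(Make_64(0xDEADBEEFu, 0x12345678u) == 0xDEADBEEF12345678ull,
                  "Make_64 packs the high and low words");
    static_assert(Hi_32(0xDEADBEEF12345678ull) == 0xDEADBEEFu, "Hi_32 recovers the high word");
    static_assert(Lo_32(0xDEADBEEF12345678ull) == 0x12345678u, "Lo_32 recovers the low word");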
361
362/// Checks if an integer fits into the given bit width.
363template <unsigned N> constexpr inline bool isInt(int64_t x) {
364 return N >= 64 || (-(INT64_C(1)<<(N-1)) <= x && x < (INT64_C(1)<<(N-1)));
365}
366// Template specializations to get better code for common cases.
367template <> constexpr inline bool isInt<8>(int64_t x) {
368 return static_cast<int8_t>(x) == x;
369}
370template <> constexpr inline bool isInt<16>(int64_t x) {
371 return static_cast<int16_t>(x) == x;
372}
373template <> constexpr inline bool isInt<32>(int64_t x) {
374 return static_cast<int32_t>(x) == x;
375}
376
377/// Checks if a signed integer is an N bit number shifted left by S.
378template <unsigned N, unsigned S>
379constexpr inline bool isShiftedInt(int64_t x) {
380 static_assert(
381 N > 0, "isShiftedInt<0> doesn't make sense (refers to a 0-bit number.");
382 static_assert(N + S <= 64, "isShiftedInt<N, S> with N + S > 64 is too wide.");
383 return isInt<N + S>(x) && (x % (UINT64_C(1) << S) == 0);
384}
385
386/// Checks if an unsigned integer fits into the given bit width.
387///
388/// This is written as two functions rather than as simply
389///
390/// return N >= 64 || X < (UINT64_C(1) << N);
391///
392/// to keep MSVC from (incorrectly) warning on isUInt<64> that we're shifting
393/// left too many places.
394template <unsigned N>
395constexpr inline std::enable_if_t<(N < 64), bool> isUInt(uint64_t X) {
396 static_assert(N > 0, "isUInt<0> doesn't make sense");
397 return X < (UINT64_C(1) << (N));
398}
399template <unsigned N>
400constexpr inline std::enable_if_t<N >= 64, bool> isUInt(uint64_t) {
401 return true;
402}
403
404// Template specializations to get better code for common cases.
405template <> constexpr inline bool isUInt<8>(uint64_t x) {
406 return static_cast<uint8_t>(x) == x;
407}
408template <> constexpr inline bool isUInt<16>(uint64_t x) {
409 return static_cast<uint16_t>(x) == x;
410}
411template <> constexpr inline bool isUInt<32>(uint64_t x) {
412 return static_cast<uint32_t>(x) == x;
413}
414
415/// Checks if an unsigned integer is an N bit number shifted left by S.
416template <unsigned N, unsigned S>
417constexpr inline bool isShiftedUInt(uint64_t x) {
418 static_assert(
419 N > 0, "isShiftedUInt<0> doesn't make sense (refers to a 0-bit number)");
420 static_assert(N + S <= 64,
421 "isShiftedUInt<N, S> with N + S > 64 is too wide.");
422 // Per the two static_asserts above, S must be strictly less than 64. So
423 // 1 << S is not undefined behavior.
424 return isUInt<N + S>(x) && (x % (UINT64_C(1) << S) == 0);
425}
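A sketch of how the shifted checks compose the plain width check with a divisibility test (example values are illustrative):
  static_assert(llvm::isShiftedUInt<8, 4>(0xFF0), "");   // 0xFF << 4, and 0xFF0 % 16 == 0
  static_assert(!llvm::isShiftedUInt<8, 4>(0xFF1), "");  // low bits set, not a multiple of 16
  static_assert(llvm::isShiftedInt<8, 2>(-512), "");     // -128 << 2, the most negative value allowed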
426
427/// Gets the maximum value for a N-bit unsigned integer.
428inline uint64_t maxUIntN(uint64_t N) {
429 assert(N > 0 && N <= 64 && "integer width out of range");
430
431 // uint64_t(1) << 64 is undefined behavior, so we can't do
432 // (uint64_t(1) << N) - 1
433 // without checking first that N != 64. But this works and doesn't have a
434 // branch.
435 return UINT64_MAX >> (64 - N);
436}
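Two spot checks of the branch-free form above (illustrative):
  llvm::maxUIntN(8);    // 0xFF: UINT64_MAX >> 56
  llvm::maxUIntN(64);   // UINT64_MAX: the shift count is 0, so no undefined shift by 64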
437
438/// Gets the minimum value for a N-bit signed integer.
439inline int64_t minIntN(int64_t N) {
440 assert(N > 0 && N <= 64 && "integer width out of range");
441
442 return UINT64_C(1) + ~(UINT64_C(1) << (N - 1));
443}
444
445/// Gets the maximum value for a N-bit signed integer.
446inline int64_t maxIntN(int64_t N) {
447 assert(N > 0 && N <= 64 && "integer width out of range");
448
449 // This relies on two's complement wraparound when N == 64, so we convert to
450 // int64_t only at the very end to avoid UB.
451 return (UINT64_C(1) << (N - 1)) - 1;
452}
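The signed limits line up with the usual INT*_MIN/INT*_MAX values, for example (illustrative):
  llvm::minIntN(8);     // -128
  llvm::maxIntN(8);     // 127
  llvm::minIntN(64);    // INT64_MIN, via the unsigned wraparound noted above
  llvm::maxIntN(64);    // INT64_MAX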
453
454/// Checks if an unsigned integer fits into the given (dynamic) bit width.
455inline bool isUIntN(unsigned N, uint64_t x) {
456 return N >= 64 || x <= maxUIntN(N);
457}
458
459/// Checks if a signed integer fits into the given (dynamic) bit width.
460inline bool isIntN(unsigned N, int64_t x) {
461 return N >= 64 || (minIntN(N) <= x && x <= maxIntN(N));
462}
463
464/// Return true if the argument is a non-empty sequence of ones starting at the
465/// least significant bit with the remainder zero (32 bit version).
466/// Ex. isMask_32(0x0000FFFFU) == true.
467constexpr inline bool isMask_32(uint32_t Value) {
468 return Value && ((Value + 1) & Value) == 0;
469}
470
471/// Return true if the argument is a non-empty sequence of ones starting at the
472/// least significant bit with the remainder zero (64 bit version).
473constexpr inline bool isMask_64(uint64_t Value) {
474 return Value && ((Value + 1) & Value) == 0;
475}
476
477/// Return true if the argument contains a non-empty sequence of ones with the
478/// remainder zero (32 bit version.) Ex. isShiftedMask_32(0x0000FF00U) == true.
479constexpr inline bool isShiftedMask_32(uint32_t Value) {
480 return Value && isMask_32((Value - 1) | Value);
481}
482
483/// Return true if the argument contains a non-empty sequence of ones with the
484/// remainder zero (64 bit version.)
485constexpr inline bool isShiftedMask_64(uint64_t Value) {
486 return Value && isMask_64((Value - 1) | Value);
487}
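The (Value - 1) | Value step fills in the trailing zeros, so a shifted mask becomes a plain mask; for example (illustrative):
  static_assert(llvm::isShiftedMask_32(0x0000FF00u), "");   // (0xFEFF | 0xFF00) == 0xFFFF, a mask
  static_assert(!llvm::isShiftedMask_32(0x0000FF0Fu), "");  // two separate runs of ones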
488
489/// Return true if the argument is a power of two > 0.
490/// Ex. isPowerOf2_32(0x00100000U) == true (32 bit edition.)
491constexpr inline bool isPowerOf2_32(uint32_t Value) {
492 return Value && !(Value & (Value - 1));
493}
494
495/// Return true if the argument is a power of two > 0 (64 bit edition.)
496constexpr inline bool isPowerOf2_64(uint64_t Value) {
497 return Value && !(Value & (Value - 1));
498}
499
500/// Count the number of ones from the most significant bit to the first
501/// zero bit.
502///
503/// Ex. countLeadingOnes(0xFF0FFF00) == 8.
504/// Only unsigned integral types are allowed.
505///
506/// \param ZB the behavior on an input of all ones. Only ZB_Width and
507/// ZB_Undefined are valid arguments.
508template <typename T>
509unsigned countLeadingOnes(T Value, ZeroBehavior ZB = ZB_Width) {
510 static_assert(std::numeric_limits<T>::is_integer &&
511 !std::numeric_limits<T>::is_signed,
512 "Only unsigned integral types are allowed.");
513 return countLeadingZeros<T>(~Value, ZB);
514}
515
516/// Count the number of ones from the least significant bit to the first
517/// zero bit.
518///
519/// Ex. countTrailingOnes(0x00FF00FF) == 8.
520/// Only unsigned integral types are allowed.
521///
522/// \param ZB the behavior on an input of all ones. Only ZB_Width and
523/// ZB_Undefined are valid arguments.
524template <typename T>
525unsigned countTrailingOnes(T Value, ZeroBehavior ZB = ZB_Width) {
526 static_assert(std::numeric_limits<T>::is_integer &&
527 !std::numeric_limits<T>::is_signed,
528 "Only unsigned integral types are allowed.");
529 return countTrailingZeros<T>(~Value, ZB);
530}
531
532namespace detail {
533template <typename T, std::size_t SizeOfT> struct PopulationCounter {
534 static unsigned count(T Value) {
535 // Generic version, forward to 32 bits.
536 static_assert(SizeOfT <= 4, "Not implemented!");
537#if defined(__GNUC__)
538 return __builtin_popcount(Value);
539#else
540 uint32_t v = Value;
541 v = v - ((v >> 1) & 0x55555555);
542 v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
543 return (((v + (v >> 4)) & 0xF0F0F0F) * 0x1010101) >> 24;
544#endif
545 }
546};
547
548template <typename T> struct PopulationCounter<T, 8> {
549 static unsigned count(T Value) {
550#if defined(__GNUC__)
551 return __builtin_popcountll(Value);
552#else
553 uint64_t v = Value;
554 v = v - ((v >> 1) & 0x5555555555555555ULL);
555 v = (v & 0x3333333333333333ULL) + ((v >> 2) & 0x3333333333333333ULL);
556 v = (v + (v >> 4)) & 0x0F0F0F0F0F0F0F0FULL;
557 return unsigned((uint64_t)(v * 0x0101010101010101ULL) >> 56);
558#endif
559 }
560};
561} // namespace detail
562
563/// Count the number of set bits in a value.
564/// Ex. countPopulation(0xF000F000) = 8
565/// Returns 0 if the word is zero.
566template <typename T>
567inline unsigned countPopulation(T Value) {
568 static_assert(std::numeric_limits<T>::is_integer &&
569 !std::numeric_limits<T>::is_signed,
570 "Only unsigned integral types are allowed.");
571 return detail::PopulationCounter<T, sizeof(T)>::count(Value);
572}
573
574/// Compile time Log2.
575/// Valid only for positive powers of two.
576template <size_t kValue> constexpr inline size_t CTLog2() {
577 static_assert(kValue > 0 && llvm::isPowerOf2_64(kValue),
578 "Value is not a valid power of 2");
579 return 1 + CTLog2<kValue / 2>();
580}
581
582template <> constexpr inline size_t CTLog2<1>() { return 0; }
583
584/// Return the log base 2 of the specified value.
585inline double Log2(double Value) {
586#if defined(__ANDROID_API__) && __ANDROID_API__ < 18
587 return __builtin_log(Value) / __builtin_log(2.0);
588#else
589 return log2(Value);
590#endif
591}
592
593/// Return the floor log base 2 of the specified value, -1 if the value is zero.
594/// (32 bit edition.)
595/// Ex. Log2_32(32) == 5, Log2_32(1) == 0, Log2_32(0) == -1, Log2_32(6) == 2
596inline unsigned Log2_32(uint32_t Value) {
597 return 31 - countLeadingZeros(Value);
598}
599
600/// Return the floor log base 2 of the specified value, -1 if the value is zero.
601/// (64 bit edition.)
602inline unsigned Log2_64(uint64_t Value) {
603 return 63 - countLeadingZeros(Value);
604}
605
606/// Return the ceil log base 2 of the specified value, 32 if the value is zero.
607/// (32 bit edition).
608/// Ex. Log2_32_Ceil(32) == 5, Log2_32_Ceil(1) == 0, Log2_32_Ceil(6) == 3
609inline unsigned Log2_32_Ceil(uint32_t Value) {
610 return 32 - countLeadingZeros(Value - 1);
611}
612
613/// Return the ceil log base 2 of the specified value, 64 if the value is zero.
614/// (64 bit edition.)
615inline unsigned Log2_64_Ceil(uint64_t Value) {
616 return 64 - countLeadingZeros(Value - 1);
617}
618
619/// Return the greatest common divisor of the values using Euclid's algorithm.
620template <typename T>
621inline T greatestCommonDivisor(T A, T B) {
622 while (B) {
623 T Tmp = B;
624 B = A % B;
625 A = Tmp;
626 }
627 return A;
628}
629
630inline uint64_t GreatestCommonDivisor64(uint64_t A, uint64_t B) {
631 return greatestCommonDivisor<uint64_t>(A, B);
632}
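For example, GreatestCommonDivisor64(48, 18) iterates (48, 18) -> (18, 12) -> (12, 6) -> (6, 0) and returns 6 (an illustrative trace, not from the header):
  llvm::GreatestCommonDivisor64(48, 18);   // 6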
633
634/// This function takes a 64-bit integer and returns the bit equivalent double.
635inline double BitsToDouble(uint64_t Bits) {
636 double D;
637 static_assert(sizeof(uint64_t) == sizeof(double), "Unexpected type sizes");
638 memcpy(&D, &Bits, sizeof(Bits));
639 return D;
640}
641
642/// This function takes a 32-bit integer and returns the bit equivalent float.
643inline float BitsToFloat(uint32_t Bits) {
644 float F;
645 static_assert(sizeof(uint32_t) == sizeof(float), "Unexpected type sizes");
646 memcpy(&F, &Bits, sizeof(Bits));
647 return F;
648}
649
650/// This function takes a double and returns the bit equivalent 64-bit integer.
651/// Note that copying doubles around changes the bits of NaNs on some hosts,
652/// notably x86, so this routine cannot be used if these bits are needed.
653inline uint64_t DoubleToBits(double Double) {
654 uint64_t Bits;
655 static_assert(sizeof(uint64_t) == sizeof(double), "Unexpected type sizes");
656 memcpy(&Bits, &Double, sizeof(Double));
657 return Bits;
658}
659
660/// This function takes a float and returns the bit equivalent 32-bit integer.
661/// Note that copying floats around changes the bits of NaNs on some hosts,
662/// notably x86, so this routine cannot be used if these bits are needed.
663inline uint32_t FloatToBits(float Float) {
664 uint32_t Bits;
665 static_assert(sizeof(uint32_t) == sizeof(float), "Unexpected type sizes");
666 memcpy(&Bits, &Float, sizeof(Float));
667 return Bits;
668}
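A round-trip sketch using well-known IEEE 754 encodings (illustrative):
  llvm::FloatToBits(1.0f);          // 0x3F800000
  llvm::BitsToFloat(0x3F800000u);   // 1.0f
  llvm::DoubleToBits(1.0);          // 0x3FF0000000000000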
669
670/// A and B are either alignments or offsets. Return the minimum alignment that
671/// may be assumed after adding the two together.
672constexpr inline uint64_t MinAlign(uint64_t A, uint64_t B) {
673 // The largest power of 2 that divides both A and B.
674 //
675 // Replace "-Value" by "1+~Value" in the following commented code to avoid
676 // MSVC warning C4146
677 // return (A | B) & -(A | B);
678 return (A | B) & (1 + ~(A | B));
679}
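The expression isolates the lowest set bit of A | B, i.e. the largest power of two dividing both; for example (illustrative):
  static_assert(llvm::MinAlign(8, 12) == 4, "");    // 0b1000 | 0b1100 = 0b1100; lowest set bit is 4
  static_assert(llvm::MinAlign(16, 64) == 16, "");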
680
681/// Returns the next power of two (in 64-bits) that is strictly greater than A.
682/// Returns zero on overflow.
683inline uint64_t NextPowerOf2(uint64_t A) {
684 A |= (A >> 1);
685 A |= (A >> 2);
686 A |= (A >> 4);
687 A |= (A >> 8);
688 A |= (A >> 16);
689 A |= (A >> 32);
690 return A + 1;
691}
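The shift/or cascade smears the highest set bit into every lower position, so adding one lands on the next power of two; for example (illustrative):
  llvm::NextPowerOf2(37);   // 37 = 0b100101 smears to 0b111111 = 63, plus 1 is 64
  llvm::NextPowerOf2(64);   // 128: the result is strictly greater than the input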
692
693/// Returns the power of two which is less than or equal to the given value.
694/// Essentially, it is a floor operation across the domain of powers of two.
695inline uint64_t PowerOf2Floor(uint64_t A) {
696 if (!A) return 0;
697 return 1ull << (63 - countLeadingZeros(A, ZB_Undefined));
698}
699
700/// Returns the power of two which is greater than or equal to the given value.
701/// Essentially, it is a ceil operation across the domain of powers of two.
702inline uint64_t PowerOf2Ceil(uint64_t A) {
703 if (!A)
704 return 0;
705 return NextPowerOf2(A - 1);
706}
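Floor and ceil bracket a non-power-of-two from below and above (illustrative):
  llvm::PowerOf2Floor(40);  // 32
  llvm::PowerOf2Ceil(40);   // 64
  llvm::PowerOf2Ceil(32);   // 32: already a power of two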
707
708/// Returns the next integer (mod 2**64) that is greater than or equal to
709/// \p Value and is a multiple of \p Align. \p Align must be non-zero.
710///
711/// If non-zero \p Skew is specified, the return value will be a minimal
712/// integer that is greater than or equal to \p Value and equal to
713/// \p Align * N + \p Skew for some integer N. If \p Skew is larger than
714/// \p Align, its value is adjusted to '\p Skew mod \p Align'.
715///
716/// Examples:
717/// \code
718/// alignTo(5, 8) = 8
719/// alignTo(17, 8) = 24
720/// alignTo(~0LL, 8) = 0
721/// alignTo(321, 255) = 510
722///
723/// alignTo(5, 8, 7) = 7
724/// alignTo(17, 8, 1) = 17
725/// alignTo(~0LL, 8, 3) = 3
726/// alignTo(321, 255, 42) = 552
727/// \endcode
728inline uint64_t alignTo(uint64_t Value, uint64_t Align, uint64_t Skew = 0) {
729 assert(Align != 0u && "Align can't be 0.");
730 Skew %= Align;
731 return (Value + Align - 1 - Skew) / Align * Align + Skew;
732}
733
734/// Returns the next integer (mod 2**64) that is greater than or equal to
735/// \p Value and is a multiple of \c Align. \c Align must be non-zero.
736template <uint64_t Align> constexpr inline uint64_t alignTo(uint64_t Value) {
737 static_assert(Align != 0u, "Align must be non-zero");
738 return (Value + Align - 1) / Align * Align;
739}
740
741/// Returns the integer ceil(Numerator / Denominator).
742inline uint64_t divideCeil(uint64_t Numerator, uint64_t Denominator) {
743 return alignTo(Numerator, Denominator) / Denominator;
744}
745
746/// Returns the integer nearest(Numerator / Denominator).
747inline uint64_t divideNearest(uint64_t Numerator, uint64_t Denominator) {
748 return (Numerator + (Denominator / 2)) / Denominator;
749}
750
751/// Returns the largest uint64_t that is less than or equal to \p Value and
752/// is congruent to \p Skew modulo \p Align. \p Align must be non-zero.
753inline uint64_t alignDown(uint64_t Value, uint64_t Align, uint64_t Skew = 0) {
754 assert(Align != 0u && "Align can't be 0.");
755 Skew %= Align;
756 return (Value - Skew) / Align * Align + Skew;
757}
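alignDown mirrors alignTo in the other direction (illustrative values):
  llvm::alignDown(17, 8);     // 16
  llvm::alignDown(17, 8, 3);  // 11: the largest value <= 17 that is 3 mod 8
  llvm::alignDown(5, 8);      // 0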
758
759/// Sign-extend the number in the bottom B bits of X to a 32-bit integer.
760/// Requires 0 < B <= 32.
761template <unsigned B> constexpr inline int32_t SignExtend32(uint32_t X) {
762 static_assert(B > 0, "Bit width can't be 0.");
763 static_assert(B <= 32, "Bit width out of range.");
764 return int32_t(X << (32 - B)) >> (32 - B);
765}
766
767/// Sign-extend the number in the bottom B bits of X to a 32-bit integer.
768/// Requires 0 < B <= 32.
769inline int32_t SignExtend32(uint32_t X, unsigned B) {
770 assert(B > 0 && "Bit width can't be 0.");
771 assert(B <= 32 && "Bit width out of range.");
772 return int32_t(X << (32 - B)) >> (32 - B);
773}
774
775/// Sign-extend the number in the bottom B bits of X to a 64-bit integer.
776/// Requires 0 < B <= 64.
777template <unsigned B> constexpr inline int64_t SignExtend64(uint64_t x) {
778 static_assert(B > 0, "Bit width can't be 0.");
779 static_assert(B <= 64, "Bit width out of range.");
780 return int64_t(x << (64 - B)) >> (64 - B);
781}
782
783/// Sign-extend the number in the bottom B bits of X to a 64-bit integer.
784/// Requires 0 < B <= 64.
785inline int64_t SignExtend64(uint64_t X, unsigned B) {
786 assert(B > 0 && "Bit width can't be 0.");
787 assert(B <= 64 && "Bit width out of range.");
788 return int64_t(X << (64 - B)) >> (64 - B);
789}
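The shift-up/arithmetic-shift-down pattern replicates bit B-1 into the upper bits; a few spot checks (illustrative):
  llvm::SignExtend32<8>(0xFFu);   // -1
  llvm::SignExtend32<8>(0x7Fu);   // 127
  llvm::SignExtend64(0x80u, 8);   // -128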
790
791/// Subtract two unsigned integers, X and Y, of type T and return the absolute
792/// value of the result.
793template <typename T>
794std::enable_if_t<std::is_unsigned<T>::value, T> AbsoluteDifference(T X, T Y) {
795 return X > Y ? (X - Y) : (Y - X);
796}
797
798/// Add two unsigned integers, X and Y, of type T. Clamp the result to the
799/// maximum representable value of T on overflow. ResultOverflowed indicates if
800/// the result is larger than the maximum representable value of type T.
801template <typename T>
802std::enable_if_t<std::is_unsigned<T>::value, T>
803SaturatingAdd(T X, T Y, bool *ResultOverflowed = nullptr) {
804 bool Dummy;
805 bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;
806 // Hacker's Delight, p. 29
807 T Z = X + Y;
808 Overflowed = (Z < X || Z < Y);
809 if (Overflowed)
810 return std::numeric_limits<T>::max();
811 else
812 return Z;
813}
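With T = uint8_t, for example (illustrative):
  llvm::SaturatingAdd<uint8_t>(100, 100);  // 200, no overflow
  llvm::SaturatingAdd<uint8_t>(200, 100);  // 255 (clamped); the overflow flag is set if a pointer is passed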
814
815/// Multiply two unsigned integers, X and Y, of type T. Clamp the result to the
816/// maximum representable value of T on overflow. ResultOverflowed indicates if
817/// the result is larger than the maximum representable value of type T.
818template <typename T>
819std::enable_if_t<std::is_unsigned<T>::value, T>
820SaturatingMultiply(T X, T Y, bool *ResultOverflowed = nullptr) {
821 bool Dummy;
822 bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;
823
824 // Hacker's Delight, p. 30 has a different algorithm, but we don't use that
825 // because it fails for uint16_t (where multiplication can have undefined
826 // behavior due to promotion to int), and requires a division in addition
827 // to the multiplication.
828
829 Overflowed = false;
830
831 // Log2(Z) would be either Log2Z or Log2Z + 1.
832 // Special case: if X or Y is 0, Log2_64 gives -1, and Log2Z
833 // will necessarily be less than Log2Max as desired.
834 int Log2Z = Log2_64(X) + Log2_64(Y);
835 const T Max = std::numeric_limits<T>::max();
836 int Log2Max = Log2_64(Max);
837 if (Log2Z < Log2Max) {
838 return X * Y;
839 }
840 if (Log2Z > Log2Max) {
841 Overflowed = true;
842 return Max;
843 }
844
845 // We're going to use the top bit, and maybe overflow one
846 // bit past it. Multiply all but the bottom bit then add
847 // that on at the end.
848 T Z = (X >> 1) * Y;
849 if (Z & ~(Max >> 1)) {
850 Overflowed = true;
851 return Max;
852 }
853 Z <<= 1;
854 if (X & 1)
855 return SaturatingAdd(Z, Y, ResultOverflowed);
856
857 return Z;
858}
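The Log2 comparison classifies most cases without doing the full multiply; with T = uint8_t (illustrative):
  llvm::SaturatingMultiply<uint8_t>(10, 10);  // 100: Log2Z = 6 < Log2Max = 7, exact product
  llvm::SaturatingMultiply<uint8_t>(20, 20);  // 255: Log2Z = 8 > 7, clamped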
859
860/// Multiply two unsigned integers, X and Y, and add the unsigned integer, A to
861/// the product. Clamp the result to the maximum representable value of T on
862/// overflow. ResultOverflowed indicates if the result is larger than the
863/// maximum representable value of type T.
864template <typename T>
865std::enable_if_t<std::is_unsigned<T>::value, T>
866SaturatingMultiplyAdd(T X, T Y, T A, bool *ResultOverflowed = nullptr) {
867 bool Dummy;
868 bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;
869
870 T Product = SaturatingMultiply(X, Y, &Overflowed);
871 if (Overflowed)
872 return Product;
873
874 return SaturatingAdd(A, Product, &Overflowed);
875}
876
877/// Use this rather than HUGE_VALF; the latter causes warnings on MSVC.
878extern const float huge_valf;
879
880
881/// Add two signed integers, computing the two's complement truncated result,
882/// returning true if overflow occurred.
883template <typename T>
884std::enable_if_t<std::is_signed<T>::value, T> AddOverflow(T X, T Y, T &Result) {
885#if __has_builtin(__builtin_add_overflow)
886 return __builtin_add_overflow(X, Y, &Result);
887#else
888 // Perform the unsigned addition.
889 using U = std::make_unsigned_t<T>;
890 const U UX = static_cast<U>(X);
891 const U UY = static_cast<U>(Y);
892 const U UResult = UX + UY;
893
894 // Convert to signed.
895 Result = static_cast<T>(UResult);
896
897 // Adding two positive numbers should result in a positive number.
898 if (X > 0 && Y > 0)
899 return Result <= 0;
900 // Adding two negatives should result in a negative number.
901 if (X < 0 && Y < 0)
902 return Result >= 0;
903 return false;
904#endif
905}
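With T = int8_t, for example, the builtin and the fallback path agree (illustrative):
  int8_t R;
  llvm::AddOverflow<int8_t>(100, 27, R);   // evaluates to false, R == 127
  llvm::AddOverflow<int8_t>(100, 100, R);  // evaluates to true,  R == -56 (truncated)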
906
907/// Subtract two signed integers, computing the two's complement truncated
908/// result, returning true if an overflow occurred.
909template <typename T>
910std::enable_if_t<std::is_signed<T>::value, T> SubOverflow(T X, T Y, T &Result) {
911#if __has_builtin(__builtin_sub_overflow)
912 return __builtin_sub_overflow(X, Y, &Result);
913#else
914 // Perform the unsigned subtraction.
915 using U = std::make_unsigned_t<T>;
916 const U UX = static_cast<U>(X);
917 const U UY = static_cast<U>(Y);
918 const U UResult = UX - UY;
919
920 // Convert to signed.
921 Result = static_cast<T>(UResult);
922
923 // Subtracting a positive number from a negative results in a negative number.
924 if (X <= 0 && Y > 0)
925 return Result >= 0;
926 // Subtracting a negative number from a positive results in a positive number.
927 if (X >= 0 && Y < 0)
928 return Result <= 0;
929 return false;
930#endif
931}
932
933/// Multiply two signed integers, computing the two's complement truncated
934/// result, returning true if an overflow occurred.
935template <typename T>
936std::enable_if_t<std::is_signed<T>::value, T> MulOverflow(T X, T Y, T &Result) {
937 // Perform the unsigned multiplication on absolute values.
938 using U = std::make_unsigned_t<T>;
939 const U UX = X < 0 ? (0 - static_cast<U>(X)) : static_cast<U>(X);
940 const U UY = Y < 0 ? (0 - static_cast<U>(Y)) : static_cast<U>(Y);
941 const U UResult = UX * UY;
942
943 // Convert to signed.
944 const bool IsNegative = (X < 0) ^ (Y < 0);
945 Result = IsNegative ? (0 - UResult) : UResult;
946
947 // If any of the args was 0, result is 0 and no overflow occurs.
948 if (UX == 0 || UY == 0)
949 return false;
950
951 // UX and UY are in [1, 2^n], where n is the number of digits.
952 // Check how the max allowed absolute value (2^n for negative, 2^(n-1) for
953 // positive) divided by an argument compares to the other.
954 if (IsNegative)
955 return UX > (static_cast<U>(std::numeric_limits<T>::max()) + U(1)) / UY;
956 else
957 return UX > (static_cast<U>(std::numeric_limits<T>::max())) / UY;
958}
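With T = int8_t, for example (illustrative):
  int8_t R;
  llvm::MulOverflow<int8_t>(10, 12, R);  // evaluates to false: 120 fits, R == 120
  llvm::MulOverflow<int8_t>(16, 16, R);  // evaluates to true: 256 truncates, R == 0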
959
960} // End llvm namespace
961
962#endif