Bug Summary

File: lib/Target/Mips/MipsAnalyzeImmediate.cpp
Warning: line 48, column 20
The result of the right shift is undefined due to shifting by '64', which is greater or equal to the width of type 'uint64_t'
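
The underlying issue: in C++, shifting a value by an amount greater than or equal to the width of its type is undefined behavior. A minimal standalone sketch of the flagged pattern (an illustration only, not the LLVM code itself):

  #include <cstdint>

  // Undefined behavior whenever Shamt >= 64, the width of uint64_t.
  uint64_t shiftRight(uint64_t Imm, unsigned Shamt) {
    return Imm >> Shamt;
  }

  // One defensive variant: handle a full-width (or larger) shift explicitly.
  uint64_t shiftRightChecked(uint64_t Imm, unsigned Shamt) {
    return Shamt >= 64 ? 0 : (Imm >> Shamt);
  }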

Annotated Source Code


clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name MipsAnalyzeImmediate.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -mrelocation-model pic -pic-level 2 -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-8/lib/clang/8.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-8~svn345461/build-llvm/lib/Target/Mips -I /build/llvm-toolchain-snapshot-8~svn345461/lib/Target/Mips -I /build/llvm-toolchain-snapshot-8~svn345461/build-llvm/include -I /build/llvm-toolchain-snapshot-8~svn345461/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/include/clang/8.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-8/lib/clang/8.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-8~svn345461/build-llvm/lib/Target/Mips -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2018-10-27-211344-32123-1 -x c++ /build/llvm-toolchain-snapshot-8~svn345461/lib/Target/Mips/MipsAnalyzeImmediate.cpp -faddrsig

/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/Mips/MipsAnalyzeImmediate.cpp

1//===- MipsAnalyzeImmediate.cpp - Analyze Immediates ----------------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#include "MipsAnalyzeImmediate.h"
11#include "Mips.h"
12#include "llvm/Support/MathExtras.h"
13#include <cassert>
14#include <cstdint>
15#include <iterator>
16
17using namespace llvm;
18
19MipsAnalyzeImmediate::Inst::Inst(unsigned O, unsigned I) : Opc(O), ImmOpnd(I) {}
20
21// Add I to the instruction sequences.
22void MipsAnalyzeImmediate::AddInstr(InstSeqLs &SeqLs, const Inst &I) {
 23 // Add an instruction sequence consisting of just I.
24 if (SeqLs.empty()) {
25 SeqLs.push_back(InstSeq(1, I));
26 return;
27 }
28
29 for (InstSeqLs::iterator Iter = SeqLs.begin(); Iter != SeqLs.end(); ++Iter)
30 Iter->push_back(I);
31}
32
33void MipsAnalyzeImmediate::GetInstSeqLsADDiu(uint64_t Imm, unsigned RemSize,
34 InstSeqLs &SeqLs) {
35 GetInstSeqLs((Imm + 0x8000ULL) & 0xffffffffffff0000ULL, RemSize, SeqLs);
[7] Calling 'MipsAnalyzeImmediate::GetInstSeqLs'
36 AddInstr(SeqLs, Inst(ADDiu, Imm & 0xffffULL));
37}
38
39void MipsAnalyzeImmediate::GetInstSeqLsORi(uint64_t Imm, unsigned RemSize,
40 InstSeqLs &SeqLs) {
41 GetInstSeqLs(Imm & 0xffffffffffff0000ULL, RemSize, SeqLs);
42 AddInstr(SeqLs, Inst(ORi, Imm & 0xffffULL));
43}
44
45void MipsAnalyzeImmediate::GetInstSeqLsSLL(uint64_t Imm, unsigned RemSize,
46 InstSeqLs &SeqLs) {
47 unsigned Shamt = countTrailingZeros(Imm);
[14] Calling 'countTrailingZeros<unsigned long>'
[21] Returning from 'countTrailingZeros<unsigned long>'
[22] 'Shamt' initialized to 64
48 GetInstSeqLs(Imm >> Shamt, RemSize - Shamt, SeqLs);
[23] The result of the right shift is undefined due to shifting by '64', which is greater or equal to the width of type 'uint64_t'
49 AddInstr(SeqLs, Inst(SLL, Shamt));
50}
51
52void MipsAnalyzeImmediate::GetInstSeqLs(uint64_t Imm, unsigned RemSize,
53 InstSeqLs &SeqLs) {
54 uint64_t MaskedImm = Imm & (0xffffffffffffffffULL >> (64 - Size));
55
56 // Do nothing if Imm is 0.
57 if (!MaskedImm)
[8] Assuming 'MaskedImm' is not equal to 0
[9] Taking false branch
58 return;
59
60 // A single ADDiu will do if RemSize <= 16.
61 if (RemSize <= 16) {
[10] Taking false branch
62 AddInstr(SeqLs, Inst(ADDiu, MaskedImm));
63 return;
64 }
65
66 // Shift if the lower 16-bit is cleared.
67 if (!(Imm & 0xffff)) {
[11] Assuming the condition is true
[12] Taking true branch
68 GetInstSeqLsSLL(Imm, RemSize, SeqLs);
[13] Calling 'MipsAnalyzeImmediate::GetInstSeqLsSLL'
69 return;
70 }
71
72 GetInstSeqLsADDiu(Imm, RemSize, SeqLs);
73
74 // If bit 15 is cleared, it doesn't make a difference whether the last
75 // instruction is an ADDiu or ORi. In that case, do not call GetInstSeqLsORi.
76 if (Imm & 0x8000) {
77 InstSeqLs SeqLsORi;
78 GetInstSeqLsORi(Imm, RemSize, SeqLsORi);
79 SeqLs.append(std::make_move_iterator(SeqLsORi.begin()),
80 std::make_move_iterator(SeqLsORi.end()));
81 }
82}
83
84// Replace a ADDiu & SLL pair with a LUi.
85// e.g. the following two instructions
86// ADDiu 0x0111
87// SLL 18
88// are replaced with
89// LUi 0x444
90void MipsAnalyzeImmediate::ReplaceADDiuSLLWithLUi(InstSeq &Seq) {
91 // Check if the first two instructions are ADDiu and SLL and the shift amount
92 // is at least 16.
93 if ((Seq.size() < 2) || (Seq[0].Opc != ADDiu) ||
94 (Seq[1].Opc != SLL) || (Seq[1].ImmOpnd < 16))
95 return;
96
97 // Sign-extend and shift operand of ADDiu and see if it still fits in 16-bit.
98 int64_t Imm = SignExtend64<16>(Seq[0].ImmOpnd);
99 int64_t ShiftedImm = (uint64_t)Imm << (Seq[1].ImmOpnd - 16);
100
101 if (!isInt<16>(ShiftedImm))
102 return;
103
104 // Replace the first instruction and erase the second.
105 Seq[0].Opc = LUi;
106 Seq[0].ImmOpnd = (unsigned)(ShiftedImm & 0xffff);
107 Seq.erase(Seq.begin() + 1);
108}
109
110void MipsAnalyzeImmediate::GetShortestSeq(InstSeqLs &SeqLs, InstSeq &Insts) {
111 InstSeqLs::iterator ShortestSeq = SeqLs.end();
112 // The length of an instruction sequence is at most 7.
113 unsigned ShortestLength = 8;
114
115 for (InstSeqLs::iterator S = SeqLs.begin(); S != SeqLs.end(); ++S) {
116 ReplaceADDiuSLLWithLUi(*S);
 117 assert(S->size() <= 7);
118
119 if (S->size() < ShortestLength) {
120 ShortestSeq = S;
121 ShortestLength = S->size();
122 }
123 }
124
125 Insts.clear();
126 Insts.append(ShortestSeq->begin(), ShortestSeq->end());
127}
128
129const MipsAnalyzeImmediate::InstSeq
130&MipsAnalyzeImmediate::Analyze(uint64_t Imm, unsigned Size,
131 bool LastInstrIsADDiu) {
132 this->Size = Size;
133
134 if (Size == 32) {
[1] Assuming 'Size' is equal to 32
[2] Taking true branch
135 ADDiu = Mips::ADDiu;
136 ORi = Mips::ORi;
137 SLL = Mips::SLL;
138 LUi = Mips::LUi;
139 } else {
140 ADDiu = Mips::DADDiu;
141 ORi = Mips::ORi64;
142 SLL = Mips::DSLL;
143 LUi = Mips::LUi64;
144 }
145
146 InstSeqLs SeqLs;
147
148 // Get the list of instruction sequences.
149 if (LastInstrIsADDiu | !Imm)
[3] Assuming 'Imm' is not equal to 0
[4] Assuming the condition is true
[5] Taking true branch
150 GetInstSeqLsADDiu(Imm, Size, SeqLs);
[6] Calling 'MipsAnalyzeImmediate::GetInstSeqLsADDiu'
151 else
152 GetInstSeqLs(Imm, Size, SeqLs);
153
154 // Set Insts to the shortest instruction sequence.
155 GetShortestSeq(SeqLs, Insts);
156
157 return Insts;
158}
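
On the reported path the analyzer assumes Imm is 0 when GetInstSeqLsSLL is reached, so countTrailingZeros (with its default ZB_Width behavior) returns 64, Shamt becomes 64, and the shift on line 48 is undefined. One possible way to make the non-zero precondition explicit is sketched below; this is an illustrative guard under that assumption, not the actual upstream fix:

  // Sketch only: assert the precondition so Shamt can never reach 64.
  void MipsAnalyzeImmediate::GetInstSeqLsSLL(uint64_t Imm, unsigned RemSize,
                                             InstSeqLs &SeqLs) {
    assert(Imm != 0 && "GetInstSeqLsSLL expects a non-zero immediate");
    unsigned Shamt = countTrailingZeros(Imm); // always < 64 once Imm != 0
    GetInstSeqLs(Imm >> Shamt, RemSize - Shamt, SeqLs);
    AddInstr(SeqLs, Inst(SLL, Shamt));
  }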

/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/Support/MathExtras.h

1//===-- llvm/Support/MathExtras.h - Useful math functions -------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file contains some functions that are useful for math stuff.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_SUPPORT_MATHEXTRAS_H
15#define LLVM_SUPPORT_MATHEXTRAS_H
16
17#include "llvm/Support/Compiler.h"
18#include "llvm/Support/SwapByteOrder.h"
19#include <algorithm>
20#include <cassert>
21#include <climits>
22#include <cstring>
23#include <limits>
24#include <type_traits>
25
26#ifdef __ANDROID_NDK__
27#include <android/api-level.h>
28#endif
29
30#ifdef _MSC_VER
 31// Declare these intrinsics manually rather than including intrin.h. It's very
32// expensive, and MathExtras.h is popular.
33// #include <intrin.h>
34extern "C" {
35unsigned char _BitScanForward(unsigned long *_Index, unsigned long _Mask);
36unsigned char _BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask);
37unsigned char _BitScanReverse(unsigned long *_Index, unsigned long _Mask);
38unsigned char _BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask);
39}
40#endif
41
42namespace llvm {
43/// The behavior an operation has on an input of 0.
44enum ZeroBehavior {
45 /// The returned value is undefined.
46 ZB_Undefined,
47 /// The returned value is numeric_limits<T>::max()
48 ZB_Max,
49 /// The returned value is numeric_limits<T>::digits
50 ZB_Width
51};
52
53namespace detail {
54template <typename T, std::size_t SizeOfT> struct TrailingZerosCounter {
55 static std::size_t count(T Val, ZeroBehavior) {
56 if (!Val)
57 return std::numeric_limits<T>::digits;
58 if (Val & 0x1)
59 return 0;
60
61 // Bisection method.
62 std::size_t ZeroBits = 0;
63 T Shift = std::numeric_limits<T>::digits >> 1;
64 T Mask = std::numeric_limits<T>::max() >> Shift;
65 while (Shift) {
66 if ((Val & Mask) == 0) {
67 Val >>= Shift;
68 ZeroBits |= Shift;
69 }
70 Shift >>= 1;
71 Mask >>= Shift;
72 }
73 return ZeroBits;
74 }
75};
76
 77#if __GNUC__ >= 4 || defined(_MSC_VER)
78template <typename T> struct TrailingZerosCounter<T, 4> {
79 static std::size_t count(T Val, ZeroBehavior ZB) {
80 if (ZB != ZB_Undefined && Val == 0)
81 return 32;
82
 83#if __has_builtin(__builtin_ctz) || LLVM_GNUC_PREREQ(4, 0, 0)
84 return __builtin_ctz(Val);
85#elif defined(_MSC_VER)
86 unsigned long Index;
87 _BitScanForward(&Index, Val);
88 return Index;
89#endif
90 }
91};
92
93#if !defined(_MSC_VER) || defined(_M_X64)
94template <typename T> struct TrailingZerosCounter<T, 8> {
95 static std::size_t count(T Val, ZeroBehavior ZB) {
96 if (ZB != ZB_Undefined && Val == 0)
[16] Assuming 'Val' is equal to 0
[17] Taking true branch
97 return 64;
[18] Returning the value 64
98
 99#if __has_builtin(__builtin_ctzll) || LLVM_GNUC_PREREQ(4, 0, 0)
100 return __builtin_ctzll(Val);
101#elif defined(_MSC_VER)
102 unsigned long Index;
103 _BitScanForward64(&Index, Val);
104 return Index;
105#endif
106 }
107};
108#endif
109#endif
110} // namespace detail
111
112/// Count number of 0's from the least significant bit to the most
113/// stopping at the first 1.
114///
115/// Only unsigned integral types are allowed.
116///
117/// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are
118/// valid arguments.
119template <typename T>
120std::size_t countTrailingZeros(T Val, ZeroBehavior ZB = ZB_Width) {
121 static_assert(std::numeric_limits<T>::is_integer &&
122 !std::numeric_limits<T>::is_signed,
123 "Only unsigned integral types are allowed.");
124 return llvm::detail::TrailingZerosCounter<T, sizeof(T)>::count(Val, ZB);
[15] Calling 'TrailingZerosCounter::count'
[19] Returning from 'TrailingZerosCounter::count'
[20] Returning the value 64
125}
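
For reference, a small usage sketch of countTrailingZeros as defined above (assuming MathExtras.h is on the include path); note that the default ZB_Width behavior on a zero input returns the type width, which is exactly how Shamt becomes 64 in MipsAnalyzeImmediate.cpp:

  #include "llvm/Support/MathExtras.h"
  #include <cassert>
  #include <cstdint>

  void demoCountTrailingZeros() {
    assert(llvm::countTrailingZeros(0x8u) == 3);          // lowest set bit is at index 3
    assert(llvm::countTrailingZeros(uint64_t(0)) == 64);  // ZB_Width: zero input returns the width
  }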
126
127namespace detail {
128template <typename T, std::size_t SizeOfT> struct LeadingZerosCounter {
129 static std::size_t count(T Val, ZeroBehavior) {
130 if (!Val)
131 return std::numeric_limits<T>::digits;
132
133 // Bisection method.
134 std::size_t ZeroBits = 0;
135 for (T Shift = std::numeric_limits<T>::digits >> 1; Shift; Shift >>= 1) {
136 T Tmp = Val >> Shift;
137 if (Tmp)
138 Val = Tmp;
139 else
140 ZeroBits |= Shift;
141 }
142 return ZeroBits;
143 }
144};
145
 146#if __GNUC__ >= 4 || defined(_MSC_VER)
147template <typename T> struct LeadingZerosCounter<T, 4> {
148 static std::size_t count(T Val, ZeroBehavior ZB) {
149 if (ZB != ZB_Undefined && Val == 0)
150 return 32;
151
 152#if __has_builtin(__builtin_clz) || LLVM_GNUC_PREREQ(4, 0, 0)
153 return __builtin_clz(Val);
154#elif defined(_MSC_VER)
155 unsigned long Index;
156 _BitScanReverse(&Index, Val);
157 return Index ^ 31;
158#endif
159 }
160};
161
162#if !defined(_MSC_VER) || defined(_M_X64)
163template <typename T> struct LeadingZerosCounter<T, 8> {
164 static std::size_t count(T Val, ZeroBehavior ZB) {
165 if (ZB != ZB_Undefined && Val == 0)
166 return 64;
167
 168#if __has_builtin(__builtin_clzll) || LLVM_GNUC_PREREQ(4, 0, 0)
169 return __builtin_clzll(Val);
170#elif defined(_MSC_VER)
171 unsigned long Index;
172 _BitScanReverse64(&Index, Val);
173 return Index ^ 63;
174#endif
175 }
176};
177#endif
178#endif
179} // namespace detail
180
181/// Count number of 0's from the most significant bit to the least
182/// stopping at the first 1.
183///
184/// Only unsigned integral types are allowed.
185///
186/// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are
187/// valid arguments.
188template <typename T>
189std::size_t countLeadingZeros(T Val, ZeroBehavior ZB = ZB_Width) {
190 static_assert(std::numeric_limits<T>::is_integer &&
191 !std::numeric_limits<T>::is_signed,
192 "Only unsigned integral types are allowed.");
193 return llvm::detail::LeadingZerosCounter<T, sizeof(T)>::count(Val, ZB);
194}
195
196/// Get the index of the first set bit starting from the least
197/// significant bit.
198///
199/// Only unsigned integral types are allowed.
200///
201/// \param ZB the behavior on an input of 0. Only ZB_Max and ZB_Undefined are
202/// valid arguments.
203template <typename T> T findFirstSet(T Val, ZeroBehavior ZB = ZB_Max) {
204 if (ZB == ZB_Max && Val == 0)
205 return std::numeric_limits<T>::max();
206
207 return countTrailingZeros(Val, ZB_Undefined);
208}
209
210/// Create a bitmask with the N right-most bits set to 1, and all other
211/// bits set to 0. Only unsigned types are allowed.
212template <typename T> T maskTrailingOnes(unsigned N) {
213 static_assert(std::is_unsigned<T>::value, "Invalid type!");
 214 const unsigned Bits = CHAR_BIT * sizeof(T);
 215 assert(N <= Bits && "Invalid bit index");
216 return N == 0 ? 0 : (T(-1) >> (Bits - N));
217}
218
219/// Create a bitmask with the N left-most bits set to 1, and all other
220/// bits set to 0. Only unsigned types are allowed.
221template <typename T> T maskLeadingOnes(unsigned N) {
 222 return ~maskTrailingOnes<T>(CHAR_BIT * sizeof(T) - N);
223}
224
225/// Create a bitmask with the N right-most bits set to 0, and all other
226/// bits set to 1. Only unsigned types are allowed.
227template <typename T> T maskTrailingZeros(unsigned N) {
 228 return maskLeadingOnes<T>(CHAR_BIT * sizeof(T) - N);
229}
230
231/// Create a bitmask with the N left-most bits set to 0, and all other
232/// bits set to 1. Only unsigned types are allowed.
233template <typename T> T maskLeadingZeros(unsigned N) {
 234 return maskTrailingOnes<T>(CHAR_BIT * sizeof(T) - N);
235}
236
237/// Get the index of the last set bit starting from the least
238/// significant bit.
239///
240/// Only unsigned integral types are allowed.
241///
242/// \param ZB the behavior on an input of 0. Only ZB_Max and ZB_Undefined are
243/// valid arguments.
244template <typename T> T findLastSet(T Val, ZeroBehavior ZB = ZB_Max) {
245 if (ZB == ZB_Max && Val == 0)
246 return std::numeric_limits<T>::max();
247
248 // Use ^ instead of - because both gcc and llvm can remove the associated ^
249 // in the __builtin_clz intrinsic on x86.
250 return countLeadingZeros(Val, ZB_Undefined) ^
251 (std::numeric_limits<T>::digits - 1);
252}
253
254/// Macro compressed bit reversal table for 256 bits.
255///
256/// http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
257static const unsigned char BitReverseTable256[256] = {
258#define R2(n) n, n + 2 * 64, n + 1 * 64, n + 3 * 64
259#define R4(n) R2(n), R2(n + 2 * 16), R2(n + 1 * 16), R2(n + 3 * 16)
260#define R6(n) R4(n), R4(n + 2 * 4), R4(n + 1 * 4), R4(n + 3 * 4)
261 R6(0), R6(2), R6(1), R6(3)
262#undef R2
263#undef R4
264#undef R6
265};
266
267/// Reverse the bits in \p Val.
268template <typename T>
269T reverseBits(T Val) {
270 unsigned char in[sizeof(Val)];
271 unsigned char out[sizeof(Val)];
272 std::memcpy(in, &Val, sizeof(Val));
273 for (unsigned i = 0; i < sizeof(Val); ++i)
274 out[(sizeof(Val) - i) - 1] = BitReverseTable256[in[i]];
275 std::memcpy(&Val, out, sizeof(Val));
276 return Val;
277}
278
279// NOTE: The following support functions use the _32/_64 extensions instead of
280// type overloading so that signed and unsigned integers can be used without
281// ambiguity.
282
283/// Return the high 32 bits of a 64 bit value.
284constexpr inline uint32_t Hi_32(uint64_t Value) {
285 return static_cast<uint32_t>(Value >> 32);
286}
287
288/// Return the low 32 bits of a 64 bit value.
289constexpr inline uint32_t Lo_32(uint64_t Value) {
290 return static_cast<uint32_t>(Value);
291}
292
293/// Make a 64-bit integer from a high / low pair of 32-bit integers.
294constexpr inline uint64_t Make_64(uint32_t High, uint32_t Low) {
295 return ((uint64_t)High << 32) | (uint64_t)Low;
296}
297
298/// Checks if an integer fits into the given bit width.
299template <unsigned N> constexpr inline bool isInt(int64_t x) {
 300 return N >= 64 || (-(INT64_C(1)<<(N-1)) <= x && x < (INT64_C(1)<<(N-1)));
301}
302// Template specializations to get better code for common cases.
303template <> constexpr inline bool isInt<8>(int64_t x) {
304 return static_cast<int8_t>(x) == x;
305}
306template <> constexpr inline bool isInt<16>(int64_t x) {
307 return static_cast<int16_t>(x) == x;
308}
309template <> constexpr inline bool isInt<32>(int64_t x) {
310 return static_cast<int32_t>(x) == x;
311}
312
313/// Checks if a signed integer is an N bit number shifted left by S.
314template <unsigned N, unsigned S>
315constexpr inline bool isShiftedInt(int64_t x) {
316 static_assert(
317 N > 0, "isShiftedInt<0> doesn't make sense (refers to a 0-bit number.");
318 static_assert(N + S <= 64, "isShiftedInt<N, S> with N + S > 64 is too wide.");
 319 return isInt<N + S>(x) && (x % (UINT64_C(1) << S) == 0);
320}
321
322/// Checks if an unsigned integer fits into the given bit width.
323///
324/// This is written as two functions rather than as simply
325///
326/// return N >= 64 || X < (UINT64_C(1) << N);
327///
328/// to keep MSVC from (incorrectly) warning on isUInt<64> that we're shifting
329/// left too many places.
330template <unsigned N>
331constexpr inline typename std::enable_if<(N < 64), bool>::type
332isUInt(uint64_t X) {
333 static_assert(N > 0, "isUInt<0> doesn't make sense");
 334 return X < (UINT64_C(1) << (N));
335}
336template <unsigned N>
337constexpr inline typename std::enable_if<N >= 64, bool>::type
338isUInt(uint64_t X) {
339 return true;
340}
341
342// Template specializations to get better code for common cases.
343template <> constexpr inline bool isUInt<8>(uint64_t x) {
344 return static_cast<uint8_t>(x) == x;
345}
346template <> constexpr inline bool isUInt<16>(uint64_t x) {
347 return static_cast<uint16_t>(x) == x;
348}
349template <> constexpr inline bool isUInt<32>(uint64_t x) {
350 return static_cast<uint32_t>(x) == x;
351}
352
 353/// Checks if an unsigned integer is an N bit number shifted left by S.
354template <unsigned N, unsigned S>
355constexpr inline bool isShiftedUInt(uint64_t x) {
356 static_assert(
357 N > 0, "isShiftedUInt<0> doesn't make sense (refers to a 0-bit number)");
358 static_assert(N + S <= 64,
359 "isShiftedUInt<N, S> with N + S > 64 is too wide.");
360 // Per the two static_asserts above, S must be strictly less than 64. So
361 // 1 << S is not undefined behavior.
 362 return isUInt<N + S>(x) && (x % (UINT64_C(1) << S) == 0);
363}
364
365/// Gets the maximum value for a N-bit unsigned integer.
366inline uint64_t maxUIntN(uint64_t N) {
 367 assert(N > 0 && N <= 64 && "integer width out of range");
368
369 // uint64_t(1) << 64 is undefined behavior, so we can't do
370 // (uint64_t(1) << N) - 1
371 // without checking first that N != 64. But this works and doesn't have a
372 // branch.
 373 return UINT64_MAX >> (64 - N);
374}
375
376/// Gets the minimum value for a N-bit signed integer.
377inline int64_t minIntN(int64_t N) {
 378 assert(N > 0 && N <= 64 && "integer width out of range");
379
 380 return -(UINT64_C(1)<<(N-1));
381}
382
383/// Gets the maximum value for a N-bit signed integer.
384inline int64_t maxIntN(int64_t N) {
 385 assert(N > 0 && N <= 64 && "integer width out of range");
386
387 // This relies on two's complement wraparound when N == 64, so we convert to
388 // int64_t only at the very end to avoid UB.
 389 return (UINT64_C(1) << (N - 1)) - 1;
390}
391
392/// Checks if an unsigned integer fits into the given (dynamic) bit width.
393inline bool isUIntN(unsigned N, uint64_t x) {
394 return N >= 64 || x <= maxUIntN(N);
395}
396
 397/// Checks if a signed integer fits into the given (dynamic) bit width.
398inline bool isIntN(unsigned N, int64_t x) {
399 return N >= 64 || (minIntN(N) <= x && x <= maxIntN(N));
400}
401
402/// Return true if the argument is a non-empty sequence of ones starting at the
403/// least significant bit with the remainder zero (32 bit version).
404/// Ex. isMask_32(0x0000FFFFU) == true.
405constexpr inline bool isMask_32(uint32_t Value) {
406 return Value && ((Value + 1) & Value) == 0;
407}
408
409/// Return true if the argument is a non-empty sequence of ones starting at the
410/// least significant bit with the remainder zero (64 bit version).
411constexpr inline bool isMask_64(uint64_t Value) {
412 return Value && ((Value + 1) & Value) == 0;
413}
414
415/// Return true if the argument contains a non-empty sequence of ones with the
416/// remainder zero (32 bit version.) Ex. isShiftedMask_32(0x0000FF00U) == true.
417constexpr inline bool isShiftedMask_32(uint32_t Value) {
418 return Value && isMask_32((Value - 1) | Value);
419}
420
421/// Return true if the argument contains a non-empty sequence of ones with the
422/// remainder zero (64 bit version.)
423constexpr inline bool isShiftedMask_64(uint64_t Value) {
424 return Value && isMask_64((Value - 1) | Value);
425}
426
427/// Return true if the argument is a power of two > 0.
428/// Ex. isPowerOf2_32(0x00100000U) == true (32 bit edition.)
429constexpr inline bool isPowerOf2_32(uint32_t Value) {
430 return Value && !(Value & (Value - 1));
431}
432
433/// Return true if the argument is a power of two > 0 (64 bit edition.)
434constexpr inline bool isPowerOf2_64(uint64_t Value) {
435 return Value && !(Value & (Value - 1));
436}
437
438/// Return a byte-swapped representation of the 16-bit argument.
439inline uint16_t ByteSwap_16(uint16_t Value) {
440 return sys::SwapByteOrder_16(Value);
441}
442
443/// Return a byte-swapped representation of the 32-bit argument.
444inline uint32_t ByteSwap_32(uint32_t Value) {
445 return sys::SwapByteOrder_32(Value);
446}
447
448/// Return a byte-swapped representation of the 64-bit argument.
449inline uint64_t ByteSwap_64(uint64_t Value) {
450 return sys::SwapByteOrder_64(Value);
451}
452
453/// Count the number of ones from the most significant bit to the first
454/// zero bit.
455///
456/// Ex. countLeadingOnes(0xFF0FFF00) == 8.
457/// Only unsigned integral types are allowed.
458///
459/// \param ZB the behavior on an input of all ones. Only ZB_Width and
460/// ZB_Undefined are valid arguments.
461template <typename T>
462std::size_t countLeadingOnes(T Value, ZeroBehavior ZB = ZB_Width) {
463 static_assert(std::numeric_limits<T>::is_integer &&
464 !std::numeric_limits<T>::is_signed,
465 "Only unsigned integral types are allowed.");
466 return countLeadingZeros<T>(~Value, ZB);
467}
468
469/// Count the number of ones from the least significant bit to the first
470/// zero bit.
471///
472/// Ex. countTrailingOnes(0x00FF00FF) == 8.
473/// Only unsigned integral types are allowed.
474///
475/// \param ZB the behavior on an input of all ones. Only ZB_Width and
476/// ZB_Undefined are valid arguments.
477template <typename T>
478std::size_t countTrailingOnes(T Value, ZeroBehavior ZB = ZB_Width) {
479 static_assert(std::numeric_limits<T>::is_integer &&
480 !std::numeric_limits<T>::is_signed,
481 "Only unsigned integral types are allowed.");
482 return countTrailingZeros<T>(~Value, ZB);
483}
484
485namespace detail {
486template <typename T, std::size_t SizeOfT> struct PopulationCounter {
487 static unsigned count(T Value) {
488 // Generic version, forward to 32 bits.
489 static_assert(SizeOfT <= 4, "Not implemented!");
 490#if __GNUC__ >= 4
491 return __builtin_popcount(Value);
492#else
493 uint32_t v = Value;
494 v = v - ((v >> 1) & 0x55555555);
495 v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
496 return ((v + (v >> 4) & 0xF0F0F0F) * 0x1010101) >> 24;
497#endif
498 }
499};
500
501template <typename T> struct PopulationCounter<T, 8> {
502 static unsigned count(T Value) {
 503#if __GNUC__ >= 4
504 return __builtin_popcountll(Value);
505#else
506 uint64_t v = Value;
507 v = v - ((v >> 1) & 0x5555555555555555ULL);
508 v = (v & 0x3333333333333333ULL) + ((v >> 2) & 0x3333333333333333ULL);
509 v = (v + (v >> 4)) & 0x0F0F0F0F0F0F0F0FULL;
510 return unsigned((uint64_t)(v * 0x0101010101010101ULL) >> 56);
511#endif
512 }
513};
514} // namespace detail
515
516/// Count the number of set bits in a value.
517/// Ex. countPopulation(0xF000F000) = 8
518/// Returns 0 if the word is zero.
519template <typename T>
520inline unsigned countPopulation(T Value) {
521 static_assert(std::numeric_limits<T>::is_integer &&
522 !std::numeric_limits<T>::is_signed,
523 "Only unsigned integral types are allowed.");
524 return detail::PopulationCounter<T, sizeof(T)>::count(Value);
525}
526
527/// Return the log base 2 of the specified value.
528inline double Log2(double Value) {
529#if defined(__ANDROID_API__) && __ANDROID_API__ < 18
530 return __builtin_log(Value) / __builtin_log(2.0);
531#else
532 return log2(Value);
533#endif
534}
535
536/// Return the floor log base 2 of the specified value, -1 if the value is zero.
537/// (32 bit edition.)
538/// Ex. Log2_32(32) == 5, Log2_32(1) == 0, Log2_32(0) == -1, Log2_32(6) == 2
539inline unsigned Log2_32(uint32_t Value) {
540 return 31 - countLeadingZeros(Value);
541}
542
543/// Return the floor log base 2 of the specified value, -1 if the value is zero.
544/// (64 bit edition.)
545inline unsigned Log2_64(uint64_t Value) {
546 return 63 - countLeadingZeros(Value);
547}
548
549/// Return the ceil log base 2 of the specified value, 32 if the value is zero.
550/// (32 bit edition).
551/// Ex. Log2_32_Ceil(32) == 5, Log2_32_Ceil(1) == 0, Log2_32_Ceil(6) == 3
552inline unsigned Log2_32_Ceil(uint32_t Value) {
553 return 32 - countLeadingZeros(Value - 1);
554}
555
556/// Return the ceil log base 2 of the specified value, 64 if the value is zero.
557/// (64 bit edition.)
558inline unsigned Log2_64_Ceil(uint64_t Value) {
559 return 64 - countLeadingZeros(Value - 1);
560}
561
562/// Return the greatest common divisor of the values using Euclid's algorithm.
563inline uint64_t GreatestCommonDivisor64(uint64_t A, uint64_t B) {
564 while (B) {
565 uint64_t T = B;
566 B = A % B;
567 A = T;
568 }
569 return A;
570}
571
572/// This function takes a 64-bit integer and returns the bit equivalent double.
573inline double BitsToDouble(uint64_t Bits) {
574 double D;
575 static_assert(sizeof(uint64_t) == sizeof(double), "Unexpected type sizes");
576 memcpy(&D, &Bits, sizeof(Bits));
577 return D;
578}
579
580/// This function takes a 32-bit integer and returns the bit equivalent float.
581inline float BitsToFloat(uint32_t Bits) {
582 float F;
583 static_assert(sizeof(uint32_t) == sizeof(float), "Unexpected type sizes");
584 memcpy(&F, &Bits, sizeof(Bits));
585 return F;
586}
587
588/// This function takes a double and returns the bit equivalent 64-bit integer.
589/// Note that copying doubles around changes the bits of NaNs on some hosts,
590/// notably x86, so this routine cannot be used if these bits are needed.
591inline uint64_t DoubleToBits(double Double) {
592 uint64_t Bits;
593 static_assert(sizeof(uint64_t) == sizeof(double), "Unexpected type sizes");
594 memcpy(&Bits, &Double, sizeof(Double));
595 return Bits;
596}
597
598/// This function takes a float and returns the bit equivalent 32-bit integer.
599/// Note that copying floats around changes the bits of NaNs on some hosts,
600/// notably x86, so this routine cannot be used if these bits are needed.
601inline uint32_t FloatToBits(float Float) {
602 uint32_t Bits;
603 static_assert(sizeof(uint32_t) == sizeof(float), "Unexpected type sizes");
604 memcpy(&Bits, &Float, sizeof(Float));
605 return Bits;
606}
607
608/// A and B are either alignments or offsets. Return the minimum alignment that
609/// may be assumed after adding the two together.
610constexpr inline uint64_t MinAlign(uint64_t A, uint64_t B) {
611 // The largest power of 2 that divides both A and B.
612 //
613 // Replace "-Value" by "1+~Value" in the following commented code to avoid
614 // MSVC warning C4146
615 // return (A | B) & -(A | B);
616 return (A | B) & (1 + ~(A | B));
617}
618
619/// Aligns \c Addr to \c Alignment bytes, rounding up.
620///
621/// Alignment should be a power of two. This method rounds up, so
622/// alignAddr(7, 4) == 8 and alignAddr(8, 4) == 8.
623inline uintptr_t alignAddr(const void *Addr, size_t Alignment) {
 624 assert(Alignment && isPowerOf2_64((uint64_t)Alignment) &&
 625        "Alignment is not a power of two!");
626
 627 assert((uintptr_t)Addr + Alignment - 1 >= (uintptr_t)Addr);
628
629 return (((uintptr_t)Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1));
630}
631
632/// Returns the necessary adjustment for aligning \c Ptr to \c Alignment
633/// bytes, rounding up.
634inline size_t alignmentAdjustment(const void *Ptr, size_t Alignment) {
635 return alignAddr(Ptr, Alignment) - (uintptr_t)Ptr;
636}
637
638/// Returns the next power of two (in 64-bits) that is strictly greater than A.
639/// Returns zero on overflow.
640inline uint64_t NextPowerOf2(uint64_t A) {
641 A |= (A >> 1);
642 A |= (A >> 2);
643 A |= (A >> 4);
644 A |= (A >> 8);
645 A |= (A >> 16);
646 A |= (A >> 32);
647 return A + 1;
648}
649
650/// Returns the power of two which is less than or equal to the given value.
651/// Essentially, it is a floor operation across the domain of powers of two.
652inline uint64_t PowerOf2Floor(uint64_t A) {
653 if (!A) return 0;
654 return 1ull << (63 - countLeadingZeros(A, ZB_Undefined));
655}
656
657/// Returns the power of two which is greater than or equal to the given value.
658/// Essentially, it is a ceil operation across the domain of powers of two.
659inline uint64_t PowerOf2Ceil(uint64_t A) {
660 if (!A)
661 return 0;
662 return NextPowerOf2(A - 1);
663}
664
665/// Returns the next integer (mod 2**64) that is greater than or equal to
666/// \p Value and is a multiple of \p Align. \p Align must be non-zero.
667///
668/// If non-zero \p Skew is specified, the return value will be a minimal
669/// integer that is greater than or equal to \p Value and equal to
670/// \p Align * N + \p Skew for some integer N. If \p Skew is larger than
671/// \p Align, its value is adjusted to '\p Skew mod \p Align'.
672///
673/// Examples:
674/// \code
675/// alignTo(5, 8) = 8
676/// alignTo(17, 8) = 24
677/// alignTo(~0LL, 8) = 0
678/// alignTo(321, 255) = 510
679///
680/// alignTo(5, 8, 7) = 7
681/// alignTo(17, 8, 1) = 17
682/// alignTo(~0LL, 8, 3) = 3
683/// alignTo(321, 255, 42) = 552
684/// \endcode
685inline uint64_t alignTo(uint64_t Value, uint64_t Align, uint64_t Skew = 0) {
 686 assert(Align != 0u && "Align can't be 0.");
687 Skew %= Align;
688 return (Value + Align - 1 - Skew) / Align * Align + Skew;
689}
690
691/// Returns the next integer (mod 2**64) that is greater than or equal to
692/// \p Value and is a multiple of \c Align. \c Align must be non-zero.
693template <uint64_t Align> constexpr inline uint64_t alignTo(uint64_t Value) {
694 static_assert(Align != 0u, "Align must be non-zero");
695 return (Value + Align - 1) / Align * Align;
696}
697
698/// Returns the integer ceil(Numerator / Denominator).
699inline uint64_t divideCeil(uint64_t Numerator, uint64_t Denominator) {
700 return alignTo(Numerator, Denominator) / Denominator;
701}
702
703/// \c alignTo for contexts where a constant expression is required.
704/// \sa alignTo
705///
706/// \todo FIXME: remove when \c constexpr becomes really \c constexpr
707template <uint64_t Align>
708struct AlignTo {
709 static_assert(Align != 0u, "Align must be non-zero");
710 template <uint64_t Value>
711 struct from_value {
712 static const uint64_t value = (Value + Align - 1) / Align * Align;
713 };
714};
715
716/// Returns the largest uint64_t less than or equal to \p Value and is
717/// \p Skew mod \p Align. \p Align must be non-zero
718inline uint64_t alignDown(uint64_t Value, uint64_t Align, uint64_t Skew = 0) {
 719 assert(Align != 0u && "Align can't be 0.");
720 Skew %= Align;
721 return (Value - Skew) / Align * Align + Skew;
722}
723
724/// Returns the offset to the next integer (mod 2**64) that is greater than
725/// or equal to \p Value and is a multiple of \p Align. \p Align must be
726/// non-zero.
727inline uint64_t OffsetToAlignment(uint64_t Value, uint64_t Align) {
728 return alignTo(Value, Align) - Value;
729}
730
731/// Sign-extend the number in the bottom B bits of X to a 32-bit integer.
732/// Requires 0 < B <= 32.
733template <unsigned B> constexpr inline int32_t SignExtend32(uint32_t X) {
734 static_assert(B > 0, "Bit width can't be 0.");
735 static_assert(B <= 32, "Bit width out of range.");
736 return int32_t(X << (32 - B)) >> (32 - B);
737}
738
739/// Sign-extend the number in the bottom B bits of X to a 32-bit integer.
740/// Requires 0 < B < 32.
741inline int32_t SignExtend32(uint32_t X, unsigned B) {
 742 assert(B > 0 && "Bit width can't be 0.");
 743 assert(B <= 32 && "Bit width out of range.");
744 return int32_t(X << (32 - B)) >> (32 - B);
745}
746
747/// Sign-extend the number in the bottom B bits of X to a 64-bit integer.
748/// Requires 0 < B < 64.
749template <unsigned B> constexpr inline int64_t SignExtend64(uint64_t x) {
750 static_assert(B > 0, "Bit width can't be 0.");
751 static_assert(B <= 64, "Bit width out of range.");
752 return int64_t(x << (64 - B)) >> (64 - B);
753}
754
755/// Sign-extend the number in the bottom B bits of X to a 64-bit integer.
756/// Requires 0 < B < 64.
757inline int64_t SignExtend64(uint64_t X, unsigned B) {
 758 assert(B > 0 && "Bit width can't be 0.");
 759 assert(B <= 64 && "Bit width out of range.");
760 return int64_t(X << (64 - B)) >> (64 - B);
761}
762
763/// Subtract two unsigned integers, X and Y, of type T and return the absolute
764/// value of the result.
765template <typename T>
766typename std::enable_if<std::is_unsigned<T>::value, T>::type
767AbsoluteDifference(T X, T Y) {
768 return std::max(X, Y) - std::min(X, Y);
769}
770
771/// Add two unsigned integers, X and Y, of type T. Clamp the result to the
772/// maximum representable value of T on overflow. ResultOverflowed indicates if
773/// the result is larger than the maximum representable value of type T.
774template <typename T>
775typename std::enable_if<std::is_unsigned<T>::value, T>::type
776SaturatingAdd(T X, T Y, bool *ResultOverflowed = nullptr) {
777 bool Dummy;
778 bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;
779 // Hacker's Delight, p. 29
780 T Z = X + Y;
781 Overflowed = (Z < X || Z < Y);
782 if (Overflowed)
783 return std::numeric_limits<T>::max();
784 else
785 return Z;
786}
787
788/// Multiply two unsigned integers, X and Y, of type T. Clamp the result to the
789/// maximum representable value of T on overflow. ResultOverflowed indicates if
790/// the result is larger than the maximum representable value of type T.
791template <typename T>
792typename std::enable_if<std::is_unsigned<T>::value, T>::type
793SaturatingMultiply(T X, T Y, bool *ResultOverflowed = nullptr) {
794 bool Dummy;
795 bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;
796
797 // Hacker's Delight, p. 30 has a different algorithm, but we don't use that
798 // because it fails for uint16_t (where multiplication can have undefined
799 // behavior due to promotion to int), and requires a division in addition
800 // to the multiplication.
801
802 Overflowed = false;
803
804 // Log2(Z) would be either Log2Z or Log2Z + 1.
805 // Special case: if X or Y is 0, Log2_64 gives -1, and Log2Z
806 // will necessarily be less than Log2Max as desired.
807 int Log2Z = Log2_64(X) + Log2_64(Y);
808 const T Max = std::numeric_limits<T>::max();
809 int Log2Max = Log2_64(Max);
810 if (Log2Z < Log2Max) {
811 return X * Y;
812 }
813 if (Log2Z > Log2Max) {
814 Overflowed = true;
815 return Max;
816 }
817
818 // We're going to use the top bit, and maybe overflow one
819 // bit past it. Multiply all but the bottom bit then add
820 // that on at the end.
821 T Z = (X >> 1) * Y;
822 if (Z & ~(Max >> 1)) {
823 Overflowed = true;
824 return Max;
825 }
826 Z <<= 1;
827 if (X & 1)
828 return SaturatingAdd(Z, Y, ResultOverflowed);
829
830 return Z;
831}
832
833/// Multiply two unsigned integers, X and Y, and add the unsigned integer, A to
834/// the product. Clamp the result to the maximum representable value of T on
835/// overflow. ResultOverflowed indicates if the result is larger than the
836/// maximum representable value of type T.
837template <typename T>
838typename std::enable_if<std::is_unsigned<T>::value, T>::type
839SaturatingMultiplyAdd(T X, T Y, T A, bool *ResultOverflowed = nullptr) {
840 bool Dummy;
841 bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;
842
843 T Product = SaturatingMultiply(X, Y, &Overflowed);
844 if (Overflowed)
845 return Product;
846
847 return SaturatingAdd(A, Product, &Overflowed);
848}
849
850/// Use this rather than HUGE_VALF; the latter causes warnings on MSVC.
851extern const float huge_valf;
852} // End llvm namespace
853
854#endif