File: lib/Target/WebAssembly/WebAssemblyFrameLowering.cpp
Warning: line 203, column 5: The result of the left shift is undefined due to shifting by '32', which is greater or equal to the width of type 'unsigned int'
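The flagged expression is the power-of-two assertion in emitPrologue (source line 203 in the listing below): (1u << countTrailingZeros(Alignment)) == Alignment. With its default ZB_Width behavior, countTrailingZeros returns the full bit width, 32, for a zero input, and shifting a 32-bit unsigned int by 32 is undefined behavior in C++. A minimal standalone sketch of the pattern, where ctz32 is a hypothetical stand-in for llvm::countTrailingZeros, not the LLVM implementation:

#include <cstdint>

// Hypothetical stand-in for llvm::countTrailingZeros with the default
// ZB_Width behavior: a zero input yields the full bit width, 32.
static unsigned ctz32(uint32_t V) {
  if (V == 0)
    return 32;
  unsigned N = 0;
  while ((V & 1u) == 0) {
    V >>= 1;
    ++N;
  }
  return N;
}

bool isPow2ViaShift(uint32_t Alignment) {
  // The diagnosed pattern: if Alignment == 0, ctz32 returns 32 and the
  // shift count equals the width of 'unsigned int' -- undefined behavior.
  return (1u << ctz32(Alignment)) == Alignment;
}

bool isPow2Guarded(uint32_t Alignment) {
  // Shift-free equivalent that is well-defined for every input, including 0.
  return Alignment != 0 && (Alignment & (Alignment - 1)) == 0;
}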
1 | //===-- WebAssemblyFrameLowering.cpp - WebAssembly Frame Lowering ----------==//
2 | //
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 | // See https://llvm.org/LICENSE.txt for license information.
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 | //
7 | //===----------------------------------------------------------------------===//
8 | ///
9 | /// \file
10 | /// This file contains the WebAssembly implementation of
11 | /// TargetFrameLowering class.
12 | ///
13 | /// On WebAssembly, there aren't a lot of things to do here. There are no
14 | /// callee-saved registers to save, and no spill slots.
15 | ///
16 | /// The stack grows downward.
17 | ///
18 | //===----------------------------------------------------------------------===//
19 |
20 | #include "WebAssemblyFrameLowering.h"
21 | #include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
22 | #include "WebAssemblyInstrInfo.h"
23 | #include "WebAssemblyMachineFunctionInfo.h"
24 | #include "WebAssemblySubtarget.h"
25 | #include "WebAssemblyTargetMachine.h"
26 | #include "WebAssemblyUtilities.h"
27 | #include "llvm/CodeGen/MachineFrameInfo.h"
28 | #include "llvm/CodeGen/MachineFunction.h"
29 | #include "llvm/CodeGen/MachineInstrBuilder.h"
30 | #include "llvm/CodeGen/MachineModuleInfoImpls.h"
31 | #include "llvm/CodeGen/MachineRegisterInfo.h"
32 | #include "llvm/MC/MCAsmInfo.h"
33 | #include "llvm/Support/Debug.h"
34 | using namespace llvm;
35 |
36 | #define DEBUG_TYPE "wasm-frame-info"
37 |
38 | // TODO: wasm64
39 | // TODO: Emit TargetOpcode::CFI_INSTRUCTION instructions
40 |
41 | /// We need a base pointer in the case of having items on the stack that
42 | /// require stricter alignment than the stack pointer itself. Because we need
43 | /// to shift the stack pointer by some unknown amount to force the alignment,
44 | /// we need to record the value of the stack pointer on entry to the function.
45 | bool WebAssemblyFrameLowering::hasBP(const MachineFunction &MF) const {
46 |   const auto *RegInfo =
47 |       MF.getSubtarget<WebAssemblySubtarget>().getRegisterInfo();
48 |   return RegInfo->needsStackRealignment(MF);
49 | }
50 |
51 | /// Return true if the specified function should have a dedicated frame pointer
52 | /// register.
53 | bool WebAssemblyFrameLowering::hasFP(const MachineFunction &MF) const {
54 |   const MachineFrameInfo &MFI = MF.getFrameInfo();
55 |
56 |   // When we have var-sized objects, we move the stack pointer by an unknown
57 |   // amount, and need to emit a frame pointer to restore the stack to where we
58 |   // were on function entry.
59 |   // If we already need a base pointer, we use that to fix up the stack pointer.
60 |   // If there are no fixed-size objects, we would have no use of a frame
61 |   // pointer, and thus should not emit one.
62 |   bool HasFixedSizedObjects = MFI.getStackSize() > 0;
63 |   bool NeedsFixedReference = !hasBP(MF) || HasFixedSizedObjects;
64 |
65 |   return MFI.isFrameAddressTaken() ||
66 |          (MFI.hasVarSizedObjects() && NeedsFixedReference) ||
67 |          MFI.hasStackMap() || MFI.hasPatchPoint();
68 | }
69 |
70 | /// Under normal circumstances, when a frame pointer is not required, we reserve
71 | /// argument space for call sites in the function immediately on entry to the
72 | /// current function. This eliminates the need for add/sub sp brackets around
73 | /// call sites. Returns true if the call frame is included as part of the stack
74 | /// frame.
75 | bool WebAssemblyFrameLowering::hasReservedCallFrame(
76 |     const MachineFunction &MF) const {
77 |   return !MF.getFrameInfo().hasVarSizedObjects();
78 | }
79 |
80 | // Returns true if this function needs a local user-space stack pointer for its
81 | // local frame (not for exception handling).
82 | bool WebAssemblyFrameLowering::needsSPForLocalFrame(
83 |     const MachineFunction &MF) const {
84 |   auto &MFI = MF.getFrameInfo();
85 |   return MFI.getStackSize() || MFI.adjustsStack() || hasFP(MF);
86 | }
87 |
88 | // In functions with EH pads, we need to make a copy of the value of
89 | // __stack_pointer global in SP32 register, in order to use it when restoring
90 | // __stack_pointer after an exception is caught.
91 | bool WebAssemblyFrameLowering::needsPrologForEH(
92 |     const MachineFunction &MF) const {
93 |   auto EHType = MF.getTarget().getMCAsmInfo()->getExceptionHandlingType();
94 |   return EHType == ExceptionHandling::Wasm &&
95 |          MF.getFunction().hasPersonalityFn() && MF.getFrameInfo().hasCalls();
96 | }
97 |
98 | /// Returns true if this function needs a local user-space stack pointer.
99 | /// Unlike a machine stack pointer, the wasm user stack pointer is a global
100 | /// variable, so it is loaded into a register in the prolog.
101 | bool WebAssemblyFrameLowering::needsSP(const MachineFunction &MF) const {
102 |   return needsSPForLocalFrame(MF) || needsPrologForEH(MF);
103 | }
104 |
105 | /// Returns true if the local user-space stack pointer needs to be written back
106 | /// to __stack_pointer global by this function (this is not meaningful if
107 | /// needsSP is false). If false, the stack red zone can be used and only a local
108 | /// SP is needed.
109 | bool WebAssemblyFrameLowering::needsSPWriteback(
110 |     const MachineFunction &MF) const {
111 |   auto &MFI = MF.getFrameInfo();
112 |   assert(needsSP(MF));
113 |   // When we don't need a local stack pointer for its local frame but only to
114 |   // support EH, we don't need to write SP back in the epilog, because we don't
115 |   // bump down the stack pointer in the prolog. We need to write SP back in the
116 |   // epilog only if
117 |   // 1. We need SP not only for EH support but also because we actually use
118 |   //    stack or we have a frame address taken.
119 |   // 2. We cannot use the red zone.
120 |   bool CanUseRedZone = MFI.getStackSize() <= RedZoneSize && !MFI.hasCalls() &&
121 |                        !MF.getFunction().hasFnAttribute(Attribute::NoRedZone);
122 |   return needsSPForLocalFrame(MF) && !CanUseRedZone;
123 | }
124 |
125 | void WebAssemblyFrameLowering::writeSPToGlobal(
126 |     unsigned SrcReg, MachineFunction &MF, MachineBasicBlock &MBB,
127 |     MachineBasicBlock::iterator &InsertStore, const DebugLoc &DL) const {
128 |   const auto *TII = MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
129 |
130 |   const char *ES = "__stack_pointer";
131 |   auto *SPSymbol = MF.createExternalSymbolName(ES);
132 |   BuildMI(MBB, InsertStore, DL, TII->get(WebAssembly::GLOBAL_SET_I32))
133 |       .addExternalSymbol(SPSymbol)
134 |       .addReg(SrcReg);
135 | }
136 |
137 | MachineBasicBlock::iterator
138 | WebAssemblyFrameLowering::eliminateCallFramePseudoInstr(
139 |     MachineFunction &MF, MachineBasicBlock &MBB,
140 |     MachineBasicBlock::iterator I) const {
141 |   assert(!I->getOperand(0).getImm() && (hasFP(MF) || hasBP(MF)) &&
142 |          "Call frame pseudos should only be used for dynamic stack adjustment");
143 |   const auto *TII = MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
144 |   if (I->getOpcode() == TII->getCallFrameDestroyOpcode() &&
145 |       needsSPWriteback(MF)) {
146 |     DebugLoc DL = I->getDebugLoc();
147 |     writeSPToGlobal(WebAssembly::SP32, MF, MBB, I, DL);
148 |   }
149 |   return MBB.erase(I);
150 | }
151 |
152 | void WebAssemblyFrameLowering::emitPrologue(MachineFunction &MF,
153 |                                             MachineBasicBlock &MBB) const {
154 |   // TODO: Do ".setMIFlag(MachineInstr::FrameSetup)" on emitted instructions
155 |   auto &MFI = MF.getFrameInfo();
156 |   assert(MFI.getCalleeSavedInfo().empty() &&
157 |          "WebAssembly should not have callee-saved registers");
158 |
159 |   if (!needsSP(MF))
160 |     return;
161 |   uint64_t StackSize = MFI.getStackSize();
162 |
163 |   const auto *TII = MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
164 |   auto &MRI = MF.getRegInfo();
165 |
166 |   auto InsertPt = MBB.begin();
167 |   while (InsertPt != MBB.end() && WebAssembly::isArgument(*InsertPt))
168 |     ++InsertPt;
169 |   DebugLoc DL;
170 |
171 |   const TargetRegisterClass *PtrRC =
172 |       MRI.getTargetRegisterInfo()->getPointerRegClass(MF);
173 |   unsigned SPReg = WebAssembly::SP32;
174 |   if (StackSize)
175 |     SPReg = MRI.createVirtualRegister(PtrRC);
176 |
177 |   const char *ES = "__stack_pointer";
178 |   auto *SPSymbol = MF.createExternalSymbolName(ES);
179 |   BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::GLOBAL_GET_I32), SPReg)
180 |       .addExternalSymbol(SPSymbol);
181 |
182 |   bool HasBP = hasBP(MF);
183 |   if (HasBP) {
184 |     auto FI = MF.getInfo<WebAssemblyFunctionInfo>();
185 |     unsigned BasePtr = MRI.createVirtualRegister(PtrRC);
186 |     FI->setBasePointerVreg(BasePtr);
187 |     BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::COPY), BasePtr)
188 |         .addReg(SPReg);
189 |   }
190 |   if (StackSize) {
191 |     // Subtract the frame size
192 |     unsigned OffsetReg = MRI.createVirtualRegister(PtrRC);
193 |     BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::CONST_I32), OffsetReg)
194 |         .addImm(StackSize);
195 |     BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::SUB_I32),
196 |             WebAssembly::SP32)
197 |         .addReg(SPReg)
198 |         .addReg(OffsetReg);
199 |   }
200 |   if (HasBP) {
201 |     unsigned BitmaskReg = MRI.createVirtualRegister(PtrRC);
202 |     unsigned Alignment = MFI.getMaxAlignment();
203 |     assert((1u << countTrailingZeros(Alignment)) == Alignment &&
204 |            "Alignment must be a power of 2");
205 |     BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::CONST_I32), BitmaskReg)
206 |         .addImm((int)~(Alignment - 1));
207 |     BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::AND_I32),
208 |             WebAssembly::SP32)
209 |         .addReg(WebAssembly::SP32)
210 |         .addReg(BitmaskReg);
211 |   }
212 |   if (hasFP(MF)) {
213 |     // Unlike most conventional targets (where FP points to the saved FP),
214 |     // FP points to the bottom of the fixed-size locals, so we can use positive
215 |     // offsets in load/store instructions.
216 |     BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::COPY), WebAssembly::FP32)
217 |         .addReg(WebAssembly::SP32);
218 |   }
219 |   if (StackSize && needsSPWriteback(MF)) {
220 |     writeSPToGlobal(WebAssembly::SP32, MF, MBB, InsertPt, DL);
221 |   }
222 | }
223 |
224 | void WebAssemblyFrameLowering::emitEpilogue(MachineFunction &MF,
225 |                                             MachineBasicBlock &MBB) const {
226 |   uint64_t StackSize = MF.getFrameInfo().getStackSize();
227 |   if (!needsSP(MF) || !needsSPWriteback(MF))
228 |     return;
229 |   const auto *TII = MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
230 |   auto &MRI = MF.getRegInfo();
231 |   auto InsertPt = MBB.getFirstTerminator();
232 |   DebugLoc DL;
233 |
234 |   if (InsertPt != MBB.end())
235 |     DL = InsertPt->getDebugLoc();
236 |
237 |   // Restore the stack pointer. If we had fixed-size locals, add the offset
238 |   // subtracted in the prolog.
239 |   unsigned SPReg = 0;
240 |   if (hasBP(MF)) {
241 |     auto FI = MF.getInfo<WebAssemblyFunctionInfo>();
242 |     SPReg = FI->getBasePointerVreg();
243 |   } else if (StackSize) {
244 |     const TargetRegisterClass *PtrRC =
245 |         MRI.getTargetRegisterInfo()->getPointerRegClass(MF);
246 |     unsigned OffsetReg = MRI.createVirtualRegister(PtrRC);
247 |     BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::CONST_I32), OffsetReg)
248 |         .addImm(StackSize);
249 |     // In the epilog we don't need to write the result back to the SP32 physreg
250 |     // because it won't be used again. We can use a stackified register instead.
251 |     SPReg = MRI.createVirtualRegister(PtrRC);
252 |     BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::ADD_I32), SPReg)
253 |         .addReg(hasFP(MF) ? WebAssembly::FP32 : WebAssembly::SP32)
254 |         .addReg(OffsetReg);
255 |   } else {
256 |     SPReg = hasFP(MF) ? WebAssembly::FP32 : WebAssembly::SP32;
257 |   }
258 |
259 |   writeSPToGlobal(SPReg, MF, MBB, InsertPt, DL);
260 | }
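
The analyzer's path continues into llvm/Support/MathExtras.h, listed below: with the default ZB_Width argument, countTrailingZeros(0u) dispatches to detail::TrailingZerosCounter<T, 4>::count, which returns 32 for a zero input (source line 80 below), and that value feeds the shift flagged above. A short sketch of the triggering call, assuming the header as listed:

#include "llvm/Support/MathExtras.h"

int main() {
  // ZB_Width is the default ZeroBehavior, so a zero input returns 32.
  unsigned Count = llvm::countTrailingZeros(0u);
  // 1u << Count would shift a 32-bit unsigned by its full width: undefined.
  return static_cast<int>(Count);
}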
1 | //===-- llvm/Support/MathExtras.h - Useful math functions -------*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file contains some functions that are useful for math stuff. |
10 | // |
11 | //===----------------------------------------------------------------------===// |
12 | |
13 | #ifndef LLVM_SUPPORT_MATHEXTRAS_H |
14 | #define LLVM_SUPPORT_MATHEXTRAS_H |
15 | |
16 | #include "llvm/Support/Compiler.h" |
17 | #include "llvm/Support/SwapByteOrder.h" |
18 | #include <algorithm> |
19 | #include <cassert> |
20 | #include <climits> |
21 | #include <cstring> |
22 | #include <limits> |
23 | #include <type_traits> |
24 | |
25 | #ifdef __ANDROID_NDK__ |
26 | #include <android/api-level.h> |
27 | #endif |
28 | |
29 | #ifdef _MSC_VER |
30 | // Declare these intrinsics manually rather than including intrin.h. It's very
31 | // expensive, and MathExtras.h is popular. |
32 | // #include <intrin.h> |
33 | extern "C" { |
34 | unsigned char _BitScanForward(unsigned long *_Index, unsigned long _Mask); |
35 | unsigned char _BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask); |
36 | unsigned char _BitScanReverse(unsigned long *_Index, unsigned long _Mask); |
37 | unsigned char _BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask); |
38 | } |
39 | #endif |
40 | |
41 | namespace llvm { |
42 | /// The behavior an operation has on an input of 0. |
43 | enum ZeroBehavior { |
44 |   /// The returned value is undefined.
45 |   ZB_Undefined,
46 |   /// The returned value is numeric_limits<T>::max()
47 |   ZB_Max,
48 |   /// The returned value is numeric_limits<T>::digits
49 |   ZB_Width
50 | }; |
51 | |
52 | namespace detail { |
53 | template <typename T, std::size_t SizeOfT> struct TrailingZerosCounter { |
54 |   static unsigned count(T Val, ZeroBehavior) {
55 |     if (!Val)
56 |       return std::numeric_limits<T>::digits;
57 |     if (Val & 0x1)
58 |       return 0;
59 |
60 |     // Bisection method.
61 |     unsigned ZeroBits = 0;
62 |     T Shift = std::numeric_limits<T>::digits >> 1;
63 |     T Mask = std::numeric_limits<T>::max() >> Shift;
64 |     while (Shift) {
65 |       if ((Val & Mask) == 0) {
66 |         Val >>= Shift;
67 |         ZeroBits |= Shift;
68 |       }
69 |       Shift >>= 1;
70 |       Mask >>= Shift;
71 |     }
72 |     return ZeroBits;
73 |   }
74 | }; |
75 | |
76 | #if __GNUC__ >= 4 || defined(_MSC_VER)
77 | template <typename T> struct TrailingZerosCounter<T, 4> { |
78 |   static unsigned count(T Val, ZeroBehavior ZB) {
79 |     if (ZB != ZB_Undefined && Val == 0)
80 |       return 32;
81 |
82 | #if __has_builtin(__builtin_ctz) || LLVM_GNUC_PREREQ(4, 0, 0)
83 |     return __builtin_ctz(Val);
84 | #elif defined(_MSC_VER)
85 |     unsigned long Index;
86 |     _BitScanForward(&Index, Val);
87 |     return Index;
88 | #endif
89 |   }
90 | }; |
91 | |
92 | #if !defined(_MSC_VER) || defined(_M_X64) |
93 | template <typename T> struct TrailingZerosCounter<T, 8> { |
94 |   static unsigned count(T Val, ZeroBehavior ZB) {
95 |     if (ZB != ZB_Undefined && Val == 0)
96 |       return 64;
97 |
98 | #if __has_builtin(__builtin_ctzll) || LLVM_GNUC_PREREQ(4, 0, 0)
99 |     return __builtin_ctzll(Val);
100 | #elif defined(_MSC_VER)
101 |     unsigned long Index;
102 |     _BitScanForward64(&Index, Val);
103 |     return Index;
104 | #endif
105 |   }
106 | }; |
107 | #endif |
108 | #endif |
109 | } // namespace detail |
110 | |
111 | /// Count number of 0's from the least significant bit to the most |
112 | /// stopping at the first 1. |
113 | /// |
114 | /// Only unsigned integral types are allowed. |
115 | /// |
116 | /// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are |
117 | /// valid arguments. |
118 | template <typename T> |
119 | unsigned countTrailingZeros(T Val, ZeroBehavior ZB = ZB_Width) { |
120 |   static_assert(std::numeric_limits<T>::is_integer &&
121 |                     !std::numeric_limits<T>::is_signed,
122 |                 "Only unsigned integral types are allowed.");
123 |   return llvm::detail::TrailingZerosCounter<T, sizeof(T)>::count(Val, ZB);
124 | } |
125 | |
126 | namespace detail { |
127 | template <typename T, std::size_t SizeOfT> struct LeadingZerosCounter { |
128 |   static unsigned count(T Val, ZeroBehavior) {
129 |     if (!Val)
130 |       return std::numeric_limits<T>::digits;
131 |
132 |     // Bisection method.
133 |     unsigned ZeroBits = 0;
134 |     for (T Shift = std::numeric_limits<T>::digits >> 1; Shift; Shift >>= 1) {
135 |       T Tmp = Val >> Shift;
136 |       if (Tmp)
137 |         Val = Tmp;
138 |       else
139 |         ZeroBits |= Shift;
140 |     }
141 |     return ZeroBits;
142 |   }
143 | }; |
144 | |
145 | #if __GNUC__ >= 4 || defined(_MSC_VER)
146 | template <typename T> struct LeadingZerosCounter<T, 4> { |
147 |   static unsigned count(T Val, ZeroBehavior ZB) {
148 |     if (ZB != ZB_Undefined && Val == 0)
149 |       return 32;
150 |
151 | #if __has_builtin(__builtin_clz) || LLVM_GNUC_PREREQ(4, 0, 0)
152 |     return __builtin_clz(Val);
153 | #elif defined(_MSC_VER)
154 |     unsigned long Index;
155 |     _BitScanReverse(&Index, Val);
156 |     return Index ^ 31;
157 | #endif
158 |   }
159 | }; |
160 | |
161 | #if !defined(_MSC_VER) || defined(_M_X64) |
162 | template <typename T> struct LeadingZerosCounter<T, 8> { |
163 |   static unsigned count(T Val, ZeroBehavior ZB) {
164 |     if (ZB != ZB_Undefined && Val == 0)
165 |       return 64;
166 |
167 | #if __has_builtin(__builtin_clzll) || LLVM_GNUC_PREREQ(4, 0, 0)
168 |     return __builtin_clzll(Val);
169 | #elif defined(_MSC_VER)
170 |     unsigned long Index;
171 |     _BitScanReverse64(&Index, Val);
172 |     return Index ^ 63;
173 | #endif
174 |   }
175 | }; |
176 | #endif |
177 | #endif |
178 | } // namespace detail |
179 | |
180 | /// Count number of 0's from the most significant bit to the least |
181 | /// stopping at the first 1. |
182 | /// |
183 | /// Only unsigned integral types are allowed. |
184 | /// |
185 | /// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are |
186 | /// valid arguments. |
187 | template <typename T> |
188 | unsigned countLeadingZeros(T Val, ZeroBehavior ZB = ZB_Width) { |
189 |   static_assert(std::numeric_limits<T>::is_integer &&
190 |                     !std::numeric_limits<T>::is_signed,
191 |                 "Only unsigned integral types are allowed.");
192 |   return llvm::detail::LeadingZerosCounter<T, sizeof(T)>::count(Val, ZB);
193 | } |
194 | |
195 | /// Get the index of the first set bit starting from the least |
196 | /// significant bit. |
197 | /// |
198 | /// Only unsigned integral types are allowed. |
199 | /// |
200 | /// \param ZB the behavior on an input of 0. Only ZB_Max and ZB_Undefined are |
201 | /// valid arguments. |
202 | template <typename T> T findFirstSet(T Val, ZeroBehavior ZB = ZB_Max) { |
203 |   if (ZB == ZB_Max && Val == 0)
204 |     return std::numeric_limits<T>::max();
205 |
206 |   return countTrailingZeros(Val, ZB_Undefined);
207 | } |
208 | |
209 | /// Create a bitmask with the N right-most bits set to 1, and all other |
210 | /// bits set to 0. Only unsigned types are allowed. |
211 | template <typename T> T maskTrailingOnes(unsigned N) { |
212 |   static_assert(std::is_unsigned<T>::value, "Invalid type!");
213 |   const unsigned Bits = CHAR_BIT * sizeof(T);
214 |   assert(N <= Bits && "Invalid bit index");
215 |   return N == 0 ? 0 : (T(-1) >> (Bits - N));
216 | } |
217 | |
218 | /// Create a bitmask with the N left-most bits set to 1, and all other |
219 | /// bits set to 0. Only unsigned types are allowed. |
220 | template <typename T> T maskLeadingOnes(unsigned N) { |
221 |   return ~maskTrailingOnes<T>(CHAR_BIT * sizeof(T) - N);
222 | } |
223 | |
224 | /// Create a bitmask with the N right-most bits set to 0, and all other |
225 | /// bits set to 1. Only unsigned types are allowed. |
226 | template <typename T> T maskTrailingZeros(unsigned N) { |
227 |   return maskLeadingOnes<T>(CHAR_BIT * sizeof(T) - N);
228 | } |
229 | |
230 | /// Create a bitmask with the N left-most bits set to 0, and all other |
231 | /// bits set to 1. Only unsigned types are allowed. |
232 | template <typename T> T maskLeadingZeros(unsigned N) { |
233 |   return maskTrailingOnes<T>(CHAR_BIT * sizeof(T) - N);
234 | } |
235 | |
236 | /// Get the index of the last set bit starting from the least |
237 | /// significant bit. |
238 | /// |
239 | /// Only unsigned integral types are allowed. |
240 | /// |
241 | /// \param ZB the behavior on an input of 0. Only ZB_Max and ZB_Undefined are |
242 | /// valid arguments. |
243 | template <typename T> T findLastSet(T Val, ZeroBehavior ZB = ZB_Max) { |
244 |   if (ZB == ZB_Max && Val == 0)
245 |     return std::numeric_limits<T>::max();
246 |
247 |   // Use ^ instead of - because both gcc and llvm can remove the associated ^
248 |   // in the __builtin_clz intrinsic on x86.
249 |   return countLeadingZeros(Val, ZB_Undefined) ^
250 |          (std::numeric_limits<T>::digits - 1);
251 | } |
252 | |
253 | /// Macro compressed bit reversal table for 256 bits. |
254 | /// |
255 | /// http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable |
256 | static const unsigned char BitReverseTable256[256] = { |
257 | #define R2(n) n, n + 2 * 64, n + 1 * 64, n + 3 * 64 |
258 | #define R4(n) R2(n), R2(n + 2 * 16), R2(n + 1 * 16), R2(n + 3 * 16) |
259 | #define R6(n) R4(n), R4(n + 2 * 4), R4(n + 1 * 4), R4(n + 3 * 4) |
260 |   R6(0), R6(2), R6(1), R6(3)
261 | #undef R2 |
262 | #undef R4 |
263 | #undef R6 |
264 | }; |
265 | |
266 | /// Reverse the bits in \p Val. |
267 | template <typename T> |
268 | T reverseBits(T Val) { |
269 |   unsigned char in[sizeof(Val)];
270 |   unsigned char out[sizeof(Val)];
271 |   std::memcpy(in, &Val, sizeof(Val));
272 |   for (unsigned i = 0; i < sizeof(Val); ++i)
273 |     out[(sizeof(Val) - i) - 1] = BitReverseTable256[in[i]];
274 |   std::memcpy(&Val, out, sizeof(Val));
275 |   return Val;
276 | } |
277 | |
278 | // NOTE: The following support functions use the _32/_64 extensions instead of |
279 | // type overloading so that signed and unsigned integers can be used without |
280 | // ambiguity. |
281 | |
282 | /// Return the high 32 bits of a 64 bit value. |
283 | constexpr inline uint32_t Hi_32(uint64_t Value) { |
284 |   return static_cast<uint32_t>(Value >> 32);
285 | } |
286 | |
287 | /// Return the low 32 bits of a 64 bit value. |
288 | constexpr inline uint32_t Lo_32(uint64_t Value) { |
289 |   return static_cast<uint32_t>(Value);
290 | } |
291 | |
292 | /// Make a 64-bit integer from a high / low pair of 32-bit integers. |
293 | constexpr inline uint64_t Make_64(uint32_t High, uint32_t Low) { |
294 |   return ((uint64_t)High << 32) | (uint64_t)Low;
295 | } |
296 | |
297 | /// Checks if an integer fits into the given bit width. |
298 | template <unsigned N> constexpr inline bool isInt(int64_t x) { |
299 |   return N >= 64 || (-(INT64_C(1)<<(N-1)) <= x && x < (INT64_C(1)<<(N-1)));
300 | }
301 | // Template specializations to get better code for common cases.
302 | template <> constexpr inline bool isInt<8>(int64_t x) {
303 |   return static_cast<int8_t>(x) == x;
304 | }
305 | template <> constexpr inline bool isInt<16>(int64_t x) {
306 |   return static_cast<int16_t>(x) == x;
307 | }
308 | template <> constexpr inline bool isInt<32>(int64_t x) {
309 |   return static_cast<int32_t>(x) == x;
310 | } |
311 | |
312 | /// Checks if a signed integer is an N bit number shifted left by S. |
313 | template <unsigned N, unsigned S> |
314 | constexpr inline bool isShiftedInt(int64_t x) { |
315 |   static_assert(
316 |       N > 0, "isShiftedInt<0> doesn't make sense (refers to a 0-bit number).");
317 |   static_assert(N + S <= 64, "isShiftedInt<N, S> with N + S > 64 is too wide.");
318 |   return isInt<N + S>(x) && (x % (UINT64_C(1) << S) == 0);
319 | } |
320 | |
321 | /// Checks if an unsigned integer fits into the given bit width. |
322 | /// |
323 | /// This is written as two functions rather than as simply |
324 | /// |
325 | /// return N >= 64 || X < (UINT64_C(1) << N); |
326 | /// |
327 | /// to keep MSVC from (incorrectly) warning on isUInt<64> that we're shifting |
328 | /// left too many places. |
329 | template <unsigned N> |
330 | constexpr inline typename std::enable_if<(N < 64), bool>::type |
331 | isUInt(uint64_t X) { |
332 | static_assert(N > 0, "isUInt<0> doesn't make sense"); |
333 | return X < (UINT64_C(1)1UL << (N)); |
334 | } |
335 | template <unsigned N> |
336 | constexpr inline typename std::enable_if<N >= 64, bool>::type |
337 | isUInt(uint64_t X) { |
338 | return true; |
339 | } |
340 | |
341 | // Template specializations to get better code for common cases. |
342 | template <> constexpr inline bool isUInt<8>(uint64_t x) { |
343 |   return static_cast<uint8_t>(x) == x;
344 | }
345 | template <> constexpr inline bool isUInt<16>(uint64_t x) {
346 |   return static_cast<uint16_t>(x) == x;
347 | }
348 | template <> constexpr inline bool isUInt<32>(uint64_t x) {
349 |   return static_cast<uint32_t>(x) == x;
350 | } |
351 | |
352 | /// Checks if an unsigned integer is an N bit number shifted left by S.
353 | template <unsigned N, unsigned S>
354 | constexpr inline bool isShiftedUInt(uint64_t x) {
355 |   static_assert(
356 |       N > 0, "isShiftedUInt<0> doesn't make sense (refers to a 0-bit number)");
357 |   static_assert(N + S <= 64,
358 |                 "isShiftedUInt<N, S> with N + S > 64 is too wide.");
359 |   // Per the two static_asserts above, S must be strictly less than 64. So
360 |   // 1 << S is not undefined behavior.
361 |   return isUInt<N + S>(x) && (x % (UINT64_C(1) << S) == 0);
362 | } |
363 | |
364 | /// Gets the maximum value for a N-bit unsigned integer. |
365 | inline uint64_t maxUIntN(uint64_t N) { |
366 |   assert(N > 0 && N <= 64 && "integer width out of range");
367 |
368 |   // uint64_t(1) << 64 is undefined behavior, so we can't do
369 |   //   (uint64_t(1) << N) - 1
370 |   // without checking first that N != 64. But this works and doesn't have a
371 |   // branch.
372 |   return UINT64_MAX >> (64 - N);
373 | }
374 |
375 | /// Gets the minimum value for a N-bit signed integer.
376 | inline int64_t minIntN(int64_t N) {
377 |   assert(N > 0 && N <= 64 && "integer width out of range");
378 |
379 |   return -(UINT64_C(1)<<(N-1));
380 | }
381 |
382 | /// Gets the maximum value for a N-bit signed integer.
383 | inline int64_t maxIntN(int64_t N) {
384 |   assert(N > 0 && N <= 64 && "integer width out of range");
385 |
386 |   // This relies on two's complement wraparound when N == 64, so we convert to
387 |   // int64_t only at the very end to avoid UB.
388 |   return (UINT64_C(1) << (N - 1)) - 1;
389 | } |
390 | |
391 | /// Checks if an unsigned integer fits into the given (dynamic) bit width. |
392 | inline bool isUIntN(unsigned N, uint64_t x) { |
393 |   return N >= 64 || x <= maxUIntN(N);
394 | }
395 |
396 | /// Checks if a signed integer fits into the given (dynamic) bit width.
397 | inline bool isIntN(unsigned N, int64_t x) {
398 |   return N >= 64 || (minIntN(N) <= x && x <= maxIntN(N));
399 | } |
400 | |
401 | /// Return true if the argument is a non-empty sequence of ones starting at the |
402 | /// least significant bit with the remainder zero (32 bit version). |
403 | /// Ex. isMask_32(0x0000FFFFU) == true. |
404 | constexpr inline bool isMask_32(uint32_t Value) { |
405 |   return Value && ((Value + 1) & Value) == 0;
406 | }
407 |
408 | /// Return true if the argument is a non-empty sequence of ones starting at the
409 | /// least significant bit with the remainder zero (64 bit version).
410 | constexpr inline bool isMask_64(uint64_t Value) {
411 |   return Value && ((Value + 1) & Value) == 0;
412 | }
413 |
414 | /// Return true if the argument contains a non-empty sequence of ones with the
415 | /// remainder zero (32 bit version.) Ex. isShiftedMask_32(0x0000FF00U) == true.
416 | constexpr inline bool isShiftedMask_32(uint32_t Value) {
417 |   return Value && isMask_32((Value - 1) | Value);
418 | }
419 |
420 | /// Return true if the argument contains a non-empty sequence of ones with the
421 | /// remainder zero (64 bit version.)
422 | constexpr inline bool isShiftedMask_64(uint64_t Value) {
423 |   return Value && isMask_64((Value - 1) | Value);
424 | }
425 |
426 | /// Return true if the argument is a power of two > 0.
427 | /// Ex. isPowerOf2_32(0x00100000U) == true (32 bit edition.)
428 | constexpr inline bool isPowerOf2_32(uint32_t Value) {
429 |   return Value && !(Value & (Value - 1));
430 | }
431 |
432 | /// Return true if the argument is a power of two > 0 (64 bit edition.)
433 | constexpr inline bool isPowerOf2_64(uint64_t Value) {
434 |   return Value && !(Value & (Value - 1));
435 | } |
436 | |
437 | /// Return a byte-swapped representation of the 16-bit argument. |
438 | inline uint16_t ByteSwap_16(uint16_t Value) { |
439 |   return sys::SwapByteOrder_16(Value);
440 | }
441 |
442 | /// Return a byte-swapped representation of the 32-bit argument.
443 | inline uint32_t ByteSwap_32(uint32_t Value) {
444 |   return sys::SwapByteOrder_32(Value);
445 | }
446 |
447 | /// Return a byte-swapped representation of the 64-bit argument.
448 | inline uint64_t ByteSwap_64(uint64_t Value) {
449 |   return sys::SwapByteOrder_64(Value);
450 | } |
451 | |
452 | /// Count the number of ones from the most significant bit to the first |
453 | /// zero bit. |
454 | /// |
455 | /// Ex. countLeadingOnes(0xFF0FFF00) == 8. |
456 | /// Only unsigned integral types are allowed. |
457 | /// |
458 | /// \param ZB the behavior on an input of all ones. Only ZB_Width and |
459 | /// ZB_Undefined are valid arguments. |
460 | template <typename T> |
461 | unsigned countLeadingOnes(T Value, ZeroBehavior ZB = ZB_Width) { |
462 |   static_assert(std::numeric_limits<T>::is_integer &&
463 |                     !std::numeric_limits<T>::is_signed,
464 |                 "Only unsigned integral types are allowed.");
465 |   return countLeadingZeros<T>(~Value, ZB);
466 | } |
467 | |
468 | /// Count the number of ones from the least significant bit to the first |
469 | /// zero bit. |
470 | /// |
471 | /// Ex. countTrailingOnes(0x00FF00FF) == 8. |
472 | /// Only unsigned integral types are allowed. |
473 | /// |
474 | /// \param ZB the behavior on an input of all ones. Only ZB_Width and |
475 | /// ZB_Undefined are valid arguments. |
476 | template <typename T> |
477 | unsigned countTrailingOnes(T Value, ZeroBehavior ZB = ZB_Width) { |
478 |   static_assert(std::numeric_limits<T>::is_integer &&
479 |                     !std::numeric_limits<T>::is_signed,
480 |                 "Only unsigned integral types are allowed.");
481 |   return countTrailingZeros<T>(~Value, ZB);
482 | } |
483 | |
484 | namespace detail { |
485 | template <typename T, std::size_t SizeOfT> struct PopulationCounter { |
486 |   static unsigned count(T Value) {
487 |     // Generic version, forward to 32 bits.
488 |     static_assert(SizeOfT <= 4, "Not implemented!");
489 | #if __GNUC__ >= 4
490 |     return __builtin_popcount(Value);
491 | #else
492 |     uint32_t v = Value;
493 |     v = v - ((v >> 1) & 0x55555555);
494 |     v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
495 |     return ((v + (v >> 4) & 0xF0F0F0F) * 0x1010101) >> 24;
496 | #endif
497 |   }
498 | };
499 |
500 | template <typename T> struct PopulationCounter<T, 8> {
501 |   static unsigned count(T Value) {
502 | #if __GNUC__ >= 4
503 |     return __builtin_popcountll(Value);
504 | #else
505 |     uint64_t v = Value;
506 |     v = v - ((v >> 1) & 0x5555555555555555ULL);
507 |     v = (v & 0x3333333333333333ULL) + ((v >> 2) & 0x3333333333333333ULL);
508 |     v = (v + (v >> 4)) & 0x0F0F0F0F0F0F0F0FULL;
509 |     return unsigned((uint64_t)(v * 0x0101010101010101ULL) >> 56);
510 | #endif
511 |   }
512 | }; |
513 | } // namespace detail |
514 | |
515 | /// Count the number of set bits in a value. |
516 | /// Ex. countPopulation(0xF000F000) = 8 |
517 | /// Returns 0 if the word is zero. |
518 | template <typename T> |
519 | inline unsigned countPopulation(T Value) { |
520 |   static_assert(std::numeric_limits<T>::is_integer &&
521 |                     !std::numeric_limits<T>::is_signed,
522 |                 "Only unsigned integral types are allowed.");
523 |   return detail::PopulationCounter<T, sizeof(T)>::count(Value);
524 | } |
525 | |
526 | /// Return the log base 2 of the specified value. |
527 | inline double Log2(double Value) { |
528 | #if defined(__ANDROID_API__) && __ANDROID_API__ < 18 |
529 |   return __builtin_log(Value) / __builtin_log(2.0);
530 | #else
531 |   return log2(Value);
532 | #endif |
533 | } |
534 | |
535 | /// Return the floor log base 2 of the specified value, -1 if the value is zero. |
536 | /// (32 bit edition.) |
537 | /// Ex. Log2_32(32) == 5, Log2_32(1) == 0, Log2_32(0) == -1, Log2_32(6) == 2 |
538 | inline unsigned Log2_32(uint32_t Value) { |
539 |   return 31 - countLeadingZeros(Value);
540 | }
541 |
542 | /// Return the floor log base 2 of the specified value, -1 if the value is zero.
543 | /// (64 bit edition.)
544 | inline unsigned Log2_64(uint64_t Value) {
545 |   return 63 - countLeadingZeros(Value);
546 | }
547 |
548 | /// Return the ceil log base 2 of the specified value, 32 if the value is zero.
549 | /// (32 bit edition).
550 | /// Ex. Log2_32_Ceil(32) == 5, Log2_32_Ceil(1) == 0, Log2_32_Ceil(6) == 3
551 | inline unsigned Log2_32_Ceil(uint32_t Value) {
552 |   return 32 - countLeadingZeros(Value - 1);
553 | }
554 |
555 | /// Return the ceil log base 2 of the specified value, 64 if the value is zero.
556 | /// (64 bit edition.)
557 | inline unsigned Log2_64_Ceil(uint64_t Value) {
558 |   return 64 - countLeadingZeros(Value - 1);
559 | } |
560 | |
561 | /// Return the greatest common divisor of the values using Euclid's algorithm. |
562 | inline uint64_t GreatestCommonDivisor64(uint64_t A, uint64_t B) { |
563 |   while (B) {
564 |     uint64_t T = B;
565 |     B = A % B;
566 |     A = T;
567 |   }
568 |   return A;
569 | } |
570 | |
571 | /// This function takes a 64-bit integer and returns the bit equivalent double. |
572 | inline double BitsToDouble(uint64_t Bits) { |
573 |   double D;
574 |   static_assert(sizeof(uint64_t) == sizeof(double), "Unexpected type sizes");
575 |   memcpy(&D, &Bits, sizeof(Bits));
576 |   return D;
577 | }
578 |
579 | /// This function takes a 32-bit integer and returns the bit equivalent float.
580 | inline float BitsToFloat(uint32_t Bits) {
581 |   float F;
582 |   static_assert(sizeof(uint32_t) == sizeof(float), "Unexpected type sizes");
583 |   memcpy(&F, &Bits, sizeof(Bits));
584 |   return F;
585 | }
586 |
587 | /// This function takes a double and returns the bit equivalent 64-bit integer.
588 | /// Note that copying doubles around changes the bits of NaNs on some hosts,
589 | /// notably x86, so this routine cannot be used if these bits are needed.
590 | inline uint64_t DoubleToBits(double Double) {
591 |   uint64_t Bits;
592 |   static_assert(sizeof(uint64_t) == sizeof(double), "Unexpected type sizes");
593 |   memcpy(&Bits, &Double, sizeof(Double));
594 |   return Bits;
595 | }
596 |
597 | /// This function takes a float and returns the bit equivalent 32-bit integer.
598 | /// Note that copying floats around changes the bits of NaNs on some hosts,
599 | /// notably x86, so this routine cannot be used if these bits are needed.
600 | inline uint32_t FloatToBits(float Float) {
601 |   uint32_t Bits;
602 |   static_assert(sizeof(uint32_t) == sizeof(float), "Unexpected type sizes");
603 |   memcpy(&Bits, &Float, sizeof(Float));
604 |   return Bits;
605 | } |
606 | |
607 | /// A and B are either alignments or offsets. Return the minimum alignment that |
608 | /// may be assumed after adding the two together. |
609 | constexpr inline uint64_t MinAlign(uint64_t A, uint64_t B) { |
610 |   // The largest power of 2 that divides both A and B.
611 |   //
612 |   // Replace "-Value" by "1+~Value" in the following commented code to avoid
613 |   // MSVC warning C4146
614 |   //   return (A | B) & -(A | B);
615 |   return (A | B) & (1 + ~(A | B));
616 | } |
617 | |
618 | /// Aligns \c Addr to \c Alignment bytes, rounding up. |
619 | /// |
620 | /// Alignment should be a power of two. This method rounds up, so |
621 | /// alignAddr(7, 4) == 8 and alignAddr(8, 4) == 8. |
622 | inline uintptr_t alignAddr(const void *Addr, size_t Alignment) { |
623 |   assert(Alignment && isPowerOf2_64((uint64_t)Alignment) &&
624 |          "Alignment is not a power of two!");
625 |
626 |   assert((uintptr_t)Addr + Alignment - 1 >= (uintptr_t)Addr);
627 |
628 |   return (((uintptr_t)Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1));
629 | } |
630 | |
631 | /// Returns the necessary adjustment for aligning \c Ptr to \c Alignment |
632 | /// bytes, rounding up. |
633 | inline size_t alignmentAdjustment(const void *Ptr, size_t Alignment) { |
634 |   return alignAddr(Ptr, Alignment) - (uintptr_t)Ptr;
635 | } |
636 | |
637 | /// Returns the next power of two (in 64-bits) that is strictly greater than A. |
638 | /// Returns zero on overflow. |
639 | inline uint64_t NextPowerOf2(uint64_t A) { |
640 |   A |= (A >> 1);
641 |   A |= (A >> 2);
642 |   A |= (A >> 4);
643 |   A |= (A >> 8);
644 |   A |= (A >> 16);
645 |   A |= (A >> 32);
646 |   return A + 1;
647 | } |
648 | |
649 | /// Returns the power of two which is less than or equal to the given value. |
650 | /// Essentially, it is a floor operation across the domain of powers of two. |
651 | inline uint64_t PowerOf2Floor(uint64_t A) { |
652 |   if (!A) return 0;
653 |   return 1ull << (63 - countLeadingZeros(A, ZB_Undefined));
654 | } |
655 | |
656 | /// Returns the power of two which is greater than or equal to the given value. |
657 | /// Essentially, it is a ceil operation across the domain of powers of two. |
658 | inline uint64_t PowerOf2Ceil(uint64_t A) { |
659 |   if (!A)
660 |     return 0;
661 |   return NextPowerOf2(A - 1);
662 | } |
663 | |
664 | /// Returns the next integer (mod 2**64) that is greater than or equal to |
665 | /// \p Value and is a multiple of \p Align. \p Align must be non-zero. |
666 | /// |
667 | /// If non-zero \p Skew is specified, the return value will be a minimal |
668 | /// integer that is greater than or equal to \p Value and equal to |
669 | /// \p Align * N + \p Skew for some integer N. If \p Skew is larger than |
670 | /// \p Align, its value is adjusted to '\p Skew mod \p Align'. |
671 | /// |
672 | /// Examples: |
673 | /// \code |
674 | /// alignTo(5, 8) = 8 |
675 | /// alignTo(17, 8) = 24 |
676 | /// alignTo(~0LL, 8) = 0 |
677 | /// alignTo(321, 255) = 510 |
678 | /// |
679 | /// alignTo(5, 8, 7) = 7 |
680 | /// alignTo(17, 8, 1) = 17 |
681 | /// alignTo(~0LL, 8, 3) = 3 |
682 | /// alignTo(321, 255, 42) = 552 |
683 | /// \endcode |
684 | inline uint64_t alignTo(uint64_t Value, uint64_t Align, uint64_t Skew = 0) { |
685 |   assert(Align != 0u && "Align can't be 0.");
686 |   Skew %= Align;
687 |   return (Value + Align - 1 - Skew) / Align * Align + Skew;
688 | } |
689 | |
690 | /// Returns the next integer (mod 2**64) that is greater than or equal to |
691 | /// \p Value and is a multiple of \c Align. \c Align must be non-zero. |
692 | template <uint64_t Align> constexpr inline uint64_t alignTo(uint64_t Value) { |
693 |   static_assert(Align != 0u, "Align must be non-zero");
694 |   return (Value + Align - 1) / Align * Align;
695 | } |
696 | |
697 | /// Returns the integer ceil(Numerator / Denominator). |
698 | inline uint64_t divideCeil(uint64_t Numerator, uint64_t Denominator) { |
699 |   return alignTo(Numerator, Denominator) / Denominator;
700 | } |
701 | |
702 | /// \c alignTo for contexts where a constant expression is required. |
703 | /// \sa alignTo |
704 | /// |
705 | /// \todo FIXME: remove when \c constexpr becomes really \c constexpr |
706 | template <uint64_t Align> |
707 | struct AlignTo { |
708 |   static_assert(Align != 0u, "Align must be non-zero");
709 |   template <uint64_t Value>
710 |   struct from_value {
711 |     static const uint64_t value = (Value + Align - 1) / Align * Align;
712 |   };
713 | }; |
714 | |
715 | /// Returns the largest uint64_t less than or equal to \p Value that is
716 | /// \p Skew mod \p Align. \p Align must be non-zero.
717 | inline uint64_t alignDown(uint64_t Value, uint64_t Align, uint64_t Skew = 0) {
718 |   assert(Align != 0u && "Align can't be 0.");
719 |   Skew %= Align;
720 |   return (Value - Skew) / Align * Align + Skew;
721 | } |
722 | |
723 | /// Returns the offset to the next integer (mod 2**64) that is greater than |
724 | /// or equal to \p Value and is a multiple of \p Align. \p Align must be |
725 | /// non-zero. |
726 | inline uint64_t OffsetToAlignment(uint64_t Value, uint64_t Align) { |
727 |   return alignTo(Value, Align) - Value;
728 | } |
729 | |
730 | /// Sign-extend the number in the bottom B bits of X to a 32-bit integer. |
731 | /// Requires 0 < B <= 32. |
732 | template <unsigned B> constexpr inline int32_t SignExtend32(uint32_t X) { |
733 |   static_assert(B > 0, "Bit width can't be 0.");
734 |   static_assert(B <= 32, "Bit width out of range.");
735 |   return int32_t(X << (32 - B)) >> (32 - B);
736 | } |
737 | |
738 | /// Sign-extend the number in the bottom B bits of X to a 32-bit integer. |
739 | /// Requires 0 < B <= 32.
740 | inline int32_t SignExtend32(uint32_t X, unsigned B) {
741 |   assert(B > 0 && "Bit width can't be 0.");
742 |   assert(B <= 32 && "Bit width out of range.");
743 |   return int32_t(X << (32 - B)) >> (32 - B);
744 | } |
745 | |
746 | /// Sign-extend the number in the bottom B bits of X to a 64-bit integer. |
747 | /// Requires 0 < B <= 64.
748 | template <unsigned B> constexpr inline int64_t SignExtend64(uint64_t x) {
749 |   static_assert(B > 0, "Bit width can't be 0.");
750 |   static_assert(B <= 64, "Bit width out of range.");
751 |   return int64_t(x << (64 - B)) >> (64 - B);
752 | } |
753 | |
754 | /// Sign-extend the number in the bottom B bits of X to a 64-bit integer. |
755 | /// Requires 0 < B <= 64.
756 | inline int64_t SignExtend64(uint64_t X, unsigned B) {
757 |   assert(B > 0 && "Bit width can't be 0.");
758 |   assert(B <= 64 && "Bit width out of range.");
759 |   return int64_t(X << (64 - B)) >> (64 - B);
760 | } |
761 | |
762 | /// Subtract two unsigned integers, X and Y, of type T and return the absolute |
763 | /// value of the result. |
764 | template <typename T> |
765 | typename std::enable_if<std::is_unsigned<T>::value, T>::type |
766 | AbsoluteDifference(T X, T Y) { |
767 |   return std::max(X, Y) - std::min(X, Y);
768 | } |
769 | |
770 | /// Add two unsigned integers, X and Y, of type T. Clamp the result to the |
771 | /// maximum representable value of T on overflow. ResultOverflowed indicates if |
772 | /// the result is larger than the maximum representable value of type T. |
773 | template <typename T> |
774 | typename std::enable_if<std::is_unsigned<T>::value, T>::type |
775 | SaturatingAdd(T X, T Y, bool *ResultOverflowed = nullptr) { |
776 |   bool Dummy;
777 |   bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;
778 |   // Hacker's Delight, p. 29
779 |   T Z = X + Y;
780 |   Overflowed = (Z < X || Z < Y);
781 |   if (Overflowed)
782 |     return std::numeric_limits<T>::max();
783 |   else
784 |     return Z;
785 | } |
786 | |
787 | /// Multiply two unsigned integers, X and Y, of type T. Clamp the result to the |
788 | /// maximum representable value of T on overflow. ResultOverflowed indicates if |
789 | /// the result is larger than the maximum representable value of type T. |
790 | template <typename T> |
791 | typename std::enable_if<std::is_unsigned<T>::value, T>::type |
792 | SaturatingMultiply(T X, T Y, bool *ResultOverflowed = nullptr) { |
793 |   bool Dummy;
794 |   bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;
795 |
796 |   // Hacker's Delight, p. 30 has a different algorithm, but we don't use that
797 |   // because it fails for uint16_t (where multiplication can have undefined
798 |   // behavior due to promotion to int), and requires a division in addition
799 |   // to the multiplication.
800 |
801 |   Overflowed = false;
802 |
803 |   // Log2(Z) would be either Log2Z or Log2Z + 1.
804 |   // Special case: if X or Y is 0, Log2_64 gives -1, and Log2Z
805 |   // will necessarily be less than Log2Max as desired.
806 |   int Log2Z = Log2_64(X) + Log2_64(Y);
807 |   const T Max = std::numeric_limits<T>::max();
808 |   int Log2Max = Log2_64(Max);
809 |   if (Log2Z < Log2Max) {
810 |     return X * Y;
811 |   }
812 |   if (Log2Z > Log2Max) {
813 |     Overflowed = true;
814 |     return Max;
815 |   }
816 |
817 |   // We're going to use the top bit, and maybe overflow one
818 |   // bit past it. Multiply all but the bottom bit then add
819 |   // that on at the end.
820 |   T Z = (X >> 1) * Y;
821 |   if (Z & ~(Max >> 1)) {
822 |     Overflowed = true;
823 |     return Max;
824 |   }
825 |   Z <<= 1;
826 |   if (X & 1)
827 |     return SaturatingAdd(Z, Y, ResultOverflowed);
828 |
829 |   return Z;
830 | } |
831 | |
832 | /// Multiply two unsigned integers, X and Y, and add the unsigned integer, A to |
833 | /// the product. Clamp the result to the maximum representable value of T on |
834 | /// overflow. ResultOverflowed indicates if the result is larger than the |
835 | /// maximum representable value of type T. |
836 | template <typename T> |
837 | typename std::enable_if<std::is_unsigned<T>::value, T>::type |
838 | SaturatingMultiplyAdd(T X, T Y, T A, bool *ResultOverflowed = nullptr) { |
839 |   bool Dummy;
840 |   bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;
841 |
842 |   T Product = SaturatingMultiply(X, Y, &Overflowed);
843 |   if (Overflowed)
844 |     return Product;
845 |
846 |   return SaturatingAdd(A, Product, &Overflowed);
847 | } |
848 | |
849 | /// Use this rather than HUGE_VALF; the latter causes warnings on MSVC. |
850 | extern const float huge_valf; |
851 | } // End llvm namespace |
852 | |
853 | #endif |
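
One way to keep the assertion at WebAssemblyFrameLowering.cpp line 203 well-defined when the alignment is zero is to phrase the power-of-two check without a shift; isPowerOf2_32 from the header above (source line 428) already does that, returning false for 0. A sketch of a possible hardening, not necessarily the fix that landed upstream:

// In WebAssemblyFrameLowering::emitPrologue (sketch of a possible fix):
unsigned Alignment = MFI.getMaxAlignment();
// isPowerOf2_32 rejects 0 without shifting, so the UB path disappears.
assert(isPowerOf2_32(Alignment) && "Alignment must be a power of 2");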