File: | include/llvm/Support/MathExtras.h |
Warning: | line 124, column 3 The result of the left shift is undefined due to shifting by '32', which is greater or equal to the width of type 'unsigned int' |
Press '?' to see keyboard shortcuts
Keyboard shortcuts:
1 | //===-- WebAssemblyFrameLowering.cpp - WebAssembly Frame Lowering ----------==// | |||
2 | // | |||
3 | // The LLVM Compiler Infrastructure | |||
4 | // | |||
5 | // This file is distributed under the University of Illinois Open Source | |||
6 | // License. See LICENSE.TXT for details. | |||
7 | // | |||
8 | //===----------------------------------------------------------------------===// | |||
9 | /// | |||
10 | /// \file | |||
11 | /// This file contains the WebAssembly implementation of | |||
12 | /// TargetFrameLowering class. | |||
13 | /// | |||
14 | /// On WebAssembly, there aren't a lot of things to do here. There are no | |||
15 | /// callee-saved registers to save, and no spill slots. | |||
16 | /// | |||
17 | /// The stack grows downward. | |||
18 | /// | |||
19 | //===----------------------------------------------------------------------===// | |||
20 | ||||
21 | #include "WebAssemblyFrameLowering.h" | |||
22 | #include "MCTargetDesc/WebAssemblyMCTargetDesc.h" | |||
23 | #include "WebAssemblyInstrInfo.h" | |||
24 | #include "WebAssemblyMachineFunctionInfo.h" | |||
25 | #include "WebAssemblySubtarget.h" | |||
26 | #include "WebAssemblyTargetMachine.h" | |||
27 | #include "WebAssemblyUtilities.h" | |||
28 | #include "llvm/CodeGen/MachineFrameInfo.h" | |||
29 | #include "llvm/CodeGen/MachineFunction.h" | |||
30 | #include "llvm/CodeGen/MachineInstrBuilder.h" | |||
31 | #include "llvm/CodeGen/MachineModuleInfoImpls.h" | |||
32 | #include "llvm/CodeGen/MachineRegisterInfo.h" | |||
33 | #include "llvm/MC/MCAsmInfo.h" | |||
34 | #include "llvm/Support/Debug.h" | |||
35 | using namespace llvm; | |||
36 | ||||
// Debug category for -debug-only=; the scraped source had the string literal
// duplicated by macro expansion — restore the single definition.
#define DEBUG_TYPE "wasm-frame-info"
38 | ||||
39 | // TODO: wasm64 | |||
40 | // TODO: Emit TargetOpcode::CFI_INSTRUCTION instructions | |||
41 | ||||
42 | /// We need a base pointer in the case of having items on the stack that | |||
43 | /// require stricter alignment than the stack pointer itself. Because we need | |||
44 | /// to shift the stack pointer by some unknown amount to force the alignment, | |||
45 | /// we need to record the value of the stack pointer on entry to the function. | |||
46 | bool WebAssemblyFrameLowering::hasBP(const MachineFunction &MF) const { | |||
47 | const auto *RegInfo = | |||
48 | MF.getSubtarget<WebAssemblySubtarget>().getRegisterInfo(); | |||
49 | return RegInfo->needsStackRealignment(MF); | |||
50 | } | |||
51 | ||||
52 | /// Return true if the specified function should have a dedicated frame pointer | |||
53 | /// register. | |||
54 | bool WebAssemblyFrameLowering::hasFP(const MachineFunction &MF) const { | |||
55 | const MachineFrameInfo &MFI = MF.getFrameInfo(); | |||
56 | ||||
57 | // When we have var-sized objects, we move the stack pointer by an unknown | |||
58 | // amount, and need to emit a frame pointer to restore the stack to where we | |||
59 | // were on function entry. | |||
60 | // If we already need a base pointer, we use that to fix up the stack pointer. | |||
61 | // If there are no fixed-size objects, we would have no use of a frame | |||
62 | // pointer, and thus should not emit one. | |||
63 | bool HasFixedSizedObjects = MFI.getStackSize() > 0; | |||
64 | bool NeedsFixedReference = !hasBP(MF) || HasFixedSizedObjects; | |||
65 | ||||
66 | return MFI.isFrameAddressTaken() || | |||
67 | (MFI.hasVarSizedObjects() && NeedsFixedReference) || | |||
68 | MFI.hasStackMap() || MFI.hasPatchPoint(); | |||
69 | } | |||
70 | ||||
71 | /// Under normal circumstances, when a frame pointer is not required, we reserve | |||
72 | /// argument space for call sites in the function immediately on entry to the | |||
73 | /// current function. This eliminates the need for add/sub sp brackets around | |||
74 | /// call sites. Returns true if the call frame is included as part of the stack | |||
75 | /// frame. | |||
76 | bool WebAssemblyFrameLowering::hasReservedCallFrame( | |||
77 | const MachineFunction &MF) const { | |||
78 | return !MF.getFrameInfo().hasVarSizedObjects(); | |||
79 | } | |||
80 | ||||
81 | // Returns true if this function needs a local user-space stack pointer for its | |||
82 | // local frame (not for exception handling). | |||
83 | bool WebAssemblyFrameLowering::needsSPForLocalFrame( | |||
84 | const MachineFunction &MF) const { | |||
85 | auto &MFI = MF.getFrameInfo(); | |||
86 | return MFI.getStackSize() || MFI.adjustsStack() || hasFP(MF); | |||
87 | } | |||
88 | ||||
89 | // In function with EH pads, we need to make a copy of the value of | |||
90 | // __stack_pointer global in SP32 register, in order to use it when restoring | |||
91 | // __stack_pointer after an exception is caught. | |||
92 | bool WebAssemblyFrameLowering::needsPrologForEH( | |||
93 | const MachineFunction &MF) const { | |||
94 | auto EHType = MF.getTarget().getMCAsmInfo()->getExceptionHandlingType(); | |||
95 | return EHType == ExceptionHandling::Wasm && | |||
96 | MF.getFunction().hasPersonalityFn() && MF.getFrameInfo().hasCalls(); | |||
97 | } | |||
98 | ||||
99 | /// Returns true if this function needs a local user-space stack pointer. | |||
100 | /// Unlike a machine stack pointer, the wasm user stack pointer is a global | |||
101 | /// variable, so it is loaded into a register in the prolog. | |||
102 | bool WebAssemblyFrameLowering::needsSP(const MachineFunction &MF) const { | |||
103 | return needsSPForLocalFrame(MF) || needsPrologForEH(MF); | |||
104 | } | |||
105 | ||||
106 | /// Returns true if the local user-space stack pointer needs to be written back | |||
107 | /// to __stack_pointer global by this function (this is not meaningful if | |||
108 | /// needsSP is false). If false, the stack red zone can be used and only a local | |||
109 | /// SP is needed. | |||
110 | bool WebAssemblyFrameLowering::needsSPWriteback( | |||
111 | const MachineFunction &MF) const { | |||
112 | auto &MFI = MF.getFrameInfo(); | |||
113 | assert(needsSP(MF))((needsSP(MF)) ? static_cast<void> (0) : __assert_fail ( "needsSP(MF)", "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/WebAssembly/WebAssemblyFrameLowering.cpp" , 113, __PRETTY_FUNCTION__)); | |||
114 | // When we don't need a local stack pointer for its local frame but only to | |||
115 | // support EH, we don't need to write SP back in the epilog, because we don't | |||
116 | // bump down the stack pointer in the prolog. We need to write SP back in the | |||
117 | // epilog only if | |||
118 | // 1. We need SP not only for EH support but also because we actually use | |||
119 | // stack or we have a frame address taken. | |||
120 | // 2. We cannot use the red zone. | |||
121 | bool CanUseRedZone = MFI.getStackSize() <= RedZoneSize && !MFI.hasCalls() && | |||
122 | !MF.getFunction().hasFnAttribute(Attribute::NoRedZone); | |||
123 | return needsSPForLocalFrame(MF) && !CanUseRedZone; | |||
124 | } | |||
125 | ||||
126 | void WebAssemblyFrameLowering::writeSPToGlobal( | |||
127 | unsigned SrcReg, MachineFunction &MF, MachineBasicBlock &MBB, | |||
128 | MachineBasicBlock::iterator &InsertStore, const DebugLoc &DL) const { | |||
129 | const auto *TII = MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo(); | |||
130 | ||||
131 | const char *ES = "__stack_pointer"; | |||
132 | auto *SPSymbol = MF.createExternalSymbolName(ES); | |||
133 | BuildMI(MBB, InsertStore, DL, TII->get(WebAssembly::SET_GLOBAL_I32)) | |||
134 | .addExternalSymbol(SPSymbol, WebAssemblyII::MO_SYMBOL_GLOBAL) | |||
135 | .addReg(SrcReg); | |||
136 | } | |||
137 | ||||
138 | MachineBasicBlock::iterator | |||
139 | WebAssemblyFrameLowering::eliminateCallFramePseudoInstr( | |||
140 | MachineFunction &MF, MachineBasicBlock &MBB, | |||
141 | MachineBasicBlock::iterator I) const { | |||
142 | assert(!I->getOperand(0).getImm() && (hasFP(MF) || hasBP(MF)) &&((!I->getOperand(0).getImm() && (hasFP(MF) || hasBP (MF)) && "Call frame pseudos should only be used for dynamic stack adjustment" ) ? static_cast<void> (0) : __assert_fail ("!I->getOperand(0).getImm() && (hasFP(MF) || hasBP(MF)) && \"Call frame pseudos should only be used for dynamic stack adjustment\"" , "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/WebAssembly/WebAssemblyFrameLowering.cpp" , 143, __PRETTY_FUNCTION__)) | |||
143 | "Call frame pseudos should only be used for dynamic stack adjustment")((!I->getOperand(0).getImm() && (hasFP(MF) || hasBP (MF)) && "Call frame pseudos should only be used for dynamic stack adjustment" ) ? static_cast<void> (0) : __assert_fail ("!I->getOperand(0).getImm() && (hasFP(MF) || hasBP(MF)) && \"Call frame pseudos should only be used for dynamic stack adjustment\"" , "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/WebAssembly/WebAssemblyFrameLowering.cpp" , 143, __PRETTY_FUNCTION__)); | |||
144 | const auto *TII = MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo(); | |||
145 | if (I->getOpcode() == TII->getCallFrameDestroyOpcode() && | |||
146 | needsSPWriteback(MF)) { | |||
147 | DebugLoc DL = I->getDebugLoc(); | |||
148 | writeSPToGlobal(WebAssembly::SP32, MF, MBB, I, DL); | |||
149 | } | |||
150 | return MBB.erase(I); | |||
151 | } | |||
152 | ||||
153 | void WebAssemblyFrameLowering::emitPrologue(MachineFunction &MF, | |||
154 | MachineBasicBlock &MBB) const { | |||
155 | // TODO: Do ".setMIFlag(MachineInstr::FrameSetup)" on emitted instructions | |||
156 | auto &MFI = MF.getFrameInfo(); | |||
157 | assert(MFI.getCalleeSavedInfo().empty() &&((MFI.getCalleeSavedInfo().empty() && "WebAssembly should not have callee-saved registers" ) ? static_cast<void> (0) : __assert_fail ("MFI.getCalleeSavedInfo().empty() && \"WebAssembly should not have callee-saved registers\"" , "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/WebAssembly/WebAssemblyFrameLowering.cpp" , 158, __PRETTY_FUNCTION__)) | |||
158 | "WebAssembly should not have callee-saved registers")((MFI.getCalleeSavedInfo().empty() && "WebAssembly should not have callee-saved registers" ) ? static_cast<void> (0) : __assert_fail ("MFI.getCalleeSavedInfo().empty() && \"WebAssembly should not have callee-saved registers\"" , "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/WebAssembly/WebAssemblyFrameLowering.cpp" , 158, __PRETTY_FUNCTION__)); | |||
159 | ||||
160 | if (!needsSP(MF)) | |||
| ||||
161 | return; | |||
162 | uint64_t StackSize = MFI.getStackSize(); | |||
163 | ||||
164 | const auto *TII = MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo(); | |||
165 | auto &MRI = MF.getRegInfo(); | |||
166 | ||||
167 | auto InsertPt = MBB.begin(); | |||
168 | while (InsertPt != MBB.end() && WebAssembly::isArgument(*InsertPt)) | |||
169 | ++InsertPt; | |||
170 | DebugLoc DL; | |||
171 | ||||
172 | const TargetRegisterClass *PtrRC = | |||
173 | MRI.getTargetRegisterInfo()->getPointerRegClass(MF); | |||
174 | unsigned SPReg = WebAssembly::SP32; | |||
175 | if (StackSize) | |||
176 | SPReg = MRI.createVirtualRegister(PtrRC); | |||
177 | ||||
178 | const char *ES = "__stack_pointer"; | |||
179 | auto *SPSymbol = MF.createExternalSymbolName(ES); | |||
180 | BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::GET_GLOBAL_I32), SPReg) | |||
181 | .addExternalSymbol(SPSymbol, WebAssemblyII::MO_SYMBOL_GLOBAL); | |||
182 | ||||
183 | bool HasBP = hasBP(MF); | |||
184 | if (HasBP) { | |||
185 | auto FI = MF.getInfo<WebAssemblyFunctionInfo>(); | |||
186 | unsigned BasePtr = MRI.createVirtualRegister(PtrRC); | |||
187 | FI->setBasePointerVreg(BasePtr); | |||
188 | BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::COPY), BasePtr) | |||
189 | .addReg(SPReg); | |||
190 | } | |||
191 | if (StackSize) { | |||
192 | // Subtract the frame size | |||
193 | unsigned OffsetReg = MRI.createVirtualRegister(PtrRC); | |||
194 | BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::CONST_I32), OffsetReg) | |||
195 | .addImm(StackSize); | |||
196 | BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::SUB_I32), | |||
197 | WebAssembly::SP32) | |||
198 | .addReg(SPReg) | |||
199 | .addReg(OffsetReg); | |||
200 | } | |||
201 | if (HasBP) { | |||
202 | unsigned BitmaskReg = MRI.createVirtualRegister(PtrRC); | |||
203 | unsigned Alignment = MFI.getMaxAlignment(); | |||
204 | assert((1u << countTrailingZeros(Alignment)) == Alignment &&(((1u << countTrailingZeros(Alignment)) == Alignment && "Alignment must be a power of 2") ? static_cast<void> ( 0) : __assert_fail ("(1u << countTrailingZeros(Alignment)) == Alignment && \"Alignment must be a power of 2\"" , "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/WebAssembly/WebAssemblyFrameLowering.cpp" , 205, __PRETTY_FUNCTION__)) | |||
205 | "Alignment must be a power of 2")(((1u << countTrailingZeros(Alignment)) == Alignment && "Alignment must be a power of 2") ? static_cast<void> ( 0) : __assert_fail ("(1u << countTrailingZeros(Alignment)) == Alignment && \"Alignment must be a power of 2\"" , "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/WebAssembly/WebAssemblyFrameLowering.cpp" , 205, __PRETTY_FUNCTION__)); | |||
206 | BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::CONST_I32), BitmaskReg) | |||
207 | .addImm((int)~(Alignment - 1)); | |||
208 | BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::AND_I32), | |||
209 | WebAssembly::SP32) | |||
210 | .addReg(WebAssembly::SP32) | |||
211 | .addReg(BitmaskReg); | |||
212 | } | |||
213 | if (hasFP(MF)) { | |||
214 | // Unlike most conventional targets (where FP points to the saved FP), | |||
215 | // FP points to the bottom of the fixed-size locals, so we can use positive | |||
216 | // offsets in load/store instructions. | |||
217 | BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::COPY), WebAssembly::FP32) | |||
218 | .addReg(WebAssembly::SP32); | |||
219 | } | |||
220 | if (StackSize && needsSPWriteback(MF)) { | |||
221 | writeSPToGlobal(WebAssembly::SP32, MF, MBB, InsertPt, DL); | |||
222 | } | |||
223 | } | |||
224 | ||||
225 | void WebAssemblyFrameLowering::emitEpilogue(MachineFunction &MF, | |||
226 | MachineBasicBlock &MBB) const { | |||
227 | uint64_t StackSize = MF.getFrameInfo().getStackSize(); | |||
228 | if (!needsSP(MF) || !needsSPWriteback(MF)) | |||
229 | return; | |||
230 | const auto *TII = MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo(); | |||
231 | auto &MRI = MF.getRegInfo(); | |||
232 | auto InsertPt = MBB.getFirstTerminator(); | |||
233 | DebugLoc DL; | |||
234 | ||||
235 | if (InsertPt != MBB.end()) | |||
236 | DL = InsertPt->getDebugLoc(); | |||
237 | ||||
238 | // Restore the stack pointer. If we had fixed-size locals, add the offset | |||
239 | // subtracted in the prolog. | |||
240 | unsigned SPReg = 0; | |||
241 | if (hasBP(MF)) { | |||
242 | auto FI = MF.getInfo<WebAssemblyFunctionInfo>(); | |||
243 | SPReg = FI->getBasePointerVreg(); | |||
244 | } else if (StackSize) { | |||
245 | const TargetRegisterClass *PtrRC = | |||
246 | MRI.getTargetRegisterInfo()->getPointerRegClass(MF); | |||
247 | unsigned OffsetReg = MRI.createVirtualRegister(PtrRC); | |||
248 | BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::CONST_I32), OffsetReg) | |||
249 | .addImm(StackSize); | |||
250 | // In the epilog we don't need to write the result back to the SP32 physreg | |||
251 | // because it won't be used again. We can use a stackified register instead. | |||
252 | SPReg = MRI.createVirtualRegister(PtrRC); | |||
253 | BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::ADD_I32), SPReg) | |||
254 | .addReg(hasFP(MF) ? WebAssembly::FP32 : WebAssembly::SP32) | |||
255 | .addReg(OffsetReg); | |||
256 | } else { | |||
257 | SPReg = hasFP(MF) ? WebAssembly::FP32 : WebAssembly::SP32; | |||
258 | } | |||
259 | ||||
260 | writeSPToGlobal(SPReg, MF, MBB, InsertPt, DL); | |||
261 | } |
1 | //===-- llvm/Support/MathExtras.h - Useful math functions -------*- C++ -*-===// | |||
2 | // | |||
3 | // The LLVM Compiler Infrastructure | |||
4 | // | |||
5 | // This file is distributed under the University of Illinois Open Source | |||
6 | // License. See LICENSE.TXT for details. | |||
7 | // | |||
8 | //===----------------------------------------------------------------------===// | |||
9 | // | |||
10 | // This file contains some functions that are useful for math stuff. | |||
11 | // | |||
12 | //===----------------------------------------------------------------------===// | |||
13 | ||||
14 | #ifndef LLVM_SUPPORT_MATHEXTRAS_H | |||
15 | #define LLVM_SUPPORT_MATHEXTRAS_H | |||
16 | ||||
17 | #include "llvm/Support/Compiler.h" | |||
18 | #include "llvm/Support/SwapByteOrder.h" | |||
19 | #include <algorithm> | |||
20 | #include <cassert> | |||
21 | #include <climits> | |||
22 | #include <cstring> | |||
23 | #include <limits> | |||
24 | #include <type_traits> | |||
25 | ||||
26 | #ifdef __ANDROID_NDK__ | |||
27 | #include <android/api-level.h> | |||
28 | #endif | |||
29 | ||||
30 | #ifdef _MSC_VER | |||
31 | // Declare these intrinsics manually rather including intrin.h. It's very | |||
32 | // expensive, and MathExtras.h is popular. | |||
33 | // #include <intrin.h> | |||
34 | extern "C" { | |||
35 | unsigned char _BitScanForward(unsigned long *_Index, unsigned long _Mask); | |||
36 | unsigned char _BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask); | |||
37 | unsigned char _BitScanReverse(unsigned long *_Index, unsigned long _Mask); | |||
38 | unsigned char _BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask); | |||
39 | } | |||
40 | #endif | |||
41 | ||||
42 | namespace llvm { | |||
43 | /// The behavior an operation has on an input of 0. | |||
44 | enum ZeroBehavior { | |||
45 | /// The returned value is undefined. | |||
46 | ZB_Undefined, | |||
47 | /// The returned value is numeric_limits<T>::max() | |||
48 | ZB_Max, | |||
49 | /// The returned value is numeric_limits<T>::digits | |||
50 | ZB_Width | |||
51 | }; | |||
52 | ||||
53 | namespace detail { | |||
54 | template <typename T, std::size_t SizeOfT> struct TrailingZerosCounter { | |||
55 | static std::size_t count(T Val, ZeroBehavior) { | |||
56 | if (!Val) | |||
57 | return std::numeric_limits<T>::digits; | |||
58 | if (Val & 0x1) | |||
59 | return 0; | |||
60 | ||||
61 | // Bisection method. | |||
62 | std::size_t ZeroBits = 0; | |||
63 | T Shift = std::numeric_limits<T>::digits >> 1; | |||
64 | T Mask = std::numeric_limits<T>::max() >> Shift; | |||
65 | while (Shift) { | |||
66 | if ((Val & Mask) == 0) { | |||
67 | Val >>= Shift; | |||
68 | ZeroBits |= Shift; | |||
69 | } | |||
70 | Shift >>= 1; | |||
71 | Mask >>= Shift; | |||
72 | } | |||
73 | return ZeroBits; | |||
74 | } | |||
75 | }; | |||
76 | ||||
77 | #if __GNUC__4 >= 4 || defined(_MSC_VER) | |||
78 | template <typename T> struct TrailingZerosCounter<T, 4> { | |||
79 | static std::size_t count(T Val, ZeroBehavior ZB) { | |||
80 | if (ZB != ZB_Undefined && Val == 0) | |||
81 | return 32; | |||
82 | ||||
83 | #if __has_builtin(__builtin_ctz)1 || LLVM_GNUC_PREREQ(4, 0, 0)((4 << 20) + (2 << 10) + 1 >= ((4) << 20 ) + ((0) << 10) + (0)) | |||
84 | return __builtin_ctz(Val); | |||
85 | #elif defined(_MSC_VER) | |||
86 | unsigned long Index; | |||
87 | _BitScanForward(&Index, Val); | |||
88 | return Index; | |||
89 | #endif | |||
90 | } | |||
91 | }; | |||
92 | ||||
93 | #if !defined(_MSC_VER) || defined(_M_X64) | |||
94 | template <typename T> struct TrailingZerosCounter<T, 8> { | |||
95 | static std::size_t count(T Val, ZeroBehavior ZB) { | |||
96 | if (ZB != ZB_Undefined && Val == 0) | |||
97 | return 64; | |||
98 | ||||
99 | #if __has_builtin(__builtin_ctzll)1 || LLVM_GNUC_PREREQ(4, 0, 0)((4 << 20) + (2 << 10) + 1 >= ((4) << 20 ) + ((0) << 10) + (0)) | |||
100 | return __builtin_ctzll(Val); | |||
101 | #elif defined(_MSC_VER) | |||
102 | unsigned long Index; | |||
103 | _BitScanForward64(&Index, Val); | |||
104 | return Index; | |||
105 | #endif | |||
106 | } | |||
107 | }; | |||
108 | #endif | |||
109 | #endif | |||
110 | } // namespace detail | |||
111 | ||||
112 | /// Count number of 0's from the least significant bit to the most | |||
113 | /// stopping at the first 1. | |||
114 | /// | |||
115 | /// Only unsigned integral types are allowed. | |||
116 | /// | |||
117 | /// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are | |||
118 | /// valid arguments. | |||
119 | template <typename T> | |||
120 | std::size_t countTrailingZeros(T Val, ZeroBehavior ZB = ZB_Width) { | |||
121 | static_assert(std::numeric_limits<T>::is_integer && | |||
122 | !std::numeric_limits<T>::is_signed, | |||
123 | "Only unsigned integral types are allowed."); | |||
124 | return llvm::detail::TrailingZerosCounter<T, sizeof(T)>::count(Val, ZB); | |||
| ||||
125 | } | |||
126 | ||||
127 | namespace detail { | |||
128 | template <typename T, std::size_t SizeOfT> struct LeadingZerosCounter { | |||
129 | static std::size_t count(T Val, ZeroBehavior) { | |||
130 | if (!Val) | |||
131 | return std::numeric_limits<T>::digits; | |||
132 | ||||
133 | // Bisection method. | |||
134 | std::size_t ZeroBits = 0; | |||
135 | for (T Shift = std::numeric_limits<T>::digits >> 1; Shift; Shift >>= 1) { | |||
136 | T Tmp = Val >> Shift; | |||
137 | if (Tmp) | |||
138 | Val = Tmp; | |||
139 | else | |||
140 | ZeroBits |= Shift; | |||
141 | } | |||
142 | return ZeroBits; | |||
143 | } | |||
144 | }; | |||
145 | ||||
146 | #if __GNUC__4 >= 4 || defined(_MSC_VER) | |||
147 | template <typename T> struct LeadingZerosCounter<T, 4> { | |||
148 | static std::size_t count(T Val, ZeroBehavior ZB) { | |||
149 | if (ZB != ZB_Undefined && Val == 0) | |||
150 | return 32; | |||
151 | ||||
152 | #if __has_builtin(__builtin_clz)1 || LLVM_GNUC_PREREQ(4, 0, 0)((4 << 20) + (2 << 10) + 1 >= ((4) << 20 ) + ((0) << 10) + (0)) | |||
153 | return __builtin_clz(Val); | |||
154 | #elif defined(_MSC_VER) | |||
155 | unsigned long Index; | |||
156 | _BitScanReverse(&Index, Val); | |||
157 | return Index ^ 31; | |||
158 | #endif | |||
159 | } | |||
160 | }; | |||
161 | ||||
162 | #if !defined(_MSC_VER) || defined(_M_X64) | |||
163 | template <typename T> struct LeadingZerosCounter<T, 8> { | |||
164 | static std::size_t count(T Val, ZeroBehavior ZB) { | |||
165 | if (ZB != ZB_Undefined && Val == 0) | |||
166 | return 64; | |||
167 | ||||
168 | #if __has_builtin(__builtin_clzll)1 || LLVM_GNUC_PREREQ(4, 0, 0)((4 << 20) + (2 << 10) + 1 >= ((4) << 20 ) + ((0) << 10) + (0)) | |||
169 | return __builtin_clzll(Val); | |||
170 | #elif defined(_MSC_VER) | |||
171 | unsigned long Index; | |||
172 | _BitScanReverse64(&Index, Val); | |||
173 | return Index ^ 63; | |||
174 | #endif | |||
175 | } | |||
176 | }; | |||
177 | #endif | |||
178 | #endif | |||
179 | } // namespace detail | |||
180 | ||||
181 | /// Count number of 0's from the most significant bit to the least | |||
182 | /// stopping at the first 1. | |||
183 | /// | |||
184 | /// Only unsigned integral types are allowed. | |||
185 | /// | |||
186 | /// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are | |||
187 | /// valid arguments. | |||
188 | template <typename T> | |||
189 | std::size_t countLeadingZeros(T Val, ZeroBehavior ZB = ZB_Width) { | |||
190 | static_assert(std::numeric_limits<T>::is_integer && | |||
191 | !std::numeric_limits<T>::is_signed, | |||
192 | "Only unsigned integral types are allowed."); | |||
193 | return llvm::detail::LeadingZerosCounter<T, sizeof(T)>::count(Val, ZB); | |||
194 | } | |||
195 | ||||
196 | /// Get the index of the first set bit starting from the least | |||
197 | /// significant bit. | |||
198 | /// | |||
199 | /// Only unsigned integral types are allowed. | |||
200 | /// | |||
201 | /// \param ZB the behavior on an input of 0. Only ZB_Max and ZB_Undefined are | |||
202 | /// valid arguments. | |||
203 | template <typename T> T findFirstSet(T Val, ZeroBehavior ZB = ZB_Max) { | |||
204 | if (ZB == ZB_Max && Val == 0) | |||
205 | return std::numeric_limits<T>::max(); | |||
206 | ||||
207 | return countTrailingZeros(Val, ZB_Undefined); | |||
208 | } | |||
209 | ||||
210 | /// Create a bitmask with the N right-most bits set to 1, and all other | |||
211 | /// bits set to 0. Only unsigned types are allowed. | |||
template <typename T> T maskTrailingOnes(unsigned N) {
  static_assert(std::is_unsigned<T>::value, "Invalid type!");
  // Restored "CHAR_BIT" from the scraper-garbled "CHAR_BIT8" token.
  const unsigned Bits = CHAR_BIT * sizeof(T);
  assert(N <= Bits && "Invalid bit index");
  // N == 0 is special-cased because "T(-1) >> Bits" would shift by the full
  // width, which is undefined behavior.
  return N == 0 ? 0 : (T(-1) >> (Bits - N));
}
218 | ||||
219 | /// Create a bitmask with the N left-most bits set to 1, and all other | |||
220 | /// bits set to 0. Only unsigned types are allowed. | |||
221 | template <typename T> T maskLeadingOnes(unsigned N) { | |||
222 | return ~maskTrailingOnes<T>(CHAR_BIT8 * sizeof(T) - N); | |||
223 | } | |||
224 | ||||
225 | /// Create a bitmask with the N right-most bits set to 0, and all other | |||
226 | /// bits set to 1. Only unsigned types are allowed. | |||
227 | template <typename T> T maskTrailingZeros(unsigned N) { | |||
228 | return maskLeadingOnes<T>(CHAR_BIT8 * sizeof(T) - N); | |||
229 | } | |||
230 | ||||
231 | /// Create a bitmask with the N left-most bits set to 0, and all other | |||
232 | /// bits set to 1. Only unsigned types are allowed. | |||
233 | template <typename T> T maskLeadingZeros(unsigned N) { | |||
234 | return maskTrailingOnes<T>(CHAR_BIT8 * sizeof(T) - N); | |||
235 | } | |||
236 | ||||
237 | /// Get the index of the last set bit starting from the least | |||
238 | /// significant bit. | |||
239 | /// | |||
240 | /// Only unsigned integral types are allowed. | |||
241 | /// | |||
242 | /// \param ZB the behavior on an input of 0. Only ZB_Max and ZB_Undefined are | |||
243 | /// valid arguments. | |||
244 | template <typename T> T findLastSet(T Val, ZeroBehavior ZB = ZB_Max) { | |||
245 | if (ZB == ZB_Max && Val == 0) | |||
246 | return std::numeric_limits<T>::max(); | |||
247 | ||||
248 | // Use ^ instead of - because both gcc and llvm can remove the associated ^ | |||
249 | // in the __builtin_clz intrinsic on x86. | |||
250 | return countLeadingZeros(Val, ZB_Undefined) ^ | |||
251 | (std::numeric_limits<T>::digits - 1); | |||
252 | } | |||
253 | ||||
254 | /// Macro compressed bit reversal table for 256 bits. | |||
255 | /// | |||
256 | /// http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable | |||
static const unsigned char BitReverseTable256[256] = {
#define R2(n) n, n + 2 * 64, n + 1 * 64, n + 3 * 64
#define R4(n) R2(n), R2(n + 2 * 16), R2(n + 1 * 16), R2(n + 3 * 16)
#define R6(n) R4(n), R4(n + 2 * 4), R4(n + 1 * 4), R4(n + 3 * 4)
  // The nested R-macros expand to all 256 entries such that
  // BitReverseTable256[i] is the bit-reversal of the byte value i.
  R6(0), R6(2), R6(1), R6(3)
#undef R2
#undef R4
#undef R6
};
266 | ||||
267 | /// Reverse the bits in \p Val. | |||
268 | template <typename T> | |||
269 | T reverseBits(T Val) { | |||
270 | unsigned char in[sizeof(Val)]; | |||
271 | unsigned char out[sizeof(Val)]; | |||
272 | std::memcpy(in, &Val, sizeof(Val)); | |||
273 | for (unsigned i = 0; i < sizeof(Val); ++i) | |||
274 | out[(sizeof(Val) - i) - 1] = BitReverseTable256[in[i]]; | |||
275 | std::memcpy(&Val, out, sizeof(Val)); | |||
276 | return Val; | |||
277 | } | |||
278 | ||||
279 | // NOTE: The following support functions use the _32/_64 extensions instead of | |||
280 | // type overloading so that signed and unsigned integers can be used without | |||
281 | // ambiguity. | |||
282 | ||||
/// Return the high 32 bits of a 64 bit value.
constexpr inline uint32_t Hi_32(uint64_t Value) {
  // Shift the upper half down, then truncate.
  return static_cast<uint32_t>(Value >> 32);
}

/// Return the low 32 bits of a 64 bit value.
constexpr inline uint32_t Lo_32(uint64_t Value) {
  // Truncation keeps only the low 32 bits.
  return static_cast<uint32_t>(Value);
}

/// Make a 64-bit integer from a high / low pair of 32-bit integers.
constexpr inline uint64_t Make_64(uint32_t High, uint32_t Low) {
  return (static_cast<uint64_t>(High) << 32) | static_cast<uint64_t>(Low);
}
297 | ||||
298 | /// Checks if an integer fits into the given bit width. | |||
template <unsigned N> constexpr inline bool isInt(int64_t x) {
  // N >= 64: every int64_t fits. Otherwise check x against the signed N-bit
  // range [-2^(N-1), 2^(N-1)). Restored "INT64_C(1)" from the scraper-garbled
  // "INT64_C(1)1L" token.
  return N >= 64 ||
         (-(INT64_C(1) << (N - 1)) <= x && x < (INT64_C(1) << (N - 1)));
}
302 | // Template specializations to get better code for common cases. | |||
template <> constexpr inline bool isInt<8>(int64_t x) {
  // Fits in 8 bits iff truncation to int8_t round-trips.
  return static_cast<int8_t>(x) == x;
}
template <> constexpr inline bool isInt<16>(int64_t x) {
  return static_cast<int16_t>(x) == x;
}
template <> constexpr inline bool isInt<32>(int64_t x) {
  return static_cast<int32_t>(x) == x;
}
312 | ||||
313 | /// Checks if a signed integer is an N bit number shifted left by S. | |||
314 | template <unsigned N, unsigned S> | |||
315 | constexpr inline bool isShiftedInt(int64_t x) { | |||
316 | static_assert( | |||
317 | N > 0, "isShiftedInt<0> doesn't make sense (refers to a 0-bit number."); | |||
318 | static_assert(N + S <= 64, "isShiftedInt<N, S> with N + S > 64 is too wide."); | |||
319 | return isInt<N + S>(x) && (x % (UINT64_C(1)1UL << S) == 0); | |||
320 | } | |||
321 | ||||
322 | /// Checks if an unsigned integer fits into the given bit width. | |||
323 | /// | |||
324 | /// This is written as two functions rather than as simply | |||
325 | /// | |||
326 | /// return N >= 64 || X < (UINT64_C(1) << N); | |||
327 | /// | |||
328 | /// to keep MSVC from (incorrectly) warning on isUInt<64> that we're shifting | |||
329 | /// left too many places. | |||
template <unsigned N>
constexpr inline typename std::enable_if<(N < 64), bool>::type
isUInt(uint64_t X) {
  static_assert(N > 0, "isUInt<0> doesn't make sense");
  // Restored "UINT64_C(1)" from the scraper-garbled "UINT64_C(1)1UL" token;
  // the enable_if guarantees N < 64, so the shift is well-defined.
  return X < (UINT64_C(1) << (N));
}
template <unsigned N>
constexpr inline typename std::enable_if<N >= 64, bool>::type
isUInt(uint64_t X) {
  // Every uint64_t fits in 64 or more bits.
  return true;
}
341 | ||||
342 | // Template specializations to get better code for common cases. | |||
template <> constexpr inline bool isUInt<8>(uint64_t x) {
  // Fits in 8 bits iff truncation to uint8_t round-trips.
  return static_cast<uint8_t>(x) == x;
}
template <> constexpr inline bool isUInt<16>(uint64_t x) {
  return static_cast<uint16_t>(x) == x;
}
template <> constexpr inline bool isUInt<32>(uint64_t x) {
  return static_cast<uint32_t>(x) == x;
}
352 | ||||
353 | /// Checks if a unsigned integer is an N bit number shifted left by S. | |||
354 | template <unsigned N, unsigned S> | |||
355 | constexpr inline bool isShiftedUInt(uint64_t x) { | |||
356 | static_assert( | |||
357 | N > 0, "isShiftedUInt<0> doesn't make sense (refers to a 0-bit number)"); | |||
358 | static_assert(N + S <= 64, | |||
359 | "isShiftedUInt<N, S> with N + S > 64 is too wide."); | |||
360 | // Per the two static_asserts above, S must be strictly less than 64. So | |||
361 | // 1 << S is not undefined behavior. | |||
362 | return isUInt<N + S>(x) && (x % (UINT64_C(1)1UL << S) == 0); | |||
363 | } | |||
364 | ||||
365 | /// Gets the maximum value for a N-bit unsigned integer. | |||
inline uint64_t maxUIntN(uint64_t N) {
  assert(N > 0 && N <= 64 && "integer width out of range");

  // uint64_t(1) << 64 is undefined behavior, so we can't do
  //   (uint64_t(1) << N) - 1
  // without checking first that N != 64. But this works and doesn't have a
  // branch. (Restored "UINT64_MAX" from its scraper-garbled expanded form.)
  return UINT64_MAX >> (64 - N);
}
375 | ||||
/// Gets the minimum value for a N-bit signed integer.
inline int64_t minIntN(int64_t N) {
  assert(N > 0 && N <= 64 && "integer width out of range");

  // Build 2^(N-1) in unsigned arithmetic and negate it there; unsigned
  // negation is well defined, and the final conversion to int64_t yields
  // the two's complement minimum (also correct for N == 64).
  const uint64_t SignBit = UINT64_C(1) << (N - 1);
  return -SignBit;
}
382 | ||||
/// Gets the maximum value for a N-bit signed integer.
inline int64_t maxIntN(int64_t N) {
  assert(N > 0 && N <= 64 && "integer width out of range");

  // For N == 64 the arithmetic relies on unsigned wraparound; the value is
  // converted to int64_t only at the very end, so no signed overflow occurs.
  const uint64_t Bound = UINT64_C(1) << (N - 1);
  return Bound - 1;
}
391 | ||||
392 | /// Checks if an unsigned integer fits into the given (dynamic) bit width. | |||
393 | inline bool isUIntN(unsigned N, uint64_t x) { | |||
394 | return N >= 64 || x <= maxUIntN(N); | |||
395 | } | |||
396 | ||||
397 | /// Checks if an signed integer fits into the given (dynamic) bit width. | |||
398 | inline bool isIntN(unsigned N, int64_t x) { | |||
399 | return N >= 64 || (minIntN(N) <= x && x <= maxIntN(N)); | |||
400 | } | |||
401 | ||||
/// Return true if the argument is a non-empty sequence of ones starting at the
/// least significant bit with the remainder zero (32 bit version).
/// Ex. isMask_32(0x0000FFFFU) == true.
constexpr inline bool isMask_32(uint32_t Value) {
  // Adding one to a low mask carries into the first zero bit, so the AND is
  // zero exactly for masks; Value != 0 rules out the empty sequence.
  return Value != 0 && (Value & (Value + 1)) == 0;
}

/// Return true if the argument is a non-empty sequence of ones starting at the
/// least significant bit with the remainder zero (64 bit version).
constexpr inline bool isMask_64(uint64_t Value) {
  return Value != 0 && (Value & (Value + 1)) == 0;
}

/// Return true if the argument contains a non-empty sequence of ones with the
/// remainder zero (32 bit version.) Ex. isShiftedMask_32(0x0000FF00U) == true.
constexpr inline bool isShiftedMask_32(uint32_t Value) {
  // ORing in (Value - 1) fills the trailing zeros, reducing the problem to
  // the unshifted mask check.
  return Value != 0 && isMask_32(Value | (Value - 1));
}

/// Return true if the argument contains a non-empty sequence of ones with the
/// remainder zero (64 bit version.)
constexpr inline bool isShiftedMask_64(uint64_t Value) {
  return Value != 0 && isMask_64(Value | (Value - 1));
}

/// Return true if the argument is a power of two > 0.
/// Ex. isPowerOf2_32(0x00100000U) == true (32 bit edition.)
constexpr inline bool isPowerOf2_32(uint32_t Value) {
  // Exactly one bit set: clearing the lowest set bit must leave zero.
  return Value != 0 && (Value & (Value - 1)) == 0;
}

/// Return true if the argument is a power of two > 0 (64 bit edition.)
constexpr inline bool isPowerOf2_64(uint64_t Value) {
  return Value != 0 && (Value & (Value - 1)) == 0;
}
437 | ||||
/// Return a byte-swapped representation of the 16-bit argument.
inline uint16_t ByteSwap_16(uint16_t Value) {
  // Thin forwarder; the actual swap logic lives in the sys:: helpers
  // declared outside this header.
  return sys::SwapByteOrder_16(Value);
}

/// Return a byte-swapped representation of the 32-bit argument.
inline uint32_t ByteSwap_32(uint32_t Value) {
  return sys::SwapByteOrder_32(Value);
}

/// Return a byte-swapped representation of the 64-bit argument.
inline uint64_t ByteSwap_64(uint64_t Value) {
  return sys::SwapByteOrder_64(Value);
}
452 | ||||
453 | /// Count the number of ones from the most significant bit to the first | |||
454 | /// zero bit. | |||
455 | /// | |||
456 | /// Ex. countLeadingOnes(0xFF0FFF00) == 8. | |||
457 | /// Only unsigned integral types are allowed. | |||
458 | /// | |||
459 | /// \param ZB the behavior on an input of all ones. Only ZB_Width and | |||
460 | /// ZB_Undefined are valid arguments. | |||
461 | template <typename T> | |||
462 | std::size_t countLeadingOnes(T Value, ZeroBehavior ZB = ZB_Width) { | |||
463 | static_assert(std::numeric_limits<T>::is_integer && | |||
464 | !std::numeric_limits<T>::is_signed, | |||
465 | "Only unsigned integral types are allowed."); | |||
466 | return countLeadingZeros<T>(~Value, ZB); | |||
467 | } | |||
468 | ||||
469 | /// Count the number of ones from the least significant bit to the first | |||
470 | /// zero bit. | |||
471 | /// | |||
472 | /// Ex. countTrailingOnes(0x00FF00FF) == 8. | |||
473 | /// Only unsigned integral types are allowed. | |||
474 | /// | |||
475 | /// \param ZB the behavior on an input of all ones. Only ZB_Width and | |||
476 | /// ZB_Undefined are valid arguments. | |||
477 | template <typename T> | |||
478 | std::size_t countTrailingOnes(T Value, ZeroBehavior ZB = ZB_Width) { | |||
479 | static_assert(std::numeric_limits<T>::is_integer && | |||
480 | !std::numeric_limits<T>::is_signed, | |||
481 | "Only unsigned integral types are allowed."); | |||
482 | return countTrailingZeros<T>(~Value, ZB); | |||
483 | } | |||
484 | ||||
485 | namespace detail { | |||
486 | template <typename T, std::size_t SizeOfT> struct PopulationCounter { | |||
487 | static unsigned count(T Value) { | |||
488 | // Generic version, forward to 32 bits. | |||
489 | static_assert(SizeOfT <= 4, "Not implemented!"); | |||
490 | #if __GNUC__4 >= 4 | |||
491 | return __builtin_popcount(Value); | |||
492 | #else | |||
493 | uint32_t v = Value; | |||
494 | v = v - ((v >> 1) & 0x55555555); | |||
495 | v = (v & 0x33333333) + ((v >> 2) & 0x33333333); | |||
496 | return ((v + (v >> 4) & 0xF0F0F0F) * 0x1010101) >> 24; | |||
497 | #endif | |||
498 | } | |||
499 | }; | |||
500 | ||||
501 | template <typename T> struct PopulationCounter<T, 8> { | |||
502 | static unsigned count(T Value) { | |||
503 | #if __GNUC__4 >= 4 | |||
504 | return __builtin_popcountll(Value); | |||
505 | #else | |||
506 | uint64_t v = Value; | |||
507 | v = v - ((v >> 1) & 0x5555555555555555ULL); | |||
508 | v = (v & 0x3333333333333333ULL) + ((v >> 2) & 0x3333333333333333ULL); | |||
509 | v = (v + (v >> 4)) & 0x0F0F0F0F0F0F0F0FULL; | |||
510 | return unsigned((uint64_t)(v * 0x0101010101010101ULL) >> 56); | |||
511 | #endif | |||
512 | } | |||
513 | }; | |||
514 | } // namespace detail | |||
515 | ||||
/// Count the number of set bits in a value.
/// Ex. countPopulation(0xF000F000) = 8
/// Returns 0 if the word is zero.
template <typename T>
inline unsigned countPopulation(T Value) {
  static_assert(std::numeric_limits<T>::is_integer &&
                !std::numeric_limits<T>::is_signed,
                "Only unsigned integral types are allowed.");
  // Dispatch on sizeof(T): 8-byte types pick the 64-bit specialization of
  // detail::PopulationCounter, everything narrower uses the 32-bit path.
  return detail::PopulationCounter<T, sizeof(T)>::count(Value);
}
526 | ||||
/// Return the log base 2 of the specified value.
inline double Log2(double Value) {
#if defined(__ANDROID_API__) && __ANDROID_API__ < 18
  // Old Android API levels lack log2(); use the change-of-base identity.
  return __builtin_log(Value) / __builtin_log(2.0);
#else
  return log2(Value);
#endif
}
535 | ||||
/// Return the floor log base 2 of the specified value, -1 if the value is zero.
/// (32 bit edition.)
/// Ex. Log2_32(32) == 5, Log2_32(1) == 0, Log2_32(0) == -1, Log2_32(6) == 2
inline unsigned Log2_32(uint32_t Value) {
  // Index of the highest set bit. NOTE(review): the "-1" for Value == 0
  // presumably comes from countLeadingZeros(0) returning 32 (ZB_Width) and
  // unsigned wraparound of 31 - 32 — confirm against countLeadingZeros.
  return 31 - countLeadingZeros(Value);
}

/// Return the floor log base 2 of the specified value, -1 if the value is zero.
/// (64 bit edition.)
inline unsigned Log2_64(uint64_t Value) {
  return 63 - countLeadingZeros(Value);
}

/// Return the ceil log base 2 of the specified value, 32 if the value is zero.
/// (32 bit edition).
/// Ex. Log2_32_Ceil(32) == 5, Log2_32_Ceil(1) == 0, Log2_32_Ceil(6) == 3
inline unsigned Log2_32_Ceil(uint32_t Value) {
  // Value - 1 moves exact powers of two into the bucket below, which makes
  // the result round up; Value == 0 wraps to ~0u, yielding 32.
  return 32 - countLeadingZeros(Value - 1);
}

/// Return the ceil log base 2 of the specified value, 64 if the value is zero.
/// (64 bit edition.)
inline unsigned Log2_64_Ceil(uint64_t Value) {
  return 64 - countLeadingZeros(Value - 1);
}
561 | ||||
/// Return the greatest common divisor of the values using Euclid's algorithm.
inline uint64_t GreatestCommonDivisor64(uint64_t A, uint64_t B) {
  // Classic Euclid: repeatedly replace (A, B) with (B, A mod B) until the
  // remainder reaches zero; the surviving value is the GCD.
  while (B != 0) {
    const uint64_t Remainder = A % B;
    A = B;
    B = Remainder;
  }
  return A;
}
571 | ||||
/// This function takes a 64-bit integer and returns the bit equivalent double.
inline double BitsToDouble(uint64_t Bits) {
  static_assert(sizeof(uint64_t) == sizeof(double), "Unexpected type sizes");
  // memcpy is the portable way to reinterpret a bit pattern without
  // violating strict aliasing.
  double Result;
  memcpy(&Result, &Bits, sizeof(Bits));
  return Result;
}

/// This function takes a 32-bit integer and returns the bit equivalent float.
inline float BitsToFloat(uint32_t Bits) {
  static_assert(sizeof(uint32_t) == sizeof(float), "Unexpected type sizes");
  float Result;
  memcpy(&Result, &Bits, sizeof(Bits));
  return Result;
}

/// This function takes a double and returns the bit equivalent 64-bit integer.
/// Note that copying doubles around changes the bits of NaNs on some hosts,
/// notably x86, so this routine cannot be used if these bits are needed.
inline uint64_t DoubleToBits(double Double) {
  static_assert(sizeof(uint64_t) == sizeof(double), "Unexpected type sizes");
  uint64_t Result;
  memcpy(&Result, &Double, sizeof(Double));
  return Result;
}

/// This function takes a float and returns the bit equivalent 32-bit integer.
/// Note that copying floats around changes the bits of NaNs on some hosts,
/// notably x86, so this routine cannot be used if these bits are needed.
inline uint32_t FloatToBits(float Float) {
  static_assert(sizeof(uint32_t) == sizeof(float), "Unexpected type sizes");
  uint32_t Result;
  memcpy(&Result, &Float, sizeof(Float));
  return Result;
}
607 | ||||
/// A and B are either alignments or offsets. Return the minimum alignment that
/// may be assumed after adding the two together.
constexpr inline uint64_t MinAlign(uint64_t A, uint64_t B) {
  // Isolate the lowest set bit of (A | B): that is the largest power of two
  // dividing both values. "~(x - 1)" equals "-x" in unsigned arithmetic but
  // avoids MSVC's C4146 warning about negating an unsigned value.
  return (A | B) & ~((A | B) - 1);
}
618 | ||||
619 | /// Aligns \c Addr to \c Alignment bytes, rounding up. | |||
620 | /// | |||
621 | /// Alignment should be a power of two. This method rounds up, so | |||
622 | /// alignAddr(7, 4) == 8 and alignAddr(8, 4) == 8. | |||
623 | inline uintptr_t alignAddr(const void *Addr, size_t Alignment) { | |||
624 | assert(Alignment && isPowerOf2_64((uint64_t)Alignment) &&((Alignment && isPowerOf2_64((uint64_t)Alignment) && "Alignment is not a power of two!") ? static_cast<void> (0) : __assert_fail ("Alignment && isPowerOf2_64((uint64_t)Alignment) && \"Alignment is not a power of two!\"" , "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/Support/MathExtras.h" , 625, __PRETTY_FUNCTION__)) | |||
625 | "Alignment is not a power of two!")((Alignment && isPowerOf2_64((uint64_t)Alignment) && "Alignment is not a power of two!") ? static_cast<void> (0) : __assert_fail ("Alignment && isPowerOf2_64((uint64_t)Alignment) && \"Alignment is not a power of two!\"" , "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/Support/MathExtras.h" , 625, __PRETTY_FUNCTION__)); | |||
626 | ||||
627 | assert((uintptr_t)Addr + Alignment - 1 >= (uintptr_t)Addr)(((uintptr_t)Addr + Alignment - 1 >= (uintptr_t)Addr) ? static_cast <void> (0) : __assert_fail ("(uintptr_t)Addr + Alignment - 1 >= (uintptr_t)Addr" , "/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/Support/MathExtras.h" , 627, __PRETTY_FUNCTION__)); | |||
628 | ||||
629 | return (((uintptr_t)Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1)); | |||
630 | } | |||
631 | ||||
632 | /// Returns the necessary adjustment for aligning \c Ptr to \c Alignment | |||
633 | /// bytes, rounding up. | |||
634 | inline size_t alignmentAdjustment(const void *Ptr, size_t Alignment) { | |||
635 | return alignAddr(Ptr, Alignment) - (uintptr_t)Ptr; | |||
636 | } | |||
637 | ||||
/// Returns the next power of two (in 64-bits) that is strictly greater than A.
/// Returns zero on overflow.
inline uint64_t NextPowerOf2(uint64_t A) {
  // Smear the highest set bit into every lower position (shifts by 1, 2, 4,
  // 8, 16, 32), turning A into 2^k - 1 where 2^k is the next power of two
  // strictly above the original value; then add one. 2^64 wraps to zero.
  for (unsigned Shift = 1; Shift < 64; Shift <<= 1)
    A |= (A >> Shift);
  return A + 1;
}
649 | ||||
650 | /// Returns the power of two which is less than or equal to the given value. | |||
651 | /// Essentially, it is a floor operation across the domain of powers of two. | |||
652 | inline uint64_t PowerOf2Floor(uint64_t A) { | |||
653 | if (!A) return 0; | |||
654 | return 1ull << (63 - countLeadingZeros(A, ZB_Undefined)); | |||
655 | } | |||
656 | ||||
657 | /// Returns the power of two which is greater than or equal to the given value. | |||
658 | /// Essentially, it is a ceil operation across the domain of powers of two. | |||
659 | inline uint64_t PowerOf2Ceil(uint64_t A) { | |||
660 | if (!A) | |||
661 | return 0; | |||
662 | return NextPowerOf2(A - 1); | |||
663 | } | |||
664 | ||||
/// Returns the next integer (mod 2**64) that is greater than or equal to
/// \p Value and is a multiple of \p Align. \p Align must be non-zero.
///
/// If non-zero \p Skew is specified, the return value will be a minimal
/// integer that is greater than or equal to \p Value and equal to
/// \p Align * N + \p Skew for some integer N. If \p Skew is larger than
/// \p Align, its value is adjusted to '\p Skew mod \p Align'.
///
/// Examples:
/// \code
///   alignTo(5, 8) = 8
///   alignTo(17, 8) = 24
///   alignTo(~0LL, 8) = 0
///   alignTo(321, 255) = 510
///
///   alignTo(5, 8, 7) = 7
///   alignTo(17, 8, 1) = 17
///   alignTo(~0LL, 8, 3) = 3
///   alignTo(321, 255, 42) = 552
/// \endcode
inline uint64_t alignTo(uint64_t Value, uint64_t Align, uint64_t Skew = 0) {
  assert(Align != 0u && "Align can't be 0.");
  // Normalize the skew, then round the skew-adjusted value up to the next
  // multiple of Align and shift back by the skew.
  Skew %= Align;
  const uint64_t Rounded = (Value + Align - 1 - Skew) / Align * Align;
  return Rounded + Skew;
}

/// Returns the next integer (mod 2**64) that is greater than or equal to
/// \p Value and is a multiple of \c Align. \c Align must be non-zero.
template <uint64_t Align> constexpr inline uint64_t alignTo(uint64_t Value) {
  static_assert(Align != 0u, "Align must be non-zero");
  return (Value + Align - 1) / Align * Align;
}

/// Returns the integer ceil(Numerator / Denominator).
inline uint64_t divideCeil(uint64_t Numerator, uint64_t Denominator) {
  // Round Numerator up to a multiple of Denominator, then divide exactly.
  return alignTo(Numerator, Denominator) / Denominator;
}
702 | ||||
/// \c alignTo for contexts where a constant expression is required.
/// \sa alignTo
///
/// \todo FIXME: remove when \c constexpr becomes really \c constexpr
template <uint64_t Align>
struct AlignTo {
  static_assert(Align != 0u, "Align must be non-zero");
  template <uint64_t Value>
  struct from_value {
    // Value rounded up to the next multiple of Align, computed entirely at
    // compile time as a class-level constant.
    static const uint64_t value = (Value + Align - 1) / Align * Align;
  };
};
715 | ||||
/// Returns the largest uint64_t less than or equal to \p Value and is
/// \p Skew mod \p Align. \p Align must be non-zero
inline uint64_t alignDown(uint64_t Value, uint64_t Align, uint64_t Skew = 0) {
  assert(Align != 0u && "Align can't be 0.");
  // Normalize the skew, then truncate the skew-adjusted value down to a
  // multiple of Align and shift back by the skew.
  Skew %= Align;
  const uint64_t Truncated = (Value - Skew) / Align * Align;
  return Truncated + Skew;
}
723 | ||||
/// Returns the offset to the next integer (mod 2**64) that is greater than
/// or equal to \p Value and is a multiple of \p Align. \p Align must be
/// non-zero.
inline uint64_t OffsetToAlignment(uint64_t Value, uint64_t Align) {
  // Number of bytes to add to Value to reach the next Align boundary
  // (0 when Value is already aligned).
  return alignTo(Value, Align) - Value;
}
730 | ||||
/// Sign-extend the number in the bottom B bits of X to a 32-bit integer.
/// Requires 0 < B <= 32.
template <unsigned B> constexpr inline int32_t SignExtend32(uint32_t X) {
  static_assert(B > 0, "Bit width can't be 0.");
  static_assert(B <= 32, "Bit width out of range.");
  // Shift the B-bit field to the top, then arithmetic-shift it back down so
  // the field's sign bit is replicated through the upper bits.
  return int32_t(X << (32 - B)) >> (32 - B);
}

/// Sign-extend the number in the bottom B bits of X to a 32-bit integer.
/// Requires 0 < B < 32.
inline int32_t SignExtend32(uint32_t X, unsigned B) {
  assert(B > 0 && "Bit width can't be 0.");
  assert(B <= 32 && "Bit width out of range.");
  const unsigned Shift = 32 - B;
  return int32_t(X << Shift) >> Shift;
}

/// Sign-extend the number in the bottom B bits of X to a 64-bit integer.
/// Requires 0 < B < 64.
template <unsigned B> constexpr inline int64_t SignExtend64(uint64_t x) {
  static_assert(B > 0, "Bit width can't be 0.");
  static_assert(B <= 64, "Bit width out of range.");
  return int64_t(x << (64 - B)) >> (64 - B);
}

/// Sign-extend the number in the bottom B bits of X to a 64-bit integer.
/// Requires 0 < B < 64.
inline int64_t SignExtend64(uint64_t X, unsigned B) {
  assert(B > 0 && "Bit width can't be 0.");
  assert(B <= 64 && "Bit width out of range.");
  const unsigned Shift = 64 - B;
  return int64_t(X << Shift) >> Shift;
}
762 | ||||
/// Subtract two unsigned integers, X and Y, of type T and return the absolute
/// value of the result.
template <typename T>
typename std::enable_if<std::is_unsigned<T>::value, T>::type
AbsoluteDifference(T X, T Y) {
  // Subtract the smaller from the larger so the unsigned result can never
  // wrap around.
  return X > Y ? X - Y : Y - X;
}
770 | ||||
/// Add two unsigned integers, X and Y, of type T. Clamp the result to the
/// maximum representable value of T on overflow. ResultOverflowed indicates if
/// the result is larger than the maximum representable value of type T.
template <typename T>
typename std::enable_if<std::is_unsigned<T>::value, T>::type
SaturatingAdd(T X, T Y, bool *ResultOverflowed = nullptr) {
  // Route the overflow flag to the caller's bool if provided, else to a
  // throwaway local.
  bool Scratch;
  bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Scratch;
  // Unsigned addition wraps on overflow, so the wrapped sum is smaller than
  // either operand exactly when the true result doesn't fit
  // (Hacker's Delight, p. 29).
  const T Sum = X + Y;
  Overflowed = Sum < X || Sum < Y;
  return Overflowed ? std::numeric_limits<T>::max() : Sum;
}
787 | ||||
/// Multiply two unsigned integers, X and Y, of type T. Clamp the result to the
/// maximum representable value of T on overflow. ResultOverflowed indicates if
/// the result is larger than the maximum representable value of type T.
template <typename T>
typename std::enable_if<std::is_unsigned<T>::value, T>::type
SaturatingMultiply(T X, T Y, bool *ResultOverflowed = nullptr) {
  // Route the overflow flag to the caller's bool if provided, else locally.
  bool Dummy;
  bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;

  // Hacker's Delight, p. 30 has a different algorithm, but we don't use that
  // because it fails for uint16_t (where multiplication can have undefined
  // behavior due to promotion to int), and requires a division in addition
  // to the multiplication.

  Overflowed = false;

  // Log2(Z) would be either Log2Z or Log2Z + 1.
  // Special case: if X or Y is 0, Log2_64 gives -1, and Log2Z
  // will necessarily be less than Log2Max as desired.
  int Log2Z = Log2_64(X) + Log2_64(Y);
  const T Max = std::numeric_limits<T>::max();
  int Log2Max = Log2_64(Max);
  // Sum of the operand bit-widths strictly below the result width: the
  // product is guaranteed to fit.
  if (Log2Z < Log2Max) {
    return X * Y;
  }
  // Strictly above: the product is guaranteed not to fit; saturate.
  if (Log2Z > Log2Max) {
    Overflowed = true;
    return Max;
  }

  // We're going to use the top bit, and maybe overflow one
  // bit past it. Multiply all but the bottom bit then add
  // that on at the end.
  T Z = (X >> 1) * Y;
  // If the half-product already occupies the top bit, doubling it would
  // overflow.
  if (Z & ~(Max >> 1)) {
    Overflowed = true;
    return Max;
  }
  Z <<= 1;
  // Restore the contribution of X's dropped low bit (one extra Y), using
  // the saturating add so a final carry-out also saturates.
  if (X & 1)
    return SaturatingAdd(Z, Y, ResultOverflowed);

  return Z;
}
832 | ||||
/// Multiply two unsigned integers, X and Y, and add the unsigned integer, A to
/// the product. Clamp the result to the maximum representable value of T on
/// overflow. ResultOverflowed indicates if the result is larger than the
/// maximum representable value of type T.
template <typename T>
typename std::enable_if<std::is_unsigned<T>::value, T>::type
SaturatingMultiplyAdd(T X, T Y, T A, bool *ResultOverflowed = nullptr) {
  // Route the overflow flag to the caller's bool if provided, else locally.
  bool Scratch;
  bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Scratch;

  // If the multiply already saturated, adding A cannot help; propagate the
  // clamped product and the overflow flag as-is.
  const T Product = SaturatingMultiply(X, Y, &Overflowed);
  if (Overflowed)
    return Product;
  return SaturatingAdd(A, Product, &Overflowed);
}
849 | ||||
850 | /// Use this rather than HUGE_VALF; the latter causes warnings on MSVC. | |||
851 | extern const float huge_valf; | |||
852 | } // End llvm namespace | |||
853 | ||||
854 | #endif |