File: | build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp |
Warning: | line 360, column 25 The result of the left shift is undefined due to shifting by '4294967295', which is greater or equal to the width of type 'int' |
Press '?' to see keyboard shortcuts
Keyboard shortcuts:
1 | //===- MipsLegalizerInfo.cpp ------------------------------------*- C++ -*-===// | |||
2 | // | |||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | |||
4 | // See https://llvm.org/LICENSE.txt for license information. | |||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | |||
6 | // | |||
7 | //===----------------------------------------------------------------------===// | |||
8 | /// \file | |||
9 | /// This file implements the targeting of the Machinelegalizer class for Mips. | |||
10 | /// \todo This should be generated by TableGen. | |||
11 | //===----------------------------------------------------------------------===// | |||
12 | ||||
13 | #include "MipsLegalizerInfo.h" | |||
14 | #include "MipsTargetMachine.h" | |||
15 | #include "llvm/CodeGen/GlobalISel/LegalizerHelper.h" | |||
16 | #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h" | |||
17 | #include "llvm/IR/IntrinsicsMips.h" | |||
18 | ||||
19 | using namespace llvm; | |||
20 | ||||
// One legal (value type, pointer type, memory size) combination for
// G_LOAD/G_STORE, plus whether the subtarget may perform that access
// without natural alignment. Used as a table entry by
// CheckTy0Ty1MemSizeAlign below.
struct TypesAndMemOps {
  LLT ValTy;        // Loaded/stored value type (matched against Query.Types[0]).
  LLT PtrTy;        // Address operand type (matched against Query.Types[1]).
  unsigned MemSize; // Memory access size in bits.
  // True when this access is legal regardless of its alignment.
  bool SystemSupportsUnalignedAccess;
};
27 | ||||
28 | // Assumes power of 2 memory size. Subtargets that have only naturally-aligned | |||
29 | // memory access need to perform additional legalization here. | |||
30 | static bool isUnalignedMemmoryAccess(uint64_t MemSize, uint64_t AlignInBits) { | |||
31 | assert(isPowerOf2_64(MemSize) && "Expected power of 2 memory size")(static_cast <bool> (isPowerOf2_64(MemSize) && "Expected power of 2 memory size" ) ? void (0) : __assert_fail ("isPowerOf2_64(MemSize) && \"Expected power of 2 memory size\"" , "llvm/lib/Target/Mips/MipsLegalizerInfo.cpp", 31, __extension__ __PRETTY_FUNCTION__)); | |||
32 | assert(isPowerOf2_64(AlignInBits) && "Expected power of 2 align")(static_cast <bool> (isPowerOf2_64(AlignInBits) && "Expected power of 2 align") ? void (0) : __assert_fail ("isPowerOf2_64(AlignInBits) && \"Expected power of 2 align\"" , "llvm/lib/Target/Mips/MipsLegalizerInfo.cpp", 32, __extension__ __PRETTY_FUNCTION__)); | |||
33 | if (MemSize > AlignInBits) | |||
34 | return true; | |||
35 | return false; | |||
36 | } | |||
37 | ||||
38 | static bool | |||
39 | CheckTy0Ty1MemSizeAlign(const LegalityQuery &Query, | |||
40 | std::initializer_list<TypesAndMemOps> SupportedValues) { | |||
41 | unsigned QueryMemSize = Query.MMODescrs[0].MemoryTy.getSizeInBits(); | |||
42 | ||||
43 | // Non power of two memory access is never legal. | |||
44 | if (!isPowerOf2_64(QueryMemSize)) | |||
45 | return false; | |||
46 | ||||
47 | for (auto &Val : SupportedValues) { | |||
48 | if (Val.ValTy != Query.Types[0]) | |||
49 | continue; | |||
50 | if (Val.PtrTy != Query.Types[1]) | |||
51 | continue; | |||
52 | if (Val.MemSize != QueryMemSize) | |||
53 | continue; | |||
54 | if (!Val.SystemSupportsUnalignedAccess && | |||
55 | isUnalignedMemmoryAccess(QueryMemSize, Query.MMODescrs[0].AlignInBits)) | |||
56 | return false; | |||
57 | return true; | |||
58 | } | |||
59 | return false; | |||
60 | } | |||
61 | ||||
62 | static bool CheckTyN(unsigned N, const LegalityQuery &Query, | |||
63 | std::initializer_list<LLT> SupportedValues) { | |||
64 | return llvm::is_contained(SupportedValues, Query.Types[N]); | |||
65 | } | |||
66 | ||||
// Build the GlobalISel legalization rule table for the given MIPS subtarget.
// Scalars are generally legalized to s32 (s64 for FP and a few 64-bit ops);
// MSA 128-bit vector types are legal only when ST.hasMSA().
MipsLegalizerInfo::MipsLegalizerInfo(const MipsSubtarget &ST) {
  using namespace TargetOpcode;

  // Scalar, vector and pointer LLTs referenced by the rules below.
  const LLT s1 = LLT::scalar(1);
  const LLT s8 = LLT::scalar(8);
  const LLT s16 = LLT::scalar(16);
  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);
  const LLT v16s8 = LLT::fixed_vector(16, 8);
  const LLT v8s16 = LLT::fixed_vector(8, 16);
  const LLT v4s32 = LLT::fixed_vector(4, 32);
  const LLT v2s64 = LLT::fixed_vector(2, 64);
  const LLT p0 = LLT::pointer(0, 32);

  // Integer arithmetic: legal on s32 always, and on 128-bit vectors with MSA.
  getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL})
      .legalIf([=, &ST](const LegalityQuery &Query) {
        if (CheckTyN(0, Query, {s32}))
          return true;
        if (ST.hasMSA() && CheckTyN(0, Query, {v16s8, v8s16, v4s32, v2s64}))
          return true;
        return false;
      })
      .clampScalar(0, s32, s32);

  // Overflow-producing ops are expanded into plain arithmetic plus compares.
  getActionDefinitionsBuilder({G_UADDO, G_UADDE, G_USUBO, G_USUBE, G_UMULO})
      .lowerFor({{s32, s1}});

  getActionDefinitionsBuilder(G_UMULH)
      .legalFor({s32})
      .maxScalar(0, s32);

  // MIPS32r6 does not have alignment restrictions for memory access.
  // For MIPS32r5 and older memory access must be naturally-aligned i.e. aligned
  // to at least a multiple of its own size. There is however a two instruction
  // combination that performs 4 byte unaligned access (lwr/lwl and swl/swr)
  // therefore 4 byte load and store are legal and will use NoAlignRequirements.
  bool NoAlignRequirements = true;

  getActionDefinitionsBuilder({G_LOAD, G_STORE})
      .legalIf([=, &ST](const LegalityQuery &Query) {
        if (CheckTy0Ty1MemSizeAlign(
                Query, {{s32, p0, 8, NoAlignRequirements},
                        {s32, p0, 16, ST.systemSupportsUnalignedAccess()},
                        {s32, p0, 32, NoAlignRequirements},
                        {p0, p0, 32, NoAlignRequirements},
                        {s64, p0, 64, ST.systemSupportsUnalignedAccess()}}))
          return true;
        if (ST.hasMSA() && CheckTy0Ty1MemSizeAlign(
                               Query, {{v16s8, p0, 128, NoAlignRequirements},
                                       {v8s16, p0, 128, NoAlignRequirements},
                                       {v4s32, p0, 128, NoAlignRequirements},
                                       {v2s64, p0, 128, NoAlignRequirements}}))
          return true;
        return false;
      })
      // Custom lower scalar memory access, up to 8 bytes, for:
      // - non-power-of-2 MemSizes
      // - unaligned 2 or 8 byte MemSizes for MIPS32r5 and older
      .customIf([=, &ST](const LegalityQuery &Query) {
        if (!Query.Types[0].isScalar() || Query.Types[1] != p0 ||
            Query.Types[0] == s1)
          return false;

        unsigned Size = Query.Types[0].getSizeInBits();
        unsigned QueryMemSize = Query.MMODescrs[0].MemoryTy.getSizeInBits();
        assert(QueryMemSize <= Size && "Scalar can't hold MemSize");

        if (Size > 64 || QueryMemSize > 64)
          return false;

        // Any non-power-of-2 size is split in legalizeCustom().
        if (!isPowerOf2_64(Query.MMODescrs[0].MemoryTy.getSizeInBits()))
          return true;

        if (!ST.systemSupportsUnalignedAccess() &&
            isUnalignedMemmoryAccess(QueryMemSize,
                                     Query.MMODescrs[0].AlignInBits)) {
          assert(QueryMemSize != 32 && "4 byte load and store are legal");
          return true;
        }

        return false;
      })
      .minScalar(0, s32)
      .lower();

  getActionDefinitionsBuilder(G_IMPLICIT_DEF)
      .legalFor({s32, s64});

  getActionDefinitionsBuilder(G_UNMERGE_VALUES)
      .legalFor({{s32, s64}});

  getActionDefinitionsBuilder(G_MERGE_VALUES)
      .legalFor({{s64, s32}});

  // Extending loads from 8/16-bit memory into s32 are native (lb/lbu, lh/lhu).
  getActionDefinitionsBuilder({G_ZEXTLOAD, G_SEXTLOAD})
      .legalForTypesWithMemDesc({{s32, p0, s8, 8},
                                 {s32, p0, s16, 8}})
      .clampScalar(0, s32, s32);

  // Extensions and truncations never survive legalization: everything is
  // narrowed/widened to s32 instead.
  getActionDefinitionsBuilder({G_ZEXT, G_SEXT, G_ANYEXT})
      .legalIf([](const LegalityQuery &Query) { return false; })
      .maxScalar(0, s32);

  getActionDefinitionsBuilder(G_TRUNC)
      .legalIf([](const LegalityQuery &Query) { return false; })
      .maxScalar(1, s32);

  getActionDefinitionsBuilder(G_SELECT)
      .legalForCartesianProduct({p0, s32, s64}, {s32})
      .minScalar(0, s32)
      .minScalar(1, s32);

  getActionDefinitionsBuilder(G_BRCOND)
      .legalFor({s32})
      .minScalar(0, s32);

  getActionDefinitionsBuilder(G_BRJT)
      .legalFor({{p0, s32}});

  getActionDefinitionsBuilder(G_BRINDIRECT)
      .legalFor({p0});

  getActionDefinitionsBuilder(G_PHI)
      .legalFor({p0, s32, s64})
      .minScalar(0, s32);

  getActionDefinitionsBuilder({G_AND, G_OR, G_XOR})
      .legalFor({s32})
      .clampScalar(0, s32, s32);

  // Division/remainder: native on s32 and MSA vectors; 64-bit scalar goes to
  // a libcall.
  getActionDefinitionsBuilder({G_SDIV, G_SREM, G_UDIV, G_UREM})
      .legalIf([=, &ST](const LegalityQuery &Query) {
        if (CheckTyN(0, Query, {s32}))
          return true;
        if (ST.hasMSA() && CheckTyN(0, Query, {v16s8, v8s16, v4s32, v2s64}))
          return true;
        return false;
      })
      .minScalar(0, s32)
      .libcallFor({s64});

  getActionDefinitionsBuilder({G_SHL, G_ASHR, G_LSHR})
      .legalFor({{s32, s32}})
      .clampScalar(1, s32, s32)
      .clampScalar(0, s32, s32);

  getActionDefinitionsBuilder(G_ICMP)
      .legalForCartesianProduct({s32}, {s32, p0})
      .clampScalar(1, s32, s32)
      .minScalar(0, s32);

  getActionDefinitionsBuilder(G_CONSTANT)
      .legalFor({s32})
      .clampScalar(0, s32, s32);

  getActionDefinitionsBuilder({G_PTR_ADD, G_INTTOPTR})
      .legalFor({{p0, s32}});

  getActionDefinitionsBuilder(G_PTRTOINT)
      .legalFor({{s32, p0}});

  getActionDefinitionsBuilder(G_FRAME_INDEX)
      .legalFor({p0});

  getActionDefinitionsBuilder({G_GLOBAL_VALUE, G_JUMP_TABLE})
      .legalFor({p0});

  getActionDefinitionsBuilder(G_DYN_STACKALLOC)
      .lowerFor({{p0, s32}});

  getActionDefinitionsBuilder(G_VASTART)
      .legalFor({p0});

  // G_BSWAP is a single instruction only from MIPS32r2 on; older cores get
  // the generic shift/mask expansion.
  getActionDefinitionsBuilder(G_BSWAP)
      .legalIf([=, &ST](const LegalityQuery &Query) {
        if (ST.hasMips32r2() && CheckTyN(0, Query, {s32}))
          return true;
        return false;
      })
      .lowerIf([=, &ST](const LegalityQuery &Query) {
        if (!ST.hasMips32r2() && CheckTyN(0, Query, {s32}))
          return true;
        return false;
      })
      .maxScalar(0, s32);

  getActionDefinitionsBuilder(G_BITREVERSE)
      .lowerFor({s32})
      .maxScalar(0, s32);

  getActionDefinitionsBuilder(G_CTLZ)
      .legalFor({{s32, s32}})
      .maxScalar(0, s32)
      .maxScalar(1, s32);
  getActionDefinitionsBuilder(G_CTLZ_ZERO_UNDEF)
      .lowerFor({{s32, s32}});

  getActionDefinitionsBuilder(G_CTTZ)
      .lowerFor({{s32, s32}})
      .maxScalar(0, s32)
      .maxScalar(1, s32);
  getActionDefinitionsBuilder(G_CTTZ_ZERO_UNDEF)
      .lowerFor({{s32, s32}, {s64, s64}});

  getActionDefinitionsBuilder(G_CTPOP)
      .lowerFor({{s32, s32}})
      .clampScalar(0, s32, s32)
      .clampScalar(1, s32, s32);

  // FP instructions
  getActionDefinitionsBuilder(G_FCONSTANT)
      .legalFor({s32, s64});

  getActionDefinitionsBuilder({G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FABS, G_FSQRT})
      .legalIf([=, &ST](const LegalityQuery &Query) {
        if (CheckTyN(0, Query, {s32, s64}))
          return true;
        if (ST.hasMSA() && CheckTyN(0, Query, {v16s8, v8s16, v4s32, v2s64}))
          return true;
        return false;
      });

  getActionDefinitionsBuilder(G_FCMP)
      .legalFor({{s32, s32}, {s32, s64}})
      .minScalar(0, s32);

  getActionDefinitionsBuilder({G_FCEIL, G_FFLOOR})
      .libcallFor({s32, s64});

  getActionDefinitionsBuilder(G_FPEXT)
      .legalFor({{s64, s32}});

  getActionDefinitionsBuilder(G_FPTRUNC)
      .legalFor({{s32, s64}});

  // FP to int conversion instructions
  getActionDefinitionsBuilder(G_FPTOSI)
      .legalForCartesianProduct({s32}, {s64, s32})
      .libcallForCartesianProduct({s64}, {s64, s32})
      .minScalar(0, s32);

  getActionDefinitionsBuilder(G_FPTOUI)
      .libcallForCartesianProduct({s64}, {s64, s32})
      .lowerForCartesianProduct({s32}, {s64, s32})
      .minScalar(0, s32);

  // Int to FP conversion instructions
  getActionDefinitionsBuilder(G_SITOFP)
      .legalForCartesianProduct({s64, s32}, {s32})
      .libcallForCartesianProduct({s64, s32}, {s64})
      .minScalar(1, s32);

  // Unsigned-to-FP from s32 uses the 2^52 bit-mask trick in legalizeCustom().
  getActionDefinitionsBuilder(G_UITOFP)
      .libcallForCartesianProduct({s64, s32}, {s64})
      .customForCartesianProduct({s64, s32}, {s32})
      .minScalar(1, s32);

  getActionDefinitionsBuilder(G_SEXT_INREG).lower();

  getActionDefinitionsBuilder({G_MEMCPY, G_MEMMOVE, G_MEMSET}).libcall();

  getLegacyLegalizerInfo().computeTables();
  verify(*ST.getInstrInfo());
}
331 | ||||
332 | bool MipsLegalizerInfo::legalizeCustom(LegalizerHelper &Helper, | |||
333 | MachineInstr &MI) const { | |||
334 | using namespace TargetOpcode; | |||
335 | ||||
336 | MachineIRBuilder &MIRBuilder = Helper.MIRBuilder; | |||
337 | MachineRegisterInfo &MRI = *MIRBuilder.getMRI(); | |||
338 | ||||
339 | const LLT s32 = LLT::scalar(32); | |||
340 | const LLT s64 = LLT::scalar(64); | |||
341 | ||||
342 | switch (MI.getOpcode()) { | |||
| ||||
343 | case G_LOAD: | |||
344 | case G_STORE: { | |||
345 | unsigned MemSize = (**MI.memoperands_begin()).getSize(); | |||
346 | Register Val = MI.getOperand(0).getReg(); | |||
347 | unsigned Size = MRI.getType(Val).getSizeInBits(); | |||
348 | ||||
349 | MachineMemOperand *MMOBase = *MI.memoperands_begin(); | |||
350 | ||||
351 | assert(MemSize <= 8 && "MemSize is too large")(static_cast <bool> (MemSize <= 8 && "MemSize is too large" ) ? void (0) : __assert_fail ("MemSize <= 8 && \"MemSize is too large\"" , "llvm/lib/Target/Mips/MipsLegalizerInfo.cpp", 351, __extension__ __PRETTY_FUNCTION__)); | |||
352 | assert(Size <= 64 && "Scalar size is too large")(static_cast <bool> (Size <= 64 && "Scalar size is too large" ) ? void (0) : __assert_fail ("Size <= 64 && \"Scalar size is too large\"" , "llvm/lib/Target/Mips/MipsLegalizerInfo.cpp", 352, __extension__ __PRETTY_FUNCTION__)); | |||
353 | ||||
354 | // Split MemSize into two, P2HalfMemSize is largest power of two smaller | |||
355 | // then MemSize. e.g. 8 = 4 + 4 , 6 = 4 + 2, 3 = 2 + 1. | |||
356 | unsigned P2HalfMemSize, RemMemSize; | |||
357 | if (isPowerOf2_64(MemSize)) { | |||
358 | P2HalfMemSize = RemMemSize = MemSize / 2; | |||
359 | } else { | |||
360 | P2HalfMemSize = 1 << Log2_32(MemSize); | |||
| ||||
361 | RemMemSize = MemSize - P2HalfMemSize; | |||
362 | } | |||
363 | ||||
364 | Register BaseAddr = MI.getOperand(1).getReg(); | |||
365 | LLT PtrTy = MRI.getType(BaseAddr); | |||
366 | MachineFunction &MF = MIRBuilder.getMF(); | |||
367 | ||||
368 | auto P2HalfMemOp = MF.getMachineMemOperand(MMOBase, 0, P2HalfMemSize); | |||
369 | auto RemMemOp = MF.getMachineMemOperand(MMOBase, P2HalfMemSize, RemMemSize); | |||
370 | ||||
371 | if (MI.getOpcode() == G_STORE) { | |||
372 | // Widen Val to s32 or s64 in order to create legal G_LSHR or G_UNMERGE. | |||
373 | if (Size < 32) | |||
374 | Val = MIRBuilder.buildAnyExt(s32, Val).getReg(0); | |||
375 | if (Size > 32 && Size < 64) | |||
376 | Val = MIRBuilder.buildAnyExt(s64, Val).getReg(0); | |||
377 | ||||
378 | auto C_P2HalfMemSize = MIRBuilder.buildConstant(s32, P2HalfMemSize); | |||
379 | auto Addr = MIRBuilder.buildPtrAdd(PtrTy, BaseAddr, C_P2HalfMemSize); | |||
380 | ||||
381 | if (MI.getOpcode() == G_STORE && MemSize <= 4) { | |||
382 | MIRBuilder.buildStore(Val, BaseAddr, *P2HalfMemOp); | |||
383 | auto C_P2Half_InBits = MIRBuilder.buildConstant(s32, P2HalfMemSize * 8); | |||
384 | auto Shift = MIRBuilder.buildLShr(s32, Val, C_P2Half_InBits); | |||
385 | MIRBuilder.buildStore(Shift, Addr, *RemMemOp); | |||
386 | } else { | |||
387 | auto Unmerge = MIRBuilder.buildUnmerge(s32, Val); | |||
388 | MIRBuilder.buildStore(Unmerge.getReg(0), BaseAddr, *P2HalfMemOp); | |||
389 | MIRBuilder.buildStore(Unmerge.getReg(1), Addr, *RemMemOp); | |||
390 | } | |||
391 | } | |||
392 | ||||
393 | if (MI.getOpcode() == G_LOAD) { | |||
394 | ||||
395 | if (MemSize <= 4) { | |||
396 | // This is anyextending load, use 4 byte lwr/lwl. | |||
397 | auto *Load4MMO = MF.getMachineMemOperand(MMOBase, 0, 4); | |||
398 | ||||
399 | if (Size == 32) | |||
400 | MIRBuilder.buildLoad(Val, BaseAddr, *Load4MMO); | |||
401 | else { | |||
402 | auto Load = MIRBuilder.buildLoad(s32, BaseAddr, *Load4MMO); | |||
403 | MIRBuilder.buildTrunc(Val, Load.getReg(0)); | |||
404 | } | |||
405 | ||||
406 | } else { | |||
407 | auto C_P2HalfMemSize = MIRBuilder.buildConstant(s32, P2HalfMemSize); | |||
408 | auto Addr = MIRBuilder.buildPtrAdd(PtrTy, BaseAddr, C_P2HalfMemSize); | |||
409 | ||||
410 | auto Load_P2Half = MIRBuilder.buildLoad(s32, BaseAddr, *P2HalfMemOp); | |||
411 | auto Load_Rem = MIRBuilder.buildLoad(s32, Addr, *RemMemOp); | |||
412 | ||||
413 | if (Size == 64) | |||
414 | MIRBuilder.buildMerge(Val, {Load_P2Half, Load_Rem}); | |||
415 | else { | |||
416 | auto Merge = MIRBuilder.buildMerge(s64, {Load_P2Half, Load_Rem}); | |||
417 | MIRBuilder.buildTrunc(Val, Merge); | |||
418 | } | |||
419 | } | |||
420 | } | |||
421 | MI.eraseFromParent(); | |||
422 | break; | |||
423 | } | |||
424 | case G_UITOFP: { | |||
425 | Register Dst = MI.getOperand(0).getReg(); | |||
426 | Register Src = MI.getOperand(1).getReg(); | |||
427 | LLT DstTy = MRI.getType(Dst); | |||
428 | LLT SrcTy = MRI.getType(Src); | |||
429 | ||||
430 | if (SrcTy != s32) | |||
431 | return false; | |||
432 | if (DstTy != s32 && DstTy != s64) | |||
433 | return false; | |||
434 | ||||
435 | // Let 0xABCDEFGH be given unsigned in MI.getOperand(1). First let's convert | |||
436 | // unsigned to double. Mantissa has 52 bits so we use following trick: | |||
437 | // First make floating point bit mask 0x43300000ABCDEFGH. | |||
438 | // Mask represents 2^52 * 0x1.00000ABCDEFGH i.e. 0x100000ABCDEFGH.0 . | |||
439 | // Next, subtract 2^52 * 0x1.0000000000000 i.e. 0x10000000000000.0 from it. | |||
440 | // Done. Trunc double to float if needed. | |||
441 | ||||
442 | auto C_HiMask = MIRBuilder.buildConstant(s32, UINT32_C(0x43300000)0x43300000U); | |||
443 | auto Bitcast = MIRBuilder.buildMerge(s64, {Src, C_HiMask.getReg(0)}); | |||
444 | ||||
445 | MachineInstrBuilder TwoP52FP = MIRBuilder.buildFConstant( | |||
446 | s64, BitsToDouble(UINT64_C(0x4330000000000000)0x4330000000000000UL)); | |||
447 | ||||
448 | if (DstTy == s64) | |||
449 | MIRBuilder.buildFSub(Dst, Bitcast, TwoP52FP); | |||
450 | else { | |||
451 | MachineInstrBuilder ResF64 = MIRBuilder.buildFSub(s64, Bitcast, TwoP52FP); | |||
452 | MIRBuilder.buildFPTrunc(Dst, ResF64); | |||
453 | } | |||
454 | ||||
455 | MI.eraseFromParent(); | |||
456 | break; | |||
457 | } | |||
458 | default: | |||
459 | return false; | |||
460 | } | |||
461 | ||||
462 | return true; | |||
463 | } | |||
464 | ||||
465 | static bool SelectMSA3OpIntrinsic(MachineInstr &MI, unsigned Opcode, | |||
466 | MachineIRBuilder &MIRBuilder, | |||
467 | const MipsSubtarget &ST) { | |||
468 | assert(ST.hasMSA() && "MSA intrinsic not supported on target without MSA.")(static_cast <bool> (ST.hasMSA() && "MSA intrinsic not supported on target without MSA." ) ? void (0) : __assert_fail ("ST.hasMSA() && \"MSA intrinsic not supported on target without MSA.\"" , "llvm/lib/Target/Mips/MipsLegalizerInfo.cpp", 468, __extension__ __PRETTY_FUNCTION__)); | |||
469 | if (!MIRBuilder.buildInstr(Opcode) | |||
470 | .add(MI.getOperand(0)) | |||
471 | .add(MI.getOperand(2)) | |||
472 | .add(MI.getOperand(3)) | |||
473 | .constrainAllUses(MIRBuilder.getTII(), *ST.getRegisterInfo(), | |||
474 | *ST.getRegBankInfo())) | |||
475 | return false; | |||
476 | MI.eraseFromParent(); | |||
477 | return true; | |||
478 | } | |||
479 | ||||
480 | static bool MSA3OpIntrinsicToGeneric(MachineInstr &MI, unsigned Opcode, | |||
481 | MachineIRBuilder &MIRBuilder, | |||
482 | const MipsSubtarget &ST) { | |||
483 | assert(ST.hasMSA() && "MSA intrinsic not supported on target without MSA.")(static_cast <bool> (ST.hasMSA() && "MSA intrinsic not supported on target without MSA." ) ? void (0) : __assert_fail ("ST.hasMSA() && \"MSA intrinsic not supported on target without MSA.\"" , "llvm/lib/Target/Mips/MipsLegalizerInfo.cpp", 483, __extension__ __PRETTY_FUNCTION__)); | |||
484 | MIRBuilder.buildInstr(Opcode) | |||
485 | .add(MI.getOperand(0)) | |||
486 | .add(MI.getOperand(2)) | |||
487 | .add(MI.getOperand(3)); | |||
488 | MI.eraseFromParent(); | |||
489 | return true; | |||
490 | } | |||
491 | ||||
492 | static bool MSA2OpIntrinsicToGeneric(MachineInstr &MI, unsigned Opcode, | |||
493 | MachineIRBuilder &MIRBuilder, | |||
494 | const MipsSubtarget &ST) { | |||
495 | assert(ST.hasMSA() && "MSA intrinsic not supported on target without MSA.")(static_cast <bool> (ST.hasMSA() && "MSA intrinsic not supported on target without MSA." ) ? void (0) : __assert_fail ("ST.hasMSA() && \"MSA intrinsic not supported on target without MSA.\"" , "llvm/lib/Target/Mips/MipsLegalizerInfo.cpp", 495, __extension__ __PRETTY_FUNCTION__)); | |||
496 | MIRBuilder.buildInstr(Opcode) | |||
497 | .add(MI.getOperand(0)) | |||
498 | .add(MI.getOperand(2)); | |||
499 | MI.eraseFromParent(); | |||
500 | return true; | |||
501 | } | |||
502 | ||||
// Legalize target intrinsics: llvm.trap, llvm.va_copy, and the MSA vector
// intrinsics (mapped either to generic opcodes or directly to MSA
// instructions). Unhandled intrinsics fall through and report success,
// leaving them for later selection.
bool MipsLegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
                                          MachineInstr &MI) const {
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  const MipsSubtarget &ST =
      static_cast<const MipsSubtarget &>(MI.getMF()->getSubtarget());
  const MipsInstrInfo &TII = *ST.getInstrInfo();
  const MipsRegisterInfo &TRI = *ST.getRegisterInfo();
  const RegisterBankInfo &RBI = *ST.getRegBankInfo();

  switch (MI.getIntrinsicID()) {
  case Intrinsic::trap: {
    // llvm.trap lowers straight to the MIPS TRAP instruction.
    MachineInstr *Trap = MIRBuilder.buildInstr(Mips::TRAP);
    MI.eraseFromParent();
    return constrainSelectedInstRegOperands(*Trap, TII, TRI, RBI);
  }
  case Intrinsic::vacopy: {
    // O32 va_list is a single pointer: copy it with a 4-byte-aligned pointer
    // load followed by a store.
    MachinePointerInfo MPO;
    LLT PtrTy = LLT::pointer(0, 32);
    auto Tmp =
        MIRBuilder.buildLoad(PtrTy, MI.getOperand(2),
                             *MI.getMF()->getMachineMemOperand(
                                 MPO, MachineMemOperand::MOLoad, PtrTy, Align(4)));
    MIRBuilder.buildStore(Tmp, MI.getOperand(1),
                          *MI.getMF()->getMachineMemOperand(
                              MPO, MachineMemOperand::MOStore, PtrTy, Align(4)));
    MI.eraseFromParent();
    return true;
  }
  // Vector add: map to G_ADD; immediate forms go straight to MSA instructions.
  case Intrinsic::mips_addv_b:
  case Intrinsic::mips_addv_h:
  case Intrinsic::mips_addv_w:
  case Intrinsic::mips_addv_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_ADD, MIRBuilder, ST);
  case Intrinsic::mips_addvi_b:
    return SelectMSA3OpIntrinsic(MI, Mips::ADDVI_B, MIRBuilder, ST);
  case Intrinsic::mips_addvi_h:
    return SelectMSA3OpIntrinsic(MI, Mips::ADDVI_H, MIRBuilder, ST);
  case Intrinsic::mips_addvi_w:
    return SelectMSA3OpIntrinsic(MI, Mips::ADDVI_W, MIRBuilder, ST);
  case Intrinsic::mips_addvi_d:
    return SelectMSA3OpIntrinsic(MI, Mips::ADDVI_D, MIRBuilder, ST);
  // Vector subtract: same pattern as add.
  case Intrinsic::mips_subv_b:
  case Intrinsic::mips_subv_h:
  case Intrinsic::mips_subv_w:
  case Intrinsic::mips_subv_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_SUB, MIRBuilder, ST);
  case Intrinsic::mips_subvi_b:
    return SelectMSA3OpIntrinsic(MI, Mips::SUBVI_B, MIRBuilder, ST);
  case Intrinsic::mips_subvi_h:
    return SelectMSA3OpIntrinsic(MI, Mips::SUBVI_H, MIRBuilder, ST);
  case Intrinsic::mips_subvi_w:
    return SelectMSA3OpIntrinsic(MI, Mips::SUBVI_W, MIRBuilder, ST);
  case Intrinsic::mips_subvi_d:
    return SelectMSA3OpIntrinsic(MI, Mips::SUBVI_D, MIRBuilder, ST);
  // Vector multiply / divide / remainder map directly to generic opcodes.
  case Intrinsic::mips_mulv_b:
  case Intrinsic::mips_mulv_h:
  case Intrinsic::mips_mulv_w:
  case Intrinsic::mips_mulv_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_MUL, MIRBuilder, ST);
  case Intrinsic::mips_div_s_b:
  case Intrinsic::mips_div_s_h:
  case Intrinsic::mips_div_s_w:
  case Intrinsic::mips_div_s_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_SDIV, MIRBuilder, ST);
  case Intrinsic::mips_mod_s_b:
  case Intrinsic::mips_mod_s_h:
  case Intrinsic::mips_mod_s_w:
  case Intrinsic::mips_mod_s_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_SREM, MIRBuilder, ST);
  case Intrinsic::mips_div_u_b:
  case Intrinsic::mips_div_u_h:
  case Intrinsic::mips_div_u_w:
  case Intrinsic::mips_div_u_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_UDIV, MIRBuilder, ST);
  case Intrinsic::mips_mod_u_b:
  case Intrinsic::mips_mod_u_h:
  case Intrinsic::mips_mod_u_w:
  case Intrinsic::mips_mod_u_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_UREM, MIRBuilder, ST);
  // Vector floating-point arithmetic maps to the generic FP opcodes.
  case Intrinsic::mips_fadd_w:
  case Intrinsic::mips_fadd_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_FADD, MIRBuilder, ST);
  case Intrinsic::mips_fsub_w:
  case Intrinsic::mips_fsub_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_FSUB, MIRBuilder, ST);
  case Intrinsic::mips_fmul_w:
  case Intrinsic::mips_fmul_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_FMUL, MIRBuilder, ST);
  case Intrinsic::mips_fdiv_w:
  case Intrinsic::mips_fdiv_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_FDIV, MIRBuilder, ST);
  // fmax_a has no generic equivalent; select the MSA instruction directly.
  case Intrinsic::mips_fmax_a_w:
    return SelectMSA3OpIntrinsic(MI, Mips::FMAX_A_W, MIRBuilder, ST);
  case Intrinsic::mips_fmax_a_d:
    return SelectMSA3OpIntrinsic(MI, Mips::FMAX_A_D, MIRBuilder, ST);
  case Intrinsic::mips_fsqrt_w:
    return MSA2OpIntrinsicToGeneric(MI, TargetOpcode::G_FSQRT, MIRBuilder, ST);
  case Intrinsic::mips_fsqrt_d:
    return MSA2OpIntrinsicToGeneric(MI, TargetOpcode::G_FSQRT, MIRBuilder, ST);
  default:
    break;
  }
  return true;
}
1 | //===-- llvm/Support/MathExtras.h - Useful math functions -------*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file contains some functions that are useful for math stuff. |
10 | // |
11 | //===----------------------------------------------------------------------===// |
12 | |
13 | #ifndef LLVM_SUPPORT_MATHEXTRAS_H |
14 | #define LLVM_SUPPORT_MATHEXTRAS_H |
15 | |
16 | #include "llvm/Support/Compiler.h" |
17 | #include <cassert> |
18 | #include <climits> |
19 | #include <cmath> |
20 | #include <cstdint> |
21 | #include <cstring> |
22 | #include <limits> |
23 | #include <type_traits> |
24 | |
25 | #ifdef __ANDROID_NDK__ |
26 | #include <android/api-level.h> |
27 | #endif |
28 | |
29 | #ifdef _MSC_VER |
30 | // Declare these intrinsics manually rather including intrin.h. It's very |
31 | // expensive, and MathExtras.h is popular. |
32 | // #include <intrin.h> |
33 | extern "C" { |
34 | unsigned char _BitScanForward(unsigned long *_Index, unsigned long _Mask); |
35 | unsigned char _BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask); |
36 | unsigned char _BitScanReverse(unsigned long *_Index, unsigned long _Mask); |
37 | unsigned char _BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask); |
38 | } |
39 | #endif |
40 | |
41 | namespace llvm { |
42 | |
/// The behavior an operation has on an input of 0, as requested by callers of
/// the bit-counting helpers below (countTrailingZeros etc.).
enum ZeroBehavior {
  /// The returned value is undefined.
  ZB_Undefined,
  /// The returned value is numeric_limits<T>::max()
  ZB_Max,
  /// The returned value is numeric_limits<T>::digits
  ZB_Width
};
52 | |
/// Mathematical constants, in double and float precision. Each comment gives
/// the nearest hexadecimal FP literal and, where applicable, the OEIS entry.
namespace numbers {
// TODO: Track C++20 std::numbers.
// TODO: Favor using the hexadecimal FP constants (requires C++17).
constexpr double e          = 2.7182818284590452354, // (0x1.5bf0a8b145749P+1) https://oeis.org/A001113
                 egamma     = .57721566490153286061, // (0x1.2788cfc6fb619P-1) https://oeis.org/A001620
                 ln2        = .69314718055994530942, // (0x1.62e42fefa39efP-1) https://oeis.org/A002162
                 ln10       = 2.3025850929940456840, // (0x1.24bb1bbb55516P+1) https://oeis.org/A002392
                 log2e      = 1.4426950408889634074, // (0x1.71547652b82feP+0)
                 log10e     = .43429448190325182765, // (0x1.bcb7b1526e50eP-2)
                 pi         = 3.1415926535897932385, // (0x1.921fb54442d18P+1) https://oeis.org/A000796
                 inv_pi     = .31830988618379067154, // (0x1.45f306bc9c883P-2) https://oeis.org/A049541
                 sqrtpi     = 1.7724538509055160273, // (0x1.c5bf891b4ef6bP+0) https://oeis.org/A002161
                 inv_sqrtpi = .56418958354775628695, // (0x1.20dd750429b6dP-1) https://oeis.org/A087197
                 sqrt2      = 1.4142135623730950488, // (0x1.6a09e667f3bcdP+0) https://oeis.org/A002193
                 inv_sqrt2  = .70710678118654752440, // (0x1.6a09e667f3bcdP-1)
                 sqrt3      = 1.7320508075688772935, // (0x1.bb67ae8584caaP+0) https://oeis.org/A002194
                 inv_sqrt3  = .57735026918962576451, // (0x1.279a74590331cP-1)
                 phi        = 1.6180339887498948482; // (0x1.9e3779b97f4a8P+0) https://oeis.org/A001622
constexpr float ef          = 2.71828183F, // (0x1.5bf0a8P+1) https://oeis.org/A001113
                egammaf     = .577215665F, // (0x1.2788d0P-1) https://oeis.org/A001620
                ln2f        = .693147181F, // (0x1.62e430P-1) https://oeis.org/A002162
                ln10f       = 2.30258509F, // (0x1.26bb1cP+1) https://oeis.org/A002392
                log2ef      = 1.44269504F, // (0x1.715476P+0)
                log10ef     = .434294482F, // (0x1.bcb7b2P-2)
                pif         = 3.14159265F, // (0x1.921fb6P+1) https://oeis.org/A000796
                inv_pif     = .318309886F, // (0x1.45f306P-2) https://oeis.org/A049541
                sqrtpif     = 1.77245385F, // (0x1.c5bf8aP+0) https://oeis.org/A002161
                inv_sqrtpif = .564189584F, // (0x1.20dd76P-1) https://oeis.org/A087197
                sqrt2f      = 1.41421356F, // (0x1.6a09e6P+0) https://oeis.org/A002193
                inv_sqrt2f  = .707106781F, // (0x1.6a09e6P-1)
                sqrt3f      = 1.73205081F, // (0x1.bb67aeP+0) https://oeis.org/A002194
                inv_sqrt3f  = .577350269F, // (0x1.279a74P-1)
                phif        = 1.61803399F; // (0x1.9e377aP+0) https://oeis.org/A001622
} // namespace numbers
88 | |
89 | namespace detail { |
90 | template <typename T, std::size_t SizeOfT> struct TrailingZerosCounter { |
91 | static unsigned count(T Val, ZeroBehavior) { |
92 | if (!Val) |
93 | return std::numeric_limits<T>::digits; |
94 | if (Val & 0x1) |
95 | return 0; |
96 | |
97 | // Bisection method. |
98 | unsigned ZeroBits = 0; |
99 | T Shift = std::numeric_limits<T>::digits >> 1; |
100 | T Mask = std::numeric_limits<T>::max() >> Shift; |
101 | while (Shift) { |
102 | if ((Val & Mask) == 0) { |
103 | Val >>= Shift; |
104 | ZeroBits |= Shift; |
105 | } |
106 | Shift >>= 1; |
107 | Mask >>= Shift; |
108 | } |
109 | return ZeroBits; |
110 | } |
111 | }; |
112 | |
113 | #if defined(__GNUC__4) || defined(_MSC_VER) |
114 | template <typename T> struct TrailingZerosCounter<T, 4> { |
115 | static unsigned count(T Val, ZeroBehavior ZB) { |
116 | if (ZB != ZB_Undefined && Val == 0) |
117 | return 32; |
118 | |
119 | #if __has_builtin(__builtin_ctz)1 || defined(__GNUC__4) |
120 | return __builtin_ctz(Val); |
121 | #elif defined(_MSC_VER) |
122 | unsigned long Index; |
123 | _BitScanForward(&Index, Val); |
124 | return Index; |
125 | #endif |
126 | } |
127 | }; |
128 | |
129 | #if !defined(_MSC_VER) || defined(_M_X64) |
130 | template <typename T> struct TrailingZerosCounter<T, 8> { |
131 | static unsigned count(T Val, ZeroBehavior ZB) { |
132 | if (ZB != ZB_Undefined && Val == 0) |
133 | return 64; |
134 | |
135 | #if __has_builtin(__builtin_ctzll)1 || defined(__GNUC__4) |
136 | return __builtin_ctzll(Val); |
137 | #elif defined(_MSC_VER) |
138 | unsigned long Index; |
139 | _BitScanForward64(&Index, Val); |
140 | return Index; |
141 | #endif |
142 | } |
143 | }; |
144 | #endif |
145 | #endif |
146 | } // namespace detail |
147 | |
/// Count number of 0's from the least significant bit to the most
/// stopping at the first 1.
///
/// Only unsigned integral types are allowed.
///
/// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are
///   valid arguments.
/// \returns the zero count; for Val == 0 with ZB_Width, the type's bit width.
template <typename T>
unsigned countTrailingZeros(T Val, ZeroBehavior ZB = ZB_Width) {
  static_assert(std::numeric_limits<T>::is_integer &&
                    !std::numeric_limits<T>::is_signed,
                "Only unsigned integral types are allowed.");
  // Dispatch on sizeof(T) so 32/64-bit widths hit the intrinsic versions.
  return llvm::detail::TrailingZerosCounter<T, sizeof(T)>::count(Val, ZB);
}
162 | |
163 | namespace detail { |
164 | template <typename T, std::size_t SizeOfT> struct LeadingZerosCounter { |
165 | static unsigned count(T Val, ZeroBehavior) { |
166 | if (!Val) |
167 | return std::numeric_limits<T>::digits; |
168 | |
169 | // Bisection method. |
170 | unsigned ZeroBits = 0; |
171 | for (T Shift = std::numeric_limits<T>::digits >> 1; Shift; Shift >>= 1) { |
172 | T Tmp = Val >> Shift; |
173 | if (Tmp) |
174 | Val = Tmp; |
175 | else |
176 | ZeroBits |= Shift; |
177 | } |
178 | return ZeroBits; |
179 | } |
180 | }; |
181 | |
182 | #if defined(__GNUC__4) || defined(_MSC_VER) |
183 | template <typename T> struct LeadingZerosCounter<T, 4> { |
184 | static unsigned count(T Val, ZeroBehavior ZB) { |
185 | if (ZB != ZB_Undefined && Val == 0) |
186 | return 32; |
187 | |
188 | #if __has_builtin(__builtin_clz)1 || defined(__GNUC__4) |
189 | return __builtin_clz(Val); |
190 | #elif defined(_MSC_VER) |
191 | unsigned long Index; |
192 | _BitScanReverse(&Index, Val); |
193 | return Index ^ 31; |
194 | #endif |
195 | } |
196 | }; |
197 | |
198 | #if !defined(_MSC_VER) || defined(_M_X64) |
199 | template <typename T> struct LeadingZerosCounter<T, 8> { |
200 | static unsigned count(T Val, ZeroBehavior ZB) { |
201 | if (ZB != ZB_Undefined && Val == 0) |
202 | return 64; |
203 | |
204 | #if __has_builtin(__builtin_clzll)1 || defined(__GNUC__4) |
205 | return __builtin_clzll(Val); |
206 | #elif defined(_MSC_VER) |
207 | unsigned long Index; |
208 | _BitScanReverse64(&Index, Val); |
209 | return Index ^ 63; |
210 | #endif |
211 | } |
212 | }; |
213 | #endif |
214 | #endif |
215 | } // namespace detail |
216 | |
/// Count number of 0's from the most significant bit to the least
/// stopping at the first 1.
///
/// Only unsigned integral types are allowed.
///
/// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are
///   valid arguments.
/// \returns the zero count; for Val == 0 with ZB_Width, the type's bit width.
template <typename T>
unsigned countLeadingZeros(T Val, ZeroBehavior ZB = ZB_Width) {
  static_assert(std::numeric_limits<T>::is_integer &&
                    !std::numeric_limits<T>::is_signed,
                "Only unsigned integral types are allowed.");
  // Dispatch on sizeof(T) so 32/64-bit widths hit the intrinsic versions.
  return llvm::detail::LeadingZerosCounter<T, sizeof(T)>::count(Val, ZB);
}
231 | |
/// Get the index of the first set bit starting from the least
/// significant bit.
///
/// Only unsigned integral types are allowed.
///
/// \param ZB the behavior on an input of 0. Only ZB_Max and ZB_Undefined are
///   valid arguments.
/// \returns the 0-based bit index, or numeric_limits<T>::max() for ZB_Max
///   on a zero input.
template <typename T> T findFirstSet(T Val, ZeroBehavior ZB = ZB_Max) {
  if (ZB == ZB_Max && Val == 0)
    return std::numeric_limits<T>::max();

  // Zero was handled above, so the trailing-zero count is well-defined.
  return countTrailingZeros(Val, ZB_Undefined);
}
245 | |
/// Create a bitmask with the N right-most bits set to 1, and all other
/// bits set to 0. Only unsigned types are allowed.
///
/// \param N number of low bits to set; must be <= the bit width of T.
template <typename T> T maskTrailingOnes(unsigned N) {
  static_assert(std::is_unsigned<T>::value, "Invalid type!");
  const unsigned Bits = CHAR_BIT * sizeof(T);
  assert(N <= Bits && "Invalid bit index");
  // N == 0 is special-cased: T(-1) >> Bits would be undefined behavior.
  return N == 0 ? 0 : (T(-1) >> (Bits - N));
}
254 | |
255 | /// Create a bitmask with the N left-most bits set to 1, and all other |
256 | /// bits set to 0. Only unsigned types are allowed. |
257 | template <typename T> T maskLeadingOnes(unsigned N) { |
258 | return ~maskTrailingOnes<T>(CHAR_BIT8 * sizeof(T) - N); |
259 | } |
260 | |
261 | /// Create a bitmask with the N right-most bits set to 0, and all other |
262 | /// bits set to 1. Only unsigned types are allowed. |
263 | template <typename T> T maskTrailingZeros(unsigned N) { |
264 | return maskLeadingOnes<T>(CHAR_BIT8 * sizeof(T) - N); |
265 | } |
266 | |
267 | /// Create a bitmask with the N left-most bits set to 0, and all other |
268 | /// bits set to 1. Only unsigned types are allowed. |
269 | template <typename T> T maskLeadingZeros(unsigned N) { |
270 | return maskTrailingOnes<T>(CHAR_BIT8 * sizeof(T) - N); |
271 | } |
272 | |
/// Get the index of the last set bit starting from the least
/// significant bit.
///
/// Only unsigned integral types are allowed.
///
/// \param ZB the behavior on an input of 0. Only ZB_Max and ZB_Undefined are
///   valid arguments.
/// \returns the 0-based index of the highest set bit, or
///   numeric_limits<T>::max() for ZB_Max on a zero input.
template <typename T> T findLastSet(T Val, ZeroBehavior ZB = ZB_Max) {
  if (ZB == ZB_Max && Val == 0)
    return std::numeric_limits<T>::max();

  // Use ^ instead of - because both gcc and llvm can remove the associated ^
  // in the __builtin_clz intrinsic on x86.
  return countLeadingZeros(Val, ZB_Undefined) ^
         (std::numeric_limits<T>::digits - 1);
}
289 | |
/// Macro compressed bit reversal table for 256 bits.
///
/// http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
/// BitReverseTable256[i] is the byte i with its 8 bits reversed; the nested
/// R2/R4/R6 macros expand to all 256 entries at compile time.
static const unsigned char BitReverseTable256[256] = {
#define R2(n) n, n + 2 * 64, n + 1 * 64, n + 3 * 64
#define R4(n) R2(n), R2(n + 2 * 16), R2(n + 1 * 16), R2(n + 3 * 16)
#define R6(n) R4(n), R4(n + 2 * 4), R4(n + 1 * 4), R4(n + 3 * 4)
  R6(0), R6(2), R6(1), R6(3)
#undef R2
#undef R4
#undef R6
};
302 | |
/// Reverse the bits in \p Val.
/// Reverses byte order while translating each byte through the bit-reversal
/// table above, which reverses the full bit string of an integer-sized value.
template <typename T>
T reverseBits(T Val) {
  unsigned char in[sizeof(Val)];
  unsigned char out[sizeof(Val)];
  std::memcpy(in, &Val, sizeof(Val));
  // Byte i of the input lands, bit-reversed, at mirrored position in out.
  for (unsigned i = 0; i < sizeof(Val); ++i)
    out[(sizeof(Val) - i) - 1] = BitReverseTable256[in[i]];
  std::memcpy(&Val, out, sizeof(Val));
  return Val;
}
314 | |
// NOTE(review): "__has_builtin(...)1" was macro-expansion residue; restored.
#if __has_builtin(__builtin_bitreverse8)
/// uint8_t specialization using the compiler's bit-reverse builtin.
template <>
inline uint8_t reverseBits<uint8_t>(uint8_t Val) {
  return __builtin_bitreverse8(Val);
}
#endif
321 | |
// NOTE(review): "__has_builtin(...)1" was macro-expansion residue; restored.
#if __has_builtin(__builtin_bitreverse16)
/// uint16_t specialization using the compiler's bit-reverse builtin.
template <>
inline uint16_t reverseBits<uint16_t>(uint16_t Val) {
  return __builtin_bitreverse16(Val);
}
#endif
328 | |
// NOTE(review): "__has_builtin(...)1" was macro-expansion residue; restored.
#if __has_builtin(__builtin_bitreverse32)
/// uint32_t specialization using the compiler's bit-reverse builtin.
template <>
inline uint32_t reverseBits<uint32_t>(uint32_t Val) {
  return __builtin_bitreverse32(Val);
}
#endif
335 | |
// NOTE(review): "__has_builtin(...)1" was macro-expansion residue; restored.
#if __has_builtin(__builtin_bitreverse64)
/// uint64_t specialization using the compiler's bit-reverse builtin.
template <>
inline uint64_t reverseBits<uint64_t>(uint64_t Val) {
  return __builtin_bitreverse64(Val);
}
#endif
342 | |
// NOTE: The following support functions use the _32/_64 extensions instead of
// type overloading so that signed and unsigned integers can be used without
// ambiguity.

/// Return the high 32 bits of a 64 bit value.
constexpr inline uint32_t Hi_32(uint64_t Value) {
  // Shift the low word out and truncate to the upper half.
  return uint32_t(Value >> 32);
}
351 | |
/// Return the low 32 bits of a 64 bit value.
constexpr inline uint32_t Lo_32(uint64_t Value) {
  // Plain truncation keeps exactly the low word.
  return uint32_t(Value);
}
356 | |
/// Make a 64-bit integer from a high / low pair of 32-bit integers.
constexpr inline uint64_t Make_64(uint32_t High, uint32_t Low) {
  // Widen before shifting so the high word does not overflow 32 bits.
  return (static_cast<uint64_t>(High) << 32) | static_cast<uint64_t>(Low);
}
361 | |
/// Checks if an integer fits into the given bit width.
/// For N >= 64 every int64_t fits; otherwise the valid range is
/// [-2^(N-1), 2^(N-1)).  NOTE(review): "INT64_C(1)1L" was macro-expansion
/// residue; restored to the INT64_C macro.
template <unsigned N> constexpr inline bool isInt(int64_t x) {
  return N >= 64 || (-(INT64_C(1)<<(N-1)) <= x && x < (INT64_C(1)<<(N-1)));
}
366 | // Template specializations to get better code for common cases. |
367 | template <> constexpr inline bool isInt<8>(int64_t x) { |
368 | return static_cast<int8_t>(x) == x; |
369 | } |
370 | template <> constexpr inline bool isInt<16>(int64_t x) { |
371 | return static_cast<int16_t>(x) == x; |
372 | } |
373 | template <> constexpr inline bool isInt<32>(int64_t x) { |
374 | return static_cast<int32_t>(x) == x; |
375 | } |
376 | |
377 | /// Checks if a signed integer is an N bit number shifted left by S. |
378 | template <unsigned N, unsigned S> |
379 | constexpr inline bool isShiftedInt(int64_t x) { |
380 | static_assert( |
381 | N > 0, "isShiftedInt<0> doesn't make sense (refers to a 0-bit number."); |
382 | static_assert(N + S <= 64, "isShiftedInt<N, S> with N + S > 64 is too wide."); |
383 | return isInt<N + S>(x) && (x % (UINT64_C(1)1UL << S) == 0); |
384 | } |
385 | |
/// Checks if an unsigned integer fits into the given bit width.
///
/// This is written as two functions rather than as simply
///
///   return N >= 64 || X < (UINT64_C(1) << N);
///
/// to keep MSVC from (incorrectly) warning on isUInt<64> that we're shifting
/// left too many places.
/// NOTE(review): "UINT64_C(1)1UL" was macro-expansion residue; restored.
template <unsigned N>
constexpr inline std::enable_if_t<(N < 64), bool> isUInt(uint64_t X) {
  static_assert(N > 0, "isUInt<0> doesn't make sense");
  return X < (UINT64_C(1) << (N));
}
template <unsigned N>
constexpr inline std::enable_if_t<N >= 64, bool> isUInt(uint64_t) {
  // Any uint64_t fits in 64 or more bits.
  return true;
}
403 | |
404 | // Template specializations to get better code for common cases. |
405 | template <> constexpr inline bool isUInt<8>(uint64_t x) { |
406 | return static_cast<uint8_t>(x) == x; |
407 | } |
408 | template <> constexpr inline bool isUInt<16>(uint64_t x) { |
409 | return static_cast<uint16_t>(x) == x; |
410 | } |
411 | template <> constexpr inline bool isUInt<32>(uint64_t x) { |
412 | return static_cast<uint32_t>(x) == x; |
413 | } |
414 | |
415 | /// Checks if a unsigned integer is an N bit number shifted left by S. |
416 | template <unsigned N, unsigned S> |
417 | constexpr inline bool isShiftedUInt(uint64_t x) { |
418 | static_assert( |
419 | N > 0, "isShiftedUInt<0> doesn't make sense (refers to a 0-bit number)"); |
420 | static_assert(N + S <= 64, |
421 | "isShiftedUInt<N, S> with N + S > 64 is too wide."); |
422 | // Per the two static_asserts above, S must be strictly less than 64. So |
423 | // 1 << S is not undefined behavior. |
424 | return isUInt<N + S>(x) && (x % (UINT64_C(1)1UL << S) == 0); |
425 | } |
426 | |
/// Gets the maximum value for a N-bit unsigned integer.
/// \param N bit width, 1..64 inclusive.
inline uint64_t maxUIntN(uint64_t N) {
  assert(N > 0 && N <= 64 && "integer width out of range");

  // uint64_t(1) << 64 is undefined behavior, so we can't do
  //   (uint64_t(1) << N) - 1
  // without checking first that N != 64. But this works and doesn't have a
  // branch.
  return UINT64_MAX >> (64 - N);
}
437 | |
/// Gets the minimum value for a N-bit signed integer.
/// \param N bit width, 1..64 inclusive.
inline int64_t minIntN(int64_t N) {
  assert(N > 0 && N <= 64 && "integer width out of range");

  // "1 + ~x" is -x written to avoid MSVC's unary-minus-on-unsigned warning;
  // the arithmetic is done in uint64_t so N == 64 wraps instead of being UB.
  return UINT64_C(1) + ~(UINT64_C(1) << (N - 1));
}
444 | |
/// Gets the maximum value for a N-bit signed integer.
/// \param N bit width, 1..64 inclusive.
inline int64_t maxIntN(int64_t N) {
  assert(N > 0 && N <= 64 && "integer width out of range");

  // This relies on two's complement wraparound when N == 64, so we convert to
  // int64_t only at the very end to avoid UB.
  return (UINT64_C(1) << (N - 1)) - 1;
}
453 | |
/// Checks if an unsigned integer fits into the given (dynamic) bit width.
/// Width >= 64 always fits; otherwise compare against the width's max value.
inline bool isUIntN(unsigned N, uint64_t x) {
  return N >= 64 || x <= maxUIntN(N);
}
458 | |
/// Checks if an signed integer fits into the given (dynamic) bit width.
/// Width >= 64 always fits; otherwise check the [minIntN, maxIntN] range.
inline bool isIntN(unsigned N, int64_t x) {
  return N >= 64 || (minIntN(N) <= x && x <= maxIntN(N));
}
463 | |
/// Return true if the argument is a non-empty sequence of ones starting at the
/// least significant bit with the remainder zero (32 bit version).
/// Ex. isMask_32(0x0000FFFFU) == true.
constexpr inline bool isMask_32(uint32_t Value) {
  // A low-bit mask is exactly one less than a power of two (or all-ones,
  // where Value + 1 wraps to zero).
  return Value != 0 && (Value & (Value + 1)) == 0;
}
470 | |
/// Return true if the argument is a non-empty sequence of ones starting at the
/// least significant bit with the remainder zero (64 bit version).
constexpr inline bool isMask_64(uint64_t Value) {
  // A low-bit mask is exactly one less than a power of two (or all-ones,
  // where Value + 1 wraps to zero).
  return Value != 0 && (Value & (Value + 1)) == 0;
}
476 | |
/// Return true if the argument contains a non-empty sequence of ones with the
/// remainder zero (32 bit version.) Ex. isShiftedMask_32(0x0000FF00U) == true.
constexpr inline bool isShiftedMask_32(uint32_t Value) {
  // (Value - 1) | Value fills the trailing zeros below the mask, reducing
  // the question to the contiguous-low-ones check.
  return Value && isMask_32((Value - 1) | Value);
}
482 | |
/// Return true if the argument contains a non-empty sequence of ones with the
/// remainder zero (64 bit version.)
constexpr inline bool isShiftedMask_64(uint64_t Value) {
  // (Value - 1) | Value fills the trailing zeros below the mask, reducing
  // the question to the contiguous-low-ones check.
  return Value && isMask_64((Value - 1) | Value);
}
488 | |
/// Return true if the argument is a power of two > 0.
/// Ex. isPowerOf2_32(0x00100000U) == true (32 bit edition.)
constexpr inline bool isPowerOf2_32(uint32_t Value) {
  // Clearing the lowest set bit must leave nothing behind.
  return Value != 0 && (Value & (Value - 1)) == 0;
}
494 | |
/// Return true if the argument is a power of two > 0 (64 bit edition.)
constexpr inline bool isPowerOf2_64(uint64_t Value) {
  // Clearing the lowest set bit must leave nothing behind.
  return Value != 0 && (Value & (Value - 1)) == 0;
}
499 | |
/// Count the number of ones from the most significant bit to the first
/// zero bit.
///
/// Ex. countLeadingOnes(0xFF0FFF00) == 8.
/// Only unsigned integral types are allowed.
///
/// \param ZB the behavior on an input of all ones. Only ZB_Width and
///   ZB_Undefined are valid arguments.
template <typename T>
unsigned countLeadingOnes(T Value, ZeroBehavior ZB = ZB_Width) {
  static_assert(std::numeric_limits<T>::is_integer &&
                    !std::numeric_limits<T>::is_signed,
                "Only unsigned integral types are allowed.");
  // Leading ones of Value are the leading zeros of its complement.
  return countLeadingZeros<T>(~Value, ZB);
}
515 | |
/// Count the number of ones from the least significant bit to the first
/// zero bit.
///
/// Ex. countTrailingOnes(0x00FF00FF) == 8.
/// Only unsigned integral types are allowed.
///
/// \param ZB the behavior on an input of all ones. Only ZB_Width and
///   ZB_Undefined are valid arguments.
template <typename T>
unsigned countTrailingOnes(T Value, ZeroBehavior ZB = ZB_Width) {
  static_assert(std::numeric_limits<T>::is_integer &&
                    !std::numeric_limits<T>::is_signed,
                "Only unsigned integral types are allowed.");
  // Trailing ones of Value are the trailing zeros of its complement.
  return countTrailingZeros<T>(~Value, ZB);
}
531 | |
namespace detail {
// NOTE(review): "__GNUC__4" in the previous text was macro-expansion residue;
// restored to the intended feature test.
/// Population count for types of at most 4 bytes.
template <typename T, std::size_t SizeOfT> struct PopulationCounter {
  static unsigned count(T Value) {
    // Generic version, forward to 32 bits.
    static_assert(SizeOfT <= 4, "Not implemented!");
#if defined(__GNUC__)
    return __builtin_popcount(Value);
#else
    // Parallel bit-sum fallback (Hacker's Delight style).
    uint32_t v = Value;
    v = v - ((v >> 1) & 0x55555555);
    v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
    return ((v + (v >> 4) & 0xF0F0F0F) * 0x1010101) >> 24;
#endif
  }
};

/// Population count for 8-byte types.
template <typename T> struct PopulationCounter<T, 8> {
  static unsigned count(T Value) {
#if defined(__GNUC__)
    return __builtin_popcountll(Value);
#else
    uint64_t v = Value;
    v = v - ((v >> 1) & 0x5555555555555555ULL);
    v = (v & 0x3333333333333333ULL) + ((v >> 2) & 0x3333333333333333ULL);
    v = (v + (v >> 4)) & 0x0F0F0F0F0F0F0F0FULL;
    return unsigned((uint64_t)(v * 0x0101010101010101ULL) >> 56);
#endif
  }
};
} // namespace detail
562 | |
/// Count the number of set bits in a value.
/// Ex. countPopulation(0xF000F000) = 8
/// Returns 0 if the word is zero.
template <typename T>
inline unsigned countPopulation(T Value) {
  static_assert(std::numeric_limits<T>::is_integer &&
                    !std::numeric_limits<T>::is_signed,
                "Only unsigned integral types are allowed.");
  // Dispatch on sizeof(T) to pick the 32- or 64-bit implementation.
  return detail::PopulationCounter<T, sizeof(T)>::count(Value);
}
573 | |
/// Return true if the argument contains a non-empty sequence of ones with the
/// remainder zero (32 bit version.) Ex. isShiftedMask_32(0x0000FF00U) == true.
/// If true, \p MaskIdx will specify the index of the lowest set bit and \p
/// MaskLen is updated to specify the length of the mask, else neither are
/// updated.
inline bool isShiftedMask_32(uint32_t Value, unsigned &MaskIdx,
                             unsigned &MaskLen) {
  if (!isShiftedMask_32(Value))
    return false;
  // For a contiguous run of ones the start index is the trailing-zero count
  // and the length is the total popcount.
  MaskIdx = countTrailingZeros(Value);
  MaskLen = countPopulation(Value);
  return true;
}
587 | |
/// Return true if the argument contains a non-empty sequence of ones with the
/// remainder zero (64 bit version.) If true, \p MaskIdx will specify the index
/// of the lowest set bit and \p MaskLen is updated to specify the length of the
/// mask, else neither are updated.
inline bool isShiftedMask_64(uint64_t Value, unsigned &MaskIdx,
                             unsigned &MaskLen) {
  if (!isShiftedMask_64(Value))
    return false;
  // For a contiguous run of ones the start index is the trailing-zero count
  // and the length is the total popcount.
  MaskIdx = countTrailingZeros(Value);
  MaskLen = countPopulation(Value);
  return true;
}
600 | |
/// Compile time Log2.
/// Valid only for positive powers of two.
/// Recurses by halving; each level contributes 1 until the <1> base case.
template <size_t kValue> constexpr inline size_t CTLog2() {
  static_assert(kValue > 0 && llvm::isPowerOf2_64(kValue),
                "Value is not a valid power of 2");
  return 1 + CTLog2<kValue / 2>();
}

/// Base case: log2(1) == 0.
template <> constexpr inline size_t CTLog2<1>() { return 0; }
610 | |
/// Return the log base 2 of the specified value.
inline double Log2(double Value) {
#if defined(__ANDROID_API__) && __ANDROID_API__ < 18
  // Old Android NDKs lack log2(); fall back to the natural-log identity.
  return __builtin_log(Value) / __builtin_log(2.0);
#else
  return log2(Value);
#endif
}
619 | |
/// Return the floor log base 2 of the specified value, -1 if the value is zero.
/// (32 bit edition.)
/// Ex. Log2_32(32) == 5, Log2_32(1) == 0, Log2_32(0) == -1, Log2_32(6) == 2
/// NOTE(review): for zero input, 31 - 32 wraps to 0xFFFFFFFF, which is the
/// documented "-1" read as unsigned.
inline unsigned Log2_32(uint32_t Value) {
  return 31 - countLeadingZeros(Value);
}
626 | |
/// Return the floor log base 2 of the specified value, -1 if the value is zero.
/// (64 bit edition.)
/// NOTE(review): for zero input, 63 - 64 wraps to the documented "-1" read as
/// unsigned.
inline unsigned Log2_64(uint64_t Value) {
  return 63 - countLeadingZeros(Value);
}
632 | |
/// Return the ceil log base 2 of the specified value, 32 if the value is zero.
/// (32 bit edition).
/// Ex. Log2_32_Ceil(32) == 5, Log2_32_Ceil(1) == 0, Log2_32_Ceil(6) == 3
/// The Value - 1 adjustment turns the floor computation into a ceiling.
inline unsigned Log2_32_Ceil(uint32_t Value) {
  return 32 - countLeadingZeros(Value - 1);
}
639 | |
/// Return the ceil log base 2 of the specified value, 64 if the value is zero.
/// (64 bit edition.)
/// The Value - 1 adjustment turns the floor computation into a ceiling.
inline unsigned Log2_64_Ceil(uint64_t Value) {
  return 64 - countLeadingZeros(Value - 1);
}
645 | |
/// Return the greatest common divisor of the values using Euclid's algorithm.
/// gcd(A, 0) == A; gcd(0, B) == B.
template <typename T>
inline T greatestCommonDivisor(T A, T B) {
  // Euclid's identity expressed recursively: gcd(A, B) == gcd(B, A mod B).
  return B ? greatestCommonDivisor<T>(B, A % B) : A;
}
656 | |
/// 64-bit convenience wrapper over greatestCommonDivisor.
inline uint64_t GreatestCommonDivisor64(uint64_t A, uint64_t B) {
  return greatestCommonDivisor<uint64_t>(A, B);
}
660 | |
/// This function takes a 64-bit integer and returns the bit equivalent double.
inline double BitsToDouble(uint64_t Bits) {
  static_assert(sizeof(uint64_t) == sizeof(double), "Unexpected type sizes");
  // memcpy is the portable, strict-aliasing-safe way to reinterpret bits.
  double Result;
  std::memcpy(&Result, &Bits, sizeof(Bits));
  return Result;
}
668 | |
/// This function takes a 32-bit integer and returns the bit equivalent float.
inline float BitsToFloat(uint32_t Bits) {
  static_assert(sizeof(uint32_t) == sizeof(float), "Unexpected type sizes");
  // memcpy is the portable, strict-aliasing-safe way to reinterpret bits.
  float Result;
  std::memcpy(&Result, &Bits, sizeof(Bits));
  return Result;
}
676 | |
/// This function takes a double and returns the bit equivalent 64-bit integer.
/// Note that copying doubles around changes the bits of NaNs on some hosts,
/// notably x86, so this routine cannot be used if these bits are needed.
inline uint64_t DoubleToBits(double Double) {
  static_assert(sizeof(uint64_t) == sizeof(double), "Unexpected type sizes");
  // memcpy is the portable, strict-aliasing-safe way to reinterpret bits.
  uint64_t Result;
  std::memcpy(&Result, &Double, sizeof(Double));
  return Result;
}
686 | |
/// This function takes a float and returns the bit equivalent 32-bit integer.
/// Note that copying floats around changes the bits of NaNs on some hosts,
/// notably x86, so this routine cannot be used if these bits are needed.
inline uint32_t FloatToBits(float Float) {
  static_assert(sizeof(uint32_t) == sizeof(float), "Unexpected type sizes");
  // memcpy is the portable, strict-aliasing-safe way to reinterpret bits.
  uint32_t Result;
  std::memcpy(&Result, &Float, sizeof(Float));
  return Result;
}
696 | |
/// A and B are either alignments or offsets. Return the minimum alignment that
/// may be assumed after adding the two together.
constexpr inline uint64_t MinAlign(uint64_t A, uint64_t B) {
  // The answer is the largest power of 2 dividing both A and B, i.e. the
  // lowest set bit of A | B. "1 + ~x" is "-x" spelled to avoid MSVC's
  // warning C4146 about unary minus on an unsigned type.
  const uint64_t Combined = A | B;
  return Combined & (1 + ~Combined);
}
707 | |
/// Returns the next power of two (in 64-bits) that is strictly greater than A.
/// Returns zero on overflow.
constexpr inline uint64_t NextPowerOf2(uint64_t A) {
  // Smear the highest set bit into every lower position, then add one to
  // carry into the next power of two.
  for (unsigned ShiftAmt = 1; ShiftAmt < 64; ShiftAmt <<= 1)
    A |= (A >> ShiftAmt);
  return A + 1;
}
719 | |
/// Returns the power of two which is less than or equal to the given value.
/// Essentially, it is a floor operation across the domain of powers of two.
/// Returns 0 for a zero input.
inline uint64_t PowerOf2Floor(uint64_t A) {
  if (!A) return 0;
  // 63 - clz(A) is the index of the highest set bit.
  return 1ull << (63 - countLeadingZeros(A, ZB_Undefined));
}
726 | |
/// Returns the power of two which is greater than or equal to the given value.
/// Essentially, it is a ceil operation across the domain of powers of two.
/// Returns 0 for a zero input.
inline uint64_t PowerOf2Ceil(uint64_t A) {
  if (!A)
    return 0;
  // A - 1 makes exact powers of two map to themselves.
  return NextPowerOf2(A - 1);
}
734 | |
/// Returns the next integer (mod 2**64) that is greater than or equal to
/// \p Value and is a multiple of \p Align. \p Align must be non-zero.
///
/// If non-zero \p Skew is specified, the return value will be a minimal
/// integer that is greater than or equal to \p Value and equal to
/// \p Align * N + \p Skew for some integer N. If \p Skew is larger than
/// \p Align, its value is adjusted to '\p Skew mod \p Align'.
///
/// Examples:
/// \code
///   alignTo(5, 8) = 8
///   alignTo(17, 8) = 24
///   alignTo(~0LL, 8) = 0
///   alignTo(321, 255) = 510
///
///   alignTo(5, 8, 7) = 7
///   alignTo(17, 8, 1) = 17
///   alignTo(~0LL, 8, 3) = 3
///   alignTo(321, 255, 42) = 552
/// \endcode
inline uint64_t alignTo(uint64_t Value, uint64_t Align, uint64_t Skew = 0) {
  assert(Align != 0u && "Align can't be 0.");
  Skew %= Align;
  return (Value + Align - 1 - Skew) / Align * Align + Skew;
}
760 | |
/// Returns the next integer (mod 2**64) that is greater than or equal to
/// \p Value and is a multiple of \c Align. \c Align must be non-zero.
template <uint64_t Align> constexpr inline uint64_t alignTo(uint64_t Value) {
  static_assert(Align != 0u, "Align must be non-zero");
  // Bump past the previous multiple, then strip the remainder.
  const uint64_t Bumped = Value + Align - 1;
  return Bumped - Bumped % Align;
}
767 | |
/// Returns the integer ceil(Numerator / Denominator).
/// \p Denominator must be non-zero (enforced by alignTo).
inline uint64_t divideCeil(uint64_t Numerator, uint64_t Denominator) {
  return alignTo(Numerator, Denominator) / Denominator;
}
772 | |
/// Returns the integer nearest(Numerator / Denominator).
inline uint64_t divideNearest(uint64_t Numerator, uint64_t Denominator) {
  // Adding half the denominator before truncating rounds to nearest.
  const uint64_t Half = Denominator / 2;
  return (Numerator + Half) / Denominator;
}
777 | |
/// Returns the largest uint64_t less than or equal to \p Value and is
/// \p Skew mod \p Align. \p Align must be non-zero
inline uint64_t alignDown(uint64_t Value, uint64_t Align, uint64_t Skew = 0) {
  assert(Align != 0u && "Align can't be 0.");
  Skew %= Align;
  return (Value - Skew) / Align * Align + Skew;
}
785 | |
/// Sign-extend the number in the bottom B bits of X to a 32-bit integer.
/// Requires 0 < B <= 32.
template <unsigned B> constexpr inline int32_t SignExtend32(uint32_t X) {
  static_assert(B > 0, "Bit width can't be 0.");
  static_assert(B <= 32, "Bit width out of range.");
  // Shift the B-bit field to the top, then arithmetic-shift it back down so
  // the sign bit is replicated.
  const unsigned ShiftAmt = 32 - B;
  return static_cast<int32_t>(X << ShiftAmt) >> ShiftAmt;
}
793 | |
/// Sign-extend the number in the bottom B bits of X to a 32-bit integer.
/// Requires 0 < B <= 32.
inline int32_t SignExtend32(uint32_t X, unsigned B) {
  assert(B > 0 && "Bit width can't be 0.");
  assert(B <= 32 && "Bit width out of range.");
  // Shift the field to the top, then arithmetic-shift back to replicate the
  // sign bit.
  return int32_t(X << (32 - B)) >> (32 - B);
}
801 | |
/// Sign-extend the number in the bottom B bits of X to a 64-bit integer.
/// Requires 0 < B <= 64.
template <unsigned B> constexpr inline int64_t SignExtend64(uint64_t x) {
  static_assert(B > 0, "Bit width can't be 0.");
  static_assert(B <= 64, "Bit width out of range.");
  // Shift the B-bit field to the top, then arithmetic-shift it back down so
  // the sign bit is replicated.
  const unsigned ShiftAmt = 64 - B;
  return static_cast<int64_t>(x << ShiftAmt) >> ShiftAmt;
}
809 | |
/// Sign-extend the number in the bottom B bits of X to a 64-bit integer.
/// Requires 0 < B <= 64.
inline int64_t SignExtend64(uint64_t X, unsigned B) {
  assert(B > 0 && "Bit width can't be 0.");
  assert(B <= 64 && "Bit width out of range.");
  // Shift the field to the top, then arithmetic-shift back to replicate the
  // sign bit.
  return int64_t(X << (64 - B)) >> (64 - B);
}
817 | |
/// Subtract two unsigned integers, X and Y, of type T and return the absolute
/// value of the result.
template <typename T>
std::enable_if_t<std::is_unsigned<T>::value, T> AbsoluteDifference(T X, T Y) {
  // Subtract the smaller from the larger so the unsigned result never wraps.
  if (X > Y)
    return X - Y;
  return Y - X;
}
824 | |
/// Add two unsigned integers, X and Y, of type T. Clamp the result to the
/// maximum representable value of T on overflow. ResultOverflowed indicates if
/// the result is larger than the maximum representable value of type T.
template <typename T>
std::enable_if_t<std::is_unsigned<T>::value, T>
SaturatingAdd(T X, T Y, bool *ResultOverflowed = nullptr) {
  // Route the overflow flag to the caller's bool, or to a local scratch
  // when the caller doesn't care.
  bool Scratch;
  bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Scratch;
  // Hacker's Delight, p. 29: an unsigned sum overflowed iff it wrapped
  // below either operand.
  const T Sum = X + Y;
  Overflowed = Sum < X || Sum < Y;
  return Overflowed ? std::numeric_limits<T>::max() : Sum;
}
841 | |
/// Multiply two unsigned integers, X and Y, of type T. Clamp the result to the
/// maximum representable value of T on overflow. ResultOverflowed indicates if
/// the result is larger than the maximum representable value of type T.
template <typename T>
std::enable_if_t<std::is_unsigned<T>::value, T>
SaturatingMultiply(T X, T Y, bool *ResultOverflowed = nullptr) {
  // Direct the overflow flag to the caller's bool, or a throwaway local.
  bool Dummy;
  bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;

  // Hacker's Delight, p. 30 has a different algorithm, but we don't use that
  // because it fails for uint16_t (where multiplication can have undefined
  // behavior due to promotion to int), and requires a division in addition
  // to the multiplication.

  Overflowed = false;

  // Classify the product by the sum of the operands' bit widths:
  // floor(log2(X*Y)) is either Log2Z or Log2Z + 1, so comparing against
  // log2 of T's max value decides most cases without multiplying.
  // Log2(Z) would be either Log2Z or Log2Z + 1.
  // Special case: if X or Y is 0, Log2_64 gives -1, and Log2Z
  // will necessarily be less than Log2Max as desired.
  int Log2Z = Log2_64(X) + Log2_64(Y);
  const T Max = std::numeric_limits<T>::max();
  int Log2Max = Log2_64(Max);
  if (Log2Z < Log2Max) {
    // Product provably fits: multiply directly.
    return X * Y;
  }
  if (Log2Z > Log2Max) {
    // Product provably exceeds Max: saturate.
    Overflowed = true;
    return Max;
  }

  // Ambiguous case (Log2Z == Log2Max): the product may or may not fit.
  // We're going to use the top bit, and maybe overflow one
  // bit past it. Multiply all but the bottom bit then add
  // that on at the end.
  T Z = (X >> 1) * Y;
  if (Z & ~(Max >> 1)) {
    // (X/2)*Y already occupies the top bit, so 2*Z cannot fit.
    Overflowed = true;
    return Max;
  }
  Z <<= 1;
  // If X was odd, fold the dropped low bit back in via a saturating add,
  // which also propagates its overflow flag to the caller.
  if (X & 1)
    return SaturatingAdd(Z, Y, ResultOverflowed);

  return Z;
}
886 | |
/// Multiply two unsigned integers, X and Y, and add the unsigned integer, A to
/// the product. Clamp the result to the maximum representable value of T on
/// overflow. ResultOverflowed indicates if the result is larger than the
/// maximum representable value of type T.
template <typename T>
std::enable_if_t<std::is_unsigned<T>::value, T>
SaturatingMultiplyAdd(T X, T Y, T A, bool *ResultOverflowed = nullptr) {
  // Forward the overflow flag to the caller's bool, or a throwaway local.
  bool Scratch;
  bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Scratch;

  // Saturating multiply first; if it already clamped, the sum would clamp
  // too, so return the saturated product immediately.
  const T Product = SaturatingMultiply(X, Y, &Overflowed);
  if (Overflowed)
    return Product;

  // Otherwise fold in the addend, letting SaturatingAdd set the flag.
  return SaturatingAdd(A, Product, &Overflowed);
}
903 | |
/// Use this rather than HUGE_VALF; the latter causes warnings on MSVC.
/// Declaration only — the defining translation unit supplies the value
/// (presumably MathExtras.cpp; confirm against the build).
extern const float huge_valf;
906 | |
907 | |
/// Add two signed integers, computing the two's complement truncated result,
/// returning true if overflow occurred.
///
/// \param X first addend.
/// \param Y second addend.
/// \param Result receives the truncated (wrapped) sum in all cases.
/// \returns true iff the mathematically exact sum does not fit in T.
//
// Fixed: the return type was T, silently converting the documented bool;
// it is now bool, matching the documentation (callers that coerced the old
// T result in a boolean context are unaffected).
template <typename T>
std::enable_if_t<std::is_signed<T>::value, bool> AddOverflow(T X, T Y,
                                                             T &Result) {
#if __has_builtin(__builtin_add_overflow)
  return __builtin_add_overflow(X, Y, &Result);
#else
  // Perform the addition in the unsigned domain, where wraparound is
  // well-defined (signed overflow would be UB).
  using U = std::make_unsigned_t<T>;
  const U UX = static_cast<U>(X);
  const U UY = static_cast<U>(Y);
  const U UResult = UX + UY;

  // Convert to signed.
  Result = static_cast<T>(UResult);

  // Adding two positive numbers should result in a positive number.
  if (X > 0 && Y > 0)
    return Result <= 0;
  // Adding two negatives should result in a negative number.
  if (X < 0 && Y < 0)
    return Result >= 0;
  // Mixed signs can never overflow.
  return false;
#endif
}
933 | |
/// Subtract two signed integers, computing the two's complement truncated
/// result, returning true if an overflow ocurred.
///
/// \param X minuend.
/// \param Y subtrahend.
/// \param Result receives the truncated (wrapped) difference in all cases.
/// \returns true iff the mathematically exact difference does not fit in T.
//
// Fixed: the return type was T, silently converting the documented bool;
// it is now bool, matching the documentation. Also fixed the copy-pasted
// "unsigned addition" comment below — this performs a subtraction.
template <typename T>
std::enable_if_t<std::is_signed<T>::value, bool> SubOverflow(T X, T Y,
                                                             T &Result) {
#if __has_builtin(__builtin_sub_overflow)
  return __builtin_sub_overflow(X, Y, &Result);
#else
  // Perform the subtraction in the unsigned domain, where wraparound is
  // well-defined (signed overflow would be UB).
  using U = std::make_unsigned_t<T>;
  const U UX = static_cast<U>(X);
  const U UY = static_cast<U>(Y);
  const U UResult = UX - UY;

  // Convert to signed.
  Result = static_cast<T>(UResult);

  // Subtracting a positive number from a negative results in a negative number.
  if (X <= 0 && Y > 0)
    return Result >= 0;
  // Subtracting a negative number from a positive results in a positive number.
  if (X >= 0 && Y < 0)
    return Result <= 0;
  // Same-sign operands can never overflow on subtraction.
  return false;
#endif
}
959 | |
/// Multiply two signed integers, computing the two's complement truncated
/// result, returning true if an overflow ocurred.
///
/// \param X first factor.
/// \param Y second factor.
/// \param Result receives the truncated product in all cases.
/// \returns true iff the mathematically exact product does not fit in T.
//
// Fixed: the return type was T, silently converting the documented bool;
// it is now bool, matching the documentation.
template <typename T>
std::enable_if_t<std::is_signed<T>::value, bool> MulOverflow(T X, T Y,
                                                             T &Result) {
  // Perform the unsigned multiplication on absolute values; unsigned
  // wraparound is well-defined, and 0 - (U)min correctly yields |min|.
  using U = std::make_unsigned_t<T>;
  const U UX = X < 0 ? (0 - static_cast<U>(X)) : static_cast<U>(X);
  const U UY = Y < 0 ? (0 - static_cast<U>(Y)) : static_cast<U>(Y);
  const U UResult = UX * UY;

  // Convert to signed, restoring the sign of the product.
  const bool IsNegative = (X < 0) ^ (Y < 0);
  Result = IsNegative ? (0 - UResult) : UResult;

  // If any of the args was 0, result is 0 and no overflow occurs.
  if (UX == 0 || UY == 0)
    return false;

  // UX and UY are in [1, 2^n], where n is the number of digits.
  // Check how the max allowed absolute value (2^n for negative, 2^(n-1) for
  // positive) divided by an argument compares to the other.
  if (IsNegative)
    return UX > (static_cast<U>(std::numeric_limits<T>::max()) + U(1)) / UY;
  else
    return UX > (static_cast<U>(std::numeric_limits<T>::max())) / UY;
}
986 | |
987 | } // End llvm namespace |
988 | |
989 | #endif |