| File: | build/source/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp |
| Warning: | line 360, column 25: The result of the left shift is undefined due to shifting by '4294967295', which is greater or equal to the width of type 'int' |
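The warning points at line 360 of the listing below: `P2HalfMemSize = 1 << Log2_32(MemSize);`. Log2_32 (defined in llvm/Support/MathExtras.h, reproduced after this file) returns `unsigned` and computes `31 - countl_zero(Value)`, so for Value == 0 it yields 31 - 32, which wraps to 4294967295; shifting a 32-bit `int` by that amount is undefined behavior. A minimal standalone sketch of the arithmetic the analyzer is reasoning about (illustration only, using std::countl_zero as a stand-in for llvm::countl_zero):

    #include <bit>
    #include <cstdint>

    // Mirrors llvm::Log2_32: floor(log2(Value)), but with an unsigned return type.
    unsigned log2_32(uint32_t Value) { return 31 - std::countl_zero(Value); }

    int main() {
      unsigned MemSize = 0;              // the case the analyzer assumes is reachable
      unsigned Shift = log2_32(MemSize); // 31 - 32 wraps to 4294967295
      // Evaluating 1 << Shift here would be undefined: shift counts must be < 32.
      unsigned Safe = MemSize ? 1u << Shift : 0; // guarded, well-defined version
      return static_cast<int>(Safe);
    }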
| 1 | //===- MipsLegalizerInfo.cpp ------------------------------------*- C++ -*-===// | |||
| 2 | // | |||
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | |||
| 4 | // See https://llvm.org/LICENSE.txt for license information. | |||
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | |||
| 6 | // | |||
| 7 | //===----------------------------------------------------------------------===// | |||
| 8 | /// \file | |||
| 9 | /// This file implements the targeting of the MachineLegalizer class for Mips. | |||
| 10 | /// \todo This should be generated by TableGen. | |||
| 11 | //===----------------------------------------------------------------------===// | |||
| 12 | ||||
| 13 | #include "MipsLegalizerInfo.h" | |||
| 14 | #include "MipsTargetMachine.h" | |||
| 15 | #include "llvm/CodeGen/GlobalISel/LegalizerHelper.h" | |||
| 16 | #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h" | |||
| 17 | #include "llvm/IR/IntrinsicsMips.h" | |||
| 18 | ||||
| 19 | using namespace llvm; | |||
| 20 | ||||
| 21 | struct TypesAndMemOps { | |||
| 22 | LLT ValTy; | |||
| 23 | LLT PtrTy; | |||
| 24 | unsigned MemSize; | |||
| 25 | bool SystemSupportsUnalignedAccess; | |||
| 26 | }; | |||
| 27 | ||||
| 28 | // Assumes power of 2 memory size. Subtargets that have only naturally-aligned | |||
| 29 | // memory access need to perform additional legalization here. | |||
| 30 | static bool isUnalignedMemmoryAccess(uint64_t MemSize, uint64_t AlignInBits) { | |||
| 31 | assert(isPowerOf2_64(MemSize) && "Expected power of 2 memory size"); | |||
| 32 | assert(isPowerOf2_64(AlignInBits) && "Expected power of 2 align"); | |||
| 33 | if (MemSize > AlignInBits) | |||
| 34 | return true; | |||
| 35 | return false; | |||
| 36 | } | |||
| 37 | ||||
| 38 | static bool | |||
| 39 | CheckTy0Ty1MemSizeAlign(const LegalityQuery &Query, | |||
| 40 | std::initializer_list<TypesAndMemOps> SupportedValues) { | |||
| 41 | unsigned QueryMemSize = Query.MMODescrs[0].MemoryTy.getSizeInBits(); | |||
| 42 | ||||
| 43 | // Non power of two memory access is never legal. | |||
| 44 | if (!isPowerOf2_64(QueryMemSize)) | |||
| 45 | return false; | |||
| 46 | ||||
| 47 | for (auto &Val : SupportedValues) { | |||
| 48 | if (Val.ValTy != Query.Types[0]) | |||
| 49 | continue; | |||
| 50 | if (Val.PtrTy != Query.Types[1]) | |||
| 51 | continue; | |||
| 52 | if (Val.MemSize != QueryMemSize) | |||
| 53 | continue; | |||
| 54 | if (!Val.SystemSupportsUnalignedAccess && | |||
| 55 | isUnalignedMemmoryAccess(QueryMemSize, Query.MMODescrs[0].AlignInBits)) | |||
| 56 | return false; | |||
| 57 | return true; | |||
| 58 | } | |||
| 59 | return false; | |||
| 60 | } | |||
| 61 | ||||
| 62 | static bool CheckTyN(unsigned N, const LegalityQuery &Query, | |||
| 63 | std::initializer_list<LLT> SupportedValues) { | |||
| 64 | return llvm::is_contained(SupportedValues, Query.Types[N]); | |||
| 65 | } | |||
| 66 | ||||
| 67 | MipsLegalizerInfo::MipsLegalizerInfo(const MipsSubtarget &ST) { | |||
| 68 | using namespace TargetOpcode; | |||
| 69 | ||||
| 70 | const LLT s1 = LLT::scalar(1); | |||
| 71 | const LLT s8 = LLT::scalar(8); | |||
| 72 | const LLT s16 = LLT::scalar(16); | |||
| 73 | const LLT s32 = LLT::scalar(32); | |||
| 74 | const LLT s64 = LLT::scalar(64); | |||
| 75 | const LLT v16s8 = LLT::fixed_vector(16, 8); | |||
| 76 | const LLT v8s16 = LLT::fixed_vector(8, 16); | |||
| 77 | const LLT v4s32 = LLT::fixed_vector(4, 32); | |||
| 78 | const LLT v2s64 = LLT::fixed_vector(2, 64); | |||
| 79 | const LLT p0 = LLT::pointer(0, 32); | |||
| 80 | ||||
| 81 | getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL}) | |||
| 82 | .legalIf([=, &ST](const LegalityQuery &Query) { | |||
| 83 | if (CheckTyN(0, Query, {s32})) | |||
| 84 | return true; | |||
| 85 | if (ST.hasMSA() && CheckTyN(0, Query, {v16s8, v8s16, v4s32, v2s64})) | |||
| 86 | return true; | |||
| 87 | return false; | |||
| 88 | }) | |||
| 89 | .clampScalar(0, s32, s32); | |||
| 90 | ||||
| 91 | getActionDefinitionsBuilder({G_UADDO, G_UADDE, G_USUBO, G_USUBE, G_UMULO}) | |||
| 92 | .lowerFor({{s32, s1}}); | |||
| 93 | ||||
| 94 | getActionDefinitionsBuilder(G_UMULH) | |||
| 95 | .legalFor({s32}) | |||
| 96 | .maxScalar(0, s32); | |||
| 97 | ||||
| 98 | // MIPS32r6 does not have alignment restrictions for memory access. | |||
| 99 | // For MIPS32r5 and older, memory access must be naturally aligned, i.e. | |||
| 100 | // aligned to at least its own size. There is, however, a two-instruction | |||
| 101 | // combination that performs 4 byte unaligned access (lwr/lwl and swl/swr), | |||
| 102 | // so 4 byte loads and stores are legal and will use NoAlignRequirements. | |||
| 103 | bool NoAlignRequirements = true; | |||
| 104 | ||||
| 105 | getActionDefinitionsBuilder({G_LOAD, G_STORE}) | |||
| 106 | .legalIf([=, &ST](const LegalityQuery &Query) { | |||
| 107 | if (CheckTy0Ty1MemSizeAlign( | |||
| 108 | Query, {{s32, p0, 8, NoAlignRequirements}, | |||
| 109 | {s32, p0, 16, ST.systemSupportsUnalignedAccess()}, | |||
| 110 | {s32, p0, 32, NoAlignRequirements}, | |||
| 111 | {p0, p0, 32, NoAlignRequirements}, | |||
| 112 | {s64, p0, 64, ST.systemSupportsUnalignedAccess()}})) | |||
| 113 | return true; | |||
| 114 | if (ST.hasMSA() && CheckTy0Ty1MemSizeAlign( | |||
| 115 | Query, {{v16s8, p0, 128, NoAlignRequirements}, | |||
| 116 | {v8s16, p0, 128, NoAlignRequirements}, | |||
| 117 | {v4s32, p0, 128, NoAlignRequirements}, | |||
| 118 | {v2s64, p0, 128, NoAlignRequirements}})) | |||
| 119 | return true; | |||
| 120 | return false; | |||
| 121 | }) | |||
| 122 | // Custom lower scalar memory access, up to 8 bytes, for: | |||
| 123 | // - non-power-of-2 MemSizes | |||
| 124 | // - unaligned 2 or 8 byte MemSizes for MIPS32r5 and older | |||
| 125 | .customIf([=, &ST](const LegalityQuery &Query) { | |||
| 126 | if (!Query.Types[0].isScalar() || Query.Types[1] != p0 || | |||
| 127 | Query.Types[0] == s1) | |||
| 128 | return false; | |||
| 129 | ||||
| 130 | unsigned Size = Query.Types[0].getSizeInBits(); | |||
| 131 | unsigned QueryMemSize = Query.MMODescrs[0].MemoryTy.getSizeInBits(); | |||
| 132 | assert(QueryMemSize <= Size && "Scalar can't hold MemSize"); | |||
| 133 | ||||
| 134 | if (Size > 64 || QueryMemSize > 64) | |||
| 135 | return false; | |||
| 136 | ||||
| 137 | if (!isPowerOf2_64(Query.MMODescrs[0].MemoryTy.getSizeInBits())) | |||
| 138 | return true; | |||
| 139 | ||||
| 140 | if (!ST.systemSupportsUnalignedAccess() && | |||
| 141 | isUnalignedMemmoryAccess(QueryMemSize, | |||
| 142 | Query.MMODescrs[0].AlignInBits)) { | |||
| 143 | assert(QueryMemSize != 32 && "4 byte load and store are legal"); | |||
| 144 | return true; | |||
| 145 | } | |||
| 146 | ||||
| 147 | return false; | |||
| 148 | }) | |||
| 149 | .minScalar(0, s32) | |||
| 150 | .lower(); | |||
| 151 | ||||
| 152 | getActionDefinitionsBuilder(G_IMPLICIT_DEF) | |||
| 153 | .legalFor({s32, s64}); | |||
| 154 | ||||
| 155 | getActionDefinitionsBuilder(G_UNMERGE_VALUES) | |||
| 156 | .legalFor({{s32, s64}}); | |||
| 157 | ||||
| 158 | getActionDefinitionsBuilder(G_MERGE_VALUES) | |||
| 159 | .legalFor({{s64, s32}}); | |||
| 160 | ||||
| 161 | getActionDefinitionsBuilder({G_ZEXTLOAD, G_SEXTLOAD}) | |||
| 162 | .legalForTypesWithMemDesc({{s32, p0, s8, 8}, | |||
| 163 | {s32, p0, s16, 8}}) | |||
| 164 | .clampScalar(0, s32, s32); | |||
| 165 | ||||
| 166 | getActionDefinitionsBuilder({G_ZEXT, G_SEXT, G_ANYEXT}) | |||
| 167 | .legalIf([](const LegalityQuery &Query) { return false; }) | |||
| 168 | .maxScalar(0, s32); | |||
| 169 | ||||
| 170 | getActionDefinitionsBuilder(G_TRUNC) | |||
| 171 | .legalIf([](const LegalityQuery &Query) { return false; }) | |||
| 172 | .maxScalar(1, s32); | |||
| 173 | ||||
| 174 | getActionDefinitionsBuilder(G_SELECT) | |||
| 175 | .legalForCartesianProduct({p0, s32, s64}, {s32}) | |||
| 176 | .minScalar(0, s32) | |||
| 177 | .minScalar(1, s32); | |||
| 178 | ||||
| 179 | getActionDefinitionsBuilder(G_BRCOND) | |||
| 180 | .legalFor({s32}) | |||
| 181 | .minScalar(0, s32); | |||
| 182 | ||||
| 183 | getActionDefinitionsBuilder(G_BRJT) | |||
| 184 | .legalFor({{p0, s32}}); | |||
| 185 | ||||
| 186 | getActionDefinitionsBuilder(G_BRINDIRECT) | |||
| 187 | .legalFor({p0}); | |||
| 188 | ||||
| 189 | getActionDefinitionsBuilder(G_PHI) | |||
| 190 | .legalFor({p0, s32, s64}) | |||
| 191 | .minScalar(0, s32); | |||
| 192 | ||||
| 193 | getActionDefinitionsBuilder({G_AND, G_OR, G_XOR}) | |||
| 194 | .legalFor({s32}) | |||
| 195 | .clampScalar(0, s32, s32); | |||
| 196 | ||||
| 197 | getActionDefinitionsBuilder({G_SDIV, G_SREM, G_UDIV, G_UREM}) | |||
| 198 | .legalIf([=, &ST](const LegalityQuery &Query) { | |||
| 199 | if (CheckTyN(0, Query, {s32})) | |||
| 200 | return true; | |||
| 201 | if (ST.hasMSA() && CheckTyN(0, Query, {v16s8, v8s16, v4s32, v2s64})) | |||
| 202 | return true; | |||
| 203 | return false; | |||
| 204 | }) | |||
| 205 | .minScalar(0, s32) | |||
| 206 | .libcallFor({s64}); | |||
| 207 | ||||
| 208 | getActionDefinitionsBuilder({G_SHL, G_ASHR, G_LSHR}) | |||
| 209 | .legalFor({{s32, s32}}) | |||
| 210 | .clampScalar(1, s32, s32) | |||
| 211 | .clampScalar(0, s32, s32); | |||
| 212 | ||||
| 213 | getActionDefinitionsBuilder(G_ICMP) | |||
| 214 | .legalForCartesianProduct({s32}, {s32, p0}) | |||
| 215 | .clampScalar(1, s32, s32) | |||
| 216 | .minScalar(0, s32); | |||
| 217 | ||||
| 218 | getActionDefinitionsBuilder(G_CONSTANT) | |||
| 219 | .legalFor({s32}) | |||
| 220 | .clampScalar(0, s32, s32); | |||
| 221 | ||||
| 222 | getActionDefinitionsBuilder({G_PTR_ADD, G_INTTOPTR}) | |||
| 223 | .legalFor({{p0, s32}}); | |||
| 224 | ||||
| 225 | getActionDefinitionsBuilder(G_PTRTOINT) | |||
| 226 | .legalFor({{s32, p0}}); | |||
| 227 | ||||
| 228 | getActionDefinitionsBuilder(G_FRAME_INDEX) | |||
| 229 | .legalFor({p0}); | |||
| 230 | ||||
| 231 | getActionDefinitionsBuilder({G_GLOBAL_VALUE, G_JUMP_TABLE}) | |||
| 232 | .legalFor({p0}); | |||
| 233 | ||||
| 234 | getActionDefinitionsBuilder(G_DYN_STACKALLOC) | |||
| 235 | .lowerFor({{p0, s32}}); | |||
| 236 | ||||
| 237 | getActionDefinitionsBuilder(G_VASTART) | |||
| 238 | .legalFor({p0}); | |||
| 239 | ||||
| 240 | getActionDefinitionsBuilder(G_BSWAP) | |||
| 241 | .legalIf([=, &ST](const LegalityQuery &Query) { | |||
| 242 | if (ST.hasMips32r2() && CheckTyN(0, Query, {s32})) | |||
| 243 | return true; | |||
| 244 | return false; | |||
| 245 | }) | |||
| 246 | .lowerIf([=, &ST](const LegalityQuery &Query) { | |||
| 247 | if (!ST.hasMips32r2() && CheckTyN(0, Query, {s32})) | |||
| 248 | return true; | |||
| 249 | return false; | |||
| 250 | }) | |||
| 251 | .maxScalar(0, s32); | |||
| 252 | ||||
| 253 | getActionDefinitionsBuilder(G_BITREVERSE) | |||
| 254 | .lowerFor({s32}) | |||
| 255 | .maxScalar(0, s32); | |||
| 256 | ||||
| 257 | getActionDefinitionsBuilder(G_CTLZ) | |||
| 258 | .legalFor({{s32, s32}}) | |||
| 259 | .maxScalar(0, s32) | |||
| 260 | .maxScalar(1, s32); | |||
| 261 | getActionDefinitionsBuilder(G_CTLZ_ZERO_UNDEF) | |||
| 262 | .lowerFor({{s32, s32}}); | |||
| 263 | ||||
| 264 | getActionDefinitionsBuilder(G_CTTZ) | |||
| 265 | .lowerFor({{s32, s32}}) | |||
| 266 | .maxScalar(0, s32) | |||
| 267 | .maxScalar(1, s32); | |||
| 268 | getActionDefinitionsBuilder(G_CTTZ_ZERO_UNDEF) | |||
| 269 | .lowerFor({{s32, s32}, {s64, s64}}); | |||
| 270 | ||||
| 271 | getActionDefinitionsBuilder(G_CTPOP) | |||
| 272 | .lowerFor({{s32, s32}}) | |||
| 273 | .clampScalar(0, s32, s32) | |||
| 274 | .clampScalar(1, s32, s32); | |||
| 275 | ||||
| 276 | // FP instructions | |||
| 277 | getActionDefinitionsBuilder(G_FCONSTANT) | |||
| 278 | .legalFor({s32, s64}); | |||
| 279 | ||||
| 280 | getActionDefinitionsBuilder({G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FABS, G_FSQRT}) | |||
| 281 | .legalIf([=, &ST](const LegalityQuery &Query) { | |||
| 282 | if (CheckTyN(0, Query, {s32, s64})) | |||
| 283 | return true; | |||
| 284 | if (ST.hasMSA() && CheckTyN(0, Query, {v16s8, v8s16, v4s32, v2s64})) | |||
| 285 | return true; | |||
| 286 | return false; | |||
| 287 | }); | |||
| 288 | ||||
| 289 | getActionDefinitionsBuilder(G_FCMP) | |||
| 290 | .legalFor({{s32, s32}, {s32, s64}}) | |||
| 291 | .minScalar(0, s32); | |||
| 292 | ||||
| 293 | getActionDefinitionsBuilder({G_FCEIL, G_FFLOOR}) | |||
| 294 | .libcallFor({s32, s64}); | |||
| 295 | ||||
| 296 | getActionDefinitionsBuilder(G_FPEXT) | |||
| 297 | .legalFor({{s64, s32}}); | |||
| 298 | ||||
| 299 | getActionDefinitionsBuilder(G_FPTRUNC) | |||
| 300 | .legalFor({{s32, s64}}); | |||
| 301 | ||||
| 302 | // FP to int conversion instructions | |||
| 303 | getActionDefinitionsBuilder(G_FPTOSI) | |||
| 304 | .legalForCartesianProduct({s32}, {s64, s32}) | |||
| 305 | .libcallForCartesianProduct({s64}, {s64, s32}) | |||
| 306 | .minScalar(0, s32); | |||
| 307 | ||||
| 308 | getActionDefinitionsBuilder(G_FPTOUI) | |||
| 309 | .libcallForCartesianProduct({s64}, {s64, s32}) | |||
| 310 | .lowerForCartesianProduct({s32}, {s64, s32}) | |||
| 311 | .minScalar(0, s32); | |||
| 312 | ||||
| 313 | // Int to FP conversion instructions | |||
| 314 | getActionDefinitionsBuilder(G_SITOFP) | |||
| 315 | .legalForCartesianProduct({s64, s32}, {s32}) | |||
| 316 | .libcallForCartesianProduct({s64, s32}, {s64}) | |||
| 317 | .minScalar(1, s32); | |||
| 318 | ||||
| 319 | getActionDefinitionsBuilder(G_UITOFP) | |||
| 320 | .libcallForCartesianProduct({s64, s32}, {s64}) | |||
| 321 | .customForCartesianProduct({s64, s32}, {s32}) | |||
| 322 | .minScalar(1, s32); | |||
| 323 | ||||
| 324 | getActionDefinitionsBuilder(G_SEXT_INREG).lower(); | |||
| 325 | ||||
| 326 | getActionDefinitionsBuilder({G_MEMCPY, G_MEMMOVE, G_MEMSET}).libcall(); | |||
| 327 | ||||
| 328 | getLegacyLegalizerInfo().computeTables(); | |||
| 329 | verify(*ST.getInstrInfo()); | |||
| 330 | } | |||
| 331 | ||||
| 332 | bool MipsLegalizerInfo::legalizeCustom(LegalizerHelper &Helper, | |||
| 333 | MachineInstr &MI) const { | |||
| 334 | using namespace TargetOpcode; | |||
| 335 | ||||
| 336 | MachineIRBuilder &MIRBuilder = Helper.MIRBuilder; | |||
| 337 | MachineRegisterInfo &MRI = *MIRBuilder.getMRI(); | |||
| 338 | ||||
| 339 | const LLT s32 = LLT::scalar(32); | |||
| 340 | const LLT s64 = LLT::scalar(64); | |||
| 341 | ||||
| 342 | switch (MI.getOpcode()) { | |||
| 343 | case G_LOAD: | |||
| 344 | case G_STORE: { | |||
| 345 | unsigned MemSize = (**MI.memoperands_begin()).getSize(); | |||
| 346 | Register Val = MI.getOperand(0).getReg(); | |||
| 347 | unsigned Size = MRI.getType(Val).getSizeInBits(); | |||
| 348 | ||||
| 349 | MachineMemOperand *MMOBase = *MI.memoperands_begin(); | |||
| 350 | ||||
| 351 | assert(MemSize <= 8 && "MemSize is too large"); | |||
| 352 | assert(Size <= 64 && "Scalar size is too large"); | |||
| 353 | ||||
| 354 | // Split MemSize into two, where P2HalfMemSize is the largest power of two | |||
| 355 | // smaller than MemSize. e.g. 8 = 4 + 4, 6 = 4 + 2, 3 = 2 + 1. | |||
| 356 | unsigned P2HalfMemSize, RemMemSize; | |||
| 357 | if (isPowerOf2_64(MemSize)) { | |||
| 358 | P2HalfMemSize = RemMemSize = MemSize / 2; | |||
| 359 | } else { | |||
| 360 | P2HalfMemSize = 1 << Log2_32(MemSize); | |||
|  | <-- warning: The result of the left shift is undefined due to shifting by '4294967295', which is greater or equal to the width of type 'int' | |||
| 361 | RemMemSize = MemSize - P2HalfMemSize; | |||
| 362 | } | |||
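Note on the flagged line: the surrounding asserts guarantee MemSize <= 8, and this else branch is only reached for non-power-of-2 sizes, but nothing the analyzer can see rules out MemSize == 0 (isPowerOf2_64(0) is false, so a zero-sized memory operand would also land here). If one wanted to make the invariant explicit, a possible hardening (an illustrative sketch, assuming llvm/ADT/bit.h is available here; not the upstream fix) is:

    assert(MemSize > 0 && "expected a non-zero memory size");
    P2HalfMemSize = llvm::bit_floor(MemSize); // == 1 << Log2_32(MemSize) for MemSize > 0
    RemMemSize = MemSize - P2HalfMemSize;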
| 363 | ||||
| 364 | Register BaseAddr = MI.getOperand(1).getReg(); | |||
| 365 | LLT PtrTy = MRI.getType(BaseAddr); | |||
| 366 | MachineFunction &MF = MIRBuilder.getMF(); | |||
| 367 | ||||
| 368 | auto P2HalfMemOp = MF.getMachineMemOperand(MMOBase, 0, P2HalfMemSize); | |||
| 369 | auto RemMemOp = MF.getMachineMemOperand(MMOBase, P2HalfMemSize, RemMemSize); | |||
| 370 | ||||
| 371 | if (MI.getOpcode() == G_STORE) { | |||
| 372 | // Widen Val to s32 or s64 in order to create legal G_LSHR or G_UNMERGE. | |||
| 373 | if (Size < 32) | |||
| 374 | Val = MIRBuilder.buildAnyExt(s32, Val).getReg(0); | |||
| 375 | if (Size > 32 && Size < 64) | |||
| 376 | Val = MIRBuilder.buildAnyExt(s64, Val).getReg(0); | |||
| 377 | ||||
| 378 | auto C_P2HalfMemSize = MIRBuilder.buildConstant(s32, P2HalfMemSize); | |||
| 379 | auto Addr = MIRBuilder.buildPtrAdd(PtrTy, BaseAddr, C_P2HalfMemSize); | |||
| 380 | ||||
| 381 | if (MI.getOpcode() == G_STORE && MemSize <= 4) { | |||
| 382 | MIRBuilder.buildStore(Val, BaseAddr, *P2HalfMemOp); | |||
| 383 | auto C_P2Half_InBits = MIRBuilder.buildConstant(s32, P2HalfMemSize * 8); | |||
| 384 | auto Shift = MIRBuilder.buildLShr(s32, Val, C_P2Half_InBits); | |||
| 385 | MIRBuilder.buildStore(Shift, Addr, *RemMemOp); | |||
| 386 | } else { | |||
| 387 | auto Unmerge = MIRBuilder.buildUnmerge(s32, Val); | |||
| 388 | MIRBuilder.buildStore(Unmerge.getReg(0), BaseAddr, *P2HalfMemOp); | |||
| 389 | MIRBuilder.buildStore(Unmerge.getReg(1), Addr, *RemMemOp); | |||
| 390 | } | |||
| 391 | } | |||
| 392 | ||||
| 393 | if (MI.getOpcode() == G_LOAD) { | |||
| 394 | ||||
| 395 | if (MemSize <= 4) { | |||
| 396 | // This is an any-extending load; use the 4 byte lwr/lwl pair. | |||
| 397 | auto *Load4MMO = MF.getMachineMemOperand(MMOBase, 0, 4); | |||
| 398 | ||||
| 399 | if (Size == 32) | |||
| 400 | MIRBuilder.buildLoad(Val, BaseAddr, *Load4MMO); | |||
| 401 | else { | |||
| 402 | auto Load = MIRBuilder.buildLoad(s32, BaseAddr, *Load4MMO); | |||
| 403 | MIRBuilder.buildTrunc(Val, Load.getReg(0)); | |||
| 404 | } | |||
| 405 | ||||
| 406 | } else { | |||
| 407 | auto C_P2HalfMemSize = MIRBuilder.buildConstant(s32, P2HalfMemSize); | |||
| 408 | auto Addr = MIRBuilder.buildPtrAdd(PtrTy, BaseAddr, C_P2HalfMemSize); | |||
| 409 | ||||
| 410 | auto Load_P2Half = MIRBuilder.buildLoad(s32, BaseAddr, *P2HalfMemOp); | |||
| 411 | auto Load_Rem = MIRBuilder.buildLoad(s32, Addr, *RemMemOp); | |||
| 412 | ||||
| 413 | if (Size == 64) | |||
| 414 | MIRBuilder.buildMergeLikeInstr(Val, {Load_P2Half, Load_Rem}); | |||
| 415 | else { | |||
| 416 | auto Merge = | |||
| 417 | MIRBuilder.buildMergeLikeInstr(s64, {Load_P2Half, Load_Rem}); | |||
| 418 | MIRBuilder.buildTrunc(Val, Merge); | |||
| 419 | } | |||
| 420 | } | |||
| 421 | } | |||
| 422 | MI.eraseFromParent(); | |||
| 423 | break; | |||
| 424 | } | |||
| 425 | case G_UITOFP: { | |||
| 426 | Register Dst = MI.getOperand(0).getReg(); | |||
| 427 | Register Src = MI.getOperand(1).getReg(); | |||
| 428 | LLT DstTy = MRI.getType(Dst); | |||
| 429 | LLT SrcTy = MRI.getType(Src); | |||
| 430 | ||||
| 431 | if (SrcTy != s32) | |||
| 432 | return false; | |||
| 433 | if (DstTy != s32 && DstTy != s64) | |||
| 434 | return false; | |||
| 435 | ||||
| 436 | // Let 0xABCDEFGH be the given unsigned value in MI.getOperand(1). First | |||
| 437 | // convert it to double. The mantissa has 52 bits, so we use the following | |||
| 438 | // trick: build the floating point bit pattern 0x43300000ABCDEFGH, which | |||
| 439 | // represents 2^52 * 0x1.00000ABCDEFGH, i.e. 0x100000ABCDEFGH.0 . | |||
| 440 | // Next, subtract 2^52 * 0x1.0000000000000, i.e. 0x10000000000000.0, from it. | |||
| 441 | // Done. Truncate the double to float if needed. | |||
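Worked example of the trick above (checked by hand, not part of the report): for Src = 0x80000000, the merged 64-bit pattern is 0x4330000080000000. Decoded as an IEEE 754 double, the exponent field 0x433 denotes 2^52 and the 52-bit mantissa field 0x0000080000000 contributes 2^31, so the value is 2^52 + 2^31. Subtracting the constant with bits 0x4330000000000000 (exactly 2^52) leaves 2^31 = 2147483648.0, the correct unsigned interpretation of Src.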
| 442 | ||||
| 443 | auto C_HiMask = MIRBuilder.buildConstant(s32, UINT32_C(0x43300000)); | |||
| 444 | auto Bitcast = | |||
| 445 | MIRBuilder.buildMergeLikeInstr(s64, {Src, C_HiMask.getReg(0)}); | |||
| 446 | ||||
| 447 | MachineInstrBuilder TwoP52FP = MIRBuilder.buildFConstant( | |||
| 448 | s64, llvm::bit_cast<double>(UINT64_C(0x4330000000000000))); | |||
| 449 | ||||
| 450 | if (DstTy == s64) | |||
| 451 | MIRBuilder.buildFSub(Dst, Bitcast, TwoP52FP); | |||
| 452 | else { | |||
| 453 | MachineInstrBuilder ResF64 = MIRBuilder.buildFSub(s64, Bitcast, TwoP52FP); | |||
| 454 | MIRBuilder.buildFPTrunc(Dst, ResF64); | |||
| 455 | } | |||
| 456 | ||||
| 457 | MI.eraseFromParent(); | |||
| 458 | break; | |||
| 459 | } | |||
| 460 | default: | |||
| 461 | return false; | |||
| 462 | } | |||
| 463 | ||||
| 464 | return true; | |||
| 465 | } | |||
| 466 | ||||
| 467 | static bool SelectMSA3OpIntrinsic(MachineInstr &MI, unsigned Opcode, | |||
| 468 | MachineIRBuilder &MIRBuilder, | |||
| 469 | const MipsSubtarget &ST) { | |||
| 470 | assert(ST.hasMSA() && "MSA intrinsic not supported on target without MSA."); | |||
| 471 | if (!MIRBuilder.buildInstr(Opcode) | |||
| 472 | .add(MI.getOperand(0)) | |||
| 473 | .add(MI.getOperand(2)) | |||
| 474 | .add(MI.getOperand(3)) | |||
| 475 | .constrainAllUses(MIRBuilder.getTII(), *ST.getRegisterInfo(), | |||
| 476 | *ST.getRegBankInfo())) | |||
| 477 | return false; | |||
| 478 | MI.eraseFromParent(); | |||
| 479 | return true; | |||
| 480 | } | |||
| 481 | ||||
| 482 | static bool MSA3OpIntrinsicToGeneric(MachineInstr &MI, unsigned Opcode, | |||
| 483 | MachineIRBuilder &MIRBuilder, | |||
| 484 | const MipsSubtarget &ST) { | |||
| 485 | assert(ST.hasMSA() && "MSA intrinsic not supported on target without MSA."); | |||
| 486 | MIRBuilder.buildInstr(Opcode) | |||
| 487 | .add(MI.getOperand(0)) | |||
| 488 | .add(MI.getOperand(2)) | |||
| 489 | .add(MI.getOperand(3)); | |||
| 490 | MI.eraseFromParent(); | |||
| 491 | return true; | |||
| 492 | } | |||
| 493 | ||||
| 494 | static bool MSA2OpIntrinsicToGeneric(MachineInstr &MI, unsigned Opcode, | |||
| 495 | MachineIRBuilder &MIRBuilder, | |||
| 496 | const MipsSubtarget &ST) { | |||
| 497 | assert(ST.hasMSA() && "MSA intrinsic not supported on target without MSA."); | |||
| 498 | MIRBuilder.buildInstr(Opcode) | |||
| 499 | .add(MI.getOperand(0)) | |||
| 500 | .add(MI.getOperand(2)); | |||
| 501 | MI.eraseFromParent(); | |||
| 502 | return true; | |||
| 503 | } | |||
| 504 | ||||
| 505 | bool MipsLegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper, | |||
| 506 | MachineInstr &MI) const { | |||
| 507 | MachineIRBuilder &MIRBuilder = Helper.MIRBuilder; | |||
| 508 | const MipsSubtarget &ST = MI.getMF()->getSubtarget<MipsSubtarget>(); | |||
| 509 | const MipsInstrInfo &TII = *ST.getInstrInfo(); | |||
| 510 | const MipsRegisterInfo &TRI = *ST.getRegisterInfo(); | |||
| 511 | const RegisterBankInfo &RBI = *ST.getRegBankInfo(); | |||
| 512 | ||||
| 513 | switch (MI.getIntrinsicID()) { | |||
| 514 | case Intrinsic::trap: { | |||
| 515 | MachineInstr *Trap = MIRBuilder.buildInstr(Mips::TRAP); | |||
| 516 | MI.eraseFromParent(); | |||
| 517 | return constrainSelectedInstRegOperands(*Trap, TII, TRI, RBI); | |||
| 518 | } | |||
| 519 | case Intrinsic::vacopy: { | |||
| 520 | MachinePointerInfo MPO; | |||
| 521 | LLT PtrTy = LLT::pointer(0, 32); | |||
| 522 | auto Tmp = | |||
| 523 | MIRBuilder.buildLoad(PtrTy, MI.getOperand(2), | |||
| 524 | *MI.getMF()->getMachineMemOperand( | |||
| 525 | MPO, MachineMemOperand::MOLoad, PtrTy, Align(4))); | |||
| 526 | MIRBuilder.buildStore(Tmp, MI.getOperand(1), | |||
| 527 | *MI.getMF()->getMachineMemOperand( | |||
| 528 | MPO, MachineMemOperand::MOStore, PtrTy, Align(4))); | |||
| 529 | MI.eraseFromParent(); | |||
| 530 | return true; | |||
| 531 | } | |||
| 532 | case Intrinsic::mips_addv_b: | |||
| 533 | case Intrinsic::mips_addv_h: | |||
| 534 | case Intrinsic::mips_addv_w: | |||
| 535 | case Intrinsic::mips_addv_d: | |||
| 536 | return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_ADD, MIRBuilder, ST); | |||
| 537 | case Intrinsic::mips_addvi_b: | |||
| 538 | return SelectMSA3OpIntrinsic(MI, Mips::ADDVI_B, MIRBuilder, ST); | |||
| 539 | case Intrinsic::mips_addvi_h: | |||
| 540 | return SelectMSA3OpIntrinsic(MI, Mips::ADDVI_H, MIRBuilder, ST); | |||
| 541 | case Intrinsic::mips_addvi_w: | |||
| 542 | return SelectMSA3OpIntrinsic(MI, Mips::ADDVI_W, MIRBuilder, ST); | |||
| 543 | case Intrinsic::mips_addvi_d: | |||
| 544 | return SelectMSA3OpIntrinsic(MI, Mips::ADDVI_D, MIRBuilder, ST); | |||
| 545 | case Intrinsic::mips_subv_b: | |||
| 546 | case Intrinsic::mips_subv_h: | |||
| 547 | case Intrinsic::mips_subv_w: | |||
| 548 | case Intrinsic::mips_subv_d: | |||
| 549 | return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_SUB, MIRBuilder, ST); | |||
| 550 | case Intrinsic::mips_subvi_b: | |||
| 551 | return SelectMSA3OpIntrinsic(MI, Mips::SUBVI_B, MIRBuilder, ST); | |||
| 552 | case Intrinsic::mips_subvi_h: | |||
| 553 | return SelectMSA3OpIntrinsic(MI, Mips::SUBVI_H, MIRBuilder, ST); | |||
| 554 | case Intrinsic::mips_subvi_w: | |||
| 555 | return SelectMSA3OpIntrinsic(MI, Mips::SUBVI_W, MIRBuilder, ST); | |||
| 556 | case Intrinsic::mips_subvi_d: | |||
| 557 | return SelectMSA3OpIntrinsic(MI, Mips::SUBVI_D, MIRBuilder, ST); | |||
| 558 | case Intrinsic::mips_mulv_b: | |||
| 559 | case Intrinsic::mips_mulv_h: | |||
| 560 | case Intrinsic::mips_mulv_w: | |||
| 561 | case Intrinsic::mips_mulv_d: | |||
| 562 | return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_MUL, MIRBuilder, ST); | |||
| 563 | case Intrinsic::mips_div_s_b: | |||
| 564 | case Intrinsic::mips_div_s_h: | |||
| 565 | case Intrinsic::mips_div_s_w: | |||
| 566 | case Intrinsic::mips_div_s_d: | |||
| 567 | return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_SDIV, MIRBuilder, ST); | |||
| 568 | case Intrinsic::mips_mod_s_b: | |||
| 569 | case Intrinsic::mips_mod_s_h: | |||
| 570 | case Intrinsic::mips_mod_s_w: | |||
| 571 | case Intrinsic::mips_mod_s_d: | |||
| 572 | return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_SREM, MIRBuilder, ST); | |||
| 573 | case Intrinsic::mips_div_u_b: | |||
| 574 | case Intrinsic::mips_div_u_h: | |||
| 575 | case Intrinsic::mips_div_u_w: | |||
| 576 | case Intrinsic::mips_div_u_d: | |||
| 577 | return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_UDIV, MIRBuilder, ST); | |||
| 578 | case Intrinsic::mips_mod_u_b: | |||
| 579 | case Intrinsic::mips_mod_u_h: | |||
| 580 | case Intrinsic::mips_mod_u_w: | |||
| 581 | case Intrinsic::mips_mod_u_d: | |||
| 582 | return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_UREM, MIRBuilder, ST); | |||
| 583 | case Intrinsic::mips_fadd_w: | |||
| 584 | case Intrinsic::mips_fadd_d: | |||
| 585 | return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_FADD, MIRBuilder, ST); | |||
| 586 | case Intrinsic::mips_fsub_w: | |||
| 587 | case Intrinsic::mips_fsub_d: | |||
| 588 | return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_FSUB, MIRBuilder, ST); | |||
| 589 | case Intrinsic::mips_fmul_w: | |||
| 590 | case Intrinsic::mips_fmul_d: | |||
| 591 | return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_FMUL, MIRBuilder, ST); | |||
| 592 | case Intrinsic::mips_fdiv_w: | |||
| 593 | case Intrinsic::mips_fdiv_d: | |||
| 594 | return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_FDIV, MIRBuilder, ST); | |||
| 595 | case Intrinsic::mips_fmax_a_w: | |||
| 596 | return SelectMSA3OpIntrinsic(MI, Mips::FMAX_A_W, MIRBuilder, ST); | |||
| 597 | case Intrinsic::mips_fmax_a_d: | |||
| 598 | return SelectMSA3OpIntrinsic(MI, Mips::FMAX_A_D, MIRBuilder, ST); | |||
| 599 | case Intrinsic::mips_fsqrt_w: | |||
| 600 | return MSA2OpIntrinsicToGeneric(MI, TargetOpcode::G_FSQRT, MIRBuilder, ST); | |||
| 601 | case Intrinsic::mips_fsqrt_d: | |||
| 602 | return MSA2OpIntrinsicToGeneric(MI, TargetOpcode::G_FSQRT, MIRBuilder, ST); | |||
| 603 | default: | |||
| 604 | break; | |||
| 605 | } | |||
| 606 | return true; | |||
| 607 | } |
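| File: | llvm/include/llvm/Support/MathExtras.h |

The support header below is included in the report because it defines Log2_32 (its line 382), whose `unsigned` return type is what turns the -1 computed for a zero input into the 4294967295 shift amount flagged above.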
| 1 | //===-- llvm/Support/MathExtras.h - Useful math functions -------*- C++ -*-===// |
| 2 | // |
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| 4 | // See https://llvm.org/LICENSE.txt for license information. |
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| 6 | // |
| 7 | //===----------------------------------------------------------------------===// |
| 8 | // |
| 9 | // This file contains some functions that are useful for math stuff. |
| 10 | // |
| 11 | //===----------------------------------------------------------------------===// |
| 12 | |
| 13 | #ifndef LLVM_SUPPORT_MATHEXTRAS_H |
| 14 | #define LLVM_SUPPORT_MATHEXTRAS_H |
| 15 | |
| 16 | #include "llvm/ADT/bit.h" |
| 17 | #include "llvm/Support/Compiler.h" |
| 18 | #include <cassert> |
| 19 | #include <climits> |
| 20 | #include <cstdint> |
| 21 | #include <cstring> |
| 22 | #include <limits> |
| 23 | #include <type_traits> |
| 24 | |
| 25 | namespace llvm { |
| 26 | |
| 27 | /// Mathematical constants. |
| 28 | namespace numbers { |
| 29 | // TODO: Track C++20 std::numbers. |
| 30 | // TODO: Favor using the hexadecimal FP constants (requires C++17). |
| 31 | constexpr double e = 2.7182818284590452354, // (0x1.5bf0a8b145749P+1) https://oeis.org/A001113 |
| 32 | egamma = .57721566490153286061, // (0x1.2788cfc6fb619P-1) https://oeis.org/A001620 |
| 33 | ln2 = .69314718055994530942, // (0x1.62e42fefa39efP-1) https://oeis.org/A002162 |
| 34 | ln10 = 2.3025850929940456840, // (0x1.26bb1bbb55516P+1) https://oeis.org/A002392
| 35 | log2e = 1.4426950408889634074, // (0x1.71547652b82feP+0) |
| 36 | log10e = .43429448190325182765, // (0x1.bcb7b1526e50eP-2) |
| 37 | pi = 3.1415926535897932385, // (0x1.921fb54442d18P+1) https://oeis.org/A000796 |
| 38 | inv_pi = .31830988618379067154, // (0x1.45f306bc9c883P-2) https://oeis.org/A049541 |
| 39 | sqrtpi = 1.7724538509055160273, // (0x1.c5bf891b4ef6bP+0) https://oeis.org/A002161 |
| 40 | inv_sqrtpi = .56418958354775628695, // (0x1.20dd750429b6dP-1) https://oeis.org/A087197 |
| 41 | sqrt2 = 1.4142135623730950488, // (0x1.6a09e667f3bcdP+0) https://oeis.org/A002193
| 42 | inv_sqrt2 = .70710678118654752440, // (0x1.6a09e667f3bcdP-1) |
| 43 | sqrt3 = 1.7320508075688772935, // (0x1.bb67ae8584caaP+0) https://oeis.org/A002194 |
| 44 | inv_sqrt3 = .57735026918962576451, // (0x1.279a74590331cP-1) |
| 45 | phi = 1.6180339887498948482; // (0x1.9e3779b97f4a8P+0) https://oeis.org/A001622 |
| 46 | constexpr float ef = 2.71828183F, // (0x1.5bf0a8P+1) https://oeis.org/A001113 |
| 47 | egammaf = .577215665F, // (0x1.2788d0P-1) https://oeis.org/A001620 |
| 48 | ln2f = .693147181F, // (0x1.62e430P-1) https://oeis.org/A002162 |
| 49 | ln10f = 2.30258509F, // (0x1.26bb1cP+1) https://oeis.org/A002392 |
| 50 | log2ef = 1.44269504F, // (0x1.715476P+0) |
| 51 | log10ef = .434294482F, // (0x1.bcb7b2P-2) |
| 52 | pif = 3.14159265F, // (0x1.921fb6P+1) https://oeis.org/A000796 |
| 53 | inv_pif = .318309886F, // (0x1.45f306P-2) https://oeis.org/A049541 |
| 54 | sqrtpif = 1.77245385F, // (0x1.c5bf8aP+0) https://oeis.org/A002161 |
| 55 | inv_sqrtpif = .564189584F, // (0x1.20dd76P-1) https://oeis.org/A087197 |
| 56 | sqrt2f = 1.41421356F, // (0x1.6a09e6P+0) https://oeis.org/A002193 |
| 57 | inv_sqrt2f = .707106781F, // (0x1.6a09e6P-1) |
| 58 | sqrt3f = 1.73205081F, // (0x1.bb67aeP+0) https://oeis.org/A002194 |
| 59 | inv_sqrt3f = .577350269F, // (0x1.279a74P-1) |
| 60 | phif = 1.61803399F; // (0x1.9e377aP+0) https://oeis.org/A001622 |
| 61 | } // namespace numbers |
| 62 | |
| 63 | /// Count number of 0's from the least significant bit to the most |
| 64 | /// stopping at the first 1. |
| 65 | /// |
| 66 | /// Only unsigned integral types are allowed. |
| 67 | /// |
| 68 | /// Returns std::numeric_limits<T>::digits on an input of 0. |
| 69 | template <typename T> |
| 70 | LLVM_DEPRECATED("Use llvm::countr_zero instead.", "llvm::countr_zero")__attribute__((deprecated("Use llvm::countr_zero instead.", "llvm::countr_zero" ))) |
| 71 | unsigned countTrailingZeros(T Val) { |
| 72 | static_assert(std::is_unsigned_v<T>, |
| 73 | "Only unsigned integral types are allowed."); |
| 74 | return llvm::countr_zero(Val); |
| 75 | } |
| 76 | |
| 77 | /// Count number of 0's from the most significant bit to the least |
| 78 | /// stopping at the first 1. |
| 79 | /// |
| 80 | /// Only unsigned integral types are allowed. |
| 81 | /// |
| 82 | /// Returns std::numeric_limits<T>::digits on an input of 0. |
| 83 | template <typename T> |
| 84 | LLVM_DEPRECATED("Use llvm::countl_zero instead.", "llvm::countl_zero")__attribute__((deprecated("Use llvm::countl_zero instead.", "llvm::countl_zero" ))) |
| 85 | unsigned countLeadingZeros(T Val) { |
| 86 | static_assert(std::is_unsigned_v<T>, |
| 87 | "Only unsigned integral types are allowed."); |
| 88 | return llvm::countl_zero(Val); |
| 89 | } |
| 90 | |
| 91 | /// Create a bitmask with the N right-most bits set to 1, and all other |
| 92 | /// bits set to 0. Only unsigned types are allowed. |
| 93 | template <typename T> T maskTrailingOnes(unsigned N) { |
| 94 | static_assert(std::is_unsigned_v<T>, "Invalid type!"); |
| 95 | const unsigned Bits = CHAR_BIT * sizeof(T);
| 96 | assert(N <= Bits && "Invalid bit index");
| 97 | return N == 0 ? 0 : (T(-1) >> (Bits - N)); |
| 98 | } |
| 99 | |
| 100 | /// Create a bitmask with the N left-most bits set to 1, and all other |
| 101 | /// bits set to 0. Only unsigned types are allowed. |
| 102 | template <typename T> T maskLeadingOnes(unsigned N) { |
| 103 | return ~maskTrailingOnes<T>(CHAR_BIT * sizeof(T) - N);
| 104 | } |
| 105 | |
| 106 | /// Create a bitmask with the N right-most bits set to 0, and all other |
| 107 | /// bits set to 1. Only unsigned types are allowed. |
| 108 | template <typename T> T maskTrailingZeros(unsigned N) { |
| 109 | return maskLeadingOnes<T>(CHAR_BIT * sizeof(T) - N);
| 110 | } |
| 111 | |
| 112 | /// Create a bitmask with the N left-most bits set to 0, and all other |
| 113 | /// bits set to 1. Only unsigned types are allowed. |
| 114 | template <typename T> T maskLeadingZeros(unsigned N) { |
| 115 | return maskTrailingOnes<T>(CHAR_BIT * sizeof(T) - N);
| 116 | } |
| 117 | |
| 118 | /// Macro compressed bit reversal table for 256 bits. |
| 119 | /// |
| 120 | /// http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable |
| 121 | static const unsigned char BitReverseTable256[256] = { |
| 122 | #define R2(n) n, n + 2 * 64, n + 1 * 64, n + 3 * 64 |
| 123 | #define R4(n) R2(n), R2(n + 2 * 16), R2(n + 1 * 16), R2(n + 3 * 16) |
| 124 | #define R6(n) R4(n), R4(n + 2 * 4), R4(n + 1 * 4), R4(n + 3 * 4) |
| 125 | R6(0), R6(2), R6(1), R6(3) |
| 126 | #undef R2 |
| 127 | #undef R4 |
| 128 | #undef R6 |
| 129 | }; |
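As a quick sanity check on the macro compression (expanded by hand): R2(0) yields 0, 128, 64, 192, which are exactly the bit-reversed bytes of 0, 1, 2, 3, so the table's entry i holds the reversal of byte i.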
| 130 | |
| 131 | /// Reverse the bits in \p Val. |
| 132 | template <typename T> T reverseBits(T Val) { |
| 133 | #if __has_builtin(__builtin_bitreverse8)
| 134 | if constexpr (std::is_same_v<T, uint8_t>) |
| 135 | return __builtin_bitreverse8(Val); |
| 136 | #endif |
| 137 | #if __has_builtin(__builtin_bitreverse16)
| 138 | if constexpr (std::is_same_v<T, uint16_t>) |
| 139 | return __builtin_bitreverse16(Val); |
| 140 | #endif |
| 141 | #if __has_builtin(__builtin_bitreverse32)
| 142 | if constexpr (std::is_same_v<T, uint32_t>) |
| 143 | return __builtin_bitreverse32(Val); |
| 144 | #endif |
| 145 | #if __has_builtin(__builtin_bitreverse64)
| 146 | if constexpr (std::is_same_v<T, uint64_t>) |
| 147 | return __builtin_bitreverse64(Val); |
| 148 | #endif |
| 149 | |
| 150 | unsigned char in[sizeof(Val)]; |
| 151 | unsigned char out[sizeof(Val)]; |
| 152 | std::memcpy(in, &Val, sizeof(Val)); |
| 153 | for (unsigned i = 0; i < sizeof(Val); ++i) |
| 154 | out[(sizeof(Val) - i) - 1] = BitReverseTable256[in[i]]; |
| 155 | std::memcpy(&Val, out, sizeof(Val)); |
| 156 | return Val; |
| 157 | } |
| 158 | |
| 159 | // NOTE: The following support functions use the _32/_64 extensions instead of |
| 160 | // type overloading so that signed and unsigned integers can be used without |
| 161 | // ambiguity. |
| 162 | |
| 163 | /// Return the high 32 bits of a 64 bit value. |
| 164 | constexpr inline uint32_t Hi_32(uint64_t Value) { |
| 165 | return static_cast<uint32_t>(Value >> 32); |
| 166 | } |
| 167 | |
| 168 | /// Return the low 32 bits of a 64 bit value. |
| 169 | constexpr inline uint32_t Lo_32(uint64_t Value) { |
| 170 | return static_cast<uint32_t>(Value); |
| 171 | } |
| 172 | |
| 173 | /// Make a 64-bit integer from a high / low pair of 32-bit integers. |
| 174 | constexpr inline uint64_t Make_64(uint32_t High, uint32_t Low) { |
| 175 | return ((uint64_t)High << 32) | (uint64_t)Low; |
| 176 | } |
| 177 | |
| 178 | /// Checks if an integer fits into the given bit width. |
| 179 | template <unsigned N> constexpr inline bool isInt(int64_t x) { |
| 180 | if constexpr (N == 8) |
| 181 | return static_cast<int8_t>(x) == x; |
| 182 | if constexpr (N == 16) |
| 183 | return static_cast<int16_t>(x) == x; |
| 184 | if constexpr (N == 32) |
| 185 | return static_cast<int32_t>(x) == x; |
| 186 | if constexpr (N < 64) |
| 187 | return -(INT64_C(1) << (N - 1)) <= x && x < (INT64_C(1) << (N - 1));
| 188 | (void)x; // MSVC v19.25 warns that x is unused. |
| 189 | return true; |
| 190 | } |
| 191 | |
| 192 | /// Checks if a signed integer is an N bit number shifted left by S. |
| 193 | template <unsigned N, unsigned S> |
| 194 | constexpr inline bool isShiftedInt(int64_t x) { |
| 195 | static_assert( |
| 196 | N > 0, "isShiftedInt<0> doesn't make sense (refers to a 0-bit number."); |
| 197 | static_assert(N + S <= 64, "isShiftedInt<N, S> with N + S > 64 is too wide."); |
| 198 | return isInt<N + S>(x) && (x % (UINT64_C(1) << S) == 0);
| 199 | } |
| 200 | |
| 201 | /// Checks if an unsigned integer fits into the given bit width. |
| 202 | template <unsigned N> constexpr inline bool isUInt(uint64_t x) { |
| 203 | static_assert(N > 0, "isUInt<0> doesn't make sense"); |
| 204 | if constexpr (N == 8) |
| 205 | return static_cast<uint8_t>(x) == x; |
| 206 | if constexpr (N == 16) |
| 207 | return static_cast<uint16_t>(x) == x; |
| 208 | if constexpr (N == 32) |
| 209 | return static_cast<uint32_t>(x) == x; |
| 210 | if constexpr (N < 64) |
| 211 | return x < (UINT64_C(1) << (N));
| 212 | (void)x; // MSVC v19.25 warns that x is unused. |
| 213 | return true; |
| 214 | } |
| 215 | |
| 216 | /// Checks if an unsigned integer is an N bit number shifted left by S.
| 217 | template <unsigned N, unsigned S> |
| 218 | constexpr inline bool isShiftedUInt(uint64_t x) { |
| 219 | static_assert( |
| 220 | N > 0, "isShiftedUInt<0> doesn't make sense (refers to a 0-bit number)"); |
| 221 | static_assert(N + S <= 64, |
| 222 | "isShiftedUInt<N, S> with N + S > 64 is too wide."); |
| 223 | // Per the two static_asserts above, S must be strictly less than 64. So |
| 224 | // 1 << S is not undefined behavior. |
| 225 | return isUInt<N + S>(x) && (x % (UINT64_C(1) << S) == 0);
| 226 | } |
| 227 | |
| 228 | /// Gets the maximum value for a N-bit unsigned integer. |
| 229 | inline uint64_t maxUIntN(uint64_t N) { |
| 230 | assert(N > 0 && N <= 64 && "integer width out of range");
| 231 | |
| 232 | // uint64_t(1) << 64 is undefined behavior, so we can't do |
| 233 | // (uint64_t(1) << N) - 1 |
| 234 | // without checking first that N != 64. But this works and doesn't have a |
| 235 | // branch. |
| 236 | return UINT64_MAX >> (64 - N);
| 237 | } |
| 238 | |
| 239 | /// Gets the minimum value for a N-bit signed integer. |
| 240 | inline int64_t minIntN(int64_t N) { |
| 241 | assert(N > 0 && N <= 64 && "integer width out of range");
| 242 | |
| 243 | return UINT64_C(1) + ~(UINT64_C(1) << (N - 1));
| 244 | } |
| 245 | |
| 246 | /// Gets the maximum value for a N-bit signed integer. |
| 247 | inline int64_t maxIntN(int64_t N) { |
| 248 | assert(N > 0 && N <= 64 && "integer width out of range");
| 249 | |
| 250 | // This relies on two's complement wraparound when N == 64, so we convert to |
| 251 | // int64_t only at the very end to avoid UB. |
| 252 | return (UINT64_C(1) << (N - 1)) - 1;
| 253 | } |
| 254 | |
| 255 | /// Checks if an unsigned integer fits into the given (dynamic) bit width. |
| 256 | inline bool isUIntN(unsigned N, uint64_t x) { |
| 257 | return N >= 64 || x <= maxUIntN(N); |
| 258 | } |
| 259 | |
| 260 | /// Checks if an signed integer fits into the given (dynamic) bit width. |
| 261 | inline bool isIntN(unsigned N, int64_t x) { |
| 262 | return N >= 64 || (minIntN(N) <= x && x <= maxIntN(N)); |
| 263 | } |
| 264 | |
| 265 | /// Return true if the argument is a non-empty sequence of ones starting at the |
| 266 | /// least significant bit with the remainder zero (32 bit version). |
| 267 | /// Ex. isMask_32(0x0000FFFFU) == true. |
| 268 | constexpr inline bool isMask_32(uint32_t Value) { |
| 269 | return Value && ((Value + 1) & Value) == 0; |
| 270 | } |
| 271 | |
| 272 | /// Return true if the argument is a non-empty sequence of ones starting at the |
| 273 | /// least significant bit with the remainder zero (64 bit version). |
| 274 | constexpr inline bool isMask_64(uint64_t Value) { |
| 275 | return Value && ((Value + 1) & Value) == 0; |
| 276 | } |
| 277 | |
| 278 | /// Return true if the argument contains a non-empty sequence of ones with the |
| 279 | /// remainder zero (32 bit version.) Ex. isShiftedMask_32(0x0000FF00U) == true. |
| 280 | constexpr inline bool isShiftedMask_32(uint32_t Value) { |
| 281 | return Value && isMask_32((Value - 1) | Value); |
| 282 | } |
| 283 | |
| 284 | /// Return true if the argument contains a non-empty sequence of ones with the |
| 285 | /// remainder zero (64 bit version.) |
| 286 | constexpr inline bool isShiftedMask_64(uint64_t Value) { |
| 287 | return Value && isMask_64((Value - 1) | Value); |
| 288 | } |
| 289 | |
| 290 | /// Return true if the argument is a power of two > 0. |
| 291 | /// Ex. isPowerOf2_32(0x00100000U) == true (32 bit edition.) |
| 292 | constexpr inline bool isPowerOf2_32(uint32_t Value) { |
| 293 | return llvm::has_single_bit(Value); |
| 294 | } |
| 295 | |
| 296 | /// Return true if the argument is a power of two > 0 (64 bit edition.) |
| 297 | constexpr inline bool isPowerOf2_64(uint64_t Value) { |
| 298 | return llvm::has_single_bit(Value); |
| 299 | } |
| 300 | |
| 301 | /// Count the number of ones from the most significant bit to the first |
| 302 | /// zero bit. |
| 303 | /// |
| 304 | /// Ex. countLeadingOnes(0xFF0FFF00) == 8. |
| 305 | /// Only unsigned integral types are allowed. |
| 306 | /// |
| 307 | /// Returns std::numeric_limits<T>::digits on an input of all ones. |
| 308 | template <typename T> |
| 309 | LLVM_DEPRECATED("Use llvm::countl_one instead.", "llvm::countl_one")__attribute__((deprecated("Use llvm::countl_one instead.", "llvm::countl_one" ))) |
| 310 | unsigned countLeadingOnes(T Value) { |
| 311 | static_assert(std::is_unsigned_v<T>, |
| 312 | "Only unsigned integral types are allowed."); |
| 313 | return llvm::countl_one<T>(Value); |
| 314 | } |
| 315 | |
| 316 | /// Count the number of ones from the least significant bit to the first |
| 317 | /// zero bit. |
| 318 | /// |
| 319 | /// Ex. countTrailingOnes(0x00FF00FF) == 8. |
| 320 | /// Only unsigned integral types are allowed. |
| 321 | /// |
| 322 | /// Returns std::numeric_limits<T>::digits on an input of all ones. |
| 323 | template <typename T> |
| 324 | LLVM_DEPRECATED("Use llvm::countr_one instead.", "llvm::countr_one")__attribute__((deprecated("Use llvm::countr_one instead.", "llvm::countr_one" ))) |
| 325 | unsigned countTrailingOnes(T Value) { |
| 326 | static_assert(std::is_unsigned_v<T>, |
| 327 | "Only unsigned integral types are allowed."); |
| 328 | return llvm::countr_one<T>(Value); |
| 329 | } |
| 330 | |
| 331 | /// Count the number of set bits in a value. |
| 332 | /// Ex. countPopulation(0xF000F000) = 8 |
| 333 | /// Returns 0 if the word is zero. |
| 334 | template <typename T> |
| 335 | LLVM_DEPRECATED("Use llvm::popcount instead.", "llvm::popcount")__attribute__((deprecated("Use llvm::popcount instead.", "llvm::popcount" ))) |
| 336 | inline unsigned countPopulation(T Value) { |
| 337 | static_assert(std::is_unsigned_v<T>, |
| 338 | "Only unsigned integral types are allowed."); |
| 339 | return (unsigned)llvm::popcount(Value); |
| 340 | } |
| 341 | |
| 342 | /// Return true if the argument contains a non-empty sequence of ones with the |
| 343 | /// remainder zero (32 bit version.) Ex. isShiftedMask_32(0x0000FF00U) == true. |
| 344 | /// If true, \p MaskIdx will specify the index of the lowest set bit and \p |
| 345 | /// MaskLen is updated to specify the length of the mask, else neither are |
| 346 | /// updated. |
| 347 | inline bool isShiftedMask_32(uint32_t Value, unsigned &MaskIdx, |
| 348 | unsigned &MaskLen) { |
| 349 | if (!isShiftedMask_32(Value)) |
| 350 | return false; |
| 351 | MaskIdx = llvm::countr_zero(Value); |
| 352 | MaskLen = llvm::popcount(Value); |
| 353 | return true; |
| 354 | } |
| 355 | |
| 356 | /// Return true if the argument contains a non-empty sequence of ones with the |
| 357 | /// remainder zero (64 bit version.) If true, \p MaskIdx will specify the index |
| 358 | /// of the lowest set bit and \p MaskLen is updated to specify the length of the |
| 359 | /// mask, else neither are updated. |
| 360 | inline bool isShiftedMask_64(uint64_t Value, unsigned &MaskIdx, |
| 361 | unsigned &MaskLen) { |
| 362 | if (!isShiftedMask_64(Value)) |
| 363 | return false; |
| 364 | MaskIdx = llvm::countr_zero(Value); |
| 365 | MaskLen = llvm::popcount(Value); |
| 366 | return true; |
| 367 | } |
| 368 | |
| 369 | /// Compile time Log2. |
| 370 | /// Valid only for positive powers of two. |
| 371 | template <size_t kValue> constexpr inline size_t CTLog2() { |
| 372 | static_assert(kValue > 0 && llvm::isPowerOf2_64(kValue), |
| 373 | "Value is not a valid power of 2"); |
| 374 | return 1 + CTLog2<kValue / 2>(); |
| 375 | } |
| 376 | |
| 377 | template <> constexpr inline size_t CTLog2<1>() { return 0; } |
| 378 | |
| 379 | /// Return the floor log base 2 of the specified value, -1 if the value is zero. |
| 380 | /// (32 bit edition.) |
| 381 | /// Ex. Log2_32(32) == 5, Log2_32(1) == 0, Log2_32(0) == -1, Log2_32(6) == 2 |
| 382 | inline unsigned Log2_32(uint32_t Value) { |
| 383 | return 31 - llvm::countl_zero(Value); |
| 384 | } |
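This is the function called from MipsLegalizerInfo.cpp line 360 above: for Value == 0, countl_zero returns 32, the subtraction produces -1, and the `unsigned` return type wraps it to 4294967295, the shift amount the analyzer reports.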
| 385 | |
| 386 | /// Return the floor log base 2 of the specified value, -1 if the value is zero. |
| 387 | /// (64 bit edition.) |
| 388 | inline unsigned Log2_64(uint64_t Value) { |
| 389 | return 63 - llvm::countl_zero(Value); |
| 390 | } |
| 391 | |
| 392 | /// Return the ceil log base 2 of the specified value, 32 if the value is zero. |
| 393 | /// (32 bit edition). |
| 394 | /// Ex. Log2_32_Ceil(32) == 5, Log2_32_Ceil(1) == 0, Log2_32_Ceil(6) == 3 |
| 395 | inline unsigned Log2_32_Ceil(uint32_t Value) { |
| 396 | return 32 - llvm::countl_zero(Value - 1); |
| 397 | } |
| 398 | |
| 399 | /// Return the ceil log base 2 of the specified value, 64 if the value is zero. |
| 400 | /// (64 bit edition.) |
| 401 | inline unsigned Log2_64_Ceil(uint64_t Value) { |
| 402 | return 64 - llvm::countl_zero(Value - 1); |
| 403 | } |
| 404 | |
| 405 | /// This function takes a 64-bit integer and returns the bit equivalent double. |
| 406 | LLVM_DEPRECATED("use llvm::bit_cast instead", "llvm::bit_cast<double>")__attribute__((deprecated("use llvm::bit_cast instead", "llvm::bit_cast<double>" ))) |
| 407 | inline double BitsToDouble(uint64_t Bits) { |
| 408 | static_assert(sizeof(uint64_t) == sizeof(double), "Unexpected type sizes"); |
| 409 | return llvm::bit_cast<double>(Bits); |
| 410 | } |
| 411 | |
| 412 | /// This function takes a 32-bit integer and returns the bit equivalent float. |
| 413 | LLVM_DEPRECATED("use llvm::bit_cast instead", "llvm::bit_cast<float>")__attribute__((deprecated("use llvm::bit_cast instead", "llvm::bit_cast<float>" ))) |
| 414 | inline float BitsToFloat(uint32_t Bits) { |
| 415 | static_assert(sizeof(uint32_t) == sizeof(float), "Unexpected type sizes"); |
| 416 | return llvm::bit_cast<float>(Bits); |
| 417 | } |
| 418 | |
| 419 | /// This function takes a double and returns the bit equivalent 64-bit integer. |
| 420 | /// Note that copying doubles around changes the bits of NaNs on some hosts, |
| 421 | /// notably x86, so this routine cannot be used if these bits are needed. |
| 422 | LLVM_DEPRECATED("use llvm::bit_cast instead", "llvm::bit_cast<uint64_t>")__attribute__((deprecated("use llvm::bit_cast instead", "llvm::bit_cast<uint64_t>" ))) |
| 423 | inline uint64_t DoubleToBits(double Double) { |
| 424 | static_assert(sizeof(uint64_t) == sizeof(double), "Unexpected type sizes"); |
| 425 | return llvm::bit_cast<uint64_t>(Double); |
| 426 | } |
| 427 | |
| 428 | /// This function takes a float and returns the bit equivalent 32-bit integer. |
| 429 | /// Note that copying floats around changes the bits of NaNs on some hosts, |
| 430 | /// notably x86, so this routine cannot be used if these bits are needed. |
| 431 | LLVM_DEPRECATED("use llvm::bit_cast instead", "llvm::bit_cast<uint32_t>")__attribute__((deprecated("use llvm::bit_cast instead", "llvm::bit_cast<uint32_t>" ))) |
| 432 | inline uint32_t FloatToBits(float Float) { |
| 433 | static_assert(sizeof(uint32_t) == sizeof(float), "Unexpected type sizes"); |
| 434 | return llvm::bit_cast<uint32_t>(Float); |
| 435 | } |
| 436 | |
| 437 | /// A and B are either alignments or offsets. Return the minimum alignment that |
| 438 | /// may be assumed after adding the two together. |
| 439 | constexpr inline uint64_t MinAlign(uint64_t A, uint64_t B) { |
| 440 | // The largest power of 2 that divides both A and B. |
| 441 | // |
| 442 | // Replace "-Value" by "1+~Value" in the following commented code to avoid |
| 443 | // MSVC warning C4146 |
| 444 | // return (A | B) & -(A | B); |
| 445 | return (A | B) & (1 + ~(A | B)); |
| 446 | } |
| 447 | |
| 448 | /// Returns the next power of two (in 64-bits) that is strictly greater than A. |
| 449 | /// Returns zero on overflow. |
| 450 | constexpr inline uint64_t NextPowerOf2(uint64_t A) { |
| 451 | A |= (A >> 1); |
| 452 | A |= (A >> 2); |
| 453 | A |= (A >> 4); |
| 454 | A |= (A >> 8); |
| 455 | A |= (A >> 16); |
| 456 | A |= (A >> 32); |
| 457 | return A + 1; |
| 458 | } |
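Worked trace for a small input: NextPowerOf2(5) smears the set bits of 0b101 into 0b111 through the shifted ORs, then returns 0b111 + 1 = 8, the next power of two strictly greater than 5.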
| 459 | |
| 460 | /// Returns the power of two which is less than or equal to the given value. |
| 461 | /// Essentially, it is a floor operation across the domain of powers of two. |
| 462 | LLVM_DEPRECATED("use llvm::bit_floor instead", "llvm::bit_floor")__attribute__((deprecated("use llvm::bit_floor instead", "llvm::bit_floor" ))) |
| 463 | inline uint64_t PowerOf2Floor(uint64_t A) { |
| 464 | return llvm::bit_floor(A); |
| 465 | } |
| 466 | |
| 467 | /// Returns the power of two which is greater than or equal to the given value. |
| 468 | /// Essentially, it is a ceil operation across the domain of powers of two. |
| 469 | inline uint64_t PowerOf2Ceil(uint64_t A) { |
| 470 | if (!A) |
| 471 | return 0; |
| 472 | return NextPowerOf2(A - 1); |
| 473 | } |
| 474 | |
| 475 | /// Returns the next integer (mod 2**64) that is greater than or equal to |
| 476 | /// \p Value and is a multiple of \p Align. \p Align must be non-zero. |
| 477 | /// |
| 478 | /// Examples: |
| 479 | /// \code |
| 480 | /// alignTo(5, 8) = 8 |
| 481 | /// alignTo(17, 8) = 24 |
| 482 | /// alignTo(~0LL, 8) = 0 |
| 483 | /// alignTo(321, 255) = 510 |
| 484 | /// \endcode |
| 485 | inline uint64_t alignTo(uint64_t Value, uint64_t Align) { |
| 486 | assert(Align != 0u && "Align can't be 0.");
| 487 | return (Value + Align - 1) / Align * Align; |
| 488 | } |
| 489 | |
| 490 | inline uint64_t alignToPowerOf2(uint64_t Value, uint64_t Align) { |
| 491 | assert(Align != 0 && (Align & (Align - 1)) == 0 &&
| 492 | "Align must be a power of 2");
| 493 | return (Value + Align - 1) & -Align; |
| 494 | } |
| 495 | |
| 496 | /// If non-zero \p Skew is specified, the return value will be a minimal integer
| 497 | /// that is greater than or equal to \p Value and equal to \p Align * N + \p Skew
| 498 | /// for some integer N. If \p Skew is larger than \p Align, its value is adjusted
| 499 | /// to '\p Skew mod \p Align'. \p Align must be non-zero.
| 500 | /// |
| 501 | /// Examples: |
| 502 | /// \code |
| 503 | /// alignTo(5, 8, 7) = 7 |
| 504 | /// alignTo(17, 8, 1) = 17 |
| 505 | /// alignTo(~0LL, 8, 3) = 3 |
| 506 | /// alignTo(321, 255, 42) = 552 |
| 507 | /// \endcode |
| 508 | inline uint64_t alignTo(uint64_t Value, uint64_t Align, uint64_t Skew) { |
| 509 | assert(Align != 0u && "Align can't be 0.");
| 510 | Skew %= Align; |
| 511 | return alignTo(Value - Skew, Align) + Skew; |
| 512 | } |
| 513 | |
| 514 | /// Returns the next integer (mod 2**64) that is greater than or equal to |
| 515 | /// \p Value and is a multiple of \c Align. \c Align must be non-zero. |
| 516 | template <uint64_t Align> constexpr inline uint64_t alignTo(uint64_t Value) { |
| 517 | static_assert(Align != 0u, "Align must be non-zero"); |
| 518 | return (Value + Align - 1) / Align * Align; |
| 519 | } |
| 520 | |
| 521 | /// Returns the integer ceil(Numerator / Denominator). |
| 522 | inline uint64_t divideCeil(uint64_t Numerator, uint64_t Denominator) { |
| 523 | return alignTo(Numerator, Denominator) / Denominator; |
| 524 | } |
| 525 | |
| 526 | /// Returns the integer nearest(Numerator / Denominator); exact halves round up. \p Denominator must be non-zero.
| 527 | inline uint64_t divideNearest(uint64_t Numerator, uint64_t Denominator) { |
| 528 | return (Numerator + (Denominator / 2)) / Denominator; |
| 529 | } |
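Adding half the denominator before dividing rounds to nearest. Hypothetical usage:

  assert(divideNearest(7, 3) == 2);  // 7/3 ~ 2.33 rounds down
  assert(divideNearest(8, 3) == 3);  // 8/3 ~ 2.67 rounds up
  assert(divideNearest(3, 2) == 2);  // an exact half rounds up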
| 530 | |
| 531 | /// Returns the largest uint64_t less than or equal to \p Value that is equal
| 532 | /// to \p Skew mod \p Align. \p Align must be non-zero.
| 533 | inline uint64_t alignDown(uint64_t Value, uint64_t Align, uint64_t Skew = 0) { |
| 534 | assert(Align != 0u && "Align can't be 0.");
| 535 | Skew %= Align; |
| 536 | return (Value - Skew) / Align * Align + Skew; |
| 537 | } |
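Hypothetical usage covering the unskewed and skewed cases:

  assert(alignDown(17, 8) == 16);     // largest multiple of 8 <= 17
  assert(alignDown(17, 8, 1) == 17);  // largest value <= 17 that is 1 mod 8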
| 538 | |
| 539 | /// Sign-extend the number in the bottom B bits of X to a 32-bit integer. |
| 540 | /// Requires 0 < B <= 32. |
| 541 | template <unsigned B> constexpr inline int32_t SignExtend32(uint32_t X) { |
| 542 | static_assert(B > 0, "Bit width can't be 0."); |
| 543 | static_assert(B <= 32, "Bit width out of range."); |
| 544 | return int32_t(X << (32 - B)) >> (32 - B); |
| 545 | } |
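The shift pair moves bit B-1 into the sign position and relies on arithmetic right shift to replicate it. Being constexpr, the template form also works in constant expressions (hypothetical usage, assuming a two's complement target):

  static_assert(SignExtend32<4>(0xF) == -1);    // 0b1111: sign bit set
  static_assert(SignExtend32<4>(0x7) == 7);     // 0b0111: sign bit clear
  static_assert(SignExtend32<8>(0x80) == -128);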
| 546 | |
| 547 | /// Sign-extend the number in the bottom B bits of X to a 32-bit integer. |
| 548 | /// Requires 0 < B <= 32. |
| 549 | inline int32_t SignExtend32(uint32_t X, unsigned B) { |
| 550 | assert(B > 0 && "Bit width can't be 0.");
| 551 | assert(B <= 32 && "Bit width out of range.");
| 552 | return int32_t(X << (32 - B)) >> (32 - B); |
| 553 | } |
| 554 | |
| 555 | /// Sign-extend the number in the bottom B bits of X to a 64-bit integer. |
| 556 | /// Requires 0 < B <= 64. |
| 557 | template <unsigned B> constexpr inline int64_t SignExtend64(uint64_t x) { |
| 558 | static_assert(B > 0, "Bit width can't be 0."); |
| 559 | static_assert(B <= 64, "Bit width out of range."); |
| 560 | return int64_t(x << (64 - B)) >> (64 - B); |
| 561 | } |
| 562 | |
| 563 | /// Sign-extend the number in the bottom B bits of X to a 64-bit integer. |
| 564 | /// Requires 0 < B <= 64. |
| 565 | inline int64_t SignExtend64(uint64_t X, unsigned B) { |
| 566 | assert(B > 0 && "Bit width can't be 0.");
| 567 | assert(B <= 64 && "Bit width out of range.");
| 568 | return int64_t(X << (64 - B)) >> (64 - B); |
| 569 | } |
| 570 | |
| 571 | /// Subtract two unsigned integers, X and Y, of type T and return the absolute |
| 572 | /// value of the result. |
| 573 | template <typename T> |
| 574 | std::enable_if_t<std::is_unsigned_v<T>, T> AbsoluteDifference(T X, T Y) { |
| 575 | return X > Y ? (X - Y) : (Y - X); |
| 576 | } |
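The comparison avoids the unsigned wraparound a bare X - Y would produce when Y > X. Hypothetical usage:

  assert(AbsoluteDifference(3u, 7u) == 4u);  // 3u - 7u alone would wrap
  assert(AbsoluteDifference(7u, 3u) == 4u);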
| 577 | |
| 578 | /// Add two unsigned integers, X and Y, of type T. Clamp the result to the |
| 579 | /// maximum representable value of T on overflow. ResultOverflowed indicates if |
| 580 | /// the result is larger than the maximum representable value of type T. |
| 581 | template <typename T> |
| 582 | std::enable_if_t<std::is_unsigned_v<T>, T> |
| 583 | SaturatingAdd(T X, T Y, bool *ResultOverflowed = nullptr) { |
| 584 | bool Dummy; |
| 585 | bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy; |
| 586 | // Hacker's Delight, p. 29 |
| 587 | T Z = X + Y; |
| 588 | Overflowed = (Z < X || Z < Y); |
| 589 | if (Overflowed) |
| 590 | return std::numeric_limits<T>::max(); |
| 591 | else |
| 592 | return Z; |
| 593 | } |
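The `Z < X || Z < Y` test works because an unsigned sum overflows exactly when the truncated result wraps below an operand. A hypothetical sketch at 8-bit width (assuming <cstdint>):

  bool Ov;
  assert(SaturatingAdd<uint8_t>(200, 100, &Ov) == 255 && Ov);  // 300 clamps to UINT8_MAX
  assert(SaturatingAdd<uint8_t>(20, 30, &Ov) == 50 && !Ov);    // in-range sums pass through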
| 594 | |
| 595 | /// Add multiple unsigned integers of type T. Clamp the result to the |
| 596 | /// maximum representable value of T on overflow. |
| 597 | template <class T, class... Ts> |
| 598 | std::enable_if_t<std::is_unsigned_v<T>, T> SaturatingAdd(T X, T Y, T Z, |
| 599 | Ts... Args) { |
| 600 | bool Overflowed = false; |
| 601 | T XY = SaturatingAdd(X, Y, &Overflowed); |
| 602 | if (Overflowed) |
| 603 | return SaturatingAdd(std::numeric_limits<T>::max(), T(1), Args...); |
| 604 | return SaturatingAdd(XY, Z, Args...); |
| 605 | } |
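Once a partial sum saturates, the recursion re-saturates on every remaining addend, so the clamp is sticky. A hypothetical sketch (overload resolution picks this variadic form for three or more addends):

  assert(SaturatingAdd<uint8_t>(100, 100, 100) == 255);  // clamps at the second addition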
| 606 | |
| 607 | /// Multiply two unsigned integers, X and Y, of type T. Clamp the result to the |
| 608 | /// maximum representable value of T on overflow. ResultOverflowed indicates if |
| 609 | /// the result is larger than the maximum representable value of type T. |
| 610 | template <typename T> |
| 611 | std::enable_if_t<std::is_unsigned_v<T>, T> |
| 612 | SaturatingMultiply(T X, T Y, bool *ResultOverflowed = nullptr) { |
| 613 | bool Dummy; |
| 614 | bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy; |
| 615 | |
| 616 | // Hacker's Delight, p. 30 has a different algorithm, but we don't use that |
| 617 | // because it fails for uint16_t (where multiplication can have undefined |
| 618 | // behavior due to promotion to int), and requires a division in addition |
| 619 | // to the multiplication. |
| 620 | |
| 621 | Overflowed = false; |
| 622 | |
| 623 | // Log2(Z) would be either Log2Z or Log2Z + 1. |
| 624 | // Special case: if X or Y is 0, Log2_64 gives -1, and Log2Z |
| 625 | // will necessarily be less than Log2Max as desired. |
| 626 | int Log2Z = Log2_64(X) + Log2_64(Y); |
| 627 | const T Max = std::numeric_limits<T>::max(); |
| 628 | int Log2Max = Log2_64(Max); |
| 629 | if (Log2Z < Log2Max) { |
| 630 | return X * Y; |
| 631 | } |
| 632 | if (Log2Z > Log2Max) { |
| 633 | Overflowed = true; |
| 634 | return Max; |
| 635 | } |
| 636 | |
| 637 | // We're going to use the top bit, and maybe overflow one |
| 638 | // bit past it. Multiply all but the bottom bit then add |
| 639 | // that on at the end. |
| 640 | T Z = (X >> 1) * Y; |
| 641 | if (Z & ~(Max >> 1)) { |
| 642 | Overflowed = true; |
| 643 | return Max; |
| 644 | } |
| 645 | Z <<= 1; |
| 646 | if (X & 1) |
| 647 | return SaturatingAdd(Z, Y, ResultOverflowed); |
| 648 | |
| 649 | return Z; |
| 650 | } |
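A hypothetical sketch at 8-bit width, where Log2Max is 7 and the boundary case exercises the top-bit path:

  bool Ov;
  assert(SaturatingMultiply<uint8_t>(16, 16, &Ov) == 255 && Ov);   // 256 overflows
  assert(SaturatingMultiply<uint8_t>(15, 17, &Ov) == 255 && !Ov);  // exactly 255; no overflow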
| 651 | |
| 652 | /// Multiply two unsigned integers, X and Y, and add the unsigned integer, A to |
| 653 | /// the product. Clamp the result to the maximum representable value of T on |
| 654 | /// overflow. ResultOverflowed indicates if the result is larger than the |
| 655 | /// maximum representable value of type T. |
| 656 | template <typename T> |
| 657 | std::enable_if_t<std::is_unsigned_v<T>, T> |
| 658 | SaturatingMultiplyAdd(T X, T Y, T A, bool *ResultOverflowed = nullptr) { |
| 659 | bool Dummy; |
| 660 | bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy; |
| 661 | |
| 662 | T Product = SaturatingMultiply(X, Y, &Overflowed); |
| 663 | if (Overflowed) |
| 664 | return Product; |
| 665 | |
| 666 | return SaturatingAdd(A, Product, &Overflowed); |
| 667 | } |
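Note that on multiply overflow the clamped product is returned and the addend is ignored, which still yields the saturated maximum. Hypothetical usage:

  bool Ov;
  assert(SaturatingMultiplyAdd<uint8_t>(10, 20, 55, &Ov) == 255 && !Ov);  // 200 + 55
  assert(SaturatingMultiplyAdd<uint8_t>(10, 20, 56, &Ov) == 255 && Ov);   // 256 clamps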
| 668 | |
| 669 | /// Use this rather than HUGE_VALF; the latter causes warnings on MSVC. |
| 670 | extern const float huge_valf; |
| 671 | |
| 672 | |
| 673 | /// Add two signed integers, computing the two's complement truncated result, |
| 674 | /// returning true if overflow occurred. |
| 675 | template <typename T> |
| 676 | std::enable_if_t<std::is_signed_v<T>, T> AddOverflow(T X, T Y, T &Result) { |
| 677 | #if __has_builtin(__builtin_add_overflow)
| 678 | return __builtin_add_overflow(X, Y, &Result); |
| 679 | #else |
| 680 | // Perform the unsigned addition. |
| 681 | using U = std::make_unsigned_t<T>; |
| 682 | const U UX = static_cast<U>(X); |
| 683 | const U UY = static_cast<U>(Y); |
| 684 | const U UResult = UX + UY; |
| 685 | |
| 686 | // Convert to signed. |
| 687 | Result = static_cast<T>(UResult); |
| 688 | |
| 689 | // Adding two positive numbers should result in a positive number. |
| 690 | if (X > 0 && Y > 0) |
| 691 | return Result <= 0; |
| 692 | // Adding two negatives should result in a negative number. |
| 693 | if (X < 0 && Y < 0) |
| 694 | return Result >= 0; |
| 695 | return false; |
| 696 | #endif |
| 697 | } |
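The truncated result is stored even when overflow is reported. Hypothetical usage:

  int8_t R;
  assert(AddOverflow<int8_t>(100, 100, R) && R == -56);  // 200 wraps mod 2^8
  assert(!AddOverflow<int8_t>(100, -100, R) && R == 0);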
| 698 | |
| 699 | /// Subtract two signed integers, computing the two's complement truncated |
| 700 | /// result, returning true if an overflow occurred.
| 701 | template <typename T> |
| 702 | std::enable_if_t<std::is_signed_v<T>, T> SubOverflow(T X, T Y, T &Result) { |
| 703 | #if __has_builtin(__builtin_sub_overflow)
| 704 | return __builtin_sub_overflow(X, Y, &Result); |
| 705 | #else |
| 706 | // Perform the unsigned subtraction.
| 707 | using U = std::make_unsigned_t<T>; |
| 708 | const U UX = static_cast<U>(X); |
| 709 | const U UY = static_cast<U>(Y); |
| 710 | const U UResult = UX - UY; |
| 711 | |
| 712 | // Convert to signed. |
| 713 | Result = static_cast<T>(UResult); |
| 714 | |
| 715 | // Subtracting a positive number from a negative results in a negative number. |
| 716 | if (X <= 0 && Y > 0) |
| 717 | return Result >= 0; |
| 718 | // Subtracting a negative number from a positive results in a positive number. |
| 719 | if (X >= 0 && Y < 0) |
| 720 | return Result <= 0; |
| 721 | return false; |
| 722 | #endif |
| 723 | } |
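As with AddOverflow, the wrapped difference is stored regardless. Hypothetical usage:

  int8_t R;
  assert(SubOverflow<int8_t>(-100, 100, R) && R == 56);  // -200 wraps mod 2^8
  assert(!SubOverflow<int8_t>(100, 100, R) && R == 0);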
| 724 | |
| 725 | /// Multiply two signed integers, computing the two's complement truncated |
| 726 | /// result, returning true if an overflow occurred.
| 727 | template <typename T> |
| 728 | std::enable_if_t<std::is_signed_v<T>, T> MulOverflow(T X, T Y, T &Result) { |
| 729 | // Perform the unsigned multiplication on absolute values. |
| 730 | using U = std::make_unsigned_t<T>; |
| 731 | const U UX = X < 0 ? (0 - static_cast<U>(X)) : static_cast<U>(X); |
| 732 | const U UY = Y < 0 ? (0 - static_cast<U>(Y)) : static_cast<U>(Y); |
| 733 | const U UResult = UX * UY; |
| 734 | |
| 735 | // Convert to signed. |
| 736 | const bool IsNegative = (X < 0) ^ (Y < 0); |
| 737 | Result = IsNegative ? (0 - UResult) : UResult; |
| 738 | |
| 739 | // If any of the args was 0, result is 0 and no overflow occurs. |
| 740 | if (UX == 0 || UY == 0) |
| 741 | return false; |
| 742 | |
| 743 | // UX and UY are in [1, 2^n], where n is the number of digits. |
| 744 | // Check how the max allowed absolute value (2^n for negative, 2^(n-1) for |
| 745 | // positive) divided by an argument compares to the other. |
| 746 | if (IsNegative) |
| 747 | return UX > (static_cast<U>(std::numeric_limits<T>::max()) + U(1)) / UY; |
| 748 | else |
| 749 | return UX > (static_cast<U>(std::numeric_limits<T>::max())) / UY; |
| 750 | } |
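The `+ U(1)` in the negative branch reflects that the most negative value has one more unit of magnitude than the most positive. Hypothetical usage:

  int8_t R;
  assert(MulOverflow<int8_t>(16, 16, R));                // 256 overflows int8_t
  assert(!MulOverflow<int8_t>(-8, 16, R) && R == -128);  // INT8_MIN fits exactly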
| 751 | |
| 752 | } // End llvm namespace |
| 753 | |
| 754 | #endif |