Bug Summary

File: /build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/llvm/lib/Target/AMDGPU/SIModeRegister.cpp
Warning: line 205, column 55
The result of the left shift is undefined due to shifting by '32', which is greater or equal to the width of type 'int'
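The shift at line 205, (1 << Width) - 1, is evaluated in type 'int'. Width comes from countTrailingOnes(InstrMode.Mask >> Offset), so when every Mode bit is known (Mask == 0xFFFFFFFF, Offset == 0) Width is 32 and the shift count equals the width of 'int', which is undefined behaviour in C++. Below is a minimal standalone sketch of the problem and of one shift-free way to build the mask; it is illustrative only (the helper name safeMaskTrailingOnes is made up here), though llvm::maskTrailingOnes<unsigned>() in llvm/Support/MathExtras.h, shown further down in this report, uses the same right-shift pattern.

    #include <cstdint>
    #include <cstdio>

    // Builds a mask with the low Width bits set. Well-defined for Width in [0, 32],
    // unlike (1 << Width) - 1, which is undefined when Width == 32.
    static uint32_t safeMaskTrailingOnes(unsigned Width) {
      return Width == 0 ? 0u : (UINT32_MAX >> (32u - Width));
    }

    int main() {
      unsigned Width = 32;                          // all 32 Mode bits known
      // uint32_t Bad = (1 << Width) - 1;           // UB: shift count == width of 'int'
      uint32_t Good = safeMaskTrailingOnes(Width);  // 0xFFFFFFFF, no undefined shift
      std::printf("0x%08X\n", Good);
      return 0;
    }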

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name SIModeRegister.cpp -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/build-llvm/tools/clang/stage2-bins -resource-dir /usr/lib/llvm-16/lib/clang/16.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/Target/AMDGPU -I /build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/llvm/lib/Target/AMDGPU -I include -I /build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-16/lib/clang/16.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fmacro-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/= -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/= -O2 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -Wno-misleading-indentation -std=c++17 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/= -ferror-limit 19 -fvisibility=hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o 
/tmp/scan-build-2022-09-04-125545-48738-1 -x c++ /build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/llvm/lib/Target/AMDGPU/SIModeRegister.cpp

/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/llvm/lib/Target/AMDGPU/SIModeRegister.cpp

1//===-- SIModeRegister.cpp - Mode Register --------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This pass inserts changes to the Mode register settings as required.
10/// Note that currently it only deals with the Double Precision Floating Point
11/// rounding mode setting, but is intended to be generic enough to be easily
12/// expanded.
13///
14//===----------------------------------------------------------------------===//
15//
16#include "AMDGPU.h"
17#include "GCNSubtarget.h"
18#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
19#include "llvm/ADT/Statistic.h"
20#include "llvm/CodeGen/MachineFunctionPass.h"
21#include <queue>
22
23#define DEBUG_TYPE "si-mode-register"
24
25STATISTIC(NumSetregInserted, "Number of setreg of mode register inserted.");
26
27using namespace llvm;
28
29struct Status {
30 // Mask is a bitmask where a '1' indicates the corresponding Mode bit has a
31 // known value
32 unsigned Mask;
33 unsigned Mode;
34
35 Status() : Mask(0), Mode(0){};
36
37 Status(unsigned NewMask, unsigned NewMode) : Mask(NewMask), Mode(NewMode) {
38 Mode &= Mask;
39 };
40
41 // merge two status values such that only values that don't conflict are
42 // preserved
43 Status merge(const Status &S) const {
44 return Status((Mask | S.Mask), ((Mode & ~S.Mask) | (S.Mode & S.Mask)));
45 }
46
47 // merge an unknown value by using the unknown value's mask to remove bits
48 // from the result
49 Status mergeUnknown(unsigned newMask) {
50 return Status(Mask & ~newMask, Mode & ~newMask);
51 }
52
53 // intersect two Status values to produce a mode and mask that is a subset
54 // of both values
55 Status intersect(const Status &S) const {
56 unsigned NewMask = (Mask & S.Mask) & (Mode ^ ~S.Mode);
57 unsigned NewMode = (Mode & NewMask);
58 return Status(NewMask, NewMode);
59 }
60
61 // produce the delta required to change the Mode to the required Mode
62 Status delta(const Status &S) const {
63 return Status((S.Mask & (Mode ^ S.Mode)) | (~Mask & S.Mask), S.Mode);
64 }
65
66 bool operator==(const Status &S) const {
67 return (Mask == S.Mask) && (Mode == S.Mode);
68 }
69
70 bool operator!=(const Status &S) const { return !(*this == S); }
71
72 bool isCompatible(Status &S) {
73 return ((Mask & S.Mask) == S.Mask) && ((Mode & S.Mask) == S.Mode);
74 }
75
76 bool isCombinable(Status &S) { return !(Mask & S.Mask) || isCompatible(S); }
77};
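As a worked illustration of the Status bit algebra above (a standalone sketch with invented values; the only fact taken from the source is that FP_ROUND_MODE_DP places the DP rounding field in bits 3:2, and that (Mode ^ ~S.Mode) in intersect() is the same as ~(Mode ^ S.Mode)):

    #include <cassert>

    int main() {
      // A knows the DP rounding field (bits 3:2) with value 1; B knows bits 1:0 with value 2.
      unsigned MaskA = 0b1100, ModeA = 0b0100;
      unsigned MaskB = 0b0011, ModeB = 0b0010;

      // merge: union of known bits; where both are known, the argument's bits win.
      unsigned MergedMask = MaskA | MaskB;                      // 0b1111
      unsigned MergedMode = (ModeA & ~MaskB) | (ModeB & MaskB); // 0b0110
      assert(MergedMask == 0b1111u && MergedMode == 0b0110u);

      // intersect: keep only bits that are known in both operands and agree.
      unsigned MaskC = 0b1100, ModeC = 0b1100;                  // same field known, value 3
      unsigned CommonMask = (MaskA & MaskC) & ~(ModeA ^ ModeC); // 0b0100 (bit 3 disagrees)
      unsigned CommonMode = ModeA & CommonMask;                 // 0b0100
      assert(CommonMask == 0b0100u && CommonMode == 0b0100u);
      return 0;
    }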
78
79class BlockData {
80public:
81 // The Status that represents the mode register settings required by the
82 // FirstInsertionPoint (if any) in this block. Calculated in Phase 1.
83 Status Require;
84
85 // The Status that represents the net changes to the Mode register made by
86 // this block. Calculated in Phase 1.
87 Status Change;
88
89 // The Status that represents the mode register settings on exit from this
90 // block. Calculated in Phase 2.
91 Status Exit;
92
93 // The Status that represents the intersection of exit Mode register settings
94 // from all predecessor blocks. Calculated in Phase 2, and used by Phase 3.
95 Status Pred;
96
97 // In Phase 1 we record the first instruction that has a mode requirement,
98 // which is used in Phase 3 if we need to insert a mode change.
99 MachineInstr *FirstInsertionPoint;
100
101 // A flag to indicate whether an Exit value has been set (we can't tell by
102 // examining the Exit value itself as all values may be valid results).
103 bool ExitSet;
104
105 BlockData() : FirstInsertionPoint(nullptr), ExitSet(false){};
106};
107
108namespace {
109
110class SIModeRegister : public MachineFunctionPass {
111public:
112 static char ID;
113
114 std::vector<std::unique_ptr<BlockData>> BlockInfo;
115 std::queue<MachineBasicBlock *> Phase2List;
116
117 // The default mode register setting currently only caters for the floating
118 // point double precision rounding mode.
119 // We currently assume the default rounding mode is Round to Nearest
120 // NOTE: this should come from a per function rounding mode setting once such
121 // a setting exists.
122 unsigned DefaultMode = FP_ROUND_ROUND_TO_NEAREST;
123 Status DefaultStatus =
124 Status(FP_ROUND_MODE_DP(0x3), FP_ROUND_MODE_DP(DefaultMode));
125
126 bool Changed = false;
127
128public:
129 SIModeRegister() : MachineFunctionPass(ID) {}
130
131 bool runOnMachineFunction(MachineFunction &MF) override;
132
133 void getAnalysisUsage(AnalysisUsage &AU) const override {
134 AU.setPreservesCFG();
135 MachineFunctionPass::getAnalysisUsage(AU);
136 }
137
138 void processBlockPhase1(MachineBasicBlock &MBB, const SIInstrInfo *TII);
139
140 void processBlockPhase2(MachineBasicBlock &MBB, const SIInstrInfo *TII);
141
142 void processBlockPhase3(MachineBasicBlock &MBB, const SIInstrInfo *TII);
143
144 Status getInstructionMode(MachineInstr &MI, const SIInstrInfo *TII);
145
146 void insertSetreg(MachineBasicBlock &MBB, MachineInstr *I,
147 const SIInstrInfo *TII, Status InstrMode);
148};
149} // End anonymous namespace.
150
151INITIALIZE_PASS(SIModeRegister, DEBUG_TYPE,
152 "Insert required mode register values", false, false)
153
154char SIModeRegister::ID = 0;
155
156char &llvm::SIModeRegisterID = SIModeRegister::ID;
157
158FunctionPass *llvm::createSIModeRegisterPass() { return new SIModeRegister(); }
159
160// Determine the Mode register setting required for this instruction.
161// Instructions which don't use the Mode register return a null Status.
162// Note this currently only deals with instructions that use the floating point
163// double precision setting.
164Status SIModeRegister::getInstructionMode(MachineInstr &MI,
165 const SIInstrInfo *TII) {
166 if (TII->usesFPDPRounding(MI) ||
167 MI.getOpcode() == AMDGPU::FPTRUNC_UPWARD_PSEUDO ||
168 MI.getOpcode() == AMDGPU::FPTRUNC_DOWNWARD_PSEUDO) {
169 switch (MI.getOpcode()) {
170 case AMDGPU::V_INTERP_P1LL_F16:
171 case AMDGPU::V_INTERP_P1LV_F16:
172 case AMDGPU::V_INTERP_P2_F16:
173 // f16 interpolation instructions need double precision round to zero
174 return Status(FP_ROUND_MODE_DP(3),
175 FP_ROUND_MODE_DP(FP_ROUND_ROUND_TO_ZERO));
176 case AMDGPU::FPTRUNC_UPWARD_PSEUDO: {
177 // Replacing the pseudo by a real instruction
178 MI.setDesc(TII->get(AMDGPU::V_CVT_F16_F32_e32));
179 return Status(FP_ROUND_MODE_DP(3),
180 FP_ROUND_MODE_DP(FP_ROUND_ROUND_TO_INF));
181 }
182 case AMDGPU::FPTRUNC_DOWNWARD_PSEUDO: {
183 // Replacing the pseudo by a real instruction
184 MI.setDesc(TII->get(AMDGPU::V_CVT_F16_F32_e32));
185 return Status(FP_ROUND_MODE_DP(3),
186 FP_ROUND_MODE_DP(FP_ROUND_ROUND_TO_NEGINF));
187 }
188 default:
189 return DefaultStatus;
190 }
191 }
192 return Status();
193}
194
195// Insert a setreg instruction to update the Mode register.
196// It is possible (though unlikely) for an instruction to require a change to
197// the value of disjoint parts of the Mode register when we don't know the
198// value of the intervening bits. In that case we need to use more than one
199// setreg instruction.
200void SIModeRegister::insertSetreg(MachineBasicBlock &MBB, MachineInstr *MI,
201 const SIInstrInfo *TII, Status InstrMode) {
202 while (InstrMode.Mask) {
8. Loop condition is true. Entering loop body
203 unsigned Offset = countTrailingZeros<unsigned>(InstrMode.Mask);
204 unsigned Width = countTrailingOnes<unsigned>(InstrMode.Mask >> Offset);
9. Calling 'countTrailingOnes<unsigned int>'
19. Returning from 'countTrailingOnes<unsigned int>'
20. 'Width' initialized to 32
205 unsigned Value = (InstrMode.Mode >> Offset) & ((1 << Width) - 1);
21. The result of the left shift is undefined due to shifting by '32', which is greater or equal to the width of type 'int'
206 BuildMI(MBB, MI, nullptr, TII->get(AMDGPU::S_SETREG_IMM32_B32))
207 .addImm(Value)
208 .addImm(((Width - 1) << AMDGPU::Hwreg::WIDTH_M1_SHIFT_) |
209 (Offset << AMDGPU::Hwreg::OFFSET_SHIFT_) |
210 (AMDGPU::Hwreg::ID_MODE << AMDGPU::Hwreg::ID_SHIFT_));
211 ++NumSetregInserted;
212 Changed = true;
213 InstrMode.Mask &= ~(((1 << Width) - 1) << Offset);
214 }
215}
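To see why the comment before this function mentions that disjoint known fields can require more than one setreg, here is a small trace of the same peeling loop on made-up values (it uses the GCC/Clang __builtin_ctz intrinsic directly instead of the MathExtras helpers, and keeps Width well below 32 so the (1u << Width) form is safe):

    #include <cstdio>

    int main() {
      // Bits 0 and 2 are known, bit 1 is not, so two contiguous runs must be peeled.
      unsigned Mask = 0b101, Mode = 0b100;
      while (Mask) {
        unsigned Offset = __builtin_ctz(Mask);              // index of the lowest known bit
        unsigned Width = __builtin_ctz(~(Mask >> Offset));  // length of the contiguous run
        unsigned Value = (Mode >> Offset) & ((1u << Width) - 1);
        std::printf("setreg: offset=%u width=%u value=%u\n", Offset, Width, Value);
        Mask &= ~(((1u << Width) - 1) << Offset);           // clear the run just emitted
      }
      return 0;
    }
    // Prints two lines, i.e. two S_SETREG_IMM32_B32 instructions would be emitted:
    //   setreg: offset=0 width=1 value=0
    //   setreg: offset=2 width=1 value=1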
216
217// In Phase 1 we iterate through the instructions of the block and for each
218// instruction we get its mode usage. If the instruction uses the Mode register
219// we:
220// - update the Change status, which tracks the changes to the Mode register
221// made by this block
222// - if this instruction's requirements are compatible with the current setting
223// of the Mode register we merge the modes
224// - if it isn't compatible and an InsertionPoint isn't set, then we set the
225// InsertionPoint to the current instruction, and we remember the current
226// mode
227// - if it isn't compatible and InsertionPoint is set we insert a setreg before
228// that instruction (unless this instruction forms part of the block's
229// entry requirements in which case the insertion is deferred until Phase 3
230// when predecessor exit values are known), and move the insertion point to
231// this instruction
232// - if this is a setreg instruction we treat it as an incompatible instruction.
233// This is sub-optimal but avoids some nasty corner cases, and is expected to
234// occur very rarely.
235// - on exit we have set the Require, Change, and initial Exit modes.
236void SIModeRegister::processBlockPhase1(MachineBasicBlock &MBB,
237 const SIInstrInfo *TII) {
238 auto NewInfo = std::make_unique<BlockData>();
239 MachineInstr *InsertionPoint = nullptr;
240 // RequirePending is used to indicate whether we are collecting the initial
241 // requirements for the block, and need to defer the first InsertionPoint to
242 // Phase 3. It is set to false once we have set FirstInsertionPoint, or when
243 // we discover an explicit setreg that means this block doesn't have any
244 // initial requirements.
245 bool RequirePending = true;
246 Status IPChange;
247 for (MachineInstr &MI : MBB) {
248 Status InstrMode = getInstructionMode(MI, TII);
249 if (MI.getOpcode() == AMDGPU::S_SETREG_B32 ||
250 MI.getOpcode() == AMDGPU::S_SETREG_B32_mode ||
251 MI.getOpcode() == AMDGPU::S_SETREG_IMM32_B32 ||
252 MI.getOpcode() == AMDGPU::S_SETREG_IMM32_B32_mode) {
253 // We preserve any explicit mode register setreg instruction we encounter,
254 // as we assume it has been inserted by a higher authority (this is
255 // likely to be a very rare occurrence).
256 unsigned Dst = TII->getNamedOperand(MI, AMDGPU::OpName::simm16)->getImm();
257 if (((Dst & AMDGPU::Hwreg::ID_MASK_) >> AMDGPU::Hwreg::ID_SHIFT_) !=
258 AMDGPU::Hwreg::ID_MODE)
259 continue;
260
261 unsigned Width = ((Dst & AMDGPU::Hwreg::WIDTH_M1_MASK_) >>
262 AMDGPU::Hwreg::WIDTH_M1_SHIFT_) +
263 1;
264 unsigned Offset =
265 (Dst & AMDGPU::Hwreg::OFFSET_MASK_) >> AMDGPU::Hwreg::OFFSET_SHIFT_;
266 unsigned Mask = ((1 << Width) - 1) << Offset;
267
268 // If an InsertionPoint is set we will insert a setreg there.
269 if (InsertionPoint) {
270 insertSetreg(MBB, InsertionPoint, TII, IPChange.delta(NewInfo->Change));
271 InsertionPoint = nullptr;
272 }
273 // If this is an immediate then we know the value being set, but if it is
274 // not an immediate then we treat the modified bits of the mode register
275 // as unknown.
276 if (MI.getOpcode() == AMDGPU::S_SETREG_IMM32_B32 ||
277 MI.getOpcode() == AMDGPU::S_SETREG_IMM32_B32_mode) {
278 unsigned Val = TII->getNamedOperand(MI, AMDGPU::OpName::imm)->getImm();
279 unsigned Mode = (Val << Offset) & Mask;
280 Status Setreg = Status(Mask, Mode);
281 // If we haven't already set the initial requirements for the block we
282 // don't need to as the requirements start from this explicit setreg.
283 RequirePending = false;
284 NewInfo->Change = NewInfo->Change.merge(Setreg);
285 } else {
286 NewInfo->Change = NewInfo->Change.mergeUnknown(Mask);
287 }
288 } else if (!NewInfo->Change.isCompatible(InstrMode)) {
289 // This instruction uses the Mode register and its requirements aren't
290 // compatible with the current mode.
291 if (InsertionPoint) {
292 // If the required mode change cannot be included in the current
293 // InsertionPoint changes, we need a setreg and start a new
294 // InsertionPoint.
295 if (!IPChange.delta(NewInfo->Change).isCombinable(InstrMode)) {
296 if (RequirePending) {
297 // This is the first insertionPoint in the block so we will defer
298 // the insertion of the setreg to Phase 3 where we know whether or
299 // not it is actually needed.
300 NewInfo->FirstInsertionPoint = InsertionPoint;
301 NewInfo->Require = NewInfo->Change;
302 RequirePending = false;
303 } else {
304 insertSetreg(MBB, InsertionPoint, TII,
305 IPChange.delta(NewInfo->Change));
306 IPChange = NewInfo->Change;
307 }
308 // Set the new InsertionPoint
309 InsertionPoint = &MI;
310 }
311 NewInfo->Change = NewInfo->Change.merge(InstrMode);
312 } else {
313 // No InsertionPoint is currently set - this is either the first in
314 // the block or we have previously seen an explicit setreg.
315 InsertionPoint = &MI;
316 IPChange = NewInfo->Change;
317 NewInfo->Change = NewInfo->Change.merge(InstrMode);
318 }
319 }
320 }
321 if (RequirePending) {
322 // If we haven't yet set the initial requirements for the block we set them
323 // now.
324 NewInfo->FirstInsertionPoint = InsertionPoint;
325 NewInfo->Require = NewInfo->Change;
326 } else if (InsertionPoint) {
327 // We need to insert a setreg at the InsertionPoint
328 insertSetreg(MBB, InsertionPoint, TII, IPChange.delta(NewInfo->Change));
329 }
330 NewInfo->Exit = NewInfo->Change;
331 BlockInfo[MBB.getNumber()] = std::move(NewInfo);
332}
333
334// In Phase 2 we revisit each block and calculate the common Mode register
335// value provided by all predecessor blocks. If the Exit value for the block
336// is changed, then we add the successor blocks to the worklist so that the
337// exit value is propagated.
338void SIModeRegister::processBlockPhase2(MachineBasicBlock &MBB,
339 const SIInstrInfo *TII) {
340 bool RevisitRequired = false;
341 bool ExitSet = false;
342 unsigned ThisBlock = MBB.getNumber();
343 if (MBB.pred_empty()) {
344 // There are no predecessors, so use the default starting status.
345 BlockInfo[ThisBlock]->Pred = DefaultStatus;
346 ExitSet = true;
347 } else {
348 // Build a status that is common to all the predecessors by intersecting
349 // all the predecessor exit status values.
350 // Mask bits (which represent the Mode bits with a known value) can only be
351 // added by explicit SETREG instructions or the initial default value -
352 // the intersection process may remove Mask bits.
353 // If we find a predecessor that has not yet had an exit value determined
354 // (this can happen for example if a block is its own predecessor) we defer
355 // use of that value as the Mask will be all zero, and we will revisit this
356 // block again later (unless the only predecessor without an exit value is
357 // this block).
358 MachineBasicBlock::pred_iterator P = MBB.pred_begin(), E = MBB.pred_end();
359 MachineBasicBlock &PB = *(*P);
360 unsigned PredBlock = PB.getNumber();
361 if ((ThisBlock == PredBlock) && (std::next(P) == E)) {
362 BlockInfo[ThisBlock]->Pred = DefaultStatus;
363 ExitSet = true;
364 } else if (BlockInfo[PredBlock]->ExitSet) {
365 BlockInfo[ThisBlock]->Pred = BlockInfo[PredBlock]->Exit;
366 ExitSet = true;
367 } else if (PredBlock != ThisBlock)
368 RevisitRequired = true;
369
370 for (P = std::next(P); P != E; P = std::next(P)) {
371 MachineBasicBlock *Pred = *P;
372 unsigned PredBlock = Pred->getNumber();
373 if (BlockInfo[PredBlock]->ExitSet) {
374 if (BlockInfo[ThisBlock]->ExitSet) {
375 BlockInfo[ThisBlock]->Pred =
376 BlockInfo[ThisBlock]->Pred.intersect(BlockInfo[PredBlock]->Exit);
377 } else {
378 BlockInfo[ThisBlock]->Pred = BlockInfo[PredBlock]->Exit;
379 }
380 ExitSet = true;
381 } else if (PredBlock != ThisBlock)
382 RevisitRequired = true;
383 }
384 }
385 Status TmpStatus =
386 BlockInfo[ThisBlock]->Pred.merge(BlockInfo[ThisBlock]->Change);
387 if (BlockInfo[ThisBlock]->Exit != TmpStatus) {
388 BlockInfo[ThisBlock]->Exit = TmpStatus;
389 // Add the successors to the work list so we can propagate the changed exit
390 // status.
391 for (MachineBasicBlock *Succ : MBB.successors())
392 Phase2List.push(Succ);
393 }
394 BlockInfo[ThisBlock]->ExitSet = ExitSet;
395 if (RevisitRequired)
396 Phase2List.push(&MBB);
397}
398
399// In Phase 3 we revisit each block and if it has an insertion point defined we
400// check whether the predecessor mode meets the block's entry requirements. If
401// not we insert an appropriate setreg instruction to modify the Mode register.
402void SIModeRegister::processBlockPhase3(MachineBasicBlock &MBB,
403 const SIInstrInfo *TII) {
404 unsigned ThisBlock = MBB.getNumber();
405 if (!BlockInfo[ThisBlock]->Pred.isCompatible(BlockInfo[ThisBlock]->Require)) {
4. Taking true branch
406 Status Delta =
407 BlockInfo[ThisBlock]->Pred.delta(BlockInfo[ThisBlock]->Require);
408 if (BlockInfo[ThisBlock]->FirstInsertionPoint)
5. Assuming field 'FirstInsertionPoint' is null
6. Taking false branch
409 insertSetreg(MBB, BlockInfo[ThisBlock]->FirstInsertionPoint, TII, Delta);
410 else
411 insertSetreg(MBB, &MBB.instr_front(), TII, Delta);
7. Calling 'SIModeRegister::insertSetreg'
412 }
413}
414
415bool SIModeRegister::runOnMachineFunction(MachineFunction &MF) {
416 BlockInfo.resize(MF.getNumBlockIDs());
417 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
418 const SIInstrInfo *TII = ST.getInstrInfo();
419
420 // Processing is performed in a number of phases
421
422 // Phase 1 - determine the initial mode required by each block, and add setreg
423 // instructions for intra block requirements.
424 for (MachineBasicBlock &BB : MF)
425 processBlockPhase1(BB, TII);
426
427 // Phase 2 - determine the exit mode from each block. We add all blocks to the
428 // list here, but will also add any that need to be revisited during Phase 2
429 // processing.
430 for (MachineBasicBlock &BB : MF)
431 Phase2List.push(&BB);
432 while (!Phase2List.empty()) {
1. Assuming the condition is false
2. Loop condition is false. Execution continues on line 439
433 processBlockPhase2(*Phase2List.front(), TII);
434 Phase2List.pop();
435 }
436
437 // Phase 3 - add an initial setreg to each block where the required entry mode
438 // is not satisfied by the exit mode of all its predecessors.
439 for (MachineBasicBlock &BB : MF)
440 processBlockPhase3(BB, TII);
3. Calling 'SIModeRegister::processBlockPhase3'
441
442 BlockInfo.clear();
443
444 return Changed;
445}

/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/llvm/include/llvm/Support/MathExtras.h

1//===-- llvm/Support/MathExtras.h - Useful math functions -------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains some functions that are useful for math stuff.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_SUPPORT_MATHEXTRAS_H
14#define LLVM_SUPPORT_MATHEXTRAS_H
15
16#include "llvm/ADT/bit.h"
17#include "llvm/Support/Compiler.h"
18#include <cassert>
19#include <climits>
20#include <cmath>
21#include <cstdint>
22#include <cstring>
23#include <limits>
24#include <type_traits>
25
26#ifdef _MSC_VER
27// Declare these intrinsics manually rather including intrin.h. It's very
28// expensive, and MathExtras.h is popular.
29// #include <intrin.h>
30extern "C" {
31unsigned char _BitScanForward(unsigned long *_Index, unsigned long _Mask);
32unsigned char _BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask);
33unsigned char _BitScanReverse(unsigned long *_Index, unsigned long _Mask);
34unsigned char _BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask);
35}
36#endif
37
38namespace llvm {
39
40/// The behavior an operation has on an input of 0.
41enum ZeroBehavior {
42 /// The returned value is undefined.
43 ZB_Undefined,
44 /// The returned value is numeric_limits<T>::max()
45 ZB_Max,
46 /// The returned value is numeric_limits<T>::digits
47 ZB_Width
48};
49
50/// Mathematical constants.
51namespace numbers {
52// TODO: Track C++20 std::numbers.
53// TODO: Favor using the hexadecimal FP constants (requires C++17).
54constexpr double e = 2.7182818284590452354, // (0x1.5bf0a8b145749P+1) https://oeis.org/A001113
55 egamma = .57721566490153286061, // (0x1.2788cfc6fb619P-1) https://oeis.org/A001620
56 ln2 = .69314718055994530942, // (0x1.62e42fefa39efP-1) https://oeis.org/A002162
57 ln10 = 2.3025850929940456840, // (0x1.24bb1bbb55516P+1) https://oeis.org/A002392
58 log2e = 1.4426950408889634074, // (0x1.71547652b82feP+0)
59 log10e = .43429448190325182765, // (0x1.bcb7b1526e50eP-2)
60 pi = 3.1415926535897932385, // (0x1.921fb54442d18P+1) https://oeis.org/A000796
61 inv_pi = .31830988618379067154, // (0x1.45f306bc9c883P-2) https://oeis.org/A049541
62 sqrtpi = 1.7724538509055160273, // (0x1.c5bf891b4ef6bP+0) https://oeis.org/A002161
63 inv_sqrtpi = .56418958354775628695, // (0x1.20dd750429b6dP-1) https://oeis.org/A087197
64 sqrt2 = 1.4142135623730950488, // (0x1.6a09e667f3bcdP+0) https://oeis.org/A00219
65 inv_sqrt2 = .70710678118654752440, // (0x1.6a09e667f3bcdP-1)
66 sqrt3 = 1.7320508075688772935, // (0x1.bb67ae8584caaP+0) https://oeis.org/A002194
67 inv_sqrt3 = .57735026918962576451, // (0x1.279a74590331cP-1)
68 phi = 1.6180339887498948482; // (0x1.9e3779b97f4a8P+0) https://oeis.org/A001622
69constexpr float ef = 2.71828183F, // (0x1.5bf0a8P+1) https://oeis.org/A001113
70 egammaf = .577215665F, // (0x1.2788d0P-1) https://oeis.org/A001620
71 ln2f = .693147181F, // (0x1.62e430P-1) https://oeis.org/A002162
72 ln10f = 2.30258509F, // (0x1.26bb1cP+1) https://oeis.org/A002392
73 log2ef = 1.44269504F, // (0x1.715476P+0)
74 log10ef = .434294482F, // (0x1.bcb7b2P-2)
75 pif = 3.14159265F, // (0x1.921fb6P+1) https://oeis.org/A000796
76 inv_pif = .318309886F, // (0x1.45f306P-2) https://oeis.org/A049541
77 sqrtpif = 1.77245385F, // (0x1.c5bf8aP+0) https://oeis.org/A002161
78 inv_sqrtpif = .564189584F, // (0x1.20dd76P-1) https://oeis.org/A087197
79 sqrt2f = 1.41421356F, // (0x1.6a09e6P+0) https://oeis.org/A002193
80 inv_sqrt2f = .707106781F, // (0x1.6a09e6P-1)
81 sqrt3f = 1.73205081F, // (0x1.bb67aeP+0) https://oeis.org/A002194
82 inv_sqrt3f = .577350269F, // (0x1.279a74P-1)
83 phif = 1.61803399F; // (0x1.9e377aP+0) https://oeis.org/A001622
84} // namespace numbers
85
86namespace detail {
87template <typename T, std::size_t SizeOfT> struct TrailingZerosCounter {
88 static unsigned count(T Val, ZeroBehavior) {
89 if (!Val)
90 return std::numeric_limits<T>::digits;
91 if (Val & 0x1)
92 return 0;
93
94 // Bisection method.
95 unsigned ZeroBits = 0;
96 T Shift = std::numeric_limits<T>::digits >> 1;
97 T Mask = std::numeric_limits<T>::max() >> Shift;
98 while (Shift) {
99 if ((Val & Mask) == 0) {
100 Val >>= Shift;
101 ZeroBits |= Shift;
102 }
103 Shift >>= 1;
104 Mask >>= Shift;
105 }
106 return ZeroBits;
107 }
108};
109
110#if defined(__GNUC__) || defined(_MSC_VER)
111template <typename T> struct TrailingZerosCounter<T, 4> {
112 static unsigned count(T Val, ZeroBehavior ZB) {
113 if (ZB != ZB_Undefined && Val == 0)
11.1. 'ZB' is not equal to ZB_Undefined
12. Assuming 'Val' is equal to 0
13. Taking true branch
114 return 32;
14. Returning the value 32
115
116#if __has_builtin(__builtin_ctz) || defined(__GNUC__)
117 return __builtin_ctz(Val);
118#elif defined(_MSC_VER)
119 unsigned long Index;
120 _BitScanForward(&Index, Val);
121 return Index;
122#endif
123 }
124};
125
126#if !defined(_MSC_VER) || defined(_M_X64)
127template <typename T> struct TrailingZerosCounter<T, 8> {
128 static unsigned count(T Val, ZeroBehavior ZB) {
129 if (ZB != ZB_Undefined && Val == 0)
130 return 64;
131
132#if __has_builtin(__builtin_ctzll) || defined(__GNUC__)
133 return __builtin_ctzll(Val);
134#elif defined(_MSC_VER)
135 unsigned long Index;
136 _BitScanForward64(&Index, Val);
137 return Index;
138#endif
139 }
140};
141#endif
142#endif
143} // namespace detail
144
145/// Count number of 0's from the least significant bit to the most
146/// stopping at the first 1.
147///
148/// Only unsigned integral types are allowed.
149///
150/// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are
151/// valid arguments.
152template <typename T>
153unsigned countTrailingZeros(T Val, ZeroBehavior ZB = ZB_Width) {
154 static_assert(std::is_unsigned_v<T>,
155 "Only unsigned integral types are allowed.");
156 return llvm::detail::TrailingZerosCounter<T, sizeof(T)>::count(Val, ZB);
11. Calling 'TrailingZerosCounter::count'
15. Returning from 'TrailingZerosCounter::count'
16. Returning the value 32
157}
158
159namespace detail {
160template <typename T, std::size_t SizeOfT> struct LeadingZerosCounter {
161 static unsigned count(T Val, ZeroBehavior) {
162 if (!Val)
163 return std::numeric_limits<T>::digits;
164
165 // Bisection method.
166 unsigned ZeroBits = 0;
167 for (T Shift = std::numeric_limits<T>::digits >> 1; Shift; Shift >>= 1) {
168 T Tmp = Val >> Shift;
169 if (Tmp)
170 Val = Tmp;
171 else
172 ZeroBits |= Shift;
173 }
174 return ZeroBits;
175 }
176};
177
178#if defined(__GNUC__) || defined(_MSC_VER)
179template <typename T> struct LeadingZerosCounter<T, 4> {
180 static unsigned count(T Val, ZeroBehavior ZB) {
181 if (ZB != ZB_Undefined && Val == 0)
182 return 32;
183
184#if __has_builtin(__builtin_clz) || defined(__GNUC__)
185 return __builtin_clz(Val);
186#elif defined(_MSC_VER)
187 unsigned long Index;
188 _BitScanReverse(&Index, Val);
189 return Index ^ 31;
190#endif
191 }
192};
193
194#if !defined(_MSC_VER) || defined(_M_X64)
195template <typename T> struct LeadingZerosCounter<T, 8> {
196 static unsigned count(T Val, ZeroBehavior ZB) {
197 if (ZB != ZB_Undefined && Val == 0)
198 return 64;
199
200#if __has_builtin(__builtin_clzll) || defined(__GNUC__)
201 return __builtin_clzll(Val);
202#elif defined(_MSC_VER)
203 unsigned long Index;
204 _BitScanReverse64(&Index, Val);
205 return Index ^ 63;
206#endif
207 }
208};
209#endif
210#endif
211} // namespace detail
212
213/// Count number of 0's from the most significant bit to the least
214/// stopping at the first 1.
215///
216/// Only unsigned integral types are allowed.
217///
218/// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are
219/// valid arguments.
220template <typename T>
221unsigned countLeadingZeros(T Val, ZeroBehavior ZB = ZB_Width) {
222 static_assert(std::is_unsigned_v<T>,
223 "Only unsigned integral types are allowed.");
224 return llvm::detail::LeadingZerosCounter<T, sizeof(T)>::count(Val, ZB);
225}
226
227/// Get the index of the first set bit starting from the least
228/// significant bit.
229///
230/// Only unsigned integral types are allowed.
231///
232/// \param ZB the behavior on an input of 0. Only ZB_Max and ZB_Undefined are
233/// valid arguments.
234template <typename T> T findFirstSet(T Val, ZeroBehavior ZB = ZB_Max) {
235 if (ZB == ZB_Max && Val == 0)
236 return std::numeric_limits<T>::max();
237
238 return countTrailingZeros(Val, ZB_Undefined);
239}
240
241/// Create a bitmask with the N right-most bits set to 1, and all other
242/// bits set to 0. Only unsigned types are allowed.
243template <typename T> T maskTrailingOnes(unsigned N) {
244 static_assert(std::is_unsigned<T>::value, "Invalid type!");
245 const unsigned Bits = CHAR_BIT * sizeof(T);
246 assert(N <= Bits && "Invalid bit index");
247 return N == 0 ? 0 : (T(-1) >> (Bits - N));
248}
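A brief usage sketch (illustrative, not part of the header): because maskTrailingOnes shifts T(-1) right instead of shifting 1 left, it remains well-defined when N equals the full bit width, which is exactly the case that makes (1 << Width) - 1 undefined at SIModeRegister.cpp line 205.

    #include "llvm/Support/MathExtras.h"
    #include <cassert>

    int main() {
      // N == 32 is handled without any undefined shift.
      assert(llvm::maskTrailingOnes<unsigned>(0)  == 0x00000000u);
      assert(llvm::maskTrailingOnes<unsigned>(5)  == 0x0000001Fu);
      assert(llvm::maskTrailingOnes<unsigned>(32) == 0xFFFFFFFFu);
      return 0;
    }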
249
250/// Create a bitmask with the N left-most bits set to 1, and all other
251/// bits set to 0. Only unsigned types are allowed.
252template <typename T> T maskLeadingOnes(unsigned N) {
253 return ~maskTrailingOnes<T>(CHAR_BIT * sizeof(T) - N);
254}
255
256/// Create a bitmask with the N right-most bits set to 0, and all other
257/// bits set to 1. Only unsigned types are allowed.
258template <typename T> T maskTrailingZeros(unsigned N) {
259 return maskLeadingOnes<T>(CHAR_BIT * sizeof(T) - N);
260}
261
262/// Create a bitmask with the N left-most bits set to 0, and all other
263/// bits set to 1. Only unsigned types are allowed.
264template <typename T> T maskLeadingZeros(unsigned N) {
265 return maskTrailingOnes<T>(CHAR_BIT * sizeof(T) - N);
266}
267
268/// Get the index of the last set bit starting from the least
269/// significant bit.
270///
271/// Only unsigned integral types are allowed.
272///
273/// \param ZB the behavior on an input of 0. Only ZB_Max and ZB_Undefined are
274/// valid arguments.
275template <typename T> T findLastSet(T Val, ZeroBehavior ZB = ZB_Max) {
276 if (ZB == ZB_Max && Val == 0)
277 return std::numeric_limits<T>::max();
278
279 // Use ^ instead of - because both gcc and llvm can remove the associated ^
280 // in the __builtin_clz intrinsic on x86.
281 return countLeadingZeros(Val, ZB_Undefined) ^
282 (std::numeric_limits<T>::digits - 1);
283}
284
285/// Macro compressed bit reversal table for 256 bits.
286///
287/// http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
288static const unsigned char BitReverseTable256[256] = {
289#define R2(n) n, n + 2 * 64, n + 1 * 64, n + 3 * 64
290#define R4(n) R2(n), R2(n + 2 * 16), R2(n + 1 * 16), R2(n + 3 * 16)
291#define R6(n) R4(n), R4(n + 2 * 4), R4(n + 1 * 4), R4(n + 3 * 4)
292 R6(0), R6(2), R6(1), R6(3)
293#undef R2
294#undef R4
295#undef R6
296};
297
298/// Reverse the bits in \p Val.
299template <typename T> T reverseBits(T Val) {
300#if __has_builtin(__builtin_bitreverse8)
301 if constexpr (std::is_same_v<T, uint8_t>)
302 return __builtin_bitreverse8(Val);
303#endif
304#if __has_builtin(__builtin_bitreverse16)
305 if constexpr (std::is_same_v<T, uint16_t>)
306 return __builtin_bitreverse16(Val);
307#endif
308#if __has_builtin(__builtin_bitreverse32)
309 if constexpr (std::is_same_v<T, uint32_t>)
310 return __builtin_bitreverse32(Val);
311#endif
312#if __has_builtin(__builtin_bitreverse64)
313 if constexpr (std::is_same_v<T, uint64_t>)
314 return __builtin_bitreverse64(Val);
315#endif
316
317 unsigned char in[sizeof(Val)];
318 unsigned char out[sizeof(Val)];
319 std::memcpy(in, &Val, sizeof(Val));
320 for (unsigned i = 0; i < sizeof(Val); ++i)
321 out[(sizeof(Val) - i) - 1] = BitReverseTable256[in[i]];
322 std::memcpy(&Val, out, sizeof(Val));
323 return Val;
324}
325
326// NOTE: The following support functions use the _32/_64 extensions instead of
327// type overloading so that signed and unsigned integers can be used without
328// ambiguity.
329
330/// Return the high 32 bits of a 64 bit value.
331constexpr inline uint32_t Hi_32(uint64_t Value) {
332 return static_cast<uint32_t>(Value >> 32);
333}
334
335/// Return the low 32 bits of a 64 bit value.
336constexpr inline uint32_t Lo_32(uint64_t Value) {
337 return static_cast<uint32_t>(Value);
338}
339
340/// Make a 64-bit integer from a high / low pair of 32-bit integers.
341constexpr inline uint64_t Make_64(uint32_t High, uint32_t Low) {
342 return ((uint64_t)High << 32) | (uint64_t)Low;
343}
344
345/// Checks if an integer fits into the given bit width.
346template <unsigned N> constexpr inline bool isInt(int64_t x) {
347 if constexpr (N == 8)
348 return static_cast<int8_t>(x) == x;
349 if constexpr (N == 16)
350 return static_cast<int16_t>(x) == x;
351 if constexpr (N == 32)
352 return static_cast<int32_t>(x) == x;
353 if constexpr (N < 64)
354 return -(INT64_C(1) << (N - 1)) <= x && x < (INT64_C(1) << (N - 1));
355 (void)x; // MSVC v19.25 warns that x is unused.
356 return true;
357}
358
359/// Checks if a signed integer is an N bit number shifted left by S.
360template <unsigned N, unsigned S>
361constexpr inline bool isShiftedInt(int64_t x) {
362 static_assert(
363 N > 0, "isShiftedInt<0> doesn't make sense (refers to a 0-bit number.");
364 static_assert(N + S <= 64, "isShiftedInt<N, S> with N + S > 64 is too wide.");
365 return isInt<N + S>(x) && (x % (UINT64_C(1) << S) == 0);
366}
367
368/// Checks if an unsigned integer fits into the given bit width.
369template <unsigned N> constexpr inline bool isUInt(uint64_t x) {
370 static_assert(N > 0, "isUInt<0> doesn't make sense");
371 if constexpr (N == 8)
372 return static_cast<uint8_t>(x) == x;
373 if constexpr (N == 16)
374 return static_cast<uint16_t>(x) == x;
375 if constexpr (N == 32)
376 return static_cast<uint32_t>(x) == x;
377 if constexpr (N < 64)
378 return x < (UINT64_C(1) << (N));
379 (void)x; // MSVC v19.25 warns that x is unused.
380 return true;
381}
382
383/// Checks if an unsigned integer is an N bit number shifted left by S.
384template <unsigned N, unsigned S>
385constexpr inline bool isShiftedUInt(uint64_t x) {
386 static_assert(
387 N > 0, "isShiftedUInt<0> doesn't make sense (refers to a 0-bit number)");
388 static_assert(N + S <= 64,
389 "isShiftedUInt<N, S> with N + S > 64 is too wide.");
390 // Per the two static_asserts above, S must be strictly less than 64. So
391 // 1 << S is not undefined behavior.
392 return isUInt<N + S>(x) && (x % (UINT64_C(1) << S) == 0);
393}
394
395/// Gets the maximum value for a N-bit unsigned integer.
396inline uint64_t maxUIntN(uint64_t N) {
397 assert(N > 0 && N <= 64 && "integer width out of range");
398
399 // uint64_t(1) << 64 is undefined behavior, so we can't do
400 // (uint64_t(1) << N) - 1
401 // without checking first that N != 64. But this works and doesn't have a
402 // branch.
403 return UINT64_MAX >> (64 - N);
404}
405
406/// Gets the minimum value for a N-bit signed integer.
407inline int64_t minIntN(int64_t N) {
408 assert(N > 0 && N <= 64 && "integer width out of range");
409
410 return UINT64_C(1) + ~(UINT64_C(1) << (N - 1));
411}
412
413/// Gets the maximum value for a N-bit signed integer.
414inline int64_t maxIntN(int64_t N) {
415 assert(N > 0 && N <= 64 && "integer width out of range");
416
417 // This relies on two's complement wraparound when N == 64, so we convert to
418 // int64_t only at the very end to avoid UB.
419 return (UINT64_C(1) << (N - 1)) - 1;
420}
421
422/// Checks if an unsigned integer fits into the given (dynamic) bit width.
423inline bool isUIntN(unsigned N, uint64_t x) {
424 return N >= 64 || x <= maxUIntN(N);
425}
426
428/// Checks if a signed integer fits into the given (dynamic) bit width.
428inline bool isIntN(unsigned N, int64_t x) {
429 return N >= 64 || (minIntN(N) <= x && x <= maxIntN(N));
430}
431
432/// Return true if the argument is a non-empty sequence of ones starting at the
433/// least significant bit with the remainder zero (32 bit version).
434/// Ex. isMask_32(0x0000FFFFU) == true.
435constexpr inline bool isMask_32(uint32_t Value) {
436 return Value && ((Value + 1) & Value) == 0;
437}
438
439/// Return true if the argument is a non-empty sequence of ones starting at the
440/// least significant bit with the remainder zero (64 bit version).
441constexpr inline bool isMask_64(uint64_t Value) {
442 return Value && ((Value + 1) & Value) == 0;
443}
444
445/// Return true if the argument contains a non-empty sequence of ones with the
446/// remainder zero (32 bit version.) Ex. isShiftedMask_32(0x0000FF00U) == true.
447constexpr inline bool isShiftedMask_32(uint32_t Value) {
448 return Value && isMask_32((Value - 1) | Value);
449}
450
451/// Return true if the argument contains a non-empty sequence of ones with the
452/// remainder zero (64 bit version.)
453constexpr inline bool isShiftedMask_64(uint64_t Value) {
454 return Value && isMask_64((Value - 1) | Value);
455}
456
457/// Return true if the argument is a power of two > 0.
458/// Ex. isPowerOf2_32(0x00100000U) == true (32 bit edition.)
459constexpr inline bool isPowerOf2_32(uint32_t Value) {
460 return llvm::has_single_bit(Value);
461}
462
463/// Return true if the argument is a power of two > 0 (64 bit edition.)
464constexpr inline bool isPowerOf2_64(uint64_t Value) {
465 return llvm::has_single_bit(Value);
466}
467
468/// Count the number of ones from the most significant bit to the first
469/// zero bit.
470///
471/// Ex. countLeadingOnes(0xFF0FFF00) == 8.
472/// Only unsigned integral types are allowed.
473///
474/// \param ZB the behavior on an input of all ones. Only ZB_Width and
475/// ZB_Undefined are valid arguments.
476template <typename T>
477unsigned countLeadingOnes(T Value, ZeroBehavior ZB = ZB_Width) {
478 static_assert(std::is_unsigned_v<T>,
479 "Only unsigned integral types are allowed.");
480 return countLeadingZeros<T>(~Value, ZB);
481}
482
483/// Count the number of ones from the least significant bit to the first
484/// zero bit.
485///
486/// Ex. countTrailingOnes(0x00FF00FF) == 8.
487/// Only unsigned integral types are allowed.
488///
489/// \param ZB the behavior on an input of all ones. Only ZB_Width and
490/// ZB_Undefined are valid arguments.
491template <typename T>
492unsigned countTrailingOnes(T Value, ZeroBehavior ZB = ZB_Width) {
493 static_assert(std::is_unsigned_v<T>,
494 "Only unsigned integral types are allowed.");
495 return countTrailingZeros<T>(~Value, ZB);
10
Calling 'countTrailingZeros<unsigned int>'
17
Returning from 'countTrailingZeros<unsigned int>'
18
Returning the value 32
496}
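This is the call through which the Width of 32 in the report originates: for an all-ones input, ~Value is 0, and with the default ZB_Width behaviour countTrailingZeros then returns the type width. A minimal check, assuming the LLVM headers are available:

    #include "llvm/Support/MathExtras.h"
    #include <cassert>

    int main() {
      // All 32 Mode bits known in the mask => countTrailingOnes returns 32,
      // which then feeds the (1 << Width) shift flagged at SIModeRegister.cpp:205.
      assert(llvm::countTrailingOnes<unsigned>(0xFFFFFFFFu) == 32u);
      return 0;
    }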
497
498/// Count the number of set bits in a value.
499/// Ex. countPopulation(0xF000F000) = 8
500/// Returns 0 if the word is zero.
501template <typename T>
502inline unsigned countPopulation(T Value) {
503 static_assert(std::is_unsigned_v<T>,
504 "Only unsigned integral types are allowed.");
505 return (unsigned)llvm::popcount(Value);
506}
507
508/// Return true if the argument contains a non-empty sequence of ones with the
509/// remainder zero (32 bit version.) Ex. isShiftedMask_32(0x0000FF00U) == true.
510/// If true, \p MaskIdx will specify the index of the lowest set bit and \p
511/// MaskLen is updated to specify the length of the mask, else neither are
512/// updated.
513inline bool isShiftedMask_32(uint32_t Value, unsigned &MaskIdx,
514 unsigned &MaskLen) {
515 if (!isShiftedMask_32(Value))
516 return false;
517 MaskIdx = countTrailingZeros(Value);
518 MaskLen = countPopulation(Value);
519 return true;
520}
521
522/// Return true if the argument contains a non-empty sequence of ones with the
523/// remainder zero (64 bit version.) If true, \p MaskIdx will specify the index
524/// of the lowest set bit and \p MaskLen is updated to specify the length of the
525/// mask, else neither are updated.
526inline bool isShiftedMask_64(uint64_t Value, unsigned &MaskIdx,
527 unsigned &MaskLen) {
528 if (!isShiftedMask_64(Value))
529 return false;
530 MaskIdx = countTrailingZeros(Value);
531 MaskLen = countPopulation(Value);
532 return true;
533}
534
535/// Compile time Log2.
536/// Valid only for positive powers of two.
537template <size_t kValue> constexpr inline size_t CTLog2() {
538 static_assert(kValue > 0 && llvm::isPowerOf2_64(kValue),
539 "Value is not a valid power of 2");
540 return 1 + CTLog2<kValue / 2>();
541}
542
543template <> constexpr inline size_t CTLog2<1>() { return 0; }
544
545/// Return the floor log base 2 of the specified value, -1 if the value is zero.
546/// (32 bit edition.)
547/// Ex. Log2_32(32) == 5, Log2_32(1) == 0, Log2_32(0) == -1, Log2_32(6) == 2
548inline unsigned Log2_32(uint32_t Value) {
549 return 31 - countLeadingZeros(Value);
550}
551
552/// Return the floor log base 2 of the specified value, -1 if the value is zero.
553/// (64 bit edition.)
554inline unsigned Log2_64(uint64_t Value) {
555 return 63 - countLeadingZeros(Value);
556}
557
558/// Return the ceil log base 2 of the specified value, 32 if the value is zero.
559/// (32 bit edition).
560/// Ex. Log2_32_Ceil(32) == 5, Log2_32_Ceil(1) == 0, Log2_32_Ceil(6) == 3
561inline unsigned Log2_32_Ceil(uint32_t Value) {
562 return 32 - countLeadingZeros(Value - 1);
563}
564
565/// Return the ceil log base 2 of the specified value, 64 if the value is zero.
566/// (64 bit edition.)
567inline unsigned Log2_64_Ceil(uint64_t Value) {
568 return 64 - countLeadingZeros(Value - 1);
569}
570
571/// This function takes a 64-bit integer and returns the bit equivalent double.
572inline double BitsToDouble(uint64_t Bits) {
573 static_assert(sizeof(uint64_t) == sizeof(double), "Unexpected type sizes");
574 return llvm::bit_cast<double>(Bits);
575}
576
577/// This function takes a 32-bit integer and returns the bit equivalent float.
578inline float BitsToFloat(uint32_t Bits) {
579 static_assert(sizeof(uint32_t) == sizeof(float), "Unexpected type sizes");
580 return llvm::bit_cast<float>(Bits);
581}
582
583/// This function takes a double and returns the bit equivalent 64-bit integer.
584/// Note that copying doubles around changes the bits of NaNs on some hosts,
585/// notably x86, so this routine cannot be used if these bits are needed.
586inline uint64_t DoubleToBits(double Double) {
587 static_assert(sizeof(uint64_t) == sizeof(double), "Unexpected type sizes");
588 return llvm::bit_cast<uint64_t>(Double);
589}
590
591/// This function takes a float and returns the bit equivalent 32-bit integer.
592/// Note that copying floats around changes the bits of NaNs on some hosts,
593/// notably x86, so this routine cannot be used if these bits are needed.
594inline uint32_t FloatToBits(float Float) {
595 static_assert(sizeof(uint32_t) == sizeof(float), "Unexpected type sizes");
596 return llvm::bit_cast<uint32_t>(Float);
597}
598
599/// A and B are either alignments or offsets. Return the minimum alignment that
600/// may be assumed after adding the two together.
601constexpr inline uint64_t MinAlign(uint64_t A, uint64_t B) {
602 // The largest power of 2 that divides both A and B.
603 //
604 // Replace "-Value" by "1+~Value" in the following commented code to avoid
605 // MSVC warning C4146
606 // return (A | B) & -(A | B);
607 return (A | B) & (1 + ~(A | B));
608}
609
610/// Returns the next power of two (in 64-bits) that is strictly greater than A.
611/// Returns zero on overflow.
612constexpr inline uint64_t NextPowerOf2(uint64_t A) {
613 A |= (A >> 1);
614 A |= (A >> 2);
615 A |= (A >> 4);
616 A |= (A >> 8);
617 A |= (A >> 16);
618 A |= (A >> 32);
619 return A + 1;
620}
621
622/// Returns the power of two which is less than or equal to the given value.
623/// Essentially, it is a floor operation across the domain of powers of two.
624inline uint64_t PowerOf2Floor(uint64_t A) {
625 if (!A) return 0;
626 return 1ull << (63 - countLeadingZeros(A, ZB_Undefined));
627}
628
629/// Returns the power of two which is greater than or equal to the given value.
630/// Essentially, it is a ceil operation across the domain of powers of two.
631inline uint64_t PowerOf2Ceil(uint64_t A) {
632 if (!A)
633 return 0;
634 return NextPowerOf2(A - 1);
635}
636
637/// Returns the next integer (mod 2**64) that is greater than or equal to
638/// \p Value and is a multiple of \p Align. \p Align must be non-zero.
639///
640/// Examples:
641/// \code
642/// alignTo(5, 8) = 8
643/// alignTo(17, 8) = 24
644/// alignTo(~0LL, 8) = 0
645/// alignTo(321, 255) = 510
646/// \endcode
647inline uint64_t alignTo(uint64_t Value, uint64_t Align) {
648 assert(Align != 0u && "Align can't be 0.");
649 return (Value + Align - 1) / Align * Align;
650}
651
652inline uint64_t alignToPowerOf2(uint64_t Value, uint64_t Align) {
653 assert(Align != 0 && (Align & (Align - 1)) == 0 &&
654 "Align must be a power of 2");
655 return (Value + Align - 1) & -Align;
656}
657
658/// If non-zero \p Skew is specified, the return value will be a minimal integer
659/// that is greater than or equal to \p Size and equal to \p A * N + \p Skew for
660/// some integer N. If \p Skew is larger than \p A, its value is adjusted to '\p
661/// Skew mod \p A'. \p Align must be non-zero.
662///
663/// Examples:
664/// \code
665/// alignTo(5, 8, 7) = 7
666/// alignTo(17, 8, 1) = 17
667/// alignTo(~0LL, 8, 3) = 3
668/// alignTo(321, 255, 42) = 552
669/// \endcode
670inline uint64_t alignTo(uint64_t Value, uint64_t Align, uint64_t Skew) {
671 assert(Align != 0u && "Align can't be 0.");
672 Skew %= Align;
673 return alignTo(Value - Skew, Align) + Skew;
674}
675
676/// Returns the next integer (mod 2**64) that is greater than or equal to
677/// \p Value and is a multiple of \c Align. \c Align must be non-zero.
678template <uint64_t Align> constexpr inline uint64_t alignTo(uint64_t Value) {
679 static_assert(Align != 0u, "Align must be non-zero");
680 return (Value + Align - 1) / Align * Align;
681}
682
683/// Returns the integer ceil(Numerator / Denominator).
684inline uint64_t divideCeil(uint64_t Numerator, uint64_t Denominator) {
685 return alignTo(Numerator, Denominator) / Denominator;
686}
687
688/// Returns the integer nearest(Numerator / Denominator).
689inline uint64_t divideNearest(uint64_t Numerator, uint64_t Denominator) {
690 return (Numerator + (Denominator / 2)) / Denominator;
691}
692
693/// Returns the largest uint64_t less than or equal to \p Value and is
694/// \p Skew mod \p Align. \p Align must be non-zero
695inline uint64_t alignDown(uint64_t Value, uint64_t Align, uint64_t Skew = 0) {
696 assert(Align != 0u && "Align can't be 0.");
697 Skew %= Align;
698 return (Value - Skew) / Align * Align + Skew;
699}
700
701/// Sign-extend the number in the bottom B bits of X to a 32-bit integer.
702/// Requires 0 < B <= 32.
703template <unsigned B> constexpr inline int32_t SignExtend32(uint32_t X) {
704 static_assert(B > 0, "Bit width can't be 0.");
705 static_assert(B <= 32, "Bit width out of range.");
706 return int32_t(X << (32 - B)) >> (32 - B);
707}
708
709/// Sign-extend the number in the bottom B bits of X to a 32-bit integer.
710/// Requires 0 < B <= 32.
711inline int32_t SignExtend32(uint32_t X, unsigned B) {
712 assert(B > 0 && "Bit width can't be 0.");
713 assert(B <= 32 && "Bit width out of range.");
714 return int32_t(X << (32 - B)) >> (32 - B);
715}
716
717/// Sign-extend the number in the bottom B bits of X to a 64-bit integer.
718/// Requires 0 < B <= 64.
719template <unsigned B> constexpr inline int64_t SignExtend64(uint64_t x) {
720 static_assert(B > 0, "Bit width can't be 0.");
721 static_assert(B <= 64, "Bit width out of range.");
722 return int64_t(x << (64 - B)) >> (64 - B);
723}
724
725/// Sign-extend the number in the bottom B bits of X to a 64-bit integer.
726/// Requires 0 < B <= 64.
727inline int64_t SignExtend64(uint64_t X, unsigned B) {
728 assert(B > 0 && "Bit width can't be 0.");
729 assert(B <= 64 && "Bit width out of range.");
730 return int64_t(X << (64 - B)) >> (64 - B);
731}
732
733/// Subtract two unsigned integers, X and Y, of type T and return the absolute
734/// value of the result.
735template <typename T>
736std::enable_if_t<std::is_unsigned<T>::value, T> AbsoluteDifference(T X, T Y) {
737 return X > Y ? (X - Y) : (Y - X);
738}
739
740/// Add two unsigned integers, X and Y, of type T. Clamp the result to the
741/// maximum representable value of T on overflow. ResultOverflowed indicates if
742/// the result is larger than the maximum representable value of type T.
743template <typename T>
744std::enable_if_t<std::is_unsigned<T>::value, T>
745SaturatingAdd(T X, T Y, bool *ResultOverflowed = nullptr) {
746 bool Dummy;
747 bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;
748 // Hacker's Delight, p. 29
749 T Z = X + Y;
750 Overflowed = (Z < X || Z < Y);
751 if (Overflowed)
752 return std::numeric_limits<T>::max();
753 else
754 return Z;
755}
756
757/// Multiply two unsigned integers, X and Y, of type T. Clamp the result to the
758/// maximum representable value of T on overflow. ResultOverflowed indicates if
759/// the result is larger than the maximum representable value of type T.
760template <typename T>
761std::enable_if_t<std::is_unsigned<T>::value, T>
762SaturatingMultiply(T X, T Y, bool *ResultOverflowed = nullptr) {
763 bool Dummy;
764 bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;
765
766 // Hacker's Delight, p. 30 has a different algorithm, but we don't use that
767 // because it fails for uint16_t (where multiplication can have undefined
768 // behavior due to promotion to int), and requires a division in addition
769 // to the multiplication.
770
771 Overflowed = false;
772
773 // Log2(Z) would be either Log2Z or Log2Z + 1.
774 // Special case: if X or Y is 0, Log2_64 gives -1, and Log2Z
775 // will necessarily be less than Log2Max as desired.
776 int Log2Z = Log2_64(X) + Log2_64(Y);
777 const T Max = std::numeric_limits<T>::max();
778 int Log2Max = Log2_64(Max);
779 if (Log2Z < Log2Max) {
780 return X * Y;
781 }
782 if (Log2Z > Log2Max) {
783 Overflowed = true;
784 return Max;
785 }
786
787 // We're going to use the top bit, and maybe overflow one
788 // bit past it. Multiply all but the bottom bit then add
789 // that on at the end.
790 T Z = (X >> 1) * Y;
791 if (Z & ~(Max >> 1)) {
792 Overflowed = true;
793 return Max;
794 }
795 Z <<= 1;
796 if (X & 1)
797 return SaturatingAdd(Z, Y, ResultOverflowed);
798
799 return Z;
800}
801
802/// Multiply two unsigned integers, X and Y, and add the unsigned integer, A to
803/// the product. Clamp the result to the maximum representable value of T on
804/// overflow. ResultOverflowed indicates if the result is larger than the
805/// maximum representable value of type T.
806template <typename T>
807std::enable_if_t<std::is_unsigned<T>::value, T>
808SaturatingMultiplyAdd(T X, T Y, T A, bool *ResultOverflowed = nullptr) {
809 bool Dummy;
810 bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;
811
812 T Product = SaturatingMultiply(X, Y, &Overflowed);
813 if (Overflowed)
814 return Product;
815
816 return SaturatingAdd(A, Product, &Overflowed);
817}
818
819/// Use this rather than HUGE_VALF; the latter causes warnings on MSVC.
820extern const float huge_valf;
821
822
823/// Add two signed integers, computing the two's complement truncated result,
824/// returning true if overflow occurred.
825template <typename T>
826std::enable_if_t<std::is_signed<T>::value, T> AddOverflow(T X, T Y, T &Result) {
827#if __has_builtin(__builtin_add_overflow)
828 return __builtin_add_overflow(X, Y, &Result);
829#else
830 // Perform the unsigned addition.
831 using U = std::make_unsigned_t<T>;
832 const U UX = static_cast<U>(X);
833 const U UY = static_cast<U>(Y);
834 const U UResult = UX + UY;
835
836 // Convert to signed.
837 Result = static_cast<T>(UResult);
838
839 // Adding two positive numbers should result in a positive number.
840 if (X > 0 && Y > 0)
841 return Result <= 0;
842 // Adding two negatives should result in a negative number.
843 if (X < 0 && Y < 0)
844 return Result >= 0;
845 return false;
846#endif
847}
848
849/// Subtract two signed integers, computing the two's complement truncated
850/// result, returning true if an overflow occurred.
851template <typename T>
852std::enable_if_t<std::is_signed<T>::value, T> SubOverflow(T X, T Y, T &Result) {
853#if __has_builtin(__builtin_sub_overflow)
854 return __builtin_sub_overflow(X, Y, &Result);
855#else
856 // Perform the unsigned addition.
857 using U = std::make_unsigned_t<T>;
858 const U UX = static_cast<U>(X);
859 const U UY = static_cast<U>(Y);
860 const U UResult = UX - UY;
861
862 // Convert to signed.
863 Result = static_cast<T>(UResult);
864
865 // Subtracting a positive number from a negative results in a negative number.
866 if (X <= 0 && Y > 0)
867 return Result >= 0;
868 // Subtracting a negative number from a positive results in a positive number.
869 if (X >= 0 && Y < 0)
870 return Result <= 0;
871 return false;
872#endif
873}
874
875/// Multiply two signed integers, computing the two's complement truncated
876/// result, returning true if an overflow occurred.
877template <typename T>
878std::enable_if_t<std::is_signed<T>::value, T> MulOverflow(T X, T Y, T &Result) {
879 // Perform the unsigned multiplication on absolute values.
880 using U = std::make_unsigned_t<T>;
881 const U UX = X < 0 ? (0 - static_cast<U>(X)) : static_cast<U>(X);
882 const U UY = Y < 0 ? (0 - static_cast<U>(Y)) : static_cast<U>(Y);
883 const U UResult = UX * UY;
884
885 // Convert to signed.
886 const bool IsNegative = (X < 0) ^ (Y < 0);
887 Result = IsNegative ? (0 - UResult) : UResult;
888
889 // If any of the args was 0, result is 0 and no overflow occurs.
890 if (UX == 0 || UY == 0)
891 return false;
892
893 // UX and UY are in [1, 2^n], where n is the number of digits.
894 // Check how the max allowed absolute value (2^n for negative, 2^(n-1) for
895 // positive) divided by an argument compares to the other.
896 if (IsNegative)
897 return UX > (static_cast<U>(std::numeric_limits<T>::max()) + U(1)) / UY;
898 else
899 return UX > (static_cast<U>(std::numeric_limits<T>::max())) / UY;
900}
901
902} // End llvm namespace
903
904#endif