Bug Summary

File: bolt/runtime/common.h
Warning: line 209, column 10
Branch condition evaluates to a garbage value
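
The path behind this warning: getKernelVersion() in hugify.cpp calls __uname() and stores the result in Ret, but never checks it. On the failure path UtsName.release stays uninitialized, so the while (*Str++) condition in strLen() at common.h:209 branches on an indeterminate byte. Below is a minimal sketch of a guard that reuses the names from the annotated sources; it is an illustration, not the upstream fix.

// Sketch only: zero-initialize the buffer and check the __uname() result so
// that strLen() never sees indeterminate bytes.
static void getKernelVersion(uint32_t *Val) {
  struct UtsNameTy UtsName;
  memset(&UtsName, 0, sizeof(UtsName)); // release[] is now always terminated
  if (__uname(&UtsName) != 0)           // bail out if the syscall failed
    return;
  const char *Buf = UtsName.release;
  const char *End = Buf + strLen(Buf);
  // ... parse "major.minor.release" as in hugify.cpp lines 42-58 ...
}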

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name hugify.cpp -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -fhalf-no-semantic-interposition -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -ffreestanding -target-cpu x86-64 -target-feature -x87 -target-feature -mmx -target-feature -sse -tune-cpu generic -debugger-tuning=gdb -fdebug-compilation-dir=/build/source/build-llvm/tools/clang/stage2-bins/tools/bolt/bolt_rt-bins -fcoverage-compilation-dir=/build/source/build-llvm/tools/clang/stage2-bins/tools/bolt/bolt_rt-bins -resource-dir /usr/lib/llvm-19/lib/clang/19 -I . -D NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-19/lib/clang/19/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -source-date-epoch 1713179664 -O3 -std=c++17 -fdeprecated-macro -ferror-limit 19 -fno-rtti -fgnuc-version=4.2.1 -fskip-odr-check-in-gmf -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2024-04-15-133351-72004-1 -x c++ /build/source/bolt/runtime/hugify.cpp

/build/source/bolt/runtime/hugify.cpp

1//===- bolt/runtime/hugify.cpp -------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===---------------------------------------------------------------------===//
8
9#if defined (__x86_64__) && !defined(__APPLE__)
10
11#include "common.h"
12
13#pragma GCC visibility push(hidden)
14
15// Enables very verbose logging to stderr, useful when debugging.
16// #define ENABLE_DEBUG
17
18#ifdef ENABLE_DEBUG
19#define DEBUG(X) \
20 { X; }
21#else
22#define DEBUG(X) \
23 {}
24#endif
25
26// Function contains a trampoline to _start,
27// so we can resume regular execution of the function that we hooked.
28extern void __bolt_hugify_start_program();
29
30// The __hot_start and __hot_end symbols are set by BOLT. We use them to figure
31// out the range for marking huge pages.
32extern uint64_t __hot_start;
33extern uint64_t __hot_end;
34
35static void getKernelVersion(uint32_t *Val) {
36 // release should be in the format: %d.%d.%d
37 // major, minor, release
38 struct UtsNameTy UtsName;
39 int Ret = __uname(&UtsName);
1
Calling '__uname'
2
Returning from '__uname'
40 const char *Buf = UtsName.release;
41 const char *End = Buf + strLen(Buf);
3
Calling 'strLen'
42 const char Delims[2][2] = {".", "."};
43
44 for (int i = 0; i < 3; ++i) {
45 if (!scanUInt32(Buf, End, Val[i])) {
46 return;
47 }
48 if (i < sizeof(Delims) / sizeof(Delims[0])) {
49 const char *Ptr = Delims[i];
50 while (*Ptr != '\0') {
51 if (*Ptr != *Buf) {
52 return;
53 }
54 ++Ptr;
55 ++Buf;
56 }
57 }
58 }
59}
60
61/// Check whether the kernel supports THP via the corresponding sysfs entry.
62/// Pagecache THP works only starting from kernel 5.10.
63static bool hasPagecacheTHPSupport() {
64 char Buf[64];
65
66 int FD = __open("/sys/kernel/mm/transparent_hugepage/enabled",
67 0 /* O_RDONLY */, 0);
68 if (FD < 0)
69 return false;
70
71 memset(Buf, 0, sizeof(Buf));
72 const size_t Res = __read(FD, Buf, sizeof(Buf));
73 if (Res < 0)
74 return false;
75
76 if (!strStr(Buf, "[always]") && !strStr(Buf, "[madvise]"))
77 return false;
78
79 struct KernelVersionTy {
80 uint32_t major;
81 uint32_t minor;
82 uint32_t release;
83 };
84
85 KernelVersionTy KernelVersion;
86
87 getKernelVersion((uint32_t *)&KernelVersion);
88 if (KernelVersion.major >= 5 && KernelVersion.minor >= 10)
89 return true;
90
91 return false;
92}
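
A side note on the version check above, not part of the analyzed file: the comment says pagecache THP needs kernel 5.10+, but major >= 5 && minor >= 10 also requires minor >= 10 on newer major versions (for example 6.1 would be rejected). A sketch of a (major, minor) comparison, with a hypothetical helper name:

// Sketch only: accept any kernel at or above WantMajor.WantMinor.
static bool kernelAtLeast(uint32_t Major, uint32_t Minor, uint32_t WantMajor,
                          uint32_t WantMinor) {
  if (Major != WantMajor)
    return Major > WantMajor; // a newer major version is always enough
  return Minor >= WantMinor;  // same major: the minor version decides
}
// e.g. return kernelAtLeast(KernelVersion.major, KernelVersion.minor, 5, 10);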
93
94static void hugifyForOldKernel(uint8_t *From, uint8_t *To) {
95 const size_t Size = To - From;
96
97 uint8_t *Mem = reinterpret_cast<uint8_t *>(
98 __mmap(0, Size, 0x3 /* PROT_READ | PROT_WRITE */,
99 0x22 /* MAP_PRIVATE | MAP_ANONYMOUS */, -1, 0));
100
101 if (Mem == ((void *)-1) /* MAP_FAILED */) {
102 char Msg[] = "[hugify] could not allocate memory for text move\n";
103 reportError(Msg, sizeof(Msg));
104 }
105
106 DEBUG(reportNumber("[hugify] allocated temporary address: ", (uint64_t)Mem,
107 16);)
108 DEBUG(reportNumber("[hugify] allocated size: ", (uint64_t)Size, 16);)
109
110 // Copy the hot code to a temporary location.
111 memcpy(Mem, From, Size);
112
113 __prctl(41 /* PR_SET_THP_DISABLE */, 0, 0, 0, 0);
114 // Maps out the existing hot code.
115 if (__mmap(reinterpret_cast<uint64_t>(From), Size,
116 0x3 /* PROT_READ | PROT_WRITE */,
117 0x32 /* MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE */, -1,
118 0) == ((void *)-1) /*MAP_FAILED*/) {
119 char Msg[] =
120 "[hugify] failed to mmap memory for large page move terminating\n";
121 reportError(Msg, sizeof(Msg));
122 }
123
124 // Mark the hot code page to be huge page.
125 if (__madvise(From, Size, 14 /* MADV_HUGEPAGE */) == -1) {
126 char Msg[] = "[hugify] setting MADV_HUGEPAGE is failed\n";
127 reportError(Msg, sizeof(Msg));
128 }
129
130 // Copy the hot code back.
131 memcpy(From, Mem, Size);
132
133 // Change permissions back to read and execute only; ignore failure.
134 __mprotect(From, Size, 0x5 /* PROT_READ | PROT_EXEC */);
135
136 __munmap(Mem, Size);
137}
138
139extern "C" void __bolt_hugify_self_impl() {
140 uint8_t *HotStart = (uint8_t *)&__hot_start;
141 uint8_t *HotEnd = (uint8_t *)&__hot_end;
142 // Make sure the start and end are aligned with huge page address
143 const size_t HugePageBytes = 2L * 1024 * 1024;
144 uint8_t *From = HotStart - ((intptr_t)HotStart & (HugePageBytes - 1));
145 uint8_t *To = HotEnd + (HugePageBytes - 1);
146 To -= (intptr_t)To & (HugePageBytes - 1);
147
148 DEBUG(reportNumber("[hugify] hot start: ", (uint64_t)HotStart, 16);)
149 DEBUG(reportNumber("[hugify] hot end: ", (uint64_t)HotEnd, 16);)
150 DEBUG(reportNumber("[hugify] aligned huge page from: ", (uint64_t)From, 16);)
151 DEBUG(reportNumber("[hugify] aligned huge page to: ", (uint64_t)To, 16);)
152
153 if (!hasPagecacheTHPSupport()) {
154 DEBUG(report(
155 "[hugify] workaround with memory alignment for kernel < 5.10\n");)
156 hugifyForOldKernel(From, To);
157 return;
158 }
159
160 if (__madvise(From, (To - From), 14 /* MADV_HUGEPAGE */) == -1) {
161 char Msg[] = "[hugify] failed to allocate large page\n";
162 // TODO: allow user to control the failure behavior.
163 reportError(Msg, sizeof(Msg));
164 }
165}
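
The masks above round HotStart down and HotEnd up to the 2 MiB huge-page size. A small standalone illustration with hypothetical addresses, not part of the analyzed file:

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t HugePageBytes = 2ull * 1024 * 1024; // 0x200000
  const uint64_t HotStart = 0x401234;                // hypothetical runtime addresses
  const uint64_t HotEnd = 0x7f5678;

  const uint64_t From = HotStart - (HotStart & (HugePageBytes - 1)); // round down
  uint64_t To = HotEnd + (HugePageBytes - 1);
  To -= To & (HugePageBytes - 1);                                    // round up

  assert(From == 0x400000); // aligned start of the hugified range
  assert(To == 0x800000);   // aligned end of the hugified range
  return 0;
}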
166
167/// This hooks the ELF entry point, so it needs to save all machine state.
168extern "C" __attribute((naked)) void __bolt_hugify_self() {
169#if defined(__x86_64__)
170 __asm__ __volatile__(SAVE_ALL "call __bolt_hugify_self_impl\n" RESTORE_ALL
171 "jmp __bolt_hugify_start_program\n" ::
172 :);
173#else
174 exit(1);
175#endif
176}
177#endif

/build/source/bolt/runtime/sys_x86_64.h

1#ifndef LLVM_TOOLS_LLVM_BOLT_SYS_X86_64
2#define LLVM_TOOLS_LLVM_BOLT_SYS_X86_64
3
4// Save all registers while keeping 16B stack alignment
5#define SAVE_ALL \
6 "push %%rax\n" \
7 "push %%rbx\n" \
8 "push %%rcx\n" \
9 "push %%rdx\n" \
10 "push %%rdi\n" \
11 "push %%rsi\n" \
12 "push %%rbp\n" \
13 "push %%r8\n" \
14 "push %%r9\n" \
15 "push %%r10\n" \
16 "push %%r11\n" \
17 "push %%r12\n" \
18 "push %%r13\n" \
19 "push %%r14\n" \
20 "push %%r15\n" \
21 "sub $8, %%rsp\n"
22// Mirrors SAVE_ALL
23#define RESTORE_ALL \
24 "add $8, %%rsp\n" \
25 "pop %%r15\n" \
26 "pop %%r14\n" \
27 "pop %%r13\n" \
28 "pop %%r12\n" \
29 "pop %%r11\n" \
30 "pop %%r10\n" \
31 "pop %%r9\n" \
32 "pop %%r8\n" \
33 "pop %%rbp\n" \
34 "pop %%rsi\n" \
35 "pop %%rdi\n" \
36 "pop %%rdx\n" \
37 "pop %%rcx\n" \
38 "pop %%rbx\n" \
39 "pop %%rax\n"
40
41namespace {
42
43// Get the difference between the runtime address of the .text section and
44// the static address in the section header table. It can be subtracted from an
45// arbitrary pc value recorded at runtime to get the corresponding static address,
46// which in turn can be used to search for an indirect call description. Needed
47// because indirect call descriptions are read-only non-relocatable data.
48uint64_t getTextBaseAddress() {
49 uint64_t DynAddr;
50 uint64_t StaticAddr;
51 __asm__ volatile("leaq __hot_end(%%rip), %0\n\t"
52 "movabsq $__hot_end, %1\n\t"
53 : "=r"(DynAddr), "=r"(StaticAddr));
54 return DynAddr - StaticAddr;
55}
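
A usage sketch for the delta returned above, not part of the analyzed file; the helper name is hypothetical:

// Sketch only: map a pc sampled at runtime back to the static address recorded
// in the section header table, which is what read-only call descriptions use.
uint64_t runtimeToStaticAddr(uint64_t RuntimePC) {
  static const uint64_t TextDelta = getTextBaseAddress(); // computed once
  return RuntimePC - TextDelta;
}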
56
57#define _STRINGIFY(x) #x
58#define STRINGIFY(x) _STRINGIFY(x)
59
60uint64_t __read(uint64_t fd, const void *buf, uint64_t count) {
61 uint64_t ret;
62#if defined(__APPLE__)
63#define READ_SYSCALL 0x2000003
64#else
65#define READ_SYSCALL 0
66#endif
67 __asm__ __volatile__("movq $" STRINGIFY(READ_SYSCALL) ", %%rax\n"
68 "syscall\n"
69 : "=a"(ret)
70 : "D"(fd), "S"(buf), "d"(count)
71 : "cc", "rcx", "r11", "memory");
72 return ret;
73}
74
75uint64_t __write(uint64_t fd, const void *buf, uint64_t count) {
76 uint64_t ret;
77#if defined(__APPLE__)
78#define WRITE_SYSCALL 0x2000004
79#else
80#define WRITE_SYSCALL 1
81#endif
82 __asm__ __volatile__("movq $" STRINGIFY(WRITE_SYSCALL) ", %%rax\n"
83 "syscall\n"
84 : "=a"(ret)
85 : "D"(fd), "S"(buf), "d"(count)
86 : "cc", "rcx", "r11", "memory");
87 return ret;
88}
89
90void *__mmap(uint64_t addr, uint64_t size, uint64_t prot, uint64_t flags,
91 uint64_t fd, uint64_t offset) {
92#if defined(__APPLE__)
93#define MMAP_SYSCALL 0x20000c5
94#else
95#define MMAP_SYSCALL 9
96#endif
97 void *ret;
98 register uint64_t r8 asm("r8") = fd;
99 register uint64_t r9 asm("r9") = offset;
100 register uint64_t r10 asm("r10") = flags;
101 __asm__ __volatile__("movq $" STRINGIFY(MMAP_SYSCALL) ", %%rax\n"
102 "syscall\n"
103 : "=a"(ret)
104 : "D"(addr), "S"(size), "d"(prot), "r"(r10), "r"(r8),
105 "r"(r9)
106 : "cc", "rcx", "r11", "memory");
107 return ret;
108}
109
110uint64_t __munmap(void *addr, uint64_t size) {
111#if defined(__APPLE__)
112#define MUNMAP_SYSCALL 0x2000049
113#else
114#define MUNMAP_SYSCALL 11
115#endif
116 uint64_t ret;
117 __asm__ __volatile__("movq $" STRINGIFY(MUNMAP_SYSCALL) ", %%rax\n"
118 "syscall\n"
119 : "=a"(ret)
120 : "D"(addr), "S"(size)
121 : "cc", "rcx", "r11", "memory");
122 return ret;
123}
124
125uint64_t __sigprocmask(int how, const void *set, void *oldset) {
126#if defined(__APPLE__)
127#define SIGPROCMASK_SYSCALL 0x2000030
128#else
129#define SIGPROCMASK_SYSCALL 14
130#endif
131 uint64_t ret;
132 register long r10 asm("r10") = sizeof(uint64_t);
133 __asm__ __volatile__("movq $" STRINGIFY(SIGPROCMASK_SYSCALL) ", %%rax\n"
134 "syscall\n"
135 : "=a"(ret)
136 : "D"(how), "S"(set), "d"(oldset), "r"(r10)
137 : "cc", "rcx", "r11", "memory");
138 return ret;
139}
140
141uint64_t __getpid() {
142 uint64_t ret;
143#if defined(__APPLE__)
144#define GETPID_SYSCALL 20
145#else
146#define GETPID_SYSCALL 39
147#endif
148 __asm__ __volatile__("movq $" STRINGIFY(GETPID_SYSCALL) ", %%rax\n"
149 "syscall\n"
150 : "=a"(ret)
151 :
152 : "cc", "rcx", "r11", "memory");
153 return ret;
154}
155
156uint64_t __exit(uint64_t code) {
157#if defined(__APPLE__)
158#define EXIT_SYSCALL 0x2000001
159#else
160#define EXIT_SYSCALL 231
161#endif
162 uint64_t ret;
163 __asm__ __volatile__("movq $" STRINGIFY(EXIT_SYSCALL) ", %%rax\n"
164 "syscall\n"
165 : "=a"(ret)
166 : "D"(code)
167 : "cc", "rcx", "r11", "memory");
168 return ret;
169}
170
171#if !defined(__APPLE__)
172// We use a stack-allocated buffer for string manipulation in many pieces of
173// this code, including the code that prints each line of the fdata file. This
174// buffer needs to accommodate large function names, but shouldn't be arbitrarily
175// large (dynamically allocated) for simplicity of our memory space usage.
176
177// Declare some syscall wrappers we use throughout this code to avoid linking
178// against system libc.
179uint64_t __open(const char *pathname, uint64_t flags, uint64_t mode) {
180 uint64_t ret;
181 __asm__ __volatile__("movq $2, %%rax\n"
182 "syscall"
183 : "=a"(ret)
184 : "D"(pathname), "S"(flags), "d"(mode)
185 : "cc", "rcx", "r11", "memory");
186 return ret;
187}
188
189long __getdents64(unsigned int fd, dirent64 *dirp, size_t count) {
190 long ret;
191 __asm__ __volatile__("movq $217, %%rax\n"
192 "syscall"
193 : "=a"(ret)
194 : "D"(fd), "S"(dirp), "d"(count)
195 : "cc", "rcx", "r11", "memory");
196 return ret;
197}
198
199uint64_t __readlink(const char *pathname, char *buf, size_t bufsize) {
200 uint64_t ret;
201 __asm__ __volatile__("movq $89, %%rax\n"
202 "syscall"
203 : "=a"(ret)
204 : "D"(pathname), "S"(buf), "d"(bufsize)
205 : "cc", "rcx", "r11", "memory");
206 return ret;
207}
208
209uint64_t __lseek(uint64_t fd, uint64_t pos, uint64_t whence) {
210 uint64_t ret;
211 __asm__ __volatile__("movq $8, %%rax\n"
212 "syscall\n"
213 : "=a"(ret)
214 : "D"(fd), "S"(pos), "d"(whence)
215 : "cc", "rcx", "r11", "memory");
216 return ret;
217}
218
219int __ftruncate(uint64_t fd, uint64_t length) {
220 int ret;
221 __asm__ __volatile__("movq $77, %%rax\n"
222 "syscall\n"
223 : "=a"(ret)
224 : "D"(fd), "S"(length)
225 : "cc", "rcx", "r11", "memory");
226 return ret;
227}
228
229int __close(uint64_t fd) {
230 uint64_t ret;
231 __asm__ __volatile__("movq $3, %%rax\n"
232 "syscall\n"
233 : "=a"(ret)
234 : "D"(fd)
235 : "cc", "rcx", "r11", "memory");
236 return ret;
237}
238
239int __madvise(void *addr, size_t length, int advice) {
240 int ret;
241 __asm__ __volatile__("movq $28, %%rax\n"
242 "syscall\n"
243 : "=a"(ret)
244 : "D"(addr), "S"(length), "d"(advice)
245 : "cc", "rcx", "r11", "memory");
246 return ret;
247}
248
249int __uname(struct UtsNameTy *Buf) {
250 int Ret;
251 __asm__ __volatile__("movq $63, %%rax\n"
252 "syscall\n"
253 : "=a"(Ret)
254 : "D"(Buf)
255 : "cc", "rcx", "r11", "memory");
256 return Ret;
257}
258
259uint64_t __nanosleep(const timespec *req, timespec *rem) {
260 uint64_t ret;
261 __asm__ __volatile__("movq $35, %%rax\n"
262 "syscall\n"
263 : "=a"(ret)
264 : "D"(req), "S"(rem)
265 : "cc", "rcx", "r11", "memory");
266 return ret;
267}
268
269int64_t __fork() {
270 uint64_t ret;
271 __asm__ __volatile__("movq $57, %%rax\n"
272 "syscall\n"
273 : "=a"(ret)
274 :
275 : "cc", "rcx", "r11", "memory");
276 return ret;
277}
278
279int __mprotect(void *addr, size_t len, int prot) {
280 int ret;
281 __asm__ __volatile__("movq $10, %%rax\n"
282 "syscall\n"
283 : "=a"(ret)
284 : "D"(addr), "S"(len), "d"(prot)
285 : "cc", "rcx", "r11", "memory");
286 return ret;
287}
288
289uint64_t __getppid() {
290 uint64_t ret;
291 __asm__ __volatile__("movq $110, %%rax\n"
292 "syscall\n"
293 : "=a"(ret)
294 :
295 : "cc", "rcx", "r11", "memory");
296 return ret;
297}
298
299int __setpgid(uint64_t pid, uint64_t pgid) {
300 int ret;
301 __asm__ __volatile__("movq $109, %%rax\n"
302 "syscall\n"
303 : "=a"(ret)
304 : "D"(pid), "S"(pgid)
305 : "cc", "rcx", "r11", "memory");
306 return ret;
307}
308
309uint64_t __getpgid(uint64_t pid) {
310 uint64_t ret;
311 __asm__ __volatile__("movq $121, %%rax\n"
312 "syscall\n"
313 : "=a"(ret)
314 : "D"(pid)
315 : "cc", "rcx", "r11", "memory");
316 return ret;
317}
318
319int __kill(uint64_t pid, int sig) {
320 int ret;
321 __asm__ __volatile__("movq $62, %%rax\n"
322 "syscall\n"
323 : "=a"(ret)
324 : "D"(pid), "S"(sig)
325 : "cc", "rcx", "r11", "memory");
326 return ret;
327}
328
329int __fsync(int fd) {
330 int ret;
331 __asm__ __volatile__("movq $74, %%rax\n"
332 "syscall\n"
333 : "=a"(ret)
334 : "D"(fd)
335 : "cc", "rcx", "r11", "memory");
336 return ret;
337}
338
339//           %rdi        %rsi           %rdx           %r10           %r8
340// sys_prctl int option  unsigned long  unsigned long  unsigned long  unsigned long
341//                       arg2           arg3           arg4           arg5
342int __prctl(int Option, unsigned long Arg2, unsigned long Arg3,
343 unsigned long Arg4, unsigned long Arg5) {
344 int Ret;
345 register long rdx asm("rdx") = Arg3;
346 register long r8 asm("r8") = Arg5;
347 register long r10 asm("r10") = Arg4;
348 __asm__ __volatile__("movq $157, %%rax\n"
349 "syscall\n"
350 : "=a"(Ret)
351 : "D"(Option), "S"(Arg2), "d"(rdx), "r"(r10), "r"(r8)
352 :);
353 return Ret;
354}
355
356#endif
357
358} // anonymous namespace
359
360#endif

/build/source/bolt/runtime/common.h

1//===- bolt/runtime/common.h ------------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#if defined(__linux__)
10
11#include <cstddef>
12#include <cstdint>
13
14#include "config.h"
15
16#ifdef HAVE_ELF_H
17#include <elf.h>
18#endif
19
20#elif defined(__APPLE__)
21
22typedef __SIZE_TYPE__ size_t;
23#define __SSIZE_TYPE__ \
24 __typeof__(_Generic((__SIZE_TYPE__)0, unsigned long long int \
25 : (long long int)0, unsigned long int \
26 : (long int)0, unsigned int \
27 : (int)0, unsigned short \
28 : (short)0, unsigned char \
29 : (signed char)0))
30typedef __SSIZE_TYPE__ ssize_t;
31
32typedef unsigned long long uint64_t;
33typedef unsigned uint32_t;
34typedef unsigned char uint8_t;
35
36typedef long long int64_t;
37typedef int int32_t;
38
39#else
40#error "For Linux or MacOS only"
41#endif
42
43#define PROT_READ 0x1 /* Page can be read. */
44#define PROT_WRITE 0x2 /* Page can be written. */
45#define PROT_EXEC 0x4 /* Page can be executed. */
46#define PROT_NONE 0x0 /* Page can not be accessed. */
47#define PROT_GROWSDOWN \
48 0x01000000 /* Extend change to start of \
49 growsdown vma (mprotect only). */
50#define PROT_GROWSUP \
51 0x02000000 /* Extend change to start of \
52 growsup vma (mprotect only). */
53
54/* Sharing types (must choose one and only one of these). */
55#define MAP_SHARED 0x01 /* Share changes. */
56#define MAP_PRIVATE 0x02 /* Changes are private. */
57#define MAP_FIXED 0x10 /* Interpret addr exactly. */
58
59#if defined(__APPLE__)
60#define MAP_ANONYMOUS 0x1000
61#else
62#define MAP_ANONYMOUS 0x20
63#endif
64
65#define MAP_FAILED ((void *)-1)
66
67#define SEEK_SET 0 /* Seek from beginning of file. */
68#define SEEK_CUR 1 /* Seek from current position. */
69#define SEEK_END 2 /* Seek from end of file. */
70
71#define O_RDONLY 0
72#define O_WRONLY 1
73#define O_RDWR 2
74#define O_CREAT 64
75#define O_TRUNC 512
76#define O_APPEND 1024
77
78// Functions that are required by freestanding environment. Compiler may
79// generate calls to these implicitly.
80extern "C" {
81void *memcpy(void *Dest, const void *Src, size_t Len) {
82 uint8_t *d = static_cast<uint8_t *>(Dest);
83 const uint8_t *s = static_cast<const uint8_t *>(Src);
84 while (Len--)
85 *d++ = *s++;
86 return Dest;
87}
88
89void *memmove(void *Dest, const void *Src, size_t Len) {
90 uint8_t *d = static_cast<uint8_t *>(Dest);
91 const uint8_t *s = static_cast<const uint8_t *>(Src);
92 if (d < s) {
93 while (Len--)
94 *d++ = *s++;
95 } else {
96 s += Len - 1;
97 d += Len - 1;
98 while (Len--)
99 *d-- = *s--;
100 }
101
102 return Dest;
103}
104
105void *memset(void *Buf, int C, size_t Size) {
106 char *S = (char *)Buf;
107 for (size_t I = 0; I < Size; ++I)
108 *S++ = C;
109 return Buf;
110}
111
112int memcmp(const void *s1, const void *s2, size_t n) {
113 const uint8_t *c1 = static_cast<const uint8_t *>(s1);
114 const uint8_t *c2 = static_cast<const uint8_t *>(s2);
115 for (; n--; c1++, c2++) {
116 if (*c1 != *c2)
117 return *c1 < *c2 ? -1 : 1;
118 }
119 return 0;
120}
121} // extern "C"
122
123// Anonymous namespace covering everything but our library entry point
124namespace {
125
126struct dirent64 {
127 uint64_t d_ino; /* Inode number */
128 int64_t d_off; /* Offset to next linux_dirent */
129 unsigned short d_reclen; /* Length of this linux_dirent */
130 unsigned char d_type;
131 char d_name[]; /* Filename (null-terminated) */
132 /* length is actually (d_reclen - 2 -
133 offsetof(struct linux_dirent, d_name)) */
134};
135
136/* Length of the entries in `struct utsname' is 65. */
137#define _UTSNAME_LENGTH 65
138
139struct UtsNameTy {
140 char sysname[_UTSNAME_LENGTH]; /* Operating system name (e.g., "Linux") */
141 char nodename[_UTSNAME_LENGTH]; /* Name within "some implementation-defined
142 network" */
143 char release[_UTSNAME_LENGTH]; /* Operating system release (e.g., "2.6.28") */
144 char version[_UTSNAME_LENGTH]; /* Operating system version */
145 char machine[_UTSNAME_LENGTH]; /* Hardware identifier */
146 char domainname[_UTSNAME_LENGTH]; /* NIS or YP domain name */
147};
148
149struct timespec {
150 uint64_t tv_sec; /* seconds */
151 uint64_t tv_nsec; /* nanoseconds */
152};
153
154#if defined(__aarch64__)
155#include "sys_aarch64.h"
156#else
157#include "sys_x86_64.h"
158#endif
159
160constexpr uint32_t BufSize = 10240;
161
162// Helper functions for writing strings to the .fdata file. We intentionally
163// avoid using libc names to make it clear it is our impl.
164
165/// Write number Num using Base to the buffer in OutBuf, returns a pointer to
166/// the end of the string.
167char *intToStr(char *OutBuf, uint64_t Num, uint32_t Base) {
168 const char *Chars = "0123456789abcdef";
169 char Buf[21];
170 char *Ptr = Buf;
171 while (Num) {
172 *Ptr++ = *(Chars + (Num % Base));
173 Num /= Base;
174 }
175 if (Ptr == Buf) {
176 *OutBuf++ = '0';
177 return OutBuf;
178 }
179 while (Ptr != Buf)
180 *OutBuf++ = *--Ptr;
181
182 return OutBuf;
183}
184
185/// Copy Str to OutBuf, returns a pointer to the end of the copied string
186char *strCopy(char *OutBuf, const char *Str, int32_t Size = BufSize) {
187 while (*Str) {
188 *OutBuf++ = *Str++;
189 if (--Size <= 0)
190 return OutBuf;
191 }
192 return OutBuf;
193}
194
195/// Compare two strings, at most Num bytes.
196int strnCmp(const char *Str1, const char *Str2, size_t Num) {
197 while (Num && *Str1 && (*Str1 == *Str2)) {
198 Num--;
199 Str1++;
200 Str2++;
201 }
202 if (Num == 0)
203 return 0;
204 return *(unsigned char *)Str1 - *(unsigned char *)Str2;
205}
206
207uint32_t strLen(const char *Str) {
208 uint32_t Size = 0;
209 while (*Str++)
4
Branch condition evaluates to a garbage value
210 ++Size;
211 return Size;
212}
213
214void *strStr(const char *const Haystack, const char *const Needle) {
215 int j = 0;
216
217 for (int i = 0; i < strLen(Haystack); i++) {
218 if (Haystack[i] == Needle[0]) {
219 for (j = 1; j < strLen(Needle); j++) {
220 if (Haystack[i + j] != Needle[j])
221 break;
222 }
223 if (j == strLen(Needle))
224 return (void *)&Haystack[i];
225 }
226 }
227 return nullptr;
228}
229
230void reportNumber(const char *Msg, uint64_t Num, uint32_t Base) {
231 char Buf[BufSize];
232 char *Ptr = Buf;
233 Ptr = strCopy(Ptr, Msg, BufSize - 23);
234 Ptr = intToStr(Ptr, Num, Base);
235 Ptr = strCopy(Ptr, "\n");
236 __write(2, Buf, Ptr - Buf);
237}
238
239void report(const char *Msg) { __write(2, Msg, strLen(Msg)); }
240
241unsigned long hexToLong(const char *Str, char Terminator = '\0') {
242 unsigned long Res = 0;
243 while (*Str != Terminator) {
244 Res <<= 4;
245 if ('0' <= *Str && *Str <= '9')
246 Res += *Str++ - '0';
247 else if ('a' <= *Str && *Str <= 'f')
248 Res += *Str++ - 'a' + 10;
249 else if ('A' <= *Str && *Str <= 'F')
250 Res += *Str++ - 'A' + 10;
251 else
252 return 0;
253 }
254 return Res;
255}
256
257/// Starting from character at \p buf, find the longest consecutive sequence
258/// of digits (0-9) and convert it to uint32_t. The converted value
259/// is put into \p ret. \p end marks the end of the buffer to avoid buffer
260/// overflow. The function \returns whether a valid uint32_t value is found.
261/// \p buf will be updated to the next character right after the digits.
262static bool scanUInt32(const char *&Buf, const char *End, uint32_t &Ret) {
263 uint64_t Result = 0;
264 const char *OldBuf = Buf;
265 while (Buf < End && ((*Buf) >= '0' && (*Buf) <= '9')) {
266 Result = Result * 10 + (*Buf) - '0';
267 ++Buf;
268 }
269 if (OldBuf != Buf && Result <= 0xFFFFFFFFu) {
270 Ret = static_cast<uint32_t>(Result);
271 return true;
272 }
273 return false;
274}
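
A usage sketch for scanUInt32(), not part of the analyzed file, mirroring how getKernelVersion() in hugify.cpp consumes the release string:

// Sketch only: parse "5.10.42" one digit run at a time.
void parseReleaseExample() {
  const char Release[] = "5.10.42";
  const char *Buf = Release;
  const char *End = Release + sizeof(Release) - 1;
  uint32_t Val[3] = {};
  for (int I = 0; I < 3; ++I) {
    if (!scanUInt32(Buf, End, Val[I]))
      return;            // stop at the first non-numeric field
    if (Buf < End && *Buf == '.')
      ++Buf;             // skip the '.' delimiter between components
  }
  // Val now holds {5, 10, 42}.
}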
275
276void reportError(const char *Msg, uint64_t Size) {
277 __write(2, Msg, Size);
278 __exit(1);
279}
280
281void assert(bool Assertion, const char *Msg) {
282 if (Assertion)
283 return;
284 char Buf[BufSize];
285 char *Ptr = Buf;
286 Ptr = strCopy(Ptr, "Assertion failed: ");
287 Ptr = strCopy(Ptr, Msg, BufSize - 40);
288 Ptr = strCopy(Ptr, "\n");
289 reportError(Buf, Ptr - Buf);
290}
291
292#define SIG_BLOCK 0
293#define SIG_UNBLOCK 1
294#define SIG_SETMASK 2
295
296static const uint64_t MaskAllSignals[] = {-1ULL};
297
298class Mutex {
299 volatile bool InUse{false};
300
301public:
302 bool acquire() { return !__atomic_test_and_set(&InUse, __ATOMIC_ACQUIRE); }
303 void release() { __atomic_clear(&InUse, __ATOMIC_RELEASE); }
304};
305
306/// RAII wrapper for Mutex
307class Lock {
308 Mutex &M;
309 uint64_t SignalMask[1] = {};
310
311public:
312 Lock(Mutex &M) : M(M) {
313 __sigprocmask(SIG_BLOCK, MaskAllSignals, SignalMask);
314 while (!M.acquire()) {
315 }
316 }
317
318 ~Lock() {
319 M.release();
320 __sigprocmask(SIG_SETMASK, SignalMask, nullptr);
321 }
322};
323
324/// RAII wrapper for Mutex
325class TryLock {
326 Mutex &M;
327 bool Locked = false;
328
329public:
330 TryLock(Mutex &M) : M(M) {
331 int Retry = 100;
332 while (--Retry && !M.acquire())
333 ;
334 if (Retry)
335 Locked = true;
336 }
337 bool isLocked() { return Locked; }
338
339 ~TryLock() {
340 if (isLocked())
341 M.release();
342 }
343};
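
A usage sketch for the RAII wrappers above, not part of the analyzed file; the mutex name is illustrative:

Mutex SharedStateMutex;             // hypothetical mutex guarding shared state

void writeSharedState() {
  Lock L(SharedStateMutex);         // blocks all signals, then spins on acquire()
  // ... touch the state protected by SharedStateMutex ...
}                                   // ~Lock releases the mutex and restores the signal mask

void tryWriteSharedState() {
  TryLock TL(SharedStateMutex);     // bounded spin: gives up after 100 attempts
  if (!TL.isLocked())
    return;                         // could not acquire; skip the update
  // ... touch the state protected by SharedStateMutex ...
}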
344
345inline uint64_t alignTo(uint64_t Value, uint64_t Align) {
346 return (Value + Align - 1) / Align * Align;
347}
348
349} // anonymous namespace