File: | build/llvm-toolchain-snapshot-15~++20220410100727+3c1483609369/mlir/lib/ExecutionEngine/CRunnerUtils.cpp |
Warning: | line 75, column 25 Array access (via field 'strides') results in a null pointer dereference |
Press '?' to see keyboard shortcuts
Keyboard shortcuts:
1 | //===- CRunnerUtils.cpp - Utils for MLIR execution ------------------------===// | ||||
2 | // | ||||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | ||||
4 | // See https://llvm.org/LICENSE.txt for license information. | ||||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | ||||
6 | // | ||||
7 | //===----------------------------------------------------------------------===// | ||||
8 | // | ||||
9 | // This file implements basic functions to manipulate structured MLIR types at | ||||
10 | // runtime. Entities in this file are meant to be retargetable, including on | ||||
11 | // targets without a C++ runtime, and must be kept C compatible. | ||||
12 | // | ||||
13 | //===----------------------------------------------------------------------===// | ||||
14 | |||||
15 | #include "mlir/ExecutionEngine/CRunnerUtils.h" | ||||
16 | |||||
17 | #ifndef _WIN32 | ||||
18 | #if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) | ||||
19 | #include <cstdlib> | ||||
20 | #else | ||||
21 | #include <alloca.h> | ||||
22 | #endif | ||||
23 | #include <sys/time.h> | ||||
24 | #else | ||||
25 | #include "malloc.h" | ||||
26 | #endif // _WIN32 | ||||
27 | |||||
28 | #include <cinttypes> | ||||
29 | #include <cstdio> | ||||
30 | #include <string.h> | ||||
31 | |||||
32 | #ifdef MLIR_CRUNNERUTILS_DEFINE_FUNCTIONS | ||||
33 | |||||
34 | // Small runtime support "lib" for vector.print lowering. | ||||
35 | // By providing elementary printing methods only, this | ||||
36 | // library can remain fully unaware of low-level implementation | ||||
37 | // details of our vectors. Also useful for direct LLVM IR output. | ||||
// Element printers for vector.print lowering; each writes to stdout with no
// trailing newline (printNewline terminates a line).
extern "C" void printI64(int64_t i) { fprintf(stdout, "%" PRId64, i); }
extern "C" void printU64(uint64_t u) { fprintf(stdout, "%" PRIu64, u); }
extern "C" void printF32(float f) { fprintf(stdout, "%g", f); }
extern "C" void printF64(double d) { fprintf(stdout, "%lg", d); }
extern "C" void printOpen() { fputs("( ", stdout); }
extern "C" void printClose() { fputs(" )", stdout); }
extern "C" void printComma() { fputs(", ", stdout); }
extern "C" void printNewline() { fputc('\n', stdout); }
46 | |||||
47 | extern "C" MLIR_CRUNNERUTILS_EXPORT__attribute__((visibility("default"))) void | ||||
48 | memrefCopy(int64_t elemSize, UnrankedMemRefType<char> *srcArg, | ||||
49 | UnrankedMemRefType<char> *dstArg) { | ||||
50 | DynamicMemRefType<char> src(*srcArg); | ||||
51 | DynamicMemRefType<char> dst(*dstArg); | ||||
| |||||
52 | |||||
53 | int64_t rank = src.rank; | ||||
54 | // Handle empty shapes -> nothing to copy. | ||||
55 | for (int rankp = 0; rankp < rank; ++rankp) | ||||
56 | if (src.sizes[rankp] == 0) | ||||
57 | return; | ||||
58 | |||||
59 | char *srcPtr = src.data + src.offset * elemSize; | ||||
60 | char *dstPtr = dst.data + dst.offset * elemSize; | ||||
61 | |||||
62 | if (rank
| ||||
63 | memcpy(dstPtr, srcPtr, elemSize); | ||||
64 | return; | ||||
65 | } | ||||
66 | |||||
67 | int64_t *indices = static_cast<int64_t *>(alloca(sizeof(int64_t) * rank)__builtin_alloca (sizeof(int64_t) * rank)); | ||||
68 | int64_t *srcStrides = static_cast<int64_t *>(alloca(sizeof(int64_t) * rank)__builtin_alloca (sizeof(int64_t) * rank)); | ||||
69 | int64_t *dstStrides = static_cast<int64_t *>(alloca(sizeof(int64_t) * rank)__builtin_alloca (sizeof(int64_t) * rank)); | ||||
70 | |||||
71 | // Initialize index and scale strides. | ||||
72 | for (int rankp = 0; rankp < rank; ++rankp) { | ||||
73 | indices[rankp] = 0; | ||||
74 | srcStrides[rankp] = src.strides[rankp] * elemSize; | ||||
75 | dstStrides[rankp] = dst.strides[rankp] * elemSize; | ||||
| |||||
76 | } | ||||
77 | |||||
78 | int64_t readIndex = 0, writeIndex = 0; | ||||
79 | for (;;) { | ||||
80 | // Copy over the element, byte by byte. | ||||
81 | memcpy(dstPtr + writeIndex, srcPtr + readIndex, elemSize); | ||||
82 | // Advance index and read position. | ||||
83 | for (int64_t axis = rank - 1; axis >= 0; --axis) { | ||||
84 | // Advance at current axis. | ||||
85 | auto newIndex = ++indices[axis]; | ||||
86 | readIndex += srcStrides[axis]; | ||||
87 | writeIndex += dstStrides[axis]; | ||||
88 | // If this is a valid index, we have our next index, so continue copying. | ||||
89 | if (src.sizes[axis] != newIndex) | ||||
90 | break; | ||||
91 | // We reached the end of this axis. If this is axis 0, we are done. | ||||
92 | if (axis == 0) | ||||
93 | return; | ||||
94 | // Else, reset to 0 and undo the advancement of the linear index that | ||||
95 | // this axis had. Then continue with the axis one outer. | ||||
96 | indices[axis] = 0; | ||||
97 | readIndex -= src.sizes[axis] * srcStrides[axis]; | ||||
98 | writeIndex -= dst.sizes[axis] * dstStrides[axis]; | ||||
99 | } | ||||
100 | } | ||||
101 | } | ||||
102 | |||||
/// Prints GFLOPS rating to stderr. Named `printFlops` to match the
/// declaration in CRunnerUtils.h (the previous definition name `print_flops`
/// had no matching declaration).
extern "C" void printFlops(double flops) {
  fprintf(stderr, "%lf GFLOPS\n", flops / 1.0E9);
}
107 | |||||
/// Returns the number of seconds since Epoch 1970-01-01 00:00:00 +0000 (UTC).
/// On Windows this is not implemented: it logs a message and returns 0.0.
extern "C" double rtclock() {
#ifndef _WIN32
  struct timeval tp;
  int stat = gettimeofday(&tp, nullptr);
  if (stat != 0)
    fprintf(stderr, "Error returning time from gettimeofday: %d\n", stat);
  // tv_usec is microseconds; fold it into the fractional part.
  return (tp.tv_sec + tp.tv_usec * 1.0e-6);
#else
  fprintf(stderr, "Timing utility not implemented on Windows\n");
  return 0.0;
#endif // _WIN32
}
121 | |||||
122 | #endif // MLIR_CRUNNERUTILS_DEFINE_FUNCTIONS |
1 | //===- CRunnerUtils.h - Utils for debugging MLIR execution ----------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file declares basic classes and functions to manipulate structured MLIR |
10 | // types at runtime. Entities in this file must be compliant with C++11 and be |
11 | // retargetable, including on targets without a C++ runtime. |
12 | // |
13 | //===----------------------------------------------------------------------===// |
14 | |
15 | #ifndef MLIR_EXECUTIONENGINE_CRUNNERUTILS_H |
16 | #define MLIR_EXECUTIONENGINE_CRUNNERUTILS_H |
17 | |
18 | #ifdef _WIN32 |
19 | #ifndef MLIR_CRUNNERUTILS_EXPORT__attribute__((visibility("default"))) |
20 | #ifdef mlir_c_runner_utils_EXPORTS1 |
21 | // We are building this library |
22 | #define MLIR_CRUNNERUTILS_EXPORT__attribute__((visibility("default"))) __declspec(dllexport) |
23 | #define MLIR_CRUNNERUTILS_DEFINE_FUNCTIONS |
24 | #else |
25 | // We are using this library |
26 | #define MLIR_CRUNNERUTILS_EXPORT__attribute__((visibility("default"))) __declspec(dllimport) |
27 | #endif // mlir_c_runner_utils_EXPORTS |
28 | #endif // MLIR_CRUNNERUTILS_EXPORT |
29 | #else // _WIN32 |
30 | // Non-windows: use visibility attributes. |
31 | #define MLIR_CRUNNERUTILS_EXPORT__attribute__((visibility("default"))) __attribute__((visibility("default"))) |
32 | #define MLIR_CRUNNERUTILS_DEFINE_FUNCTIONS |
33 | #endif // _WIN32 |
34 | |
35 | #include <array> |
36 | #include <cassert> |
37 | #include <cstdint> |
38 | #include <initializer_list> |
39 | |
40 | //===----------------------------------------------------------------------===// |
41 | // Codegen-compatible structures for Vector type. |
42 | //===----------------------------------------------------------------------===// |
43 | namespace mlir { |
44 | namespace detail { |
45 | |
// True iff `n` is a power of two; note the bit test is also true for n == 0,
// matching the original expression's behavior.
constexpr bool isPowerOf2(int n) { return (n & (n - 1)) == 0; }

// Rounds `n` up to the next power of two; every value <= 1 maps to 1.
constexpr unsigned nextPowerOf2(int n) {
  return n <= 1 ? 1
                : isPowerOf2(n) ? unsigned(n) : 2 * nextPowerOf2((n + 1) / 2);
}
51 | |
// Primary template; the IsPowerOf2 flag selects between the two
// specializations below (padded vs. unpadded storage).
template <typename T, int Dim, bool IsPowerOf2>
struct Vector1D;

// 1-D vector whose natural byte size is already a power of 2: plain array
// storage, no padding required.
template <typename T, int Dim>
struct Vector1D<T, Dim, /*IsPowerOf2=*/true> {
  Vector1D() {
    static_assert(detail::nextPowerOf2(sizeof(T[Dim])) == sizeof(T[Dim]),
                  "size error");
  }
  inline T &operator[](unsigned i) { return vector[i]; }
  inline const T &operator[](unsigned i) const { return vector[i]; }

private:
  T vector[Dim];
};

// 1-D vector, padded to the next power of 2 allocation.
// Specialization occurs to avoid zero size arrays (which fail in -Werror).
template <typename T, int Dim>
struct Vector1D<T, Dim, /*IsPowerOf2=*/false> {
  Vector1D() {
    static_assert(nextPowerOf2(sizeof(T[Dim])) > sizeof(T[Dim]), "size error");
    static_assert(nextPowerOf2(sizeof(T[Dim])) < 2 * sizeof(T[Dim]),
                  "size error");
  }
  inline T &operator[](unsigned i) { return vector[i]; }
  inline const T &operator[](unsigned i) const { return vector[i]; }

private:
  T vector[Dim];
  // Explicit tail padding bringing sizeof(*this) up to the next power of 2.
  char padding[nextPowerOf2(sizeof(T[Dim])) - sizeof(T[Dim])];
};
84 | } // namespace detail |
85 | } // namespace mlir |
86 | |
// N-D vectors recurse down to 1-D: a Vector<T, D0, D1, ...> is an array of D0
// Vector<T, D1, ...> elements, indexed one dimension at a time.
template <typename T, int Dim, int... Dims>
struct Vector {
  inline Vector<T, Dims...> &operator[](unsigned i) { return vector[i]; }
  inline const Vector<T, Dims...> &operator[](unsigned i) const {
    return vector[i];
  }

private:
  // Outermost dimension; each element is the (N-1)-D remainder.
  Vector<T, Dims...> vector[Dim];
};
98 | |
// 1-D vectors in LLVM are automatically padded to the next power of 2.
// We insert explicit padding in to account for this.
// (The Vector1D base chosen here is the padded or unpadded specialization,
// selected by whether sizeof(T[Dim]) is already a power of 2.)
template <typename T, int Dim>
struct Vector<T, Dim>
    : public mlir::detail::Vector1D<T, Dim,
                                    mlir::detail::isPowerOf2(sizeof(T[Dim]))> {
};
106 | |
// Convenience aliases that take the dimensions as leading template
// parameters and the element type last.
template <int D1, typename T>
using Vector1D = Vector<T, D1>;
template <int D1, int D2, typename T>
using Vector2D = Vector<T, D1, D2>;
template <int D1, int D2, int D3, typename T>
using Vector3D = Vector<T, D1, D2, D3>;
template <int D1, int D2, int D3, int D4, typename T>
using Vector4D = Vector<T, D1, D2, D3, D4>;
115 | |
// Copies elements 1..N-1 of `arr` into `res` (which must hold at least N-1
// entries), dropping the leading element.
template <int N>
void dropFront(int64_t arr[N], int64_t *res) {
  for (unsigned i = 1; i < N; ++i)
    res[i - 1] = arr[i];
}
121 | |
122 | //===----------------------------------------------------------------------===// |
123 | // Codegen-compatible structures for StridedMemRef type. |
124 | //===----------------------------------------------------------------------===// |
// Forward declaration so the descriptor types below can return iterators
// from begin()/end(); defined further down in this file.
template <typename T, int Rank>
class StridedMemrefIterator;
127 | |
128 | /// StridedMemRef descriptor type with static rank. |
129 | template <typename T, int N> |
130 | struct StridedMemRefType { |
131 | T *basePtr; |
132 | T *data; |
133 | int64_t offset; |
134 | int64_t sizes[N]; |
135 | int64_t strides[N]; |
136 | |
137 | template <typename Range, |
138 | typename sfinae = decltype(std::declval<Range>().begin())> |
139 | T &operator[](Range &&indices) { |
140 | assert(indices.size() == N &&(static_cast <bool> (indices.size() == N && "indices should match rank in memref subscript" ) ? void (0) : __assert_fail ("indices.size() == N && \"indices should match rank in memref subscript\"" , "mlir/include/mlir/ExecutionEngine/CRunnerUtils.h", 141, __extension__ __PRETTY_FUNCTION__)) |
141 | "indices should match rank in memref subscript")(static_cast <bool> (indices.size() == N && "indices should match rank in memref subscript" ) ? void (0) : __assert_fail ("indices.size() == N && \"indices should match rank in memref subscript\"" , "mlir/include/mlir/ExecutionEngine/CRunnerUtils.h", 141, __extension__ __PRETTY_FUNCTION__)); |
142 | int64_t curOffset = offset; |
143 | for (int dim = N - 1; dim >= 0; --dim) { |
144 | int64_t currentIndex = *(indices.begin() + dim); |
145 | assert(currentIndex < sizes[dim] && "Index overflow")(static_cast <bool> (currentIndex < sizes[dim] && "Index overflow") ? void (0) : __assert_fail ("currentIndex < sizes[dim] && \"Index overflow\"" , "mlir/include/mlir/ExecutionEngine/CRunnerUtils.h", 145, __extension__ __PRETTY_FUNCTION__)); |
146 | curOffset += currentIndex * strides[dim]; |
147 | } |
148 | return data[curOffset]; |
149 | } |
150 | |
151 | StridedMemrefIterator<T, N> begin() { return {*this}; } |
152 | StridedMemrefIterator<T, N> end() { return {*this, -1}; } |
153 | |
154 | // This operator[] is extremely slow and only for sugaring purposes. |
155 | StridedMemRefType<T, N - 1> operator[](int64_t idx) { |
156 | StridedMemRefType<T, N - 1> res; |
157 | res.basePtr = basePtr; |
158 | res.data = data; |
159 | res.offset = offset + idx * strides[0]; |
160 | dropFront<N>(sizes, res.sizes); |
161 | dropFront<N>(strides, res.strides); |
162 | return res; |
163 | } |
164 | }; |
165 | |
166 | /// StridedMemRef descriptor type specialized for rank 1. |
167 | template <typename T> |
168 | struct StridedMemRefType<T, 1> { |
169 | T *basePtr; |
170 | T *data; |
171 | int64_t offset; |
172 | int64_t sizes[1]; |
173 | int64_t strides[1]; |
174 | |
175 | template <typename Range, |
176 | typename sfinae = decltype(std::declval<Range>().begin())> |
177 | T &operator[](Range indices) { |
178 | assert(indices.size() == 1 &&(static_cast <bool> (indices.size() == 1 && "indices should match rank in memref subscript" ) ? void (0) : __assert_fail ("indices.size() == 1 && \"indices should match rank in memref subscript\"" , "mlir/include/mlir/ExecutionEngine/CRunnerUtils.h", 179, __extension__ __PRETTY_FUNCTION__)) |
179 | "indices should match rank in memref subscript")(static_cast <bool> (indices.size() == 1 && "indices should match rank in memref subscript" ) ? void (0) : __assert_fail ("indices.size() == 1 && \"indices should match rank in memref subscript\"" , "mlir/include/mlir/ExecutionEngine/CRunnerUtils.h", 179, __extension__ __PRETTY_FUNCTION__)); |
180 | return (*this)[*indices.begin()]; |
181 | } |
182 | |
183 | StridedMemrefIterator<T, 1> begin() { return {*this}; } |
184 | StridedMemrefIterator<T, 1> end() { return {*this, -1}; } |
185 | |
186 | T &operator[](int64_t idx) { return *(data + offset + idx * strides[0]); } |
187 | }; |
188 | |
189 | /// StridedMemRef descriptor type specialized for rank 0. |
190 | template <typename T> |
191 | struct StridedMemRefType<T, 0> { |
192 | T *basePtr; |
193 | T *data; |
194 | int64_t offset; |
195 | |
196 | template <typename Range, |
197 | typename sfinae = decltype(std::declval<Range>().begin())> |
198 | T &operator[](Range indices) { |
199 | assert((indices.size() == 0) &&(static_cast <bool> ((indices.size() == 0) && "Expect empty indices for 0-rank memref subscript" ) ? void (0) : __assert_fail ("(indices.size() == 0) && \"Expect empty indices for 0-rank memref subscript\"" , "mlir/include/mlir/ExecutionEngine/CRunnerUtils.h", 200, __extension__ __PRETTY_FUNCTION__)) |
200 | "Expect empty indices for 0-rank memref subscript")(static_cast <bool> ((indices.size() == 0) && "Expect empty indices for 0-rank memref subscript" ) ? void (0) : __assert_fail ("(indices.size() == 0) && \"Expect empty indices for 0-rank memref subscript\"" , "mlir/include/mlir/ExecutionEngine/CRunnerUtils.h", 200, __extension__ __PRETTY_FUNCTION__)); |
201 | return data[offset]; |
202 | } |
203 | |
204 | StridedMemrefIterator<T, 0> begin() { return {*this}; } |
205 | StridedMemrefIterator<T, 0> end() { return {*this, 1}; } |
206 | }; |
207 | |
/// Iterate over all elements in a strided memref.
template <typename T, int Rank>
class StridedMemrefIterator {
public:
  StridedMemrefIterator(StridedMemRefType<T, Rank> &descriptor,
                        int64_t offset = 0)
      : offset(offset), descriptor(descriptor) {}
  // Odometer-style advance through the multi-index, updating the linear
  // offset incrementally via the strides.
  StridedMemrefIterator<T, Rank> &operator++() {
    int dim = Rank - 1;
    // Roll over every trailing axis that sits at its last valid index,
    // subtracting that axis's contribution from the offset.
    while (dim >= 0 && indices[dim] == (descriptor.sizes[dim] - 1)) {
      offset -= indices[dim] * descriptor.strides[dim];
      indices[dim] = 0;
      --dim;
    }
    if (dim < 0) {
      // All axes wrapped: past the last element. offset == -1 matches the
      // sentinel used by StridedMemRefType::end().
      offset = -1;
      return *this;
    }
    ++indices[dim];
    offset += descriptor.strides[dim];
    return *this;
  }

  T &operator*() { return descriptor.data[offset]; }
  T *operator->() { return &descriptor.data[offset]; }

  // Current multi-dimensional index of the iterator.
  const std::array<int64_t, Rank> &getIndices() { return indices; }

  bool operator==(const StridedMemrefIterator &other) const {
    return other.offset == offset && &other.descriptor == &descriptor;
  }

  bool operator!=(const StridedMemrefIterator &other) const {
    return !(*this == other);
  }

private:
  /// Offset in the buffer. This can be derived from the indices and the
  /// descriptor.
  int64_t offset = 0;
  /// Array of indices in the multi-dimensional memref.
  std::array<int64_t, Rank> indices = {};
  /// Descriptor for the strided memref.
  StridedMemRefType<T, Rank> &descriptor;
};
253 | |
254 | /// Iterate over all elements in a 0-ranked strided memref. |
255 | template <typename T> |
256 | class StridedMemrefIterator<T, 0> { |
257 | public: |
258 | StridedMemrefIterator(StridedMemRefType<T, 0> &descriptor, int64_t offset = 0) |
259 | : elt(descriptor.data + offset) {} |
260 | |
261 | StridedMemrefIterator<T, 0> &operator++() { |
262 | ++elt; |
263 | return *this; |
264 | } |
265 | |
266 | T &operator*() { return *elt; } |
267 | T *operator->() { return elt; } |
268 | |
269 | // There are no indices for a 0-ranked memref, but this API is provided for |
270 | // consistency with the general case. |
271 | const std::array<int64_t, 0> &getIndices() { |
272 | // Since this is a 0-array of indices we can keep a single global const |
273 | // copy. |
274 | static const std::array<int64_t, 0> indices = {}; |
275 | return indices; |
276 | } |
277 | |
278 | bool operator==(const StridedMemrefIterator &other) const { |
279 | return other.elt == elt; |
280 | } |
281 | |
282 | bool operator!=(const StridedMemrefIterator &other) const { |
283 | return !(*this == other); |
284 | } |
285 | |
286 | private: |
287 | /// Pointer to the single element in the zero-ranked memref. |
288 | T *elt; |
289 | }; |
290 | |
291 | //===----------------------------------------------------------------------===// |
292 | // Codegen-compatible structure for UnrankedMemRef type. |
293 | //===----------------------------------------------------------------------===// |
294 | // Unranked MemRef |
// Unranked MemRef descriptor: the rank plus an opaque pointer to the ranked
// descriptor data (interpreted elsewhere via StridedMemRefType).
template <typename T>
struct UnrankedMemRefType {
  int64_t rank;
  void *descriptor;
};
300 | |
301 | //===----------------------------------------------------------------------===// |
302 | // DynamicMemRefType type. |
303 | //===----------------------------------------------------------------------===// |
304 | // A reference to one of the StridedMemRef types. |
305 | template <typename T> |
306 | class DynamicMemRefType { |
307 | public: |
308 | explicit DynamicMemRefType(const StridedMemRefType<T, 0> &memRef) |
309 | : rank(0), basePtr(memRef.basePtr), data(memRef.data), |
310 | offset(memRef.offset), sizes(nullptr), strides(nullptr) {} |
311 | template <int N> |
312 | explicit DynamicMemRefType(const StridedMemRefType<T, N> &memRef) |
313 | : rank(N), basePtr(memRef.basePtr), data(memRef.data), |
314 | offset(memRef.offset), sizes(memRef.sizes), strides(memRef.strides) {} |
315 | explicit DynamicMemRefType(const UnrankedMemRefType<T> &memRef) |
316 | : rank(memRef.rank) { |
317 | auto *desc = static_cast<StridedMemRefType<T, 1> *>(memRef.descriptor); |
318 | basePtr = desc->basePtr; |
319 | data = desc->data; |
320 | offset = desc->offset; |
321 | sizes = rank == 0 ? nullptr : desc->sizes; |
322 | strides = sizes + rank; |
323 | } |
324 | |
325 | int64_t rank; |
326 | T *basePtr; |
327 | T *data; |
328 | int64_t offset; |
329 | const int64_t *sizes; |
330 | const int64_t *strides; |
331 | }; |
332 | |
333 | //===----------------------------------------------------------------------===// |
334 | // Small runtime support library for memref.copy lowering during codegen. |
335 | //===----------------------------------------------------------------------===// |
336 | extern "C" MLIR_CRUNNERUTILS_EXPORT__attribute__((visibility("default"))) void |
337 | memrefCopy(int64_t elemSize, UnrankedMemRefType<char> *src, |
338 | UnrankedMemRefType<char> *dst); |
339 | |
340 | //===----------------------------------------------------------------------===// |
341 | // Small runtime support library for vector.print lowering during codegen. |
342 | //===----------------------------------------------------------------------===// |
343 | extern "C" MLIR_CRUNNERUTILS_EXPORT__attribute__((visibility("default"))) void printI64(int64_t i); |
344 | extern "C" MLIR_CRUNNERUTILS_EXPORT__attribute__((visibility("default"))) void printU64(uint64_t u); |
345 | extern "C" MLIR_CRUNNERUTILS_EXPORT__attribute__((visibility("default"))) void printF32(float f); |
346 | extern "C" MLIR_CRUNNERUTILS_EXPORT__attribute__((visibility("default"))) void printF64(double d); |
347 | extern "C" MLIR_CRUNNERUTILS_EXPORT__attribute__((visibility("default"))) void printOpen(); |
348 | extern "C" MLIR_CRUNNERUTILS_EXPORT__attribute__((visibility("default"))) void printClose(); |
349 | extern "C" MLIR_CRUNNERUTILS_EXPORT__attribute__((visibility("default"))) void printComma(); |
350 | extern "C" MLIR_CRUNNERUTILS_EXPORT__attribute__((visibility("default"))) void printNewline(); |
351 | |
352 | //===----------------------------------------------------------------------===// |
353 | // Small runtime support library for timing execution and printing GFLOPS |
354 | //===----------------------------------------------------------------------===// |
355 | extern "C" MLIR_CRUNNERUTILS_EXPORT__attribute__((visibility("default"))) void printFlops(double flops); |
356 | extern "C" MLIR_CRUNNERUTILS_EXPORT__attribute__((visibility("default"))) double rtclock(); |
357 | |
358 | #endif // MLIR_EXECUTIONENGINE_CRUNNERUTILS_H |