File: projects/compiler-rt/lib/scudo/scudo_allocator_secondary.h
Warning: line 122, column 9: Value stored to 'ReservedEnd' is never read
1 | //===-- scudo_allocator_secondary.h -----------------------------*- C++ -*-===//
2 | //
3 | //                     The LLVM Compiler Infrastructure
4 | //
5 | // This file is distributed under the University of Illinois Open Source
6 | // License. See LICENSE.TXT for details.
7 | //
8 | //===----------------------------------------------------------------------===//
9 | ///
10 | /// Scudo Secondary Allocator.
11 | /// This services allocations that are too large to be serviced by the Primary
12 | /// Allocator. It is directly backed by the memory mapping functions of the
13 | /// operating system.
14 | ///
15 | //===----------------------------------------------------------------------===//
16 |
17 | #ifndef SCUDO_ALLOCATOR_SECONDARY_H_
18 | #define SCUDO_ALLOCATOR_SECONDARY_H_
19 |
20 | #ifndef SCUDO_ALLOCATOR_H_
21 | # error "This file must be included inside scudo_allocator.h."
22 | #endif
23 |
24 | // Secondary backed allocations are standalone chunks that contain extra
25 | // information stored in a LargeChunk::Header prior to the frontend's header.
26 | //
27 | // The secondary takes care of alignment requirements (so that it can release
28 | // unnecessary pages in the rare event of larger alignments), and as such must
29 | // know about the frontend's header size.
30 | //
31 | // Since Windows doesn't support partial releasing of a reserved memory region,
32 | // we have to keep track of both the reserved and the committed memory.
33 | //
34 | // The resulting chunk resembles the following:
35 | //
36 | //       +--------------------+
37 | //       | Guard page(s)      |
38 | //       +--------------------+
39 | //       | Unused space*      |
40 | //       +--------------------+
41 | //       | LargeChunk::Header |
42 | //       +--------------------+
43 | //       | {Unp,P}ackedHeader |
44 | //       +--------------------+
45 | //       | Data (aligned)     |
46 | //       +--------------------+
47 | //       | Unused space**     |
48 | //       +--------------------+
49 | //       | Guard page(s)      |
50 | //       +--------------------+
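To make the layout concrete, here is a minimal sketch of the reservation arithmetic, assuming a 4096-byte page and 16-byte rounded headers (the real values come from GetPageSizeCached(), LargeChunk::getHeaderSize() and Chunk::getHeaderSize(); the numbers below are illustrative only):

    // Sketch: sizing a secondary chunk for a 20000-byte user request in the
    // MinAlignment case (no extra alignment slack reserved).
    #include <assert.h>
    #include <stdint.h>

    int main() {
      const uint64_t PageSize = 4096;   // assumed page size
      const uint64_t FrontendHdr = 16;  // assumed Chunk::getHeaderSize()
      const uint64_t LargeHdr = 16;     // assumed LargeChunk::getHeaderSize()
      const uint64_t Size = 20000 + FrontendHdr;  // as passed to Allocate()
      uint64_t ReservedSize = Size + LargeHdr;    // 20032
      // Round up to a page boundary (RoundUpTo for a power-of-two page size).
      ReservedSize = (ReservedSize + PageSize - 1) & ~(PageSize - 1);  // 20480
      ReservedSize += 2 * PageSize;  // front and back guard pages -> 28672
      assert(ReservedSize == 28672);
      return 0;
    }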
51 |
52 | namespace LargeChunk {
53 |   struct Header {
54 |     ReservedAddressRange StoredRange;
55 |     uptr CommittedSize;
56 |     uptr Size;
57 |   };
58 |   constexpr uptr getHeaderSize() {
59 |     return RoundUpTo(sizeof(Header), MinAlignment);
60 |   }
61 |   static Header *getHeader(uptr Ptr) {
62 |     return reinterpret_cast<Header *>(Ptr - getHeaderSize());
63 |   }
64 |   static Header *getHeader(const void *Ptr) {
65 |     return getHeader(reinterpret_cast<uptr>(Ptr));
66 |   }
67 | }  // namespace LargeChunk
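The getHeader() overloads simply step back from the frontend chunk pointer by the rounded-up header size; a tiny usage sketch (the 32-byte header size and the address are hypothetical):

    // Sketch: where LargeChunk::getHeader() lands relative to a chunk pointer.
    #include <assert.h>
    #include <stdint.h>

    int main() {
      const uint64_t HeaderSize = 32;      // assumed RoundUpTo(sizeof(Header), MinAlignment)
      const uint64_t ChunkPtr = 0x101020;  // hypothetical frontend chunk address
      const uint64_t HeaderAddr = ChunkPtr - HeaderSize;  // what getHeader() computes
      assert(HeaderAddr + HeaderSize == ChunkPtr);
      return 0;
    }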
68 |
69 | class LargeMmapAllocator {
70 |  public:
71 |   void Init() {
72 |     internal_memset(this, 0, sizeof(*this));
73 |   }
74 |
75 |   void *Allocate(AllocatorStats *Stats, uptr Size, uptr Alignment) {
76 |     const uptr UserSize = Size - Chunk::getHeaderSize();
77 |     // The Scudo frontend prevents us from allocating more than
78 |     // MaxAllowedMallocSize, so integer overflow checks would be superfluous.
79 |     uptr ReservedSize = Size + LargeChunk::getHeaderSize();
80 |     if (UNLIKELY(Alignment > MinAlignment))
81 |       ReservedSize += Alignment;
82 |     const uptr PageSize = GetPageSizeCached();
83 |     ReservedSize = RoundUpTo(ReservedSize, PageSize);
84 |     // Account for 2 guard pages, one before and one after the chunk.
85 |     ReservedSize += 2 * PageSize;
86 |
87 |     ReservedAddressRange AddressRange;
88 |     uptr ReservedBeg = AddressRange.Init(ReservedSize, SecondaryAllocatorName);
89 |     if (UNLIKELY(ReservedBeg == ~static_cast<uptr>(0)))
90 |       return nullptr;
91 |     // A page-aligned pointer is assumed after that, so check it now.
92 |     DCHECK(IsAligned(ReservedBeg, PageSize));
93 |     uptr ReservedEnd = ReservedBeg + ReservedSize;
94 |     // The beginning of the user area for that allocation comes after the
95 |     // initial guard page, and both headers. This is the pointer that has to
96 |     // abide by alignment requirements.
97 |     uptr CommittedBeg = ReservedBeg + PageSize;
98 |     uptr UserBeg = CommittedBeg + HeadersSize;
99 |     uptr UserEnd = UserBeg + UserSize;
100 |     uptr CommittedEnd = RoundUpTo(UserEnd, PageSize);
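At this point the boundaries computed above satisfy the following ordering (restated as a sketch; the trimming pass below may still move CommittedBeg and CommittedEnd inward for large alignments):

    // ReservedBeg  < CommittedBeg == ReservedBeg + PageSize    (front guard page)
    // CommittedBeg < UserBeg      == CommittedBeg + HeadersSize
    // UserBeg      < UserEnd      <= CommittedEnd              (page-rounded)
    // CommittedEnd <= ReservedEnd - PageSize                   (back guard page)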
101 |
102 |     // In the rare event of larger alignments, we will attempt to fit the mmap
103 |     // area better and unmap extraneous memory. This will also ensure that the
104 |     // offset and unused bytes field of the header stay small.
105 |     if (UNLIKELY(Alignment > MinAlignment)) {
106 |       if (!IsAligned(UserBeg, Alignment)) {
107 |         UserBeg = RoundUpTo(UserBeg, Alignment);
108 |         CommittedBeg = RoundDownTo(UserBeg - HeadersSize, PageSize);
109 |         const uptr NewReservedBeg = CommittedBeg - PageSize;
110 |         DCHECK_GE(NewReservedBeg, ReservedBeg);
111 |         if (!SANITIZER_WINDOWS && NewReservedBeg != ReservedBeg) {
112 |           AddressRange.Unmap(ReservedBeg, NewReservedBeg - ReservedBeg);
113 |           ReservedBeg = NewReservedBeg;
114 |         }
115 |         UserEnd = UserBeg + UserSize;
116 |         CommittedEnd = RoundUpTo(UserEnd, PageSize);
117 |       }
118 |       const uptr NewReservedEnd = CommittedEnd + PageSize;
119 |       DCHECK_LE(NewReservedEnd, ReservedEnd);
120 |       if (!SANITIZER_WINDOWS && NewReservedEnd != ReservedEnd) {
121 |         AddressRange.Unmap(NewReservedEnd, ReservedEnd - NewReservedEnd);
122 |         ReservedEnd = NewReservedEnd;
            Value stored to 'ReservedEnd' is never read
123 |       }
124 |     }
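This block is the subject of the warning: ReservedEnd is read by the DCHECK_LE and by the Unmap() arithmetic just above, but the store on line 122 is the last touch of the variable before it falls out of use, so the assigned value is never read. One possible fix, sketched here (not necessarily how upstream resolved it), is to drop the dead assignment:

    // Sketch of a possible fix: nothing reads ReservedEnd past this point,
    // so the trailing store can simply be removed.
    const uptr NewReservedEnd = CommittedEnd + PageSize;
    DCHECK_LE(NewReservedEnd, ReservedEnd);
    if (!SANITIZER_WINDOWS && NewReservedEnd != ReservedEnd)
      AddressRange.Unmap(NewReservedEnd, ReservedEnd - NewReservedEnd);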
125 |
126 |     DCHECK_LE(UserEnd, CommittedEnd);
127 |     const uptr CommittedSize = CommittedEnd - CommittedBeg;
128 |     // Actually mmap the memory, preserving the guard pages on either side.
129 |     CHECK_EQ(CommittedBeg, AddressRange.Map(CommittedBeg, CommittedSize));
130 |     const uptr Ptr = UserBeg - Chunk::getHeaderSize();
131 |     LargeChunk::Header *H = LargeChunk::getHeader(Ptr);
132 |     H->StoredRange = AddressRange;
133 |     H->Size = CommittedEnd - Ptr;
134 |     H->CommittedSize = CommittedSize;
135 |
136 |     // The primary adds the whole class size to the stats when allocating a
137 |     // chunk, so we will do something similar here. But we will not account for
138 |     // the guard pages.
139 |     {
140 |       SpinMutexLock l(&StatsMutex);
141 |       Stats->Add(AllocatorStatAllocated, CommittedSize);
142 |       Stats->Add(AllocatorStatMapped, CommittedSize);
143 |       AllocatedBytes += CommittedSize;
144 |       if (LargestSize < CommittedSize)
145 |         LargestSize = CommittedSize;
146 |       NumberOfAllocs++;
147 |     }
148 |
149 |     return reinterpret_cast<void *>(Ptr);
150 |   }
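Note that the stats are charged with CommittedSize, not ReservedSize; continuing the sizing sketch above (assumed 4096-byte pages, 20000-byte request):

    //   CommittedSize = 20480  -> added to AllocatorStatAllocated / AllocatorStatMapped
    //   ReservedSize  = 28672  -> the two guard pages (2 * 4096) never hit the stats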
151 |
152 |   void Deallocate(AllocatorStats *Stats, void *Ptr) {
153 |     LargeChunk::Header *H = LargeChunk::getHeader(Ptr);
154 |     // Since we're unmapping the entirety of where the ReservedAddressRange
155 |     // actually is, copy it onto the stack beforehand.
156 |     ReservedAddressRange AddressRange = H->StoredRange;
157 |     const uptr Size = H->CommittedSize;
158 |     {
159 |       SpinMutexLock l(&StatsMutex);
160 |       Stats->Sub(AllocatorStatAllocated, Size);
161 |       Stats->Sub(AllocatorStatMapped, Size);
162 |       FreedBytes += Size;
163 |       NumberOfFrees++;
164 |     }
165 |     AddressRange.Unmap(reinterpret_cast<uptr>(AddressRange.base()),
166 |                        AddressRange.size());
167 |   }
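The stack copy of StoredRange at line 156 is what makes the final Unmap() safe: H lives inside the very mapping being destroyed, so reaching through the header during the unmap would be a use-after-unmap. Sketched, with names as in the code above:

    // BAD (sketch): reads the range object out of memory that Unmap() releases.
    //   ReservedAddressRange *R = &H->StoredRange;
    //   R->Unmap(reinterpret_cast<uptr>(R->base()), R->size());
    //
    // OK (what Deallocate does): the copy lives on the stack, untouched by Unmap().
    //   ReservedAddressRange Copy = H->StoredRange;
    //   Copy.Unmap(reinterpret_cast<uptr>(Copy.base()), Copy.size());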
168 |
169 |   static uptr GetActuallyAllocatedSize(void *Ptr) {
170 |     return LargeChunk::getHeader(Ptr)->Size;
171 |   }
172 |
173 |   void PrintStats() {
174 |     Printf("Stats: LargeMmapAllocator: allocated %zd times (%zd K), "
175 |            "freed %zd times (%zd K), remains %zd (%zd K) max %zd M\n",
176 |            NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees,
177 |            FreedBytes >> 10, NumberOfAllocs - NumberOfFrees,
178 |            (AllocatedBytes - FreedBytes) >> 10, LargestSize >> 20);
179 |   }
180 |
181 |  private:
182 |   static constexpr uptr HeadersSize =
183 |       LargeChunk::getHeaderSize() + Chunk::getHeaderSize();
184 |
185 |   StaticSpinMutex StatsMutex;
186 |   u32 NumberOfAllocs;
187 |   u32 NumberOfFrees;
188 |   uptr AllocatedBytes;
189 |   uptr FreedBytes;
190 |   uptr LargestSize;
191 | };
192 |
193 | #endif  // SCUDO_ALLOCATOR_SECONDARY_H_