//===-- sanitizer_quarantine.h ----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Memory quarantine for AddressSanitizer and potentially other tools.
// Quarantine caches a specified amount of memory in per-thread caches and
// then evicts it to a global FIFO queue. When the queue reaches a specified
// threshold, the oldest memory is recycled.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_QUARANTINE_H
#define SANITIZER_QUARANTINE_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
#include "sanitizer_list.h"

namespace __sanitizer {

template<typename Node> class QuarantineCache;

struct QuarantineBatch {
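  // With the three header fields below, 1021 pointers fill the batch to
  // exactly one 4 KiB (32-bit) or 8 KiB (64-bit) allocation.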
  static const uptr kSize = 1021;
  QuarantineBatch *next;
  uptr size;
  uptr count;
  void *batch[kSize];

  void init(void *ptr, uptr size) {
    count = 1;
    batch[0] = ptr;
    this->size = size + sizeof(QuarantineBatch);  // Account for the batch size.
  }

  // The total size of quarantined nodes recorded in this batch.
  uptr quarantined_size() const {
    return size - sizeof(QuarantineBatch);
  }

  void push_back(void *ptr, uptr size) {
    CHECK_LT(count, kSize);
    batch[count++] = ptr;
    this->size += size;
  }
  bool can_merge(const QuarantineBatch* const from) const {
    return count + from->count <= kSize;
  }

  void merge(QuarantineBatch* const from) {
    CHECK_LE(count + from->count, kSize);
    CHECK_GE(size, sizeof(QuarantineBatch));

    for (uptr i = 0; i < from->count; ++i)
      batch[count + i] = from->batch[i];
    count += from->count;
    size += from->quarantined_size();

    from->count = 0;
    from->size = sizeof(QuarantineBatch);
  }
};

COMPILER_CHECK(sizeof(QuarantineBatch) <= (1 << 13));  // 8Kb.
// The callback interface is:
// void Callback::Recycle(Node *ptr);
// void *Callback::Allocate(uptr size);
// void Callback::Deallocate(void *ptr);
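//
// A minimal conforming callback might look as follows (a sketch; the
// InternalAlloc/InternalFree calls stand in for whatever allocation routines
// the embedding tool actually uses):
//   struct ExampleCallback {
//     void Recycle(Node *ptr) { /* Actually free the node's memory. */ }
//     void *Allocate(uptr size) { return InternalAlloc(size); }
//     void Deallocate(void *ptr) { InternalFree(ptr); }
//   };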
template<typename Callback, typename Node>
class Quarantine {
 public:
  typedef QuarantineCache<Callback> Cache;

  explicit Quarantine(LinkerInitialized)
      : cache_(LINKER_INITIALIZED) {
  }

  void Init(uptr size, uptr cache_size) {
    // Thread local quarantine size can be zero only when global quarantine
    // size is zero (it allows us to perform just one atomic read per Put()
    // call).
    CHECK((size == 0 && cache_size == 0) || cache_size != 0);

    atomic_store_relaxed(&max_size_, size);
    atomic_store_relaxed(&min_size_, size / 10 * 9);  // 90% of max size.
    atomic_store_relaxed(&max_cache_size_, cache_size);

    cache_mutex_.Init();
    recycle_mutex_.Init();
  }

  uptr GetSize() const { return atomic_load_relaxed(&max_size_); }
  uptr GetCacheSize() const {
    return atomic_load_relaxed(&max_cache_size_);
  }
  void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
    uptr cache_size = GetCacheSize();
    if (cache_size) {
      c->Enqueue(cb, ptr, size);
    } else {
      // GetCacheSize() == 0 only when GetSize() == 0 (see Init).
      cb.Recycle(ptr);
    }
    // Check cache size anyway to accommodate a runtime cache_size change.
    if (c->Size() > cache_size)
      Drain(c, cb);
  }

  void NOINLINE Drain(Cache *c, Callback cb) {
    {
      SpinMutexLock l(&cache_mutex_);
      cache_.Transfer(c);
    }
    if (cache_.Size() > GetSize() && recycle_mutex_.TryLock())
      Recycle(atomic_load_relaxed(&min_size_), cb);
  }

  void NOINLINE DrainAndRecycle(Cache *c, Callback cb) {
    {
      SpinMutexLock l(&cache_mutex_);
      cache_.Transfer(c);
    }
    recycle_mutex_.Lock();
    Recycle(0, cb);
  }

  void PrintStats() const {
    // It assumes that the world is stopped, just as the allocator's
    // PrintStats.
    Printf("Quarantine limits: global: %zdMb; thread local: %zdKb\n",
           GetSize() >> 20, GetCacheSize() >> 10);
    cache_.PrintStats();
  }
 private:
  // Read-only data.
  char pad0_[kCacheLineSize];
  atomic_uintptr_t max_size_;
  atomic_uintptr_t min_size_;
  atomic_uintptr_t max_cache_size_;
  char pad1_[kCacheLineSize];

  StaticSpinMutex cache_mutex_;
  StaticSpinMutex recycle_mutex_;
  Cache cache_;
  char pad2_[kCacheLineSize];
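
  // Entered with recycle_mutex_ held (see Drain and DrainAndRecycle);
  // releases it before invoking the callbacks in DoRecycle() so that the
  // potentially slow recycling of chunks runs outside of the lock.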
  void NOINLINE Recycle(uptr min_size, Callback cb) {
    Cache tmp;
    {
      SpinMutexLock l(&cache_mutex_);
      // Go over the batches and merge partially filled ones so that the
      // batches themselves (whose memory is counted against the quarantine
      // limit) do not outweigh the user's quarantined chunks, which would
      // diminish the purpose of the quarantine.
      uptr cache_size = cache_.Size();
      uptr overhead_size = cache_.OverheadSize();
      CHECK_GE(cache_size, overhead_size);

      // Do the merge only when overhead exceeds this predefined limit (might
      // require some tuning). It saves us a merge attempt when the batch list
      // quarantine is unlikely to contain batches suitable for merge.
      const uptr kOverheadThresholdPercents = 100;
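      // The check below rearranges to
      //   overhead_size * 100 >
      //       (cache_size - overhead_size) * kOverheadThresholdPercents,
      // i.e. merge once the batch headers exceed 100% of the quarantined
      // payload bytes.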
      if (cache_size > overhead_size &&
          overhead_size * (100 + kOverheadThresholdPercents) >
              cache_size * kOverheadThresholdPercents) {
        cache_.MergeBatches(&tmp);
      }

      // Extract enough chunks from the quarantine to get below the max
      // quarantine size and leave some leeway for the newly quarantined
      // chunks.
      while (cache_.Size() > min_size) {
        tmp.EnqueueBatch(cache_.DequeueBatch());
      }
    }
    recycle_mutex_.Unlock();
    DoRecycle(&tmp, cb);
  }
  void NOINLINE DoRecycle(Cache *c, Callback cb) {
    while (QuarantineBatch *b = c->DequeueBatch()) {
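      // Prefetch the first chunks now, then keep prefetching kPrefetch
      // elements ahead of the Recycle() loop below to hide the chunks'
      // memory latency.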
      const uptr kPrefetch = 16;
      CHECK(kPrefetch <= ARRAY_SIZE(b->batch));
      for (uptr i = 0; i < kPrefetch; i++)
        PREFETCH(b->batch[i]);
      for (uptr i = 0, count = b->count; i < count; i++) {
        if (i + kPrefetch < count)
          PREFETCH(b->batch[i + kPrefetch]);
        cb.Recycle((Node*)b->batch[i]);
      }
      cb.Deallocate(b);
    }
  }
};

// Per-thread cache of memory blocks.
template<typename Callback>
class QuarantineCache {
 public:
  explicit QuarantineCache(LinkerInitialized) {
  }

  QuarantineCache()
      : size_() {
    list_.clear();
  }

  // Total memory used, including internal accounting.
  uptr Size() const {
    return atomic_load_relaxed(&size_);
  }

  // Memory used for internal accounting.
  uptr OverheadSize() const {
    return list_.size() * sizeof(QuarantineBatch);
  }
  void Enqueue(Callback cb, void *ptr, uptr size) {
    if (list_.empty() || list_.back()->count == QuarantineBatch::kSize) {
      QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b));
      CHECK(b);
      b->init(ptr, size);
      EnqueueBatch(b);
    } else {
      list_.back()->push_back(ptr, size);
      SizeAdd(size);
    }
  }

  void Transfer(QuarantineCache *from_cache) {
    list_.append_back(&from_cache->list_);
    SizeAdd(from_cache->Size());
    atomic_store_relaxed(&from_cache->size_, 0);
  }

  void EnqueueBatch(QuarantineBatch *b) {
    list_.push_back(b);
    SizeAdd(b->size);
  }

  QuarantineBatch *DequeueBatch() {
    if (list_.empty())
      return nullptr;
    QuarantineBatch *b = list_.front();
    list_.pop_front();
    SizeSub(b->size);
    return b;
  }
  void MergeBatches(QuarantineCache *to_deallocate) {
    uptr extracted_size = 0;
    QuarantineBatch *current = list_.front();
    while (current && current->next) {
      if (current->can_merge(current->next)) {
        QuarantineBatch *extracted = current->next;

        // Move all the chunks into the current batch.
        current->merge(extracted);

        CHECK_EQ(extracted->count, 0);
        CHECK_EQ(extracted->size, sizeof(QuarantineBatch));

        // Remove the next batch from the list and account for its size.
        list_.extract(current, extracted);
        extracted_size += extracted->size;

        // Add it to deallocation list.
        to_deallocate->EnqueueBatch(extracted);
      } else {
        current = current->next;
      }
    }
    SizeSub(extracted_size);
  }
  void PrintStats() const {
    uptr batch_count = 0;
    uptr total_overhead_bytes = 0;
    uptr total_bytes = 0;
    uptr total_quarantine_chunks = 0;
    for (List::ConstIterator it = list_.begin(); it != list_.end(); ++it) {
      batch_count++;
      total_bytes += (*it).size;
      total_overhead_bytes += (*it).size - (*it).quarantined_size();
      total_quarantine_chunks += (*it).count;
    }
    uptr quarantine_chunks_capacity = batch_count * QuarantineBatch::kSize;
    int chunks_usage_percent = quarantine_chunks_capacity == 0 ?
        0 : total_quarantine_chunks * 100 / quarantine_chunks_capacity;
    uptr total_quarantined_bytes = total_bytes - total_overhead_bytes;
    int memory_overhead_percent = total_quarantined_bytes == 0 ?
        0 : total_overhead_bytes * 100 / total_quarantined_bytes;
    Printf("Global quarantine stats: batches: %zd; bytes: %zd (user: %zd); "
           "chunks: %zd (capacity: %zd); %d%% chunks used; %d%% memory "
           "overhead\n",
           batch_count, total_bytes, total_quarantined_bytes,
           total_quarantine_chunks, quarantine_chunks_capacity,
           chunks_usage_percent, memory_overhead_percent);
  }
 private:
  typedef IntrusiveList<QuarantineBatch> List;

  List list_;
  atomic_uintptr_t size_;
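
  // A relaxed load + store rather than an atomic RMW: SizeAdd/SizeSub are
  // only ever called by the owning thread or under cache_mutex_, so they
  // never race with each other; the atomic size_ merely makes the lock-free
  // Size() reads (e.g. in Quarantine::Drain) well-defined.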
  void SizeAdd(uptr add) {
    atomic_store_relaxed(&size_, Size() + add);
  }
  void SizeSub(uptr sub) {
    atomic_store_relaxed(&size_, Size() - sub);
  }
};

} // namespace __sanitizer

#endif  // SANITIZER_QUARANTINE_H