OILS / mycpp / mark_sweep_heap.cc

#include "mycpp/mark_sweep_heap.h"

#include <inttypes.h>  // PRId64
#include <stdlib.h>    // getenv()
#include <string.h>    // strlen()
#include <sys/time.h>  // gettimeofday()
#include <time.h>      // clock_gettime(), CLOCK_PROCESS_CPUTIME_ID
#include <unistd.h>    // STDERR_FILENO

#include "_build/detected-cpp-config.h"  // for GC_TIMING
#include "mycpp/gc_builtins.h"           // StringToInt()
#include "mycpp/gc_slab.h"

// TODO: Remove this guard when we have separate binaries
#if MARK_SWEEP

void MarkSweepHeap::Init() {
  Init(1000);  // collect at 1000 objects in tests
}

void MarkSweepHeap::Init(int gc_threshold) {
  gc_threshold_ = gc_threshold;

  char* e;
  e = getenv("OILS_GC_THRESHOLD");
  if (e) {
    int result;
    if (StringToInt(e, strlen(e), 10, &result)) {
      // Override collection threshold
      gc_threshold_ = result;
    }
  }

  // only for developers
  e = getenv("_OILS_GC_VERBOSE");
  if (e && strcmp(e, "1") == 0) {
    gc_verbose_ = true;
  }

  live_objs_.reserve(KiB(10));
  roots_.reserve(KiB(1));  // prevent resizing in common case
}

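// For example, OILS_GC_THRESHOLD=100000 raises the collection threshold, and
// _OILS_GC_VERBOSE=1 enables per-collection logging (100000 is an
// illustrative value, not a default).
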
int MarkSweepHeap::MaybeCollect() {
  // Maybe collect BEFORE allocation, because the new object won't be rooted
  #if GC_ALWAYS
  int result = Collect();
  #else
  int result = -1;
  if (num_live() > gc_threshold_) {
    result = Collect();
  }
  #endif

  num_gc_points_++;  // this is a manual collection point
  return result;
}

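// Note: num_gc_points_ counts every call to MaybeCollect(), while
// num_collections_ (incremented in Sweep()) only counts collections that
// actually ran.
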
  #if defined(BUMP_SMALL)
  #include "mycpp/bump_leak_heap.h"

BumpLeakHeap gBumpLeak;
  #endif

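// gBumpLeak backs the BUMP_SMALL experiment in Allocate() below: small
// objects are bump-allocated and never freed, which is why the experiment is
// only valid with GC effectively disabled.
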
// Allocate and update stats
// TODO: Make this interface nicer.
void* MarkSweepHeap::Allocate(size_t num_bytes, int* obj_id, int* pool_id) {
  // log("Allocate %d", num_bytes);
  #ifndef NO_POOL_ALLOC
  if (num_bytes <= pool1_.kMaxObjSize) {
    *pool_id = 1;
    return pool1_.Allocate(obj_id);
  }
  if (num_bytes <= pool2_.kMaxObjSize) {
    *pool_id = 2;
    return pool2_.Allocate(obj_id);
  }
  *pool_id = 0;  // malloc(), not a pool
  #endif

  // Experiment: does the pool allocator approximate a bump allocator?  Uses
  // the pool2_ threshold of 48 bytes.  This only works with GC off --
  // OILS_GC_THRESHOLD=[big].
  #ifdef BUMP_SMALL
  if (num_bytes <= 48) {
    return gBumpLeak.Allocate(num_bytes);
  }
  #endif

  if (to_free_.empty()) {
    // Use higher object IDs
    *obj_id = greatest_obj_id_;
    greatest_obj_id_++;

    // This check is ON in release mode
    CHECK(greatest_obj_id_ <= kMaxObjId);
  } else {
    ObjHeader* dead = to_free_.back();
    to_free_.pop_back();

    *obj_id = dead->obj_id;  // reuse the dead object's ID

    free(dead);
  }

  void* result = malloc(num_bytes);
  DCHECK(result != nullptr);

  live_objs_.push_back(static_cast<ObjHeader*>(result));

  num_live_++;
  num_allocated_++;
  bytes_allocated_ += num_bytes;

  return result;
}

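// Note on ID reuse: recycling IDs from to_free_ keeps the obj_id space dense.
// mark_set_ is sized by greatest_obj_id_ in Collect(), so a sparse ID space
// would waste mark-bitmap memory.
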
  #if 0
void* MarkSweepHeap::Reallocate(void* p, size_t num_bytes) {
  FAIL(kNotImplemented);
  // This causes a double-free in the GC!
  // return realloc(p, num_bytes);
}
  #endif

// "Leaf" for marking / TraceChildren
//
// - Abort if nullptr
// - Find the header (get rid of this step when we remove the ObjHeader member)
// - HeapTag::{Opaque,FixedSize,Scanned} have their mark bits set
// - HeapTag::{FixedSize,Scanned} are also pushed on the gray stack

void MarkSweepHeap::MaybeMarkAndPush(RawObject* obj) {
  ObjHeader* header = ObjHeader::FromObject(obj);
  if (header->heap_tag == HeapTag::Global) {  // don't mark or push
    return;
  }

  int obj_id = header->obj_id;
  #ifndef NO_POOL_ALLOC
  if (header->pool_id == 1) {
    if (pool1_.IsMarked(obj_id)) {
      return;
    }
    pool1_.Mark(obj_id);
  } else if (header->pool_id == 2) {
    if (pool2_.IsMarked(obj_id)) {
      return;
    }
    pool2_.Mark(obj_id);
  } else
  #endif
  {
    if (mark_set_.IsMarked(obj_id)) {
      return;
    }
    mark_set_.Mark(obj_id);
  }

  switch (header->heap_tag) {
    case HeapTag::Opaque:  // e.g. strings have no children
      break;

    case HeapTag::Scanned:  // these 2 types have children
    case HeapTag::FixedSize:
      gray_stack_.push_back(header);  // Push the header, not the object!
      break;

    default:
      FAIL(kShouldNotGetHere);
  }
}

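// Each pool keeps its own mark bitmap, indexed by obj_id; malloc()'d objects
// share mark_set_.  All three are prepared at the start of Collect(), via
// PrepareForGc() and ReInit().
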
void MarkSweepHeap::TraceChildren() {
  while (!gray_stack_.empty()) {
    ObjHeader* header = gray_stack_.back();
    gray_stack_.pop_back();

    switch (header->heap_tag) {
      case HeapTag::FixedSize: {
        auto fixed = reinterpret_cast<LayoutFixed*>(header->ObjectAddress());
        int mask = FIELD_MASK(*header);

        for (int i = 0; i < kFieldMaskBits; ++i) {
          if (mask & (1 << i)) {
            RawObject* child = fixed->children_[i];
            if (child) {
              MaybeMarkAndPush(child);
            }
          }
        }
        break;
      }

      case HeapTag::Scanned: {
        auto slab =
            reinterpret_cast<Slab<RawObject*>*>(header->ObjectAddress());

        int n = NUM_POINTERS(*header);
        for (int i = 0; i < n; ++i) {
          RawObject* child = slab->items_[i];
          if (child) {
            MaybeMarkAndPush(child);
          }
        }
        break;
      }
      default:
        // Only FixedSize and Scanned are pushed
        FAIL(kShouldNotGetHere);
    }
  }
}

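// FIELD_MASK(*header) has one bit per field of a fixed-size object: bit i is
// set when children_[i] holds a traceable pointer, and clear for non-pointer
// fields, which the loop above skips.
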
void MarkSweepHeap::Sweep() {
  #ifndef NO_POOL_ALLOC
  pool1_.Sweep();
  pool2_.Sweep();
  #endif

  int last_live_index = 0;
  int num_objs = live_objs_.size();
  for (int i = 0; i < num_objs; ++i) {
    ObjHeader* obj = live_objs_[i];
    DCHECK(obj);  // malloc() shouldn't have returned nullptr

    bool is_live = mark_set_.IsMarked(obj->obj_id);

    // Compact live_objs_ and populate to_free_.  Note: doing the reverse
    // could be more efficient when many objects are dead.
    if (is_live) {
      live_objs_[last_live_index++] = obj;
    } else {
      to_free_.push_back(obj);
      // free(obj);
      num_live_--;
    }
  }
  live_objs_.resize(last_live_index);  // remove dangling objects

  num_collections_++;
  max_survived_ = std::max(max_survived_, num_live());
}

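// Dead objects are not freed here: their headers go on to_free_ so Allocate()
// can reuse each obj_id, and the actual free() happens there.
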
int MarkSweepHeap::Collect() {
  #ifdef GC_TIMING
  struct timespec start, end;
  if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &start) < 0) {
    FAIL("clock_gettime failed");
  }
  #endif

  int num_roots = roots_.size();
  int num_globals = global_roots_.size();

  if (gc_verbose_) {
    log("");
    log("%2d. GC with %d roots (%d global) and %d live objects",
        num_collections_, num_roots + num_globals, num_globals, num_live());
  }

  // Resize the mark set to cover all object IDs assigned so far
  mark_set_.ReInit(greatest_obj_id_);
  #ifndef NO_POOL_ALLOC
  pool1_.PrepareForGc();
  pool2_.PrepareForGc();
  #endif

  // Mark roots.
  // Note: It might be nice to get rid of double pointers
  for (int i = 0; i < num_roots; ++i) {
    RawObject* root = *(roots_[i]);
    if (root) {
      MaybeMarkAndPush(root);
    }
  }

  for (int i = 0; i < num_globals; ++i) {
    RawObject* root = global_roots_[i];
    if (root) {
      MaybeMarkAndPush(root);
    }
  }

  // Traverse object graph.
  TraceChildren();

  Sweep();

  if (gc_verbose_) {
    log("  %d live after sweep", num_live());
  }

  // We know how many are live.  If the number of objects is close to the
  // threshold (above 75%), then set the threshold to 2 times the number of
  // live objects.  This is an ad hoc policy that removes observed "thrashing"
  // -- being at 99% of the threshold and doing FUTILE mark and sweep.

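  // Example: with gc_threshold_ = 1000, the water mark is 750; if 800 objects
  // survive the sweep, the threshold is raised to 1600.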
  int water_mark = (gc_threshold_ * 3) / 4;
  if (num_live() > water_mark) {
    gc_threshold_ = num_live() * 2;
    num_growths_++;
    if (gc_verbose_) {
      log("  exceeded %d live objects; gc_threshold set to %d", water_mark,
          gc_threshold_);
    }
  }

  #ifdef GC_TIMING
  if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &end) < 0) {
    FAIL("clock_gettime failed");
  }

  double start_secs = start.tv_sec + start.tv_nsec / 1e9;
  double end_secs = end.tv_sec + end.tv_nsec / 1e9;
  double gc_millis = (end_secs - start_secs) * 1000.0;

  if (gc_verbose_) {
    log("  %.1f ms GC", gc_millis);
  }

  total_gc_millis_ += gc_millis;
  if (gc_millis > max_gc_millis_) {
    max_gc_millis_ = gc_millis;
  }
  #endif

  return num_live();  // for unit tests only
}

void MarkSweepHeap::PrintStats(int fd) {
  dprintf(fd, "  num live          = %10d\n", num_live());
  // max_survived_ can be less than num_live(), because it's only updated at
  // each GC, leaving off allocations after the last one
  dprintf(fd, "  max survived      = %10d\n", max_survived_);
  dprintf(fd, "\n");

  #ifndef NO_POOL_ALLOC
  dprintf(fd, "  num allocated     = %10d\n",
          num_allocated_ + pool1_.num_allocated() + pool2_.num_allocated());
  dprintf(fd, "  num in heap       = %10d\n", num_allocated_);
  #else
  dprintf(fd, "  num allocated     = %10d\n", num_allocated_);
  #endif

  #ifndef NO_POOL_ALLOC
  dprintf(fd, "  num in pool 1     = %10d\n", pool1_.num_allocated());
  dprintf(fd, "  num in pool 2     = %10d\n", pool2_.num_allocated());
  dprintf(
      fd, "bytes allocated     = %10" PRId64 "\n",
      bytes_allocated_ + pool1_.bytes_allocated() + pool2_.bytes_allocated());
  #else
  dprintf(fd, "bytes allocated     = %10" PRId64 "\n", bytes_allocated_);
  #endif

  dprintf(fd, "\n");
  dprintf(fd, "  num gc points     = %10d\n", num_gc_points_);
  dprintf(fd, "  num collections   = %10d\n", num_collections_);
  dprintf(fd, "\n");
  dprintf(fd, "  gc threshold      = %10d\n", gc_threshold_);
  dprintf(fd, "  num growths       = %10d\n", num_growths_);
  dprintf(fd, "\n");
  dprintf(fd, "  max gc millis     = %10.1f\n", max_gc_millis_);
  dprintf(fd, "total gc millis     = %10.1f\n", total_gc_millis_);
  dprintf(fd, "\n");
  dprintf(fd, "roots capacity      = %10d\n",
          static_cast<int>(roots_.capacity()));
  dprintf(fd, "  objs capacity     = %10d\n",
          static_cast<int>(live_objs_.capacity()));
}

// Cleanup at the end of main() to remain ASAN-safe
void MarkSweepHeap::MaybePrintStats() {
  int stats_fd = -1;
  char* e = getenv("OILS_GC_STATS");
  if (e && strlen(e)) {  // env var set and non-empty
    stats_fd = STDERR_FILENO;
  } else {
    // A raw file descriptor lets benchmarks extract stats even if the script
    // writes to stdout and stderr.  Shells can't use open() without potential
    // conflicts.

    e = getenv("OILS_GC_STATS_FD");
    if (e && strlen(e)) {
      // Try setting 'stats_fd'.  On error, it stays unchanged, and we don't
      // call PrintStats().
      StringToInt(e, strlen(e), 10, &stats_fd);
    }
  }

  if (stats_fd != -1) {
    PrintStats(stats_fd);
  }
}

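// Usage sketch: OILS_GC_STATS=1 prints stats to stderr at exit, while
// OILS_GC_STATS_FD=9 writes them to descriptor 9, which the caller must open,
// e.g. with the shell redirection 9>gc_stats.txt (9 is an illustrative
// value).
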
void MarkSweepHeap::FreeEverything() {
  roots_.clear();
  global_roots_.clear();

  Collect();

  // Collect() told us what to free()
  for (auto obj : to_free_) {
    free(obj);
  }
  #ifndef NO_POOL_ALLOC
  pool1_.Free();
  pool2_.Free();
  #endif
}

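// With roots_ and global_roots_ cleared, Collect() marks nothing, so every
// remaining heap object lands in to_free_ and is freed by the loop above.
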
void MarkSweepHeap::CleanProcessExit() {
  MaybePrintStats();

  char* e = getenv("OILS_GC_ON_EXIT");
  // collect by default; OILS_GC_ON_EXIT=0 overrides
  if (!(e && strcmp(e, "0") == 0)) {
    FreeEverything();
  }
}

// for the main binary
void MarkSweepHeap::ProcessExit() {
  MaybePrintStats();

  #ifdef CLEAN_PROCESS_EXIT
  FreeEverything();
  #else
  char* e = getenv("OILS_GC_ON_EXIT");
  // don't collect by default; OILS_GC_ON_EXIT=1 overrides
  if (e && strcmp(e, "1") == 0) {
    FreeEverything();
  }
  #endif
}

MarkSweepHeap gHeap;

#endif  // MARK_SWEEP