/home/uke/oil/mycpp/gc_mylib.h
Line | Count | Source
1 | | // gc_mylib.h - corresponds to mycpp/mylib.py |
2 | | |
3 | | #ifndef MYCPP_GC_MYLIB_H |
4 | | #define MYCPP_GC_MYLIB_H |
5 | | |
6 | | #include <limits.h> // CHAR_BIT |
7 | | |
8 | | #include "mycpp/gc_alloc.h" // gHeap |
9 | | #include "mycpp/gc_dict.h" // for dict_erase() |
10 | | #include "mycpp/gc_mops.h" |
11 | | #include "mycpp/gc_tuple.h" |
12 | | |
13 | | template <class K, class V> |
14 | | class Dict; |
15 | | |
16 | | // https://stackoverflow.com/questions/3919995/determining-sprintf-buffer-size-whats-the-standard/11092994#11092994 |
17 | | // Notes: |
18 | | // - Python 2.7's intobject.c has an erroneous +6 |
19 | | // - This is 13, but len('-2147483648') is 11, which means we only need 12? |
20 | | // - This formula is valid for octal(), because 2^(3 bits) = 8 |
21 | | |
22 | | const int kIntBufSize = CHAR_BIT * sizeof(int) / 3 + 3; |
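
As a sanity check on the formula, the arithmetic for a typical 32-bit int works out as follows; this is a standalone sketch that assumes only the C standard library:

```cpp
#include <limits.h>  // CHAR_BIT, INT_MIN, UINT_MAX
#include <stdio.h>   // snprintf, printf

int main() {
  // 8 * 4 / 3 + 3 == 13 when int is 32 bits.
  int buf_size = (int)(CHAR_BIT * sizeof(int) / 3 + 3);

  char buf[64];
  int decimal_len = snprintf(buf, sizeof buf, "%d", INT_MIN);  // "-2147483648"
  int octal_len = snprintf(buf, sizeof buf, "%o", UINT_MAX);   // "37777777777"

  // Both worst cases are 11 characters, so 13 covers digits + sign + NUL.
  printf("buf_size=%d decimal=%d octal=%d\n", buf_size, decimal_len, octal_len);
  return 0;
}
```
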
23 | | |
24 | | namespace mylib { |
25 | | |
26 | | void InitCppOnly(); |
27 | | |
28 | | // Wrappers around our C++ APIs |
29 | | |
30 | 1.07k | inline void MaybeCollect() { |
31 | 1.07k | gHeap.MaybeCollect(); |
32 | 1.07k | } |
33 | | |
34 | 0 | inline void PrintGcStats() { |
35 | 0 | gHeap.PrintShortStats(); // print to stderr |
36 | 0 | } |
37 | | |
38 | | void print_stderr(BigStr* s); |
39 | | |
40 | 286 | inline int ByteAt(BigStr* s, int i) { |
41 | 286 | DCHECK(0 <= i); |
42 | 286 | DCHECK(i <= len(s)); |
43 | | |
44 | 0 | return static_cast<unsigned char>(s->data_[i]); |
45 | 286 | } |
46 | | |
47 | 1.02k | inline int ByteEquals(int byte, BigStr* ch) { |
48 | 1.02k | DCHECK(0 <= byte); |
49 | 1.02k | DCHECK(byte < 256); |
50 | | |
51 | 1.02k | DCHECK(len(ch) == 1); |
52 | | |
53 | 0 | return byte == static_cast<unsigned char>(ch->data_[0]); |
54 | 1.02k | } |
55 | | |
56 | 256 | inline int ByteInSet(int byte, BigStr* byte_set) { |
57 | 256 | DCHECK(0 <= byte); |
58 | 256 | DCHECK(byte < 256); |
59 | | |
60 | 0 | int n = len(byte_set); |
61 | 1.77k | for (int i = 0; i < n; ++i) { |
62 | 1.52k | int b = static_cast<unsigned char>(byte_set->data_[i]); |
63 | 1.52k | if (byte == b) { |
64 | 6 | return true; |
65 | 6 | } |
66 | 1.52k | } |
67 | 250 | return false; |
68 | 256 | } |
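
Together, these helpers let translated code test individual bytes the way Python tests one-character strings, without allocating per comparison. A minimal usage sketch (it assumes a running mycpp heap, and uses StrFromC() and len() as the rest of this header does):

```cpp
BigStr* s = StrFromC("foo\tbar");
BigStr* whitespace = StrFromC(" \t\n");  // the byte set
BigStr* b_ch = StrFromC("b");            // a one-byte "char"

for (int i = 0; i < len(s); ++i) {
  int b = mylib::ByteAt(s, i);  // 0..255, never sign-extended
  if (mylib::ByteInSet(b, whitespace)) {
    // i == 3: the '\t' separator
  }
  if (mylib::ByteEquals(b, b_ch)) {
    // i == 4: the 'b' of "bar"
  }
}
```
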
69 | | |
70 | | BigStr* JoinBytes(List<int>* byte_list); |
71 | | |
72 | | void BigIntSort(List<mops::BigInt>* keys); |
73 | | |
74 | | // const int kStdout = 1; |
75 | | // const int kStderr = 2; |
76 | | |
77 | | // void writeln(BigStr* s, int fd = kStdout); |
78 | | |
79 | | Tuple2<BigStr*, BigStr*> split_once(BigStr* s, BigStr* delim); |
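
split_once() mirrors the function of the same name in mycpp/mylib.py. A behavioral sketch; the not-found case returning nullptr for the second element is my reading of the Python original, so treat it as an assumption:

```cpp
Tuple2<BigStr*, BigStr*> hit = mylib::split_once(StrFromC("key=value"), StrFromC("="));
// hit.at0() == "key", hit.at1() == "value"

Tuple2<BigStr*, BigStr*> miss = mylib::split_once(StrFromC("keyonly"), StrFromC("="));
// miss.at0() == "keyonly", miss.at1() == nullptr  (assumption)
```
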
80 | | |
81 | | template <typename K, typename V> |
82 | 280 | void dict_erase(Dict<K, V>* haystack, K needle) { |
83 | 280 | DCHECK(haystack->obj_header().heap_tag != HeapTag::Global); |
84 | | |
85 | 0 | int pos = haystack->hash_and_probe(needle); |
86 | 280 | if (pos == kTooSmall) { |
87 | 0 | return; |
88 | 0 | } |
89 | 280 | DCHECK(pos >= 0); |
90 | 0 | int kv_index = haystack->index_->items_[pos]; |
91 | 280 | if (kv_index < 0) { |
92 | 2 | return; |
93 | 2 | } |
94 | | |
95 | 278 | int last_kv_index = haystack->len_ - 1; |
96 | 278 | DCHECK(kv_index <= last_kv_index); |
97 | | |
98 | | // Swap the target entry with the most recently inserted one before removing |
99 | | // it. This has two benefits. |
100 | | // (1) It keeps the entry arrays compact. All valid entries occupy a |
101 | | // contiguous region in memory. |
102 | | // (2) It prevents holes in the entry arrays. This makes iterating over |
103 | | // entries (e.g. in keys() or DictIter()) trivial and doesn't require |
104 | | // any extra validity state (like a bitset of unusable slots). This is |
105 | | //     important because keys and values won't always be pointers, so we
106 | | // can't rely on NULL checks for validity. We also can't wrap the slab |
107 | | // entry types in some other type without modifying the garbage |
108 | | // collector to trace through unmanaged types (or paying the extra |
109 | | // allocations for the outer type). |
110 | 278 | if (kv_index != last_kv_index) { |
111 | 142 | K last_key = haystack->keys_->items_[last_kv_index]; |
112 | 142 | V last_val = haystack->values_->items_[last_kv_index]; |
113 | 142 | int last_pos = haystack->hash_and_probe(last_key); |
114 | 142 | DCHECK(last_pos != kNotFound); |
115 | 0 | haystack->keys_->items_[kv_index] = last_key; |
116 | 142 | haystack->values_->items_[kv_index] = last_val; |
117 | 142 | haystack->index_->items_[last_pos] = kv_index; |
118 | 142 | } |
119 | | |
120 | | // Zero out for GC. These could be nullptr or 0 |
121 | 0 | haystack->keys_->items_[last_kv_index] = 0; |
122 | 278 | haystack->values_->items_[last_kv_index] = 0; |
123 | 278 | haystack->index_->items_[pos] = kDeletedEntry; |
124 | 278 | haystack->len_--; |
125 | 278 | DCHECK(haystack->len_ < haystack->capacity_); |
126 | 278 | }
127 | | |
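
The swap-with-last strategy is easiest to see on a concrete deletion. A sketch using Dict::set() for insertion:

```cpp
auto* d = Alloc<Dict<int, int>>();
d->set(10, 100);
d->set(20, 200);
d->set(30, 300);  // entry arrays: keys_ [10, 20, 30], values_ [100, 200, 300]

mylib::dict_erase(d, 20);
// The last entry (30, 300) was swapped into the vacated middle slot, so the
// entry arrays stay compact: keys_ [10, 30], values_ [100, 300], len_ == 2.
// The index_ slot that led to key 20 now holds kDeletedEntry, which keeps
// probe sequences for colliding keys intact.
```
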
128 | 4 | inline BigStr* hex_lower(int i) { |
129 | | // Note: Could also use OverAllocatedStr, but most strings are small? |
130 | 4 | char buf[kIntBufSize]; |
131 | 4 | int len = snprintf(buf, kIntBufSize, "%x", i); |
132 | 4 | return ::StrFromC(buf, len); |
133 | 4 | } |
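
For example:

```cpp
BigStr* a = mylib::hex_lower(255);   // "ff"
BigStr* b = mylib::hex_lower(4096);  // "1000"
```
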
134 | | |
135 | | // Abstract type: Union of LineReader and Writer |
136 | | class File { |
137 | | public: |
138 | 113 | File() { |
139 | 113 | } |
140 | | // Writer |
141 | | virtual void write(BigStr* s) = 0; |
142 | | virtual void flush() = 0; |
143 | | |
144 | | // Reader |
145 | | virtual BigStr* readline() = 0; |
146 | | |
147 | | // Both |
148 | | virtual bool isatty() = 0; |
149 | | virtual void close() = 0; |
150 | | |
151 | 0 | static constexpr ObjHeader obj_header() { |
152 | 0 | return ObjHeader::ClassFixed(field_mask(), sizeof(File)); |
153 | 0 | } |
154 | | |
155 | 17 | static constexpr uint32_t field_mask() { |
156 | 17 | return kZeroMask; |
157 | 17 | } |
158 | | }; |
159 | | |
160 | | // Wrap a FILE* for read and write |
161 | | class CFile : public File { |
162 | | public: |
163 | 17 | explicit CFile(FILE* f) : File(), f_(f) { |
164 | 17 | } |
165 | | // Writer |
166 | | void write(BigStr* s) override; |
167 | | void flush() override; |
168 | | |
169 | | // Reader |
170 | | BigStr* readline() override; |
171 | | |
172 | | // Both |
173 | | bool isatty() override; |
174 | | void close() override; |
175 | | |
176 | 17 | static constexpr ObjHeader obj_header() { |
177 | 17 | return ObjHeader::ClassFixed(field_mask(), sizeof(CFile)); |
178 | 17 | } |
179 | | |
180 | 17 | static constexpr uint32_t field_mask() { |
181 | | // not mutating field_mask because FILE* isn't a GC object |
182 | 17 | return File::field_mask(); |
183 | 17 | } |
184 | | |
185 | | private: |
186 | | FILE* f_; |
187 | | |
188 | | DISALLOW_COPY_AND_ASSIGN(CFile) |
189 | | }; |
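
A usage sketch for wrapping an arbitrary FILE* (the path here is hypothetical, and the assumption is that close() releases the underlying FILE*):

```cpp
FILE* f = fopen("/tmp/demo.txt", "w");  // hypothetical path
if (f != nullptr) {
  auto* out = Alloc<mylib::CFile>(f);
  out->write(StrFromC("hello\n"));
  out->flush();
  out->close();  // assumed to fclose() the wrapped FILE*
}
```
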
190 | | |
191 | | // Abstract File we can only read from. |
192 | | // TODO: can we get rid of DCHECK() and reinterpret_cast? |
193 | | class LineReader : public File { |
194 | | public: |
195 | 6 | LineReader() : File() { |
196 | 6 | } |
197 | 0 | void write(BigStr* s) override { |
198 | 0 | CHECK(false); // should not happen |
199 | 0 | } |
200 | 0 | void flush() override { |
201 | 0 | CHECK(false); // should not happen |
202 | 0 | } |
203 | | |
204 | 0 | static constexpr ObjHeader obj_header() { |
205 | 0 | return ObjHeader::ClassFixed(field_mask(), sizeof(LineReader)); |
206 | 0 | } |
207 | | |
208 | 6 | static constexpr uint32_t field_mask() { |
209 | 6 | return kZeroMask; |
210 | 6 | } |
211 | | }; |
212 | | |
213 | | class BufLineReader : public LineReader { |
214 | | public: |
215 | 6 | explicit BufLineReader(BigStr* s) : LineReader(), s_(s), pos_(0) { |
216 | 6 | } |
217 | | virtual BigStr* readline(); |
218 | 2 | virtual bool isatty() { |
219 | 2 | return false; |
220 | 2 | } |
221 | 2 | virtual void close() { |
222 | 2 | } |
223 | | |
224 | | BigStr* s_; |
225 | | int pos_; |
226 | | |
227 | 6 | static constexpr ObjHeader obj_header() { |
228 | 6 | return ObjHeader::ClassFixed(field_mask(), sizeof(BufLineReader));
229 | 6 | } |
230 | | |
231 | 6 | static constexpr uint32_t field_mask() { |
232 | 6 | return LineReader::field_mask() | maskbit(offsetof(BufLineReader, s_)); |
233 | 6 | } |
234 | | |
235 | | DISALLOW_COPY_AND_ASSIGN(BufLineReader) |
236 | | }; |
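
BufLineReader is the in-memory counterpart, like Python's cStringIO. A sketch; returning an empty string at EOF is an assumption carried over from Python file semantics:

```cpp
auto* r = Alloc<mylib::BufLineReader>(StrFromC("a\nb\n"));
BigStr* line1 = r->readline();  // "a\n"
BigStr* line2 = r->readline();  // "b\n"
BigStr* done = r->readline();   // "" at EOF (assumption)
```
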
237 | | |
238 | | extern LineReader* gStdin; |
239 | | |
240 | 2 | inline LineReader* Stdin() { |
241 | 2 | if (gStdin == nullptr) { |
242 | 2 | gStdin = reinterpret_cast<LineReader*>(Alloc<CFile>(stdin)); |
243 | 2 | } |
244 | 2 | return gStdin; |
245 | 2 | } |
246 | | |
247 | | LineReader* open(BigStr* path); |
248 | | |
249 | | // Abstract File we can only write to. |
250 | | // TODO: can we get rid of DCHECK() and reinterpret_cast? |
251 | | class Writer : public File { |
252 | | public: |
253 | 90 | Writer() : File() { |
254 | 90 | } |
255 | 0 | BigStr* readline() override { |
256 | 0 | CHECK(false); // should not happen |
257 | 0 | } |
258 | | |
259 | 0 | static constexpr ObjHeader obj_header() { |
260 | 0 | return ObjHeader::ClassFixed(field_mask(), sizeof(Writer)); |
261 | 0 | } |
262 | | |
263 | 90 | static constexpr uint32_t field_mask() { |
264 | 90 | return kZeroMask; |
265 | 90 | } |
266 | | }; |
267 | | |
268 | | class MutableStr; |
269 | | |
270 | | class BufWriter : public Writer { |
271 | | public: |
272 | 90 | BufWriter() : Writer(), str_(nullptr), len_(0) { |
273 | 90 | } |
274 | | void write(BigStr* s) override; |
275 | | void write_spaces(int n); |
276 | 2 | void clear() { // Reuse this instance |
277 | 2 | str_ = nullptr; |
278 | 2 | len_ = 0; |
279 | 2 | is_valid_ = true; |
280 | 2 | } |
281 | 0 | void close() override { |
282 | 0 | } |
283 | 2 | void flush() override { |
284 | 2 | } |
285 | 2 | bool isatty() override { |
286 | 2 | return false; |
287 | 2 | } |
288 | | BigStr* getvalue(); // part of cStringIO API |
289 | | |
290 | | // |
291 | | // Low Level API for C++ usage only |
292 | | // |
293 | | |
294 | | // Convenient API that avoids BigStr* |
295 | | void WriteConst(const char* c_string); |
296 | | |
297 | | // Potentially resizes the buffer. |
298 | | void EnsureMoreSpace(int n); |
299 | | // After EnsureMoreSpace(42), you can write 42 more bytes safely. |
300 | | // |
301 | | // Note that if you call EnsureMoreSpace(42), write 5 bytes, and then call
302 | | // EnsureMoreSpace(42) again, 47 bytes past the original length are reserved.
303 | | |
304 | | // (Similar to vector::reserve(n), but it takes an integer to ADD to the |
305 | | // capacity.) |
306 | | |
307 | | uint8_t* LengthPointer(); // start + length |
308 | | uint8_t* CapacityPointer(); // start + capacity |
309 | | void SetLengthFrom(uint8_t* length_ptr); |
310 | | |
311 | 59 | int Length() { |
312 | 59 | return len_; |
313 | 59 | } |
314 | | |
315 | | // Rewind to earlier position, future writes start there |
316 | | void Truncate(int length); |
317 | | |
318 | 90 | static constexpr ObjHeader obj_header() { |
319 | 90 | return ObjHeader::ClassFixed(field_mask(), sizeof(BufWriter)); |
320 | 90 | } |
321 | | |
322 | 90 | static constexpr unsigned field_mask() { |
323 | | // maskbit_v() because BufWriter has virtual methods
324 | 90 | return Writer::field_mask() | maskbit(offsetof(BufWriter, str_)); |
325 | 90 | } |
326 | | |
327 | | private: |
328 | | void WriteRaw(char* s, int n); |
329 | | |
330 | | MutableStr* str_; // getvalue() turns this directly into Str*, no copying |
331 | | int len_; // how many bytes have been written so far |
332 | | bool is_valid_ = true; // It becomes invalid after getvalue() is called |
333 | | }; |
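
A sketch of the low-level API in sequence: reserve space, let snprintf() write directly into the buffer, then commit the new length. The byte count is illustrative:

```cpp
auto* w = Alloc<mylib::BufWriter>();
w->WriteConst("x = ");

w->EnsureMoreSpace(kIntBufSize);  // room for any formatted int
uint8_t* p = w->LengthPointer();  // start + length
int n = snprintf(reinterpret_cast<char*>(p), kIntBufSize, "%d", 42);
w->SetLengthFrom(p + n);          // commit exactly n bytes

BigStr* result = w->getvalue();   // "x = 42"; the writer is now invalid
```
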
334 | | |
335 | | extern Writer* gStdout; |
336 | | |
337 | 14 | inline Writer* Stdout() { |
338 | 14 | if (gStdout == nullptr) { |
339 | 5 | gStdout = reinterpret_cast<Writer*>(Alloc<CFile>(stdout)); |
340 | 5 | gHeap.RootGlobalVar(gStdout); |
341 | 5 | } |
342 | 14 | return gStdout; |
343 | 14 | } |
344 | | |
345 | | extern Writer* gStderr; |
346 | | |
347 | 2 | inline Writer* Stderr() { |
348 | 2 | if (gStderr == nullptr) { |
349 | 2 | gStderr = reinterpret_cast<Writer*>(Alloc<CFile>(stderr)); |
350 | 2 | gHeap.RootGlobalVar(gStderr); |
351 | 2 | } |
352 | 2 | return gStderr; |
353 | 2 | } |
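
Typical call sites, relying on the lazy initialization above (RootGlobalVar() keeps the singletons alive across collections):

```cpp
mylib::Stdout()->write(StrFromC("hello\n"));
mylib::Stdout()->flush();
mylib::Stderr()->write(StrFromC("warning\n"));
```
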
354 | | |
355 | | class UniqueObjects { |
356 | | // Can't be expressed in typed Python because we don't have uint64_t for |
357 | | // addresses |
358 | | |
359 | | public: |
360 | 0 | UniqueObjects() { |
361 | 0 | } |
362 | 0 | void Add(void* obj) { |
363 | 0 | } |
364 | 0 | int Get(void* obj) { |
365 | 0 | return -1; |
366 | 0 | } |
367 | | |
368 | 0 | static constexpr ObjHeader obj_header() { |
369 | 0 | return ObjHeader::ClassFixed(field_mask(), sizeof(UniqueObjects)); |
370 | 0 | } |
371 | | |
372 | | // SPECIAL CASE? We should never have a unique reference to an object? So |
373 | | // don't bother tracing |
374 | 0 | static constexpr uint32_t field_mask() { |
375 | 0 | return kZeroMask; |
376 | 0 | } |
377 | | |
378 | | private: |
379 | | // address -> small integer ID |
380 | | Dict<void*, int> addresses_; |
381 | | }; |
382 | | |
383 | | } // namespace mylib |
384 | | |
385 | | #endif // MYCPP_GC_MYLIB_H |