/home/uke/oil/mycpp/gc_mylib.h
Line | Count | Source |
1 | | // gc_mylib.h - corresponds to mycpp/mylib.py |
2 | | |
3 | | #ifndef MYCPP_GC_MYLIB_H |
4 | | #define MYCPP_GC_MYLIB_H |
5 | | |
6 | | #include "mycpp/gc_alloc.h" // gHeap |
7 | | #include "mycpp/gc_dict.h" // for dict_erase() |
8 | | #include "mycpp/gc_mops.h" |
9 | | #include "mycpp/gc_tuple.h" |
10 | | |
11 | | template <class K, class V> |
12 | | class Dict; |
13 | | |
14 | | namespace mylib { |
15 | | |
16 | | void InitCppOnly(); |
17 | | |
18 | | // Wrappers around our C++ APIs |
19 | | |
20 | 1 | inline void MaybeCollect() { |
21 | 1 | gHeap.MaybeCollect(); |
22 | 1 | } |
23 | | |
24 | 0 | inline void PrintGcStats() { |
25 | 0 | gHeap.PrintShortStats(); // print to stderr |
26 | 0 | } |
27 | | |
28 | | void print_stderr(BigStr* s); |
29 | | |
30 | 0 | inline int ByteAt(BigStr* s, int i) { |
31 | 0 | DCHECK(0 <= i); |
32 | 0 | DCHECK(i <= len(s)); |
33 | 0 |
34 | 0 | return static_cast<unsigned char>(s->data_[i]); |
35 | 0 | } |
36 | | |
37 | 0 | inline int ByteEquals(int byte, BigStr* ch) { |
38 | 0 | DCHECK(0 <= byte); |
39 | 0 | DCHECK(byte < 256); |
40 | 0 |
41 | 0 | DCHECK(len(ch) == 1); |
42 | 0 |
43 | 0 | return byte == static_cast<unsigned char>(ch->data_[0]); |
44 | 0 | } |
45 | | |
46 | 0 | inline int ByteInSet(int byte, BigStr* byte_set) { |
47 | 0 | DCHECK(0 <= byte); |
48 | 0 | DCHECK(byte < 256); |
49 | 0 |
50 | 0 | int n = len(byte_set); |
51 | 0 | for (int i = 0; i < n; ++i) { |
52 | 0 | int b = static_cast<unsigned char>(byte_set->data_[i]); |
53 | 0 | if (byte == b) { |
54 | 0 | return true; |
55 | 0 | } |
56 | 0 | } |
57 | 0 | return false; |
58 | 0 | } |
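
A minimal usage sketch of the three byte helpers above (values in the comments are illustrative):

  BigStr* s = StrFromC("hello");
  int b = mylib::ByteAt(s, 0);                          // 104, i.e. 'h'
  bool eq = mylib::ByteEquals(b, StrFromC("h"));        // true
  bool vowel = mylib::ByteInSet(b, StrFromC("aeiou"));  // false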
59 | | |
60 | | BigStr* JoinBytes(List<int>* byte_list); |
61 | | |
62 | | void BigIntSort(List<mops::BigInt>* keys); |
63 | | |
64 | | // const int kStdout = 1; |
65 | | // const int kStderr = 2; |
66 | | |
67 | | // void writeln(BigStr* s, int fd = kStdout); |
68 | | |
69 | | Tuple2<BigStr*, BigStr*> split_once(BigStr* s, BigStr* delim); |
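
A hedged usage sketch of split_once(), assuming it mirrors mylib.split_once() in mycpp/mylib.py (split on the first occurrence of the delimiter) and that Tuple2 exposes at0()/at1() accessors:

  Tuple2<BigStr*, BigStr*> t =
      mylib::split_once(StrFromC("key=a=b"), StrFromC("="));
  // t.at0() is "key", t.at1() is "a=b"; if the delimiter is absent, the
  // second element is presumably nullptr (the C++ analog of Python's None)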
70 | | |
71 | | template <typename K, typename V> |
72 | 140 | void dict_erase(Dict<K, V>* haystack, K needle) { |
73 | 140 | DCHECK(haystack->obj_header().heap_tag != HeapTag::Global); |
74 | | |
75 | 0 | int pos = haystack->hash_and_probe(needle); |
76 | 140 | if (pos == kTooSmall) { |
77 | 0 | return; |
78 | 0 | } |
79 | 140 | DCHECK(pos >= 0); |
80 | 0 | int kv_index = haystack->index_->items_[pos]; |
81 | 140 | if (kv_index < 0) { |
82 | 1 | return; |
83 | 1 | } |
84 | | |
85 | 139 | int last_kv_index = haystack->len_ - 1; |
86 | 139 | DCHECK(kv_index <= last_kv_index); |
87 | | |
88 | | // Swap the target entry with the most recently inserted one before removing |
89 | | // it. This has two benefits. |
90 | | // (1) It keeps the entry arrays compact. All valid entries occupy a |
91 | | // contiguous region in memory. |
92 | | // (2) It prevents holes in the entry arrays. This makes iterating over |
93 | | // entries (e.g. in keys() or DictIter()) trivial and doesn't require |
94 | | // any extra validity state (like a bitset of unusable slots). This is |
95 | | // important because keys and values won't always be pointers, so we
96 | | // can't rely on NULL checks for validity. We also can't wrap the slab |
97 | | // entry types in some other type without modifying the garbage |
98 | | // collector to trace through unmanaged types (or paying the extra |
99 | | // allocations for the outer type). |
100 | 139 | if (kv_index != last_kv_index) { |
101 | 71 | K last_key = haystack->keys_->items_[last_kv_index]; |
102 | 71 | V last_val = haystack->values_->items_[last_kv_index]; |
103 | 71 | int last_pos = haystack->hash_and_probe(last_key); |
104 | 71 | DCHECK(last_pos != kNotFound); |
105 | 0 | haystack->keys_->items_[kv_index] = last_key; |
106 | 71 | haystack->values_->items_[kv_index] = last_val; |
107 | 71 | haystack->index_->items_[last_pos] = kv_index; |
108 | 71 | } |
109 | | |
110 | | // Zero out for GC. These could be nullptr or 0 |
111 | 0 | haystack->keys_->items_[last_kv_index] = 0; |
112 | 139 | haystack->values_->items_[last_kv_index] = 0; |
113 | 139 | haystack->index_->items_[pos] = kDeletedEntry; |
114 | 139 | haystack->len_--; |
115 | 139 | DCHECK(haystack->len_ < haystack->capacity_); |
116 | 139 | }
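
A usage sketch of dict_erase(); it assumes Dict's set() method and the global len() from the mycpp runtime, and only illustrates the swap-with-last behavior documented above:

  Dict<int, int>* d = Alloc<Dict<int, int>>();
  d->set(1, 100);
  d->set(2, 200);
  mylib::dict_erase(d, 1);   // the (2, 200) entry is swapped into the freed slot
  // len(d) == 1, and the entry arrays stay contiguous for iteration
  mylib::dict_erase(d, 99);  // erasing a missing key is a no-op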
117 | | |
118 | 2 | inline BigStr* hex_lower(int i) { |
119 | | // Note: Could also use OverAllocatedStr, but most strings are small? |
120 | 2 | char buf[kIntBufSize]; |
121 | 2 | int len = snprintf(buf, kIntBufSize, "%x", i); |
122 | 2 | return ::StrFromC(buf, len); |
123 | 2 | } |
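
For example (a sketch; the output is lowercase with no "0x" prefix):

  BigStr* h = mylib::hex_lower(255);  // "ff"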
124 | | |
125 | | // Abstract type: Union of LineReader and Writer |
126 | | class File { |
127 | | public: |
128 | 14 | File() { |
129 | 14 | } |
130 | | // Writer |
131 | | virtual void write(BigStr* s) = 0; |
132 | | virtual void flush() = 0; |
133 | | |
134 | | // Reader |
135 | | virtual BigStr* readline() = 0; |
136 | | |
137 | | // Both |
138 | | virtual bool isatty() = 0; |
139 | | virtual void close() = 0; |
140 | | |
141 | 0 | static constexpr ObjHeader obj_header() { |
142 | 0 | return ObjHeader::ClassFixed(field_mask(), sizeof(File)); |
143 | 0 | } |
144 | | |
145 | 7 | static constexpr uint32_t field_mask() { |
146 | 7 | return kZeroMask; |
147 | 7 | } |
148 | | }; |
149 | | |
150 | | // Wrap a FILE* for read and write |
151 | | class CFile : public File { |
152 | | public: |
153 | 7 | explicit CFile(FILE* f) : File(), f_(f) { |
154 | 7 | } |
155 | | // Writer |
156 | | void write(BigStr* s) override; |
157 | | void flush() override; |
158 | | |
159 | | // Reader |
160 | | BigStr* readline() override; |
161 | | |
162 | | // Both |
163 | | bool isatty() override; |
164 | | void close() override; |
165 | | |
166 | 7 | static constexpr ObjHeader obj_header() { |
167 | 7 | return ObjHeader::ClassFixed(field_mask(), sizeof(CFile)); |
168 | 7 | } |
169 | | |
170 | 7 | static constexpr uint32_t field_mask() { |
171 | | // not mutating field_mask because FILE* isn't a GC object |
172 | 7 | return File::field_mask(); |
173 | 7 | } |
174 | | |
175 | | private: |
176 | | FILE* f_; |
177 | | |
178 | | DISALLOW_COPY_AND_ASSIGN(CFile) |
179 | | }; |
180 | | |
181 | | // Abstract File we can only read from. |
182 | | // TODO: can we get rid of DCHECK() and reinterpret_cast? |
183 | | class LineReader : public File { |
184 | | public: |
185 | 3 | LineReader() : File() { |
186 | 3 | } |
187 | 0 | void write(BigStr* s) override { |
188 | 0 | CHECK(false); // should not happen |
189 | 0 | } |
190 | 0 | void flush() override { |
191 | 0 | CHECK(false); // should not happen |
192 | 0 | } |
193 | | |
194 | 0 | static constexpr ObjHeader obj_header() { |
195 | 0 | return ObjHeader::ClassFixed(field_mask(), sizeof(LineReader)); |
196 | 0 | } |
197 | | |
198 | 3 | static constexpr uint32_t field_mask() { |
199 | 3 | return kZeroMask; |
200 | 3 | } |
201 | | }; |
202 | | |
203 | | class BufLineReader : public LineReader { |
204 | | public: |
205 | 3 | explicit BufLineReader(BigStr* s) : LineReader(), s_(s), pos_(0) { |
206 | 3 | } |
207 | | BigStr* readline() override;
208 | 1 | bool isatty() override {
209 | 1 | return false;
210 | 1 | }
211 | 1 | void close() override {
212 | 1 | }
213 | | |
214 | | BigStr* s_; |
215 | | int pos_; |
216 | | |
217 | 3 | static constexpr ObjHeader obj_header() { |
218 | 3 | return ObjHeader::ClassFixed(field_mask(), sizeof(BufLineReader));
219 | 3 | } |
220 | | |
221 | 3 | static constexpr uint32_t field_mask() { |
222 | 3 | return LineReader::field_mask() | maskbit(offsetof(BufLineReader, s_)); |
223 | 3 | } |
224 | | |
225 | | DISALLOW_COPY_AND_ASSIGN(BufLineReader) |
226 | | }; |
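
A usage sketch of BufLineReader, which reads lines from an in-memory string (like Python's io.StringIO):

  BigStr* text = StrFromC("one\ntwo\n");
  mylib::LineReader* r = Alloc<mylib::BufLineReader>(text);
  BigStr* line = r->readline();  // "one\n"
  // following the Python convention, readline() presumably returns the
  // empty string at EOF rather than raising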
227 | | |
228 | | extern LineReader* gStdin; |
229 | | |
230 | 1 | inline LineReader* Stdin() { |
231 | 1 | if (gStdin == nullptr) { |
232 | 1 | gStdin = reinterpret_cast<LineReader*>(Alloc<CFile>(stdin)); |
233 | 1 | } |
234 | 1 | return gStdin; |
235 | 1 | } |
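
Usage is a one-liner; the singleton lazily wraps the process's stdin:

  BigStr* first = mylib::Stdin()->readline();

Note that CFile derives from File, not LineReader, so the reinterpret_cast above sidesteps the type system; that is the cast the TODO on this class refers to.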
236 | | |
237 | | LineReader* open(BigStr* path); |
238 | | |
239 | | // Abstract File we can only write to. |
240 | | // TODO: can we get rid of DCHECK() and reinterpret_cast? |
241 | | class Writer : public File { |
242 | | public: |
243 | 4 | Writer() : File() { |
244 | 4 | } |
245 | 0 | BigStr* readline() override { |
246 | 0 | CHECK(false); // should not happen |
247 | 0 | } |
248 | | |
249 | 0 | static constexpr ObjHeader obj_header() { |
250 | 0 | return ObjHeader::ClassFixed(field_mask(), sizeof(Writer)); |
251 | 0 | } |
252 | | |
253 | 4 | static constexpr uint32_t field_mask() { |
254 | 4 | return kZeroMask; |
255 | 4 | } |
256 | | }; |
257 | | |
258 | | class MutableStr; |
259 | | |
260 | | class BufWriter : public Writer { |
261 | | public: |
262 | 4 | BufWriter() : Writer(), str_(nullptr), len_(0) { |
263 | 4 | } |
264 | | void write(BigStr* s) override; |
265 | | void write_spaces(int n); |
266 | 1 | void clear() { // Reuse this instance |
267 | 1 | str_ = nullptr; |
268 | 1 | len_ = 0; |
269 | 1 | is_valid_ = true; |
270 | 1 | } |
271 | 0 | void close() override { |
272 | 0 | } |
273 | 1 | void flush() override { |
274 | 1 | } |
275 | 1 | bool isatty() override { |
276 | 1 | return false; |
277 | 1 | } |
278 | | BigStr* getvalue(); // part of cStringIO API |
279 | | |
280 | | // |
281 | | // Low Level API for C++ usage only |
282 | | // |
283 | | |
284 | | // Convenient API that avoids BigStr* |
285 | | void WriteConst(const char* c_string); |
286 | | |
287 | | // Potentially resizes the buffer. |
288 | | void EnsureMoreSpace(int n); |
289 | | // After EnsureMoreSpace(42), you can write 42 more bytes safely. |
290 | | // |
291 | | // Note that if you call EnsureMoreSpace(42), write 5 bytes, and then
292 | | // EnsureMoreSpace(42) again, the amount of additional space reserved is 47. |
293 | | |
294 | | // (Similar to vector::reserve(n), but it takes an integer to ADD to the |
295 | | // capacity.) |
296 | | |
297 | | uint8_t* LengthPointer(); // start + length |
298 | | uint8_t* CapacityPointer(); // start + capacity |
299 | | void SetLengthFrom(uint8_t* length_ptr); |
300 | | |
301 | 0 | int Length() { |
302 | 0 | return len_; |
303 | 0 | } |
304 | | |
305 | | // Rewind to earlier position, future writes start there |
306 | | void Truncate(int length); |
307 | | |
308 | 4 | static constexpr ObjHeader obj_header() { |
309 | 4 | return ObjHeader::ClassFixed(field_mask(), sizeof(BufWriter)); |
310 | 4 | } |
311 | | |
312 | 4 | static constexpr unsigned field_mask() { |
313 | | // maskbit_v() because BufWriter has virtual methods
314 | 4 | return Writer::field_mask() | maskbit(offsetof(BufWriter, str_)); |
315 | 4 | } |
316 | | |
317 | | private: |
318 | | void WriteRaw(char* s, int n); |
319 | | |
320 | | MutableStr* str_; // getvalue() turns this directly into Str*, no copying |
321 | | int len_; // how many bytes have been written so far |
322 | | bool is_valid_ = true; // It becomes invalid after getvalue() is called |
323 | | }; |
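
A sketch of the low-level reserve/write/commit protocol described in the comments above; the snprintf call is illustrative, and only EnsureMoreSpace(), LengthPointer(), SetLengthFrom(), and getvalue() come from this header:

  mylib::BufWriter* w = Alloc<mylib::BufWriter>();
  w->EnsureMoreSpace(32);           // reserve 32 writable bytes
  uint8_t* p = w->LengthPointer();  // start + length
  int n = snprintf(reinterpret_cast<char*>(p), 32, "%d", 42);
  w->SetLengthFrom(p + n);          // commit the bytes just written
  BigStr* result = w->getvalue();   // "42"; w is invalid afterward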
324 | | |
325 | | extern Writer* gStdout; |
326 | | |
327 | 1 | inline Writer* Stdout() { |
328 | 1 | if (gStdout == nullptr) { |
329 | 1 | gStdout = reinterpret_cast<Writer*>(Alloc<CFile>(stdout)); |
330 | 1 | gHeap.RootGlobalVar(gStdout); |
331 | 1 | } |
332 | 1 | return gStdout; |
333 | 1 | } |
334 | | |
335 | | extern Writer* gStderr; |
336 | | |
337 | 1 | inline Writer* Stderr() { |
338 | 1 | if (gStderr == nullptr) { |
339 | 1 | gStderr = reinterpret_cast<Writer*>(Alloc<CFile>(stderr)); |
340 | 1 | gHeap.RootGlobalVar(gStderr); |
341 | 1 | } |
342 | 1 | return gStderr; |
343 | 1 | } |
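
Usage mirrors Python's sys.stdout / sys.stderr; both singletons are registered with gHeap.RootGlobalVar() so a collection between calls can't reclaim them:

  mylib::Stdout()->write(StrFromC("hello\n"));
  mylib::Stdout()->flush();
  mylib::Stderr()->write(StrFromC("oops\n"));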
344 | | |
345 | | class UniqueObjects { |
346 | | // Can't be expressed in typed Python because we don't have uint64_t for |
347 | | // addresses |
348 | | |
349 | | public: |
350 | 0 | UniqueObjects() { |
351 | 0 | } |
352 | 0 | void Add(void* obj) { |
353 | 0 | } |
354 | 0 | int Get(void* obj) { |
355 | 0 | return -1; |
356 | 0 | } |
357 | | |
358 | 0 | static constexpr ObjHeader obj_header() { |
359 | 0 | return ObjHeader::ClassFixed(field_mask(), sizeof(UniqueObjects)); |
360 | 0 | } |
361 | | |
362 | | // SPECIAL CASE? We should never have a unique reference to an object? So |
363 | | // don't bother tracing |
364 | 0 | static constexpr uint32_t field_mask() { |
365 | 0 | return kZeroMask; |
366 | 0 | } |
367 | | |
368 | | private: |
369 | | // address -> small integer ID |
370 | | Dict<void*, int> addresses_; |
371 | | }; |
372 | | |
373 | | } // namespace mylib |
374 | | |
375 | | #endif // MYCPP_GC_MYLIB_H |