/home/uke/oil/mycpp/gc_dict.h
Line | Count | Source (jump to first uncovered line) |
1 | | // Hash table based on CPython's "compact dict": |
2 | | // |
3 | | // https://mail.python.org/pipermail/python-dev/2012-December/123028.html |
4 | | // https://code.activestate.com/recipes/578375/ |
5 | | // |
6 | | // Main differences: |
7 | | // - It's type-specialized in C++ -- Dict<K, V>. |
8 | | // - It's integrated with our mark and sweep GC, using Slab<int>, Slab<K>, and |
9 | | // Slab<V> |
10 | | // - We use linear probing, not the pseudo-random number generator |
11 | | |
12 | | #ifndef MYCPP_GC_DICT_H |
13 | | #define MYCPP_GC_DICT_H |
14 | | |
15 | | #include "mycpp/comparators.h" |
16 | | #include "mycpp/gc_builtins.h" |
17 | | #include "mycpp/gc_list.h" |
18 | | #include "mycpp/hash.h" |
19 | | |
20 | | // Non-negative entries in index_ are array indices into keys_ and values_. |
21 | | // There are two special negative entries: |
22 | | |
23 | | // index_ value to say this Dict item was deleted (a tombstone). |
24 | | const int kDeletedEntry = -1; |
25 | | |
26 | | // index_ value to say this Dict entry is free. |
27 | | const int kEmptyEntry = -2; |
28 | | |
29 | | // Return value for find_kv_index(), not stored in index_. |
30 | | const int kNotFound = -3; |
31 | | |
32 | | // Return value for hash_and_probe(), not stored in index_. |
33 | | const int kTooSmall = -4; |
34 | | |
35 | | // Helper for keys() and values() |
36 | | template <typename T> |
37 | 9 | List<T>* ListFromDictSlab(Slab<T>* slab, int n) { |
38 | 9 | List<T>* result = Alloc<List<T>>(); |
39 | 9 | result->reserve(n); |
40 | | |
41 | 51 | for (int i = 0; i < n; ++i) { |
42 | 42 | result->append(slab->items_[i]); |
43 | 42 | } |
44 | 9 | return result; |
45 | 9 | }
46 | | |
47 | | // GlobalDict is layout-compatible with Dict (unit tests assert this), and it |
48 | | // can be a true C global (incurs zero startup time) |
49 | | |
50 | | template <typename K, typename V, int N> |
51 | | class GlobalDict { |
52 | | public: |
53 | | int len_; |
54 | | int capacity_; |
55 | | int index_len_; |
56 | | GlobalSlab<int, N>* index_; |
57 | | GlobalSlab<K, N>* keys_; |
58 | | GlobalSlab<V, N>* values_; |
59 | | }; |
60 | | |
61 | | #define GLOBAL_DICT(name, K, V, N, keys, vals) \ |
62 | | GcGlobal<GlobalSlab<K, N>> _keys_##name = {ObjHeader::Global(TypeTag::Slab), \ |
63 | | {.items_ = keys}}; \ |
64 | | GcGlobal<GlobalSlab<V, N>> _vals_##name = {ObjHeader::Global(TypeTag::Slab), \ |
65 | | {.items_ = vals}}; \ |
66 | | GcGlobal<GlobalDict<K, V, N>> _dict_##name = { \ |
67 | | ObjHeader::Global(TypeTag::Dict), \ |
68 | | {.len_ = N, \ |
69 | | .capacity_ = N, \ |
70 | | .index_len_ = 0, \ |
71 | | .index_ = nullptr, \ |
72 | | .keys_ = &_keys_##name.obj, \ |
73 | | .values_ = &_vals_##name.obj}, \ |
74 | | }; \ |
75 | | Dict<K, V>* name = reinterpret_cast<Dict<K, V>*>(&_dict_##name.obj); |
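A minimal usage sketch (the name gAnswer is made up here). A one-entry dict sidesteps the question of commas inside the macro's brace arguments, and because index_ is nullptr, lookups take the linear-search path in find_kv_index() below:

GLOBAL_DICT(gAnswer, int, int, 1, {42}, {43});
// gAnswer->at(42) == 43, with no startup-time allocation.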
76 | | |
77 | | template <class K, class V> |
78 | | class Dict { |
79 | | public: |
80 | | Dict() |
81 | | : len_(0), |
82 | | capacity_(0), |
83 | | index_len_(0), |
84 | | index_(nullptr), |
85 | | keys_(nullptr), |
86 | 29 | values_(nullptr) { |
87 | 29 | }
88 | | |
89 | | Dict(std::initializer_list<K> keys, std::initializer_list<V> values) |
90 | | : len_(0), |
91 | | capacity_(0), |
92 | | index_len_(0), |
93 | | index_(nullptr), |
94 | | keys_(nullptr), |
95 | 2 | values_(nullptr) { |
96 | 2 | DCHECK(keys.size() == values.size()); |
97 | 0 | auto v = values.begin(); // This simulates a "zip" loop |
98 | 3 | for (auto key : keys) { |
99 | | // note: calls reserve(), and may allocate |
100 | 3 | this->set(key, *v); |
101 | 3 | ++v; |
102 | 3 | } |
103 | 2 | }
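A hedged usage sketch of this "zip" constructor, assuming the mycpp Alloc<> helper forwards constructor arguments:

auto* d = Alloc<Dict<int, int>>(std::initializer_list<int>{1, 2, 3},
                                std::initializer_list<int>{10, 20, 30});
// d->len_ == 3 and d->at(2) == 20: the i-th key is paired with the i-th value.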
104 | | |
105 | | // Reserve enough space for at LEAST this many key-value pairs. |
106 | | void reserve(int num_desired); |
107 | | |
108 | | // d[key] in Python: raises KeyError if not found |
109 | | V at(K key) const; |
110 | | |
111 | | // d.get(key) in Python. (Can't use this if V isn't a pointer!) |
112 | | V get(K key) const; |
113 | | |
114 | | // Get a key, but return a default if not found. |
115 | | // expr_parse.py uses this with OTHER_BALANCE |
116 | | V get(K key, V default_val) const; |
117 | | |
118 | | // Implements d[k] = v. May resize the dictionary. |
119 | | void set(K key, V val); |
120 | | |
121 | | void update(List<Tuple2<K, V>*>* pairs); |
122 | | void update(Dict<K, V>* other); |
123 | | |
124 | | List<K>* keys() const; |
125 | | |
126 | | List<V>* values() const; |
127 | | |
128 | | void clear(); |
129 | | |
130 | | // Helper used by find_kv_index(), set(), mylib::dict_erase() in |
131 | | // gc_mylib.h |
132 | | // Returns either: |
133 | | // - the slot for an existing key, or an empty slot for a new key |
134 | | // - kTooSmall if the table is full |
135 | | int hash_and_probe(K key) const; |
136 | | |
137 | | // Helper used by at(), get(), dict_contains() |
138 | | // Given a key, returns either: |
139 | | // - an index into keys_ and values_ |
140 | | // - kNotFound |
141 | | int find_kv_index(K key) const; |
142 | | |
143 | 171 | static constexpr ObjHeader obj_header() { |
144 | 171 | return ObjHeader::ClassFixed(field_mask(), sizeof(Dict)); |
145 | 171 | }
146 | | |
147 | | int len_; // number of entries (keys and values, almost dense) |
148 | | int capacity_; // number of k/v slots |
149 | | int index_len_; // number of index slots |
150 | | |
151 | | // These 3 slabs are resized at the same time. |
152 | | Slab<int>* index_; // kEmptyEntry, kDeletedEntry, or a valid index into |
153 | | // keys_ and values_ |
154 | | Slab<K>* keys_; // Dict<K, int> |
155 | | Slab<V>* values_; // Dict<int, V> |
156 | | |
157 | | // A dict has 3 pointers the GC needs to follow. |
158 | 172 | static constexpr uint32_t field_mask() { |
159 | 172 | return maskbit(offsetof(Dict, index_)) | maskbit(offsetof(Dict, keys_)) | |
160 | 172 | maskbit(offsetof(Dict, values_)); |
161 | 172 | }
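A worked sketch of what this mask encodes, under assumptions from gc_obj.h that are not restated here: maskbit(off) sets bit off / sizeof(void*), and on a 64-bit build the three leading ints pad out to a pointer boundary:

//   offsetof(Dict, index_)  == 16  ->  bit 2
//   offsetof(Dict, keys_)   == 24  ->  bit 3
//   offsetof(Dict, values_) == 32  ->  bit 4
//   field_mask()            == 0b11100
// The collector uses this mask to trace exactly the three Slab pointers in a Dict.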
162 | | |
163 | | DISALLOW_COPY_AND_ASSIGN(Dict); |
164 | | |
165 | | // kItemSize is the max of the K and V sizes. That is, on 64-bit machines, the RARE |
166 | | // Dict<int, int> is smaller than other dicts. |
167 | | static constexpr int kItemSize = sizeof(K) > sizeof(V) ? sizeof(K) |
168 | | : sizeof(V); |
169 | | |
170 | | // Matches mark_sweep_heap.h |
171 | | static constexpr int kPoolBytes2 = 48 - sizeof(ObjHeader); |
172 | | static_assert(kPoolBytes2 % kItemSize == 0, |
173 | | "An integral number of items should fit in second pool"); |
174 | | static constexpr int kNumItems2 = kPoolBytes2 / kItemSize; |
175 | | |
176 | | static const int kHeaderFudge = sizeof(ObjHeader) / kItemSize; |
177 | | static_assert(sizeof(ObjHeader) % kItemSize == 0, |
178 | | "Slab header size should be multiple of key size"); |
179 | | |
180 | | #if 0 |
181 | | static constexpr int kMinBytes2 = 128 - sizeof(ObjHeader); |
182 | | static_assert(kMinBytes2 % kItemSize == 0, |
183 | | "An integral number of items should fit"); |
184 | | static constexpr int kMinItems2 = kMinBytes2 / kItemSize; |
185 | | #endif |
186 | | |
187 | 73 | int HowManyPairs(int num_desired) { |
188 | | // See gc_list.h for comments on nearly identical logic |
189 | | |
190 | 73 | if (num_desired <= kNumItems2) { // use full cell in pool 2 |
191 | 30 | return kNumItems2; |
192 | 30 | } |
193 | | #if 0 |
194 | | if (num_desired <= kMinItems2) { // 48 -> 128, not 48 -> 64 |
195 | | return kMinItems2; |
196 | | } |
197 | | #endif |
198 | 43 | return RoundUp(num_desired + kHeaderFudge) - kHeaderFudge; |
199 | 73 | }
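A worked example of this sizing, under two assumptions not stated in this file: sizeof(ObjHeader) == 8, and RoundUp() rounds up to the next power of 2 (see gc_list.h):

// For Dict<BigStr*, int> on a 64-bit build, kItemSize == 8, so:
//   kPoolBytes2  == 48 - 8 == 40   ->  kNumItems2 == 5
//   kHeaderFudge == 8 / 8  == 1
//   HowManyPairs(4) -> 5                        (fits a pool-2 cell)
//   HowManyPairs(6) -> RoundUp(6 + 1) - 1 == 7  (7 items plus the header fill a 64-byte slab)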
200 | | }; |
201 | | |
202 | | template <typename K, typename V> |
203 | 9 | inline bool dict_contains(const Dict<K, V>* haystack, K needle) { |
204 | 9 | return haystack->find_kv_index(needle) != kNotFound; |
205 | 9 | }
206 | | |
207 | | template <typename K, typename V> |
208 | 74 | void Dict<K, V>::reserve(int num_desired) { |
209 | 74 | if (capacity_ >= num_desired) { |
210 | 1 | return; // Don't do anything if there's already enough space. |
211 | 1 | } |
212 | | |
213 | 73 | int old_len = len_; |
214 | 73 | Slab<K>* old_k = keys_; |
215 | 73 | Slab<V>* old_v = values_; |
216 | | |
217 | | // Calculate the number of keys and values we should have |
218 | 73 | capacity_ = HowManyPairs(num_desired); |
219 | | |
220 | | // 1) Ensure index len is a power of 2, to avoid the expensive modulus % operation |
221 | | // 2) Introduce hash table load factor. Use capacity_+1 to simulate ceil() |
222 | | // div, not floor() div. |
223 | 73 | index_len_ = RoundUp((capacity_ + 1) * 5 / 4); |
224 | 73 | DCHECK(index_len_ > capacity_); |
225 | | |
226 | 0 | index_ = NewSlab<int>(index_len_); |
227 | 133k | for (int i = 0; i < index_len_; ++i) { |
228 | 133k | index_->items_[i] = kEmptyEntry; |
229 | 133k | } |
230 | | |
231 | | // These are DENSE, while index_ is sparse. |
232 | 73 | keys_ = NewSlab<K>(capacity_); |
233 | 73 | values_ = NewSlab<V>(capacity_); |
234 | | |
235 | 73 | if (old_k != nullptr) { // rehash if there were any entries |
236 | | // log("REHASH num_desired %d", num_desired); |
237 | 43 | len_ = 0; |
238 | 33.2k | for (int i = 0; i < old_len; ++i) { |
239 | 33.2k | set(old_k->items_[i], old_v->items_[i]); |
240 | 33.2k | } |
241 | 43 | } |
242 | 73 | }
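A worked example of the index sizing above, again assuming RoundUp() rounds up to the next power of 2:

//   capacity_ == 7   ->  index_len_ == RoundUp((7 + 1) * 5 / 4)  == RoundUp(10) == 16
//   capacity_ == 13  ->  index_len_ == RoundUp((13 + 1) * 5 / 4) == RoundUp(17) == 32
// At most capacity_ of the index_len_ slots are ever occupied, so the index stays
// under ~80% full, and index_len_ remains a power of 2 for the bit-masking in
// hash_and_probe().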
243 | | |
244 | | template <typename K, typename V> |
245 | 8.11k | V Dict<K, V>::at(K key) const { |
246 | 8.11k | int kv_index = find_kv_index(key); |
247 | 8.11k | if (kv_index == kNotFound) { |
248 | 0 | throw Alloc<KeyError>(); |
249 | 8.11k | } else { |
250 | 8.11k | return values_->items_[kv_index]; |
251 | 8.11k | } |
252 | 8.11k | }
253 | | |
254 | | template <typename K, typename V> |
255 | 3 | V Dict<K, V>::get(K key) const { |
256 | 3 | int kv_index = find_kv_index(key); |
257 | 3 | if (kv_index == kNotFound) { |
258 | 2 | return nullptr; |
259 | 2 | } else { |
260 | 1 | return values_->items_[kv_index]; |
261 | 1 | } |
262 | 3 | }
263 | | |
264 | | template <typename K, typename V> |
265 | 1 | V Dict<K, V>::get(K key, V default_val) const { |
266 | 1 | int kv_index = find_kv_index(key); |
267 | 1 | if (kv_index == kNotFound) { |
268 | 1 | return default_val; |
269 | 1 | } else { |
270 | 0 | return values_->items_[kv_index]; |
271 | 0 | } |
272 | 1 | }
273 | | |
274 | | template <typename K, typename V> |
275 | 7 | List<K>* Dict<K, V>::keys() const { |
276 | 7 | return ListFromDictSlab<K>(keys_, len_); |
277 | 7 | }
278 | | |
279 | | template <typename K, typename V> |
280 | 2 | List<V>* Dict<K, V>::values() const { |
281 | 2 | return ListFromDictSlab<V>(values_, len_); |
282 | 2 | }
283 | | |
284 | | template <typename K, typename V> |
285 | 4 | void Dict<K, V>::clear() { |
286 | | // Maintain invariant |
287 | 396 | for (int i = 0; i < index_len_; ++i) { |
288 | 392 | index_->items_[i] = kEmptyEntry; |
289 | 392 | } |
290 | | |
291 | 4 | if (keys_) { |
292 | 3 | memset(keys_->items_, 0, len_ * sizeof(K)); // zero for GC scan |
293 | 3 | } |
294 | 4 | if (values_) { |
295 | 3 | memset(values_->items_, 0, len_ * sizeof(V)); // zero for GC scan |
296 | 3 | } |
297 | 4 | len_ = 0; |
298 | 4 | }
299 | | |
300 | | // TODO: |
301 | | // - Special case to intern BigStr* when it's hashed? How? |
302 | | // - Should we have wrappers like: |
303 | | // - V GetAndIntern<V>(D, &string_key) |
304 | | // - SetAndIntern<V>(D, &string_key, value) |
305 | | // This will enable duplicate copies of the string to be garbage collected |
306 | | template <typename K, typename V> |
307 | 98.0k | int Dict<K, V>::hash_and_probe(K key) const { |
308 | 98.0k | if (capacity_ == 0) { |
309 | 29 | return kTooSmall; |
310 | 29 | } |
311 | | |
312 | | // Hash the key onto a slot in the index. If the first slot is occupied, |
313 | | // probe until an empty one is found. |
314 | 98.0k | unsigned h = hash_key(key); |
315 | | // faster % using & -- assuming index_len_ is power of 2 |
316 | 98.0k | int init_bucket = h & (index_len_ - 1); |
317 | | |
318 | | // If we see a tombstone along the probing path, stash it. |
319 | 98.0k | int open_slot = -1; |
320 | | |
321 | 131k | for (int i = 0; i < index_len_; ++i) { |
322 | | // Start at init_bucket and wrap around |
323 | | |
324 | | // faster % using & -- assuming index_len_ is power of 2 |
325 | 131k | int slot = (i + init_bucket) & (index_len_ - 1); |
326 | | |
327 | 131k | int kv_index = index_->items_[slot]; |
328 | 131k | DCHECK(kv_index < len_); |
329 | | // Optimistically this is the common case once the table has been populated. |
330 | 131k | if (kv_index >= 0) { |
331 | 65.7k | unsigned h2 = hash_key(keys_->items_[kv_index]); |
332 | 65.7k | if (h == h2 && keys_equal(keys_->items_[kv_index], key)) { |
333 | 36.2k | return slot; |
334 | 36.2k | } |
335 | 65.7k | } |
336 | | |
337 | 94.8k | if (kv_index == kEmptyEntry) { |
338 | 61.8k | if (open_slot != -1) { |
339 | 3 | slot = open_slot; |
340 | 3 | } |
341 | | // If there isn't room in the entry arrays, tell the caller to resize. |
342 | 61.8k | return len_ < capacity_ ? slot : kTooSmall; |
343 | 61.8k | } |
344 | | |
345 | | // Tombstone or collided keys unequal. Keep scanning. |
346 | 32.9k | DCHECK(kv_index >= 0 || kv_index == kDeletedEntry); |
347 | 32.9k | if (kv_index == kDeletedEntry && open_slot == -1) { |
348 | | // NOTE: We only record the open slot here. We DON'T return it. If we're |
349 | | // looking for a key that was written before this tombstone was written to |
350 | | // the index, we should continue probing until we get to that key. If we |
351 | | // get to an empty index slot or the end of the index then we know we are |
352 | | // dealing with a new key and can safely replace the tombstone without |
353 | | // disrupting any existing keys. |
354 | 2.52k | open_slot = slot; |
355 | 2.52k | } |
356 | 32.9k | } |
357 | | |
358 | 1 | if (open_slot != -1) { |
359 | 1 | return len_ < capacity_ ? open_slot : kTooSmall; |
360 | 1 | } |
361 | | |
362 | 0 | return kTooSmall; |
363 | 1 | } _ZNK4DictIP5PointP6BigStrE14hash_and_probeES1_ Line | Count | Source | 307 | 95 | int Dict<K, V>::hash_and_probe(K key) const { | 308 | 95 | if (capacity_ == 0) { | 309 | 1 | return kTooSmall; | 310 | 1 | } | 311 | | | 312 | | // Hash the key onto a slot in the index. If the first slot is occupied, | 313 | | // probe until an empty one is found. | 314 | 94 | unsigned h = hash_key(key); | 315 | | // faster % using & -- assuming index_len_ is power of 2 | 316 | 94 | int init_bucket = h & (index_len_ - 1); | 317 | | | 318 | | // If we see a tombstone along the probing path, stash it. | 319 | 94 | int open_slot = -1; | 320 | | | 321 | 149 | for (int i = 0; i < index_len_; ++i) { | 322 | | // Start at init_bucket and wrap araound | 323 | | | 324 | | // faster % using & -- assuming index_len_ is power of 2 | 325 | 149 | int slot = (i + init_bucket) & (index_len_ - 1); | 326 | | | 327 | 149 | int kv_index = index_->items_[slot]; | 328 | 149 | DCHECK(kv_index < len_); | 329 | | // Optimistically this is the common case once the table has been populated. | 330 | 149 | if (kv_index >= 0) { | 331 | 55 | unsigned h2 = hash_key(keys_->items_[kv_index]); | 332 | 55 | if (h == h2 && keys_equal(keys_->items_[kv_index], key)) { | 333 | 0 | return slot; | 334 | 0 | } | 335 | 55 | } | 336 | | | 337 | 149 | if (kv_index == kEmptyEntry) { | 338 | 94 | if (open_slot != -1) { | 339 | 0 | slot = open_slot; | 340 | 0 | } | 341 | | // If there isn't room in the entry arrays, tell the caller to resize. | 342 | 94 | return len_ < capacity_ ? slot : kTooSmall; | 343 | 94 | } | 344 | | | 345 | | // Tombstone or collided keys unequal. Keep scanning. | 346 | 55 | DCHECK(kv_index >= 0 || kv_index == kDeletedEntry); | 347 | 55 | if (kv_index == kDeletedEntry && open_slot == -1) { | 348 | | // NOTE: We only record the open slot here. We DON'T return it. If we're | 349 | | // looking for a key that was writen before this tombstone was written to | 350 | | // the index we should continue probing until we get to that key. If we | 351 | | // get to an empty index slot or the end of the index then we know we are | 352 | | // dealing with a new key and can safely replace the tombstone without | 353 | | // disrupting any existing keys. | 354 | 0 | open_slot = slot; | 355 | 0 | } | 356 | 55 | } | 357 | | | 358 | 0 | if (open_slot != -1) { | 359 | 0 | return len_ < capacity_ ? open_slot : kTooSmall; | 360 | 0 | } | 361 | | | 362 | 0 | return kTooSmall; | 363 | 0 | } |
_ZNK4DictIiP6BigStrE14hash_and_probeEi Line | Count | Source | 307 | 124 | int Dict<K, V>::hash_and_probe(K key) const { | 308 | 124 | if (capacity_ == 0) { | 309 | 6 | return kTooSmall; | 310 | 6 | } | 311 | | | 312 | | // Hash the key onto a slot in the index. If the first slot is occupied, | 313 | | // probe until an empty one is found. | 314 | 118 | unsigned h = hash_key(key); | 315 | | // faster % using & -- assuming index_len_ is power of 2 | 316 | 118 | int init_bucket = h & (index_len_ - 1); | 317 | | | 318 | | // If we see a tombstone along the probing path, stash it. | 319 | 118 | int open_slot = -1; | 320 | | | 321 | 209 | for (int i = 0; i < index_len_; ++i) { | 322 | | // Start at init_bucket and wrap araound | 323 | | | 324 | | // faster % using & -- assuming index_len_ is power of 2 | 325 | 209 | int slot = (i + init_bucket) & (index_len_ - 1); | 326 | | | 327 | 209 | int kv_index = index_->items_[slot]; | 328 | 209 | DCHECK(kv_index < len_); | 329 | | // Optimistically this is the common case once the table has been populated. | 330 | 209 | if (kv_index >= 0) { | 331 | 96 | unsigned h2 = hash_key(keys_->items_[kv_index]); | 332 | 96 | if (h == h2 && keys_equal(keys_->items_[kv_index], key)) { | 333 | 5 | return slot; | 334 | 5 | } | 335 | 96 | } | 336 | | | 337 | 204 | if (kv_index == kEmptyEntry) { | 338 | 113 | if (open_slot != -1) { | 339 | 0 | slot = open_slot; | 340 | 0 | } | 341 | | // If there isn't room in the entry arrays, tell the caller to resize. | 342 | 113 | return len_ < capacity_ ? slot : kTooSmall; | 343 | 113 | } | 344 | | | 345 | | // Tombstone or collided keys unequal. Keep scanning. | 346 | 91 | DCHECK(kv_index >= 0 || kv_index == kDeletedEntry); | 347 | 91 | if (kv_index == kDeletedEntry && open_slot == -1) { | 348 | | // NOTE: We only record the open slot here. We DON'T return it. If we're | 349 | | // looking for a key that was writen before this tombstone was written to | 350 | | // the index we should continue probing until we get to that key. If we | 351 | | // get to an empty index slot or the end of the index then we know we are | 352 | | // dealing with a new key and can safely replace the tombstone without | 353 | | // disrupting any existing keys. | 354 | 0 | open_slot = slot; | 355 | 0 | } | 356 | 91 | } | 357 | | | 358 | 0 | if (open_slot != -1) { | 359 | 0 | return len_ < capacity_ ? open_slot : kTooSmall; | 360 | 0 | } | 361 | | | 362 | 0 | return kTooSmall; | 363 | 0 | } |
_ZNK4DictIlP6BigStrE14hash_and_probeEl Line | Count | Source | 307 | 80.9k | int Dict<K, V>::hash_and_probe(K key) const { | 308 | 80.9k | if (capacity_ == 0) { | 309 | 2 | return kTooSmall; | 310 | 2 | } | 311 | | | 312 | | // Hash the key onto a slot in the index. If the first slot is occupied, | 313 | | // probe until an empty one is found. | 314 | 80.9k | unsigned h = hash_key(key); | 315 | | // faster % using & -- assuming index_len_ is power of 2 | 316 | 80.9k | int init_bucket = h & (index_len_ - 1); | 317 | | | 318 | | // If we see a tombstone along the probing path, stash it. | 319 | 80.9k | int open_slot = -1; | 320 | | | 321 | 106k | for (int i = 0; i < index_len_; ++i) { | 322 | | // Start at init_bucket and wrap araound | 323 | | | 324 | | // faster % using & -- assuming index_len_ is power of 2 | 325 | 106k | int slot = (i + init_bucket) & (index_len_ - 1); | 326 | | | 327 | 106k | int kv_index = index_->items_[slot]; | 328 | 106k | DCHECK(kv_index < len_); | 329 | | // Optimistically this is the common case once the table has been populated. | 330 | 106k | if (kv_index >= 0) { | 331 | 45.3k | unsigned h2 = hash_key(keys_->items_[kv_index]); | 332 | 45.3k | if (h == h2 && keys_equal(keys_->items_[kv_index], key)) { | 333 | 19.9k | return slot; | 334 | 19.9k | } | 335 | 45.3k | } | 336 | | | 337 | 86.3k | if (kv_index == kEmptyEntry) { | 338 | 61.0k | if (open_slot != -1) { | 339 | 0 | slot = open_slot; | 340 | 0 | } | 341 | | // If there isn't room in the entry arrays, tell the caller to resize. | 342 | 61.0k | return len_ < capacity_ ? slot : kTooSmall; | 343 | 61.0k | } | 344 | | | 345 | | // Tombstone or collided keys unequal. Keep scanning. | 346 | 25.3k | DCHECK(kv_index >= 0 || kv_index == kDeletedEntry); | 347 | 25.3k | if (kv_index == kDeletedEntry && open_slot == -1) { | 348 | | // NOTE: We only record the open slot here. We DON'T return it. If we're | 349 | | // looking for a key that was writen before this tombstone was written to | 350 | | // the index we should continue probing until we get to that key. If we | 351 | | // get to an empty index slot or the end of the index then we know we are | 352 | | // dealing with a new key and can safely replace the tombstone without | 353 | | // disrupting any existing keys. | 354 | 0 | open_slot = slot; | 355 | 0 | } | 356 | 25.3k | } | 357 | | | 358 | 0 | if (open_slot != -1) { | 359 | 0 | return len_ < capacity_ ? open_slot : kTooSmall; | 360 | 0 | } | 361 | | | 362 | 0 | return kTooSmall; | 363 | 0 | } |
_ZNK4DictIP6BigStriE14hash_and_probeES1_ Line | Count | Source | 307 | 184 | int Dict<K, V>::hash_and_probe(K key) const { | 308 | 184 | if (capacity_ == 0) { | 309 | 9 | return kTooSmall; | 310 | 9 | } | 311 | | | 312 | | // Hash the key onto a slot in the index. If the first slot is occupied, | 313 | | // probe until an empty one is found. | 314 | 175 | unsigned h = hash_key(key); | 315 | | // faster % using & -- assuming index_len_ is power of 2 | 316 | 175 | int init_bucket = h & (index_len_ - 1); | 317 | | | 318 | | // If we see a tombstone along the probing path, stash it. | 319 | 175 | int open_slot = -1; | 320 | | | 321 | 230 | for (int i = 0; i < index_len_; ++i) { | 322 | | // Start at init_bucket and wrap araound | 323 | | | 324 | | // faster % using & -- assuming index_len_ is power of 2 | 325 | 230 | int slot = (i + init_bucket) & (index_len_ - 1); | 326 | | | 327 | 230 | int kv_index = index_->items_[slot]; | 328 | 230 | DCHECK(kv_index < len_); | 329 | | // Optimistically this is the common case once the table has been populated. | 330 | 230 | if (kv_index >= 0) { | 331 | 63 | unsigned h2 = hash_key(keys_->items_[kv_index]); | 332 | 63 | if (h == h2 && keys_equal(keys_->items_[kv_index], key)) { | 333 | 9 | return slot; | 334 | 9 | } | 335 | 63 | } | 336 | | | 337 | 221 | if (kv_index == kEmptyEntry) { | 338 | 166 | if (open_slot != -1) { | 339 | 1 | slot = open_slot; | 340 | 1 | } | 341 | | // If there isn't room in the entry arrays, tell the caller to resize. | 342 | 166 | return len_ < capacity_ ? slot : kTooSmall; | 343 | 166 | } | 344 | | | 345 | | // Tombstone or collided keys unequal. Keep scanning. | 346 | 55 | DCHECK(kv_index >= 0 || kv_index == kDeletedEntry); | 347 | 55 | if (kv_index == kDeletedEntry && open_slot == -1) { | 348 | | // NOTE: We only record the open slot here. We DON'T return it. If we're | 349 | | // looking for a key that was writen before this tombstone was written to | 350 | | // the index we should continue probing until we get to that key. If we | 351 | | // get to an empty index slot or the end of the index then we know we are | 352 | | // dealing with a new key and can safely replace the tombstone without | 353 | | // disrupting any existing keys. | 354 | 1 | open_slot = slot; | 355 | 1 | } | 356 | 55 | } | 357 | | | 358 | 0 | if (open_slot != -1) { | 359 | 0 | return len_ < capacity_ ? open_slot : kTooSmall; | 360 | 0 | } | 361 | | | 362 | 0 | return kTooSmall; | 363 | 0 | } |
_ZNK4DictIP6BigStrS1_E14hash_and_probeES1_ Line | Count | Source | 307 | 6 | int Dict<K, V>::hash_and_probe(K key) const { | 308 | 6 | if (capacity_ == 0) { | 309 | 2 | return kTooSmall; | 310 | 2 | } | 311 | | | 312 | | // Hash the key onto a slot in the index. If the first slot is occupied, | 313 | | // probe until an empty one is found. | 314 | 4 | unsigned h = hash_key(key); | 315 | | // faster % using & -- assuming index_len_ is power of 2 | 316 | 4 | int init_bucket = h & (index_len_ - 1); | 317 | | | 318 | | // If we see a tombstone along the probing path, stash it. | 319 | 4 | int open_slot = -1; | 320 | | | 321 | 4 | for (int i = 0; i < index_len_; ++i) { | 322 | | // Start at init_bucket and wrap araound | 323 | | | 324 | | // faster % using & -- assuming index_len_ is power of 2 | 325 | 4 | int slot = (i + init_bucket) & (index_len_ - 1); | 326 | | | 327 | 4 | int kv_index = index_->items_[slot]; | 328 | 4 | DCHECK(kv_index < len_); | 329 | | // Optimistically this is the common case once the table has been populated. | 330 | 4 | if (kv_index >= 0) { | 331 | 2 | unsigned h2 = hash_key(keys_->items_[kv_index]); | 332 | 2 | if (h == h2 && keys_equal(keys_->items_[kv_index], key)) { | 333 | 2 | return slot; | 334 | 2 | } | 335 | 2 | } | 336 | | | 337 | 2 | if (kv_index == kEmptyEntry) { | 338 | 2 | if (open_slot != -1) { | 339 | 0 | slot = open_slot; | 340 | 0 | } | 341 | | // If there isn't room in the entry arrays, tell the caller to resize. | 342 | 2 | return len_ < capacity_ ? slot : kTooSmall; | 343 | 2 | } | 344 | | | 345 | | // Tombstone or collided keys unequal. Keep scanning. | 346 | 0 | DCHECK(kv_index >= 0 || kv_index == kDeletedEntry); | 347 | 0 | if (kv_index == kDeletedEntry && open_slot == -1) { | 348 | | // NOTE: We only record the open slot here. We DON'T return it. If we're | 349 | | // looking for a key that was writen before this tombstone was written to | 350 | | // the index we should continue probing until we get to that key. If we | 351 | | // get to an empty index slot or the end of the index then we know we are | 352 | | // dealing with a new key and can safely replace the tombstone without | 353 | | // disrupting any existing keys. | 354 | 0 | open_slot = slot; | 355 | 0 | } | 356 | 0 | } | 357 | | | 358 | 0 | if (open_slot != -1) { | 359 | 0 | return len_ < capacity_ ? open_slot : kTooSmall; | 360 | 0 | } | 361 | | | 362 | 0 | return kTooSmall; | 363 | 0 | } |
_ZNK4DictIiiE14hash_and_probeEi Line | Count | Source | 307 | 16.6k | int Dict<K, V>::hash_and_probe(K key) const { | 308 | 16.6k | if (capacity_ == 0) { | 309 | 7 | return kTooSmall; | 310 | 7 | } | 311 | | | 312 | | // Hash the key onto a slot in the index. If the first slot is occupied, | 313 | | // probe until an empty one is found. | 314 | 16.6k | unsigned h = hash_key(key); | 315 | | // faster % using & -- assuming index_len_ is power of 2 | 316 | 16.6k | int init_bucket = h & (index_len_ - 1); | 317 | | | 318 | | // If we see a tombstone along the probing path, stash it. | 319 | 16.6k | int open_slot = -1; | 320 | | | 321 | 24.0k | for (int i = 0; i < index_len_; ++i) { | 322 | | // Start at init_bucket and wrap araound | 323 | | | 324 | | // faster % using & -- assuming index_len_ is power of 2 | 325 | 24.0k | int slot = (i + init_bucket) & (index_len_ - 1); | 326 | | | 327 | 24.0k | int kv_index = index_->items_[slot]; | 328 | 24.0k | DCHECK(kv_index < len_); | 329 | | // Optimistically this is the common case once the table has been populated. | 330 | 24.0k | if (kv_index >= 0) { | 331 | 20.2k | unsigned h2 = hash_key(keys_->items_[kv_index]); | 332 | 20.2k | if (h == h2 && keys_equal(keys_->items_[kv_index], key)) { | 333 | 16.2k | return slot; | 334 | 16.2k | } | 335 | 20.2k | } | 336 | | | 337 | 7.85k | if (kv_index == kEmptyEntry) { | 338 | 421 | if (open_slot != -1) { | 339 | 2 | slot = open_slot; | 340 | 2 | } | 341 | | // If there isn't room in the entry arrays, tell the caller to resize. | 342 | 421 | return len_ < capacity_ ? slot : kTooSmall; | 343 | 421 | } | 344 | | | 345 | | // Tombstone or collided keys unequal. Keep scanning. | 346 | 7.43k | DCHECK(kv_index >= 0 || kv_index == kDeletedEntry); | 347 | 7.43k | if (kv_index == kDeletedEntry && open_slot == -1) { | 348 | | // NOTE: We only record the open slot here. We DON'T return it. If we're | 349 | | // looking for a key that was writen before this tombstone was written to | 350 | | // the index we should continue probing until we get to that key. If we | 351 | | // get to an empty index slot or the end of the index then we know we are | 352 | | // dealing with a new key and can safely replace the tombstone without | 353 | | // disrupting any existing keys. | 354 | 2.52k | open_slot = slot; | 355 | 2.52k | } | 356 | 7.43k | } | 357 | | | 358 | 1 | if (open_slot != -1) { | 359 | 1 | return len_ < capacity_ ? open_slot : kTooSmall; | 360 | 1 | } | 361 | | | 362 | 0 | return kTooSmall; | 363 | 1 | } |
_ZNK4DictIP6Tuple2IiiEiE14hash_and_probeES2_ Line | Count | Source | 307 | 5 | int Dict<K, V>::hash_and_probe(K key) const { | 308 | 5 | if (capacity_ == 0) { | 309 | 1 | return kTooSmall; | 310 | 1 | } | 311 | | | 312 | | // Hash the key onto a slot in the index. If the first slot is occupied, | 313 | | // probe until an empty one is found. | 314 | 4 | unsigned h = hash_key(key); | 315 | | // faster % using & -- assuming index_len_ is power of 2 | 316 | 4 | int init_bucket = h & (index_len_ - 1); | 317 | | | 318 | | // If we see a tombstone along the probing path, stash it. | 319 | 4 | int open_slot = -1; | 320 | | | 321 | 4 | for (int i = 0; i < index_len_; ++i) { | 322 | | // Start at init_bucket and wrap araound | 323 | | | 324 | | // faster % using & -- assuming index_len_ is power of 2 | 325 | 4 | int slot = (i + init_bucket) & (index_len_ - 1); | 326 | | | 327 | 4 | int kv_index = index_->items_[slot]; | 328 | 4 | DCHECK(kv_index < len_); | 329 | | // Optimistically this is the common case once the table has been populated. | 330 | 4 | if (kv_index >= 0) { | 331 | 2 | unsigned h2 = hash_key(keys_->items_[kv_index]); | 332 | 2 | if (h == h2 && keys_equal(keys_->items_[kv_index], key)) { | 333 | 2 | return slot; | 334 | 2 | } | 335 | 2 | } | 336 | | | 337 | 2 | if (kv_index == kEmptyEntry) { | 338 | 2 | if (open_slot != -1) { | 339 | 0 | slot = open_slot; | 340 | 0 | } | 341 | | // If there isn't room in the entry arrays, tell the caller to resize. | 342 | 2 | return len_ < capacity_ ? slot : kTooSmall; | 343 | 2 | } | 344 | | | 345 | | // Tombstone or collided keys unequal. Keep scanning. | 346 | 0 | DCHECK(kv_index >= 0 || kv_index == kDeletedEntry); | 347 | 0 | if (kv_index == kDeletedEntry && open_slot == -1) { | 348 | | // NOTE: We only record the open slot here. We DON'T return it. If we're | 349 | | // looking for a key that was writen before this tombstone was written to | 350 | | // the index we should continue probing until we get to that key. If we | 351 | | // get to an empty index slot or the end of the index then we know we are | 352 | | // dealing with a new key and can safely replace the tombstone without | 353 | | // disrupting any existing keys. | 354 | 0 | open_slot = slot; | 355 | 0 | } | 356 | 0 | } | 357 | | | 358 | 0 | if (open_slot != -1) { | 359 | 0 | return len_ < capacity_ ? open_slot : kTooSmall; | 360 | 0 | } | 361 | | | 362 | 0 | return kTooSmall; | 363 | 0 | } |
_ZNK4DictIP6Tuple2IP6BigStriEiE14hash_and_probeES4_ Line | Count | Source | 307 | 5 | int Dict<K, V>::hash_and_probe(K key) const { | 308 | 5 | if (capacity_ == 0) { | 309 | 1 | return kTooSmall; | 310 | 1 | } | 311 | | | 312 | | // Hash the key onto a slot in the index. If the first slot is occupied, | 313 | | // probe until an empty one is found. | 314 | 4 | unsigned h = hash_key(key); | 315 | | // faster % using & -- assuming index_len_ is power of 2 | 316 | 4 | int init_bucket = h & (index_len_ - 1); | 317 | | | 318 | | // If we see a tombstone along the probing path, stash it. | 319 | 4 | int open_slot = -1; | 320 | | | 321 | 4 | for (int i = 0; i < index_len_; ++i) { | 322 | | // Start at init_bucket and wrap araound | 323 | | | 324 | | // faster % using & -- assuming index_len_ is power of 2 | 325 | 4 | int slot = (i + init_bucket) & (index_len_ - 1); | 326 | | | 327 | 4 | int kv_index = index_->items_[slot]; | 328 | 4 | DCHECK(kv_index < len_); | 329 | | // Optimistically this is the common case once the table has been populated. | 330 | 4 | if (kv_index >= 0) { | 331 | 2 | unsigned h2 = hash_key(keys_->items_[kv_index]); | 332 | 2 | if (h == h2 && keys_equal(keys_->items_[kv_index], key)) { | 333 | 2 | return slot; | 334 | 2 | } | 335 | 2 | } | 336 | | | 337 | 2 | if (kv_index == kEmptyEntry) { | 338 | 2 | if (open_slot != -1) { | 339 | 0 | slot = open_slot; | 340 | 0 | } | 341 | | // If there isn't room in the entry arrays, tell the caller to resize. | 342 | 2 | return len_ < capacity_ ? slot : kTooSmall; | 343 | 2 | } | 344 | | | 345 | | // Tombstone or collided keys unequal. Keep scanning. | 346 | 0 | DCHECK(kv_index >= 0 || kv_index == kDeletedEntry); | 347 | 0 | if (kv_index == kDeletedEntry && open_slot == -1) { | 348 | | // NOTE: We only record the open slot here. We DON'T return it. If we're | 349 | | // looking for a key that was writen before this tombstone was written to | 350 | | // the index we should continue probing until we get to that key. If we | 351 | | // get to an empty index slot or the end of the index then we know we are | 352 | | // dealing with a new key and can safely replace the tombstone without | 353 | | // disrupting any existing keys. | 354 | 0 | open_slot = slot; | 355 | 0 | } | 356 | 0 | } | 357 | | | 358 | 0 | if (open_slot != -1) { | 359 | 0 | return len_ < capacity_ ? open_slot : kTooSmall; | 360 | 0 | } | 361 | | | 362 | 0 | return kTooSmall; | 363 | 0 | } |
|
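The loop above implements open addressing over a power-of-two index, so h % index_len_ reduces to h & (index_len_ - 1), and it reuses a tombstone only once the key is known to be absent (otherwise a key written before the tombstone could be shadowed). Below is a minimal standalone sketch of that scheme, not part of gc_dict.h: demo_probe, kDemoEmpty, and kDemoDeleted are hypothetical names, and std::vector stands in for the GC slabs.

#include <vector>

const int kDemoEmpty = -2;    // slot never used
const int kDemoDeleted = -1;  // tombstone left behind by a deletion

// index: positions into `keys`, or kDemoEmpty / kDemoDeleted.
// Returns the slot where `key` lives or should be inserted, or -1 if the
// index has no usable slot at all.
int demo_probe(const std::vector<int>& index, const std::vector<int>& keys,
               int key) {
  unsigned h = static_cast<unsigned>(key);        // stand-in for hash_key()
  int mask = static_cast<int>(index.size()) - 1;  // index.size() is 2^k
  int init_bucket = static_cast<int>(h & static_cast<unsigned>(mask));
  int open_slot = -1;
  for (int i = 0; i < static_cast<int>(index.size()); ++i) {
    int slot = (i + init_bucket) & mask;  // linear probe with wrap-around
    int kv = index[slot];
    if (kv >= 0 && keys[kv] == key) {
      return slot;  // key already present
    }
    if (kv == kDemoEmpty) {
      // Key is absent; now it is safe to fall back to an earlier tombstone.
      return open_slot != -1 ? open_slot : slot;
    }
    if (kv == kDemoDeleted && open_slot == -1) {
      open_slot = slot;  // remember it, but keep probing for the key
    }
  }
  return open_slot;  // -1 only if every slot holds a live entry
}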
364 | | |
365 | | template <typename K, typename V> |
366 | 8.12k | int Dict<K, V>::find_kv_index(K key) const { |
367 | 8.12k | if (index_ != nullptr) { // Common case |
368 | 8.11k | int pos = hash_and_probe(key); |
369 | 8.11k | if (pos == kTooSmall) { |
370 | 0 | return kNotFound; |
371 | 0 | } |
372 | 8.11k | DCHECK(pos >= 0); |
373 | 0 | int kv_index = index_->items_[pos]; |
374 | 8.11k | if (kv_index < 0) { |
375 | 5 | return kNotFound; |
376 | 5 | } |
377 | 8.11k | return kv_index; |
378 | 8.11k | } |
379 | | |
380 | | // Linear search on GlobalDict instances. |
381 | | // TODO: Should we populate and compare their hash values? |
382 | 14 | for (int i = 0; i < len_; ++i) { |
383 | 11 | if (keys_equal(keys_->items_[i], key)) { |
384 | 6 | return i; |
385 | 6 | } |
386 | 11 | } |
387 | | |
388 | 3 | return kNotFound; |
389 | 9 | } _ZNK4DictIiP6BigStrE13find_kv_indexEi Line | Count | Source | 366 | 7 | int Dict<K, V>::find_kv_index(K key) const { | 367 | 7 | if (index_ != nullptr) { // Common case | 368 | 7 | int pos = hash_and_probe(key); | 369 | 7 | if (pos == kTooSmall) { | 370 | 0 | return kNotFound; | 371 | 0 | } | 372 | 7 | DCHECK(pos >= 0); | 373 | 0 | int kv_index = index_->items_[pos]; | 374 | 7 | if (kv_index < 0) { | 375 | 2 | return kNotFound; | 376 | 2 | } | 377 | 5 | return kv_index; | 378 | 7 | } | 379 | | | 380 | | // Linear search on GlobalDict instances. | 381 | | // TODO: Should we populate and compare their hash values? | 382 | 0 | for (int i = 0; i < len_; ++i) { | 383 | 0 | if (keys_equal(keys_->items_[i], key)) { | 384 | 0 | return i; | 385 | 0 | } | 386 | 0 | } | 387 | | | 388 | 0 | return kNotFound; | 389 | 0 | } |
_ZNK4DictIP6BigStriE13find_kv_indexES1_ Line | Count | Source | 366 | 9 | int Dict<K, V>::find_kv_index(K key) const { | 367 | 9 | if (index_ != nullptr) { // Common case | 368 | 9 | int pos = hash_and_probe(key); | 369 | 9 | if (pos == kTooSmall) { | 370 | 0 | return kNotFound; | 371 | 0 | } | 372 | 9 | DCHECK(pos >= 0); | 373 | 0 | int kv_index = index_->items_[pos]; | 374 | 9 | if (kv_index < 0) { | 375 | 1 | return kNotFound; | 376 | 1 | } | 377 | 8 | return kv_index; | 378 | 9 | } | 379 | | | 380 | | // Linear search on GlobalDict instances. | 381 | | // TODO: Should we populate and compare their hash values? | 382 | 0 | for (int i = 0; i < len_; ++i) { | 383 | 0 | if (keys_equal(keys_->items_[i], key)) { | 384 | 0 | return i; | 385 | 0 | } | 386 | 0 | } | 387 | | | 388 | 0 | return kNotFound; | 389 | 0 | } |
_ZNK4DictIiiE13find_kv_indexEi Line | Count | Source | 366 | 8.09k | int Dict<K, V>::find_kv_index(K key) const { | 367 | 8.09k | if (index_ != nullptr) { // Common case | 368 | 8.09k | int pos = hash_and_probe(key); | 369 | 8.09k | if (pos == kTooSmall) { | 370 | 0 | return kNotFound; | 371 | 0 | } | 372 | 8.09k | DCHECK(pos >= 0); | 373 | 0 | int kv_index = index_->items_[pos]; | 374 | 8.09k | if (kv_index < 0) { | 375 | 2 | return kNotFound; | 376 | 2 | } | 377 | 8.09k | return kv_index; | 378 | 8.09k | } | 379 | | | 380 | | // Linear search on GlobalDict instances. | 381 | | // TODO: Should we populate and compare their hash values? | 382 | 3 | for (int i = 0; i < len_; ++i) { | 383 | 3 | if (keys_equal(keys_->items_[i], key)) { | 384 | 2 | return i; | 385 | 2 | } | 386 | 3 | } | 387 | | | 388 | 0 | return kNotFound; | 389 | 2 | } |
_ZNK4DictIP6BigStrS1_E13find_kv_indexES1_ Line | Count | Source | 366 | 8 | int Dict<K, V>::find_kv_index(K key) const { | 367 | 8 | if (index_ != nullptr) { // Common case | 368 | 1 | int pos = hash_and_probe(key); | 369 | 1 | if (pos == kTooSmall) { | 370 | 0 | return kNotFound; | 371 | 0 | } | 372 | 1 | DCHECK(pos >= 0); | 373 | 0 | int kv_index = index_->items_[pos]; | 374 | 1 | if (kv_index < 0) { | 375 | 0 | return kNotFound; | 376 | 0 | } | 377 | 1 | return kv_index; | 378 | 1 | } | 379 | | | 380 | | // Linear search on GlobalDict instances. | 381 | | // TODO: Should we populate and compare their hash values? | 382 | 11 | for (int i = 0; i < len_; ++i) { | 383 | 8 | if (keys_equal(keys_->items_[i], key)) { | 384 | 4 | return i; | 385 | 4 | } | 386 | 8 | } | 387 | | | 388 | 3 | return kNotFound; | 389 | 7 | } |
_ZNK4DictIP6Tuple2IiiEiE13find_kv_indexES2_ Line | Count | Source | 366 | 2 | int Dict<K, V>::find_kv_index(K key) const { | 367 | 2 | if (index_ != nullptr) { // Common case | 368 | 2 | int pos = hash_and_probe(key); | 369 | 2 | if (pos == kTooSmall) { | 370 | 0 | return kNotFound; | 371 | 0 | } | 372 | 2 | DCHECK(pos >= 0); | 373 | 0 | int kv_index = index_->items_[pos]; | 374 | 2 | if (kv_index < 0) { | 375 | 0 | return kNotFound; | 376 | 0 | } | 377 | 2 | return kv_index; | 378 | 2 | } | 379 | | | 380 | | // Linear search on GlobalDict instances. | 381 | | // TODO: Should we populate and compare their hash values? | 382 | 0 | for (int i = 0; i < len_; ++i) { | 383 | 0 | if (keys_equal(keys_->items_[i], key)) { | 384 | 0 | return i; | 385 | 0 | } | 386 | 0 | } | 387 | | | 388 | 0 | return kNotFound; | 389 | 0 | } |
_ZNK4DictIP6Tuple2IP6BigStriEiE13find_kv_indexES4_ Line | Count | Source | 366 | 2 | int Dict<K, V>::find_kv_index(K key) const { | 367 | 2 | if (index_ != nullptr) { // Common case | 368 | 2 | int pos = hash_and_probe(key); | 369 | 2 | if (pos == kTooSmall) { | 370 | 0 | return kNotFound; | 371 | 0 | } | 372 | 2 | DCHECK(pos >= 0); | 373 | 0 | int kv_index = index_->items_[pos]; | 374 | 2 | if (kv_index < 0) { | 375 | 0 | return kNotFound; | 376 | 0 | } | 377 | 2 | return kv_index; | 378 | 2 | } | 379 | | | 380 | | // Linear search on GlobalDict instances. | 381 | | // TODO: Should we populate and compare their hash values? | 382 | 0 | for (int i = 0; i < len_; ++i) { | 383 | 0 | if (keys_equal(keys_->items_[i], key)) { | 384 | 0 | return i; | 385 | 0 | } | 386 | 0 | } | 387 | | | 388 | 0 | return kNotFound; | 389 | 0 | } |
|
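find_kv_index() hides which strategy was used: a non-negative result is a position into keys_ and values_, and kNotFound means the key is absent, whether the lookup went through the hashed index or the GlobalDict linear scan. A hypothetical caller-side sketch, assuming these members are reachable from the call site (the real class's access specifiers may differ):

// demo_get_or is illustrative only; it is not part of mycpp.
template <typename K, typename V>
V demo_get_or(const Dict<K, V>* d, K key, V default_val) {
  int kv_index = d->find_kv_index(key);
  if (kv_index == kNotFound) {
    return default_val;
  }
  return d->values_->items_[kv_index];
}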
390 | | |
391 | | template <typename K, typename V> |
392 | 89.6k | void Dict<K, V>::set(K key, V val) { |
393 | 89.6k | DCHECK(obj_header().heap_tag != HeapTag::Global); |
394 | 0 | int pos = hash_and_probe(key); |
395 | 89.6k | if (pos == kTooSmall) { |
396 | 70 | reserve(len_ + 1); |
397 | 70 | pos = hash_and_probe(key); |
398 | 70 | } |
399 | 89.6k | DCHECK(pos >= 0); |
400 | | |
401 | 0 | int kv_index = index_->items_[pos]; |
402 | 89.6k | DCHECK(kv_index < len_); |
403 | 89.6k | if (kv_index < 0) { |
404 | | // Write new entries to the end of the k/v arrays. This allows us to recall |
405 | | // insertion order until the first deletion. |
406 | 61.7k | keys_->items_[len_] = key; |
407 | 61.7k | values_->items_[len_] = val; |
408 | 61.7k | index_->items_[pos] = len_; |
409 | 61.7k | len_++; |
410 | 61.7k | DCHECK(len_ <= capacity_); |
411 | 61.7k | } else { |
412 | 27.8k | values_->items_[kv_index] = val; |
413 | 27.8k | } |
414 | 89.6k | } _ZN4DictIP5PointP6BigStrE3setES1_S3_ Line | Count | Source | 392 | 90 | void Dict<K, V>::set(K key, V val) { | 393 | 90 | DCHECK(obj_header().heap_tag != HeapTag::Global); | 394 | 0 | int pos = hash_and_probe(key); | 395 | 90 | if (pos == kTooSmall) { | 396 | 5 | reserve(len_ + 1); | 397 | 5 | pos = hash_and_probe(key); | 398 | 5 | } | 399 | 90 | DCHECK(pos >= 0); | 400 | | | 401 | 0 | int kv_index = index_->items_[pos]; | 402 | 90 | DCHECK(kv_index < len_); | 403 | 90 | if (kv_index < 0) { | 404 | | // Write new entries to the end of the k/v arrays. This allows us to recall | 405 | | // insertion order until the first deletion. | 406 | 90 | keys_->items_[len_] = key; | 407 | 90 | values_->items_[len_] = val; | 408 | 90 | index_->items_[pos] = len_; | 409 | 90 | len_++; | 410 | 90 | DCHECK(len_ <= capacity_); | 411 | 90 | } else { | 412 | 0 | values_->items_[kv_index] = val; | 413 | 0 | } | 414 | 90 | } |
_ZN4DictIiP6BigStrE3setEiS1_ Line | Count | Source | 392 | 105 | void Dict<K, V>::set(K key, V val) { | 393 | 105 | DCHECK(obj_header().heap_tag != HeapTag::Global); | 394 | 0 | int pos = hash_and_probe(key); | 395 | 105 | if (pos == kTooSmall) { | 396 | 11 | reserve(len_ + 1); | 397 | 11 | pos = hash_and_probe(key); | 398 | 11 | } | 399 | 105 | DCHECK(pos >= 0); | 400 | | | 401 | 0 | int kv_index = index_->items_[pos]; | 402 | 105 | DCHECK(kv_index < len_); | 403 | 105 | if (kv_index < 0) { | 404 | | // Write new entries to the end of the k/v arrays. This allows us to recall | 405 | | // insertion order until the first deletion. | 406 | 105 | keys_->items_[len_] = key; | 407 | 105 | values_->items_[len_] = val; | 408 | 105 | index_->items_[pos] = len_; | 409 | 105 | len_++; | 410 | 105 | DCHECK(len_ <= capacity_); | 411 | 105 | } else { | 412 | 0 | values_->items_[kv_index] = val; | 413 | 0 | } | 414 | 105 | } |
_ZN4DictIlP6BigStrE3setElS1_ Line | Count | Source | 392 | 80.9k | void Dict<K, V>::set(K key, V val) { | 393 | 80.9k | DCHECK(obj_header().heap_tag != HeapTag::Global); | 394 | 0 | int pos = hash_and_probe(key); | 395 | 80.9k | if (pos == kTooSmall) { | 396 | 20 | reserve(len_ + 1); | 397 | 20 | pos = hash_and_probe(key); | 398 | 20 | } | 399 | 80.9k | DCHECK(pos >= 0); | 400 | | | 401 | 0 | int kv_index = index_->items_[pos]; | 402 | 80.9k | DCHECK(kv_index < len_); | 403 | 80.9k | if (kv_index < 0) { | 404 | | // Write new entries to the end of the k/v arrays. This allows us to recall | 405 | | // insertion order until the first deletion. | 406 | 60.9k | keys_->items_[len_] = key; | 407 | 60.9k | values_->items_[len_] = val; | 408 | 60.9k | index_->items_[pos] = len_; | 409 | 60.9k | len_++; | 410 | 60.9k | DCHECK(len_ <= capacity_); | 411 | 60.9k | } else { | 412 | 19.9k | values_->items_[kv_index] = val; | 413 | 19.9k | } | 414 | 80.9k | } |
_ZN4DictIP6BigStriE3setES1_i Line | Count | Source | 392 | 158 | void Dict<K, V>::set(K key, V val) { | 393 | 158 | DCHECK(obj_header().heap_tag != HeapTag::Global); | 394 | 0 | int pos = hash_and_probe(key); | 395 | 158 | if (pos == kTooSmall) { | 396 | 16 | reserve(len_ + 1); | 397 | 16 | pos = hash_and_probe(key); | 398 | 16 | } | 399 | 158 | DCHECK(pos >= 0); | 400 | | | 401 | 0 | int kv_index = index_->items_[pos]; | 402 | 158 | DCHECK(kv_index < len_); | 403 | 158 | if (kv_index < 0) { | 404 | | // Write new entries to the end of the k/v arrays. This allows us to recall | 405 | | // insertion order until the first deletion. | 406 | 158 | keys_->items_[len_] = key; | 407 | 158 | values_->items_[len_] = val; | 408 | 158 | index_->items_[pos] = len_; | 409 | 158 | len_++; | 410 | 158 | DCHECK(len_ <= capacity_); | 411 | 158 | } else { | 412 | 0 | values_->items_[kv_index] = val; | 413 | 0 | } | 414 | 158 | } |
_ZN4DictIP6BigStrS1_E3setES1_S1_ Line | Count | Source | 392 | 2 | void Dict<K, V>::set(K key, V val) { | 393 | 2 | DCHECK(obj_header().heap_tag != HeapTag::Global); | 394 | 0 | int pos = hash_and_probe(key); | 395 | 2 | if (pos == kTooSmall) { | 396 | 2 | reserve(len_ + 1); | 397 | 2 | pos = hash_and_probe(key); | 398 | 2 | } | 399 | 2 | DCHECK(pos >= 0); | 400 | | | 401 | 0 | int kv_index = index_->items_[pos]; | 402 | 2 | DCHECK(kv_index < len_); | 403 | 2 | if (kv_index < 0) { | 404 | | // Write new entries to the end of the k/v arrays. This allows us to recall | 405 | | // insertion order until the first deletion. | 406 | 2 | keys_->items_[len_] = key; | 407 | 2 | values_->items_[len_] = val; | 408 | 2 | index_->items_[pos] = len_; | 409 | 2 | len_++; | 410 | 2 | DCHECK(len_ <= capacity_); | 411 | 2 | } else { | 412 | 0 | values_->items_[kv_index] = val; | 413 | 0 | } | 414 | 2 | } |
Line | Count | Source | 392 | 8.35k | void Dict<K, V>::set(K key, V val) { | 393 | 8.35k | DCHECK(obj_header().heap_tag != HeapTag::Global); | 394 | 0 | int pos = hash_and_probe(key); | 395 | 8.35k | if (pos == kTooSmall) { | 396 | 14 | reserve(len_ + 1); | 397 | 14 | pos = hash_and_probe(key); | 398 | 14 | } | 399 | 8.35k | DCHECK(pos >= 0); | 400 | | | 401 | 0 | int kv_index = index_->items_[pos]; | 402 | 8.35k | DCHECK(kv_index < len_); | 403 | 8.35k | if (kv_index < 0) { | 404 | | // Write new entries to the end of the k/v arrays. This allows us to recall | 405 | | // insertion order until the first deletion. | 406 | 413 | keys_->items_[len_] = key; | 407 | 413 | values_->items_[len_] = val; | 408 | 413 | index_->items_[pos] = len_; | 409 | 413 | len_++; | 410 | 413 | DCHECK(len_ <= capacity_); | 411 | 7.94k | } else { | 412 | 7.94k | values_->items_[kv_index] = val; | 413 | 7.94k | } | 414 | 8.35k | } |
_ZN4DictIP6Tuple2IiiEiE3setES2_i Line | Count | Source | 392 | 2 | void Dict<K, V>::set(K key, V val) { | 393 | 2 | DCHECK(obj_header().heap_tag != HeapTag::Global); | 394 | 0 | int pos = hash_and_probe(key); | 395 | 2 | if (pos == kTooSmall) { | 396 | 1 | reserve(len_ + 1); | 397 | 1 | pos = hash_and_probe(key); | 398 | 1 | } | 399 | 2 | DCHECK(pos >= 0); | 400 | | | 401 | 0 | int kv_index = index_->items_[pos]; | 402 | 2 | DCHECK(kv_index < len_); | 403 | 2 | if (kv_index < 0) { | 404 | | // Write new entries to the end of the k/v arrays. This allows us to recall | 405 | | // insertion order until the first deletion. | 406 | 2 | keys_->items_[len_] = key; | 407 | 2 | values_->items_[len_] = val; | 408 | 2 | index_->items_[pos] = len_; | 409 | 2 | len_++; | 410 | 2 | DCHECK(len_ <= capacity_); | 411 | 2 | } else { | 412 | 0 | values_->items_[kv_index] = val; | 413 | 0 | } | 414 | 2 | } |
_ZN4DictIP6Tuple2IP6BigStriEiE3setES4_i Line | Count | Source | 392 | 2 | void Dict<K, V>::set(K key, V val) { | 393 | 2 | DCHECK(obj_header().heap_tag != HeapTag::Global); | 394 | 0 | int pos = hash_and_probe(key); | 395 | 2 | if (pos == kTooSmall) { | 396 | 1 | reserve(len_ + 1); | 397 | 1 | pos = hash_and_probe(key); | 398 | 1 | } | 399 | 2 | DCHECK(pos >= 0); | 400 | | | 401 | 0 | int kv_index = index_->items_[pos]; | 402 | 2 | DCHECK(kv_index < len_); | 403 | 2 | if (kv_index < 0) { | 404 | | // Write new entries to the end of the k/v arrays. This allows us to recall | 405 | | // insertion order until the first deletion. | 406 | 2 | keys_->items_[len_] = key; | 407 | 2 | values_->items_[len_] = val; | 408 | 2 | index_->items_[pos] = len_; | 409 | 2 | len_++; | 410 | 2 | DCHECK(len_ <= capacity_); | 411 | 2 | } else { | 412 | 0 | values_->items_[kv_index] = val; | 413 | 0 | } | 414 | 2 | } |
|
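set() either overwrites the value of an existing key in place or appends a new entry at position len_, calling reserve() first whenever hash_and_probe() reports kTooSmall. A small usage sketch with illustrative values:

auto d = Alloc<Dict<int, int>>();
d->set(42, 1);  // capacity_ is 0, so the first probe returns kTooSmall and
                // set() grows the dict before probing again
d->set(5, 2);   // appended after 42, preserving insertion order
d->set(42, 3);  // existing key: only the value changes, len_ stays 2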
415 | | |
416 | | template <typename K, typename V> |
417 | 28 | inline int len(const Dict<K, V>* d) { |
418 | 28 | return d->len_; |
419 | 28 | } _Z3lenIP5PointP6BigStrEiPK4DictIT_T0_E Line | Count | Source | 417 | 1 | inline int len(const Dict<K, V>* d) { | 418 | 1 | return d->len_; | 419 | 1 | } |
_Z3lenIiP6BigStrEiPK4DictIT_T0_E Line | Count | Source | 417 | 4 | inline int len(const Dict<K, V>* d) { | 418 | 4 | return d->len_; | 419 | 4 | } |
_Z3lenIlP6BigStrEiPK4DictIT_T0_E Line | Count | Source | 417 | 2 | inline int len(const Dict<K, V>* d) { | 418 | 2 | return d->len_; | 419 | 2 | } |
_Z3lenIP6BigStriEiPK4DictIT_T0_E Line | Count | Source | 417 | 9 | inline int len(const Dict<K, V>* d) { | 418 | 9 | return d->len_; | 419 | 9 | } |
_Z3lenIP6BigStrS1_EiPK4DictIT_T0_E Line | Count | Source | 417 | 5 | inline int len(const Dict<K, V>* d) { | 418 | 5 | return d->len_; | 419 | 5 | } |
_Z3lenIiiEiPK4DictIT_T0_E Line | Count | Source | 417 | 7 | inline int len(const Dict<K, V>* d) { | 418 | 7 | return d->len_; | 419 | 7 | } |
|
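Continuing the sketch above: len() mirrors Python's built-in and just reads len_.

int n = len(d);  // n == 2 for the d built in the set() sketch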
420 | | |
421 | | template <class K, class V> |
422 | | class DictIter { |
423 | | public: |
424 | 4 | explicit DictIter(Dict<K, V>* D) : D_(D), pos_(0) { |
425 | 4 | } _ZN8DictIterIP6BigStriEC2EP4DictIS1_iE Line | Count | Source | 424 | 3 | explicit DictIter(Dict<K, V>* D) : D_(D), pos_(0) { | 425 | 3 | } |
_ZN8DictIterIP6BigStrS1_EC2EP4DictIS1_S1_E Line | Count | Source | 424 | 1 | explicit DictIter(Dict<K, V>* D) : D_(D), pos_(0) { | 425 | 1 | } |
|
426 | 4 | void Next() { |
427 | 4 | pos_++; |
428 | 4 | } _ZN8DictIterIP6BigStriE4NextEv Line | Count | Source | 426 | 4 | void Next() { | 427 | 4 | pos_++; | 428 | 4 | } |
Unexecuted instantiation: _ZN8DictIterIP6BigStrS1_E4NextEv |
429 | 8 | bool Done() { |
430 | 8 | return pos_ == D_->len_; |
431 | 8 | } _ZN8DictIterIP6BigStriE4DoneEv Line | Count | Source | 429 | 7 | bool Done() { | 430 | 7 | return pos_ == D_->len_; | 431 | 7 | } |
_ZN8DictIterIP6BigStrS1_E4DoneEv Line | Count | Source | 429 | 1 | bool Done() { | 430 | 1 | return pos_ == D_->len_; | 431 | 1 | } |
|
432 | 4 | K Key() { |
433 | 4 | return D_->keys_->items_[pos_]; |
434 | 4 | } _ZN8DictIterIP6BigStriE3KeyEv Line | Count | Source | 432 | 4 | K Key() { | 433 | 4 | return D_->keys_->items_[pos_]; | 434 | 4 | } |
Unexecuted instantiation: _ZN8DictIterIP6BigStrS1_E3KeyEv |
435 | 2 | V Value() { |
436 | 2 | return D_->values_->items_[pos_]; |
437 | 2 | } |
438 | | |
439 | | private: |
440 | | const Dict<K, V>* D_; |
441 | | int pos_; |
442 | | }; |
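DictIter walks the key and value slabs in order, so iteration reproduces insertion order as long as nothing has been deleted. A usage sketch, assuming d is a Dict<BigStr*, int>* like the one exercised in the instantiations above:

for (DictIter<BigStr*, int> it(d); !it.Done(); it.Next()) {
  BigStr* key = it.Key();
  int val = it.Value();
  // ... use key and val ...
}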
443 | | |
444 | | // dict(mylist) converts a list of (k, v) tuples into a dict |
445 | | template <typename K, typename V> |
446 | 1 | Dict<K, V>* dict(List<Tuple2<K, V>*>* l) { |
447 | 1 | auto ret = Alloc<Dict<K, V>>(); |
448 | 1 | ret->reserve(len(l)); |
449 | 3 | for (ListIter<Tuple2<K, V>*> it(l); !it.Done(); it.Next()) { |
450 | 2 | ret->set(it.Value()->at0(), it.Value()->at1()); |
451 | 2 | } |
452 | 1 | return ret; |
453 | 1 | } |
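A usage sketch for dict(); it assumes Tuple2 provides the two-argument constructor used elsewhere in mycpp:

auto pairs = Alloc<List<Tuple2<int, int>*>>();
pairs->append(Alloc<Tuple2<int, int>>(1, 10));
pairs->append(Alloc<Tuple2<int, int>>(2, 20));
Dict<int, int>* m = dict(pairs);  // len(m) == 2; 1 -> 10, 2 -> 20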
454 | | |
455 | | template <class K, class V> |
456 | 1 | void Dict<K, V>::update(List<Tuple2<K, V>*>* pairs) { |
457 | 1 | reserve(len_ + len(pairs)); |
458 | 3 | for (ListIter<Tuple2<K, V>*> it(pairs); !it.Done(); it.Next()) { |
459 | 2 | set(it.Value()->at0(), it.Value()->at1()); |
460 | 2 | } |
461 | 1 | } |
462 | | |
463 | | template <class K, class V> |
464 | | void Dict<K, V>::update(Dict<K, V>* other) { |
465 | | reserve(len_ + len(other)); |
466 | | for (DictIter<K, V> it(other); !it.Done(); it.Next()) { |
467 | | set(it.Key(), it.Value()); |
468 | | } |
469 | | } |
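Both update() overloads reserve room for the combined size up front and then reuse set(), so keys already present in the receiver are overwritten rather than duplicated. Continuing the dict() sketch above:

m->update(pairs);  // same (k, v) tuples again: values rewritten, len(m) stays 2
auto other = Alloc<Dict<int, int>>();
other->set(3, 30);
m->update(other);  // one new key merged in, len(m) == 3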
470 | | |
471 | | #endif // MYCPP_GC_DICT_H |