@@ -82,7 +82,7 @@ class bit_packed_atomic_flags
         std::swap(mem, d.mem);
     }
 
-    /** bit_set sets an entry as discardable.
+    /** bit_set sets an entry as discardable.
      *
      * @param s the index of the entry to bit_set.
      * @post immediately subsequent call (assuming proper external memory
@@ -94,7 +94,7 @@ class bit_packed_atomic_flags
         mem[s >> 3].fetch_or(1 << (s & 7), std::memory_order_relaxed);
     }
 
-    /** bit_unset marks an entry as something that should not be overwritten
+    /** bit_unset marks an entry as something that should not be overwritten
      *
      * @param s the index of the entry to bit_unset.
      * @post immediately subsequent call (assuming proper external memory
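A minimal sketch of how the two flag operations documented in these hunks pair up; the constructor argument is an assumption for illustration, and only bit_set and bit_unset appear above:

    bit_packed_atomic_flags flags(1024); // assumed: one flag per table entry
    flags.bit_set(42);   // mark entry 42 as discardable (relaxed atomic fetch_or)
    flags.bit_unset(42); // entry 42 should not be overwritten after all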
@@ -118,21 +118,22 @@ class bit_packed_atomic_flags
 
 /** cache implements a cache with properties similar to a cuckoo-set
  *
- * The cache is able to hold up to (~(uint32_t)1) elements.
+ * The cache is able to hold up to (~(uint32_t)0) - 1 elements.
  *
  * Read Operations:
  * - contains(*, false)
  *
- * Read/Erase Operations:
+ * Read+Erase Operations:
  * - contains(*, true)
+ *
+ * Erase Operations:
  * - allow_erase()
  *
  * Write Operations:
  * - setup()
  * - setup_bytes()
  * - insert()
  * - please_keep()
- * - rehash()
  *
  * Synchronization Free Operations:
  * - invalid()
@@ -143,17 +144,17 @@ class bit_packed_atomic_flags
  * 1) Write Requires synchronized access (e.g., a lock)
  * 2) Read Requires no concurrent Write, synchronized with the last insert.
  * 3) Erase requires no concurrent Write, synchronized with last insert.
- * 4) An Eraser must release all Erases before allowing a new Writer.
+ * 4) An Erase caller must release all memory before allowing a new Writer.
  *
  *
  * Note on function names:
  * - The name "allow_erase" is used because the real discard happens later.
  * - The name "please_keep" is used because elements may be erased anyways on insert.
  *
- * @tparam Element should be a POD type that is 32-alignable
+ * @tparam Element should be a movable and copyable type
  * @tparam Hash should be a function/callable which takes a template parameter
  * hash_select and an Element and extracts a hash from it. Should return
- * high-entropy hashes for `Hash h; h<0>(e) and h<1>(e)`.
+ * high-entropy hashes for `Hash h; h<0>(e) ... h<7>(e)`.
  */
 template <typename Element, typename Hash>
 class cache
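A short usage sketch of the contract spelled out in the comment above (assuming this header plus <mutex> and <cstdint> are included); the element type, hasher, lock, and the setup_bytes() argument are assumptions for illustration, not code from this header:

    // Hypothetical Element and 8-way Hash meeting the documented @tparam requirements.
    struct Elem { unsigned char bytes[32]; };
    struct ElemHasher {
        template <uint8_t hash_select>
        uint32_t operator()(const Elem& e) const; // high entropy for hash_select 0..7
    };

    cache<Elem, ElemHasher> c;
    std::mutex write_mutex; // guarantee 1): Writes need synchronized access
    Elem e{};
    {
        std::lock_guard<std::mutex> lock(write_mutex);
        c.setup_bytes(32 << 20); // assumed: sizes the table from a byte budget
        c.insert(e);             // Write
    }
    bool hit = c.contains(e, false); // Read: requires no concurrent Write
    c.contains(e, true);             // Read+Erase: only marks the entry discardable;
                                     // the real discard happens on a later insert()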
@@ -175,11 +176,10 @@ class cache
      */
     mutable std::vector<bool> epoch_flags;
 
-    /** epoch_heuristic_counter is used to determine when a epoch
-     * might be aged & an expensive scan should be done.
-     * epoch_heuristic_counter is incremented on insert and reset to the
-     * new number of inserts which would cause the epoch to reach
-     * epoch_size when it reaches zero.
+    /** epoch_heuristic_counter is used to determine when an epoch might be aged
+     * & an expensive scan should be done. epoch_heuristic_counter is
+     * decremented on insert and reset to the new number of inserts which would
+     * cause the epoch to reach epoch_size when it reaches zero.
      */
     uint32_t epoch_heuristic_counter;
 
@@ -195,7 +195,7 @@ class cache
 
     /** hash_mask should be set to appropriately mask out a hash such that every
      * masked hash is [0,size), eg, if floor(log2(size)) == 20, then hash_mask
-     * should be (1<<20)-1
+     * should be (1<<20)-1
      */
     uint32_t hash_mask;
 
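To make the hash_mask comment above concrete, a small sketch of deriving the mask from a table size (the variable names and the power-of-two size are illustrative assumptions):

    // Every masked hash must land in [0, size).
    uint32_t size = 1u << 20;       // assumed table size
    uint32_t log2_size = 0;
    while ((2u << log2_size) <= size)
        ++log2_size;                // log2_size == floor(log2(size)) == 20 here
    uint32_t hash_mask = (1u << log2_size) - 1; // == (1 << 20) - 1 == 0xFFFFF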
@@ -232,7 +232,7 @@ class cache
      * @returns a constexpr index that can never be inserted to */
     constexpr uint32_t invalid() const
     {
-        return ~(uint32_t)1;
+        return ~(uint32_t)0;
     }
 
     /** allow_erase marks the element at index n as discardable. Threadsafe
@@ -264,8 +264,10 @@ class cache
      */
     void epoch_check()
     {
-        if ((--epoch_heuristic_counter) != 0)
+        if (epoch_heuristic_counter != 0) {
+            --epoch_heuristic_counter;
             return;
+        }
         // count the number of elements from the latest epoch which
         // have not been erased.
         uint32_t epoch_unused_count = 0;
@@ -287,8 +289,11 @@ class cache
         // reset the epoch_heuristic_counter to next do a scan when worst
         // case behavior (no intermittent erases) would exceed epoch size,
         // with a reasonable minimum scan size.
+        // Ordinarily, we would have to sanity check std::min(epoch_size,
+        // epoch_unused_count), but we already know that `epoch_unused_count
+        // < epoch_size` in this branch
         epoch_heuristic_counter = std::max(1u, std::max(epoch_size / 16,
-                    epoch_size - std::min(epoch_size, epoch_unused_count)));
+                    epoch_size - epoch_unused_count));
     }
 
 public:
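A worked example of the reset just above, with assumed (not actual) values: for epoch_size = 16384 and epoch_unused_count = 4096, the new comment's precondition epoch_unused_count < epoch_size holds, and

    // std::max(1u, std::max(16384 / 16, 16384 - 4096))
    //   == std::max(1u, std::max(1024u, 12288u))
    //   == 12288

so the next heuristic scan runs after 12288 further inserts; the epoch_size / 16 term keeps scans from being scheduled too frequently, and the 1u floor keeps the counter from ever being reset to zero.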
@@ -343,7 +348,7 @@ class cache
 
     /** insert loops at most depth_limit times trying to insert a hash
      * at various locations in the table via a variant of the Cuckoo Algorithm
-     * with two hash locations.
+     * with eight hash locations.
      *
      * It drops the last tried element if it runs out of depth before
      * encountering an open slot.
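To make the displacement strategy in this comment easier to picture, here is a self-contained toy in the same spirit; every name here, the use of std::string, an empty string standing in for a discardable slot, and the fixed depth limit are invented for illustration and are not this header's insert():

    #include <cstdint>
    #include <functional>
    #include <string>
    #include <utility>
    #include <vector>

    constexpr int kDepthLimit = 32; // toy value; the real depth_limit is computed elsewhere
    constexpr int kLocations = 8;   // eight candidate slots per element

    // Stand-in for the 8-way Hash described earlier: the n-th candidate slot for e.
    uint32_t toy_hash(const std::string& e, int n, uint32_t mask)
    {
        return static_cast<uint32_t>(std::hash<std::string>{}(e + char('A' + n))) & mask;
    }

    // Cuckoo-style insertion into a table of size mask + 1: try all eight candidate
    // slots, otherwise displace an occupant and retry with it, up to kDepthLimit times.
    void toy_insert(std::vector<std::string>& table, uint32_t mask, std::string e)
    {
        uint32_t last_loc = ~uint32_t{0};
        for (int depth = 0; depth < kDepthLimit; ++depth) {
            for (int n = 0; n < kLocations; ++n) {
                uint32_t loc = toy_hash(e, n, mask);
                if (table[loc].empty()) { // an empty slot plays the role of "discardable"
                    table[loc] = std::move(e);
                    return;
                }
            }
            // No open slot: kick out the occupant of one candidate slot (avoiding the
            // slot we filled on the previous iteration) and continue with that element.
            uint32_t loc = toy_hash(e, depth % kLocations, mask);
            if (loc == last_loc)
                loc = toy_hash(e, (depth + 1) % kLocations, mask);
            std::swap(table[loc], e);
            last_loc = loc;
        }
        // Out of depth: the element still in hand is simply dropped.
    }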