1 | /* |
2 | * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved. |
3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 | * |
5 | * This code is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 only, as |
7 | * published by the Free Software Foundation. |
8 | * |
9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
12 | * version 2 for more details (a copy is included in the LICENSE file that |
13 | * accompanied this code). |
14 | * |
15 | * You should have received a copy of the GNU General Public License version |
16 | * 2 along with this work; if not, write to the Free Software Foundation, |
17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
18 | * |
19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
20 | * or visit www.oracle.com if you need additional information or have any |
21 | * questions. |
22 | * |
23 | */ |
24 | |
25 | #ifndef SHARE_UTILITIES_CONCURRENTHASHTABLE_INLINE_HPP |
26 | #define SHARE_UTILITIES_CONCURRENTHASHTABLE_INLINE_HPP |
27 | |
28 | #include "memory/allocation.inline.hpp" |
29 | #include "runtime/atomic.hpp" |
30 | #include "runtime/orderAccess.hpp" |
31 | #include "runtime/prefetch.inline.hpp" |
32 | #include "utilities/concurrentHashTable.hpp" |
33 | #include "utilities/globalCounter.inline.hpp" |
34 | #include "utilities/numberSeq.hpp" |
35 | #include "utilities/spinYield.hpp" |
36 | |
37 | // 2^30 = 1G buckets |
38 | #define SIZE_BIG_LOG2 30 |
39 | // 2^5 = 32 buckets |
40 | #define SIZE_SMALL_LOG2 5 |
41 | |
42 | // Number from spinYield.hpp. In some loops SpinYield would be unfair. |
43 | #define SPINPAUSES_PER_YIELD 8192 |
44 | |
45 | #ifdef ASSERT |
46 | #ifdef _LP64 |
47 | // Two low bits are not usable. |
48 | static const void* POISON_PTR = (void*)UCONST64(0xfbadbadbadbadbac); |
49 | #else |
50 | // Two low bits are not usable. |
51 | static const void* POISON_PTR = (void*)0xffbadbac; |
52 | #endif |
53 | #endif |
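// In debug builds, POISON_PTR is written into the pointers of retired buckets
// and tables (see the DEBUG_ONLY assignments below) so that a reader still
// holding a stale reference fails fast instead of silently reading freed memory.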
54 | |
55 | // Node |
56 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
57 | inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* |
58 | ConcurrentHashTable<VALUE, CONFIG, F>:: |
59 | Node::next() const |
60 | { |
61 | return OrderAccess::load_acquire(&_next); |
62 | } |
63 | |
64 | // Bucket |
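// The two low bits of a Bucket's _first pointer double as the bucket state
// (STATE_LOCK_BIT and STATE_REDIRECT_BIT). first() strips the state bits
// before returning the Node*, while first_raw() returns the pointer with the
// state bits still set.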
65 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
66 | inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* |
67 | ConcurrentHashTable<VALUE, CONFIG, F>:: |
68 | Bucket::first_raw() const |
69 | { |
70 | return OrderAccess::load_acquire(&_first); |
71 | } |
72 | |
73 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
74 | inline void ConcurrentHashTable<VALUE, CONFIG, F>:: |
75 | Bucket::release_assign_node_ptr( |
76 | typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* const volatile * dst, |
77 | typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* node) const |
78 | { |
// Due to this assert this method is not static.
80 | assert(is_locked(), "Must be locked." ); |
81 | Node** tmp = (Node**)dst; |
82 | OrderAccess::release_store(tmp, clear_set_state(node, *dst)); |
83 | } |
84 | |
85 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
86 | inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* |
87 | ConcurrentHashTable<VALUE, CONFIG, F>:: |
88 | Bucket::first() const |
89 | { |
// We strip the state bits before returning the ptr.
91 | return clear_state(OrderAccess::load_acquire(&_first)); |
92 | } |
93 | |
94 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
95 | inline bool ConcurrentHashTable<VALUE, CONFIG, F>:: |
96 | Bucket::have_redirect() const |
97 | { |
98 | return is_state(first_raw(), STATE_REDIRECT_BIT); |
99 | } |
100 | |
101 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
102 | inline bool ConcurrentHashTable<VALUE, CONFIG, F>:: |
103 | Bucket::is_locked() const |
104 | { |
105 | return is_state(first_raw(), STATE_LOCK_BIT); |
106 | } |
107 | |
108 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
109 | inline void ConcurrentHashTable<VALUE, CONFIG, F>:: |
110 | Bucket::lock() |
111 | { |
112 | int i = 0; |
113 | // SpinYield would be unfair here |
114 | while (!this->trylock()) { |
115 | if ((++i) == SPINPAUSES_PER_YIELD) { |
// On a contemporary OS, yielding gives the CPU to another runnable thread
// if no spare CPU is available.
118 | os::naked_yield(); |
119 | i = 0; |
120 | } else { |
121 | SpinPause(); |
122 | } |
123 | } |
124 | } |
125 | |
126 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
127 | inline void ConcurrentHashTable<VALUE, CONFIG, F>:: |
128 | Bucket::release_assign_last_node_next( |
129 | typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* node) |
130 | { |
131 | assert(is_locked(), "Must be locked." ); |
132 | Node* const volatile * ret = first_ptr(); |
133 | while (clear_state(*ret) != NULL) { |
134 | ret = clear_state(*ret)->next_ptr(); |
135 | } |
136 | release_assign_node_ptr(ret, node); |
137 | } |
138 | |
139 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
140 | inline bool ConcurrentHashTable<VALUE, CONFIG, F>:: |
141 | Bucket::cas_first(typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* node, |
142 | typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* expect |
143 | ) |
144 | { |
145 | if (is_locked()) { |
146 | return false; |
147 | } |
148 | if (Atomic::cmpxchg(node, &_first, expect) == expect) { |
149 | return true; |
150 | } |
151 | return false; |
152 | } |
153 | |
154 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
155 | inline bool ConcurrentHashTable<VALUE, CONFIG, F>:: |
156 | Bucket::trylock() |
157 | { |
158 | if (is_locked()) { |
159 | return false; |
160 | } |
// We expect a clean first pointer; if any state bit is set the CAS below fails.
162 | Node* tmp = first(); |
163 | if (Atomic::cmpxchg(set_state(tmp, STATE_LOCK_BIT), &_first, tmp) == tmp) { |
164 | return true; |
165 | } |
166 | return false; |
167 | } |
168 | |
169 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
170 | inline void ConcurrentHashTable<VALUE, CONFIG, F>:: |
171 | Bucket::unlock() |
172 | { |
173 | assert(is_locked(), "Must be locked." ); |
174 | assert(!have_redirect(), |
175 | "Unlocking a bucket after it has reached terminal state." ); |
176 | OrderAccess::release_store(&_first, clear_state(first())); |
177 | } |
178 | |
179 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
180 | inline void ConcurrentHashTable<VALUE, CONFIG, F>:: |
181 | Bucket::redirect() |
182 | { |
183 | assert(is_locked(), "Must be locked." ); |
184 | OrderAccess::release_store(&_first, set_state(_first, STATE_REDIRECT_BIT)); |
185 | } |
186 | |
187 | // InternalTable |
188 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
189 | inline ConcurrentHashTable<VALUE, CONFIG, F>:: |
190 | InternalTable::InternalTable(size_t log2_size) |
191 | : _log2_size(log2_size), _size(((size_t)1ul) << _log2_size), |
192 | _hash_mask(~(~((size_t)0) << _log2_size)) |
193 | { |
194 | assert(_log2_size >= SIZE_SMALL_LOG2 && _log2_size <= SIZE_BIG_LOG2, |
195 | "Bad size" ); |
196 | _buckets = NEW_C_HEAP_ARRAY(Bucket, _size, F); |
197 | // Use placement new for each element instead of new[] which could use more |
198 | // memory than allocated. |
199 | for (size_t i = 0; i < _size; ++i) { |
200 | new (_buckets + i) Bucket(); |
201 | } |
202 | } |
203 | |
204 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
205 | inline ConcurrentHashTable<VALUE, CONFIG, F>:: |
206 | InternalTable::~InternalTable() |
207 | { |
208 | FREE_C_HEAP_ARRAY(Bucket, _buckets); |
209 | } |
210 | |
211 | // ScopedCS |
212 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
213 | inline ConcurrentHashTable<VALUE, CONFIG, F>:: |
214 | ScopedCS::ScopedCS(Thread* thread, ConcurrentHashTable<VALUE, CONFIG, F>* cht) |
215 | : _thread(thread), |
216 | _cht(cht), |
217 | _cs_context(GlobalCounter::critical_section_begin(_thread)) |
218 | { |
219 | // This version is published now. |
220 | if (OrderAccess::load_acquire(&_cht->_invisible_epoch) != NULL) { |
221 | OrderAccess::release_store_fence(&_cht->_invisible_epoch, (Thread*)NULL); |
222 | } |
223 | } |
224 | |
225 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
226 | inline ConcurrentHashTable<VALUE, CONFIG, F>:: |
227 | ScopedCS::~ScopedCS() |
228 | { |
229 | GlobalCounter::critical_section_end(_thread, _cs_context); |
230 | } |
231 | |
232 | // BaseConfig |
233 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
234 | inline void* ConcurrentHashTable<VALUE, CONFIG, F>:: |
235 | BaseConfig::allocate_node(size_t size, const VALUE& value) |
236 | { |
237 | return AllocateHeap(size, F); |
238 | } |
239 | |
240 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
241 | inline void ConcurrentHashTable<VALUE, CONFIG, F>:: |
242 | BaseConfig::free_node(void* memory, const VALUE& value) |
243 | { |
244 | FreeHeap(memory); |
245 | } |
246 | |
247 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
248 | template <typename LOOKUP_FUNC> |
249 | inline VALUE* ConcurrentHashTable<VALUE, CONFIG, F>:: |
250 | MultiGetHandle::get(LOOKUP_FUNC& lookup_f, bool* grow_hint) |
251 | { |
252 | return ScopedCS::_cht->internal_get(ScopedCS::_thread, lookup_f, grow_hint); |
253 | } |
254 | |
255 | // HaveDeletables |
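// Two variants: the specialization below, used for pointer VALUEs, prefetches
// the next node's pointee while scanning; the generic version further down
// simply walks the chain and applies eval_f.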
256 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
257 | template <typename EVALUATE_FUNC> |
258 | inline bool ConcurrentHashTable<VALUE, CONFIG, F>:: |
259 | HaveDeletables<true, EVALUATE_FUNC>::have_deletable(Bucket* bucket, |
260 | EVALUATE_FUNC& eval_f, |
261 | Bucket* prefetch_bucket) |
262 | { |
// Instantiated for pointer types (true), so we can use prefetch.
// When visiting all Nodes, doing this prefetch gives around a 30% speed-up.
265 | Node* pref = prefetch_bucket != NULL ? prefetch_bucket->first() : NULL; |
266 | for (Node* next = bucket->first(); next != NULL ; next = next->next()) { |
267 | if (pref != NULL) { |
268 | Prefetch::read(*pref->value(), 0); |
269 | pref = pref->next(); |
270 | } |
271 | // Read next() Node* once. May be racing with a thread moving the next |
272 | // pointers. |
273 | Node* next_pref = next->next(); |
274 | if (next_pref != NULL) { |
275 | Prefetch::read(*next_pref->value(), 0); |
276 | } |
277 | if (eval_f(next->value())) { |
278 | return true; |
279 | } |
280 | } |
281 | return false; |
282 | } |
283 | |
284 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
285 | template <bool b, typename EVALUATE_FUNC> |
286 | inline bool ConcurrentHashTable<VALUE, CONFIG, F>:: |
287 | HaveDeletables<b, EVALUATE_FUNC>::have_deletable(Bucket* bucket, |
288 | EVALUATE_FUNC& eval_f, |
289 | Bucket* preb) |
290 | { |
291 | for (Node* next = bucket->first(); next != NULL ; next = next->next()) { |
292 | if (eval_f(next->value())) { |
293 | return true; |
294 | } |
295 | } |
296 | return false; |
297 | } |
298 | |
299 | // ConcurrentHashTable |
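// The _invisible_epoch protocol lets a bulk operation skip redundant
// GlobalCounter::write_synchronize() calls: the bulk operator publishes its
// Thread* as the "invisible" epoch, and every reader entering a critical
// section (ScopedCS) clears it. If the marker is still intact on the next
// call, no reader has entered a critical section since the previous
// synchronize, so it can be skipped.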
300 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
301 | inline void ConcurrentHashTable<VALUE, CONFIG, F>:: |
302 | write_synchonize_on_visible_epoch(Thread* thread) |
303 | { |
304 | assert(_resize_lock_owner == thread, "Re-size lock not held" ); |
OrderAccess::fence(); // Prevent the load below from floating up.
306 | // If no reader saw this version we can skip write_synchronize. |
307 | if (OrderAccess::load_acquire(&_invisible_epoch) == thread) { |
308 | return; |
309 | } |
assert(_invisible_epoch == NULL, "Two threads doing bulk operations" );
// We mark this/the next version that we are synchronizing on as not published.
// A reader will zero this flag if it reads this/the next version.
313 | OrderAccess::release_store(&_invisible_epoch, thread); |
314 | GlobalCounter::write_synchronize(); |
315 | } |
316 | |
317 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
318 | inline bool ConcurrentHashTable<VALUE, CONFIG, F>:: |
319 | try_resize_lock(Thread* locker) |
320 | { |
321 | if (_resize_lock->try_lock()) { |
322 | if (_resize_lock_owner != NULL) { |
323 | assert(locker != _resize_lock_owner, "Already own lock" ); |
// We got the mutex but the internal state is locked.
325 | _resize_lock->unlock(); |
326 | return false; |
327 | } |
328 | } else { |
329 | return false; |
330 | } |
331 | _invisible_epoch = 0; |
332 | _resize_lock_owner = locker; |
333 | return true; |
334 | } |
335 | |
336 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
337 | inline void ConcurrentHashTable<VALUE, CONFIG, F>:: |
338 | lock_resize_lock(Thread* locker) |
339 | { |
// If the lock is held by some other thread, the chances that it is released
// quickly are low, so we prefer yielding.
343 | SpinYield yield(1, 512); |
344 | do { |
345 | _resize_lock->lock_without_safepoint_check(); |
// If the holder of the lock dropped the mutex for a safepoint, the mutex may
// be unlocked while _resize_lock_owner still contains the owner.
348 | if (_resize_lock_owner != NULL) { |
349 | assert(locker != _resize_lock_owner, "Already own lock" ); |
// We got the mutex but the internal state is locked.
351 | _resize_lock->unlock(); |
352 | yield.wait(); |
353 | } else { |
354 | break; |
355 | } |
356 | } while(true); |
357 | _resize_lock_owner = locker; |
358 | _invisible_epoch = 0; |
359 | } |
360 | |
361 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
362 | inline void ConcurrentHashTable<VALUE, CONFIG, F>:: |
363 | unlock_resize_lock(Thread* locker) |
364 | { |
365 | _invisible_epoch = 0; |
366 | assert(locker == _resize_lock_owner, "Not unlocked by locker." ); |
367 | _resize_lock_owner = NULL; |
368 | _resize_lock->unlock(); |
369 | } |
370 | |
371 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
372 | inline void ConcurrentHashTable<VALUE, CONFIG, F>:: |
373 | free_nodes() |
374 | { |
375 | // We assume we are not MT during freeing. |
376 | for (size_t node_it = 0; node_it < _table->_size; node_it++) { |
377 | Bucket* bucket = _table->get_buckets() + node_it; |
378 | Node* node = bucket->first(); |
379 | while (node != NULL) { |
380 | Node* free_node = node; |
381 | node = node->next(); |
382 | Node::destroy_node(free_node); |
383 | } |
384 | } |
385 | } |
386 | |
387 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
388 | inline typename ConcurrentHashTable<VALUE, CONFIG, F>::InternalTable* |
389 | ConcurrentHashTable<VALUE, CONFIG, F>:: |
390 | get_table() const |
391 | { |
392 | return OrderAccess::load_acquire(&_table); |
393 | } |
394 | |
395 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
396 | inline typename ConcurrentHashTable<VALUE, CONFIG, F>::InternalTable* |
397 | ConcurrentHashTable<VALUE, CONFIG, F>:: |
398 | get_new_table() const |
399 | { |
400 | return OrderAccess::load_acquire(&_new_table); |
401 | } |
402 | |
403 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
404 | inline typename ConcurrentHashTable<VALUE, CONFIG, F>::InternalTable* |
405 | ConcurrentHashTable<VALUE, CONFIG, F>:: |
406 | set_table_from_new() |
407 | { |
408 | InternalTable* old_table = _table; |
409 | // Publish the new table. |
410 | OrderAccess::release_store(&_table, _new_table); |
411 | // All must see this. |
412 | GlobalCounter::write_synchronize(); |
// _new_table is not read any more.
414 | _new_table = NULL; |
415 | DEBUG_ONLY(_new_table = (InternalTable*)POISON_PTR;) |
416 | return old_table; |
417 | } |
418 | |
419 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
420 | inline void ConcurrentHashTable<VALUE, CONFIG, F>:: |
421 | internal_grow_range(Thread* thread, size_t start, size_t stop) |
422 | { |
423 | assert(stop <= _table->_size, "Outside backing array" ); |
assert(_new_table != NULL, "Grow not properly set up before start" );
// The state is also copied here. Hence all buckets in the new table will be
// locked. We call the sibling buckets odd/even, where the even bucket has
// high bit 0 and the odd bucket has high bit 1.
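// Each old bucket is locked, its (locked) head is copied into both new
// sibling buckets, and the bucket is then redirected to the new table;
// unzip_bucket() separates the chain that the two siblings initially share.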
428 | for (size_t even_index = start; even_index < stop; even_index++) { |
429 | Bucket* bucket = _table->get_bucket(even_index); |
430 | |
431 | bucket->lock(); |
432 | |
433 | size_t odd_index = even_index + _table->_size; |
434 | _new_table->get_buckets()[even_index] = *bucket; |
435 | _new_table->get_buckets()[odd_index] = *bucket; |
436 | |
// Lockers now go to the new table, where they will wait until the unlock() below.
438 | bucket->redirect(); /* Must release stores above */ |
439 | |
// When this is done we have separated the nodes into their corresponding
// buckets in the new table.
442 | if (!unzip_bucket(thread, _table, _new_table, even_index, odd_index)) { |
// If the bucket is empty, unzip does nothing.
// We must make sure readers go to the new table before we poison the bucket.
445 | DEBUG_ONLY(GlobalCounter::write_synchronize();) |
446 | } |
447 | |
448 | // Unlock for writes into the new table buckets. |
449 | _new_table->get_bucket(even_index)->unlock(); |
450 | _new_table->get_bucket(odd_index)->unlock(); |
451 | |
452 | DEBUG_ONLY( |
453 | bucket->release_assign_node_ptr( |
454 | _table->get_bucket(even_index)->first_ptr(), (Node*)POISON_PTR); |
455 | ) |
456 | } |
457 | } |
458 | |
459 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
460 | template <typename LOOKUP_FUNC, typename DELETE_FUNC> |
461 | inline bool ConcurrentHashTable<VALUE, CONFIG, F>:: |
462 | internal_remove(Thread* thread, LOOKUP_FUNC& lookup_f, DELETE_FUNC& delete_f) |
463 | { |
464 | Bucket* bucket = get_bucket_locked(thread, lookup_f.get_hash()); |
465 | assert(bucket->is_locked(), "Must be locked." ); |
466 | Node* const volatile * rem_n_prev = bucket->first_ptr(); |
467 | Node* rem_n = bucket->first(); |
468 | bool have_dead = false; |
469 | while (rem_n != NULL) { |
470 | if (lookup_f.equals(rem_n->value(), &have_dead)) { |
471 | bucket->release_assign_node_ptr(rem_n_prev, rem_n->next()); |
472 | break; |
473 | } else { |
474 | rem_n_prev = rem_n->next_ptr(); |
475 | rem_n = rem_n->next(); |
476 | } |
477 | } |
478 | |
479 | bucket->unlock(); |
480 | |
481 | if (rem_n == NULL) { |
482 | return false; |
483 | } |
484 | // Publish the deletion. |
485 | GlobalCounter::write_synchronize(); |
486 | delete_f(rem_n->value()); |
487 | Node::destroy_node(rem_n); |
488 | JFR_ONLY(_stats_rate.remove();) |
489 | return true; |
490 | } |
491 | |
492 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
493 | template <typename EVALUATE_FUNC, typename DELETE_FUNC> |
494 | inline void ConcurrentHashTable<VALUE, CONFIG, F>:: |
495 | do_bulk_delete_locked_for(Thread* thread, size_t start_idx, size_t stop_idx, |
496 | EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f, bool is_mt) |
497 | { |
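// Per-bucket flow: scan without locking (HaveDeletables) to see whether there
// is anything to delete, then lock the bucket, unlink up to BULK_DELETE_LIMIT
// matching nodes, unlock, wait for readers (write_synchronize) and only then
// run del_f and free the nodes.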
// Here we hold the resize lock, so the table is SMR safe and there is no new
// table. This can be done in parallel if we want.
500 | assert((is_mt && _resize_lock_owner != NULL) || |
501 | (!is_mt && _resize_lock_owner == thread), "Re-size lock not held" ); |
502 | Node* ndel[BULK_DELETE_LIMIT]; |
503 | InternalTable* table = get_table(); |
504 | assert(start_idx < stop_idx, "Must be" ); |
505 | assert(stop_idx <= _table->_size, "Must be" ); |
// Here we handle the critical section manually, since we don't want to take
// the cost of locking the bucket if there is nothing to delete. But we can
// have concurrent single deletes. The _invisible_epoch can only be used by
// the owner of _resize_lock, which is us here. Therefore we should not change
// it on our own read-side.
511 | GlobalCounter::CSContext cs_context = GlobalCounter::critical_section_begin(thread); |
512 | for (size_t bucket_it = start_idx; bucket_it < stop_idx; bucket_it++) { |
513 | Bucket* bucket = table->get_bucket(bucket_it); |
514 | Bucket* prefetch_bucket = (bucket_it+1) < stop_idx ? |
515 | table->get_bucket(bucket_it+1) : NULL; |
516 | |
517 | if (!HaveDeletables<IsPointer<VALUE>::value, EVALUATE_FUNC>:: |
518 | have_deletable(bucket, eval_f, prefetch_bucket)) { |
519 | // Nothing to remove in this bucket. |
520 | continue; |
521 | } |
522 | |
523 | GlobalCounter::critical_section_end(thread, cs_context); |
// We left the critical section, but the bucket cannot be removed while we
// hold the _resize_lock.
526 | bucket->lock(); |
527 | size_t nd = delete_check_nodes(bucket, eval_f, BULK_DELETE_LIMIT, ndel); |
528 | bucket->unlock(); |
529 | if (is_mt) { |
530 | GlobalCounter::write_synchronize(); |
531 | } else { |
532 | write_synchonize_on_visible_epoch(thread); |
533 | } |
534 | for (size_t node_it = 0; node_it < nd; node_it++) { |
535 | del_f(ndel[node_it]->value()); |
536 | Node::destroy_node(ndel[node_it]); |
537 | JFR_ONLY(_stats_rate.remove();) |
538 | DEBUG_ONLY(ndel[node_it] = (Node*)POISON_PTR;) |
539 | } |
540 | cs_context = GlobalCounter::critical_section_begin(thread); |
541 | } |
542 | GlobalCounter::critical_section_end(thread, cs_context); |
543 | } |
544 | |
545 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
546 | template <typename LOOKUP_FUNC> |
547 | inline void ConcurrentHashTable<VALUE, CONFIG, F>:: |
548 | delete_in_bucket(Thread* thread, Bucket* bucket, LOOKUP_FUNC& lookup_f) |
549 | { |
550 | assert(bucket->is_locked(), "Must be locked." ); |
551 | |
552 | size_t dels = 0; |
553 | Node* ndel[BULK_DELETE_LIMIT]; |
554 | Node* const volatile * rem_n_prev = bucket->first_ptr(); |
555 | Node* rem_n = bucket->first(); |
556 | while (rem_n != NULL) { |
557 | bool is_dead = false; |
558 | lookup_f.equals(rem_n->value(), &is_dead); |
559 | if (is_dead) { |
560 | ndel[dels++] = rem_n; |
561 | Node* next_node = rem_n->next(); |
562 | bucket->release_assign_node_ptr(rem_n_prev, next_node); |
563 | rem_n = next_node; |
564 | if (dels == BULK_DELETE_LIMIT) { |
565 | break; |
566 | } |
567 | } else { |
568 | rem_n_prev = rem_n->next_ptr(); |
569 | rem_n = rem_n->next(); |
570 | } |
571 | } |
572 | if (dels > 0) { |
573 | GlobalCounter::write_synchronize(); |
574 | for (size_t node_it = 0; node_it < dels; node_it++) { |
575 | Node::destroy_node(ndel[node_it]); |
576 | JFR_ONLY(_stats_rate.remove();) |
577 | DEBUG_ONLY(ndel[node_it] = (Node*)POISON_PTR;) |
578 | } |
579 | } |
580 | } |
581 | |
582 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
583 | inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Bucket* |
584 | ConcurrentHashTable<VALUE, CONFIG, F>:: |
585 | get_bucket(uintx hash) const |
586 | { |
587 | InternalTable* table = get_table(); |
588 | Bucket* bucket = get_bucket_in(table, hash); |
589 | if (bucket->have_redirect()) { |
590 | table = get_new_table(); |
591 | bucket = get_bucket_in(table, hash); |
592 | } |
593 | return bucket; |
594 | } |
595 | |
596 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
597 | inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Bucket* |
598 | ConcurrentHashTable<VALUE, CONFIG, F>:: |
599 | get_bucket_locked(Thread* thread, const uintx hash) |
600 | { |
601 | Bucket* bucket; |
602 | int i = 0; |
603 | // SpinYield would be unfair here |
604 | while(true) { |
605 | { |
// We need a critical section to protect the table itself. But if we fail to
// lock the bucket we must leave the critical section, otherwise we would
// deadlock.
608 | ScopedCS cs(thread, this); |
609 | bucket = get_bucket(hash); |
610 | if (bucket->trylock()) { |
611 | break; /* ends critical section */ |
612 | } |
613 | } /* ends critical section */ |
614 | if ((++i) == SPINPAUSES_PER_YIELD) { |
// On a contemporary OS, yielding gives the CPU to another runnable thread
// if no spare CPU is available.
617 | os::naked_yield(); |
618 | i = 0; |
619 | } else { |
620 | SpinPause(); |
621 | } |
622 | } |
623 | return bucket; |
624 | } |
625 | |
626 | // Always called within critical section |
627 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
628 | template <typename LOOKUP_FUNC> |
629 | typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* |
630 | ConcurrentHashTable<VALUE, CONFIG, F>:: |
631 | get_node(const Bucket* const bucket, LOOKUP_FUNC& lookup_f, |
632 | bool* have_dead, size_t* loops) const |
633 | { |
634 | size_t loop_count = 0; |
635 | Node* node = bucket->first(); |
636 | while (node != NULL) { |
637 | bool is_dead = false; |
638 | ++loop_count; |
639 | if (lookup_f.equals(node->value(), &is_dead)) { |
640 | break; |
641 | } |
642 | if (is_dead && !(*have_dead)) { |
643 | *have_dead = true; |
644 | } |
645 | node = node->next(); |
646 | } |
647 | if (loops != NULL) { |
648 | *loops = loop_count; |
649 | } |
650 | return node; |
651 | } |
652 | |
653 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
654 | inline bool ConcurrentHashTable<VALUE, CONFIG, F>:: |
655 | unzip_bucket(Thread* thread, InternalTable* old_table, |
656 | InternalTable* new_table, size_t even_index, size_t odd_index) |
657 | { |
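// Both new sibling buckets initially share the old chain (copied by the
// caller). This loop walks that chain and, for each node, advances the
// sibling list that the node does NOT belong to past it, doing at most one
// such pointer move per write_synchronize, so that a reader cannot be handed
// off to the wrong sibling chain and miss a node it should see. Dead nodes
// are unlinked from both chains and freed after the synchronize.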
658 | Node* aux = old_table->get_bucket(even_index)->first(); |
659 | if (aux == NULL) { |
// This is an empty bucket, and in debug builds we poison the first ptr in the
// bucket. Therefore we must make sure no readers are looking at this bucket.
// If we don't do a write_synch here, the caller must do it.
663 | return false; |
664 | } |
665 | Node* delete_me = NULL; |
666 | Node* const volatile * even = new_table->get_bucket(even_index)->first_ptr(); |
667 | Node* const volatile * odd = new_table->get_bucket(odd_index)->first_ptr(); |
668 | while (aux != NULL) { |
669 | bool dead_hash = false; |
670 | size_t aux_hash = CONFIG::get_hash(*aux->value(), &dead_hash); |
671 | Node* aux_next = aux->next(); |
672 | if (dead_hash) { |
673 | delete_me = aux; |
// This item is dead, move both lists to the next node
675 | new_table->get_bucket(odd_index)->release_assign_node_ptr(odd, |
676 | aux_next); |
677 | new_table->get_bucket(even_index)->release_assign_node_ptr(even, |
678 | aux_next); |
679 | } else { |
680 | size_t aux_index = bucket_idx_hash(new_table, aux_hash); |
681 | if (aux_index == even_index) { |
// This node is even, so move the odd list past it to aux_next
683 | new_table->get_bucket(odd_index)->release_assign_node_ptr(odd, |
684 | aux_next); |
685 | // Keep in even list |
686 | even = aux->next_ptr(); |
687 | } else if (aux_index == odd_index) { |
// This node is odd, so move the even list past it to aux_next
689 | new_table->get_bucket(even_index)->release_assign_node_ptr(even, |
690 | aux_next); |
691 | // Keep in odd list |
692 | odd = aux->next_ptr(); |
693 | } else { |
694 | fatal("aux_index does not match even or odd indices" ); |
695 | } |
696 | } |
697 | aux = aux_next; |
698 | |
// We can only move one pointer at a time, otherwise a reader might be moved
// to the wrong chain, e.g. looking for an even hash value but moved to the
// odd bucket chain.
702 | write_synchonize_on_visible_epoch(thread); |
703 | if (delete_me != NULL) { |
704 | Node::destroy_node(delete_me); |
705 | delete_me = NULL; |
706 | } |
707 | } |
708 | return true; |
709 | } |
710 | |
711 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
712 | inline bool ConcurrentHashTable<VALUE, CONFIG, F>:: |
713 | internal_shrink_prolog(Thread* thread, size_t log2_size) |
714 | { |
715 | if (!try_resize_lock(thread)) { |
716 | return false; |
717 | } |
718 | assert(_resize_lock_owner == thread, "Re-size lock not held" ); |
719 | if (_table->_log2_size == _log2_start_size || |
720 | _table->_log2_size <= log2_size) { |
721 | unlock_resize_lock(thread); |
722 | return false; |
723 | } |
724 | _new_table = new InternalTable(_table->_log2_size - 1); |
725 | return true; |
726 | } |
727 | |
728 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
729 | inline void ConcurrentHashTable<VALUE, CONFIG, F>:: |
730 | internal_shrink_epilog(Thread* thread) |
731 | { |
732 | assert(_resize_lock_owner == thread, "Re-size lock not held" ); |
733 | |
734 | InternalTable* old_table = set_table_from_new(); |
735 | _size_limit_reached = false; |
736 | unlock_resize_lock(thread); |
737 | #ifdef ASSERT |
738 | for (size_t i = 0; i < old_table->_size; i++) { |
assert(old_table->get_bucket(i)->first() == POISON_PTR,
740 | "No poison found" ); |
741 | } |
742 | #endif |
743 | // ABA safe, old_table not visible to any other threads. |
744 | delete old_table; |
745 | } |
746 | |
747 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
748 | inline void ConcurrentHashTable<VALUE, CONFIG, F>:: |
749 | internal_shrink_range(Thread* thread, size_t start, size_t stop) |
750 | { |
// The state is also copied here.
// Hence all buckets in the new table will be locked.
753 | for (size_t bucket_it = start; bucket_it < stop; bucket_it++) { |
754 | size_t even_hash_index = bucket_it; // High bit 0 |
755 | size_t odd_hash_index = bucket_it + _new_table->_size; // High bit 1 |
756 | |
757 | Bucket* b_old_even = _table->get_bucket(even_hash_index); |
758 | Bucket* b_old_odd = _table->get_bucket(odd_hash_index); |
759 | |
760 | b_old_even->lock(); |
761 | b_old_odd->lock(); |
762 | |
763 | _new_table->get_buckets()[bucket_it] = *b_old_even; |
764 | |
765 | // Put chains together. |
766 | _new_table->get_bucket(bucket_it)-> |
767 | release_assign_last_node_next(*(b_old_odd->first_ptr())); |
768 | |
769 | b_old_even->redirect(); |
770 | b_old_odd->redirect(); |
771 | |
772 | write_synchonize_on_visible_epoch(thread); |
773 | |
// Unlock for writes into the new smaller table.
775 | _new_table->get_bucket(bucket_it)->unlock(); |
776 | |
777 | DEBUG_ONLY(b_old_even->release_assign_node_ptr(b_old_even->first_ptr(), |
778 | (Node*)POISON_PTR);) |
779 | DEBUG_ONLY(b_old_odd->release_assign_node_ptr(b_old_odd->first_ptr(), |
780 | (Node*)POISON_PTR);) |
781 | } |
782 | } |
783 | |
784 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
785 | inline bool ConcurrentHashTable<VALUE, CONFIG, F>:: |
786 | internal_shrink(Thread* thread, size_t log2_size) |
787 | { |
788 | if (!internal_shrink_prolog(thread, log2_size)) { |
789 | assert(_resize_lock_owner != thread, "Re-size lock held" ); |
790 | return false; |
791 | } |
792 | assert(_resize_lock_owner == thread, "Should be locked by me" ); |
793 | internal_shrink_range(thread, 0, _new_table->_size); |
794 | internal_shrink_epilog(thread); |
795 | assert(_resize_lock_owner != thread, "Re-size lock held" ); |
796 | return true; |
797 | } |
798 | |
799 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
800 | inline bool ConcurrentHashTable<VALUE, CONFIG, F>:: |
801 | internal_grow_prolog(Thread* thread, size_t log2_size) |
802 | { |
// We only do this double-checking of _size_limit_reached/is_max_size_reached()
// in the grow path, since grow means high load on the table while shrink
// means low load.
806 | if (is_max_size_reached()) { |
807 | return false; |
808 | } |
809 | if (!try_resize_lock(thread)) { |
810 | // Either we have an ongoing resize or an operation which doesn't want us |
811 | // to resize now. |
812 | return false; |
813 | } |
814 | if (is_max_size_reached() || _table->_log2_size >= log2_size) { |
815 | unlock_resize_lock(thread); |
816 | return false; |
817 | } |
818 | |
819 | _new_table = new InternalTable(_table->_log2_size + 1); |
820 | |
821 | if (_new_table->_log2_size == _log2_size_limit) { |
822 | _size_limit_reached = true; |
823 | } |
824 | |
825 | return true; |
826 | } |
827 | |
828 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
829 | inline void ConcurrentHashTable<VALUE, CONFIG, F>:: |
830 | internal_grow_epilog(Thread* thread) |
831 | { |
832 | assert(_resize_lock_owner == thread, "Should be locked" ); |
833 | |
834 | InternalTable* old_table = set_table_from_new(); |
835 | unlock_resize_lock(thread); |
836 | #ifdef ASSERT |
837 | for (size_t i = 0; i < old_table->_size; i++) { |
assert(old_table->get_bucket(i)->first() == POISON_PTR,
839 | "No poison found" ); |
840 | } |
841 | #endif |
842 | // ABA safe, old_table not visible to any other threads. |
843 | delete old_table; |
844 | } |
845 | |
846 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
847 | inline bool ConcurrentHashTable<VALUE, CONFIG, F>:: |
848 | internal_grow(Thread* thread, size_t log2_size) |
849 | { |
850 | if (!internal_grow_prolog(thread, log2_size)) { |
851 | assert(_resize_lock_owner != thread, "Re-size lock held" ); |
852 | return false; |
853 | } |
854 | assert(_resize_lock_owner == thread, "Should be locked by me" ); |
855 | internal_grow_range(thread, 0, _table->_size); |
856 | internal_grow_epilog(thread); |
857 | assert(_resize_lock_owner != thread, "Re-size lock held" ); |
858 | return true; |
859 | } |
860 | |
861 | // Always called within critical section |
862 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
863 | template <typename LOOKUP_FUNC> |
864 | inline VALUE* ConcurrentHashTable<VALUE, CONFIG, F>:: |
865 | internal_get(Thread* thread, LOOKUP_FUNC& lookup_f, bool* grow_hint) |
866 | { |
867 | bool clean = false; |
868 | size_t loops = 0; |
869 | VALUE* ret = NULL; |
870 | |
871 | const Bucket* bucket = get_bucket(lookup_f.get_hash()); |
872 | Node* node = get_node(bucket, lookup_f, &clean, &loops); |
873 | if (node != NULL) { |
874 | ret = node->value(); |
875 | } |
876 | if (grow_hint != NULL) { |
877 | *grow_hint = loops > _grow_hint; |
878 | } |
879 | |
880 | return ret; |
881 | } |
882 | |
883 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
884 | template <typename LOOKUP_FUNC> |
885 | inline bool ConcurrentHashTable<VALUE, CONFIG, F>:: |
886 | internal_insert(Thread* thread, LOOKUP_FUNC& lookup_f, const VALUE& value, |
887 | bool* grow_hint, bool* clean_hint) |
888 | { |
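// Optimistic insert: pre-allocate the node, then, inside a read-side critical
// section, check for a duplicate and try to CAS the node onto the bucket
// head. On a failed CAS we retry, yielding if the bucket was locked (resize
// or delete in progress) and spinning otherwise. If a duplicate is found the
// pre-allocated node is freed and false is returned.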
889 | bool ret = false; |
890 | bool clean = false; |
891 | bool locked; |
892 | size_t loops = 0; |
893 | size_t i = 0; |
894 | uintx hash = lookup_f.get_hash(); |
895 | Node* new_node = Node::create_node(value, NULL); |
896 | |
897 | while (true) { |
898 | { |
ScopedCS cs(thread, this); /* protects the table/bucket */
900 | Bucket* bucket = get_bucket(hash); |
901 | Node* first_at_start = bucket->first(); |
902 | Node* old = get_node(bucket, lookup_f, &clean, &loops); |
903 | if (old == NULL) { |
904 | new_node->set_next(first_at_start); |
905 | if (bucket->cas_first(new_node, first_at_start)) { |
906 | JFR_ONLY(_stats_rate.add();) |
907 | new_node = NULL; |
908 | ret = true; |
909 | break; /* leave critical section */ |
910 | } |
// CAS failed, we must leave the critical section and retry.
912 | locked = bucket->is_locked(); |
913 | } else { |
914 | // There is a duplicate. |
915 | break; /* leave critical section */ |
916 | } |
917 | } /* leave critical section */ |
918 | i++; |
919 | if (locked) { |
920 | os::naked_yield(); |
921 | } else { |
922 | SpinPause(); |
923 | } |
924 | } |
925 | |
926 | if (new_node != NULL) { |
// A duplicate was found (possibly inserted after a failed CAS); we must free the unused node.
928 | Node::destroy_node(new_node); |
929 | } else if (i == 0 && clean) { |
930 | // We only do cleaning on fast inserts. |
931 | Bucket* bucket = get_bucket_locked(thread, lookup_f.get_hash()); |
932 | delete_in_bucket(thread, bucket, lookup_f); |
933 | bucket->unlock(); |
934 | clean = false; |
935 | } |
936 | |
937 | if (grow_hint != NULL) { |
938 | *grow_hint = loops > _grow_hint; |
939 | } |
940 | |
941 | if (clean_hint != NULL) { |
942 | *clean_hint = clean; |
943 | } |
944 | |
945 | return ret; |
946 | } |
947 | |
948 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
949 | template <typename FUNC> |
950 | inline bool ConcurrentHashTable<VALUE, CONFIG, F>:: |
951 | visit_nodes(Bucket* bucket, FUNC& visitor_f) |
952 | { |
953 | Node* current_node = bucket->first(); |
954 | while (current_node != NULL) { |
955 | if (!visitor_f(current_node->value())) { |
956 | return false; |
957 | } |
958 | current_node = current_node->next(); |
959 | } |
960 | return true; |
961 | } |
962 | |
963 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
964 | template <typename FUNC> |
965 | inline void ConcurrentHashTable<VALUE, CONFIG, F>:: |
966 | do_scan_locked(Thread* thread, FUNC& scan_f) |
967 | { |
968 | assert(_resize_lock_owner == thread, "Re-size lock not held" ); |
// We could do a critical section over the entire loop, but that would block
// updates for a long time. Instead we choose to block resizes.
971 | InternalTable* table = get_table(); |
972 | for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) { |
973 | ScopedCS cs(thread, this); |
974 | if (!visit_nodes(table->get_bucket(bucket_it), scan_f)) { |
975 | break; /* ends critical section */ |
976 | } |
977 | } /* ends critical section */ |
978 | } |
979 | |
980 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
981 | template <typename EVALUATE_FUNC> |
982 | inline size_t ConcurrentHashTable<VALUE, CONFIG, F>:: |
983 | delete_check_nodes(Bucket* bucket, EVALUATE_FUNC& eval_f, |
984 | size_t num_del, Node** ndel) |
985 | { |
986 | size_t dels = 0; |
987 | Node* const volatile * rem_n_prev = bucket->first_ptr(); |
988 | Node* rem_n = bucket->first(); |
989 | while (rem_n != NULL) { |
990 | if (eval_f(rem_n->value())) { |
991 | ndel[dels++] = rem_n; |
992 | Node* next_node = rem_n->next(); |
993 | bucket->release_assign_node_ptr(rem_n_prev, next_node); |
994 | rem_n = next_node; |
995 | if (dels == num_del) { |
996 | break; |
997 | } |
998 | } else { |
999 | rem_n_prev = rem_n->next_ptr(); |
1000 | rem_n = rem_n->next(); |
1001 | } |
1002 | } |
1003 | return dels; |
1004 | } |
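
// Usage sketch (illustrative only; MyValue, MyConfig, MyLookup and MyFound are
// hypothetical types, not part of this file). CONFIG supplies the hash
// function and, via BaseConfig, node allocation; a lookup functor supplies
// get_hash() and equals():
//
//   struct MyConfig : public ConcurrentHashTable<MyValue, MyConfig, mtInternal>::BaseConfig {
//     static uintx get_hash(const MyValue& v, bool* is_dead) {
//       *is_dead = false;
//       return v.hash();
//     }
//   };
//   typedef ConcurrentHashTable<MyValue, MyConfig, mtInternal> MyTable;
//
//   MyTable* table = new MyTable(SIZE_SMALL_LOG2, SIZE_BIG_LOG2, 4 /* grow_hint */);
//   MyLookup lookup(key);   // provides get_hash() and equals(MyValue*, bool* is_dead)
//   MyFound found;          // provides operator()(MyValue*)
//   bool present = table->get(Thread::current(), lookup, found, NULL /* grow_hint */);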
1005 | |
1006 | // Constructor |
1007 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
1008 | inline ConcurrentHashTable<VALUE, CONFIG, F>:: |
1009 | ConcurrentHashTable(size_t log2size, size_t log2size_limit, size_t grow_hint) |
1010 | : _new_table(NULL), _log2_size_limit(log2size_limit), |
1011 | _log2_start_size(log2size), _grow_hint(grow_hint), |
1012 | _size_limit_reached(false), _resize_lock_owner(NULL), |
1013 | _invisible_epoch(0) |
1014 | { |
1015 | _stats_rate = TableRateStatistics(); |
1016 | _resize_lock = |
1017 | new Mutex(Mutex::leaf, "ConcurrentHashTable" , false, |
1018 | Monitor::_safepoint_check_never); |
1019 | _table = new InternalTable(log2size); |
1020 | assert(log2size_limit >= log2size, "bad ergo" ); |
1021 | _size_limit_reached = _table->_log2_size == _log2_size_limit; |
1022 | } |
1023 | |
1024 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
1025 | inline ConcurrentHashTable<VALUE, CONFIG, F>:: |
1026 | ~ConcurrentHashTable() |
1027 | { |
1028 | delete _resize_lock; |
1029 | free_nodes(); |
1030 | delete _table; |
1031 | } |
1032 | |
1033 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
1034 | inline size_t ConcurrentHashTable<VALUE, CONFIG, F>:: |
1035 | get_size_log2(Thread* thread) |
1036 | { |
1037 | ScopedCS cs(thread, this); |
1038 | return _table->_log2_size; |
1039 | } |
1040 | |
1041 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
1042 | inline bool ConcurrentHashTable<VALUE, CONFIG, F>:: |
1043 | shrink(Thread* thread, size_t size_limit_log2) |
1044 | { |
1045 | size_t tmp = size_limit_log2 == 0 ? _log2_start_size : size_limit_log2; |
1046 | bool ret = internal_shrink(thread, tmp); |
1047 | return ret; |
1048 | } |
1049 | |
1050 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
1051 | inline bool ConcurrentHashTable<VALUE, CONFIG, F>:: |
1052 | grow(Thread* thread, size_t size_limit_log2) |
1053 | { |
1054 | size_t tmp = size_limit_log2 == 0 ? _log2_size_limit : size_limit_log2; |
1055 | return internal_grow(thread, tmp); |
1056 | } |
1057 | |
1058 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
1059 | template <typename LOOKUP_FUNC, typename FOUND_FUNC> |
1060 | inline bool ConcurrentHashTable<VALUE, CONFIG, F>:: |
1061 | get(Thread* thread, LOOKUP_FUNC& lookup_f, FOUND_FUNC& found_f, bool* grow_hint) |
1062 | { |
1063 | bool ret = false; |
1064 | ScopedCS cs(thread, this); |
1065 | VALUE* val = internal_get(thread, lookup_f, grow_hint); |
1066 | if (val != NULL) { |
1067 | found_f(val); |
1068 | ret = true; |
1069 | } |
1070 | return ret; |
1071 | } |
1072 | |
1073 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
1074 | inline bool ConcurrentHashTable<VALUE, CONFIG, F>:: |
1075 | unsafe_insert(const VALUE& value) { |
1076 | bool dead_hash = false; |
1077 | size_t hash = CONFIG::get_hash(value, &dead_hash); |
1078 | if (dead_hash) { |
1079 | return false; |
1080 | } |
// This is an unsafe operation: no critical section and no bucket locking, so
// the caller must ensure that the table is not accessed concurrently.
1082 | InternalTable* table = get_table(); |
1083 | Bucket* bucket = get_bucket_in(table, hash); |
1084 | assert(!bucket->have_redirect() && !bucket->is_locked(), "bad" ); |
1085 | Node* new_node = Node::create_node(value, bucket->first()); |
1086 | if (!bucket->cas_first(new_node, bucket->first())) { |
1087 | assert(false, "bad" ); |
1088 | } |
1089 | JFR_ONLY(_stats_rate.add();) |
1090 | return true; |
1091 | } |
1092 | |
1093 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
1094 | template <typename SCAN_FUNC> |
1095 | inline bool ConcurrentHashTable<VALUE, CONFIG, F>:: |
1096 | try_scan(Thread* thread, SCAN_FUNC& scan_f) |
1097 | { |
1098 | if (!try_resize_lock(thread)) { |
1099 | return false; |
1100 | } |
1101 | do_scan_locked(thread, scan_f); |
1102 | unlock_resize_lock(thread); |
1103 | return true; |
1104 | } |
1105 | |
1106 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
1107 | template <typename SCAN_FUNC> |
1108 | inline void ConcurrentHashTable<VALUE, CONFIG, F>:: |
1109 | do_scan(Thread* thread, SCAN_FUNC& scan_f) |
1110 | { |
1111 | assert(!SafepointSynchronize::is_at_safepoint(), |
1112 | "must be outside a safepoint" ); |
1113 | assert(_resize_lock_owner != thread, "Re-size lock held" ); |
1114 | lock_resize_lock(thread); |
1115 | do_scan_locked(thread, scan_f); |
1116 | unlock_resize_lock(thread); |
1117 | assert(_resize_lock_owner != thread, "Re-size lock held" ); |
1118 | } |
1119 | |
1120 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
1121 | template <typename SCAN_FUNC> |
1122 | inline void ConcurrentHashTable<VALUE, CONFIG, F>:: |
1123 | do_safepoint_scan(SCAN_FUNC& scan_f) |
1124 | { |
1125 | // We only allow this method to be used during a safepoint. |
1126 | assert(SafepointSynchronize::is_at_safepoint(), |
1127 | "must only be called in a safepoint" ); |
1128 | assert(Thread::current()->is_VM_thread(), |
1129 | "should be in vm thread" ); |
1130 | |
// Here we skip the protection, since at a safepoint no other thread can use
// this table at the same time.
1133 | InternalTable* table = get_table(); |
1134 | for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) { |
1135 | Bucket* bucket = table->get_bucket(bucket_it); |
// If the bucket has a redirect, the items will be in the new table.
// We must visit them there, since the new table will contain any
// concurrent inserts done after this bucket was resized.
// If the bucket doesn't have the redirect flag, all items are in this table.
1140 | if (!bucket->have_redirect()) { |
if (!visit_nodes(bucket, scan_f)) {
1142 | return; |
1143 | } |
1144 | } else { |
1145 | assert(bucket->is_locked(), "Bucket must be locked." ); |
1146 | } |
1147 | } |
1148 | // If there is a paused resize we also need to visit the already resized items. |
1149 | table = get_new_table(); |
1150 | if (table == NULL) { |
1151 | return; |
1152 | } |
1153 | DEBUG_ONLY(if (table == POISON_PTR) { return; }) |
1154 | for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) { |
1155 | Bucket* bucket = table->get_bucket(bucket_it); |
1156 | assert(!bucket->is_locked(), "Bucket must be unlocked." ); |
1157 | if (!visit_nodes(bucket, scan_f)) { |
1158 | return; |
1159 | } |
1160 | } |
1161 | } |
1162 | |
1163 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
1164 | template <typename EVALUATE_FUNC, typename DELETE_FUNC> |
1165 | inline bool ConcurrentHashTable<VALUE, CONFIG, F>:: |
1166 | try_bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f) |
1167 | { |
1168 | if (!try_resize_lock(thread)) { |
1169 | return false; |
1170 | } |
1171 | do_bulk_delete_locked(thread, eval_f, del_f); |
1172 | unlock_resize_lock(thread); |
1173 | assert(_resize_lock_owner != thread, "Re-size lock held" ); |
1174 | return true; |
1175 | } |
1176 | |
1177 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
1178 | template <typename EVALUATE_FUNC, typename DELETE_FUNC> |
1179 | inline void ConcurrentHashTable<VALUE, CONFIG, F>:: |
1180 | bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f) |
1181 | { |
1182 | assert(!SafepointSynchronize::is_at_safepoint(), |
1183 | "must be outside a safepoint" ); |
1184 | lock_resize_lock(thread); |
1185 | do_bulk_delete_locked(thread, eval_f, del_f); |
1186 | unlock_resize_lock(thread); |
1187 | } |
1188 | |
1189 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
1190 | template <typename VALUE_SIZE_FUNC> |
1191 | inline TableStatistics ConcurrentHashTable<VALUE, CONFIG, F>:: |
1192 | statistics_calculate(Thread* thread, VALUE_SIZE_FUNC& vs_f) |
1193 | { |
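// Buckets that are locked or redirected at the moment we look at them are
// skipped, so the resulting numbers are approximate if the table is being
// modified concurrently.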
1194 | NumberSeq summary; |
1195 | size_t literal_bytes = 0; |
1196 | InternalTable* table = get_table(); |
1197 | for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) { |
1198 | ScopedCS cs(thread, this); |
1199 | size_t count = 0; |
1200 | Bucket* bucket = table->get_bucket(bucket_it); |
1201 | if (bucket->have_redirect() || bucket->is_locked()) { |
1202 | continue; |
1203 | } |
1204 | Node* current_node = bucket->first(); |
1205 | while (current_node != NULL) { |
1206 | ++count; |
1207 | literal_bytes += vs_f(current_node->value()); |
1208 | current_node = current_node->next(); |
1209 | } |
1210 | summary.add((double)count); |
1211 | } |
1212 | |
1213 | return TableStatistics(_stats_rate, summary, literal_bytes, sizeof(Bucket), sizeof(Node)); |
1214 | } |
1215 | |
1216 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
1217 | template <typename VALUE_SIZE_FUNC> |
1218 | inline TableStatistics ConcurrentHashTable<VALUE, CONFIG, F>:: |
1219 | statistics_get(Thread* thread, VALUE_SIZE_FUNC& vs_f, TableStatistics old) |
1220 | { |
1221 | if (!try_resize_lock(thread)) { |
1222 | return old; |
1223 | } |
1224 | |
1225 | TableStatistics ts = statistics_calculate(thread, vs_f); |
1226 | unlock_resize_lock(thread); |
1227 | |
1228 | return ts; |
1229 | } |
1230 | |
1231 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
1232 | template <typename VALUE_SIZE_FUNC> |
1233 | inline void ConcurrentHashTable<VALUE, CONFIG, F>:: |
1234 | statistics_to(Thread* thread, VALUE_SIZE_FUNC& vs_f, |
1235 | outputStream* st, const char* table_name) |
1236 | { |
1237 | if (!try_resize_lock(thread)) { |
1238 | st->print_cr("statistics unavailable at this moment" ); |
1239 | return; |
1240 | } |
1241 | |
1242 | TableStatistics ts = statistics_calculate(thread, vs_f); |
1243 | unlock_resize_lock(thread); |
1244 | |
1245 | ts.print(st, table_name); |
1246 | } |
1247 | |
1248 | template <typename VALUE, typename CONFIG, MEMFLAGS F> |
1249 | inline bool ConcurrentHashTable<VALUE, CONFIG, F>:: |
1250 | try_move_nodes_to(Thread* thread, ConcurrentHashTable<VALUE, CONFIG, F>* to_cht) |
1251 | { |
1252 | if (!try_resize_lock(thread)) { |
1253 | return false; |
1254 | } |
assert(_new_table == NULL || _new_table == POISON_PTR, "Must be NULL or poison" );
1256 | for (size_t bucket_it = 0; bucket_it < _table->_size; bucket_it++) { |
1257 | Bucket* bucket = _table->get_bucket(bucket_it); |
1258 | assert(!bucket->have_redirect() && !bucket->is_locked(), "Table must be uncontended" ); |
1259 | while (bucket->first() != NULL) { |
1260 | Node* move_node = bucket->first(); |
1261 | bool ok = bucket->cas_first(move_node->next(), move_node); |
1262 | assert(ok, "Uncontended cas must work" ); |
1263 | bool dead_hash = false; |
1264 | size_t insert_hash = CONFIG::get_hash(*move_node->value(), &dead_hash); |
1265 | if (!dead_hash) { |
1266 | Bucket* insert_bucket = to_cht->get_bucket(insert_hash); |
assert(!bucket->have_redirect() && !bucket->is_locked(), "No state bits should be present" );
1268 | move_node->set_next(insert_bucket->first()); |
1269 | ok = insert_bucket->cas_first(move_node, insert_bucket->first()); |
1270 | assert(ok, "Uncontended cas must work" ); |
1271 | } |
1272 | } |
1273 | } |
1274 | unlock_resize_lock(thread); |
1275 | return true; |
1276 | } |
1277 | |
1278 | #endif // SHARE_UTILITIES_CONCURRENTHASHTABLE_INLINE_HPP |
1279 | |