/*
 * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_UTILITIES_CONCURRENTHASHTABLETASKS_INLINE_HPP
#define SHARE_UTILITIES_CONCURRENTHASHTABLETASKS_INLINE_HPP

#include "utilities/globalDefinitions.hpp"
#include "utilities/concurrentHashTable.inline.hpp"

// This inline file contains BulkDeleteTask and GrowTask, which are both bucket
// operations and are therefore serialized with each other.

// Base class for pausable and/or parallel bulk bucket operations.
template <typename VALUE, typename CONFIG, MEMFLAGS F>
class ConcurrentHashTable<VALUE, CONFIG, F>::BucketsOperation {
 protected:
  ConcurrentHashTable<VALUE, CONFIG, F>* _cht;

  // Default value of _task_size_log2.
  static const size_t DEFAULT_TASK_SIZE_LOG2 = 12;

  // The table is split into ranges; each claim of _next_to_claim takes one range.
  volatile size_t _next_to_claim;
  size_t _task_size_log2; // Log2 of the number of buckets in one range.
  size_t _stop_task;      // Number of ranges; claiming stops here.
  size_t _size_log2;      // Log2 of the table size.
  bool   _is_mt;
  BucketsOperation(ConcurrentHashTable<VALUE, CONFIG, F>* cht, bool is_mt = false)
    : _cht(cht), _next_to_claim(0), _task_size_log2(DEFAULT_TASK_SIZE_LOG2),
      _stop_task(0), _size_log2(0), _is_mt(is_mt) {}

  // Returns true if the range [*start, *stop) was successfully claimed.
  bool claim(size_t* start, size_t* stop) {
    size_t claimed = Atomic::add((size_t)1, &_next_to_claim) - 1;
    if (claimed >= _stop_task) {
      return false;
    }
    *start = claimed * (((size_t)1) << _task_size_log2);
    *stop  = ((*start) + (((size_t)1) << _task_size_log2));
    return true;
  }

  // Calculate starting values.
  void setup(Thread* thread) {
    thread_owns_resize_lock(thread);
    _size_log2 = _cht->_table->_log2_size;
    _task_size_log2 = MIN2(_task_size_log2, _size_log2);
    size_t tmp = _size_log2 > _task_size_log2 ?
                 _size_log2 - _task_size_log2 : 0;
    _stop_task = (((size_t)1) << tmp);
  }
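
  // Worked example (illustrative numbers, not taken from the source): with
  // _log2_size == 16 and DEFAULT_TASK_SIZE_LOG2 == 12, setup() leaves
  // _task_size_log2 == 12 and _stop_task == 1 << (16 - 12) == 16, so the
  // 2^16 buckets are split into 16 claimable ranges. claim() then maps
  // claimed index 0 to buckets [0, 4096), index 1 to [4096, 8192), and so on.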

  // Returns false if all ranges are claimed.
  bool have_more_work() {
    return OrderAccess::load_acquire(&_next_to_claim) < _stop_task;
  }

  void thread_owns_resize_lock(Thread* thread) {
    assert(BucketsOperation::_cht->_resize_lock_owner == thread,
           "Should be locked by me");
    assert(BucketsOperation::_cht->_resize_lock->owned_by_self(),
           "Operations lock not held");
  }
  void thread_owns_only_state_lock(Thread* thread) {
    assert(BucketsOperation::_cht->_resize_lock_owner == thread,
           "Should be locked by me");
    assert(!BucketsOperation::_cht->_resize_lock->owned_by_self(),
           "Operations lock held");
  }
  void thread_do_not_own_resize_lock(Thread* thread) {
    assert(!BucketsOperation::_cht->_resize_lock->owned_by_self(),
           "Operations lock held");
    assert(BucketsOperation::_cht->_resize_lock_owner != thread,
           "Should not be locked by me");
  }

public:
  // Pauses for a safepoint: the resize lock mutex is released, but this
  // thread remains the logical owner (_resize_lock_owner stays set).
  void pause(Thread* thread) {
    // This leaves the internal state locked.
    this->thread_owns_resize_lock(thread);
    BucketsOperation::_cht->_resize_lock->unlock();
    this->thread_owns_only_state_lock(thread);
  }

  // Continues after a safepoint.
  void cont(Thread* thread) {
    this->thread_owns_only_state_lock(thread);
    // Retry if someone slips in directly after the safepoint and takes the lock.
    while (!BucketsOperation::_cht->_resize_lock->try_lock())
      { /* spin */ }
    this->thread_owns_resize_lock(thread);
  }
};

// For doing pausable/parallel bulk delete.
template <typename VALUE, typename CONFIG, MEMFLAGS F>
class ConcurrentHashTable<VALUE, CONFIG, F>::BulkDeleteTask :
  public BucketsOperation
{
 public:
  BulkDeleteTask(ConcurrentHashTable<VALUE, CONFIG, F>* cht, bool is_mt = false)
    : BucketsOperation(cht, is_mt) {
  }
  // Must be called before the task is started.
  bool prepare(Thread* thread) {
    bool lock = BucketsOperation::_cht->try_resize_lock(thread);
    if (!lock) {
      return false;
    }
    this->setup(thread);
    return true;
  }

  // Processes one range, destroying all entries for which EVALUATE_FUNC
  // returns true; DELETE_FUNC is called on each entry before destruction.
  // Returns true if a range was claimed, i.e. there may be more work.
  template <typename EVALUATE_FUNC, typename DELETE_FUNC>
  bool do_task(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f) {
    size_t start, stop;
    assert(BucketsOperation::_cht->_resize_lock_owner != NULL,
           "Should be locked");
    if (!this->claim(&start, &stop)) {
      return false;
    }
    BucketsOperation::_cht->do_bulk_delete_locked_for(thread, start, stop,
                                                      eval_f, del_f,
                                                      BucketsOperation::_is_mt);
    assert(BucketsOperation::_cht->_resize_lock_owner != NULL,
           "Should be locked");
    return true;
  }

  // Must be called after all ranges are done.
  void done(Thread* thread) {
    this->thread_owns_resize_lock(thread);
    BucketsOperation::_cht->unlock_resize_lock(thread);
    this->thread_do_not_own_resize_lock(thread);
  }
};
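
// A rough usage sketch for BulkDeleteTask (MyTable, my_table, thread and the
// two functors below are illustrative placeholders, not part of this file;
// my_table is a pointer to a concrete ConcurrentHashTable instantiation):
//
//   MyTable::BulkDeleteTask task(my_table, /* is_mt */ true);
//   if (task.prepare(thread)) {
//     while (task.do_task(thread, is_dead_f, free_node_f)) {
//       task.pause(thread);
//       // The thread may block for a safepoint here.
//       task.cont(thread);
//     }
//     task.done(thread);
//   }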

template <typename VALUE, typename CONFIG, MEMFLAGS F>
class ConcurrentHashTable<VALUE, CONFIG, F>::GrowTask :
  public BucketsOperation
{
 public:
  GrowTask(ConcurrentHashTable<VALUE, CONFIG, F>* cht) : BucketsOperation(cht) {
  }
  // Must be called before the task is started.
  bool prepare(Thread* thread) {
    if (!BucketsOperation::_cht->internal_grow_prolog(
          thread, BucketsOperation::_cht->_log2_size_limit)) {
      return false;
    }
    this->setup(thread);
    return true;
  }

  // Resizes a portion of the table. Returns true if a range was claimed,
  // i.e. there may be more work.
  bool do_task(Thread* thread) {
    size_t start, stop;
    assert(BucketsOperation::_cht->_resize_lock_owner != NULL,
           "Should be locked");
    if (!this->claim(&start, &stop)) {
      return false;
    }
    BucketsOperation::_cht->internal_grow_range(thread, start, stop);
    assert(BucketsOperation::_cht->_resize_lock_owner != NULL,
           "Should be locked");
    return true;
  }

  // Must be called after do_task returns false.
  void done(Thread* thread) {
    this->thread_owns_resize_lock(thread);
    BucketsOperation::_cht->internal_grow_epilog(thread);
    this->thread_do_not_own_resize_lock(thread);
  }
};
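
// A rough usage sketch for GrowTask (MyTable, my_table and thread are
// illustrative placeholders, not part of this file):
//
//   MyTable::GrowTask gt(my_table);
//   if (gt.prepare(thread)) {
//     while (gt.do_task(thread)) {
//       gt.pause(thread);
//       // The thread may block for a safepoint here.
//       gt.cont(thread);
//     }
//     gt.done(thread);
//   }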

#endif // SHARE_UTILITIES_CONCURRENTHASHTABLETASKS_INLINE_HPP