1 | /* |
2 | * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved. |
3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 | * |
5 | * This code is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 only, as |
7 | * published by the Free Software Foundation. |
8 | * |
9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
12 | * version 2 for more details (a copy is included in the LICENSE file that |
13 | * accompanied this code). |
14 | * |
15 | * You should have received a copy of the GNU General Public License version |
16 | * 2 along with this work; if not, write to the Free Software Foundation, |
17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
18 | * |
19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
20 | * or visit www.oracle.com if you need additional information or have any |
21 | * questions. |
22 | * |
23 | */ |
24 | |
25 | #ifndef SHARE_GC_PARALLEL_GCTASKMANAGER_HPP |
26 | #define SHARE_GC_PARALLEL_GCTASKMANAGER_HPP |
27 | |
28 | #include "runtime/mutex.hpp" |
29 | #include "utilities/growableArray.hpp" |
30 | |
31 | // |
// The GCTaskManager owns a queue of GCTasks and provides the accessors
// that allow the queue to be used safely from many threads.
34 | // |
35 | |
36 | // Forward declarations of types defined in this file. |
37 | class GCTask; |
38 | class GCTaskQueue; |
39 | class SynchronizedGCTaskQueue; |
40 | class GCTaskManager; |
41 | // Some useful subclasses of GCTask. You can also make up your own. |
42 | class NoopGCTask; |
43 | class WaitForBarrierGCTask; |
44 | class IdleGCTask; |
45 | // A free list of Monitor*'s. |
46 | class MonitorSupply; |
47 | |
48 | // Forward declarations of classes referenced in this file via pointer. |
49 | class GCTaskThread; |
50 | class Mutex; |
51 | class Monitor; |
52 | class ThreadClosure; |
53 | |
54 | // The abstract base GCTask. |
55 | class GCTask : public ResourceObj { |
56 | public: |
57 | // Known kinds of GCTasks, for predicates. |
58 | class Kind : AllStatic { |
59 | public: |
60 | enum kind { |
61 | unknown_task, |
62 | ordinary_task, |
63 | wait_for_barrier_task, |
64 | noop_task, |
65 | idle_task |
66 | }; |
67 | static const char* to_string(kind value); |
68 | }; |
69 | private: |
70 | // Instance state. |
71 | Kind::kind _kind; // For runtime type checking. |
72 | uint _affinity; // Which worker should run task. |
73 | GCTask* _newer; // Tasks are on doubly-linked ... |
74 | GCTask* _older; // ... lists. |
75 | uint _gc_id; // GC Id to use for the thread that executes this task |
76 | public: |
  virtual char* name() { return (char *)"task"; }
78 | |
79 | uint gc_id() { return _gc_id; } |
80 | |
81 | // Abstract do_it method |
82 | virtual void do_it(GCTaskManager* manager, uint which) = 0; |
83 | // Accessors |
84 | Kind::kind kind() const { |
85 | return _kind; |
86 | } |
87 | uint affinity() const { |
88 | return _affinity; |
89 | } |
90 | GCTask* newer() const { |
91 | return _newer; |
92 | } |
93 | void set_newer(GCTask* n) { |
94 | _newer = n; |
95 | } |
96 | GCTask* older() const { |
97 | return _older; |
98 | } |
99 | void set_older(GCTask* p) { |
100 | _older = p; |
101 | } |
102 | // Predicates. |
103 | bool is_ordinary_task() const { |
104 | return kind()==Kind::ordinary_task; |
105 | } |
106 | bool is_barrier_task() const { |
107 | return kind()==Kind::wait_for_barrier_task; |
108 | } |
109 | bool is_noop_task() const { |
110 | return kind()==Kind::noop_task; |
111 | } |
112 | bool is_idle_task() const { |
113 | return kind()==Kind::idle_task; |
114 | } |
115 | void print(const char* message) const PRODUCT_RETURN; |
116 | protected: |
117 | // Constructors: Only create subclasses. |
118 | // An ordinary GCTask. |
119 | GCTask(); |
120 | // A GCTask of a particular kind, usually barrier or noop. |
121 | GCTask(Kind::kind kind); |
122 | GCTask(Kind::kind kind, uint gc_id); |
  // We would want a virtual destructor because this class has virtual
  // methods, but since ResourceObj destructors are never called, we
  // don't declare one at all. Instead we have this method, which
  // subclasses call to clean up.
127 | virtual void destruct(); |
128 | // Methods. |
129 | void initialize(Kind::kind kind, uint gc_id); |
130 | }; |
131 | |
132 | // A doubly-linked list of GCTasks. |
133 | // The list is not synchronized, because sometimes we want to |
134 | // build up a list and then make it available to other threads. |
135 | // See also: SynchronizedGCTaskQueue. |
136 | class GCTaskQueue : public ResourceObj { |
137 | private: |
138 | // Instance state. |
139 | GCTask* _insert_end; // Tasks are enqueued at this end. |
140 | GCTask* _remove_end; // Tasks are dequeued from this end. |
141 | uint _length; // The current length of the queue. |
142 | const bool _is_c_heap_obj; // Is this a CHeapObj? |
143 | public: |
144 | // Factory create and destroy methods. |
145 | // Create as ResourceObj. |
146 | static GCTaskQueue* create(); |
147 | // Create as CHeapObj. |
148 | static GCTaskQueue* create_on_c_heap(); |
149 | // Destroyer. |
150 | static void destroy(GCTaskQueue* that); |
151 | // Accessors. |
152 | // These just examine the state of the queue. |
153 | bool is_empty() const { |
    assert(((insert_end() == NULL && remove_end() == NULL) ||
            (insert_end() != NULL && remove_end() != NULL)),
           "insert_end and remove_end don't match");
    assert((insert_end() != NULL) || (_length == 0), "Not empty");
158 | return insert_end() == NULL; |
159 | } |
160 | uint length() const { |
161 | return _length; |
162 | } |
163 | // Methods. |
164 | // Enqueue one task. |
165 | void enqueue(GCTask* task); |
166 | // Enqueue a list of tasks. Empties the argument list. |
167 | void enqueue(GCTaskQueue* list); |
168 | // Dequeue one task. |
169 | GCTask* dequeue(); |
170 | // Dequeue one task, preferring one with affinity. |
171 | GCTask* dequeue(uint affinity); |
172 | protected: |
173 | // Constructor. Clients use factory, but there might be subclasses. |
174 | GCTaskQueue(bool on_c_heap); |
175 | // Destructor-like method. |
176 | // Because ResourceMark doesn't call destructors. |
177 | // This method cleans up like one. |
178 | virtual void destruct(); |
179 | // Accessors. |
180 | GCTask* insert_end() const { |
181 | return _insert_end; |
182 | } |
183 | void set_insert_end(GCTask* value) { |
184 | _insert_end = value; |
185 | } |
186 | GCTask* remove_end() const { |
187 | return _remove_end; |
188 | } |
189 | void set_remove_end(GCTask* value) { |
190 | _remove_end = value; |
191 | } |
192 | void increment_length() { |
193 | _length += 1; |
194 | } |
195 | void decrement_length() { |
196 | _length -= 1; |
197 | } |
198 | void set_length(uint value) { |
199 | _length = value; |
200 | } |
201 | bool is_c_heap_obj() const { |
202 | return _is_c_heap_obj; |
203 | } |
204 | // Methods. |
205 | void initialize(); |
206 | GCTask* remove(); // Remove from remove end. |
207 | GCTask* remove(GCTask* task); // Remove from the middle. |
208 | void print(const char* message) const PRODUCT_RETURN; |
209 | // Debug support |
210 | void verify_length() const PRODUCT_RETURN; |
211 | }; |
212 | |
213 | // A GCTaskQueue that can be synchronized. |
214 | // This "has-a" GCTaskQueue and a mutex to do the exclusion. |
215 | class SynchronizedGCTaskQueue : public CHeapObj<mtGC> { |
216 | private: |
217 | // Instance state. |
218 | GCTaskQueue* _unsynchronized_queue; // Has-a unsynchronized queue. |
219 | Monitor * _lock; // Lock to control access. |
220 | public: |
221 | // Factory create and destroy methods. |
222 | static SynchronizedGCTaskQueue* create(GCTaskQueue* queue, Monitor * lock) { |
223 | return new SynchronizedGCTaskQueue(queue, lock); |
224 | } |
225 | static void destroy(SynchronizedGCTaskQueue* that) { |
226 | if (that != NULL) { |
227 | delete that; |
228 | } |
229 | } |
230 | // Accessors |
231 | GCTaskQueue* unsynchronized_queue() const { |
232 | return _unsynchronized_queue; |
233 | } |
234 | Monitor * lock() const { |
235 | return _lock; |
236 | } |
237 | // GCTaskQueue wrapper methods. |
238 | // These check that you hold the lock |
239 | // and then call the method on the queue. |
240 | bool is_empty() const { |
    guarantee(own_lock(), "don't own the lock");
242 | return unsynchronized_queue()->is_empty(); |
243 | } |
244 | void enqueue(GCTask* task) { |
    guarantee(own_lock(), "don't own the lock");
246 | unsynchronized_queue()->enqueue(task); |
247 | } |
248 | void enqueue(GCTaskQueue* list) { |
    guarantee(own_lock(), "don't own the lock");
250 | unsynchronized_queue()->enqueue(list); |
251 | } |
252 | GCTask* dequeue() { |
    guarantee(own_lock(), "don't own the lock");
254 | return unsynchronized_queue()->dequeue(); |
255 | } |
256 | GCTask* dequeue(uint affinity) { |
    guarantee(own_lock(), "don't own the lock");
258 | return unsynchronized_queue()->dequeue(affinity); |
259 | } |
260 | uint length() const { |
    guarantee(own_lock(), "don't own the lock");
262 | return unsynchronized_queue()->length(); |
263 | } |
264 | // For guarantees. |
265 | bool own_lock() const { |
266 | return lock()->owned_by_self(); |
267 | } |
268 | protected: |
269 | // Constructor. Clients use factory, but there might be subclasses. |
270 | SynchronizedGCTaskQueue(GCTaskQueue* queue, Monitor * lock); |
  // Destructor. Not virtual because this class has no virtual methods.
272 | ~SynchronizedGCTaskQueue(); |
273 | }; |
274 | |
275 | class WaitHelper { |
276 | private: |
277 | Monitor* _monitor; |
278 | volatile bool _should_wait; |
279 | public: |
280 | WaitHelper(); |
281 | ~WaitHelper(); |
282 | void wait_for(bool reset); |
283 | void notify(); |
284 | void set_should_wait(bool value) { |
285 | _should_wait = value; |
286 | } |
287 | |
288 | Monitor* monitor() const { |
289 | return _monitor; |
290 | } |
291 | bool should_wait() const { |
292 | return _should_wait; |
293 | } |
294 | void release_monitor(); |
295 | }; |
296 | |
297 | // Dynamic number of GC threads |
298 | // |
299 | // GC threads wait in get_task() for work (i.e., a task) to perform. |
300 | // When the number of GC threads was static, the number of tasks |
301 | // created to do a job was equal to or greater than the maximum |
302 | // number of GC threads (ParallelGCThreads). The job might be divided |
303 | // into a number of tasks greater than the number of GC threads for |
304 | // load balancing (i.e., over partitioning). The last task to be |
305 | // executed by a GC thread in a job is a work stealing task. A |
306 | // GC thread that gets a work stealing task continues to execute |
307 | // that task until the job is done. In the static number of GC threads |
308 | // case, tasks are added to a queue (FIFO). The work stealing tasks are |
309 | // the last to be added. Once the tasks are added, the GC threads grab |
// a task and go. A single thread can do all the non-work stealing tasks
// and then execute a work stealing task and wait for all the other GC
// threads to execute their work stealing tasks.
313 | // In the dynamic number of GC threads implementation, idle-tasks are |
314 | // created to occupy the non-participating or "inactive" threads. An |
315 | // idle-task makes the GC thread wait on a barrier that is part of the |
// GCTaskManager. The GC threads that have been "idled" in an IdleGCTask
317 | // are released once all the active GC threads have finished their work |
318 | // stealing tasks. The GCTaskManager does not wait for all the "idled" |
// GC threads to resume execution. When those GC threads do resume
// execution in the course of the thread scheduling, they call get_task()
// as all the other GC threads do. Because not all of the "idled" threads
// are required to execute in order to finish a job, it is possible for
// a GC thread to still be "idled" when the next job is started. Such
// a thread stays "idled" for the next job. This can result in a new
// job not having all the expected active workers. For example, if one
// job requests 4 active workers out of a total of 10 workers, so that the
// remaining 6 are "idled", and the next job requests 6 active workers
// while all 6 of the "idled" workers are still idle, then the next job
// will only get 4 active workers.
// The implementation for the parallel old compaction phase has an
// added complication. In the static case parold partitions the chunks
// ready to be filled into stacks, one for each GC thread. A GC thread
// executing a draining task (which drains the stack of ready chunks)
// claims a stack according to its id (the unique ordinal value assigned
// to each GC thread). In the dynamic case not all GC threads will
// actively participate, so stacks with ready-to-fill chunks can only be
// given to the active threads. An initial implementation chose stacks
// numbered 1-n to get the ready chunks and required that GC threads
// 1-n be the active workers. This was undesirable because it required
// certain threads to participate. In the final implementation a
// list of stacks, equal in number to the active workers, is filled
// with ready chunks. GC threads that participate get a stack from
// the task (DrainStacksCompactionTask), empty the stack, and then add it to a
// recycling list at the end of the task. If the same GC thread gets
// a second task, it gets a second stack to drain and returns it. The
// stacks are added to a recycling list so that stealing tasks that run
// later in the same job can get a stack from the recycling list. Stealing
// tasks use the stacks in their work in a way similar to the draining tasks.
// A thread is not guaranteed to get anything but a stealing task, and
// a thread that only gets a stealing task still has to get a stack. A failed
// implementation tried to have the GC threads keep the stack they used
// during a draining task for later use in the stealing task, but that didn't
// work because, as noted, a thread is not guaranteed to get a draining task.
354 | // |
355 | // For PSScavenge and ParCompactionManager the GC threads are |
356 | // held in the GCTaskThread** _thread array in GCTaskManager. |
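
// Illustrative sketch only (not part of the VM sources): the typical client
// flow for a parallel phase. "SomeRootsTask" and "SomeStealTask" stand in
// for real task classes (see, e.g., psTasks.hpp and pcTasks.hpp);
// execute_and_wait() blocks until every task on the list has been run.
//
//   GCTaskQueue* q = GCTaskQueue::create();
//   for (uint i = 0; i < manager->active_workers(); i++) {
//     q->enqueue(new SomeRootsTask(i));
//   }
//   // Work stealing tasks go in last, one per active worker.
//   for (uint i = 0; i < manager->active_workers(); i++) {
//     q->enqueue(new SomeStealTask());
//   }
//   manager->execute_and_wait(q);   // returns once the whole job is complete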
357 | |
358 | |
359 | class GCTaskManager : public CHeapObj<mtGC> { |
360 | friend class ParCompactionManager; |
361 | friend class PSParallelCompact; |
362 | friend class PSScavenge; |
363 | friend class PSRefProcTaskExecutor; |
364 | friend class RefProcTaskExecutor; |
365 | friend class GCTaskThread; |
366 | friend class IdleGCTask; |
367 | private: |
368 | // Instance state. |
369 | const uint _workers; // Number of workers. |
370 | Monitor* _monitor; // Notification of changes. |
371 | SynchronizedGCTaskQueue* _queue; // Queue of tasks. |
372 | GCTaskThread** _thread; // Array of worker threads. |
373 | uint _created_workers; // Number of workers created. |
374 | uint _active_workers; // Number of active workers. |
375 | uint _busy_workers; // Number of busy workers. |
376 | uint _blocking_worker; // The worker that's blocking. |
377 | bool* _resource_flag; // Array of flag per threads. |
378 | uint _delivered_tasks; // Count of delivered tasks. |
379 | uint _completed_tasks; // Count of completed tasks. |
380 | uint _barriers; // Count of barrier tasks. |
381 | uint _emptied_queue; // Times we emptied the queue. |
382 | NoopGCTask* _noop_task; // The NoopGCTask instance. |
  WaitHelper _wait_helper; // Used by inactive workers
384 | volatile uint _idle_workers; // Number of idled workers |
385 | uint* _processor_assignment; // Worker to cpu mappings. May |
386 | // be used lazily |
387 | public: |
388 | // Factory create and destroy methods. |
389 | static GCTaskManager* create(uint workers) { |
390 | return new GCTaskManager(workers); |
391 | } |
392 | static void destroy(GCTaskManager* that) { |
393 | if (that != NULL) { |
394 | delete that; |
395 | } |
396 | } |
397 | // Accessors. |
398 | uint busy_workers() const { |
399 | return _busy_workers; |
400 | } |
401 | volatile uint idle_workers() const { |
402 | return _idle_workers; |
403 | } |
404 | // Pun between Monitor* and Mutex* |
405 | Monitor* monitor() const { |
406 | return _monitor; |
407 | } |
408 | Monitor * lock() const { |
409 | return _monitor; |
410 | } |
411 | WaitHelper* wait_helper() { |
412 | return &_wait_helper; |
413 | } |
414 | // Methods. |
415 | // Add the argument task to be run. |
416 | void add_task(GCTask* task); |
  // Add a list of tasks. Removes the tasks from the argument list.
418 | void add_list(GCTaskQueue* list); |
419 | // Claim a task for argument worker. |
420 | GCTask* get_task(uint which); |
421 | // Note the completion of a task by the argument worker. |
422 | void note_completion(uint which); |
423 | // Is the queue blocked from handing out new tasks? |
424 | bool is_blocked() const { |
425 | return (blocking_worker() != sentinel_worker()); |
426 | } |
427 | // Request that all workers release their resources. |
428 | void release_all_resources(); |
429 | // Ask if a particular worker should release its resources. |
430 | bool should_release_resources(uint which); // Predicate. |
431 | // Note the release of resources by the argument worker. |
432 | void note_release(uint which); |
433 | // Create IdleGCTasks for inactive workers and start workers |
434 | void task_idle_workers(); |
435 | // Release the workers in IdleGCTasks |
436 | void release_idle_workers(); |
437 | // Constants. |
438 | // A sentinel worker identifier. |
439 | static uint sentinel_worker() { |
440 | return (uint) -1; // Why isn't there a max_uint? |
441 | } |
442 | |
443 | // Execute the task queue and wait for the completion. |
444 | void execute_and_wait(GCTaskQueue* list); |
445 | |
446 | void print_task_time_stamps(); |
447 | void print_threads_on(outputStream* st); |
448 | void threads_do(ThreadClosure* tc); |
449 | |
450 | protected: |
451 | // Constructors. Clients use factory, but there might be subclasses. |
452 | // Create a GCTaskManager with the appropriate number of workers. |
453 | GCTaskManager(uint workers); |
454 | // Make virtual if necessary. |
455 | ~GCTaskManager(); |
456 | // Accessors. |
457 | uint workers() const { |
458 | return _workers; |
459 | } |
460 | uint update_active_workers(uint v) { |
    assert(v <= _workers, "Trying to set more workers active than there are");
462 | _active_workers = MIN2(v, _workers); |
    assert(v != 0, "Trying to set active workers to 0");
464 | _active_workers = MAX2(1U, _active_workers); |
465 | return _active_workers; |
466 | } |
467 | // Sets the number of threads that will be used in a collection |
468 | void set_active_gang(); |
469 | |
470 | SynchronizedGCTaskQueue* queue() const { |
471 | return _queue; |
472 | } |
473 | NoopGCTask* noop_task() const { |
474 | return _noop_task; |
475 | } |
476 | // Bounds-checking per-thread data accessors. |
477 | GCTaskThread* thread(uint which); |
478 | void set_thread(uint which, GCTaskThread* value); |
479 | bool resource_flag(uint which); |
480 | void set_resource_flag(uint which, bool value); |
481 | // Modifier methods with some semantics. |
482 | // Is any worker blocking handing out new tasks? |
483 | uint blocking_worker() const { |
484 | return _blocking_worker; |
485 | } |
486 | void set_blocking_worker(uint value) { |
487 | _blocking_worker = value; |
488 | } |
489 | void set_unblocked() { |
490 | set_blocking_worker(sentinel_worker()); |
491 | } |
492 | // Count of busy workers. |
493 | void reset_busy_workers() { |
494 | _busy_workers = 0; |
495 | } |
496 | uint increment_busy_workers(); |
497 | uint decrement_busy_workers(); |
498 | // Count of tasks delivered to workers. |
499 | uint delivered_tasks() const { |
500 | return _delivered_tasks; |
501 | } |
502 | void increment_delivered_tasks() { |
503 | _delivered_tasks += 1; |
504 | } |
505 | void reset_delivered_tasks() { |
506 | _delivered_tasks = 0; |
507 | } |
508 | // Count of tasks completed by workers. |
509 | uint completed_tasks() const { |
510 | return _completed_tasks; |
511 | } |
512 | void increment_completed_tasks() { |
513 | _completed_tasks += 1; |
514 | } |
515 | void reset_completed_tasks() { |
516 | _completed_tasks = 0; |
517 | } |
518 | // Count of barrier tasks completed. |
519 | uint barriers() const { |
520 | return _barriers; |
521 | } |
522 | void increment_barriers() { |
523 | _barriers += 1; |
524 | } |
525 | void reset_barriers() { |
526 | _barriers = 0; |
527 | } |
528 | // Count of how many times the queue has emptied. |
529 | uint emptied_queue() const { |
530 | return _emptied_queue; |
531 | } |
532 | void increment_emptied_queue() { |
533 | _emptied_queue += 1; |
534 | } |
535 | void reset_emptied_queue() { |
536 | _emptied_queue = 0; |
537 | } |
538 | void increment_idle_workers() { |
539 | _idle_workers++; |
540 | } |
541 | void decrement_idle_workers() { |
542 | _idle_workers--; |
543 | } |
544 | // Other methods. |
545 | void initialize(); |
546 | |
547 | public: |
548 | // Return true if all workers are currently active. |
549 | bool all_workers_active() { return workers() == active_workers(); } |
550 | uint active_workers() const { |
551 | return _active_workers; |
552 | } |
553 | uint created_workers() const { |
554 | return _created_workers; |
555 | } |
556 | // Create a GC worker and install into GCTaskManager |
557 | GCTaskThread* install_worker(uint worker_id); |
558 | // Add GC workers as needed. |
559 | void add_workers(bool initializing); |
560 | // Base name (without worker id #) of threads. |
561 | const char* group_name(); |
562 | }; |
563 | |
564 | // |
565 | // Some exemplary GCTasks. |
566 | // |
567 | |
568 | // A noop task that does nothing, |
569 | // except take us around the GCTaskThread loop. |
570 | class NoopGCTask : public GCTask { |
571 | public: |
572 | // Factory create and destroy methods. |
573 | static NoopGCTask* create_on_c_heap(); |
574 | static void destroy(NoopGCTask* that); |
575 | |
  virtual char* name() { return (char *)"noop task"; }
577 | // Methods from GCTask. |
578 | void do_it(GCTaskManager* manager, uint which) { |
579 | // Nothing to do. |
580 | } |
581 | protected: |
582 | // Constructor. |
583 | NoopGCTask(); |
584 | // Destructor-like method. |
585 | void destruct(); |
586 | }; |
587 | |
588 | // A WaitForBarrierGCTask is a GCTask |
589 | // with a method you can call to wait until |
// the barrier task is done.
591 | class WaitForBarrierGCTask : public GCTask { |
592 | friend class GCTaskManager; |
593 | friend class IdleGCTask; |
594 | private: |
595 | // Instance state. |
596 | WaitHelper _wait_helper; |
597 | WaitForBarrierGCTask(); |
598 | public: |
  virtual char* name() { return (char *) "waitfor-barrier-task"; }
600 | |
601 | // Factory create and destroy methods. |
602 | static WaitForBarrierGCTask* create(); |
603 | static void destroy(WaitForBarrierGCTask* that); |
604 | // Methods. |
605 | void do_it(GCTaskManager* manager, uint which); |
606 | protected: |
607 | // Destructor-like method. |
608 | void destruct(); |
609 | |
610 | // Methods. |
611 | // Wait for this to be the only task running. |
612 | void do_it_internal(GCTaskManager* manager, uint which); |
613 | |
614 | void wait_for(bool reset) { |
615 | _wait_helper.wait_for(reset); |
616 | } |
617 | }; |
618 | |
// Task that is used to idle a GC worker thread when fewer than
// the maximum number of workers are wanted.
621 | class IdleGCTask : public GCTask { |
622 | const bool _is_c_heap_obj; // Was allocated on the heap. |
623 | public: |
624 | bool is_c_heap_obj() { |
625 | return _is_c_heap_obj; |
626 | } |
627 | // Factory create and destroy methods. |
628 | static IdleGCTask* create(); |
629 | static IdleGCTask* create_on_c_heap(); |
630 | static void destroy(IdleGCTask* that); |
631 | |
  virtual char* name() { return (char *)"idle task"; }
633 | // Methods from GCTask. |
634 | virtual void do_it(GCTaskManager* manager, uint which); |
635 | protected: |
636 | // Constructor. |
637 | IdleGCTask(bool on_c_heap) : |
638 | GCTask(GCTask::Kind::idle_task), |
639 | _is_c_heap_obj(on_c_heap) { |
640 | // Nothing to do. |
641 | } |
642 | // Destructor-like method. |
643 | void destruct(); |
644 | }; |
645 | |
646 | class MonitorSupply : public AllStatic { |
647 | private: |
648 | // State. |
649 | // Control multi-threaded access. |
650 | static Mutex* _lock; |
651 | // The list of available Monitor*'s. |
652 | static GrowableArray<Monitor*>* _freelist; |
653 | public: |
654 | // Reserve a Monitor*. |
655 | static Monitor* reserve(); |
656 | // Release a Monitor*. |
657 | static void release(Monitor* instance); |
658 | private: |
659 | // Accessors. |
660 | static Mutex* lock() { |
661 | return _lock; |
662 | } |
663 | static GrowableArray<Monitor*>* freelist() { |
664 | return _freelist; |
665 | } |
666 | }; |
667 | |
668 | #endif // SHARE_GC_PARALLEL_GCTASKMANAGER_HPP |
669 | |