1/*
2 * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#include "precompiled.hpp"
26#include "jfr/jfrEvents.hpp"
27#include "jfr/jni/jfrJavaSupport.hpp"
28#include "jfr/recorder/jfrRecorder.hpp"
29#include "jfr/recorder/repository/jfrChunkWriter.hpp"
30#include "jfr/recorder/service/jfrOptionSet.hpp"
31#include "jfr/recorder/service/jfrPostBox.hpp"
32#include "jfr/recorder/storage/jfrMemorySpace.inline.hpp"
33#include "jfr/recorder/storage/jfrStorage.hpp"
34#include "jfr/recorder/storage/jfrStorageControl.hpp"
35#include "jfr/recorder/storage/jfrStorageUtils.inline.hpp"
36#include "jfr/utilities/jfrIterator.hpp"
37#include "jfr/utilities/jfrTime.hpp"
38#include "jfr/writers/jfrNativeEventWriter.hpp"
39#include "logging/log.hpp"
40#include "runtime/mutexLocker.hpp"
41#include "runtime/orderAccess.hpp"
42#include "runtime/os.inline.hpp"
43#include "runtime/safepoint.hpp"
44#include "runtime/thread.hpp"
45
typedef JfrStorage::Buffer* BufferPtr;

// Singleton instance, installed by create() and cleared by destroy()/the destructor.
static JfrStorage* _instance = NULL;
// NOTE(review): this file-scope _control appears unused in this file;
// JfrStorage::control() returns the member field of the same name.
// Candidate for removal - confirm no other use before deleting.
static JfrStorageControl* _control;
50
// Returns the singleton; valid only after a successful create().
JfrStorage& JfrStorage::instance() {
  return *_instance;
}
54
// Creates the singleton storage instance. Does not allocate the memory
// spaces; initialize() must be called subsequently.
JfrStorage* JfrStorage::create(JfrChunkWriter& chunkwriter, JfrPostBox& post_box) {
  assert(_instance == NULL, "invariant");
  _instance = new JfrStorage(chunkwriter, post_box);
  return _instance;
}
60
61void JfrStorage::destroy() {
62 if (_instance != NULL) {
63 delete _instance;
64 _instance = NULL;
65 }
66}
67
// All memory spaces start out NULL; they are allocated by initialize().
JfrStorage::JfrStorage(JfrChunkWriter& chunkwriter, JfrPostBox& post_box) :
  _control(NULL),
  _global_mspace(NULL),
  _thread_local_mspace(NULL),
  _transient_mspace(NULL),
  _age_mspace(NULL),
  _chunkwriter(chunkwriter),
  _post_box(post_box) {}
76
// Releases whatever initialize() managed to allocate; safe to run after a
// partial (failed) initialization since each field is NULL-checked.
JfrStorage::~JfrStorage() {
  if (_control != NULL) {
    delete _control;
  }
  if (_global_mspace != NULL) {
    delete _global_mspace;
  }
  if (_thread_local_mspace != NULL) {
    delete _thread_local_mspace;
  }
  if (_transient_mspace != NULL) {
    delete _transient_mspace;
  }
  if (_age_mspace != NULL) {
    delete _age_mspace;
  }
  _instance = NULL;
}
95
// Tunables for the storage subsystem.
static const size_t in_memory_discard_threshold_delta = 2; // start to discard data when only this number of free buffers remains
static const size_t unlimited_mspace_size = 0;
static const size_t thread_local_cache_count = 8;
static const size_t thread_local_scavenge_threshold = thread_local_cache_count / 2;
static const size_t transient_buffer_size_multiplier = 8; // against thread local buffer size
101
// Allocates and initializes a memory space with the given buffer size, total
// size limit and preallocated cache count. Returns NULL on allocation failure.
// NOTE(review): the result of mspace->initialize() is ignored here - confirm
// that initialization cannot fail, or that failure is detected elsewhere.
template <typename Mspace>
static Mspace* create_mspace(size_t buffer_size, size_t limit, size_t cache_count, JfrStorage* storage_instance) {
  Mspace* mspace = new Mspace(buffer_size, limit, cache_count, storage_instance);
  if (mspace != NULL) {
    mspace->initialize();
  }
  return mspace;
}
110
// Allocates the storage control and the four memory spaces (global,
// thread-local, transient and age). Returns false on the first allocation
// failure; partially constructed state is reclaimed by the destructor.
bool JfrStorage::initialize() {
  assert(_control == NULL, "invariant");
  assert(_global_mspace == NULL, "invariant");
  assert(_thread_local_mspace == NULL, "invariant");
  assert(_transient_mspace == NULL, "invariant");
  assert(_age_mspace == NULL, "invariant");

  const size_t num_global_buffers = (size_t)JfrOptionSet::num_global_buffers();
  assert(num_global_buffers >= in_memory_discard_threshold_delta, "invariant");
  const size_t memory_size = (size_t)JfrOptionSet::memory_size();
  const size_t global_buffer_size = (size_t)JfrOptionSet::global_buffer_size();
  const size_t thread_buffer_size = (size_t)JfrOptionSet::thread_buffer_size();

  // discard threshold: start discarding when only the delta of free buffers remains
  _control = new JfrStorageControl(num_global_buffers, num_global_buffers - in_memory_discard_threshold_delta);
  if (_control == NULL) {
    return false;
  }
  _global_mspace = create_mspace<JfrStorageMspace>(global_buffer_size, memory_size, num_global_buffers, this);
  if (_global_mspace == NULL) {
    return false;
  }
  _thread_local_mspace = create_mspace<JfrThreadLocalMspace>(thread_buffer_size, unlimited_mspace_size, thread_local_cache_count, this);
  if (_thread_local_mspace == NULL) {
    return false;
  }
  // transient buffers are sized larger to serve oversized requests; no preallocated cache
  _transient_mspace = create_mspace<JfrStorageMspace>(thread_buffer_size * transient_buffer_size_multiplier, unlimited_mspace_size, 0, this);
  if (_transient_mspace == NULL) {
    return false;
  }
  _age_mspace = create_mspace<JfrStorageAgeMspace>(0 /* no extra size except header */, unlimited_mspace_size, num_global_buffers, this);
  if (_age_mspace == NULL) {
    return false;
  }
  control().set_scavenge_threshold(thread_local_scavenge_threshold);
  return true;
}
147
// Accessor for the storage control; valid only after a successful initialize().
JfrStorageControl& JfrStorage::control() {
  return *instance()._control;
}
151
// Emits a warning when a buffer request cannot be satisfied.
static void log_allocation_failure(const char* msg, size_t size) {
  log_warning(jfr)("Unable to allocate " SIZE_FORMAT " bytes of %s.", size, msg);
}
155
156BufferPtr JfrStorage::acquire_thread_local(Thread* thread, size_t size /* 0 */) {
157 BufferPtr buffer = mspace_get_to_full(size, instance()._thread_local_mspace, thread);
158 if (buffer == NULL) {
159 log_allocation_failure("thread local_memory", size);
160 return NULL;
161 }
162 assert(buffer->acquired_by_self(), "invariant");
163 return buffer;
164}
165
// Allocates a transient lease buffer of at least size bytes, used when a
// request is too large for (or denied by) the global system. Returns NULL on
// allocation failure, after logging a warning.
BufferPtr JfrStorage::acquire_transient(size_t size, Thread* thread) {
  BufferPtr buffer = mspace_allocate_transient_lease_to_full(size, instance()._transient_mspace, thread);
  if (buffer == NULL) {
    log_allocation_failure("transient memory", size);
    return NULL;
  }
  assert(buffer->acquired_by_self(), "invariant");
  assert(buffer->transient(), "invariant");
  assert(buffer->lease(), "invariant");
  return buffer;
}
177
// Attempts to lease a free buffer from the given mspace. If none is available
// and the discard threshold has been reached, the oldest full buffers are
// discarded to make room and the attempt is retried; otherwise NULL is returned.
static BufferPtr get_lease(size_t size, JfrStorageMspace* mspace, JfrStorage& storage_instance, size_t retry_count, Thread* thread) {
  assert(size <= mspace->min_elem_size(), "invariant");
  while (true) {
    BufferPtr t = mspace_get_free_lease_with_retry(size, mspace, retry_count, thread);
    if (t == NULL && storage_instance.control().should_discard()) {
      storage_instance.discard_oldest(thread);
      continue;
    }
    return t;
  }
}
189
// Same retry/discard protocol as get_lease(), but acquires the buffer outright
// (no lease) for promoting flushed thread-local data into the global system.
static BufferPtr get_promotion_buffer(size_t size, JfrStorageMspace* mspace, JfrStorage& storage_instance, size_t retry_count, Thread* thread) {
  assert(size <= mspace->min_elem_size(), "invariant");
  while (true) {
    BufferPtr t = mspace_get_free_with_retry(size, mspace, retry_count, thread);
    if (t == NULL && storage_instance.control().should_discard()) {
      storage_instance.discard_oldest(thread);
      continue;
    }
    return t;
  }
}
201
static const size_t lease_retry = 10; // retries per global lease attempt before giving up

// Acquires a buffer able to hold size bytes: preferably a lease from the
// global system, falling back to a transient allocation. May return NULL.
BufferPtr JfrStorage::acquire_large(size_t size, Thread* thread) {
  JfrStorage& storage_instance = instance();
  const size_t max_elem_size = storage_instance._global_mspace->min_elem_size(); // min is also max
  // if not too large and capacity is still available, ask for a lease from the global system
  if (size < max_elem_size && storage_instance.control().is_global_lease_allowed()) {
    BufferPtr const buffer = get_lease(size, storage_instance._global_mspace, storage_instance, lease_retry, thread);
    if (buffer != NULL) {
      assert(buffer->acquired_by_self(), "invariant");
      assert(!buffer->transient(), "invariant");
      assert(buffer->lease(), "invariant");
      storage_instance.control().increment_leased();
      return buffer;
    }
  }
  // lease denied or exhausted - fall back to a transient allocation
  return acquire_transient(size, thread);
}
220
// Accounts the lost bytes against the thread's running total and, if the
// DataLoss event is enabled, writes the event into the (now empty) buffer.
static void write_data_loss_event(JfrBuffer* buffer, u8 unflushed_size, Thread* thread) {
  assert(buffer != NULL, "invariant");
  assert(buffer->empty(), "invariant");
  const u8 total_data_loss = thread->jfr_thread_local()->add_data_lost(unflushed_size);
  if (EventDataLoss::is_enabled()) {
    JfrNativeEventWriter writer(buffer, thread);
    writer.write<u8>(EventDataLoss::eventId);
    writer.write(JfrTicks::now());
    writer.write(unflushed_size);
    writer.write(total_data_loss);
  }
}
233
// Drops the buffer's unflushed content (via reinitialization) and reports the
// loss, if there was any content to lose.
static void write_data_loss(BufferPtr buffer, Thread* thread) {
  assert(buffer != NULL, "invariant");
  const size_t unflushed_size = buffer->unflushed_size();
  buffer->concurrent_reinitialization();
  if (unflushed_size == 0) {
    return;
  }
  write_data_loss_event(buffer, unflushed_size, thread);
}
243
static const size_t promotion_retry = 100; // retries when acquiring a promotion buffer

// Promotes the buffer's unflushed content into a global buffer and
// reinitializes it. If no promotion buffer can be acquired, the content is
// discarded, a data-loss event is issued, and false is returned.
bool JfrStorage::flush_regular_buffer(BufferPtr buffer, Thread* thread) {
  assert(buffer != NULL, "invariant");
  assert(!buffer->lease(), "invariant");
  assert(!buffer->transient(), "invariant");
  const size_t unflushed_size = buffer->unflushed_size();
  if (unflushed_size == 0) {
    // nothing to promote; just reset the buffer
    buffer->concurrent_reinitialization();
    assert(buffer->empty(), "invariant");
    return true;
  }
  BufferPtr const promotion_buffer = get_promotion_buffer(unflushed_size, _global_mspace, *this, promotion_retry, thread);
  if (promotion_buffer == NULL) {
    write_data_loss(buffer, thread);
    return false;
  }
  assert(promotion_buffer->acquired_by_self(), "invariant");
  assert(promotion_buffer->free_size() >= unflushed_size, "invariant");
  buffer->concurrent_move_and_reinitialize(promotion_buffer, unflushed_size);
  assert(buffer->empty(), "invariant");
  return true;
}
267
/*
* 1. If the buffer was a "lease" from the global system, release it back.
* 2. If the buffer is transient (temporarily, dynamically allocated), retire it and register it as full.
*
* The buffer is effectively invalidated for the thread post-return,
* and the caller should take means to ensure that it is not referenced any longer.
*/
void JfrStorage::release_large(BufferPtr buffer, Thread* thread) {
  assert(buffer != NULL, "invariant");
  assert(buffer->lease(), "invariant");
  assert(buffer->acquired_by_self(), "invariant");
  buffer->clear_lease();
  if (buffer->transient()) {
    // transient: hand over to the full list; write_full() releases it back to the transient mspace
    buffer->set_retired();
    register_full(buffer, thread);
  } else {
    // global lease: publish the buffer back and give up the lease quota
    buffer->release();
    control().decrement_leased();
  }
}
288
// Allocates a fresh transient age node. The buffer parameter is currently
// only asserted, not used - kept for symmetry with callers passing the
// retired buffer that the node will represent.
static JfrAgeNode* new_age_node(BufferPtr buffer, JfrStorageAgeMspace* age_mspace, Thread* thread) {
  assert(buffer != NULL, "invariant");
  assert(age_mspace != NULL, "invariant");
  return mspace_allocate_transient(0, age_mspace, thread);
}
294
// Logged when a full buffer cannot be registered; its content is dropped.
static void log_registration_failure(size_t unflushed_size) {
  log_warning(jfr)("Unable to register a full buffer of " SIZE_FORMAT " bytes.", unflushed_size);
  log_debug(jfr, system)("Cleared 1 full buffer of " SIZE_FORMAT " bytes.", unflushed_size);
}

// Registration failed: drop the buffer's content and make it reusable again.
static void handle_registration_failure(BufferPtr buffer) {
  assert(buffer != NULL, "invariant");
  assert(buffer->retired(), "invariant");
  const size_t unflushed_size = buffer->unflushed_size();
  buffer->reinitialize();
  log_registration_failure(unflushed_size);
}
307
// Fetches a free age node, detached from the free list. Caller must hold JfrBuffer_lock.
static JfrAgeNode* get_free_age_node(JfrStorageAgeMspace* age_mspace, Thread* thread) {
  assert(JfrBuffer_lock->owned_by_self(), "invariant");
  return mspace_get_free_with_detach(0, age_mspace, thread);
}

// Links an age node (carrying a retired buffer) onto the head of the full
// list. The identity claim is dropped first; the list is guarded by JfrBuffer_lock.
static bool insert_full_age_node(JfrAgeNode* age_node, JfrStorageAgeMspace* age_mspace, Thread* thread) {
  assert(JfrBuffer_lock->owned_by_self(), "invariant");
  assert(age_node != NULL, "invariant");
  assert(age_node->acquired_by_self(), "invariant");
  assert(age_node->retired_buffer()->retired(), "invariant");
  age_node->release(); // drop identity claim on age node when inserting to full list
  assert(age_node->identity() == NULL, "invariant");
  age_mspace->insert_full_head(age_node);
  return true;
}
323
// Wraps the retired buffer in an age node and inserts it into the full list,
// all under JfrBuffer_lock. Returns false if no age node could be obtained
// (free list empty and allocation failed).
static bool full_buffer_registration(BufferPtr buffer, JfrStorageAgeMspace* age_mspace, JfrStorageControl& control, Thread* thread) {
  assert(buffer != NULL, "invariant");
  assert(buffer->retired(), "invariant");
  assert(age_mspace != NULL, "invariant");
  MutexLocker lock(JfrBuffer_lock, Mutex::_no_safepoint_check_flag);
  JfrAgeNode* age_node = get_free_age_node(age_mspace, thread);
  if (age_node == NULL) {
    // no free node available - fall back to a fresh transient allocation
    age_node = new_age_node(buffer, age_mspace, thread);
    if (age_node == NULL) {
      return false;
    }
  }
  assert(age_node != NULL, "invariant");
  assert(age_node->acquired_by_self(), "invariant");
  age_node->set_retired_buffer(buffer);
  control.increment_full();
  return insert_full_age_node(age_node, age_mspace, thread);
}
342
// Hands a retired buffer over to the full list and pokes the recorder thread
// when enough full buffers have accumulated. On registration failure the
// buffer's content is dropped and the loss is logged.
void JfrStorage::register_full(BufferPtr buffer, Thread* thread) {
  assert(buffer != NULL, "invariant");
  assert(buffer->retired(), "invariant");
  assert(buffer->acquired_by(thread), "invariant");
  if (!full_buffer_registration(buffer, _age_mspace, control(), thread)) {
    handle_registration_failure(buffer);
  }
  if (control().should_post_buffer_full_message()) {
    _post_box.post(MSG_FULLBUFFER);
  }
}
354
// Explicit lock/unlock of JfrBuffer_lock for callers that need to guard
// multi-step access to the storage lists.
void JfrStorage::lock() {
  assert(!JfrBuffer_lock->owned_by_self(), "invariant");
  JfrBuffer_lock->lock_without_safepoint_check();
}

void JfrStorage::unlock() {
  assert(JfrBuffer_lock->owned_by_self(), "invariant");
  JfrBuffer_lock->unlock();
}

#ifdef ASSERT
// Debug-only query used in assertions by callers.
bool JfrStorage::is_locked() const {
  return JfrBuffer_lock->owned_by_self();
}
#endif
370
// don't use buffer on return, it is gone
// Flushes any remaining content (best effort; losses are reported) and then
// retires the buffer, marking it dead for the scavenger to reclaim later.
void JfrStorage::release(BufferPtr buffer, Thread* thread) {
  assert(buffer != NULL, "invariant");
  assert(!buffer->lease(), "invariant");
  assert(!buffer->transient(), "invariant");
  assert(!buffer->retired(), "invariant");
  if (!buffer->empty()) {
    if (!flush_regular_buffer(buffer, thread)) {
      // promotion failed; content already dropped, just reset the buffer
      buffer->concurrent_reinitialization();
    }
  }
  assert(buffer->empty(), "invariant");
  assert(buffer->identity() != NULL, "invariant");
  control().increment_dead();
  buffer->set_retired();
}
387
// Releases a thread's local buffer and nudges the recorder thread to scavenge
// once enough dead buffers have accumulated.
void JfrStorage::release_thread_local(BufferPtr buffer, Thread* thread) {
  assert(buffer != NULL, "invariant");
  JfrStorage& storage_instance = instance();
  storage_instance.release(buffer, thread);
  if (storage_instance.control().should_scavenge()) {
    storage_instance._post_box.post(MSG_DEADBUFFER);
  }
}
396
397static void log_discard(size_t count, size_t amount, size_t current) {
398 if (log_is_enabled(Debug, jfr, system)) {
399 assert(count > 0, "invariant");
400 log_debug(jfr, system)("Cleared " SIZE_FORMAT " full buffer(s) of " SIZE_FORMAT" bytes.", count, amount);
401 log_debug(jfr, system)("Current number of full buffers " SIZE_FORMAT "", current);
402 }
403}
404
405void JfrStorage::discard_oldest(Thread* thread) {
406 if (JfrBuffer_lock->try_lock()) {
407 if (!control().should_discard()) {
408 // another thread handled it
409 return;
410 }
411 const size_t num_full_pre_discard = control().full_count();
412 size_t num_full_post_discard = 0;
413 size_t discarded_size = 0;
414 while (true) {
415 JfrAgeNode* const oldest_age_node = _age_mspace->full_tail();
416 if (oldest_age_node == NULL) {
417 break;
418 }
419 assert(oldest_age_node->identity() == NULL, "invariant");
420 BufferPtr const buffer = oldest_age_node->retired_buffer();
421 assert(buffer->retired(), "invariant");
422 discarded_size += buffer->unflushed_size();
423 num_full_post_discard = control().decrement_full();
424 if (buffer->transient()) {
425 mspace_release_full(buffer, _transient_mspace);
426 mspace_release_full(oldest_age_node, _age_mspace);
427 continue;
428 } else {
429 mspace_release_full(oldest_age_node, _age_mspace);
430 buffer->reinitialize();
431 buffer->release(); // publish
432 break;
433 }
434 }
435 JfrBuffer_lock->unlock();
436 const size_t number_of_discards = num_full_pre_discard - num_full_post_discard;
437 if (number_of_discards > 0) {
438 log_discard(number_of_discards, discarded_size, num_full_post_discard);
439 }
440 }
441}
442
#ifdef ASSERT
typedef const BufferPtr ConstBufferPtr;

// Sanity checks shared by the flush entry point: cur is the thread's primary
// buffer for the requested mode and the used region fits inside it.
static void assert_flush_precondition(ConstBufferPtr cur, size_t used, bool native, const Thread* t) {
  assert(t != NULL, "invariant");
  assert(cur != NULL, "invariant");
  assert(cur->pos() + used <= cur->end(), "invariant");
  assert(native ? t->jfr_thread_local()->native_buffer() == cur : t->jfr_thread_local()->java_buffer() == cur, "invariant");
}

// Preconditions for flushing a regular (thread-stable, non-lease) buffer.
static void assert_flush_regular_precondition(ConstBufferPtr cur, const u1* const cur_pos, size_t used, size_t req, const Thread* t) {
  assert(t != NULL, "invariant");
  assert(t->jfr_thread_local()->shelved_buffer() == NULL, "invariant");
  assert(cur != NULL, "invariant");
  assert(!cur->lease(), "invariant");
  assert(cur_pos != NULL, "invariant");
  assert(req >= used, "invariant");
}

// Preconditions for provisioning a large buffer: the regular buffer must
// already be shelved.
static void assert_provision_large_precondition(ConstBufferPtr cur, size_t used, size_t req, const Thread* t) {
  assert(cur != NULL, "invariant");
  assert(t != NULL, "invariant");
  assert(t->jfr_thread_local()->shelved_buffer() != NULL, "invariant");
  assert(req >= used, "invariant");
}

// Preconditions for flushing a large (leased) buffer, which must be distinct
// from the shelved regular buffer.
static void assert_flush_large_precondition(ConstBufferPtr cur, const u1* const cur_pos, size_t used, size_t req, bool native, Thread* t) {
  assert(t != NULL, "invariant");
  assert(cur != NULL, "invariant");
  assert(cur->lease(), "invariant");
  assert(cur_pos != NULL, "invariant");
  assert(native ? t->jfr_thread_local()->native_buffer() == cur : t->jfr_thread_local()->java_buffer() == cur, "invariant");
  assert(t->jfr_thread_local()->shelved_buffer() != NULL, "invariant");
  assert(req >= used, "invariant");
  assert(cur != t->jfr_thread_local()->shelved_buffer(), "invariant");
}
#endif // ASSERT
480
// Central flush entry point: dispatches on whether the current buffer is a
// lease (large) or a regular thread-local buffer. The outstanding "used"
// bytes (between pos() and pos() + used) are carried over into whichever
// buffer is returned.
BufferPtr JfrStorage::flush(BufferPtr cur, size_t used, size_t req, bool native, Thread* t) {
  debug_only(assert_flush_precondition(cur, used, native, t);)
  const u1* const cur_pos = cur->pos();
  req += used;
  // requested size now encompass the outstanding used size
  return cur->lease() ? instance().flush_large(cur, cur_pos, used, req, native, t) :
                          instance().flush_regular(cur, cur_pos, used, req, native, t);
}
489
// Flushes a regular thread-local buffer and, if the (post-flush) free space
// still cannot hold req bytes, shelves it and provisions a large buffer.
BufferPtr JfrStorage::flush_regular(BufferPtr cur, const u1* const cur_pos, size_t used, size_t req, bool native, Thread* t) {
  debug_only(assert_flush_regular_precondition(cur, cur_pos, used, req, t);)
  // A flush is needed before memcpy since a non-large buffer is thread stable
  // (thread local). The flush will not modify memory in addresses above pos()
  // which is where the "used / uncommitted" data resides. It is therefore both
  // possible and valid to migrate data after the flush. This is however only
  // the case for stable thread local buffers; it is not the case for large buffers.
  if (!cur->empty()) {
    flush_regular_buffer(cur, t);
  }
  assert(t->jfr_thread_local()->shelved_buffer() == NULL, "invariant");
  if (cur->free_size() >= req) {
    // simplest case, no switching of buffers
    if (used > 0) {
      memcpy(cur->pos(), (void*)cur_pos, used);
    }
    assert(native ? t->jfr_thread_local()->native_buffer() == cur : t->jfr_thread_local()->java_buffer() == cur, "invariant");
    return cur;
  }
  // Going for a "larger-than-regular" buffer.
  // Shelve the current buffer to make room for a temporary lease.
  t->jfr_thread_local()->shelve_buffer(cur);
  return provision_large(cur, cur_pos, used, req, native, t);
}
514
515static BufferPtr store_buffer_to_thread_local(BufferPtr buffer, JfrThreadLocal* jfr_thread_local, bool native) {
516 assert(buffer != NULL, "invariant");
517 if (native) {
518 jfr_thread_local->set_native_buffer(buffer);
519 } else {
520 jfr_thread_local->set_java_buffer(buffer);
521 }
522 return buffer;
523}
524
// Un-shelves the thread's regular buffer and reinstalls it as primary.
static BufferPtr restore_shelved_buffer(bool native, Thread* t) {
  JfrThreadLocal* const tl = t->jfr_thread_local();
  BufferPtr shelved = tl->shelved_buffer();
  assert(shelved != NULL, "invariant");
  tl->shelve_buffer(NULL);
  // restore shelved buffer back as primary
  return store_buffer_to_thread_local(shelved, tl, native);
}
533
// Flushes a large (leased) buffer: if the shelved regular buffer can now hold
// the request, data migrates back and the lease is released; otherwise a new,
// larger lease is provisioned.
BufferPtr JfrStorage::flush_large(BufferPtr cur, const u1* const cur_pos, size_t used, size_t req, bool native, Thread* t) {
  debug_only(assert_flush_large_precondition(cur, cur_pos, used, req, native, t);)
  // Can the "regular" buffer (now shelved) accommodate the requested size?
  BufferPtr shelved = t->jfr_thread_local()->shelved_buffer();
  assert(shelved != NULL, "invariant");
  if (shelved->free_size() >= req) {
    if (req > 0) {
      memcpy(shelved->pos(), (void*)cur_pos, (size_t)used);
    }
    // release and invalidate
    release_large(cur, t);
    return restore_shelved_buffer(native, t);
  }
  // regular too small
  return provision_large(cur, cur_pos, used, req, native, t);
}
550
// Large-buffer provisioning failed: release any lease held on cur and fall
// back to the shelved regular buffer (which may be too small; caller checks).
static BufferPtr large_fail(BufferPtr cur, bool native, JfrStorage& storage_instance, Thread* t) {
  assert(cur != NULL, "invariant");
  assert(t != NULL, "invariant");
  if (cur->lease()) {
    storage_instance.release_large(cur, t);
  }
  return restore_shelved_buffer(native, t);
}
559
// Always returns a non-null buffer.
// If accommodating the large request fails, the shelved buffer is returned
// even though it might be smaller than the requested size.
// Caller needs to ensure if the size was successfully accommodated.
BufferPtr JfrStorage::provision_large(BufferPtr cur, const u1* const cur_pos, size_t used, size_t req, bool native, Thread* t) {
  debug_only(assert_provision_large_precondition(cur, used, req, t);)
  assert(t->jfr_thread_local()->shelved_buffer() != NULL, "invariant");
  BufferPtr const buffer = acquire_large(req, t);
  if (buffer == NULL) {
    // unable to allocate and serve the request
    return large_fail(cur, native, *this, t);
  }
  // ok managed to acquire a "large" buffer for the requested size
  assert(buffer->free_size() >= req, "invariant");
  assert(buffer->lease(), "invariant");
  // transfer outstanding data
  memcpy(buffer->pos(), (void*)cur_pos, used);
  if (cur->lease()) {
    release_large(cur, t);
    // don't use current anymore, it is gone
  }
  return store_buffer_to_thread_local(buffer, t->jfr_thread_local(), native);
}
583
// Write-operation flavors used below; all wrap the unbuffered chunk writer.
typedef UnBufferedWriteToChunk<JfrBuffer> WriteOperation;
typedef MutexedWriteOp<WriteOperation> MutexedWriteOperation;
typedef ConcurrentWriteOp<WriteOperation> ConcurrentWriteOperation;
typedef ConcurrentWriteOpExcludeRetired<WriteOperation> ThreadLocalConcurrentWriteOperation;

// Writes all unflushed data to the current chunk: full buffers first, then
// the thread-local list (excluding retired buffers) and the global free list.
// Returns the total number of bytes processed.
size_t JfrStorage::write() {
  const size_t full_size_processed = write_full();
  WriteOperation wo(_chunkwriter);
  ThreadLocalConcurrentWriteOperation tlwo(wo);
  process_full_list(tlwo, _thread_local_mspace);
  ConcurrentWriteOperation cwo(wo);
  process_free_list(cwo, _global_mspace);
  return full_size_processed + wo.processed();
}
598
// Safepoint variant of write(): all mutators are stopped, so mutexed (not
// concurrent) write access suffices everywhere. Returns bytes processed.
size_t JfrStorage::write_at_safepoint() {
  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
  WriteOperation wo(_chunkwriter);
  MutexedWriteOperation writer(wo); // mutexed write mode
  process_full_list(writer, _thread_local_mspace);
  assert(_transient_mspace->is_free_empty(), "invariant");
  process_full_list(writer, _transient_mspace);
  assert(_global_mspace->is_full_empty(), "invariant");
  process_free_list(writer, _global_mspace);
  return wo.processed();
}
610
// Discard-operation flavors used below.
typedef DiscardOp<DefaultDiscarder<JfrStorage::Buffer> > DiscardOperation;
typedef ReleaseOp<JfrStorageMspace> ReleaseOperation;
typedef CompositeOperation<MutexedWriteOperation, ReleaseOperation> FullOperation;

// Discards all unflushed data instead of writing it: full buffers first, then
// thread-local, transient and global. Returns the number of bytes dropped.
size_t JfrStorage::clear() {
  const size_t full_size_processed = clear_full();
  DiscardOperation discarder(concurrent); // concurrent discard mode
  process_full_list(discarder, _thread_local_mspace);
  assert(_transient_mspace->is_free_empty(), "invariant");
  process_full_list(discarder, _transient_mspace);
  assert(_global_mspace->is_full_empty(), "invariant");
  process_free_list(discarder, _global_mspace);
  return full_size_processed + discarder.processed();
}
625
// Returns a batch of age nodes to the free list, under JfrBuffer_lock.
// A NULL tail means the batch is empty and nothing is inserted.
static void insert_free_age_nodes(JfrStorageAgeMspace* age_mspace, JfrAgeNode* head, JfrAgeNode* tail, size_t count) {
  if (tail != NULL) {
    assert(tail->next() == NULL, "invariant");
    assert(head != NULL, "invariant");
    assert(head->prev() == NULL, "invariant");
    MutexLocker buffer_lock(JfrBuffer_lock, Mutex::_no_safepoint_check_flag);
    age_mspace->insert_free_tail(head, tail, count);
  }
}
635
// Runs the processor over each retired buffer hanging off the (detached) age
// list. Transient age nodes are unlinked and deallocated on the fly; the
// surviving nodes are returned to the free list at the end.
template <typename Processor>
static void process_age_list(Processor& processor, JfrStorageAgeMspace* age_mspace, JfrAgeNode* head, size_t count) {
  assert(age_mspace != NULL, "invariant");
  assert(head != NULL, "invariant");
  assert(count > 0, "invariant");
  JfrAgeNode* node = head;
  JfrAgeNode* last = NULL; // trails node; ends up as the tail of the surviving list
  while (node != NULL) {
    last = node;
    assert(node->identity() == NULL, "invariant");
    BufferPtr const buffer = node->retired_buffer();
    assert(buffer != NULL, "invariant");
    assert(buffer->retired(), "invariant");
    processor.process(buffer);
    // at this point, buffer is already live or destroyed
    JfrAgeNode* const next = (JfrAgeNode*)node->next();
    if (node->transient()) {
      // detach
      last = (JfrAgeNode*)last->prev();
      if (last != NULL) {
        last->set_next(next);
      } else {
        // removed node was the head; advance it
        head = next;
      }
      if (next != NULL) {
        next->set_prev(last);
      }
      --count;
      age_mspace->deallocate(node);
    }
    node = next;
  }
  insert_free_age_nodes(age_mspace, head, last, count);
}
670
// Detaches the entire full list (under JfrBuffer_lock) and runs the processor
// over it outside the lock. Returns the number of age nodes processed.
template <typename Processor>
static size_t process_full(Processor& processor, JfrStorageControl& control, JfrStorageAgeMspace* age_mspace) {
  assert(age_mspace != NULL, "invariant");
  if (age_mspace->is_full_empty()) {
    // nothing to do
    return 0;
  }
  size_t count;
  JfrAgeNode* head;
  {
    // fetch age list
    MutexLocker buffer_lock(JfrBuffer_lock, Mutex::_no_safepoint_check_flag);
    count = age_mspace->full_count();
    head = age_mspace->clear_full();
    control.reset_full();
  }
  assert(head != NULL, "invariant");
  assert(count > 0, "invariant");
  process_age_list(processor, age_mspace, head, count);
  return count;
}
692
// Summarizes a write (clear == false) or discard (clear == true) pass over
// full buffers at debug level.
static void log(size_t count, size_t amount, bool clear = false) {
  if (log_is_enabled(Debug, jfr, system)) {
    if (count > 0) {
      log_debug(jfr, system)("%s " SIZE_FORMAT " full buffer(s) of " SIZE_FORMAT" B of data%s",
        clear ? "Discarded" : "Wrote", count, amount, clear ? "." : " to chunk.");
    }
  }
}
701
// full writer
// Assumption is retired only; exclusive access
// MutexedWriter -> ReleaseOp
//
// Writes all registered full buffers to the chunk and releases transient ones
// back to their mspace. Returns the number of bytes written.
size_t JfrStorage::write_full() {
  assert(_chunkwriter.is_valid(), "invariant");
  Thread* const thread = Thread::current();
  WriteOperation wo(_chunkwriter);
  MutexedWriteOperation writer(wo); // a retired buffer implies mutexed access
  ReleaseOperation ro(_transient_mspace, thread);
  FullOperation cmd(&writer, &ro);
  const size_t count = process_full(cmd, control(), _age_mspace);
  log(count, writer.processed());
  return writer.processed();
}
717
// Discards all registered full buffers; returns the number of bytes dropped.
size_t JfrStorage::clear_full() {
  DiscardOperation discarder(mutexed); // a retired buffer implies mutexed access
  const size_t count = process_full(discarder, control(), _age_mspace);
  log(count, discarder.processed(), true);
  return discarder.processed();
}
724
725static void scavenge_log(size_t count, size_t amount, size_t current) {
726 if (count > 0) {
727 if (log_is_enabled(Debug, jfr, system)) {
728 log_debug(jfr, system)("Released " SIZE_FORMAT " dead buffer(s) of " SIZE_FORMAT" B of data.", count, amount);
729 log_debug(jfr, system)("Current number of dead buffers " SIZE_FORMAT "", current);
730 }
731 }
732}
733
// Reclaims dead (retired, empty, non-transient) buffers encountered during a
// full-list traversal, returning them to their mspace and tracking counts.
template <typename Mspace>
class Scavenger {
private:
  JfrStorageControl& _control;
  Mspace* _mspace;
  size_t _count;   // number of buffers reclaimed
  size_t _amount;  // total bytes reclaimed
public:
  typedef typename Mspace::Type Type;
  Scavenger(JfrStorageControl& control, Mspace* mspace) : _control(control), _mspace(mspace), _count(0), _amount(0) {}
  // Invoked per buffer; reclaims only retired ones and always continues iteration.
  bool process(Type* t) {
    if (t->retired()) {
      assert(t->identity() != NULL, "invariant");
      assert(t->empty(), "invariant");
      assert(!t->transient(), "invariant");
      assert(!t->lease(), "invariant");
      ++_count;
      _amount += t->total_size();
      t->clear_retired();
      t->release();
      _control.decrement_dead();
      mspace_release_full_critical(t, _mspace);
    }
    return true;
  }
  size_t processed() const { return _count; }
  size_t amount() const { return _amount; }
};
762
// Releases dead thread-local buffers, if any; returns how many were reclaimed.
size_t JfrStorage::scavenge() {
  JfrStorageControl& ctrl = control();
  if (ctrl.dead_count() == 0) {
    // fast path: nothing to reclaim
    return 0;
  }
  Scavenger<JfrThreadLocalMspace> scavenger(ctrl, _thread_local_mspace);
  process_full_list(scavenger, _thread_local_mspace);
  scavenge_log(scavenger.processed(), scavenger.amount(), ctrl.dead_count());
  return scavenger.processed();
}
773