/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "interpreter/oopMapCache.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/signature.hpp"

class OopMapCacheEntry: private InterpreterOopMap {
  friend class InterpreterOopMap;
  friend class OopMapForCacheEntry;
  friend class OopMapCache;
  friend class VerifyClosure;

 private:
  OopMapCacheEntry* _next;

 protected:
  // Initialization
  void fill(const methodHandle& method, int bci);
  // fills the bit mask for native calls
  void fill_for_native(const methodHandle& method);
  void set_mask(CellTypeState* vars, CellTypeState* stack, int stack_top);

  // Deallocate bit masks and initialize fields
  void flush();

 private:
  void allocate_bit_mask();   // allocates the bit mask on the C heap if necessary
  void deallocate_bit_mask(); // deallocates the bit mask if it was allocated on the C heap
  bool verify_mask(CellTypeState* vars, CellTypeState* stack, int max_locals, int stack_top);

 public:
  OopMapCacheEntry() : InterpreterOopMap() {
    _next = NULL;
#ifdef ASSERT
    _resource_allocate_bit_mask = false;
#endif
  }
};
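
// Mask layout note (a readability summary of the constants declared in
// oopMapCache.hpp): each cell -- one interpreter local or one expression
// stack slot -- occupies bits_per_entry bits in the mask, an oop bit and a
// dead bit. Masks of up to small_mask_limit bits are stored inline in
// _bit_mask[]; larger masks are allocated separately: on the C heap for
// cache entries, or in a resource area for the short-lived InterpreterOopMap
// copies handed out by OopMapCache::lookup().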


// Implementation of OopMapForCacheEntry
// (subclass of GenerateOopMap, initializes an OopMapCacheEntry for a given method and bci)

class OopMapForCacheEntry: public GenerateOopMap {
  OopMapCacheEntry* _entry;
  int               _bci;
  int               _stack_top;

  virtual bool report_results() const            { return false; }
  virtual bool possible_gc_point                 (BytecodeStream *bcs);
  virtual void fill_stackmap_prolog              (int nof_gc_points);
  virtual void fill_stackmap_epilog              ();
  virtual void fill_stackmap_for_opcodes         (BytecodeStream *bcs,
                                                  CellTypeState* vars,
                                                  CellTypeState* stack,
                                                  int stack_top);
  virtual void fill_init_vars                    (GrowableArray<intptr_t> *init_vars);

 public:
  OopMapForCacheEntry(const methodHandle& method, int bci, OopMapCacheEntry *entry);

  // Computes the stack map for (method, bci) and initializes the entry
  void compute_map(TRAPS);
  int  size();
};


OopMapForCacheEntry::OopMapForCacheEntry(const methodHandle& method, int bci, OopMapCacheEntry* entry) : GenerateOopMap(method) {
  _bci       = bci;
  _entry     = entry;
  _stack_top = -1;
}


void OopMapForCacheEntry::compute_map(TRAPS) {
  assert(!method()->is_native(), "cannot compute oop map for native methods");
  // First check if it is a method where the stackmap is always empty
  if (method()->code_size() == 0 || method()->max_locals() + method()->max_stack() == 0) {
    _entry->set_mask_size(0);
  } else {
    ResourceMark rm;
    GenerateOopMap::compute_map(CATCH);
    result_for_basicblock(_bci);
  }
}


bool OopMapForCacheEntry::possible_gc_point(BytecodeStream *bcs) {
  return false; // We are not reporting any result. We call result_for_basicblock directly
}


void OopMapForCacheEntry::fill_stackmap_prolog(int nof_gc_points) {
  // Do nothing
}


void OopMapForCacheEntry::fill_stackmap_epilog() {
  // Do nothing
}


void OopMapForCacheEntry::fill_init_vars(GrowableArray<intptr_t> *init_vars) {
  // Do nothing
}


void OopMapForCacheEntry::fill_stackmap_for_opcodes(BytecodeStream *bcs,
                                                    CellTypeState* vars,
                                                    CellTypeState* stack,
                                                    int stack_top) {
  // Only interested in one specific bci
  if (bcs->bci() == _bci) {
    _entry->set_mask(vars, stack, stack_top);
    _stack_top = stack_top;
  }
}


int OopMapForCacheEntry::size() {
  assert(_stack_top != -1, "compute_map must be called first");
  return ((method()->is_static()) ? 0 : 1) + method()->max_locals() + _stack_top;
}
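
// For example (illustrative arithmetic only): a static method with
// max_locals == 3 and two values on the expression stack at _bci yields
// size() == 0 + 3 + 2 == 5 cells.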


// Implementation of InterpreterOopMap and OopMapCacheEntry

class VerifyClosure : public OffsetClosure {
 private:
  OopMapCacheEntry* _entry;
  bool              _failed;

 public:
  VerifyClosure(OopMapCacheEntry* entry) { _entry = entry; _failed = false; }
  void offset_do(int offset)             { if (!_entry->is_oop(offset)) _failed = true; }
  bool failed() const                    { return _failed; }
};

InterpreterOopMap::InterpreterOopMap() {
  initialize();
#ifdef ASSERT
  _resource_allocate_bit_mask = true;
#endif
}

InterpreterOopMap::~InterpreterOopMap() {
  // The expectation is that the bit mask was allocated
  // last in this resource area.  That would make the free of the
  // bit_mask effective (see how FREE_RESOURCE_ARRAY does a free).
  // If it was not allocated last, there is not a correctness problem
  // but the space for the bit_mask is not freed.
  assert(_resource_allocate_bit_mask, "Trying to free C heap space");
  if (mask_size() > small_mask_limit) {
    FREE_RESOURCE_ARRAY(uintptr_t, _bit_mask[0], mask_word_size());
  }
}

bool InterpreterOopMap::is_empty() const {
  bool result = _method == NULL;
  assert(_method != NULL || (_bci == 0 &&
         (_mask_size == 0 || _mask_size == USHRT_MAX) &&
         _bit_mask[0] == 0), "Should be completely empty");
  return result;
}

void InterpreterOopMap::initialize() {
  _method                = NULL;
  _mask_size             = USHRT_MAX;  // This value should cause a failure quickly
  _bci                   = 0;
  _expression_stack_size = 0;
  for (int i = 0; i < N; i++) _bit_mask[i] = 0;
}

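// Iteration helper: walks the packed mask one entry (bits_per_entry bits) at
// a time. `mask` is shifted left by bits_per_entry each round; once it shifts
// out to zero, the next word of the bit mask is loaded. The closure is
// invoked for every entry whose oop bit is set.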
void InterpreterOopMap::iterate_oop(OffsetClosure* oop_closure) const {
  int n = number_of_entries();
  int word_index = 0;
  uintptr_t value = 0;
  uintptr_t mask = 0;
  // iterate over entries
  for (int i = 0; i < n; i++, mask <<= bits_per_entry) {
    // get current word
    if (mask == 0) {
      value = bit_mask()[word_index++];
      mask = 1;
    }
    // test for oop
    if ((value & (mask << oop_bit_number)) != 0) oop_closure->offset_do(i);
  }
}

void InterpreterOopMap::print() const {
  int n = number_of_entries();
  tty->print("oop map for ");
  method()->print_value();
  tty->print(" @ %d = [%d] { ", bci(), n);
  for (int i = 0; i < n; i++) {
    if (is_dead(i)) tty->print("%d+ ", i);
    else
    if (is_oop(i))  tty->print("%d ", i);
  }
  tty->print_cr("}");
}
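
// Sample output (illustrative): "oop map for {method} @ 3 = [5] { 0 2 4+ }"
// means that at bci 3, of the 5 cells, offsets 0 and 2 hold oops and offset 4
// is dead; a trailing '+' marks a dead cell.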

class MaskFillerForNative: public NativeSignatureIterator {
 private:
  uintptr_t* _mask;                              // the bit mask to be filled
  int        _size;                              // the mask size in bits

  void set_one(int i) {
    i *= InterpreterOopMap::bits_per_entry;
    assert(0 <= i && i < _size, "offset out of bounds");
    _mask[i / BitsPerWord] |= (((uintptr_t) 1 << InterpreterOopMap::oop_bit_number) << (i % BitsPerWord));
  }

 public:
  void pass_int()                                { /* ignore */ }
  void pass_long()                               { /* ignore */ }
  void pass_float()                              { /* ignore */ }
  void pass_double()                             { /* ignore */ }
  void pass_object()                             { set_one(offset()); }

  MaskFillerForNative(const methodHandle& method, uintptr_t* mask, int size) : NativeSignatureIterator(method) {
    _mask = mask;
    _size = size;
    // initialize with 0
    int i = (size + BitsPerWord - 1) / BitsPerWord;
    while (i-- > 0) _mask[i] = 0;
  }

  void generate() {
    NativeSignatureIterator::iterate();
  }
};
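
// For example (illustrative): a native instance method with signature
// "(Ljava/lang/String;I)V" has three parameter cells -- the receiver, the
// String argument, and the int argument. generate() sets the oop bit for
// cells 0 and 1 and leaves the int cell clear.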

bool OopMapCacheEntry::verify_mask(CellTypeState* vars, CellTypeState* stack, int max_locals, int stack_top) {
  // Check mask includes map
  VerifyClosure blk(this);
  iterate_oop(&blk);
  if (blk.failed()) return false;

  // Check if map is generated correctly
  // (Use ?: operator to make sure all 'true' & 'false' are represented exactly the same so we can use == afterwards)
  Log(interpreter, oopmap) logv;
  LogStream st(logv.trace());

  st.print("Locals (%d): ", max_locals);
  for (int i = 0; i < max_locals; i++) {
    bool v1 = is_oop(i)              ? true : false;
    bool v2 = vars[i].is_reference() ? true : false;
    assert(v1 == v2, "locals oop mask generation error");
    st.print("%d", v1 ? 1 : 0);
  }
  st.cr();

  st.print("Stack (%d): ", stack_top);
  for (int j = 0; j < stack_top; j++) {
    bool v1 = is_oop(max_locals + j)  ? true : false;
    bool v2 = stack[j].is_reference() ? true : false;
    assert(v1 == v2, "stack oop mask generation error");
    st.print("%d", v1 ? 1 : 0);
  }
  st.cr();
  return true;
}

void OopMapCacheEntry::allocate_bit_mask() {
  if (mask_size() > small_mask_limit) {
    assert(_bit_mask[0] == 0, "bit mask should be new or just flushed");
    _bit_mask[0] = (intptr_t)
      NEW_C_HEAP_ARRAY(uintptr_t, mask_word_size(), mtClass);
  }
}

void OopMapCacheEntry::deallocate_bit_mask() {
  if (mask_size() > small_mask_limit && _bit_mask[0] != 0) {
    assert(!Thread::current()->resource_area()->contains((void*)_bit_mask[0]),
           "This bit mask should not be in the resource area");
    FREE_C_HEAP_ARRAY(uintptr_t, _bit_mask[0]);
    debug_only(_bit_mask[0] = 0;)
  }
}


void OopMapCacheEntry::fill_for_native(const methodHandle& mh) {
  assert(mh->is_native(), "method must be native method");
  set_mask_size(mh->size_of_parameters() * bits_per_entry);
  allocate_bit_mask();
  // fill mask for parameters
  MaskFillerForNative mf(mh, bit_mask(), mask_size());
  mf.generate();
}


void OopMapCacheEntry::fill(const methodHandle& method, int bci) {
  HandleMark hm;
  // Flush entry to deallocate an existing entry
  flush();
  set_method(method());
  set_bci(bci);
  if (method->is_native()) {
    // Native method activations have oops only among the parameters and one
    // extra oop following the parameters (the mirror for static native methods).
    fill_for_native(method);
  } else {
    EXCEPTION_MARK;
    OopMapForCacheEntry gen(method, bci, this);
    gen.compute_map(CATCH);
  }
}


void OopMapCacheEntry::set_mask(CellTypeState *vars, CellTypeState *stack, int stack_top) {
  // compute bit mask size
  int max_locals = method()->max_locals();
  int n_entries = max_locals + stack_top;
  set_mask_size(n_entries * bits_per_entry);
  allocate_bit_mask();
  set_expression_stack_size(stack_top);

  // compute bits
  int word_index = 0;
  uintptr_t value = 0;
  uintptr_t mask = 1;

  CellTypeState* cell = vars;
  for (int entry_index = 0; entry_index < n_entries; entry_index++, mask <<= bits_per_entry, cell++) {
    // store last word
    if (mask == 0) {
      bit_mask()[word_index++] = value;
      value = 0;
      mask = 1;
    }

    // switch to stack when done with locals
    if (entry_index == max_locals) {
      cell = stack;
    }

    // set oop bit
    if (cell->is_reference()) {
      value |= (mask << oop_bit_number);
    }

    // set dead bit
    if (!cell->is_live()) {
      value |= (mask << dead_bit_number);
      assert(!cell->is_reference(), "dead value marked as oop");
    }
  }

  // make sure last word is stored
  bit_mask()[word_index] = value;

  // verify bit mask
  assert(verify_mask(vars, stack, max_locals, stack_top), "mask could not be verified");
}
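
// Worked example (illustrative, assuming oop_bit_number == 0 and
// dead_bit_number == 1): with max_locals == 2, stack_top == 0, local 0 an
// oop and local 1 dead, the loop sets bit 0 (entry 0's oop bit) and bit 3
// (entry 1's dead bit), so the first mask word ends up as 0b1001 == 0x9.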

void OopMapCacheEntry::flush() {
  deallocate_bit_mask();
  initialize();
}


// Implementation of OopMapCache

void InterpreterOopMap::resource_copy(OopMapCacheEntry* from) {
  assert(_resource_allocate_bit_mask,
         "Should not resource allocate the _bit_mask");

  set_method(from->method());
  set_bci(from->bci());
  set_mask_size(from->mask_size());
  set_expression_stack_size(from->expression_stack_size());

  // Is the bit mask contained in the entry?
  if (from->mask_size() <= small_mask_limit) {
    memcpy((void *)_bit_mask, (void *)from->_bit_mask,
           mask_word_size() * BytesPerWord);
  } else {
    // The expectation is that this InterpreterOopMap is recently created
    // and empty. It is used to get a copy of a cached entry.
    // If the bit mask has a value, it should be in the
    // resource area.
    assert(_bit_mask[0] == 0 ||
           Thread::current()->resource_area()->contains((void*)_bit_mask[0]),
           "The bit mask should have been allocated from a resource area");
    // Allocate the bit_mask from a Resource area for performance.  Allocating
    // from the C heap as is done for OopMapCache has a significant
    // performance impact.
    _bit_mask[0] = (uintptr_t) NEW_RESOURCE_ARRAY(uintptr_t, mask_word_size());
    assert(_bit_mask[0] != 0, "bit mask was not allocated");
    memcpy((void*) _bit_mask[0], (void*) from->_bit_mask[0],
           mask_word_size() * BytesPerWord);
  }
}

inline unsigned int OopMapCache::hash_value_for(const methodHandle& method, int bci) const {
  // We use method->code_size() rather than method->identity_hash() below since
  // the mark may not be present if a pointer to the method is already reversed.
  return   ((unsigned int) bci)
         ^ ((unsigned int) method->max_locals()         << 2)
         ^ ((unsigned int) method->code_size()          << 4)
         ^ ((unsigned int) method->size_of_parameters() << 6);
}
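
// For instance (illustrative arithmetic): bci == 7, max_locals == 2,
// code_size == 40 and size_of_parameters == 1 hash to
// 7 ^ (2 << 2) ^ (40 << 4) ^ (1 << 6) == 7 ^ 8 ^ 640 ^ 64 == 719;
// entry_at()/put_at() below then reduce this modulo _size to pick a bucket.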

OopMapCacheEntry* volatile OopMapCache::_old_entries = NULL;

OopMapCache::OopMapCache() {
  _array = NEW_C_HEAP_ARRAY(OopMapCacheEntry*, _size, mtClass);
  for (int i = 0; i < _size; i++) _array[i] = NULL;
}


OopMapCache::~OopMapCache() {
  assert(_array != NULL, "sanity check");
  // Deallocate oop maps that are allocated out-of-line
  flush();
  // Deallocate array
  FREE_C_HEAP_ARRAY(OopMapCacheEntry*, _array);
}

OopMapCacheEntry* OopMapCache::entry_at(int i) const {
  return OrderAccess::load_acquire(&(_array[i % _size]));
}

bool OopMapCache::put_at(int i, OopMapCacheEntry* entry, OopMapCacheEntry* old) {
  return Atomic::cmpxchg(entry, &_array[i % _size], old) == old;
}
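
// Concurrency note: entry_at()/put_at() form a small lock-free protocol.
// Writers publish a fully initialized entry with a compare-and-swap, and
// readers use a load-acquire, so a reader that sees the pointer also sees
// the entry's contents. A writer that loses the race must not free its
// entry immediately (a concurrent reader may hold it); lookup() instead
// hands such entries to enqueue_for_cleanup().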

void OopMapCache::flush() {
  for (int i = 0; i < _size; i++) {
    OopMapCacheEntry* entry = _array[i];
    if (entry != NULL) {
      _array[i] = NULL;  // no barrier, only called in OopMapCache destructor
      entry->flush();
      FREE_C_HEAP_OBJ(entry);
    }
  }
}

void OopMapCache::flush_obsolete_entries() {
  assert(SafepointSynchronize::is_at_safepoint(), "called by RedefineClasses in a safepoint");
  for (int i = 0; i < _size; i++) {
    OopMapCacheEntry* entry = _array[i];
    if (entry != NULL && !entry->is_empty() && entry->method()->is_old()) {
      // Cache entry is occupied by an old redefined method and we don't want
      // to pin it down so flush the entry.
      if (log_is_enabled(Debug, redefine, class, oopmap)) {
        ResourceMark rm;
        log_debug(redefine, class, interpreter, oopmap)
          ("flush: %s(%s): cached entry @%d",
           entry->method()->name()->as_C_string(), entry->method()->signature()->as_C_string(), i);
      }
      _array[i] = NULL;
      entry->flush();
      FREE_C_HEAP_OBJ(entry);
    }
  }
}

// Called by GC for thread root scan during a safepoint only.  The other interpreted frame oopmaps
// are generated locally and not cached.
void OopMapCache::lookup(const methodHandle& method,
                         int bci,
                         InterpreterOopMap* entry_for) {
  assert(SafepointSynchronize::is_at_safepoint(), "called by GC in a safepoint");
  int probe = hash_value_for(method, bci);
  int i;
  OopMapCacheEntry* entry = NULL;

  if (log_is_enabled(Debug, interpreter, oopmap)) {
    static int count = 0;
    ResourceMark rm;
    log_debug(interpreter, oopmap)
      ("%d - Computing oopmap at bci %d for %s at hash %d", ++count, bci,
       method()->name_and_sig_as_C_string(), probe);
  }

  // Search hashtable for match
  for (i = 0; i < _probe_depth; i++) {
    entry = entry_at(probe + i);
    if (entry != NULL && !entry->is_empty() && entry->match(method, bci)) {
      entry_for->resource_copy(entry);
      assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
      log_debug(interpreter, oopmap)("- found at hash %d", probe + i);
      return;
    }
  }

  // Entry is not in hashtable.
  // Compute entry

  OopMapCacheEntry* tmp = NEW_C_HEAP_OBJ(OopMapCacheEntry, mtClass);
  tmp->initialize();
  tmp->fill(method, bci);
  entry_for->resource_copy(tmp);

  if (method->should_not_be_cached()) {
    // It is either not safe or not a good idea to cache this Method*
    // at this time. We give the caller of lookup() a copy of the
    // interesting info via parameter entry_for, but we don't add it to
    // the cache. See the gory details in method.cpp.
    FREE_C_HEAP_OBJ(tmp);
    return;
  }

  // First search for an empty slot
  for (i = 0; i < _probe_depth; i++) {
    entry = entry_at(probe + i);
    if (entry == NULL) {
      if (put_at(probe + i, tmp, NULL)) {
        assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
        return;
      }
    }
  }

  log_debug(interpreter, oopmap)("*** collision in oopmap cache - flushing item ***");

  // No empty slot (uncommon case). Use (some approximation of a) LRU algorithm
  // where the first entry in the collision array is replaced with the new one.
  OopMapCacheEntry* old = entry_at(probe + 0);
  if (put_at(probe + 0, tmp, old)) {
    enqueue_for_cleanup(old);
  } else {
    enqueue_for_cleanup(tmp);
  }

  assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
  return;
}
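
// Typical flow (an illustrative sketch, not a definition of the only caller):
// during a safepoint, GC asks an interpreted frame for its oops roughly like
//
//   InterpreterOopMap mask;                // resource-allocated bit mask
//   method->mask_for(bci, &mask);          // consults this OopMapCache
//   mask.iterate_oop(&my_offset_closure);  // visit each oop-holding cell
//
// where my_offset_closure is a hypothetical OffsetClosure supplied by the
// caller.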
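
// Pushes a retired entry onto the _old_entries list with a CAS loop (a
// lock-free stack push); cleanup_old_entries() drains the list later, when
// no GC thread can still be referencing the entries.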
void OopMapCache::enqueue_for_cleanup(OopMapCacheEntry* entry) {
  bool success = false;
  OopMapCacheEntry* head;
  do {
    head = _old_entries;
    entry->_next = head;
    success = Atomic::cmpxchg(entry, &_old_entries, head) == head;
  } while (!success);

  if (log_is_enabled(Debug, interpreter, oopmap)) {
    ResourceMark rm;
    log_debug(interpreter, oopmap)("enqueue %s at bci %d for cleanup",
                                   entry->method()->name_and_sig_as_C_string(), entry->bci());
  }
}

// This is called after GC threads are done and nothing is accessing the old_entries
// list, so no synchronization needed.
void OopMapCache::cleanup_old_entries() {
  OopMapCacheEntry* entry = _old_entries;
  _old_entries = NULL;
  while (entry != NULL) {
    if (log_is_enabled(Debug, interpreter, oopmap)) {
      ResourceMark rm;
      log_debug(interpreter, oopmap)("cleanup entry %s at bci %d",
                                     entry->method()->name_and_sig_as_C_string(), entry->bci());
    }
    OopMapCacheEntry* next = entry->_next;
    entry->flush();
    FREE_C_HEAP_OBJ(entry);
    entry = next;
  }
}

void OopMapCache::compute_one_oop_map(const methodHandle& method, int bci, InterpreterOopMap* entry) {
  // Due to the invariants above it's tricky to allocate a temporary OopMapCacheEntry on the stack
  OopMapCacheEntry* tmp = NEW_C_HEAP_ARRAY(OopMapCacheEntry, 1, mtClass);
  tmp->initialize();
  tmp->fill(method, bci);
  entry->resource_copy(tmp);
  FREE_C_HEAP_ARRAY(OopMapCacheEntry, tmp);
}