/*
 * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/nmethod.hpp"
#include "code/scopeDesc.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/signature.hpp"
#include "utilities/align.hpp"
#include "utilities/lockFreeStack.hpp"
#ifdef COMPILER1
#include "c1/c1_Defs.hpp"
#endif
#ifdef COMPILER2
#include "opto/optoreg.hpp"
#endif

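// An OopMap records, for one safepoint (a pc offset within a CodeBlob), which
// stack slots and registers hold oops, narrow oops, derived oops and
// callee-saved registers. An OopMapSet collects the OopMaps of a blob while it
// is being generated; ImmutableOopMapSet/ImmutableOopMap are the compact,
// read-only form produced by ImmutableOopMapBuilder (see build_from() below)
// and consulted during GC and stack walking (oops_do, update_register_map).
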
// OopMapStream

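// An OopMapStream decodes the CompressedWriteStream-encoded OopMapValue
// entries of an OopMap or ImmutableOopMap one at a time, optionally filtered
// by an oop_types_mask. The canonical iteration idiom used throughout this
// file is:
//
//   for (OopMapStream oms(map); !oms.is_done(); oms.next()) {
//     OopMapValue omv = oms.current();
//     ...
//   }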
OopMapStream::OopMapStream(OopMap* oop_map, int oop_types_mask) {
  _stream = new CompressedReadStream(oop_map->write_stream()->buffer());
  _mask = oop_types_mask;
  _size = oop_map->omv_count();
  _position = 0;
  _valid_omv = false;
}

OopMapStream::OopMapStream(const ImmutableOopMap* oop_map, int oop_types_mask) {
  _stream = new CompressedReadStream(oop_map->data_addr());
  _mask = oop_types_mask;
  _size = oop_map->count();
  _position = 0;
  _valid_omv = false;
}

void OopMapStream::find_next() {
  while(_position++ < _size) {
    _omv.read_from(_stream);
    if(((int)_omv.type() & _mask) > 0) {
      _valid_omv = true;
      return;
    }
  }
  _valid_omv = false;
}


// OopMap

// frame_size units are stack-slots (4 bytes) NOT intptr_t; we can name odd
// slots to hold 4-byte values like ints and floats in the LP64 build.
OopMap::OopMap(int frame_size, int arg_count) {
  // OopMaps are usually quite small, so pick a small initial size
  set_write_stream(new CompressedWriteStream(32));
  set_omv_count(0);

#ifdef ASSERT
  _locs_length = VMRegImpl::stack2reg(0)->value() + frame_size + arg_count;
  _locs_used = NEW_RESOURCE_ARRAY(OopMapValue::oop_types, _locs_length);
  for(int i = 0; i < _locs_length; i++) _locs_used[i] = OopMapValue::unused_value;
#endif
}


OopMap::OopMap(OopMap::DeepCopyToken, OopMap* source) {
  // This constructor does a deep copy of the source OopMap.
  set_write_stream(new CompressedWriteStream(source->omv_count() * 2));
  set_omv_count(0);
  set_offset(source->offset());

#ifdef ASSERT
  _locs_length = source->_locs_length;
  _locs_used = NEW_RESOURCE_ARRAY(OopMapValue::oop_types, _locs_length);
  for(int i = 0; i < _locs_length; i++) _locs_used[i] = OopMapValue::unused_value;
#endif

  // We need to copy the entries too.
  for (OopMapStream oms(source); !oms.is_done(); oms.next()) {
    OopMapValue omv = oms.current();
    omv.write_on(write_stream());
    increment_count();
  }
}


OopMap* OopMap::deep_copy() {
  return new OopMap(_deep_copy_token, this);
}

void OopMap::copy_data_to(address addr) const {
  memcpy(addr, write_stream()->buffer(), write_stream()->position());
}

int OopMap::heap_size() const {
  int size = sizeof(OopMap);
  int align = sizeof(void *) - 1;
  size += write_stream()->position();
  // Round the total up to the next pointer-size boundary
  size = ((size+align) & ~align);
  return size;
}

// frame_size units are stack-slots (4 bytes) NOT intptr_t; we can name odd
// slots to hold 4-byte values like ints and floats in the LP64 build.
void OopMap::set_xxx(VMReg reg, OopMapValue::oop_types x, VMReg optional) {

  assert(reg->value() < _locs_length, "too big reg value for stack size");
  assert( _locs_used[reg->value()] == OopMapValue::unused_value, "cannot insert twice");
  debug_only( _locs_used[reg->value()] = x; )

  OopMapValue o(reg, x);

  if(x == OopMapValue::callee_saved_value) {
    // This can never be a stack location, so we don't need to transform it.
    assert(optional->is_reg(), "Trying to callee save a stack location");
    o.set_content_reg(optional);
  } else if(x == OopMapValue::derived_oop_value) {
    o.set_content_reg(optional);
  }

  o.write_on(write_stream());
  increment_count();
}


void OopMap::set_oop(VMReg reg) {
  set_xxx(reg, OopMapValue::oop_value, VMRegImpl::Bad());
}


void OopMap::set_value(VMReg reg) {
  // At this time, we don't need value entries in our OopMap.
}


void OopMap::set_narrowoop(VMReg reg) {
  set_xxx(reg, OopMapValue::narrowoop_value, VMRegImpl::Bad());
}


void OopMap::set_callee_saved(VMReg reg, VMReg caller_machine_register ) {
  set_xxx(reg, OopMapValue::callee_saved_value, caller_machine_register);
}

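// A derived oop is an interior pointer computed from a base oop (for example
// the address of an array element formed by the optimizing compiler). GC
// cannot relocate it by itself; recording the (derived, base) register pair
// here lets the derived value be recomputed after its base has moved (see
// DerivedPointerTable at the end of this file).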
void OopMap::set_derived_oop(VMReg reg, VMReg derived_from_local_register ) {
  if( reg == derived_from_local_register ) {
    // Actually an oop; the derived pointer shares storage with its base.
    set_oop(reg);
  } else {
    set_xxx(reg, OopMapValue::derived_oop_value, derived_from_local_register);
  }
}

// OopMapSet

OopMapSet::OopMapSet() {
  set_om_size(MinOopMapAllocation);
  set_om_count(0);
  OopMap** temp = NEW_RESOURCE_ARRAY(OopMap*, om_size());
  set_om_data(temp);
}


void OopMapSet::grow_om_data() {
  int new_size = om_size() * 2;
  OopMap** new_data = NEW_RESOURCE_ARRAY(OopMap*, new_size);
  memcpy(new_data,om_data(),om_size() * sizeof(OopMap*));
  set_om_size(new_size);
  set_om_data(new_data);
}

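// Add the OopMap for the safepoint at pc_offset. Maps are expected to be added
// in increasing pc_offset order; in debug builds a duplicate offset is fatal
// and an out-of-order insertion only prints a warning.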
void OopMapSet::add_gc_map(int pc_offset, OopMap *map ) {
  assert(om_size() != -1,"Cannot grow a fixed OopMapSet");

  if(om_count() >= om_size()) {
    grow_om_data();
  }
  map->set_offset(pc_offset);

#ifdef ASSERT
  if(om_count() > 0) {
    OopMap* last = at(om_count()-1);
    if (last->offset() == map->offset() ) {
      fatal("OopMap inserted twice");
    }
    if(last->offset() > map->offset()) {
      tty->print_cr( "WARNING, maps not sorted: pc[%d]=%d, pc[%d]=%d",
                      om_count(),last->offset(),om_count()+1,map->offset());
    }
  }
#endif // ASSERT

  set(om_count(),map);
  increment_count();
}


int OopMapSet::heap_size() const {
  // The space we use
  int size = sizeof(OopMap);
  int align = sizeof(void *) - 1;
  size = ((size+align) & ~align);
  size += om_count() * sizeof(OopMap*);

  // Now add in the space needed for the individual OopMaps
  for(int i=0; i < om_count(); i++) {
    size += at(i)->heap_size();
  }
  // We don't need to align this; it will be naturally pointer-aligned.
  return size;
}


OopMap* OopMapSet::singular_oop_map() {
  guarantee(om_count() == 1, "Make sure we only have a single gc point");
  return at(0);
}


OopMap* OopMapSet::find_map_at_offset(int pc_offset) const {
  int i, len = om_count();
  assert( len > 0, "must have pointer maps");

  // Scan through oopmaps. Stop when the current offset is either equal to or
  // greater than the one we are looking for.
  for( i = 0; i < len; i++) {
    if( at(i)->offset() >= pc_offset )
      break;
  }

  assert( i < len, "oopmap not found");

  OopMap* m = at(i);
  assert( m->offset() == pc_offset, "oopmap not found");
  return m;
}

static void add_derived_oop(oop* base, oop* derived) {
#if !defined(TIERED) && !INCLUDE_JVMCI
  COMPILER1_PRESENT(ShouldNotReachHere();)
#endif // !defined(TIERED) && !INCLUDE_JVMCI
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::add(derived, base);
#endif // COMPILER2_OR_JVMCI
}


#ifndef PRODUCT
static void trace_codeblob_maps(const frame *fr, const RegisterMap *reg_map) {
  // Print oopmap and regmap
  tty->print_cr("------ ");
  CodeBlob* cb = fr->cb();
  const ImmutableOopMapSet* maps = cb->oop_maps();
  const ImmutableOopMap* map = cb->oop_map_for_return_address(fr->pc());
  map->print();
  if( cb->is_nmethod() ) {
    nmethod* nm = (nmethod*)cb;
    // native wrappers have no scope data; it is implied
    if (nm->is_native_method()) {
      tty->print("bci: 0 (native)");
    } else {
      ScopeDesc* scope = nm->scope_desc_at(fr->pc());
      tty->print("bci: %d ",scope->bci());
    }
  }
  tty->cr();
  fr->print_on(tty);
  tty->print(" ");
  cb->print_value_on(tty); tty->cr();
  reg_map->print();
  tty->print_cr("------ ");

}
#endif // PRODUCT

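// Iterate over the oops recorded for the physical frame fr, using the oop map
// that corresponds to fr->pc(). Ordinary and narrow oops are passed to f;
// derived oops are routed to add_derived_oop() above so that they can be
// fixed up after their base oops have been moved by the GC.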
void OopMapSet::oops_do(const frame *fr, const RegisterMap* reg_map, OopClosure* f) {
  // add derived oops to a table
  all_do(fr, reg_map, f, add_derived_oop, &do_nothing_cl);
}


void OopMapSet::all_do(const frame *fr, const RegisterMap *reg_map,
                       OopClosure* oop_fn, void derived_oop_fn(oop*, oop*),
                       OopClosure* value_fn) {
  CodeBlob* cb = fr->cb();
  assert(cb != NULL, "no codeblob");

  NOT_PRODUCT(if (TraceCodeBlobStacks) trace_codeblob_maps(fr, reg_map);)

  const ImmutableOopMapSet* maps = cb->oop_maps();
  const ImmutableOopMap* map = cb->oop_map_for_return_address(fr->pc());
  assert(map != NULL, "no ptr map found");

  // handle derived pointers first (otherwise base pointer may be
  // changed before derived pointer offset has been collected)
  OopMapValue omv;
  {
    OopMapStream oms(map,OopMapValue::derived_oop_value);
    if (!oms.is_done()) {
#ifndef TIERED
      COMPILER1_PRESENT(ShouldNotReachHere();)
#if INCLUDE_JVMCI
      if (UseJVMCICompiler) {
        ShouldNotReachHere();
      }
#endif
#endif // !TIERED
      do {
        omv = oms.current();
        oop* loc = fr->oopmapreg_to_location(omv.reg(),reg_map);
        guarantee(loc != NULL, "missing saved register");
        oop *derived_loc = loc;
        oop *base_loc = fr->oopmapreg_to_location(omv.content_reg(), reg_map);
        // Ignore NULL oops and decoded NULL narrow oops which
        // equal CompressedOops::base() when a narrow oop
        // implicit null check is used in compiled code.
        // The narrow_oop_base could be NULL or be the address
        // of the page below the heap, depending on the compressed oops mode.
        if (base_loc != NULL && *base_loc != NULL && !CompressedOops::is_base(*base_loc)) {
          derived_oop_fn(base_loc, derived_loc);
        }
        oms.next();
      } while (!oms.is_done());
    }
  }

  // We want both the narrow-oop (coop) and ordinary oop oop_types
  int mask = OopMapValue::oop_value | OopMapValue::narrowoop_value;
  {
    for (OopMapStream oms(map,mask); !oms.is_done(); oms.next()) {
      omv = oms.current();
      oop* loc = fr->oopmapreg_to_location(omv.reg(),reg_map);
      // It should be an error if no location can be found for a
      // register mentioned as containing an oop of some kind. Maybe
      // this was allowed previously because value_value items might
      // be missing?
      guarantee(loc != NULL, "missing saved register");
      if ( omv.type() == OopMapValue::oop_value ) {
        oop val = *loc;
        if (val == NULL || CompressedOops::is_base(val)) {
          // Ignore NULL oops and decoded NULL narrow oops which
          // equal CompressedOops::base() when a narrow oop
          // implicit null check is used in compiled code.
          // The narrow_oop_base could be NULL or be the address
          // of the page below the heap, depending on the compressed oops mode.
          continue;
        }
#ifdef ASSERT
        if ((((uintptr_t)loc & (sizeof(*loc)-1)) != 0) ||
            !Universe::heap()->is_in_or_null(*loc)) {
          tty->print_cr("# Found non oop pointer. Dumping state at failure");
          // try to dump out some helpful debugging information
          trace_codeblob_maps(fr, reg_map);
          omv.print();
          tty->print_cr("register r");
          omv.reg()->print();
          tty->print_cr("loc = %p *loc = %p\n", loc, (address)*loc);
          // do the real assert.
          assert(Universe::heap()->is_in_or_null(*loc), "found non oop pointer");
        }
#endif // ASSERT
        oop_fn->do_oop(loc);
      } else if ( omv.type() == OopMapValue::narrowoop_value ) {
        narrowOop *nl = (narrowOop*)loc;
#ifndef VM_LITTLE_ENDIAN
        VMReg vmReg = omv.reg();
        // Don't do this on SPARC float registers as they can be individually addressed
        if (!vmReg->is_stack() SPARC_ONLY(&& !vmReg->is_FloatRegister())) {
          // compressed oops in registers only take up 4 bytes of an
          // 8 byte register but they are in the wrong part of the
          // word so adjust loc to point at the right place.
          nl = (narrowOop*)((address)nl + 4);
        }
#endif
        oop_fn->do_oop(nl);
      }
    }
  }
}

// Update the RegisterMap with the locations where this frame saved the
// callee-saved registers, so their contents can be found when the following
// (older) frames are processed.
void OopMapSet::update_register_map(const frame *fr, RegisterMap *reg_map) {
  ResourceMark rm;
  CodeBlob* cb = fr->cb();
  assert(cb != NULL, "no codeblob");

  // Any reg might be saved by a safepoint handler (see generate_handler_blob).
  assert( reg_map->_update_for_id == NULL || fr->is_older(reg_map->_update_for_id),
          "already updated this map; do not 'update' it twice!");
  debug_only(reg_map->_update_for_id = fr->id());

  // Check if caller must update oop argument
  assert((reg_map->include_argument_oops() ||
          !cb->caller_must_gc_arguments(reg_map->thread())),
         "include_argument_oops should already be set");

  // Scan through oopmap and find location of all callee-saved registers
  // (we do not do update in place, since info could be overwritten)

  address pc = fr->pc();
  const ImmutableOopMap* map = cb->oop_map_for_return_address(pc);
  assert(map != NULL, "no ptr map found");
  DEBUG_ONLY(int nof_callee = 0;)

  for (OopMapStream oms(map, OopMapValue::callee_saved_value); !oms.is_done(); oms.next()) {
    OopMapValue omv = oms.current();
    VMReg reg = omv.content_reg();
    oop* loc = fr->oopmapreg_to_location(omv.reg(), reg_map);
    reg_map->set_location(reg, (address) loc);
    DEBUG_ONLY(nof_callee++;)
  }

  // Check that runtime stubs save all callee-saved registers
#ifdef COMPILER2
  assert(cb->is_compiled_by_c1() || cb->is_compiled_by_jvmci() || !cb->is_runtime_stub() ||
         (nof_callee >= SAVED_ON_ENTRY_REG_COUNT || nof_callee >= C_SAVED_ON_ENTRY_REG_COUNT),
         "must save all");
#endif // COMPILER2
}

//=============================================================================
// Non-Product code

#ifndef PRODUCT

bool ImmutableOopMap::has_derived_pointer() const {
#if !defined(TIERED) && !INCLUDE_JVMCI
  COMPILER1_PRESENT(return false);
#endif // !defined(TIERED) && !INCLUDE_JVMCI
#if COMPILER2_OR_JVMCI
  OopMapStream oms(this,OopMapValue::derived_oop_value);
  return !oms.is_done();
#else
  return false;
#endif // COMPILER2_OR_JVMCI
}

#endif //PRODUCT

// Printing code is present in product build for -XX:+PrintAssembly.

static
void print_register_type(OopMapValue::oop_types x, VMReg optional,
                         outputStream* st) {
  switch( x ) {
  case OopMapValue::oop_value:
    st->print("Oop");
    break;
  case OopMapValue::narrowoop_value:
    st->print("NarrowOop");
    break;
  case OopMapValue::callee_saved_value:
    st->print("Callers_");
    optional->print_on(st);
    break;
  case OopMapValue::derived_oop_value:
    st->print("Derived_oop_");
    optional->print_on(st);
    break;
  default:
    ShouldNotReachHere();
  }
}

void OopMapValue::print_on(outputStream* st) const {
  reg()->print_on(st);
  st->print("=");
  print_register_type(type(),content_reg(),st);
  st->print(" ");
}

void OopMapValue::print() const { print_on(tty); }

void ImmutableOopMap::print_on(outputStream* st) const {
  OopMapValue omv;
  st->print("ImmutableOopMap {");
  for(OopMapStream oms(this); !oms.is_done(); oms.next()) {
    omv = oms.current();
    omv.print_on(st);
  }
  st->print("}");
}

void ImmutableOopMap::print() const { print_on(tty); }

void OopMap::print_on(outputStream* st) const {
  OopMapValue omv;
  st->print("OopMap {");
  for(OopMapStream oms((OopMap*)this); !oms.is_done(); oms.next()) {
    omv = oms.current();
    omv.print_on(st);
  }
  // Print hex offset in addition.
  st->print("off=%d/0x%x}", (int) offset(), (int) offset());
}

void OopMap::print() const { print_on(tty); }

void ImmutableOopMapSet::print_on(outputStream* st) const {
  const ImmutableOopMap* last = NULL;
  const int len = count();

  st->print_cr("ImmutableOopMapSet contains %d OopMaps", len);

  for (int i = 0; i < len; i++) {
    const ImmutableOopMapPair* pair = pair_at(i);
    const ImmutableOopMap* map = pair->get_from(this);
    if (map != last) {
      st->cr();
      map->print_on(st);
      st->print(" pc offsets: ");
    }
    last = map;
    st->print("%d ", pair->pc_offset());
  }
  st->cr();
}

void ImmutableOopMapSet::print() const { print_on(tty); }

void OopMapSet::print_on(outputStream* st) const {
  const int len = om_count();

  st->print_cr("OopMapSet contains %d OopMaps", len);

  for( int i = 0; i < len; i++) {
    OopMap* m = at(i);
    st->print_cr("#%d ",i);
    m->print_on(st);
    st->cr();
  }
  st->cr();
}

void OopMapSet::print() const { print_on(tty); }

bool OopMap::equals(const OopMap* other) const {
  if (other->_omv_count != _omv_count) {
    return false;
  }
  if (other->write_stream()->position() != write_stream()->position()) {
    return false;
  }
  if (memcmp(other->write_stream()->buffer(), write_stream()->buffer(), write_stream()->position()) != 0) {
    return false;
  }
  return true;
}

const ImmutableOopMap* ImmutableOopMapSet::find_map_at_offset(int pc_offset) const {
  ImmutableOopMapPair* pairs = get_pairs();
  ImmutableOopMapPair* last = NULL;

  for (int i = 0; i < _count; ++i) {
    if (pairs[i].pc_offset() >= pc_offset) {
      last = &pairs[i];
      break;
    }
  }

  // Guard against a potential index-out-of-bounds access reported by Coverity:
  // the loop above must have found a matching pair.
  guarantee(last != NULL, "last may not be null");
  assert(last->pc_offset() == pc_offset, "oopmap not found");
  return last->get_from(this);
}

const ImmutableOopMap* ImmutableOopMapPair::get_from(const ImmutableOopMapSet* set) const {
  return set->oopmap_at_offset(_oopmap_offset);
}

ImmutableOopMap::ImmutableOopMap(const OopMap* oopmap) : _count(oopmap->count()) {
  address addr = data_addr();
  oopmap->copy_data_to(addr);
}

#ifdef ASSERT
int ImmutableOopMap::nr_of_bytes() const {
  OopMapStream oms(this);

  while (!oms.is_done()) {
    oms.next();
  }
  return sizeof(ImmutableOopMap) + oms.stream_position();
}
#endif

ImmutableOopMapBuilder::ImmutableOopMapBuilder(const OopMapSet* set) : _set(set), _empty(NULL), _last(NULL), _empty_offset(-1), _last_offset(-1), _offset(0), _required(-1), _new_set(NULL) {
  _mapping = NEW_RESOURCE_ARRAY(Mapping, _set->size());
}

int ImmutableOopMapBuilder::size_for(const OopMap* map) const {
  return align_up((int)sizeof(ImmutableOopMap) + map->data_size(), 8);
}

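// Compute the number of bytes that build() will emit. The resulting blob is
// laid out as
//
//   [ ImmutableOopMapSet | ImmutableOopMapPair[count] | ImmutableOopMap data ... ]
//
// All empty maps share a single ImmutableOopMap, and a map identical to the
// previously emitted one is shared as well, so a pair may point back into data
// written for an earlier index.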
int ImmutableOopMapBuilder::heap_size() {
  int base = sizeof(ImmutableOopMapSet);
  base = align_up(base, 8);

  // all of our pc / offset pairs
  int pairs = _set->size() * sizeof(ImmutableOopMapPair);
  pairs = align_up(pairs, 8);

  for (int i = 0; i < _set->size(); ++i) {
    int size = 0;
    OopMap* map = _set->at(i);

    if (is_empty(map)) {
      /* only keep a single empty map in the set */
      if (has_empty()) {
        _mapping[i].set(Mapping::OOPMAP_EMPTY, _empty_offset, 0, map, _empty);
      } else {
        _empty_offset = _offset;
        _empty = map;
        size = size_for(map);
        _mapping[i].set(Mapping::OOPMAP_NEW, _offset, size, map);
      }
    } else if (is_last_duplicate(map)) {
      /* if this entry is identical to the previous one, just point it there */
      _mapping[i].set(Mapping::OOPMAP_DUPLICATE, _last_offset, 0, map, _last);
    } else {
      /* not empty, not an identical copy of the previous entry */
      size = size_for(map);
      _mapping[i].set(Mapping::OOPMAP_NEW, _offset, size, map);
      _last_offset = _offset;
      _last = map;
    }

    assert(_mapping[i]._map == map, "check");
    _offset += size;
  }

  int total = base + pairs + _offset;
  DEBUG_ONLY(total += 8);
  _required = total;
  return total;
}

void ImmutableOopMapBuilder::fill_pair(ImmutableOopMapPair* pair, const OopMap* map, int offset, const ImmutableOopMapSet* set) {
  assert(offset < set->nr_of_bytes(), "check");
  new ((address) pair) ImmutableOopMapPair(map->offset(), offset);
}

int ImmutableOopMapBuilder::fill_map(ImmutableOopMapPair* pair, const OopMap* map, int offset, const ImmutableOopMapSet* set) {
  fill_pair(pair, map, offset, set);
  address addr = (address) pair->get_from(_new_set); // location of the ImmutableOopMap

  new (addr) ImmutableOopMap(map);
  return size_for(map);
}

void ImmutableOopMapBuilder::fill(ImmutableOopMapSet* set, int sz) {
  ImmutableOopMapPair* pairs = set->get_pairs();

  for (int i = 0; i < set->count(); ++i) {
    const OopMap* map = _mapping[i]._map;
    ImmutableOopMapPair* pair = NULL;
    int size = 0;

    if (_mapping[i]._kind == Mapping::OOPMAP_NEW) {
      size = fill_map(&pairs[i], map, _mapping[i]._offset, set);
    } else if (_mapping[i]._kind == Mapping::OOPMAP_DUPLICATE || _mapping[i]._kind == Mapping::OOPMAP_EMPTY) {
      fill_pair(&pairs[i], map, _mapping[i]._offset, set);
    }

    const ImmutableOopMap* nv = set->find_map_at_offset(map->offset());
    assert(memcmp(map->data(), nv->data_addr(), map->data_size()) == 0, "check identity");
  }
}

#ifdef ASSERT
void ImmutableOopMapBuilder::verify(address buffer, int size, const ImmutableOopMapSet* set) {
  for (int i = 0; i < 8; ++i) {
    assert(buffer[size - 8 + i] == (unsigned char) 0xff, "overwritten memory check");
  }

  for (int i = 0; i < set->count(); ++i) {
    const ImmutableOopMapPair* pair = set->pair_at(i);
    assert(pair->oopmap_offset() < set->nr_of_bytes(), "check size");
    const ImmutableOopMap* map = pair->get_from(set);
    int nr_of_bytes = map->nr_of_bytes();
    assert(pair->oopmap_offset() + nr_of_bytes <= set->nr_of_bytes(), "check size + size");
  }
}
#endif

ImmutableOopMapSet* ImmutableOopMapBuilder::generate_into(address buffer) {
  DEBUG_ONLY(memset(&buffer[_required-8], 0xff, 8));

  _new_set = new (buffer) ImmutableOopMapSet(_set, _required);
  fill(_new_set, _required);

  DEBUG_ONLY(verify(buffer, _required, _new_set));

  return _new_set;
}

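// Allocate the backing storage (C heap, mtCode) and emit the ImmutableOopMapSet
// into it. In debug builds heap_size() reserves 8 extra bytes which
// generate_into() fills with 0xff and verify() checks afterwards to detect
// buffer overruns.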
ImmutableOopMapSet* ImmutableOopMapBuilder::build() {
  _required = heap_size();

  // We need to allocate a chunk big enough to hold the ImmutableOopMapSet and all of its ImmutableOopMaps
  address buffer = (address) NEW_C_HEAP_ARRAY(unsigned char, _required, mtCode);
  return generate_into(buffer);
}

ImmutableOopMapSet* ImmutableOopMapSet::build_from(const OopMapSet* oopmap_set) {
  ResourceMark mark;
  ImmutableOopMapBuilder builder(oopmap_set);
  return builder.build();
}


//------------------------------DerivedPointerTable---------------------------

#if COMPILER2_OR_JVMCI

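// While the table is active (between clear() and update_pointers(), i.e.
// during a GC pause), add() temporarily redirects each derived pointer slot:
// it records (location, offset from base) in a lock-free list and overwrites
// the slot with the address of the base slot. After the base oops have been
// relocated, update_pointers() loads the moved base through that address and
// rewrites the slot as base + offset. A sketch, with illustrative addresses
// only, for a base oop at 0x1000 and a derived pointer base + 0x40:
//
//   add():             *derived_loc = (oop)base_loc;  entry = (derived_loc, 0x40)
//   GC moves the base to 0x2000 and updates *base_loc
//   update_pointers(): *derived_loc = (oop)(0x2000 + 0x40)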
class DerivedPointerTable::Entry : public CHeapObj<mtCompiler> {
  oop* _location;   // Location of derived pointer, also pointing to base
  intptr_t _offset; // Offset from base pointer
  Entry* volatile _next;

  static Entry* volatile* next_ptr(Entry& entry) { return &entry._next; }

public:
  Entry(oop* location, intptr_t offset) :
    _location(location), _offset(offset), _next(NULL) {}

  oop* location() const { return _location; }
  intptr_t offset() const { return _offset; }
  Entry* next() const { return _next; }

  typedef LockFreeStack<Entry, &next_ptr> List;
  static List* _list;
};

DerivedPointerTable::Entry::List* DerivedPointerTable::Entry::_list = NULL;
bool DerivedPointerTable::_active = false;

bool DerivedPointerTable::is_empty() {
  return Entry::_list == NULL || Entry::_list->empty();
}

void DerivedPointerTable::clear() {
  // The first time, we create the list. Otherwise it should be
  // empty. If not, then we have probably forgotten to call
  // update_pointers after the last GC/Scavenge.
  assert (!_active, "should not be active");
  assert(is_empty(), "table not empty");
  if (Entry::_list == NULL) {
    void* mem = NEW_C_HEAP_OBJ(Entry::List, mtCompiler);
    Entry::_list = ::new (mem) Entry::List();
  }
  _active = true;
}

// Returns the value at the location as an intptr_t
inline intptr_t value_of_loc(oop *pointer) {
  return cast_from_oop<intptr_t>((*pointer));
}

void DerivedPointerTable::add(oop *derived_loc, oop *base_loc) {
  assert(Universe::heap()->is_in_or_null(*base_loc), "not an oop");
  assert(derived_loc != base_loc, "Base and derived in same location");
  if (_active) {
    assert(*derived_loc != (void*)base_loc, "location already added");
    assert(Entry::_list != NULL, "list must exist");
    intptr_t offset = value_of_loc(derived_loc) - value_of_loc(base_loc);
    // This assert is invalid because derived pointers can be
    // arbitrarily far away from their base.
    // assert(offset >= -1000000, "wrong derived pointer info");

    if (TraceDerivedPointers) {
      tty->print_cr(
        "Add derived pointer@" INTPTR_FORMAT
        " - Derived: " INTPTR_FORMAT
        " Base: " INTPTR_FORMAT " (@" INTPTR_FORMAT ") (Offset: " INTX_FORMAT ")",
        p2i(derived_loc), p2i((address)*derived_loc), p2i((address)*base_loc), p2i(base_loc), offset
      );
    }
    // Set derived oop location to point to base.
    *derived_loc = (oop)base_loc;
    Entry* entry = new Entry(derived_loc, offset);
    Entry::_list->push(*entry);
  }
}

void DerivedPointerTable::update_pointers() {
  assert(Entry::_list != NULL, "list must exist");
  Entry* entries = Entry::_list->pop_all();
  while (entries != NULL) {
    Entry* entry = entries;
    entries = entry->next();
    oop* derived_loc = entry->location();
    intptr_t offset = entry->offset();
    // The derived oop was set up to point to the location of its base
    oop base = **(oop**)derived_loc;
    assert(Universe::heap()->is_in_or_null(base), "must be an oop");

    *derived_loc = (oop)(((address)base) + offset);
    assert(value_of_loc(derived_loc) - value_of_loc(&base) == offset, "sanity check");

    if (TraceDerivedPointers) {
      tty->print_cr("Updating derived pointer@" INTPTR_FORMAT
                    " - Derived: " INTPTR_FORMAT " Base: " INTPTR_FORMAT " (Offset: " INTX_FORMAT ")",
                    p2i(derived_loc), p2i((address)*derived_loc), p2i((address)base), offset);
    }

    // Delete entry
    delete entry;
  }
  assert(Entry::_list->empty(), "invariant");
  _active = false;
}

#endif // COMPILER2_OR_JVMCI