/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/codeBuffer.hpp"
#include "code/oopRecorder.inline.hpp"
#include "compiler/disassembler.hpp"
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/xmlstream.hpp"

// The structure of a CodeSection:
//
//    _start ->           +----------------+
//                        | machine code...|
//    _end ->             |----------------|
//                        |                |
//                        |    (empty)     |
//                        |                |
//                        |                |
//                        +----------------+
//    _limit ->           |                |
//
//    _locs_start ->      +----------------+
//                        |reloc records...|
//                        |----------------|
//    _locs_end ->        |                |
//                        |                |
//                        |    (empty)     |
//                        |                |
//                        |                |
//                        +----------------+
//    _locs_limit ->      |                |
// The _end (resp. _limit) pointer refers to the first
// unused (resp. unallocated) byte.
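// (Illustrative note, not part of the original comment: the pointers always
// satisfy _start <= _end <= _limit, so for example
//    size()      == _end   - _start     // bytes emitted so far
//    remaining() == _limit - _end       // bytes still free for emission
// as used by the copy and expansion code further down in this file.)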

// The structure of the CodeBuffer while code is being accumulated:
//
//    _total_start ->    \
//    _insts._start ->              +----------------+
//                                  |                |
//                                  |     Code       |
//                                  |                |
//    _stubs._start ->              |----------------|
//                                  |                |
//                                  |    Stubs       | (also handlers for deopt/exception)
//                                  |                |
//    _consts._start ->             |----------------|
//                                  |                |
//                                  |   Constants    |
//                                  |                |
//                                  +----------------+
//    + _total_size ->              |                |
//
// When the code and relocations are copied to the code cache,
// the empty parts of each section are removed, and everything
// is copied into contiguous locations.
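//
// A minimal usage sketch (illustrative only; MacroAssembler and the stub
// name are assumptions, but the CodeBuffer calls are the ones defined in
// this file):
//
//    CodeBuffer cb("some_stub", code_size, locs_size);  // backed by a BufferBlob
//    MacroAssembler masm(&cb);
//    // ... emit instructions, stubs and constants through the assembler ...
//    // later, copy_code_to(CodeBlob*) compacts the sections into their
//    // final, contiguous resting place in the code cache.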

typedef CodeBuffer::csize_t csize_t;  // file-local definition

// External buffer, in a predefined CodeBlob.
// Important: The code_start must be taken exactly, and not realigned.
CodeBuffer::CodeBuffer(CodeBlob* blob) {
  // Provide code buffer with meaningful name
  initialize_misc(blob->name());
  initialize(blob->content_begin(), blob->content_size());
  verify_section_allocation();
}

void CodeBuffer::initialize(csize_t code_size, csize_t locs_size) {
  // Compute maximal alignment.
  int align = _insts.alignment();
  // Always allow for empty slop around each section.
  int slop = (int) CodeSection::end_slop();

  assert(blob() == NULL, "only once");
  set_blob(BufferBlob::create(_name, code_size + (align+slop) * (SECT_LIMIT+1)));
  if (blob() == NULL) {
    // The assembler constructor will throw a fatal on an empty CodeBuffer.
    return;  // caller must test this
  }

  // Set up various pointers into the blob.
  initialize(_total_start, _total_size);

  assert((uintptr_t)insts_begin() % CodeEntryAlignment == 0, "instruction start not code entry aligned");

  pd_initialize();

  if (locs_size != 0) {
    _insts.initialize_locs(locs_size / sizeof(relocInfo));
  }

  verify_section_allocation();
}


CodeBuffer::~CodeBuffer() {
  verify_section_allocation();

  // If we allocate our code buffer from the CodeCache
  // via a BufferBlob, and it's not permanent, then
  // free the BufferBlob.
  // The rest of the memory will be freed when the ResourceObj
  // is released.
  for (CodeBuffer* cb = this; cb != NULL; cb = cb->before_expand()) {
    // Previous incarnations of this buffer are held live, so that internal
    // addresses constructed before expansions will not be confused.
    cb->free_blob();
  }

  // free any overflow storage
  delete _overflow_arena;

  // Claim is that stack allocation ensures resources are cleaned up.
  // This is resource clean up, let's hope that all were properly copied out.
  free_strings();

#ifdef ASSERT
  // Save allocation type to execute assert in ~ResourceObj()
  // which is called after this destructor.
  assert(_default_oop_recorder.allocated_on_stack(), "should be embedded object");
  ResourceObj::allocation_type at = _default_oop_recorder.get_allocation_type();
  Copy::fill_to_bytes(this, sizeof(*this), badResourceValue);
  ResourceObj::set_allocation_type((address)(&_default_oop_recorder), at);
#endif
}

void CodeBuffer::initialize_oop_recorder(OopRecorder* r) {
  assert(_oop_recorder == &_default_oop_recorder && _default_oop_recorder.is_unused(), "do this once");
  DEBUG_ONLY(_default_oop_recorder.freeze());  // force unused OR to be frozen
  _oop_recorder = r;
}

void CodeBuffer::initialize_section_size(CodeSection* cs, csize_t size) {
  assert(cs != &_insts, "insts is the memory provider, not the consumer");
  csize_t slop = CodeSection::end_slop();  // margin between sections
  int align = cs->alignment();
  assert(is_power_of_2(align), "sanity");
  address start  = _insts._start;
  address limit  = _insts._limit;
  address middle = limit - size;
  middle -= (intptr_t)middle & (align-1);  // align the division point downward
  guarantee(middle - slop > start, "need enough space to divide up");
  _insts._limit = middle - slop;  // subtract desired space, plus slop
  cs->initialize(middle, limit - middle);
  assert(cs->start() == middle, "sanity");
  assert(cs->limit() == limit,  "sanity");
  // give it some relocations to start with, if the main section has them
  if (_insts.has_locs())  cs->initialize_locs(1);
}

void CodeBuffer::freeze_section(CodeSection* cs) {
  CodeSection* next_cs = (cs == consts())? NULL: code_section(cs->index()+1);
  csize_t frozen_size = cs->size();
  if (next_cs != NULL) {
    frozen_size = next_cs->align_at_start(frozen_size);
  }
  address old_limit = cs->limit();
  address new_limit = cs->start() + frozen_size;
  relocInfo* old_locs_limit = cs->locs_limit();
  relocInfo* new_locs_limit = cs->locs_end();
  // Patch the limits.
  cs->_limit = new_limit;
  cs->_locs_limit = new_locs_limit;
  cs->_frozen = true;
  if (next_cs != NULL && !next_cs->is_allocated() && !next_cs->is_frozen()) {
    // Give remaining buffer space to the following section.
    next_cs->initialize(new_limit, old_limit - new_limit);
    next_cs->initialize_shared_locs(new_locs_limit,
                                    old_locs_limit - new_locs_limit);
  }
}

void CodeBuffer::set_blob(BufferBlob* blob) {
  _blob = blob;
  if (blob != NULL) {
    address start = blob->content_begin();
    address end   = blob->content_end();
    // Round up the starting address.
    int align = _insts.alignment();
    start += (-(intptr_t)start) & (align-1);
    _total_start = start;
    _total_size  = end - start;
  } else {
#ifdef ASSERT
    // Clean out dangling pointers.
    _total_start   = badAddress;
    _consts._start = _consts._end = badAddress;
    _insts._start  = _insts._end  = badAddress;
    _stubs._start  = _stubs._end  = badAddress;
#endif //ASSERT
  }
}

void CodeBuffer::free_blob() {
  if (_blob != NULL) {
    BufferBlob::free(_blob);
    set_blob(NULL);
  }
}

const char* CodeBuffer::code_section_name(int n) {
#ifdef PRODUCT
  return NULL;
#else //PRODUCT
  switch (n) {
  case SECT_CONSTS: return "consts";
  case SECT_INSTS:  return "insts";
  case SECT_STUBS:  return "stubs";
  default:          return NULL;
  }
#endif //PRODUCT
}

int CodeBuffer::section_index_of(address addr) const {
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    const CodeSection* cs = code_section(n);
    if (cs->allocates(addr))  return n;
  }
  return SECT_NONE;
}

int CodeBuffer::locator(address addr) const {
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    const CodeSection* cs = code_section(n);
    if (cs->allocates(addr)) {
      return locator(addr - cs->start(), n);
    }
  }
  return -1;
}

address CodeBuffer::locator_address(int locator) const {
  if (locator < 0)  return NULL;
  address start = code_section(locator_sect(locator))->start();
  return start + locator_pos(locator);
}

bool CodeBuffer::is_backward_branch(Label& L) {
  return L.is_bound() && insts_end() <= locator_address(L.loc());
}

address CodeBuffer::decode_begin() {
  address begin = _insts.start();
  if (_decode_begin != NULL && _decode_begin > begin)
    begin = _decode_begin;
  return begin;
}


GrowableArray<int>* CodeBuffer::create_patch_overflow() {
  if (_overflow_arena == NULL) {
    _overflow_arena = new (mtCode) Arena(mtCode);
  }
  return new (_overflow_arena) GrowableArray<int>(_overflow_arena, 8, 0, 0);
}


// Helper function for managing labels and their target addresses.
// Returns a sensible address, and if it is not the label's final
// address, notes the dependency (at 'branch_pc') on the label.
address CodeSection::target(Label& L, address branch_pc) {
  if (L.is_bound()) {
    int loc = L.loc();
    if (index() == CodeBuffer::locator_sect(loc)) {
      return start() + CodeBuffer::locator_pos(loc);
    } else {
      return outer()->locator_address(loc);
    }
  } else {
    assert(allocates2(branch_pc), "sanity");
    address base = start();
    int patch_loc = CodeBuffer::locator(branch_pc - base, index());
    L.add_patch_at(outer(), patch_loc);

    // Need to return a pc, doesn't matter what it is since it will be
    // replaced during resolution later.
    // Don't return NULL or badAddress, since branches shouldn't overflow.
    // Don't return base either because that could overflow displacements
    // for shorter branches. It will get checked when bound.
    return branch_pc;
  }
}

void CodeSection::relocate(address at, relocInfo::relocType rtype, int format, jint method_index) {
  RelocationHolder rh;
  switch (rtype) {
    case relocInfo::none: return;
    case relocInfo::opt_virtual_call_type: {
      rh = opt_virtual_call_Relocation::spec(method_index);
      break;
    }
    case relocInfo::static_call_type: {
      rh = static_call_Relocation::spec(method_index);
      break;
    }
    case relocInfo::virtual_call_type: {
      assert(method_index == 0, "resolved method overriding is not supported");
      rh = Relocation::spec_simple(rtype);
      break;
    }
    default: {
      rh = Relocation::spec_simple(rtype);
      break;
    }
  }
  relocate(at, rh, format);
}

void CodeSection::relocate(address at, RelocationHolder const& spec, int format) {
  // Do not relocate in scratch buffers.
  if (scratch_emit()) { return; }
  Relocation* reloc = spec.reloc();
  relocInfo::relocType rtype = (relocInfo::relocType) reloc->type();
  if (rtype == relocInfo::none)  return;

  // The assertion below has been adjusted, to also work for
  // relocation for fixup. Sometimes we want to put relocation
  // information for the next instruction, since it will be patched
  // with a call.
  assert(start() <= at && at <= end()+1,
         "cannot relocate data outside code boundaries");

  if (!has_locs()) {
    // no space for relocation information provided => code cannot be
    // relocated. Make sure that relocate is only called with rtypes
    // that can be ignored for this kind of code.
    assert(rtype == relocInfo::none ||
           rtype == relocInfo::runtime_call_type ||
           rtype == relocInfo::internal_word_type ||
           rtype == relocInfo::section_word_type ||
           rtype == relocInfo::external_word_type,
           "code needs relocation information");
    // leave behind an indication that we attempted a relocation
    DEBUG_ONLY(_locs_start = _locs_limit = (relocInfo*)badAddress);
    return;
  }

  // Advance the point, noting the offset we'll have to record.
  csize_t offset = at - locs_point();
  set_locs_point(at);

  // Test for a couple of overflow conditions; maybe expand the buffer.
  relocInfo* end = locs_end();
  relocInfo* req = end + relocInfo::length_limit;
  // Check for (potential) overflow
  if (req >= locs_limit() || offset >= relocInfo::offset_limit()) {
    req += (uint)offset / (uint)relocInfo::offset_limit();
    if (req >= locs_limit()) {
      // Allocate or reallocate.
      expand_locs(locs_count() + (req - end));
      // reload pointer
      end = locs_end();
    }
  }

  // If the offset is giant, emit filler relocs, of type 'none', but
  // each carrying the largest possible offset, to advance the locs_point.
  while (offset >= relocInfo::offset_limit()) {
    assert(end < locs_limit(), "adjust previous paragraph of code");
    *end++ = filler_relocInfo();
    offset -= filler_relocInfo().addr_offset();
  }

  // If it's a simple reloc with no data, we'll just write (rtype | offset).
  (*end) = relocInfo(rtype, offset, format);

  // If it has data, insert the prefix, as (data_prefix_tag | data1), data2.
  end->initialize(this, reloc);
}
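// (Illustrative only; the call site below is an assumption, not taken from
// this file. A back end typically records a relocation at the current emit
// position, e.g.
//    code_section()->relocate(insn_addr, relocInfo::runtime_call_type);
// where 'insn_addr' is the address of the instruction being emitted.)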

void CodeSection::initialize_locs(int locs_capacity) {
  assert(_locs_start == NULL, "only one locs init step, please");
  // Apply a priori lower limits to relocation size:
  csize_t min_locs = MAX2(size() / 16, (csize_t)4);
  if (locs_capacity < min_locs)  locs_capacity = min_locs;
  relocInfo* locs_start = NEW_RESOURCE_ARRAY(relocInfo, locs_capacity);
  _locs_start = locs_start;
  _locs_end   = locs_start;
  _locs_limit = locs_start + locs_capacity;
  _locs_own   = true;
}

void CodeSection::initialize_shared_locs(relocInfo* buf, int length) {
  assert(_locs_start == NULL, "do this before locs are allocated");
  // Internal invariant: locs buf must be fully aligned.
  // See copy_relocations_to() below.
  while ((uintptr_t)buf % HeapWordSize != 0 && length > 0) {
    ++buf; --length;
  }
  if (length > 0) {
    _locs_start = buf;
    _locs_end   = buf;
    _locs_limit = buf + length;
    _locs_own   = false;
  }
}

void CodeSection::initialize_locs_from(const CodeSection* source_cs) {
  int lcount = source_cs->locs_count();
  if (lcount != 0) {
    initialize_shared_locs(source_cs->locs_start(), lcount);
    _locs_end = _locs_limit = _locs_start + lcount;
    assert(is_allocated(), "must have copied code already");
    set_locs_point(start() + source_cs->locs_point_off());
  }
  assert(this->locs_count() == source_cs->locs_count(), "sanity");
}

void CodeSection::expand_locs(int new_capacity) {
  if (_locs_start == NULL) {
    initialize_locs(new_capacity);
    return;
  } else {
    int old_count    = locs_count();
    int old_capacity = locs_capacity();
    if (new_capacity < old_capacity * 2)
      new_capacity = old_capacity * 2;
    relocInfo* locs_start;
    if (_locs_own) {
      locs_start = REALLOC_RESOURCE_ARRAY(relocInfo, _locs_start, old_capacity, new_capacity);
    } else {
      locs_start = NEW_RESOURCE_ARRAY(relocInfo, new_capacity);
      Copy::conjoint_jbytes(_locs_start, locs_start, old_capacity * sizeof(relocInfo));
      _locs_own = true;
    }
    _locs_start = locs_start;
    _locs_end   = locs_start + old_count;
    _locs_limit = locs_start + new_capacity;
  }
}


/// Support for emitting the code to its final location.
/// The pattern is the same for all functions.
/// We iterate over all the sections, padding each to alignment.

csize_t CodeBuffer::total_content_size() const {
  csize_t size_so_far = 0;
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    const CodeSection* cs = code_section(n);
    if (cs->is_empty())  continue;  // skip trivial section
    size_so_far = cs->align_at_start(size_so_far);
    size_so_far += cs->size();
  }
  return size_so_far;
}

void CodeBuffer::compute_final_layout(CodeBuffer* dest) const {
  address buf = dest->_total_start;
  csize_t buf_offset = 0;
  assert(dest->_total_size >= total_content_size(), "must be big enough");

  {
    // not sure why this is here, but why not...
    int alignSize = MAX2((intx) sizeof(jdouble), CodeEntryAlignment);
    assert((dest->_total_start - _insts.start()) % alignSize == 0, "copy must preserve alignment");
  }

  const CodeSection* prev_cs      = NULL;
  CodeSection*       prev_dest_cs = NULL;

  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    // figure compact layout of each section
    const CodeSection* cs = code_section(n);
    csize_t csize = cs->size();

    CodeSection* dest_cs = dest->code_section(n);
    if (!cs->is_empty()) {
      // Compute initial padding; assign it to the previous non-empty guy.
      // Cf. figure_expanded_capacities.
      csize_t padding = cs->align_at_start(buf_offset) - buf_offset;
      if (prev_dest_cs != NULL) {
        if (padding != 0) {
          buf_offset += padding;
          prev_dest_cs->_limit += padding;
        }
      } else {
        guarantee(padding == 0, "In first iteration no padding should be needed.");
      }
      #ifdef ASSERT
      if (prev_cs != NULL && prev_cs->is_frozen() && n < (SECT_LIMIT - 1)) {
        // Make sure the ends still match up.
        // This is important because a branch in a frozen section
        // might target code in a following section, via a Label,
        // and without a relocation record. See Label::patch_instructions.
        address dest_start = buf+buf_offset;
        csize_t start2start = cs->start() - prev_cs->start();
        csize_t dest_start2start = dest_start - prev_dest_cs->start();
        assert(start2start == dest_start2start, "cannot stretch frozen sect");
      }
      #endif //ASSERT
      prev_dest_cs = dest_cs;
      prev_cs      = cs;
    }

    debug_only(dest_cs->_start = NULL);  // defeat double-initialization assert
    dest_cs->initialize(buf+buf_offset, csize);
    dest_cs->set_end(buf+buf_offset+csize);
    assert(dest_cs->is_allocated(), "must always be allocated");
    assert(cs->is_empty() == dest_cs->is_empty(), "sanity");

    buf_offset += csize;
  }

  // Done calculating sections; did it come out to the right end?
  assert(buf_offset == total_content_size(), "sanity");
  dest->verify_section_allocation();
}

// Append an oop reference that keeps the class alive.
static void append_oop_references(GrowableArray<oop>* oops, Klass* k) {
  oop cl = k->klass_holder();
  if (cl != NULL && !oops->contains(cl)) {
    oops->append(cl);
  }
}

void CodeBuffer::finalize_oop_references(const methodHandle& mh) {
  NoSafepointVerifier nsv;

  GrowableArray<oop> oops;

  // Make sure that immediate metadata records something in the OopRecorder
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    // pull code out of each section
    CodeSection* cs = code_section(n);
    if (cs->is_empty())  continue;  // skip trivial section
    RelocIterator iter(cs);
    while (iter.next()) {
      if (iter.type() == relocInfo::metadata_type) {
        metadata_Relocation* md = iter.metadata_reloc();
        if (md->metadata_is_immediate()) {
          Metadata* m = md->metadata_value();
          if (oop_recorder()->is_real(m)) {
            if (m->is_methodData()) {
              m = ((MethodData*)m)->method();
            }
            if (m->is_method()) {
              m = ((Method*)m)->method_holder();
            }
            if (m->is_klass()) {
              append_oop_references(&oops, (Klass*)m);
            } else {
              // XXX This will currently occur for MDO which don't
              // have a backpointer. This has to be fixed later.
              m->print();
              ShouldNotReachHere();
            }
          }
        }
      }
    }
  }

  if (!oop_recorder()->is_unused()) {
    for (int i = 0; i < oop_recorder()->metadata_count(); i++) {
      Metadata* m = oop_recorder()->metadata_at(i);
      if (oop_recorder()->is_real(m)) {
        if (m->is_methodData()) {
          m = ((MethodData*)m)->method();
        }
        if (m->is_method()) {
          m = ((Method*)m)->method_holder();
        }
        if (m->is_klass()) {
          append_oop_references(&oops, (Klass*)m);
        } else {
          m->print();
          ShouldNotReachHere();
        }
      }
    }
  }

  // Add the class loader of Method* for the nmethod itself
  append_oop_references(&oops, mh->method_holder());

  // Add any oops that we've found
  Thread* thread = Thread::current();
  for (int i = 0; i < oops.length(); i++) {
    oop_recorder()->find_index((jobject)thread->handle_area()->allocate_handle(oops.at(i)));
  }
}


csize_t CodeBuffer::total_offset_of(const CodeSection* cs) const {
  csize_t size_so_far = 0;
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    const CodeSection* cur_cs = code_section(n);
    if (!cur_cs->is_empty()) {
      size_so_far = cur_cs->align_at_start(size_so_far);
    }
    if (cur_cs->index() == cs->index()) {
      return size_so_far;
    }
    size_so_far += cur_cs->size();
  }
  ShouldNotReachHere();
  return -1;
}

csize_t CodeBuffer::total_relocation_size() const {
  csize_t total = copy_relocations_to(NULL);  // dry run only
  return (csize_t) align_up(total, HeapWordSize);
}

csize_t CodeBuffer::copy_relocations_to(address buf, csize_t buf_limit, bool only_inst) const {
  csize_t buf_offset = 0;
  csize_t code_end_so_far = 0;
  csize_t code_point_so_far = 0;

  assert((uintptr_t)buf % HeapWordSize == 0, "buf must be fully aligned");
  assert(buf_limit % HeapWordSize == 0, "buf must be evenly sized");

  for (int n = (int) SECT_FIRST; n < (int)SECT_LIMIT; n++) {
    if (only_inst && (n != (int)SECT_INSTS)) {
      // Need only relocation info for code.
      continue;
    }
    // pull relocs out of each section
    const CodeSection* cs = code_section(n);
    assert(!(cs->is_empty() && cs->locs_count() > 0), "sanity");
    if (cs->is_empty())  continue;  // skip trivial section
    relocInfo* lstart = cs->locs_start();
    relocInfo* lend   = cs->locs_end();
    csize_t    lsize  = (csize_t)( (address)lend - (address)lstart );
    csize_t    csize  = cs->size();
    code_end_so_far = cs->align_at_start(code_end_so_far);

    if (lsize > 0) {
      // Figure out how to advance the combined relocation point
      // first to the beginning of this section.
      // We'll insert one or more filler relocs to span that gap.
      // (Don't bother to improve this by editing the first reloc's offset.)
      csize_t new_code_point = code_end_so_far;
      for (csize_t jump;
           code_point_so_far < new_code_point;
           code_point_so_far += jump) {
        jump = new_code_point - code_point_so_far;
        relocInfo filler = filler_relocInfo();
        if (jump >= filler.addr_offset()) {
          jump = filler.addr_offset();
        } else {  // else shrink the filler to fit
          filler = relocInfo(relocInfo::none, jump);
        }
        if (buf != NULL) {
          assert(buf_offset + (csize_t)sizeof(filler) <= buf_limit, "filler in bounds");
          *(relocInfo*)(buf+buf_offset) = filler;
        }
        buf_offset += sizeof(filler);
      }

      // Update code point and end to skip past this section:
      csize_t last_code_point = code_end_so_far + cs->locs_point_off();
      assert(code_point_so_far <= last_code_point, "sanity");
      code_point_so_far = last_code_point;  // advance past this guy's relocs
    }
    code_end_so_far += csize;  // advance past this guy's instructions too

    // Done with filler; emit the real relocations:
    if (buf != NULL && lsize != 0) {
      assert(buf_offset + lsize <= buf_limit, "target in bounds");
      assert((uintptr_t)lstart % HeapWordSize == 0, "sane start");
      if (buf_offset % HeapWordSize == 0) {
        // Use wordwise copies if possible:
        Copy::disjoint_words((HeapWord*)lstart,
                             (HeapWord*)(buf+buf_offset),
                             (lsize + HeapWordSize-1) / HeapWordSize);
      } else {
        Copy::conjoint_jbytes(lstart, buf+buf_offset, lsize);
      }
    }
    buf_offset += lsize;
  }

  // Align end of relocation info in target.
  while (buf_offset % HeapWordSize != 0) {
    if (buf != NULL) {
      relocInfo padding = relocInfo(relocInfo::none, 0);
      assert(buf_offset + (csize_t)sizeof(padding) <= buf_limit, "padding in bounds");
      *(relocInfo*)(buf+buf_offset) = padding;
    }
    buf_offset += sizeof(relocInfo);
  }

  assert(only_inst || code_end_so_far == total_content_size(), "sanity");

  return buf_offset;
}

csize_t CodeBuffer::copy_relocations_to(CodeBlob* dest) const {
  address buf = NULL;
  csize_t buf_offset = 0;
  csize_t buf_limit = 0;

  if (dest != NULL) {
    buf = (address)dest->relocation_begin();
    buf_limit = (address)dest->relocation_end() - buf;
  }
  // if dest == NULL, this is just the sizing pass
  //
  buf_offset = copy_relocations_to(buf, buf_limit, false);

  return buf_offset;
}
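// (Illustrative caller pattern, an assumption rather than something taken
// from this file: the relocation area is sized with a dry run first, then
// filled in place:
//    csize_t reloc_size = cb->total_relocation_size();  // NULL dest, no copy
//    ... allocate a CodeBlob with at least reloc_size relocation bytes ...
//    cb->copy_relocations_to(blob);                      // the real copy
// )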

void CodeBuffer::copy_code_to(CodeBlob* dest_blob) {
#ifndef PRODUCT
  if (PrintNMethods && (WizardMode || Verbose)) {
    tty->print("done with CodeBuffer:");
    ((CodeBuffer*)this)->print();
  }
#endif //PRODUCT

  CodeBuffer dest(dest_blob);
  assert(dest_blob->content_size() >= total_content_size(), "good sizing");
  this->compute_final_layout(&dest);

  // Set beginning of constant table before relocating.
  dest_blob->set_ctable_begin(dest.consts()->start());

  relocate_code_to(&dest);

  // transfer strings and comments from buffer to blob
  dest_blob->set_strings(_code_strings);

  // Done moving code bytes; were they the right size?
  assert((int)align_up(dest.total_content_size(), oopSize) == dest_blob->content_size(), "sanity");

  // Flush generated code
  ICache::invalidate_range(dest_blob->code_begin(), dest_blob->code_size());
}

// Move all my code into another code buffer. Consult applicable
// relocs to repair embedded addresses. The layout in the destination
// CodeBuffer is different to the source CodeBuffer: the destination
// CodeBuffer gets the final layout (consts, insts, stubs in order of
// ascending address).
void CodeBuffer::relocate_code_to(CodeBuffer* dest) const {
  address dest_end = dest->_total_start + dest->_total_size;
  address dest_filled = NULL;
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    // pull code out of each section
    const CodeSection* cs = code_section(n);
    if (cs->is_empty())  continue;  // skip trivial section
    CodeSection* dest_cs = dest->code_section(n);
    assert(cs->size() == dest_cs->size(), "sanity");
    csize_t usize = dest_cs->size();
    csize_t wsize = align_up(usize, HeapWordSize);
    assert(dest_cs->start() + wsize <= dest_end, "no overflow");
    // Copy the code as aligned machine words.
    // This may also include an uninitialized partial word at the end.
    Copy::disjoint_words((HeapWord*)cs->start(),
                         (HeapWord*)dest_cs->start(),
                         wsize / HeapWordSize);

    if (dest->blob() == NULL) {
      // Destination is a final resting place, not just another buffer.
      // Normalize uninitialized bytes in the final padding.
      Copy::fill_to_bytes(dest_cs->end(), dest_cs->remaining(),
                          Assembler::code_fill_byte());
    }
    // Keep track of the highest filled address
    dest_filled = MAX2(dest_filled, dest_cs->end() + dest_cs->remaining());

    assert(cs->locs_start() != (relocInfo*)badAddress,
           "this section carries no reloc storage, but reloc was attempted");

    // Make the new code copy use the old copy's relocations:
    dest_cs->initialize_locs_from(cs);
  }

  // Do relocation after all sections are copied.
  // This is necessary if the code uses constants in stubs, which are
  // relocated when the corresponding instruction in the code (e.g., a
  // call) is relocated. Stubs are placed behind the main code
  // section, so that section has to be copied before relocating.
  for (int n = (int) SECT_FIRST; n < (int)SECT_LIMIT; n++) {
    // pull code out of each section
    const CodeSection* cs = code_section(n);
    if (cs->is_empty())  continue;  // skip trivial section
    CodeSection* dest_cs = dest->code_section(n);
    { // Repair the pc relative information in the code after the move
      RelocIterator iter(dest_cs);
      while (iter.next()) {
        iter.reloc()->fix_relocation_after_move(this, dest);
      }
    }
  }

  if (dest->blob() == NULL && dest_filled != NULL) {
    // Destination is a final resting place, not just another buffer.
    // Normalize uninitialized bytes in the final padding.
    Copy::fill_to_bytes(dest_filled, dest_end - dest_filled,
                        Assembler::code_fill_byte());
  }
}

csize_t CodeBuffer::figure_expanded_capacities(CodeSection* which_cs,
                                               csize_t amount,
                                               csize_t* new_capacity) {
  csize_t new_total_cap = 0;

  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    const CodeSection* sect = code_section(n);

    if (!sect->is_empty()) {
      // Compute initial padding; assign it to the previous section,
      // even if it's empty (e.g. consts section can be empty).
      // Cf. compute_final_layout
      csize_t padding = sect->align_at_start(new_total_cap) - new_total_cap;
      if (padding != 0) {
        new_total_cap += padding;
        assert(n - 1 >= SECT_FIRST, "sanity");
        new_capacity[n - 1] += padding;
      }
    }

    csize_t exp = sect->size();  // 100% increase
    if ((uint)exp < 4*K)  exp = 4*K;  // minimum initial increase
    if (sect == which_cs) {
      if (exp < amount)  exp = amount;
      if (StressCodeBuffers)  exp = amount;  // expand only slightly
    } else if (n == SECT_INSTS) {
      // scale down inst increases to a more modest 25%
      exp = 4*K + ((exp - 4*K) >> 2);
      if (StressCodeBuffers)  exp = amount / 2;  // expand only slightly
    } else if (sect->is_empty()) {
      // do not grow an empty secondary section
      exp = 0;
    }
    // Allow for inter-section slop:
    exp += CodeSection::end_slop();
    csize_t new_cap = sect->size() + exp;
    if (new_cap < sect->capacity()) {
      // No need to expand after all.
      new_cap = sect->capacity();
    }
    new_capacity[n] = new_cap;
    new_total_cap += new_cap;
  }

  return new_total_cap;
}

void CodeBuffer::expand(CodeSection* which_cs, csize_t amount) {
#ifndef PRODUCT
  if (PrintNMethods && (WizardMode || Verbose)) {
    tty->print("expanding CodeBuffer:");
    this->print();
  }

  if (StressCodeBuffers && blob() != NULL) {
    static int expand_count = 0;
    if (expand_count >= 0)  expand_count += 1;
    if (expand_count > 100 && is_power_of_2(expand_count)) {
      tty->print_cr("StressCodeBuffers: have expanded %d times", expand_count);
      // simulate an occasional allocation failure:
      free_blob();
    }
  }
#endif //PRODUCT

  // Resizing must be allowed
  {
    if (blob() == NULL)  return;  // caller must check for blob == NULL
    for (int n = 0; n < (int)SECT_LIMIT; n++) {
      guarantee(!code_section(n)->is_frozen(), "resizing not allowed when frozen");
    }
  }

  // Figure new capacity for each section.
  csize_t new_capacity[SECT_LIMIT];
  memset(new_capacity, 0, sizeof(csize_t) * SECT_LIMIT);
  csize_t new_total_cap
    = figure_expanded_capacities(which_cs, amount, new_capacity);

  // Create a new (temporary) code buffer to hold all the new data
  CodeBuffer cb(name(), new_total_cap, 0);
  if (cb.blob() == NULL) {
    // Failed to allocate in code cache.
    free_blob();
    return;
  }

  // Create an old code buffer to remember which addresses used to go where.
  // This will be useful when we do final assembly into the code cache,
  // because we will need to know how to warp any internal address that
  // has been created at any time in this CodeBuffer's past.
  CodeBuffer* bxp = new CodeBuffer(_total_start, _total_size);
  bxp->take_over_code_from(this);  // remember the old undersized blob
  DEBUG_ONLY(this->_blob = NULL);  // silence a later assert
  bxp->_before_expand = this->_before_expand;
  this->_before_expand = bxp;

  // Give each section its required (expanded) capacity.
  for (int n = (int)SECT_LIMIT-1; n >= SECT_FIRST; n--) {
    CodeSection* cb_sect   = cb.code_section(n);
    CodeSection* this_sect = code_section(n);
    if (new_capacity[n] == 0)  continue;  // already nulled out
    if (n != SECT_INSTS) {
      cb.initialize_section_size(cb_sect, new_capacity[n]);
    }
    assert(cb_sect->capacity() >= new_capacity[n], "big enough");
    address cb_start = cb_sect->start();
    cb_sect->set_end(cb_start + this_sect->size());
    if (this_sect->mark() == NULL) {
      cb_sect->clear_mark();
    } else {
      cb_sect->set_mark(cb_start + this_sect->mark_off());
    }
  }

  // Needs to be initialized when calling fix_relocation_after_move.
  cb.blob()->set_ctable_begin(cb.consts()->start());

  // Move all the code and relocations to the new blob:
  relocate_code_to(&cb);

  // Copy the temporary code buffer into the current code buffer.
  // Basically, do {*this = cb}, except for some control information.
  this->take_over_code_from(&cb);
  cb.set_blob(NULL);

  // Zap the old code buffer contents, to avoid mistakenly using them.
  debug_only(Copy::fill_to_bytes(bxp->_total_start, bxp->_total_size,
                                 badCodeHeapFreeVal));

  _decode_begin = NULL;  // sanity

  // Make certain that the new sections are all snugly inside the new blob.
  verify_section_allocation();

#ifndef PRODUCT
  if (PrintNMethods && (WizardMode || Verbose)) {
    tty->print("expanded CodeBuffer:");
    this->print();
  }
#endif //PRODUCT
}

void CodeBuffer::take_over_code_from(CodeBuffer* cb) {
  // Must already have disposed of the old blob somehow.
  assert(blob() == NULL, "must be empty");
  // Take the new blob away from cb.
  set_blob(cb->blob());
  // Take over all the section pointers.
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    CodeSection* cb_sect   = cb->code_section(n);
    CodeSection* this_sect = code_section(n);
    this_sect->take_over_code_from(cb_sect);
  }
  _overflow_arena = cb->_overflow_arena;
  // Make sure the old cb won't try to use it or free it.
  DEBUG_ONLY(cb->_blob = (BufferBlob*)badAddress);
}

void CodeBuffer::verify_section_allocation() {
  address tstart = _total_start;
  if (tstart == badAddress)  return;  // smashed by set_blob(NULL)
  address tend   = tstart + _total_size;
  if (_blob != NULL) {
    guarantee(tstart >= _blob->content_begin(), "sanity");
    guarantee(tend   <= _blob->content_end(),   "sanity");
  }
  // Verify disjointness.
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    CodeSection* sect = code_section(n);
    if (!sect->is_allocated() || sect->is_empty())  continue;
    guarantee((intptr_t)sect->start() % sect->alignment() == 0
              || sect->is_empty() || _blob == NULL,
              "start is aligned");
    for (int m = (int) SECT_FIRST; m < (int) SECT_LIMIT; m++) {
      CodeSection* other = code_section(m);
      if (!other->is_allocated() || other == sect)  continue;
      guarantee(!other->contains(sect->start()    ), "sanity");
      // limit is an exclusive address and can be the start of another
      // section.
      guarantee(!other->contains(sect->limit() - 1), "sanity");
    }
    guarantee(sect->end() <= tend, "sanity");
    guarantee(sect->end() <= sect->limit(), "sanity");
  }
}

void CodeBuffer::log_section_sizes(const char* name) {
  if (xtty != NULL) {
    ttyLocker ttyl;
    // log info about buffer usage
    xtty->print_cr("<blob name='%s' size='%d'>", name, _total_size);
    for (int n = (int) CodeBuffer::SECT_FIRST; n < (int) CodeBuffer::SECT_LIMIT; n++) {
      CodeSection* sect = code_section(n);
      if (!sect->is_allocated() || sect->is_empty())  continue;
      xtty->print_cr("<sect index='%d' size='" SIZE_FORMAT "' free='" SIZE_FORMAT "'/>",
                     n, sect->limit() - sect->start(), sect->limit() - sect->end());
    }
    xtty->print_cr("</blob>");
  }
}

#ifndef PRODUCT

void CodeSection::decode() {
  Disassembler::decode(start(), end());
}

void CodeBuffer::block_comment(intptr_t offset, const char * comment) {
  if (_collect_comments) {
    _code_strings.add_comment(offset, comment);
  }
}

const char* CodeBuffer::code_string(const char* str) {
  return _code_strings.add_string(str);
}

class CodeString: public CHeapObj<mtCode> {
 private:
  friend class CodeStrings;
  const char * _string;
  CodeString*  _next;
  intptr_t     _offset;

  ~CodeString() {
    assert(_next == NULL, "wrong interface for freeing list");
    os::free((void*)_string);
  }

  bool is_comment() const { return _offset >= 0; }

 public:
  CodeString(const char * string, intptr_t offset = -1)
    : _next(NULL), _offset(offset) {
    _string = os::strdup(string, mtCode);
  }

  const char * string() const { return _string; }
  intptr_t     offset() const { assert(_offset >= 0, "offset for non comment?"); return _offset; }
  CodeString*  next()   const { return _next; }

  void set_next(CodeString* next) { _next = next; }

  CodeString* first_comment() {
    if (is_comment()) {
      return this;
    } else {
      return next_comment();
    }
  }
  CodeString* next_comment() const {
    CodeString* s = _next;
    while (s != NULL && !s->is_comment()) {
      s = s->_next;
    }
    return s;
  }
};

CodeString* CodeStrings::find(intptr_t offset) const {
  CodeString* a = _strings->first_comment();
  while (a != NULL && a->offset() != offset) {
    a = a->next_comment();
  }
  return a;
}

// Convenience for add_comment.
CodeString* CodeStrings::find_last(intptr_t offset) const {
  CodeString* a = find(offset);
  if (a != NULL) {
    CodeString* c = NULL;
    while (((c = a->next_comment()) != NULL) && (c->offset() == offset)) {
      a = c;
    }
  }
  return a;
}

void CodeStrings::add_comment(intptr_t offset, const char * comment) {
  check_valid();
  CodeString* c      = new CodeString(comment, offset);
  CodeString* inspos = (_strings == NULL) ? NULL : find_last(offset);

  if (inspos) {
    // insert after already existing comments with same offset
    c->set_next(inspos->next());
    inspos->set_next(c);
  } else {
    // no comments with such offset, yet. Insert before anything else.
    c->set_next(_strings);
    _strings = c;
  }
}

void CodeStrings::assign(CodeStrings& other) {
  other.check_valid();
  assert(is_null(), "Cannot assign onto non-empty CodeStrings");
  _strings = other._strings;
#ifdef ASSERT
  _defunct = false;
#endif
  other.set_null_and_invalidate();
}

// Deep copy of CodeStrings for consistent memory management.
// Only used for actual disassembly so this is cheaper than reference counting
// for the "normal" fastdebug case.
void CodeStrings::copy(CodeStrings& other) {
  other.check_valid();
  check_valid();
  assert(is_null(), "Cannot copy onto non-empty CodeStrings");
  CodeString* n   = other._strings;
  CodeString** ps = &_strings;
  while (n != NULL) {
    *ps = new CodeString(n->string(), n->offset());
    ps = &((*ps)->_next);
    n = n->next();
  }
}

const char* CodeStrings::_prefix = " ;; ";  // default: can be changed via set_prefix

// Check if any block comments are pending for the given offset.
bool CodeStrings::has_block_comment(intptr_t offset) const {
  if (_strings == NULL) return false;
  CodeString* c = find(offset);
  return c != NULL;
}

void CodeStrings::print_block_comment(outputStream* stream, intptr_t offset) const {
  check_valid();
  if (_strings != NULL) {
    CodeString* c = find(offset);
    while (c && c->offset() == offset) {
      stream->bol();
      stream->print("%s", _prefix);
      // Don't interpret as format strings since it could contain %
      stream->print_raw(c->string());
      stream->bol();  // advance to next line only if string didn't contain a cr() at the end.
      c = c->next_comment();
    }
  }
}

// Also sets isNull()
void CodeStrings::free() {
  CodeString* n = _strings;
  while (n) {
    // unlink the node from the list saving a pointer to the next
    CodeString* p = n->next();
    n->set_next(NULL);
    delete n;
    n = p;
  }
  set_null_and_invalidate();
}

const char* CodeStrings::add_string(const char * string) {
  check_valid();
  CodeString* s = new CodeString(string);
  s->set_next(_strings);
  _strings = s;
  assert(s->string() != NULL, "should have a string");
  return s->string();
}

void CodeBuffer::decode() {
  ttyLocker ttyl;
  Disassembler::decode(decode_begin(), insts_end(), tty);
  _decode_begin = insts_end();
}

void CodeSection::print(const char* name) {
  csize_t locs_size = locs_end() - locs_start();
  tty->print_cr(" %7s.code = " PTR_FORMAT " : " PTR_FORMAT " : " PTR_FORMAT " (%d of %d)%s",
                name, p2i(start()), p2i(end()), p2i(limit()), size(), capacity(),
                is_frozen()? " [frozen]": "");
  tty->print_cr(" %7s.locs = " PTR_FORMAT " : " PTR_FORMAT " : " PTR_FORMAT " (%d of %d) point=%d",
                name, p2i(locs_start()), p2i(locs_end()), p2i(locs_limit()), locs_size, locs_capacity(), locs_point_off());
  if (PrintRelocations) {
    RelocIterator iter(this);
    iter.print();
  }
}

void CodeBuffer::print() {
  if (this == NULL) {
    tty->print_cr("NULL CodeBuffer pointer");
    return;
  }

  tty->print_cr("CodeBuffer:");
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    // print each section
    CodeSection* cs = code_section(n);
    cs->print(code_section_name(n));
  }
}

// Directly disassemble code buffer.
void CodeBuffer::decode(address start, address end) {
  ttyLocker ttyl;
  Disassembler::decode(this, start, end, tty);
}

#endif // PRODUCT