/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_ASM_CODEBUFFER_HPP
#define SHARE_ASM_CODEBUFFER_HPP

#include "code/oopRecorder.hpp"
#include "code/relocInfo.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

class CodeStrings;
class PhaseCFG;
class Compile;
class BufferBlob;
class CodeBuffer;
class Label;

class CodeOffsets: public StackObj {
public:
  enum Entries { Entry,
                 Verified_Entry,
                 Frame_Complete, // Offset in the code where the frame setup (for forte stackwalks) is complete
                 OSR_Entry,
                 Exceptions,     // Offset where exception handler lives
                 Deopt,          // Offset where deopt handler lives
                 DeoptMH,        // Offset where MethodHandle deopt handler lives
                 UnwindHandler,  // Offset to default unwind handler
                 max_Entries };

  // Special value marking codeBlobs where profile (forte) stack walking is
  // always dangerous and suspect.

  enum { frame_never_safe = -1 };

private:
  int _values[max_Entries];

public:
  CodeOffsets() {
    _values[Entry         ] = 0;
    _values[Verified_Entry] = 0;
    _values[Frame_Complete] = frame_never_safe;
    _values[OSR_Entry     ] = 0;
    _values[Exceptions    ] = -1;
    _values[Deopt         ] = -1;
    _values[DeoptMH       ] = -1;
    _values[UnwindHandler ] = -1;
  }

  int value(Entries e)               { return _values[e]; }
  void set_value(Entries e, int val) { _values[e] = val; }
};
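
// Usage sketch (illustration only, not part of this header): a code generator
// records the offsets it knows about and a client reads them back, e.g.
//
//   CodeOffsets offsets;
//   offsets.set_value(CodeOffsets::Verified_Entry, verified_entry_pc - code_begin);
//   int deopt_off = offsets.value(CodeOffsets::Deopt);  // -1 until a handler is emitted
//
// Here 'verified_entry_pc' and 'code_begin' are hypothetical local variables.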

// This class represents a stream of code and associated relocations.
// There are a few in each CodeBuffer.
// They are filled concurrently, and concatenated at the end.
class CodeSection {
  friend class CodeBuffer;
 public:
  typedef int csize_t;  // code size type; would be size_t except for history

 private:
  address     _start;           // first byte of contents (instructions)
  address     _mark;            // user mark, usually an instruction beginning
  address     _end;             // current end address
  address     _limit;           // last possible (allocated) end address
  relocInfo*  _locs_start;      // first byte of relocation information
  relocInfo*  _locs_end;        // first byte after relocation information
  relocInfo*  _locs_limit;      // first byte after relocation information buf
  address     _locs_point;      // last relocated position (grows upward)
  bool        _locs_own;        // did I allocate the locs myself?
  bool        _frozen;          // no more expansion of this section
  bool        _scratch_emit;    // Buffer is used for scratch emit, don't relocate.
  char        _index;           // my section number (SECT_INST, etc.)
  CodeBuffer* _outer;           // enclosing CodeBuffer

  // (Note: _locs_point used to be called _last_reloc_offset.)

  CodeSection() {
    _start        = NULL;
    _mark         = NULL;
    _end          = NULL;
    _limit        = NULL;
    _locs_start   = NULL;
    _locs_end     = NULL;
    _locs_limit   = NULL;
    _locs_point   = NULL;
    _locs_own     = false;
    _frozen       = false;
    _scratch_emit = false;
    debug_only(_index = (char)-1);
    debug_only(_outer = (CodeBuffer*)badAddress);
  }

  void initialize_outer(CodeBuffer* outer, int index) {
    _outer = outer;
    _index = index;
  }

  void initialize(address start, csize_t size = 0) {
    assert(_start == NULL, "only one init step, please");
    _start      = start;
    _mark       = NULL;
    _end        = start;

    _limit      = start + size;
    _locs_point = start;
  }

  void initialize_locs(int locs_capacity);
  void expand_locs(int new_capacity);
  void initialize_locs_from(const CodeSection* source_cs);

  // helper for CodeBuffer::expand()
  void take_over_code_from(CodeSection* cs) {
    _start      = cs->_start;
    _mark       = cs->_mark;
    _end        = cs->_end;
    _limit      = cs->_limit;
    _locs_point = cs->_locs_point;
  }

 public:
  address     start() const         { return _start; }
  address     mark() const          { return _mark; }
  address     end() const           { return _end; }
  address     limit() const         { return _limit; }
  csize_t     size() const          { return (csize_t)(_end - _start); }
  csize_t     mark_off() const      { assert(_mark != NULL, "not an offset");
                                      return (csize_t)(_mark - _start); }
  csize_t     capacity() const      { return (csize_t)(_limit - _start); }
  csize_t     remaining() const     { return (csize_t)(_limit - _end); }

  relocInfo*  locs_start() const    { return _locs_start; }
  relocInfo*  locs_end() const      { return _locs_end; }
  int         locs_count() const    { return (int)(_locs_end - _locs_start); }
  relocInfo*  locs_limit() const    { return _locs_limit; }
  address     locs_point() const    { return _locs_point; }
  csize_t     locs_point_off() const{ return (csize_t)(_locs_point - _start); }
  csize_t     locs_capacity() const { return (csize_t)(_locs_limit - _locs_start); }
  csize_t     locs_remaining()const { return (csize_t)(_locs_limit - _locs_end); }

  int         index() const         { return _index; }
  bool        is_allocated() const  { return _start != NULL; }
  bool        is_empty() const      { return _start == _end; }
  bool        is_frozen() const     { return _frozen; }
  bool        has_locs() const      { return _locs_end != NULL; }

  // Mark scratch buffer.
  void set_scratch_emit()           { _scratch_emit = true; }
  bool scratch_emit()               { return _scratch_emit; }

  CodeBuffer* outer() const         { return _outer; }

  // is a given address in this section?  (2nd version is end-inclusive)
  bool contains(address pc) const   { return pc >= _start && pc <  _end; }
  bool contains2(address pc) const  { return pc >= _start && pc <= _end; }
  bool allocates(address pc) const  { return pc >= _start && pc <  _limit; }
  bool allocates2(address pc) const { return pc >= _start && pc <= _limit; }

  void set_end(address pc)          { assert(allocates2(pc), "not in CodeBuffer memory: " INTPTR_FORMAT " <= " INTPTR_FORMAT " <= " INTPTR_FORMAT, p2i(_start), p2i(pc), p2i(_limit)); _end = pc; }
  void set_mark(address pc)         { assert(contains2(pc), "not in codeBuffer");
                                      _mark = pc; }
  void set_mark_off(int offset)     { assert(contains2(offset + _start), "not in codeBuffer");
                                      _mark = offset + _start; }
  void set_mark()                   { _mark = _end; }
  void clear_mark()                 { _mark = NULL; }

  void set_locs_end(relocInfo* p) {
    assert(p <= locs_limit(), "locs data fits in allocated buffer");
    _locs_end = p;
  }
  void set_locs_point(address pc) {
    assert(pc >= locs_point(), "relocation addr may not decrease");
    assert(allocates2(pc),     "relocation addr must be in this section");
    _locs_point = pc;
  }

  // Code emission
  void emit_int8 ( int8_t  x)  { *((int8_t*)  end()) = x; set_end(end() + sizeof(int8_t)); }
  void emit_int16( int16_t x)  { *((int16_t*) end()) = x; set_end(end() + sizeof(int16_t)); }
  void emit_int32( int32_t x)  { *((int32_t*) end()) = x; set_end(end() + sizeof(int32_t)); }
  void emit_int64( int64_t x)  { *((int64_t*) end()) = x; set_end(end() + sizeof(int64_t)); }

  void emit_float( jfloat  x)  { *((jfloat*)  end()) = x; set_end(end() + sizeof(jfloat)); }
  void emit_double(jdouble x)  { *((jdouble*) end()) = x; set_end(end() + sizeof(jdouble)); }
  void emit_address(address x) { *((address*) end()) = x; set_end(end() + sizeof(address)); }
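
  // Emission sketch (illustration only): each emit_* stores at end() and then
  // bumps end(), so after, e.g.,
  //
  //   section->emit_int8(0x90);        // one byte of code
  //   section->emit_int32(0xdeadbeef); // four more bytes
  //
  // the section's size() has grown by 5 and remaining() has shrunk by 5.
  // 'section' is a hypothetical CodeSection* with enough space left.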

  // Share a scratch buffer for relocinfo.  (Hacky; saves a resource allocation.)
  void initialize_shared_locs(relocInfo* buf, int length);

  // Manage labels and their addresses.
  address target(Label& L, address branch_pc);

  // Emit a relocation.
  void relocate(address at, RelocationHolder const& rspec, int format = 0);
  void relocate(address at, relocInfo::relocType rtype, int format = 0, jint method_index = 0);

  // Alignment requirement for the starting offset: the instruction area and
  // the stubs area must start on CodeEntryAlignment, and the constant table
  // (ctable) on sizeof(jdouble).
  int alignment() const             { return MAX2((int)sizeof(jdouble), (int)CodeEntryAlignment); }

  // Slop between sections, used only when allocating temporary BufferBlob buffers.
  static csize_t end_slop()         { return MAX2((int)sizeof(jdouble), (int)CodeEntryAlignment); }

  csize_t align_at_start(csize_t off) const { return (csize_t) align_up(off, alignment()); }
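
  // Worked example (illustration only, assuming CodeEntryAlignment == 32):
  // alignment() is MAX2(sizeof(jdouble), 32) == 32, so align_at_start(70)
  // rounds 70 up to the next multiple of 32, i.e. 96.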

  // Mark a section frozen.  Assign its remaining space to
  // the following section.  It will never expand after this point.
  inline void freeze();         //  { _outer->freeze_section(this); }

  // Ensure there's enough space left in the current section.
  // Return true if there was an expansion.
  bool maybe_expand_to_ensure_remaining(csize_t amount);

#ifndef PRODUCT
  void decode();
  void print(const char* name);
#endif //PRODUCT
};

class CodeString;
class CodeStrings {
private:
#ifndef PRODUCT
  CodeString* _strings;
#ifdef ASSERT
  // Becomes true after copy-out, forbids further use.
  bool _defunct; // Zero bit pattern is "valid", see memset call in decode_env::decode_env
#endif
  static const char* _prefix; // defaults to " ;; "
#endif

  CodeString* find(intptr_t offset) const;
  CodeString* find_last(intptr_t offset) const;

  void set_null_and_invalidate() {
#ifndef PRODUCT
    _strings = NULL;
#ifdef ASSERT
    _defunct = true;
#endif
#endif
  }

public:
  CodeStrings() {
#ifndef PRODUCT
    _strings = NULL;
#ifdef ASSERT
    _defunct = false;
#endif
#endif
  }

  bool is_null() {
#ifdef ASSERT
    return _strings == NULL;
#else
    return true;
#endif
  }

  const char* add_string(const char* string) PRODUCT_RETURN_(return NULL;);

  void add_comment(intptr_t offset, const char* comment) PRODUCT_RETURN;
  bool has_block_comment(intptr_t offset) const;
  void print_block_comment(outputStream* stream, intptr_t offset) const PRODUCT_RETURN;
  // MOVE strings from other to this; invalidate other.
  void assign(CodeStrings& other) PRODUCT_RETURN;
  // COPY strings from other to this; leave other valid.
  void copy(CodeStrings& other) PRODUCT_RETURN;
  // FREE strings; invalidate this.
  void free() PRODUCT_RETURN;

  // Guarantee that _strings are used at most once; assign and free invalidate a buffer.
  inline void check_valid() const {
#ifdef ASSERT
    assert(!_defunct, "Use of invalid CodeStrings");
#endif
  }

  static void set_prefix(const char* prefix) {
#ifndef PRODUCT
    _prefix = prefix;
#endif
  }
};

// A CodeBuffer describes a memory space into which assembly
// code is generated.  This memory space usually occupies the
// interior of a single BufferBlob, but in some cases it may be
// an arbitrary span of memory, even outside the code cache.
//
// A code buffer comes in two variants:
//
// (1) A CodeBuffer referring to an already allocated piece of memory:
//     This is used to direct 'static' code generation (e.g. for interpreter
//     or stubroutine generation, etc.).  This code comes with NO relocation
//     information.
//
// (2) A CodeBuffer referring to a piece of memory allocated when the
//     CodeBuffer is allocated.  This is used for nmethod generation.
//
// The memory can be divided up into several parts called sections.
// Each section independently accumulates code (or data) and relocations.
// Sections can grow (at the expense of a reallocation of the BufferBlob
// and recopying of all active sections).  When the buffered code is finally
// written to an nmethod (or other CodeBlob), the contents (code, data,
// and relocations) of the sections are padded to an alignment and concatenated.
// Instructions and data in one section can contain relocatable references to
// addresses in a sibling section.
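//
// Usage sketch (illustration only, not part of the original comment): variant
// (1) wraps memory the caller already owns, variant (2) lets the CodeBuffer
// allocate its own blob memory, e.g.
//
//   CodeBuffer cb1(stub_memory, stub_size);          // variant (1)
//   CodeBuffer cb2("my_stub", code_size, locs_size); // variant (2)
//
// The buffer is then typically handed to an assembler, which emits into the
// insts section; 'stub_memory', 'stub_size', 'code_size' and 'locs_size' are
// hypothetical values supplied by the caller.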

class CodeBuffer: public StackObj {
  friend class CodeSection;
  friend class StubCodeGenerator;

 private:
  // CodeBuffers must be allocated on the stack except for a single
  // special case during expansion which is handled internally.  This
  // is done to guarantee proper cleanup of resources.
  void* operator new(size_t size) throw() { return ResourceObj::operator new(size); }
  void  operator delete(void* p)          { ShouldNotCallThis(); }

 public:
  typedef int csize_t;  // code size type; would be size_t except for history
  enum {
    // Here is the list of all possible sections.  The order reflects
    // the final layout.
    SECT_FIRST = 0,
    SECT_CONSTS = SECT_FIRST, // Non-instruction data:  Floats, jump tables, etc.
    SECT_INSTS,               // Executable instructions.
    SECT_STUBS,               // Outbound trampolines for supporting call sites.
    SECT_LIMIT, SECT_NONE = -1
  };

 private:
  enum {
    sect_bits = 2,      // assert (SECT_LIMIT <= (1<<sect_bits))
    sect_mask = (1<<sect_bits)-1
  };

  const char*  _name;

  CodeSection  _consts;             // constants, jump tables
  CodeSection  _insts;              // instructions (the main section)
  CodeSection  _stubs;              // stubs (call site support), deopt, exception handling

  CodeBuffer*  _before_expand;      // dead buffer, from before the last expansion

  BufferBlob*  _blob;               // optional buffer in CodeCache for generated code
  address      _total_start;        // first address of combined memory buffer
  csize_t      _total_size;         // size in bytes of combined memory buffer

  OopRecorder* _oop_recorder;
  CodeStrings  _code_strings;
  bool         _collect_comments;   // Indicate if we need to collect block comments at all.
  OopRecorder  _default_oop_recorder; // override with initialize_oop_recorder
  Arena*       _overflow_arena;

  address      _last_insn;          // used to merge consecutive memory barriers, loads or stores.

#if INCLUDE_AOT
  bool         _immutable_PIC;
#endif

  address      _decode_begin;       // start address for decode
  address decode_begin();

  void initialize_misc(const char* name) {
    // all pointers other than code_start/end and those inside the sections
    assert(name != NULL, "must have a name");
    _name            = name;
    _before_expand   = NULL;
    _blob            = NULL;
    _oop_recorder    = NULL;
    _decode_begin    = NULL;
    _overflow_arena  = NULL;
    _code_strings    = CodeStrings();
    _last_insn       = NULL;
#if INCLUDE_AOT
    _immutable_PIC   = false;
#endif

    // Collect block comments, but restrict collection to cases where a disassembly is output.
    _collect_comments = ( PrintAssembly
                       || PrintStubCode
                       || PrintMethodHandleStubs
                       || PrintInterpreter
                       || PrintSignatureHandlers
                       || UnlockDiagnosticVMOptions
                        );
  }

  void initialize(address code_start, csize_t code_size) {
    _consts.initialize_outer(this, SECT_CONSTS);
    _insts.initialize_outer(this,  SECT_INSTS);
    _stubs.initialize_outer(this,  SECT_STUBS);
    _total_start = code_start;
    _total_size  = code_size;
    // Initialize the main section:
    _insts.initialize(code_start, code_size);
    assert(!_stubs.is_allocated(),  "no garbage here");
    assert(!_consts.is_allocated(), "no garbage here");
    _oop_recorder = &_default_oop_recorder;
  }

  void initialize_section_size(CodeSection* cs, csize_t size);

  void freeze_section(CodeSection* cs);

  // helper for CodeBuffer::expand()
  void take_over_code_from(CodeBuffer* cs);

  // ensure sections are disjoint, ordered, and contained in the blob
  void verify_section_allocation();

  // copies combined relocations to the blob, returns bytes copied
  // (if target is null, it is a dry run only, just for sizing)
  csize_t copy_relocations_to(CodeBlob* blob) const;

  // copies combined code to the blob (assumes relocs are already in there)
  void copy_code_to(CodeBlob* blob);

  // moves code sections to new buffer (assumes relocs are already in there)
  void relocate_code_to(CodeBuffer* cb) const;

  // set up a model of the final layout of my contents
  void compute_final_layout(CodeBuffer* dest) const;

  // Expand the given section so at least 'amount' is remaining.
  // Creates a new, larger BufferBlob, and rewrites the code & relocs.
  void expand(CodeSection* which_cs, csize_t amount);

  // Helper for expand.
  csize_t figure_expanded_capacities(CodeSection* which_cs, csize_t amount, csize_t* new_capacity);

 public:
  // (1) code buffer referring to pre-allocated instruction memory
  CodeBuffer(address code_start, csize_t code_size) {
    assert(code_start != NULL, "sanity");
    initialize_misc("static buffer");
    initialize(code_start, code_size);
    verify_section_allocation();
  }

  // (2) CodeBuffer referring to pre-allocated CodeBlob.
  CodeBuffer(CodeBlob* blob);

  // (3) code buffer allocating codeBlob memory for code & relocation
  // info but with lazy initialization.  The name must be something
  // informative.
  CodeBuffer(const char* name) {
    initialize_misc(name);
  }

  // (4) code buffer allocating codeBlob memory for code & relocation
  // info.  The name must be something informative and code_size must
  // include both code and stubs sizes.
  CodeBuffer(const char* name, csize_t code_size, csize_t locs_size) {
    initialize_misc(name);
    initialize(code_size, locs_size);
  }

  ~CodeBuffer();

  // Initialize a CodeBuffer constructed using constructor 3.  Using
  // constructor 4 is equivalent to calling constructor 3 and then
  // calling this method.  It's been factored out for convenience of
  // construction.
  void initialize(csize_t code_size, csize_t locs_size);
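
  // Equivalence sketch (illustration only): these two forms produce the same
  // result, assuming hypothetical sizes 'code_size' and 'locs_size':
  //
  //   CodeBuffer a("stub", code_size, locs_size);   // constructor 4
  //
  //   CodeBuffer b("stub");                         // constructor 3 ...
  //   b.initialize(code_size, locs_size);           // ... plus explicit init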

  CodeSection* consts() { return &_consts; }
  CodeSection* insts()  { return &_insts; }
  CodeSection* stubs()  { return &_stubs; }

  const CodeSection* insts() const { return &_insts; }

  // present sections in order; return NULL at end; consts is #0, etc.
  CodeSection* code_section(int n) {
    // This makes the slightly questionable but portable assumption
    // that the various members (_consts, _insts, _stubs, etc.) are
    // adjacent in the layout of CodeBuffer.
    CodeSection* cs = &_consts + n;
    assert(cs->index() == n || !cs->is_allocated(), "sanity");
    return cs;
  }
  const CodeSection* code_section(int n) const {  // yucky const stuff
    return ((CodeBuffer*)this)->code_section(n);
  }
  static const char* code_section_name(int n);
  int section_index_of(address addr) const;
  bool contains(address addr) const {
    // handy for debugging
    return section_index_of(addr) > SECT_NONE;
  }

  // A stable mapping between 'locators' (small ints) and addresses.
  static int locator_pos(int locator)   { return locator >> sect_bits; }
  static int locator_sect(int locator)  { return locator & sect_mask; }
  static int locator(int pos, int sect) { return (pos << sect_bits) | sect; }
  int        locator(address addr) const;
  address    locator_address(int locator) const;
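
  // Locator encoding sketch (illustration only): a locator packs a section
  // index into the low sect_bits bits and an offset into the rest, e.g.
  //
  //   int loc = locator(0x40, SECT_INSTS);   // (0x40 << 2) | 1 == 0x101
  //   locator_pos(loc)  == 0x40;             // offset within the section
  //   locator_sect(loc) == SECT_INSTS;       // which section it refers to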

  // Heuristic for pre-packing the taken/not-taken bit of a predicted branch.
  bool is_backward_branch(Label& L);

  // Properties
  const char* name() const          { return _name; }
  void set_name(const char* name)   { _name = name; }
  CodeBuffer* before_expand() const { return _before_expand; }
  BufferBlob* blob() const          { return _blob; }
  void    set_blob(BufferBlob* blob);
  void   free_blob();               // Free the blob, if we own one.

  // Properties relative to the insts section:
  address       insts_begin() const      { return _insts.start();      }
  address       insts_end() const        { return _insts.end();        }
  void      set_insts_end(address end)   {        _insts.set_end(end); }
  address       insts_limit() const      { return _insts.limit();      }
  address       insts_mark() const       { return _insts.mark();       }
  void      set_insts_mark()             {        _insts.set_mark();   }
  void    clear_insts_mark()             {        _insts.clear_mark(); }

  // is there anything in the buffer other than the current section?
  bool    is_pure() const                { return insts_size() == total_content_size(); }

  // size in bytes of output so far in the insts sections
  csize_t insts_size() const             { return _insts.size(); }

  // same as insts_size(), except that it asserts there is no non-code here
  csize_t pure_insts_size() const        { assert(is_pure(), "no non-code");
                                           return insts_size(); }
  // capacity in bytes of the insts sections
  csize_t insts_capacity() const         { return _insts.capacity(); }

  // number of bytes remaining in the insts section
  csize_t insts_remaining() const        { return _insts.remaining(); }

  // is a given address in the insts section?  (2nd version is end-inclusive)
  bool insts_contains(address pc) const  { return _insts.contains(pc); }
  bool insts_contains2(address pc) const { return _insts.contains2(pc); }

  // Record any extra oops required to keep embedded metadata alive
  void finalize_oop_references(const methodHandle& method);

  // Allocated size in all sections, when aligned and concatenated
  // (this is the eventual state of the content in its final
  // CodeBlob).
  csize_t total_content_size() const;

  // Combined offset (relative to start of first section) of given
  // section, as eventually found in the final CodeBlob.
  csize_t total_offset_of(const CodeSection* cs) const;

  // allocated size of all relocation data, including index, rounded up
  csize_t total_relocation_size() const;

  csize_t copy_relocations_to(address buf, csize_t buf_limit, bool only_inst) const;

  // allocated size of any and all recorded oops
  csize_t total_oop_size() const {
    OopRecorder* recorder = oop_recorder();
    return (recorder == NULL)? 0: recorder->oop_size();
  }

  // allocated size of any and all recorded metadata
  csize_t total_metadata_size() const {
    OopRecorder* recorder = oop_recorder();
    return (recorder == NULL)? 0: recorder->metadata_size();
  }

  // Configuration functions, called immediately after the CB is constructed.
  // The section sizes are subtracted from the original insts section.
  // Note:  Call them in reverse section order, because each steals from insts.
  void initialize_consts_size(csize_t size) { initialize_section_size(&_consts, size); }
  void initialize_stubs_size(csize_t size)  { initialize_section_size(&_stubs,  size); }
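
  // Ordering sketch (illustration only): each call carves its bytes out of
  // the insts section, so configure the later-laid-out sections first, e.g.
  //
  //   CodeBuffer cb("stub", total_size, locs_size);  // hypothetical sizes
  //   cb.initialize_stubs_size(stubs_size);          // steal stubs from insts
  //   cb.initialize_consts_size(consts_size);        // then steal consts
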
  // Override default oop recorder.
  void initialize_oop_recorder(OopRecorder* r);

  OopRecorder* oop_recorder() const { return _oop_recorder; }
  CodeStrings& strings()            { return _code_strings; }

  address last_insn() const { return _last_insn; }
  void set_last_insn(address a) { _last_insn = a; }
  void clear_last_insn() { set_last_insn(NULL); }

  void free_strings() {
    if (!_code_strings.is_null()) {
      _code_strings.free(); // sets _strings to NULL as a side-effect
    }
  }

  // Directly disassemble code buffer.
  // Print the comment associated with offset on stream, if there is one.
  virtual void print_block_comment(outputStream* stream, address block_begin) {
#ifndef PRODUCT
    intptr_t offset = (intptr_t)(block_begin - _total_start);  // I assume total_start is not correct for all code sections.
    _code_strings.print_block_comment(stream, offset);
#endif
  }
  bool has_block_comment(address block_begin) {
#ifndef PRODUCT
    intptr_t offset = (intptr_t)(block_begin - _total_start);  // I assume total_start is not correct for all code sections.
    return _code_strings.has_block_comment(offset);
#else
    return false;
#endif
  }

  // Code generation
  void relocate(address at, RelocationHolder const& rspec, int format = 0) {
    _insts.relocate(at, rspec, format);
  }
  void relocate(address at, relocInfo::relocType rtype, int format = 0) {
    _insts.relocate(at, rtype, format);
  }

  // Management of overflow storage for binding of Labels.
  GrowableArray<int>* create_patch_overflow();

  // NMethod generation
  void copy_code_and_locs_to(CodeBlob* blob) {
    assert(blob != NULL, "sane");
    copy_relocations_to(blob);
    copy_code_to(blob);
  }
  void copy_values_to(nmethod* nm) {
    if (!oop_recorder()->is_unused()) {
      oop_recorder()->copy_values_to(nm);
    }
  }

  // Transform an address from the code in this code buffer to a specified code buffer
  address transform_address(const CodeBuffer &cb, address addr) const;

  void block_comment(intptr_t offset, const char* comment) PRODUCT_RETURN;
  const char* code_string(const char* str) PRODUCT_RETURN_(return NULL;);

  // Log a little info about section usage in the CodeBuffer
  void log_section_sizes(const char* name);

#if INCLUDE_AOT
  // True if this is a code buffer used for immutable PIC, i.e. AOT
  // compilation.
  bool immutable_PIC() { return _immutable_PIC; }
  void set_immutable_PIC(bool pic) { _immutable_PIC = pic; }
#endif

#ifndef PRODUCT
 public:
  // Printing / Decoding
  // decodes from decode_begin() to code_end() and sets decode_begin to end
  void    decode();
  void    print();
#endif
  // Directly disassemble code buffer.
  void    decode(address start, address end);

  // The following header contains architecture-specific implementations
#include CPU_HEADER(codeBuffer)

};


inline void CodeSection::freeze() {
  _outer->freeze_section(this);
}

inline bool CodeSection::maybe_expand_to_ensure_remaining(csize_t amount) {
  if (remaining() < amount) { _outer->expand(this, amount); return true; }
  return false;
}
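
// Expansion sketch (illustration only): an emitter that is about to write
// 'n' bytes can guard the write like this; if the section had to grow, any
// cached addresses into the old buffer must be refreshed afterwards:
//
//   if (section->maybe_expand_to_ensure_remaining(n)) {
//     // buffer moved; re-read section->end() before emitting
//   }
//
// 'section' and 'n' are hypothetical names used only for this comment.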

#endif // SHARE_ASM_CODEBUFFER_HPP