1 | /* |
2 | * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. |
3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 | * |
5 | * This code is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 only, as |
7 | * published by the Free Software Foundation. |
8 | * |
9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
12 | * version 2 for more details (a copy is included in the LICENSE file that |
13 | * accompanied this code). |
14 | * |
15 | * You should have received a copy of the GNU General Public License version |
16 | * 2 along with this work; if not, write to the Free Software Foundation, |
17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
18 | * |
19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
20 | * or visit www.oracle.com if you need additional information or have any |
21 | * questions. |
22 | * |
23 | */ |
24 | |
25 | #include "precompiled.hpp" |
26 | #include "logging/log.hpp" |
27 | #include "memory/resourceArea.hpp" |
28 | #include "memory/virtualspace.hpp" |
29 | #include "oops/compressedOops.hpp" |
30 | #include "oops/markOop.hpp" |
31 | #include "oops/oop.inline.hpp" |
32 | #include "runtime/os.inline.hpp" |
33 | #include "services/memTracker.hpp" |
34 | #include "utilities/align.hpp" |
35 | |
36 | // ReservedSpace |
37 | |
38 | // Dummy constructor |
39 | ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0), |
40 | _alignment(0), _special(false), _fd_for_heap(-1), _executable(false) { |
41 | } |
42 | |
43 | ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_heap(-1) { |
44 | bool has_preferred_page_size = preferred_page_size != 0; |
45 | // Want to use large pages where possible and pad with small pages. |
46 | size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1); |
47 | bool large_pages = page_size != (size_t)os::vm_page_size(); |
48 | size_t alignment; |
49 | if (large_pages && has_preferred_page_size) { |
50 | alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity()); |
51 | // ReservedSpace initialization requires size to be aligned to the given |
52 | // alignment. Align the size up. |
53 | size = align_up(size, alignment); |
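    // For example, a 5M request with a 2M preferred page size is rounded up to 6M
    // (assuming the allocation granularity is not larger than 2M).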
54 | } else { |
55 | // Don't force the alignment to be large page aligned, |
56 | // since that will waste memory. |
57 | alignment = os::vm_allocation_granularity(); |
58 | } |
59 | initialize(size, alignment, large_pages, NULL, false); |
60 | } |
61 | |
62 | ReservedSpace::ReservedSpace(size_t size, size_t alignment, |
63 | bool large, |
64 | char* requested_address) : _fd_for_heap(-1) { |
65 | initialize(size, alignment, large, requested_address, false); |
66 | } |
67 | |
68 | ReservedSpace::ReservedSpace(size_t size, size_t alignment, |
69 | bool large, |
70 | bool executable) : _fd_for_heap(-1) { |
71 | initialize(size, alignment, large, NULL, executable); |
72 | } |
73 | |
74 | ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment, |
75 | bool special, bool executable) : _fd_for_heap(-1) { |
76 | assert((size % os::vm_allocation_granularity()) == 0, |
77 | "size not allocation aligned" ); |
78 | _base = base; |
79 | _size = size; |
80 | _alignment = alignment; |
81 | _noaccess_prefix = 0; |
82 | _special = special; |
83 | _executable = executable; |
84 | } |
85 | |
// Helper method: unmap file-backed memory, or release an anonymous mapping.
87 | static void unmap_or_release_memory(char* base, size_t size, bool is_file_mapped) { |
88 | if (is_file_mapped) { |
89 | if (!os::unmap_memory(base, size)) { |
90 | fatal("os::unmap_memory failed" ); |
91 | } |
92 | } else if (!os::release_memory(base, size)) { |
93 | fatal("os::release_memory failed" ); |
94 | } |
95 | } |
96 | |
// Helper method: returns true if the reservation did not land at requested_address,
// releasing any memory that was obtained at a different address.
98 | static bool failed_to_reserve_as_requested(char* base, char* requested_address, |
99 | const size_t size, bool special, bool is_file_mapped = false) |
100 | { |
101 | if (base == requested_address || requested_address == NULL) |
102 | return false; // did not fail |
103 | |
104 | if (base != NULL) { |
    // A different reserve address may be acceptable in other cases,
    // but for compressed oops the heap should be at the requested address.
    assert(UseCompressedOops, "currently requested address used only for compressed oops");
108 | log_debug(gc, heap, coops)("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, p2i(base), p2i(requested_address)); |
109 | // OS ignored requested address. Try different address. |
110 | if (special) { |
111 | if (!os::release_memory_special(base, size)) { |
112 | fatal("os::release_memory_special failed" ); |
113 | } |
114 | } else { |
115 | unmap_or_release_memory(base, size, is_file_mapped); |
116 | } |
117 | } |
118 | return true; |
119 | } |
120 | |
121 | void ReservedSpace::initialize(size_t size, size_t alignment, bool large, |
122 | char* requested_address, |
123 | bool executable) { |
124 | const size_t granularity = os::vm_allocation_granularity(); |
125 | assert((size & (granularity - 1)) == 0, |
126 | "size not aligned to os::vm_allocation_granularity()" ); |
127 | assert((alignment & (granularity - 1)) == 0, |
128 | "alignment not aligned to os::vm_allocation_granularity()" ); |
129 | assert(alignment == 0 || is_power_of_2((intptr_t)alignment), |
130 | "not a power of 2" ); |
131 | |
132 | alignment = MAX2(alignment, (size_t)os::vm_page_size()); |
133 | |
134 | _base = NULL; |
135 | _size = 0; |
136 | _special = false; |
137 | _executable = executable; |
138 | _alignment = 0; |
139 | _noaccess_prefix = 0; |
140 | if (size == 0) { |
141 | return; |
142 | } |
143 | |
  // If the OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  // If there is a backing file directory for this space then whether
  // large pages are allocated is up to the file system of the backing file.
  // So we ignore the UseLargePages flag in this case.
149 | bool special = large && !os::can_commit_large_page_memory(); |
150 | if (special && _fd_for_heap != -1) { |
151 | special = false; |
152 | if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) || |
153 | !FLAG_IS_DEFAULT(LargePageSizeInBytes))) { |
154 | log_debug(gc, heap)("Ignoring UseLargePages since large page support is up to the file system of the backing file for Java heap" ); |
155 | } |
156 | } |
157 | |
158 | char* base = NULL; |
159 | |
160 | if (special) { |
161 | |
162 | base = os::reserve_memory_special(size, alignment, requested_address, executable); |
163 | |
164 | if (base != NULL) { |
165 | if (failed_to_reserve_as_requested(base, requested_address, size, true)) { |
166 | // OS ignored requested address. Try different address. |
167 | return; |
168 | } |
169 | // Check alignment constraints. |
170 | assert((uintptr_t) base % alignment == 0, |
171 | "Large pages returned a non-aligned address, base: " |
172 | PTR_FORMAT " alignment: " SIZE_FORMAT_HEX, |
173 | p2i(base), alignment); |
174 | _special = true; |
175 | } else { |
176 | // failed; try to reserve regular memory below |
177 | if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) || |
178 | !FLAG_IS_DEFAULT(LargePageSizeInBytes))) { |
179 | log_debug(gc, heap, coops)("Reserve regular memory without large pages" ); |
180 | } |
181 | } |
182 | } |
183 | |
184 | if (base == NULL) { |
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid mapping over something
    // important. If available space is not detected, return NULL.
192 | |
193 | if (requested_address != 0) { |
194 | base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap); |
195 | if (failed_to_reserve_as_requested(base, requested_address, size, false, _fd_for_heap != -1)) { |
196 | // OS ignored requested address. Try different address. |
197 | base = NULL; |
198 | } |
199 | } else { |
200 | base = os::reserve_memory(size, NULL, alignment, _fd_for_heap); |
201 | } |
202 | |
203 | if (base == NULL) return; |
204 | |
205 | // Check alignment constraints |
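    // Since alignment is a power of 2, (base & (alignment - 1)) == 0 iff base is aligned.
    // For example, with alignment 0x10000, a base of 0x7f0000010000 passes while
    // 0x7f0000008000 does not.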
206 | if ((((size_t)base) & (alignment - 1)) != 0) { |
207 | // Base not aligned, retry |
208 | unmap_or_release_memory(base, size, _fd_for_heap != -1 /*is_file_mapped*/); |
209 | |
210 | // Make sure that size is aligned |
211 | size = align_up(size, alignment); |
212 | base = os::reserve_memory_aligned(size, alignment, _fd_for_heap); |
213 | |
214 | if (requested_address != 0 && |
215 | failed_to_reserve_as_requested(base, requested_address, size, false, _fd_for_heap != -1)) { |
216 | // As a result of the alignment constraints, the allocated base differs |
217 | // from the requested address. Return back to the caller who can |
218 | // take remedial action (like try again without a requested address). |
219 | assert(_base == NULL, "should be" ); |
220 | return; |
221 | } |
222 | } |
223 | } |
224 | // Done |
225 | _base = base; |
226 | _size = size; |
227 | _alignment = alignment; |
228 | // If heap is reserved with a backing file, the entire space has been committed. So set the _special flag to true |
229 | if (_fd_for_heap != -1) { |
230 | _special = true; |
231 | } |
232 | } |
233 | |
234 | ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment, |
235 | bool split, bool realloc) { |
236 | assert(partition_size <= size(), "partition failed" ); |
237 | if (split) { |
238 | os::split_reserved_memory(base(), size(), partition_size, realloc); |
239 | } |
240 | ReservedSpace result(base(), partition_size, alignment, special(), |
241 | executable()); |
242 | return result; |
243 | } |
244 | |
245 | |
246 | ReservedSpace |
247 | ReservedSpace::last_part(size_t partition_size, size_t alignment) { |
248 | assert(partition_size <= size(), "partition failed" ); |
249 | ReservedSpace result(base() + partition_size, size() - partition_size, |
250 | alignment, special(), executable()); |
251 | return result; |
252 | } |
253 | |
254 | |
255 | size_t ReservedSpace::page_align_size_up(size_t size) { |
256 | return align_up(size, os::vm_page_size()); |
257 | } |
258 | |
259 | |
260 | size_t ReservedSpace::page_align_size_down(size_t size) { |
261 | return align_down(size, os::vm_page_size()); |
262 | } |
263 | |
264 | |
265 | size_t ReservedSpace::allocation_align_size_up(size_t size) { |
266 | return align_up(size, os::vm_allocation_granularity()); |
267 | } |
268 | |
269 | |
270 | void ReservedSpace::release() { |
271 | if (is_reserved()) { |
272 | char *real_base = _base - _noaccess_prefix; |
273 | const size_t real_size = _size + _noaccess_prefix; |
274 | if (special()) { |
275 | if (_fd_for_heap != -1) { |
276 | os::unmap_memory(real_base, real_size); |
277 | } else { |
278 | os::release_memory_special(real_base, real_size); |
279 | } |
    } else {
281 | os::release_memory(real_base, real_size); |
282 | } |
283 | _base = NULL; |
284 | _size = 0; |
285 | _noaccess_prefix = 0; |
286 | _alignment = 0; |
287 | _special = false; |
288 | _executable = false; |
289 | } |
290 | } |
291 | |
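// The noaccess prefix is a protected region placed just below the heap base so that
// decoding a compressed null oop (base + 0) faults. Its size must be a multiple of
// the page size (so it can be protected) and of the heap alignment (so the shifted
// heap base stays aligned), hence the lcm below.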
292 | static size_t noaccess_prefix_size(size_t alignment) { |
293 | return lcm(os::vm_page_size(), alignment); |
294 | } |
295 | |
296 | void ReservedHeapSpace::establish_noaccess_prefix() { |
297 | assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big" ); |
298 | _noaccess_prefix = noaccess_prefix_size(_alignment); |
299 | |
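  // Protection is only needed if the heap extends beyond OopEncodingHeapMax,
  // i.e. compressed oops will use a non-zero base and a null oop decodes to the
  // heap base.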
300 | if (base() && base() + _size > (char *)OopEncodingHeapMax) { |
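    // On WIN64 with large pages, and on AIX with a 64K page size, skip the
    // protection and fall back to explicit null checks instead.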
301 | if (true |
302 | WIN64_ONLY(&& !UseLargePages) |
303 | AIX_ONLY(&& os::vm_page_size() != 64*K)) { |
304 | // Protect memory at the base of the allocated region. |
305 | // If special, the page was committed (only matters on windows) |
306 | if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) { |
307 | fatal("cannot protect protection page" ); |
308 | } |
309 | log_debug(gc, heap, coops)("Protected page at the reserved heap base: " |
310 | PTR_FORMAT " / " INTX_FORMAT " bytes" , |
311 | p2i(_base), |
312 | _noaccess_prefix); |
313 | assert(CompressedOops::use_implicit_null_checks() == true, "not initialized?" ); |
314 | } else { |
315 | CompressedOops::set_use_implicit_null_checks(false); |
316 | } |
317 | } |
318 | |
319 | _base += _noaccess_prefix; |
320 | _size -= _noaccess_prefix; |
321 | assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment" ); |
322 | } |
323 | |
324 | // Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'. |
325 | // Does not check whether the reserved memory actually is at requested_address, as the memory returned |
326 | // might still fulfill the wishes of the caller. |
// Ensures the memory is aligned to 'alignment'.
// NOTE: If ReservedHeapSpace already points to some reserved memory, it is freed first.
329 | void ReservedHeapSpace::try_reserve_heap(size_t size, |
330 | size_t alignment, |
331 | bool large, |
332 | char* requested_address) { |
333 | if (_base != NULL) { |
334 | // We tried before, but we didn't like the address delivered. |
335 | release(); |
336 | } |
337 | |
338 | // If OS doesn't support demand paging for large page memory, we need |
339 | // to use reserve_memory_special() to reserve and pin the entire region. |
340 | // If there is a backing file directory for this space then whether |
341 | // large pages are allocated is up to the filesystem of the backing file. |
342 | // So we ignore the UseLargePages flag in this case. |
343 | bool special = large && !os::can_commit_large_page_memory(); |
344 | if (special && _fd_for_heap != -1) { |
345 | special = false; |
346 | if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) || |
347 | !FLAG_IS_DEFAULT(LargePageSizeInBytes))) { |
348 | log_debug(gc, heap)("Cannot allocate large pages for Java Heap when AllocateHeapAt option is set." ); |
349 | } |
350 | } |
351 | char* base = NULL; |
352 | |
353 | log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT |
354 | " heap of size " SIZE_FORMAT_HEX, |
355 | p2i(requested_address), |
356 | size); |
357 | |
358 | if (special) { |
359 | base = os::reserve_memory_special(size, alignment, requested_address, false); |
360 | |
361 | if (base != NULL) { |
362 | // Check alignment constraints. |
363 | assert((uintptr_t) base % alignment == 0, |
364 | "Large pages returned a non-aligned address, base: " |
365 | PTR_FORMAT " alignment: " SIZE_FORMAT_HEX, |
366 | p2i(base), alignment); |
367 | _special = true; |
368 | } |
369 | } |
370 | |
371 | if (base == NULL) { |
372 | // Failed; try to reserve regular memory below |
373 | if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) || |
374 | !FLAG_IS_DEFAULT(LargePageSizeInBytes))) { |
375 | log_debug(gc, heap, coops)("Reserve regular memory without large pages" ); |
376 | } |
377 | |
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid mapping over something
    // important. If available space is not detected, return NULL.
385 | |
386 | if (requested_address != 0) { |
387 | base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap); |
388 | } else { |
389 | base = os::reserve_memory(size, NULL, alignment, _fd_for_heap); |
390 | } |
391 | } |
392 | if (base == NULL) { return; } |
393 | |
394 | // Done |
395 | _base = base; |
396 | _size = size; |
397 | _alignment = alignment; |
398 | |
399 | // If heap is reserved with a backing file, the entire space has been committed. So set the _special flag to true |
400 | if (_fd_for_heap != -1) { |
401 | _special = true; |
402 | } |
403 | |
404 | // Check alignment constraints |
405 | if ((((size_t)base) & (alignment - 1)) != 0) { |
406 | // Base not aligned, retry. |
407 | release(); |
408 | } |
409 | } |
410 | |
411 | void ReservedHeapSpace::try_reserve_range(char *highest_start, |
412 | char *lowest_start, |
413 | size_t attach_point_alignment, |
414 | char *aligned_heap_base_min_address, |
415 | char *upper_bound, |
416 | size_t size, |
417 | size_t alignment, |
418 | bool large) { |
419 | const size_t attach_range = highest_start - lowest_start; |
  // Cap num_attempts at the number of attempts that are actually possible.
  // At least one attempt is possible even for a zero-sized attach range.
422 | const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1; |
423 | const uint64_t num_attempts_to_try = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible); |
424 | |
425 | const size_t stepsize = (attach_range == 0) ? // Only one try. |
426 | (size_t) highest_start : align_up(attach_range / num_attempts_to_try, attach_point_alignment); |
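  // For example, with a 4G attach range, a 64K attach point alignment and three
  // attempts, stepsize is align_up(4G / 3, 64K), i.e. roughly 1.33G per step.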
427 | |
428 | // Try attach points from top to bottom. |
429 | char* attach_point = highest_start; |
430 | while (attach_point >= lowest_start && |
431 | attach_point <= highest_start && // Avoid wrap around. |
432 | ((_base == NULL) || |
433 | (_base < aligned_heap_base_min_address || _base + size > upper_bound))) { |
434 | try_reserve_heap(size, alignment, large, attach_point); |
435 | attach_point -= stepsize; |
436 | } |
437 | } |
438 | |
439 | #define SIZE_64K ((uint64_t) UCONST64( 0x10000)) |
440 | #define SIZE_256M ((uint64_t) UCONST64( 0x10000000)) |
441 | #define SIZE_32G ((uint64_t) UCONST64( 0x800000000)) |
442 | |
443 | // Helper for heap allocation. Returns an array with addresses |
444 | // (OS-specific) which are suited for disjoint base mode. Array is |
445 | // NULL terminated. |
446 | static char** get_attach_addresses_for_disjoint_mode() { |
447 | static uint64_t addresses[] = { |
448 | 2 * SIZE_32G, |
449 | 3 * SIZE_32G, |
450 | 4 * SIZE_32G, |
451 | 8 * SIZE_32G, |
452 | 10 * SIZE_32G, |
453 | 1 * SIZE_64K * SIZE_32G, |
454 | 2 * SIZE_64K * SIZE_32G, |
455 | 3 * SIZE_64K * SIZE_32G, |
456 | 4 * SIZE_64K * SIZE_32G, |
457 | 16 * SIZE_64K * SIZE_32G, |
458 | 32 * SIZE_64K * SIZE_32G, |
459 | 34 * SIZE_64K * SIZE_32G, |
460 | 0 |
461 | }; |
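  // These are multiples of 32G, which matches OopEncodingHeapMax for the default
  // 8-byte object alignment, so the heap base bits do not overlap the shifted
  // oop offset bits as required by disjoint base mode.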
462 | |
  // Skip addresses below OopEncodingHeapMax or HeapBaseMinAddress. This assumes
  // the array is sorted.
465 | uint i = 0; |
466 | while (addresses[i] != 0 && |
467 | (addresses[i] < OopEncodingHeapMax || addresses[i] < HeapBaseMinAddress)) { |
468 | i++; |
469 | } |
470 | uint start = i; |
471 | |
472 | // Avoid more steps than requested. |
473 | i = 0; |
474 | while (addresses[start+i] != 0) { |
475 | if (i == HeapSearchSteps) { |
476 | addresses[start+i] = 0; |
477 | break; |
478 | } |
479 | i++; |
480 | } |
481 | |
482 | return (char**) &addresses[start]; |
483 | } |
484 | |
485 | void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t alignment, bool large) { |
  guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax,
            "cannot allocate compressed oop heap for this size");
488 | guarantee(alignment == MAX2(alignment, (size_t)os::vm_page_size()), "alignment too small" ); |
489 | |
490 | const size_t granularity = os::vm_allocation_granularity(); |
491 | assert((size & (granularity - 1)) == 0, |
492 | "size not aligned to os::vm_allocation_granularity()" ); |
493 | assert((alignment & (granularity - 1)) == 0, |
494 | "alignment not aligned to os::vm_allocation_granularity()" ); |
495 | assert(alignment == 0 || is_power_of_2((intptr_t)alignment), |
496 | "not a power of 2" ); |
497 | |
498 | // The necessary attach point alignment for generated wish addresses. |
499 | // This is needed to increase the chance of attaching for mmap and shmat. |
500 | const size_t os_attach_point_alignment = |
501 | AIX_ONLY(SIZE_256M) // Known shm boundary alignment. |
502 | NOT_AIX(os::vm_allocation_granularity()); |
503 | const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment); |
504 | |
505 | char *aligned_heap_base_min_address = (char *)align_up((void *)HeapBaseMinAddress, alignment); |
506 | size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ? |
507 | noaccess_prefix_size(alignment) : 0; |
508 | |
509 | // Attempt to alloc at user-given address. |
510 | if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) { |
511 | try_reserve_heap(size + noaccess_prefix, alignment, large, aligned_heap_base_min_address); |
512 | if (_base != aligned_heap_base_min_address) { // Enforce this exact address. |
513 | release(); |
514 | } |
515 | } |
516 | |
517 | // Keep heap at HeapBaseMinAddress. |
518 | if (_base == NULL) { |
519 | |
520 | // Try to allocate the heap at addresses that allow efficient oop compression. |
521 | // Different schemes are tried, in order of decreasing optimization potential. |
522 | // |
523 | // For this, try_reserve_heap() is called with the desired heap base addresses. |
524 | // A call into the os layer to allocate at a given address can return memory |
525 | // at a different address than requested. Still, this might be memory at a useful |
526 | // address. try_reserve_heap() always returns this allocated memory, as only here |
527 | // the criteria for a good heap are checked. |
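    // In short, the attempts below are, in order: unscaled (heap below
    // UnscaledOopHeapMax, 4G), zero-based (heap below OopEncodingHeapMax, 32G with
    // the default 8-byte object alignment), disjoint base, and finally an arbitrary
    // base with a noaccess prefix.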
528 | |
    // Attempt to allocate so that we can run without base and scale (32-bit unscaled compressed oops).
    // Give it several tries from top of range to bottom.
531 | if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) { |
532 | |
      // Calculate the address range within which we try to attach (range of possible start addresses).
534 | char* const highest_start = align_down((char *)UnscaledOopHeapMax - size, attach_point_alignment); |
535 | char* const lowest_start = align_up(aligned_heap_base_min_address, attach_point_alignment); |
536 | try_reserve_range(highest_start, lowest_start, attach_point_alignment, |
537 | aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, large); |
538 | } |
539 | |
    // Zero-based: Attempt to allocate in the lower 32G.
    // But leave room for the compressed class space, which is allocated above
    // the heap.
543 | char *zerobased_max = (char *)OopEncodingHeapMax; |
544 | const size_t class_space = align_up(CompressedClassSpaceSize, alignment); |
545 | // For small heaps, save some space for compressed class pointer |
546 | // space so it can be decoded with no base. |
547 | if (UseCompressedClassPointers && !UseSharedSpaces && |
548 | OopEncodingHeapMax <= KlassEncodingMetaspaceMax && |
549 | (uint64_t)(aligned_heap_base_min_address + size + class_space) <= KlassEncodingMetaspaceMax) { |
550 | zerobased_max = (char *)OopEncodingHeapMax - class_space; |
551 | } |
552 | |
553 | // Give it several tries from top of range to bottom. |
554 | if (aligned_heap_base_min_address + size <= zerobased_max && // Zerobased theoretical possible. |
555 | ((_base == NULL) || // No previous try succeeded. |
556 | (_base + size > zerobased_max))) { // Unscaled delivered an arbitrary address. |
557 | |
      // Calculate the address range within which we try to attach (range of possible start addresses).
559 | char *const highest_start = align_down(zerobased_max - size, attach_point_alignment); |
      // Be careful: size is not guaranteed to be less than UnscaledOopHeapMax
      // due to type constraints, so the subtraction below may wrap around.
562 | char *lowest_start = aligned_heap_base_min_address; |
563 | uint64_t unscaled_end = UnscaledOopHeapMax - size; |
564 | if (unscaled_end < UnscaledOopHeapMax) { // unscaled_end wrapped if size is large |
565 | lowest_start = MAX2(lowest_start, (char*)unscaled_end); |
566 | } |
567 | lowest_start = align_up(lowest_start, attach_point_alignment); |
568 | try_reserve_range(highest_start, lowest_start, attach_point_alignment, |
569 | aligned_heap_base_min_address, zerobased_max, size, alignment, large); |
570 | } |
571 | |
572 | // Now we go for heaps with base != 0. We need a noaccess prefix to efficiently |
573 | // implement null checks. |
574 | noaccess_prefix = noaccess_prefix_size(alignment); |
575 | |
    // Try to attach at addresses that are aligned to OopEncodingHeapMax. Disjoint base mode.
577 | char** addresses = get_attach_addresses_for_disjoint_mode(); |
578 | int i = 0; |
579 | while (addresses[i] && // End of array not yet reached. |
580 | ((_base == NULL) || // No previous try succeeded. |
581 | (_base + size > (char *)OopEncodingHeapMax && // Not zerobased or unscaled address. |
582 | !CompressedOops::is_disjoint_heap_base_address((address)_base)))) { // Not disjoint address. |
583 | char* const attach_point = addresses[i]; |
584 | assert(attach_point >= aligned_heap_base_min_address, "Flag support broken" ); |
585 | try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point); |
586 | i++; |
587 | } |
588 | |
    // Last, a desperate try without any placement hint.
590 | if (_base == NULL) { |
591 | log_trace(gc, heap, coops)("Trying to allocate at address NULL heap of size " SIZE_FORMAT_HEX, size + noaccess_prefix); |
592 | initialize(size + noaccess_prefix, alignment, large, NULL, false); |
593 | } |
594 | } |
595 | } |
596 | |
597 | ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large, const char* heap_allocation_directory) : ReservedSpace() { |
598 | |
599 | if (size == 0) { |
600 | return; |
601 | } |
602 | |
603 | if (heap_allocation_directory != NULL) { |
604 | _fd_for_heap = os::create_file_for_heap(heap_allocation_directory); |
605 | if (_fd_for_heap == -1) { |
606 | vm_exit_during_initialization( |
607 | err_msg("Could not create file for Heap at location %s" , heap_allocation_directory)); |
608 | } |
609 | } |
610 | |
611 | // Heap size should be aligned to alignment, too. |
612 | guarantee(is_aligned(size, alignment), "set by caller" ); |
613 | |
614 | if (UseCompressedOops) { |
615 | initialize_compressed_heap(size, alignment, large); |
616 | if (_size > size) { |
617 | // We allocated heap with noaccess prefix. |
618 | // It can happen we get a zerobased/unscaled heap with noaccess prefix, |
619 | // if we had to try at arbitrary address. |
620 | establish_noaccess_prefix(); |
621 | } |
622 | } else { |
623 | initialize(size, alignment, large, NULL, false); |
624 | } |
625 | |
626 | assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base, |
627 | "area must be distinguishable from marks for mark-sweep" ); |
628 | assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size], |
629 | "area must be distinguishable from marks for mark-sweep" ); |
630 | |
631 | if (base() != NULL) { |
632 | MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap); |
633 | } |
634 | |
635 | if (_fd_for_heap != -1) { |
636 | os::close(_fd_for_heap); |
637 | } |
638 | } |
639 | |
// Reserve space for the code segment. Same as the Java heap, except we mark this as
// executable.
642 | ReservedCodeSpace::ReservedCodeSpace(size_t r_size, |
643 | size_t rs_align, |
644 | bool large) : |
645 | ReservedSpace(r_size, rs_align, large, /*executable*/ true) { |
646 | MemTracker::record_virtual_memory_type((address)base(), mtCode); |
647 | } |
648 | |
649 | // VirtualSpace |
650 | |
651 | VirtualSpace::VirtualSpace() { |
652 | _low_boundary = NULL; |
653 | _high_boundary = NULL; |
654 | _low = NULL; |
655 | _high = NULL; |
656 | _lower_high = NULL; |
657 | _middle_high = NULL; |
658 | _upper_high = NULL; |
659 | _lower_high_boundary = NULL; |
660 | _middle_high_boundary = NULL; |
661 | _upper_high_boundary = NULL; |
662 | _lower_alignment = 0; |
663 | _middle_alignment = 0; |
664 | _upper_alignment = 0; |
665 | _special = false; |
666 | _executable = false; |
667 | } |
668 | |
669 | |
670 | bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) { |
671 | const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1); |
672 | return initialize_with_granularity(rs, committed_size, max_commit_granularity); |
673 | } |
674 | |
675 | bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) { |
  if (!rs.is_reserved()) return false;  // Allocation failed.
677 | assert(_low_boundary == NULL, "VirtualSpace already initialized" ); |
678 | assert(max_commit_granularity > 0, "Granularity must be non-zero." ); |
679 | |
680 | _low_boundary = rs.base(); |
681 | _high_boundary = low_boundary() + rs.size(); |
682 | |
683 | _low = low_boundary(); |
684 | _high = low(); |
685 | |
686 | _special = rs.special(); |
687 | _executable = rs.executable(); |
688 | |
689 | // When a VirtualSpace begins life at a large size, make all future expansion |
690 | // and shrinking occur aligned to a granularity of large pages. This avoids |
691 | // fragmentation of physical addresses that inhibits the use of large pages |
692 | // by the OS virtual memory system. Empirically, we see that with a 4MB |
693 | // page size, the only spaces that get handled this way are codecache and |
694 | // the heap itself, both of which provide a substantial performance |
695 | // boost in many benchmarks when covered by large pages. |
696 | // |
697 | // No attempt is made to force large page alignment at the very top and |
698 | // bottom of the space if they are not aligned so already. |
699 | _lower_alignment = os::vm_page_size(); |
700 | _middle_alignment = max_commit_granularity; |
701 | _upper_alignment = os::vm_page_size(); |
702 | |
703 | // End of each region |
704 | _lower_high_boundary = align_up(low_boundary(), middle_alignment()); |
705 | _middle_high_boundary = align_down(high_boundary(), middle_alignment()); |
706 | _upper_high_boundary = high_boundary(); |
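  // For example, assuming 4K pages and a 2M commit granularity, a space spanning
  // [base, base + 10M) where base is 64K past a 2M boundary gets a ~1.9M lower
  // region up to the first 2M boundary, an 8M large-page-aligned middle region,
  // and a 64K upper region below high_boundary().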
707 | |
708 | // High address of each region |
709 | _lower_high = low_boundary(); |
710 | _middle_high = lower_high_boundary(); |
711 | _upper_high = middle_high_boundary(); |
712 | |
713 | // commit to initial size |
714 | if (committed_size > 0) { |
715 | if (!expand_by(committed_size)) { |
716 | return false; |
717 | } |
718 | } |
719 | return true; |
720 | } |
721 | |
722 | |
723 | VirtualSpace::~VirtualSpace() { |
724 | release(); |
725 | } |
726 | |
727 | |
728 | void VirtualSpace::release() { |
  // This does not release the memory that was reserved.
  // The caller must release it via rs.release();
731 | _low_boundary = NULL; |
732 | _high_boundary = NULL; |
733 | _low = NULL; |
734 | _high = NULL; |
735 | _lower_high = NULL; |
736 | _middle_high = NULL; |
737 | _upper_high = NULL; |
738 | _lower_high_boundary = NULL; |
739 | _middle_high_boundary = NULL; |
740 | _upper_high_boundary = NULL; |
741 | _lower_alignment = 0; |
742 | _middle_alignment = 0; |
743 | _upper_alignment = 0; |
744 | _special = false; |
745 | _executable = false; |
746 | } |
747 | |
748 | |
749 | size_t VirtualSpace::committed_size() const { |
750 | return pointer_delta(high(), low(), sizeof(char)); |
751 | } |
752 | |
753 | |
754 | size_t VirtualSpace::reserved_size() const { |
755 | return pointer_delta(high_boundary(), low_boundary(), sizeof(char)); |
756 | } |
757 | |
758 | |
759 | size_t VirtualSpace::uncommitted_size() const { |
760 | return reserved_size() - committed_size(); |
761 | } |
762 | |
763 | size_t VirtualSpace::actual_committed_size() const { |
764 | // Special VirtualSpaces commit all reserved space up front. |
765 | if (special()) { |
766 | return reserved_size(); |
767 | } |
768 | |
769 | size_t committed_low = pointer_delta(_lower_high, _low_boundary, sizeof(char)); |
770 | size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary, sizeof(char)); |
771 | size_t committed_high = pointer_delta(_upper_high, _middle_high_boundary, sizeof(char)); |
772 | |
773 | #ifdef ASSERT |
774 | size_t lower = pointer_delta(_lower_high_boundary, _low_boundary, sizeof(char)); |
775 | size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary, sizeof(char)); |
776 | size_t upper = pointer_delta(_upper_high_boundary, _middle_high_boundary, sizeof(char)); |
777 | |
778 | if (committed_high > 0) { |
779 | assert(committed_low == lower, "Must be" ); |
780 | assert(committed_middle == middle, "Must be" ); |
781 | } |
782 | |
783 | if (committed_middle > 0) { |
784 | assert(committed_low == lower, "Must be" ); |
785 | } |
786 | if (committed_middle < middle) { |
787 | assert(committed_high == 0, "Must be" ); |
788 | } |
789 | |
790 | if (committed_low < lower) { |
791 | assert(committed_high == 0, "Must be" ); |
792 | assert(committed_middle == 0, "Must be" ); |
793 | } |
794 | #endif |
795 | |
796 | return committed_low + committed_middle + committed_high; |
797 | } |
798 | |
799 | |
800 | bool VirtualSpace::contains(const void* p) const { |
801 | return low() <= (const char*) p && (const char*) p < high(); |
802 | } |
803 | |
804 | static void pretouch_expanded_memory(void* start, void* end) { |
805 | assert(is_aligned(start, os::vm_page_size()), "Unexpected alignment" ); |
806 | assert(is_aligned(end, os::vm_page_size()), "Unexpected alignment" ); |
807 | |
808 | os::pretouch_memory(start, end); |
809 | } |
810 | |
811 | static bool commit_expanded(char* start, size_t size, size_t alignment, bool pre_touch, bool executable) { |
812 | if (os::commit_memory(start, size, alignment, executable)) { |
813 | if (pre_touch || AlwaysPreTouch) { |
814 | pretouch_expanded_memory(start, start + size); |
815 | } |
816 | return true; |
817 | } |
818 | |
819 | debug_only(warning( |
820 | "INFO: os::commit_memory(" PTR_FORMAT ", " PTR_FORMAT |
821 | " size=" SIZE_FORMAT ", executable=%d) failed" , |
822 | p2i(start), p2i(start + size), size, executable);) |
823 | |
824 | return false; |
825 | } |
826 | |
/*
   First we need to determine if a particular virtual space is using large
   pages. This is done in the initialize function and only virtual spaces
   that are larger than LargePageSizeInBytes use large pages. Once we
   have determined this, all expand_by and shrink_by calls must grow and
   shrink by large page size chunks. If a particular request
   is within the current large page, the call to commit and uncommit memory
   can be ignored. In the case that the low and high boundaries of this
   space are not large page aligned, the pages leading up to the first large
   page address and the pages after the last large page address must be
   allocated with default pages.
*/
839 | bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) { |
840 | if (uncommitted_size() < bytes) { |
841 | return false; |
842 | } |
843 | |
844 | if (special()) { |
845 | // don't commit memory if the entire space is pinned in memory |
846 | _high += bytes; |
847 | return true; |
848 | } |
849 | |
850 | char* previous_high = high(); |
851 | char* unaligned_new_high = high() + bytes; |
852 | assert(unaligned_new_high <= high_boundary(), "cannot expand by more than upper boundary" ); |
853 | |
854 | // Calculate where the new high for each of the regions should be. If |
855 | // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned |
856 | // then the unaligned lower and upper new highs would be the |
857 | // lower_high() and upper_high() respectively. |
858 | char* unaligned_lower_new_high = MIN2(unaligned_new_high, lower_high_boundary()); |
859 | char* unaligned_middle_new_high = MIN2(unaligned_new_high, middle_high_boundary()); |
860 | char* unaligned_upper_new_high = MIN2(unaligned_new_high, upper_high_boundary()); |
861 | |
  // Align the new highs based on each region's alignment. The lower and upper
  // alignment will always be the default page size. The middle alignment will be
  // LargePageSizeInBytes if the actual size of the virtual space is in
  // fact larger than LargePageSizeInBytes.
866 | char* aligned_lower_new_high = align_up(unaligned_lower_new_high, lower_alignment()); |
867 | char* aligned_middle_new_high = align_up(unaligned_middle_new_high, middle_alignment()); |
868 | char* aligned_upper_new_high = align_up(unaligned_upper_new_high, upper_alignment()); |
869 | |
870 | // Determine which regions need to grow in this expand_by call. |
871 | // If you are growing in the lower region, high() must be in that |
872 | // region so calculate the size based on high(). For the middle and |
873 | // upper regions, determine the starting point of growth based on the |
874 | // location of high(). By getting the MAX of the region's low address |
875 | // (or the previous region's high address) and high(), we can tell if it |
876 | // is an intra or inter region growth. |
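  // For example, if high() is currently in the lower region and high() + bytes
  // lands in the middle region, both lower_needs and middle_needs end up non-zero
  // while upper_needs stays zero.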
877 | size_t lower_needs = 0; |
878 | if (aligned_lower_new_high > lower_high()) { |
879 | lower_needs = pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char)); |
880 | } |
881 | size_t middle_needs = 0; |
882 | if (aligned_middle_new_high > middle_high()) { |
883 | middle_needs = pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char)); |
884 | } |
885 | size_t upper_needs = 0; |
886 | if (aligned_upper_new_high > upper_high()) { |
887 | upper_needs = pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char)); |
888 | } |
889 | |
890 | // Check contiguity. |
891 | assert(low_boundary() <= lower_high() && lower_high() <= lower_high_boundary(), |
892 | "high address must be contained within the region" ); |
893 | assert(lower_high_boundary() <= middle_high() && middle_high() <= middle_high_boundary(), |
894 | "high address must be contained within the region" ); |
895 | assert(middle_high_boundary() <= upper_high() && upper_high() <= upper_high_boundary(), |
896 | "high address must be contained within the region" ); |
897 | |
898 | // Commit regions |
899 | if (lower_needs > 0) { |
900 | assert(lower_high() + lower_needs <= lower_high_boundary(), "must not expand beyond region" ); |
901 | if (!commit_expanded(lower_high(), lower_needs, _lower_alignment, pre_touch, _executable)) { |
902 | return false; |
903 | } |
904 | _lower_high += lower_needs; |
905 | } |
906 | |
907 | if (middle_needs > 0) { |
908 | assert(middle_high() + middle_needs <= middle_high_boundary(), "must not expand beyond region" ); |
909 | if (!commit_expanded(middle_high(), middle_needs, _middle_alignment, pre_touch, _executable)) { |
910 | return false; |
911 | } |
912 | _middle_high += middle_needs; |
913 | } |
914 | |
915 | if (upper_needs > 0) { |
916 | assert(upper_high() + upper_needs <= upper_high_boundary(), "must not expand beyond region" ); |
917 | if (!commit_expanded(upper_high(), upper_needs, _upper_alignment, pre_touch, _executable)) { |
918 | return false; |
919 | } |
920 | _upper_high += upper_needs; |
921 | } |
922 | |
923 | _high += bytes; |
924 | return true; |
925 | } |
926 | |
// A page is uncommitted if the contents of the entire page are deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary,
// at which point that particular page can be uncommitted.
930 | void VirtualSpace::shrink_by(size_t size) { |
931 | if (committed_size() < size) |
932 | fatal("Cannot shrink virtual space to negative size" ); |
933 | |
934 | if (special()) { |
935 | // don't uncommit if the entire space is pinned in memory |
936 | _high -= size; |
937 | return; |
938 | } |
939 | |
940 | char* unaligned_new_high = high() - size; |
941 | assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary" ); |
942 | |
943 | // Calculate new unaligned address |
944 | char* unaligned_upper_new_high = |
945 | MAX2(unaligned_new_high, middle_high_boundary()); |
946 | char* unaligned_middle_new_high = |
947 | MAX2(unaligned_new_high, lower_high_boundary()); |
948 | char* unaligned_lower_new_high = |
949 | MAX2(unaligned_new_high, low_boundary()); |
950 | |
951 | // Align address to region's alignment |
952 | char* aligned_upper_new_high = align_up(unaligned_upper_new_high, upper_alignment()); |
953 | char* aligned_middle_new_high = align_up(unaligned_middle_new_high, middle_alignment()); |
954 | char* aligned_lower_new_high = align_up(unaligned_lower_new_high, lower_alignment()); |
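  // Note that the new highs are aligned *up*, so a page that is still partially
  // in use at the new high() stays committed; only whole pages above it are
  // uncommitted.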
955 | |
956 | // Determine which regions need to shrink |
957 | size_t upper_needs = 0; |
958 | if (aligned_upper_new_high < upper_high()) { |
959 | upper_needs = |
960 | pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char)); |
961 | } |
962 | size_t middle_needs = 0; |
963 | if (aligned_middle_new_high < middle_high()) { |
964 | middle_needs = |
965 | pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char)); |
966 | } |
967 | size_t lower_needs = 0; |
968 | if (aligned_lower_new_high < lower_high()) { |
969 | lower_needs = |
970 | pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char)); |
971 | } |
972 | |
973 | // Check contiguity. |
974 | assert(middle_high_boundary() <= upper_high() && |
975 | upper_high() <= upper_high_boundary(), |
976 | "high address must be contained within the region" ); |
977 | assert(lower_high_boundary() <= middle_high() && |
978 | middle_high() <= middle_high_boundary(), |
979 | "high address must be contained within the region" ); |
980 | assert(low_boundary() <= lower_high() && |
981 | lower_high() <= lower_high_boundary(), |
982 | "high address must be contained within the region" ); |
983 | |
984 | // Uncommit |
985 | if (upper_needs > 0) { |
986 | assert(middle_high_boundary() <= aligned_upper_new_high && |
987 | aligned_upper_new_high + upper_needs <= upper_high_boundary(), |
988 | "must not shrink beyond region" ); |
989 | if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) { |
990 | debug_only(warning("os::uncommit_memory failed" )); |
991 | return; |
992 | } else { |
993 | _upper_high -= upper_needs; |
994 | } |
995 | } |
996 | if (middle_needs > 0) { |
997 | assert(lower_high_boundary() <= aligned_middle_new_high && |
998 | aligned_middle_new_high + middle_needs <= middle_high_boundary(), |
999 | "must not shrink beyond region" ); |
1000 | if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) { |
1001 | debug_only(warning("os::uncommit_memory failed" )); |
1002 | return; |
1003 | } else { |
1004 | _middle_high -= middle_needs; |
1005 | } |
1006 | } |
1007 | if (lower_needs > 0) { |
1008 | assert(low_boundary() <= aligned_lower_new_high && |
1009 | aligned_lower_new_high + lower_needs <= lower_high_boundary(), |
1010 | "must not shrink beyond region" ); |
1011 | if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) { |
1012 | debug_only(warning("os::uncommit_memory failed" )); |
1013 | return; |
1014 | } else { |
1015 | _lower_high -= lower_needs; |
1016 | } |
1017 | } |
1018 | |
1019 | _high -= size; |
1020 | } |
1021 | |
1022 | #ifndef PRODUCT |
1023 | void VirtualSpace::check_for_contiguity() { |
1024 | // Check contiguity. |
1025 | assert(low_boundary() <= lower_high() && |
1026 | lower_high() <= lower_high_boundary(), |
1027 | "high address must be contained within the region" ); |
1028 | assert(lower_high_boundary() <= middle_high() && |
1029 | middle_high() <= middle_high_boundary(), |
1030 | "high address must be contained within the region" ); |
1031 | assert(middle_high_boundary() <= upper_high() && |
1032 | upper_high() <= upper_high_boundary(), |
1033 | "high address must be contained within the region" ); |
1034 | assert(low() >= low_boundary(), "low" ); |
1035 | assert(low_boundary() <= lower_high_boundary(), "lower high boundary" ); |
1036 | assert(upper_high_boundary() <= high_boundary(), "upper high boundary" ); |
1037 | assert(high() <= upper_high(), "upper high" ); |
1038 | } |
1039 | |
1040 | void VirtualSpace::print_on(outputStream* out) { |
1041 | out->print ("Virtual space:" ); |
1042 | if (special()) out->print(" (pinned in memory)" ); |
1043 | out->cr(); |
1044 | out->print_cr(" - committed: " SIZE_FORMAT, committed_size()); |
1045 | out->print_cr(" - reserved: " SIZE_FORMAT, reserved_size()); |
1046 | out->print_cr(" - [low, high]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]" , p2i(low()), p2i(high())); |
1047 | out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]" , p2i(low_boundary()), p2i(high_boundary())); |
1048 | } |
1049 | |
1050 | void VirtualSpace::print() { |
1051 | print_on(tty); |
1052 | } |
1053 | |
1054 | /////////////// Unit tests /////////////// |
1055 | |
1056 | #ifndef PRODUCT |
1057 | |
1058 | class TestReservedSpace : AllStatic { |
1059 | public: |
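  // Touch one byte per small page to verify that the reserved (and, for special
  // mappings, committed) memory is actually writable.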
1060 | static void small_page_write(void* addr, size_t size) { |
1061 | size_t page_size = os::vm_page_size(); |
1062 | |
1063 | char* end = (char*)addr + size; |
1064 | for (char* p = (char*)addr; p < end; p += page_size) { |
1065 | *p = 1; |
1066 | } |
1067 | } |
1068 | |
1069 | static void release_memory_for_test(ReservedSpace rs) { |
1070 | if (rs.special()) { |
1071 | guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail" ); |
1072 | } else { |
1073 | guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail" ); |
1074 | } |
1075 | } |
1076 | |
1077 | static void test_reserved_space1(size_t size, size_t alignment) { |
1078 | assert(is_aligned(size, alignment), "Incorrect input parameters" ); |
1079 | |
1080 | ReservedSpace rs(size, // size |
1081 | alignment, // alignment |
1082 | UseLargePages, // large |
1083 | (char *)NULL); // requested_address |
1084 | |
1085 | assert(rs.base() != NULL, "Must be" ); |
1086 | assert(rs.size() == size, "Must be" ); |
1087 | |
1088 | assert(is_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses" ); |
1089 | assert(is_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses" ); |
1090 | |
1091 | if (rs.special()) { |
1092 | small_page_write(rs.base(), size); |
1093 | } |
1094 | |
1095 | release_memory_for_test(rs); |
1096 | } |
1097 | |
1098 | static void test_reserved_space2(size_t size) { |
1099 | assert(is_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned" ); |
1100 | |
1101 | ReservedSpace rs(size); |
1102 | |
1103 | assert(rs.base() != NULL, "Must be" ); |
1104 | assert(rs.size() == size, "Must be" ); |
1105 | |
1106 | if (rs.special()) { |
1107 | small_page_write(rs.base(), size); |
1108 | } |
1109 | |
1110 | release_memory_for_test(rs); |
1111 | } |
1112 | |
1113 | static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) { |
1114 | if (size < alignment) { |
1115 | // Tests might set -XX:LargePageSizeInBytes=<small pages> and cause unexpected input arguments for this test. |
1116 | assert((size_t)os::vm_page_size() == os::large_page_size(), "Test needs further refinement" ); |
1117 | return; |
1118 | } |
1119 | |
1120 | assert(is_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned" ); |
1121 | assert(is_aligned(size, alignment), "Must be at least aligned against alignment" ); |
1122 | |
1123 | bool large = maybe_large && UseLargePages && size >= os::large_page_size(); |
1124 | |
1125 | ReservedSpace rs(size, alignment, large, false); |
1126 | |
1127 | assert(rs.base() != NULL, "Must be" ); |
1128 | assert(rs.size() == size, "Must be" ); |
1129 | |
1130 | if (rs.special()) { |
1131 | small_page_write(rs.base(), size); |
1132 | } |
1133 | |
1134 | release_memory_for_test(rs); |
1135 | } |
1136 | |
1137 | |
1138 | static void test_reserved_space1() { |
1139 | size_t size = 2 * 1024 * 1024; |
1140 | size_t ag = os::vm_allocation_granularity(); |
1141 | |
1142 | test_reserved_space1(size, ag); |
1143 | test_reserved_space1(size * 2, ag); |
1144 | test_reserved_space1(size * 10, ag); |
1145 | } |
1146 | |
1147 | static void test_reserved_space2() { |
1148 | size_t size = 2 * 1024 * 1024; |
1149 | size_t ag = os::vm_allocation_granularity(); |
1150 | |
1151 | test_reserved_space2(size * 1); |
1152 | test_reserved_space2(size * 2); |
1153 | test_reserved_space2(size * 10); |
1154 | test_reserved_space2(ag); |
1155 | test_reserved_space2(size - ag); |
1156 | test_reserved_space2(size); |
1157 | test_reserved_space2(size + ag); |
1158 | test_reserved_space2(size * 2); |
1159 | test_reserved_space2(size * 2 - ag); |
1160 | test_reserved_space2(size * 2 + ag); |
1161 | test_reserved_space2(size * 3); |
1162 | test_reserved_space2(size * 3 - ag); |
1163 | test_reserved_space2(size * 3 + ag); |
1164 | test_reserved_space2(size * 10); |
1165 | test_reserved_space2(size * 10 + size / 2); |
1166 | } |
1167 | |
1168 | static void test_reserved_space3() { |
1169 | size_t ag = os::vm_allocation_granularity(); |
1170 | |
1171 | test_reserved_space3(ag, ag , false); |
1172 | test_reserved_space3(ag * 2, ag , false); |
1173 | test_reserved_space3(ag * 3, ag , false); |
1174 | test_reserved_space3(ag * 2, ag * 2, false); |
1175 | test_reserved_space3(ag * 4, ag * 2, false); |
1176 | test_reserved_space3(ag * 8, ag * 2, false); |
1177 | test_reserved_space3(ag * 4, ag * 4, false); |
1178 | test_reserved_space3(ag * 8, ag * 4, false); |
1179 | test_reserved_space3(ag * 16, ag * 4, false); |
1180 | |
1181 | if (UseLargePages) { |
1182 | size_t lp = os::large_page_size(); |
1183 | |
1184 | // Without large pages |
1185 | test_reserved_space3(lp, ag * 4, false); |
1186 | test_reserved_space3(lp * 2, ag * 4, false); |
1187 | test_reserved_space3(lp * 4, ag * 4, false); |
1188 | test_reserved_space3(lp, lp , false); |
1189 | test_reserved_space3(lp * 2, lp , false); |
1190 | test_reserved_space3(lp * 3, lp , false); |
1191 | test_reserved_space3(lp * 2, lp * 2, false); |
1192 | test_reserved_space3(lp * 4, lp * 2, false); |
1193 | test_reserved_space3(lp * 8, lp * 2, false); |
1194 | |
1195 | // With large pages |
1196 | test_reserved_space3(lp, ag * 4 , true); |
1197 | test_reserved_space3(lp * 2, ag * 4, true); |
1198 | test_reserved_space3(lp * 4, ag * 4, true); |
1199 | test_reserved_space3(lp, lp , true); |
1200 | test_reserved_space3(lp * 2, lp , true); |
1201 | test_reserved_space3(lp * 3, lp , true); |
1202 | test_reserved_space3(lp * 2, lp * 2, true); |
1203 | test_reserved_space3(lp * 4, lp * 2, true); |
1204 | test_reserved_space3(lp * 8, lp * 2, true); |
1205 | } |
1206 | } |
1207 | |
1208 | static void test_reserved_space() { |
1209 | test_reserved_space1(); |
1210 | test_reserved_space2(); |
1211 | test_reserved_space3(); |
1212 | } |
1213 | }; |
1214 | |
1215 | void TestReservedSpace_test() { |
1216 | TestReservedSpace::test_reserved_space(); |
1217 | } |
1218 | |
1219 | #define assert_equals(actual, expected) \ |
1220 | assert(actual == expected, \ |
1221 | "Got " SIZE_FORMAT " expected " \ |
1222 | SIZE_FORMAT, actual, expected); |
1223 | |
1224 | #define assert_ge(value1, value2) \ |
1225 | assert(value1 >= value2, \ |
1226 | "'" #value1 "': " SIZE_FORMAT " '" \ |
1227 | #value2 "': " SIZE_FORMAT, value1, value2); |
1228 | |
1229 | #define assert_lt(value1, value2) \ |
1230 | assert(value1 < value2, \ |
1231 | "'" #value1 "': " SIZE_FORMAT " '" \ |
1232 | #value2 "': " SIZE_FORMAT, value1, value2); |
1233 | |
1234 | |
1235 | class TestVirtualSpace : AllStatic { |
1236 | enum TestLargePages { |
1237 | Default, |
1238 | Disable, |
1239 | Reserve, |
1240 | Commit |
1241 | }; |
1242 | |
1243 | static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) { |
1244 | switch(mode) { |
1245 | default: |
1246 | case Default: |
1247 | case Reserve: |
1248 | return ReservedSpace(reserve_size_aligned); |
1249 | case Disable: |
1250 | case Commit: |
1251 | return ReservedSpace(reserve_size_aligned, |
1252 | os::vm_allocation_granularity(), |
1253 | /* large */ false, /* exec */ false); |
1254 | } |
1255 | } |
1256 | |
1257 | static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) { |
1258 | switch(mode) { |
1259 | default: |
1260 | case Default: |
1261 | case Reserve: |
1262 | return vs.initialize(rs, 0); |
1263 | case Disable: |
1264 | return vs.initialize_with_granularity(rs, 0, os::vm_page_size()); |
1265 | case Commit: |
1266 | return vs.initialize_with_granularity(rs, 0, os::page_size_for_region_unaligned(rs.size(), 1)); |
1267 | } |
1268 | } |
1269 | |
1270 | public: |
1271 | static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size, |
1272 | TestLargePages mode = Default) { |
1273 | size_t granularity = os::vm_allocation_granularity(); |
1274 | size_t reserve_size_aligned = align_up(reserve_size, granularity); |
1275 | |
1276 | ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode); |
1277 | |
1278 | assert(reserved.is_reserved(), "Must be" ); |
1279 | |
1280 | VirtualSpace vs; |
1281 | bool initialized = initialize_virtual_space(vs, reserved, mode); |
1282 | assert(initialized, "Failed to initialize VirtualSpace" ); |
1283 | |
1284 | vs.expand_by(commit_size, false); |
1285 | |
1286 | if (vs.special()) { |
1287 | assert_equals(vs.actual_committed_size(), reserve_size_aligned); |
1288 | } else { |
1289 | assert_ge(vs.actual_committed_size(), commit_size); |
1290 | // Approximate the commit granularity. |
1291 | // Make sure that we don't commit using large pages |
      // if large pages have been disabled for this VirtualSpace.
1293 | size_t commit_granularity = (mode == Disable || !UseLargePages) ? |
1294 | os::vm_page_size() : os::large_page_size(); |
1295 | assert_lt(vs.actual_committed_size(), commit_size + commit_granularity); |
1296 | } |
1297 | |
1298 | reserved.release(); |
1299 | } |
1300 | |
1301 | static void test_virtual_space_actual_committed_space_one_large_page() { |
1302 | if (!UseLargePages) { |
1303 | return; |
1304 | } |
1305 | |
1306 | size_t large_page_size = os::large_page_size(); |
1307 | |
1308 | ReservedSpace reserved(large_page_size, large_page_size, true, false); |
1309 | |
1310 | assert(reserved.is_reserved(), "Must be" ); |
1311 | |
1312 | VirtualSpace vs; |
1313 | bool initialized = vs.initialize(reserved, 0); |
1314 | assert(initialized, "Failed to initialize VirtualSpace" ); |
1315 | |
1316 | vs.expand_by(large_page_size, false); |
1317 | |
1318 | assert_equals(vs.actual_committed_size(), large_page_size); |
1319 | |
1320 | reserved.release(); |
1321 | } |
1322 | |
1323 | static void test_virtual_space_actual_committed_space() { |
1324 | test_virtual_space_actual_committed_space(4 * K, 0); |
1325 | test_virtual_space_actual_committed_space(4 * K, 4 * K); |
1326 | test_virtual_space_actual_committed_space(8 * K, 0); |
1327 | test_virtual_space_actual_committed_space(8 * K, 4 * K); |
1328 | test_virtual_space_actual_committed_space(8 * K, 8 * K); |
1329 | test_virtual_space_actual_committed_space(12 * K, 0); |
1330 | test_virtual_space_actual_committed_space(12 * K, 4 * K); |
1331 | test_virtual_space_actual_committed_space(12 * K, 8 * K); |
1332 | test_virtual_space_actual_committed_space(12 * K, 12 * K); |
1333 | test_virtual_space_actual_committed_space(64 * K, 0); |
1334 | test_virtual_space_actual_committed_space(64 * K, 32 * K); |
1335 | test_virtual_space_actual_committed_space(64 * K, 64 * K); |
1336 | test_virtual_space_actual_committed_space(2 * M, 0); |
1337 | test_virtual_space_actual_committed_space(2 * M, 4 * K); |
1338 | test_virtual_space_actual_committed_space(2 * M, 64 * K); |
1339 | test_virtual_space_actual_committed_space(2 * M, 1 * M); |
1340 | test_virtual_space_actual_committed_space(2 * M, 2 * M); |
1341 | test_virtual_space_actual_committed_space(10 * M, 0); |
1342 | test_virtual_space_actual_committed_space(10 * M, 4 * K); |
1343 | test_virtual_space_actual_committed_space(10 * M, 8 * K); |
1344 | test_virtual_space_actual_committed_space(10 * M, 1 * M); |
1345 | test_virtual_space_actual_committed_space(10 * M, 2 * M); |
1346 | test_virtual_space_actual_committed_space(10 * M, 5 * M); |
1347 | test_virtual_space_actual_committed_space(10 * M, 10 * M); |
1348 | } |
1349 | |
1350 | static void test_virtual_space_disable_large_pages() { |
1351 | if (!UseLargePages) { |
1352 | return; |
1353 | } |
    // These test cases verify the commit behavior when we force VirtualSpace to disable large pages.
1355 | test_virtual_space_actual_committed_space(10 * M, 0, Disable); |
1356 | test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable); |
1357 | test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable); |
1358 | test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable); |
1359 | test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable); |
1360 | test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable); |
1361 | test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable); |
1362 | |
1363 | test_virtual_space_actual_committed_space(10 * M, 0, Reserve); |
1364 | test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve); |
1365 | test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve); |
1366 | test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve); |
1367 | test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve); |
1368 | test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve); |
1369 | test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve); |
1370 | |
1371 | test_virtual_space_actual_committed_space(10 * M, 0, Commit); |
1372 | test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit); |
1373 | test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit); |
1374 | test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit); |
1375 | test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit); |
1376 | test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit); |
1377 | test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit); |
1378 | } |
1379 | |
1380 | static void test_virtual_space() { |
1381 | test_virtual_space_actual_committed_space(); |
1382 | test_virtual_space_actual_committed_space_one_large_page(); |
1383 | test_virtual_space_disable_large_pages(); |
1384 | } |
1385 | }; |
1386 | |
1387 | void TestVirtualSpace_test() { |
1388 | TestVirtualSpace::test_virtual_space(); |
1389 | } |
1390 | |
1391 | #endif // PRODUCT |
1392 | |
1393 | #endif |
1394 | |