1 | /* |
2 | * Copyright (c) 2006, 2018, Oracle and/or its affiliates. All rights reserved. |
3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 | * |
5 | * This code is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 only, as |
7 | * published by the Free Software Foundation. |
8 | * |
9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
12 | * version 2 for more details (a copy is included in the LICENSE file that |
13 | * accompanied this code). |
14 | * |
15 | * You should have received a copy of the GNU General Public License version |
16 | * 2 along with this work; if not, write to the Free Software Foundation, |
17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
18 | * |
19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
20 | * or visit www.oracle.com if you need additional information or have any |
21 | * questions. |
22 | * |
23 | */ |
24 | |
25 | #include "precompiled.hpp" |
26 | #include "gc/parallel/mutableNUMASpace.hpp" |
27 | #include "gc/shared/collectedHeap.hpp" |
28 | #include "gc/shared/spaceDecorator.hpp" |
29 | #include "memory/allocation.inline.hpp" |
30 | #include "oops/oop.inline.hpp" |
31 | #include "runtime/atomic.hpp" |
32 | #include "runtime/thread.inline.hpp" |
33 | #include "runtime/threadSMR.hpp" |
34 | #include "utilities/align.hpp" |
35 | |
36 | MutableNUMASpace::MutableNUMASpace(size_t alignment) : MutableSpace(alignment), _must_use_large_pages(false) { |
37 | _lgrp_spaces = new (ResourceObj::C_HEAP, mtGC) GrowableArray<LGRPSpace*>(0, true); |
38 | _page_size = os::vm_page_size(); |
39 | _adaptation_cycles = 0; |
40 | _samples_count = 0; |
41 | |
42 | #ifdef LINUX |
43 | // Changing the page size can lead to freeing of memory. When using large pages |
44 | // and the memory has been both reserved and committed, Linux does not support |
45 | // freeing parts of it. |
46 | if (UseLargePages && !os::can_commit_large_page_memory()) { |
47 | _must_use_large_pages = true; |
48 | } |
49 | #endif // LINUX |
50 | |
51 | update_layout(true); |
52 | } |
53 | |
54 | MutableNUMASpace::~MutableNUMASpace() { |
55 | for (int i = 0; i < lgrp_spaces()->length(); i++) { |
56 | delete lgrp_spaces()->at(i); |
57 | } |
58 | delete lgrp_spaces(); |
59 | } |
60 | |
61 | #ifndef PRODUCT |
62 | void MutableNUMASpace::mangle_unused_area() { |
63 | // This method should do nothing. |
64 | // It can be called on a numa space during a full compaction. |
65 | } |
66 | void MutableNUMASpace::mangle_unused_area_complete() { |
67 | // This method should do nothing. |
68 | // It can be called on a numa space during a full compaction. |
69 | } |
70 | void MutableNUMASpace::mangle_region(MemRegion mr) { |
71 | // This method should do nothing because numa spaces are not mangled. |
72 | } |
73 | void MutableNUMASpace::set_top_for_allocations(HeapWord* v) { |
74 | assert(false, "Do not mangle MutableNUMASpace's" ); |
75 | } |
76 | void MutableNUMASpace::set_top_for_allocations() { |
77 | // This method should do nothing. |
78 | } |
79 | void MutableNUMASpace::check_mangled_unused_area(HeapWord* limit) { |
80 | // This method should do nothing. |
81 | } |
82 | void MutableNUMASpace::check_mangled_unused_area_complete() { |
83 | // This method should do nothing. |
84 | } |
85 | #endif // NOT_PRODUCT |
86 | |
87 | // There may be unallocated holes in the middle chunks |
88 | // that should be filled with dead objects to ensure parsability. |
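// The free tail of each such chunk (between its top() and end()) is filled
// with one or more filler objects; a single filler array is capped at
// CollectedHeap::filler_array_max_size() words, so larger gaps take a
// sequence of fillers.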
89 | void MutableNUMASpace::ensure_parsability() { |
90 | for (int i = 0; i < lgrp_spaces()->length(); i++) { |
91 | LGRPSpace *ls = lgrp_spaces()->at(i); |
92 | MutableSpace *s = ls->space(); |
93 | if (s->top() < top()) { // For all spaces preceding the one containing top() |
94 | if (s->free_in_words() > 0) { |
95 | HeapWord* cur_top = s->top(); |
        size_t words_left_to_fill = pointer_delta(s->end(), s->top());
97 | while (words_left_to_fill > 0) { |
98 | size_t words_to_fill = MIN2(words_left_to_fill, CollectedHeap::filler_array_max_size()); |
99 | assert(words_to_fill >= CollectedHeap::min_fill_size(), |
100 | "Remaining size (" SIZE_FORMAT ") is too small to fill (based on " SIZE_FORMAT " and " SIZE_FORMAT ")" , |
101 | words_to_fill, words_left_to_fill, CollectedHeap::filler_array_max_size()); |
102 | CollectedHeap::fill_with_object(cur_top, words_to_fill); |
103 | if (!os::numa_has_static_binding()) { |
104 | size_t touched_words = words_to_fill; |
105 | #ifndef ASSERT |
106 | if (!ZapUnusedHeapArea) { |
107 | touched_words = MIN2((size_t)align_object_size(typeArrayOopDesc::header_size(T_INT)), |
108 | touched_words); |
109 | } |
110 | #endif |
111 | MemRegion invalid; |
112 | HeapWord *crossing_start = align_up(cur_top, os::vm_page_size()); |
113 | HeapWord *crossing_end = align_down(cur_top + touched_words, os::vm_page_size()); |
114 | if (crossing_start != crossing_end) { |
              // If the object header crossed a small page boundary, we mark the
              // area as invalid, rounding it to page_size() boundaries.
117 | HeapWord *start = MAX2(align_down(cur_top, page_size()), s->bottom()); |
118 | HeapWord *end = MIN2(align_up(cur_top + touched_words, page_size()), s->end()); |
119 | invalid = MemRegion(start, end); |
120 | } |
121 | |
122 | ls->add_invalid_region(invalid); |
123 | } |
124 | cur_top += words_to_fill; |
125 | words_left_to_fill -= words_to_fill; |
126 | } |
127 | } |
128 | } else { |
129 | if (!os::numa_has_static_binding()) { |
130 | #ifdef ASSERT |
131 | MemRegion invalid(s->top(), s->end()); |
132 | ls->add_invalid_region(invalid); |
133 | #else |
134 | if (ZapUnusedHeapArea) { |
135 | MemRegion invalid(s->top(), s->end()); |
136 | ls->add_invalid_region(invalid); |
137 | } else { |
138 | return; |
139 | } |
140 | #endif |
141 | } else { |
142 | return; |
143 | } |
144 | } |
145 | } |
146 | } |
147 | |
148 | size_t MutableNUMASpace::used_in_words() const { |
149 | size_t s = 0; |
150 | for (int i = 0; i < lgrp_spaces()->length(); i++) { |
151 | s += lgrp_spaces()->at(i)->space()->used_in_words(); |
152 | } |
153 | return s; |
154 | } |
155 | |
156 | size_t MutableNUMASpace::free_in_words() const { |
157 | size_t s = 0; |
158 | for (int i = 0; i < lgrp_spaces()->length(); i++) { |
159 | s += lgrp_spaces()->at(i)->space()->free_in_words(); |
160 | } |
161 | return s; |
162 | } |
163 | |
164 | |
165 | size_t MutableNUMASpace::tlab_capacity(Thread *thr) const { |
  guarantee(thr != NULL, "No thread");
167 | int lgrp_id = thr->lgrp_id(); |
168 | if (lgrp_id == -1) { |
169 | // This case can occur after the topology of the system has |
    // changed. Threads can change their location; the new home
171 | // group will be determined during the first allocation |
172 | // attempt. For now we can safely assume that all spaces |
173 | // have equal size because the whole space will be reinitialized. |
174 | if (lgrp_spaces()->length() > 0) { |
175 | return capacity_in_bytes() / lgrp_spaces()->length(); |
176 | } else { |
177 | assert(false, "There should be at least one locality group" ); |
178 | return 0; |
179 | } |
180 | } |
181 | // That's the normal case, where we know the locality group of the thread. |
182 | int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals); |
183 | if (i == -1) { |
184 | return 0; |
185 | } |
186 | return lgrp_spaces()->at(i)->space()->capacity_in_bytes(); |
187 | } |
188 | |
189 | size_t MutableNUMASpace::tlab_used(Thread *thr) const { |
190 | // Please see the comments for tlab_capacity(). |
  guarantee(thr != NULL, "No thread");
192 | int lgrp_id = thr->lgrp_id(); |
193 | if (lgrp_id == -1) { |
194 | if (lgrp_spaces()->length() > 0) { |
      return used_in_bytes() / lgrp_spaces()->length();
196 | } else { |
      assert(false, "There should be at least one locality group");
198 | return 0; |
199 | } |
200 | } |
201 | int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals); |
202 | if (i == -1) { |
203 | return 0; |
204 | } |
205 | return lgrp_spaces()->at(i)->space()->used_in_bytes(); |
206 | } |
207 | |
208 | |
209 | size_t MutableNUMASpace::unsafe_max_tlab_alloc(Thread *thr) const { |
210 | // Please see the comments for tlab_capacity(). |
  guarantee(thr != NULL, "No thread");
212 | int lgrp_id = thr->lgrp_id(); |
213 | if (lgrp_id == -1) { |
214 | if (lgrp_spaces()->length() > 0) { |
215 | return free_in_bytes() / lgrp_spaces()->length(); |
216 | } else { |
      assert(false, "There should be at least one locality group");
218 | return 0; |
219 | } |
220 | } |
221 | int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals); |
222 | if (i == -1) { |
223 | return 0; |
224 | } |
225 | return lgrp_spaces()->at(i)->space()->free_in_bytes(); |
226 | } |
227 | |
228 | |
229 | size_t MutableNUMASpace::capacity_in_words(Thread* thr) const { |
  guarantee(thr != NULL, "No thread");
231 | int lgrp_id = thr->lgrp_id(); |
232 | if (lgrp_id == -1) { |
233 | if (lgrp_spaces()->length() > 0) { |
234 | return capacity_in_words() / lgrp_spaces()->length(); |
235 | } else { |
      assert(false, "There should be at least one locality group");
237 | return 0; |
238 | } |
239 | } |
240 | int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals); |
241 | if (i == -1) { |
242 | return 0; |
243 | } |
244 | return lgrp_spaces()->at(i)->space()->capacity_in_words(); |
245 | } |
246 | |
247 | // Check if the NUMA topology has changed. Add and remove spaces if needed. |
248 | // The update can be forced by setting the force parameter equal to true. |
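// Returns true if the layout was updated, in which case the caller is
// expected to reinitialize the per-lgrp spaces.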
249 | bool MutableNUMASpace::update_layout(bool force) { |
  // Check if the topology has changed.
251 | bool changed = os::numa_topology_changed(); |
252 | if (force || changed) { |
253 | // Compute lgrp intersection. Add/remove spaces. |
254 | int lgrp_limit = (int)os::numa_get_groups_num(); |
255 | int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit, mtGC); |
256 | int lgrp_num = (int)os::numa_get_leaf_groups(lgrp_ids, lgrp_limit); |
    assert(lgrp_num > 0, "There should be at least one locality group");
258 | // Add new spaces for the new nodes |
259 | for (int i = 0; i < lgrp_num; i++) { |
260 | bool found = false; |
261 | for (int j = 0; j < lgrp_spaces()->length(); j++) { |
262 | if (lgrp_spaces()->at(j)->lgrp_id() == lgrp_ids[i]) { |
263 | found = true; |
264 | break; |
265 | } |
266 | } |
267 | if (!found) { |
268 | lgrp_spaces()->append(new LGRPSpace(lgrp_ids[i], alignment())); |
269 | } |
270 | } |
271 | |
272 | // Remove spaces for the removed nodes. |
273 | for (int i = 0; i < lgrp_spaces()->length();) { |
274 | bool found = false; |
275 | for (int j = 0; j < lgrp_num; j++) { |
276 | if (lgrp_spaces()->at(i)->lgrp_id() == lgrp_ids[j]) { |
277 | found = true; |
278 | break; |
279 | } |
280 | } |
281 | if (!found) { |
282 | delete lgrp_spaces()->at(i); |
283 | lgrp_spaces()->remove_at(i); |
284 | } else { |
285 | i++; |
286 | } |
287 | } |
288 | |
289 | FREE_C_HEAP_ARRAY(int, lgrp_ids); |
290 | |
291 | if (changed) { |
292 | for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) { |
293 | thread->set_lgrp_id(-1); |
294 | } |
295 | } |
296 | return true; |
297 | } |
298 | return false; |
299 | } |
300 | |
301 | // Bias region towards the first-touching lgrp. Set the right page sizes. |
302 | void MutableNUMASpace::bias_region(MemRegion mr, int lgrp_id) { |
303 | HeapWord *start = align_up(mr.start(), page_size()); |
304 | HeapWord *end = align_down(mr.end(), page_size()); |
305 | if (end > start) { |
306 | MemRegion aligned_region(start, end); |
307 | assert((intptr_t)aligned_region.start() % page_size() == 0 && |
           (intptr_t)aligned_region.byte_size() % page_size() == 0, "Bad alignment");
    assert(region().contains(aligned_region), "Sanity");
310 | // First we tell the OS which page size we want in the given range. The underlying |
311 | // large page can be broken down if we require small pages. |
312 | os::realign_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size()); |
313 | // Then we uncommit the pages in the range. |
314 | os::free_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size()); |
315 | // And make them local/first-touch biased. |
316 | os::numa_make_local((char*)aligned_region.start(), aligned_region.byte_size(), lgrp_id); |
317 | } |
318 | } |
319 | |
320 | // Free all pages in the region. |
321 | void MutableNUMASpace::free_region(MemRegion mr) { |
322 | HeapWord *start = align_up(mr.start(), page_size()); |
323 | HeapWord *end = align_down(mr.end(), page_size()); |
324 | if (end > start) { |
325 | MemRegion aligned_region(start, end); |
326 | assert((intptr_t)aligned_region.start() % page_size() == 0 && |
           (intptr_t)aligned_region.byte_size() % page_size() == 0, "Bad alignment");
    assert(region().contains(aligned_region), "Sanity");
329 | os::free_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size()); |
330 | } |
331 | } |
332 | |
333 | // Update space layout. Perform adaptation. |
334 | void MutableNUMASpace::update() { |
335 | if (update_layout(false)) { |
336 | // If the topology has changed, make all chunks zero-sized. |
337 | // And clear the alloc-rate statistics. |
    // In the future we may want to handle this more gracefully in order
    // to avoid the reallocation of the pages as much as possible.
340 | for (int i = 0; i < lgrp_spaces()->length(); i++) { |
341 | LGRPSpace *ls = lgrp_spaces()->at(i); |
342 | MutableSpace *s = ls->space(); |
343 | s->set_end(s->bottom()); |
344 | s->set_top(s->bottom()); |
345 | ls->clear_alloc_rate(); |
346 | } |
347 | // A NUMA space is never mangled |
348 | initialize(region(), |
349 | SpaceDecorator::Clear, |
350 | SpaceDecorator::DontMangle); |
351 | } else { |
352 | bool should_initialize = false; |
353 | if (!os::numa_has_static_binding()) { |
354 | for (int i = 0; i < lgrp_spaces()->length(); i++) { |
355 | if (!lgrp_spaces()->at(i)->invalid_region().is_empty()) { |
356 | should_initialize = true; |
357 | break; |
358 | } |
359 | } |
360 | } |
361 | |
362 | if (should_initialize || |
363 | (UseAdaptiveNUMAChunkSizing && adaptation_cycles() < samples_count())) { |
364 | // A NUMA space is never mangled |
365 | initialize(region(), |
366 | SpaceDecorator::Clear, |
367 | SpaceDecorator::DontMangle); |
368 | } |
369 | } |
370 | |
371 | if (NUMAStats) { |
372 | for (int i = 0; i < lgrp_spaces()->length(); i++) { |
373 | lgrp_spaces()->at(i)->accumulate_statistics(page_size()); |
374 | } |
375 | } |
376 | |
377 | scan_pages(NUMAPageScanRate); |
378 | } |
379 | |
// Scan pages. Free pages that have a smaller size than expected or the wrong placement.
void MutableNUMASpace::scan_pages(size_t page_count) {
383 | size_t pages_per_chunk = page_count / lgrp_spaces()->length(); |
384 | if (pages_per_chunk > 0) { |
385 | for (int i = 0; i < lgrp_spaces()->length(); i++) { |
386 | LGRPSpace *ls = lgrp_spaces()->at(i); |
387 | ls->scan_pages(page_size(), pages_per_chunk); |
388 | } |
389 | } |
390 | } |
391 | |
392 | // Accumulate statistics about the allocation rate of each lgrp. |
393 | void MutableNUMASpace::accumulate_statistics() { |
394 | if (UseAdaptiveNUMAChunkSizing) { |
395 | for (int i = 0; i < lgrp_spaces()->length(); i++) { |
396 | lgrp_spaces()->at(i)->sample(); |
397 | } |
398 | increment_samples_count(); |
399 | } |
400 | |
401 | if (NUMAStats) { |
402 | for (int i = 0; i < lgrp_spaces()->length(); i++) { |
403 | lgrp_spaces()->at(i)->accumulate_statistics(page_size()); |
404 | } |
405 | } |
406 | } |
407 | |
408 | // Get the current size of a chunk. |
409 | // This function computes the size of the chunk based on the |
410 | // difference between chunk ends. This allows it to work correctly in |
411 | // case the whole space is resized and during the process of adaptive |
412 | // chunk resizing. |
413 | size_t MutableNUMASpace::current_chunk_size(int i) { |
414 | HeapWord *cur_end, *prev_end; |
415 | if (i == 0) { |
416 | prev_end = bottom(); |
417 | } else { |
418 | prev_end = lgrp_spaces()->at(i - 1)->space()->end(); |
419 | } |
420 | if (i == lgrp_spaces()->length() - 1) { |
421 | cur_end = end(); |
422 | } else { |
423 | cur_end = lgrp_spaces()->at(i)->space()->end(); |
424 | } |
425 | if (cur_end > prev_end) { |
426 | return pointer_delta(cur_end, prev_end, sizeof(char)); |
427 | } |
428 | return 0; |
429 | } |
430 | |
// Return the default chunk size by dividing the space equally.
// The result is page_size() aligned; note that base_space_size() is in pages.
433 | size_t MutableNUMASpace::default_chunk_size() { |
434 | return base_space_size() / lgrp_spaces()->length() * page_size(); |
435 | } |
436 | |
// Produce a new chunk size, page_size() aligned.
// This function is expected to be called on a sequence of i's from 0 to
439 | // lgrp_spaces()->length(). |
440 | size_t MutableNUMASpace::adaptive_chunk_size(int i, size_t limit) { |
441 | size_t pages_available = base_space_size(); |
442 | for (int j = 0; j < i; j++) { |
443 | pages_available -= align_down(current_chunk_size(j), page_size()) / page_size(); |
444 | } |
445 | pages_available -= lgrp_spaces()->length() - i - 1; |
  assert(pages_available > 0, "No pages left");
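  // Each remaining lgrp gets a share of the remaining pages proportional to
  // its allocation rate:
  //   chunk_size = rate(i) / sum(rate(i) .. rate(n-1)) * pages_available * page_size()
  // e.g. (hypothetical numbers) with 100 pages available and the two remaining
  // groups allocating at rates 30 and 10, the first gets roughly 75 pages.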
447 | float alloc_rate = 0; |
448 | for (int j = i; j < lgrp_spaces()->length(); j++) { |
449 | alloc_rate += lgrp_spaces()->at(j)->alloc_rate()->average(); |
450 | } |
451 | size_t chunk_size = 0; |
452 | if (alloc_rate > 0) { |
453 | LGRPSpace *ls = lgrp_spaces()->at(i); |
454 | chunk_size = (size_t)(ls->alloc_rate()->average() / alloc_rate * pages_available) * page_size(); |
455 | } |
456 | chunk_size = MAX2(chunk_size, page_size()); |
457 | |
458 | if (limit > 0) { |
459 | limit = align_down(limit, page_size()); |
460 | if (chunk_size > current_chunk_size(i)) { |
461 | size_t upper_bound = pages_available * page_size(); |
462 | if (upper_bound > limit && |
463 | current_chunk_size(i) < upper_bound - limit) { |
464 | // The resulting upper bound should not exceed the available |
465 | // amount of memory (pages_available * page_size()). |
466 | upper_bound = current_chunk_size(i) + limit; |
467 | } |
468 | chunk_size = MIN2(chunk_size, upper_bound); |
469 | } else { |
470 | size_t lower_bound = page_size(); |
471 | if (current_chunk_size(i) > limit) { // lower_bound shouldn't underflow. |
472 | lower_bound = current_chunk_size(i) - limit; |
473 | } |
474 | chunk_size = MAX2(chunk_size, lower_bound); |
475 | } |
476 | } |
  assert(chunk_size <= pages_available * page_size(), "Chunk size out of range");
478 | return chunk_size; |
479 | } |
480 | |
481 | |
// Return the bottom_region and the top_region. Align them to the page_size() boundary.
483 | // |------------------new_region---------------------------------| |
484 | // |----bottom_region--|---intersection---|------top_region------| |
485 | void MutableNUMASpace::select_tails(MemRegion new_region, MemRegion intersection, |
486 | MemRegion* bottom_region, MemRegion *top_region) { |
487 | // Is there bottom? |
488 | if (new_region.start() < intersection.start()) { // Yes |
489 | // Try to coalesce small pages into a large one. |
490 | if (UseLargePages && page_size() >= alignment()) { |
491 | HeapWord* p = align_up(intersection.start(), alignment()); |
492 | if (new_region.contains(p) |
493 | && pointer_delta(p, new_region.start(), sizeof(char)) >= alignment()) { |
494 | if (intersection.contains(p)) { |
495 | intersection = MemRegion(p, intersection.end()); |
496 | } else { |
497 | intersection = MemRegion(p, p); |
498 | } |
499 | } |
500 | } |
501 | *bottom_region = MemRegion(new_region.start(), intersection.start()); |
502 | } else { |
503 | *bottom_region = MemRegion(); |
504 | } |
505 | |
506 | // Is there top? |
507 | if (intersection.end() < new_region.end()) { // Yes |
508 | // Try to coalesce small pages into a large one. |
509 | if (UseLargePages && page_size() >= alignment()) { |
510 | HeapWord* p = align_down(intersection.end(), alignment()); |
511 | if (new_region.contains(p) |
512 | && pointer_delta(new_region.end(), p, sizeof(char)) >= alignment()) { |
513 | if (intersection.contains(p)) { |
514 | intersection = MemRegion(intersection.start(), p); |
515 | } else { |
516 | intersection = MemRegion(p, p); |
517 | } |
518 | } |
519 | } |
520 | *top_region = MemRegion(intersection.end(), new_region.end()); |
521 | } else { |
522 | *top_region = MemRegion(); |
523 | } |
524 | } |
525 | |
// Try to merge the invalid region with the bottom or top region by decreasing
// the intersection area. If the invalid region lies strictly inside the
// intersection, shrink the intersection around it and return the invalid
// region non-empty, aligned to the page_size() boundary, so the caller can
// free it.
530 | // |------------------new_region---------------------------------| |
531 | // |----------------|-------invalid---|--------------------------| |
532 | // |----bottom_region--|---intersection---|------top_region------| |
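// Four cases are handled in order: the invalid region overlaps the head of
// the intersection, overlaps its tail, covers it entirely, or lies strictly
// inside it. Only the last case leaves invalid_region non-empty.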
533 | void MutableNUMASpace::merge_regions(MemRegion new_region, MemRegion* intersection, |
534 | MemRegion *invalid_region) { |
535 | if (intersection->start() >= invalid_region->start() && intersection->contains(invalid_region->end())) { |
536 | *intersection = MemRegion(invalid_region->end(), intersection->end()); |
537 | *invalid_region = MemRegion(); |
538 | } else |
539 | if (intersection->end() <= invalid_region->end() && intersection->contains(invalid_region->start())) { |
540 | *intersection = MemRegion(intersection->start(), invalid_region->start()); |
541 | *invalid_region = MemRegion(); |
542 | } else |
543 | if (intersection->equals(*invalid_region) || invalid_region->contains(*intersection)) { |
544 | *intersection = MemRegion(new_region.start(), new_region.start()); |
545 | *invalid_region = MemRegion(); |
546 | } else |
        if (intersection->contains(*invalid_region)) {
            // That's the only case in which we have to make an additional bias_region() call.
549 | HeapWord* start = invalid_region->start(); |
550 | HeapWord* end = invalid_region->end(); |
551 | if (UseLargePages && page_size() >= alignment()) { |
552 | HeapWord *p = align_down(start, alignment()); |
553 | if (new_region.contains(p)) { |
554 | start = p; |
555 | } |
556 | p = align_up(end, alignment()); |
              if (new_region.contains(p)) {
558 | end = p; |
559 | } |
560 | } |
561 | if (intersection->start() > start) { |
562 | *intersection = MemRegion(start, intersection->end()); |
563 | } |
564 | if (intersection->end() < end) { |
565 | *intersection = MemRegion(intersection->start(), end); |
566 | } |
567 | *invalid_region = MemRegion(start, end); |
568 | } |
569 | } |
570 | |
571 | void MutableNUMASpace::initialize(MemRegion mr, |
572 | bool clear_space, |
573 | bool mangle_space, |
574 | bool setup_pages) { |
575 | assert(clear_space, "Reallocation will destroy data!" ); |
576 | assert(lgrp_spaces()->length() > 0, "There should be at least one space" ); |
577 | |
578 | MemRegion old_region = region(), new_region; |
579 | set_bottom(mr.start()); |
580 | set_end(mr.end()); |
581 | // Must always clear the space |
582 | clear(SpaceDecorator::DontMangle); |
583 | |
584 | // Compute chunk sizes |
585 | size_t prev_page_size = page_size(); |
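  // With UseLargePages the space is laid out with alignment() granularity so
  // that each chunk can be backed by large pages; otherwise small pages are used.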
586 | set_page_size(UseLargePages ? alignment() : os::vm_page_size()); |
587 | HeapWord* rounded_bottom = align_up(bottom(), page_size()); |
588 | HeapWord* rounded_end = align_down(end(), page_size()); |
589 | size_t base_space_size_pages = pointer_delta(rounded_end, rounded_bottom, sizeof(char)) / page_size(); |
590 | |
591 | // Try small pages if the chunk size is too small |
592 | if (base_space_size_pages / lgrp_spaces()->length() == 0 |
593 | && page_size() > (size_t)os::vm_page_size()) { |
    // Changing the page size below can lead to freeing of memory, so we fail initialization.
    if (_must_use_large_pages) {
      vm_exit_during_initialization("Failed initializing NUMA with large pages. Too small heap size");
597 | } |
598 | set_page_size(os::vm_page_size()); |
599 | rounded_bottom = align_up(bottom(), page_size()); |
600 | rounded_end = align_down(end(), page_size()); |
601 | base_space_size_pages = pointer_delta(rounded_end, rounded_bottom, sizeof(char)) / page_size(); |
602 | } |
  guarantee(base_space_size_pages / lgrp_spaces()->length() > 0, "Space too small");
604 | set_base_space_size(base_space_size_pages); |
605 | |
606 | // Handle space resize |
607 | MemRegion top_region, bottom_region; |
608 | if (!old_region.equals(region())) { |
609 | new_region = MemRegion(rounded_bottom, rounded_end); |
610 | MemRegion intersection = new_region.intersection(old_region); |
611 | if (intersection.start() == NULL || |
612 | intersection.end() == NULL || |
613 | prev_page_size > page_size()) { // If the page size got smaller we have to change |
614 | // the page size preference for the whole space. |
615 | intersection = MemRegion(new_region.start(), new_region.start()); |
616 | } |
617 | select_tails(new_region, intersection, &bottom_region, &top_region); |
618 | bias_region(bottom_region, lgrp_spaces()->at(0)->lgrp_id()); |
619 | bias_region(top_region, lgrp_spaces()->at(lgrp_spaces()->length() - 1)->lgrp_id()); |
620 | } |
621 | |
  // Check if the space layout has changed significantly.
623 | // This happens when the space has been resized so that either head or tail |
624 | // chunk became less than a page. |
625 | bool layout_valid = UseAdaptiveNUMAChunkSizing && |
626 | current_chunk_size(0) > page_size() && |
627 | current_chunk_size(lgrp_spaces()->length() - 1) > page_size(); |
628 | |
629 | |
630 | for (int i = 0; i < lgrp_spaces()->length(); i++) { |
631 | LGRPSpace *ls = lgrp_spaces()->at(i); |
632 | MutableSpace *s = ls->space(); |
633 | old_region = s->region(); |
634 | |
635 | size_t chunk_byte_size = 0, old_chunk_byte_size = 0; |
636 | if (i < lgrp_spaces()->length() - 1) { |
637 | if (!UseAdaptiveNUMAChunkSizing || |
638 | (UseAdaptiveNUMAChunkSizing && NUMAChunkResizeWeight == 0) || |
639 | samples_count() < AdaptiveSizePolicyReadyThreshold) { |
640 | // No adaptation. Divide the space equally. |
641 | chunk_byte_size = default_chunk_size(); |
642 | } else |
643 | if (!layout_valid || NUMASpaceResizeRate == 0) { |
644 | // Fast adaptation. If no space resize rate is set, resize |
645 | // the chunks instantly. |
646 | chunk_byte_size = adaptive_chunk_size(i, 0); |
647 | } else { |
648 | // Slow adaptation. Resize the chunks moving no more than |
649 | // NUMASpaceResizeRate bytes per collection. |
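        // The limit for chunk i is proportional to i + 1; since
        // sum(i + 1) for i = 0 .. n-1 equals n * (n + 1) / 2, the per-chunk
        // limits add up to at most NUMASpaceResizeRate bytes in total.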
650 | size_t limit = NUMASpaceResizeRate / |
651 | (lgrp_spaces()->length() * (lgrp_spaces()->length() + 1) / 2); |
652 | chunk_byte_size = adaptive_chunk_size(i, MAX2(limit * (i + 1), page_size())); |
653 | } |
654 | |
      assert(chunk_byte_size >= page_size(), "Chunk size too small");
      assert(chunk_byte_size <= capacity_in_bytes(), "Sanity check");
657 | } |
658 | |
659 | if (i == 0) { // Bottom chunk |
660 | if (i != lgrp_spaces()->length() - 1) { |
661 | new_region = MemRegion(bottom(), rounded_bottom + (chunk_byte_size >> LogHeapWordSize)); |
662 | } else { |
663 | new_region = MemRegion(bottom(), end()); |
664 | } |
665 | } else |
666 | if (i < lgrp_spaces()->length() - 1) { // Middle chunks |
667 | MutableSpace *ps = lgrp_spaces()->at(i - 1)->space(); |
668 | new_region = MemRegion(ps->end(), |
669 | ps->end() + (chunk_byte_size >> LogHeapWordSize)); |
670 | } else { // Top chunk |
671 | MutableSpace *ps = lgrp_spaces()->at(i - 1)->space(); |
672 | new_region = MemRegion(ps->end(), end()); |
673 | } |
    guarantee(region().contains(new_region), "Region invariant");
675 | |
676 | |
677 | // The general case: |
678 | // |---------------------|--invalid---|--------------------------| |
679 | // |------------------new_region---------------------------------| |
680 | // |----bottom_region--|---intersection---|------top_region------| |
681 | // |----old_region----| |
    // The intersection part has all pages in place; we don't need to migrate them.
683 | // Pages for the top and bottom part should be freed and then reallocated. |
684 | |
685 | MemRegion intersection = old_region.intersection(new_region); |
686 | |
687 | if (intersection.start() == NULL || intersection.end() == NULL) { |
688 | intersection = MemRegion(new_region.start(), new_region.start()); |
689 | } |
690 | |
691 | if (!os::numa_has_static_binding()) { |
692 | MemRegion invalid_region = ls->invalid_region().intersection(new_region); |
      // The invalid region is a range of memory that could have been allocated
      // on another node. That's relevant only on Solaris, where there is no
      // static memory binding.
696 | if (!invalid_region.is_empty()) { |
697 | merge_regions(new_region, &intersection, &invalid_region); |
698 | free_region(invalid_region); |
699 | ls->set_invalid_region(MemRegion()); |
700 | } |
701 | } |
702 | |
703 | select_tails(new_region, intersection, &bottom_region, &top_region); |
704 | |
705 | if (!os::numa_has_static_binding()) { |
706 | // If that's a system with the first-touch policy then it's enough |
707 | // to free the pages. |
708 | free_region(bottom_region); |
709 | free_region(top_region); |
710 | } else { |
711 | // In a system with static binding we have to change the bias whenever |
712 | // we reshape the heap. |
713 | bias_region(bottom_region, ls->lgrp_id()); |
714 | bias_region(top_region, ls->lgrp_id()); |
715 | } |
716 | |
717 | // Clear space (set top = bottom) but never mangle. |
718 | s->initialize(new_region, SpaceDecorator::Clear, SpaceDecorator::DontMangle, MutableSpace::DontSetupPages); |
719 | |
720 | set_adaptation_cycles(samples_count()); |
721 | } |
722 | } |
723 | |
724 | // Set the top of the whole space. |
// Mark the holes in chunks below the top() as invalid.
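// Chunks entirely below the new top get their top set to their end, the chunk
// containing the value gets its top set to it, and all chunks above it are
// emptied (top == bottom).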
726 | void MutableNUMASpace::set_top(HeapWord* value) { |
727 | bool found_top = false; |
728 | for (int i = 0; i < lgrp_spaces()->length();) { |
729 | LGRPSpace *ls = lgrp_spaces()->at(i); |
730 | MutableSpace *s = ls->space(); |
731 | HeapWord *top = MAX2(align_down(s->top(), page_size()), s->bottom()); |
732 | |
733 | if (s->contains(value)) { |
      // Check if setting the chunk's top to the given value would create a hole
      // smaller than a minimal object; for the last chunk we don't care.
736 | if (i < lgrp_spaces()->length() - 1) { |
737 | size_t remainder = pointer_delta(s->end(), value); |
738 | const size_t min_fill_size = CollectedHeap::min_fill_size(); |
739 | if (remainder < min_fill_size && remainder > 0) { |
740 | // Add a minimum size filler object; it will cross the chunk boundary. |
741 | CollectedHeap::fill_with_object(value, min_fill_size); |
742 | value += min_fill_size; |
          assert(!s->contains(value), "Should be in the next chunk");
744 | // Restart the loop from the same chunk, since the value has moved |
745 | // to the next one. |
746 | continue; |
747 | } |
748 | } |
749 | |
750 | if (!os::numa_has_static_binding() && top < value && top < s->end()) { |
751 | ls->add_invalid_region(MemRegion(top, value)); |
752 | } |
753 | s->set_top(value); |
754 | found_top = true; |
755 | } else { |
756 | if (found_top) { |
757 | s->set_top(s->bottom()); |
758 | } else { |
759 | if (!os::numa_has_static_binding() && top < s->end()) { |
760 | ls->add_invalid_region(MemRegion(top, s->end())); |
761 | } |
762 | s->set_top(s->end()); |
763 | } |
764 | } |
765 | i++; |
766 | } |
767 | MutableSpace::set_top(value); |
768 | } |
769 | |
770 | void MutableNUMASpace::clear(bool mangle_space) { |
771 | MutableSpace::set_top(bottom()); |
772 | for (int i = 0; i < lgrp_spaces()->length(); i++) { |
773 | // Never mangle NUMA spaces because the mangling will |
774 | // bind the memory to a possibly unwanted lgroup. |
775 | lgrp_spaces()->at(i)->space()->clear(SpaceDecorator::DontMangle); |
776 | } |
777 | } |
778 | |
/*
   Linux supports static memory binding, so most of the logic dealing with
   possibly invalid page allocation is effectively disabled. Besides, there is
   no notion of a home node in Linux: a thread is allowed to migrate freely,
   although the scheduler is rather reluctant to move threads between nodes.
   We check for the current node on every allocation, and with high
   probability a thread stays on the same node for some time, allowing local
   access to recently allocated objects.
 */
789 | |
790 | HeapWord* MutableNUMASpace::allocate(size_t size) { |
791 | Thread* thr = Thread::current(); |
792 | int lgrp_id = thr->lgrp_id(); |
793 | if (lgrp_id == -1 || !os::numa_has_group_homing()) { |
794 | lgrp_id = os::numa_get_group_id(); |
795 | thr->set_lgrp_id(lgrp_id); |
796 | } |
797 | |
798 | int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals); |
799 | |
800 | // It is possible that a new CPU has been hotplugged and |
801 | // we haven't reshaped the space accordingly. |
802 | if (i == -1) { |
803 | i = os::random() % lgrp_spaces()->length(); |
804 | } |
805 | |
806 | LGRPSpace* ls = lgrp_spaces()->at(i); |
807 | MutableSpace *s = ls->space(); |
808 | HeapWord *p = s->allocate(size); |
809 | |
810 | if (p != NULL) { |
811 | size_t remainder = s->free_in_words(); |
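    // If the allocation would leave a free tail smaller than a minimal object,
    // retract it: such a fragment could not be filled later to keep the space
    // parsable.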
812 | if (remainder < CollectedHeap::min_fill_size() && remainder > 0) { |
813 | s->set_top(s->top() - size); |
814 | p = NULL; |
815 | } |
816 | } |
817 | if (p != NULL) { |
818 | if (top() < s->top()) { // Keep _top updated. |
819 | MutableSpace::set_top(s->top()); |
820 | } |
821 | } |
  // Make the page allocation happen here if there is no static binding.
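  // Touching one word per small page commits the pages and, under the
  // first-touch policy, places them on the allocating thread's current node.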
823 | if (p != NULL && !os::numa_has_static_binding()) { |
824 | for (HeapWord *i = p; i < p + size; i += os::vm_page_size() >> LogHeapWordSize) { |
825 | *(int*)i = 0; |
826 | } |
827 | } |
828 | if (p == NULL) { |
829 | ls->set_allocation_failed(); |
830 | } |
831 | return p; |
832 | } |
833 | |
834 | // This version is lock-free. |
835 | HeapWord* MutableNUMASpace::cas_allocate(size_t size) { |
836 | Thread* thr = Thread::current(); |
837 | int lgrp_id = thr->lgrp_id(); |
838 | if (lgrp_id == -1 || !os::numa_has_group_homing()) { |
839 | lgrp_id = os::numa_get_group_id(); |
840 | thr->set_lgrp_id(lgrp_id); |
841 | } |
842 | |
843 | int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals); |
844 | // It is possible that a new CPU has been hotplugged and |
845 | // we haven't reshaped the space accordingly. |
846 | if (i == -1) { |
847 | i = os::random() % lgrp_spaces()->length(); |
848 | } |
849 | LGRPSpace *ls = lgrp_spaces()->at(i); |
850 | MutableSpace *s = ls->space(); |
851 | HeapWord *p = s->cas_allocate(size); |
852 | if (p != NULL) { |
853 | size_t remainder = pointer_delta(s->end(), p + size); |
854 | if (remainder < CollectedHeap::min_fill_size() && remainder > 0) { |
855 | if (s->cas_deallocate(p, size)) { |
856 | // We were the last to allocate and created a fragment less than |
857 | // a minimal object. |
858 | p = NULL; |
859 | } else { |
860 | guarantee(false, "Deallocation should always succeed" ); |
861 | } |
862 | } |
863 | } |
864 | if (p != NULL) { |
865 | HeapWord* cur_top, *cur_chunk_top = p + size; |
866 | while ((cur_top = top()) < cur_chunk_top) { // Keep _top updated. |
867 | if (Atomic::cmpxchg(cur_chunk_top, top_addr(), cur_top) == cur_top) { |
868 | break; |
869 | } |
870 | } |
871 | } |
872 | |
873 | // Make the page allocation happen here if there is no static binding. |
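  // As in allocate() above, touching one word per page triggers first-touch
  // placement on the current node.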
874 | if (p != NULL && !os::numa_has_static_binding() ) { |
875 | for (HeapWord *i = p; i < p + size; i += os::vm_page_size() >> LogHeapWordSize) { |
876 | *(int*)i = 0; |
877 | } |
878 | } |
879 | if (p == NULL) { |
880 | ls->set_allocation_failed(); |
881 | } |
882 | return p; |
883 | } |
884 | |
885 | void MutableNUMASpace::print_short_on(outputStream* st) const { |
886 | MutableSpace::print_short_on(st); |
887 | st->print(" (" ); |
888 | for (int i = 0; i < lgrp_spaces()->length(); i++) { |
889 | st->print("lgrp %d: " , lgrp_spaces()->at(i)->lgrp_id()); |
890 | lgrp_spaces()->at(i)->space()->print_short_on(st); |
891 | if (i < lgrp_spaces()->length() - 1) { |
892 | st->print(", " ); |
893 | } |
894 | } |
895 | st->print(")" ); |
896 | } |
897 | |
898 | void MutableNUMASpace::print_on(outputStream* st) const { |
899 | MutableSpace::print_on(st); |
900 | for (int i = 0; i < lgrp_spaces()->length(); i++) { |
901 | LGRPSpace *ls = lgrp_spaces()->at(i); |
902 | st->print(" lgrp %d" , ls->lgrp_id()); |
903 | ls->space()->print_on(st); |
904 | if (NUMAStats) { |
      for (int j = 0; j < lgrp_spaces()->length(); j++) {
        lgrp_spaces()->at(j)->accumulate_statistics(page_size());
      }
908 | st->print(" local/remote/unbiased/uncommitted: " SIZE_FORMAT "K/" |
909 | SIZE_FORMAT "K/" SIZE_FORMAT "K/" SIZE_FORMAT |
910 | "K, large/small pages: " SIZE_FORMAT "/" SIZE_FORMAT "\n" , |
911 | ls->space_stats()->_local_space / K, |
912 | ls->space_stats()->_remote_space / K, |
913 | ls->space_stats()->_unbiased_space / K, |
914 | ls->space_stats()->_uncommited_space / K, |
915 | ls->space_stats()->_large_pages, |
916 | ls->space_stats()->_small_pages); |
917 | } |
918 | } |
919 | } |
920 | |
921 | void MutableNUMASpace::verify() { |
922 | // This can be called after setting an arbitrary value to the space's top, |
923 | // so an object can cross the chunk boundary. We ensure the parsability |
924 | // of the space and just walk the objects in linear fashion. |
925 | ensure_parsability(); |
926 | MutableSpace::verify(); |
927 | } |
928 | |
929 | // Scan pages and gather stats about page placement and size. |
930 | void MutableNUMASpace::LGRPSpace::accumulate_statistics(size_t page_size) { |
931 | clear_space_stats(); |
932 | char *start = (char*)align_up(space()->bottom(), page_size); |
933 | char* end = (char*)align_down(space()->end(), page_size); |
934 | if (start < end) { |
935 | for (char *p = start; p < end;) { |
936 | os::page_info info; |
937 | if (os::get_page_info(p, &info)) { |
938 | if (info.size > 0) { |
939 | if (info.size > (size_t)os::vm_page_size()) { |
940 | space_stats()->_large_pages++; |
941 | } else { |
942 | space_stats()->_small_pages++; |
943 | } |
944 | if (info.lgrp_id == lgrp_id()) { |
945 | space_stats()->_local_space += info.size; |
946 | } else { |
947 | space_stats()->_remote_space += info.size; |
948 | } |
949 | p += info.size; |
950 | } else { |
951 | p += os::vm_page_size(); |
952 | space_stats()->_uncommited_space += os::vm_page_size(); |
953 | } |
954 | } else { |
955 | return; |
956 | } |
957 | } |
958 | } |
959 | space_stats()->_unbiased_space = pointer_delta(start, space()->bottom(), sizeof(char)) + |
960 | pointer_delta(space()->end(), end, sizeof(char)); |
961 | |
962 | } |
963 | |
// Scan page_count pages and verify that they have the right size and placement.
// If invalid pages are found, they are freed in the hope that subsequent
// reallocation will be more successful.
void MutableNUMASpace::LGRPSpace::scan_pages(size_t page_size, size_t page_count) {
969 | char* range_start = (char*)align_up(space()->bottom(), page_size); |
970 | char* range_end = (char*)align_down(space()->end(), page_size); |
971 | |
972 | if (range_start > last_page_scanned() || last_page_scanned() >= range_end) { |
973 | set_last_page_scanned(range_start); |
974 | } |
975 | |
976 | char *scan_start = last_page_scanned(); |
977 | char* scan_end = MIN2(scan_start + page_size * page_count, range_end); |
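  // Scan in windows of at most page_size * page_count bytes per call, resuming
  // at last_page_scanned(), so that the scanning cost per collection stays
  // bounded.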
978 | |
979 | os::page_info page_expected, page_found; |
980 | page_expected.size = page_size; |
981 | page_expected.lgrp_id = lgrp_id(); |
982 | |
983 | char *s = scan_start; |
984 | while (s < scan_end) { |
985 | char *e = os::scan_pages(s, (char*)scan_end, &page_expected, &page_found); |
986 | if (e == NULL) { |
987 | break; |
988 | } |
989 | if (e != scan_end) { |
990 | assert(e < scan_end, "e: " PTR_FORMAT " scan_end: " PTR_FORMAT, p2i(e), p2i(scan_end)); |
991 | |
992 | if ((page_expected.size != page_size || page_expected.lgrp_id != lgrp_id()) |
993 | && page_expected.size != 0) { |
994 | os::free_memory(s, pointer_delta(e, s, sizeof(char)), page_size); |
995 | } |
996 | page_expected = page_found; |
997 | } |
998 | s = e; |
999 | } |
1000 | |
1001 | set_last_page_scanned(scan_end); |
1002 | } |
1003 | |