/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zBackingFile_linux_x86.hpp"
#include "gc/z/zErrno.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zLargePages.inline.hpp"
#include "gc/z/zMemory.hpp"
#include "gc/z/zNUMA.hpp"
#include "gc/z/zPhysicalMemory.inline.hpp"
#include "gc/z/zPhysicalMemoryBacking_linux_x86.hpp"
#include "logging/log.hpp"
#include "runtime/init.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"

#include <stdio.h>
#include <sys/mman.h>
#include <sys/types.h>

//
// Support for building on older Linux systems
//

// madvise(2) flags
#ifndef MADV_HUGEPAGE
#define MADV_HUGEPAGE 14
#endif

// Proc file entry for max map count
#define ZFILENAME_PROC_MAX_MAP_COUNT "/proc/sys/vm/max_map_count"

bool ZPhysicalMemoryBacking::is_initialized() const {
  return _file.is_initialized();
}

void ZPhysicalMemoryBacking::warn_available_space(size_t max) const {
  // Note that the available space on a tmpfs or a hugetlbfs filesystem
  // will be zero if no size limit was specified when it was mounted.
  const size_t available = _file.available();
  if (available == 0) {
    // No size limit set, skip check
    log_info(gc, init)("Available space on backing filesystem: N/A");
    return;
  }

  log_info(gc, init)("Available space on backing filesystem: " SIZE_FORMAT "M", available / M);

  // Warn if the filesystem doesn't currently have enough space available to hold
  // the max heap size. The max heap size will be capped if we later hit this limit
  // when trying to expand the heap.
  if (available < max) {
    log_warning(gc)("***** WARNING! INCORRECT SYSTEM CONFIGURATION DETECTED! *****");
    log_warning(gc)("Not enough space available on the backing filesystem to hold the current max Java heap");
    log_warning(gc)("size (" SIZE_FORMAT "M). Please adjust the size of the backing filesystem accordingly "
                    "(available", max / M);
    log_warning(gc)("space is currently " SIZE_FORMAT "M). Continuing execution with the current filesystem "
                    "size could", available / M);
    log_warning(gc)("lead to a premature OutOfMemoryError being thrown, due to failure to map memory.");
  }
}

void ZPhysicalMemoryBacking::warn_max_map_count(size_t max) const {
  const char* const filename = ZFILENAME_PROC_MAX_MAP_COUNT;
  FILE* const file = fopen(filename, "r");
  if (file == NULL) {
    // Failed to open file, skip check
    log_debug(gc, init)("Failed to open %s", filename);
    return;
  }

  size_t actual_max_map_count = 0;
  const int result = fscanf(file, SIZE_FORMAT, &actual_max_map_count);
  fclose(file);
  if (result != 1) {
    // Failed to read file, skip check
    log_debug(gc, init)("Failed to read %s", filename);
    return;
  }

  // The required max map count is impossible to calculate exactly since subsystems
  // other than ZGC are also creating memory mappings, and we have no control over that.
  // However, ZGC tends to create the most mappings and dominate the total count.
  // In the worst cases, ZGC will map each granule three times, i.e. once per heap view.
  // We speculate that we need another 20% to allow for non-ZGC subsystems to map memory.
  const size_t required_max_map_count = (max / ZGranuleSize) * 3 * 1.2;
  if (actual_max_map_count < required_max_map_count) {
    log_warning(gc)("***** WARNING! INCORRECT SYSTEM CONFIGURATION DETECTED! *****");
    log_warning(gc)("The system limit on number of memory mappings per process might be too low for the given");
    log_warning(gc)("max Java heap size (" SIZE_FORMAT "M). Please adjust %s to allow for at",
                    max / M, filename);
    log_warning(gc)("least " SIZE_FORMAT " mappings (current limit is " SIZE_FORMAT "). Continuing execution "
                    "with the current", required_max_map_count, actual_max_map_count);
    log_warning(gc)("limit could lead to a fatal error, due to failure to map memory.");
  }
}

void ZPhysicalMemoryBacking::warn_commit_limits(size_t max) const {
  // Warn if available space is too low
  warn_available_space(max);

  // Warn if max map count is too low
  warn_max_map_count(max);
}

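// Probes for uncommit support before initialization has completed. The backing
// file must already be at least one granule in size, since the probe works by
// uncommitting and then re-committing a single granule.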
bool ZPhysicalMemoryBacking::supports_uncommit() {
  assert(!is_init_completed(), "Invalid state");
  assert(_file.size() >= ZGranuleSize, "Invalid size");

  // Test if uncommit is supported by uncommitting and then re-committing a granule
  return commit(uncommit(ZGranuleSize)) == ZGranuleSize;
}

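// Commits up to size bytes of backing storage. Previously uncommitted (punched)
// ranges in the backing file are reused first, and only then is the file expanded.
// Returns the number of bytes actually committed, which can be less than requested
// if the backing file fails to commit, e.g. because the filesystem is out of space.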
size_t ZPhysicalMemoryBacking::commit(size_t size) {
  size_t committed = 0;

  // Fill holes in the backing file
  while (committed < size) {
    size_t allocated = 0;
    const size_t remaining = size - committed;
    const uintptr_t start = _uncommitted.alloc_from_front_at_most(remaining, &allocated);
    if (start == UINTPTR_MAX) {
      // No holes to commit
      break;
    }

    // Try commit hole
    const size_t filled = _file.commit(start, allocated);
    if (filled > 0) {
      // Successful or partially successful
      _committed.free(start, filled);
      committed += filled;
    }
    if (filled < allocated) {
      // Failed or partially failed
      _uncommitted.free(start + filled, allocated - filled);
      return committed;
    }
  }

  // Expand backing file
  if (committed < size) {
    const size_t remaining = size - committed;
    const uintptr_t start = _file.size();
    const size_t expanded = _file.commit(start, remaining);
    if (expanded > 0) {
      // Successful or partially successful
      _committed.free(start, expanded);
      committed += expanded;
    }
  }

  return committed;
}

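// Uncommits up to size bytes by punching holes in the backing file, taking
// ranges from the back of the committed list. Returns the number of bytes
// actually uncommitted, which can be less than requested if hole punching fails.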
size_t ZPhysicalMemoryBacking::uncommit(size_t size) {
  size_t uncommitted = 0;

  // Punch holes in backing file
  while (uncommitted < size) {
    size_t allocated = 0;
    const size_t remaining = size - uncommitted;
    const uintptr_t start = _committed.alloc_from_back_at_most(remaining, &allocated);
    assert(start != UINTPTR_MAX, "Allocation should never fail");

    // Try punch hole
    const size_t punched = _file.uncommit(start, allocated);
    if (punched > 0) {
      // Successful or partially successful
      _uncommitted.free(start, punched);
      uncommitted += punched;
    }
    if (punched < allocated) {
      // Failed or partially failed
      _committed.free(start + punched, allocated - punched);
      return uncommitted;
    }
  }

  return uncommitted;
}

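// Allocates size bytes of previously committed backing storage, handing out
// one granule-sized segment at a time from the front of the committed list.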
ZPhysicalMemory ZPhysicalMemoryBacking::alloc(size_t size) {
  assert(is_aligned(size, ZGranuleSize), "Invalid size");

  ZPhysicalMemory pmem;

  // Allocate segments
  for (size_t allocated = 0; allocated < size; allocated += ZGranuleSize) {
    const uintptr_t start = _committed.alloc_from_front(ZGranuleSize);
    assert(start != UINTPTR_MAX, "Allocation should never fail");
    pmem.add_segment(ZPhysicalMemorySegment(start, ZGranuleSize));
  }

  return pmem;
}

void ZPhysicalMemoryBacking::free(const ZPhysicalMemory& pmem) {
  const size_t nsegments = pmem.nsegments();

  // Free segments
  for (size_t i = 0; i < nsegments; i++) {
    const ZPhysicalMemorySegment& segment = pmem.segment(i);
    _committed.free(segment.start(), segment.size());
  }
}

void ZPhysicalMemoryBacking::map_failed(ZErrno err) const {
  if (err == ENOMEM) {
    fatal("Failed to map memory. Please check the system limit on number of "
          "memory mappings allowed per process (see %s)", ZFILENAME_PROC_MAX_MAP_COUNT);
  } else {
    fatal("Failed to map memory (%s)", err.to_string());
  }
}

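// Applies the given madvise(2) advice to the address range, logging an error on failure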
void ZPhysicalMemoryBacking::advise_view(uintptr_t addr, size_t size, int advice) const {
  if (madvise((void*)addr, size, advice) == -1) {
    ZErrno err;
    log_error(gc)("Failed to advise on memory (advice %d, %s)", advice, err.to_string());
  }
}

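// Pre-touches the range one page at a time, using the large page size when
// explicit large pages are enabled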
void ZPhysicalMemoryBacking::pretouch_view(uintptr_t addr, size_t size) const {
  const size_t page_size = ZLargePages::is_explicit() ? os::large_page_size() : os::vm_page_size();
  os::pretouch_memory((void*)addr, (void*)(addr + size), page_size);
}

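// Maps the physical segments of pmem consecutively starting at addr, advises the
// kernel about transparent huge page usage, interleaves the memory across NUMA
// nodes, and optionally pre-touches it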
void ZPhysicalMemoryBacking::map_view(const ZPhysicalMemory& pmem, uintptr_t addr, bool pretouch) const {
  const size_t nsegments = pmem.nsegments();
  size_t size = 0;

  // Map segments
  for (size_t i = 0; i < nsegments; i++) {
    const ZPhysicalMemorySegment& segment = pmem.segment(i);
    const uintptr_t segment_addr = addr + size;
    const void* const res = mmap((void*)segment_addr, segment.size(), PROT_READ|PROT_WRITE, MAP_FIXED|MAP_SHARED, _file.fd(), segment.start());
    if (res == MAP_FAILED) {
      ZErrno err;
      map_failed(err);
    }

    size += segment.size();
  }

  // Advise on use of transparent huge pages before touching it
  if (ZLargePages::is_transparent()) {
    advise_view(addr, size, MADV_HUGEPAGE);
  }

  // NUMA interleave memory before touching it
  ZNUMA::memory_interleave(addr, size);

  // Pre-touch memory
  if (pretouch) {
    pretouch_view(addr, size);
  }
}

void ZPhysicalMemoryBacking::unmap_view(const ZPhysicalMemory& pmem, uintptr_t addr) const {
  // Note that we must keep the address space reservation intact and just detach
  // the backing memory. For this reason we map a new anonymous, non-accessible
  // and non-reserved page over the mapping instead of actually unmapping.
  const void* const res = mmap((void*)addr, pmem.size(), PROT_NONE, MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
  if (res == MAP_FAILED) {
    ZErrno err;
    map_failed(err);
  }
}

uintptr_t ZPhysicalMemoryBacking::nmt_address(uintptr_t offset) const {
  // From an NMT point of view we treat the first heap view (marked0) as committed
  return ZAddress::marked0(offset);
}

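// When ZVerifyViews is enabled only the good view is mapped; otherwise the
// physical memory is mapped in all three heap views (marked0, marked1, remapped)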
void ZPhysicalMemoryBacking::map(const ZPhysicalMemory& pmem, uintptr_t offset) const {
  if (ZVerifyViews) {
    // Map good view
    map_view(pmem, ZAddress::good(offset), AlwaysPreTouch);
  } else {
    // Map all views
    map_view(pmem, ZAddress::marked0(offset), AlwaysPreTouch);
    map_view(pmem, ZAddress::marked1(offset), AlwaysPreTouch);
    map_view(pmem, ZAddress::remapped(offset), AlwaysPreTouch);
  }
}

void ZPhysicalMemoryBacking::unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const {
  if (ZVerifyViews) {
    // Unmap good view
    unmap_view(pmem, ZAddress::good(offset));
  } else {
    // Unmap all views
    unmap_view(pmem, ZAddress::marked0(offset));
    unmap_view(pmem, ZAddress::marked1(offset));
    unmap_view(pmem, ZAddress::remapped(offset));
  }
}

void ZPhysicalMemoryBacking::debug_map(const ZPhysicalMemory& pmem, uintptr_t offset) const {
  // Map good view
  assert(ZVerifyViews, "Should be enabled");
  map_view(pmem, ZAddress::good(offset), false /* pretouch */);
}

void ZPhysicalMemoryBacking::debug_unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const {
  // Unmap good view
  assert(ZVerifyViews, "Should be enabled");
  unmap_view(pmem, ZAddress::good(offset));
}