| 1 | /* | 
|---|
| 2 | * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. | 
|---|
| 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | 
|---|
| 4 | * | 
|---|
| 5 | * This code is free software; you can redistribute it and/or modify it | 
|---|
| 6 | * under the terms of the GNU General Public License version 2 only, as | 
|---|
| 7 | * published by the Free Software Foundation. | 
|---|
| 8 | * | 
|---|
| 9 | * This code is distributed in the hope that it will be useful, but WITHOUT | 
|---|
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | 
|---|
| 11 | * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License | 
|---|
| 12 | * version 2 for more details (a copy is included in the LICENSE file that | 
|---|
| 13 | * accompanied this code). | 
|---|
| 14 | * | 
|---|
| 15 | * You should have received a copy of the GNU General Public License version | 
|---|
| 16 | * 2 along with this work; if not, write to the Free Software Foundation, | 
|---|
| 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | 
|---|
| 18 | * | 
|---|
| 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA | 
|---|
| 20 | * or visit www.oracle.com if you need additional information or have any | 
|---|
| 21 | * questions. | 
|---|
| 22 | */ | 
|---|
| 23 |  | 
|---|
| 24 | #include "precompiled.hpp" | 
|---|
| 25 | #include "gc/shared/suspendibleThreadSet.hpp" | 
|---|
| 26 | #include "gc/z/zAddress.inline.hpp" | 
|---|
| 27 | #include "gc/z/zCollectedHeap.hpp" | 
|---|
| 28 | #include "gc/z/zFuture.inline.hpp" | 
|---|
| 29 | #include "gc/z/zGlobals.hpp" | 
|---|
| 30 | #include "gc/z/zLock.inline.hpp" | 
|---|
| 31 | #include "gc/z/zPage.inline.hpp" | 
|---|
| 32 | #include "gc/z/zPageAllocator.hpp" | 
|---|
| 33 | #include "gc/z/zPageCache.inline.hpp" | 
|---|
| 34 | #include "gc/z/zSafeDelete.inline.hpp" | 
|---|
| 35 | #include "gc/z/zStat.hpp" | 
|---|
| 36 | #include "gc/z/zTracer.inline.hpp" | 
|---|
| 37 | #include "runtime/globals.hpp" | 
|---|
| 38 | #include "runtime/init.hpp" | 
|---|
| 39 | #include "runtime/java.hpp" | 
|---|
| 40 | #include "utilities/debug.hpp" | 
|---|
| 41 |  | 
|---|
// Throughput counters (sampled in bytes/second) for mutator allocation,
// page cache flushing and memory uncommit, plus the critical phase used
// to time allocation stalls.
static const ZStatCounter       ZCounterAllocationRate("Memory", "Allocation Rate", ZStatUnitBytesPerSecond);
static const ZStatCounter       ZCounterPageCacheFlush("Memory", "Page Cache Flush", ZStatUnitBytesPerSecond);
static const ZStatCounter       ZCounterUncommit("Memory", "Uncommit", ZStatUnitBytesPerSecond);
static const ZStatCriticalPhase ZCriticalPhaseAllocationStall("Allocation Stall");
|---|
| 46 |  | 
|---|
// A blocking page allocation request. An instance lives on the stack of
// the allocating thread (StackObj) and is linked into the allocator's
// request queue while the thread waits for another thread to satisfy the
// request. Because the request is stack-allocated, it may be deallocated
// as soon as satisfy() has been called (see satisfy_alloc_queue()).
class ZPageAllocRequest : public StackObj {
  friend class ZList<ZPageAllocRequest>;

private:
  const uint8_t                _type;               // Requested page type
  const size_t                 _size;               // Requested page size
  const ZAllocationFlags       _flags;              // Allocation flags
  const unsigned int           _total_collections;  // GC count when the request was created
  ZListNode<ZPageAllocRequest> _node;               // Link in the allocator's request queue
  ZFuture<ZPage*>              _result;             // Delivers the page, NULL (OOM) or gc_marker (retry)

public:
  ZPageAllocRequest(uint8_t type, size_t size, ZAllocationFlags flags, unsigned int total_collections) :
      _type(type),
      _size(size),
      _flags(flags),
      _total_collections(total_collections) {}

  uint8_t type() const {
    return _type;
  }

  size_t size() const {
    return _size;
  }

  ZAllocationFlags flags() const {
    return _flags;
  }

  unsigned int total_collections() const {
    return _total_collections;
  }

  // Blocks until another thread calls satisfy()
  ZPage* wait() {
    return _result.get();
  }

  // Wakes up the waiting thread with the given result
  void satisfy(ZPage* page) {
    _result.set(page);
  }
};
|---|
| 89 |  | 
|---|
| 90 | ZPage* const ZPageAllocator::gc_marker = (ZPage*)-1; | 
|---|
| 91 |  | 
|---|
// Initializes the page allocator. On any failure the constructor returns
// early, leaving _initialized == false; callers must check
// is_initialized() before using the allocator.
ZPageAllocator::ZPageAllocator(size_t min_capacity,
                               size_t initial_capacity,
                               size_t max_capacity,
                               size_t max_reserve) :
    _lock(),
    _virtual(),
    _physical(),
    _cache(),
    _min_capacity(min_capacity),
    _max_capacity(max_capacity),
    _max_reserve(max_reserve),
    _current_max_capacity(max_capacity),
    _capacity(0),
    _used_high(0),
    _used_low(0),
    _used(0),
    _allocated(0),
    _reclaimed(0),
    _queue(),
    _safe_delete(),
    _uncommit(false),
    _initialized(false) {

  // Bail out if the backing virtual/physical memory managers failed
  if (!_virtual.is_initialized() || !_physical.is_initialized()) {
    return;
  }

  log_info(gc, init)("Min Capacity: " SIZE_FORMAT "M", min_capacity / M);
  log_info(gc, init)("Initial Capacity: " SIZE_FORMAT "M", initial_capacity / M);
  log_info(gc, init)("Max Capacity: " SIZE_FORMAT "M", max_capacity / M);
  log_info(gc, init)("Max Reserve: " SIZE_FORMAT "M", max_reserve / M);
  log_info(gc, init)("Pre-touch: %s", AlwaysPreTouch ? "Enabled" : "Disabled");

  // Warn if system limits could stop us from reaching max capacity
  _physical.warn_commit_limits(max_capacity);

  // Commit initial capacity; a partial commit is treated as failure
  _capacity = _physical.commit(initial_capacity);
  if (_capacity != initial_capacity) {
    log_error(gc)("Failed to allocate initial Java heap (" SIZE_FORMAT "M)", initial_capacity / M);
    return;
  }

  // If uncommit is not explicitly disabled, max capacity is greater than
  // min capacity, and uncommit is supported by the platform, then we will
  // try to uncommit unused memory.
  _uncommit = ZUncommit && (max_capacity > min_capacity) && _physical.supports_uncommit();
  if (_uncommit) {
    log_info(gc, init)("Uncommit: Enabled, Delay: " UINTX_FORMAT "s", ZUncommitDelay);
  } else {
    log_info(gc, init)("Uncommit: Disabled");
  }

  // Pre-map initial capacity
  prime_cache(initial_capacity);

  // Successfully initialized
  _initialized = true;
}
|---|
| 151 |  | 
|---|
// Pre-allocates, maps and caches a page covering the initial capacity so
// early allocations are served from already-mapped memory. Called from
// the constructor after the initial capacity has been committed, so the
// guarantees can only fail on programmer error.
void ZPageAllocator::prime_cache(size_t size) {
  // Allocate physical memory
  const ZPhysicalMemory pmem = _physical.alloc(size);
  guarantee(!pmem.is_null(), "Invalid size");

  // Allocate virtual memory, from the front of the address space
  const ZVirtualMemory vmem = _virtual.alloc(size, true /* alloc_from_front */);
  guarantee(!vmem.is_null(), "Invalid size");

  // Allocate page
  ZPage* const page = new ZPage(vmem, pmem);

  // Map page
  map_page(page);
  page->set_pre_mapped();

  // Add page to cache
  page->set_last_used();
  _cache.free_page(page);
}
|---|
| 172 |  | 
|---|
// Returns true if the constructor completed successfully
bool ZPageAllocator::is_initialized() const {
  return _initialized;
}
|---|
| 176 |  | 
|---|
// Minimum heap capacity (bytes), as configured at construction
size_t ZPageAllocator::min_capacity() const {
  return _min_capacity;
}
|---|
| 180 |  | 
|---|
// Maximum heap capacity (bytes), as configured at construction
size_t ZPageAllocator::max_capacity() const {
  return _max_capacity;
}
|---|
| 184 |  | 
|---|
// Soft maximum capacity: the smaller of SoftMaxHeapSize and the current
// max capacity (which may have been lowered after a failed commit).
size_t ZPageAllocator::soft_max_capacity() const {
  // Note that SoftMaxHeapSize is a manageable flag
  return MIN2(SoftMaxHeapSize, _current_max_capacity);
}
|---|
| 189 |  | 
|---|
// Currently committed capacity (bytes)
size_t ZPageAllocator::capacity() const {
  return _capacity;
}
|---|
| 193 |  | 
|---|
// Size of the reserve (bytes) kept back for GC-internal allocations
size_t ZPageAllocator::max_reserve() const {
  return _max_reserve;
}
|---|
| 197 |  | 
|---|
// High watermark of used memory since the last reset_statistics()
size_t ZPageAllocator::used_high() const {
  return _used_high;
}
|---|
| 201 |  | 
|---|
// Low watermark of used memory since the last reset_statistics()
size_t ZPageAllocator::used_low() const {
  return _used_low;
}
|---|
| 205 |  | 
|---|
// Currently used memory (bytes)
size_t ZPageAllocator::used() const {
  return _used;
}
|---|
| 209 |  | 
|---|
| 210 | size_t ZPageAllocator::unused() const { | 
|---|
| 211 | const ssize_t unused = (ssize_t)_capacity - (ssize_t)_used - (ssize_t)_max_reserve; | 
|---|
| 212 | return unused > 0 ? (size_t)unused : 0; | 
|---|
| 213 | } | 
|---|
| 214 |  | 
|---|
// Bytes allocated since the last reset_statistics()
size_t ZPageAllocator::allocated() const {
  return _allocated;
}
|---|
| 218 |  | 
|---|
| 219 | size_t ZPageAllocator::reclaimed() const { | 
|---|
| 220 | return _reclaimed > 0 ? (size_t)_reclaimed : 0; | 
|---|
| 221 | } | 
|---|
| 222 |  | 
|---|
// Resets allocation/reclamation statistics and re-seeds the used-memory
// watermarks with the current used value. Must be called at a safepoint.
void ZPageAllocator::reset_statistics() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  _allocated = 0;
  _reclaimed = 0;
  _used_high = _used_low = _used;
}
|---|
| 229 |  | 
|---|
| 230 | void ZPageAllocator::increase_used(size_t size, bool relocation) { | 
|---|
| 231 | if (relocation) { | 
|---|
| 232 | // Allocating a page for the purpose of relocation has a | 
|---|
| 233 | // negative contribution to the number of reclaimed bytes. | 
|---|
| 234 | _reclaimed -= size; | 
|---|
| 235 | } | 
|---|
| 236 | _allocated += size; | 
|---|
| 237 | _used += size; | 
|---|
| 238 | if (_used > _used_high) { | 
|---|
| 239 | _used_high = _used; | 
|---|
| 240 | } | 
|---|
| 241 | } | 
|---|
| 242 |  | 
|---|
| 243 | void ZPageAllocator::decrease_used(size_t size, bool reclaimed) { | 
|---|
| 244 | if (reclaimed) { | 
|---|
| 245 | // Only pages explicitly released with the reclaimed flag set | 
|---|
| 246 | // counts as reclaimed bytes. This flag is typically true when | 
|---|
| 247 | // a worker releases a page after relocation, and is typically | 
|---|
| 248 | // false when we release a page to undo an allocation. | 
|---|
| 249 | _reclaimed += size; | 
|---|
| 250 | } | 
|---|
| 251 | _used -= size; | 
|---|
| 252 | if (_used < _used_low) { | 
|---|
| 253 | _used_low = _used; | 
|---|
| 254 | } | 
|---|
| 255 | } | 
|---|
| 256 |  | 
|---|
// Creates a new (unmapped) page of the given type and size, backed by
// freshly allocated virtual and physical memory. Returns NULL only if
// the virtual address space is exhausted; the physical allocation is
// asserted to succeed since callers ensure availability first (see
// ensure_available()/ensure_uncached_available()).
ZPage* ZPageAllocator::create_page(uint8_t type, size_t size) {
  // Allocate virtual memory
  const ZVirtualMemory vmem = _virtual.alloc(size);
  if (vmem.is_null()) {
    // Out of address space
    return NULL;
  }

  // Allocate physical memory
  const ZPhysicalMemory pmem = _physical.alloc(size);
  assert(!pmem.is_null(), "Invalid size");

  // Allocate page
  return new ZPage(type, vmem, pmem);
}
|---|
| 272 |  | 
|---|
// Tears down a page: unmaps it, returns its physical and virtual memory
// to the respective managers, and deletes the ZPage object through the
// safe-delete mechanism (deletion may be deferred while deferred delete
// is enabled, see enable_deferred_delete()).
void ZPageAllocator::destroy_page(ZPage* page) {
  const ZVirtualMemory& vmem = page->virtual_memory();
  const ZPhysicalMemory& pmem = page->physical_memory();

  // Unmap memory
  _physical.unmap(pmem, vmem.start());

  // Free physical memory
  _physical.free(pmem);

  // Free virtual memory
  _virtual.free(vmem);

  // Delete page safely
  _safe_delete(page);
}
|---|
| 289 |  | 
|---|
// Maps the page's physical memory at its virtual address, if not already
// mapped. When ZVerifyViews is enabled, an already-mapped page is
// re-mapped through the debug path instead.
void ZPageAllocator::map_page(const ZPage* page) const {
  // Map physical memory
  if (!page->is_mapped()) {
    _physical.map(page->physical_memory(), page->start());
  } else if (ZVerifyViews) {
    _physical.debug_map(page->physical_memory(), page->start());
  }
}
|---|
| 298 |  | 
|---|
| 299 | size_t ZPageAllocator::max_available(bool no_reserve) const { | 
|---|
| 300 | size_t available = _current_max_capacity - _used; | 
|---|
| 301 |  | 
|---|
| 302 | if (no_reserve) { | 
|---|
| 303 | // The reserve should not be considered available | 
|---|
| 304 | available -= MIN2(available, _max_reserve); | 
|---|
| 305 | } | 
|---|
| 306 |  | 
|---|
| 307 | return available; | 
|---|
| 308 | } | 
|---|
| 309 |  | 
|---|
// Ensures that at least size bytes of committed-but-unused capacity are
// available, growing committed capacity if needed. Returns false if the
// request can never be satisfied. On a failed or partial commit, the
// current max capacity is permanently lowered to the committed capacity.
// Note: 'size' is deliberately mutated below (reserve added, then
// conditionally removed) — the final comparison must match that math.
bool ZPageAllocator::ensure_available(size_t size, bool no_reserve) {
  if (max_available(no_reserve) < size) {
    // Not enough free memory
    return false;
  }

  // We add the max_reserve to the requested size to avoid losing
  // the reserve because of failure to increase capacity before
  // reaching max capacity.
  size += _max_reserve;

  // Don't try to increase capacity if enough unused capacity
  // is available or if current max capacity has been reached.
  const size_t available = _capacity - _used;
  if (available < size && _capacity < _current_max_capacity) {
    // Try to increase capacity
    const size_t commit = MIN2(size - available, _current_max_capacity - _capacity);
    const size_t committed = _physical.commit(commit);
    _capacity += committed;

    log_trace(gc, heap)("Make Available: Size: " SIZE_FORMAT "M, NoReserve: %s, "
                        "Available: " SIZE_FORMAT "M, Commit: " SIZE_FORMAT "M, "
                        "Committed: " SIZE_FORMAT "M, Capacity: " SIZE_FORMAT "M",
                        size / M, no_reserve ? "True" : "False", available / M,
                        commit / M, committed / M, _capacity / M);

    if (committed != commit) {
      // Failed, or partly failed, to increase capacity. Adjust current
      // max capacity to avoid further attempts to increase capacity.
      log_error(gc)("Forced to lower max Java heap size from "
                    SIZE_FORMAT "M(%.0lf%%) to " SIZE_FORMAT "M(%.0lf%%)",
                    _current_max_capacity / M, percent_of(_current_max_capacity, _max_capacity),
                    _capacity / M, percent_of(_capacity, _max_capacity));

      _current_max_capacity = _capacity;
    }
  }

  // If the caller may use the reserve, only the original request
  // (without the reserve) needs to fit in the available capacity.
  if (!no_reserve) {
    size -= _max_reserve;
  }

  const size_t new_available = _capacity - _used;
  return new_available >= size;
}
|---|
| 355 |  | 
|---|
| 356 | void ZPageAllocator::ensure_uncached_available(size_t size) { | 
|---|
| 357 | assert(_capacity - _used >= size, "Invalid size"); | 
|---|
| 358 | const size_t uncached_available = _capacity - _used - _cache.available(); | 
|---|
| 359 | if (size > uncached_available) { | 
|---|
| 360 | flush_cache_for_allocation(size - uncached_available); | 
|---|
| 361 | } | 
|---|
| 362 | } | 
|---|
| 363 |  | 
|---|
// Core allocation path: checks availability, then tries the page cache,
// and finally creates a new page (flushing cached pages first if their
// memory is needed). Returns NULL if not enough memory is available.
ZPage* ZPageAllocator::alloc_page_common_inner(uint8_t type, size_t size, bool no_reserve) {
  if (!ensure_available(size, no_reserve)) {
    // Not enough free memory
    return NULL;
  }

  // Try allocate page from the cache
  ZPage* const page = _cache.alloc_page(type, size);
  if (page != NULL) {
    return page;
  }

  // Try flush pages from the cache
  ensure_uncached_available(size);

  // Create new page
  return create_page(type, size);
}
|---|
| 382 |  | 
|---|
// Allocates a page and updates accounting/tracing. Caller must hold
// _lock (see alloc_page_blocking()/alloc_page_nonblocking()/
// satisfy_alloc_queue()). Returns NULL on out-of-memory.
ZPage* ZPageAllocator::alloc_page_common(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZPage* const page = alloc_page_common_inner(type, size, flags.no_reserve());
  if (page == NULL) {
    // Out of memory
    return NULL;
  }

  // Update used statistics
  increase_used(size, flags.relocation());

  // Send trace event
  ZTracer::tracer()->report_page_alloc(size, _used, max_available(flags.no_reserve()), _cache.available(), flags);

  return page;
}
|---|
| 398 |  | 
|---|
// Exits the VM with an OutOfMemoryError if we would otherwise block on
// allocation before VM initialization has completed (no GC can run yet,
// so blocking would hang forever).
void ZPageAllocator::check_out_of_memory_during_initialization() {
  if (!is_init_completed()) {
    vm_exit_during_initialization("java.lang.OutOfMemoryError", "Java heap too small");
  }
}
|---|
| 404 |  | 
|---|
// Allocates a page, blocking (and triggering GC cycles) until the
// allocation can be satisfied or fails permanently. The request object
// lives on this thread's stack and is queued under _lock; another thread
// (free_page() -> satisfy_alloc_queue(), or check_out_of_memory())
// delivers the result. Note that _lock is NOT held while waiting.
ZPage* ZPageAllocator::alloc_page_blocking(uint8_t type, size_t size, ZAllocationFlags flags) {
  // Prepare to block
  ZPageAllocRequest request(type, size, flags, ZCollectedHeap::heap()->total_collections());

  _lock.lock();

  // Try non-blocking allocation
  ZPage* page = alloc_page_common(type, size, flags);
  if (page == NULL) {
    // Allocation failed, enqueue request
    _queue.insert_last(&request);
  }

  _lock.unlock();

  if (page == NULL) {
    // Allocation failed
    ZStatTimer timer(ZCriticalPhaseAllocationStall);

    // We can only block if VM is fully initialized
    check_out_of_memory_during_initialization();

    do {
      // Start asynchronous GC
      ZCollectedHeap::heap()->collect(GCCause::_z_allocation_stall);

      // Wait for allocation to complete or fail. gc_marker means
      // "a new GC cycle was started, retry"; NULL means out of memory.
      page = request.wait();
    } while (page == gc_marker);

    {
      // Guard deletion of underlying semaphore. This is a workaround for a
      // bug in sem_post() in glibc < 2.21, where it's not safe to destroy
      // the semaphore immediately after returning from sem_wait(). The
      // reason is that sem_post() can touch the semaphore after a waiting
      // thread have returned from sem_wait(). To avoid this race we are
      // forcing the waiting thread to acquire/release the lock held by the
      // posting thread. https://sourceware.org/bugzilla/show_bug.cgi?id=12674
      ZLocker<ZLock> locker(&_lock);
    }
  }

  return page;
}
|---|
| 449 |  | 
|---|
// Allocates a page without blocking; returns NULL immediately if the
// allocation cannot be satisfied.
ZPage* ZPageAllocator::alloc_page_nonblocking(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZLocker<ZLock> locker(&_lock);
  return alloc_page_common(type, size, flags);
}
|---|
| 454 |  | 
|---|
// Public entry point for page allocation. Dispatches to the blocking or
// non-blocking path based on flags, then maps and resets the page and
// updates allocation-rate statistics. Returns NULL on out-of-memory
// (non-blocking path, or a failed blocking request).
ZPage* ZPageAllocator::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZPage* const page = flags.non_blocking()
                      ? alloc_page_nonblocking(type, size, flags)
                      : alloc_page_blocking(type, size, flags);
  if (page == NULL) {
    // Out of memory
    return NULL;
  }

  // Map page if needed
  map_page(page);

  // Reset page. This updates the page's sequence number and must
  // be done after page allocation, which potentially blocked in
  // a safepoint where the global sequence number was updated.
  page->reset();

  // Update allocation statistics. Exclude worker threads to avoid
  // artificial inflation of the allocation rate due to relocation.
  if (!flags.worker_thread()) {
    // Note that there are two allocation rate counters, which have
    // different purposes and are sampled at different frequencies.
    const size_t bytes = page->size();
    ZStatInc(ZCounterAllocationRate, bytes);
    ZStatInc(ZStatAllocRate::counter(), bytes);
  }

  return page;
}
|---|
| 484 |  | 
|---|
| 485 | void ZPageAllocator::satisfy_alloc_queue() { | 
|---|
| 486 | for (;;) { | 
|---|
| 487 | ZPageAllocRequest* const request = _queue.first(); | 
|---|
| 488 | if (request == NULL) { | 
|---|
| 489 | // Allocation queue is empty | 
|---|
| 490 | return; | 
|---|
| 491 | } | 
|---|
| 492 |  | 
|---|
| 493 | ZPage* const page = alloc_page_common(request->type(), request->size(), request->flags()); | 
|---|
| 494 | if (page == NULL) { | 
|---|
| 495 | // Allocation could not be satisfied, give up | 
|---|
| 496 | return; | 
|---|
| 497 | } | 
|---|
| 498 |  | 
|---|
| 499 | // Allocation succeeded, dequeue and satisfy request. Note that | 
|---|
| 500 | // the dequeue operation must happen first, since the request | 
|---|
| 501 | // will immediately be deallocated once it has been satisfied. | 
|---|
| 502 | _queue.remove(request); | 
|---|
| 503 | request->satisfy(page); | 
|---|
| 504 | } | 
|---|
| 505 | } | 
|---|
| 506 |  | 
|---|
// Returns a page to the allocator. The page goes into the page cache
// (its memory stays committed and mapped), after which any blocked
// allocation requests are given a chance to proceed.
void ZPageAllocator::free_page(ZPage* page, bool reclaimed) {
  ZLocker<ZLock> locker(&_lock);

  // Update used statistics
  decrease_used(page->size(), reclaimed);

  // Set time when last used (drives the uncommit delay)
  page->set_last_used();

  // Cache page
  _cache.free_page(page);

  // Try satisfy blocked allocations
  satisfy_alloc_queue();
}
|---|
| 522 |  | 
|---|
// Flushes pages selected by the closure out of the page cache and
// destroys them, returning the number of bytes flushed. If the closure
// flushed more than requested, the excess is split off the last flushed
// page and returned to the cache.
size_t ZPageAllocator::flush_cache(ZPageCacheFlushClosure* cl) {
  ZList<ZPage> list;

  // Flush pages
  _cache.flush(cl, &list);

  const size_t overflushed = cl->overflushed();
  if (overflushed > 0) {
    // Overflushed, keep part of last page
    ZPage* const page = list.last()->split(overflushed);
    _cache.free_page(page);
  }

  // Destroy pages
  size_t flushed = 0;
  for (ZPage* page = list.remove_first(); page != NULL; page = list.remove_first()) {
    flushed += page->size();
    destroy_page(page);
  }

  return flushed;
}
|---|
| 545 |  | 
|---|
| 546 | class ZPageCacheFlushForAllocationClosure : public ZPageCacheFlushClosure { | 
|---|
| 547 | public: | 
|---|
| 548 | ZPageCacheFlushForAllocationClosure(size_t requested) : | 
|---|
| 549 | ZPageCacheFlushClosure(requested) {} | 
|---|
| 550 |  | 
|---|
| 551 | virtual bool do_page(const ZPage* page) { | 
|---|
| 552 | if (_flushed < _requested) { | 
|---|
| 553 | // Flush page | 
|---|
| 554 | _flushed += page->size(); | 
|---|
| 555 | return true; | 
|---|
| 556 | } | 
|---|
| 557 |  | 
|---|
| 558 | // Don't flush page | 
|---|
| 559 | return false; | 
|---|
| 560 | } | 
|---|
| 561 | }; | 
|---|
| 562 |  | 
|---|
// Flushes exactly 'requested' bytes of cached pages so their physical
// memory can back a new page. The cache must hold at least that much
// (see ensure_uncached_available()).
void ZPageAllocator::flush_cache_for_allocation(size_t requested) {
  assert(requested <= _cache.available(), "Invalid request");

  // Flush pages
  ZPageCacheFlushForAllocationClosure cl(requested);
  const size_t flushed = flush_cache(&cl);

  assert(requested == flushed, "Failed to flush");

  const size_t cached_after = _cache.available();
  const size_t cached_before = cached_after + flushed;

  log_info(gc, heap)("Page Cache: " SIZE_FORMAT "M(%.0lf%%)->" SIZE_FORMAT "M(%.0lf%%), "
                     "Flushed: " SIZE_FORMAT "M",
                     cached_before / M, percent_of(cached_before, max_capacity()),
                     cached_after / M, percent_of(cached_after, max_capacity()),
                     flushed / M);

  // Update statistics
  ZStatInc(ZCounterPageCacheFlush, flushed);
}
|---|
| 584 |  | 
|---|
// Flush closure used by uncommit: only flushes pages that have been
// unused for at least 'delay' seconds, and records the shortest time
// until a non-expired page would become eligible, so the caller knows
// when to try again.
class ZPageCacheFlushForUncommitClosure : public ZPageCacheFlushClosure {
private:
  const uint64_t _now;      // Timestamp (seconds) taken once at construction
  const uint64_t _delay;    // Required idle time before a page may be flushed
  uint64_t       _timeout;  // Shortest remaining time until a page expires

public:
  ZPageCacheFlushForUncommitClosure(size_t requested, uint64_t delay) :
      ZPageCacheFlushClosure(requested),
      _now(os::elapsedTime()),
      _delay(delay),
      _timeout(_delay) {}

  virtual bool do_page(const ZPage* page) {
    const uint64_t expires = page->last_used() + _delay;
    // Clamp at zero (unsigned): expired pages get timeout == 0
    const uint64_t timeout = expires - MIN2(expires, _now);

    if (_flushed < _requested && timeout == 0) {
      // Flush page
      _flushed += page->size();
      return true;
    }

    // Record shortest non-expired timeout
    _timeout = MIN2(_timeout, timeout);

    // Don't flush page
    return false;
  }

  // Suggested delay (seconds) before the next uncommit attempt
  uint64_t timeout() const {
    return _timeout;
  }
};
|---|
| 619 |  | 
|---|
// Uncommits unused memory back to the operating system, never touching
// the reserve and never shrinking below min capacity. Returns the number
// of seconds the caller should wait before attempting uncommit again.
uint64_t ZPageAllocator::uncommit(uint64_t delay) {
  // Set the default timeout, when no pages are found in the
  // cache or when uncommit is disabled, equal to the delay.
  uint64_t timeout = delay;

  if (!_uncommit) {
    // Disabled
    return timeout;
  }

  size_t capacity_before;
  size_t capacity_after;
  size_t uncommitted;

  {
    // Join the suspendible thread set so a safepoint can proceed while
    // we hold the allocator lock doing the (potentially slow) uncommit.
    SuspendibleThreadSetJoiner joiner;
    ZLocker<ZLock> locker(&_lock);

    // Don't flush more than we will uncommit. Never uncommit
    // the reserve, and never uncommit below min capacity.
    const size_t needed = MIN2(_used + _max_reserve, _current_max_capacity);
    const size_t guarded = MAX2(needed, _min_capacity);
    const size_t uncommittable = _capacity - guarded;
    const size_t uncached_available = _capacity - _used - _cache.available();
    size_t uncommit = MIN2(uncommittable, uncached_available);
    const size_t flush = uncommittable - uncommit;

    if (flush > 0) {
      // Flush pages to uncommit
      ZPageCacheFlushForUncommitClosure cl(flush, delay);
      uncommit += flush_cache(&cl);
      timeout = cl.timeout();
    }

    // Uncommit
    uncommitted = _physical.uncommit(uncommit);
    _capacity -= uncommitted;

    capacity_after = _capacity;
    capacity_before = capacity_after + uncommitted;
  }

  // Log outside the lock
  if (uncommitted > 0) {
    log_info(gc, heap)("Capacity: " SIZE_FORMAT "M(%.0lf%%)->" SIZE_FORMAT "M(%.0lf%%), "
                       "Uncommitted: " SIZE_FORMAT "M",
                       capacity_before / M, percent_of(capacity_before, max_capacity()),
                       capacity_after / M, percent_of(capacity_after, max_capacity()),
                       uncommitted / M);

    // Update statistics
    ZStatInc(ZCounterUncommit, uncommitted);
  }

  return timeout;
}
|---|
| 675 |  | 
|---|
// Enables deferred deletion of pages (see destroy_page()/_safe_delete)
void ZPageAllocator::enable_deferred_delete() const {
  _safe_delete.enable_deferred_delete();
}
|---|
| 679 |  | 
|---|
// Disables deferred deletion of pages (see destroy_page()/_safe_delete)
void ZPageAllocator::disable_deferred_delete() const {
  _safe_delete.disable_deferred_delete();
}
|---|
| 683 |  | 
|---|
// Debug-maps a single page (ZVerifyViews support). Safepoint only.
void ZPageAllocator::debug_map_page(const ZPage* page) const {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  _physical.debug_map(page->physical_memory(), page->start());
}
|---|
| 688 |  | 
|---|
// Closure that debug-maps every page in the page cache by delegating to
// ZPageAllocator::debug_map_page(). Used by debug_map_cached_pages().
class ZPageCacheDebugMapClosure : public StackObj {
private:
  const ZPageAllocator* const _allocator;

public:
  ZPageCacheDebugMapClosure(const ZPageAllocator* allocator) :
      _allocator(allocator) {}

  virtual void do_page(const ZPage* page) {
    _allocator->debug_map_page(page);
  }
};
|---|
| 701 |  | 
|---|
// Debug-maps all pages currently in the page cache (ZVerifyViews
// support). Safepoint only.
void ZPageAllocator::debug_map_cached_pages() const {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  ZPageCacheDebugMapClosure cl(this);
  _cache.pages_do(&cl);
}
|---|
| 707 |  | 
|---|
// Debug-unmaps the entire heap address range (ZVerifyViews support).
// Safepoint only.
void ZPageAllocator::debug_unmap_all_pages() const {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  _physical.debug_unmap(ZPhysicalMemorySegment(0 /* start */, ZAddressOffsetMax), 0 /* offset */);
}
|---|
| 712 |  | 
|---|
// Returns true if any thread is currently blocked waiting for a page
// allocation. Safepoint only (so the queue can be read without _lock).
bool ZPageAllocator::is_alloc_stalled() const {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  return !_queue.is_empty();
}
|---|
| 717 |  | 
|---|
// Called after a GC cycle: requests enqueued before the cycle started
// (total_collections differs) have now survived a full GC without being
// satisfied and are failed with NULL (OOM). The first request enqueued
// during the current cycle is woken with gc_marker, telling it to retry
// and kick off a new GC cycle (see alloc_page_blocking()).
void ZPageAllocator::check_out_of_memory() {
  ZLocker<ZLock> locker(&_lock);

  // Fail allocation requests that were enqueued before the
  // last GC cycle started, otherwise start a new GC cycle.
  for (ZPageAllocRequest* request = _queue.first(); request != NULL; request = _queue.first()) {
    if (request->total_collections() == ZCollectedHeap::heap()->total_collections()) {
      // Start a new GC cycle, keep allocation requests enqueued
      request->satisfy(gc_marker);
      return;
    }

    // Out of memory, fail allocation request
    _queue.remove_first();
    request->satisfy(NULL);
  }
}
|---|
| 735 |  | 
|---|