/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/z/zList.inline.hpp"
#include "gc/z/zNUMA.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageCache.hpp"
#include "gc/z/zStat.hpp"
#include "logging/log.hpp"

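// Page cache hit/miss statistics. An L1 hit means a page of the exact type and
// size was found (NUMA-local for small pages), an L2 hit means a small page was
// taken from a remote NUMA node, and an L3 hit means only an oversized page was
// found, which then has to be split or re-typed. A miss means the cache could
// not satisfy the allocation.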
static const ZStatCounter ZCounterPageCacheHitL1("Memory", "Page Cache Hit L1", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterPageCacheHitL2("Memory", "Page Cache Hit L2", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterPageCacheHitL3("Memory", "Page Cache Hit L3", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterPageCacheMiss("Memory", "Page Cache Miss", ZStatUnitOpsPerSecond);

ZPageCacheFlushClosure::ZPageCacheFlushClosure(size_t requested) :
    _requested(requested),
    _flushed(0) {}

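// Returns the number of bytes flushed in excess of the requested amount.
// Pages are flushed whole, so the last flushed page may overshoot the request.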
size_t ZPageCacheFlushClosure::overflushed() const {
  return _flushed > _requested ? _flushed - _requested : 0;
}

ZPageCache::ZPageCache() :
    _available(0),
    _small(),
    _medium(),
    _large() {}

ZPage* ZPageCache::alloc_small_page() {
  const uint32_t numa_id = ZNUMA::id();
  const uint32_t numa_count = ZNUMA::count();

  // Try NUMA local page cache
  ZPage* const l1_page = _small.get(numa_id).remove_first();
  if (l1_page != NULL) {
    ZStatInc(ZCounterPageCacheHitL1);
    return l1_page;
  }

  // Try NUMA remote page cache(s)
  uint32_t remote_numa_id = numa_id + 1;
  const uint32_t remote_numa_count = numa_count - 1;
  for (uint32_t i = 0; i < remote_numa_count; i++) {
    if (remote_numa_id == numa_count) {
      remote_numa_id = 0;
    }

    ZPage* const l2_page = _small.get(remote_numa_id).remove_first();
    if (l2_page != NULL) {
      ZStatInc(ZCounterPageCacheHitL2);
      return l2_page;
    }

    remote_numa_id++;
  }

  return NULL;
}

ZPage* ZPageCache::alloc_medium_page() {
  ZPage* const page = _medium.remove_first();
  if (page != NULL) {
    ZStatInc(ZCounterPageCacheHitL1);
    return page;
  }

  return NULL;
}

ZPage* ZPageCache::alloc_large_page(size_t size) {
  // Find a page with the right size
  ZListIterator<ZPage> iter(&_large);
  for (ZPage* page; iter.next(&page);) {
    if (size == page->size()) {
      // Page found
      _large.remove(page);
      ZStatInc(ZCounterPageCacheHitL1);
      return page;
    }
  }

  return NULL;
}

ZPage* ZPageCache::alloc_oversized_medium_page(size_t size) {
  if (size <= ZPageSizeMedium) {
    return _medium.remove_first();
  }

  return NULL;
}

ZPage* ZPageCache::alloc_oversized_large_page(size_t size) {
  // Find a page that is large enough
  ZListIterator<ZPage> iter(&_large);
  for (ZPage* page; iter.next(&page);) {
    if (size <= page->size()) {
      // Page found
      _large.remove(page);
      return page;
    }
  }

  return NULL;
}

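// Allocate a cached page that is at least as large as the requested size,
// preferring the large page list over the medium page list. The caller is
// responsible for splitting or re-typing the returned page.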
ZPage* ZPageCache::alloc_oversized_page(size_t size) {
  ZPage* page = alloc_oversized_large_page(size);
  if (page == NULL) {
    page = alloc_oversized_medium_page(size);
  }

  if (page != NULL) {
    ZStatInc(ZCounterPageCacheHitL3);
  }

  return page;
}

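// Allocate a page from the cache. First try to find a page of the exact type
// and size. If that fails, fall back to an oversized page, which is either
// split (with the remainder returned to the cache) or re-typed to match the
// requested type.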
ZPage* ZPageCache::alloc_page(uint8_t type, size_t size) {
  ZPage* page;

  // Try allocate exact page
  if (type == ZPageTypeSmall) {
    page = alloc_small_page();
  } else if (type == ZPageTypeMedium) {
    page = alloc_medium_page();
  } else {
    page = alloc_large_page(size);
  }

  if (page == NULL) {
    // Try allocate potentially oversized page
    ZPage* const oversized = alloc_oversized_page(size);
    if (oversized != NULL) {
      if (size < oversized->size()) {
        // Split oversized page
        page = oversized->split(type, size);

        // Cache remainder
        free_page_inner(oversized);
      } else {
        // Re-type correctly sized page
        page = oversized->retype(type);
      }
    }
  }

  if (page != NULL) {
    _available -= page->size();
  } else {
    ZStatInc(ZCounterPageCacheMiss);
  }

  return page;
}

void ZPageCache::free_page_inner(ZPage* page) {
  const uint8_t type = page->type();
  if (type == ZPageTypeSmall) {
    _small.get(page->numa_id()).insert_first(page);
  } else if (type == ZPageTypeMedium) {
    _medium.insert_first(page);
  } else {
    _large.insert_first(page);
  }
}

void ZPageCache::free_page(ZPage* page) {
  free_page_inner(page);
  _available += page->size();
}

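// Flush a single page from the tail of the list, i.e. the page that was least
// recently inserted. Returns false if the list is empty or if the closure
// declines to flush the page.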
bool ZPageCache::flush_list_inner(ZPageCacheFlushClosure* cl, ZList<ZPage>* from, ZList<ZPage>* to) {
  ZPage* const page = from->last();
  if (page == NULL || !cl->do_page(page)) {
    // Don't flush page
    return false;
  }

  // Flush page
  _available -= page->size();
  from->remove(page);
  to->insert_last(page);
  return true;
}

void ZPageCache::flush_list(ZPageCacheFlushClosure* cl, ZList<ZPage>* from, ZList<ZPage>* to) {
  while (flush_list_inner(cl, from, to));
}

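// Flush the per-NUMA small page lists in round-robin order. Flushing stops
// once a full round over all lists fails to flush any page.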
void ZPageCache::flush_per_numa_lists(ZPageCacheFlushClosure* cl, ZPerNUMA<ZList<ZPage> >* from, ZList<ZPage>* to) {
  const uint32_t numa_count = ZNUMA::count();
  uint32_t numa_done = 0;
  uint32_t numa_next = 0;

  // Flush lists round-robin
  while (numa_done < numa_count) {
    ZList<ZPage>* numa_list = from->addr(numa_next);
    if (++numa_next == numa_count) {
      numa_next = 0;
    }

    if (flush_list_inner(cl, numa_list, to)) {
      // Not done
      numa_done = 0;
    } else {
      // Done
      numa_done++;
    }
  }
}

void ZPageCache::flush(ZPageCacheFlushClosure* cl, ZList<ZPage>* to) {
  // Prefer flushing large, then medium and last small pages
  flush_list(cl, &_large, to);
  flush_list(cl, &_medium, to);
  flush_per_numa_lists(cl, &_small, to);
}