/*
 * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_UTILITIES_COPY_HPP
#define SHARE_UTILITIES_COPY_HPP

#include "runtime/stubRoutines.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

// Assembly code for platforms that need it.
extern "C" {
  void _Copy_conjoint_words(const HeapWord* from, HeapWord* to, size_t count);
  void _Copy_disjoint_words(const HeapWord* from, HeapWord* to, size_t count);

  void _Copy_conjoint_words_atomic(const HeapWord* from, HeapWord* to, size_t count);
  void _Copy_disjoint_words_atomic(const HeapWord* from, HeapWord* to, size_t count);

  void _Copy_aligned_conjoint_words(const HeapWord* from, HeapWord* to, size_t count);
  void _Copy_aligned_disjoint_words(const HeapWord* from, HeapWord* to, size_t count);

  void _Copy_conjoint_bytes(const void* from, void* to, size_t count);

  void _Copy_conjoint_bytes_atomic  (const void* from, void* to, size_t count);
  void _Copy_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count);
  void _Copy_conjoint_jints_atomic  (const jint* from, jint* to, size_t count);
  void _Copy_conjoint_jlongs_atomic (const jlong* from, jlong* to, size_t count);
  void _Copy_conjoint_oops_atomic   (const oop* from, oop* to, size_t count);

  void _Copy_arrayof_conjoint_bytes  (const HeapWord* from, HeapWord* to, size_t count);
  void _Copy_arrayof_conjoint_jshorts(const HeapWord* from, HeapWord* to, size_t count);
  void _Copy_arrayof_conjoint_jints  (const HeapWord* from, HeapWord* to, size_t count);
  void _Copy_arrayof_conjoint_jlongs (const HeapWord* from, HeapWord* to, size_t count);
  void _Copy_arrayof_conjoint_oops   (const HeapWord* from, HeapWord* to, size_t count);
}

class Copy : AllStatic {
 public:
  // Block copy methods have four attributes. We don't define all possibilities.
  //   alignment: aligned to BytesPerLong
  //   arrayof:   arraycopy operation with both operands aligned on the same
  //              boundary as the first element of an array of the copy unit.
  //              This is currently a HeapWord boundary on all platforms, except
  //              for long and double arrays, which are aligned on an 8-byte
  //              boundary on all platforms.
  //              arraycopy operations are implicitly atomic on each array element.
  //   overlap:   disjoint or conjoint.
  //   copy unit: bytes or words (i.e., HeapWords) or oops (i.e., pointers).
  //   atomicity: atomic or non-atomic on the copy unit.
  //
  // Names are constructed thusly:
  //
  //     [ 'aligned_' | 'arrayof_' ]
  //     ('conjoint_' | 'disjoint_')
  //     ('words' | 'bytes' | 'jshorts' | 'jints' | 'jlongs' | 'oops')
  //     [ '_atomic' ]
  //
  // Except in the arrayof case, whatever the alignment is, we assume we can copy
  // whole alignment units. E.g., if BytesPerLong is 2x word alignment, an odd
  // count may copy an extra word. In the arrayof case, we are allowed to copy
  // only the number of copy units specified.
  //
  // All callees check count for 0.
  //
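  // As an illustrative sketch of how the scheme reads (the names are among the
  // methods declared below; src, dst and n are hypothetical locals):
  //
  //   Copy::disjoint_words(src, dst, n);         // words, no overlap, not atomic per word
  //   Copy::conjoint_jints_atomic(src, dst, n);  // jints, may overlap, atomic per jint
  //   Copy::arrayof_conjoint_oops(src, dst, n);  // oop arraycopy, atomic per element
  //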

  // HeapWords

  // Word-aligned words, conjoint, not atomic on each word
  static void conjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
    assert_params_ok(from, to, HeapWordSize);
    pd_conjoint_words(from, to, count);
  }

  // Word-aligned words, disjoint, not atomic on each word
  static void disjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
    assert_params_ok(from, to, HeapWordSize);
    assert_disjoint(from, to, count);
    pd_disjoint_words(from, to, count);
  }

  // Word-aligned words, disjoint, atomic on each word
  static void disjoint_words_atomic(const HeapWord* from, HeapWord* to, size_t count) {
    assert_params_ok(from, to, HeapWordSize);
    assert_disjoint(from, to, count);
    pd_disjoint_words_atomic(from, to, count);
  }

  // Object-aligned words, conjoint, not atomic on each word
  static void aligned_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
    assert_params_aligned(from, to);
    pd_aligned_conjoint_words(from, to, count);
  }

  // Object-aligned words, disjoint, not atomic on each word
  static void aligned_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
    assert_params_aligned(from, to);
    assert_disjoint(from, to, count);
    pd_aligned_disjoint_words(from, to, count);
  }

  // bytes, jshorts, jints, jlongs, oops

  // bytes, conjoint, not atomic on each byte (not that it matters)
  static void conjoint_jbytes(const void* from, void* to, size_t count) {
    pd_conjoint_bytes(from, to, count);
  }

  // bytes, conjoint, atomic on each byte (not that it matters)
  static void conjoint_jbytes_atomic(const void* from, void* to, size_t count) {
    pd_conjoint_bytes(from, to, count);
  }

  // jshorts, conjoint, atomic on each jshort
  static void conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) {
    assert_params_ok(from, to, BytesPerShort);
    pd_conjoint_jshorts_atomic(from, to, count);
  }

  // jints, conjoint, atomic on each jint
  static void conjoint_jints_atomic(const jint* from, jint* to, size_t count) {
    assert_params_ok(from, to, BytesPerInt);
    pd_conjoint_jints_atomic(from, to, count);
  }

  // jlongs, conjoint, atomic on each jlong
  static void conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) {
    assert_params_ok(from, to, BytesPerLong);
    pd_conjoint_jlongs_atomic(from, to, count);
  }

  // oops, conjoint, atomic on each oop
  static void conjoint_oops_atomic(const oop* from, oop* to, size_t count) {
    assert_params_ok(from, to, BytesPerHeapOop);
    pd_conjoint_oops_atomic(from, to, count);
  }

  // overloaded for UseCompressedOops
  static void conjoint_oops_atomic(const narrowOop* from, narrowOop* to, size_t count) {
    assert(sizeof(narrowOop) == sizeof(jint), "this cast is wrong");
    assert_params_ok(from, to, BytesPerInt);
    pd_conjoint_jints_atomic((const jint*)from, (jint*)to, count);
  }

  // Copy a span of memory. If the span is an integral number of aligned
  // longs, words, or ints, copy those units atomically.
  // The largest atomic transfer unit is 8 bytes, or the largest power
  // of two which divides all of from, to, and size, whichever is smaller.
  static void conjoint_memory_atomic(const void* from, void* to, size_t size);
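
  // An illustrative reading of the transfer-unit rule above (a sketch of the
  // stated contract, not of the exact implementation): if from, to, and size
  // are all multiples of 8, a 24-byte span moves as three jlong transfers; if
  // size were 20, the unit drops to jint and the span moves as five jint
  // transfers; a span with no better common alignment moves byte by byte.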

  // bytes, conjoint array, atomic on each byte (not that it matters)
  static void arrayof_conjoint_jbytes(const HeapWord* from, HeapWord* to, size_t count) {
    pd_arrayof_conjoint_bytes(from, to, count);
  }

  // jshorts, conjoint array, atomic on each jshort
  static void arrayof_conjoint_jshorts(const HeapWord* from, HeapWord* to, size_t count) {
    assert_params_ok(from, to, BytesPerShort);
    pd_arrayof_conjoint_jshorts(from, to, count);
  }

  // jints, conjoint array, atomic on each jint
  static void arrayof_conjoint_jints(const HeapWord* from, HeapWord* to, size_t count) {
    assert_params_ok(from, to, BytesPerInt);
    pd_arrayof_conjoint_jints(from, to, count);
  }

  // jlongs, conjoint array, atomic on each jlong
  static void arrayof_conjoint_jlongs(const HeapWord* from, HeapWord* to, size_t count) {
    assert_params_ok(from, to, BytesPerLong);
    pd_arrayof_conjoint_jlongs(from, to, count);
  }

  // oops, conjoint array, atomic on each oop
  static void arrayof_conjoint_oops(const HeapWord* from, HeapWord* to, size_t count) {
    assert_params_ok(from, to, BytesPerHeapOop);
    pd_arrayof_conjoint_oops(from, to, count);
  }

  // Known overlap methods

  // Copy word-aligned words from higher to lower addresses, not atomic on each word
  inline static void conjoint_words_to_lower(const HeapWord* from, HeapWord* to, size_t byte_count) {
    // byte_count is in bytes to check its alignment
    assert_params_ok(from, to, HeapWordSize);
    assert_byte_count_ok(byte_count, HeapWordSize);

    size_t count = align_up(byte_count, HeapWordSize) >> LogHeapWordSize;
    assert(to <= from || from + count <= to, "do not overwrite source data");

    while (count-- > 0) {
      *to++ = *from++;
    }
  }

  // Copy word-aligned words from lower to higher addresses, not atomic on each word
  inline static void conjoint_words_to_higher(const HeapWord* from, HeapWord* to, size_t byte_count) {
    // byte_count is in bytes to check its alignment
    assert_params_ok(from, to, HeapWordSize);
    assert_byte_count_ok(byte_count, HeapWordSize);

    size_t count = align_up(byte_count, HeapWordSize) >> LogHeapWordSize;
    assert(from <= to || to + count <= from, "do not overwrite source data");

    from += count - 1;
    to   += count - 1;
    while (count-- > 0) {
      *to-- = *from--;
    }
  }
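
  // Illustrative use of the two overlap-aware copies above (a sketch; p is a
  // hypothetical word-aligned HeapWord* and n a word count). Sliding a block
  // down by two words is safe with the forward copy, sliding it up with the
  // backward copy:
  //
  //   Copy::conjoint_words_to_lower(p + 2, p, n * HeapWordSize);   // dest below source
  //   Copy::conjoint_words_to_higher(p, p + 2, n * HeapWordSize);  // dest above source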

  /**
   * Copy elements
   *
   * @param src address of source
   * @param dst address of destination
   * @param byte_count number of bytes to copy
   * @param elem_size size of the elements to copy
   */
  static void conjoint_copy(const void* src, void* dst, size_t byte_count, size_t elem_size);

  /**
   * Copy and *unconditionally* byte swap elements
   *
   * @param src address of source
   * @param dst address of destination
   * @param byte_count number of bytes to copy
   * @param elem_size size of the elements to copy-swap
   */
  static void conjoint_swap(const void* src, void* dst, size_t byte_count, size_t elem_size);

  /**
   * Copy elements, byte swapping from the specified endianness to the
   * native (cpu) endianness if the two differ
   *
   * @param src address of source
   * @param dst address of destination
   * @param byte_count number of bytes to copy
   * @param elem_size size of the elements to copy-swap
   */
  template <Endian::Order endian>
  static void conjoint_swap_if_needed(const void* src, void* dst, size_t byte_count, size_t elem_size) {
    if (Endian::NATIVE != endian) {
      conjoint_swap(src, dst, byte_count, elem_size);
    } else {
      conjoint_copy(src, dst, byte_count, elem_size);
    }
  }
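
  // Illustrative call (a sketch; src, dst and n are hypothetical, and the data
  // is assumed to be serialized big-endian jints):
  //
  //   Copy::conjoint_swap_if_needed<Endian::BIG>(src, dst, n * sizeof(jint), sizeof(jint));
  //
  // On a little-endian CPU this swaps each jint; on a big-endian CPU it
  // degenerates to a plain conjoint_copy.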

  // Fill methods

  // Fill word-aligned words, not atomic on each word
  // set_words
  static void fill_to_words(HeapWord* to, size_t count, juint value = 0) {
    assert_params_ok(to, HeapWordSize);
    pd_fill_to_words(to, count, value);
  }

  static void fill_to_aligned_words(HeapWord* to, size_t count, juint value = 0) {
    assert_params_aligned(to);
    pd_fill_to_aligned_words(to, count, value);
  }

  // Fill bytes
  static void fill_to_bytes(void* to, size_t count, jubyte value = 0) {
    pd_fill_to_bytes(to, count, value);
  }

  // Fill a span of memory. If the span is an integral number of aligned
  // longs, words, or ints, store to those units atomically.
  // The largest atomic transfer unit is 8 bytes, or the largest power
  // of two which divides both to and size, whichever is smaller.
  static void fill_to_memory_atomic(void* to, size_t size, jubyte value = 0);
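
  // Illustrative reading of the rule above (a sketch, mirroring
  // conjoint_memory_atomic): a 64-byte span at an 8-byte-aligned address is
  // filled with eight jlong stores; if either the address or the size is only
  // 4-byte aligned, the fill drops to jint stores, and so on down to bytes.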

  // Zero-fill methods

  // Zero word-aligned words, not atomic on each word
  static void zero_to_words(HeapWord* to, size_t count) {
    assert_params_ok(to, HeapWordSize);
    pd_zero_to_words(to, count);
  }

  // Zero bytes
  static void zero_to_bytes(void* to, size_t count) {
    pd_zero_to_bytes(to, count);
  }

 private:
  static bool params_disjoint(const HeapWord* from, HeapWord* to, size_t count) {
    if (from < to) {
      return pointer_delta(to, from) >= count;
    }
    return pointer_delta(from, to) >= count;
  }

  // These methods raise a fatal error if they detect a problem.

  static void assert_disjoint(const HeapWord* from, HeapWord* to, size_t count) {
    assert(params_disjoint(from, to, count), "source and dest overlap");
  }

  static void assert_params_ok(const void* from, void* to, intptr_t alignment) {
    assert(is_aligned(from, alignment), "must be aligned: " INTPTR_FORMAT, p2i(from));
    assert(is_aligned(to, alignment), "must be aligned: " INTPTR_FORMAT, p2i(to));
  }

  static void assert_params_ok(HeapWord* to, intptr_t alignment) {
    assert(is_aligned(to, alignment), "must be aligned: " INTPTR_FORMAT, p2i(to));
  }

  static void assert_params_aligned(const HeapWord* from, HeapWord* to) {
    assert(is_aligned(from, BytesPerLong), "must be aligned: " INTPTR_FORMAT, p2i(from));
    assert(is_aligned(to, BytesPerLong), "must be aligned: " INTPTR_FORMAT, p2i(to));
  }

  static void assert_params_aligned(HeapWord* to) {
    assert(is_aligned(to, BytesPerLong), "must be aligned: " INTPTR_FORMAT, p2i(to));
  }

  static void assert_byte_count_ok(size_t byte_count, size_t unit_size) {
    assert(is_aligned(byte_count, unit_size), "byte count must be aligned");
  }

  // Platform dependent implementations of the above methods.
#include CPU_HEADER(copy)

};

#endif // SHARE_UTILITIES_COPY_HPP