/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_X86_BYTES_X86_HPP
#define CPU_X86_BYTES_X86_HPP

#include "memory/allocation.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"

class Bytes: AllStatic {
 private:
#ifndef AMD64
  // Helper function for swap_u8
  static inline u8 swap_u8_base(u4 x, u4 y); // compiler-dependent implementation
#endif // AMD64

 public:
  // Efficient reading and writing of unaligned unsigned data in platform-specific byte ordering
  template <typename T>
  static inline T get_native(const void* p) {
    assert(p != NULL, "null pointer");

    T x;

    if (is_aligned(p, sizeof(T))) {
      x = *(T*)p;
    } else {
      memcpy(&x, p, sizeof(T));
    }

    return x;
  }

  template <typename T>
  static inline void put_native(void* p, T x) {
    assert(p != NULL, "null pointer");

    if (is_aligned(p, sizeof(T))) {
      *(T*)p = x;
    } else {
      memcpy(p, &x, sizeof(T));
    }
  }

  static inline u2   get_native_u2(address p)       { return get_native<u2>((void*)p); }
  static inline u4   get_native_u4(address p)       { return get_native<u4>((void*)p); }
  static inline u8   get_native_u8(address p)       { return get_native<u8>((void*)p); }
  static inline void put_native_u2(address p, u2 x) { put_native<u2>((void*)p, x); }
  static inline void put_native_u4(address p, u4 x) { put_native<u4>((void*)p, x); }
  static inline void put_native_u8(address p, u8 x) { put_native<u8>((void*)p, x); }
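  // Illustrative usage sketch (comment only, not part of the interface):
  // these accessors tolerate misaligned addresses, so a caller can read or
  // write at an arbitrary byte offset of a buffer in native (little-endian
  // on x86) byte order. The variable names below are hypothetical.
  //
  //   u1 buf[16];
  //   u4 v = Bytes::get_native_u4((address)&buf[1]);  // unaligned read
  //   Bytes::put_native_u4((address)&buf[1], v);      // unaligned write back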

  // Efficient reading and writing of unaligned unsigned data in Java
  // byte ordering (i.e. big-endian ordering). Byte-order reversal is
  // needed since x86 CPUs use little-endian format.
  template <typename T>
  static inline T get_Java(const address p) {
    T x = get_native<T>(p);

    if (Endian::is_Java_byte_ordering_different()) {
      x = swap<T>(x);
    }

    return x;
  }

  template <typename T>
  static inline void put_Java(address p, T x) {
    if (Endian::is_Java_byte_ordering_different()) {
      x = swap<T>(x);
    }

    put_native<T>(p, x);
  }

  static inline u2 get_Java_u2(address p) { return get_Java<u2>(p); }
  static inline u4 get_Java_u4(address p) { return get_Java<u4>(p); }
  static inline u8 get_Java_u8(address p) { return get_Java<u8>(p); }

  static inline void put_Java_u2(address p, u2 x) { put_Java<u2>(p, x); }
  static inline void put_Java_u4(address p, u4 x) { put_Java<u4>(p, x); }
  static inline void put_Java_u8(address p, u8 x) { put_Java<u8>(p, x); }
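  // Illustrative example (comment only): the *_Java accessors interpret the
  // underlying bytes in big-endian (Java class-file) order, so on
  // little-endian x86 they byte-swap. Given the two bytes { 0x12, 0x34 } at
  // p, get_Java_u2(p) yields 0x1234 while get_native_u2(p) yields 0x3412.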

  // Efficient swapping of byte ordering
  template <typename T>
  static T swap(T x) {
    switch (sizeof(T)) {
    case sizeof(u1): return x;
    case sizeof(u2): return swap_u2(x);
    case sizeof(u4): return swap_u4(x);
    case sizeof(u8): return swap_u8(x);
    default:
      guarantee(false, "invalid size: " SIZE_FORMAT "\n", sizeof(T));
      return 0;
    }
  }

  static inline u2 swap_u2(u2 x); // compiler-dependent implementation
  static inline u4 swap_u4(u4 x); // compiler-dependent implementation
  static inline u8 swap_u8(u8 x);
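
  // Expected swap semantics, shown as an illustrative comment:
  //   swap_u2(0x1122)             == 0x2211
  //   swap_u4(0x11223344)         == 0x44332211
  //   swap_u8(0x1122334455667788) == 0x8877665544332211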
};

// The following header contains the implementations of swap_u2, swap_u4, and swap_u8[_base]
#include OS_CPU_HEADER_INLINE(bytes)

#endif // CPU_X86_BYTES_X86_HPP