1 | /* |
2 | * Copyright (c) 2015, Intel Corporation |
3 | * |
4 | * Redistribution and use in source and binary forms, with or without |
5 | * modification, are permitted provided that the following conditions are met: |
6 | * |
7 | * * Redistributions of source code must retain the above copyright notice, |
8 | * this list of conditions and the following disclaimer. |
9 | * * Redistributions in binary form must reproduce the above copyright |
10 | * notice, this list of conditions and the following disclaimer in the |
11 | * documentation and/or other materials provided with the distribution. |
12 | * * Neither the name of Intel Corporation nor the names of its contributors |
13 | * may be used to endorse or promote products derived from this software |
14 | * without specific prior written permission. |
15 | * |
16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
17 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
18 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
19 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE |
20 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
21 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
22 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
23 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
24 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
25 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
26 | * POSSIBILITY OF SUCH DAMAGE. |
27 | */ |
28 | |
29 | #ifndef PARTIAL_STORE_H |
30 | #define PARTIAL_STORE_H |
31 | |
32 | #include "ue2common.h" |
33 | #include "unaligned.h" |
34 | |
/*
 * Helpers that load/store only the numBytes least significant bytes of a
 * value through a potentially unaligned pointer. Bytes are laid out in
 * little-endian order, consistent with the unaligned_*() helpers on the
 * little-endian targets this code is built for.
 */
36 | |
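/*
 * Stores the numBytes least significant bytes of value to ptr, which need
 * not be aligned. numBytes must be <= 4 (asserted); a numBytes of 0 stores
 * nothing.
 */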
37 | static really_inline |
38 | void partial_store_u32(void *ptr, u32 value, u32 numBytes) { |
39 | assert(numBytes <= 4); |
40 | switch (numBytes) { |
41 | case 4: |
42 | unaligned_store_u32(ptr, value); |
43 | break; |
44 | case 3: |
45 | unaligned_store_u16(ptr, (u16)value); |
46 | *((u8 *)ptr + 2) = (u8)(value >> 16); |
47 | break; |
48 | case 2: |
49 | unaligned_store_u16(ptr, (u16)value); |
50 | break; |
51 | case 1: |
52 | *(u8 *)ptr = (u8)value; |
53 | break; |
54 | case 0: |
55 | break; |
56 | } |
57 | } |
58 | |
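/*
 * Loads numBytes bytes from ptr (which need not be aligned) into the least
 * significant bytes of the return value; the remaining high bytes are zero.
 * numBytes must be <= 4 (asserted); a numBytes of 0 yields 0.
 */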
59 | static really_inline |
60 | u32 partial_load_u32(const void *ptr, u32 numBytes) { |
61 | u32 value; |
62 | assert(numBytes <= 4); |
63 | switch (numBytes) { |
64 | case 4: |
65 | value = unaligned_load_u32(ptr); |
66 | return value; |
67 | case 3: |
68 | value = unaligned_load_u16(ptr); |
69 | value |= ((u32)(*((const u8 *)ptr + 2)) << 16); |
70 | return value; |
71 | case 2: |
72 | value = unaligned_load_u16(ptr); |
73 | return value; |
74 | case 1: |
75 | value = *(const u8 *)ptr; |
76 | return value; |
77 | case 0: |
78 | break; |
79 | } |
80 | |
81 | return 0; |
82 | } |
83 | |
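/*
 * Stores the numBytes least significant bytes of value to ptr, which need
 * not be aligned. numBytes must be <= 8 (asserted); a numBytes of 0 stores
 * nothing.
 */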
84 | static really_inline |
85 | void partial_store_u64a(void *ptr, u64a value, u32 numBytes) { |
86 | assert(numBytes <= 8); |
87 | switch (numBytes) { |
88 | case 8: |
89 | unaligned_store_u64a(ptr, value); |
90 | break; |
91 | case 7: |
92 | unaligned_store_u32(ptr, (u32)value); |
93 | unaligned_store_u16((u8 *)ptr + 4, (u16)(value >> 32)); |
94 | *((u8 *)ptr + 6) = (u8)(value >> 48); |
95 | break; |
96 | case 6: |
97 | unaligned_store_u32(ptr, (u32)value); |
98 | unaligned_store_u16((u8 *)ptr + 4, (u16)(value >> 32)); |
99 | break; |
100 | case 5: |
101 | unaligned_store_u32(ptr, (u32)value); |
102 | *((u8 *)ptr + 4) = (u8)(value >> 32); |
103 | break; |
104 | case 4: |
105 | unaligned_store_u32(ptr, (u32)value); |
106 | break; |
107 | case 3: |
108 | unaligned_store_u16(ptr, (u16)value); |
109 | *((u8 *)ptr + 2) = (u8)(value >> 16); |
110 | break; |
111 | case 2: |
112 | unaligned_store_u16(ptr, (u16)value); |
113 | break; |
114 | case 1: |
115 | *(u8 *)ptr = (u8)value; |
116 | break; |
117 | case 0: |
118 | break; |
119 | } |
120 | } |
121 | |
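/*
 * Loads numBytes bytes from ptr (which need not be aligned) into the least
 * significant bytes of the return value; the remaining high bytes are zero.
 * numBytes must be <= 8 (asserted); a numBytes of 0 yields 0.
 */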
122 | static really_inline |
123 | u64a partial_load_u64a(const void *ptr, u32 numBytes) { |
124 | u64a value; |
125 | assert(numBytes <= 8); |
126 | switch (numBytes) { |
127 | case 8: |
128 | value = unaligned_load_u64a(ptr); |
129 | return value; |
130 | case 7: |
131 | value = unaligned_load_u32(ptr); |
132 | value |= (u64a)unaligned_load_u16((const u8 *)ptr + 4) << 32; |
133 | value |= (u64a)(*((const u8 *)ptr + 6)) << 48; |
134 | return value; |
135 | case 6: |
136 | value = unaligned_load_u32(ptr); |
137 | value |= (u64a)unaligned_load_u16((const u8 *)ptr + 4) << 32; |
138 | return value; |
139 | case 5: |
140 | value = unaligned_load_u32(ptr); |
141 | value |= (u64a)(*((const u8 *)ptr + 4)) << 32; |
142 | return value; |
143 | case 4: |
144 | value = unaligned_load_u32(ptr); |
145 | return value; |
146 | case 3: |
147 | value = unaligned_load_u16(ptr); |
148 | value |= (u64a)(*((const u8 *)ptr + 2)) << 16; |
149 | return value; |
150 | case 2: |
151 | value = unaligned_load_u16(ptr); |
152 | return value; |
153 | case 1: |
154 | value = *(const u8 *)ptr; |
155 | return value; |
156 | case 0: |
157 | break; |
158 | } |
159 | |
160 | return 0; |
161 | } |
162 | |
#endif /* PARTIAL_STORE_H */
164 | |