1/*
2 * x86 memory access helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include "qemu/osdep.h"
21#include "cpu.h"
22#include "exec/helper-proto.h"
23#include "exec/exec-all.h"
24#include "exec/cpu_ldst.h"
25#include "qemu/int128.h"
26#include "qemu/atomic128.h"
27#include "tcg.h"
28
29void helper_cmpxchg8b_unlocked(CPUX86State *env, target_ulong a0)
30{
31 uintptr_t ra = GETPC();
32 uint64_t oldv, cmpv, newv;
33 int eflags;
34
35 eflags = cpu_cc_compute_all(env, CC_OP);
36
37 cmpv = deposit64(env->regs[R_EAX], 32, 32, env->regs[R_EDX]);
38 newv = deposit64(env->regs[R_EBX], 32, 32, env->regs[R_ECX]);
39
40 oldv = cpu_ldq_data_ra(env, a0, ra);
41 newv = (cmpv == oldv ? newv : oldv);
42 /* always do the store */
43 cpu_stq_data_ra(env, a0, newv, ra);
44
45 if (oldv == cmpv) {
46 eflags |= CC_Z;
47 } else {
48 env->regs[R_EAX] = (uint32_t)oldv;
49 env->regs[R_EDX] = (uint32_t)(oldv >> 32);
50 eflags &= ~CC_Z;
51 }
52 CC_SRC = eflags;
53}
54
/*
 * Atomic (LOCK-prefixed) CMPXCHG8B.  Same register contract as the
 * unlocked variant, but the compare-and-swap on the 64-bit memory
 * operand at @a0 is performed atomically.
 */
void helper_cmpxchg8b(CPUX86State *env, target_ulong a0)
{
#ifdef CONFIG_ATOMIC64
    uint64_t oldv, cmpv, newv;
    int eflags;

    eflags = cpu_cc_compute_all(env, CC_OP);

    /* EDX:EAX is the comparand, ECX:EBX the replacement value.  */
    cmpv = deposit64(env->regs[R_EAX], 32, 32, env->regs[R_EDX]);
    newv = deposit64(env->regs[R_EBX], 32, 32, env->regs[R_ECX]);

#ifdef CONFIG_USER_ONLY
    {
        /*
         * User-mode: guest memory is directly addressable via g2h(),
         * so use the host atomic cmpxchg on the translated address.
         * Values are byte-swapped to/from guest little-endian order.
         */
        uint64_t *haddr = g2h(a0);
        cmpv = cpu_to_le64(cmpv);
        newv = cpu_to_le64(newv);
        oldv = atomic_cmpxchg__nocheck(haddr, cmpv, newv);
        oldv = le64_to_cpu(oldv);
    }
#else
    {
        /* System mode: go through the MMU-aware atomic helper.  */
        uintptr_t ra = GETPC();
        int mem_idx = cpu_mmu_index(env, false);
        TCGMemOpIdx oi = make_memop_idx(MO_TEQ, mem_idx);
        oldv = helper_atomic_cmpxchgq_le_mmu(env, a0, cmpv, newv, oi, ra);
    }
#endif

    if (oldv == cmpv) {
        eflags |= CC_Z;
    } else {
        /* Failure: EDX:EAX receives the value read from memory.  */
        env->regs[R_EAX] = (uint32_t)oldv;
        env->regs[R_EDX] = (uint32_t)(oldv >> 32);
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
#else
    /* Host lacks 64-bit atomics: retry the insn under exclusive execution.  */
    cpu_loop_exit_atomic(env_cpu(env), GETPC());
#endif /* CONFIG_ATOMIC64 */
}
95
96#ifdef TARGET_X86_64
97void helper_cmpxchg16b_unlocked(CPUX86State *env, target_ulong a0)
98{
99 uintptr_t ra = GETPC();
100 Int128 oldv, cmpv, newv;
101 uint64_t o0, o1;
102 int eflags;
103 bool success;
104
105 if ((a0 & 0xf) != 0) {
106 raise_exception_ra(env, EXCP0D_GPF, GETPC());
107 }
108 eflags = cpu_cc_compute_all(env, CC_OP);
109
110 cmpv = int128_make128(env->regs[R_EAX], env->regs[R_EDX]);
111 newv = int128_make128(env->regs[R_EBX], env->regs[R_ECX]);
112
113 o0 = cpu_ldq_data_ra(env, a0 + 0, ra);
114 o1 = cpu_ldq_data_ra(env, a0 + 8, ra);
115
116 oldv = int128_make128(o0, o1);
117 success = int128_eq(oldv, cmpv);
118 if (!success) {
119 newv = oldv;
120 }
121
122 cpu_stq_data_ra(env, a0 + 0, int128_getlo(newv), ra);
123 cpu_stq_data_ra(env, a0 + 8, int128_gethi(newv), ra);
124
125 if (success) {
126 eflags |= CC_Z;
127 } else {
128 env->regs[R_EAX] = int128_getlo(oldv);
129 env->regs[R_EDX] = int128_gethi(oldv);
130 eflags &= ~CC_Z;
131 }
132 CC_SRC = eflags;
133}
134
/*
 * Atomic (LOCK-prefixed) CMPXCHG16B.  Same register contract as the
 * unlocked variant; the 128-bit compare-and-swap is done atomically
 * when the host supports it, otherwise the insn is retried under
 * exclusive execution.
 */
void helper_cmpxchg16b(CPUX86State *env, target_ulong a0)
{
    uintptr_t ra = GETPC();

    /* The memory operand must be 16-byte aligned, else #GP(0).  */
    if ((a0 & 0xf) != 0) {
        raise_exception_ra(env, EXCP0D_GPF, ra);
    } else if (HAVE_CMPXCHG128) {
        int eflags = cpu_cc_compute_all(env, CC_OP);

        /* RDX:RAX is the comparand, RCX:RBX the replacement value.  */
        Int128 cmpv = int128_make128(env->regs[R_EAX], env->regs[R_EDX]);
        Int128 newv = int128_make128(env->regs[R_EBX], env->regs[R_ECX]);

        /* MO_ALIGN_16 re-checks alignment inside the slow path.  */
        int mem_idx = cpu_mmu_index(env, false);
        TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
        Int128 oldv = helper_atomic_cmpxchgo_le_mmu(env, a0, cmpv,
                                                    newv, oi, ra);

        if (int128_eq(oldv, cmpv)) {
            eflags |= CC_Z;
        } else {
            /* Failure: RDX:RAX receives the value read from memory.  */
            env->regs[R_EAX] = int128_getlo(oldv);
            env->regs[R_EDX] = int128_gethi(oldv);
            eflags &= ~CC_Z;
        }
        CC_SRC = eflags;
    } else {
        /* No host 128-bit cmpxchg: retry under exclusive execution.  */
        cpu_loop_exit_atomic(env_cpu(env), ra);
    }
}
164#endif
165
166void helper_boundw(CPUX86State *env, target_ulong a0, int v)
167{
168 int low, high;
169
170 low = cpu_ldsw_data_ra(env, a0, GETPC());
171 high = cpu_ldsw_data_ra(env, a0 + 2, GETPC());
172 v = (int16_t)v;
173 if (v < low || v > high) {
174 if (env->hflags & HF_MPX_EN_MASK) {
175 env->bndcs_regs.sts = 0;
176 }
177 raise_exception_ra(env, EXCP05_BOUND, GETPC());
178 }
179}
180
181void helper_boundl(CPUX86State *env, target_ulong a0, int v)
182{
183 int low, high;
184
185 low = cpu_ldl_data_ra(env, a0, GETPC());
186 high = cpu_ldl_data_ra(env, a0 + 4, GETPC());
187 if (v < low || v > high) {
188 if (env->hflags & HF_MPX_EN_MASK) {
189 env->bndcs_regs.sts = 0;
190 }
191 raise_exception_ra(env, EXCP05_BOUND, GETPC());
192 }
193}
194