/*
 * Helpers for CWP and PSTATE handling
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "trace.h"

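/* Copy one set of eight window or global registers. */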
static inline void memcpy32(target_ulong *dst, const target_ulong *src)
{
    dst[0] = src[0];
    dst[1] = src[1];
    dst[2] = src[2];
    dst[3] = src[3];
    dst[4] = src[4];
    dst[5] = src[5];
    dst[6] = src[6];
    dst[7] = src[7];
}

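/*
 * Register windows are stored back to back in env->regbase, and the %i
 * registers of the highest window overlap the %o registers of window 0.
 * While that wrap window is current, those eight shared registers are
 * accessed through a spare slot just past the last window
 * (env->regbase + nwindows * 16), so they are copied there on entry and
 * copied back to their canonical place on exit.
 */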
void cpu_set_cwp(CPUSPARCState *env, int new_cwp)
{
    /* put the modified wrap registers at their proper location */
    if (env->cwp == env->nwindows - 1) {
        memcpy32(env->regbase, env->regbase + env->nwindows * 16);
    }
    env->cwp = new_cwp;

    /* put the wrap registers at their temporary location */
    if (new_cwp == env->nwindows - 1) {
        memcpy32(env->regbase + env->nwindows * 16, env->regbase);
    }
    env->regwptr = env->regbase + (new_cwp * 16);
}

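/*
 * On sparc32 the PSR is reassembled from its cached pieces: impl/ver
 * (env->version), the integer condition codes, EF, PIL, S, PS, ET and
 * CWP.  On sparc64 only the icc bits are meaningful here; the remaining
 * state lives in PSTATE and friends.
 */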
target_ulong cpu_get_psr(CPUSPARCState *env)
{
    helper_compute_psr(env);

#if !defined(TARGET_SPARC64)
    return env->version | (env->psr & PSR_ICC) |
        (env->psref ? PSR_EF : 0) |
        (env->psrpil << 8) |
        (env->psrs ? PSR_S : 0) |
        (env->psrps ? PSR_PS : 0) |
        (env->psret ? PSR_ET : 0) | env->cwp;
#else
    return env->psr & PSR_ICC;
#endif
}

void cpu_put_psr_raw(CPUSPARCState *env, target_ulong val)
{
    env->psr = val & PSR_ICC;
#if !defined(TARGET_SPARC64)
    env->psref = (val & PSR_EF) ? 1 : 0;
    env->psrpil = (val & PSR_PIL) >> 8;
    env->psrs = (val & PSR_S) ? 1 : 0;
    env->psrps = (val & PSR_PS) ? 1 : 0;
    env->psret = (val & PSR_ET) ? 1 : 0;
#endif
    env->cc_op = CC_OP_FLAGS;
#if !defined(TARGET_SPARC64)
    cpu_set_cwp(env, val & PSR_CWP);
#endif
}

/* Called with BQL held */
void cpu_put_psr(CPUSPARCState *env, target_ulong val)
{
    cpu_put_psr_raw(env, val);
#if ((!defined(TARGET_SPARC64)) && !defined(CONFIG_USER_ONLY))
    cpu_check_irqs(env);
#endif
}

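/*
 * Wrap a window index back into [0, nwindows).  Callers pass values
 * that are at most one window out of range, so a single conditional
 * add or subtract is enough.
 */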
int cpu_cwp_inc(CPUSPARCState *env, int cwp)
{
    if (unlikely(cwp >= env->nwindows)) {
        cwp -= env->nwindows;
    }
    return cwp;
}

int cpu_cwp_dec(CPUSPARCState *env, int cwp)
{
    if (unlikely(cwp < 0)) {
        cwp += env->nwindows;
    }
    return cwp;
}

#ifndef TARGET_SPARC64
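/*
 * RETT: return from trap.  Executing RETT with traps already enabled
 * (PSR.ET set) is an illegal instruction; returning into an invalid
 * window raises a window underflow trap.  Otherwise traps are
 * re-enabled, CWP is rotated and the supervisor bit is restored from
 * its saved copy in PSR.PS.
 */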
void helper_rett(CPUSPARCState *env)
{
    unsigned int cwp;

    if (env->psret == 1) {
        cpu_raise_exception_ra(env, TT_ILL_INSN, GETPC());
    }

    env->psret = 1;
    cwp = cpu_cwp_inc(env, env->cwp + 1);
    if (env->wim & (1 << cwp)) {
        cpu_raise_exception_ra(env, TT_WIN_UNF, GETPC());
    }
    cpu_set_cwp(env, cwp);
    env->psrs = env->psrps;
}

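/*
 * V8 SAVE/RESTORE: rotate CWP and check the target window against WIM.
 * A set WIM bit marks the window invalid, so a window overflow (SAVE)
 * or underflow (RESTORE) trap is raised instead of switching.
 */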
/* XXX: use another pointer for %iN registers to avoid slow wrapping
   handling ? */
void helper_save(CPUSPARCState *env)
{
    uint32_t cwp;

    cwp = cpu_cwp_dec(env, env->cwp - 1);
    if (env->wim & (1 << cwp)) {
        cpu_raise_exception_ra(env, TT_WIN_OVF, GETPC());
    }
    cpu_set_cwp(env, cwp);
}

void helper_restore(CPUSPARCState *env)
{
    uint32_t cwp;

    cwp = cpu_cwp_inc(env, env->cwp + 1);
    if (env->wim & (1 << cwp)) {
        cpu_raise_exception_ra(env, TT_WIN_UNF, GETPC());
    }
    cpu_set_cwp(env, cwp);
}

void helper_wrpsr(CPUSPARCState *env, target_ulong new_psr)
{
    if ((new_psr & PSR_CWP) >= env->nwindows) {
        cpu_raise_exception_ra(env, TT_ILL_INSN, GETPC());
    } else {
        /* cpu_put_psr may trigger interrupts, hence BQL */
        qemu_mutex_lock_iothread();
        cpu_put_psr(env, new_psr);
        qemu_mutex_unlock_iothread();
    }
}

target_ulong helper_rdpsr(CPUSPARCState *env)
{
    return cpu_get_psr(env);
}

#else
/* XXX: use another pointer for %iN registers to avoid slow wrapping
   handling ? */
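/*
 * V9 SAVE: with no window left to save (CANSAVE == 0) a spill trap is
 * taken; the trap type selects the OTHER or NORMAL spill handler and
 * encodes the corresponding WSTATE field.  If every remaining window is
 * dirty (CLEANWIN - CANRESTORE == 0) a clean_window trap is taken
 * instead.  RESTORE is the mirror image, raising a fill trap when
 * CANRESTORE is zero.
 */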
void helper_save(CPUSPARCState *env)
{
    uint32_t cwp;

    cwp = cpu_cwp_dec(env, env->cwp - 1);
    if (env->cansave == 0) {
        int tt = TT_SPILL | (env->otherwin != 0
                             ? (TT_WOTHER | ((env->wstate & 0x38) >> 1))
                             : ((env->wstate & 0x7) << 2));
        cpu_raise_exception_ra(env, tt, GETPC());
    } else {
        if (env->cleanwin - env->canrestore == 0) {
            /* XXX Clean windows without trap */
            cpu_raise_exception_ra(env, TT_CLRWIN, GETPC());
        } else {
            env->cansave--;
            env->canrestore++;
            cpu_set_cwp(env, cwp);
        }
    }
}

void helper_restore(CPUSPARCState *env)
{
    uint32_t cwp;

    cwp = cpu_cwp_inc(env, env->cwp + 1);
    if (env->canrestore == 0) {
        int tt = TT_FILL | (env->otherwin != 0
                            ? (TT_WOTHER | ((env->wstate & 0x38) >> 1))
                            : ((env->wstate & 0x7) << 2));
        cpu_raise_exception_ra(env, tt, GETPC());
    } else {
        env->cansave++;
        env->canrestore--;
        cpu_set_cwp(env, cwp);
    }
}

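/*
 * FLUSHW forces every register window other than the current one out to
 * memory by raising a spill trap unless they have all been saved
 * already (cansave == nwindows - 2).
 */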
void helper_flushw(CPUSPARCState *env)
{
    if (env->cansave != env->nwindows - 2) {
        int tt = TT_SPILL | (env->otherwin != 0
                             ? (TT_WOTHER | ((env->wstate & 0x38) >> 1))
                             : ((env->wstate & 0x7) << 2));
        cpu_raise_exception_ra(env, tt, GETPC());
    }
}

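/*
 * SAVED and RESTORED are executed by spill and fill trap handlers to
 * account for a window that has just been written out to or read back
 * from memory, adjusting CANSAVE/CANRESTORE/OTHERWIN/CLEANWIN.
 */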
void helper_saved(CPUSPARCState *env)
{
    env->cansave++;
    if (env->otherwin == 0) {
        env->canrestore--;
    } else {
        env->otherwin--;
    }
}

void helper_restored(CPUSPARCState *env)
{
    env->canrestore++;
    if (env->cleanwin < env->nwindows - 1) {
        env->cleanwin++;
    }
    if (env->otherwin == 0) {
        env->cansave--;
    } else {
        env->otherwin--;
    }
}

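/*
 * The V9 CCR holds xcc in bits 7:4 and icc in bits 3:0.  Internally both
 * sets of flags are kept at the PSR_ICC position (bits 23:20), hence the
 * shifts by 20 when packing and unpacking.
 */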
target_ulong cpu_get_ccr(CPUSPARCState *env)
{
    target_ulong psr;

    psr = cpu_get_psr(env);

    return ((env->xcc >> 20) << 4) | ((psr & PSR_ICC) >> 20);
}

void cpu_put_ccr(CPUSPARCState *env, target_ulong val)
{
    env->xcc = (val >> 4) << 20;
    env->psr = (val & 0xf) << 20;
    CC_OP = CC_OP_FLAGS;
}

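/*
 * V9 numbers the register windows in the opposite direction to the
 * internal V8-style layout, so CWP is mirrored (and wrapped into range)
 * when converting between the two views.
 */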
target_ulong cpu_get_cwp64(CPUSPARCState *env)
{
    return env->nwindows - 1 - env->cwp;
}

void cpu_put_cwp64(CPUSPARCState *env, int cwp)
{
    if (unlikely(cwp >= env->nwindows || cwp < 0)) {
        cwp %= env->nwindows;
    }
    cpu_set_cwp(env, env->nwindows - 1 - cwp);
}

target_ulong helper_rdccr(CPUSPARCState *env)
{
    return cpu_get_ccr(env);
}

void helper_wrccr(CPUSPARCState *env, target_ulong new_ccr)
{
    cpu_put_ccr(env, new_ccr);
}

/* CWP handling is reversed in V9, but we still use the V8 register
   order. */
target_ulong helper_rdcwp(CPUSPARCState *env)
{
    return cpu_get_cwp64(env);
}

void helper_wrcwp(CPUSPARCState *env, target_ulong new_cwp)
{
    cpu_put_cwp64(env, new_cwp);
}

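/*
 * Pick the active set of global registers.  CPUs with the GL feature
 * index the globals by the GL register; older V9 CPUs select the
 * normal, alternate, MMU or interrupt globals via the PSTATE.AG/MG/IG
 * bits.
 */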
static inline uint64_t *get_gregset(CPUSPARCState *env, uint32_t pstate)
{
    if (env->def.features & CPU_FEATURE_GL) {
        return env->glregs + (env->gl & 7) * 8;
    }

    switch (pstate) {
    default:
        trace_win_helper_gregset_error(pstate);
        /* pass through to normal set of global registers */
    case 0:
        return env->bgregs;
    case PS_AG:
        return env->agregs;
    case PS_MG:
        return env->mgregs;
    case PS_IG:
        return env->igregs;
    }
}

static inline uint64_t *get_gl_gregset(CPUSPARCState *env, uint32_t gl)
{
    return env->glregs + (gl & 7) * 8;
}

/* Switch global register bank */
void cpu_gl_switch_gregs(CPUSPARCState *env, uint32_t new_gl)
{
    uint64_t *src, *dst;
    src = get_gl_gregset(env, new_gl);
    dst = get_gl_gregset(env, env->gl);

    if (src != dst) {
        memcpy32(dst, env->gregs);
        memcpy32(env->gregs, src);
    }
}

void helper_wrgl(CPUSPARCState *env, target_ulong new_gl)
{
    cpu_gl_switch_gregs(env, new_gl & 7);
    env->gl = new_gl & 7;
}

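/*
 * Writing PSTATE may switch the active global register set: the mask
 * 0xc01 covers the AG, MG and IG bits, and when they change, the
 * current globals are swapped out for the newly selected bank.  CPUs
 * with the GL feature have no alternate globals, so those bits are
 * masked out instead.
 */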
void cpu_change_pstate(CPUSPARCState *env, uint32_t new_pstate)
{
    uint32_t pstate_regs, new_pstate_regs;
    uint64_t *src, *dst;

    if (env->def.features & CPU_FEATURE_GL) {
        /* PS_AG, IG and MG are not implemented in this case */
        new_pstate &= ~(PS_AG | PS_IG | PS_MG);
        env->pstate = new_pstate;
        return;
    }

    pstate_regs = env->pstate & 0xc01;
    new_pstate_regs = new_pstate & 0xc01;

    if (new_pstate_regs != pstate_regs) {
        trace_win_helper_switch_pstate(pstate_regs, new_pstate_regs);

        /* Switch global register bank */
        src = get_gregset(env, new_pstate_regs);
        dst = get_gregset(env, pstate_regs);
        memcpy32(dst, env->gregs);
        memcpy32(env->gregs, src);
    } else {
        trace_win_helper_no_switch_pstate(new_pstate_regs);
    }
    env->pstate = new_pstate;
}

void helper_wrpstate(CPUSPARCState *env, target_ulong new_state)
{
    cpu_change_pstate(env, new_state & 0xf3f);

#if !defined(CONFIG_USER_ONLY)
    if (cpu_interrupts_enabled(env)) {
        qemu_mutex_lock_iothread();
        cpu_check_irqs(env);
        qemu_mutex_unlock_iothread();
    }
#endif
}

void helper_wrpil(CPUSPARCState *env, target_ulong new_pil)
{
#if !defined(CONFIG_USER_ONLY)
    trace_win_helper_wrpil(env->psrpil, (uint32_t)new_pil);

    env->psrpil = new_pil;

    if (cpu_interrupts_enabled(env)) {
        qemu_mutex_lock_iothread();
        cpu_check_irqs(env);
        qemu_mutex_unlock_iothread();
    }
#endif
}

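/*
 * DONE and RETRY return from the current trap level: CCR, ASI, PSTATE
 * and CWP (plus GL and HPSTATE when a hypervisor is present) are
 * restored from TSTATE and TL is decremented.  DONE resumes at TNPC,
 * i.e. after the trapped instruction; RETRY resumes at TPC and
 * re-executes it.
 */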
void helper_done(CPUSPARCState *env)
{
    trap_state *tsptr = cpu_tsptr(env);

    env->pc = tsptr->tnpc;
    env->npc = tsptr->tnpc + 4;
    cpu_put_ccr(env, tsptr->tstate >> 32);
    env->asi = (tsptr->tstate >> 24) & 0xff;
    cpu_change_pstate(env, (tsptr->tstate >> 8) & 0xf3f);
    cpu_put_cwp64(env, tsptr->tstate & 0xff);
    if (cpu_has_hypervisor(env)) {
        uint32_t new_gl = (tsptr->tstate >> 40) & 7;
        env->hpstate = env->htstate[env->tl];
        cpu_gl_switch_gregs(env, new_gl);
        env->gl = new_gl;
    }
    env->tl--;

    trace_win_helper_done(env->tl);

#if !defined(CONFIG_USER_ONLY)
    if (cpu_interrupts_enabled(env)) {
        qemu_mutex_lock_iothread();
        cpu_check_irqs(env);
        qemu_mutex_unlock_iothread();
    }
#endif
}

void helper_retry(CPUSPARCState *env)
{
    trap_state *tsptr = cpu_tsptr(env);

    env->pc = tsptr->tpc;
    env->npc = tsptr->tnpc;
    cpu_put_ccr(env, tsptr->tstate >> 32);
    env->asi = (tsptr->tstate >> 24) & 0xff;
    cpu_change_pstate(env, (tsptr->tstate >> 8) & 0xf3f);
    cpu_put_cwp64(env, tsptr->tstate & 0xff);
    if (cpu_has_hypervisor(env)) {
        uint32_t new_gl = (tsptr->tstate >> 40) & 7;
        env->hpstate = env->htstate[env->tl];
        cpu_gl_switch_gregs(env, new_gl);
        env->gl = new_gl;
    }
    env->tl--;

    trace_win_helper_retry(env->tl);

#if !defined(CONFIG_USER_ONLY)
    if (cpu_interrupts_enabled(env)) {
        qemu_mutex_lock_iothread();
        cpu_check_irqs(env);
        qemu_mutex_unlock_iothread();
    }
#endif
}
#endif