1/*
2 * S/390 helpers
3 *
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2011 Alexander Graf
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21#include "qemu/osdep.h"
22#include "cpu.h"
23#include "internal.h"
24#include "exec/gdbstub.h"
25#include "qemu/timer.h"
26#include "qemu/qemu-print.h"
27#include "hw/s390x/ioinst.h"
28#include "sysemu/hw_accel.h"
29#include "sysemu/runstate.h"
30#ifndef CONFIG_USER_ONLY
31#include "sysemu/tcg.h"
32#endif
33
34#ifndef CONFIG_USER_ONLY
35void s390x_tod_timer(void *opaque)
36{
37 cpu_inject_clock_comparator((S390CPU *) opaque);
38}
39
40void s390x_cpu_timer(void *opaque)
41{
42 cpu_inject_cpu_timer((S390CPU *) opaque);
43}
44#endif
45
46#ifndef CONFIG_USER_ONLY
47
48hwaddr s390_cpu_get_phys_page_debug(CPUState *cs, vaddr vaddr)
49{
50 S390CPU *cpu = S390_CPU(cs);
51 CPUS390XState *env = &cpu->env;
52 target_ulong raddr;
53 int prot;
54 uint64_t asc = env->psw.mask & PSW_MASK_ASC;
55
56 /* 31-Bit mode */
57 if (!(env->psw.mask & PSW_MASK_64)) {
58 vaddr &= 0x7fffffff;
59 }
60
61 /* We want to read the code (e.g., see what we are single-stepping).*/
62 if (asc != PSW_ASC_HOME) {
63 asc = PSW_ASC_PRIMARY;
64 }
65
66 if (mmu_translate(env, vaddr, MMU_INST_FETCH, asc, &raddr, &prot, false)) {
67 return -1;
68 }
69 return raddr;
70}
71
72hwaddr s390_cpu_get_phys_addr_debug(CPUState *cs, vaddr vaddr)
73{
74 hwaddr phys_addr;
75 target_ulong page;
76
77 page = vaddr & TARGET_PAGE_MASK;
78 phys_addr = cpu_get_phys_page_debug(cs, page);
79 phys_addr += (vaddr & ~TARGET_PAGE_MASK);
80
81 return phys_addr;
82}
83
/*
 * A disabled-wait PSW with address 0xfff is the magic "signal quiesce"
 * value a guest loads to request an orderly shutdown.
 */
static inline bool is_special_wait_psw(uint64_t psw_addr)
{
    const uint64_t quiesce_addr = 0xfffUL;

    return psw_addr == quiesce_addr;
}
89
90void s390_handle_wait(S390CPU *cpu)
91{
92 CPUState *cs = CPU(cpu);
93
94 if (s390_cpu_halt(cpu) == 0) {
95#ifndef CONFIG_USER_ONLY
96 if (is_special_wait_psw(cpu->env.psw.addr)) {
97 qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
98 } else {
99 cpu->env.crash_reason = S390_CRASH_REASON_DISABLED_WAIT;
100 qemu_system_guest_panicked(cpu_get_crash_info(cs));
101 }
102#endif
103 }
104}
105
106void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr)
107{
108 uint64_t old_mask = env->psw.mask;
109
110 env->psw.addr = addr;
111 env->psw.mask = mask;
112
113 /* KVM will handle all WAITs and trigger a WAIT exit on disabled_wait */
114 if (!tcg_enabled()) {
115 return;
116 }
117 env->cc_op = (mask >> 44) & 3;
118
119 if ((old_mask ^ mask) & PSW_MASK_PER) {
120 s390_cpu_recompute_watchpoints(env_cpu(env));
121 }
122
123 if (mask & PSW_MASK_WAIT) {
124 s390_handle_wait(env_archcpu(env));
125 }
126}
127
128uint64_t get_psw_mask(CPUS390XState *env)
129{
130 uint64_t r = env->psw.mask;
131
132 if (tcg_enabled()) {
133 env->cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst,
134 env->cc_vr);
135
136 r &= ~PSW_MASK_CC;
137 assert(!(env->cc_op & ~3));
138 r |= (uint64_t)env->cc_op << 44;
139 }
140
141 return r;
142}
143
144LowCore *cpu_map_lowcore(CPUS390XState *env)
145{
146 LowCore *lowcore;
147 hwaddr len = sizeof(LowCore);
148
149 lowcore = cpu_physical_memory_map(env->psa, &len, 1);
150
151 if (len < sizeof(LowCore)) {
152 cpu_abort(env_cpu(env), "Could not map lowcore\n");
153 }
154
155 return lowcore;
156}
157
158void cpu_unmap_lowcore(LowCore *lowcore)
159{
160 cpu_physical_memory_unmap(lowcore, sizeof(LowCore), 1, sizeof(LowCore));
161}
162
163void do_restart_interrupt(CPUS390XState *env)
164{
165 uint64_t mask, addr;
166 LowCore *lowcore;
167
168 lowcore = cpu_map_lowcore(env);
169
170 lowcore->restart_old_psw.mask = cpu_to_be64(get_psw_mask(env));
171 lowcore->restart_old_psw.addr = cpu_to_be64(env->psw.addr);
172 mask = be64_to_cpu(lowcore->restart_new_psw.mask);
173 addr = be64_to_cpu(lowcore->restart_new_psw.addr);
174
175 cpu_unmap_lowcore(lowcore);
176 env->pending_int &= ~INTERRUPT_RESTART;
177
178 load_psw(env, mask, addr);
179}
180
181void s390_cpu_recompute_watchpoints(CPUState *cs)
182{
183 const int wp_flags = BP_CPU | BP_MEM_WRITE | BP_STOP_BEFORE_ACCESS;
184 S390CPU *cpu = S390_CPU(cs);
185 CPUS390XState *env = &cpu->env;
186
187 /* We are called when the watchpoints have changed. First
188 remove them all. */
189 cpu_watchpoint_remove_all(cs, BP_CPU);
190
191 /* Return if PER is not enabled */
192 if (!(env->psw.mask & PSW_MASK_PER)) {
193 return;
194 }
195
196 /* Return if storage-alteration event is not enabled. */
197 if (!(env->cregs[9] & PER_CR9_EVENT_STORE)) {
198 return;
199 }
200
201 if (env->cregs[10] == 0 && env->cregs[11] == -1LL) {
202 /* We can't create a watchoint spanning the whole memory range, so
203 split it in two parts. */
204 cpu_watchpoint_insert(cs, 0, 1ULL << 63, wp_flags, NULL);
205 cpu_watchpoint_insert(cs, 1ULL << 63, 1ULL << 63, wp_flags, NULL);
206 } else if (env->cregs[10] > env->cregs[11]) {
207 /* The address range loops, create two watchpoints. */
208 cpu_watchpoint_insert(cs, env->cregs[10], -env->cregs[10],
209 wp_flags, NULL);
210 cpu_watchpoint_insert(cs, 0, env->cregs[11] + 1, wp_flags, NULL);
211
212 } else {
213 /* Default case, create a single watchpoint. */
214 cpu_watchpoint_insert(cs, env->cregs[10],
215 env->cregs[11] - env->cregs[10] + 1,
216 wp_flags, NULL);
217 }
218}
219
/*
 * Layout of the 512-byte SIGP STORE STATUS save area. All fields are
 * stored in big-endian byte order; the trailing comments give each
 * field's byte offset within the area.
 */
typedef struct SigpSaveArea {
    uint64_t fprs[16]; /* 0x0000 */
    uint64_t grs[16]; /* 0x0080 */
    PSW psw; /* 0x0100 */
    uint8_t pad_0x0110[0x0118 - 0x0110]; /* 0x0110 */
    uint32_t prefix; /* 0x0118 */
    uint32_t fpc; /* 0x011c */
    uint8_t pad_0x0120[0x0124 - 0x0120]; /* 0x0120 */
    uint32_t todpr; /* 0x0124 */
    uint64_t cputm; /* 0x0128 */
    uint64_t ckc; /* 0x0130 */
    uint8_t pad_0x0138[0x0140 - 0x0138]; /* 0x0138 */
    uint32_t ars[16]; /* 0x0140 */
    uint64_t crs[16]; /* 0x0180 */
} SigpSaveArea;
QEMU_BUILD_BUG_ON(sizeof(SigpSaveArea) != 512);
236
237int s390_store_status(S390CPU *cpu, hwaddr addr, bool store_arch)
238{
239 static const uint8_t ar_id = 1;
240 SigpSaveArea *sa;
241 hwaddr len = sizeof(*sa);
242 int i;
243
244 sa = cpu_physical_memory_map(addr, &len, 1);
245 if (!sa) {
246 return -EFAULT;
247 }
248 if (len != sizeof(*sa)) {
249 cpu_physical_memory_unmap(sa, len, 1, 0);
250 return -EFAULT;
251 }
252
253 if (store_arch) {
254 cpu_physical_memory_write(offsetof(LowCore, ar_access_id), &ar_id, 1);
255 }
256 for (i = 0; i < 16; ++i) {
257 sa->fprs[i] = cpu_to_be64(*get_freg(&cpu->env, i));
258 }
259 for (i = 0; i < 16; ++i) {
260 sa->grs[i] = cpu_to_be64(cpu->env.regs[i]);
261 }
262 sa->psw.addr = cpu_to_be64(cpu->env.psw.addr);
263 sa->psw.mask = cpu_to_be64(get_psw_mask(&cpu->env));
264 sa->prefix = cpu_to_be32(cpu->env.psa);
265 sa->fpc = cpu_to_be32(cpu->env.fpc);
266 sa->todpr = cpu_to_be32(cpu->env.todpr);
267 sa->cputm = cpu_to_be64(cpu->env.cputm);
268 sa->ckc = cpu_to_be64(cpu->env.ckc >> 8);
269 for (i = 0; i < 16; ++i) {
270 sa->ars[i] = cpu_to_be32(cpu->env.aregs[i]);
271 }
272 for (i = 0; i < 16; ++i) {
273 sa->crs[i] = cpu_to_be64(cpu->env.cregs[i]);
274 }
275
276 cpu_physical_memory_unmap(sa, len, 1, len);
277
278 return 0;
279}
280
/*
 * Layout of the 4 KiB additional-status (adtl) save area used by SIGP
 * STORE ADDITIONAL STATUS: vector registers first, then the
 * guarded-storage control block. Trailing comments give byte offsets.
 */
typedef struct SigpAdtlSaveArea {
    uint64_t vregs[32][2]; /* 0x0000 */
    uint8_t pad_0x0200[0x0400 - 0x0200]; /* 0x0200 */
    uint64_t gscb[4]; /* 0x0400 */
    uint8_t pad_0x0420[0x1000 - 0x0420]; /* 0x0420 */
} SigpAdtlSaveArea;
QEMU_BUILD_BUG_ON(sizeof(SigpAdtlSaveArea) != 4096);
288
289#define ADTL_GS_MIN_SIZE 2048 /* minimal size of adtl save area for GS */
290int s390_store_adtl_status(S390CPU *cpu, hwaddr addr, hwaddr len)
291{
292 SigpAdtlSaveArea *sa;
293 hwaddr save = len;
294 int i;
295
296 sa = cpu_physical_memory_map(addr, &save, 1);
297 if (!sa) {
298 return -EFAULT;
299 }
300 if (save != len) {
301 cpu_physical_memory_unmap(sa, len, 1, 0);
302 return -EFAULT;
303 }
304
305 if (s390_has_feat(S390_FEAT_VECTOR)) {
306 for (i = 0; i < 32; i++) {
307 sa->vregs[i][0] = cpu_to_be64(cpu->env.vregs[i][0]);
308 sa->vregs[i][1] = cpu_to_be64(cpu->env.vregs[i][1]);
309 }
310 }
311 if (s390_has_feat(S390_FEAT_GUARDED_STORAGE) && len >= ADTL_GS_MIN_SIZE) {
312 for (i = 0; i < 4; i++) {
313 sa->gscb[i] = cpu_to_be64(cpu->env.gscb[i]);
314 }
315 }
316
317 cpu_physical_memory_unmap(sa, len, 1, len);
318 return 0;
319}
320#endif /* CONFIG_USER_ONLY */
321
322void s390_cpu_dump_state(CPUState *cs, FILE *f, int flags)
323{
324 S390CPU *cpu = S390_CPU(cs);
325 CPUS390XState *env = &cpu->env;
326 int i;
327
328 if (env->cc_op > 3) {
329 qemu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
330 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
331 } else {
332 qemu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
333 env->psw.mask, env->psw.addr, env->cc_op);
334 }
335
336 for (i = 0; i < 16; i++) {
337 qemu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
338 if ((i % 4) == 3) {
339 qemu_fprintf(f, "\n");
340 } else {
341 qemu_fprintf(f, " ");
342 }
343 }
344
345 if (flags & CPU_DUMP_FPU) {
346 if (s390_has_feat(S390_FEAT_VECTOR)) {
347 for (i = 0; i < 32; i++) {
348 qemu_fprintf(f, "V%02d=%016" PRIx64 "%016" PRIx64 "%c",
349 i, env->vregs[i][0], env->vregs[i][1],
350 i % 2 ? '\n' : ' ');
351 }
352 } else {
353 for (i = 0; i < 16; i++) {
354 qemu_fprintf(f, "F%02d=%016" PRIx64 "%c",
355 i, *get_freg(env, i),
356 (i % 4) == 3 ? '\n' : ' ');
357 }
358 }
359 }
360
361#ifndef CONFIG_USER_ONLY
362 for (i = 0; i < 16; i++) {
363 qemu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
364 if ((i % 4) == 3) {
365 qemu_fprintf(f, "\n");
366 } else {
367 qemu_fprintf(f, " ");
368 }
369 }
370#endif
371
372#ifdef DEBUG_INLINE_BRANCHES
373 for (i = 0; i < CC_OP_MAX; i++) {
374 qemu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
375 inline_branch_miss[i], inline_branch_hit[i]);
376 }
377#endif
378
379 qemu_fprintf(f, "\n");
380}
381
382const char *cc_name(enum cc_op cc_op)
383{
384 static const char * const cc_names[] = {
385 [CC_OP_CONST0] = "CC_OP_CONST0",
386 [CC_OP_CONST1] = "CC_OP_CONST1",
387 [CC_OP_CONST2] = "CC_OP_CONST2",
388 [CC_OP_CONST3] = "CC_OP_CONST3",
389 [CC_OP_DYNAMIC] = "CC_OP_DYNAMIC",
390 [CC_OP_STATIC] = "CC_OP_STATIC",
391 [CC_OP_NZ] = "CC_OP_NZ",
392 [CC_OP_LTGT_32] = "CC_OP_LTGT_32",
393 [CC_OP_LTGT_64] = "CC_OP_LTGT_64",
394 [CC_OP_LTUGTU_32] = "CC_OP_LTUGTU_32",
395 [CC_OP_LTUGTU_64] = "CC_OP_LTUGTU_64",
396 [CC_OP_LTGT0_32] = "CC_OP_LTGT0_32",
397 [CC_OP_LTGT0_64] = "CC_OP_LTGT0_64",
398 [CC_OP_ADD_64] = "CC_OP_ADD_64",
399 [CC_OP_ADDU_64] = "CC_OP_ADDU_64",
400 [CC_OP_ADDC_64] = "CC_OP_ADDC_64",
401 [CC_OP_SUB_64] = "CC_OP_SUB_64",
402 [CC_OP_SUBU_64] = "CC_OP_SUBU_64",
403 [CC_OP_SUBB_64] = "CC_OP_SUBB_64",
404 [CC_OP_ABS_64] = "CC_OP_ABS_64",
405 [CC_OP_NABS_64] = "CC_OP_NABS_64",
406 [CC_OP_ADD_32] = "CC_OP_ADD_32",
407 [CC_OP_ADDU_32] = "CC_OP_ADDU_32",
408 [CC_OP_ADDC_32] = "CC_OP_ADDC_32",
409 [CC_OP_SUB_32] = "CC_OP_SUB_32",
410 [CC_OP_SUBU_32] = "CC_OP_SUBU_32",
411 [CC_OP_SUBB_32] = "CC_OP_SUBB_32",
412 [CC_OP_ABS_32] = "CC_OP_ABS_32",
413 [CC_OP_NABS_32] = "CC_OP_NABS_32",
414 [CC_OP_COMP_32] = "CC_OP_COMP_32",
415 [CC_OP_COMP_64] = "CC_OP_COMP_64",
416 [CC_OP_TM_32] = "CC_OP_TM_32",
417 [CC_OP_TM_64] = "CC_OP_TM_64",
418 [CC_OP_NZ_F32] = "CC_OP_NZ_F32",
419 [CC_OP_NZ_F64] = "CC_OP_NZ_F64",
420 [CC_OP_NZ_F128] = "CC_OP_NZ_F128",
421 [CC_OP_ICM] = "CC_OP_ICM",
422 [CC_OP_SLA_32] = "CC_OP_SLA_32",
423 [CC_OP_SLA_64] = "CC_OP_SLA_64",
424 [CC_OP_FLOGR] = "CC_OP_FLOGR",
425 [CC_OP_LCBB] = "CC_OP_LCBB",
426 [CC_OP_VC] = "CC_OP_VC",
427 };
428
429 return cc_names[cc_op];
430}
431