/*
 * s390x exception / interrupt helpers
 *
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "exec/helper-proto.h"
#include "qemu/timer.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "hw/s390x/ioinst.h"
#include "exec/address-spaces.h"
#include "tcg_s390x.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#include "hw/s390x/s390_flic.h"
#include "hw/boards.h"
#endif

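/*
 * Raise a program interrupt from a TCG helper: unwind the guest CPU state
 * to the faulting instruction, log it, record the interruption code and
 * instruction length for later delivery, and leave the CPU loop.
 */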
void QEMU_NORETURN tcg_s390_program_interrupt(CPUS390XState *env, uint32_t code,
                                              int ilen, uintptr_t ra)
{
    CPUState *cs = env_cpu(env);

    cpu_restore_state(cs, ra, true);
    qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n",
                  env->psw.addr);
    trigger_pgm_exception(env, code, ilen);
    cpu_loop_exit(cs);
}

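/*
 * Raise a data exception (PGM_DATA) with the given data-exception code (DXC).
 * The DXC is stored into the lowcore (system emulation only) and, if the
 * AFP-register control in CR0 is set, also into the FPC.
 */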
void QEMU_NORETURN tcg_s390_data_exception(CPUS390XState *env, uint32_t dxc,
                                           uintptr_t ra)
{
    g_assert(dxc <= 0xff);
#if !defined(CONFIG_USER_ONLY)
    /* Store the DXC into the lowcore */
    stl_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, data_exc_code), dxc);
#endif

    /* Store the DXC into the FPC if AFP is enabled */
    if (env->cregs[0] & CR0_AFP) {
        env->fpc = deposit32(env->fpc, 8, 8, dxc);
    }
    tcg_s390_program_interrupt(env, PGM_DATA, ILEN_AUTO, ra);
}

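/*
 * Raise a vector-processing exception with the given vector-exception code
 * (VXC). The VXC is stored into the lowcore (system emulation only) and
 * into the FPC; without AFP its value there is undefined anyway.
 */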
void QEMU_NORETURN tcg_s390_vector_exception(CPUS390XState *env, uint32_t vxc,
                                             uintptr_t ra)
{
    g_assert(vxc <= 0xff);
#if !defined(CONFIG_USER_ONLY)
    /* Always store the VXC into the lowcore, without AFP it is undefined */
    stl_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, data_exc_code), vxc);
#endif

    /* Always store the VXC into the FPC, without AFP it is undefined */
    env->fpc = deposit32(env->fpc, 8, 8, vxc);
    tcg_s390_program_interrupt(env, PGM_VECTOR_PROCESSING, ILEN_AUTO, ra);
}

void HELPER(data_exception)(CPUS390XState *env, uint32_t dxc)
{
    tcg_s390_data_exception(env, dxc, GETPC());
}

#if defined(CONFIG_USER_ONLY)

void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);

    trigger_pgm_exception(&cpu->env, PGM_ADDRESSING, ILEN_AUTO);
    /* On real machines this value is dropped into LowMem. Since this
       is userland, simply put this someplace that cpu_loop can find it. */
    cpu->env.__excp_addr = address;
    cpu_loop_exit_restore(cs, retaddr);
}

#else /* !CONFIG_USER_ONLY */

static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
{
    switch (mmu_idx) {
    case MMU_PRIMARY_IDX:
        return PSW_ASC_PRIMARY;
    case MMU_SECONDARY_IDX:
        return PSW_ASC_SECONDARY;
    case MMU_HOME_IDX:
        return PSW_ASC_HOME;
    default:
        abort();
    }
}

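/*
 * TLB fill for system emulation: translate the virtual address according to
 * the MMU index (primary/secondary/home or real mode), verify that the
 * resulting real address is backed by memory, and either install the TLB
 * entry or raise the corresponding program interrupt.
 */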
bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong vaddr, raddr;
    uint64_t asc;
    int prot, fail;

    qemu_log_mask(CPU_LOG_MMU, "%s: addr 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
                  __func__, address, access_type, mmu_idx);

    vaddr = address;

    if (mmu_idx < MMU_REAL_IDX) {
        asc = cpu_mmu_idx_to_asc(mmu_idx);
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        fail = mmu_translate(env, vaddr, access_type, asc, &raddr, &prot, true);
    } else if (mmu_idx == MMU_REAL_IDX) {
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        fail = mmu_translate_real(env, vaddr, access_type, &raddr, &prot);
    } else {
        g_assert_not_reached();
    }

    /* check out of RAM access */
    if (!fail &&
        !address_space_access_valid(&address_space_memory, raddr,
                                    TARGET_PAGE_SIZE, access_type,
                                    MEMTXATTRS_UNSPECIFIED)) {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n",
                      __func__, (uint64_t)raddr, (uint64_t)ram_size);
        trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_AUTO);
        fail = 1;
    }

    if (!fail) {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
                      __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);
        tlb_set_page(cs, address & TARGET_PAGE_MASK, raddr, prot,
                     mmu_idx, TARGET_PAGE_SIZE);
        return true;
    }
    if (probe) {
        return false;
    }

    cpu_restore_state(cs, retaddr, true);

    /*
     * The ILC value for code accesses is undefined.  The important
     * thing here is to *not* leave env->int_pgm_ilen set to ILEN_AUTO,
     * which would cause do_program_interrupt to attempt to read from
     * env->psw.addr again.  Cf. the condition in trigger_page_fault,
     * which is not universally applied.
     *
     * ??? If we remove ILEN_AUTO, by moving the computation of ILEN
     * into cpu_restore_state, then we may remove this entirely.
     */
    if (access_type == MMU_INST_FETCH) {
        env->int_pgm_ilen = 2;
    }

    cpu_loop_exit(cs);
}

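/*
 * Deliver a program interrupt: advance the PSW past the instruction for
 * non-nullifying exceptions, merge any pending PER event into the
 * interruption code, store code, ILC and old PSW into the lowcore, and
 * load the program-new PSW.
 */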
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;

    if (ilen == ILEN_AUTO) {
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
    }
    assert(ilen == 2 || ilen == 4 || ilen == 6);

    switch (env->int_pgm_code) {
    case PGM_PER:
        if (env->per_perc_atmid & PER_CODE_EVENT_NULLIFICATION) {
            break;
        }
        /* FALL THROUGH */
    case PGM_OPERATION:
    case PGM_PRIVILEGED:
    case PGM_EXECUTE:
    case PGM_PROTECTION:
    case PGM_ADDRESSING:
    case PGM_SPECIFICATION:
    case PGM_DATA:
    case PGM_FIXPT_OVERFLOW:
    case PGM_FIXPT_DIVIDE:
    case PGM_DEC_OVERFLOW:
    case PGM_DEC_DIVIDE:
    case PGM_HFP_EXP_OVERFLOW:
    case PGM_HFP_EXP_UNDERFLOW:
    case PGM_HFP_SIGNIFICANCE:
    case PGM_HFP_DIVIDE:
    case PGM_TRANS_SPEC:
    case PGM_SPECIAL_OP:
    case PGM_OPERAND:
    case PGM_HFP_SQRT:
    case PGM_PC_TRANS_SPEC:
    case PGM_ALET_SPEC:
    case PGM_MONITOR:
        /* advance the PSW if our exception is not nullifying */
        env->psw.addr += ilen;
        break;
    }

    qemu_log_mask(CPU_LOG_INT,
                  "%s: code=0x%x ilen=%d psw: %" PRIx64 " %" PRIx64 "\n",
                  __func__, env->int_pgm_code, ilen, env->psw.mask,
                  env->psw.addr);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception. */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);
}

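/*
 * Deliver a supervisor-call interrupt: store the SVC interruption code,
 * ILC and old PSW into the lowcore and load the SVC-new PSW. A pending
 * PER event is delivered immediately afterwards.
 */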
static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SERVICE CALL one. */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}

#define VIRTIO_SUBCODE_64 0x0D00

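/*
 * Deliver one external interrupt, picking the highest-priority pending
 * subclass that is enabled in CR0 (emergency signal, external call, clock
 * comparator, CPU timer, service signal), then swap the external old/new
 * PSWs via the lowcore.
 */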
static void do_ext_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = env_archcpu(env);
    uint64_t mask, addr;
    uint16_t cpu_addr;
    LowCore *lowcore;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    lowcore = cpu_map_lowcore(env);

    if ((env->pending_int & INTERRUPT_EMERGENCY_SIGNAL) &&
        (env->cregs[0] & CR0_EMERGENCY_SIGNAL_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_EMERGENCY);
        cpu_addr = find_first_bit(env->emergency_signals, S390_MAX_CPUS);
        g_assert(cpu_addr < S390_MAX_CPUS);
        lowcore->cpu_addr = cpu_to_be16(cpu_addr);
        clear_bit(cpu_addr, env->emergency_signals);
#ifndef CONFIG_USER_ONLY
        MachineState *ms = MACHINE(qdev_get_machine());
        unsigned int max_cpus = ms->smp.max_cpus;
#endif
        if (bitmap_empty(env->emergency_signals, max_cpus)) {
            env->pending_int &= ~INTERRUPT_EMERGENCY_SIGNAL;
        }
    } else if ((env->pending_int & INTERRUPT_EXTERNAL_CALL) &&
               (env->cregs[0] & CR0_EXTERNAL_CALL_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_EXTERNAL_CALL);
        lowcore->cpu_addr = cpu_to_be16(env->external_call_addr);
        env->pending_int &= ~INTERRUPT_EXTERNAL_CALL;
    } else if ((env->pending_int & INTERRUPT_EXT_CLOCK_COMPARATOR) &&
               (env->cregs[0] & CR0_CKC_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CLOCK_COMP);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR;
    } else if ((env->pending_int & INTERRUPT_EXT_CPU_TIMER) &&
               (env->cregs[0] & CR0_CPU_TIMER_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CPU_TIMER);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CPU_TIMER;
    } else if (qemu_s390_flic_has_service(flic) &&
               (env->cregs[0] & CR0_SERVICE_SC)) {
        uint32_t param;

        param = qemu_s390_flic_dequeue_service(flic);
        lowcore->ext_int_code = cpu_to_be16(EXT_SERVICE);
        lowcore->ext_params = cpu_to_be32(param);
        lowcore->cpu_addr = 0;
    } else {
        g_assert_not_reached();
    }

    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);
    lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);
}

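/*
 * Deliver an I/O interrupt: dequeue a pending I/O interruption from the
 * FLIC according to the subclasses enabled in CR6, store the subchannel
 * identification and interruption parameters into the lowcore, and load
 * the I/O-new PSW.
 */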
static void do_io_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mask, addr;
    QEMUS390FlicIO *io;
    LowCore *lowcore;

    g_assert(env->psw.mask & PSW_MASK_IO);
    io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]);
    g_assert(io);

    lowcore = cpu_map_lowcore(env);

    lowcore->subchannel_id = cpu_to_be16(io->id);
    lowcore->subchannel_nr = cpu_to_be16(io->nr);
    lowcore->io_int_parm = cpu_to_be32(io->parm);
    lowcore->io_int_word = cpu_to_be32(io->word);
    lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->io_new_psw.mask);
    addr = be64_to_cpu(lowcore->io_new_psw.addr);

    cpu_unmap_lowcore(lowcore);
    g_free(io);

    load_psw(env, mask, addr);
}

typedef struct MchkExtSaveArea {
    uint64_t vregs[32][2];               /* 0x0000 */
    uint8_t pad_0x0200[0x0400 - 0x0200]; /* 0x0200 */
} MchkExtSaveArea;
QEMU_BUILD_BUG_ON(sizeof(MchkExtSaveArea) != 1024);

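/*
 * Store the 32 vector registers into the machine-check extended save area
 * at the given absolute address. Returns 0 on success or -EFAULT if the
 * area cannot be mapped.
 */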
static int mchk_store_vregs(CPUS390XState *env, uint64_t mcesao)
{
    hwaddr len = sizeof(MchkExtSaveArea);
    MchkExtSaveArea *sa;
    int i;

    sa = cpu_physical_memory_map(mcesao, &len, 1);
    if (!sa) {
        return -EFAULT;
    }
    if (len != sizeof(MchkExtSaveArea)) {
        cpu_physical_memory_unmap(sa, len, 1, 0);
        return -EFAULT;
    }

    for (i = 0; i < 32; i++) {
        sa->vregs[i][0] = cpu_to_be64(env->vregs[i][0]);
        sa->vregs[i][1] = cpu_to_be64(env->vregs[i][1]);
    }

    cpu_physical_memory_unmap(sa, len, 1, len);
    return 0;
}

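/*
 * Deliver a (channel-report-pending) machine-check interrupt: save the
 * register state and the machine-check interruption code into the lowcore,
 * store the vector registers into the extended save area if one is
 * available, and load the machine-check-new PSW.
 */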
static void do_mchk_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mcic = s390_build_validity_mcic() | MCIC_SC_CP;
    uint64_t mask, addr, mcesao = 0;
    LowCore *lowcore;
    int i;

    /* for now we only support channel report machine checks (floating) */
    g_assert(env->psw.mask & PSW_MASK_MCHECK);
    g_assert(env->cregs[14] & CR14_CHANNEL_REPORT_SC);

    qemu_s390_flic_dequeue_crw_mchk(flic);

    lowcore = cpu_map_lowcore(env);

    /* extended save area */
    if (mcic & MCIC_VB_VR) {
        /* length and alignment is 1024 bytes */
        mcesao = be64_to_cpu(lowcore->mcesad) & ~0x3ffull;
    }

    /* try to store vector registers */
    if (!mcesao || mchk_store_vregs(env, mcesao)) {
        mcic &= ~MCIC_VB_VR;
    }

    /* we are always in z/Architecture mode */
    lowcore->ar_access_id = 1;

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(*get_freg(env, i));
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area = cpu_to_be64(env->cputm);
    lowcore->clock_comp_save_area = cpu_to_be64(env->ckc >> 8);

    lowcore->mcic = cpu_to_be64(mcic);
    lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);
}

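/*
 * Deliver all deliverable interrupts, one at a time, in order of priority
 * (machine check, external, I/O, restart, stop). Program and SVC
 * exceptions arrive here with cs->exception_index already set by TCG.
 */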
void s390_cpu_do_interrupt(CPUState *cs)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    bool stopped = false;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at psw=%" PRIx64 ":%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.mask, env->psw.addr);

try_deliver:
    /* handle machine checks */
    if (cs->exception_index == -1 && s390_cpu_has_mcck_int(cpu)) {
        cs->exception_index = EXCP_MCHK;
    }
    /* handle external interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_ext_int(cpu)) {
        cs->exception_index = EXCP_EXT;
    }
    /* handle I/O interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_io_int(cpu)) {
        cs->exception_index = EXCP_IO;
    }
    /* RESTART interrupt */
    if (cs->exception_index == -1 && s390_cpu_has_restart_int(cpu)) {
        cs->exception_index = EXCP_RESTART;
    }
    /* STOP interrupt has least priority */
    if (cs->exception_index == -1 && s390_cpu_has_stop_int(cpu)) {
        cs->exception_index = EXCP_STOP;
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    case EXCP_RESTART:
        do_restart_interrupt(env);
        break;
    case EXCP_STOP:
        do_stop_interrupt(env);
        stopped = true;
        break;
    }

    if (cs->exception_index != -1 && !stopped) {
        /* check if there are more pending interrupts to deliver */
        cs->exception_index = -1;
        goto try_deliver;
    }
    cs->exception_index = -1;

    /* we might still have pending interrupts, but not deliverable */
    if (!env->pending_int && !qemu_s390_flic_has_any(flic)) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }

    /* WAIT PSW during interrupt injection or STOP interrupt */
    if ((env->psw.mask & PSW_MASK_WAIT) || stopped) {
        /* don't trigger a cpu_loop_exit(), use an interrupt instead */
        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
    } else if (cs->halted) {
        /* unhalt if we had a WAIT PSW somewhere in our injection chain */
        s390_cpu_unhalt(cpu);
    }
}

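/*
 * Callback from the main execution loop for CPU_INTERRUPT_HARD. Interrupts
 * are never delivered in the middle of an EXECUTE target; otherwise any
 * deliverable interrupt is injected right away.
 */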
bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->ex_value) {
            /* Execution of the target insn is indivisible from
               the parent EXECUTE insn. */
            return false;
        }
        if (s390_cpu_has_int(cpu)) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
        if (env->psw.mask & PSW_MASK_WAIT) {
            /* Woken up because of a floating interrupt but it has already
             * been delivered. Go back to sleep. */
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
        }
    }
    return false;
}

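/*
 * Handle a debug exception raised by a TCG watchpoint. PER
 * storage-alteration events are implemented with BP_CPU watchpoints:
 * record the event in the PER state and re-execute the instruction
 * without the watchpoints so that it can complete.
 */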
void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /* FIXME: When the storage-alteration-space control bit is set,
           the exception should only be triggered if the memory access
           is done using an address space with the storage-alteration-event
           bit set. We have no way to detect that with the current
           watchpoint code. */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /* FIXME: We currently have no way to detect the address space used
           to trigger the watchpoint. For now just assume it is the current
           default ASC. This holds true except when the MVCP and MVCS
           instructions are used. */
        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;

        /* Remove all watchpoints to re-execute the code. A PER exception
           will be triggered, it will call load_psw which will recompute
           the watchpoints. */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_loop_exit_noexc(cs);
    }
}

/* Unaligned accesses are only diagnosed with MO_ALIGN. At the moment,
   this is only for the atomic operations, for which we want to raise a
   specification exception. */
void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type,
                                   int mmu_idx, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    s390_program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO, retaddr);
}

#endif /* CONFIG_USER_ONLY */