/*
 * S/390 misc helper routines
 *
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2009 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "internal.h"
#include "exec/memory.h"
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"
#include "qemu/timer.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "qapi/error.h"
#include "tcg_s390x.h"
#include "s390-tod.h"

#if !defined(CONFIG_USER_ONLY)
#include "sysemu/cpus.h"
#include "sysemu/sysemu.h"
#include "hw/s390x/ebcdic.h"
#include "hw/s390x/s390-virtio-hcall.h"
#include "hw/s390x/sclp.h"
#include "hw/s390x/s390_flic.h"
#include "hw/s390x/ioinst.h"
#include "hw/s390x/s390-pci-inst.h"
#include "hw/boards.h"
#include "hw/s390x/tod.h"
#endif

/* #define DEBUG_HELPER */
#ifdef DEBUG_HELPER
#define HELPER_LOG(x...) qemu_log(x)
#else
#define HELPER_LOG(x...)
#endif

/* Raise an exception statically from a TB. */
void HELPER(exception)(CPUS390XState *env, uint32_t excp)
{
    CPUState *cs = env_cpu(env);

    HELPER_LOG("%s: exception %d\n", __func__, excp);
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}
/* Store CPU Timer (also used for EXTRACT CPU TIME) */
uint64_t HELPER(stpt)(CPUS390XState *env)
{
#if defined(CONFIG_USER_ONLY)
    /*
     * Fake a descending CPU timer. We could get negative values here, but
     * we don't care, as it is up to the OS to decide when to process that
     * interrupt and to reset the timer to a value > 0.
     */
    return UINT64_MAX - (uint64_t)cpu_get_host_ticks();
#else
    return time2tod(env->cputm - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
#endif
}

/* Store Clock */
uint64_t HELPER(stck)(CPUS390XState *env)
{
#ifdef CONFIG_USER_ONLY
    struct timespec ts;
    uint64_t ns;

    clock_gettime(CLOCK_REALTIME, &ts);
    ns = ts.tv_sec * NANOSECONDS_PER_SECOND + ts.tv_nsec;

    return TOD_UNIX_EPOCH + time2tod(ns);
#else
    S390TODState *td = s390_get_todstate();
    S390TODClass *tdc = S390_TOD_GET_CLASS(td);
    S390TOD tod;

    tdc->get(td, &tod, &error_abort);
    return tod.low;
#endif
}

#ifndef CONFIG_USER_ONLY
/* SCLP service call */
uint32_t HELPER(servc)(CPUS390XState *env, uint64_t r1, uint64_t r2)
{
    qemu_mutex_lock_iothread();
    int r = sclp_service_call(env, r1, r2);
    qemu_mutex_unlock_iothread();
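    /* a negative return value encodes a program interruption code */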
    if (r < 0) {
        s390_program_interrupt(env, -r, 4, GETPC());
    }
    return r;
}

void HELPER(diag)(CPUS390XState *env, uint32_t r1, uint32_t r3, uint32_t num)
{
    uint64_t r;

    switch (num) {
    case 0x500:
        /* KVM hypercall */
        qemu_mutex_lock_iothread();
        r = s390_virtio_hypercall(env);
        qemu_mutex_unlock_iothread();
        break;
    case 0x44:
        /* yield */
        r = 0;
        break;
    case 0x308:
        /* ipl */
        qemu_mutex_lock_iothread();
        handle_diag_308(env, r1, r3, GETPC());
        qemu_mutex_unlock_iothread();
        r = 0;
        break;
    case 0x288:
        /* time bomb (watchdog) */
        r = handle_diag_288(env, r1, r3);
        break;
    default:
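        /* an unsupported diagnose code results in a specification exception */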
        r = -1;
        break;
    }

    if (r) {
        s390_program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO, GETPC());
    }
}

/* Set Prefix */
void HELPER(spx)(CPUS390XState *env, uint64_t a1)
{
    CPUState *cs = env_cpu(env);
    uint32_t prefix = a1 & 0x7fffe000;

    env->psa = prefix;
    HELPER_LOG("prefix: %#x\n", prefix);
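    /*
     * Changing the prefix remaps the 8K prefix area (the two pages at
     * real address 0), so drop any stale translations for those pages.
     */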
    tlb_flush_page(cs, 0);
    tlb_flush_page(cs, TARGET_PAGE_SIZE);
}

static void update_ckc_timer(CPUS390XState *env)
{
    S390TODState *td = s390_get_todstate();
    uint64_t time;

    /* stop the timer and remove pending CKC IRQs */
    timer_del(env->tod_timer);
    g_assert(qemu_mutex_iothread_locked());
    env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR;

    /* the TOD has to exceed the CKC; never possible if the CKC is all 1's */
    if (env->ckc == -1ULL) {
        return;
    }

    /* difference between origins */
    time = env->ckc - td->base.low;

    /* nanoseconds */
    time = tod2time(time);

    timer_mod(env->tod_timer, time);
}

/* Set Clock Comparator */
void HELPER(sckc)(CPUS390XState *env, uint64_t ckc)
{
    env->ckc = ckc;

    qemu_mutex_lock_iothread();
    update_ckc_timer(env);
    qemu_mutex_unlock_iothread();
}

void tcg_s390_tod_updated(CPUState *cs, run_on_cpu_data opaque)
{
    S390CPU *cpu = S390_CPU(cs);

    update_ckc_timer(&cpu->env);
}

/* Set Clock */
uint32_t HELPER(sck)(CPUS390XState *env, uint64_t tod_low)
{
    S390TODState *td = s390_get_todstate();
    S390TODClass *tdc = S390_TOD_GET_CLASS(td);
    S390TOD tod = {
        .high = 0,
        .low = tod_low,
    };

    qemu_mutex_lock_iothread();
    tdc->set(td, &tod, &error_abort);
    qemu_mutex_unlock_iothread();
    return 0;
}

/* Set TOD Programmable Field */
void HELPER(sckpf)(CPUS390XState *env, uint64_t r0)
{
    uint32_t val = r0;

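    /* only bits 48-63 of r0 are used; bits 32-47 must be zero */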
    if (val & 0xffff0000) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 2, GETPC());
    }
    env->todpr = val;
}

/* Store Clock Comparator */
uint64_t HELPER(stckc)(CPUS390XState *env)
{
    return env->ckc;
}

/* Set CPU Timer */
void HELPER(spt)(CPUS390XState *env, uint64_t time)
{
    if (time == -1ULL) {
        return;
    }

    /* nanoseconds */
    time = tod2time(time);

    env->cputm = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + time;

    timer_mod(env->cpu_timer, env->cputm);
}

/* Store System Information */
uint32_t HELPER(stsi)(CPUS390XState *env, uint64_t a0, uint64_t r0, uint64_t r1)
{
    const uintptr_t ra = GETPC();
    const uint32_t sel1 = r0 & STSI_R0_SEL1_MASK;
    const uint32_t sel2 = r1 & STSI_R1_SEL2_MASK;
    const MachineState *ms = MACHINE(qdev_get_machine());
    uint16_t total_cpus = 0, conf_cpus = 0, reserved_cpus = 0;
    S390CPU *cpu = env_archcpu(env);
    SysIB sysib = { };
    int i, cc = 0;

    if ((r0 & STSI_R0_FC_MASK) > STSI_R0_FC_LEVEL_3) {
        /* invalid function code: no other checks are performed */
        return 3;
    }

    if ((r0 & STSI_R0_RESERVED_MASK) || (r1 & STSI_R1_RESERVED_MASK)) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
    }

    if ((r0 & STSI_R0_FC_MASK) == STSI_R0_FC_CURRENT) {
        /* query the current level: no further checks are performed */
        env->regs[0] = STSI_R0_FC_LEVEL_3;
        return 0;
    }

    if (a0 & ~TARGET_PAGE_MASK) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
    }

    /* count the cpus and split them into configured and reserved ones */
    for (i = 0; i < ms->possible_cpus->len; i++) {
        total_cpus++;
        if (ms->possible_cpus->cpus[i].cpu) {
            conf_cpus++;
        } else {
            reserved_cpus++;
        }
    }

    /*
     * In theory, we could report Level 1 / Level 2 as current. However,
     * the Linux kernel will detect this as running under LPAR and assume
     * that we have an SCLP line-mode console (which is always present on
     * LPAR, but not the default for QEMU), therefore not displaying boot
     * messages and making booting a Linux kernel under TCG harder.
     *
     * For now we fake the same SMP configuration on all levels.
     *
     * TODO: We could later make the level configurable via the machine
     * and change defaults (line-mode console) based on machine type
     * and accelerator.
     */
    switch (r0 & STSI_R0_FC_MASK) {
    case STSI_R0_FC_LEVEL_1:
        if ((sel1 == 1) && (sel2 == 1)) {
            /* Basic Machine Configuration */
            char type[5] = {};

            ebcdic_put(sysib.sysib_111.manuf, "QEMU            ", 16);
            /* same as machine type number in STORE CPU ID, but in EBCDIC */
            snprintf(type, ARRAY_SIZE(type), "%X", cpu->model->def->type);
            ebcdic_put(sysib.sysib_111.type, type, 4);
            /* model number (not stored in STORE CPU ID for z/Architecture) */
            ebcdic_put(sysib.sysib_111.model, "QEMU            ", 16);
            ebcdic_put(sysib.sysib_111.sequence, "QEMU            ", 16);
            ebcdic_put(sysib.sysib_111.plant, "QEMU", 4);
        } else if ((sel1 == 2) && (sel2 == 1)) {
            /* Basic Machine CPU */
            ebcdic_put(sysib.sysib_121.sequence, "QEMUQEMUQEMUQEMU", 16);
            ebcdic_put(sysib.sysib_121.plant, "QEMU", 4);
            sysib.sysib_121.cpu_addr = cpu_to_be16(env->core_id);
        } else if ((sel1 == 2) && (sel2 == 2)) {
            /* Basic Machine CPUs */
            sysib.sysib_122.capability = cpu_to_be32(0x443afc29);
            sysib.sysib_122.total_cpus = cpu_to_be16(total_cpus);
            sysib.sysib_122.conf_cpus = cpu_to_be16(conf_cpus);
            sysib.sysib_122.reserved_cpus = cpu_to_be16(reserved_cpus);
        } else {
            cc = 3;
        }
        break;
    case STSI_R0_FC_LEVEL_2:
        if ((sel1 == 2) && (sel2 == 1)) {
            /* LPAR CPU */
            ebcdic_put(sysib.sysib_221.sequence, "QEMUQEMUQEMUQEMU", 16);
            ebcdic_put(sysib.sysib_221.plant, "QEMU", 4);
            sysib.sysib_221.cpu_addr = cpu_to_be16(env->core_id);
        } else if ((sel1 == 2) && (sel2 == 2)) {
            /* LPAR CPUs */
            sysib.sysib_222.lcpuc = 0x80; /* dedicated */
            sysib.sysib_222.total_cpus = cpu_to_be16(total_cpus);
            sysib.sysib_222.conf_cpus = cpu_to_be16(conf_cpus);
            sysib.sysib_222.reserved_cpus = cpu_to_be16(reserved_cpus);
            ebcdic_put(sysib.sysib_222.name, "QEMU    ", 8);
            sysib.sysib_222.caf = cpu_to_be32(1000);
            sysib.sysib_222.dedicated_cpus = cpu_to_be16(conf_cpus);
        } else {
            cc = 3;
        }
        break;
    case STSI_R0_FC_LEVEL_3:
        if ((sel1 == 2) && (sel2 == 2)) {
            /* VM CPUs */
            sysib.sysib_322.count = 1;
            sysib.sysib_322.vm[0].total_cpus = cpu_to_be16(total_cpus);
            sysib.sysib_322.vm[0].conf_cpus = cpu_to_be16(conf_cpus);
            sysib.sysib_322.vm[0].reserved_cpus = cpu_to_be16(reserved_cpus);
            sysib.sysib_322.vm[0].caf = cpu_to_be32(1000);
            /* Linux kernel uses this to distinguish us from z/VM */
            ebcdic_put(sysib.sysib_322.vm[0].cpi, "KVM/Linux       ", 16);
            sysib.sysib_322.vm[0].ext_name_encoding = 2; /* UTF-8 */

            /* If our VM has a name, use the real name */
            if (qemu_name) {
                memset(sysib.sysib_322.vm[0].name, 0x40,
                       sizeof(sysib.sysib_322.vm[0].name));
                ebcdic_put(sysib.sysib_322.vm[0].name, qemu_name,
                           MIN(sizeof(sysib.sysib_322.vm[0].name),
                               strlen(qemu_name)));
                strncpy((char *)sysib.sysib_322.ext_names[0], qemu_name,
                        sizeof(sysib.sysib_322.ext_names[0]));
            } else {
                ebcdic_put(sysib.sysib_322.vm[0].name, "TCGguest", 8);
                strcpy((char *)sysib.sysib_322.ext_names[0], "TCGguest");
            }

            /* add the uuid */
            memcpy(sysib.sysib_322.vm[0].uuid, &qemu_uuid,
                   sizeof(sysib.sysib_322.vm[0].uuid));
        } else {
            cc = 3;
        }
        break;
    }

    if (cc == 0) {
        if (s390_cpu_virt_mem_write(cpu, a0, 0, &sysib, sizeof(sysib))) {
            s390_cpu_virt_mem_handle_exc(cpu, ra);
        }
    }

    return cc;
}

uint32_t HELPER(sigp)(CPUS390XState *env, uint64_t order_code, uint32_t r1,
                      uint32_t r3)
{
    int cc;

    /* TODO: needed to inject interrupts - push further down */
    qemu_mutex_lock_iothread();
    cc = handle_sigp(env, order_code & SIGP_ORDER_MASK, r1, r3);
    qemu_mutex_unlock_iothread();

    return cc;
}
#endif

#ifndef CONFIG_USER_ONLY
void HELPER(xsch)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_xsch(cpu, r1, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(csch)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_csch(cpu, r1, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(hsch)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_hsch(cpu, r1, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(msch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_msch(cpu, r1, inst >> 16, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(rchp)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_rchp(cpu, r1, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(rsch)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_rsch(cpu, r1, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(sal)(CPUS390XState *env, uint64_t r1)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    ioinst_handle_sal(cpu, r1, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(schm)(CPUS390XState *env, uint64_t r1, uint64_t r2, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    ioinst_handle_schm(cpu, r1, r2, inst >> 16, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(ssch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_ssch(cpu, r1, inst >> 16, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(stcrw)(CPUS390XState *env, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    ioinst_handle_stcrw(cpu, inst >> 16, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(stsch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_stsch(cpu, r1, inst >> 16, GETPC());
    qemu_mutex_unlock_iothread();
}

uint32_t HELPER(tpi)(CPUS390XState *env, uint64_t addr)
{
    const uintptr_t ra = GETPC();
    S390CPU *cpu = env_archcpu(env);
    QEMUS390FLICState *flic = s390_get_qemu_flic(s390_get_flic());
    QEMUS390FlicIO *io = NULL;
    LowCore *lowcore;

    if (addr & 0x3) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
    }

    qemu_mutex_lock_iothread();
    io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]);
    if (!io) {
        qemu_mutex_unlock_iothread();
        return 0;
    }

    if (addr) {
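        /* store the I/O interruption code (id, nr, parm) at addr */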
        struct {
            uint16_t id;
            uint16_t nr;
            uint32_t parm;
        } intc = {
            .id = cpu_to_be16(io->id),
            .nr = cpu_to_be16(io->nr),
            .parm = cpu_to_be32(io->parm),
        };

        if (s390_cpu_virt_mem_write(cpu, addr, 0, &intc, sizeof(intc))) {
            /* writing failed, reinject and properly clean up */
            s390_io_interrupt(io->id, io->nr, io->parm, io->word);
            qemu_mutex_unlock_iothread();
            g_free(io);
            s390_cpu_virt_mem_handle_exc(cpu, ra);
            return 0;
        }
    } else {
        /* no protection applies */
        lowcore = cpu_map_lowcore(env);
        lowcore->subchannel_id = cpu_to_be16(io->id);
        lowcore->subchannel_nr = cpu_to_be16(io->nr);
        lowcore->io_int_parm = cpu_to_be32(io->parm);
        lowcore->io_int_word = cpu_to_be32(io->word);
        cpu_unmap_lowcore(lowcore);
    }

    g_free(io);
    qemu_mutex_unlock_iothread();
    return 1;
}

void HELPER(tsch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_tsch(cpu, r1, inst >> 16, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(chsc)(CPUS390XState *env, uint64_t inst)
{
    S390CPU *cpu = env_archcpu(env);
    qemu_mutex_lock_iothread();
    ioinst_handle_chsc(cpu, inst >> 16, GETPC());
    qemu_mutex_unlock_iothread();
}
#endif

#ifndef CONFIG_USER_ONLY
void HELPER(per_check_exception)(CPUS390XState *env)
{
    uint32_t ilen;

    if (env->per_perc_atmid) {
        /*
         * FIXME: ILEN_AUTO is most probably the right thing to use. ilen
         * always has to match the instruction referenced in the PSW. E.g.
         * if a PER interrupt is triggered via EXECUTE, we have to use ilen
         * of EXECUTE, while per_address contains the target of EXECUTE.
         */
        ilen = get_ilen(cpu_ldub_code(env, env->per_address));
        s390_program_interrupt(env, PGM_PER, ilen, GETPC());
    }
}

/* Check if an address is within the PER starting address and the PER
   ending address. The address range might wrap around. */
static inline bool get_per_in_range(CPUS390XState *env, uint64_t addr)
{
    if (env->cregs[10] <= env->cregs[11]) {
        return env->cregs[10] <= addr && addr <= env->cregs[11];
    } else {
        return env->cregs[10] <= addr || addr <= env->cregs[11];
    }
}

void HELPER(per_branch)(CPUS390XState *env, uint64_t from, uint64_t to)
{
    if ((env->cregs[9] & PER_CR9_EVENT_BRANCH)) {
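        /* with the branch-address control, only branches into the PER
           range are reported */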
        if (!(env->cregs[9] & PER_CR9_CONTROL_BRANCH_ADDRESS)
            || get_per_in_range(env, to)) {
            env->per_address = from;
            env->per_perc_atmid = PER_CODE_EVENT_BRANCH | get_per_atmid(env);
        }
    }
}

void HELPER(per_ifetch)(CPUS390XState *env, uint64_t addr)
{
    if ((env->cregs[9] & PER_CR9_EVENT_IFETCH) && get_per_in_range(env, addr)) {
        env->per_address = addr;
        env->per_perc_atmid = PER_CODE_EVENT_IFETCH | get_per_atmid(env);

        /* If the instruction has to be nullified, trigger the
           exception immediately. */
        if (env->cregs[9] & PER_CR9_EVENT_NULLIFICATION) {
            CPUState *cs = env_cpu(env);

            env->per_perc_atmid |= PER_CODE_EVENT_NULLIFICATION;
            env->int_pgm_code = PGM_PER;
            env->int_pgm_ilen = get_ilen(cpu_ldub_code(env, addr));

            cs->exception_index = EXCP_PGM;
            cpu_loop_exit(cs);
        }
    }
}
#endif

static uint8_t stfl_bytes[2048];
static unsigned int used_stfl_bytes;

static void prepare_stfl(void)
{
    static bool initialized;
    int i;

    /* racy, but we don't care, the same values are always written */
    if (initialized) {
        return;
    }

    s390_get_feat_block(S390_FEAT_TYPE_STFL, stfl_bytes);
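    /* determine the highest byte that actually contains facility bits */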
    for (i = 0; i < sizeof(stfl_bytes); i++) {
        if (stfl_bytes[i]) {
            used_stfl_bytes = i + 1;
        }
    }
    initialized = true;
}

#ifndef CONFIG_USER_ONLY
void HELPER(stfl)(CPUS390XState *env)
{
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);
    prepare_stfl();
    memcpy(&lowcore->stfl_fac_list, stfl_bytes, sizeof(lowcore->stfl_fac_list));
    cpu_unmap_lowcore(lowcore);
}
#endif

uint32_t HELPER(stfle)(CPUS390XState *env, uint64_t addr)
{
    const uintptr_t ra = GETPC();
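    /* bits 56-63 of r0 hold the number of doublewords provided, minus 1 */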
    const int count_bytes = ((env->regs[0] & 0xff) + 1) * 8;
    int max_bytes;
    int i;

    if (addr & 0x7) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
    }

    prepare_stfl();
    max_bytes = ROUND_UP(used_stfl_bytes, 8);

    /*
     * The PoP says that doublewords beyond the highest-numbered facility
     * bit may or may not be stored. However, existing hardware appears
     * not to store them, and existing software depends on that.
     */
    for (i = 0; i < MIN(count_bytes, max_bytes); ++i) {
        cpu_stb_data_ra(env, addr + i, stfl_bytes[i], ra);
    }

    env->regs[0] = deposit64(env->regs[0], 0, 8, (max_bytes / 8) - 1);
    return count_bytes >= max_bytes ? 0 : 3;
}

#ifndef CONFIG_USER_ONLY
/*
 * Note: we ignore any return code of the functions called for the pci
 * instructions, as the only time they return !0 is when the stub is
 * called, and in that case we didn't even offer the zpci facility.
 * The only exception is SIC, where program checks need to be handled
 * by the caller.
 */
void HELPER(clp)(CPUS390XState *env, uint32_t r2)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    clp_service_call(cpu, r2, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(pcilg)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    pcilg_service_call(cpu, r1, r2, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(pcistg)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    pcistg_service_call(cpu, r1, r2, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(stpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba,
                     uint32_t ar)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    stpcifc_service_call(cpu, r1, fiba, ar, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(sic)(CPUS390XState *env, uint64_t r1, uint64_t r3)
{
    int r;

    qemu_mutex_lock_iothread();
    r = css_do_sic(env, (r3 >> 27) & 0x7, r1 & 0xffff);
    qemu_mutex_unlock_iothread();
    /* css_do_sic() may actually return a PGM_xxx value to inject */
    if (r) {
        s390_program_interrupt(env, -r, 4, GETPC());
    }
}

void HELPER(rpcit)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    rpcit_service_call(cpu, r1, r2, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(pcistb)(CPUS390XState *env, uint32_t r1, uint32_t r3,
                    uint64_t gaddr, uint32_t ar)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    pcistb_service_call(cpu, r1, r3, gaddr, ar, GETPC());
    qemu_mutex_unlock_iothread();
}

void HELPER(mpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba,
                    uint32_t ar)
{
    S390CPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();
    mpcifc_service_call(cpu, r1, fiba, ar, GETPC());
    qemu_mutex_unlock_iothread();
}
#endif