1 | #include "qemu/osdep.h" |
2 | #include "cpu.h" |
3 | #include "exec/exec-all.h" |
4 | #include "sysemu/kvm.h" |
5 | #include "helper_regs.h" |
6 | #include "mmu-hash64.h" |
7 | #include "migration/cpu.h" |
8 | #include "qapi/error.h" |
9 | #include "qemu/main-loop.h" |
10 | #include "kvm_ppc.h" |
11 | #include "exec/helper-proto.h" |
12 | |
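/*
 * Legacy loader for old-format migration streams (wired up via
 * .load_state_old in vmstate_ppc_cpu below); the field order here
 * must match what those older QEMU versions emitted.
 */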
13 | static int cpu_load_old(QEMUFile *f, void *opaque, int version_id) |
14 | { |
15 | PowerPCCPU *cpu = opaque; |
16 | CPUPPCState *env = &cpu->env; |
17 | unsigned int i, j; |
18 | target_ulong sdr1; |
19 | uint32_t fpscr, vscr; |
20 | #if defined(TARGET_PPC64) |
21 | int32_t slb_nr; |
22 | #endif |
23 | target_ulong xer; |
24 | |
25 | for (i = 0; i < 32; i++) { |
26 | qemu_get_betls(f, &env->gpr[i]); |
27 | } |
28 | #if !defined(TARGET_PPC64) |
29 | for (i = 0; i < 32; i++) { |
30 | qemu_get_betls(f, &env->gprh[i]); |
31 | } |
32 | #endif |
33 | qemu_get_betls(f, &env->lr); |
34 | qemu_get_betls(f, &env->ctr); |
35 | for (i = 0; i < 8; i++) { |
36 | qemu_get_be32s(f, &env->crf[i]); |
37 | } |
38 | qemu_get_betls(f, &xer); |
39 | cpu_write_xer(env, xer); |
40 | qemu_get_betls(f, &env->reserve_addr); |
41 | qemu_get_betls(f, &env->msr); |
42 | for (i = 0; i < 4; i++) { |
43 | qemu_get_betls(f, &env->tgpr[i]); |
44 | } |
45 | for (i = 0; i < 32; i++) { |
46 | union { |
47 | float64 d; |
48 | uint64_t l; |
49 | } u; |
50 | u.l = qemu_get_be64(f); |
51 | *cpu_fpr_ptr(env, i) = u.d; |
52 | } |
53 | qemu_get_be32s(f, &fpscr); |
54 | env->fpscr = fpscr; |
55 | qemu_get_sbe32s(f, &env->access_type); |
56 | #if defined(TARGET_PPC64) |
57 | qemu_get_betls(f, &env->spr[SPR_ASR]); |
58 | qemu_get_sbe32s(f, &slb_nr); |
59 | #endif |
60 | qemu_get_betls(f, &sdr1); |
61 | for (i = 0; i < 32; i++) { |
62 | qemu_get_betls(f, &env->sr[i]); |
63 | } |
64 | for (i = 0; i < 2; i++) { |
65 | for (j = 0; j < 8; j++) { |
66 | qemu_get_betls(f, &env->DBAT[i][j]); |
67 | } |
68 | } |
69 | for (i = 0; i < 2; i++) { |
70 | for (j = 0; j < 8; j++) { |
71 | qemu_get_betls(f, &env->IBAT[i][j]); |
72 | } |
73 | } |
74 | qemu_get_sbe32s(f, &env->nb_tlb); |
75 | qemu_get_sbe32s(f, &env->tlb_per_way); |
76 | qemu_get_sbe32s(f, &env->nb_ways); |
77 | qemu_get_sbe32s(f, &env->last_way); |
78 | qemu_get_sbe32s(f, &env->id_tlbs); |
79 | qemu_get_sbe32s(f, &env->nb_pids); |
80 | if (env->tlb.tlb6) { |
81 | /* XXX assumes 6xx */ |
82 | for (i = 0; i < env->nb_tlb; i++) { |
83 | qemu_get_betls(f, &env->tlb.tlb6[i].pte0); |
84 | qemu_get_betls(f, &env->tlb.tlb6[i].pte1); |
85 | qemu_get_betls(f, &env->tlb.tlb6[i].EPN); |
86 | } |
87 | } |
88 | for (i = 0; i < 4; i++) { |
89 | qemu_get_betls(f, &env->pb[i]); |
90 | } |
91 | for (i = 0; i < 1024; i++) { |
92 | qemu_get_betls(f, &env->spr[i]); |
93 | } |
94 | if (!cpu->vhyp) { |
95 | ppc_store_sdr1(env, sdr1); |
96 | } |
97 | qemu_get_be32s(f, &vscr); |
98 | helper_mtvscr(env, vscr); |
99 | qemu_get_be64s(f, &env->spe_acc); |
100 | qemu_get_be32s(f, &env->spe_fscr); |
101 | qemu_get_betls(f, &env->msr_mask); |
102 | qemu_get_be32s(f, &env->flags); |
103 | qemu_get_sbe32s(f, &env->error_code); |
104 | qemu_get_be32s(f, &env->pending_interrupts); |
105 | qemu_get_be32s(f, &env->irq_input_state); |
106 | for (i = 0; i < POWERPC_EXCP_NB; i++) { |
107 | qemu_get_betls(f, &env->excp_vectors[i]); |
108 | } |
109 | qemu_get_betls(f, &env->excp_prefix); |
110 | qemu_get_betls(f, &env->ivor_mask); |
111 | qemu_get_betls(f, &env->ivpr_mask); |
112 | qemu_get_betls(f, &env->hreset_vector); |
113 | qemu_get_betls(f, &env->nip); |
114 | qemu_get_betls(f, &env->hflags); |
115 | qemu_get_betls(f, &env->hflags_nmsr); |
116 | qemu_get_sbe32(f); /* Discard unused mmu_idx */ |
117 | qemu_get_sbe32(f); /* Discard unused power_mode */ |
118 | |
119 | /* Recompute mmu indices */ |
120 | hreg_compute_mem_idx(env); |
121 | |
122 | return 0; |
123 | } |
124 | |
125 | static int get_avr(QEMUFile *f, void *pv, size_t size, |
126 | const VMStateField *field) |
127 | { |
128 | ppc_avr_t *v = pv; |
129 | |
130 | v->u64[0] = qemu_get_be64(f); |
131 | v->u64[1] = qemu_get_be64(f); |
132 | |
133 | return 0; |
134 | } |
135 | |
136 | static int put_avr(QEMUFile *f, void *pv, size_t size, |
137 | const VMStateField *field, QJSON *vmdesc) |
138 | { |
139 | ppc_avr_t *v = pv; |
140 | |
141 | qemu_put_be64(f, v->u64[0]); |
142 | qemu_put_be64(f, v->u64[1]); |
143 | return 0; |
144 | } |
145 | |
146 | static const VMStateInfo vmstate_info_avr = { |
147 | .name = "avr" , |
148 | .get = get_avr, |
149 | .put = put_avr, |
150 | }; |
151 | |
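/* The AVRs live in the second half of env->vsr[], hence the sub-array start of 32 */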
152 | #define VMSTATE_AVR_ARRAY_V(_f, _s, _n, _v) \ |
153 | VMSTATE_SUB_ARRAY(_f, _s, 32, _n, _v, vmstate_info_avr, ppc_avr_t) |
154 | |
155 | #define VMSTATE_AVR_ARRAY(_f, _s, _n) \ |
156 | VMSTATE_AVR_ARRAY_V(_f, _s, _n, 0) |
157 | |
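/*
 * Only the most-significant doubleword (VsrD(0)) of each of the first
 * 32 VSRs -- i.e. the classic FPRs -- is saved here; the other half is
 * covered by the cpu/vsx subsection below.
 */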
158 | static int get_fpr(QEMUFile *f, void *pv, size_t size, |
159 | const VMStateField *field) |
160 | { |
161 | ppc_vsr_t *v = pv; |
162 | |
163 | v->VsrD(0) = qemu_get_be64(f); |
164 | |
165 | return 0; |
166 | } |
167 | |
168 | static int put_fpr(QEMUFile *f, void *pv, size_t size, |
169 | const VMStateField *field, QJSON *vmdesc) |
170 | { |
171 | ppc_vsr_t *v = pv; |
172 | |
173 | qemu_put_be64(f, v->VsrD(0)); |
174 | return 0; |
175 | } |
176 | |
177 | static const VMStateInfo vmstate_info_fpr = { |
178 | .name = "fpr" , |
179 | .get = get_fpr, |
180 | .put = put_fpr, |
181 | }; |
182 | |
183 | #define VMSTATE_FPR_ARRAY_V(_f, _s, _n, _v) \ |
184 | VMSTATE_SUB_ARRAY(_f, _s, 0, _n, _v, vmstate_info_fpr, ppc_vsr_t) |
185 | |
186 | #define VMSTATE_FPR_ARRAY(_f, _s, _n) \ |
187 | VMSTATE_FPR_ARRAY_V(_f, _s, _n, 0) |
188 | |
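/*
 * The cpu/vsx subsection saves the least-significant doubleword
 * (VsrD(1)) of the first 32 VSRs; the cpu/fpu subsection above covers
 * the other half.
 */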
189 | static int get_vsr(QEMUFile *f, void *pv, size_t size, |
190 | const VMStateField *field) |
191 | { |
192 | ppc_vsr_t *v = pv; |
193 | |
194 | v->VsrD(1) = qemu_get_be64(f); |
195 | |
196 | return 0; |
197 | } |
198 | |
199 | static int put_vsr(QEMUFile *f, void *pv, size_t size, |
200 | const VMStateField *field, QJSON *vmdesc) |
201 | { |
202 | ppc_vsr_t *v = pv; |
203 | |
204 | qemu_put_be64(f, v->VsrD(1)); |
205 | return 0; |
206 | } |
207 | |
208 | static const VMStateInfo vmstate_info_vsr = { |
209 | .name = "vsr" , |
210 | .get = get_vsr, |
211 | .put = put_vsr, |
212 | }; |
213 | |
214 | #define VMSTATE_VSR_ARRAY_V(_f, _s, _n, _v) \ |
215 | VMSTATE_SUB_ARRAY(_f, _s, 0, _n, _v, vmstate_info_vsr, ppc_vsr_t) |
216 | |
217 | #define VMSTATE_VSR_ARRAY(_f, _s, _n) \ |
218 | VMSTATE_VSR_ARRAY_V(_f, _s, _n, 0) |
219 | |
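/*
 * The pre-*-migration flags are set via compat properties on older
 * machine types so the outgoing stream keeps the layout those QEMU
 * versions expect.
 */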
220 | static bool cpu_pre_2_8_migration(void *opaque, int version_id) |
221 | { |
222 | PowerPCCPU *cpu = opaque; |
223 | |
224 | return cpu->pre_2_8_migration; |
225 | } |
226 | |
227 | #if defined(TARGET_PPC64) |
228 | static bool cpu_pre_3_0_migration(void *opaque, int version_id) |
229 | { |
230 | PowerPCCPU *cpu = opaque; |
231 | |
232 | return cpu->pre_3_0_migration; |
233 | } |
234 | #endif |
235 | |
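/*
 * Copy architected state into its SPR slots (and compute the
 * backward-compatibility fields) before the VMState fields are written.
 */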
236 | static int cpu_pre_save(void *opaque) |
237 | { |
238 | PowerPCCPU *cpu = opaque; |
239 | CPUPPCState *env = &cpu->env; |
240 | int i; |
241 | uint64_t insns_compat_mask = |
242 | PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB |
243 | | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
244 | | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | PPC_FLOAT_FRSQRTES |
245 | | PPC_FLOAT_STFIWX | PPC_FLOAT_EXT |
246 | | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
247 | | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
248 | | PPC_64B | PPC_64BX | PPC_ALTIVEC |
249 | | PPC_SEGMENT_64B | PPC_SLBI | PPC_POPCNTB | PPC_POPCNTWD; |
250 | uint64_t insns_compat_mask2 = PPC2_VSX | PPC2_VSX207 | PPC2_DFP | PPC2_DBRX |
251 | | PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 |
252 | | PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206 |
253 | | PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207 |
254 | | PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 |
255 | | PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 | PPC2_TM; |
256 | |
257 | env->spr[SPR_LR] = env->lr; |
258 | env->spr[SPR_CTR] = env->ctr; |
259 | env->spr[SPR_XER] = cpu_read_xer(env); |
260 | #if defined(TARGET_PPC64) |
261 | env->spr[SPR_CFAR] = env->cfar; |
262 | #endif |
263 | env->spr[SPR_BOOKE_SPEFSCR] = env->spe_fscr; |
264 | |
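    /* Mirror BATs 0-3, and 4-7 where present, into their SPR slots */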
265 | for (i = 0; (i < 4) && (i < env->nb_BATs); i++) { |
266 | env->spr[SPR_DBAT0U + 2 * i] = env->DBAT[0][i]; |
267 | env->spr[SPR_DBAT0U + 2 * i + 1] = env->DBAT[1][i]; |
268 | env->spr[SPR_IBAT0U + 2 * i] = env->IBAT[0][i]; |
269 | env->spr[SPR_IBAT0U + 2 * i + 1] = env->IBAT[1][i]; |
270 | } |
271 | for (i = 0; (i < 4) && ((i + 4) < env->nb_BATs); i++) { |
272 | env->spr[SPR_DBAT4U + 2 * i] = env->DBAT[0][i + 4]; |
273 | env->spr[SPR_DBAT4U + 2 * i + 1] = env->DBAT[1][i + 4]; |
274 | env->spr[SPR_IBAT4U + 2 * i] = env->IBAT[0][i + 4]; |
275 | env->spr[SPR_IBAT4U + 2 * i + 1] = env->IBAT[1][i + 4]; |
276 | } |
277 | |
278 | /* Hacks for migration compatibility between 2.6, 2.7 & 2.8 */ |
279 | if (cpu->pre_2_8_migration) { |
280 | /* |
281 | * Mask out bits that got added to msr_mask since the versions |
282 | * which stupidly included it in the migration stream. |
283 | */ |
284 | target_ulong metamask = 0 |
285 | #if defined(TARGET_PPC64) |
286 | | (1ULL << MSR_TS0) |
287 | | (1ULL << MSR_TS1) |
288 | #endif |
289 | ; |
290 | cpu->mig_msr_mask = env->msr_mask & ~metamask; |
291 | cpu->mig_insns_flags = env->insns_flags & insns_compat_mask; |
292 | /* |
293 | * CPU models supported by old machines all have |
294 | * PPC_MEM_TLBIE, so we set it unconditionally to allow |
295 | * backward migration from a POWER9 host to a POWER8 host. |
296 | */ |
297 | cpu->mig_insns_flags |= PPC_MEM_TLBIE; |
298 | cpu->mig_insns_flags2 = env->insns_flags2 & insns_compat_mask2; |
299 | cpu->mig_nb_BATs = env->nb_BATs; |
300 | } |
301 | if (cpu->pre_3_0_migration) { |
302 | if (cpu->hash64_opts) { |
303 | cpu->mig_slb_nr = cpu->hash64_opts->slb_size; |
304 | } |
305 | } |
306 | |
307 | return 0; |
308 | } |
309 | |
310 | /* |
311 | * Determine if a given PVR is a "close enough" match to the CPU |
312 | * object. For TCG and KVM PR it would probably be sufficient to |
313 | * require an exact PVR match. However for KVM HV the user is |
314 | * restricted to a PVR exactly matching the host CPU. The correct way |
315 | * to handle this is to put the guest into an architected |
316 | * compatibility mode. However, to allow a more forgiving transition |
317 | * and migration from before this was widely done, we allow migration |
318 | * between sufficiently similar PVRs, as determined by the CPU class's |
319 | * pvr_match() hook. |
320 | */ |
321 | static bool pvr_match(PowerPCCPU *cpu, uint32_t pvr) |
322 | { |
323 | PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); |
324 | |
325 | if (pvr == pcc->pvr) { |
326 | return true; |
327 | } |
328 | return pcc->pvr_match(pcc, pvr); |
329 | } |
330 | |
331 | static int cpu_post_load(void *opaque, int version_id) |
332 | { |
333 | PowerPCCPU *cpu = opaque; |
334 | CPUPPCState *env = &cpu->env; |
335 | int i; |
336 | target_ulong msr; |
337 | |
338 | /* |
339 | * If we're operating in compat mode, we should be ok as long as |
     * the destination supports the same compatibility mode.
341 | * |
342 | * Otherwise, however, we require that the destination has exactly |
343 | * the same CPU model as the source. |
344 | */ |
345 | |
346 | #if defined(TARGET_PPC64) |
347 | if (cpu->compat_pvr) { |
348 | uint32_t compat_pvr = cpu->compat_pvr; |
349 | Error *local_err = NULL; |
350 | |
351 | cpu->compat_pvr = 0; |
352 | ppc_set_compat(cpu, compat_pvr, &local_err); |
353 | if (local_err) { |
354 | error_report_err(local_err); |
355 | return -1; |
356 | } |
357 | } else |
358 | #endif |
359 | { |
360 | if (!pvr_match(cpu, env->spr[SPR_PVR])) { |
361 | return -1; |
362 | } |
363 | } |
364 | |
365 | /* |
366 | * If we're running with KVM HV, there is a chance that the guest |
367 | * is running with KVM HV and its kernel does not have the |
368 | * capability of dealing with a different PVR other than this |
369 | * exact host PVR in KVM_SET_SREGS. If that happens, the |
370 | * guest freezes after migration. |
371 | * |
372 | * The function kvmppc_pvr_workaround_required does this verification |
373 | * by first checking if the kernel has the cap, returning true immediately |
374 | * if that is the case. Otherwise, it checks if we're running in KVM PR. |
375 | * If the guest kernel does not have the cap and we're not running KVM-PR |
376 | * (so, it is running KVM-HV), we need to ensure that KVM_SET_SREGS will |
377 | * receive the PVR it expects as a workaround. |
378 | * |
379 | */ |
380 | if (kvmppc_pvr_workaround_required(cpu)) { |
381 | env->spr[SPR_PVR] = env->spr_cb[SPR_PVR].default_value; |
382 | } |
383 | |
384 | env->lr = env->spr[SPR_LR]; |
385 | env->ctr = env->spr[SPR_CTR]; |
386 | cpu_write_xer(env, env->spr[SPR_XER]); |
387 | #if defined(TARGET_PPC64) |
388 | env->cfar = env->spr[SPR_CFAR]; |
389 | #endif |
390 | env->spe_fscr = env->spr[SPR_BOOKE_SPEFSCR]; |
391 | |
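    /* Rebuild the BAT arrays from the SPR image restored above */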
392 | for (i = 0; (i < 4) && (i < env->nb_BATs); i++) { |
393 | env->DBAT[0][i] = env->spr[SPR_DBAT0U + 2 * i]; |
394 | env->DBAT[1][i] = env->spr[SPR_DBAT0U + 2 * i + 1]; |
395 | env->IBAT[0][i] = env->spr[SPR_IBAT0U + 2 * i]; |
396 | env->IBAT[1][i] = env->spr[SPR_IBAT0U + 2 * i + 1]; |
397 | } |
398 | for (i = 0; (i < 4) && ((i + 4) < env->nb_BATs); i++) { |
399 | env->DBAT[0][i + 4] = env->spr[SPR_DBAT4U + 2 * i]; |
400 | env->DBAT[1][i + 4] = env->spr[SPR_DBAT4U + 2 * i + 1]; |
401 | env->IBAT[0][i + 4] = env->spr[SPR_IBAT4U + 2 * i]; |
402 | env->IBAT[1][i + 4] = env->spr[SPR_IBAT4U + 2 * i + 1]; |
403 | } |
404 | |
405 | if (!cpu->vhyp) { |
406 | ppc_store_sdr1(env, env->spr[SPR_SDR1]); |
407 | } |
408 | |
409 | /* |
410 | * Invalidate all supported msr bits except MSR_TGPR/MSR_HVB |
411 | * before restoring |
412 | */ |
413 | msr = env->msr; |
414 | env->msr ^= env->msr_mask & ~((1ULL << MSR_TGPR) | MSR_HVB); |
415 | ppc_store_msr(env, msr); |
416 | |
417 | hreg_compute_mem_idx(env); |
418 | |
419 | return 0; |
420 | } |
421 | |
422 | static bool fpu_needed(void *opaque) |
423 | { |
424 | PowerPCCPU *cpu = opaque; |
425 | |
426 | return cpu->env.insns_flags & PPC_FLOAT; |
427 | } |
428 | |
429 | static const VMStateDescription vmstate_fpu = { |
430 | .name = "cpu/fpu" , |
431 | .version_id = 1, |
432 | .minimum_version_id = 1, |
433 | .needed = fpu_needed, |
434 | .fields = (VMStateField[]) { |
435 | VMSTATE_FPR_ARRAY(env.vsr, PowerPCCPU, 32), |
436 | VMSTATE_UINTTL(env.fpscr, PowerPCCPU), |
437 | VMSTATE_END_OF_LIST() |
438 | }, |
439 | }; |
440 | |
441 | static bool altivec_needed(void *opaque) |
442 | { |
443 | PowerPCCPU *cpu = opaque; |
444 | |
445 | return cpu->env.insns_flags & PPC_ALTIVEC; |
446 | } |
447 | |
448 | static int get_vscr(QEMUFile *f, void *opaque, size_t size, |
449 | const VMStateField *field) |
450 | { |
451 | PowerPCCPU *cpu = opaque; |
452 | helper_mtvscr(&cpu->env, qemu_get_be32(f)); |
453 | return 0; |
454 | } |
455 | |
456 | static int put_vscr(QEMUFile *f, void *opaque, size_t size, |
457 | const VMStateField *field, QJSON *vmdesc) |
458 | { |
459 | PowerPCCPU *cpu = opaque; |
460 | qemu_put_be32(f, helper_mfvscr(&cpu->env)); |
461 | return 0; |
462 | } |
463 | |
464 | static const VMStateInfo vmstate_vscr = { |
465 | .name = "cpu/altivec/vscr" , |
466 | .get = get_vscr, |
467 | .put = put_vscr, |
468 | }; |
469 | |
470 | static const VMStateDescription vmstate_altivec = { |
471 | .name = "cpu/altivec" , |
472 | .version_id = 1, |
473 | .minimum_version_id = 1, |
474 | .needed = altivec_needed, |
475 | .fields = (VMStateField[]) { |
476 | VMSTATE_AVR_ARRAY(env.vsr, PowerPCCPU, 32), |
477 | /* |
478 | * Save the architecture value of the vscr, not the internally |
479 | * expanded version. Since this architecture value does not |
         * exist in memory to be stored, this requires a bit of hoop
481 | * jumping. We want OFFSET=0 so that we effectively pass CPU |
482 | * to the helper functions. |
483 | */ |
484 | { |
485 | .name = "vscr" , |
486 | .version_id = 0, |
487 | .size = sizeof(uint32_t), |
488 | .info = &vmstate_vscr, |
489 | .flags = VMS_SINGLE, |
490 | .offset = 0 |
491 | }, |
492 | VMSTATE_END_OF_LIST() |
493 | }, |
494 | }; |
495 | |
496 | static bool vsx_needed(void *opaque) |
497 | { |
498 | PowerPCCPU *cpu = opaque; |
499 | |
500 | return cpu->env.insns_flags2 & PPC2_VSX; |
501 | } |
502 | |
503 | static const VMStateDescription vmstate_vsx = { |
504 | .name = "cpu/vsx" , |
505 | .version_id = 1, |
506 | .minimum_version_id = 1, |
507 | .needed = vsx_needed, |
508 | .fields = (VMStateField[]) { |
509 | VMSTATE_VSR_ARRAY(env.vsr, PowerPCCPU, 32), |
510 | VMSTATE_END_OF_LIST() |
511 | }, |
512 | }; |
513 | |
514 | #ifdef TARGET_PPC64 |
515 | /* Transactional memory state */ |
516 | static bool tm_needed(void *opaque) |
517 | { |
518 | PowerPCCPU *cpu = opaque; |
519 | CPUPPCState *env = &cpu->env; |
520 | return msr_ts; |
521 | } |
522 | |
523 | static const VMStateDescription vmstate_tm = { |
524 | .name = "cpu/tm" , |
525 | .version_id = 1, |
526 | .minimum_version_id = 1, |
527 | .minimum_version_id_old = 1, |
528 | .needed = tm_needed, |
529 | .fields = (VMStateField []) { |
530 | VMSTATE_UINTTL_ARRAY(env.tm_gpr, PowerPCCPU, 32), |
531 | VMSTATE_AVR_ARRAY(env.tm_vsr, PowerPCCPU, 64), |
532 | VMSTATE_UINT64(env.tm_cr, PowerPCCPU), |
533 | VMSTATE_UINT64(env.tm_lr, PowerPCCPU), |
534 | VMSTATE_UINT64(env.tm_ctr, PowerPCCPU), |
535 | VMSTATE_UINT64(env.tm_fpscr, PowerPCCPU), |
536 | VMSTATE_UINT64(env.tm_amr, PowerPCCPU), |
537 | VMSTATE_UINT64(env.tm_ppr, PowerPCCPU), |
538 | VMSTATE_UINT64(env.tm_vrsave, PowerPCCPU), |
539 | VMSTATE_UINT32(env.tm_vscr, PowerPCCPU), |
540 | VMSTATE_UINT64(env.tm_dscr, PowerPCCPU), |
541 | VMSTATE_UINT64(env.tm_tar, PowerPCCPU), |
542 | VMSTATE_END_OF_LIST() |
543 | }, |
544 | }; |
545 | #endif |
546 | |
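/* Segment registers are not used by 64-bit hash MMU CPUs, which use the SLB instead */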
547 | static bool sr_needed(void *opaque) |
548 | { |
549 | #ifdef TARGET_PPC64 |
550 | PowerPCCPU *cpu = opaque; |
551 | |
552 | return !(cpu->env.mmu_model & POWERPC_MMU_64); |
553 | #else |
554 | return true; |
555 | #endif |
556 | } |
557 | |
558 | static const VMStateDescription vmstate_sr = { |
559 | .name = "cpu/sr" , |
560 | .version_id = 1, |
561 | .minimum_version_id = 1, |
562 | .needed = sr_needed, |
563 | .fields = (VMStateField[]) { |
564 | VMSTATE_UINTTL_ARRAY(env.sr, PowerPCCPU, 32), |
565 | VMSTATE_END_OF_LIST() |
566 | }, |
567 | }; |
568 | |
569 | #ifdef TARGET_PPC64 |
570 | static int get_slbe(QEMUFile *f, void *pv, size_t size, |
571 | const VMStateField *field) |
572 | { |
573 | ppc_slb_t *v = pv; |
574 | |
575 | v->esid = qemu_get_be64(f); |
576 | v->vsid = qemu_get_be64(f); |
577 | |
578 | return 0; |
579 | } |
580 | |
581 | static int put_slbe(QEMUFile *f, void *pv, size_t size, |
582 | const VMStateField *field, QJSON *vmdesc) |
583 | { |
584 | ppc_slb_t *v = pv; |
585 | |
586 | qemu_put_be64(f, v->esid); |
587 | qemu_put_be64(f, v->vsid); |
588 | return 0; |
589 | } |
590 | |
591 | static const VMStateInfo vmstate_info_slbe = { |
592 | .name = "slbe" , |
593 | .get = get_slbe, |
594 | .put = put_slbe, |
595 | }; |
596 | |
597 | #define VMSTATE_SLB_ARRAY_V(_f, _s, _n, _v) \ |
598 | VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_slbe, ppc_slb_t) |
599 | |
600 | #define VMSTATE_SLB_ARRAY(_f, _s, _n) \ |
601 | VMSTATE_SLB_ARRAY_V(_f, _s, _n, 0) |
602 | |
603 | static bool slb_needed(void *opaque) |
604 | { |
605 | PowerPCCPU *cpu = opaque; |
606 | |
607 | /* We don't support any of the old segment table based 64-bit CPUs */ |
608 | return cpu->env.mmu_model & POWERPC_MMU_64; |
609 | } |
610 | |
611 | static int slb_post_load(void *opaque, int version_id) |
612 | { |
613 | PowerPCCPU *cpu = opaque; |
614 | CPUPPCState *env = &cpu->env; |
615 | int i; |
616 | |
617 | /* |
618 | * We've pulled in the raw esid and vsid values from the migration |
619 | * stream, but we need to recompute the page size pointers |
620 | */ |
621 | for (i = 0; i < cpu->hash64_opts->slb_size; i++) { |
622 | if (ppc_store_slb(cpu, i, env->slb[i].esid, env->slb[i].vsid) < 0) { |
623 | /* Migration source had bad values in its SLB */ |
624 | return -1; |
625 | } |
626 | } |
627 | |
628 | return 0; |
629 | } |
630 | |
631 | static const VMStateDescription vmstate_slb = { |
632 | .name = "cpu/slb" , |
633 | .version_id = 1, |
634 | .minimum_version_id = 1, |
635 | .needed = slb_needed, |
636 | .post_load = slb_post_load, |
637 | .fields = (VMStateField[]) { |
638 | VMSTATE_INT32_TEST(mig_slb_nr, PowerPCCPU, cpu_pre_3_0_migration), |
639 | VMSTATE_SLB_ARRAY(env.slb, PowerPCCPU, MAX_SLB_ENTRIES), |
640 | VMSTATE_END_OF_LIST() |
641 | } |
642 | }; |
643 | #endif /* TARGET_PPC64 */ |
644 | |
645 | static const VMStateDescription vmstate_tlb6xx_entry = { |
646 | .name = "cpu/tlb6xx_entry" , |
647 | .version_id = 1, |
648 | .minimum_version_id = 1, |
649 | .fields = (VMStateField[]) { |
650 | VMSTATE_UINTTL(pte0, ppc6xx_tlb_t), |
651 | VMSTATE_UINTTL(pte1, ppc6xx_tlb_t), |
652 | VMSTATE_UINTTL(EPN, ppc6xx_tlb_t), |
653 | VMSTATE_END_OF_LIST() |
654 | }, |
655 | }; |
656 | |
657 | static bool tlb6xx_needed(void *opaque) |
658 | { |
659 | PowerPCCPU *cpu = opaque; |
660 | CPUPPCState *env = &cpu->env; |
661 | |
662 | return env->nb_tlb && (env->tlb_type == TLB_6XX); |
663 | } |
664 | |
665 | static const VMStateDescription vmstate_tlb6xx = { |
666 | .name = "cpu/tlb6xx" , |
667 | .version_id = 1, |
668 | .minimum_version_id = 1, |
669 | .needed = tlb6xx_needed, |
670 | .fields = (VMStateField[]) { |
671 | VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL), |
672 | VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlb6, PowerPCCPU, |
673 | env.nb_tlb, |
674 | vmstate_tlb6xx_entry, |
675 | ppc6xx_tlb_t), |
676 | VMSTATE_UINTTL_ARRAY(env.tgpr, PowerPCCPU, 4), |
677 | VMSTATE_END_OF_LIST() |
678 | } |
679 | }; |
680 | |
681 | static const VMStateDescription vmstate_tlbemb_entry = { |
682 | .name = "cpu/tlbemb_entry" , |
683 | .version_id = 1, |
684 | .minimum_version_id = 1, |
685 | .fields = (VMStateField[]) { |
686 | VMSTATE_UINT64(RPN, ppcemb_tlb_t), |
687 | VMSTATE_UINTTL(EPN, ppcemb_tlb_t), |
688 | VMSTATE_UINTTL(PID, ppcemb_tlb_t), |
689 | VMSTATE_UINTTL(size, ppcemb_tlb_t), |
690 | VMSTATE_UINT32(prot, ppcemb_tlb_t), |
691 | VMSTATE_UINT32(attr, ppcemb_tlb_t), |
692 | VMSTATE_END_OF_LIST() |
693 | }, |
694 | }; |
695 | |
696 | static bool tlbemb_needed(void *opaque) |
697 | { |
698 | PowerPCCPU *cpu = opaque; |
699 | CPUPPCState *env = &cpu->env; |
700 | |
701 | return env->nb_tlb && (env->tlb_type == TLB_EMB); |
702 | } |
703 | |
704 | static bool pbr403_needed(void *opaque) |
705 | { |
706 | PowerPCCPU *cpu = opaque; |
707 | uint32_t pvr = cpu->env.spr[SPR_PVR]; |
708 | |
709 | return (pvr & 0xffff0000) == 0x00200000; |
710 | } |
711 | |
712 | static const VMStateDescription vmstate_pbr403 = { |
713 | .name = "cpu/pbr403" , |
714 | .version_id = 1, |
715 | .minimum_version_id = 1, |
716 | .needed = pbr403_needed, |
717 | .fields = (VMStateField[]) { |
718 | VMSTATE_UINTTL_ARRAY(env.pb, PowerPCCPU, 4), |
719 | VMSTATE_END_OF_LIST() |
720 | }, |
721 | }; |
722 | |
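/*
 * Note: the section name below is (presumably intentionally) left as
 * "cpu/tlb6xx"; renaming it would change the migration stream format.
 */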
723 | static const VMStateDescription vmstate_tlbemb = { |
724 | .name = "cpu/tlb6xx" , |
725 | .version_id = 1, |
726 | .minimum_version_id = 1, |
727 | .needed = tlbemb_needed, |
728 | .fields = (VMStateField[]) { |
729 | VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL), |
730 | VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlbe, PowerPCCPU, |
731 | env.nb_tlb, |
732 | vmstate_tlbemb_entry, |
733 | ppcemb_tlb_t), |
734 | /* 403 protection registers */ |
735 | VMSTATE_END_OF_LIST() |
736 | }, |
737 | .subsections = (const VMStateDescription*[]) { |
738 | &vmstate_pbr403, |
739 | NULL |
740 | } |
741 | }; |
742 | |
743 | static const VMStateDescription vmstate_tlbmas_entry = { |
744 | .name = "cpu/tlbmas_entry" , |
745 | .version_id = 1, |
746 | .minimum_version_id = 1, |
747 | .fields = (VMStateField[]) { |
748 | VMSTATE_UINT32(mas8, ppcmas_tlb_t), |
749 | VMSTATE_UINT32(mas1, ppcmas_tlb_t), |
750 | VMSTATE_UINT64(mas2, ppcmas_tlb_t), |
751 | VMSTATE_UINT64(mas7_3, ppcmas_tlb_t), |
752 | VMSTATE_END_OF_LIST() |
753 | }, |
754 | }; |
755 | |
756 | static bool tlbmas_needed(void *opaque) |
757 | { |
758 | PowerPCCPU *cpu = opaque; |
759 | CPUPPCState *env = &cpu->env; |
760 | |
761 | return env->nb_tlb && (env->tlb_type == TLB_MAS); |
762 | } |
763 | |
764 | static const VMStateDescription vmstate_tlbmas = { |
765 | .name = "cpu/tlbmas" , |
766 | .version_id = 1, |
767 | .minimum_version_id = 1, |
768 | .needed = tlbmas_needed, |
769 | .fields = (VMStateField[]) { |
770 | VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL), |
771 | VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlbm, PowerPCCPU, |
772 | env.nb_tlb, |
773 | vmstate_tlbmas_entry, |
774 | ppcmas_tlb_t), |
775 | VMSTATE_END_OF_LIST() |
776 | } |
777 | }; |
778 | |
779 | static bool compat_needed(void *opaque) |
780 | { |
781 | PowerPCCPU *cpu = opaque; |
782 | |
783 | assert(!(cpu->compat_pvr && !cpu->vhyp)); |
784 | return !cpu->pre_2_10_migration && cpu->compat_pvr != 0; |
785 | } |
786 | |
787 | static const VMStateDescription vmstate_compat = { |
788 | .name = "cpu/compat" , |
789 | .version_id = 1, |
790 | .minimum_version_id = 1, |
791 | .needed = compat_needed, |
792 | .fields = (VMStateField[]) { |
793 | VMSTATE_UINT32(compat_pvr, PowerPCCPU), |
794 | VMSTATE_END_OF_LIST() |
795 | } |
796 | }; |
797 | |
798 | const VMStateDescription vmstate_ppc_cpu = { |
799 | .name = "cpu" , |
800 | .version_id = 5, |
801 | .minimum_version_id = 5, |
802 | .minimum_version_id_old = 4, |
803 | .load_state_old = cpu_load_old, |
804 | .pre_save = cpu_pre_save, |
805 | .post_load = cpu_post_load, |
806 | .fields = (VMStateField[]) { |
807 | VMSTATE_UNUSED(sizeof(target_ulong)), /* was _EQUAL(env.spr[SPR_PVR]) */ |
808 | |
809 | /* User mode architected state */ |
810 | VMSTATE_UINTTL_ARRAY(env.gpr, PowerPCCPU, 32), |
811 | #if !defined(TARGET_PPC64) |
812 | VMSTATE_UINTTL_ARRAY(env.gprh, PowerPCCPU, 32), |
813 | #endif |
814 | VMSTATE_UINT32_ARRAY(env.crf, PowerPCCPU, 8), |
815 | VMSTATE_UINTTL(env.nip, PowerPCCPU), |
816 | |
817 | /* SPRs */ |
818 | VMSTATE_UINTTL_ARRAY(env.spr, PowerPCCPU, 1024), |
819 | VMSTATE_UINT64(env.spe_acc, PowerPCCPU), |
820 | |
821 | /* Reservation */ |
822 | VMSTATE_UINTTL(env.reserve_addr, PowerPCCPU), |
823 | |
824 | /* Supervisor mode architected state */ |
825 | VMSTATE_UINTTL(env.msr, PowerPCCPU), |
826 | |
827 | /* Internal state */ |
828 | VMSTATE_UINTTL(env.hflags_nmsr, PowerPCCPU), |
829 | /* FIXME: access_type? */ |
830 | |
831 | /* Sanity checking */ |
832 | VMSTATE_UINTTL_TEST(mig_msr_mask, PowerPCCPU, cpu_pre_2_8_migration), |
833 | VMSTATE_UINT64_TEST(mig_insns_flags, PowerPCCPU, cpu_pre_2_8_migration), |
834 | VMSTATE_UINT64_TEST(mig_insns_flags2, PowerPCCPU, |
835 | cpu_pre_2_8_migration), |
836 | VMSTATE_UINT32_TEST(mig_nb_BATs, PowerPCCPU, cpu_pre_2_8_migration), |
837 | VMSTATE_END_OF_LIST() |
838 | }, |
839 | .subsections = (const VMStateDescription*[]) { |
840 | &vmstate_fpu, |
841 | &vmstate_altivec, |
842 | &vmstate_vsx, |
843 | &vmstate_sr, |
844 | #ifdef TARGET_PPC64 |
845 | &vmstate_tm, |
846 | &vmstate_slb, |
847 | #endif /* TARGET_PPC64 */ |
848 | &vmstate_tlb6xx, |
849 | &vmstate_tlbemb, |
850 | &vmstate_tlbmas, |
851 | &vmstate_compat, |
852 | NULL |
853 | } |
854 | }; |
855 | |