1#include "qemu/osdep.h"
2#include "cpu.h"
3#include "qemu/error-report.h"
4#include "sysemu/kvm.h"
5#include "kvm_arm.h"
6#include "internals.h"
7#include "migration/cpu.h"
8
9static bool vfp_needed(void *opaque)
10{
11 ARMCPU *cpu = opaque;
12 CPUARMState *env = &cpu->env;
13
14 return arm_feature(env, ARM_FEATURE_VFP);
15}
16
17static int get_fpscr(QEMUFile *f, void *opaque, size_t size,
18 const VMStateField *field)
19{
20 ARMCPU *cpu = opaque;
21 CPUARMState *env = &cpu->env;
22 uint32_t val = qemu_get_be32(f);
23
24 vfp_set_fpscr(env, val);
25 return 0;
26}
27
28static int put_fpscr(QEMUFile *f, void *opaque, size_t size,
29 const VMStateField *field, QJSON *vmdesc)
30{
31 ARMCPU *cpu = opaque;
32 CPUARMState *env = &cpu->env;
33
34 qemu_put_be32(f, vfp_get_fpscr(env));
35 return 0;
36}
37
/*
 * FPSCR needs a custom VMStateInfo because it is accessed via
 * vfp_get_fpscr()/vfp_set_fpscr() rather than as a plain struct field.
 */
static const VMStateInfo vmstate_fpscr = {
    .name = "fpscr",
    .get = get_fpscr,
    .put = put_fpscr,
};
43
/*
 * VFP/Neon register state subsection; only sent when vfp_needed()
 * says this CPU has VFP. The field order here is the wire format,
 * so it must not be changed.
 */
static const VMStateDescription vmstate_vfp = {
    .name = "cpu/vfp",
    .version_id = 3,
    .minimum_version_id = 3,
    .needed = vfp_needed,
    .fields = (VMStateField[]) {
        /* For compatibility, store Qn out of Zn here. */
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[0].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[1].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[2].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[3].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[4].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[5].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[6].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[7].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[8].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[9].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[10].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[11].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[12].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[13].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[14].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[15].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[16].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[17].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[18].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[19].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[20].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[21].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[22].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[23].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[24].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[25].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[26].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[27].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[28].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[29].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[30].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[31].d, ARMCPU, 0, 2),

        /* The xregs array is a little awkward because element 1 (FPSCR)
         * requires a specific accessor, so we have to split it up in
         * the vmstate:
         */
        VMSTATE_UINT32(env.vfp.xregs[0], ARMCPU),
        VMSTATE_UINT32_SUB_ARRAY(env.vfp.xregs, ARMCPU, 2, 14),
        /* FPSCR (xregs[1]) goes through the custom vmstate_fpscr info. */
        {
            .name = "fpscr",
            .version_id = 0,
            .size = sizeof(uint32_t),
            .info = &vmstate_fpscr,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_END_OF_LIST()
    }
};
101
102static bool iwmmxt_needed(void *opaque)
103{
104 ARMCPU *cpu = opaque;
105 CPUARMState *env = &cpu->env;
106
107 return arm_feature(env, ARM_FEATURE_IWMMXT);
108}
109
/* iwMMXt data and control registers, sent only when iwmmxt_needed(). */
static const VMStateDescription vmstate_iwmmxt = {
    .name = "cpu/iwmmxt",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = iwmmxt_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.iwmmxt.regs, ARMCPU, 16),
        VMSTATE_UINT32_ARRAY(env.iwmmxt.cregs, ARMCPU, 16),
        VMSTATE_END_OF_LIST()
    }
};
121
122#ifdef TARGET_AARCH64
123/* The expression ARM_MAX_VQ - 2 is 0 for pure AArch32 build,
124 * and ARMPredicateReg is actively empty. This triggers errors
125 * in the expansion of the VMSTATE macros.
126 */
127
128static bool sve_needed(void *opaque)
129{
130 ARMCPU *cpu = opaque;
131
132 return cpu_isar_feature(aa64_sve, cpu);
133}
134
135/* The first two words of each Zreg is stored in VFP state. */
static const VMStateDescription vmstate_zreg_hi_reg = {
    .name = "cpu/sve/zreg_hi",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        /* Words 0 and 1 travel in cpu/vfp; only send words 2.. here. */
        VMSTATE_UINT64_SUB_ARRAY(d, ARMVectorReg, 2, ARM_MAX_VQ - 2),
        VMSTATE_END_OF_LIST()
    }
};
145
/* One SVE predicate register (the whole P register contents). */
static const VMStateDescription vmstate_preg_reg = {
    .name = "cpu/sve/preg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(p, ARMPredicateReg, 2 * ARM_MAX_VQ / 8),
        VMSTATE_END_OF_LIST()
    }
};
155
/*
 * SVE state: the high parts of the 32 Z registers plus the 17
 * predicate registers (16 P regs + FFR), sent only when sve_needed().
 */
static const VMStateDescription vmstate_sve = {
    .name = "cpu/sve",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = sve_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(env.vfp.zregs, ARMCPU, 32, 0,
                             vmstate_zreg_hi_reg, ARMVectorReg),
        VMSTATE_STRUCT_ARRAY(env.vfp.pregs, ARMCPU, 17, 0,
                             vmstate_preg_reg, ARMPredicateReg),
        VMSTATE_END_OF_LIST()
    }
};
169#endif /* AARCH64 */
170
171static bool serror_needed(void *opaque)
172{
173 ARMCPU *cpu = opaque;
174 CPUARMState *env = &cpu->env;
175
176 return env->serror.pending != 0;
177}
178
/* Pending SError exception state, sent only when serror_needed(). */
static const VMStateDescription vmstate_serror = {
    .name = "cpu/serror",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = serror_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(env.serror.pending, ARMCPU),
        VMSTATE_UINT8(env.serror.has_esr, ARMCPU),
        VMSTATE_UINT64(env.serror.esr, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};
191
/*
 * Always send the irq-line-state subsection; cpu_pre_load() uses a
 * sentinel to detect migration streams from older QEMU that lack it.
 */
static bool irq_line_state_needed(void *opaque)
{
    return true;
}
196
/* State of the CPU's inbound IRQ/FIQ/VIRQ/VFIQ lines. */
static const VMStateDescription vmstate_irq_line_state = {
    .name = "cpu/irq-line-state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = irq_line_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.irq_line_state, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};
207
208static bool m_needed(void *opaque)
209{
210 ARMCPU *cpu = opaque;
211 CPUARMState *env = &cpu->env;
212
213 return arm_feature(env, ARM_FEATURE_M);
214}
215
/*
 * Non-secure FAULTMASK/PRIMASK. In a subsection because older QEMU
 * transferred these inside the CPSR value (see get_cpsr()).
 */
static const VMStateDescription vmstate_m_faultmask_primask = {
    .name = "cpu/m/faultmask-primask",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.v7m.faultmask[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.primask[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};
227
228/* CSSELR is in a subsection because we didn't implement it previously.
229 * Migration from an old implementation will leave it at zero, which
230 * is OK since the only CPUs in the old implementation make the
231 * register RAZ/WI.
232 * Since there was no version of QEMU which implemented the CSSELR for
233 * just non-secure, we transfer both banks here rather than putting
234 * the secure banked version in the m-security subsection.
235 */
236static bool csselr_vmstate_validate(void *opaque, int version_id)
237{
238 ARMCPU *cpu = opaque;
239
240 return cpu->env.v7m.csselr[M_REG_NS] <= R_V7M_CSSELR_INDEX_MASK
241 && cpu->env.v7m.csselr[M_REG_S] <= R_V7M_CSSELR_INDEX_MASK;
242}
243
244static bool m_csselr_needed(void *opaque)
245{
246 ARMCPU *cpu = opaque;
247
248 return !arm_v7m_csselr_razwi(cpu);
249}
250
/* Both banks of CSSELR (see comment above csselr_vmstate_validate()). */
static const VMStateDescription vmstate_m_csselr = {
    .name = "cpu/m/csselr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_csselr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.v7m.csselr, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_VALIDATE("CSSELR is valid", csselr_vmstate_validate),
        VMSTATE_END_OF_LIST()
    }
};
262
/* Non-secure SCR; the secure bank travels in cpu/m-security. */
static const VMStateDescription vmstate_m_scr = {
    .name = "cpu/m/scr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.v7m.scr[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};
273
/* The currently-inactive stack pointer (MSP or PSP, whichever is not SP). */
static const VMStateDescription vmstate_m_other_sp = {
    .name = "cpu/m/other-sp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.v7m.other_sp, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};
284
285static bool m_v8m_needed(void *opaque)
286{
287 ARMCPU *cpu = opaque;
288 CPUARMState *env = &cpu->env;
289
290 return arm_feature(env, ARM_FEATURE_M) && arm_feature(env, ARM_FEATURE_V8);
291}
292
/* v8M-only state: both banks of the MSP/PSP stack limit registers. */
static const VMStateDescription vmstate_m_v8m = {
    .name = "cpu/m/v8m",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_v8m_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.v7m.msplim, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32_ARRAY(env.v7m.psplim, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_END_OF_LIST()
    }
};
304
/* M-profile floating-point control state, sent only when vfp_needed(). */
static const VMStateDescription vmstate_m_fp = {
    .name = "cpu/m/fp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vfp_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.v7m.fpcar, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32_ARRAY(env.v7m.fpccr, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32_ARRAY(env.v7m.fpdscr, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32_ARRAY(env.v7m.cpacr, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32(env.v7m.nsacr, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};
319
/*
 * Core M-profile system state (non-secure bank where registers are
 * banked; secure banks travel in cpu/m-security). The field order is
 * the wire format and must not be changed.
 */
static const VMStateDescription vmstate_m = {
    .name = "cpu/m",
    .version_id = 4,
    .minimum_version_id = 4,
    .needed = m_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.v7m.vecbase[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.basepri[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.control[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.ccr[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.cfsr[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.hfsr, ARMCPU),
        VMSTATE_UINT32(env.v7m.dfsr, ARMCPU),
        VMSTATE_UINT32(env.v7m.mmfar[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.bfar, ARMCPU),
        VMSTATE_UINT32(env.v7m.mpu_ctrl[M_REG_NS], ARMCPU),
        VMSTATE_INT32(env.v7m.exception, ARMCPU),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_m_faultmask_primask,
        &vmstate_m_csselr,
        &vmstate_m_scr,
        &vmstate_m_other_sp,
        &vmstate_m_v8m,
        &vmstate_m_fp,
        NULL
    }
};
349
350static bool thumb2ee_needed(void *opaque)
351{
352 ARMCPU *cpu = opaque;
353 CPUARMState *env = &cpu->env;
354
355 return arm_feature(env, ARM_FEATURE_THUMB2EE);
356}
357
/* ThumbEE control and handler-base registers. */
static const VMStateDescription vmstate_thumb2ee = {
    .name = "cpu/thumb2ee",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = thumb2ee_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.teecr, ARMCPU),
        VMSTATE_UINT32(env.teehbr, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};
369
370static bool pmsav7_needed(void *opaque)
371{
372 ARMCPU *cpu = opaque;
373 CPUARMState *env = &cpu->env;
374
375 return arm_feature(env, ARM_FEATURE_PMSA) &&
376 arm_feature(env, ARM_FEATURE_V7) &&
377 !arm_feature(env, ARM_FEATURE_V8);
378}
379
380static bool pmsav7_rgnr_vmstate_validate(void *opaque, int version_id)
381{
382 ARMCPU *cpu = opaque;
383
384 return cpu->env.pmsav7.rnr[M_REG_NS] < cpu->pmsav7_dregion;
385}
386
/*
 * v7 PMSA (MPU) region registers; the array lengths are governed by
 * pmsav7_dregion, which is checked against rnr by the VMSTATE_VALIDATE.
 */
static const VMStateDescription vmstate_pmsav7 = {
    .name = "cpu/pmsav7",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmsav7_needed,
    .fields = (VMStateField[]) {
        VMSTATE_VARRAY_UINT32(env.pmsav7.drbar, ARMCPU, pmsav7_dregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav7.drsr, ARMCPU, pmsav7_dregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav7.dracr, ARMCPU, pmsav7_dregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VALIDATE("rgnr is valid", pmsav7_rgnr_vmstate_validate),
        VMSTATE_END_OF_LIST()
    }
};
403
404static bool pmsav7_rnr_needed(void *opaque)
405{
406 ARMCPU *cpu = opaque;
407 CPUARMState *env = &cpu->env;
408
409 /* For R profile cores pmsav7.rnr is migrated via the cpreg
410 * "RGNR" definition in helper.h. For M profile we have to
411 * migrate it separately.
412 */
413 return arm_feature(env, ARM_FEATURE_M);
414}
415
/* Non-secure MPU region number register (M-profile only; see above). */
static const VMStateDescription vmstate_pmsav7_rnr = {
    .name = "cpu/pmsav7-rnr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmsav7_rnr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.pmsav7.rnr[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};
426
427static bool pmsav8_needed(void *opaque)
428{
429 ARMCPU *cpu = opaque;
430 CPUARMState *env = &cpu->env;
431
432 return arm_feature(env, ARM_FEATURE_PMSA) &&
433 arm_feature(env, ARM_FEATURE_V8);
434}
435
/* v8 PMSA (MPU) non-secure state; secure banks travel in cpu/m-security. */
static const VMStateDescription vmstate_pmsav8 = {
    .name = "cpu/pmsav8",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmsav8_needed,
    .fields = (VMStateField[]) {
        VMSTATE_VARRAY_UINT32(env.pmsav8.rbar[M_REG_NS], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav8.rlar[M_REG_NS], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_UINT32(env.pmsav8.mair0[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.pmsav8.mair1[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};
451
452static bool s_rnr_vmstate_validate(void *opaque, int version_id)
453{
454 ARMCPU *cpu = opaque;
455
456 return cpu->env.pmsav7.rnr[M_REG_S] < cpu->pmsav7_dregion;
457}
458
459static bool sau_rnr_vmstate_validate(void *opaque, int version_id)
460{
461 ARMCPU *cpu = opaque;
462
463 return cpu->env.sau.rnr < cpu->sau_sregion;
464}
465
466static bool m_security_needed(void *opaque)
467{
468 ARMCPU *cpu = opaque;
469 CPUARMState *env = &cpu->env;
470
471 return arm_feature(env, ARM_FEATURE_M_SECURITY);
472}
473
/*
 * M-profile security extension state: the secure banks of the banked
 * registers, the SAU, and the secure fault registers. The field order
 * is the wire format and must not be changed.
 */
static const VMStateDescription vmstate_m_security = {
    .name = "cpu/m-security",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_security_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.v7m.secure, ARMCPU),
        VMSTATE_UINT32(env.v7m.other_ss_msp, ARMCPU),
        VMSTATE_UINT32(env.v7m.other_ss_psp, ARMCPU),
        VMSTATE_UINT32(env.v7m.basepri[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.primask[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.faultmask[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.control[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.vecbase[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.pmsav8.mair0[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.pmsav8.mair1[M_REG_S], ARMCPU),
        VMSTATE_VARRAY_UINT32(env.pmsav8.rbar[M_REG_S], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav8.rlar[M_REG_S], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        /* rnr must precede the validate entry that checks it. */
        VMSTATE_UINT32(env.pmsav7.rnr[M_REG_S], ARMCPU),
        VMSTATE_VALIDATE("secure MPU_RNR is valid", s_rnr_vmstate_validate),
        VMSTATE_UINT32(env.v7m.mpu_ctrl[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.ccr[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.mmfar[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.cfsr[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.sfsr, ARMCPU),
        VMSTATE_UINT32(env.v7m.sfar, ARMCPU),
        VMSTATE_VARRAY_UINT32(env.sau.rbar, ARMCPU, sau_sregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.sau.rlar, ARMCPU, sau_sregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_UINT32(env.sau.rnr, ARMCPU),
        VMSTATE_VALIDATE("SAU_RNR is valid", sau_rnr_vmstate_validate),
        VMSTATE_UINT32(env.sau.ctrl, ARMCPU),
        VMSTATE_UINT32(env.v7m.scr[M_REG_S], ARMCPU),
        /* AIRCR is not secure-only, but our implementation is R/O if the
         * security extension is unimplemented, so we migrate it here.
         */
        VMSTATE_UINT32(env.v7m.aircr, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};
517
/*
 * Incoming "cpsr" field. For M-profile this is an XPSR value (with
 * conversion from the CPSR format used by older QEMU versions); for
 * A/R profile it is either an AArch64 PSTATE or an AArch32 CPSR,
 * distinguished by the PSTATE_nRW bit. Always returns 0 (success).
 */
static int get_cpsr(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    uint32_t val = qemu_get_be32(f);

    if (arm_feature(env, ARM_FEATURE_M)) {
        if (val & XPSR_EXCP) {
            /* This is a CPSR format value from an older QEMU. (We can tell
             * because values transferred in XPSR format always have zero
             * for the EXCP field, and CPSR format will always have bit 4
             * set in CPSR_M.) Rearrange it into XPSR format. The significant
             * differences are that the T bit is not in the same place, the
             * primask/faultmask info may be in the CPSR I and F bits, and
             * we do not want the mode bits.
             * We know that this cleanup happened before v8M, so there
             * is no complication with banked primask/faultmask.
             */
            uint32_t newval = val;

            assert(!arm_feature(env, ARM_FEATURE_M_SECURITY));

            newval &= (CPSR_NZCV | CPSR_Q | CPSR_IT | CPSR_GE);
            if (val & CPSR_T) {
                newval |= XPSR_T;
            }
            /* If the I or F bits are set then this is a migration from
             * an old QEMU which still stored the M profile FAULTMASK
             * and PRIMASK in env->daif. For a new QEMU, the data is
             * transferred using the vmstate_m_faultmask_primask subsection.
             */
            if (val & CPSR_F) {
                env->v7m.faultmask[M_REG_NS] = 1;
            }
            if (val & CPSR_I) {
                env->v7m.primask[M_REG_NS] = 1;
            }
            val = newval;
        }
        /* Ignore the low bits, they are handled by vmstate_m. */
        xpsr_write(env, val, ~XPSR_EXCP);
        return 0;
    }

    /* The nRW (not-AArch64) bit distinguishes PSTATE from CPSR format. */
    env->aarch64 = ((val & PSTATE_nRW) == 0);

    if (is_a64(env)) {
        pstate_write(env, val);
        return 0;
    }

    /* Raw write: restore all bits exactly as sent, no side effects. */
    cpsr_write(env, val, 0xffffffff, CPSRWriteRaw);
    return 0;
}
573
574static int put_cpsr(QEMUFile *f, void *opaque, size_t size,
575 const VMStateField *field, QJSON *vmdesc)
576{
577 ARMCPU *cpu = opaque;
578 CPUARMState *env = &cpu->env;
579 uint32_t val;
580
581 if (arm_feature(env, ARM_FEATURE_M)) {
582 /* The low 9 bits are v7m.exception, which is handled by vmstate_m. */
583 val = xpsr_read(env) & ~XPSR_EXCP;
584 } else if (is_a64(env)) {
585 val = pstate_read(env);
586 } else {
587 val = cpsr_read(env);
588 }
589
590 qemu_put_be32(f, val);
591 return 0;
592}
593
/*
 * CPSR needs a custom VMStateInfo: the on-the-wire value is a
 * mode-dependent encoding (XPSR/PSTATE/CPSR) handled by the accessors.
 */
static const VMStateInfo vmstate_cpsr = {
    .name = "cpsr",
    .get = get_cpsr,
    .put = put_cpsr,
};
599
600static int get_power(QEMUFile *f, void *opaque, size_t size,
601 const VMStateField *field)
602{
603 ARMCPU *cpu = opaque;
604 bool powered_off = qemu_get_byte(f);
605 cpu->power_state = powered_off ? PSCI_OFF : PSCI_ON;
606 return 0;
607}
608
609static int put_power(QEMUFile *f, void *opaque, size_t size,
610 const VMStateField *field, QJSON *vmdesc)
611{
612 ARMCPU *cpu = opaque;
613
614 /* Migration should never happen while we transition power states */
615
616 if (cpu->power_state == PSCI_ON ||
617 cpu->power_state == PSCI_OFF) {
618 bool powered_off = (cpu->power_state == PSCI_OFF) ? true : false;
619 qemu_put_byte(f, powered_off);
620 return 0;
621 } else {
622 return 1;
623 }
624}
625
/*
 * power_state needs a custom VMStateInfo: the wire format is a bool
 * ("powered off") while the in-memory state is the PSCI power enum.
 */
static const VMStateInfo vmstate_powered_off = {
    .name = "powered_off",
    .get = get_power,
    .put = put_power,
};
631
632static int cpu_pre_save(void *opaque)
633{
634 ARMCPU *cpu = opaque;
635
636 if (!kvm_enabled()) {
637 pmu_op_start(&cpu->env);
638 }
639
640 if (kvm_enabled()) {
641 if (!write_kvmstate_to_list(cpu)) {
642 /* This should never fail */
643 abort();
644 }
645 } else {
646 if (!write_cpustate_to_list(cpu, false)) {
647 /* This should never fail. */
648 abort();
649 }
650 }
651
652 cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
653 memcpy(cpu->cpreg_vmstate_indexes, cpu->cpreg_indexes,
654 cpu->cpreg_array_len * sizeof(uint64_t));
655 memcpy(cpu->cpreg_vmstate_values, cpu->cpreg_values,
656 cpu->cpreg_array_len * sizeof(uint64_t));
657
658 return 0;
659}
660
661static int cpu_post_save(void *opaque)
662{
663 ARMCPU *cpu = opaque;
664
665 if (!kvm_enabled()) {
666 pmu_op_finish(&cpu->env);
667 }
668
669 return 0;
670}
671
672static int cpu_pre_load(void *opaque)
673{
674 ARMCPU *cpu = opaque;
675 CPUARMState *env = &cpu->env;
676
677 /*
678 * Pre-initialize irq_line_state to a value that's never valid as
679 * real data, so cpu_post_load() can tell whether we've seen the
680 * irq-line-state subsection in the incoming migration state.
681 */
682 env->irq_line_state = UINT32_MAX;
683
684 if (!kvm_enabled()) {
685 pmu_op_start(&cpu->env);
686 }
687
688 return 0;
689}
690
/*
 * Post-load hook: reconcile incoming cpreg data with our register
 * list, push it into the CPU (and KVM if in use), and rebuild any
 * state derived from it. Returns 0 on success, -1 to fail migration.
 */
static int cpu_post_load(void *opaque, int version_id)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    int i, v;

    /*
     * Handle migration compatibility from old QEMU which didn't
     * send the irq-line-state subsection. A QEMU without it did not
     * implement the HCR_EL2.{VI,VF} bits as generating interrupts,
     * so for TCG the line state matches the bits set in cs->interrupt_request.
     * For KVM the line state is not stored in cs->interrupt_request
     * and so this will leave irq_line_state as 0, but this is OK because
     * we only need to care about it for TCG.
     */
    if (env->irq_line_state == UINT32_MAX) {
        CPUState *cs = CPU(cpu);

        env->irq_line_state = cs->interrupt_request &
            (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIQ |
             CPU_INTERRUPT_VIRQ | CPU_INTERRUPT_VFIQ);
    }

    /* Update the values list from the incoming migration data.
     * Anything in the incoming data which we don't know about is
     * a migration failure; anything we know about but the incoming
     * data doesn't specify retains its current (reset) value.
     * The indexes list remains untouched -- we only inspect the
     * incoming migration index list so we can match the values array
     * entries with the right slots in our own values array.
     */

    /* Merge of two sorted index lists: i walks ours, v walks theirs. */
    for (i = 0, v = 0; i < cpu->cpreg_array_len
             && v < cpu->cpreg_vmstate_array_len; i++) {
        if (cpu->cpreg_vmstate_indexes[v] > cpu->cpreg_indexes[i]) {
            /* register in our list but not incoming : skip it */
            continue;
        }
        if (cpu->cpreg_vmstate_indexes[v] < cpu->cpreg_indexes[i]) {
            /* register in their list but not ours: fail migration */
            return -1;
        }
        /* matching register, copy the value over */
        cpu->cpreg_values[i] = cpu->cpreg_vmstate_values[v];
        v++;
    }

    if (kvm_enabled()) {
        if (!write_list_to_kvmstate(cpu, KVM_PUT_FULL_STATE)) {
            return -1;
        }
        /* Note that it's OK for the TCG side not to know about
         * every register in the list; KVM is authoritative if
         * we're using it.
         */
        write_list_to_cpustate(cpu);
    } else {
        if (!write_list_to_cpustate(cpu)) {
            return -1;
        }
    }

    /* Re-sync debug state derived from the just-loaded registers. */
    hw_breakpoint_update_all(cpu);
    hw_watchpoint_update_all(cpu);

    if (!kvm_enabled()) {
        pmu_op_finish(&cpu->env);
    }

    return 0;
}
762
/*
 * Top-level ARM CPU migration description. The main field list is the
 * wire format (order matters); optional/feature-dependent state lives
 * in the subsections, each gated by its own .needed predicate.
 */
const VMStateDescription vmstate_arm_cpu = {
    .name = "cpu",
    .version_id = 22,
    .minimum_version_id = 22,
    .pre_save = cpu_pre_save,
    .post_save = cpu_post_save,
    .pre_load = cpu_pre_load,
    .post_load = cpu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.regs, ARMCPU, 16),
        VMSTATE_UINT64_ARRAY(env.xregs, ARMCPU, 32),
        VMSTATE_UINT64(env.pc, ARMCPU),
        /* CPSR goes through vmstate_cpsr (mode-dependent encoding). */
        {
            .name = "cpsr",
            .version_id = 0,
            .size = sizeof(uint32_t),
            .info = &vmstate_cpsr,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_UINT32(env.spsr, ARMCPU),
        VMSTATE_UINT64_ARRAY(env.banked_spsr, ARMCPU, 8),
        VMSTATE_UINT32_ARRAY(env.banked_r13, ARMCPU, 8),
        VMSTATE_UINT32_ARRAY(env.banked_r14, ARMCPU, 8),
        VMSTATE_UINT32_ARRAY(env.usr_regs, ARMCPU, 5),
        VMSTATE_UINT32_ARRAY(env.fiq_regs, ARMCPU, 5),
        VMSTATE_UINT64_ARRAY(env.elr_el, ARMCPU, 4),
        VMSTATE_UINT64_ARRAY(env.sp_el, ARMCPU, 4),
        /* The length-check must come before the arrays to avoid
         * incoming data possibly overflowing the array.
         */
        VMSTATE_INT32_POSITIVE_LE(cpreg_vmstate_array_len, ARMCPU),
        VMSTATE_VARRAY_INT32(cpreg_vmstate_indexes, ARMCPU,
                             cpreg_vmstate_array_len,
                             0, vmstate_info_uint64, uint64_t),
        VMSTATE_VARRAY_INT32(cpreg_vmstate_values, ARMCPU,
                             cpreg_vmstate_array_len,
                             0, vmstate_info_uint64, uint64_t),
        VMSTATE_UINT64(env.exclusive_addr, ARMCPU),
        VMSTATE_UINT64(env.exclusive_val, ARMCPU),
        VMSTATE_UINT64(env.exclusive_high, ARMCPU),
        VMSTATE_UINT64(env.features, ARMCPU),
        VMSTATE_UINT32(env.exception.syndrome, ARMCPU),
        VMSTATE_UINT32(env.exception.fsr, ARMCPU),
        VMSTATE_UINT64(env.exception.vaddress, ARMCPU),
        VMSTATE_TIMER_PTR(gt_timer[GTIMER_PHYS], ARMCPU),
        VMSTATE_TIMER_PTR(gt_timer[GTIMER_VIRT], ARMCPU),
        /* power_state goes through vmstate_powered_off (bool on the wire). */
        {
            .name = "power_state",
            .version_id = 0,
            .size = sizeof(bool),
            .info = &vmstate_powered_off,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_vfp,
        &vmstate_iwmmxt,
        &vmstate_m,
        &vmstate_thumb2ee,
        /* pmsav7_rnr must come before pmsav7 so that we have the
         * region number before we test it in the VMSTATE_VALIDATE
         * in vmstate_pmsav7.
         */
        &vmstate_pmsav7_rnr,
        &vmstate_pmsav7,
        &vmstate_pmsav8,
        &vmstate_m_security,
#ifdef TARGET_AARCH64
        &vmstate_sve,
#endif
        &vmstate_serror,
        &vmstate_irq_line_state,
        NULL
    }
};
841