/*
 * QEMU monitor
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "monitor/monitor.h"
#include "monitor/hmp-target.h"
#include "monitor/hmp.h"
#include "qapi/qmp/qdict.h"
#include "hw/i386/pc.h"
#include "sysemu/kvm.h"
#include "sysemu/sev.h"
#include "qapi/error.h"
#include "sev_i386.h"
#include "qapi/qapi-commands-misc-target.h"
#include "qapi/qapi-commands-misc.h"

/* Perform linear address sign extension */
static hwaddr addr_canonical(CPUArchState *env, hwaddr addr)
{
#ifdef TARGET_X86_64
    if (env->cr[4] & CR4_LA57_MASK) {
        if (addr & (1ULL << 56)) {
            addr |= (hwaddr)-(1LL << 57);
        }
    } else {
        if (addr & (1ULL << 47)) {
            addr |= (hwaddr)-(1LL << 48);
        }
    }
#endif
    return addr;
}

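/*
 * Print a single page table entry as "virtual: physical flags", where the
 * flags column decodes the NX/G/PSE/D/A/PCD/PWT/U/W bits of the entry.
 */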
static void print_pte(Monitor *mon, CPUArchState *env, hwaddr addr,
                      hwaddr pte, hwaddr mask)
{
    addr = addr_canonical(env, addr);

    monitor_printf(mon, TARGET_FMT_plx ": " TARGET_FMT_plx
                   " %c%c%c%c%c%c%c%c%c\n",
                   addr,
                   pte & mask,
                   pte & PG_NX_MASK ? 'X' : '-',
                   pte & PG_GLOBAL_MASK ? 'G' : '-',
                   pte & PG_PSE_MASK ? 'P' : '-',
                   pte & PG_DIRTY_MASK ? 'D' : '-',
                   pte & PG_ACCESSED_MASK ? 'A' : '-',
                   pte & PG_PCD_MASK ? 'C' : '-',
                   pte & PG_PWT_MASK ? 'T' : '-',
                   pte & PG_USER_MASK ? 'U' : '-',
                   pte & PG_RW_MASK ? 'W' : '-');
}

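/* Dump the guest page mappings for 32-bit (non-PAE) two-level paging. */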
static void tlb_info_32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2;
    uint32_t pgd, pde, pte;

    pgd = env->cr[3] & ~0xfff;
    for (l1 = 0; l1 < 1024; l1++) {
        cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
        pde = le32_to_cpu(pde);
        if (pde & PG_PRESENT_MASK) {
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                /* 4M pages */
                print_pte(mon, env, (l1 << 22), pde, ~((1 << 21) - 1));
            } else {
                for (l2 = 0; l2 < 1024; l2++) {
                    cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
                    pte = le32_to_cpu(pte);
                    if (pte & PG_PRESENT_MASK) {
                        print_pte(mon, env, (l1 << 22) + (l2 << 12),
                                  pte & ~PG_PSE_MASK,
                                  ~0xfff);
                    }
                }
            }
        }
    }
}

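/* Dump the guest page mappings for 32-bit PAE three-level paging. */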
static void tlb_info_pae32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2, l3;
    uint64_t pdpe, pde, pte;
    uint64_t pdp_addr, pd_addr, pt_addr;

    pdp_addr = env->cr[3] & ~0x1f;
    for (l1 = 0; l1 < 4; l1++) {
        cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
        pdpe = le64_to_cpu(pdpe);
        if (pdpe & PG_PRESENT_MASK) {
            pd_addr = pdpe & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
                pde = le64_to_cpu(pde);
                if (pde & PG_PRESENT_MASK) {
                    if (pde & PG_PSE_MASK) {
                        /* 2M pages with PAE, CR4.PSE is ignored */
                        print_pte(mon, env, (l1 << 30) + (l2 << 21), pde,
                                  ~((hwaddr)(1 << 20) - 1));
                    } else {
                        pt_addr = pde & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
                            pte = le64_to_cpu(pte);
                            if (pte & PG_PRESENT_MASK) {
                                print_pte(mon, env, (l1 << 30) + (l2 << 21)
                                          + (l3 << 12),
                                          pte & ~PG_PSE_MASK,
                                          ~(hwaddr)0xfff);
                            }
                        }
                    }
                }
            }
        }
    }
}

#ifdef TARGET_X86_64
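/*
 * Dump the guest page mappings for 4-level (48-bit) paging. Also used as the
 * per-PML5-entry helper for 5-level paging, with l0 and pml4_addr taken from
 * the PML5 entry being walked.
 */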
static void tlb_info_la48(Monitor *mon, CPUArchState *env,
                          uint64_t l0, uint64_t pml4_addr)
{
    uint64_t l1, l2, l3, l4;
    uint64_t pml4e, pdpe, pde, pte;
    uint64_t pdp_addr, pd_addr, pt_addr;

    for (l1 = 0; l1 < 512; l1++) {
        cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
        pml4e = le64_to_cpu(pml4e);
        if (!(pml4e & PG_PRESENT_MASK)) {
            continue;
        }

        pdp_addr = pml4e & 0x3fffffffff000ULL;
        for (l2 = 0; l2 < 512; l2++) {
            cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
            pdpe = le64_to_cpu(pdpe);
            if (!(pdpe & PG_PRESENT_MASK)) {
                continue;
            }

            if (pdpe & PG_PSE_MASK) {
                /* 1G pages, CR4.PSE is ignored */
                print_pte(mon, env, (l0 << 48) + (l1 << 39) + (l2 << 30),
                          pdpe, 0x3ffffc0000000ULL);
                continue;
            }

            pd_addr = pdpe & 0x3fffffffff000ULL;
            for (l3 = 0; l3 < 512; l3++) {
                cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
                pde = le64_to_cpu(pde);
                if (!(pde & PG_PRESENT_MASK)) {
                    continue;
                }

                if (pde & PG_PSE_MASK) {
                    /* 2M pages, CR4.PSE is ignored */
                    print_pte(mon, env, (l0 << 48) + (l1 << 39) + (l2 << 30) +
                              (l3 << 21), pde, 0x3ffffffe00000ULL);
                    continue;
                }

                pt_addr = pde & 0x3fffffffff000ULL;
                for (l4 = 0; l4 < 512; l4++) {
                    cpu_physical_memory_read(pt_addr + l4 * 8, &pte, 8);
                    pte = le64_to_cpu(pte);
                    if (pte & PG_PRESENT_MASK) {
                        print_pte(mon, env, (l0 << 48) + (l1 << 39) +
                                  (l2 << 30) + (l3 << 21) + (l4 << 12),
                                  pte & ~PG_PSE_MASK, 0x3fffffffff000ULL);
                    }
                }
            }
        }
    }
}

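/* Dump the guest page mappings for 5-level (57-bit) paging. */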
static void tlb_info_la57(Monitor *mon, CPUArchState *env)
{
    uint64_t l0;
    uint64_t pml5e;
    uint64_t pml5_addr;

    pml5_addr = env->cr[3] & 0x3fffffffff000ULL;
    for (l0 = 0; l0 < 512; l0++) {
        cpu_physical_memory_read(pml5_addr + l0 * 8, &pml5e, 8);
        pml5e = le64_to_cpu(pml5e);
        if (pml5e & PG_PRESENT_MASK) {
            tlb_info_la48(mon, env, l0, pml5e & 0x3fffffffff000ULL);
        }
    }
}
#endif /* TARGET_X86_64 */

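/*
 * HMP "info tlb": walk the current page tables and print every valid
 * virtual-to-physical mapping, choosing the walker that matches the guest's
 * paging mode (32-bit, PAE, 4-level or 5-level long mode).
 */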
void hmp_info_tlb(Monitor *mon, const QDict *qdict)
{
    CPUArchState *env;

    env = mon_get_cpu_env();
    if (!env) {
        monitor_printf(mon, "No CPU available\n");
        return;
    }

    if (!(env->cr[0] & CR0_PG_MASK)) {
        monitor_printf(mon, "PG disabled\n");
        return;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            if (env->cr[4] & CR4_LA57_MASK) {
                tlb_info_la57(mon, env);
            } else {
                tlb_info_la48(mon, env, 0, env->cr[3] & 0x3fffffffff000ULL);
            }
        } else
#endif
        {
            tlb_info_pae32(mon, env);
        }
    } else {
        tlb_info_32(mon, env);
    }
}

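/*
 * Range accumulator used by the "info mem" walkers: consecutive pages with
 * identical protection bits are merged, and a "start-end size protection"
 * line is printed whenever the protection changes.
 */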
static void mem_print(Monitor *mon, CPUArchState *env,
                      hwaddr *pstart, int *plast_prot,
                      hwaddr end, int prot)
{
    int prot1;
    prot1 = *plast_prot;
    if (prot != prot1) {
        if (*pstart != -1) {
            monitor_printf(mon, TARGET_FMT_plx "-" TARGET_FMT_plx " "
                           TARGET_FMT_plx " %c%c%c\n",
                           addr_canonical(env, *pstart),
                           addr_canonical(env, end),
                           addr_canonical(env, end - *pstart),
                           prot1 & PG_USER_MASK ? 'u' : '-',
                           'r',
                           prot1 & PG_RW_MASK ? 'w' : '-');
        }
        if (prot != 0) {
            *pstart = end;
        } else {
            *pstart = -1;
        }
        *plast_prot = prot;
    }
}

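/* Summarise virtual memory ranges for 32-bit (non-PAE) paging. */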
static void mem_info_32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2;
    int prot, last_prot;
    uint32_t pgd, pde, pte;
    hwaddr start, end;

    pgd = env->cr[3] & ~0xfff;
    last_prot = 0;
    start = -1;
    for (l1 = 0; l1 < 1024; l1++) {
        cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
        pde = le32_to_cpu(pde);
        end = l1 << 22;
        if (pde & PG_PRESENT_MASK) {
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                prot = pde & (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
                mem_print(mon, env, &start, &last_prot, end, prot);
            } else {
                for (l2 = 0; l2 < 1024; l2++) {
                    cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
                    pte = le32_to_cpu(pte);
                    end = (l1 << 22) + (l2 << 12);
                    if (pte & PG_PRESENT_MASK) {
                        prot = pte & pde &
                            (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
                    } else {
                        prot = 0;
                    }
                    mem_print(mon, env, &start, &last_prot, end, prot);
                }
            }
        } else {
            prot = 0;
            mem_print(mon, env, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, env, &start, &last_prot, (hwaddr)1 << 32, 0);
}

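/* Summarise virtual memory ranges for 32-bit PAE paging. */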
static void mem_info_pae32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2, l3;
    int prot, last_prot;
    uint64_t pdpe, pde, pte;
    uint64_t pdp_addr, pd_addr, pt_addr;
    hwaddr start, end;

    pdp_addr = env->cr[3] & ~0x1f;
    last_prot = 0;
    start = -1;
    for (l1 = 0; l1 < 4; l1++) {
        cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
        pdpe = le64_to_cpu(pdpe);
        end = l1 << 30;
        if (pdpe & PG_PRESENT_MASK) {
            pd_addr = pdpe & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
                pde = le64_to_cpu(pde);
                end = (l1 << 30) + (l2 << 21);
                if (pde & PG_PRESENT_MASK) {
                    if (pde & PG_PSE_MASK) {
                        prot = pde & (PG_USER_MASK | PG_RW_MASK |
                                      PG_PRESENT_MASK);
                        mem_print(mon, env, &start, &last_prot, end, prot);
                    } else {
                        pt_addr = pde & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
                            pte = le64_to_cpu(pte);
                            end = (l1 << 30) + (l2 << 21) + (l3 << 12);
                            if (pte & PG_PRESENT_MASK) {
                                prot = pte & pde & (PG_USER_MASK | PG_RW_MASK |
                                                    PG_PRESENT_MASK);
                            } else {
                                prot = 0;
                            }
                            mem_print(mon, env, &start, &last_prot, end, prot);
                        }
                    }
                } else {
                    prot = 0;
                    mem_print(mon, env, &start, &last_prot, end, prot);
                }
            }
        } else {
            prot = 0;
            mem_print(mon, env, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, env, &start, &last_prot, (hwaddr)1 << 32, 0);
}

#ifdef TARGET_X86_64
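/* Summarise virtual memory ranges for 4-level (48-bit) paging. */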
static void mem_info_la48(Monitor *mon, CPUArchState *env)
{
    int prot, last_prot;
    uint64_t l1, l2, l3, l4;
    uint64_t pml4e, pdpe, pde, pte;
    uint64_t pml4_addr, pdp_addr, pd_addr, pt_addr, start, end;

    pml4_addr = env->cr[3] & 0x3fffffffff000ULL;
    last_prot = 0;
    start = -1;
    for (l1 = 0; l1 < 512; l1++) {
        cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
        pml4e = le64_to_cpu(pml4e);
        end = l1 << 39;
        if (pml4e & PG_PRESENT_MASK) {
            pdp_addr = pml4e & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
                pdpe = le64_to_cpu(pdpe);
                end = (l1 << 39) + (l2 << 30);
                if (pdpe & PG_PRESENT_MASK) {
                    if (pdpe & PG_PSE_MASK) {
                        prot = pdpe & (PG_USER_MASK | PG_RW_MASK |
                                       PG_PRESENT_MASK);
                        prot &= pml4e;
                        mem_print(mon, env, &start, &last_prot, end, prot);
                    } else {
                        pd_addr = pdpe & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
                            pde = le64_to_cpu(pde);
                            end = (l1 << 39) + (l2 << 30) + (l3 << 21);
                            if (pde & PG_PRESENT_MASK) {
                                if (pde & PG_PSE_MASK) {
                                    prot = pde & (PG_USER_MASK | PG_RW_MASK |
                                                  PG_PRESENT_MASK);
                                    prot &= pml4e & pdpe;
                                    mem_print(mon, env, &start,
                                              &last_prot, end, prot);
                                } else {
                                    pt_addr = pde & 0x3fffffffff000ULL;
                                    for (l4 = 0; l4 < 512; l4++) {
                                        cpu_physical_memory_read(pt_addr
                                                                 + l4 * 8,
                                                                 &pte, 8);
                                        pte = le64_to_cpu(pte);
                                        end = (l1 << 39) + (l2 << 30) +
                                            (l3 << 21) + (l4 << 12);
                                        if (pte & PG_PRESENT_MASK) {
                                            prot = pte & (PG_USER_MASK |
                                                          PG_RW_MASK |
                                                          PG_PRESENT_MASK);
                                            prot &= pml4e & pdpe & pde;
                                        } else {
                                            prot = 0;
                                        }
                                        mem_print(mon, env, &start,
                                                  &last_prot, end, prot);
                                    }
                                }
                            } else {
                                prot = 0;
                                mem_print(mon, env, &start,
                                          &last_prot, end, prot);
                            }
                        }
                    }
                } else {
                    prot = 0;
                    mem_print(mon, env, &start, &last_prot, end, prot);
                }
            }
        } else {
            prot = 0;
            mem_print(mon, env, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, env, &start, &last_prot, (hwaddr)1 << 48, 0);
}

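/* Summarise virtual memory ranges for 5-level (57-bit) paging. */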
static void mem_info_la57(Monitor *mon, CPUArchState *env)
{
    int prot, last_prot;
    uint64_t l0, l1, l2, l3, l4;
    uint64_t pml5e, pml4e, pdpe, pde, pte;
    uint64_t pml5_addr, pml4_addr, pdp_addr, pd_addr, pt_addr, start, end;

    pml5_addr = env->cr[3] & 0x3fffffffff000ULL;
    last_prot = 0;
    start = -1;
    for (l0 = 0; l0 < 512; l0++) {
        cpu_physical_memory_read(pml5_addr + l0 * 8, &pml5e, 8);
        pml5e = le64_to_cpu(pml5e);
        end = l0 << 48;
        if (!(pml5e & PG_PRESENT_MASK)) {
            prot = 0;
            mem_print(mon, env, &start, &last_prot, end, prot);
            continue;
        }

        pml4_addr = pml5e & 0x3fffffffff000ULL;
        for (l1 = 0; l1 < 512; l1++) {
            cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
            pml4e = le64_to_cpu(pml4e);
            end = (l0 << 48) + (l1 << 39);
            if (!(pml4e & PG_PRESENT_MASK)) {
                prot = 0;
                mem_print(mon, env, &start, &last_prot, end, prot);
                continue;
            }

            pdp_addr = pml4e & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
                pdpe = le64_to_cpu(pdpe);
                end = (l0 << 48) + (l1 << 39) + (l2 << 30);
                if (!(pdpe & PG_PRESENT_MASK)) {
                    prot = 0;
                    mem_print(mon, env, &start, &last_prot, end, prot);
                    continue;
                }

                if (pdpe & PG_PSE_MASK) {
                    prot = pdpe & (PG_USER_MASK | PG_RW_MASK |
                                   PG_PRESENT_MASK);
                    prot &= pml5e & pml4e;
                    mem_print(mon, env, &start, &last_prot, end, prot);
                    continue;
                }

                pd_addr = pdpe & 0x3fffffffff000ULL;
                for (l3 = 0; l3 < 512; l3++) {
                    cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
                    pde = le64_to_cpu(pde);
                    end = (l0 << 48) + (l1 << 39) + (l2 << 30) + (l3 << 21);
                    if (!(pde & PG_PRESENT_MASK)) {
                        prot = 0;
                        mem_print(mon, env, &start, &last_prot, end, prot);
                        continue;
                    }

                    if (pde & PG_PSE_MASK) {
                        prot = pde & (PG_USER_MASK | PG_RW_MASK |
                                      PG_PRESENT_MASK);
                        prot &= pml5e & pml4e & pdpe;
                        mem_print(mon, env, &start, &last_prot, end, prot);
                        continue;
                    }

                    pt_addr = pde & 0x3fffffffff000ULL;
                    for (l4 = 0; l4 < 512; l4++) {
                        cpu_physical_memory_read(pt_addr + l4 * 8, &pte, 8);
                        pte = le64_to_cpu(pte);
                        end = (l0 << 48) + (l1 << 39) + (l2 << 30) +
                            (l3 << 21) + (l4 << 12);
                        if (pte & PG_PRESENT_MASK) {
                            prot = pte & (PG_USER_MASK | PG_RW_MASK |
                                          PG_PRESENT_MASK);
                            prot &= pml5e & pml4e & pdpe & pde;
                        } else {
                            prot = 0;
                        }
                        mem_print(mon, env, &start, &last_prot, end, prot);
                    }
                }
            }
        }
    }
    /* Flush last range */
    mem_print(mon, env, &start, &last_prot, (hwaddr)1 << 57, 0);
}
#endif /* TARGET_X86_64 */

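/*
 * HMP "info mem": print the guest's virtual memory ranges together with
 * their user/write protection, using the walker that matches the current
 * paging mode.
 */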
void hmp_info_mem(Monitor *mon, const QDict *qdict)
{
    CPUArchState *env;

    env = mon_get_cpu_env();
    if (!env) {
        monitor_printf(mon, "No CPU available\n");
        return;
    }

    if (!(env->cr[0] & CR0_PG_MASK)) {
        monitor_printf(mon, "PG disabled\n");
        return;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            if (env->cr[4] & CR4_LA57_MASK) {
                mem_info_la57(mon, env);
            } else {
                mem_info_la48(mon, env);
            }
        } else
#endif
        {
            mem_info_pae32(mon, env);
        }
    } else {
        mem_info_32(mon, env);
    }
}

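/* HMP "mce": inject a machine check exception into the selected vCPU. */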
void hmp_mce(Monitor *mon, const QDict *qdict)
{
    X86CPU *cpu;
    CPUState *cs;
    int cpu_index = qdict_get_int(qdict, "cpu_index");
    int bank = qdict_get_int(qdict, "bank");
    uint64_t status = qdict_get_int(qdict, "status");
    uint64_t mcg_status = qdict_get_int(qdict, "mcg_status");
    uint64_t addr = qdict_get_int(qdict, "addr");
    uint64_t misc = qdict_get_int(qdict, "misc");
    int flags = MCE_INJECT_UNCOND_AO;

    if (qdict_get_try_bool(qdict, "broadcast", false)) {
        flags |= MCE_INJECT_BROADCAST;
    }
    cs = qemu_get_cpu(cpu_index);
    if (cs != NULL) {
        cpu = X86_CPU(cs);
        cpu_x86_inject_mce(mon, cpu, bank, status, mcg_status, addr, misc,
                           flags);
    }
}

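/* Monitor register accessor for "pc": the linear address of CS:EIP. */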
static target_long monitor_get_pc(const struct MonitorDef *md, int val)
{
    CPUArchState *env = mon_get_cpu_env();
    return env->eip + env->segs[R_CS].base;
}

const MonitorDef monitor_defs[] = {
#define SEG(name, seg) \
    { name, offsetof(CPUX86State, segs[seg].selector), NULL, MD_I32 },\
    { name ".base", offsetof(CPUX86State, segs[seg].base) },\
    { name ".limit", offsetof(CPUX86State, segs[seg].limit), NULL, MD_I32 },

    { "eax", offsetof(CPUX86State, regs[0]) },
    { "ecx", offsetof(CPUX86State, regs[1]) },
    { "edx", offsetof(CPUX86State, regs[2]) },
    { "ebx", offsetof(CPUX86State, regs[3]) },
    { "esp|sp", offsetof(CPUX86State, regs[4]) },
    { "ebp|fp", offsetof(CPUX86State, regs[5]) },
    { "esi", offsetof(CPUX86State, regs[6]) },
    { "edi", offsetof(CPUX86State, regs[7]) },
#ifdef TARGET_X86_64
    { "r8", offsetof(CPUX86State, regs[8]) },
    { "r9", offsetof(CPUX86State, regs[9]) },
    { "r10", offsetof(CPUX86State, regs[10]) },
    { "r11", offsetof(CPUX86State, regs[11]) },
    { "r12", offsetof(CPUX86State, regs[12]) },
    { "r13", offsetof(CPUX86State, regs[13]) },
    { "r14", offsetof(CPUX86State, regs[14]) },
    { "r15", offsetof(CPUX86State, regs[15]) },
#endif
    { "eflags", offsetof(CPUX86State, eflags) },
    { "eip", offsetof(CPUX86State, eip) },
    SEG("cs", R_CS)
    SEG("ds", R_DS)
    SEG("es", R_ES)
    SEG("ss", R_SS)
    SEG("fs", R_FS)
    SEG("gs", R_GS)
    { "pc", 0, monitor_get_pc, },
    { NULL },
};

const MonitorDef *target_monitor_defs(void)
{
    return monitor_defs;
}

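/*
 * HMP "info lapic": dump the local APIC state of either the CPU with the
 * given APIC ID or the monitor's current default CPU.
 */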
void hmp_info_local_apic(Monitor *mon, const QDict *qdict)
{
    CPUState *cs;

    if (qdict_haskey(qdict, "apic-id")) {
        int id = qdict_get_try_int(qdict, "apic-id", 0);
        cs = cpu_by_arch_id(id);
    } else {
        cs = mon_get_cpu();
    }

    if (!cs) {
        monitor_printf(mon, "No CPU available\n");
        return;
    }
    x86_cpu_dump_local_apic_state(cs, CPU_DUMP_FPU);
}

void hmp_info_io_apic(Monitor *mon, const QDict *qdict)
{
    monitor_printf(mon, "This command is obsolete and will be "
                   "removed soon. Please use 'info pic' instead.\n");
}

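/* QMP "query-sev": report SEV state, or set an error if SEV is unavailable. */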
SevInfo *qmp_query_sev(Error **errp)
{
    SevInfo *info;

    info = sev_get_info();
    if (!info) {
        error_setg(errp, "SEV feature is not available");
        return NULL;
    }

    return info;
}

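/* HMP "info sev": print SEV status, policy flags and firmware version. */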
void hmp_info_sev(Monitor *mon, const QDict *qdict)
{
    SevInfo *info = sev_get_info();

    if (info && info->enabled) {
        monitor_printf(mon, "handle: %d\n", info->handle);
        monitor_printf(mon, "state: %s\n", SevState_str(info->state));
        monitor_printf(mon, "build: %d\n", info->build_id);
        monitor_printf(mon, "api version: %d.%d\n",
                       info->api_major, info->api_minor);
        monitor_printf(mon, "debug: %s\n",
                       info->policy & SEV_POLICY_NODBG ? "off" : "on");
        monitor_printf(mon, "key-sharing: %s\n",
                       info->policy & SEV_POLICY_NOKS ? "off" : "on");
    } else {
        monitor_printf(mon, "SEV is not enabled\n");
    }

    qapi_free_SevInfo(info);
}

SevLaunchMeasureInfo *qmp_query_sev_launch_measure(Error **errp)
{
    char *data;
    SevLaunchMeasureInfo *info;

    data = sev_get_launch_measurement();
    if (!data) {
        error_setg(errp, "Measurement is not available");
        return NULL;
    }

    info = g_malloc0(sizeof(*info));
    info->data = data;

    return info;
}

SevCapability *qmp_query_sev_capabilities(Error **errp)
{
    SevCapability *data;

    data = sev_get_capabilities();
    if (!data) {
        error_setg(errp, "SEV feature is not available");
        return NULL;
    }

    return data;
}