1 | /* |
2 | * QEMU dump |
3 | * |
4 | * Copyright Fujitsu, Corp. 2011, 2012 |
5 | * |
6 | * Authors: |
7 | * Wen Congyang <wency@cn.fujitsu.com> |
8 | * |
9 | * This work is licensed under the terms of the GNU GPL, version 2 or later. |
10 | * See the COPYING file in the top-level directory. |
11 | * |
12 | */ |
13 | |
14 | #include "qemu/osdep.h" |
15 | #include "qemu-common.h" |
16 | #include "qemu/cutils.h" |
17 | #include "elf.h" |
18 | #include "cpu.h" |
19 | #include "exec/hwaddr.h" |
20 | #include "monitor/monitor.h" |
21 | #include "sysemu/kvm.h" |
22 | #include "sysemu/dump.h" |
23 | #include "sysemu/memory_mapping.h" |
24 | #include "sysemu/runstate.h" |
25 | #include "sysemu/cpus.h" |
26 | #include "qapi/error.h" |
27 | #include "qapi/qapi-commands-dump.h" |
28 | #include "qapi/qapi-events-dump.h" |
29 | #include "qapi/qmp/qerror.h" |
30 | #include "qemu/error-report.h" |
31 | #include "qemu/main-loop.h" |
32 | #include "hw/misc/vmcoreinfo.h" |
33 | |
34 | #ifdef TARGET_X86_64 |
35 | #include "win_dump.h" |
36 | #endif |
37 | |
38 | #include <zlib.h> |
39 | #ifdef CONFIG_LZO |
40 | #include <lzo/lzo1x.h> |
41 | #endif |
42 | #ifdef CONFIG_SNAPPY |
43 | #include <snappy-c.h> |
44 | #endif |
45 | #ifndef ELF_MACHINE_UNAME |
46 | #define ELF_MACHINE_UNAME "Unknown" |
47 | #endif |
48 | |
49 | #define MAX_GUEST_NOTE_SIZE (1 << 20) /* 1MB should be enough */ |
50 | |
51 | #define ELF_NOTE_SIZE(hdr_size, name_size, desc_size) \ |
52 | ((DIV_ROUND_UP((hdr_size), 4) + \ |
53 | DIV_ROUND_UP((name_size), 4) + \ |
54 | DIV_ROUND_UP((desc_size), 4)) * 4) |
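
/*
 * For example, with a 12-byte Elf64_Nhdr, an 11-byte name ("VMCOREINFO"
 * plus its trailing NUL) and a 100-byte descriptor, ELF_NOTE_SIZE(12, 11,
 * 100) is (3 + 3 + 25) * 4 = 124: each part is padded to a 4-byte boundary
 * before the parts are summed.
 */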
55 | |
56 | uint16_t cpu_to_dump16(DumpState *s, uint16_t val) |
57 | { |
58 | if (s->dump_info.d_endian == ELFDATA2LSB) { |
59 | val = cpu_to_le16(val); |
60 | } else { |
61 | val = cpu_to_be16(val); |
62 | } |
63 | |
64 | return val; |
65 | } |
66 | |
67 | uint32_t cpu_to_dump32(DumpState *s, uint32_t val) |
68 | { |
69 | if (s->dump_info.d_endian == ELFDATA2LSB) { |
70 | val = cpu_to_le32(val); |
71 | } else { |
72 | val = cpu_to_be32(val); |
73 | } |
74 | |
75 | return val; |
76 | } |
77 | |
78 | uint64_t cpu_to_dump64(DumpState *s, uint64_t val) |
79 | { |
80 | if (s->dump_info.d_endian == ELFDATA2LSB) { |
81 | val = cpu_to_le64(val); |
82 | } else { |
83 | val = cpu_to_be64(val); |
84 | } |
85 | |
86 | return val; |
87 | } |
88 | |
89 | static int dump_cleanup(DumpState *s) |
90 | { |
91 | guest_phys_blocks_free(&s->guest_phys_blocks); |
92 | memory_mapping_list_free(&s->list); |
93 | close(s->fd); |
94 | g_free(s->guest_note); |
95 | s->guest_note = NULL; |
96 | if (s->resume) { |
97 | if (s->detached) { |
98 | qemu_mutex_lock_iothread(); |
99 | } |
100 | vm_start(); |
101 | if (s->detached) { |
102 | qemu_mutex_unlock_iothread(); |
103 | } |
104 | } |
105 | |
106 | return 0; |
107 | } |
108 | |
109 | static int fd_write_vmcore(const void *buf, size_t size, void *opaque) |
110 | { |
111 | DumpState *s = opaque; |
112 | size_t written_size; |
113 | |
114 | written_size = qemu_write_full(s->fd, buf, size); |
115 | if (written_size != size) { |
116 | return -errno; |
117 | } |
118 | |
119 | return 0; |
120 | } |
121 | |
static void write_elf64_header(DumpState *s, Error **errp)
{
    Elf64_Ehdr elf_header;
125 | int ret; |
126 | |
127 | memset(&elf_header, 0, sizeof(Elf64_Ehdr)); |
128 | memcpy(&elf_header, ELFMAG, SELFMAG); |
129 | elf_header.e_ident[EI_CLASS] = ELFCLASS64; |
130 | elf_header.e_ident[EI_DATA] = s->dump_info.d_endian; |
131 | elf_header.e_ident[EI_VERSION] = EV_CURRENT; |
132 | elf_header.e_type = cpu_to_dump16(s, ET_CORE); |
133 | elf_header.e_machine = cpu_to_dump16(s, s->dump_info.d_machine); |
134 | elf_header.e_version = cpu_to_dump32(s, EV_CURRENT); |
135 | elf_header.e_ehsize = cpu_to_dump16(s, sizeof(elf_header)); |
136 | elf_header.e_phoff = cpu_to_dump64(s, sizeof(Elf64_Ehdr)); |
137 | elf_header.e_phentsize = cpu_to_dump16(s, sizeof(Elf64_Phdr)); |
138 | elf_header.e_phnum = cpu_to_dump16(s, s->phdr_num); |
139 | if (s->have_section) { |
140 | uint64_t shoff = sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr) * s->sh_info; |
141 | |
142 | elf_header.e_shoff = cpu_to_dump64(s, shoff); |
143 | elf_header.e_shentsize = cpu_to_dump16(s, sizeof(Elf64_Shdr)); |
144 | elf_header.e_shnum = cpu_to_dump16(s, 1); |
145 | } |
146 | |
147 | ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s); |
148 | if (ret < 0) { |
149 | error_setg_errno(errp, -ret, "dump: failed to write elf header" ); |
150 | } |
151 | } |
152 | |
static void write_elf32_header(DumpState *s, Error **errp)
{
    Elf32_Ehdr elf_header;
156 | int ret; |
157 | |
158 | memset(&elf_header, 0, sizeof(Elf32_Ehdr)); |
159 | memcpy(&elf_header, ELFMAG, SELFMAG); |
160 | elf_header.e_ident[EI_CLASS] = ELFCLASS32; |
161 | elf_header.e_ident[EI_DATA] = s->dump_info.d_endian; |
162 | elf_header.e_ident[EI_VERSION] = EV_CURRENT; |
163 | elf_header.e_type = cpu_to_dump16(s, ET_CORE); |
164 | elf_header.e_machine = cpu_to_dump16(s, s->dump_info.d_machine); |
165 | elf_header.e_version = cpu_to_dump32(s, EV_CURRENT); |
166 | elf_header.e_ehsize = cpu_to_dump16(s, sizeof(elf_header)); |
167 | elf_header.e_phoff = cpu_to_dump32(s, sizeof(Elf32_Ehdr)); |
168 | elf_header.e_phentsize = cpu_to_dump16(s, sizeof(Elf32_Phdr)); |
169 | elf_header.e_phnum = cpu_to_dump16(s, s->phdr_num); |
170 | if (s->have_section) { |
171 | uint32_t shoff = sizeof(Elf32_Ehdr) + sizeof(Elf32_Phdr) * s->sh_info; |
172 | |
173 | elf_header.e_shoff = cpu_to_dump32(s, shoff); |
174 | elf_header.e_shentsize = cpu_to_dump16(s, sizeof(Elf32_Shdr)); |
175 | elf_header.e_shnum = cpu_to_dump16(s, 1); |
176 | } |
177 | |
178 | ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s); |
179 | if (ret < 0) { |
180 | error_setg_errno(errp, -ret, "dump: failed to write elf header" ); |
181 | } |
182 | } |
183 | |
184 | static void write_elf64_load(DumpState *s, MemoryMapping *memory_mapping, |
185 | int phdr_index, hwaddr offset, |
186 | hwaddr filesz, Error **errp) |
187 | { |
188 | Elf64_Phdr phdr; |
189 | int ret; |
190 | |
191 | memset(&phdr, 0, sizeof(Elf64_Phdr)); |
192 | phdr.p_type = cpu_to_dump32(s, PT_LOAD); |
193 | phdr.p_offset = cpu_to_dump64(s, offset); |
194 | phdr.p_paddr = cpu_to_dump64(s, memory_mapping->phys_addr); |
195 | phdr.p_filesz = cpu_to_dump64(s, filesz); |
196 | phdr.p_memsz = cpu_to_dump64(s, memory_mapping->length); |
197 | phdr.p_vaddr = cpu_to_dump64(s, memory_mapping->virt_addr) ?: phdr.p_paddr; |
198 | |
199 | assert(memory_mapping->length >= filesz); |
200 | |
201 | ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s); |
202 | if (ret < 0) { |
203 | error_setg_errno(errp, -ret, |
204 | "dump: failed to write program header table" ); |
205 | } |
206 | } |
207 | |
208 | static void write_elf32_load(DumpState *s, MemoryMapping *memory_mapping, |
209 | int phdr_index, hwaddr offset, |
210 | hwaddr filesz, Error **errp) |
211 | { |
212 | Elf32_Phdr phdr; |
213 | int ret; |
214 | |
215 | memset(&phdr, 0, sizeof(Elf32_Phdr)); |
216 | phdr.p_type = cpu_to_dump32(s, PT_LOAD); |
217 | phdr.p_offset = cpu_to_dump32(s, offset); |
218 | phdr.p_paddr = cpu_to_dump32(s, memory_mapping->phys_addr); |
219 | phdr.p_filesz = cpu_to_dump32(s, filesz); |
220 | phdr.p_memsz = cpu_to_dump32(s, memory_mapping->length); |
221 | phdr.p_vaddr = |
222 | cpu_to_dump32(s, memory_mapping->virt_addr) ?: phdr.p_paddr; |
223 | |
224 | assert(memory_mapping->length >= filesz); |
225 | |
226 | ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s); |
227 | if (ret < 0) { |
228 | error_setg_errno(errp, -ret, |
229 | "dump: failed to write program header table" ); |
230 | } |
231 | } |
232 | |
233 | static void write_elf64_note(DumpState *s, Error **errp) |
234 | { |
235 | Elf64_Phdr phdr; |
236 | hwaddr begin = s->memory_offset - s->note_size; |
237 | int ret; |
238 | |
239 | memset(&phdr, 0, sizeof(Elf64_Phdr)); |
240 | phdr.p_type = cpu_to_dump32(s, PT_NOTE); |
241 | phdr.p_offset = cpu_to_dump64(s, begin); |
242 | phdr.p_paddr = 0; |
243 | phdr.p_filesz = cpu_to_dump64(s, s->note_size); |
244 | phdr.p_memsz = cpu_to_dump64(s, s->note_size); |
245 | phdr.p_vaddr = 0; |
246 | |
247 | ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s); |
248 | if (ret < 0) { |
249 | error_setg_errno(errp, -ret, |
250 | "dump: failed to write program header table" ); |
251 | } |
252 | } |
253 | |
254 | static inline int cpu_index(CPUState *cpu) |
255 | { |
256 | return cpu->cpu_index + 1; |
257 | } |
258 | |
259 | static void write_guest_note(WriteCoreDumpFunction f, DumpState *s, |
260 | Error **errp) |
261 | { |
262 | int ret; |
263 | |
264 | if (s->guest_note) { |
265 | ret = f(s->guest_note, s->guest_note_size, s); |
266 | if (ret < 0) { |
267 | error_setg(errp, "dump: failed to write guest note" ); |
268 | } |
269 | } |
270 | } |
271 | |
272 | static void write_elf64_notes(WriteCoreDumpFunction f, DumpState *s, |
273 | Error **errp) |
274 | { |
275 | CPUState *cpu; |
276 | int ret; |
277 | int id; |
278 | |
279 | CPU_FOREACH(cpu) { |
280 | id = cpu_index(cpu); |
281 | ret = cpu_write_elf64_note(f, cpu, id, s); |
282 | if (ret < 0) { |
283 | error_setg(errp, "dump: failed to write elf notes" ); |
284 | return; |
285 | } |
286 | } |
287 | |
288 | CPU_FOREACH(cpu) { |
289 | ret = cpu_write_elf64_qemunote(f, cpu, s); |
290 | if (ret < 0) { |
291 | error_setg(errp, "dump: failed to write CPU status" ); |
292 | return; |
293 | } |
294 | } |
295 | |
296 | write_guest_note(f, s, errp); |
297 | } |
298 | |
299 | static void write_elf32_note(DumpState *s, Error **errp) |
300 | { |
301 | hwaddr begin = s->memory_offset - s->note_size; |
302 | Elf32_Phdr phdr; |
303 | int ret; |
304 | |
305 | memset(&phdr, 0, sizeof(Elf32_Phdr)); |
306 | phdr.p_type = cpu_to_dump32(s, PT_NOTE); |
307 | phdr.p_offset = cpu_to_dump32(s, begin); |
308 | phdr.p_paddr = 0; |
309 | phdr.p_filesz = cpu_to_dump32(s, s->note_size); |
310 | phdr.p_memsz = cpu_to_dump32(s, s->note_size); |
311 | phdr.p_vaddr = 0; |
312 | |
313 | ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s); |
314 | if (ret < 0) { |
315 | error_setg_errno(errp, -ret, |
316 | "dump: failed to write program header table" ); |
317 | } |
318 | } |
319 | |
320 | static void write_elf32_notes(WriteCoreDumpFunction f, DumpState *s, |
321 | Error **errp) |
322 | { |
323 | CPUState *cpu; |
324 | int ret; |
325 | int id; |
326 | |
327 | CPU_FOREACH(cpu) { |
328 | id = cpu_index(cpu); |
329 | ret = cpu_write_elf32_note(f, cpu, id, s); |
330 | if (ret < 0) { |
331 | error_setg(errp, "dump: failed to write elf notes" ); |
332 | return; |
333 | } |
334 | } |
335 | |
336 | CPU_FOREACH(cpu) { |
337 | ret = cpu_write_elf32_qemunote(f, cpu, s); |
338 | if (ret < 0) { |
339 | error_setg(errp, "dump: failed to write CPU status" ); |
340 | return; |
341 | } |
342 | } |
343 | |
344 | write_guest_note(f, s, errp); |
345 | } |
346 | |
347 | static void write_elf_section(DumpState *s, int type, Error **errp) |
348 | { |
349 | Elf32_Shdr shdr32; |
350 | Elf64_Shdr shdr64; |
351 | int shdr_size; |
352 | void *shdr; |
353 | int ret; |
354 | |
355 | if (type == 0) { |
356 | shdr_size = sizeof(Elf32_Shdr); |
357 | memset(&shdr32, 0, shdr_size); |
358 | shdr32.sh_info = cpu_to_dump32(s, s->sh_info); |
359 | shdr = &shdr32; |
360 | } else { |
361 | shdr_size = sizeof(Elf64_Shdr); |
362 | memset(&shdr64, 0, shdr_size); |
363 | shdr64.sh_info = cpu_to_dump32(s, s->sh_info); |
364 | shdr = &shdr64; |
365 | } |
366 | |
367 | ret = fd_write_vmcore(&shdr, shdr_size, s); |
368 | if (ret < 0) { |
369 | error_setg_errno(errp, -ret, |
370 | "dump: failed to write section header table" ); |
371 | } |
372 | } |
373 | |
374 | static void write_data(DumpState *s, void *buf, int length, Error **errp) |
375 | { |
376 | int ret; |
377 | |
378 | ret = fd_write_vmcore(buf, length, s); |
379 | if (ret < 0) { |
380 | error_setg_errno(errp, -ret, "dump: failed to save memory" ); |
381 | } else { |
382 | s->written_size += length; |
383 | } |
384 | } |
385 | |
386 | /* write the memory to vmcore. 1 page per I/O. */ |
387 | static void write_memory(DumpState *s, GuestPhysBlock *block, ram_addr_t start, |
388 | int64_t size, Error **errp) |
389 | { |
390 | int64_t i; |
391 | Error *local_err = NULL; |
392 | |
393 | for (i = 0; i < size / s->dump_info.page_size; i++) { |
394 | write_data(s, block->host_addr + start + i * s->dump_info.page_size, |
395 | s->dump_info.page_size, &local_err); |
396 | if (local_err) { |
397 | error_propagate(errp, local_err); |
398 | return; |
399 | } |
400 | } |
401 | |
402 | if ((size % s->dump_info.page_size) != 0) { |
403 | write_data(s, block->host_addr + start + i * s->dump_info.page_size, |
404 | size % s->dump_info.page_size, &local_err); |
405 | if (local_err) { |
406 | error_propagate(errp, local_err); |
407 | return; |
408 | } |
409 | } |
410 | } |
411 | |
412 | /* get the memory's offset and size in the vmcore */ |
413 | static void get_offset_range(hwaddr phys_addr, |
414 | ram_addr_t mapping_length, |
415 | DumpState *s, |
416 | hwaddr *p_offset, |
417 | hwaddr *p_filesz) |
418 | { |
419 | GuestPhysBlock *block; |
420 | hwaddr offset = s->memory_offset; |
421 | int64_t size_in_block, start; |
422 | |
    /* When the memory is not stored in the vmcore, the offset will be -1 */
424 | *p_offset = -1; |
425 | *p_filesz = 0; |
426 | |
427 | if (s->has_filter) { |
428 | if (phys_addr < s->begin || phys_addr >= s->begin + s->length) { |
429 | return; |
430 | } |
431 | } |
432 | |
433 | QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) { |
434 | if (s->has_filter) { |
435 | if (block->target_start >= s->begin + s->length || |
436 | block->target_end <= s->begin) { |
437 | /* This block is out of the range */ |
438 | continue; |
439 | } |
440 | |
441 | if (s->begin <= block->target_start) { |
442 | start = block->target_start; |
443 | } else { |
444 | start = s->begin; |
445 | } |
446 | |
447 | size_in_block = block->target_end - start; |
448 | if (s->begin + s->length < block->target_end) { |
449 | size_in_block -= block->target_end - (s->begin + s->length); |
450 | } |
451 | } else { |
452 | start = block->target_start; |
453 | size_in_block = block->target_end - block->target_start; |
454 | } |
455 | |
456 | if (phys_addr >= start && phys_addr < start + size_in_block) { |
457 | *p_offset = phys_addr - start + offset; |
458 | |
459 | /* The offset range mapped from the vmcore file must not spill over |
460 | * the GuestPhysBlock, clamp it. The rest of the mapping will be |
461 | * zero-filled in memory at load time; see |
462 | * <http://refspecs.linuxbase.org/elf/gabi4+/ch5.pheader.html>. |
463 | */ |
464 | *p_filesz = phys_addr + mapping_length <= start + size_in_block ? |
465 | mapping_length : |
466 | size_in_block - (phys_addr - start); |
467 | return; |
468 | } |
469 | |
470 | offset += size_in_block; |
471 | } |
472 | } |
473 | |
474 | static void write_elf_loads(DumpState *s, Error **errp) |
475 | { |
476 | hwaddr offset, filesz; |
477 | MemoryMapping *memory_mapping; |
478 | uint32_t phdr_index = 1; |
479 | uint32_t max_index; |
480 | Error *local_err = NULL; |
481 | |
482 | if (s->have_section) { |
483 | max_index = s->sh_info; |
484 | } else { |
485 | max_index = s->phdr_num; |
486 | } |
487 | |
488 | QTAILQ_FOREACH(memory_mapping, &s->list.head, next) { |
489 | get_offset_range(memory_mapping->phys_addr, |
490 | memory_mapping->length, |
491 | s, &offset, &filesz); |
492 | if (s->dump_info.d_class == ELFCLASS64) { |
493 | write_elf64_load(s, memory_mapping, phdr_index++, offset, |
494 | filesz, &local_err); |
495 | } else { |
496 | write_elf32_load(s, memory_mapping, phdr_index++, offset, |
497 | filesz, &local_err); |
498 | } |
499 | |
500 | if (local_err) { |
501 | error_propagate(errp, local_err); |
502 | return; |
503 | } |
504 | |
505 | if (phdr_index >= max_index) { |
506 | break; |
507 | } |
508 | } |
509 | } |
510 | |
511 | /* write elf header, PT_NOTE and elf note to vmcore. */ |
512 | static void dump_begin(DumpState *s, Error **errp) |
513 | { |
514 | Error *local_err = NULL; |
515 | |
516 | /* |
517 | * the vmcore's format is: |
518 | * -------------- |
519 | * | elf header | |
520 | * -------------- |
521 | * | PT_NOTE | |
522 | * -------------- |
523 | * | PT_LOAD | |
524 | * -------------- |
525 | * | ...... | |
526 | * -------------- |
527 | * | PT_LOAD | |
528 | * -------------- |
529 | * | sec_hdr | |
530 | * -------------- |
531 | * | elf note | |
532 | * -------------- |
533 | * | memory | |
534 | * -------------- |
535 | * |
     * we only know where the memory is saved after we write the elf note
     * into the vmcore.
538 | */ |
539 | |
540 | /* write elf header to vmcore */ |
541 | if (s->dump_info.d_class == ELFCLASS64) { |
542 | write_elf64_header(s, &local_err); |
543 | } else { |
544 | write_elf32_header(s, &local_err); |
545 | } |
546 | if (local_err) { |
547 | error_propagate(errp, local_err); |
548 | return; |
549 | } |
550 | |
551 | if (s->dump_info.d_class == ELFCLASS64) { |
552 | /* write PT_NOTE to vmcore */ |
553 | write_elf64_note(s, &local_err); |
554 | if (local_err) { |
555 | error_propagate(errp, local_err); |
556 | return; |
557 | } |
558 | |
559 | /* write all PT_LOAD to vmcore */ |
560 | write_elf_loads(s, &local_err); |
561 | if (local_err) { |
562 | error_propagate(errp, local_err); |
563 | return; |
564 | } |
565 | |
566 | /* write section to vmcore */ |
567 | if (s->have_section) { |
568 | write_elf_section(s, 1, &local_err); |
569 | if (local_err) { |
570 | error_propagate(errp, local_err); |
571 | return; |
572 | } |
573 | } |
574 | |
575 | /* write notes to vmcore */ |
576 | write_elf64_notes(fd_write_vmcore, s, &local_err); |
577 | if (local_err) { |
578 | error_propagate(errp, local_err); |
579 | return; |
580 | } |
581 | } else { |
582 | /* write PT_NOTE to vmcore */ |
583 | write_elf32_note(s, &local_err); |
584 | if (local_err) { |
585 | error_propagate(errp, local_err); |
586 | return; |
587 | } |
588 | |
589 | /* write all PT_LOAD to vmcore */ |
590 | write_elf_loads(s, &local_err); |
591 | if (local_err) { |
592 | error_propagate(errp, local_err); |
593 | return; |
594 | } |
595 | |
596 | /* write section to vmcore */ |
597 | if (s->have_section) { |
598 | write_elf_section(s, 0, &local_err); |
599 | if (local_err) { |
600 | error_propagate(errp, local_err); |
601 | return; |
602 | } |
603 | } |
604 | |
605 | /* write notes to vmcore */ |
606 | write_elf32_notes(fd_write_vmcore, s, &local_err); |
607 | if (local_err) { |
608 | error_propagate(errp, local_err); |
609 | return; |
610 | } |
611 | } |
612 | } |
613 | |
614 | static int get_next_block(DumpState *s, GuestPhysBlock *block) |
615 | { |
616 | while (1) { |
617 | block = QTAILQ_NEXT(block, next); |
618 | if (!block) { |
            /* no more blocks */
620 | return 1; |
621 | } |
622 | |
623 | s->start = 0; |
624 | s->next_block = block; |
625 | if (s->has_filter) { |
626 | if (block->target_start >= s->begin + s->length || |
627 | block->target_end <= s->begin) { |
628 | /* This block is out of the range */ |
629 | continue; |
630 | } |
631 | |
632 | if (s->begin > block->target_start) { |
633 | s->start = s->begin - block->target_start; |
634 | } |
635 | } |
636 | |
637 | return 0; |
638 | } |
639 | } |
640 | |
641 | /* write all memory to vmcore */ |
642 | static void dump_iterate(DumpState *s, Error **errp) |
643 | { |
644 | GuestPhysBlock *block; |
645 | int64_t size; |
646 | Error *local_err = NULL; |
647 | |
648 | do { |
649 | block = s->next_block; |
650 | |
651 | size = block->target_end - block->target_start; |
652 | if (s->has_filter) { |
653 | size -= s->start; |
654 | if (s->begin + s->length < block->target_end) { |
655 | size -= block->target_end - (s->begin + s->length); |
656 | } |
657 | } |
658 | write_memory(s, block, s->start, size, &local_err); |
659 | if (local_err) { |
660 | error_propagate(errp, local_err); |
661 | return; |
662 | } |
663 | |
664 | } while (!get_next_block(s, block)); |
665 | } |
666 | |
667 | static void create_vmcore(DumpState *s, Error **errp) |
668 | { |
669 | Error *local_err = NULL; |
670 | |
671 | dump_begin(s, &local_err); |
672 | if (local_err) { |
673 | error_propagate(errp, local_err); |
674 | return; |
675 | } |
676 | |
677 | dump_iterate(s, errp); |
678 | } |
679 | |
static int write_start_flat_header(int fd)
681 | { |
682 | MakedumpfileHeader *mh; |
683 | int ret = 0; |
684 | |
685 | QEMU_BUILD_BUG_ON(sizeof *mh > MAX_SIZE_MDF_HEADER); |
686 | mh = g_malloc0(MAX_SIZE_MDF_HEADER); |
687 | |
688 | memcpy(mh->signature, MAKEDUMPFILE_SIGNATURE, |
689 | MIN(sizeof mh->signature, sizeof MAKEDUMPFILE_SIGNATURE)); |
690 | |
691 | mh->type = cpu_to_be64(TYPE_FLAT_HEADER); |
692 | mh->version = cpu_to_be64(VERSION_FLAT_HEADER); |
693 | |
694 | size_t written_size; |
695 | written_size = qemu_write_full(fd, mh, MAX_SIZE_MDF_HEADER); |
696 | if (written_size != MAX_SIZE_MDF_HEADER) { |
697 | ret = -1; |
698 | } |
699 | |
700 | g_free(mh); |
701 | return ret; |
702 | } |
703 | |
static int write_end_flat_header(int fd)
705 | { |
706 | MakedumpfileDataHeader mdh; |
707 | |
708 | mdh.offset = END_FLAG_FLAT_HEADER; |
709 | mdh.buf_size = END_FLAG_FLAT_HEADER; |
710 | |
711 | size_t written_size; |
712 | written_size = qemu_write_full(fd, &mdh, sizeof(mdh)); |
713 | if (written_size != sizeof(mdh)) { |
714 | return -1; |
715 | } |
716 | |
717 | return 0; |
718 | } |
719 | |
720 | static int write_buffer(int fd, off_t offset, const void *buf, size_t size) |
721 | { |
722 | size_t written_size; |
723 | MakedumpfileDataHeader mdh; |
724 | |
725 | mdh.offset = cpu_to_be64(offset); |
726 | mdh.buf_size = cpu_to_be64(size); |
727 | |
728 | written_size = qemu_write_full(fd, &mdh, sizeof(mdh)); |
729 | if (written_size != sizeof(mdh)) { |
730 | return -1; |
731 | } |
732 | |
733 | written_size = qemu_write_full(fd, buf, size); |
734 | if (written_size != size) { |
735 | return -1; |
736 | } |
737 | |
738 | return 0; |
739 | } |
740 | |
741 | static int buf_write_note(const void *buf, size_t size, void *opaque) |
742 | { |
743 | DumpState *s = opaque; |
744 | |
    /* note_buf is not large enough */
746 | if (s->note_buf_offset + size > s->note_size) { |
747 | return -1; |
748 | } |
749 | |
750 | memcpy(s->note_buf + s->note_buf_offset, buf, size); |
751 | |
752 | s->note_buf_offset += size; |
753 | |
754 | return 0; |
755 | } |
756 | |
757 | /* |
758 | * This function retrieves various sizes from an elf header. |
759 | * |
760 | * @note has to be a valid ELF note. The return sizes are unmodified |
761 | * (not padded or rounded up to be multiple of 4). |
762 | */ |
763 | static void get_note_sizes(DumpState *s, const void *note, |
764 | uint64_t *note_head_size, |
765 | uint64_t *name_size, |
766 | uint64_t *desc_size) |
767 | { |
768 | uint64_t note_head_sz; |
769 | uint64_t name_sz; |
770 | uint64_t desc_sz; |
771 | |
772 | if (s->dump_info.d_class == ELFCLASS64) { |
773 | const Elf64_Nhdr *hdr = note; |
774 | note_head_sz = sizeof(Elf64_Nhdr); |
775 | name_sz = tswap64(hdr->n_namesz); |
776 | desc_sz = tswap64(hdr->n_descsz); |
777 | } else { |
778 | const Elf32_Nhdr *hdr = note; |
779 | note_head_sz = sizeof(Elf32_Nhdr); |
780 | name_sz = tswap32(hdr->n_namesz); |
781 | desc_sz = tswap32(hdr->n_descsz); |
782 | } |
783 | |
784 | if (note_head_size) { |
785 | *note_head_size = note_head_sz; |
786 | } |
787 | if (name_size) { |
788 | *name_size = name_sz; |
789 | } |
790 | if (desc_size) { |
791 | *desc_size = desc_sz; |
792 | } |
793 | } |
794 | |
795 | static bool note_name_equal(DumpState *s, |
796 | const uint8_t *note, const char *name) |
797 | { |
798 | int len = strlen(name) + 1; |
799 | uint64_t head_size, name_size; |
800 | |
801 | get_note_sizes(s, note, &head_size, &name_size, NULL); |
802 | head_size = ROUND_UP(head_size, 4); |
803 | |
804 | return name_size == len && memcmp(note + head_size, name, len) == 0; |
805 | } |
806 | |
807 | /* write common header, sub header and elf note to vmcore */ |
static void create_header32(DumpState *s, Error **errp)
809 | { |
810 | DiskDumpHeader32 *dh = NULL; |
811 | KdumpSubHeader32 *kh = NULL; |
812 | size_t size; |
813 | uint32_t block_size; |
814 | uint32_t sub_hdr_size; |
815 | uint32_t bitmap_blocks; |
816 | uint32_t status = 0; |
817 | uint64_t offset_note; |
818 | Error *local_err = NULL; |
819 | |
    /* write the common header; version 6 of the kdump-compressed format */
821 | size = sizeof(DiskDumpHeader32); |
822 | dh = g_malloc0(size); |
823 | |
824 | memcpy(dh->signature, KDUMP_SIGNATURE, SIG_LEN); |
825 | dh->header_version = cpu_to_dump32(s, 6); |
826 | block_size = s->dump_info.page_size; |
827 | dh->block_size = cpu_to_dump32(s, block_size); |
828 | sub_hdr_size = sizeof(struct KdumpSubHeader32) + s->note_size; |
829 | sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size); |
830 | dh->sub_hdr_size = cpu_to_dump32(s, sub_hdr_size); |
831 | /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */ |
832 | dh->max_mapnr = cpu_to_dump32(s, MIN(s->max_mapnr, UINT_MAX)); |
833 | dh->nr_cpus = cpu_to_dump32(s, s->nr_cpus); |
834 | bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2; |
835 | dh->bitmap_blocks = cpu_to_dump32(s, bitmap_blocks); |
836 | strncpy(dh->utsname.machine, ELF_MACHINE_UNAME, sizeof(dh->utsname.machine)); |
837 | |
838 | if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) { |
839 | status |= DUMP_DH_COMPRESSED_ZLIB; |
840 | } |
841 | #ifdef CONFIG_LZO |
842 | if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) { |
843 | status |= DUMP_DH_COMPRESSED_LZO; |
844 | } |
845 | #endif |
846 | #ifdef CONFIG_SNAPPY |
847 | if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) { |
848 | status |= DUMP_DH_COMPRESSED_SNAPPY; |
849 | } |
850 | #endif |
851 | dh->status = cpu_to_dump32(s, status); |
852 | |
853 | if (write_buffer(s->fd, 0, dh, size) < 0) { |
854 | error_setg(errp, "dump: failed to write disk dump header" ); |
855 | goto out; |
856 | } |
857 | |
858 | /* write sub header */ |
859 | size = sizeof(KdumpSubHeader32); |
860 | kh = g_malloc0(size); |
861 | |
862 | /* 64bit max_mapnr_64 */ |
863 | kh->max_mapnr_64 = cpu_to_dump64(s, s->max_mapnr); |
864 | kh->phys_base = cpu_to_dump32(s, s->dump_info.phys_base); |
865 | kh->dump_level = cpu_to_dump32(s, DUMP_LEVEL); |
866 | |
867 | offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size; |
868 | if (s->guest_note && |
869 | note_name_equal(s, s->guest_note, "VMCOREINFO" )) { |
870 | uint64_t hsize, name_size, size_vmcoreinfo_desc, offset_vmcoreinfo; |
871 | |
872 | get_note_sizes(s, s->guest_note, |
873 | &hsize, &name_size, &size_vmcoreinfo_desc); |
874 | offset_vmcoreinfo = offset_note + s->note_size - s->guest_note_size + |
875 | (DIV_ROUND_UP(hsize, 4) + DIV_ROUND_UP(name_size, 4)) * 4; |
876 | kh->offset_vmcoreinfo = cpu_to_dump64(s, offset_vmcoreinfo); |
877 | kh->size_vmcoreinfo = cpu_to_dump32(s, size_vmcoreinfo_desc); |
878 | } |
879 | |
880 | kh->offset_note = cpu_to_dump64(s, offset_note); |
881 | kh->note_size = cpu_to_dump32(s, s->note_size); |
882 | |
883 | if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS * |
884 | block_size, kh, size) < 0) { |
885 | error_setg(errp, "dump: failed to write kdump sub header" ); |
886 | goto out; |
887 | } |
888 | |
889 | /* write note */ |
890 | s->note_buf = g_malloc0(s->note_size); |
891 | s->note_buf_offset = 0; |
892 | |
893 | /* use s->note_buf to store notes temporarily */ |
894 | write_elf32_notes(buf_write_note, s, &local_err); |
895 | if (local_err) { |
896 | error_propagate(errp, local_err); |
897 | goto out; |
898 | } |
899 | if (write_buffer(s->fd, offset_note, s->note_buf, |
900 | s->note_size) < 0) { |
901 | error_setg(errp, "dump: failed to write notes" ); |
902 | goto out; |
903 | } |
904 | |
905 | /* get offset of dump_bitmap */ |
906 | s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) * |
907 | block_size; |
908 | |
909 | /* get offset of page */ |
910 | s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) * |
911 | block_size; |
912 | |
913 | out: |
914 | g_free(dh); |
915 | g_free(kh); |
916 | g_free(s->note_buf); |
917 | } |
918 | |
919 | /* write common header, sub header and elf note to vmcore */ |
static void create_header64(DumpState *s, Error **errp)
921 | { |
922 | DiskDumpHeader64 *dh = NULL; |
923 | KdumpSubHeader64 *kh = NULL; |
924 | size_t size; |
925 | uint32_t block_size; |
926 | uint32_t sub_hdr_size; |
927 | uint32_t bitmap_blocks; |
928 | uint32_t status = 0; |
929 | uint64_t offset_note; |
930 | Error *local_err = NULL; |
931 | |
    /* write the common header; version 6 of the kdump-compressed format */
933 | size = sizeof(DiskDumpHeader64); |
934 | dh = g_malloc0(size); |
935 | |
936 | memcpy(dh->signature, KDUMP_SIGNATURE, SIG_LEN); |
937 | dh->header_version = cpu_to_dump32(s, 6); |
938 | block_size = s->dump_info.page_size; |
939 | dh->block_size = cpu_to_dump32(s, block_size); |
940 | sub_hdr_size = sizeof(struct KdumpSubHeader64) + s->note_size; |
941 | sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size); |
942 | dh->sub_hdr_size = cpu_to_dump32(s, sub_hdr_size); |
943 | /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */ |
944 | dh->max_mapnr = cpu_to_dump32(s, MIN(s->max_mapnr, UINT_MAX)); |
945 | dh->nr_cpus = cpu_to_dump32(s, s->nr_cpus); |
946 | bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2; |
947 | dh->bitmap_blocks = cpu_to_dump32(s, bitmap_blocks); |
948 | strncpy(dh->utsname.machine, ELF_MACHINE_UNAME, sizeof(dh->utsname.machine)); |
949 | |
950 | if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) { |
951 | status |= DUMP_DH_COMPRESSED_ZLIB; |
952 | } |
953 | #ifdef CONFIG_LZO |
954 | if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) { |
955 | status |= DUMP_DH_COMPRESSED_LZO; |
956 | } |
957 | #endif |
958 | #ifdef CONFIG_SNAPPY |
959 | if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) { |
960 | status |= DUMP_DH_COMPRESSED_SNAPPY; |
961 | } |
962 | #endif |
963 | dh->status = cpu_to_dump32(s, status); |
964 | |
965 | if (write_buffer(s->fd, 0, dh, size) < 0) { |
966 | error_setg(errp, "dump: failed to write disk dump header" ); |
967 | goto out; |
968 | } |
969 | |
970 | /* write sub header */ |
971 | size = sizeof(KdumpSubHeader64); |
972 | kh = g_malloc0(size); |
973 | |
974 | /* 64bit max_mapnr_64 */ |
975 | kh->max_mapnr_64 = cpu_to_dump64(s, s->max_mapnr); |
976 | kh->phys_base = cpu_to_dump64(s, s->dump_info.phys_base); |
977 | kh->dump_level = cpu_to_dump32(s, DUMP_LEVEL); |
978 | |
979 | offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size; |
980 | if (s->guest_note && |
981 | note_name_equal(s, s->guest_note, "VMCOREINFO" )) { |
982 | uint64_t hsize, name_size, size_vmcoreinfo_desc, offset_vmcoreinfo; |
983 | |
984 | get_note_sizes(s, s->guest_note, |
985 | &hsize, &name_size, &size_vmcoreinfo_desc); |
986 | offset_vmcoreinfo = offset_note + s->note_size - s->guest_note_size + |
987 | (DIV_ROUND_UP(hsize, 4) + DIV_ROUND_UP(name_size, 4)) * 4; |
988 | kh->offset_vmcoreinfo = cpu_to_dump64(s, offset_vmcoreinfo); |
989 | kh->size_vmcoreinfo = cpu_to_dump64(s, size_vmcoreinfo_desc); |
990 | } |
991 | |
992 | kh->offset_note = cpu_to_dump64(s, offset_note); |
993 | kh->note_size = cpu_to_dump64(s, s->note_size); |
994 | |
995 | if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS * |
996 | block_size, kh, size) < 0) { |
997 | error_setg(errp, "dump: failed to write kdump sub header" ); |
998 | goto out; |
999 | } |
1000 | |
1001 | /* write note */ |
1002 | s->note_buf = g_malloc0(s->note_size); |
1003 | s->note_buf_offset = 0; |
1004 | |
1005 | /* use s->note_buf to store notes temporarily */ |
1006 | write_elf64_notes(buf_write_note, s, &local_err); |
1007 | if (local_err) { |
1008 | error_propagate(errp, local_err); |
1009 | goto out; |
1010 | } |
1011 | |
1012 | if (write_buffer(s->fd, offset_note, s->note_buf, |
1013 | s->note_size) < 0) { |
1014 | error_setg(errp, "dump: failed to write notes" ); |
1015 | goto out; |
1016 | } |
1017 | |
1018 | /* get offset of dump_bitmap */ |
1019 | s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) * |
1020 | block_size; |
1021 | |
1022 | /* get offset of page */ |
1023 | s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) * |
1024 | block_size; |
1025 | |
1026 | out: |
1027 | g_free(dh); |
1028 | g_free(kh); |
1029 | g_free(s->note_buf); |
1030 | } |
1031 | |
static void write_dump_header(DumpState *s, Error **errp)
1033 | { |
1034 | Error *local_err = NULL; |
1035 | |
1036 | if (s->dump_info.d_class == ELFCLASS32) { |
1037 | create_header32(s, &local_err); |
1038 | } else { |
1039 | create_header64(s, &local_err); |
1040 | } |
1041 | error_propagate(errp, local_err); |
1042 | } |
1043 | |
1044 | static size_t dump_bitmap_get_bufsize(DumpState *s) |
1045 | { |
1046 | return s->dump_info.page_size; |
1047 | } |
1048 | |
1049 | /* |
1050 | * set dump_bitmap sequencely. the bit before last_pfn is not allowed to be |
1051 | * rewritten, so if need to set the first bit, set last_pfn and pfn to 0. |
1052 | * set_dump_bitmap will always leave the recently set bit un-sync. And setting |
1053 | * (last bit + sizeof(buf) * 8) to 0 will do flushing the content in buf into |
1054 | * vmcore, ie. synchronizing un-sync bit into vmcore. |
1055 | */ |
1056 | static int set_dump_bitmap(uint64_t last_pfn, uint64_t pfn, bool value, |
1057 | uint8_t *buf, DumpState *s) |
1058 | { |
1059 | off_t old_offset, new_offset; |
1060 | off_t offset_bitmap1, offset_bitmap2; |
1061 | uint32_t byte, bit; |
1062 | size_t bitmap_bufsize = dump_bitmap_get_bufsize(s); |
1063 | size_t bits_per_buf = bitmap_bufsize * CHAR_BIT; |
1064 | |
    /* bits before last_pfn must not be set again */
1066 | assert(last_pfn <= pfn); |
1067 | |
1068 | /* |
1069 | * if the bit needed to be set is not cached in buf, flush the data in buf |
1070 | * to vmcore firstly. |
1071 | * making new_offset be bigger than old_offset can also sync remained data |
1072 | * into vmcore. |
1073 | */ |
1074 | old_offset = bitmap_bufsize * (last_pfn / bits_per_buf); |
1075 | new_offset = bitmap_bufsize * (pfn / bits_per_buf); |
1076 | |
1077 | while (old_offset < new_offset) { |
1078 | /* calculate the offset and write dump_bitmap */ |
1079 | offset_bitmap1 = s->offset_dump_bitmap + old_offset; |
1080 | if (write_buffer(s->fd, offset_bitmap1, buf, |
1081 | bitmap_bufsize) < 0) { |
1082 | return -1; |
1083 | } |
1084 | |
        /* dump level 1 is chosen, so the 1st and 2nd bitmaps are the same */
1086 | offset_bitmap2 = s->offset_dump_bitmap + s->len_dump_bitmap + |
1087 | old_offset; |
1088 | if (write_buffer(s->fd, offset_bitmap2, buf, |
1089 | bitmap_bufsize) < 0) { |
1090 | return -1; |
1091 | } |
1092 | |
1093 | memset(buf, 0, bitmap_bufsize); |
1094 | old_offset += bitmap_bufsize; |
1095 | } |
1096 | |
1097 | /* get the exact place of the bit in the buf, and set it */ |
1098 | byte = (pfn % bits_per_buf) / CHAR_BIT; |
1099 | bit = (pfn % bits_per_buf) % CHAR_BIT; |
1100 | if (value) { |
1101 | buf[byte] |= 1u << bit; |
1102 | } else { |
1103 | buf[byte] &= ~(1u << bit); |
1104 | } |
1105 | |
1106 | return 0; |
1107 | } |
1108 | |
1109 | static uint64_t dump_paddr_to_pfn(DumpState *s, uint64_t addr) |
1110 | { |
1111 | int target_page_shift = ctz32(s->dump_info.page_size); |
1112 | |
1113 | return (addr >> target_page_shift) - ARCH_PFN_OFFSET; |
1114 | } |
1115 | |
1116 | static uint64_t dump_pfn_to_paddr(DumpState *s, uint64_t pfn) |
1117 | { |
1118 | int target_page_shift = ctz32(s->dump_info.page_size); |
1119 | |
1120 | return (pfn + ARCH_PFN_OFFSET) << target_page_shift; |
1121 | } |
1122 | |
1123 | /* |
1124 | * exam every page and return the page frame number and the address of the page. |
1125 | * bufptr can be NULL. note: the blocks here is supposed to reflect guest-phys |
1126 | * blocks, so block->target_start and block->target_end should be interal |
1127 | * multiples of the target page size. |
1128 | */ |
1129 | static bool get_next_page(GuestPhysBlock **blockptr, uint64_t *pfnptr, |
1130 | uint8_t **bufptr, DumpState *s) |
1131 | { |
1132 | GuestPhysBlock *block = *blockptr; |
1133 | hwaddr addr, target_page_mask = ~((hwaddr)s->dump_info.page_size - 1); |
1134 | uint8_t *buf; |
1135 | |
1136 | /* block == NULL means the start of the iteration */ |
1137 | if (!block) { |
1138 | block = QTAILQ_FIRST(&s->guest_phys_blocks.head); |
1139 | *blockptr = block; |
1140 | assert((block->target_start & ~target_page_mask) == 0); |
1141 | assert((block->target_end & ~target_page_mask) == 0); |
1142 | *pfnptr = dump_paddr_to_pfn(s, block->target_start); |
1143 | if (bufptr) { |
1144 | *bufptr = block->host_addr; |
1145 | } |
1146 | return true; |
1147 | } |
1148 | |
1149 | *pfnptr = *pfnptr + 1; |
1150 | addr = dump_pfn_to_paddr(s, *pfnptr); |
1151 | |
1152 | if ((addr >= block->target_start) && |
1153 | (addr + s->dump_info.page_size <= block->target_end)) { |
1154 | buf = block->host_addr + (addr - block->target_start); |
1155 | } else { |
1156 | /* the next page is in the next block */ |
1157 | block = QTAILQ_NEXT(block, next); |
1158 | *blockptr = block; |
1159 | if (!block) { |
1160 | return false; |
1161 | } |
1162 | assert((block->target_start & ~target_page_mask) == 0); |
1163 | assert((block->target_end & ~target_page_mask) == 0); |
1164 | *pfnptr = dump_paddr_to_pfn(s, block->target_start); |
1165 | buf = block->host_addr; |
1166 | } |
1167 | |
1168 | if (bufptr) { |
1169 | *bufptr = buf; |
1170 | } |
1171 | |
1172 | return true; |
1173 | } |
1174 | |
1175 | static void write_dump_bitmap(DumpState *s, Error **errp) |
1176 | { |
1177 | int ret = 0; |
1178 | uint64_t last_pfn, pfn; |
1179 | void *dump_bitmap_buf; |
1180 | size_t num_dumpable; |
1181 | GuestPhysBlock *block_iter = NULL; |
1182 | size_t bitmap_bufsize = dump_bitmap_get_bufsize(s); |
1183 | size_t bits_per_buf = bitmap_bufsize * CHAR_BIT; |
1184 | |
1185 | /* dump_bitmap_buf is used to store dump_bitmap temporarily */ |
1186 | dump_bitmap_buf = g_malloc0(bitmap_bufsize); |
1187 | |
1188 | num_dumpable = 0; |
1189 | last_pfn = 0; |
1190 | |
1191 | /* |
1192 | * exam memory page by page, and set the bit in dump_bitmap corresponded |
1193 | * to the existing page. |
1194 | */ |
1195 | while (get_next_page(&block_iter, &pfn, NULL, s)) { |
1196 | ret = set_dump_bitmap(last_pfn, pfn, true, dump_bitmap_buf, s); |
1197 | if (ret < 0) { |
1198 | error_setg(errp, "dump: failed to set dump_bitmap" ); |
1199 | goto out; |
1200 | } |
1201 | |
1202 | last_pfn = pfn; |
1203 | num_dumpable++; |
1204 | } |
1205 | |
1206 | /* |
1207 | * set_dump_bitmap will always leave the recently set bit un-sync. Here we |
1208 | * set the remaining bits from last_pfn to the end of the bitmap buffer to |
1209 | * 0. With those set, the un-sync bit will be synchronized into the vmcore. |
1210 | */ |
1211 | if (num_dumpable > 0) { |
1212 | ret = set_dump_bitmap(last_pfn, last_pfn + bits_per_buf, false, |
1213 | dump_bitmap_buf, s); |
1214 | if (ret < 0) { |
1215 | error_setg(errp, "dump: failed to sync dump_bitmap" ); |
1216 | goto out; |
1217 | } |
1218 | } |
1219 | |
1220 | /* number of dumpable pages that will be dumped later */ |
1221 | s->num_dumpable = num_dumpable; |
1222 | |
1223 | out: |
1224 | g_free(dump_bitmap_buf); |
1225 | } |
1226 | |
1227 | static void prepare_data_cache(DataCache *data_cache, DumpState *s, |
1228 | off_t offset) |
1229 | { |
1230 | data_cache->fd = s->fd; |
1231 | data_cache->data_size = 0; |
1232 | data_cache->buf_size = 4 * dump_bitmap_get_bufsize(s); |
1233 | data_cache->buf = g_malloc0(data_cache->buf_size); |
1234 | data_cache->offset = offset; |
1235 | } |
1236 | |
1237 | static int write_cache(DataCache *dc, const void *buf, size_t size, |
1238 | bool flag_sync) |
1239 | { |
1240 | /* |
1241 | * dc->buf_size should not be less than size, otherwise dc will never be |
1242 | * enough |
1243 | */ |
1244 | assert(size <= dc->buf_size); |
1245 | |
1246 | /* |
1247 | * if flag_sync is set, synchronize data in dc->buf into vmcore. |
1248 | * otherwise check if the space is enough for caching data in buf, if not, |
1249 | * write the data in dc->buf to dc->fd and reset dc->buf |
1250 | */ |
1251 | if ((!flag_sync && dc->data_size + size > dc->buf_size) || |
1252 | (flag_sync && dc->data_size > 0)) { |
1253 | if (write_buffer(dc->fd, dc->offset, dc->buf, dc->data_size) < 0) { |
1254 | return -1; |
1255 | } |
1256 | |
1257 | dc->offset += dc->data_size; |
1258 | dc->data_size = 0; |
1259 | } |
1260 | |
1261 | if (!flag_sync) { |
1262 | memcpy(dc->buf + dc->data_size, buf, size); |
1263 | dc->data_size += size; |
1264 | } |
1265 | |
1266 | return 0; |
1267 | } |
1268 | |
1269 | static void free_data_cache(DataCache *data_cache) |
1270 | { |
1271 | g_free(data_cache->buf); |
1272 | } |
1273 | |
1274 | static size_t get_len_buf_out(size_t page_size, uint32_t flag_compress) |
1275 | { |
1276 | switch (flag_compress) { |
1277 | case DUMP_DH_COMPRESSED_ZLIB: |
1278 | return compressBound(page_size); |
1279 | |
1280 | case DUMP_DH_COMPRESSED_LZO: |
1281 | /* |
1282 | * LZO will expand incompressible data by a little amount. Please check |
1283 | * the following URL to see the expansion calculation: |
1284 | * http://www.oberhumer.com/opensource/lzo/lzofaq.php |
1285 | */ |
1286 | return page_size + page_size / 16 + 64 + 3; |
1287 | |
1288 | #ifdef CONFIG_SNAPPY |
1289 | case DUMP_DH_COMPRESSED_SNAPPY: |
1290 | return snappy_max_compressed_length(page_size); |
1291 | #endif |
1292 | } |
1293 | return 0; |
1294 | } |
1295 | |
1296 | /* |
1297 | * check if the page is all 0 |
1298 | */ |
1299 | static inline bool is_zero_page(const uint8_t *buf, size_t page_size) |
1300 | { |
1301 | return buffer_is_zero(buf, page_size); |
1302 | } |
1303 | |
1304 | static void write_dump_pages(DumpState *s, Error **errp) |
1305 | { |
1306 | int ret = 0; |
1307 | DataCache page_desc, page_data; |
1308 | size_t len_buf_out, size_out; |
1309 | #ifdef CONFIG_LZO |
1310 | lzo_bytep wrkmem = NULL; |
1311 | #endif |
1312 | uint8_t *buf_out = NULL; |
1313 | off_t offset_desc, offset_data; |
1314 | PageDescriptor pd, pd_zero; |
1315 | uint8_t *buf; |
1316 | GuestPhysBlock *block_iter = NULL; |
1317 | uint64_t pfn_iter; |
1318 | |
1319 | /* get offset of page_desc and page_data in dump file */ |
1320 | offset_desc = s->offset_page; |
1321 | offset_data = offset_desc + sizeof(PageDescriptor) * s->num_dumpable; |
1322 | |
1323 | prepare_data_cache(&page_desc, s, offset_desc); |
1324 | prepare_data_cache(&page_data, s, offset_data); |
1325 | |
1326 | /* prepare buffer to store compressed data */ |
1327 | len_buf_out = get_len_buf_out(s->dump_info.page_size, s->flag_compress); |
1328 | assert(len_buf_out != 0); |
1329 | |
1330 | #ifdef CONFIG_LZO |
1331 | wrkmem = g_malloc(LZO1X_1_MEM_COMPRESS); |
1332 | #endif |
1333 | |
1334 | buf_out = g_malloc(len_buf_out); |
1335 | |
1336 | /* |
1337 | * init zero page's page_desc and page_data, because every zero page |
1338 | * uses the same page_data |
1339 | */ |
1340 | pd_zero.size = cpu_to_dump32(s, s->dump_info.page_size); |
1341 | pd_zero.flags = cpu_to_dump32(s, 0); |
1342 | pd_zero.offset = cpu_to_dump64(s, offset_data); |
1343 | pd_zero.page_flags = cpu_to_dump64(s, 0); |
1344 | buf = g_malloc0(s->dump_info.page_size); |
1345 | ret = write_cache(&page_data, buf, s->dump_info.page_size, false); |
1346 | g_free(buf); |
1347 | if (ret < 0) { |
1348 | error_setg(errp, "dump: failed to write page data (zero page)" ); |
1349 | goto out; |
1350 | } |
1351 | |
1352 | offset_data += s->dump_info.page_size; |
1353 | |
1354 | /* |
1355 | * dump memory to vmcore page by page. zero page will all be resided in the |
1356 | * first page of page section |
1357 | */ |
1358 | while (get_next_page(&block_iter, &pfn_iter, &buf, s)) { |
1359 | /* check zero page */ |
1360 | if (is_zero_page(buf, s->dump_info.page_size)) { |
1361 | ret = write_cache(&page_desc, &pd_zero, sizeof(PageDescriptor), |
1362 | false); |
1363 | if (ret < 0) { |
1364 | error_setg(errp, "dump: failed to write page desc" ); |
1365 | goto out; |
1366 | } |
1367 | } else { |
1368 | /* |
1369 | * not zero page, then: |
1370 | * 1. compress the page |
1371 | * 2. write the compressed page into the cache of page_data |
1372 | * 3. get page desc of the compressed page and write it into the |
1373 | * cache of page_desc |
1374 | * |
1375 | * only one compression format will be used here, for |
1376 | * s->flag_compress is set. But when compression fails to work, |
1377 | * we fall back to save in plaintext. |
1378 | */ |
1379 | size_out = len_buf_out; |
1380 | if ((s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) && |
1381 | (compress2(buf_out, (uLongf *)&size_out, buf, |
1382 | s->dump_info.page_size, Z_BEST_SPEED) == Z_OK) && |
1383 | (size_out < s->dump_info.page_size)) { |
1384 | pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_ZLIB); |
1385 | pd.size = cpu_to_dump32(s, size_out); |
1386 | |
1387 | ret = write_cache(&page_data, buf_out, size_out, false); |
1388 | if (ret < 0) { |
1389 | error_setg(errp, "dump: failed to write page data" ); |
1390 | goto out; |
1391 | } |
1392 | #ifdef CONFIG_LZO |
1393 | } else if ((s->flag_compress & DUMP_DH_COMPRESSED_LZO) && |
1394 | (lzo1x_1_compress(buf, s->dump_info.page_size, buf_out, |
1395 | (lzo_uint *)&size_out, wrkmem) == LZO_E_OK) && |
1396 | (size_out < s->dump_info.page_size)) { |
1397 | pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_LZO); |
1398 | pd.size = cpu_to_dump32(s, size_out); |
1399 | |
1400 | ret = write_cache(&page_data, buf_out, size_out, false); |
1401 | if (ret < 0) { |
1402 | error_setg(errp, "dump: failed to write page data" ); |
1403 | goto out; |
1404 | } |
1405 | #endif |
1406 | #ifdef CONFIG_SNAPPY |
1407 | } else if ((s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) && |
1408 | (snappy_compress((char *)buf, s->dump_info.page_size, |
1409 | (char *)buf_out, &size_out) == SNAPPY_OK) && |
1410 | (size_out < s->dump_info.page_size)) { |
1411 | pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_SNAPPY); |
1412 | pd.size = cpu_to_dump32(s, size_out); |
1413 | |
1414 | ret = write_cache(&page_data, buf_out, size_out, false); |
1415 | if (ret < 0) { |
1416 | error_setg(errp, "dump: failed to write page data" ); |
1417 | goto out; |
1418 | } |
1419 | #endif |
1420 | } else { |
1421 | /* |
1422 | * fall back to save in plaintext, size_out should be |
1423 | * assigned the target's page size |
1424 | */ |
1425 | pd.flags = cpu_to_dump32(s, 0); |
1426 | size_out = s->dump_info.page_size; |
1427 | pd.size = cpu_to_dump32(s, size_out); |
1428 | |
1429 | ret = write_cache(&page_data, buf, |
1430 | s->dump_info.page_size, false); |
1431 | if (ret < 0) { |
1432 | error_setg(errp, "dump: failed to write page data" ); |
1433 | goto out; |
1434 | } |
1435 | } |
1436 | |
1437 | /* get and write page desc here */ |
1438 | pd.page_flags = cpu_to_dump64(s, 0); |
1439 | pd.offset = cpu_to_dump64(s, offset_data); |
1440 | offset_data += size_out; |
1441 | |
1442 | ret = write_cache(&page_desc, &pd, sizeof(PageDescriptor), false); |
1443 | if (ret < 0) { |
1444 | error_setg(errp, "dump: failed to write page desc" ); |
1445 | goto out; |
1446 | } |
1447 | } |
1448 | s->written_size += s->dump_info.page_size; |
1449 | } |
1450 | |
1451 | ret = write_cache(&page_desc, NULL, 0, true); |
1452 | if (ret < 0) { |
1453 | error_setg(errp, "dump: failed to sync cache for page_desc" ); |
1454 | goto out; |
1455 | } |
1456 | ret = write_cache(&page_data, NULL, 0, true); |
1457 | if (ret < 0) { |
1458 | error_setg(errp, "dump: failed to sync cache for page_data" ); |
1459 | goto out; |
1460 | } |
1461 | |
1462 | out: |
1463 | free_data_cache(&page_desc); |
1464 | free_data_cache(&page_data); |
1465 | |
1466 | #ifdef CONFIG_LZO |
1467 | g_free(wrkmem); |
1468 | #endif |
1469 | |
1470 | g_free(buf_out); |
1471 | } |
1472 | |
1473 | static void create_kdump_vmcore(DumpState *s, Error **errp) |
1474 | { |
1475 | int ret; |
1476 | Error *local_err = NULL; |
1477 | |
1478 | /* |
1479 | * the kdump-compressed format is: |
1480 | * File offset |
1481 | * +------------------------------------------+ 0x0 |
1482 | * | main header (struct disk_dump_header) | |
1483 | * |------------------------------------------+ block 1 |
1484 | * | sub header (struct kdump_sub_header) | |
1485 | * |------------------------------------------+ block 2 |
1486 | * | 1st-dump_bitmap | |
1487 | * |------------------------------------------+ block 2 + X blocks |
1488 | * | 2nd-dump_bitmap | (aligned by block) |
1489 | * |------------------------------------------+ block 2 + 2 * X blocks |
1490 | * | page desc for pfn 0 (struct page_desc) | (aligned by block) |
1491 | * | page desc for pfn 1 (struct page_desc) | |
1492 | * | : | |
1493 | * |------------------------------------------| (not aligned by block) |
1494 | * | page data (pfn 0) | |
1495 | * | page data (pfn 1) | |
1496 | * | : | |
1497 | * +------------------------------------------+ |
1498 | */ |
1499 | |
1500 | ret = write_start_flat_header(s->fd); |
1501 | if (ret < 0) { |
1502 | error_setg(errp, "dump: failed to write start flat header" ); |
1503 | return; |
1504 | } |
1505 | |
1506 | write_dump_header(s, &local_err); |
1507 | if (local_err) { |
1508 | error_propagate(errp, local_err); |
1509 | return; |
1510 | } |
1511 | |
1512 | write_dump_bitmap(s, &local_err); |
1513 | if (local_err) { |
1514 | error_propagate(errp, local_err); |
1515 | return; |
1516 | } |
1517 | |
1518 | write_dump_pages(s, &local_err); |
1519 | if (local_err) { |
1520 | error_propagate(errp, local_err); |
1521 | return; |
1522 | } |
1523 | |
1524 | ret = write_end_flat_header(s->fd); |
1525 | if (ret < 0) { |
1526 | error_setg(errp, "dump: failed to write end flat header" ); |
1527 | return; |
1528 | } |
1529 | } |
1530 | |
1531 | static ram_addr_t get_start_block(DumpState *s) |
1532 | { |
1533 | GuestPhysBlock *block; |
1534 | |
1535 | if (!s->has_filter) { |
1536 | s->next_block = QTAILQ_FIRST(&s->guest_phys_blocks.head); |
1537 | return 0; |
1538 | } |
1539 | |
1540 | QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) { |
1541 | if (block->target_start >= s->begin + s->length || |
1542 | block->target_end <= s->begin) { |
1543 | /* This block is out of the range */ |
1544 | continue; |
1545 | } |
1546 | |
1547 | s->next_block = block; |
1548 | if (s->begin > block->target_start) { |
1549 | s->start = s->begin - block->target_start; |
1550 | } else { |
1551 | s->start = 0; |
1552 | } |
1553 | return s->start; |
1554 | } |
1555 | |
1556 | return -1; |
1557 | } |
1558 | |
1559 | static void get_max_mapnr(DumpState *s) |
1560 | { |
1561 | GuestPhysBlock *last_block; |
1562 | |
1563 | last_block = QTAILQ_LAST(&s->guest_phys_blocks.head); |
1564 | s->max_mapnr = dump_paddr_to_pfn(s, last_block->target_end); |
1565 | } |
1566 | |
1567 | static DumpState dump_state_global = { .status = DUMP_STATUS_NONE }; |
1568 | |
1569 | static void dump_state_prepare(DumpState *s) |
1570 | { |
1571 | /* zero the struct, setting status to active */ |
1572 | *s = (DumpState) { .status = DUMP_STATUS_ACTIVE }; |
1573 | } |
1574 | |
1575 | bool dump_in_progress(void) |
1576 | { |
1577 | DumpState *state = &dump_state_global; |
1578 | return (atomic_read(&state->status) == DUMP_STATUS_ACTIVE); |
1579 | } |
1580 | |
/* calculate total size of memory to be dumped (taking the filter into
 * account) */
1583 | static int64_t dump_calculate_size(DumpState *s) |
1584 | { |
1585 | GuestPhysBlock *block; |
1586 | int64_t size = 0, total = 0, left = 0, right = 0; |
1587 | |
1588 | QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) { |
1589 | if (s->has_filter) { |
1590 | /* calculate the overlapped region. */ |
1591 | left = MAX(s->begin, block->target_start); |
1592 | right = MIN(s->begin + s->length, block->target_end); |
1593 | size = right - left; |
1594 | size = size > 0 ? size : 0; |
1595 | } else { |
1596 | /* count the whole region in */ |
1597 | size = (block->target_end - block->target_start); |
1598 | } |
1599 | total += size; |
1600 | } |
1601 | |
1602 | return total; |
1603 | } |
1604 | |
1605 | static void vmcoreinfo_update_phys_base(DumpState *s) |
1606 | { |
1607 | uint64_t size, note_head_size, name_size, phys_base; |
1608 | char **lines; |
1609 | uint8_t *vmci; |
1610 | size_t i; |
1611 | |
1612 | if (!note_name_equal(s, s->guest_note, "VMCOREINFO" )) { |
1613 | return; |
1614 | } |
1615 | |
1616 | get_note_sizes(s, s->guest_note, ¬e_head_size, &name_size, &size); |
1617 | note_head_size = ROUND_UP(note_head_size, 4); |
1618 | |
1619 | vmci = s->guest_note + note_head_size + ROUND_UP(name_size, 4); |
1620 | *(vmci + size) = '\0'; |
1621 | |
1622 | lines = g_strsplit((char *)vmci, "\n" , -1); |
1623 | for (i = 0; lines[i]; i++) { |
1624 | const char *prefix = NULL; |
1625 | |
1626 | if (s->dump_info.d_machine == EM_X86_64) { |
1627 | prefix = "NUMBER(phys_base)=" ; |
1628 | } else if (s->dump_info.d_machine == EM_AARCH64) { |
1629 | prefix = "NUMBER(PHYS_OFFSET)=" ; |
1630 | } |
1631 | |
1632 | if (prefix && g_str_has_prefix(lines[i], prefix)) { |
1633 | if (qemu_strtou64(lines[i] + strlen(prefix), NULL, 16, |
1634 | &phys_base) < 0) { |
1635 | warn_report("Failed to read %s" , prefix); |
1636 | } else { |
1637 | s->dump_info.phys_base = phys_base; |
1638 | } |
1639 | break; |
1640 | } |
1641 | } |
1642 | |
1643 | g_strfreev(lines); |
1644 | } |
1645 | |
1646 | static void dump_init(DumpState *s, int fd, bool has_format, |
1647 | DumpGuestMemoryFormat format, bool paging, bool has_filter, |
1648 | int64_t begin, int64_t length, Error **errp) |
1649 | { |
1650 | VMCoreInfoState *vmci = vmcoreinfo_find(); |
1651 | CPUState *cpu; |
1652 | int nr_cpus; |
1653 | Error *err = NULL; |
1654 | int ret; |
1655 | |
1656 | s->has_format = has_format; |
1657 | s->format = format; |
1658 | s->written_size = 0; |
1659 | |
    /* the kdump-compressed format conflicts with paging and filtering */
1661 | if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) { |
1662 | assert(!paging && !has_filter); |
1663 | } |
1664 | |
1665 | if (runstate_is_running()) { |
1666 | vm_stop(RUN_STATE_SAVE_VM); |
1667 | s->resume = true; |
1668 | } else { |
1669 | s->resume = false; |
1670 | } |
1671 | |
1672 | /* If we use KVM, we should synchronize the registers before we get dump |
1673 | * info or physmap info. |
1674 | */ |
1675 | cpu_synchronize_all_states(); |
1676 | nr_cpus = 0; |
1677 | CPU_FOREACH(cpu) { |
1678 | nr_cpus++; |
1679 | } |
1680 | |
1681 | s->fd = fd; |
1682 | s->has_filter = has_filter; |
1683 | s->begin = begin; |
1684 | s->length = length; |
1685 | |
1686 | memory_mapping_list_init(&s->list); |
1687 | |
1688 | guest_phys_blocks_init(&s->guest_phys_blocks); |
1689 | guest_phys_blocks_append(&s->guest_phys_blocks); |
1690 | s->total_size = dump_calculate_size(s); |
1691 | #ifdef DEBUG_DUMP_GUEST_MEMORY |
1692 | fprintf(stderr, "DUMP: total memory to dump: %lu\n" , s->total_size); |
1693 | #endif |
1694 | |
1695 | /* it does not make sense to dump non-existent memory */ |
1696 | if (!s->total_size) { |
1697 | error_setg(errp, "dump: no guest memory to dump" ); |
1698 | goto cleanup; |
1699 | } |
1700 | |
1701 | s->start = get_start_block(s); |
1702 | if (s->start == -1) { |
1703 | error_setg(errp, QERR_INVALID_PARAMETER, "begin" ); |
1704 | goto cleanup; |
1705 | } |
1706 | |
1707 | /* get dump info: endian, class and architecture. |
1708 | * If the target architecture is not supported, cpu_get_dump_info() will |
1709 | * return -1. |
1710 | */ |
1711 | ret = cpu_get_dump_info(&s->dump_info, &s->guest_phys_blocks); |
1712 | if (ret < 0) { |
1713 | error_setg(errp, QERR_UNSUPPORTED); |
1714 | goto cleanup; |
1715 | } |
1716 | |
1717 | if (!s->dump_info.page_size) { |
1718 | s->dump_info.page_size = TARGET_PAGE_SIZE; |
1719 | } |
1720 | |
1721 | s->note_size = cpu_get_note_size(s->dump_info.d_class, |
1722 | s->dump_info.d_machine, nr_cpus); |
1723 | if (s->note_size < 0) { |
1724 | error_setg(errp, QERR_UNSUPPORTED); |
1725 | goto cleanup; |
1726 | } |
1727 | |
1728 | /* |
1729 | * The goal of this block is to (a) update the previously guessed |
1730 | * phys_base, (b) copy the guest note out of the guest. |
1731 | * Failure to do so is not fatal for dumping. |
1732 | */ |
1733 | if (vmci) { |
1734 | uint64_t addr, note_head_size, name_size, desc_size; |
1735 | uint32_t size; |
1736 | uint16_t format; |
1737 | |
1738 | note_head_size = s->dump_info.d_class == ELFCLASS32 ? |
1739 | sizeof(Elf32_Nhdr) : sizeof(Elf64_Nhdr); |
1740 | |
1741 | format = le16_to_cpu(vmci->vmcoreinfo.guest_format); |
1742 | size = le32_to_cpu(vmci->vmcoreinfo.size); |
1743 | addr = le64_to_cpu(vmci->vmcoreinfo.paddr); |
1744 | if (!vmci->has_vmcoreinfo) { |
1745 | warn_report("guest note is not present" ); |
1746 | } else if (size < note_head_size || size > MAX_GUEST_NOTE_SIZE) { |
1747 | warn_report("guest note size is invalid: %" PRIu32, size); |
1748 | } else if (format != FW_CFG_VMCOREINFO_FORMAT_ELF) { |
1749 | warn_report("guest note format is unsupported: %" PRIu16, format); |
1750 | } else { |
1751 | s->guest_note = g_malloc(size + 1); /* +1 for adding \0 */ |
1752 | cpu_physical_memory_read(addr, s->guest_note, size); |
1753 | |
1754 | get_note_sizes(s, s->guest_note, NULL, &name_size, &desc_size); |
1755 | s->guest_note_size = ELF_NOTE_SIZE(note_head_size, name_size, |
1756 | desc_size); |
1757 | if (name_size > MAX_GUEST_NOTE_SIZE || |
1758 | desc_size > MAX_GUEST_NOTE_SIZE || |
1759 | s->guest_note_size > size) { |
                warn_report("Invalid guest note header");
1761 | g_free(s->guest_note); |
1762 | s->guest_note = NULL; |
1763 | } else { |
1764 | vmcoreinfo_update_phys_base(s); |
1765 | s->note_size += s->guest_note_size; |
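                /* the guest note will be appended after the CPU notes
                 * when the PT_NOTE contents are written out */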
1766 | } |
1767 | } |
1768 | } |
1769 | |
1770 | /* get memory mapping */ |
1771 | if (paging) { |
1772 | qemu_get_guest_memory_mapping(&s->list, &s->guest_phys_blocks, &err); |
1773 | if (err != NULL) { |
1774 | error_propagate(errp, err); |
1775 | goto cleanup; |
1776 | } |
1777 | } else { |
1778 | qemu_get_guest_simple_memory_mapping(&s->list, &s->guest_phys_blocks); |
1779 | } |
1780 | |
1781 | s->nr_cpus = nr_cpus; |
1782 | |
1783 | get_max_mapnr(s); |
1784 | |
    /*
     * Size the dump bitmap: one bit per guest page, rounded up to a
     * whole number of dump pages so it can be written out page by page.
     */
    uint64_t tmp = DIV_ROUND_UP(DIV_ROUND_UP(s->max_mapnr, CHAR_BIT),
                                s->dump_info.page_size);
    s->len_dump_bitmap = tmp * s->dump_info.page_size;
1789 | |
1790 | /* init for kdump-compressed format */ |
1791 | if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) { |
1792 | switch (format) { |
1793 | case DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB: |
1794 | s->flag_compress = DUMP_DH_COMPRESSED_ZLIB; |
1795 | break; |
1796 | |
1797 | case DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO: |
1798 | #ifdef CONFIG_LZO |
1799 | if (lzo_init() != LZO_E_OK) { |
                error_setg(errp, "failed to initialize the LZO library");
1801 | goto cleanup; |
1802 | } |
1803 | #endif |
1804 | s->flag_compress = DUMP_DH_COMPRESSED_LZO; |
1805 | break; |
1806 | |
1807 | case DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY: |
1808 | s->flag_compress = DUMP_DH_COMPRESSED_SNAPPY; |
1809 | break; |
1810 | |
1811 | default: |
1812 | s->flag_compress = 0; |
1813 | } |
1814 | |
1815 | return; |
1816 | } |
1817 | |
1818 | if (s->has_filter) { |
1819 | memory_mapping_filter(&s->list, s->begin, s->length); |
1820 | } |
1821 | |
1822 | /* |
1823 | * calculate phdr_num |
1824 | * |
1825 | * the type of ehdr->e_phnum is uint16_t, so we should avoid overflow |
1826 | */ |
1827 | s->phdr_num = 1; /* PT_NOTE */ |
1828 | if (s->list.num < UINT16_MAX - 2) { |
1829 | s->phdr_num += s->list.num; |
1830 | s->have_section = false; |
1831 | } else { |
1832 | s->have_section = true; |
1833 | s->phdr_num = PN_XNUM; |
1834 | s->sh_info = 1; /* PT_NOTE */ |
1835 | |
1836 | /* the type of shdr->sh_info is uint32_t, so we should avoid overflow */ |
1837 | if (s->list.num <= UINT32_MAX - 1) { |
1838 | s->sh_info += s->list.num; |
1839 | } else { |
1840 | s->sh_info = UINT32_MAX; |
1841 | } |
1842 | } |
1843 | |
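    /*
     * memory_offset is where the guest memory contents start in the
     * vmcore: the ELF header, the program headers (plus one section
     * header when PN_XNUM is in use) and the note data all precede it.
     */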
1844 | if (s->dump_info.d_class == ELFCLASS64) { |
1845 | if (s->have_section) { |
1846 | s->memory_offset = sizeof(Elf64_Ehdr) + |
1847 | sizeof(Elf64_Phdr) * s->sh_info + |
1848 | sizeof(Elf64_Shdr) + s->note_size; |
1849 | } else { |
1850 | s->memory_offset = sizeof(Elf64_Ehdr) + |
1851 | sizeof(Elf64_Phdr) * s->phdr_num + s->note_size; |
1852 | } |
1853 | } else { |
1854 | if (s->have_section) { |
1855 | s->memory_offset = sizeof(Elf32_Ehdr) + |
1856 | sizeof(Elf32_Phdr) * s->sh_info + |
1857 | sizeof(Elf32_Shdr) + s->note_size; |
1858 | } else { |
1859 | s->memory_offset = sizeof(Elf32_Ehdr) + |
1860 | sizeof(Elf32_Phdr) * s->phdr_num + s->note_size; |
1861 | } |
1862 | } |
1863 | |
1864 | return; |
1865 | |
1866 | cleanup: |
1867 | dump_cleanup(s); |
1868 | } |
1869 | |
/* this operation might be time-consuming */
1871 | static void dump_process(DumpState *s, Error **errp) |
1872 | { |
1873 | Error *local_err = NULL; |
1874 | DumpQueryResult *result = NULL; |
1875 | |
1876 | if (s->has_format && s->format == DUMP_GUEST_MEMORY_FORMAT_WIN_DMP) { |
1877 | #ifdef TARGET_X86_64 |
1878 | create_win_dump(s, &local_err); |
1879 | #endif |
1880 | } else if (s->has_format && s->format != DUMP_GUEST_MEMORY_FORMAT_ELF) { |
1881 | create_kdump_vmcore(s, &local_err); |
1882 | } else { |
1883 | create_vmcore(s, &local_err); |
1884 | } |
1885 | |
    /*
     * make sure status is written after written_size updates;
     * pairs with the smp_rmb() in qmp_query_dump()
     */
1887 | smp_wmb(); |
1888 | atomic_set(&s->status, |
1889 | (local_err ? DUMP_STATUS_FAILED : DUMP_STATUS_COMPLETED)); |
1890 | |
1891 | /* send DUMP_COMPLETED message (unconditionally) */ |
1892 | result = qmp_query_dump(NULL); |
1893 | /* should never fail */ |
1894 | assert(result); |
    qapi_event_send_dump_completed(result, !!local_err, (local_err ?
                                   error_get_pretty(local_err) : NULL));
1897 | qapi_free_DumpQueryResult(result); |
1898 | |
1899 | error_propagate(errp, local_err); |
1900 | dump_cleanup(s); |
1901 | } |
1902 | |
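/*
 * Entry point of the detached dump thread.  No Error pointer is passed:
 * dump_process() reports failures through the DUMP_COMPLETED QMP event.
 */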
1903 | static void *dump_thread(void *data) |
1904 | { |
1905 | DumpState *s = (DumpState *)data; |
1906 | dump_process(s, NULL); |
1907 | return NULL; |
1908 | } |
1909 | |
1910 | DumpQueryResult *qmp_query_dump(Error **errp) |
1911 | { |
1912 | DumpQueryResult *result = g_new(DumpQueryResult, 1); |
1913 | DumpState *state = &dump_state_global; |
1914 | result->status = atomic_read(&state->status); |
    /*
     * make sure we read status before written_size; pairs with the
     * smp_wmb() in dump_process()
     */
1916 | smp_rmb(); |
1917 | result->completed = state->written_size; |
1918 | result->total = state->total_size; |
1919 | return result; |
1920 | } |
1921 | |
1922 | void qmp_dump_guest_memory(bool paging, const char *file, |
1923 | bool has_detach, bool detach, |
1924 | bool has_begin, int64_t begin, bool has_length, |
1925 | int64_t length, bool has_format, |
1926 | DumpGuestMemoryFormat format, Error **errp) |
1927 | { |
1928 | const char *p; |
1929 | int fd = -1; |
1930 | DumpState *s; |
1931 | Error *local_err = NULL; |
1932 | bool detach_p = false; |
1933 | |
1934 | if (runstate_check(RUN_STATE_INMIGRATE)) { |
        error_setg(errp, "Dump not allowed during incoming migration.");
1936 | return; |
1937 | } |
1938 | |
    /* if there is a dump running in the background, we have to wait until
     * it has finished */
1941 | if (dump_in_progress()) { |
        error_setg(errp, "There is a dump in progress, please wait.");
1943 | return; |
1944 | } |
1945 | |
1946 | /* |
1947 | * kdump-compressed format need the whole memory dumped, so paging or |
1948 | * filter is not supported here. |
1949 | */ |
1950 | if ((has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) && |
1951 | (paging || has_begin || has_length)) { |
        error_setg(errp, "kdump-compressed format doesn't support paging or "
                         "filtering");
1954 | return; |
1955 | } |
1956 | if (has_begin && !has_length) { |
        error_setg(errp, QERR_MISSING_PARAMETER, "length");
1958 | return; |
1959 | } |
1960 | if (!has_begin && has_length) { |
        error_setg(errp, QERR_MISSING_PARAMETER, "begin");
1962 | return; |
1963 | } |
1964 | if (has_detach) { |
1965 | detach_p = detach; |
1966 | } |
1967 | |
1968 | /* check whether lzo/snappy is supported */ |
1969 | #ifndef CONFIG_LZO |
1970 | if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO) { |
        error_setg(errp, "kdump-lzo is not available in this build");
1972 | return; |
1973 | } |
1974 | #endif |
1975 | |
1976 | #ifndef CONFIG_SNAPPY |
1977 | if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY) { |
        error_setg(errp, "kdump-snappy is not available in this build");
1979 | return; |
1980 | } |
1981 | #endif |
1982 | |
1983 | #ifndef TARGET_X86_64 |
1984 | if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_WIN_DMP) { |
        error_setg(errp, "Windows dump is only available for x86-64");
1986 | return; |
1987 | } |
1988 | #endif |
1989 | |
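    /*
     * "fd:<name>" refers to a file descriptor that was handed to QEMU in
     * advance (e.g. with the QMP getfd command); this form is not
     * supported on Windows.
     */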
1990 | #if !defined(WIN32) |
    if (strstart(file, "fd:", &p)) {
1992 | fd = monitor_get_fd(cur_mon, p, errp); |
1993 | if (fd == -1) { |
1994 | return; |
1995 | } |
1996 | } |
1997 | #endif |
1998 | |
    if (strstart(file, "file:", &p)) {
2000 | fd = qemu_open(p, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR); |
2001 | if (fd < 0) { |
2002 | error_setg_file_open(errp, errno, p); |
2003 | return; |
2004 | } |
2005 | } |
2006 | |
2007 | if (fd == -1) { |
        error_setg(errp, QERR_INVALID_PARAMETER, "protocol");
2009 | return; |
2010 | } |
2011 | |
2012 | s = &dump_state_global; |
2013 | dump_state_prepare(s); |
2014 | |
2015 | dump_init(s, fd, has_format, format, paging, has_begin, |
2016 | begin, length, &local_err); |
2017 | if (local_err) { |
2018 | error_propagate(errp, local_err); |
2019 | atomic_set(&s->status, DUMP_STATUS_FAILED); |
2020 | return; |
2021 | } |
2022 | |
2023 | if (detach_p) { |
2024 | /* detached dump */ |
2025 | s->detached = true; |
        qemu_thread_create(&s->dump_thread, "dump_thread", dump_thread,
2027 | s, QEMU_THREAD_DETACHED); |
2028 | } else { |
2029 | /* sync dump */ |
2030 | dump_process(s, errp); |
2031 | } |
2032 | } |
2033 | |
2034 | DumpGuestMemoryCapability *qmp_query_dump_guest_memory_capability(Error **errp) |
2035 | { |
2036 | DumpGuestMemoryFormatList *item; |
2037 | DumpGuestMemoryCapability *cap = |
2038 | g_malloc0(sizeof(DumpGuestMemoryCapability)); |
2039 | |
    /* ELF is always available */
2041 | item = g_malloc0(sizeof(DumpGuestMemoryFormatList)); |
2042 | cap->formats = item; |
2043 | item->value = DUMP_GUEST_MEMORY_FORMAT_ELF; |
2044 | |
2045 | /* kdump-zlib is always available */ |
2046 | item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList)); |
2047 | item = item->next; |
2048 | item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB; |
2049 | |
2050 | /* add new item if kdump-lzo is available */ |
2051 | #ifdef CONFIG_LZO |
2052 | item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList)); |
2053 | item = item->next; |
2054 | item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO; |
2055 | #endif |
2056 | |
2057 | /* add new item if kdump-snappy is available */ |
2058 | #ifdef CONFIG_SNAPPY |
2059 | item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList)); |
2060 | item = item->next; |
2061 | item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY; |
2062 | #endif |
2063 | |
2064 | /* Windows dump is available only if target is x86_64 */ |
2065 | #ifdef TARGET_X86_64 |
2066 | item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList)); |
2067 | item = item->next; |
2068 | item->value = DUMP_GUEST_MEMORY_FORMAT_WIN_DMP; |
2069 | #endif |
2070 | |
2071 | return cap; |
2072 | } |
2073 | |