1 | /* |
2 | * Virtual page mapping |
3 | * |
4 | * Copyright (c) 2003 Fabrice Bellard |
5 | * |
6 | * This library is free software; you can redistribute it and/or |
7 | * modify it under the terms of the GNU Lesser General Public |
8 | * License as published by the Free Software Foundation; either |
9 | * version 2 of the License, or (at your option) any later version. |
10 | * |
11 | * This library is distributed in the hope that it will be useful, |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
14 | * Lesser General Public License for more details. |
15 | * |
16 | * You should have received a copy of the GNU Lesser General Public |
17 | * License along with this library; if not, see <http://www.gnu.org/licenses/>. |
18 | */ |
19 | |
20 | #include "qemu/osdep.h" |
21 | #include "qemu-common.h" |
22 | #include "qapi/error.h" |
23 | |
24 | #include "qemu/cutils.h" |
25 | #include "cpu.h" |
26 | #include "exec/exec-all.h" |
27 | #include "exec/target_page.h" |
28 | #include "tcg.h" |
29 | #include "hw/qdev-core.h" |
30 | #include "hw/qdev-properties.h" |
31 | #if !defined(CONFIG_USER_ONLY) |
32 | #include "hw/boards.h" |
33 | #include "hw/xen/xen.h" |
34 | #endif |
35 | #include "sysemu/kvm.h" |
36 | #include "sysemu/sysemu.h" |
37 | #include "sysemu/tcg.h" |
38 | #include "qemu/timer.h" |
39 | #include "qemu/config-file.h" |
40 | #include "qemu/error-report.h" |
41 | #include "qemu/qemu-print.h" |
42 | #if defined(CONFIG_USER_ONLY) |
43 | #include "qemu.h" |
44 | #else /* !CONFIG_USER_ONLY */ |
45 | #include "exec/memory.h" |
46 | #include "exec/ioport.h" |
47 | #include "sysemu/dma.h" |
48 | #include "sysemu/hostmem.h" |
49 | #include "sysemu/hw_accel.h" |
50 | #include "exec/address-spaces.h" |
51 | #include "sysemu/xen-mapcache.h" |
52 | #include "trace-root.h" |
53 | |
54 | #ifdef CONFIG_FALLOCATE_PUNCH_HOLE |
55 | #include <linux/falloc.h> |
56 | #endif |
57 | |
58 | #endif |
59 | #include "qemu/rcu_queue.h" |
60 | #include "qemu/main-loop.h" |
61 | #include "translate-all.h" |
62 | #include "sysemu/replay.h" |
63 | |
64 | #include "exec/memory-internal.h" |
65 | #include "exec/ram_addr.h" |
66 | #include "exec/log.h" |
67 | |
68 | #include "migration/vmstate.h" |
69 | |
70 | #include "qemu/range.h" |
71 | #ifndef _WIN32 |
72 | #include "qemu/mmap-alloc.h" |
73 | #endif |
74 | |
75 | #include "monitor/monitor.h" |
76 | |
77 | //#define DEBUG_SUBPAGE |
78 | |
79 | #if !defined(CONFIG_USER_ONLY) |
80 | /* ram_list is read under rcu_read_lock()/rcu_read_unlock(). Writes |
81 | * are protected by the ramlist lock. |
82 | */ |
83 | RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) }; |
84 | |
85 | static MemoryRegion *system_memory; |
86 | static MemoryRegion *system_io; |
87 | |
88 | AddressSpace address_space_io; |
89 | AddressSpace address_space_memory; |
90 | |
91 | MemoryRegion io_mem_rom, io_mem_notdirty; |
92 | static MemoryRegion io_mem_unassigned; |
93 | #endif |
94 | |
95 | #ifdef TARGET_PAGE_BITS_VARY |
96 | int target_page_bits; |
97 | bool target_page_bits_decided; |
98 | #endif |
99 | |
100 | CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus); |
101 | |
102 | /* current CPU in the current thread. It is only valid inside |
103 | cpu_exec() */ |
104 | __thread CPUState *current_cpu; |
105 | /* 0 = Do not count executed instructions. |
106 | 1 = Precise instruction counting. |
107 | 2 = Adaptive rate instruction counting. */ |
108 | int use_icount; |
109 | |
110 | uintptr_t qemu_host_page_size; |
111 | intptr_t qemu_host_page_mask; |
112 | |
113 | bool set_preferred_target_page_bits(int bits) |
114 | { |
115 | /* The target page size is the lowest common denominator for all |
116 | * the CPUs in the system, so we can only make it smaller, never |
117 | * larger. And we can't make it smaller once we've committed to |
118 | * a particular size. |
119 | */ |
120 | #ifdef TARGET_PAGE_BITS_VARY |
121 | assert(bits >= TARGET_PAGE_BITS_MIN); |
122 | if (target_page_bits == 0 || target_page_bits > bits) { |
123 | if (target_page_bits_decided) { |
124 | return false; |
125 | } |
126 | target_page_bits = bits; |
127 | } |
128 | #endif |
129 | return true; |
130 | } |
131 | |
132 | #if !defined(CONFIG_USER_ONLY) |
133 | |
134 | static void finalize_target_page_bits(void) |
135 | { |
136 | #ifdef TARGET_PAGE_BITS_VARY |
137 | if (target_page_bits == 0) { |
138 | target_page_bits = TARGET_PAGE_BITS_MIN; |
139 | } |
140 | target_page_bits_decided = true; |
141 | #endif |
142 | } |
143 | |
144 | typedef struct PhysPageEntry PhysPageEntry; |
145 | |
146 | struct PhysPageEntry { |
147 | /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */ |
148 | uint32_t skip : 6; |
149 | /* index into phys_sections (!skip) or phys_map_nodes (skip) */ |
150 | uint32_t ptr : 26; |
151 | }; |
152 | |
153 | #define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6) |
154 | |
155 | /* Size of the L2 (and L3, etc) page tables. */ |
156 | #define ADDR_SPACE_BITS 64 |
157 | |
158 | #define P_L2_BITS 9 |
159 | #define P_L2_SIZE (1 << P_L2_BITS) |
160 | |
161 | #define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1) |
162 | |
163 | typedef PhysPageEntry Node[P_L2_SIZE]; |
164 | |
165 | typedef struct PhysPageMap { |
166 | struct rcu_head rcu; |
167 | |
168 | unsigned sections_nb; |
169 | unsigned sections_nb_alloc; |
170 | unsigned nodes_nb; |
171 | unsigned nodes_nb_alloc; |
172 | Node *nodes; |
173 | MemoryRegionSection *sections; |
174 | } PhysPageMap; |
175 | |
176 | struct AddressSpaceDispatch { |
177 | MemoryRegionSection *mru_section; |
178 | /* This is a multi-level map on the physical address space. |
179 | * The bottom level has pointers to MemoryRegionSections. |
180 | */ |
181 | PhysPageEntry phys_map; |
182 | PhysPageMap map; |
183 | }; |
184 | |
185 | #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK) |
186 | typedef struct subpage_t { |
187 | MemoryRegion iomem; |
188 | FlatView *fv; |
189 | hwaddr base; |
190 | uint16_t sub_section[]; |
191 | } subpage_t; |
192 | |
193 | #define PHYS_SECTION_UNASSIGNED 0 |
194 | #define PHYS_SECTION_NOTDIRTY 1 |
195 | #define PHYS_SECTION_ROM 2 |
196 | |
197 | static void io_mem_init(void); |
198 | static void memory_map_init(void); |
199 | static void tcg_log_global_after_sync(MemoryListener *listener); |
200 | static void tcg_commit(MemoryListener *listener); |
201 | |
202 | /** |
203 | * CPUAddressSpace: all the information a CPU needs about an AddressSpace |
204 | * @cpu: the CPU whose AddressSpace this is |
205 | * @as: the AddressSpace itself |
206 | * @memory_dispatch: its dispatch pointer (cached, RCU protected) |
207 | * @tcg_as_listener: listener for tracking changes to the AddressSpace |
208 | */ |
209 | struct CPUAddressSpace { |
210 | CPUState *cpu; |
211 | AddressSpace *as; |
212 | struct AddressSpaceDispatch *memory_dispatch; |
213 | MemoryListener tcg_as_listener; |
214 | }; |
215 | |
216 | struct DirtyBitmapSnapshot { |
217 | ram_addr_t start; |
218 | ram_addr_t end; |
219 | unsigned long dirty[]; |
220 | }; |
221 | |
222 | #endif |
223 | |
224 | #if !defined(CONFIG_USER_ONLY) |
225 | |
226 | static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes) |
227 | { |
228 | static unsigned alloc_hint = 16; |
229 | if (map->nodes_nb + nodes > map->nodes_nb_alloc) { |
230 | map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, alloc_hint); |
231 | map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes); |
232 | map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc); |
233 | alloc_hint = map->nodes_nb_alloc; |
234 | } |
235 | } |
236 | |
237 | static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf) |
238 | { |
239 | unsigned i; |
240 | uint32_t ret; |
241 | PhysPageEntry e; |
242 | PhysPageEntry *p; |
243 | |
244 | ret = map->nodes_nb++; |
245 | p = map->nodes[ret]; |
246 | assert(ret != PHYS_MAP_NODE_NIL); |
247 | assert(ret != map->nodes_nb_alloc); |
248 | |
249 | e.skip = leaf ? 0 : 1; |
250 | e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL; |
251 | for (i = 0; i < P_L2_SIZE; ++i) { |
252 | memcpy(&p[i], &e, sizeof(e)); |
253 | } |
254 | return ret; |
255 | } |
256 | |
257 | static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp, |
258 | hwaddr *index, hwaddr *nb, uint16_t leaf, |
259 | int level) |
260 | { |
261 | PhysPageEntry *p; |
262 | hwaddr step = (hwaddr)1 << (level * P_L2_BITS); |
263 | |
264 | if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) { |
265 | lp->ptr = phys_map_node_alloc(map, level == 0); |
266 | } |
267 | p = map->nodes[lp->ptr]; |
268 | lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)]; |
269 | |
270 | while (*nb && lp < &p[P_L2_SIZE]) { |
271 | if ((*index & (step - 1)) == 0 && *nb >= step) { |
272 | lp->skip = 0; |
273 | lp->ptr = leaf; |
274 | *index += step; |
275 | *nb -= step; |
276 | } else { |
277 | phys_page_set_level(map, lp, index, nb, leaf, level - 1); |
278 | } |
279 | ++lp; |
280 | } |
281 | } |
282 | |
283 | static void phys_page_set(AddressSpaceDispatch *d, |
284 | hwaddr index, hwaddr nb, |
285 | uint16_t leaf) |
286 | { |
287 | /* Wildly overreserve - it doesn't matter much. */ |
288 | phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS); |
289 | |
290 | phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1); |
291 | } |
292 | |
293 | /* Compact a non leaf page entry. Simply detect that the entry has a single child, |
294 | * and update our entry so we can skip it and go directly to the destination. |
295 | */ |
296 | static void phys_page_compact(PhysPageEntry *lp, Node *nodes) |
297 | { |
298 | unsigned valid_ptr = P_L2_SIZE; |
299 | int valid = 0; |
300 | PhysPageEntry *p; |
301 | int i; |
302 | |
303 | if (lp->ptr == PHYS_MAP_NODE_NIL) { |
304 | return; |
305 | } |
306 | |
307 | p = nodes[lp->ptr]; |
308 | for (i = 0; i < P_L2_SIZE; i++) { |
309 | if (p[i].ptr == PHYS_MAP_NODE_NIL) { |
310 | continue; |
311 | } |
312 | |
313 | valid_ptr = i; |
314 | valid++; |
315 | if (p[i].skip) { |
316 | phys_page_compact(&p[i], nodes); |
317 | } |
318 | } |
319 | |
320 | /* We can only compress if there's only one child. */ |
321 | if (valid != 1) { |
322 | return; |
323 | } |
324 | |
325 | assert(valid_ptr < P_L2_SIZE); |
326 | |
327 | /* Don't compress if it won't fit in the # of bits we have. */ |
328 | if (lp->skip + p[valid_ptr].skip >= (1 << 3)) { |
329 | return; |
330 | } |
331 | |
332 | lp->ptr = p[valid_ptr].ptr; |
333 | if (!p[valid_ptr].skip) { |
334 | /* If our only child is a leaf, make this a leaf. */ |
335 | /* By design, we should have made this node a leaf to begin with so we |
336 | * should never reach here. |
337 | * But since it's so simple to handle this, let's do it just in case we |
338 | * change this rule. |
339 | */ |
340 | lp->skip = 0; |
341 | } else { |
342 | lp->skip += p[valid_ptr].skip; |
343 | } |
344 | } |
345 | |
346 | void address_space_dispatch_compact(AddressSpaceDispatch *d) |
347 | { |
348 | if (d->phys_map.skip) { |
349 | phys_page_compact(&d->phys_map, d->map.nodes); |
350 | } |
351 | } |
352 | |
353 | static inline bool section_covers_addr(const MemoryRegionSection *section, |
354 | hwaddr addr) |
355 | { |
356 | /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means |
357 | * the section must cover the entire address space. |
358 | */ |
359 | return int128_gethi(section->size) || |
360 | range_covers_byte(section->offset_within_address_space, |
361 | int128_getlo(section->size), addr); |
362 | } |
363 | |
364 | static MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr addr) |
365 | { |
366 | PhysPageEntry lp = d->phys_map, *p; |
367 | Node *nodes = d->map.nodes; |
368 | MemoryRegionSection *sections = d->map.sections; |
369 | hwaddr index = addr >> TARGET_PAGE_BITS; |
370 | int i; |
371 | |
372 | for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) { |
373 | if (lp.ptr == PHYS_MAP_NODE_NIL) { |
374 | return §ions[PHYS_SECTION_UNASSIGNED]; |
375 | } |
376 | p = nodes[lp.ptr]; |
377 | lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)]; |
378 | } |
379 | |
380 | if (section_covers_addr(§ions[lp.ptr], addr)) { |
381 | return §ions[lp.ptr]; |
382 | } else { |
383 | return §ions[PHYS_SECTION_UNASSIGNED]; |
384 | } |
385 | } |
386 | |
387 | /* Called from RCU critical section */ |
388 | static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d, |
389 | hwaddr addr, |
390 | bool resolve_subpage) |
391 | { |
392 | MemoryRegionSection *section = atomic_read(&d->mru_section); |
393 | subpage_t *subpage; |
394 | |
395 | if (!section || section == &d->map.sections[PHYS_SECTION_UNASSIGNED] || |
396 | !section_covers_addr(section, addr)) { |
397 | section = phys_page_find(d, addr); |
398 | atomic_set(&d->mru_section, section); |
399 | } |
400 | if (resolve_subpage && section->mr->subpage) { |
401 | subpage = container_of(section->mr, subpage_t, iomem); |
402 | section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]]; |
403 | } |
404 | return section; |
405 | } |
406 | |
407 | /* Called from RCU critical section */ |
408 | static MemoryRegionSection * |
409 | address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat, |
410 | hwaddr *plen, bool resolve_subpage) |
411 | { |
412 | MemoryRegionSection *section; |
413 | MemoryRegion *mr; |
414 | Int128 diff; |
415 | |
416 | section = address_space_lookup_region(d, addr, resolve_subpage); |
417 | /* Compute offset within MemoryRegionSection */ |
418 | addr -= section->offset_within_address_space; |
419 | |
420 | /* Compute offset within MemoryRegion */ |
421 | *xlat = addr + section->offset_within_region; |
422 | |
423 | mr = section->mr; |
424 | |
425 | /* MMIO registers can be expected to perform full-width accesses based only |
426 | * on their address, without considering adjacent registers that could |
427 | * decode to completely different MemoryRegions. When such registers |
428 | * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO |
429 | * regions overlap wildly. For this reason we cannot clamp the accesses |
430 | * here. |
431 | * |
432 | * If the length is small (as is the case for address_space_ldl/stl), |
433 | * everything works fine. If the incoming length is large, however, |
434 | * the caller really has to do the clamping through memory_access_size. |
435 | */ |
436 | if (memory_region_is_ram(mr)) { |
437 | diff = int128_sub(section->size, int128_make64(addr)); |
438 | *plen = int128_get64(int128_min(diff, int128_make64(*plen))); |
439 | } |
440 | return section; |
441 | } |
442 | |
443 | /** |
444 | * address_space_translate_iommu - translate an address through an IOMMU |
445 | * memory region and then through the target address space. |
446 | * |
447 | * @iommu_mr: the IOMMU memory region that we start the translation from |
448 | * @addr: the address to be translated through the MMU |
449 | * @xlat: the translated address offset within the destination memory region. |
450 | * It cannot be %NULL. |
451 | * @plen_out: valid read/write length of the translated address. It |
452 | * cannot be %NULL. |
453 | * @page_mask_out: page mask for the translated address. This |
454 | * should only be meaningful for IOMMU translated |
455 | * addresses, since there may be huge pages that this bit |
456 | * would tell. It can be %NULL if we don't care about it. |
457 | * @is_write: whether the translation operation is for write |
458 | * @is_mmio: whether this can be MMIO, set true if it can |
459 | * @target_as: the address space targeted by the IOMMU |
460 | * @attrs: transaction attributes |
461 | * |
462 | * This function is called from RCU critical section. It is the common |
463 | * part of flatview_do_translate and address_space_translate_cached. |
464 | */ |
465 | static MemoryRegionSection address_space_translate_iommu(IOMMUMemoryRegion *iommu_mr, |
466 | hwaddr *xlat, |
467 | hwaddr *plen_out, |
468 | hwaddr *page_mask_out, |
469 | bool is_write, |
470 | bool is_mmio, |
471 | AddressSpace **target_as, |
472 | MemTxAttrs attrs) |
473 | { |
474 | MemoryRegionSection *section; |
475 | hwaddr page_mask = (hwaddr)-1; |
476 | |
477 | do { |
478 | hwaddr addr = *xlat; |
479 | IOMMUMemoryRegionClass *imrc = memory_region_get_iommu_class_nocheck(iommu_mr); |
480 | int iommu_idx = 0; |
481 | IOMMUTLBEntry iotlb; |
482 | |
483 | if (imrc->attrs_to_index) { |
484 | iommu_idx = imrc->attrs_to_index(iommu_mr, attrs); |
485 | } |
486 | |
487 | iotlb = imrc->translate(iommu_mr, addr, is_write ? |
488 | IOMMU_WO : IOMMU_RO, iommu_idx); |
489 | |
490 | if (!(iotlb.perm & (1 << is_write))) { |
491 | goto unassigned; |
492 | } |
493 | |
494 | addr = ((iotlb.translated_addr & ~iotlb.addr_mask) |
495 | | (addr & iotlb.addr_mask)); |
496 | page_mask &= iotlb.addr_mask; |
497 | *plen_out = MIN(*plen_out, (addr | iotlb.addr_mask) - addr + 1); |
498 | *target_as = iotlb.target_as; |
499 | |
500 | section = address_space_translate_internal( |
501 | address_space_to_dispatch(iotlb.target_as), addr, xlat, |
502 | plen_out, is_mmio); |
503 | |
504 | iommu_mr = memory_region_get_iommu(section->mr); |
505 | } while (unlikely(iommu_mr)); |
506 | |
507 | if (page_mask_out) { |
508 | *page_mask_out = page_mask; |
509 | } |
510 | return *section; |
511 | |
512 | unassigned: |
513 | return (MemoryRegionSection) { .mr = &io_mem_unassigned }; |
514 | } |
515 | |
516 | /** |
517 | * flatview_do_translate - translate an address in FlatView |
518 | * |
519 | * @fv: the flat view that we want to translate on |
520 | * @addr: the address to be translated in above address space |
521 | * @xlat: the translated address offset within memory region. It |
522 | * cannot be @NULL. |
523 | * @plen_out: valid read/write length of the translated address. It |
524 | * can be @NULL when we don't care about it. |
525 | * @page_mask_out: page mask for the translated address. This |
526 | * should only be meaningful for IOMMU translated |
527 | * addresses, since there may be huge pages that this bit |
528 | * would tell. It can be @NULL if we don't care about it. |
529 | * @is_write: whether the translation operation is for write |
530 | * @is_mmio: whether this can be MMIO, set true if it can |
531 | * @target_as: the address space targeted by the IOMMU |
532 | * @attrs: memory transaction attributes |
533 | * |
534 | * This function is called from RCU critical section |
535 | */ |
536 | static MemoryRegionSection flatview_do_translate(FlatView *fv, |
537 | hwaddr addr, |
538 | hwaddr *xlat, |
539 | hwaddr *plen_out, |
540 | hwaddr *page_mask_out, |
541 | bool is_write, |
542 | bool is_mmio, |
543 | AddressSpace **target_as, |
544 | MemTxAttrs attrs) |
545 | { |
546 | MemoryRegionSection *section; |
547 | IOMMUMemoryRegion *iommu_mr; |
548 | hwaddr plen = (hwaddr)(-1); |
549 | |
550 | if (!plen_out) { |
551 | plen_out = &plen; |
552 | } |
553 | |
554 | section = address_space_translate_internal( |
555 | flatview_to_dispatch(fv), addr, xlat, |
556 | plen_out, is_mmio); |
557 | |
558 | iommu_mr = memory_region_get_iommu(section->mr); |
559 | if (unlikely(iommu_mr)) { |
560 | return address_space_translate_iommu(iommu_mr, xlat, |
561 | plen_out, page_mask_out, |
562 | is_write, is_mmio, |
563 | target_as, attrs); |
564 | } |
565 | if (page_mask_out) { |
566 | /* Not behind an IOMMU, use default page size. */ |
567 | *page_mask_out = ~TARGET_PAGE_MASK; |
568 | } |
569 | |
570 | return *section; |
571 | } |
572 | |
573 | /* Called from RCU critical section */ |
574 | IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr, |
575 | bool is_write, MemTxAttrs attrs) |
576 | { |
577 | MemoryRegionSection section; |
578 | hwaddr xlat, page_mask; |
579 | |
580 | /* |
581 | * This can never be MMIO, and we don't really care about plen, |
582 | * but page mask. |
583 | */ |
584 | section = flatview_do_translate(address_space_to_flatview(as), addr, &xlat, |
585 | NULL, &page_mask, is_write, false, &as, |
586 | attrs); |
587 | |
588 | /* Illegal translation */ |
589 | if (section.mr == &io_mem_unassigned) { |
590 | goto iotlb_fail; |
591 | } |
592 | |
593 | /* Convert memory region offset into address space offset */ |
594 | xlat += section.offset_within_address_space - |
595 | section.offset_within_region; |
596 | |
597 | return (IOMMUTLBEntry) { |
598 | .target_as = as, |
599 | .iova = addr & ~page_mask, |
600 | .translated_addr = xlat & ~page_mask, |
601 | .addr_mask = page_mask, |
602 | /* IOTLBs are for DMAs, and DMA only allows on RAMs. */ |
603 | .perm = IOMMU_RW, |
604 | }; |
605 | |
606 | iotlb_fail: |
607 | return (IOMMUTLBEntry) {0}; |
608 | } |
609 | |
610 | /* Called from RCU critical section */ |
611 | MemoryRegion *flatview_translate(FlatView *fv, hwaddr addr, hwaddr *xlat, |
612 | hwaddr *plen, bool is_write, |
613 | MemTxAttrs attrs) |
614 | { |
615 | MemoryRegion *mr; |
616 | MemoryRegionSection section; |
617 | AddressSpace *as = NULL; |
618 | |
619 | /* This can be MMIO, so setup MMIO bit. */ |
620 | section = flatview_do_translate(fv, addr, xlat, plen, NULL, |
621 | is_write, true, &as, attrs); |
622 | mr = section.mr; |
623 | |
624 | if (xen_enabled() && memory_access_is_direct(mr, is_write)) { |
625 | hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr; |
626 | *plen = MIN(page, *plen); |
627 | } |
628 | |
629 | return mr; |
630 | } |
631 | |
632 | typedef struct TCGIOMMUNotifier { |
633 | IOMMUNotifier n; |
634 | MemoryRegion *mr; |
635 | CPUState *cpu; |
636 | int iommu_idx; |
637 | bool active; |
638 | } TCGIOMMUNotifier; |
639 | |
640 | static void tcg_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) |
641 | { |
642 | TCGIOMMUNotifier *notifier = container_of(n, TCGIOMMUNotifier, n); |
643 | |
644 | if (!notifier->active) { |
645 | return; |
646 | } |
647 | tlb_flush(notifier->cpu); |
648 | notifier->active = false; |
649 | /* We leave the notifier struct on the list to avoid reallocating it later. |
650 | * Generally the number of IOMMUs a CPU deals with will be small. |
651 | * In any case we can't unregister the iommu notifier from a notify |
652 | * callback. |
653 | */ |
654 | } |
655 | |
656 | static void tcg_register_iommu_notifier(CPUState *cpu, |
657 | IOMMUMemoryRegion *iommu_mr, |
658 | int iommu_idx) |
659 | { |
660 | /* Make sure this CPU has an IOMMU notifier registered for this |
661 | * IOMMU/IOMMU index combination, so that we can flush its TLB |
662 | * when the IOMMU tells us the mappings we've cached have changed. |
663 | */ |
664 | MemoryRegion *mr = MEMORY_REGION(iommu_mr); |
665 | TCGIOMMUNotifier *notifier; |
666 | int i; |
667 | |
668 | for (i = 0; i < cpu->iommu_notifiers->len; i++) { |
669 | notifier = g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i); |
670 | if (notifier->mr == mr && notifier->iommu_idx == iommu_idx) { |
671 | break; |
672 | } |
673 | } |
674 | if (i == cpu->iommu_notifiers->len) { |
675 | /* Not found, add a new entry at the end of the array */ |
676 | cpu->iommu_notifiers = g_array_set_size(cpu->iommu_notifiers, i + 1); |
677 | notifier = g_new0(TCGIOMMUNotifier, 1); |
678 | g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i) = notifier; |
679 | |
680 | notifier->mr = mr; |
681 | notifier->iommu_idx = iommu_idx; |
682 | notifier->cpu = cpu; |
683 | /* Rather than trying to register interest in the specific part |
684 | * of the iommu's address space that we've accessed and then |
685 | * expand it later as subsequent accesses touch more of it, we |
686 | * just register interest in the whole thing, on the assumption |
687 | * that iommu reconfiguration will be rare. |
688 | */ |
689 | iommu_notifier_init(¬ifier->n, |
690 | tcg_iommu_unmap_notify, |
691 | IOMMU_NOTIFIER_UNMAP, |
692 | 0, |
693 | HWADDR_MAX, |
694 | iommu_idx); |
695 | memory_region_register_iommu_notifier(notifier->mr, ¬ifier->n); |
696 | } |
697 | |
698 | if (!notifier->active) { |
699 | notifier->active = true; |
700 | } |
701 | } |
702 | |
703 | static void tcg_iommu_free_notifier_list(CPUState *cpu) |
704 | { |
705 | /* Destroy the CPU's notifier list */ |
706 | int i; |
707 | TCGIOMMUNotifier *notifier; |
708 | |
709 | for (i = 0; i < cpu->iommu_notifiers->len; i++) { |
710 | notifier = g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i); |
711 | memory_region_unregister_iommu_notifier(notifier->mr, ¬ifier->n); |
712 | g_free(notifier); |
713 | } |
714 | g_array_free(cpu->iommu_notifiers, true); |
715 | } |
716 | |
717 | /* Called from RCU critical section */ |
718 | MemoryRegionSection * |
719 | address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr, |
720 | hwaddr *xlat, hwaddr *plen, |
721 | MemTxAttrs attrs, int *prot) |
722 | { |
723 | MemoryRegionSection *section; |
724 | IOMMUMemoryRegion *iommu_mr; |
725 | IOMMUMemoryRegionClass *imrc; |
726 | IOMMUTLBEntry iotlb; |
727 | int iommu_idx; |
728 | AddressSpaceDispatch *d = atomic_rcu_read(&cpu->cpu_ases[asidx].memory_dispatch); |
729 | |
730 | for (;;) { |
731 | section = address_space_translate_internal(d, addr, &addr, plen, false); |
732 | |
733 | iommu_mr = memory_region_get_iommu(section->mr); |
734 | if (!iommu_mr) { |
735 | break; |
736 | } |
737 | |
738 | imrc = memory_region_get_iommu_class_nocheck(iommu_mr); |
739 | |
740 | iommu_idx = imrc->attrs_to_index(iommu_mr, attrs); |
741 | tcg_register_iommu_notifier(cpu, iommu_mr, iommu_idx); |
742 | /* We need all the permissions, so pass IOMMU_NONE so the IOMMU |
743 | * doesn't short-cut its translation table walk. |
744 | */ |
745 | iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, iommu_idx); |
746 | addr = ((iotlb.translated_addr & ~iotlb.addr_mask) |
747 | | (addr & iotlb.addr_mask)); |
748 | /* Update the caller's prot bits to remove permissions the IOMMU |
749 | * is giving us a failure response for. If we get down to no |
750 | * permissions left at all we can give up now. |
751 | */ |
752 | if (!(iotlb.perm & IOMMU_RO)) { |
753 | *prot &= ~(PAGE_READ | PAGE_EXEC); |
754 | } |
755 | if (!(iotlb.perm & IOMMU_WO)) { |
756 | *prot &= ~PAGE_WRITE; |
757 | } |
758 | |
759 | if (!*prot) { |
760 | goto translate_fail; |
761 | } |
762 | |
763 | d = flatview_to_dispatch(address_space_to_flatview(iotlb.target_as)); |
764 | } |
765 | |
766 | assert(!memory_region_is_iommu(section->mr)); |
767 | *xlat = addr; |
768 | return section; |
769 | |
770 | translate_fail: |
771 | return &d->map.sections[PHYS_SECTION_UNASSIGNED]; |
772 | } |
773 | #endif |
774 | |
775 | #if !defined(CONFIG_USER_ONLY) |
776 | |
777 | static int cpu_common_post_load(void *opaque, int version_id) |
778 | { |
779 | CPUState *cpu = opaque; |
780 | |
781 | /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the |
782 | version_id is increased. */ |
783 | cpu->interrupt_request &= ~0x01; |
784 | tlb_flush(cpu); |
785 | |
786 | /* loadvm has just updated the content of RAM, bypassing the |
787 | * usual mechanisms that ensure we flush TBs for writes to |
788 | * memory we've translated code from. So we must flush all TBs, |
789 | * which will now be stale. |
790 | */ |
791 | tb_flush(cpu); |
792 | |
793 | return 0; |
794 | } |
795 | |
796 | static int cpu_common_pre_load(void *opaque) |
797 | { |
798 | CPUState *cpu = opaque; |
799 | |
800 | cpu->exception_index = -1; |
801 | |
802 | return 0; |
803 | } |
804 | |
805 | static bool cpu_common_exception_index_needed(void *opaque) |
806 | { |
807 | CPUState *cpu = opaque; |
808 | |
809 | return tcg_enabled() && cpu->exception_index != -1; |
810 | } |
811 | |
812 | static const VMStateDescription vmstate_cpu_common_exception_index = { |
813 | .name = "cpu_common/exception_index" , |
814 | .version_id = 1, |
815 | .minimum_version_id = 1, |
816 | .needed = cpu_common_exception_index_needed, |
817 | .fields = (VMStateField[]) { |
818 | VMSTATE_INT32(exception_index, CPUState), |
819 | VMSTATE_END_OF_LIST() |
820 | } |
821 | }; |
822 | |
823 | static bool cpu_common_crash_occurred_needed(void *opaque) |
824 | { |
825 | CPUState *cpu = opaque; |
826 | |
827 | return cpu->crash_occurred; |
828 | } |
829 | |
830 | static const VMStateDescription vmstate_cpu_common_crash_occurred = { |
831 | .name = "cpu_common/crash_occurred" , |
832 | .version_id = 1, |
833 | .minimum_version_id = 1, |
834 | .needed = cpu_common_crash_occurred_needed, |
835 | .fields = (VMStateField[]) { |
836 | VMSTATE_BOOL(crash_occurred, CPUState), |
837 | VMSTATE_END_OF_LIST() |
838 | } |
839 | }; |
840 | |
841 | const VMStateDescription vmstate_cpu_common = { |
842 | .name = "cpu_common" , |
843 | .version_id = 1, |
844 | .minimum_version_id = 1, |
845 | .pre_load = cpu_common_pre_load, |
846 | .post_load = cpu_common_post_load, |
847 | .fields = (VMStateField[]) { |
848 | VMSTATE_UINT32(halted, CPUState), |
849 | VMSTATE_UINT32(interrupt_request, CPUState), |
850 | VMSTATE_END_OF_LIST() |
851 | }, |
852 | .subsections = (const VMStateDescription*[]) { |
853 | &vmstate_cpu_common_exception_index, |
854 | &vmstate_cpu_common_crash_occurred, |
855 | NULL |
856 | } |
857 | }; |
858 | |
859 | #endif |
860 | |
861 | CPUState *qemu_get_cpu(int index) |
862 | { |
863 | CPUState *cpu; |
864 | |
865 | CPU_FOREACH(cpu) { |
866 | if (cpu->cpu_index == index) { |
867 | return cpu; |
868 | } |
869 | } |
870 | |
871 | return NULL; |
872 | } |
873 | |
874 | #if !defined(CONFIG_USER_ONLY) |
875 | void cpu_address_space_init(CPUState *cpu, int asidx, |
876 | const char *prefix, MemoryRegion *mr) |
877 | { |
878 | CPUAddressSpace *newas; |
879 | AddressSpace *as = g_new0(AddressSpace, 1); |
880 | char *as_name; |
881 | |
882 | assert(mr); |
883 | as_name = g_strdup_printf("%s-%d" , prefix, cpu->cpu_index); |
884 | address_space_init(as, mr, as_name); |
885 | g_free(as_name); |
886 | |
887 | /* Target code should have set num_ases before calling us */ |
888 | assert(asidx < cpu->num_ases); |
889 | |
890 | if (asidx == 0) { |
891 | /* address space 0 gets the convenience alias */ |
892 | cpu->as = as; |
893 | } |
894 | |
895 | /* KVM cannot currently support multiple address spaces. */ |
896 | assert(asidx == 0 || !kvm_enabled()); |
897 | |
898 | if (!cpu->cpu_ases) { |
899 | cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases); |
900 | } |
901 | |
902 | newas = &cpu->cpu_ases[asidx]; |
903 | newas->cpu = cpu; |
904 | newas->as = as; |
905 | if (tcg_enabled()) { |
906 | newas->tcg_as_listener.log_global_after_sync = tcg_log_global_after_sync; |
907 | newas->tcg_as_listener.commit = tcg_commit; |
908 | memory_listener_register(&newas->tcg_as_listener, as); |
909 | } |
910 | } |
911 | |
912 | AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx) |
913 | { |
914 | /* Return the AddressSpace corresponding to the specified index */ |
915 | return cpu->cpu_ases[asidx].as; |
916 | } |
917 | #endif |
918 | |
919 | void cpu_exec_unrealizefn(CPUState *cpu) |
920 | { |
921 | CPUClass *cc = CPU_GET_CLASS(cpu); |
922 | |
923 | cpu_list_remove(cpu); |
924 | |
925 | if (cc->vmsd != NULL) { |
926 | vmstate_unregister(NULL, cc->vmsd, cpu); |
927 | } |
928 | if (qdev_get_vmsd(DEVICE(cpu)) == NULL) { |
929 | vmstate_unregister(NULL, &vmstate_cpu_common, cpu); |
930 | } |
931 | #ifndef CONFIG_USER_ONLY |
932 | tcg_iommu_free_notifier_list(cpu); |
933 | #endif |
934 | } |
935 | |
936 | Property cpu_common_props[] = { |
937 | #ifndef CONFIG_USER_ONLY |
938 | /* Create a memory property for softmmu CPU object, |
939 | * so users can wire up its memory. (This can't go in hw/core/cpu.c |
940 | * because that file is compiled only once for both user-mode |
941 | * and system builds.) The default if no link is set up is to use |
942 | * the system address space. |
943 | */ |
944 | DEFINE_PROP_LINK("memory" , CPUState, memory, TYPE_MEMORY_REGION, |
945 | MemoryRegion *), |
946 | #endif |
947 | DEFINE_PROP_END_OF_LIST(), |
948 | }; |
949 | |
950 | void cpu_exec_initfn(CPUState *cpu) |
951 | { |
952 | cpu->as = NULL; |
953 | cpu->num_ases = 0; |
954 | |
955 | #ifndef CONFIG_USER_ONLY |
956 | cpu->thread_id = qemu_get_thread_id(); |
957 | cpu->memory = system_memory; |
958 | object_ref(OBJECT(cpu->memory)); |
959 | #endif |
960 | } |
961 | |
962 | void cpu_exec_realizefn(CPUState *cpu, Error **errp) |
963 | { |
964 | CPUClass *cc = CPU_GET_CLASS(cpu); |
965 | static bool tcg_target_initialized; |
966 | |
967 | cpu_list_add(cpu); |
968 | |
969 | if (tcg_enabled() && !tcg_target_initialized) { |
970 | tcg_target_initialized = true; |
971 | cc->tcg_initialize(); |
972 | } |
973 | tlb_init(cpu); |
974 | |
975 | #ifndef CONFIG_USER_ONLY |
976 | if (qdev_get_vmsd(DEVICE(cpu)) == NULL) { |
977 | vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu); |
978 | } |
979 | if (cc->vmsd != NULL) { |
980 | vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu); |
981 | } |
982 | |
983 | cpu->iommu_notifiers = g_array_new(false, true, sizeof(TCGIOMMUNotifier *)); |
984 | #endif |
985 | } |
986 | |
987 | const char *parse_cpu_option(const char *cpu_option) |
988 | { |
989 | ObjectClass *oc; |
990 | CPUClass *cc; |
991 | gchar **model_pieces; |
992 | const char *cpu_type; |
993 | |
994 | model_pieces = g_strsplit(cpu_option, "," , 2); |
995 | if (!model_pieces[0]) { |
996 | error_report("-cpu option cannot be empty" ); |
997 | exit(1); |
998 | } |
999 | |
1000 | oc = cpu_class_by_name(CPU_RESOLVING_TYPE, model_pieces[0]); |
1001 | if (oc == NULL) { |
1002 | error_report("unable to find CPU model '%s'" , model_pieces[0]); |
1003 | g_strfreev(model_pieces); |
1004 | exit(EXIT_FAILURE); |
1005 | } |
1006 | |
1007 | cpu_type = object_class_get_name(oc); |
1008 | cc = CPU_CLASS(oc); |
1009 | cc->parse_features(cpu_type, model_pieces[1], &error_fatal); |
1010 | g_strfreev(model_pieces); |
1011 | return cpu_type; |
1012 | } |
1013 | |
1014 | #if defined(CONFIG_USER_ONLY) |
1015 | void tb_invalidate_phys_addr(target_ulong addr) |
1016 | { |
1017 | mmap_lock(); |
1018 | tb_invalidate_phys_page_range(addr, addr + 1, 0); |
1019 | mmap_unlock(); |
1020 | } |
1021 | |
1022 | static void breakpoint_invalidate(CPUState *cpu, target_ulong pc) |
1023 | { |
1024 | tb_invalidate_phys_addr(pc); |
1025 | } |
1026 | #else |
1027 | void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs) |
1028 | { |
1029 | ram_addr_t ram_addr; |
1030 | MemoryRegion *mr; |
1031 | hwaddr l = 1; |
1032 | |
1033 | if (!tcg_enabled()) { |
1034 | return; |
1035 | } |
1036 | |
1037 | rcu_read_lock(); |
1038 | mr = address_space_translate(as, addr, &addr, &l, false, attrs); |
1039 | if (!(memory_region_is_ram(mr) |
1040 | || memory_region_is_romd(mr))) { |
1041 | rcu_read_unlock(); |
1042 | return; |
1043 | } |
1044 | ram_addr = memory_region_get_ram_addr(mr) + addr; |
1045 | tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0); |
1046 | rcu_read_unlock(); |
1047 | } |
1048 | |
1049 | static void breakpoint_invalidate(CPUState *cpu, target_ulong pc) |
1050 | { |
1051 | MemTxAttrs attrs; |
1052 | hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs); |
1053 | int asidx = cpu_asidx_from_attrs(cpu, attrs); |
1054 | if (phys != -1) { |
1055 | /* Locks grabbed by tb_invalidate_phys_addr */ |
1056 | tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as, |
1057 | phys | (pc & ~TARGET_PAGE_MASK), attrs); |
1058 | } |
1059 | } |
1060 | #endif |
1061 | |
1062 | #ifndef CONFIG_USER_ONLY |
1063 | /* Add a watchpoint. */ |
1064 | int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, |
1065 | int flags, CPUWatchpoint **watchpoint) |
1066 | { |
1067 | CPUWatchpoint *wp; |
1068 | |
1069 | /* forbid ranges which are empty or run off the end of the address space */ |
1070 | if (len == 0 || (addr + len - 1) < addr) { |
1071 | error_report("tried to set invalid watchpoint at %" |
1072 | VADDR_PRIx ", len=%" VADDR_PRIu, addr, len); |
1073 | return -EINVAL; |
1074 | } |
1075 | wp = g_malloc(sizeof(*wp)); |
1076 | |
1077 | wp->vaddr = addr; |
1078 | wp->len = len; |
1079 | wp->flags = flags; |
1080 | |
1081 | /* keep all GDB-injected watchpoints in front */ |
1082 | if (flags & BP_GDB) { |
1083 | QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry); |
1084 | } else { |
1085 | QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry); |
1086 | } |
1087 | |
1088 | tlb_flush_page(cpu, addr); |
1089 | |
1090 | if (watchpoint) |
1091 | *watchpoint = wp; |
1092 | return 0; |
1093 | } |
1094 | |
1095 | /* Remove a specific watchpoint. */ |
1096 | int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len, |
1097 | int flags) |
1098 | { |
1099 | CPUWatchpoint *wp; |
1100 | |
1101 | QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) { |
1102 | if (addr == wp->vaddr && len == wp->len |
1103 | && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) { |
1104 | cpu_watchpoint_remove_by_ref(cpu, wp); |
1105 | return 0; |
1106 | } |
1107 | } |
1108 | return -ENOENT; |
1109 | } |
1110 | |
1111 | /* Remove a specific watchpoint by reference. */ |
1112 | void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint) |
1113 | { |
1114 | QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry); |
1115 | |
1116 | tlb_flush_page(cpu, watchpoint->vaddr); |
1117 | |
1118 | g_free(watchpoint); |
1119 | } |
1120 | |
1121 | /* Remove all matching watchpoints. */ |
1122 | void cpu_watchpoint_remove_all(CPUState *cpu, int mask) |
1123 | { |
1124 | CPUWatchpoint *wp, *next; |
1125 | |
1126 | QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) { |
1127 | if (wp->flags & mask) { |
1128 | cpu_watchpoint_remove_by_ref(cpu, wp); |
1129 | } |
1130 | } |
1131 | } |
1132 | |
1133 | /* Return true if this watchpoint address matches the specified |
1134 | * access (ie the address range covered by the watchpoint overlaps |
1135 | * partially or completely with the address range covered by the |
1136 | * access). |
1137 | */ |
1138 | static inline bool watchpoint_address_matches(CPUWatchpoint *wp, |
1139 | vaddr addr, vaddr len) |
1140 | { |
1141 | /* We know the lengths are non-zero, but a little caution is |
1142 | * required to avoid errors in the case where the range ends |
1143 | * exactly at the top of the address space and so addr + len |
1144 | * wraps round to zero. |
1145 | */ |
1146 | vaddr wpend = wp->vaddr + wp->len - 1; |
1147 | vaddr addrend = addr + len - 1; |
1148 | |
1149 | return !(addr > wpend || wp->vaddr > addrend); |
1150 | } |
1151 | |
1152 | /* Return flags for watchpoints that match addr + prot. */ |
1153 | int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len) |
1154 | { |
1155 | CPUWatchpoint *wp; |
1156 | int ret = 0; |
1157 | |
1158 | QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) { |
1159 | if (watchpoint_address_matches(wp, addr, TARGET_PAGE_SIZE)) { |
1160 | ret |= wp->flags; |
1161 | } |
1162 | } |
1163 | return ret; |
1164 | } |
1165 | #endif /* !CONFIG_USER_ONLY */ |
1166 | |
1167 | /* Add a breakpoint. */ |
1168 | int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags, |
1169 | CPUBreakpoint **breakpoint) |
1170 | { |
1171 | CPUBreakpoint *bp; |
1172 | |
1173 | bp = g_malloc(sizeof(*bp)); |
1174 | |
1175 | bp->pc = pc; |
1176 | bp->flags = flags; |
1177 | |
1178 | /* keep all GDB-injected breakpoints in front */ |
1179 | if (flags & BP_GDB) { |
1180 | QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry); |
1181 | } else { |
1182 | QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry); |
1183 | } |
1184 | |
1185 | breakpoint_invalidate(cpu, pc); |
1186 | |
1187 | if (breakpoint) { |
1188 | *breakpoint = bp; |
1189 | } |
1190 | return 0; |
1191 | } |
1192 | |
1193 | /* Remove a specific breakpoint. */ |
1194 | int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags) |
1195 | { |
1196 | CPUBreakpoint *bp; |
1197 | |
1198 | QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) { |
1199 | if (bp->pc == pc && bp->flags == flags) { |
1200 | cpu_breakpoint_remove_by_ref(cpu, bp); |
1201 | return 0; |
1202 | } |
1203 | } |
1204 | return -ENOENT; |
1205 | } |
1206 | |
1207 | /* Remove a specific breakpoint by reference. */ |
1208 | void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint) |
1209 | { |
1210 | QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry); |
1211 | |
1212 | breakpoint_invalidate(cpu, breakpoint->pc); |
1213 | |
1214 | g_free(breakpoint); |
1215 | } |
1216 | |
1217 | /* Remove all matching breakpoints. */ |
1218 | void cpu_breakpoint_remove_all(CPUState *cpu, int mask) |
1219 | { |
1220 | CPUBreakpoint *bp, *next; |
1221 | |
1222 | QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) { |
1223 | if (bp->flags & mask) { |
1224 | cpu_breakpoint_remove_by_ref(cpu, bp); |
1225 | } |
1226 | } |
1227 | } |
1228 | |
1229 | /* enable or disable single step mode. EXCP_DEBUG is returned by the |
1230 | CPU loop after each instruction */ |
1231 | void cpu_single_step(CPUState *cpu, int enabled) |
1232 | { |
1233 | if (cpu->singlestep_enabled != enabled) { |
1234 | cpu->singlestep_enabled = enabled; |
1235 | if (kvm_enabled()) { |
1236 | kvm_update_guest_debug(cpu, 0); |
1237 | } else { |
1238 | /* must flush all the translated code to avoid inconsistencies */ |
1239 | /* XXX: only flush what is necessary */ |
1240 | tb_flush(cpu); |
1241 | } |
1242 | } |
1243 | } |
1244 | |
1245 | void cpu_abort(CPUState *cpu, const char *fmt, ...) |
1246 | { |
1247 | va_list ap; |
1248 | va_list ap2; |
1249 | |
1250 | va_start(ap, fmt); |
1251 | va_copy(ap2, ap); |
1252 | fprintf(stderr, "qemu: fatal: " ); |
1253 | vfprintf(stderr, fmt, ap); |
1254 | fprintf(stderr, "\n" ); |
1255 | cpu_dump_state(cpu, stderr, CPU_DUMP_FPU | CPU_DUMP_CCOP); |
1256 | if (qemu_log_separate()) { |
1257 | qemu_log_lock(); |
1258 | qemu_log("qemu: fatal: " ); |
1259 | qemu_log_vprintf(fmt, ap2); |
1260 | qemu_log("\n" ); |
1261 | log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP); |
1262 | qemu_log_flush(); |
1263 | qemu_log_unlock(); |
1264 | qemu_log_close(); |
1265 | } |
1266 | va_end(ap2); |
1267 | va_end(ap); |
1268 | replay_finish(); |
1269 | #if defined(CONFIG_USER_ONLY) |
1270 | { |
1271 | struct sigaction act; |
1272 | sigfillset(&act.sa_mask); |
1273 | act.sa_handler = SIG_DFL; |
1274 | act.sa_flags = 0; |
1275 | sigaction(SIGABRT, &act, NULL); |
1276 | } |
1277 | #endif |
1278 | abort(); |
1279 | } |
1280 | |
1281 | #if !defined(CONFIG_USER_ONLY) |
1282 | /* Called from RCU critical section */ |
1283 | static RAMBlock *qemu_get_ram_block(ram_addr_t addr) |
1284 | { |
1285 | RAMBlock *block; |
1286 | |
1287 | block = atomic_rcu_read(&ram_list.mru_block); |
1288 | if (block && addr - block->offset < block->max_length) { |
1289 | return block; |
1290 | } |
1291 | RAMBLOCK_FOREACH(block) { |
1292 | if (addr - block->offset < block->max_length) { |
1293 | goto found; |
1294 | } |
1295 | } |
1296 | |
1297 | fprintf(stderr, "Bad ram offset %" PRIx64 "\n" , (uint64_t)addr); |
1298 | abort(); |
1299 | |
1300 | found: |
1301 | /* It is safe to write mru_block outside the iothread lock. This |
1302 | * is what happens: |
1303 | * |
1304 | * mru_block = xxx |
1305 | * rcu_read_unlock() |
1306 | * xxx removed from list |
1307 | * rcu_read_lock() |
1308 | * read mru_block |
1309 | * mru_block = NULL; |
1310 | * call_rcu(reclaim_ramblock, xxx); |
1311 | * rcu_read_unlock() |
1312 | * |
1313 | * atomic_rcu_set is not needed here. The block was already published |
1314 | * when it was placed into the list. Here we're just making an extra |
1315 | * copy of the pointer. |
1316 | */ |
1317 | ram_list.mru_block = block; |
1318 | return block; |
1319 | } |
1320 | |
1321 | static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length) |
1322 | { |
1323 | CPUState *cpu; |
1324 | ram_addr_t start1; |
1325 | RAMBlock *block; |
1326 | ram_addr_t end; |
1327 | |
1328 | assert(tcg_enabled()); |
1329 | end = TARGET_PAGE_ALIGN(start + length); |
1330 | start &= TARGET_PAGE_MASK; |
1331 | |
1332 | rcu_read_lock(); |
1333 | block = qemu_get_ram_block(start); |
1334 | assert(block == qemu_get_ram_block(end - 1)); |
1335 | start1 = (uintptr_t)ramblock_ptr(block, start - block->offset); |
1336 | CPU_FOREACH(cpu) { |
1337 | tlb_reset_dirty(cpu, start1, length); |
1338 | } |
1339 | rcu_read_unlock(); |
1340 | } |
1341 | |
1342 | /* Note: start and end must be within the same ram block. */ |
1343 | bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start, |
1344 | ram_addr_t length, |
1345 | unsigned client) |
1346 | { |
1347 | DirtyMemoryBlocks *blocks; |
1348 | unsigned long end, page; |
1349 | bool dirty = false; |
1350 | RAMBlock *ramblock; |
1351 | uint64_t mr_offset, mr_size; |
1352 | |
1353 | if (length == 0) { |
1354 | return false; |
1355 | } |
1356 | |
1357 | end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS; |
1358 | page = start >> TARGET_PAGE_BITS; |
1359 | |
1360 | rcu_read_lock(); |
1361 | |
1362 | blocks = atomic_rcu_read(&ram_list.dirty_memory[client]); |
1363 | ramblock = qemu_get_ram_block(start); |
1364 | /* Range sanity check on the ramblock */ |
1365 | assert(start >= ramblock->offset && |
1366 | start + length <= ramblock->offset + ramblock->used_length); |
1367 | |
1368 | while (page < end) { |
1369 | unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE; |
1370 | unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE; |
1371 | unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset); |
1372 | |
1373 | dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx], |
1374 | offset, num); |
1375 | page += num; |
1376 | } |
1377 | |
1378 | mr_offset = (ram_addr_t)(page << TARGET_PAGE_BITS) - ramblock->offset; |
1379 | mr_size = (end - page) << TARGET_PAGE_BITS; |
1380 | memory_region_clear_dirty_bitmap(ramblock->mr, mr_offset, mr_size); |
1381 | |
1382 | rcu_read_unlock(); |
1383 | |
1384 | if (dirty && tcg_enabled()) { |
1385 | tlb_reset_dirty_range_all(start, length); |
1386 | } |
1387 | |
1388 | return dirty; |
1389 | } |
1390 | |
1391 | DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty |
1392 | (MemoryRegion *mr, hwaddr offset, hwaddr length, unsigned client) |
1393 | { |
1394 | DirtyMemoryBlocks *blocks; |
1395 | ram_addr_t start = memory_region_get_ram_addr(mr) + offset; |
1396 | unsigned long align = 1UL << (TARGET_PAGE_BITS + BITS_PER_LEVEL); |
1397 | ram_addr_t first = QEMU_ALIGN_DOWN(start, align); |
1398 | ram_addr_t last = QEMU_ALIGN_UP(start + length, align); |
1399 | DirtyBitmapSnapshot *snap; |
1400 | unsigned long page, end, dest; |
1401 | |
1402 | snap = g_malloc0(sizeof(*snap) + |
1403 | ((last - first) >> (TARGET_PAGE_BITS + 3))); |
1404 | snap->start = first; |
1405 | snap->end = last; |
1406 | |
1407 | page = first >> TARGET_PAGE_BITS; |
1408 | end = last >> TARGET_PAGE_BITS; |
1409 | dest = 0; |
1410 | |
1411 | rcu_read_lock(); |
1412 | |
1413 | blocks = atomic_rcu_read(&ram_list.dirty_memory[client]); |
1414 | |
1415 | while (page < end) { |
1416 | unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE; |
1417 | unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE; |
1418 | unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset); |
1419 | |
1420 | assert(QEMU_IS_ALIGNED(offset, (1 << BITS_PER_LEVEL))); |
1421 | assert(QEMU_IS_ALIGNED(num, (1 << BITS_PER_LEVEL))); |
1422 | offset >>= BITS_PER_LEVEL; |
1423 | |
1424 | bitmap_copy_and_clear_atomic(snap->dirty + dest, |
1425 | blocks->blocks[idx] + offset, |
1426 | num); |
1427 | page += num; |
1428 | dest += num >> BITS_PER_LEVEL; |
1429 | } |
1430 | |
1431 | rcu_read_unlock(); |
1432 | |
1433 | if (tcg_enabled()) { |
1434 | tlb_reset_dirty_range_all(start, length); |
1435 | } |
1436 | |
1437 | memory_region_clear_dirty_bitmap(mr, offset, length); |
1438 | |
1439 | return snap; |
1440 | } |
1441 | |
1442 | bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap, |
1443 | ram_addr_t start, |
1444 | ram_addr_t length) |
1445 | { |
1446 | unsigned long page, end; |
1447 | |
1448 | assert(start >= snap->start); |
1449 | assert(start + length <= snap->end); |
1450 | |
1451 | end = TARGET_PAGE_ALIGN(start + length - snap->start) >> TARGET_PAGE_BITS; |
1452 | page = (start - snap->start) >> TARGET_PAGE_BITS; |
1453 | |
1454 | while (page < end) { |
1455 | if (test_bit(page, snap->dirty)) { |
1456 | return true; |
1457 | } |
1458 | page++; |
1459 | } |
1460 | return false; |
1461 | } |
1462 | |
1463 | /* Called from RCU critical section */ |
1464 | hwaddr memory_region_section_get_iotlb(CPUState *cpu, |
1465 | MemoryRegionSection *section, |
1466 | target_ulong vaddr, |
1467 | hwaddr paddr, hwaddr xlat, |
1468 | int prot, |
1469 | target_ulong *address) |
1470 | { |
1471 | hwaddr iotlb; |
1472 | |
1473 | if (memory_region_is_ram(section->mr)) { |
1474 | /* Normal RAM. */ |
1475 | iotlb = memory_region_get_ram_addr(section->mr) + xlat; |
1476 | if (!section->readonly) { |
1477 | iotlb |= PHYS_SECTION_NOTDIRTY; |
1478 | } else { |
1479 | iotlb |= PHYS_SECTION_ROM; |
1480 | } |
1481 | } else { |
1482 | AddressSpaceDispatch *d; |
1483 | |
1484 | d = flatview_to_dispatch(section->fv); |
1485 | iotlb = section - d->map.sections; |
1486 | iotlb += xlat; |
1487 | } |
1488 | |
1489 | return iotlb; |
1490 | } |
1491 | #endif /* defined(CONFIG_USER_ONLY) */ |
1492 | |
1493 | #if !defined(CONFIG_USER_ONLY) |
1494 | |
1495 | static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end, |
1496 | uint16_t section); |
1497 | static subpage_t *subpage_init(FlatView *fv, hwaddr base); |
1498 | |
1499 | static void *(*phys_mem_alloc)(size_t size, uint64_t *align, bool shared) = |
1500 | qemu_anon_ram_alloc; |
1501 | |
1502 | /* |
1503 | * Set a custom physical guest memory alloator. |
1504 | * Accelerators with unusual needs may need this. Hopefully, we can |
1505 | * get rid of it eventually. |
1506 | */ |
1507 | void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align, bool shared)) |
1508 | { |
1509 | phys_mem_alloc = alloc; |
1510 | } |
1511 | |
1512 | static uint16_t phys_section_add(PhysPageMap *map, |
1513 | MemoryRegionSection *section) |
1514 | { |
1515 | /* The physical section number is ORed with a page-aligned |
1516 | * pointer to produce the iotlb entries. Thus it should |
1517 | * never overflow into the page-aligned value. |
1518 | */ |
1519 | assert(map->sections_nb < TARGET_PAGE_SIZE); |
1520 | |
1521 | if (map->sections_nb == map->sections_nb_alloc) { |
1522 | map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16); |
1523 | map->sections = g_renew(MemoryRegionSection, map->sections, |
1524 | map->sections_nb_alloc); |
1525 | } |
1526 | map->sections[map->sections_nb] = *section; |
1527 | memory_region_ref(section->mr); |
1528 | return map->sections_nb++; |
1529 | } |
1530 | |
1531 | static void phys_section_destroy(MemoryRegion *mr) |
1532 | { |
1533 | bool have_sub_page = mr->subpage; |
1534 | |
1535 | memory_region_unref(mr); |
1536 | |
1537 | if (have_sub_page) { |
1538 | subpage_t *subpage = container_of(mr, subpage_t, iomem); |
1539 | object_unref(OBJECT(&subpage->iomem)); |
1540 | g_free(subpage); |
1541 | } |
1542 | } |
1543 | |
1544 | static void phys_sections_free(PhysPageMap *map) |
1545 | { |
1546 | while (map->sections_nb > 0) { |
1547 | MemoryRegionSection *section = &map->sections[--map->sections_nb]; |
1548 | phys_section_destroy(section->mr); |
1549 | } |
1550 | g_free(map->sections); |
1551 | g_free(map->nodes); |
1552 | } |
1553 | |
1554 | static void register_subpage(FlatView *fv, MemoryRegionSection *section) |
1555 | { |
1556 | AddressSpaceDispatch *d = flatview_to_dispatch(fv); |
1557 | subpage_t *subpage; |
1558 | hwaddr base = section->offset_within_address_space |
1559 | & TARGET_PAGE_MASK; |
1560 | MemoryRegionSection *existing = phys_page_find(d, base); |
1561 | MemoryRegionSection subsection = { |
1562 | .offset_within_address_space = base, |
1563 | .size = int128_make64(TARGET_PAGE_SIZE), |
1564 | }; |
1565 | hwaddr start, end; |
1566 | |
1567 | assert(existing->mr->subpage || existing->mr == &io_mem_unassigned); |
1568 | |
1569 | if (!(existing->mr->subpage)) { |
1570 | subpage = subpage_init(fv, base); |
1571 | subsection.fv = fv; |
1572 | subsection.mr = &subpage->iomem; |
1573 | phys_page_set(d, base >> TARGET_PAGE_BITS, 1, |
1574 | phys_section_add(&d->map, &subsection)); |
1575 | } else { |
1576 | subpage = container_of(existing->mr, subpage_t, iomem); |
1577 | } |
1578 | start = section->offset_within_address_space & ~TARGET_PAGE_MASK; |
1579 | end = start + int128_get64(section->size) - 1; |
1580 | subpage_register(subpage, start, end, |
1581 | phys_section_add(&d->map, section)); |
1582 | } |
1583 | |
1584 | |
1585 | static void register_multipage(FlatView *fv, |
1586 | MemoryRegionSection *section) |
1587 | { |
1588 | AddressSpaceDispatch *d = flatview_to_dispatch(fv); |
1589 | hwaddr start_addr = section->offset_within_address_space; |
1590 | uint16_t section_index = phys_section_add(&d->map, section); |
1591 | uint64_t num_pages = int128_get64(int128_rshift(section->size, |
1592 | TARGET_PAGE_BITS)); |
1593 | |
1594 | assert(num_pages); |
1595 | phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index); |
1596 | } |
1597 | |
1598 | /* |
1599 | * The range in *section* may look like this: |
1600 | * |
1601 | * |s|PPPPPPP|s| |
1602 | * |
1603 | * where s stands for subpage and P for page. |
1604 | */ |
1605 | void flatview_add_to_dispatch(FlatView *fv, MemoryRegionSection *section) |
1606 | { |
1607 | MemoryRegionSection remain = *section; |
1608 | Int128 page_size = int128_make64(TARGET_PAGE_SIZE); |
1609 | |
1610 | /* register first subpage */ |
1611 | if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) { |
1612 | uint64_t left = TARGET_PAGE_ALIGN(remain.offset_within_address_space) |
1613 | - remain.offset_within_address_space; |
1614 | |
1615 | MemoryRegionSection now = remain; |
1616 | now.size = int128_min(int128_make64(left), now.size); |
1617 | register_subpage(fv, &now); |
1618 | if (int128_eq(remain.size, now.size)) { |
1619 | return; |
1620 | } |
1621 | remain.size = int128_sub(remain.size, now.size); |
1622 | remain.offset_within_address_space += int128_get64(now.size); |
1623 | remain.offset_within_region += int128_get64(now.size); |
1624 | } |
1625 | |
1626 | /* register whole pages */ |
1627 | if (int128_ge(remain.size, page_size)) { |
1628 | MemoryRegionSection now = remain; |
1629 | now.size = int128_and(now.size, int128_neg(page_size)); |
1630 | register_multipage(fv, &now); |
1631 | if (int128_eq(remain.size, now.size)) { |
1632 | return; |
1633 | } |
1634 | remain.size = int128_sub(remain.size, now.size); |
1635 | remain.offset_within_address_space += int128_get64(now.size); |
1636 | remain.offset_within_region += int128_get64(now.size); |
1637 | } |
1638 | |
1639 | /* register last subpage */ |
1640 | register_subpage(fv, &remain); |
1641 | } |
1642 | |
1643 | void qemu_flush_coalesced_mmio_buffer(void) |
1644 | { |
1645 | if (kvm_enabled()) |
1646 | kvm_flush_coalesced_mmio_buffer(); |
1647 | } |
1648 | |
1649 | void qemu_mutex_lock_ramlist(void) |
1650 | { |
1651 | qemu_mutex_lock(&ram_list.mutex); |
1652 | } |
1653 | |
1654 | void qemu_mutex_unlock_ramlist(void) |
1655 | { |
1656 | qemu_mutex_unlock(&ram_list.mutex); |
1657 | } |
1658 | |
1659 | void ram_block_dump(Monitor *mon) |
1660 | { |
1661 | RAMBlock *block; |
1662 | char *psize; |
1663 | |
1664 | rcu_read_lock(); |
1665 | monitor_printf(mon, "%24s %8s %18s %18s %18s\n" , |
1666 | "Block Name" , "PSize" , "Offset" , "Used" , "Total" ); |
1667 | RAMBLOCK_FOREACH(block) { |
1668 | psize = size_to_str(block->page_size); |
1669 | monitor_printf(mon, "%24s %8s 0x%016" PRIx64 " 0x%016" PRIx64 |
1670 | " 0x%016" PRIx64 "\n" , block->idstr, psize, |
1671 | (uint64_t)block->offset, |
1672 | (uint64_t)block->used_length, |
1673 | (uint64_t)block->max_length); |
1674 | g_free(psize); |
1675 | } |
1676 | rcu_read_unlock(); |
1677 | } |
1678 | |
1679 | #ifdef __linux__ |
1680 | /* |
1681 | * FIXME TOCTTOU: this iterates over memory backends' mem-path, which |
1682 | * may or may not name the same files / on the same filesystem now as |
1683 | * when we actually open and map them. Iterate over the file |
1684 | * descriptors instead, and use qemu_fd_getpagesize(). |
1685 | */ |
1686 | static int find_min_backend_pagesize(Object *obj, void *opaque) |
1687 | { |
1688 | long *hpsize_min = opaque; |
1689 | |
1690 | if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) { |
1691 | HostMemoryBackend *backend = MEMORY_BACKEND(obj); |
1692 | long hpsize = host_memory_backend_pagesize(backend); |
1693 | |
1694 | if (host_memory_backend_is_mapped(backend) && (hpsize < *hpsize_min)) { |
1695 | *hpsize_min = hpsize; |
1696 | } |
1697 | } |
1698 | |
1699 | return 0; |
1700 | } |
1701 | |
1702 | static int find_max_backend_pagesize(Object *obj, void *opaque) |
1703 | { |
1704 | long *hpsize_max = opaque; |
1705 | |
1706 | if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) { |
1707 | HostMemoryBackend *backend = MEMORY_BACKEND(obj); |
1708 | long hpsize = host_memory_backend_pagesize(backend); |
1709 | |
1710 | if (host_memory_backend_is_mapped(backend) && (hpsize > *hpsize_max)) { |
1711 | *hpsize_max = hpsize; |
1712 | } |
1713 | } |
1714 | |
1715 | return 0; |
1716 | } |
1717 | |
1718 | /* |
1719 | * TODO: We assume right now that all mapped host memory backends are |
1720 | * used as RAM, however some might be used for different purposes. |
1721 | */ |
1722 | long qemu_minrampagesize(void) |
1723 | { |
1724 | long hpsize = LONG_MAX; |
1725 | long mainrampagesize; |
1726 | Object *memdev_root; |
1727 | MachineState *ms = MACHINE(qdev_get_machine()); |
1728 | |
1729 | mainrampagesize = qemu_mempath_getpagesize(mem_path); |
1730 | |
1731 | /* it's possible we have memory-backend objects with |
1732 | * hugepage-backed RAM. these may get mapped into system |
1733 | * address space via -numa parameters or memory hotplug |
1734 | * hooks. we want to take these into account, but we |
1735 | * also want to make sure these supported hugepage |
1736 | * sizes are applicable across the entire range of memory |
1737 | * we may boot from, so we take the min across all |
1738 | * backends, and assume normal pages in cases where a |
1739 | * backend isn't backed by hugepages. |
1740 | */ |
1741 | memdev_root = object_resolve_path("/objects" , NULL); |
1742 | if (memdev_root) { |
1743 | object_child_foreach(memdev_root, find_min_backend_pagesize, &hpsize); |
1744 | } |
1745 | if (hpsize == LONG_MAX) { |
1746 | /* No additional memory regions found ==> Report main RAM page size */ |
1747 | return mainrampagesize; |
1748 | } |
1749 | |
1750 | /* If NUMA is disabled or the NUMA nodes are not backed with a |
1751 | * memory-backend, then there is at least one node using "normal" RAM, |
1752 | * so if its page size is smaller we have got to report that size instead. |
1753 | */ |
1754 | if (hpsize > mainrampagesize && |
1755 | (ms->numa_state == NULL || |
1756 | ms->numa_state->num_nodes == 0 || |
1757 | ms->numa_state->nodes[0].node_memdev == NULL)) { |
1758 | static bool warned; |
1759 | if (!warned) { |
1760 | error_report("Huge page support disabled (n/a for main memory)." ); |
1761 | warned = true; |
1762 | } |
1763 | return mainrampagesize; |
1764 | } |
1765 | |
1766 | return hpsize; |
1767 | } |
1768 | |
1769 | long qemu_maxrampagesize(void) |
1770 | { |
1771 | long pagesize = qemu_mempath_getpagesize(mem_path); |
1772 | Object *memdev_root = object_resolve_path("/objects" , NULL); |
1773 | |
1774 | if (memdev_root) { |
1775 | object_child_foreach(memdev_root, find_max_backend_pagesize, |
1776 | &pagesize); |
1777 | } |
1778 | return pagesize; |
1779 | } |
1780 | #else |
1781 | long qemu_minrampagesize(void) |
1782 | { |
1783 | return getpagesize(); |
1784 | } |
1785 | long qemu_maxrampagesize(void) |
1786 | { |
1787 | return getpagesize(); |
1788 | } |
1789 | #endif |
1790 | |
1791 | #ifdef CONFIG_POSIX |
1792 | static int64_t get_file_size(int fd) |
1793 | { |
1794 | int64_t size = lseek(fd, 0, SEEK_END); |
1795 | if (size < 0) { |
1796 | return -errno; |
1797 | } |
1798 | return size; |
1799 | } |
1800 | |
1801 | static int file_ram_open(const char *path, |
1802 | const char *region_name, |
1803 | bool *created, |
1804 | Error **errp) |
1805 | { |
1806 | char *filename; |
1807 | char *sanitized_name; |
1808 | char *c; |
1809 | int fd = -1; |
1810 | |
1811 | *created = false; |
1812 | for (;;) { |
1813 | fd = open(path, O_RDWR); |
1814 | if (fd >= 0) { |
1815 | /* @path names an existing file, use it */ |
1816 | break; |
1817 | } |
1818 | if (errno == ENOENT) { |
1819 | /* @path names a file that doesn't exist, create it */ |
1820 | fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644); |
1821 | if (fd >= 0) { |
1822 | *created = true; |
1823 | break; |
1824 | } |
1825 | } else if (errno == EISDIR) { |
1826 | /* @path names a directory, create a file there */ |
1827 | /* Make name safe to use with mkstemp by replacing '/' with '_'. */ |
1828 | sanitized_name = g_strdup(region_name); |
1829 | for (c = sanitized_name; *c != '\0'; c++) { |
1830 | if (*c == '/') { |
1831 | *c = '_'; |
1832 | } |
1833 | } |
1834 | |
1835 | filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX" , path, |
1836 | sanitized_name); |
1837 | g_free(sanitized_name); |
1838 | |
1839 | fd = mkstemp(filename); |
1840 | if (fd >= 0) { |
1841 | unlink(filename); |
1842 | g_free(filename); |
1843 | break; |
1844 | } |
1845 | g_free(filename); |
1846 | } |
1847 | if (errno != EEXIST && errno != EINTR) { |
1848 | error_setg_errno(errp, errno, |
1849 | "can't open backing store %s for guest RAM" , |
1850 | path); |
1851 | return -1; |
1852 | } |
1853 | /* |
1854 | * Try again on EINTR and EEXIST. The latter happens when |
1855 | * something else creates the file between our two open(). |
1856 | */ |
1857 | } |
1858 | |
1859 | return fd; |
1860 | } |
1861 | |
1862 | static void *file_ram_alloc(RAMBlock *block, |
1863 | ram_addr_t memory, |
1864 | int fd, |
1865 | bool truncate, |
1866 | Error **errp) |
1867 | { |
1868 | MachineState *ms = MACHINE(qdev_get_machine()); |
1869 | void *area; |
1870 | |
1871 | block->page_size = qemu_fd_getpagesize(fd); |
1872 | if (block->mr->align % block->page_size) { |
1873 | error_setg(errp, "alignment 0x%" PRIx64 |
1874 | " must be multiples of page size 0x%zx" , |
1875 | block->mr->align, block->page_size); |
1876 | return NULL; |
1877 | } else if (block->mr->align && !is_power_of_2(block->mr->align)) { |
1878 | error_setg(errp, "alignment 0x%" PRIx64 |
1879 | " must be a power of two" , block->mr->align); |
1880 | return NULL; |
1881 | } |
1882 | block->mr->align = MAX(block->page_size, block->mr->align); |
1883 | #if defined(__s390x__) |
1884 | if (kvm_enabled()) { |
1885 | block->mr->align = MAX(block->mr->align, QEMU_VMALLOC_ALIGN); |
1886 | } |
1887 | #endif |
1888 | |
1889 | if (memory < block->page_size) { |
1890 | error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to " |
1891 | "or larger than page size 0x%zx" , |
1892 | memory, block->page_size); |
1893 | return NULL; |
1894 | } |
1895 | |
1896 | memory = ROUND_UP(memory, block->page_size); |
1897 | |
1898 | /* |
1899 | * ftruncate is not supported by hugetlbfs in older |
1900 | * hosts, so don't bother bailing out on errors. |
1901 | * If anything goes wrong with it under other filesystems, |
1902 | * mmap will fail. |
1903 | * |
1904 | * Do not truncate the non-empty backend file to avoid corrupting |
1905 | * the existing data in the file. Disabling shrinking is not |
1906 | * enough. For example, the current vNVDIMM implementation stores |
1907 | * the guest NVDIMM labels at the end of the backend file. If the |
1908 | * backend file is later extended, QEMU will not be able to find |
1909 | * those labels. Therefore, extending the non-empty backend file |
1910 | * is disabled as well. |
1911 | */ |
1912 | if (truncate && ftruncate(fd, memory)) { |
1913 | perror("ftruncate" ); |
1914 | } |
1915 | |
1916 | area = qemu_ram_mmap(fd, memory, block->mr->align, |
1917 | block->flags & RAM_SHARED, block->flags & RAM_PMEM); |
1918 | if (area == MAP_FAILED) { |
1919 | error_setg_errno(errp, errno, |
1920 | "unable to map backing store for guest RAM" ); |
1921 | return NULL; |
1922 | } |
1923 | |
1924 | if (mem_prealloc) { |
1925 | os_mem_prealloc(fd, area, memory, ms->smp.cpus, errp); |
1926 | if (errp && *errp) { |
1927 | qemu_ram_munmap(fd, area, memory); |
1928 | return NULL; |
1929 | } |
1930 | } |
1931 | |
1932 | block->fd = fd; |
1933 | return area; |
1934 | } |
1935 | #endif |
1936 | |
1937 | /* Allocate space within the ram_addr_t space that governs the |
1938 | * dirty bitmaps. |
1939 | * Called with the ramlist lock held. |
1940 | */ |
1941 | static ram_addr_t find_ram_offset(ram_addr_t size) |
1942 | { |
1943 | RAMBlock *block, *next_block; |
1944 | ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX; |
1945 | |
1946 | assert(size != 0); /* it would hand out same offset multiple times */ |
1947 | |
1948 | if (QLIST_EMPTY_RCU(&ram_list.blocks)) { |
1949 | return 0; |
1950 | } |
1951 | |
1952 | RAMBLOCK_FOREACH(block) { |
1953 | ram_addr_t candidate, next = RAM_ADDR_MAX; |
1954 | |
1955 | /* Align blocks to start on a 'long' in the bitmap |
1956 | * which makes the bitmap sync'ing take the fast path. |
1957 | */ |
1958 | candidate = block->offset + block->max_length; |
1959 | candidate = ROUND_UP(candidate, BITS_PER_LONG << TARGET_PAGE_BITS); |
1960 | |
1961 | /* Search for the closest following block |
1962 | * and find the gap. |
1963 | */ |
1964 | RAMBLOCK_FOREACH(next_block) { |
1965 | if (next_block->offset >= candidate) { |
1966 | next = MIN(next, next_block->offset); |
1967 | } |
1968 | } |
1969 | |
1970 | /* If it fits remember our place and remember the size |
1971 | * of gap, but keep going so that we might find a smaller |
1972 | * gap to fill so avoiding fragmentation. |
1973 | */ |
1974 | if (next - candidate >= size && next - candidate < mingap) { |
1975 | offset = candidate; |
1976 | mingap = next - candidate; |
1977 | } |
1978 | |
1979 | trace_find_ram_offset_loop(size, candidate, offset, next, mingap); |
1980 | } |
1981 | |
1982 | if (offset == RAM_ADDR_MAX) { |
1983 | fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n" , |
1984 | (uint64_t)size); |
1985 | abort(); |
1986 | } |
1987 | |
1988 | trace_find_ram_offset(size, offset); |
1989 | |
1990 | return offset; |
1991 | } |
1992 | |
1993 | static unsigned long last_ram_page(void) |
1994 | { |
1995 | RAMBlock *block; |
1996 | ram_addr_t last = 0; |
1997 | |
1998 | rcu_read_lock(); |
1999 | RAMBLOCK_FOREACH(block) { |
2000 | last = MAX(last, block->offset + block->max_length); |
2001 | } |
2002 | rcu_read_unlock(); |
2003 | return last >> TARGET_PAGE_BITS; |
2004 | } |
2005 | |
2006 | static void qemu_ram_setup_dump(void *addr, ram_addr_t size) |
2007 | { |
2008 | int ret; |
2009 | |
2010 | /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */ |
2011 | if (!machine_dump_guest_core(current_machine)) { |
2012 | ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP); |
2013 | if (ret) { |
2014 | perror("qemu_madvise" ); |
2015 | fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, " |
2016 | "but dump_guest_core=off specified\n" ); |
2017 | } |
2018 | } |
2019 | } |
2020 | |
2021 | const char *qemu_ram_get_idstr(RAMBlock *rb) |
2022 | { |
2023 | return rb->idstr; |
2024 | } |
2025 | |
2026 | void *qemu_ram_get_host_addr(RAMBlock *rb) |
2027 | { |
2028 | return rb->host; |
2029 | } |
2030 | |
2031 | ram_addr_t qemu_ram_get_offset(RAMBlock *rb) |
2032 | { |
2033 | return rb->offset; |
2034 | } |
2035 | |
2036 | ram_addr_t qemu_ram_get_used_length(RAMBlock *rb) |
2037 | { |
2038 | return rb->used_length; |
2039 | } |
2040 | |
2041 | bool qemu_ram_is_shared(RAMBlock *rb) |
2042 | { |
2043 | return rb->flags & RAM_SHARED; |
2044 | } |
2045 | |
2046 | /* Note: Only set at the start of postcopy */ |
2047 | bool qemu_ram_is_uf_zeroable(RAMBlock *rb) |
2048 | { |
2049 | return rb->flags & RAM_UF_ZEROPAGE; |
2050 | } |
2051 | |
2052 | void qemu_ram_set_uf_zeroable(RAMBlock *rb) |
2053 | { |
2054 | rb->flags |= RAM_UF_ZEROPAGE; |
2055 | } |
2056 | |
2057 | bool qemu_ram_is_migratable(RAMBlock *rb) |
2058 | { |
2059 | return rb->flags & RAM_MIGRATABLE; |
2060 | } |
2061 | |
2062 | void qemu_ram_set_migratable(RAMBlock *rb) |
2063 | { |
2064 | rb->flags |= RAM_MIGRATABLE; |
2065 | } |
2066 | |
2067 | void qemu_ram_unset_migratable(RAMBlock *rb) |
2068 | { |
2069 | rb->flags &= ~RAM_MIGRATABLE; |
2070 | } |
2071 | |
2072 | /* Called with iothread lock held. */ |
2073 | void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev) |
2074 | { |
2075 | RAMBlock *block; |
2076 | |
2077 | assert(new_block); |
2078 | assert(!new_block->idstr[0]); |
2079 | |
2080 | if (dev) { |
2081 | char *id = qdev_get_dev_path(dev); |
2082 | if (id) { |
2083 | snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/" , id); |
2084 | g_free(id); |
2085 | } |
2086 | } |
2087 | pstrcat(new_block->idstr, sizeof(new_block->idstr), name); |
2088 | |
2089 | rcu_read_lock(); |
2090 | RAMBLOCK_FOREACH(block) { |
2091 | if (block != new_block && |
2092 | !strcmp(block->idstr, new_block->idstr)) { |
2093 | fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n" , |
2094 | new_block->idstr); |
2095 | abort(); |
2096 | } |
2097 | } |
2098 | rcu_read_unlock(); |
2099 | } |
2100 | |
2101 | /* Called with iothread lock held. */ |
2102 | void qemu_ram_unset_idstr(RAMBlock *block) |
2103 | { |
2104 | /* FIXME: arch_init.c assumes that this is not called throughout |
2105 | * migration. Ignore the problem since hot-unplug during migration |
2106 | * does not work anyway. |
2107 | */ |
2108 | if (block) { |
2109 | memset(block->idstr, 0, sizeof(block->idstr)); |
2110 | } |
2111 | } |
2112 | |
2113 | size_t qemu_ram_pagesize(RAMBlock *rb) |
2114 | { |
2115 | return rb->page_size; |
2116 | } |
2117 | |
2118 | /* Returns the largest size of page in use */ |
2119 | size_t qemu_ram_pagesize_largest(void) |
2120 | { |
2121 | RAMBlock *block; |
2122 | size_t largest = 0; |
2123 | |
2124 | RAMBLOCK_FOREACH(block) { |
2125 | largest = MAX(largest, qemu_ram_pagesize(block)); |
2126 | } |
2127 | |
2128 | return largest; |
2129 | } |
2130 | |
2131 | static int memory_try_enable_merging(void *addr, size_t len) |
2132 | { |
2133 | if (!machine_mem_merge(current_machine)) { |
2134 | /* disabled by the user */ |
2135 | return 0; |
2136 | } |
2137 | |
2138 | return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE); |
2139 | } |
2140 | |
2141 | /* Only legal before guest might have detected the memory size: e.g. on |
2142 | * incoming migration, or right after reset. |
2143 | * |
2144 | * As memory core doesn't know how is memory accessed, it is up to |
2145 | * resize callback to update device state and/or add assertions to detect |
2146 | * misuse, if necessary. |
2147 | */ |
2148 | int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp) |
2149 | { |
2150 | assert(block); |
2151 | |
2152 | newsize = HOST_PAGE_ALIGN(newsize); |
2153 | |
2154 | if (block->used_length == newsize) { |
2155 | return 0; |
2156 | } |
2157 | |
2158 | if (!(block->flags & RAM_RESIZEABLE)) { |
2159 | error_setg_errno(errp, EINVAL, |
2160 | "Length mismatch: %s: 0x" RAM_ADDR_FMT |
2161 | " in != 0x" RAM_ADDR_FMT, block->idstr, |
2162 | newsize, block->used_length); |
2163 | return -EINVAL; |
2164 | } |
2165 | |
2166 | if (block->max_length < newsize) { |
2167 | error_setg_errno(errp, EINVAL, |
2168 | "Length too large: %s: 0x" RAM_ADDR_FMT |
2169 | " > 0x" RAM_ADDR_FMT, block->idstr, |
2170 | newsize, block->max_length); |
2171 | return -EINVAL; |
2172 | } |
2173 | |
2174 | cpu_physical_memory_clear_dirty_range(block->offset, block->used_length); |
2175 | block->used_length = newsize; |
2176 | cpu_physical_memory_set_dirty_range(block->offset, block->used_length, |
2177 | DIRTY_CLIENTS_ALL); |
2178 | memory_region_set_size(block->mr, newsize); |
2179 | if (block->resized) { |
2180 | block->resized(block->idstr, newsize, block->host); |
2181 | } |
2182 | return 0; |
2183 | } |
2184 | |
2185 | /* Called with ram_list.mutex held */ |
2186 | static void dirty_memory_extend(ram_addr_t old_ram_size, |
2187 | ram_addr_t new_ram_size) |
2188 | { |
2189 | ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size, |
2190 | DIRTY_MEMORY_BLOCK_SIZE); |
2191 | ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size, |
2192 | DIRTY_MEMORY_BLOCK_SIZE); |
2193 | int i; |
2194 | |
2195 | /* Only need to extend if block count increased */ |
2196 | if (new_num_blocks <= old_num_blocks) { |
2197 | return; |
2198 | } |
2199 | |
2200 | for (i = 0; i < DIRTY_MEMORY_NUM; i++) { |
2201 | DirtyMemoryBlocks *old_blocks; |
2202 | DirtyMemoryBlocks *new_blocks; |
2203 | int j; |
2204 | |
2205 | old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]); |
2206 | new_blocks = g_malloc(sizeof(*new_blocks) + |
2207 | sizeof(new_blocks->blocks[0]) * new_num_blocks); |
2208 | |
2209 | if (old_num_blocks) { |
2210 | memcpy(new_blocks->blocks, old_blocks->blocks, |
2211 | old_num_blocks * sizeof(old_blocks->blocks[0])); |
2212 | } |
2213 | |
2214 | for (j = old_num_blocks; j < new_num_blocks; j++) { |
2215 | new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE); |
2216 | } |
2217 | |
2218 | atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks); |
2219 | |
2220 | if (old_blocks) { |
2221 | g_free_rcu(old_blocks, rcu); |
2222 | } |
2223 | } |
2224 | } |
2225 | |
2226 | static void ram_block_add(RAMBlock *new_block, Error **errp, bool shared) |
2227 | { |
2228 | RAMBlock *block; |
2229 | RAMBlock *last_block = NULL; |
2230 | ram_addr_t old_ram_size, new_ram_size; |
2231 | Error *err = NULL; |
2232 | |
2233 | old_ram_size = last_ram_page(); |
2234 | |
2235 | qemu_mutex_lock_ramlist(); |
2236 | new_block->offset = find_ram_offset(new_block->max_length); |
2237 | |
2238 | if (!new_block->host) { |
2239 | if (xen_enabled()) { |
2240 | xen_ram_alloc(new_block->offset, new_block->max_length, |
2241 | new_block->mr, &err); |
2242 | if (err) { |
2243 | error_propagate(errp, err); |
2244 | qemu_mutex_unlock_ramlist(); |
2245 | return; |
2246 | } |
2247 | } else { |
2248 | new_block->host = phys_mem_alloc(new_block->max_length, |
2249 | &new_block->mr->align, shared); |
2250 | if (!new_block->host) { |
2251 | error_setg_errno(errp, errno, |
2252 | "cannot set up guest memory '%s'" , |
2253 | memory_region_name(new_block->mr)); |
2254 | qemu_mutex_unlock_ramlist(); |
2255 | return; |
2256 | } |
2257 | memory_try_enable_merging(new_block->host, new_block->max_length); |
2258 | } |
2259 | } |
2260 | |
2261 | new_ram_size = MAX(old_ram_size, |
2262 | (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS); |
2263 | if (new_ram_size > old_ram_size) { |
2264 | dirty_memory_extend(old_ram_size, new_ram_size); |
2265 | } |
2266 | /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ, |
2267 | * QLIST (which has an RCU-friendly variant) does not have insertion at |
2268 | * tail, so save the last element in last_block. |
2269 | */ |
2270 | RAMBLOCK_FOREACH(block) { |
2271 | last_block = block; |
2272 | if (block->max_length < new_block->max_length) { |
2273 | break; |
2274 | } |
2275 | } |
2276 | if (block) { |
2277 | QLIST_INSERT_BEFORE_RCU(block, new_block, next); |
2278 | } else if (last_block) { |
2279 | QLIST_INSERT_AFTER_RCU(last_block, new_block, next); |
2280 | } else { /* list is empty */ |
2281 | QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next); |
2282 | } |
2283 | ram_list.mru_block = NULL; |
2284 | |
2285 | /* Write list before version */ |
2286 | smp_wmb(); |
2287 | ram_list.version++; |
2288 | qemu_mutex_unlock_ramlist(); |
2289 | |
2290 | cpu_physical_memory_set_dirty_range(new_block->offset, |
2291 | new_block->used_length, |
2292 | DIRTY_CLIENTS_ALL); |
2293 | |
2294 | if (new_block->host) { |
2295 | qemu_ram_setup_dump(new_block->host, new_block->max_length); |
2296 | qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE); |
2297 | /* MADV_DONTFORK is also needed by KVM in absence of synchronous MMU */ |
2298 | qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK); |
2299 | ram_block_notify_add(new_block->host, new_block->max_length); |
2300 | } |
2301 | } |
2302 | |
2303 | #ifdef CONFIG_POSIX |
2304 | RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr, |
2305 | uint32_t ram_flags, int fd, |
2306 | Error **errp) |
2307 | { |
2308 | RAMBlock *new_block; |
2309 | Error *local_err = NULL; |
2310 | int64_t file_size; |
2311 | |
2312 | /* Just support these ram flags by now. */ |
2313 | assert((ram_flags & ~(RAM_SHARED | RAM_PMEM)) == 0); |
2314 | |
2315 | if (xen_enabled()) { |
2316 | error_setg(errp, "-mem-path not supported with Xen" ); |
2317 | return NULL; |
2318 | } |
2319 | |
2320 | if (kvm_enabled() && !kvm_has_sync_mmu()) { |
2321 | error_setg(errp, |
2322 | "host lacks kvm mmu notifiers, -mem-path unsupported" ); |
2323 | return NULL; |
2324 | } |
2325 | |
2326 | if (phys_mem_alloc != qemu_anon_ram_alloc) { |
2327 | /* |
2328 | * file_ram_alloc() needs to allocate just like |
2329 | * phys_mem_alloc, but we haven't bothered to provide |
2330 | * a hook there. |
2331 | */ |
2332 | error_setg(errp, |
2333 | "-mem-path not supported with this accelerator" ); |
2334 | return NULL; |
2335 | } |
2336 | |
2337 | size = HOST_PAGE_ALIGN(size); |
2338 | file_size = get_file_size(fd); |
2339 | if (file_size > 0 && file_size < size) { |
2340 | error_setg(errp, "backing store %s size 0x%" PRIx64 |
2341 | " does not match 'size' option 0x" RAM_ADDR_FMT, |
2342 | mem_path, file_size, size); |
2343 | return NULL; |
2344 | } |
2345 | |
2346 | new_block = g_malloc0(sizeof(*new_block)); |
2347 | new_block->mr = mr; |
2348 | new_block->used_length = size; |
2349 | new_block->max_length = size; |
2350 | new_block->flags = ram_flags; |
2351 | new_block->host = file_ram_alloc(new_block, size, fd, !file_size, errp); |
2352 | if (!new_block->host) { |
2353 | g_free(new_block); |
2354 | return NULL; |
2355 | } |
2356 | |
2357 | ram_block_add(new_block, &local_err, ram_flags & RAM_SHARED); |
2358 | if (local_err) { |
2359 | g_free(new_block); |
2360 | error_propagate(errp, local_err); |
2361 | return NULL; |
2362 | } |
2363 | return new_block; |
2364 | |
2365 | } |
2366 | |
2367 | |
2368 | RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr, |
2369 | uint32_t ram_flags, const char *mem_path, |
2370 | Error **errp) |
2371 | { |
2372 | int fd; |
2373 | bool created; |
2374 | RAMBlock *block; |
2375 | |
2376 | fd = file_ram_open(mem_path, memory_region_name(mr), &created, errp); |
2377 | if (fd < 0) { |
2378 | return NULL; |
2379 | } |
2380 | |
2381 | block = qemu_ram_alloc_from_fd(size, mr, ram_flags, fd, errp); |
2382 | if (!block) { |
2383 | if (created) { |
2384 | unlink(mem_path); |
2385 | } |
2386 | close(fd); |
2387 | return NULL; |
2388 | } |
2389 | |
2390 | return block; |
2391 | } |
2392 | #endif |
2393 | |
2394 | static |
2395 | RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size, |
2396 | void (*resized)(const char*, |
2397 | uint64_t length, |
2398 | void *host), |
2399 | void *host, bool resizeable, bool share, |
2400 | MemoryRegion *mr, Error **errp) |
2401 | { |
2402 | RAMBlock *new_block; |
2403 | Error *local_err = NULL; |
2404 | |
2405 | size = HOST_PAGE_ALIGN(size); |
2406 | max_size = HOST_PAGE_ALIGN(max_size); |
2407 | new_block = g_malloc0(sizeof(*new_block)); |
2408 | new_block->mr = mr; |
2409 | new_block->resized = resized; |
2410 | new_block->used_length = size; |
2411 | new_block->max_length = max_size; |
2412 | assert(max_size >= size); |
2413 | new_block->fd = -1; |
2414 | new_block->page_size = getpagesize(); |
2415 | new_block->host = host; |
2416 | if (host) { |
2417 | new_block->flags |= RAM_PREALLOC; |
2418 | } |
2419 | if (resizeable) { |
2420 | new_block->flags |= RAM_RESIZEABLE; |
2421 | } |
2422 | ram_block_add(new_block, &local_err, share); |
2423 | if (local_err) { |
2424 | g_free(new_block); |
2425 | error_propagate(errp, local_err); |
2426 | return NULL; |
2427 | } |
2428 | return new_block; |
2429 | } |
2430 | |
2431 | RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host, |
2432 | MemoryRegion *mr, Error **errp) |
2433 | { |
2434 | return qemu_ram_alloc_internal(size, size, NULL, host, false, |
2435 | false, mr, errp); |
2436 | } |
2437 | |
2438 | RAMBlock *qemu_ram_alloc(ram_addr_t size, bool share, |
2439 | MemoryRegion *mr, Error **errp) |
2440 | { |
2441 | return qemu_ram_alloc_internal(size, size, NULL, NULL, false, |
2442 | share, mr, errp); |
2443 | } |
2444 | |
2445 | RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz, |
2446 | void (*resized)(const char*, |
2447 | uint64_t length, |
2448 | void *host), |
2449 | MemoryRegion *mr, Error **errp) |
2450 | { |
2451 | return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, |
2452 | false, mr, errp); |
2453 | } |
2454 | |
2455 | static void reclaim_ramblock(RAMBlock *block) |
2456 | { |
2457 | if (block->flags & RAM_PREALLOC) { |
2458 | ; |
2459 | } else if (xen_enabled()) { |
2460 | xen_invalidate_map_cache_entry(block->host); |
2461 | #ifndef _WIN32 |
2462 | } else if (block->fd >= 0) { |
2463 | qemu_ram_munmap(block->fd, block->host, block->max_length); |
2464 | close(block->fd); |
2465 | #endif |
2466 | } else { |
2467 | qemu_anon_ram_free(block->host, block->max_length); |
2468 | } |
2469 | g_free(block); |
2470 | } |
2471 | |
2472 | void qemu_ram_free(RAMBlock *block) |
2473 | { |
2474 | if (!block) { |
2475 | return; |
2476 | } |
2477 | |
2478 | if (block->host) { |
2479 | ram_block_notify_remove(block->host, block->max_length); |
2480 | } |
2481 | |
2482 | qemu_mutex_lock_ramlist(); |
2483 | QLIST_REMOVE_RCU(block, next); |
2484 | ram_list.mru_block = NULL; |
2485 | /* Write list before version */ |
2486 | smp_wmb(); |
2487 | ram_list.version++; |
2488 | call_rcu(block, reclaim_ramblock, rcu); |
2489 | qemu_mutex_unlock_ramlist(); |
2490 | } |
2491 | |
2492 | #ifndef _WIN32 |
2493 | void qemu_ram_remap(ram_addr_t addr, ram_addr_t length) |
2494 | { |
2495 | RAMBlock *block; |
2496 | ram_addr_t offset; |
2497 | int flags; |
2498 | void *area, *vaddr; |
2499 | |
2500 | RAMBLOCK_FOREACH(block) { |
2501 | offset = addr - block->offset; |
2502 | if (offset < block->max_length) { |
2503 | vaddr = ramblock_ptr(block, offset); |
2504 | if (block->flags & RAM_PREALLOC) { |
2505 | ; |
2506 | } else if (xen_enabled()) { |
2507 | abort(); |
2508 | } else { |
2509 | flags = MAP_FIXED; |
2510 | if (block->fd >= 0) { |
2511 | flags |= (block->flags & RAM_SHARED ? |
2512 | MAP_SHARED : MAP_PRIVATE); |
2513 | area = mmap(vaddr, length, PROT_READ | PROT_WRITE, |
2514 | flags, block->fd, offset); |
2515 | } else { |
2516 | /* |
2517 | * Remap needs to match alloc. Accelerators that |
2518 | * set phys_mem_alloc never remap. If they did, |
2519 | * we'd need a remap hook here. |
2520 | */ |
2521 | assert(phys_mem_alloc == qemu_anon_ram_alloc); |
2522 | |
2523 | flags |= MAP_PRIVATE | MAP_ANONYMOUS; |
2524 | area = mmap(vaddr, length, PROT_READ | PROT_WRITE, |
2525 | flags, -1, 0); |
2526 | } |
2527 | if (area != vaddr) { |
2528 | error_report("Could not remap addr: " |
2529 | RAM_ADDR_FMT "@" RAM_ADDR_FMT "" , |
2530 | length, addr); |
2531 | exit(1); |
2532 | } |
2533 | memory_try_enable_merging(vaddr, length); |
2534 | qemu_ram_setup_dump(vaddr, length); |
2535 | } |
2536 | } |
2537 | } |
2538 | } |
2539 | #endif /* !_WIN32 */ |
2540 | |
2541 | /* Return a host pointer to ram allocated with qemu_ram_alloc. |
2542 | * This should not be used for general purpose DMA. Use address_space_map |
2543 | * or address_space_rw instead. For local memory (e.g. video ram) that the |
2544 | * device owns, use memory_region_get_ram_ptr. |
2545 | * |
2546 | * Called within RCU critical section. |
2547 | */ |
2548 | void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr) |
2549 | { |
2550 | RAMBlock *block = ram_block; |
2551 | |
2552 | if (block == NULL) { |
2553 | block = qemu_get_ram_block(addr); |
2554 | addr -= block->offset; |
2555 | } |
2556 | |
2557 | if (xen_enabled() && block->host == NULL) { |
2558 | /* We need to check if the requested address is in the RAM |
2559 | * because we don't want to map the entire memory in QEMU. |
2560 | * In that case just map until the end of the page. |
2561 | */ |
2562 | if (block->offset == 0) { |
2563 | return xen_map_cache(addr, 0, 0, false); |
2564 | } |
2565 | |
2566 | block->host = xen_map_cache(block->offset, block->max_length, 1, false); |
2567 | } |
2568 | return ramblock_ptr(block, addr); |
2569 | } |
2570 | |
2571 | /* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr |
2572 | * but takes a size argument. |
2573 | * |
2574 | * Called within RCU critical section. |
2575 | */ |
2576 | static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr, |
2577 | hwaddr *size, bool lock) |
2578 | { |
2579 | RAMBlock *block = ram_block; |
2580 | if (*size == 0) { |
2581 | return NULL; |
2582 | } |
2583 | |
2584 | if (block == NULL) { |
2585 | block = qemu_get_ram_block(addr); |
2586 | addr -= block->offset; |
2587 | } |
2588 | *size = MIN(*size, block->max_length - addr); |
2589 | |
2590 | if (xen_enabled() && block->host == NULL) { |
2591 | /* We need to check if the requested address is in the RAM |
2592 | * because we don't want to map the entire memory in QEMU. |
2593 | * In that case just map the requested area. |
2594 | */ |
2595 | if (block->offset == 0) { |
2596 | return xen_map_cache(addr, *size, lock, lock); |
2597 | } |
2598 | |
2599 | block->host = xen_map_cache(block->offset, block->max_length, 1, lock); |
2600 | } |
2601 | |
2602 | return ramblock_ptr(block, addr); |
2603 | } |
2604 | |
2605 | /* Return the offset of a hostpointer within a ramblock */ |
2606 | ram_addr_t qemu_ram_block_host_offset(RAMBlock *rb, void *host) |
2607 | { |
2608 | ram_addr_t res = (uint8_t *)host - (uint8_t *)rb->host; |
2609 | assert((uintptr_t)host >= (uintptr_t)rb->host); |
2610 | assert(res < rb->max_length); |
2611 | |
2612 | return res; |
2613 | } |
2614 | |
2615 | /* |
2616 | * Translates a host ptr back to a RAMBlock, a ram_addr and an offset |
2617 | * in that RAMBlock. |
2618 | * |
2619 | * ptr: Host pointer to look up |
2620 | * round_offset: If true round the result offset down to a page boundary |
2621 | * *ram_addr: set to result ram_addr |
2622 | * *offset: set to result offset within the RAMBlock |
2623 | * |
2624 | * Returns: RAMBlock (or NULL if not found) |
2625 | * |
2626 | * By the time this function returns, the returned pointer is not protected |
2627 | * by RCU anymore. If the caller is not within an RCU critical section and |
2628 | * does not hold the iothread lock, it must have other means of protecting the |
2629 | * pointer, such as a reference to the region that includes the incoming |
2630 | * ram_addr_t. |
2631 | */ |
2632 | RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset, |
2633 | ram_addr_t *offset) |
2634 | { |
2635 | RAMBlock *block; |
2636 | uint8_t *host = ptr; |
2637 | |
2638 | if (xen_enabled()) { |
2639 | ram_addr_t ram_addr; |
2640 | rcu_read_lock(); |
2641 | ram_addr = xen_ram_addr_from_mapcache(ptr); |
2642 | block = qemu_get_ram_block(ram_addr); |
2643 | if (block) { |
2644 | *offset = ram_addr - block->offset; |
2645 | } |
2646 | rcu_read_unlock(); |
2647 | return block; |
2648 | } |
2649 | |
2650 | rcu_read_lock(); |
2651 | block = atomic_rcu_read(&ram_list.mru_block); |
2652 | if (block && block->host && host - block->host < block->max_length) { |
2653 | goto found; |
2654 | } |
2655 | |
2656 | RAMBLOCK_FOREACH(block) { |
2657 | /* This case append when the block is not mapped. */ |
2658 | if (block->host == NULL) { |
2659 | continue; |
2660 | } |
2661 | if (host - block->host < block->max_length) { |
2662 | goto found; |
2663 | } |
2664 | } |
2665 | |
2666 | rcu_read_unlock(); |
2667 | return NULL; |
2668 | |
2669 | found: |
2670 | *offset = (host - block->host); |
2671 | if (round_offset) { |
2672 | *offset &= TARGET_PAGE_MASK; |
2673 | } |
2674 | rcu_read_unlock(); |
2675 | return block; |
2676 | } |
2677 | |
2678 | /* |
2679 | * Finds the named RAMBlock |
2680 | * |
2681 | * name: The name of RAMBlock to find |
2682 | * |
2683 | * Returns: RAMBlock (or NULL if not found) |
2684 | */ |
2685 | RAMBlock *qemu_ram_block_by_name(const char *name) |
2686 | { |
2687 | RAMBlock *block; |
2688 | |
2689 | RAMBLOCK_FOREACH(block) { |
2690 | if (!strcmp(name, block->idstr)) { |
2691 | return block; |
2692 | } |
2693 | } |
2694 | |
2695 | return NULL; |
2696 | } |
2697 | |
2698 | /* Some of the softmmu routines need to translate from a host pointer |
2699 | (typically a TLB entry) back to a ram offset. */ |
2700 | ram_addr_t qemu_ram_addr_from_host(void *ptr) |
2701 | { |
2702 | RAMBlock *block; |
2703 | ram_addr_t offset; |
2704 | |
2705 | block = qemu_ram_block_from_host(ptr, false, &offset); |
2706 | if (!block) { |
2707 | return RAM_ADDR_INVALID; |
2708 | } |
2709 | |
2710 | return block->offset + offset; |
2711 | } |
2712 | |
2713 | /* Called within RCU critical section. */ |
2714 | void memory_notdirty_write_prepare(NotDirtyInfo *ndi, |
2715 | CPUState *cpu, |
2716 | vaddr mem_vaddr, |
2717 | ram_addr_t ram_addr, |
2718 | unsigned size) |
2719 | { |
2720 | ndi->cpu = cpu; |
2721 | ndi->ram_addr = ram_addr; |
2722 | ndi->mem_vaddr = mem_vaddr; |
2723 | ndi->size = size; |
2724 | ndi->pages = NULL; |
2725 | |
2726 | assert(tcg_enabled()); |
2727 | if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) { |
2728 | ndi->pages = page_collection_lock(ram_addr, ram_addr + size); |
2729 | tb_invalidate_phys_page_fast(ndi->pages, ram_addr, size); |
2730 | } |
2731 | } |
2732 | |
2733 | /* Called within RCU critical section. */ |
2734 | void memory_notdirty_write_complete(NotDirtyInfo *ndi) |
2735 | { |
2736 | if (ndi->pages) { |
2737 | assert(tcg_enabled()); |
2738 | page_collection_unlock(ndi->pages); |
2739 | ndi->pages = NULL; |
2740 | } |
2741 | |
2742 | /* Set both VGA and migration bits for simplicity and to remove |
2743 | * the notdirty callback faster. |
2744 | */ |
2745 | cpu_physical_memory_set_dirty_range(ndi->ram_addr, ndi->size, |
2746 | DIRTY_CLIENTS_NOCODE); |
2747 | /* we remove the notdirty callback only if the code has been |
2748 | flushed */ |
2749 | if (!cpu_physical_memory_is_clean(ndi->ram_addr)) { |
2750 | tlb_set_dirty(ndi->cpu, ndi->mem_vaddr); |
2751 | } |
2752 | } |
2753 | |
2754 | /* Called within RCU critical section. */ |
2755 | static void notdirty_mem_write(void *opaque, hwaddr ram_addr, |
2756 | uint64_t val, unsigned size) |
2757 | { |
2758 | NotDirtyInfo ndi; |
2759 | |
2760 | memory_notdirty_write_prepare(&ndi, current_cpu, current_cpu->mem_io_vaddr, |
2761 | ram_addr, size); |
2762 | |
2763 | stn_p(qemu_map_ram_ptr(NULL, ram_addr), size, val); |
2764 | memory_notdirty_write_complete(&ndi); |
2765 | } |
2766 | |
2767 | static bool notdirty_mem_accepts(void *opaque, hwaddr addr, |
2768 | unsigned size, bool is_write, |
2769 | MemTxAttrs attrs) |
2770 | { |
2771 | return is_write; |
2772 | } |
2773 | |
2774 | static const MemoryRegionOps notdirty_mem_ops = { |
2775 | .write = notdirty_mem_write, |
2776 | .valid.accepts = notdirty_mem_accepts, |
2777 | .endianness = DEVICE_NATIVE_ENDIAN, |
2778 | .valid = { |
2779 | .min_access_size = 1, |
2780 | .max_access_size = 8, |
2781 | .unaligned = false, |
2782 | }, |
2783 | .impl = { |
2784 | .min_access_size = 1, |
2785 | .max_access_size = 8, |
2786 | .unaligned = false, |
2787 | }, |
2788 | }; |
2789 | |
2790 | /* Generate a debug exception if a watchpoint has been hit. */ |
2791 | void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, |
2792 | MemTxAttrs attrs, int flags, uintptr_t ra) |
2793 | { |
2794 | CPUClass *cc = CPU_GET_CLASS(cpu); |
2795 | CPUWatchpoint *wp; |
2796 | |
2797 | assert(tcg_enabled()); |
2798 | if (cpu->watchpoint_hit) { |
2799 | /* |
2800 | * We re-entered the check after replacing the TB. |
2801 | * Now raise the debug interrupt so that it will |
2802 | * trigger after the current instruction. |
2803 | */ |
2804 | qemu_mutex_lock_iothread(); |
2805 | cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG); |
2806 | qemu_mutex_unlock_iothread(); |
2807 | return; |
2808 | } |
2809 | |
2810 | addr = cc->adjust_watchpoint_address(cpu, addr, len); |
2811 | QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) { |
2812 | if (watchpoint_address_matches(wp, addr, len) |
2813 | && (wp->flags & flags)) { |
2814 | if (flags == BP_MEM_READ) { |
2815 | wp->flags |= BP_WATCHPOINT_HIT_READ; |
2816 | } else { |
2817 | wp->flags |= BP_WATCHPOINT_HIT_WRITE; |
2818 | } |
2819 | wp->hitaddr = MAX(addr, wp->vaddr); |
2820 | wp->hitattrs = attrs; |
2821 | if (!cpu->watchpoint_hit) { |
2822 | if (wp->flags & BP_CPU && |
2823 | !cc->debug_check_watchpoint(cpu, wp)) { |
2824 | wp->flags &= ~BP_WATCHPOINT_HIT; |
2825 | continue; |
2826 | } |
2827 | cpu->watchpoint_hit = wp; |
2828 | |
2829 | mmap_lock(); |
2830 | tb_check_watchpoint(cpu); |
2831 | if (wp->flags & BP_STOP_BEFORE_ACCESS) { |
2832 | cpu->exception_index = EXCP_DEBUG; |
2833 | mmap_unlock(); |
2834 | cpu_loop_exit_restore(cpu, ra); |
2835 | } else { |
2836 | /* Force execution of one insn next time. */ |
2837 | cpu->cflags_next_tb = 1 | curr_cflags(); |
2838 | mmap_unlock(); |
2839 | if (ra) { |
2840 | cpu_restore_state(cpu, ra, true); |
2841 | } |
2842 | cpu_loop_exit_noexc(cpu); |
2843 | } |
2844 | } |
2845 | } else { |
2846 | wp->flags &= ~BP_WATCHPOINT_HIT; |
2847 | } |
2848 | } |
2849 | } |
2850 | |
2851 | static MemTxResult flatview_read(FlatView *fv, hwaddr addr, |
2852 | MemTxAttrs attrs, uint8_t *buf, hwaddr len); |
2853 | static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs, |
2854 | const uint8_t *buf, hwaddr len); |
2855 | static bool flatview_access_valid(FlatView *fv, hwaddr addr, hwaddr len, |
2856 | bool is_write, MemTxAttrs attrs); |
2857 | |
2858 | static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data, |
2859 | unsigned len, MemTxAttrs attrs) |
2860 | { |
2861 | subpage_t *subpage = opaque; |
2862 | uint8_t buf[8]; |
2863 | MemTxResult res; |
2864 | |
2865 | #if defined(DEBUG_SUBPAGE) |
2866 | printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n" , __func__, |
2867 | subpage, len, addr); |
2868 | #endif |
2869 | res = flatview_read(subpage->fv, addr + subpage->base, attrs, buf, len); |
2870 | if (res) { |
2871 | return res; |
2872 | } |
2873 | *data = ldn_p(buf, len); |
2874 | return MEMTX_OK; |
2875 | } |
2876 | |
2877 | static MemTxResult subpage_write(void *opaque, hwaddr addr, |
2878 | uint64_t value, unsigned len, MemTxAttrs attrs) |
2879 | { |
2880 | subpage_t *subpage = opaque; |
2881 | uint8_t buf[8]; |
2882 | |
2883 | #if defined(DEBUG_SUBPAGE) |
2884 | printf("%s: subpage %p len %u addr " TARGET_FMT_plx |
2885 | " value %" PRIx64"\n" , |
2886 | __func__, subpage, len, addr, value); |
2887 | #endif |
2888 | stn_p(buf, len, value); |
2889 | return flatview_write(subpage->fv, addr + subpage->base, attrs, buf, len); |
2890 | } |
2891 | |
2892 | static bool subpage_accepts(void *opaque, hwaddr addr, |
2893 | unsigned len, bool is_write, |
2894 | MemTxAttrs attrs) |
2895 | { |
2896 | subpage_t *subpage = opaque; |
2897 | #if defined(DEBUG_SUBPAGE) |
2898 | printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n" , |
2899 | __func__, subpage, is_write ? 'w' : 'r', len, addr); |
2900 | #endif |
2901 | |
2902 | return flatview_access_valid(subpage->fv, addr + subpage->base, |
2903 | len, is_write, attrs); |
2904 | } |
2905 | |
2906 | static const MemoryRegionOps subpage_ops = { |
2907 | .read_with_attrs = subpage_read, |
2908 | .write_with_attrs = subpage_write, |
2909 | .impl.min_access_size = 1, |
2910 | .impl.max_access_size = 8, |
2911 | .valid.min_access_size = 1, |
2912 | .valid.max_access_size = 8, |
2913 | .valid.accepts = subpage_accepts, |
2914 | .endianness = DEVICE_NATIVE_ENDIAN, |
2915 | }; |
2916 | |
2917 | static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end, |
2918 | uint16_t section) |
2919 | { |
2920 | int idx, eidx; |
2921 | |
2922 | if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) |
2923 | return -1; |
2924 | idx = SUBPAGE_IDX(start); |
2925 | eidx = SUBPAGE_IDX(end); |
2926 | #if defined(DEBUG_SUBPAGE) |
2927 | printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n" , |
2928 | __func__, mmio, start, end, idx, eidx, section); |
2929 | #endif |
2930 | for (; idx <= eidx; idx++) { |
2931 | mmio->sub_section[idx] = section; |
2932 | } |
2933 | |
2934 | return 0; |
2935 | } |
2936 | |
2937 | static subpage_t *subpage_init(FlatView *fv, hwaddr base) |
2938 | { |
2939 | subpage_t *mmio; |
2940 | |
2941 | mmio = g_malloc0(sizeof(subpage_t) + TARGET_PAGE_SIZE * sizeof(uint16_t)); |
2942 | mmio->fv = fv; |
2943 | mmio->base = base; |
2944 | memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio, |
2945 | NULL, TARGET_PAGE_SIZE); |
2946 | mmio->iomem.subpage = true; |
2947 | #if defined(DEBUG_SUBPAGE) |
2948 | printf("%s: %p base " TARGET_FMT_plx " len %08x\n" , __func__, |
2949 | mmio, base, TARGET_PAGE_SIZE); |
2950 | #endif |
2951 | subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED); |
2952 | |
2953 | return mmio; |
2954 | } |
2955 | |
2956 | static uint16_t dummy_section(PhysPageMap *map, FlatView *fv, MemoryRegion *mr) |
2957 | { |
2958 | assert(fv); |
2959 | MemoryRegionSection section = { |
2960 | .fv = fv, |
2961 | .mr = mr, |
2962 | .offset_within_address_space = 0, |
2963 | .offset_within_region = 0, |
2964 | .size = int128_2_64(), |
2965 | }; |
2966 | |
2967 | return phys_section_add(map, §ion); |
2968 | } |
2969 | |
2970 | static void readonly_mem_write(void *opaque, hwaddr addr, |
2971 | uint64_t val, unsigned size) |
2972 | { |
2973 | /* Ignore any write to ROM. */ |
2974 | } |
2975 | |
2976 | static bool readonly_mem_accepts(void *opaque, hwaddr addr, |
2977 | unsigned size, bool is_write, |
2978 | MemTxAttrs attrs) |
2979 | { |
2980 | return is_write; |
2981 | } |
2982 | |
2983 | /* This will only be used for writes, because reads are special cased |
2984 | * to directly access the underlying host ram. |
2985 | */ |
2986 | static const MemoryRegionOps readonly_mem_ops = { |
2987 | .write = readonly_mem_write, |
2988 | .valid.accepts = readonly_mem_accepts, |
2989 | .endianness = DEVICE_NATIVE_ENDIAN, |
2990 | .valid = { |
2991 | .min_access_size = 1, |
2992 | .max_access_size = 8, |
2993 | .unaligned = false, |
2994 | }, |
2995 | .impl = { |
2996 | .min_access_size = 1, |
2997 | .max_access_size = 8, |
2998 | .unaligned = false, |
2999 | }, |
3000 | }; |
3001 | |
3002 | MemoryRegionSection *iotlb_to_section(CPUState *cpu, |
3003 | hwaddr index, MemTxAttrs attrs) |
3004 | { |
3005 | int asidx = cpu_asidx_from_attrs(cpu, attrs); |
3006 | CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx]; |
3007 | AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch); |
3008 | MemoryRegionSection *sections = d->map.sections; |
3009 | |
3010 | return §ions[index & ~TARGET_PAGE_MASK]; |
3011 | } |
3012 | |
3013 | static void io_mem_init(void) |
3014 | { |
3015 | memory_region_init_io(&io_mem_rom, NULL, &readonly_mem_ops, |
3016 | NULL, NULL, UINT64_MAX); |
3017 | memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL, |
3018 | NULL, UINT64_MAX); |
3019 | |
3020 | /* io_mem_notdirty calls tb_invalidate_phys_page_fast, |
3021 | * which can be called without the iothread mutex. |
3022 | */ |
3023 | memory_region_init_io(&io_mem_notdirty, NULL, ¬dirty_mem_ops, NULL, |
3024 | NULL, UINT64_MAX); |
3025 | memory_region_clear_global_locking(&io_mem_notdirty); |
3026 | } |
3027 | |
3028 | AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv) |
3029 | { |
3030 | AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1); |
3031 | uint16_t n; |
3032 | |
3033 | n = dummy_section(&d->map, fv, &io_mem_unassigned); |
3034 | assert(n == PHYS_SECTION_UNASSIGNED); |
3035 | n = dummy_section(&d->map, fv, &io_mem_notdirty); |
3036 | assert(n == PHYS_SECTION_NOTDIRTY); |
3037 | n = dummy_section(&d->map, fv, &io_mem_rom); |
3038 | assert(n == PHYS_SECTION_ROM); |
3039 | |
3040 | d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 }; |
3041 | |
3042 | return d; |
3043 | } |
3044 | |
3045 | void address_space_dispatch_free(AddressSpaceDispatch *d) |
3046 | { |
3047 | phys_sections_free(&d->map); |
3048 | g_free(d); |
3049 | } |
3050 | |
3051 | static void do_nothing(CPUState *cpu, run_on_cpu_data d) |
3052 | { |
3053 | } |
3054 | |
3055 | static void tcg_log_global_after_sync(MemoryListener *listener) |
3056 | { |
3057 | CPUAddressSpace *cpuas; |
3058 | |
3059 | /* Wait for the CPU to end the current TB. This avoids the following |
3060 | * incorrect race: |
3061 | * |
3062 | * vCPU migration |
3063 | * ---------------------- ------------------------- |
3064 | * TLB check -> slow path |
3065 | * notdirty_mem_write |
3066 | * write to RAM |
3067 | * mark dirty |
3068 | * clear dirty flag |
3069 | * TLB check -> fast path |
3070 | * read memory |
3071 | * write to RAM |
3072 | * |
3073 | * by pushing the migration thread's memory read after the vCPU thread has |
3074 | * written the memory. |
3075 | */ |
3076 | cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener); |
3077 | run_on_cpu(cpuas->cpu, do_nothing, RUN_ON_CPU_NULL); |
3078 | } |
3079 | |
3080 | static void tcg_commit(MemoryListener *listener) |
3081 | { |
3082 | CPUAddressSpace *cpuas; |
3083 | AddressSpaceDispatch *d; |
3084 | |
3085 | assert(tcg_enabled()); |
3086 | /* since each CPU stores ram addresses in its TLB cache, we must |
3087 | reset the modified entries */ |
3088 | cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener); |
3089 | cpu_reloading_memory_map(); |
3090 | /* The CPU and TLB are protected by the iothread lock. |
3091 | * We reload the dispatch pointer now because cpu_reloading_memory_map() |
3092 | * may have split the RCU critical section. |
3093 | */ |
3094 | d = address_space_to_dispatch(cpuas->as); |
3095 | atomic_rcu_set(&cpuas->memory_dispatch, d); |
3096 | tlb_flush(cpuas->cpu); |
3097 | } |
3098 | |
3099 | static void memory_map_init(void) |
3100 | { |
3101 | system_memory = g_malloc(sizeof(*system_memory)); |
3102 | |
3103 | memory_region_init(system_memory, NULL, "system" , UINT64_MAX); |
3104 | address_space_init(&address_space_memory, system_memory, "memory" ); |
3105 | |
3106 | system_io = g_malloc(sizeof(*system_io)); |
3107 | memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io" , |
3108 | 65536); |
3109 | address_space_init(&address_space_io, system_io, "I/O" ); |
3110 | } |
3111 | |
3112 | MemoryRegion *get_system_memory(void) |
3113 | { |
3114 | return system_memory; |
3115 | } |
3116 | |
3117 | MemoryRegion *get_system_io(void) |
3118 | { |
3119 | return system_io; |
3120 | } |
3121 | |
3122 | #endif /* !defined(CONFIG_USER_ONLY) */ |
3123 | |
3124 | /* physical memory access (slow version, mainly for debug) */ |
3125 | #if defined(CONFIG_USER_ONLY) |
3126 | int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr, |
3127 | uint8_t *buf, target_ulong len, int is_write) |
3128 | { |
3129 | int flags; |
3130 | target_ulong l, page; |
3131 | void * p; |
3132 | |
3133 | while (len > 0) { |
3134 | page = addr & TARGET_PAGE_MASK; |
3135 | l = (page + TARGET_PAGE_SIZE) - addr; |
3136 | if (l > len) |
3137 | l = len; |
3138 | flags = page_get_flags(page); |
3139 | if (!(flags & PAGE_VALID)) |
3140 | return -1; |
3141 | if (is_write) { |
3142 | if (!(flags & PAGE_WRITE)) |
3143 | return -1; |
3144 | /* XXX: this code should not depend on lock_user */ |
3145 | if (!(p = lock_user(VERIFY_WRITE, addr, l, 0))) |
3146 | return -1; |
3147 | memcpy(p, buf, l); |
3148 | unlock_user(p, addr, l); |
3149 | } else { |
3150 | if (!(flags & PAGE_READ)) |
3151 | return -1; |
3152 | /* XXX: this code should not depend on lock_user */ |
3153 | if (!(p = lock_user(VERIFY_READ, addr, l, 1))) |
3154 | return -1; |
3155 | memcpy(buf, p, l); |
3156 | unlock_user(p, addr, 0); |
3157 | } |
3158 | len -= l; |
3159 | buf += l; |
3160 | addr += l; |
3161 | } |
3162 | return 0; |
3163 | } |
3164 | |
3165 | #else |
3166 | |
3167 | static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr, |
3168 | hwaddr length) |
3169 | { |
3170 | uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr); |
3171 | addr += memory_region_get_ram_addr(mr); |
3172 | |
3173 | /* No early return if dirty_log_mask is or becomes 0, because |
3174 | * cpu_physical_memory_set_dirty_range will still call |
3175 | * xen_modified_memory. |
3176 | */ |
3177 | if (dirty_log_mask) { |
3178 | dirty_log_mask = |
3179 | cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask); |
3180 | } |
3181 | if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) { |
3182 | assert(tcg_enabled()); |
3183 | tb_invalidate_phys_range(addr, addr + length); |
3184 | dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE); |
3185 | } |
3186 | cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask); |
3187 | } |
3188 | |
3189 | void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size) |
3190 | { |
3191 | /* |
3192 | * In principle this function would work on other memory region types too, |
3193 | * but the ROM device use case is the only one where this operation is |
3194 | * necessary. Other memory regions should use the |
3195 | * address_space_read/write() APIs. |
3196 | */ |
3197 | assert(memory_region_is_romd(mr)); |
3198 | |
3199 | invalidate_and_set_dirty(mr, addr, size); |
3200 | } |
3201 | |
3202 | static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr) |
3203 | { |
3204 | unsigned access_size_max = mr->ops->valid.max_access_size; |
3205 | |
3206 | /* Regions are assumed to support 1-4 byte accesses unless |
3207 | otherwise specified. */ |
3208 | if (access_size_max == 0) { |
3209 | access_size_max = 4; |
3210 | } |
3211 | |
3212 | /* Bound the maximum access by the alignment of the address. */ |
3213 | if (!mr->ops->impl.unaligned) { |
3214 | unsigned align_size_max = addr & -addr; |
3215 | if (align_size_max != 0 && align_size_max < access_size_max) { |
3216 | access_size_max = align_size_max; |
3217 | } |
3218 | } |
3219 | |
3220 | /* Don't attempt accesses larger than the maximum. */ |
3221 | if (l > access_size_max) { |
3222 | l = access_size_max; |
3223 | } |
3224 | l = pow2floor(l); |
3225 | |
3226 | return l; |
3227 | } |
3228 | |
3229 | static bool prepare_mmio_access(MemoryRegion *mr) |
3230 | { |
3231 | bool unlocked = !qemu_mutex_iothread_locked(); |
3232 | bool release_lock = false; |
3233 | |
3234 | if (unlocked && mr->global_locking) { |
3235 | qemu_mutex_lock_iothread(); |
3236 | unlocked = false; |
3237 | release_lock = true; |
3238 | } |
3239 | if (mr->flush_coalesced_mmio) { |
3240 | if (unlocked) { |
3241 | qemu_mutex_lock_iothread(); |
3242 | } |
3243 | qemu_flush_coalesced_mmio_buffer(); |
3244 | if (unlocked) { |
3245 | qemu_mutex_unlock_iothread(); |
3246 | } |
3247 | } |
3248 | |
3249 | return release_lock; |
3250 | } |
3251 | |
3252 | /* Called within RCU critical section. */ |
3253 | static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr, |
3254 | MemTxAttrs attrs, |
3255 | const uint8_t *buf, |
3256 | hwaddr len, hwaddr addr1, |
3257 | hwaddr l, MemoryRegion *mr) |
3258 | { |
3259 | uint8_t *ptr; |
3260 | uint64_t val; |
3261 | MemTxResult result = MEMTX_OK; |
3262 | bool release_lock = false; |
3263 | |
3264 | for (;;) { |
3265 | if (!memory_access_is_direct(mr, true)) { |
3266 | release_lock |= prepare_mmio_access(mr); |
3267 | l = memory_access_size(mr, l, addr1); |
3268 | /* XXX: could force current_cpu to NULL to avoid |
3269 | potential bugs */ |
3270 | val = ldn_he_p(buf, l); |
3271 | result |= memory_region_dispatch_write(mr, addr1, val, |
3272 | size_memop(l), attrs); |
3273 | } else { |
3274 | /* RAM case */ |
3275 | ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false); |
3276 | memcpy(ptr, buf, l); |
3277 | invalidate_and_set_dirty(mr, addr1, l); |
3278 | } |
3279 | |
3280 | if (release_lock) { |
3281 | qemu_mutex_unlock_iothread(); |
3282 | release_lock = false; |
3283 | } |
3284 | |
3285 | len -= l; |
3286 | buf += l; |
3287 | addr += l; |
3288 | |
3289 | if (!len) { |
3290 | break; |
3291 | } |
3292 | |
3293 | l = len; |
3294 | mr = flatview_translate(fv, addr, &addr1, &l, true, attrs); |
3295 | } |
3296 | |
3297 | return result; |
3298 | } |
3299 | |
3300 | /* Called from RCU critical section. */ |
3301 | static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs, |
3302 | const uint8_t *buf, hwaddr len) |
3303 | { |
3304 | hwaddr l; |
3305 | hwaddr addr1; |
3306 | MemoryRegion *mr; |
3307 | MemTxResult result = MEMTX_OK; |
3308 | |
3309 | l = len; |
3310 | mr = flatview_translate(fv, addr, &addr1, &l, true, attrs); |
3311 | result = flatview_write_continue(fv, addr, attrs, buf, len, |
3312 | addr1, l, mr); |
3313 | |
3314 | return result; |
3315 | } |
3316 | |
3317 | /* Called within RCU critical section. */ |
3318 | MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr, |
3319 | MemTxAttrs attrs, uint8_t *buf, |
3320 | hwaddr len, hwaddr addr1, hwaddr l, |
3321 | MemoryRegion *mr) |
3322 | { |
3323 | uint8_t *ptr; |
3324 | uint64_t val; |
3325 | MemTxResult result = MEMTX_OK; |
3326 | bool release_lock = false; |
3327 | |
3328 | for (;;) { |
3329 | if (!memory_access_is_direct(mr, false)) { |
3330 | /* I/O case */ |
3331 | release_lock |= prepare_mmio_access(mr); |
3332 | l = memory_access_size(mr, l, addr1); |
3333 | result |= memory_region_dispatch_read(mr, addr1, &val, |
3334 | size_memop(l), attrs); |
3335 | stn_he_p(buf, l, val); |
3336 | } else { |
3337 | /* RAM case */ |
3338 | ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false); |
3339 | memcpy(buf, ptr, l); |
3340 | } |
3341 | |
3342 | if (release_lock) { |
3343 | qemu_mutex_unlock_iothread(); |
3344 | release_lock = false; |
3345 | } |
3346 | |
3347 | len -= l; |
3348 | buf += l; |
3349 | addr += l; |
3350 | |
3351 | if (!len) { |
3352 | break; |
3353 | } |
3354 | |
3355 | l = len; |
3356 | mr = flatview_translate(fv, addr, &addr1, &l, false, attrs); |
3357 | } |
3358 | |
3359 | return result; |
3360 | } |
3361 | |
3362 | /* Called from RCU critical section. */ |
3363 | static MemTxResult flatview_read(FlatView *fv, hwaddr addr, |
3364 | MemTxAttrs attrs, uint8_t *buf, hwaddr len) |
3365 | { |
3366 | hwaddr l; |
3367 | hwaddr addr1; |
3368 | MemoryRegion *mr; |
3369 | |
3370 | l = len; |
3371 | mr = flatview_translate(fv, addr, &addr1, &l, false, attrs); |
3372 | return flatview_read_continue(fv, addr, attrs, buf, len, |
3373 | addr1, l, mr); |
3374 | } |
3375 | |
3376 | MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr, |
3377 | MemTxAttrs attrs, uint8_t *buf, hwaddr len) |
3378 | { |
3379 | MemTxResult result = MEMTX_OK; |
3380 | FlatView *fv; |
3381 | |
3382 | if (len > 0) { |
3383 | rcu_read_lock(); |
3384 | fv = address_space_to_flatview(as); |
3385 | result = flatview_read(fv, addr, attrs, buf, len); |
3386 | rcu_read_unlock(); |
3387 | } |
3388 | |
3389 | return result; |
3390 | } |
3391 | |
3392 | MemTxResult address_space_write(AddressSpace *as, hwaddr addr, |
3393 | MemTxAttrs attrs, |
3394 | const uint8_t *buf, hwaddr len) |
3395 | { |
3396 | MemTxResult result = MEMTX_OK; |
3397 | FlatView *fv; |
3398 | |
3399 | if (len > 0) { |
3400 | rcu_read_lock(); |
3401 | fv = address_space_to_flatview(as); |
3402 | result = flatview_write(fv, addr, attrs, buf, len); |
3403 | rcu_read_unlock(); |
3404 | } |
3405 | |
3406 | return result; |
3407 | } |
3408 | |
3409 | MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs, |
3410 | uint8_t *buf, hwaddr len, bool is_write) |
3411 | { |
3412 | if (is_write) { |
3413 | return address_space_write(as, addr, attrs, buf, len); |
3414 | } else { |
3415 | return address_space_read_full(as, addr, attrs, buf, len); |
3416 | } |
3417 | } |
3418 | |
3419 | void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf, |
3420 | hwaddr len, int is_write) |
3421 | { |
3422 | address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED, |
3423 | buf, len, is_write); |
3424 | } |
3425 | |
3426 | enum write_rom_type { |
3427 | WRITE_DATA, |
3428 | FLUSH_CACHE, |
3429 | }; |
3430 | |
3431 | static inline MemTxResult address_space_write_rom_internal(AddressSpace *as, |
3432 | hwaddr addr, |
3433 | MemTxAttrs attrs, |
3434 | const uint8_t *buf, |
3435 | hwaddr len, |
3436 | enum write_rom_type type) |
3437 | { |
3438 | hwaddr l; |
3439 | uint8_t *ptr; |
3440 | hwaddr addr1; |
3441 | MemoryRegion *mr; |
3442 | |
3443 | rcu_read_lock(); |
3444 | while (len > 0) { |
3445 | l = len; |
3446 | mr = address_space_translate(as, addr, &addr1, &l, true, attrs); |
3447 | |
3448 | if (!(memory_region_is_ram(mr) || |
3449 | memory_region_is_romd(mr))) { |
3450 | l = memory_access_size(mr, l, addr1); |
3451 | } else { |
3452 | /* ROM/RAM case */ |
3453 | ptr = qemu_map_ram_ptr(mr->ram_block, addr1); |
3454 | switch (type) { |
3455 | case WRITE_DATA: |
3456 | memcpy(ptr, buf, l); |
3457 | invalidate_and_set_dirty(mr, addr1, l); |
3458 | break; |
3459 | case FLUSH_CACHE: |
3460 | flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l); |
3461 | break; |
3462 | } |
3463 | } |
3464 | len -= l; |
3465 | buf += l; |
3466 | addr += l; |
3467 | } |
3468 | rcu_read_unlock(); |
3469 | return MEMTX_OK; |
3470 | } |
3471 | |
3472 | /* used for ROM loading : can write in RAM and ROM */ |
3473 | MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr, |
3474 | MemTxAttrs attrs, |
3475 | const uint8_t *buf, hwaddr len) |
3476 | { |
3477 | return address_space_write_rom_internal(as, addr, attrs, |
3478 | buf, len, WRITE_DATA); |
3479 | } |
3480 | |
3481 | void cpu_flush_icache_range(hwaddr start, hwaddr len) |
3482 | { |
3483 | /* |
3484 | * This function should do the same thing as an icache flush that was |
3485 | * triggered from within the guest. For TCG we are always cache coherent, |
3486 | * so there is no need to flush anything. For KVM / Xen we need to flush |
3487 | * the host's instruction cache at least. |
3488 | */ |
3489 | if (tcg_enabled()) { |
3490 | return; |
3491 | } |
3492 | |
3493 | address_space_write_rom_internal(&address_space_memory, |
3494 | start, MEMTXATTRS_UNSPECIFIED, |
3495 | NULL, len, FLUSH_CACHE); |
3496 | } |
3497 | |
3498 | typedef struct { |
3499 | MemoryRegion *mr; |
3500 | void *buffer; |
3501 | hwaddr addr; |
3502 | hwaddr len; |
3503 | bool in_use; |
3504 | } BounceBuffer; |
3505 | |
3506 | static BounceBuffer bounce; |
3507 | |
3508 | typedef struct MapClient { |
3509 | QEMUBH *bh; |
3510 | QLIST_ENTRY(MapClient) link; |
3511 | } MapClient; |
3512 | |
3513 | QemuMutex map_client_list_lock; |
3514 | static QLIST_HEAD(, MapClient) map_client_list |
3515 | = QLIST_HEAD_INITIALIZER(map_client_list); |
3516 | |
3517 | static void cpu_unregister_map_client_do(MapClient *client) |
3518 | { |
3519 | QLIST_REMOVE(client, link); |
3520 | g_free(client); |
3521 | } |
3522 | |
3523 | static void cpu_notify_map_clients_locked(void) |
3524 | { |
3525 | MapClient *client; |
3526 | |
3527 | while (!QLIST_EMPTY(&map_client_list)) { |
3528 | client = QLIST_FIRST(&map_client_list); |
3529 | qemu_bh_schedule(client->bh); |
3530 | cpu_unregister_map_client_do(client); |
3531 | } |
3532 | } |
3533 | |
3534 | void cpu_register_map_client(QEMUBH *bh) |
3535 | { |
3536 | MapClient *client = g_malloc(sizeof(*client)); |
3537 | |
3538 | qemu_mutex_lock(&map_client_list_lock); |
3539 | client->bh = bh; |
3540 | QLIST_INSERT_HEAD(&map_client_list, client, link); |
3541 | if (!atomic_read(&bounce.in_use)) { |
3542 | cpu_notify_map_clients_locked(); |
3543 | } |
3544 | qemu_mutex_unlock(&map_client_list_lock); |
3545 | } |
3546 | |
3547 | void cpu_exec_init_all(void) |
3548 | { |
3549 | qemu_mutex_init(&ram_list.mutex); |
3550 | /* The data structures we set up here depend on knowing the page size, |
3551 | * so no more changes can be made after this point. |
3552 | * In an ideal world, nothing we did before we had finished the |
3553 | * machine setup would care about the target page size, and we could |
3554 | * do this much later, rather than requiring board models to state |
3555 | * up front what their requirements are. |
3556 | */ |
3557 | finalize_target_page_bits(); |
3558 | io_mem_init(); |
3559 | memory_map_init(); |
3560 | qemu_mutex_init(&map_client_list_lock); |
3561 | } |
3562 | |
3563 | void cpu_unregister_map_client(QEMUBH *bh) |
3564 | { |
3565 | MapClient *client; |
3566 | |
3567 | qemu_mutex_lock(&map_client_list_lock); |
3568 | QLIST_FOREACH(client, &map_client_list, link) { |
3569 | if (client->bh == bh) { |
3570 | cpu_unregister_map_client_do(client); |
3571 | break; |
3572 | } |
3573 | } |
3574 | qemu_mutex_unlock(&map_client_list_lock); |
3575 | } |
3576 | |
3577 | static void cpu_notify_map_clients(void) |
3578 | { |
3579 | qemu_mutex_lock(&map_client_list_lock); |
3580 | cpu_notify_map_clients_locked(); |
3581 | qemu_mutex_unlock(&map_client_list_lock); |
3582 | } |
3583 | |
3584 | static bool flatview_access_valid(FlatView *fv, hwaddr addr, hwaddr len, |
3585 | bool is_write, MemTxAttrs attrs) |
3586 | { |
3587 | MemoryRegion *mr; |
3588 | hwaddr l, xlat; |
3589 | |
3590 | while (len > 0) { |
3591 | l = len; |
3592 | mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs); |
3593 | if (!memory_access_is_direct(mr, is_write)) { |
3594 | l = memory_access_size(mr, l, addr); |
3595 | if (!memory_region_access_valid(mr, xlat, l, is_write, attrs)) { |
3596 | return false; |
3597 | } |
3598 | } |
3599 | |
3600 | len -= l; |
3601 | addr += l; |
3602 | } |
3603 | return true; |
3604 | } |
3605 | |
3606 | bool address_space_access_valid(AddressSpace *as, hwaddr addr, |
3607 | hwaddr len, bool is_write, |
3608 | MemTxAttrs attrs) |
3609 | { |
3610 | FlatView *fv; |
3611 | bool result; |
3612 | |
3613 | rcu_read_lock(); |
3614 | fv = address_space_to_flatview(as); |
3615 | result = flatview_access_valid(fv, addr, len, is_write, attrs); |
3616 | rcu_read_unlock(); |
3617 | return result; |
3618 | } |
3619 | |
3620 | static hwaddr |
3621 | flatview_extend_translation(FlatView *fv, hwaddr addr, |
3622 | hwaddr target_len, |
3623 | MemoryRegion *mr, hwaddr base, hwaddr len, |
3624 | bool is_write, MemTxAttrs attrs) |
3625 | { |
3626 | hwaddr done = 0; |
3627 | hwaddr xlat; |
3628 | MemoryRegion *this_mr; |
3629 | |
3630 | for (;;) { |
3631 | target_len -= len; |
3632 | addr += len; |
3633 | done += len; |
3634 | if (target_len == 0) { |
3635 | return done; |
3636 | } |
3637 | |
3638 | len = target_len; |
3639 | this_mr = flatview_translate(fv, addr, &xlat, |
3640 | &len, is_write, attrs); |
3641 | if (this_mr != mr || xlat != base + done) { |
3642 | return done; |
3643 | } |
3644 | } |
3645 | } |
3646 | |
3647 | /* Map a physical memory region into a host virtual address. |
3648 | * May map a subset of the requested range, given by and returned in *plen. |
3649 | * May return NULL if resources needed to perform the mapping are exhausted. |
3650 | * Use only for reads OR writes - not for read-modify-write operations. |
3651 | * Use cpu_register_map_client() to know when retrying the map operation is |
3652 | * likely to succeed. |
3653 | */ |
3654 | void *address_space_map(AddressSpace *as, |
3655 | hwaddr addr, |
3656 | hwaddr *plen, |
3657 | bool is_write, |
3658 | MemTxAttrs attrs) |
3659 | { |
3660 | hwaddr len = *plen; |
3661 | hwaddr l, xlat; |
3662 | MemoryRegion *mr; |
3663 | void *ptr; |
3664 | FlatView *fv; |
3665 | |
3666 | if (len == 0) { |
3667 | return NULL; |
3668 | } |
3669 | |
3670 | l = len; |
3671 | rcu_read_lock(); |
3672 | fv = address_space_to_flatview(as); |
3673 | mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs); |
3674 | |
3675 | if (!memory_access_is_direct(mr, is_write)) { |
3676 | if (atomic_xchg(&bounce.in_use, true)) { |
3677 | rcu_read_unlock(); |
3678 | return NULL; |
3679 | } |
3680 | /* Avoid unbounded allocations */ |
3681 | l = MIN(l, TARGET_PAGE_SIZE); |
3682 | bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l); |
3683 | bounce.addr = addr; |
3684 | bounce.len = l; |
3685 | |
3686 | memory_region_ref(mr); |
3687 | bounce.mr = mr; |
3688 | if (!is_write) { |
3689 | flatview_read(fv, addr, MEMTXATTRS_UNSPECIFIED, |
3690 | bounce.buffer, l); |
3691 | } |
3692 | |
3693 | rcu_read_unlock(); |
3694 | *plen = l; |
3695 | return bounce.buffer; |
3696 | } |
3697 | |
3698 | |
3699 | memory_region_ref(mr); |
3700 | *plen = flatview_extend_translation(fv, addr, len, mr, xlat, |
3701 | l, is_write, attrs); |
3702 | ptr = qemu_ram_ptr_length(mr->ram_block, xlat, plen, true); |
3703 | rcu_read_unlock(); |
3704 | |
3705 | return ptr; |
3706 | } |
3707 | |
3708 | /* Unmaps a memory region previously mapped by address_space_map(). |
3709 | * Will also mark the memory as dirty if is_write == 1. access_len gives |
3710 | * the amount of memory that was actually read or written by the caller. |
3711 | */ |
3712 | void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len, |
3713 | int is_write, hwaddr access_len) |
3714 | { |
3715 | if (buffer != bounce.buffer) { |
3716 | MemoryRegion *mr; |
3717 | ram_addr_t addr1; |
3718 | |
3719 | mr = memory_region_from_host(buffer, &addr1); |
3720 | assert(mr != NULL); |
3721 | if (is_write) { |
3722 | invalidate_and_set_dirty(mr, addr1, access_len); |
3723 | } |
3724 | if (xen_enabled()) { |
3725 | xen_invalidate_map_cache_entry(buffer); |
3726 | } |
3727 | memory_region_unref(mr); |
3728 | return; |
3729 | } |
3730 | if (is_write) { |
3731 | address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED, |
3732 | bounce.buffer, access_len); |
3733 | } |
3734 | qemu_vfree(bounce.buffer); |
3735 | bounce.buffer = NULL; |
3736 | memory_region_unref(bounce.mr); |
3737 | atomic_mb_set(&bounce.in_use, false); |
3738 | cpu_notify_map_clients(); |
3739 | } |
3740 | |
3741 | void *cpu_physical_memory_map(hwaddr addr, |
3742 | hwaddr *plen, |
3743 | int is_write) |
3744 | { |
3745 | return address_space_map(&address_space_memory, addr, plen, is_write, |
3746 | MEMTXATTRS_UNSPECIFIED); |
3747 | } |
3748 | |
3749 | void cpu_physical_memory_unmap(void *buffer, hwaddr len, |
3750 | int is_write, hwaddr access_len) |
3751 | { |
3752 | return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len); |
3753 | } |
3754 | |
3755 | #define ARG1_DECL AddressSpace *as |
3756 | #define ARG1 as |
3757 | #define SUFFIX |
3758 | #define TRANSLATE(...) address_space_translate(as, __VA_ARGS__) |
3759 | #define RCU_READ_LOCK(...) rcu_read_lock() |
3760 | #define RCU_READ_UNLOCK(...) rcu_read_unlock() |
3761 | #include "memory_ldst.inc.c" |
3762 | |
3763 | int64_t address_space_cache_init(MemoryRegionCache *cache, |
3764 | AddressSpace *as, |
3765 | hwaddr addr, |
3766 | hwaddr len, |
3767 | bool is_write) |
3768 | { |
3769 | AddressSpaceDispatch *d; |
3770 | hwaddr l; |
3771 | MemoryRegion *mr; |
3772 | |
3773 | assert(len > 0); |
3774 | |
3775 | l = len; |
3776 | cache->fv = address_space_get_flatview(as); |
3777 | d = flatview_to_dispatch(cache->fv); |
3778 | cache->mrs = *address_space_translate_internal(d, addr, &cache->xlat, &l, true); |
3779 | |
3780 | mr = cache->mrs.mr; |
3781 | memory_region_ref(mr); |
3782 | if (memory_access_is_direct(mr, is_write)) { |
3783 | /* We don't care about the memory attributes here as we're only |
3784 | * doing this if we found actual RAM, which behaves the same |
3785 | * regardless of attributes; so UNSPECIFIED is fine. |
3786 | */ |
3787 | l = flatview_extend_translation(cache->fv, addr, len, mr, |
3788 | cache->xlat, l, is_write, |
3789 | MEMTXATTRS_UNSPECIFIED); |
3790 | cache->ptr = qemu_ram_ptr_length(mr->ram_block, cache->xlat, &l, true); |
3791 | } else { |
3792 | cache->ptr = NULL; |
3793 | } |
3794 | |
3795 | cache->len = l; |
3796 | cache->is_write = is_write; |
3797 | return l; |
3798 | } |
3799 | |
3800 | void address_space_cache_invalidate(MemoryRegionCache *cache, |
3801 | hwaddr addr, |
3802 | hwaddr access_len) |
3803 | { |
3804 | assert(cache->is_write); |
3805 | if (likely(cache->ptr)) { |
3806 | invalidate_and_set_dirty(cache->mrs.mr, addr + cache->xlat, access_len); |
3807 | } |
3808 | } |
3809 | |
3810 | void address_space_cache_destroy(MemoryRegionCache *cache) |
3811 | { |
3812 | if (!cache->mrs.mr) { |
3813 | return; |
3814 | } |
3815 | |
3816 | if (xen_enabled()) { |
3817 | xen_invalidate_map_cache_entry(cache->ptr); |
3818 | } |
3819 | memory_region_unref(cache->mrs.mr); |
3820 | flatview_unref(cache->fv); |
3821 | cache->mrs.mr = NULL; |
3822 | cache->fv = NULL; |
3823 | } |
3824 | |
3825 | /* Called from RCU critical section. This function has the same |
3826 | * semantics as address_space_translate, but it only works on a |
3827 | * predefined range of a MemoryRegion that was mapped with |
3828 | * address_space_cache_init. |
3829 | */ |
3830 | static inline MemoryRegion *address_space_translate_cached( |
3831 | MemoryRegionCache *cache, hwaddr addr, hwaddr *xlat, |
3832 | hwaddr *plen, bool is_write, MemTxAttrs attrs) |
3833 | { |
3834 | MemoryRegionSection section; |
3835 | MemoryRegion *mr; |
3836 | IOMMUMemoryRegion *iommu_mr; |
3837 | AddressSpace *target_as; |
3838 | |
3839 | assert(!cache->ptr); |
3840 | *xlat = addr + cache->xlat; |
3841 | |
3842 | mr = cache->mrs.mr; |
3843 | iommu_mr = memory_region_get_iommu(mr); |
3844 | if (!iommu_mr) { |
3845 | /* MMIO region. */ |
3846 | return mr; |
3847 | } |
3848 | |
3849 | section = address_space_translate_iommu(iommu_mr, xlat, plen, |
3850 | NULL, is_write, true, |
3851 | &target_as, attrs); |
3852 | return section.mr; |
3853 | } |
3854 | |
3855 | /* Called from RCU critical section. address_space_read_cached uses this |
3856 | * out of line function when the target is an MMIO or IOMMU region. |
3857 | */ |
3858 | void |
3859 | address_space_read_cached_slow(MemoryRegionCache *cache, hwaddr addr, |
3860 | void *buf, hwaddr len) |
3861 | { |
3862 | hwaddr addr1, l; |
3863 | MemoryRegion *mr; |
3864 | |
3865 | l = len; |
3866 | mr = address_space_translate_cached(cache, addr, &addr1, &l, false, |
3867 | MEMTXATTRS_UNSPECIFIED); |
3868 | flatview_read_continue(cache->fv, |
3869 | addr, MEMTXATTRS_UNSPECIFIED, buf, len, |
3870 | addr1, l, mr); |
3871 | } |
3872 | |
3873 | /* Called from RCU critical section. address_space_write_cached uses this |
3874 | * out of line function when the target is an MMIO or IOMMU region. |
3875 | */ |
3876 | void |
3877 | address_space_write_cached_slow(MemoryRegionCache *cache, hwaddr addr, |
3878 | const void *buf, hwaddr len) |
3879 | { |
3880 | hwaddr addr1, l; |
3881 | MemoryRegion *mr; |
3882 | |
3883 | l = len; |
3884 | mr = address_space_translate_cached(cache, addr, &addr1, &l, true, |
3885 | MEMTXATTRS_UNSPECIFIED); |
3886 | flatview_write_continue(cache->fv, |
3887 | addr, MEMTXATTRS_UNSPECIFIED, buf, len, |
3888 | addr1, l, mr); |
3889 | } |
3890 | |
3891 | #define ARG1_DECL MemoryRegionCache *cache |
3892 | #define ARG1 cache |
3893 | #define SUFFIX _cached_slow |
3894 | #define TRANSLATE(...) address_space_translate_cached(cache, __VA_ARGS__) |
3895 | #define RCU_READ_LOCK() ((void)0) |
3896 | #define RCU_READ_UNLOCK() ((void)0) |
3897 | #include "memory_ldst.inc.c" |
3898 | |
3899 | /* virtual memory access for debug (includes writing to ROM) */ |
3900 | int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr, |
3901 | uint8_t *buf, target_ulong len, int is_write) |
3902 | { |
3903 | hwaddr phys_addr; |
3904 | target_ulong l, page; |
3905 | |
3906 | cpu_synchronize_state(cpu); |
3907 | while (len > 0) { |
3908 | int asidx; |
3909 | MemTxAttrs attrs; |
3910 | |
3911 | page = addr & TARGET_PAGE_MASK; |
3912 | phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs); |
3913 | asidx = cpu_asidx_from_attrs(cpu, attrs); |
3914 | /* if no physical page mapped, return an error */ |
3915 | if (phys_addr == -1) |
3916 | return -1; |
3917 | l = (page + TARGET_PAGE_SIZE) - addr; |
3918 | if (l > len) |
3919 | l = len; |
3920 | phys_addr += (addr & ~TARGET_PAGE_MASK); |
3921 | if (is_write) { |
3922 | address_space_write_rom(cpu->cpu_ases[asidx].as, phys_addr, |
3923 | attrs, buf, l); |
3924 | } else { |
3925 | address_space_rw(cpu->cpu_ases[asidx].as, phys_addr, |
3926 | attrs, buf, l, 0); |
3927 | } |
3928 | len -= l; |
3929 | buf += l; |
3930 | addr += l; |
3931 | } |
3932 | return 0; |
3933 | } |
3934 | |
3935 | /* |
3936 | * Allows code that needs to deal with migration bitmaps etc to still be built |
3937 | * target independent. |
3938 | */ |
3939 | size_t qemu_target_page_size(void) |
3940 | { |
3941 | return TARGET_PAGE_SIZE; |
3942 | } |
3943 | |
3944 | int qemu_target_page_bits(void) |
3945 | { |
3946 | return TARGET_PAGE_BITS; |
3947 | } |
3948 | |
3949 | int qemu_target_page_bits_min(void) |
3950 | { |
3951 | return TARGET_PAGE_BITS_MIN; |
3952 | } |
3953 | #endif |
3954 | |
3955 | bool target_words_bigendian(void) |
3956 | { |
3957 | #if defined(TARGET_WORDS_BIGENDIAN) |
3958 | return true; |
3959 | #else |
3960 | return false; |
3961 | #endif |
3962 | } |
3963 | |
3964 | #ifndef CONFIG_USER_ONLY |
3965 | bool cpu_physical_memory_is_io(hwaddr phys_addr) |
3966 | { |
3967 | MemoryRegion*mr; |
3968 | hwaddr l = 1; |
3969 | bool res; |
3970 | |
3971 | rcu_read_lock(); |
3972 | mr = address_space_translate(&address_space_memory, |
3973 | phys_addr, &phys_addr, &l, false, |
3974 | MEMTXATTRS_UNSPECIFIED); |
3975 | |
3976 | res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr)); |
3977 | rcu_read_unlock(); |
3978 | return res; |
3979 | } |
3980 | |
3981 | int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque) |
3982 | { |
3983 | RAMBlock *block; |
3984 | int ret = 0; |
3985 | |
3986 | rcu_read_lock(); |
3987 | RAMBLOCK_FOREACH(block) { |
3988 | ret = func(block, opaque); |
3989 | if (ret) { |
3990 | break; |
3991 | } |
3992 | } |
3993 | rcu_read_unlock(); |
3994 | return ret; |
3995 | } |
3996 | |
3997 | /* |
3998 | * Unmap pages of memory from start to start+length such that |
3999 | * they a) read as 0, b) Trigger whatever fault mechanism |
4000 | * the OS provides for postcopy. |
4001 | * The pages must be unmapped by the end of the function. |
4002 | * Returns: 0 on success, none-0 on failure |
4003 | * |
4004 | */ |
4005 | int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length) |
4006 | { |
4007 | int ret = -1; |
4008 | |
4009 | uint8_t *host_startaddr = rb->host + start; |
4010 | |
4011 | if ((uintptr_t)host_startaddr & (rb->page_size - 1)) { |
4012 | error_report("ram_block_discard_range: Unaligned start address: %p" , |
4013 | host_startaddr); |
4014 | goto err; |
4015 | } |
4016 | |
4017 | if ((start + length) <= rb->used_length) { |
4018 | bool need_madvise, need_fallocate; |
4019 | uint8_t *host_endaddr = host_startaddr + length; |
4020 | if ((uintptr_t)host_endaddr & (rb->page_size - 1)) { |
4021 | error_report("ram_block_discard_range: Unaligned end address: %p" , |
4022 | host_endaddr); |
4023 | goto err; |
4024 | } |
4025 | |
4026 | errno = ENOTSUP; /* If we are missing MADVISE etc */ |
4027 | |
4028 | /* The logic here is messy; |
4029 | * madvise DONTNEED fails for hugepages |
4030 | * fallocate works on hugepages and shmem |
4031 | */ |
4032 | need_madvise = (rb->page_size == qemu_host_page_size); |
4033 | need_fallocate = rb->fd != -1; |
4034 | if (need_fallocate) { |
4035 | /* For a file, this causes the area of the file to be zero'd |
4036 | * if read, and for hugetlbfs also causes it to be unmapped |
4037 | * so a userfault will trigger. |
4038 | */ |
4039 | #ifdef CONFIG_FALLOCATE_PUNCH_HOLE |
4040 | ret = fallocate(rb->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, |
4041 | start, length); |
4042 | if (ret) { |
4043 | ret = -errno; |
4044 | error_report("ram_block_discard_range: Failed to fallocate " |
4045 | "%s:%" PRIx64 " +%zx (%d)" , |
4046 | rb->idstr, start, length, ret); |
4047 | goto err; |
4048 | } |
4049 | #else |
4050 | ret = -ENOSYS; |
4051 | error_report("ram_block_discard_range: fallocate not available/file" |
4052 | "%s:%" PRIx64 " +%zx (%d)" , |
4053 | rb->idstr, start, length, ret); |
4054 | goto err; |
4055 | #endif |
4056 | } |
4057 | if (need_madvise) { |
4058 | /* For normal RAM this causes it to be unmapped, |
4059 | * for shared memory it causes the local mapping to disappear |
4060 | * and to fall back on the file contents (which we just |
4061 | * fallocate'd away). |
4062 | */ |
4063 | #if defined(CONFIG_MADVISE) |
4064 | ret = madvise(host_startaddr, length, MADV_DONTNEED); |
4065 | if (ret) { |
4066 | ret = -errno; |
4067 | error_report("ram_block_discard_range: Failed to discard range " |
4068 | "%s:%" PRIx64 " +%zx (%d)" , |
4069 | rb->idstr, start, length, ret); |
4070 | goto err; |
4071 | } |
4072 | #else |
4073 | ret = -ENOSYS; |
4074 | error_report("ram_block_discard_range: MADVISE not available" |
4075 | "%s:%" PRIx64 " +%zx (%d)" , |
4076 | rb->idstr, start, length, ret); |
4077 | goto err; |
4078 | #endif |
4079 | } |
4080 | trace_ram_block_discard_range(rb->idstr, host_startaddr, length, |
4081 | need_madvise, need_fallocate, ret); |
4082 | } else { |
4083 | error_report("ram_block_discard_range: Overrun block '%s' (%" PRIu64 |
4084 | "/%zx/" RAM_ADDR_FMT")" , |
4085 | rb->idstr, start, length, rb->used_length); |
4086 | } |
4087 | |
4088 | err: |
4089 | return ret; |
4090 | } |
4091 | |
4092 | bool ramblock_is_pmem(RAMBlock *rb) |
4093 | { |
4094 | return rb->flags & RAM_PMEM; |
4095 | } |
4096 | |
4097 | #endif |
4098 | |
4099 | void page_size_init(void) |
4100 | { |
4101 | /* NOTE: we can always suppose that qemu_host_page_size >= |
4102 | TARGET_PAGE_SIZE */ |
4103 | if (qemu_host_page_size == 0) { |
4104 | qemu_host_page_size = qemu_real_host_page_size; |
4105 | } |
4106 | if (qemu_host_page_size < TARGET_PAGE_SIZE) { |
4107 | qemu_host_page_size = TARGET_PAGE_SIZE; |
4108 | } |
4109 | qemu_host_page_mask = -(intptr_t)qemu_host_page_size; |
4110 | } |
4111 | |
4112 | #if !defined(CONFIG_USER_ONLY) |
4113 | |
4114 | static void mtree_print_phys_entries(int start, int end, int skip, int ptr) |
4115 | { |
4116 | if (start == end - 1) { |
4117 | qemu_printf("\t%3d " , start); |
4118 | } else { |
4119 | qemu_printf("\t%3d..%-3d " , start, end - 1); |
4120 | } |
4121 | qemu_printf(" skip=%d " , skip); |
4122 | if (ptr == PHYS_MAP_NODE_NIL) { |
4123 | qemu_printf(" ptr=NIL" ); |
4124 | } else if (!skip) { |
4125 | qemu_printf(" ptr=#%d" , ptr); |
4126 | } else { |
4127 | qemu_printf(" ptr=[%d]" , ptr); |
4128 | } |
4129 | qemu_printf("\n" ); |
4130 | } |
4131 | |
4132 | #define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \ |
4133 | int128_sub((size), int128_one())) : 0) |
4134 | |
4135 | void mtree_print_dispatch(AddressSpaceDispatch *d, MemoryRegion *root) |
4136 | { |
4137 | int i; |
4138 | |
4139 | qemu_printf(" Dispatch\n" ); |
4140 | qemu_printf(" Physical sections\n" ); |
4141 | |
4142 | for (i = 0; i < d->map.sections_nb; ++i) { |
4143 | MemoryRegionSection *s = d->map.sections + i; |
4144 | const char *names[] = { " [unassigned]" , " [not dirty]" , |
4145 | " [ROM]" , " [watch]" }; |
4146 | |
4147 | qemu_printf(" #%d @" TARGET_FMT_plx ".." TARGET_FMT_plx |
4148 | " %s%s%s%s%s" , |
4149 | i, |
4150 | s->offset_within_address_space, |
4151 | s->offset_within_address_space + MR_SIZE(s->mr->size), |
4152 | s->mr->name ? s->mr->name : "(noname)" , |
4153 | i < ARRAY_SIZE(names) ? names[i] : "" , |
4154 | s->mr == root ? " [ROOT]" : "" , |
4155 | s == d->mru_section ? " [MRU]" : "" , |
4156 | s->mr->is_iommu ? " [iommu]" : "" ); |
4157 | |
4158 | if (s->mr->alias) { |
4159 | qemu_printf(" alias=%s" , s->mr->alias->name ? |
4160 | s->mr->alias->name : "noname" ); |
4161 | } |
4162 | qemu_printf("\n" ); |
4163 | } |
4164 | |
4165 | qemu_printf(" Nodes (%d bits per level, %d levels) ptr=[%d] skip=%d\n" , |
4166 | P_L2_BITS, P_L2_LEVELS, d->phys_map.ptr, d->phys_map.skip); |
4167 | for (i = 0; i < d->map.nodes_nb; ++i) { |
4168 | int j, jprev; |
4169 | PhysPageEntry prev; |
4170 | Node *n = d->map.nodes + i; |
4171 | |
4172 | qemu_printf(" [%d]\n" , i); |
4173 | |
4174 | for (j = 0, jprev = 0, prev = *n[0]; j < ARRAY_SIZE(*n); ++j) { |
4175 | PhysPageEntry *pe = *n + j; |
4176 | |
4177 | if (pe->ptr == prev.ptr && pe->skip == prev.skip) { |
4178 | continue; |
4179 | } |
4180 | |
4181 | mtree_print_phys_entries(jprev, j, prev.skip, prev.ptr); |
4182 | |
4183 | jprev = j; |
4184 | prev = *pe; |
4185 | } |
4186 | |
4187 | if (jprev != ARRAY_SIZE(*n)) { |
4188 | mtree_print_phys_entries(jprev, j, prev.skip, prev.ptr); |
4189 | } |
4190 | } |
4191 | } |
4192 | |
4193 | #endif |
4194 | |