1 | /* |
2 | * Physical memory management |
3 | * |
4 | * Copyright 2011 Red Hat, Inc. and/or its affiliates |
5 | * |
6 | * Authors: |
7 | * Avi Kivity <avi@redhat.com> |
8 | * |
9 | * This work is licensed under the terms of the GNU GPL, version 2. See |
10 | * the COPYING file in the top-level directory. |
11 | * |
12 | * Contributions after 2012-01-13 are licensed under the terms of the |
13 | * GNU GPL, version 2 or (at your option) any later version. |
14 | */ |
15 | |
16 | #include "qemu/osdep.h" |
17 | #include "qapi/error.h" |
18 | #include "cpu.h" |
19 | #include "exec/memory.h" |
20 | #include "exec/address-spaces.h" |
21 | #include "qapi/visitor.h" |
22 | #include "qemu/bitops.h" |
23 | #include "qemu/error-report.h" |
24 | #include "qemu/main-loop.h" |
25 | #include "qemu/qemu-print.h" |
26 | #include "qom/object.h" |
27 | #include "trace-root.h" |
28 | |
29 | #include "exec/memory-internal.h" |
30 | #include "exec/ram_addr.h" |
31 | #include "sysemu/kvm.h" |
32 | #include "sysemu/runstate.h" |
33 | #include "sysemu/tcg.h" |
34 | #include "sysemu/accel.h" |
35 | #include "hw/boards.h" |
36 | #include "migration/vmstate.h" |
37 | |
38 | //#define DEBUG_UNASSIGNED |
39 | |
40 | static unsigned memory_region_transaction_depth; |
41 | static bool memory_region_update_pending; |
42 | static bool ioeventfd_update_pending; |
43 | bool global_dirty_log; |
44 | |
45 | static QTAILQ_HEAD(, MemoryListener) memory_listeners |
46 | = QTAILQ_HEAD_INITIALIZER(memory_listeners); |
47 | |
48 | static QTAILQ_HEAD(, AddressSpace) address_spaces |
49 | = QTAILQ_HEAD_INITIALIZER(address_spaces); |
50 | |
51 | static GHashTable *flat_views; |
52 | |
53 | typedef struct AddrRange AddrRange; |
54 | |
55 | /* |
56 | * Note that signed integers are needed for negative offsetting in aliases |
57 | * (large MemoryRegion::alias_offset). |
58 | */ |
59 | struct AddrRange { |
60 | Int128 start; |
61 | Int128 size; |
62 | }; |
63 | |
64 | static AddrRange addrrange_make(Int128 start, Int128 size) |
65 | { |
66 | return (AddrRange) { start, size }; |
67 | } |
68 | |
69 | static bool addrrange_equal(AddrRange r1, AddrRange r2) |
70 | { |
71 | return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size); |
72 | } |
73 | |
74 | static Int128 addrrange_end(AddrRange r) |
75 | { |
76 | return int128_add(r.start, r.size); |
77 | } |
78 | |
79 | static AddrRange addrrange_shift(AddrRange range, Int128 delta) |
80 | { |
81 | int128_addto(&range.start, delta); |
82 | return range; |
83 | } |
84 | |
85 | static bool addrrange_contains(AddrRange range, Int128 addr) |
86 | { |
87 | return int128_ge(addr, range.start) |
88 | && int128_lt(addr, addrrange_end(range)); |
89 | } |
90 | |
91 | static bool addrrange_intersects(AddrRange r1, AddrRange r2) |
92 | { |
93 | return addrrange_contains(r1, r2.start) |
94 | || addrrange_contains(r2, r1.start); |
95 | } |
96 | |
97 | static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2) |
98 | { |
99 | Int128 start = int128_max(r1.start, r2.start); |
100 | Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2)); |
101 | return addrrange_make(start, int128_sub(end, start)); |
102 | } |
103 | |
104 | enum ListenerDirection { Forward, Reverse }; |
105 | |
106 | #define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...) \ |
107 | do { \ |
108 | MemoryListener *_listener; \ |
109 | \ |
110 | switch (_direction) { \ |
111 | case Forward: \ |
112 | QTAILQ_FOREACH(_listener, &memory_listeners, link) { \ |
113 | if (_listener->_callback) { \ |
114 | _listener->_callback(_listener, ##_args); \ |
115 | } \ |
116 | } \ |
117 | break; \ |
118 | case Reverse: \ |
119 | QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners, link) { \ |
120 | if (_listener->_callback) { \ |
121 | _listener->_callback(_listener, ##_args); \ |
122 | } \ |
123 | } \ |
124 | break; \ |
125 | default: \ |
126 | abort(); \ |
127 | } \ |
128 | } while (0) |
129 | |
130 | #define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \ |
131 | do { \ |
132 | MemoryListener *_listener; \ |
133 | \ |
134 | switch (_direction) { \ |
135 | case Forward: \ |
136 | QTAILQ_FOREACH(_listener, &(_as)->listeners, link_as) { \ |
137 | if (_listener->_callback) { \ |
138 | _listener->_callback(_listener, _section, ##_args); \ |
139 | } \ |
140 | } \ |
141 | break; \ |
142 | case Reverse: \ |
143 | QTAILQ_FOREACH_REVERSE(_listener, &(_as)->listeners, link_as) { \ |
144 | if (_listener->_callback) { \ |
145 | _listener->_callback(_listener, _section, ##_args); \ |
146 | } \ |
147 | } \ |
148 | break; \ |
149 | default: \ |
150 | abort(); \ |
151 | } \ |
152 | } while (0) |
153 | |
154 | /* No need to ref/unref .mr, the FlatRange keeps it alive. */ |
155 | #define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...) \ |
156 | do { \ |
157 | MemoryRegionSection mrs = section_from_flat_range(fr, \ |
158 | address_space_to_flatview(as)); \ |
159 | MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args); \ |
    } while (0)
161 | |
162 | struct CoalescedMemoryRange { |
163 | AddrRange addr; |
164 | QTAILQ_ENTRY(CoalescedMemoryRange) link; |
165 | }; |
166 | |
167 | struct MemoryRegionIoeventfd { |
168 | AddrRange addr; |
169 | bool match_data; |
170 | uint64_t data; |
171 | EventNotifier *e; |
172 | }; |
173 | |
174 | static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd *a, |
175 | MemoryRegionIoeventfd *b) |
176 | { |
177 | if (int128_lt(a->addr.start, b->addr.start)) { |
178 | return true; |
179 | } else if (int128_gt(a->addr.start, b->addr.start)) { |
180 | return false; |
181 | } else if (int128_lt(a->addr.size, b->addr.size)) { |
182 | return true; |
183 | } else if (int128_gt(a->addr.size, b->addr.size)) { |
184 | return false; |
185 | } else if (a->match_data < b->match_data) { |
186 | return true; |
187 | } else if (a->match_data > b->match_data) { |
188 | return false; |
189 | } else if (a->match_data) { |
190 | if (a->data < b->data) { |
191 | return true; |
192 | } else if (a->data > b->data) { |
193 | return false; |
194 | } |
195 | } |
196 | if (a->e < b->e) { |
197 | return true; |
198 | } else if (a->e > b->e) { |
199 | return false; |
200 | } |
201 | return false; |
202 | } |
203 | |
204 | static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd *a, |
205 | MemoryRegionIoeventfd *b) |
206 | { |
207 | return !memory_region_ioeventfd_before(a, b) |
208 | && !memory_region_ioeventfd_before(b, a); |
209 | } |
210 | |
211 | /* Range of memory in the global map. Addresses are absolute. */ |
212 | struct FlatRange { |
213 | MemoryRegion *mr; |
214 | hwaddr offset_in_region; |
215 | AddrRange addr; |
216 | uint8_t dirty_log_mask; |
217 | bool romd_mode; |
218 | bool readonly; |
219 | bool nonvolatile; |
220 | }; |
221 | |
222 | #define FOR_EACH_FLAT_RANGE(var, view) \ |
223 | for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var) |
224 | |
225 | static inline MemoryRegionSection |
226 | section_from_flat_range(FlatRange *fr, FlatView *fv) |
227 | { |
228 | return (MemoryRegionSection) { |
229 | .mr = fr->mr, |
230 | .fv = fv, |
231 | .offset_within_region = fr->offset_in_region, |
232 | .size = fr->addr.size, |
233 | .offset_within_address_space = int128_get64(fr->addr.start), |
234 | .readonly = fr->readonly, |
235 | .nonvolatile = fr->nonvolatile, |
236 | }; |
237 | } |
238 | |
239 | static bool flatrange_equal(FlatRange *a, FlatRange *b) |
240 | { |
241 | return a->mr == b->mr |
242 | && addrrange_equal(a->addr, b->addr) |
243 | && a->offset_in_region == b->offset_in_region |
244 | && a->romd_mode == b->romd_mode |
245 | && a->readonly == b->readonly |
246 | && a->nonvolatile == b->nonvolatile; |
247 | } |
248 | |
249 | static FlatView *flatview_new(MemoryRegion *mr_root) |
250 | { |
251 | FlatView *view; |
252 | |
253 | view = g_new0(FlatView, 1); |
254 | view->ref = 1; |
255 | view->root = mr_root; |
256 | memory_region_ref(mr_root); |
257 | trace_flatview_new(view, mr_root); |
258 | |
259 | return view; |
260 | } |
261 | |
262 | /* Insert a range into a given position. Caller is responsible for maintaining |
263 | * sorting order. |
264 | */ |
265 | static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range) |
266 | { |
267 | if (view->nr == view->nr_allocated) { |
268 | view->nr_allocated = MAX(2 * view->nr, 10); |
269 | view->ranges = g_realloc(view->ranges, |
270 | view->nr_allocated * sizeof(*view->ranges)); |
271 | } |
272 | memmove(view->ranges + pos + 1, view->ranges + pos, |
273 | (view->nr - pos) * sizeof(FlatRange)); |
274 | view->ranges[pos] = *range; |
275 | memory_region_ref(range->mr); |
276 | ++view->nr; |
277 | } |
278 | |
279 | static void flatview_destroy(FlatView *view) |
280 | { |
281 | int i; |
282 | |
283 | trace_flatview_destroy(view, view->root); |
284 | if (view->dispatch) { |
285 | address_space_dispatch_free(view->dispatch); |
286 | } |
287 | for (i = 0; i < view->nr; i++) { |
288 | memory_region_unref(view->ranges[i].mr); |
289 | } |
290 | g_free(view->ranges); |
291 | memory_region_unref(view->root); |
292 | g_free(view); |
293 | } |
294 | |
295 | static bool flatview_ref(FlatView *view) |
296 | { |
297 | return atomic_fetch_inc_nonzero(&view->ref) > 0; |
298 | } |
299 | |
300 | void flatview_unref(FlatView *view) |
301 | { |
302 | if (atomic_fetch_dec(&view->ref) == 1) { |
303 | trace_flatview_destroy_rcu(view, view->root); |
304 | assert(view->root); |
305 | call_rcu(view, flatview_destroy, rcu); |
306 | } |
307 | } |
308 | |
309 | static bool can_merge(FlatRange *r1, FlatRange *r2) |
310 | { |
311 | return int128_eq(addrrange_end(r1->addr), r2->addr.start) |
312 | && r1->mr == r2->mr |
313 | && int128_eq(int128_add(int128_make64(r1->offset_in_region), |
314 | r1->addr.size), |
315 | int128_make64(r2->offset_in_region)) |
316 | && r1->dirty_log_mask == r2->dirty_log_mask |
317 | && r1->romd_mode == r2->romd_mode |
318 | && r1->readonly == r2->readonly |
319 | && r1->nonvolatile == r2->nonvolatile; |
320 | } |
321 | |
322 | /* Attempt to simplify a view by merging adjacent ranges */ |
323 | static void flatview_simplify(FlatView *view) |
324 | { |
325 | unsigned i, j, k; |
326 | |
327 | i = 0; |
328 | while (i < view->nr) { |
329 | j = i + 1; |
330 | while (j < view->nr |
331 | && can_merge(&view->ranges[j-1], &view->ranges[j])) { |
332 | int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size); |
333 | ++j; |
334 | } |
335 | ++i; |
336 | for (k = i; k < j; k++) { |
337 | memory_region_unref(view->ranges[k].mr); |
338 | } |
339 | memmove(&view->ranges[i], &view->ranges[j], |
340 | (view->nr - j) * sizeof(view->ranges[j])); |
341 | view->nr -= j - i; |
342 | } |
343 | } |
344 | |
345 | static bool memory_region_big_endian(MemoryRegion *mr) |
346 | { |
347 | #ifdef TARGET_WORDS_BIGENDIAN |
348 | return mr->ops->endianness != DEVICE_LITTLE_ENDIAN; |
349 | #else |
350 | return mr->ops->endianness == DEVICE_BIG_ENDIAN; |
351 | #endif |
352 | } |
353 | |
354 | static void adjust_endianness(MemoryRegion *mr, uint64_t *data, MemOp op) |
355 | { |
356 | if ((op & MO_BSWAP) != devend_memop(mr->ops->endianness)) { |
357 | switch (op & MO_SIZE) { |
358 | case MO_8: |
359 | break; |
360 | case MO_16: |
361 | *data = bswap16(*data); |
362 | break; |
363 | case MO_32: |
364 | *data = bswap32(*data); |
365 | break; |
366 | case MO_64: |
367 | *data = bswap64(*data); |
368 | break; |
369 | default: |
370 | g_assert_not_reached(); |
371 | } |
372 | } |
373 | } |
374 | |
375 | static inline void memory_region_shift_read_access(uint64_t *value, |
376 | signed shift, |
377 | uint64_t mask, |
378 | uint64_t tmp) |
379 | { |
380 | if (shift >= 0) { |
381 | *value |= (tmp & mask) << shift; |
382 | } else { |
383 | *value |= (tmp & mask) >> -shift; |
384 | } |
385 | } |
386 | |
387 | static inline uint64_t memory_region_shift_write_access(uint64_t *value, |
388 | signed shift, |
389 | uint64_t mask) |
390 | { |
391 | uint64_t tmp; |
392 | |
393 | if (shift >= 0) { |
394 | tmp = (*value >> shift) & mask; |
395 | } else { |
396 | tmp = (*value << -shift) & mask; |
397 | } |
398 | |
399 | return tmp; |
400 | } |
401 | |
402 | static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset) |
403 | { |
404 | MemoryRegion *root; |
405 | hwaddr abs_addr = offset; |
406 | |
407 | abs_addr += mr->addr; |
408 | for (root = mr; root->container; ) { |
409 | root = root->container; |
410 | abs_addr += root->addr; |
411 | } |
412 | |
413 | return abs_addr; |
414 | } |
415 | |
416 | static int get_cpu_index(void) |
417 | { |
418 | if (current_cpu) { |
419 | return current_cpu->cpu_index; |
420 | } |
421 | return -1; |
422 | } |
423 | |
424 | static MemTxResult memory_region_read_accessor(MemoryRegion *mr, |
425 | hwaddr addr, |
426 | uint64_t *value, |
427 | unsigned size, |
428 | signed shift, |
429 | uint64_t mask, |
430 | MemTxAttrs attrs) |
431 | { |
432 | uint64_t tmp; |
433 | |
434 | tmp = mr->ops->read(mr->opaque, addr, size); |
435 | if (mr->subpage) { |
436 | trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size); |
437 | } else if (mr == &io_mem_notdirty) { |
438 | /* Accesses to code which has previously been translated into a TB show |
439 | * up in the MMIO path, as accesses to the io_mem_notdirty |
440 | * MemoryRegion. */ |
441 | trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size); |
442 | } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) { |
443 | hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr); |
444 | trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size); |
445 | } |
446 | memory_region_shift_read_access(value, shift, mask, tmp); |
447 | return MEMTX_OK; |
448 | } |
449 | |
450 | static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr, |
451 | hwaddr addr, |
452 | uint64_t *value, |
453 | unsigned size, |
454 | signed shift, |
455 | uint64_t mask, |
456 | MemTxAttrs attrs) |
457 | { |
458 | uint64_t tmp = 0; |
459 | MemTxResult r; |
460 | |
461 | r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs); |
462 | if (mr->subpage) { |
463 | trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size); |
464 | } else if (mr == &io_mem_notdirty) { |
465 | /* Accesses to code which has previously been translated into a TB show |
466 | * up in the MMIO path, as accesses to the io_mem_notdirty |
467 | * MemoryRegion. */ |
468 | trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size); |
469 | } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) { |
470 | hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr); |
471 | trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size); |
472 | } |
473 | memory_region_shift_read_access(value, shift, mask, tmp); |
474 | return r; |
475 | } |
476 | |
477 | static MemTxResult memory_region_write_accessor(MemoryRegion *mr, |
478 | hwaddr addr, |
479 | uint64_t *value, |
480 | unsigned size, |
481 | signed shift, |
482 | uint64_t mask, |
483 | MemTxAttrs attrs) |
484 | { |
485 | uint64_t tmp = memory_region_shift_write_access(value, shift, mask); |
486 | |
487 | if (mr->subpage) { |
488 | trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size); |
489 | } else if (mr == &io_mem_notdirty) { |
490 | /* Accesses to code which has previously been translated into a TB show |
491 | * up in the MMIO path, as accesses to the io_mem_notdirty |
492 | * MemoryRegion. */ |
493 | trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size); |
494 | } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) { |
495 | hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr); |
496 | trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size); |
497 | } |
498 | mr->ops->write(mr->opaque, addr, tmp, size); |
499 | return MEMTX_OK; |
500 | } |
501 | |
502 | static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr, |
503 | hwaddr addr, |
504 | uint64_t *value, |
505 | unsigned size, |
506 | signed shift, |
507 | uint64_t mask, |
508 | MemTxAttrs attrs) |
509 | { |
510 | uint64_t tmp = memory_region_shift_write_access(value, shift, mask); |
511 | |
512 | if (mr->subpage) { |
513 | trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size); |
514 | } else if (mr == &io_mem_notdirty) { |
515 | /* Accesses to code which has previously been translated into a TB show |
516 | * up in the MMIO path, as accesses to the io_mem_notdirty |
517 | * MemoryRegion. */ |
518 | trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size); |
519 | } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) { |
520 | hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr); |
521 | trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size); |
522 | } |
523 | return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs); |
524 | } |
525 | |
526 | static MemTxResult access_with_adjusted_size(hwaddr addr, |
527 | uint64_t *value, |
528 | unsigned size, |
529 | unsigned access_size_min, |
530 | unsigned access_size_max, |
531 | MemTxResult (*access_fn) |
532 | (MemoryRegion *mr, |
533 | hwaddr addr, |
534 | uint64_t *value, |
535 | unsigned size, |
536 | signed shift, |
537 | uint64_t mask, |
538 | MemTxAttrs attrs), |
539 | MemoryRegion *mr, |
540 | MemTxAttrs attrs) |
541 | { |
542 | uint64_t access_mask; |
543 | unsigned access_size; |
544 | unsigned i; |
545 | MemTxResult r = MEMTX_OK; |
546 | |
547 | if (!access_size_min) { |
548 | access_size_min = 1; |
549 | } |
550 | if (!access_size_max) { |
551 | access_size_max = 4; |
552 | } |
553 | |
554 | /* FIXME: support unaligned access? */ |
555 | access_size = MAX(MIN(size, access_size_max), access_size_min); |
556 | access_mask = MAKE_64BIT_MASK(0, access_size * 8); |
557 | if (memory_region_big_endian(mr)) { |
558 | for (i = 0; i < size; i += access_size) { |
559 | r |= access_fn(mr, addr + i, value, access_size, |
560 | (size - access_size - i) * 8, access_mask, attrs); |
561 | } |
562 | } else { |
563 | for (i = 0; i < size; i += access_size) { |
564 | r |= access_fn(mr, addr + i, value, access_size, i * 8, |
565 | access_mask, attrs); |
566 | } |
567 | } |
568 | return r; |
569 | } |
570 | |
571 | static AddressSpace *memory_region_to_address_space(MemoryRegion *mr) |
572 | { |
573 | AddressSpace *as; |
574 | |
575 | while (mr->container) { |
576 | mr = mr->container; |
577 | } |
578 | QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { |
579 | if (mr == as->root) { |
580 | return as; |
581 | } |
582 | } |
583 | return NULL; |
584 | } |
585 | |
586 | /* Render a memory region into the global view. Ranges in @view obscure |
587 | * ranges in @mr. |
588 | */ |
589 | static void render_memory_region(FlatView *view, |
590 | MemoryRegion *mr, |
591 | Int128 base, |
592 | AddrRange clip, |
593 | bool readonly, |
594 | bool nonvolatile) |
595 | { |
596 | MemoryRegion *subregion; |
597 | unsigned i; |
598 | hwaddr offset_in_region; |
599 | Int128 remain; |
600 | Int128 now; |
601 | FlatRange fr; |
602 | AddrRange tmp; |
603 | |
604 | if (!mr->enabled) { |
605 | return; |
606 | } |
607 | |
608 | int128_addto(&base, int128_make64(mr->addr)); |
609 | readonly |= mr->readonly; |
610 | nonvolatile |= mr->nonvolatile; |
611 | |
612 | tmp = addrrange_make(base, mr->size); |
613 | |
614 | if (!addrrange_intersects(tmp, clip)) { |
615 | return; |
616 | } |
617 | |
618 | clip = addrrange_intersection(tmp, clip); |
619 | |
620 | if (mr->alias) { |
621 | int128_subfrom(&base, int128_make64(mr->alias->addr)); |
622 | int128_subfrom(&base, int128_make64(mr->alias_offset)); |
623 | render_memory_region(view, mr->alias, base, clip, |
624 | readonly, nonvolatile); |
625 | return; |
626 | } |
627 | |
628 | /* Render subregions in priority order. */ |
629 | QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) { |
630 | render_memory_region(view, subregion, base, clip, |
631 | readonly, nonvolatile); |
632 | } |
633 | |
634 | if (!mr->terminates) { |
635 | return; |
636 | } |
637 | |
638 | offset_in_region = int128_get64(int128_sub(clip.start, base)); |
639 | base = clip.start; |
640 | remain = clip.size; |
641 | |
642 | fr.mr = mr; |
643 | fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr); |
644 | fr.romd_mode = mr->romd_mode; |
645 | fr.readonly = readonly; |
646 | fr.nonvolatile = nonvolatile; |
647 | |
648 | /* Render the region itself into any gaps left by the current view. */ |
649 | for (i = 0; i < view->nr && int128_nz(remain); ++i) { |
650 | if (int128_ge(base, addrrange_end(view->ranges[i].addr))) { |
651 | continue; |
652 | } |
653 | if (int128_lt(base, view->ranges[i].addr.start)) { |
654 | now = int128_min(remain, |
655 | int128_sub(view->ranges[i].addr.start, base)); |
656 | fr.offset_in_region = offset_in_region; |
657 | fr.addr = addrrange_make(base, now); |
658 | flatview_insert(view, i, &fr); |
659 | ++i; |
660 | int128_addto(&base, now); |
661 | offset_in_region += int128_get64(now); |
662 | int128_subfrom(&remain, now); |
663 | } |
664 | now = int128_sub(int128_min(int128_add(base, remain), |
665 | addrrange_end(view->ranges[i].addr)), |
666 | base); |
667 | int128_addto(&base, now); |
668 | offset_in_region += int128_get64(now); |
669 | int128_subfrom(&remain, now); |
670 | } |
671 | if (int128_nz(remain)) { |
672 | fr.offset_in_region = offset_in_region; |
673 | fr.addr = addrrange_make(base, remain); |
674 | flatview_insert(view, i, &fr); |
675 | } |
676 | } |
677 | |
678 | static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr) |
679 | { |
680 | while (mr->enabled) { |
681 | if (mr->alias) { |
682 | if (!mr->alias_offset && int128_ge(mr->size, mr->alias->size)) { |
683 | /* The alias is included in its entirety. Use it as |
684 | * the "real" root, so that we can share more FlatViews. |
685 | */ |
686 | mr = mr->alias; |
687 | continue; |
688 | } |
689 | } else if (!mr->terminates) { |
690 | unsigned int found = 0; |
691 | MemoryRegion *child, *next = NULL; |
692 | QTAILQ_FOREACH(child, &mr->subregions, subregions_link) { |
693 | if (child->enabled) { |
694 | if (++found > 1) { |
695 | next = NULL; |
696 | break; |
697 | } |
698 | if (!child->addr && int128_ge(mr->size, child->size)) { |
699 | /* A child is included in its entirety. If it's the only |
700 | * enabled one, use it in the hope of finding an alias down the |
701 | * way. This will also let us share FlatViews. |
702 | */ |
703 | next = child; |
704 | } |
705 | } |
706 | } |
707 | if (found == 0) { |
708 | return NULL; |
709 | } |
710 | if (next) { |
711 | mr = next; |
712 | continue; |
713 | } |
714 | } |
715 | |
716 | return mr; |
717 | } |
718 | |
719 | return NULL; |
720 | } |
721 | |
722 | /* Render a memory topology into a list of disjoint absolute ranges. */ |
723 | static FlatView *generate_memory_topology(MemoryRegion *mr) |
724 | { |
725 | int i; |
726 | FlatView *view; |
727 | |
728 | view = flatview_new(mr); |
729 | |
730 | if (mr) { |
731 | render_memory_region(view, mr, int128_zero(), |
732 | addrrange_make(int128_zero(), int128_2_64()), |
733 | false, false); |
734 | } |
735 | flatview_simplify(view); |
736 | |
737 | view->dispatch = address_space_dispatch_new(view); |
738 | for (i = 0; i < view->nr; i++) { |
739 | MemoryRegionSection mrs = |
740 | section_from_flat_range(&view->ranges[i], view); |
741 | flatview_add_to_dispatch(view, &mrs); |
742 | } |
743 | address_space_dispatch_compact(view->dispatch); |
744 | g_hash_table_replace(flat_views, mr, view); |
745 | |
746 | return view; |
747 | } |
748 | |
749 | static void address_space_add_del_ioeventfds(AddressSpace *as, |
750 | MemoryRegionIoeventfd *fds_new, |
751 | unsigned fds_new_nb, |
752 | MemoryRegionIoeventfd *fds_old, |
753 | unsigned fds_old_nb) |
754 | { |
755 | unsigned iold, inew; |
756 | MemoryRegionIoeventfd *fd; |
757 | MemoryRegionSection section; |
758 | |
759 | /* Generate a symmetric difference of the old and new fd sets, adding |
760 | * and deleting as necessary. |
761 | */ |
762 | |
763 | iold = inew = 0; |
764 | while (iold < fds_old_nb || inew < fds_new_nb) { |
765 | if (iold < fds_old_nb |
766 | && (inew == fds_new_nb |
767 | || memory_region_ioeventfd_before(&fds_old[iold], |
768 | &fds_new[inew]))) { |
769 | fd = &fds_old[iold]; |
770 | section = (MemoryRegionSection) { |
771 | .fv = address_space_to_flatview(as), |
772 | .offset_within_address_space = int128_get64(fd->addr.start), |
773 | .size = fd->addr.size, |
774 | }; |
            MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
776 | fd->match_data, fd->data, fd->e); |
777 | ++iold; |
778 | } else if (inew < fds_new_nb |
779 | && (iold == fds_old_nb |
780 | || memory_region_ioeventfd_before(&fds_new[inew], |
781 | &fds_old[iold]))) { |
782 | fd = &fds_new[inew]; |
783 | section = (MemoryRegionSection) { |
784 | .fv = address_space_to_flatview(as), |
785 | .offset_within_address_space = int128_get64(fd->addr.start), |
786 | .size = fd->addr.size, |
787 | }; |
            MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
789 | fd->match_data, fd->data, fd->e); |
790 | ++inew; |
791 | } else { |
792 | ++iold; |
793 | ++inew; |
794 | } |
795 | } |
796 | } |
797 | |
798 | FlatView *address_space_get_flatview(AddressSpace *as) |
799 | { |
800 | FlatView *view; |
801 | |
802 | rcu_read_lock(); |
803 | do { |
804 | view = address_space_to_flatview(as); |
805 | /* If somebody has replaced as->current_map concurrently, |
806 | * flatview_ref returns false. |
807 | */ |
808 | } while (!flatview_ref(view)); |
809 | rcu_read_unlock(); |
810 | return view; |
811 | } |
812 | |
813 | static void address_space_update_ioeventfds(AddressSpace *as) |
814 | { |
815 | FlatView *view; |
816 | FlatRange *fr; |
817 | unsigned ioeventfd_nb = 0; |
818 | MemoryRegionIoeventfd *ioeventfds = NULL; |
819 | AddrRange tmp; |
820 | unsigned i; |
821 | |
822 | view = address_space_get_flatview(as); |
823 | FOR_EACH_FLAT_RANGE(fr, view) { |
824 | for (i = 0; i < fr->mr->ioeventfd_nb; ++i) { |
825 | tmp = addrrange_shift(fr->mr->ioeventfds[i].addr, |
826 | int128_sub(fr->addr.start, |
827 | int128_make64(fr->offset_in_region))); |
828 | if (addrrange_intersects(fr->addr, tmp)) { |
829 | ++ioeventfd_nb; |
830 | ioeventfds = g_realloc(ioeventfds, |
831 | ioeventfd_nb * sizeof(*ioeventfds)); |
832 | ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i]; |
833 | ioeventfds[ioeventfd_nb-1].addr = tmp; |
834 | } |
835 | } |
836 | } |
837 | |
838 | address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb, |
839 | as->ioeventfds, as->ioeventfd_nb); |
840 | |
841 | g_free(as->ioeventfds); |
842 | as->ioeventfds = ioeventfds; |
843 | as->ioeventfd_nb = ioeventfd_nb; |
844 | flatview_unref(view); |
845 | } |
846 | |
847 | /* |
848 | * Notify the memory listeners about the coalesced IO change events of |
 * range `cmr'.  Only the part that intersects the specified FlatRange
 * is sent.
851 | */ |
852 | static void flat_range_coalesced_io_notify(FlatRange *fr, AddressSpace *as, |
853 | CoalescedMemoryRange *cmr, bool add) |
854 | { |
855 | AddrRange tmp; |
856 | |
857 | tmp = addrrange_shift(cmr->addr, |
858 | int128_sub(fr->addr.start, |
859 | int128_make64(fr->offset_in_region))); |
860 | if (!addrrange_intersects(tmp, fr->addr)) { |
861 | return; |
862 | } |
863 | tmp = addrrange_intersection(tmp, fr->addr); |
864 | |
865 | if (add) { |
866 | MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, coalesced_io_add, |
867 | int128_get64(tmp.start), |
868 | int128_get64(tmp.size)); |
869 | } else { |
870 | MEMORY_LISTENER_UPDATE_REGION(fr, as, Reverse, coalesced_io_del, |
871 | int128_get64(tmp.start), |
872 | int128_get64(tmp.size)); |
873 | } |
874 | } |
875 | |
876 | static void flat_range_coalesced_io_del(FlatRange *fr, AddressSpace *as) |
877 | { |
878 | CoalescedMemoryRange *cmr; |
879 | |
880 | QTAILQ_FOREACH(cmr, &fr->mr->coalesced, link) { |
881 | flat_range_coalesced_io_notify(fr, as, cmr, false); |
882 | } |
883 | } |
884 | |
885 | static void flat_range_coalesced_io_add(FlatRange *fr, AddressSpace *as) |
886 | { |
887 | MemoryRegion *mr = fr->mr; |
888 | CoalescedMemoryRange *cmr; |
889 | |
890 | if (QTAILQ_EMPTY(&mr->coalesced)) { |
891 | return; |
892 | } |
893 | |
894 | QTAILQ_FOREACH(cmr, &mr->coalesced, link) { |
895 | flat_range_coalesced_io_notify(fr, as, cmr, true); |
896 | } |
897 | } |
898 | |
899 | static void address_space_update_topology_pass(AddressSpace *as, |
900 | const FlatView *old_view, |
901 | const FlatView *new_view, |
902 | bool adding) |
903 | { |
904 | unsigned iold, inew; |
905 | FlatRange *frold, *frnew; |
906 | |
907 | /* Generate a symmetric difference of the old and new memory maps. |
908 | * Kill ranges in the old map, and instantiate ranges in the new map. |
909 | */ |
910 | iold = inew = 0; |
911 | while (iold < old_view->nr || inew < new_view->nr) { |
912 | if (iold < old_view->nr) { |
913 | frold = &old_view->ranges[iold]; |
914 | } else { |
915 | frold = NULL; |
916 | } |
917 | if (inew < new_view->nr) { |
918 | frnew = &new_view->ranges[inew]; |
919 | } else { |
920 | frnew = NULL; |
921 | } |
922 | |
923 | if (frold |
924 | && (!frnew |
925 | || int128_lt(frold->addr.start, frnew->addr.start) |
926 | || (int128_eq(frold->addr.start, frnew->addr.start) |
927 | && !flatrange_equal(frold, frnew)))) { |
928 | /* In old but not in new, or in both but attributes changed. */ |
929 | |
930 | if (!adding) { |
931 | flat_range_coalesced_io_del(frold, as); |
932 | MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del); |
933 | } |
934 | |
935 | ++iold; |
936 | } else if (frold && frnew && flatrange_equal(frold, frnew)) { |
937 | /* In both and unchanged (except logging may have changed) */ |
938 | |
939 | if (adding) { |
940 | MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop); |
941 | if (frnew->dirty_log_mask & ~frold->dirty_log_mask) { |
942 | MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start, |
943 | frold->dirty_log_mask, |
944 | frnew->dirty_log_mask); |
945 | } |
946 | if (frold->dirty_log_mask & ~frnew->dirty_log_mask) { |
947 | MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop, |
948 | frold->dirty_log_mask, |
949 | frnew->dirty_log_mask); |
950 | } |
951 | } |
952 | |
953 | ++iold; |
954 | ++inew; |
955 | } else { |
956 | /* In new */ |
957 | |
958 | if (adding) { |
959 | MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add); |
960 | flat_range_coalesced_io_add(frnew, as); |
961 | } |
962 | |
963 | ++inew; |
964 | } |
965 | } |
966 | } |
967 | |
968 | static void flatviews_init(void) |
969 | { |
970 | static FlatView *empty_view; |
971 | |
972 | if (flat_views) { |
973 | return; |
974 | } |
975 | |
976 | flat_views = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL, |
977 | (GDestroyNotify) flatview_unref); |
978 | if (!empty_view) { |
979 | empty_view = generate_memory_topology(NULL); |
980 | /* We keep it alive forever in the global variable. */ |
981 | flatview_ref(empty_view); |
982 | } else { |
983 | g_hash_table_replace(flat_views, NULL, empty_view); |
984 | flatview_ref(empty_view); |
985 | } |
986 | } |
987 | |
988 | static void flatviews_reset(void) |
989 | { |
990 | AddressSpace *as; |
991 | |
992 | if (flat_views) { |
993 | g_hash_table_unref(flat_views); |
994 | flat_views = NULL; |
995 | } |
996 | flatviews_init(); |
997 | |
    /* Render each unique FlatView once. */
999 | QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { |
1000 | MemoryRegion *physmr = memory_region_get_flatview_root(as->root); |
1001 | |
1002 | if (g_hash_table_lookup(flat_views, physmr)) { |
1003 | continue; |
1004 | } |
1005 | |
1006 | generate_memory_topology(physmr); |
1007 | } |
1008 | } |
1009 | |
1010 | static void address_space_set_flatview(AddressSpace *as) |
1011 | { |
1012 | FlatView *old_view = address_space_to_flatview(as); |
1013 | MemoryRegion *physmr = memory_region_get_flatview_root(as->root); |
1014 | FlatView *new_view = g_hash_table_lookup(flat_views, physmr); |
1015 | |
1016 | assert(new_view); |
1017 | |
1018 | if (old_view == new_view) { |
1019 | return; |
1020 | } |
1021 | |
1022 | if (old_view) { |
1023 | flatview_ref(old_view); |
1024 | } |
1025 | |
1026 | flatview_ref(new_view); |
1027 | |
1028 | if (!QTAILQ_EMPTY(&as->listeners)) { |
1029 | FlatView tmpview = { .nr = 0 }, *old_view2 = old_view; |
1030 | |
1031 | if (!old_view2) { |
1032 | old_view2 = &tmpview; |
1033 | } |
1034 | address_space_update_topology_pass(as, old_view2, new_view, false); |
1035 | address_space_update_topology_pass(as, old_view2, new_view, true); |
1036 | } |
1037 | |
1038 | /* Writes are protected by the BQL. */ |
1039 | atomic_rcu_set(&as->current_map, new_view); |
1040 | if (old_view) { |
1041 | flatview_unref(old_view); |
1042 | } |
1043 | |
1044 | /* Note that all the old MemoryRegions are still alive up to this |
1045 | * point. This relieves most MemoryListeners from the need to |
1046 | * ref/unref the MemoryRegions they get---unless they use them |
1047 | * outside the iothread mutex, in which case precise reference |
1048 | * counting is necessary. |
1049 | */ |
1050 | if (old_view) { |
1051 | flatview_unref(old_view); |
1052 | } |
1053 | } |
1054 | |
1055 | static void address_space_update_topology(AddressSpace *as) |
1056 | { |
1057 | MemoryRegion *physmr = memory_region_get_flatview_root(as->root); |
1058 | |
1059 | flatviews_init(); |
1060 | if (!g_hash_table_lookup(flat_views, physmr)) { |
1061 | generate_memory_topology(physmr); |
1062 | } |
1063 | address_space_set_flatview(as); |
1064 | } |
1065 | |
1066 | void memory_region_transaction_begin(void) |
1067 | { |
1068 | qemu_flush_coalesced_mmio_buffer(); |
1069 | ++memory_region_transaction_depth; |
1070 | } |
1071 | |
1072 | void memory_region_transaction_commit(void) |
1073 | { |
1074 | AddressSpace *as; |
1075 | |
1076 | assert(memory_region_transaction_depth); |
1077 | assert(qemu_mutex_iothread_locked()); |
1078 | |
1079 | --memory_region_transaction_depth; |
1080 | if (!memory_region_transaction_depth) { |
1081 | if (memory_region_update_pending) { |
1082 | flatviews_reset(); |
1083 | |
1084 | MEMORY_LISTENER_CALL_GLOBAL(begin, Forward); |
1085 | |
1086 | QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { |
1087 | address_space_set_flatview(as); |
1088 | address_space_update_ioeventfds(as); |
1089 | } |
1090 | memory_region_update_pending = false; |
1091 | ioeventfd_update_pending = false; |
1092 | MEMORY_LISTENER_CALL_GLOBAL(commit, Forward); |
1093 | } else if (ioeventfd_update_pending) { |
1094 | QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { |
1095 | address_space_update_ioeventfds(as); |
1096 | } |
1097 | ioeventfd_update_pending = false; |
1098 | } |
1099 | } |
1100 | } |
1101 | |
1102 | static void memory_region_destructor_none(MemoryRegion *mr) |
1103 | { |
1104 | } |
1105 | |
1106 | static void memory_region_destructor_ram(MemoryRegion *mr) |
1107 | { |
1108 | qemu_ram_free(mr->ram_block); |
1109 | } |
1110 | |
1111 | static bool memory_region_need_escape(char c) |
1112 | { |
1113 | return c == '/' || c == '[' || c == '\\' || c == ']'; |
1114 | } |
1115 | |
1116 | static char *memory_region_escape_name(const char *name) |
1117 | { |
1118 | const char *p; |
1119 | char *escaped, *q; |
1120 | uint8_t c; |
1121 | size_t bytes = 0; |
1122 | |
1123 | for (p = name; *p; p++) { |
1124 | bytes += memory_region_need_escape(*p) ? 4 : 1; |
1125 | } |
1126 | if (bytes == p - name) { |
1127 | return g_memdup(name, bytes + 1); |
1128 | } |
1129 | |
1130 | escaped = g_malloc(bytes + 1); |
1131 | for (p = name, q = escaped; *p; p++) { |
1132 | c = *p; |
1133 | if (unlikely(memory_region_need_escape(c))) { |
1134 | *q++ = '\\'; |
1135 | *q++ = 'x'; |
1136 | *q++ = "0123456789abcdef" [c >> 4]; |
1137 | c = "0123456789abcdef" [c & 15]; |
1138 | } |
1139 | *q++ = c; |
1140 | } |
1141 | *q = 0; |
1142 | return escaped; |
1143 | } |
1144 | |
1145 | static void memory_region_do_init(MemoryRegion *mr, |
1146 | Object *owner, |
1147 | const char *name, |
1148 | uint64_t size) |
1149 | { |
1150 | mr->size = int128_make64(size); |
1151 | if (size == UINT64_MAX) { |
1152 | mr->size = int128_2_64(); |
1153 | } |
1154 | mr->name = g_strdup(name); |
1155 | mr->owner = owner; |
1156 | mr->ram_block = NULL; |
1157 | |
1158 | if (name) { |
1159 | char *escaped_name = memory_region_escape_name(name); |
        char *name_array = g_strdup_printf("%s[*]", escaped_name);
1161 | |
1162 | if (!owner) { |
            owner = container_get(qdev_get_machine(), "/unattached");
1164 | } |
1165 | |
1166 | object_property_add_child(owner, name_array, OBJECT(mr), &error_abort); |
1167 | object_unref(OBJECT(mr)); |
1168 | g_free(name_array); |
1169 | g_free(escaped_name); |
1170 | } |
1171 | } |
1172 | |
1173 | void memory_region_init(MemoryRegion *mr, |
1174 | Object *owner, |
1175 | const char *name, |
1176 | uint64_t size) |
1177 | { |
1178 | object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION); |
1179 | memory_region_do_init(mr, owner, name, size); |
1180 | } |
1181 | |
1182 | static void memory_region_get_addr(Object *obj, Visitor *v, const char *name, |
1183 | void *opaque, Error **errp) |
1184 | { |
1185 | MemoryRegion *mr = MEMORY_REGION(obj); |
1186 | uint64_t value = mr->addr; |
1187 | |
1188 | visit_type_uint64(v, name, &value, errp); |
1189 | } |
1190 | |
1191 | static void memory_region_get_container(Object *obj, Visitor *v, |
1192 | const char *name, void *opaque, |
1193 | Error **errp) |
1194 | { |
1195 | MemoryRegion *mr = MEMORY_REGION(obj); |
    gchar *path = (gchar *)"";
1197 | |
1198 | if (mr->container) { |
1199 | path = object_get_canonical_path(OBJECT(mr->container)); |
1200 | } |
1201 | visit_type_str(v, name, &path, errp); |
1202 | if (mr->container) { |
1203 | g_free(path); |
1204 | } |
1205 | } |
1206 | |
1207 | static Object *memory_region_resolve_container(Object *obj, void *opaque, |
1208 | const char *part) |
1209 | { |
1210 | MemoryRegion *mr = MEMORY_REGION(obj); |
1211 | |
1212 | return OBJECT(mr->container); |
1213 | } |
1214 | |
1215 | static void memory_region_get_priority(Object *obj, Visitor *v, |
1216 | const char *name, void *opaque, |
1217 | Error **errp) |
1218 | { |
1219 | MemoryRegion *mr = MEMORY_REGION(obj); |
1220 | int32_t value = mr->priority; |
1221 | |
1222 | visit_type_int32(v, name, &value, errp); |
1223 | } |
1224 | |
1225 | static void memory_region_get_size(Object *obj, Visitor *v, const char *name, |
1226 | void *opaque, Error **errp) |
1227 | { |
1228 | MemoryRegion *mr = MEMORY_REGION(obj); |
1229 | uint64_t value = memory_region_size(mr); |
1230 | |
1231 | visit_type_uint64(v, name, &value, errp); |
1232 | } |
1233 | |
1234 | static void memory_region_initfn(Object *obj) |
1235 | { |
1236 | MemoryRegion *mr = MEMORY_REGION(obj); |
1237 | ObjectProperty *op; |
1238 | |
1239 | mr->ops = &unassigned_mem_ops; |
1240 | mr->enabled = true; |
1241 | mr->romd_mode = true; |
1242 | mr->global_locking = true; |
1243 | mr->destructor = memory_region_destructor_none; |
1244 | QTAILQ_INIT(&mr->subregions); |
1245 | QTAILQ_INIT(&mr->coalesced); |
1246 | |
    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
1249 | memory_region_get_container, |
1250 | NULL, /* memory_region_set_container */ |
1251 | NULL, NULL, &error_abort); |
1252 | op->resolve = memory_region_resolve_container; |
1253 | |
1254 | object_property_add(OBJECT(mr), "addr" , "uint64" , |
1255 | memory_region_get_addr, |
1256 | NULL, /* memory_region_set_addr */ |
1257 | NULL, NULL, &error_abort); |
1258 | object_property_add(OBJECT(mr), "priority" , "uint32" , |
1259 | memory_region_get_priority, |
1260 | NULL, /* memory_region_set_priority */ |
1261 | NULL, NULL, &error_abort); |
1262 | object_property_add(OBJECT(mr), "size" , "uint64" , |
1263 | memory_region_get_size, |
1264 | NULL, /* memory_region_set_size, */ |
1265 | NULL, NULL, &error_abort); |
1266 | } |
1267 | |
1268 | static void iommu_memory_region_initfn(Object *obj) |
1269 | { |
1270 | MemoryRegion *mr = MEMORY_REGION(obj); |
1271 | |
1272 | mr->is_iommu = true; |
1273 | } |
1274 | |
1275 | static uint64_t unassigned_mem_read(void *opaque, hwaddr addr, |
1276 | unsigned size) |
1277 | { |
1278 | #ifdef DEBUG_UNASSIGNED |
1279 | printf("Unassigned mem read " TARGET_FMT_plx "\n" , addr); |
1280 | #endif |
1281 | if (current_cpu != NULL) { |
1282 | bool is_exec = current_cpu->mem_io_access_type == MMU_INST_FETCH; |
1283 | cpu_unassigned_access(current_cpu, addr, false, is_exec, 0, size); |
1284 | } |
1285 | return 0; |
1286 | } |
1287 | |
1288 | static void unassigned_mem_write(void *opaque, hwaddr addr, |
1289 | uint64_t val, unsigned size) |
1290 | { |
1291 | #ifdef DEBUG_UNASSIGNED |
1292 | printf("Unassigned mem write " TARGET_FMT_plx " = 0x%" PRIx64"\n" , addr, val); |
1293 | #endif |
1294 | if (current_cpu != NULL) { |
1295 | cpu_unassigned_access(current_cpu, addr, true, false, 0, size); |
1296 | } |
1297 | } |
1298 | |
1299 | static bool unassigned_mem_accepts(void *opaque, hwaddr addr, |
1300 | unsigned size, bool is_write, |
1301 | MemTxAttrs attrs) |
1302 | { |
1303 | return false; |
1304 | } |
1305 | |
1306 | const MemoryRegionOps unassigned_mem_ops = { |
1307 | .valid.accepts = unassigned_mem_accepts, |
1308 | .endianness = DEVICE_NATIVE_ENDIAN, |
1309 | }; |
1310 | |
1311 | static uint64_t memory_region_ram_device_read(void *opaque, |
1312 | hwaddr addr, unsigned size) |
1313 | { |
1314 | MemoryRegion *mr = opaque; |
1315 | uint64_t data = (uint64_t)~0; |
1316 | |
1317 | switch (size) { |
1318 | case 1: |
1319 | data = *(uint8_t *)(mr->ram_block->host + addr); |
1320 | break; |
1321 | case 2: |
1322 | data = *(uint16_t *)(mr->ram_block->host + addr); |
1323 | break; |
1324 | case 4: |
1325 | data = *(uint32_t *)(mr->ram_block->host + addr); |
1326 | break; |
1327 | case 8: |
1328 | data = *(uint64_t *)(mr->ram_block->host + addr); |
1329 | break; |
1330 | } |
1331 | |
1332 | trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size); |
1333 | |
1334 | return data; |
1335 | } |
1336 | |
1337 | static void memory_region_ram_device_write(void *opaque, hwaddr addr, |
1338 | uint64_t data, unsigned size) |
1339 | { |
1340 | MemoryRegion *mr = opaque; |
1341 | |
1342 | trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size); |
1343 | |
1344 | switch (size) { |
1345 | case 1: |
1346 | *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data; |
1347 | break; |
1348 | case 2: |
1349 | *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data; |
1350 | break; |
1351 | case 4: |
1352 | *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data; |
1353 | break; |
1354 | case 8: |
1355 | *(uint64_t *)(mr->ram_block->host + addr) = data; |
1356 | break; |
1357 | } |
1358 | } |
1359 | |
1360 | static const MemoryRegionOps ram_device_mem_ops = { |
1361 | .read = memory_region_ram_device_read, |
1362 | .write = memory_region_ram_device_write, |
1363 | .endianness = DEVICE_HOST_ENDIAN, |
1364 | .valid = { |
1365 | .min_access_size = 1, |
1366 | .max_access_size = 8, |
1367 | .unaligned = true, |
1368 | }, |
1369 | .impl = { |
1370 | .min_access_size = 1, |
1371 | .max_access_size = 8, |
1372 | .unaligned = true, |
1373 | }, |
1374 | }; |
1375 | |
1376 | bool memory_region_access_valid(MemoryRegion *mr, |
1377 | hwaddr addr, |
1378 | unsigned size, |
1379 | bool is_write, |
1380 | MemTxAttrs attrs) |
1381 | { |
1382 | int access_size_min, access_size_max; |
1383 | int access_size, i; |
1384 | |
1385 | if (!mr->ops->valid.unaligned && (addr & (size - 1))) { |
1386 | return false; |
1387 | } |
1388 | |
1389 | if (!mr->ops->valid.accepts) { |
1390 | return true; |
1391 | } |
1392 | |
1393 | access_size_min = mr->ops->valid.min_access_size; |
1394 | if (!mr->ops->valid.min_access_size) { |
1395 | access_size_min = 1; |
1396 | } |
1397 | |
1398 | access_size_max = mr->ops->valid.max_access_size; |
1399 | if (!mr->ops->valid.max_access_size) { |
1400 | access_size_max = 4; |
1401 | } |
1402 | |
1403 | access_size = MAX(MIN(size, access_size_max), access_size_min); |
1404 | for (i = 0; i < size; i += access_size) { |
1405 | if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size, |
1406 | is_write, attrs)) { |
1407 | return false; |
1408 | } |
1409 | } |
1410 | |
1411 | return true; |
1412 | } |
1413 | |
1414 | static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr, |
1415 | hwaddr addr, |
1416 | uint64_t *pval, |
1417 | unsigned size, |
1418 | MemTxAttrs attrs) |
1419 | { |
1420 | *pval = 0; |
1421 | |
1422 | if (mr->ops->read) { |
1423 | return access_with_adjusted_size(addr, pval, size, |
1424 | mr->ops->impl.min_access_size, |
1425 | mr->ops->impl.max_access_size, |
1426 | memory_region_read_accessor, |
1427 | mr, attrs); |
1428 | } else { |
1429 | return access_with_adjusted_size(addr, pval, size, |
1430 | mr->ops->impl.min_access_size, |
1431 | mr->ops->impl.max_access_size, |
1432 | memory_region_read_with_attrs_accessor, |
1433 | mr, attrs); |
1434 | } |
1435 | } |
1436 | |
1437 | MemTxResult memory_region_dispatch_read(MemoryRegion *mr, |
1438 | hwaddr addr, |
1439 | uint64_t *pval, |
1440 | MemOp op, |
1441 | MemTxAttrs attrs) |
1442 | { |
1443 | unsigned size = memop_size(op); |
1444 | MemTxResult r; |
1445 | |
1446 | if (!memory_region_access_valid(mr, addr, size, false, attrs)) { |
1447 | *pval = unassigned_mem_read(mr, addr, size); |
1448 | return MEMTX_DECODE_ERROR; |
1449 | } |
1450 | |
1451 | r = memory_region_dispatch_read1(mr, addr, pval, size, attrs); |
1452 | adjust_endianness(mr, pval, op); |
1453 | return r; |
1454 | } |
1455 | |
1456 | /* Return true if an eventfd was signalled */ |
1457 | static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr, |
1458 | hwaddr addr, |
1459 | uint64_t data, |
1460 | unsigned size, |
1461 | MemTxAttrs attrs) |
1462 | { |
1463 | MemoryRegionIoeventfd ioeventfd = { |
1464 | .addr = addrrange_make(int128_make64(addr), int128_make64(size)), |
1465 | .data = data, |
1466 | }; |
1467 | unsigned i; |
1468 | |
1469 | for (i = 0; i < mr->ioeventfd_nb; i++) { |
1470 | ioeventfd.match_data = mr->ioeventfds[i].match_data; |
1471 | ioeventfd.e = mr->ioeventfds[i].e; |
1472 | |
1473 | if (memory_region_ioeventfd_equal(&ioeventfd, &mr->ioeventfds[i])) { |
1474 | event_notifier_set(ioeventfd.e); |
1475 | return true; |
1476 | } |
1477 | } |
1478 | |
1479 | return false; |
1480 | } |
1481 | |
1482 | MemTxResult memory_region_dispatch_write(MemoryRegion *mr, |
1483 | hwaddr addr, |
1484 | uint64_t data, |
1485 | MemOp op, |
1486 | MemTxAttrs attrs) |
1487 | { |
1488 | unsigned size = memop_size(op); |
1489 | |
1490 | if (!memory_region_access_valid(mr, addr, size, true, attrs)) { |
1491 | unassigned_mem_write(mr, addr, data, size); |
1492 | return MEMTX_DECODE_ERROR; |
1493 | } |
1494 | |
1495 | adjust_endianness(mr, &data, op); |
1496 | |
1497 | if ((!kvm_eventfds_enabled()) && |
1498 | memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) { |
1499 | return MEMTX_OK; |
1500 | } |
1501 | |
1502 | if (mr->ops->write) { |
1503 | return access_with_adjusted_size(addr, &data, size, |
1504 | mr->ops->impl.min_access_size, |
1505 | mr->ops->impl.max_access_size, |
1506 | memory_region_write_accessor, mr, |
1507 | attrs); |
1508 | } else { |
1509 | return |
1510 | access_with_adjusted_size(addr, &data, size, |
1511 | mr->ops->impl.min_access_size, |
1512 | mr->ops->impl.max_access_size, |
1513 | memory_region_write_with_attrs_accessor, |
1514 | mr, attrs); |
1515 | } |
1516 | } |
1517 | |
1518 | void memory_region_init_io(MemoryRegion *mr, |
1519 | Object *owner, |
1520 | const MemoryRegionOps *ops, |
1521 | void *opaque, |
1522 | const char *name, |
1523 | uint64_t size) |
1524 | { |
1525 | memory_region_init(mr, owner, name, size); |
1526 | mr->ops = ops ? ops : &unassigned_mem_ops; |
1527 | mr->opaque = opaque; |
1528 | mr->terminates = true; |
1529 | } |
1530 | |
1531 | void memory_region_init_ram_nomigrate(MemoryRegion *mr, |
1532 | Object *owner, |
1533 | const char *name, |
1534 | uint64_t size, |
1535 | Error **errp) |
1536 | { |
1537 | memory_region_init_ram_shared_nomigrate(mr, owner, name, size, false, errp); |
1538 | } |
1539 | |
1540 | void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr, |
1541 | Object *owner, |
1542 | const char *name, |
1543 | uint64_t size, |
1544 | bool share, |
1545 | Error **errp) |
1546 | { |
1547 | Error *err = NULL; |
1548 | memory_region_init(mr, owner, name, size); |
1549 | mr->ram = true; |
1550 | mr->terminates = true; |
1551 | mr->destructor = memory_region_destructor_ram; |
1552 | mr->ram_block = qemu_ram_alloc(size, share, mr, &err); |
1553 | mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0; |
1554 | if (err) { |
1555 | mr->size = int128_zero(); |
1556 | object_unparent(OBJECT(mr)); |
1557 | error_propagate(errp, err); |
1558 | } |
1559 | } |
1560 | |
1561 | void memory_region_init_resizeable_ram(MemoryRegion *mr, |
1562 | Object *owner, |
1563 | const char *name, |
1564 | uint64_t size, |
1565 | uint64_t max_size, |
1566 | void (*resized)(const char*, |
1567 | uint64_t length, |
1568 | void *host), |
1569 | Error **errp) |
1570 | { |
1571 | Error *err = NULL; |
1572 | memory_region_init(mr, owner, name, size); |
1573 | mr->ram = true; |
1574 | mr->terminates = true; |
1575 | mr->destructor = memory_region_destructor_ram; |
1576 | mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized, |
1577 | mr, &err); |
1578 | mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0; |
1579 | if (err) { |
1580 | mr->size = int128_zero(); |
1581 | object_unparent(OBJECT(mr)); |
1582 | error_propagate(errp, err); |
1583 | } |
1584 | } |
1585 | |
1586 | #ifdef CONFIG_POSIX |
1587 | void memory_region_init_ram_from_file(MemoryRegion *mr, |
1588 | struct Object *owner, |
1589 | const char *name, |
1590 | uint64_t size, |
1591 | uint64_t align, |
1592 | uint32_t ram_flags, |
1593 | const char *path, |
1594 | Error **errp) |
1595 | { |
1596 | Error *err = NULL; |
1597 | memory_region_init(mr, owner, name, size); |
1598 | mr->ram = true; |
1599 | mr->terminates = true; |
1600 | mr->destructor = memory_region_destructor_ram; |
1601 | mr->align = align; |
1602 | mr->ram_block = qemu_ram_alloc_from_file(size, mr, ram_flags, path, &err); |
1603 | mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0; |
1604 | if (err) { |
1605 | mr->size = int128_zero(); |
1606 | object_unparent(OBJECT(mr)); |
1607 | error_propagate(errp, err); |
1608 | } |
1609 | } |
1610 | |
1611 | void memory_region_init_ram_from_fd(MemoryRegion *mr, |
1612 | struct Object *owner, |
1613 | const char *name, |
1614 | uint64_t size, |
1615 | bool share, |
1616 | int fd, |
1617 | Error **errp) |
1618 | { |
1619 | Error *err = NULL; |
1620 | memory_region_init(mr, owner, name, size); |
1621 | mr->ram = true; |
1622 | mr->terminates = true; |
1623 | mr->destructor = memory_region_destructor_ram; |
1624 | mr->ram_block = qemu_ram_alloc_from_fd(size, mr, |
1625 | share ? RAM_SHARED : 0, |
1626 | fd, &err); |
1627 | mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0; |
1628 | if (err) { |
1629 | mr->size = int128_zero(); |
1630 | object_unparent(OBJECT(mr)); |
1631 | error_propagate(errp, err); |
1632 | } |
1633 | } |
1634 | #endif |
1635 | |
1636 | void memory_region_init_ram_ptr(MemoryRegion *mr, |
1637 | Object *owner, |
1638 | const char *name, |
1639 | uint64_t size, |
1640 | void *ptr) |
1641 | { |
1642 | memory_region_init(mr, owner, name, size); |
1643 | mr->ram = true; |
1644 | mr->terminates = true; |
1645 | mr->destructor = memory_region_destructor_ram; |
1646 | mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0; |
1647 | |
1648 | /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */ |
1649 | assert(ptr != NULL); |
1650 | mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal); |
1651 | } |
1652 | |
1653 | void memory_region_init_ram_device_ptr(MemoryRegion *mr, |
1654 | Object *owner, |
1655 | const char *name, |
1656 | uint64_t size, |
1657 | void *ptr) |
1658 | { |
1659 | memory_region_init(mr, owner, name, size); |
1660 | mr->ram = true; |
1661 | mr->terminates = true; |
1662 | mr->ram_device = true; |
1663 | mr->ops = &ram_device_mem_ops; |
1664 | mr->opaque = mr; |
1665 | mr->destructor = memory_region_destructor_ram; |
1666 | mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0; |
1667 | /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */ |
1668 | assert(ptr != NULL); |
1669 | mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal); |
1670 | } |
1671 | |
1672 | void memory_region_init_alias(MemoryRegion *mr, |
1673 | Object *owner, |
1674 | const char *name, |
1675 | MemoryRegion *orig, |
1676 | hwaddr offset, |
1677 | uint64_t size) |
1678 | { |
1679 | memory_region_init(mr, owner, name, size); |
1680 | mr->alias = orig; |
1681 | mr->alias_offset = offset; |
1682 | } |
1683 | |
1684 | void memory_region_init_rom_nomigrate(MemoryRegion *mr, |
1685 | struct Object *owner, |
1686 | const char *name, |
1687 | uint64_t size, |
1688 | Error **errp) |
1689 | { |
1690 | Error *err = NULL; |
1691 | memory_region_init(mr, owner, name, size); |
1692 | mr->ram = true; |
1693 | mr->readonly = true; |
1694 | mr->terminates = true; |
1695 | mr->destructor = memory_region_destructor_ram; |
1696 | mr->ram_block = qemu_ram_alloc(size, false, mr, &err); |
1697 | mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0; |
1698 | if (err) { |
1699 | mr->size = int128_zero(); |
1700 | object_unparent(OBJECT(mr)); |
1701 | error_propagate(errp, err); |
1702 | } |
1703 | } |
1704 | |
1705 | void memory_region_init_rom_device_nomigrate(MemoryRegion *mr, |
1706 | Object *owner, |
1707 | const MemoryRegionOps *ops, |
1708 | void *opaque, |
1709 | const char *name, |
1710 | uint64_t size, |
1711 | Error **errp) |
1712 | { |
1713 | Error *err = NULL; |
1714 | assert(ops); |
1715 | memory_region_init(mr, owner, name, size); |
1716 | mr->ops = ops; |
1717 | mr->opaque = opaque; |
1718 | mr->terminates = true; |
1719 | mr->rom_device = true; |
1720 | mr->destructor = memory_region_destructor_ram; |
1721 | mr->ram_block = qemu_ram_alloc(size, false, mr, &err); |
1722 | if (err) { |
1723 | mr->size = int128_zero(); |
1724 | object_unparent(OBJECT(mr)); |
1725 | error_propagate(errp, err); |
1726 | } |
1727 | } |
1728 | |
1729 | void memory_region_init_iommu(void *_iommu_mr, |
1730 | size_t instance_size, |
1731 | const char *mrtypename, |
1732 | Object *owner, |
1733 | const char *name, |
1734 | uint64_t size) |
1735 | { |
1736 | struct IOMMUMemoryRegion *iommu_mr; |
1737 | struct MemoryRegion *mr; |
1738 | |
1739 | object_initialize(_iommu_mr, instance_size, mrtypename); |
1740 | mr = MEMORY_REGION(_iommu_mr); |
1741 | memory_region_do_init(mr, owner, name, size); |
1742 | iommu_mr = IOMMU_MEMORY_REGION(mr); |
1743 | mr->terminates = true; /* then re-forwards */ |
1744 | QLIST_INIT(&iommu_mr->iommu_notify); |
1745 | iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE; |
1746 | } |
1747 | |
1748 | static void memory_region_finalize(Object *obj) |
1749 | { |
1750 | MemoryRegion *mr = MEMORY_REGION(obj); |
1751 | |
1752 | assert(!mr->container); |
1753 | |
    /* We know the region is not visible in any address space (it
     * does not have a container and cannot be a root either, because
     * it has no references), so we can blindly clear mr->enabled.
     * Calling memory_region_set_enabled() instead could trigger a
     * transaction and cause an infinite loop.
1759 | */ |
1760 | mr->enabled = false; |
1761 | memory_region_transaction_begin(); |
1762 | while (!QTAILQ_EMPTY(&mr->subregions)) { |
1763 | MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions); |
1764 | memory_region_del_subregion(mr, subregion); |
1765 | } |
1766 | memory_region_transaction_commit(); |
1767 | |
1768 | mr->destructor(mr); |
1769 | memory_region_clear_coalescing(mr); |
1770 | g_free((char *)mr->name); |
1771 | g_free(mr->ioeventfds); |
1772 | } |
1773 | |
1774 | Object *memory_region_owner(MemoryRegion *mr) |
1775 | { |
1776 | Object *obj = OBJECT(mr); |
1777 | return obj->parent; |
1778 | } |
1779 | |
1780 | void memory_region_ref(MemoryRegion *mr) |
1781 | { |
1782 | /* MMIO callbacks most likely will access data that belongs |
1783 | * to the owner, hence the need to ref/unref the owner whenever |
1784 | * the memory region is in use. |
1785 | * |
     * The memory region is a child of its owner.  As long as the
     * owner does not unparent the memory region, taking a reference
     * on the owner will also keep the memory region alive.
     * Memory regions without an owner are supposed to never go away;
     * we do not ref/unref them because doing so would slow down DMA
     * noticeably.
1791 | */ |
1792 | if (mr && mr->owner) { |
1793 | object_ref(mr->owner); |
1794 | } |
1795 | } |
1796 | |
1797 | void memory_region_unref(MemoryRegion *mr) |
1798 | { |
1799 | if (mr && mr->owner) { |
1800 | object_unref(mr->owner); |
1801 | } |
1802 | } |
1803 | |
1804 | uint64_t memory_region_size(MemoryRegion *mr) |
1805 | { |
1806 | if (int128_eq(mr->size, int128_2_64())) { |
1807 | return UINT64_MAX; |
1808 | } |
1809 | return int128_get64(mr->size); |
1810 | } |
1811 | |
1812 | const char *memory_region_name(const MemoryRegion *mr) |
1813 | { |
1814 | if (!mr->name) { |
1815 | ((MemoryRegion *)mr)->name = |
1816 | object_get_canonical_path_component(OBJECT(mr)); |
1817 | } |
1818 | return mr->name; |
1819 | } |
1820 | |
1821 | bool memory_region_is_ram_device(MemoryRegion *mr) |
1822 | { |
1823 | return mr->ram_device; |
1824 | } |
1825 | |
1826 | uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr) |
1827 | { |
1828 | uint8_t mask = mr->dirty_log_mask; |
1829 | if (global_dirty_log && mr->ram_block) { |
1830 | mask |= (1 << DIRTY_MEMORY_MIGRATION); |
1831 | } |
1832 | return mask; |
1833 | } |
1834 | |
1835 | bool memory_region_is_logging(MemoryRegion *mr, uint8_t client) |
1836 | { |
1837 | return memory_region_get_dirty_log_mask(mr) & (1 << client); |
1838 | } |
1839 | |
1840 | static void memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr) |
1841 | { |
1842 | IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE; |
1843 | IOMMUNotifier *iommu_notifier; |
1844 | IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr); |
1845 | |
1846 | IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) { |
1847 | flags |= iommu_notifier->notifier_flags; |
1848 | } |
1849 | |
1850 | if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) { |
1851 | imrc->notify_flag_changed(iommu_mr, |
1852 | iommu_mr->iommu_notify_flags, |
1853 | flags); |
1854 | } |
1855 | |
1856 | iommu_mr->iommu_notify_flags = flags; |
1857 | } |
1858 | |
1859 | void memory_region_register_iommu_notifier(MemoryRegion *mr, |
1860 | IOMMUNotifier *n) |
1861 | { |
1862 | IOMMUMemoryRegion *iommu_mr; |
1863 | |
1864 | if (mr->alias) { |
1865 | memory_region_register_iommu_notifier(mr->alias, n); |
1866 | return; |
1867 | } |
1868 | |
    iommu_mr = IOMMU_MEMORY_REGION(mr);

    /* We need to register for at least one bitfield */
    assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
1872 | assert(n->start <= n->end); |
1873 | assert(n->iommu_idx >= 0 && |
1874 | n->iommu_idx < memory_region_iommu_num_indexes(iommu_mr)); |
1875 | |
1876 | QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node); |
1877 | memory_region_update_iommu_notify_flags(iommu_mr); |
1878 | } |
1879 | |
1880 | uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr) |
1881 | { |
1882 | IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr); |
1883 | |
1884 | if (imrc->get_min_page_size) { |
1885 | return imrc->get_min_page_size(iommu_mr); |
1886 | } |
1887 | return TARGET_PAGE_SIZE; |
1888 | } |
1889 | |
1890 | void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n) |
1891 | { |
1892 | MemoryRegion *mr = MEMORY_REGION(iommu_mr); |
1893 | IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr); |
1894 | hwaddr addr, granularity; |
1895 | IOMMUTLBEntry iotlb; |
1896 | |
1897 | /* If the IOMMU has its own replay callback, override */ |
1898 | if (imrc->replay) { |
1899 | imrc->replay(iommu_mr, n); |
1900 | return; |
1901 | } |
1902 | |
1903 | granularity = memory_region_iommu_get_min_page_size(iommu_mr); |
1904 | |
1905 | for (addr = 0; addr < memory_region_size(mr); addr += granularity) { |
1906 | iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, n->iommu_idx); |
1907 | if (iotlb.perm != IOMMU_NONE) { |
1908 | n->notify(n, &iotlb); |
1909 | } |
1910 | |
        /* If (2^64 - MR size) < granularity, addr can wrap around and
         * loop forever.  This check catches such a wraparound. */
1913 | if ((addr + granularity) < addr) { |
1914 | break; |
1915 | } |
1916 | } |
1917 | } |
1918 | |
1919 | void memory_region_unregister_iommu_notifier(MemoryRegion *mr, |
1920 | IOMMUNotifier *n) |
1921 | { |
1922 | IOMMUMemoryRegion *iommu_mr; |
1923 | |
1924 | if (mr->alias) { |
1925 | memory_region_unregister_iommu_notifier(mr->alias, n); |
1926 | return; |
1927 | } |
1928 | QLIST_REMOVE(n, node); |
1929 | iommu_mr = IOMMU_MEMORY_REGION(mr); |
1930 | memory_region_update_iommu_notify_flags(iommu_mr); |
1931 | } |
1932 | |
1933 | void memory_region_notify_one(IOMMUNotifier *notifier, |
1934 | IOMMUTLBEntry *entry) |
1935 | { |
1936 | IOMMUNotifierFlag request_flags; |
1937 | hwaddr entry_end = entry->iova + entry->addr_mask; |
1938 | |
1939 | /* |
1940 | * Skip the notification if the notification does not overlap |
1941 | * with registered range. |
1942 | */ |
1943 | if (notifier->start > entry_end || notifier->end < entry->iova) { |
1944 | return; |
1945 | } |
1946 | |
1947 | assert(entry->iova >= notifier->start && entry_end <= notifier->end); |
1948 | |
1949 | if (entry->perm & IOMMU_RW) { |
1950 | request_flags = IOMMU_NOTIFIER_MAP; |
1951 | } else { |
1952 | request_flags = IOMMU_NOTIFIER_UNMAP; |
1953 | } |
1954 | |
1955 | if (notifier->notifier_flags & request_flags) { |
1956 | notifier->notify(notifier, entry); |
1957 | } |
1958 | } |
1959 | |
1960 | void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr, |
1961 | int iommu_idx, |
1962 | IOMMUTLBEntry entry) |
1963 | { |
1964 | IOMMUNotifier *iommu_notifier; |
1965 | |
1966 | assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr))); |
1967 | |
1968 | IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) { |
1969 | if (iommu_notifier->iommu_idx == iommu_idx) { |
1970 | memory_region_notify_one(iommu_notifier, &entry); |
1971 | } |
1972 | } |
1973 | } |
1974 | |
1975 | int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr, |
1976 | enum IOMMUMemoryRegionAttr attr, |
1977 | void *data) |
1978 | { |
1979 | IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr); |
1980 | |
1981 | if (!imrc->get_attr) { |
1982 | return -EINVAL; |
1983 | } |
1984 | |
1985 | return imrc->get_attr(iommu_mr, attr, data); |
1986 | } |
1987 | |
1988 | int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr, |
1989 | MemTxAttrs attrs) |
1990 | { |
1991 | IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr); |
1992 | |
1993 | if (!imrc->attrs_to_index) { |
1994 | return 0; |
1995 | } |
1996 | |
1997 | return imrc->attrs_to_index(iommu_mr, attrs); |
1998 | } |
1999 | |
2000 | int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr) |
2001 | { |
2002 | IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr); |
2003 | |
2004 | if (!imrc->num_indexes) { |
2005 | return 1; |
2006 | } |
2007 | |
2008 | return imrc->num_indexes(iommu_mr); |
2009 | } |
2010 | |
2011 | void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client) |
2012 | { |
2013 | uint8_t mask = 1 << client; |
2014 | uint8_t old_logging; |
2015 | |
2016 | assert(client == DIRTY_MEMORY_VGA); |
2017 | old_logging = mr->vga_logging_count; |
2018 | mr->vga_logging_count += log ? 1 : -1; |
2019 | if (!!old_logging == !!mr->vga_logging_count) { |
2020 | return; |
2021 | } |
2022 | |
2023 | memory_region_transaction_begin(); |
2024 | mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask); |
2025 | memory_region_update_pending |= mr->enabled; |
2026 | memory_region_transaction_commit(); |
2027 | } |
2028 | |
2029 | void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr, |
2030 | hwaddr size) |
2031 | { |
2032 | assert(mr->ram_block); |
2033 | cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr, |
2034 | size, |
2035 | memory_region_get_dirty_log_mask(mr)); |
2036 | } |
2037 | |
2038 | static void memory_region_sync_dirty_bitmap(MemoryRegion *mr) |
2039 | { |
2040 | MemoryListener *listener; |
2041 | AddressSpace *as; |
2042 | FlatView *view; |
2043 | FlatRange *fr; |
2044 | |
2045 | /* If the same address space has multiple log_sync listeners, we |
2046 | * visit that address space's FlatView multiple times. But because |
2047 | * log_sync listeners are rare, it's still cheaper than walking each |
2048 | * address space once. |
2049 | */ |
2050 | QTAILQ_FOREACH(listener, &memory_listeners, link) { |
2051 | if (!listener->log_sync) { |
2052 | continue; |
2053 | } |
2054 | as = listener->address_space; |
2055 | view = address_space_get_flatview(as); |
2056 | FOR_EACH_FLAT_RANGE(fr, view) { |
2057 | if (fr->dirty_log_mask && (!mr || fr->mr == mr)) { |
2058 | MemoryRegionSection mrs = section_from_flat_range(fr, view); |
2059 | listener->log_sync(listener, &mrs); |
2060 | } |
2061 | } |
2062 | flatview_unref(view); |
2063 | } |
2064 | } |
2065 | |
2066 | void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start, |
2067 | hwaddr len) |
2068 | { |
2069 | MemoryRegionSection mrs; |
2070 | MemoryListener *listener; |
2071 | AddressSpace *as; |
2072 | FlatView *view; |
2073 | FlatRange *fr; |
2074 | hwaddr sec_start, sec_end, sec_size; |
2075 | |
2076 | QTAILQ_FOREACH(listener, &memory_listeners, link) { |
2077 | if (!listener->log_clear) { |
2078 | continue; |
2079 | } |
2080 | as = listener->address_space; |
2081 | view = address_space_get_flatview(as); |
2082 | FOR_EACH_FLAT_RANGE(fr, view) { |
2083 | if (!fr->dirty_log_mask || fr->mr != mr) { |
2084 | /* |
2085 | * Clear dirty bitmap operation only applies to those |
2086 | * regions whose dirty logging is at least enabled |
2087 | */ |
2088 | continue; |
2089 | } |
2090 | |
2091 | mrs = section_from_flat_range(fr, view); |
2092 | |
2093 | sec_start = MAX(mrs.offset_within_region, start); |
2094 | sec_end = mrs.offset_within_region + int128_get64(mrs.size); |
2095 | sec_end = MIN(sec_end, start + len); |
2096 | |
2097 | if (sec_start >= sec_end) { |
2098 | /* |
2099 | * If this memory region section has no intersection |
2100 | * with the requested range, skip. |
2101 | */ |
2102 | continue; |
2103 | } |
2104 | |
2105 | /* Valid case; shrink the section if needed */ |
2106 | mrs.offset_within_address_space += |
2107 | sec_start - mrs.offset_within_region; |
2108 | mrs.offset_within_region = sec_start; |
2109 | sec_size = sec_end - sec_start; |
2110 | mrs.size = int128_make64(sec_size); |
2111 | listener->log_clear(listener, &mrs); |
2112 | } |
2113 | flatview_unref(view); |
2114 | } |
2115 | } |
2116 | |
2117 | DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr, |
2118 | hwaddr addr, |
2119 | hwaddr size, |
2120 | unsigned client) |
2121 | { |
2122 | DirtyBitmapSnapshot *snapshot; |
2123 | assert(mr->ram_block); |
2124 | memory_region_sync_dirty_bitmap(mr); |
    snapshot = cpu_physical_memory_snapshot_and_clear_dirty(mr, addr, size,
                                                            client);
2126 | memory_global_after_dirty_log_sync(); |
2127 | return snapshot; |
2128 | } |
2129 | |
bool memory_region_snapshot_get_dirty(MemoryRegion *mr,
                                      DirtyBitmapSnapshot *snap,
                                      hwaddr addr, hwaddr size)
2132 | { |
2133 | assert(mr->ram_block); |
2134 | return cpu_physical_memory_snapshot_get_dirty(snap, |
2135 | memory_region_get_ram_addr(mr) + addr, size); |
2136 | } |
2137 | |
2138 | void memory_region_set_readonly(MemoryRegion *mr, bool readonly) |
2139 | { |
2140 | if (mr->readonly != readonly) { |
2141 | memory_region_transaction_begin(); |
2142 | mr->readonly = readonly; |
2143 | memory_region_update_pending |= mr->enabled; |
2144 | memory_region_transaction_commit(); |
2145 | } |
2146 | } |
2147 | |
2148 | void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile) |
2149 | { |
2150 | if (mr->nonvolatile != nonvolatile) { |
2151 | memory_region_transaction_begin(); |
2152 | mr->nonvolatile = nonvolatile; |
2153 | memory_region_update_pending |= mr->enabled; |
2154 | memory_region_transaction_commit(); |
2155 | } |
2156 | } |
2157 | |
2158 | void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode) |
2159 | { |
2160 | if (mr->romd_mode != romd_mode) { |
2161 | memory_region_transaction_begin(); |
2162 | mr->romd_mode = romd_mode; |
2163 | memory_region_update_pending |= mr->enabled; |
2164 | memory_region_transaction_commit(); |
2165 | } |
2166 | } |
2167 | |
2168 | void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr, |
2169 | hwaddr size, unsigned client) |
2170 | { |
2171 | assert(mr->ram_block); |
2172 | cpu_physical_memory_test_and_clear_dirty( |
2173 | memory_region_get_ram_addr(mr) + addr, size, client); |
2174 | } |
2175 | |
2176 | int memory_region_get_fd(MemoryRegion *mr) |
2177 | { |
2178 | int fd; |
2179 | |
2180 | rcu_read_lock(); |
2181 | while (mr->alias) { |
2182 | mr = mr->alias; |
2183 | } |
2184 | fd = mr->ram_block->fd; |
2185 | rcu_read_unlock(); |
2186 | |
2187 | return fd; |
2188 | } |
2189 | |
2190 | void *memory_region_get_ram_ptr(MemoryRegion *mr) |
2191 | { |
2192 | void *ptr; |
2193 | uint64_t offset = 0; |
2194 | |
2195 | rcu_read_lock(); |
2196 | while (mr->alias) { |
2197 | offset += mr->alias_offset; |
2198 | mr = mr->alias; |
2199 | } |
2200 | assert(mr->ram_block); |
2201 | ptr = qemu_map_ram_ptr(mr->ram_block, offset); |
2202 | rcu_read_unlock(); |
2203 | |
2204 | return ptr; |
2205 | } |
2206 | |
2207 | MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset) |
2208 | { |
2209 | RAMBlock *block; |
2210 | |
2211 | block = qemu_ram_block_from_host(ptr, false, offset); |
2212 | if (!block) { |
2213 | return NULL; |
2214 | } |
2215 | |
2216 | return block->mr; |
2217 | } |
2218 | |
2219 | ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr) |
2220 | { |
2221 | return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID; |
2222 | } |
2223 | |
2224 | void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp) |
2225 | { |
2226 | assert(mr->ram_block); |
2227 | |
2228 | qemu_ram_resize(mr->ram_block, newsize, errp); |
2229 | } |
2230 | |
2231 | /* |
2232 | * Call proper memory listeners about the change on the newly |
2233 | * added/removed CoalescedMemoryRange. |
2234 | */ |
2235 | static void memory_region_update_coalesced_range(MemoryRegion *mr, |
2236 | CoalescedMemoryRange *cmr, |
2237 | bool add) |
2238 | { |
2239 | AddressSpace *as; |
2240 | FlatView *view; |
2241 | FlatRange *fr; |
2242 | |
2243 | QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { |
2244 | view = address_space_get_flatview(as); |
2245 | FOR_EACH_FLAT_RANGE(fr, view) { |
2246 | if (fr->mr == mr) { |
2247 | flat_range_coalesced_io_notify(fr, as, cmr, add); |
2248 | } |
2249 | } |
2250 | flatview_unref(view); |
2251 | } |
2252 | } |
2253 | |
2254 | void memory_region_set_coalescing(MemoryRegion *mr) |
2255 | { |
2256 | memory_region_clear_coalescing(mr); |
2257 | memory_region_add_coalescing(mr, 0, int128_get64(mr->size)); |
2258 | } |
2259 | |
2260 | void memory_region_add_coalescing(MemoryRegion *mr, |
2261 | hwaddr offset, |
2262 | uint64_t size) |
2263 | { |
2264 | CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr)); |
2265 | |
2266 | cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size)); |
2267 | QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link); |
2268 | memory_region_update_coalesced_range(mr, cmr, true); |
2269 | memory_region_set_flush_coalesced(mr); |
2270 | } |
2271 | |
2272 | void memory_region_clear_coalescing(MemoryRegion *mr) |
2273 | { |
2274 | CoalescedMemoryRange *cmr; |
2275 | |
2276 | if (QTAILQ_EMPTY(&mr->coalesced)) { |
2277 | return; |
2278 | } |
2279 | |
2280 | qemu_flush_coalesced_mmio_buffer(); |
2281 | mr->flush_coalesced_mmio = false; |
2282 | |
2283 | while (!QTAILQ_EMPTY(&mr->coalesced)) { |
2284 | cmr = QTAILQ_FIRST(&mr->coalesced); |
2285 | QTAILQ_REMOVE(&mr->coalesced, cmr, link); |
2286 | memory_region_update_coalesced_range(mr, cmr, false); |
2287 | g_free(cmr); |
2288 | } |
2289 | } |
2290 | |
2291 | void memory_region_set_flush_coalesced(MemoryRegion *mr) |
2292 | { |
2293 | mr->flush_coalesced_mmio = true; |
2294 | } |
2295 | |
2296 | void memory_region_clear_flush_coalesced(MemoryRegion *mr) |
2297 | { |
2298 | qemu_flush_coalesced_mmio_buffer(); |
2299 | if (QTAILQ_EMPTY(&mr->coalesced)) { |
2300 | mr->flush_coalesced_mmio = false; |
2301 | } |
2302 | } |
2303 | |
2304 | void memory_region_clear_global_locking(MemoryRegion *mr) |
2305 | { |
2306 | mr->global_locking = false; |
2307 | } |
2308 | |
2309 | static bool userspace_eventfd_warning; |
2310 | |
2311 | void memory_region_add_eventfd(MemoryRegion *mr, |
2312 | hwaddr addr, |
2313 | unsigned size, |
2314 | bool match_data, |
2315 | uint64_t data, |
2316 | EventNotifier *e) |
2317 | { |
2318 | MemoryRegionIoeventfd mrfd = { |
2319 | .addr.start = int128_make64(addr), |
2320 | .addr.size = int128_make64(size), |
2321 | .match_data = match_data, |
2322 | .data = data, |
2323 | .e = e, |
2324 | }; |
2325 | unsigned i; |
2326 | |
2327 | if (kvm_enabled() && (!(kvm_eventfds_enabled() || |
2328 | userspace_eventfd_warning))) { |
2329 | userspace_eventfd_warning = true; |
2330 | error_report("Using eventfd without MMIO binding in KVM. " |
2331 | "Suboptimal performance expected" ); |
2332 | } |
2333 | |
2334 | if (size) { |
2335 | adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE); |
2336 | } |
2337 | memory_region_transaction_begin(); |
2338 | for (i = 0; i < mr->ioeventfd_nb; ++i) { |
2339 | if (memory_region_ioeventfd_before(&mrfd, &mr->ioeventfds[i])) { |
2340 | break; |
2341 | } |
2342 | } |
2343 | ++mr->ioeventfd_nb; |
2344 | mr->ioeventfds = g_realloc(mr->ioeventfds, |
2345 | sizeof(*mr->ioeventfds) * mr->ioeventfd_nb); |
2346 | memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i], |
2347 | sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i)); |
2348 | mr->ioeventfds[i] = mrfd; |
2349 | ioeventfd_update_pending |= mr->enabled; |
2350 | memory_region_transaction_commit(); |
2351 | } |
2352 | |
2353 | void memory_region_del_eventfd(MemoryRegion *mr, |
2354 | hwaddr addr, |
2355 | unsigned size, |
2356 | bool match_data, |
2357 | uint64_t data, |
2358 | EventNotifier *e) |
2359 | { |
2360 | MemoryRegionIoeventfd mrfd = { |
2361 | .addr.start = int128_make64(addr), |
2362 | .addr.size = int128_make64(size), |
2363 | .match_data = match_data, |
2364 | .data = data, |
2365 | .e = e, |
2366 | }; |
2367 | unsigned i; |
2368 | |
2369 | if (size) { |
2370 | adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE); |
2371 | } |
2372 | memory_region_transaction_begin(); |
2373 | for (i = 0; i < mr->ioeventfd_nb; ++i) { |
2374 | if (memory_region_ioeventfd_equal(&mrfd, &mr->ioeventfds[i])) { |
2375 | break; |
2376 | } |
2377 | } |
2378 | assert(i != mr->ioeventfd_nb); |
2379 | memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1], |
2380 | sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1))); |
2381 | --mr->ioeventfd_nb; |
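    /* The "+ 1" presumably keeps the allocation non-empty when
     * ioeventfd_nb drops to zero (g_realloc(p, 0) would free it). */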
2382 | mr->ioeventfds = g_realloc(mr->ioeventfds, |
2383 | sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1); |
2384 | ioeventfd_update_pending |= mr->enabled; |
2385 | memory_region_transaction_commit(); |
2386 | } |
2387 | |
2388 | static void memory_region_update_container_subregions(MemoryRegion *subregion) |
2389 | { |
2390 | MemoryRegion *mr = subregion->container; |
2391 | MemoryRegion *other; |
2392 | |
2393 | memory_region_transaction_begin(); |
2394 | |
2395 | memory_region_ref(subregion); |
2396 | QTAILQ_FOREACH(other, &mr->subregions, subregions_link) { |
2397 | if (subregion->priority >= other->priority) { |
2398 | QTAILQ_INSERT_BEFORE(other, subregion, subregions_link); |
2399 | goto done; |
2400 | } |
2401 | } |
2402 | QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link); |
2403 | done: |
2404 | memory_region_update_pending |= mr->enabled && subregion->enabled; |
2405 | memory_region_transaction_commit(); |
2406 | } |
2407 | |
2408 | static void memory_region_add_subregion_common(MemoryRegion *mr, |
2409 | hwaddr offset, |
2410 | MemoryRegion *subregion) |
2411 | { |
2412 | assert(!subregion->container); |
2413 | subregion->container = mr; |
2414 | subregion->addr = offset; |
2415 | memory_region_update_container_subregions(subregion); |
2416 | } |
2417 | |
2418 | void memory_region_add_subregion(MemoryRegion *mr, |
2419 | hwaddr offset, |
2420 | MemoryRegion *subregion) |
2421 | { |
2422 | subregion->priority = 0; |
2423 | memory_region_add_subregion_common(mr, offset, subregion); |
2424 | } |
2425 | |
2426 | void memory_region_add_subregion_overlap(MemoryRegion *mr, |
2427 | hwaddr offset, |
2428 | MemoryRegion *subregion, |
2429 | int priority) |
2430 | { |
2431 | subregion->priority = priority; |
2432 | memory_region_add_subregion_common(mr, offset, subregion); |
2433 | } |
2434 | |
2435 | void memory_region_del_subregion(MemoryRegion *mr, |
2436 | MemoryRegion *subregion) |
2437 | { |
2438 | memory_region_transaction_begin(); |
2439 | assert(subregion->container == mr); |
2440 | subregion->container = NULL; |
2441 | QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link); |
2442 | memory_region_unref(subregion); |
2443 | memory_region_update_pending |= mr->enabled && subregion->enabled; |
2444 | memory_region_transaction_commit(); |
2445 | } |
2446 | |
2447 | void memory_region_set_enabled(MemoryRegion *mr, bool enabled) |
2448 | { |
2449 | if (enabled == mr->enabled) { |
2450 | return; |
2451 | } |
2452 | memory_region_transaction_begin(); |
2453 | mr->enabled = enabled; |
2454 | memory_region_update_pending = true; |
2455 | memory_region_transaction_commit(); |
2456 | } |
2457 | |
2458 | void memory_region_set_size(MemoryRegion *mr, uint64_t size) |
2459 | { |
2460 | Int128 s = int128_make64(size); |
2461 | |
2462 | if (size == UINT64_MAX) { |
2463 | s = int128_2_64(); |
2464 | } |
2465 | if (int128_eq(s, mr->size)) { |
2466 | return; |
2467 | } |
2468 | memory_region_transaction_begin(); |
2469 | mr->size = s; |
2470 | memory_region_update_pending = true; |
2471 | memory_region_transaction_commit(); |
2472 | } |
2473 | |
2474 | static void memory_region_readd_subregion(MemoryRegion *mr) |
2475 | { |
2476 | MemoryRegion *container = mr->container; |
2477 | |
2478 | if (container) { |
2479 | memory_region_transaction_begin(); |
2480 | memory_region_ref(mr); |
2481 | memory_region_del_subregion(container, mr); |
2482 | mr->container = container; |
2483 | memory_region_update_container_subregions(mr); |
2484 | memory_region_unref(mr); |
2485 | memory_region_transaction_commit(); |
2486 | } |
2487 | } |
2488 | |
2489 | void memory_region_set_address(MemoryRegion *mr, hwaddr addr) |
2490 | { |
2491 | if (addr != mr->addr) { |
2492 | mr->addr = addr; |
2493 | memory_region_readd_subregion(mr); |
2494 | } |
2495 | } |
2496 | |
2497 | void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset) |
2498 | { |
2499 | assert(mr->alias); |
2500 | |
2501 | if (offset == mr->alias_offset) { |
2502 | return; |
2503 | } |
2504 | |
2505 | memory_region_transaction_begin(); |
2506 | mr->alias_offset = offset; |
2507 | memory_region_update_pending |= mr->enabled; |
2508 | memory_region_transaction_commit(); |
2509 | } |
2510 | |
2511 | uint64_t memory_region_get_alignment(const MemoryRegion *mr) |
2512 | { |
2513 | return mr->align; |
2514 | } |
2515 | |
2516 | static int cmp_flatrange_addr(const void *addr_, const void *fr_) |
2517 | { |
2518 | const AddrRange *addr = addr_; |
2519 | const FlatRange *fr = fr_; |
2520 | |
2521 | if (int128_le(addrrange_end(*addr), fr->addr.start)) { |
2522 | return -1; |
2523 | } else if (int128_ge(addr->start, addrrange_end(fr->addr))) { |
2524 | return 1; |
2525 | } |
2526 | return 0; |
2527 | } |
2528 | |
2529 | static FlatRange *flatview_lookup(FlatView *view, AddrRange addr) |
2530 | { |
2531 | return bsearch(&addr, view->ranges, view->nr, |
2532 | sizeof(FlatRange), cmp_flatrange_addr); |
2533 | } |
2534 | |
2535 | bool memory_region_is_mapped(MemoryRegion *mr) |
2536 | { |
    return mr->container != NULL;
2538 | } |
2539 | |
2540 | /* Same as memory_region_find, but it does not add a reference to the |
2541 | * returned region. It must be called from an RCU critical section. |
2542 | */ |
2543 | static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr, |
2544 | hwaddr addr, uint64_t size) |
2545 | { |
2546 | MemoryRegionSection ret = { .mr = NULL }; |
2547 | MemoryRegion *root; |
2548 | AddressSpace *as; |
2549 | AddrRange range; |
2550 | FlatView *view; |
2551 | FlatRange *fr; |
2552 | |
2553 | addr += mr->addr; |
2554 | for (root = mr; root->container; ) { |
2555 | root = root->container; |
2556 | addr += root->addr; |
2557 | } |
2558 | |
2559 | as = memory_region_to_address_space(root); |
2560 | if (!as) { |
2561 | return ret; |
2562 | } |
2563 | range = addrrange_make(int128_make64(addr), int128_make64(size)); |
2564 | |
2565 | view = address_space_to_flatview(as); |
2566 | fr = flatview_lookup(view, range); |
2567 | if (!fr) { |
2568 | return ret; |
2569 | } |
2570 | |
2571 | while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) { |
2572 | --fr; |
2573 | } |
2574 | |
2575 | ret.mr = fr->mr; |
2576 | ret.fv = view; |
2577 | range = addrrange_intersection(range, fr->addr); |
2578 | ret.offset_within_region = fr->offset_in_region; |
2579 | ret.offset_within_region += int128_get64(int128_sub(range.start, |
2580 | fr->addr.start)); |
2581 | ret.size = range.size; |
2582 | ret.offset_within_address_space = int128_get64(range.start); |
2583 | ret.readonly = fr->readonly; |
2584 | ret.nonvolatile = fr->nonvolatile; |
2585 | return ret; |
2586 | } |
2587 | |
2588 | MemoryRegionSection memory_region_find(MemoryRegion *mr, |
2589 | hwaddr addr, uint64_t size) |
2590 | { |
2591 | MemoryRegionSection ret; |
2592 | rcu_read_lock(); |
2593 | ret = memory_region_find_rcu(mr, addr, size); |
2594 | if (ret.mr) { |
2595 | memory_region_ref(ret.mr); |
2596 | } |
2597 | rcu_read_unlock(); |
2598 | return ret; |
2599 | } |
2600 | |
2601 | bool memory_region_present(MemoryRegion *container, hwaddr addr) |
2602 | { |
2603 | MemoryRegion *mr; |
2604 | |
2605 | rcu_read_lock(); |
2606 | mr = memory_region_find_rcu(container, addr, 1).mr; |
2607 | rcu_read_unlock(); |
2608 | return mr && mr != container; |
2609 | } |
2610 | |
2611 | void memory_global_dirty_log_sync(void) |
2612 | { |
2613 | memory_region_sync_dirty_bitmap(NULL); |
2614 | } |
2615 | |
2616 | void memory_global_after_dirty_log_sync(void) |
2617 | { |
2618 | MEMORY_LISTENER_CALL_GLOBAL(log_global_after_sync, Forward); |
2619 | } |
2620 | |
2621 | static VMChangeStateEntry *vmstate_change; |
2622 | |
2623 | void memory_global_dirty_log_start(void) |
2624 | { |
2625 | if (vmstate_change) { |
2626 | qemu_del_vm_change_state_handler(vmstate_change); |
2627 | vmstate_change = NULL; |
2628 | } |
2629 | |
2630 | global_dirty_log = true; |
2631 | |
2632 | MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward); |
2633 | |
2634 | /* Refresh DIRTY_MEMORY_MIGRATION bit. */ |
2635 | memory_region_transaction_begin(); |
2636 | memory_region_update_pending = true; |
2637 | memory_region_transaction_commit(); |
2638 | } |
2639 | |
2640 | static void memory_global_dirty_log_do_stop(void) |
2641 | { |
2642 | global_dirty_log = false; |
2643 | |
2644 | /* Refresh DIRTY_MEMORY_MIGRATION bit. */ |
2645 | memory_region_transaction_begin(); |
2646 | memory_region_update_pending = true; |
2647 | memory_region_transaction_commit(); |
2648 | |
2649 | MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse); |
2650 | } |
2651 | |
2652 | static void memory_vm_change_state_handler(void *opaque, int running, |
2653 | RunState state) |
2654 | { |
2655 | if (running) { |
2656 | memory_global_dirty_log_do_stop(); |
2657 | |
2658 | if (vmstate_change) { |
2659 | qemu_del_vm_change_state_handler(vmstate_change); |
2660 | vmstate_change = NULL; |
2661 | } |
2662 | } |
2663 | } |
2664 | |
2665 | void memory_global_dirty_log_stop(void) |
2666 | { |
2667 | if (!runstate_is_running()) { |
2668 | if (vmstate_change) { |
2669 | return; |
2670 | } |
2671 | vmstate_change = qemu_add_vm_change_state_handler( |
2672 | memory_vm_change_state_handler, NULL); |
2673 | return; |
2674 | } |
2675 | |
2676 | memory_global_dirty_log_do_stop(); |
2677 | } |
2678 | |
2679 | static void listener_add_address_space(MemoryListener *listener, |
2680 | AddressSpace *as) |
2681 | { |
2682 | FlatView *view; |
2683 | FlatRange *fr; |
2684 | |
2685 | if (listener->begin) { |
2686 | listener->begin(listener); |
2687 | } |
2688 | if (global_dirty_log) { |
2689 | if (listener->log_global_start) { |
2690 | listener->log_global_start(listener); |
2691 | } |
2692 | } |
2693 | |
2694 | view = address_space_get_flatview(as); |
2695 | FOR_EACH_FLAT_RANGE(fr, view) { |
2696 | MemoryRegionSection section = section_from_flat_range(fr, view); |
2697 | |
        if (listener->region_add) {
            listener->region_add(listener, &section);
        }
        if (fr->dirty_log_mask && listener->log_start) {
            listener->log_start(listener, &section, 0, fr->dirty_log_mask);
        }
2704 | } |
2705 | if (listener->commit) { |
2706 | listener->commit(listener); |
2707 | } |
2708 | flatview_unref(view); |
2709 | } |
2710 | |
2711 | static void listener_del_address_space(MemoryListener *listener, |
2712 | AddressSpace *as) |
2713 | { |
2714 | FlatView *view; |
2715 | FlatRange *fr; |
2716 | |
2717 | if (listener->begin) { |
2718 | listener->begin(listener); |
2719 | } |
2720 | view = address_space_get_flatview(as); |
2721 | FOR_EACH_FLAT_RANGE(fr, view) { |
2722 | MemoryRegionSection section = section_from_flat_range(fr, view); |
2723 | |
        if (fr->dirty_log_mask && listener->log_stop) {
            listener->log_stop(listener, &section, fr->dirty_log_mask, 0);
        }
        if (listener->region_del) {
            listener->region_del(listener, &section);
        }
2730 | } |
2731 | if (listener->commit) { |
2732 | listener->commit(listener); |
2733 | } |
2734 | flatview_unref(view); |
2735 | } |
2736 | |
2737 | void memory_listener_register(MemoryListener *listener, AddressSpace *as) |
2738 | { |
2739 | MemoryListener *other = NULL; |
2740 | |
2741 | listener->address_space = as; |
2742 | if (QTAILQ_EMPTY(&memory_listeners) |
2743 | || listener->priority >= QTAILQ_LAST(&memory_listeners)->priority) { |
2744 | QTAILQ_INSERT_TAIL(&memory_listeners, listener, link); |
2745 | } else { |
2746 | QTAILQ_FOREACH(other, &memory_listeners, link) { |
2747 | if (listener->priority < other->priority) { |
2748 | break; |
2749 | } |
2750 | } |
2751 | QTAILQ_INSERT_BEFORE(other, listener, link); |
2752 | } |
2753 | |
2754 | if (QTAILQ_EMPTY(&as->listeners) |
2755 | || listener->priority >= QTAILQ_LAST(&as->listeners)->priority) { |
2756 | QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as); |
2757 | } else { |
2758 | QTAILQ_FOREACH(other, &as->listeners, link_as) { |
2759 | if (listener->priority < other->priority) { |
2760 | break; |
2761 | } |
2762 | } |
2763 | QTAILQ_INSERT_BEFORE(other, listener, link_as); |
2764 | } |
2765 | |
2766 | listener_add_address_space(listener, as); |
2767 | } |
2768 | |
2769 | void memory_listener_unregister(MemoryListener *listener) |
2770 | { |
2771 | if (!listener->address_space) { |
2772 | return; |
2773 | } |
2774 | |
2775 | listener_del_address_space(listener, listener->address_space); |
2776 | QTAILQ_REMOVE(&memory_listeners, listener, link); |
2777 | QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as); |
2778 | listener->address_space = NULL; |
2779 | } |
2780 | |
2781 | void address_space_remove_listeners(AddressSpace *as) |
2782 | { |
2783 | while (!QTAILQ_EMPTY(&as->listeners)) { |
2784 | memory_listener_unregister(QTAILQ_FIRST(&as->listeners)); |
2785 | } |
2786 | } |
2787 | |
2788 | void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name) |
2789 | { |
2790 | memory_region_ref(root); |
2791 | as->root = root; |
2792 | as->current_map = NULL; |
2793 | as->ioeventfd_nb = 0; |
2794 | as->ioeventfds = NULL; |
2795 | QTAILQ_INIT(&as->listeners); |
2796 | QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link); |
    as->name = g_strdup(name ? name : "anonymous");
2798 | address_space_update_topology(as); |
2799 | address_space_update_ioeventfds(as); |
2800 | } |
2801 | |
2802 | static void do_address_space_destroy(AddressSpace *as) |
2803 | { |
2804 | assert(QTAILQ_EMPTY(&as->listeners)); |
2805 | |
2806 | flatview_unref(as->current_map); |
2807 | g_free(as->name); |
2808 | g_free(as->ioeventfds); |
2809 | memory_region_unref(as->root); |
2810 | } |
2811 | |
2812 | void address_space_destroy(AddressSpace *as) |
2813 | { |
2814 | MemoryRegion *root = as->root; |
2815 | |
2816 | /* Flush out anything from MemoryListeners listening in on this */ |
2817 | memory_region_transaction_begin(); |
2818 | as->root = NULL; |
2819 | memory_region_transaction_commit(); |
2820 | QTAILQ_REMOVE(&address_spaces, as, address_spaces_link); |
2821 | |
2822 | /* At this point, as->dispatch and as->current_map are dummy |
2823 | * entries that the guest should never use. Wait for the old |
2824 | * values to expire before freeing the data. |
2825 | */ |
2826 | as->root = root; |
2827 | call_rcu(as, do_address_space_destroy, rcu); |
2828 | } |
2829 | |
2830 | static const char *memory_region_type(MemoryRegion *mr) |
2831 | { |
    if (memory_region_is_ram_device(mr)) {
        return "ramd";
    } else if (memory_region_is_romd(mr)) {
        return "romd";
    } else if (memory_region_is_rom(mr)) {
        return "rom";
    } else if (memory_region_is_ram(mr)) {
        return "ram";
    } else {
        return "i/o";
2842 | } |
2843 | } |
2844 | |
2845 | typedef struct MemoryRegionList MemoryRegionList; |
2846 | |
2847 | struct MemoryRegionList { |
2848 | const MemoryRegion *mr; |
2849 | QTAILQ_ENTRY(MemoryRegionList) mrqueue; |
2850 | }; |
2851 | |
2852 | typedef QTAILQ_HEAD(, MemoryRegionList) MemoryRegionListHead; |
2853 | |
2854 | #define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \ |
2855 | int128_sub((size), int128_one())) : 0) |
2856 | #define MTREE_INDENT " " |
2857 | |
2858 | static void mtree_expand_owner(const char *label, Object *obj) |
2859 | { |
2860 | DeviceState *dev = (DeviceState *) object_dynamic_cast(obj, TYPE_DEVICE); |
2861 | |
2862 | qemu_printf(" %s:{%s" , label, dev ? "dev" : "obj" ); |
2863 | if (dev && dev->id) { |
2864 | qemu_printf(" id=%s" , dev->id); |
2865 | } else { |
2866 | gchar *canonical_path = object_get_canonical_path(obj); |
2867 | if (canonical_path) { |
2868 | qemu_printf(" path=%s" , canonical_path); |
2869 | g_free(canonical_path); |
2870 | } else { |
2871 | qemu_printf(" type=%s" , object_get_typename(obj)); |
2872 | } |
2873 | } |
2874 | qemu_printf("}" ); |
2875 | } |
2876 | |
2877 | static void mtree_print_mr_owner(const MemoryRegion *mr) |
2878 | { |
2879 | Object *owner = mr->owner; |
2880 | Object *parent = memory_region_owner((MemoryRegion *)mr); |
2881 | |
2882 | if (!owner && !parent) { |
2883 | qemu_printf(" orphan" ); |
2884 | return; |
2885 | } |
2886 | if (owner) { |
2887 | mtree_expand_owner("owner" , owner); |
2888 | } |
2889 | if (parent && parent != owner) { |
2890 | mtree_expand_owner("parent" , parent); |
2891 | } |
2892 | } |
2893 | |
2894 | static void mtree_print_mr(const MemoryRegion *mr, unsigned int level, |
2895 | hwaddr base, |
2896 | MemoryRegionListHead *alias_print_queue, |
2897 | bool owner) |
2898 | { |
2899 | MemoryRegionList *new_ml, *ml, *next_ml; |
2900 | MemoryRegionListHead submr_print_queue; |
2901 | const MemoryRegion *submr; |
2902 | unsigned int i; |
2903 | hwaddr cur_start, cur_end; |
2904 | |
2905 | if (!mr) { |
2906 | return; |
2907 | } |
2908 | |
2909 | for (i = 0; i < level; i++) { |
2910 | qemu_printf(MTREE_INDENT); |
2911 | } |
2912 | |
2913 | cur_start = base + mr->addr; |
2914 | cur_end = cur_start + MR_SIZE(mr->size); |
2915 | |
2916 | /* |
2917 | * Try to detect overflow of memory region. This should never |
2918 | * happen normally. When it happens, we dump something to warn the |
2919 | * user who is observing this. |
2920 | */ |
2921 | if (cur_start < base || cur_end < cur_start) { |
2922 | qemu_printf("[DETECTED OVERFLOW!] " ); |
2923 | } |
2924 | |
2925 | if (mr->alias) { |
2926 | MemoryRegionList *ml; |
2927 | bool found = false; |
2928 | |
2929 | /* check if the alias is already in the queue */ |
2930 | QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) { |
2931 | if (ml->mr == mr->alias) { |
2932 | found = true; |
2933 | } |
2934 | } |
2935 | |
2936 | if (!found) { |
2937 | ml = g_new(MemoryRegionList, 1); |
2938 | ml->mr = mr->alias; |
2939 | QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue); |
2940 | } |
        qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx
                    " (prio %d, %s%s): alias %s @%s " TARGET_FMT_plx
                    "-" TARGET_FMT_plx "%s",
                    cur_start, cur_end,
                    mr->priority,
                    mr->nonvolatile ? "nv-" : "",
                    memory_region_type((MemoryRegion *)mr),
                    memory_region_name(mr),
                    memory_region_name(mr->alias),
                    mr->alias_offset,
                    mr->alias_offset + MR_SIZE(mr->size),
                    mr->enabled ? "" : " [disabled]");
2953 | if (owner) { |
2954 | mtree_print_mr_owner(mr); |
2955 | } |
2956 | } else { |
        qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx
                    " (prio %d, %s%s): %s%s",
                    cur_start, cur_end,
                    mr->priority,
                    mr->nonvolatile ? "nv-" : "",
                    memory_region_type((MemoryRegion *)mr),
                    memory_region_name(mr),
                    mr->enabled ? "" : " [disabled]");
2965 | if (owner) { |
2966 | mtree_print_mr_owner(mr); |
2967 | } |
2968 | } |
2969 | qemu_printf("\n" ); |
2970 | |
2971 | QTAILQ_INIT(&submr_print_queue); |
2972 | |
2973 | QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) { |
2974 | new_ml = g_new(MemoryRegionList, 1); |
2975 | new_ml->mr = submr; |
2976 | QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) { |
2977 | if (new_ml->mr->addr < ml->mr->addr || |
2978 | (new_ml->mr->addr == ml->mr->addr && |
2979 | new_ml->mr->priority > ml->mr->priority)) { |
2980 | QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue); |
2981 | new_ml = NULL; |
2982 | break; |
2983 | } |
2984 | } |
2985 | if (new_ml) { |
2986 | QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue); |
2987 | } |
2988 | } |
2989 | |
2990 | QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) { |
2991 | mtree_print_mr(ml->mr, level + 1, cur_start, |
2992 | alias_print_queue, owner); |
2993 | } |
2994 | |
2995 | QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) { |
2996 | g_free(ml); |
2997 | } |
2998 | } |
2999 | |
3000 | struct FlatViewInfo { |
3001 | int counter; |
3002 | bool dispatch_tree; |
3003 | bool owner; |
3004 | AccelClass *ac; |
3005 | const char *ac_name; |
3006 | }; |
3007 | |
3008 | static void mtree_print_flatview(gpointer key, gpointer value, |
3009 | gpointer user_data) |
3010 | { |
3011 | FlatView *view = key; |
3012 | GArray *fv_address_spaces = value; |
3013 | struct FlatViewInfo *fvi = user_data; |
3014 | FlatRange *range = &view->ranges[0]; |
3015 | MemoryRegion *mr; |
3016 | int n = view->nr; |
3017 | int i; |
3018 | AddressSpace *as; |
3019 | |
3020 | qemu_printf("FlatView #%d\n" , fvi->counter); |
3021 | ++fvi->counter; |
3022 | |
3023 | for (i = 0; i < fv_address_spaces->len; ++i) { |
3024 | as = g_array_index(fv_address_spaces, AddressSpace*, i); |
3025 | qemu_printf(" AS \"%s\", root: %s" , |
3026 | as->name, memory_region_name(as->root)); |
3027 | if (as->root->alias) { |
3028 | qemu_printf(", alias %s" , memory_region_name(as->root->alias)); |
3029 | } |
3030 | qemu_printf("\n" ); |
3031 | } |
3032 | |
3033 | qemu_printf(" Root memory region: %s\n" , |
3034 | view->root ? memory_region_name(view->root) : "(none)" ); |
3035 | |
3036 | if (n <= 0) { |
        qemu_printf(MTREE_INDENT "No rendered FlatView\n\n");
3038 | return; |
3039 | } |
3040 | |
3041 | while (n--) { |
3042 | mr = range->mr; |
3043 | if (range->offset_in_region) { |
            qemu_printf(MTREE_INDENT TARGET_FMT_plx "-" TARGET_FMT_plx
                        " (prio %d, %s%s): %s @" TARGET_FMT_plx,
                        int128_get64(range->addr.start),
                        int128_get64(range->addr.start)
                        + MR_SIZE(range->addr.size),
                        mr->priority,
                        range->nonvolatile ? "nv-" : "",
                        range->readonly ? "rom" : memory_region_type(mr),
                        memory_region_name(mr),
                        range->offset_in_region);
        } else {
            qemu_printf(MTREE_INDENT TARGET_FMT_plx "-" TARGET_FMT_plx
                        " (prio %d, %s%s): %s",
                        int128_get64(range->addr.start),
                        int128_get64(range->addr.start)
                        + MR_SIZE(range->addr.size),
                        mr->priority,
                        range->nonvolatile ? "nv-" : "",
                        range->readonly ? "rom" : memory_region_type(mr),
                        memory_region_name(mr));
3064 | } |
3065 | if (fvi->owner) { |
3066 | mtree_print_mr_owner(mr); |
3067 | } |
3068 | |
3069 | if (fvi->ac) { |
3070 | for (i = 0; i < fv_address_spaces->len; ++i) { |
3071 | as = g_array_index(fv_address_spaces, AddressSpace*, i); |
3072 | if (fvi->ac->has_memory(current_machine, as, |
3073 | int128_get64(range->addr.start), |
3074 | MR_SIZE(range->addr.size) + 1)) { |
3075 | qemu_printf(" %s" , fvi->ac_name); |
3076 | } |
3077 | } |
3078 | } |
3079 | qemu_printf("\n" ); |
3080 | range++; |
3081 | } |
3082 | |
3083 | #if !defined(CONFIG_USER_ONLY) |
3084 | if (fvi->dispatch_tree && view->root) { |
3085 | mtree_print_dispatch(view->dispatch, view->root); |
3086 | } |
3087 | #endif |
3088 | |
3089 | qemu_printf("\n" ); |
3090 | } |
3091 | |
3092 | static gboolean mtree_info_flatview_free(gpointer key, gpointer value, |
3093 | gpointer user_data) |
3094 | { |
3095 | FlatView *view = key; |
3096 | GArray *fv_address_spaces = value; |
3097 | |
3098 | g_array_unref(fv_address_spaces); |
3099 | flatview_unref(view); |
3100 | |
3101 | return true; |
3102 | } |
3103 | |
3104 | void mtree_info(bool flatview, bool dispatch_tree, bool owner) |
3105 | { |
3106 | MemoryRegionListHead ml_head; |
3107 | MemoryRegionList *ml, *ml2; |
3108 | AddressSpace *as; |
3109 | |
3110 | if (flatview) { |
3111 | FlatView *view; |
3112 | struct FlatViewInfo fvi = { |
3113 | .counter = 0, |
3114 | .dispatch_tree = dispatch_tree, |
3115 | .owner = owner, |
3116 | }; |
3117 | GArray *fv_address_spaces; |
3118 | GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal); |
3119 | AccelClass *ac = ACCEL_GET_CLASS(current_machine->accelerator); |
3120 | |
3121 | if (ac->has_memory) { |
3122 | fvi.ac = ac; |
3123 | fvi.ac_name = current_machine->accel ? current_machine->accel : |
3124 | object_class_get_name(OBJECT_CLASS(ac)); |
3125 | } |
3126 | |
3127 | /* Gather all FVs in one table */ |
3128 | QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { |
3129 | view = address_space_get_flatview(as); |
3130 | |
3131 | fv_address_spaces = g_hash_table_lookup(views, view); |
3132 | if (!fv_address_spaces) { |
3133 | fv_address_spaces = g_array_new(false, false, sizeof(as)); |
3134 | g_hash_table_insert(views, view, fv_address_spaces); |
3135 | } |
3136 | |
3137 | g_array_append_val(fv_address_spaces, as); |
3138 | } |
3139 | |
3140 | /* Print */ |
3141 | g_hash_table_foreach(views, mtree_print_flatview, &fvi); |
3142 | |
3143 | /* Free */ |
3144 | g_hash_table_foreach_remove(views, mtree_info_flatview_free, 0); |
3145 | g_hash_table_unref(views); |
3146 | |
3147 | return; |
3148 | } |
3149 | |
3150 | QTAILQ_INIT(&ml_head); |
3151 | |
3152 | QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { |
3153 | qemu_printf("address-space: %s\n" , as->name); |
3154 | mtree_print_mr(as->root, 1, 0, &ml_head, owner); |
3155 | qemu_printf("\n" ); |
3156 | } |
3157 | |
3158 | /* print aliased regions */ |
3159 | QTAILQ_FOREACH(ml, &ml_head, mrqueue) { |
3160 | qemu_printf("memory-region: %s\n" , memory_region_name(ml->mr)); |
3161 | mtree_print_mr(ml->mr, 1, 0, &ml_head, owner); |
3162 | qemu_printf("\n" ); |
3163 | } |
3164 | |
3165 | QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) { |
3166 | g_free(ml); |
3167 | } |
3168 | } |
3169 | |
3170 | void memory_region_init_ram(MemoryRegion *mr, |
3171 | struct Object *owner, |
3172 | const char *name, |
3173 | uint64_t size, |
3174 | Error **errp) |
3175 | { |
3176 | DeviceState *owner_dev; |
3177 | Error *err = NULL; |
3178 | |
3179 | memory_region_init_ram_nomigrate(mr, owner, name, size, &err); |
3180 | if (err) { |
3181 | error_propagate(errp, err); |
3182 | return; |
3183 | } |
3184 | /* This will assert if owner is neither NULL nor a DeviceState. |
3185 | * We only want the owner here for the purposes of defining a |
3186 | * unique name for migration. TODO: Ideally we should implement |
3187 | * a naming scheme for Objects which are not DeviceStates, in |
3188 | * which case we can relax this restriction. |
3189 | */ |
3190 | owner_dev = DEVICE(owner); |
3191 | vmstate_register_ram(mr, owner_dev); |
3192 | } |
3193 | |
3194 | void memory_region_init_rom(MemoryRegion *mr, |
3195 | struct Object *owner, |
3196 | const char *name, |
3197 | uint64_t size, |
3198 | Error **errp) |
3199 | { |
3200 | DeviceState *owner_dev; |
3201 | Error *err = NULL; |
3202 | |
3203 | memory_region_init_rom_nomigrate(mr, owner, name, size, &err); |
3204 | if (err) { |
3205 | error_propagate(errp, err); |
3206 | return; |
3207 | } |
3208 | /* This will assert if owner is neither NULL nor a DeviceState. |
3209 | * We only want the owner here for the purposes of defining a |
3210 | * unique name for migration. TODO: Ideally we should implement |
3211 | * a naming scheme for Objects which are not DeviceStates, in |
3212 | * which case we can relax this restriction. |
3213 | */ |
3214 | owner_dev = DEVICE(owner); |
3215 | vmstate_register_ram(mr, owner_dev); |
3216 | } |
3217 | |
3218 | void memory_region_init_rom_device(MemoryRegion *mr, |
3219 | struct Object *owner, |
3220 | const MemoryRegionOps *ops, |
3221 | void *opaque, |
3222 | const char *name, |
3223 | uint64_t size, |
3224 | Error **errp) |
3225 | { |
3226 | DeviceState *owner_dev; |
3227 | Error *err = NULL; |
3228 | |
3229 | memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque, |
3230 | name, size, &err); |
3231 | if (err) { |
3232 | error_propagate(errp, err); |
3233 | return; |
3234 | } |
3235 | /* This will assert if owner is neither NULL nor a DeviceState. |
3236 | * We only want the owner here for the purposes of defining a |
3237 | * unique name for migration. TODO: Ideally we should implement |
3238 | * a naming scheme for Objects which are not DeviceStates, in |
3239 | * which case we can relax this restriction. |
3240 | */ |
3241 | owner_dev = DEVICE(owner); |
3242 | vmstate_register_ram(mr, owner_dev); |
3243 | } |
3244 | |
3245 | static const TypeInfo memory_region_info = { |
3246 | .parent = TYPE_OBJECT, |
3247 | .name = TYPE_MEMORY_REGION, |
3248 | .class_size = sizeof(MemoryRegionClass), |
3249 | .instance_size = sizeof(MemoryRegion), |
3250 | .instance_init = memory_region_initfn, |
3251 | .instance_finalize = memory_region_finalize, |
3252 | }; |
3253 | |
3254 | static const TypeInfo iommu_memory_region_info = { |
3255 | .parent = TYPE_MEMORY_REGION, |
3256 | .name = TYPE_IOMMU_MEMORY_REGION, |
3257 | .class_size = sizeof(IOMMUMemoryRegionClass), |
3258 | .instance_size = sizeof(IOMMUMemoryRegion), |
3259 | .instance_init = iommu_memory_region_initfn, |
3260 | .abstract = true, |
3261 | }; |
3262 | |
3263 | static void memory_register_types(void) |
3264 | { |
3265 | type_register_static(&memory_region_info); |
3266 | type_register_static(&iommu_memory_region_info); |
3267 | } |
3268 | |
3269 | type_init(memory_register_types) |
3270 | |
3271 | MemOp devend_memop(enum device_endian end) |
3272 | { |
3273 | static MemOp conv[] = { |
3274 | [DEVICE_LITTLE_ENDIAN] = MO_LE, |
3275 | [DEVICE_BIG_ENDIAN] = MO_BE, |
3276 | [DEVICE_NATIVE_ENDIAN] = MO_TE, |
3277 | [DEVICE_HOST_ENDIAN] = 0, |
3278 | }; |
3279 | switch (end) { |
3280 | case DEVICE_LITTLE_ENDIAN: |
3281 | case DEVICE_BIG_ENDIAN: |
3282 | case DEVICE_NATIVE_ENDIAN: |
3283 | return conv[end]; |
3284 | default: |
3285 | g_assert_not_reached(); |
3286 | } |
3287 | } |
3288 | |