/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef EXEC_ALL_H
#define EXEC_ALL_H

#include "cpu.h"
#include "exec/tb-context.h"
#include "sysemu/cpus.h"

/* Allow translation results to be inspected; the slowdown should be negligible, so we leave it enabled */
#define DEBUG_DISAS

/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode. Define tb_page_addr_t to be an appropriate
   type. */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#define TB_PAGE_ADDR_FMT TARGET_ABI_FMT_lx
#else
typedef ram_addr_t tb_page_addr_t;
#define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
#endif

#include "qemu/log.h"

void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns);
void restore_state_to_opc(CPUArchState *env, TranslationBlock *tb,
                          target_ulong *data);

void cpu_gen_init(void);

/**
 * cpu_restore_state:
 * @cpu: the vCPU whose state is to be restored
 * @searched_pc: the host PC the fault occurred at
 * @will_exit: true if the TB being executed will be interrupted after
 *             some CPU adjustments. Required for maintaining the
 *             correct icount values.
 * @return: true if state was restored, false otherwise
 *
 * Attempt to restore the state for a fault occurring in translated
 * code. If @searched_pc is not in translated code, no state is
 * restored and the function returns false.
 */
bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc, bool will_exit);
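
/*
 * Hedged usage sketch (not part of this header): a target's fault
 * handler typically rolls the vCPU state back to the faulting guest
 * insn before raising the exception. The function and exception
 * names below are hypothetical, for illustration only.
 *
 *   static void do_fault(CPUState *cs, uintptr_t retaddr)
 *   {
 *       // retaddr is the host PC inside the translated code
 *       cpu_restore_state(cs, retaddr, true); // true: we will exit the TB
 *       cs->exception_index = EXCP_FAULT;     // hypothetical excp number
 *       cpu_loop_exit(cs);
 *   }
 */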

void QEMU_NORETURN cpu_loop_exit_noexc(CPUState *cpu);
void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags,
                              int cflags);

void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);

#if !defined(CONFIG_USER_ONLY)
void cpu_reloading_memory_map(void);
/**
 * cpu_address_space_init:
 * @cpu: CPU to add this address space to
 * @asidx: integer index of this address space
 * @prefix: prefix to be used as name of address space
 * @mr: the root memory region of address space
 *
 * Add the specified address space to the CPU's cpu_ases list.
 * The address space added with @asidx 0 is the one used for the
 * convenience pointer cpu->as.
 * The target-specific code which registers ASes is responsible
 * for defining what semantics address space 0, 1, 2, etc. have.
 *
 * Before the first call to this function, the caller must set
 * cpu->num_ases to the total number of address spaces it needs
 * to support.
 *
 * Note that with KVM only one address space is supported.
 */
void cpu_address_space_init(CPUState *cpu, int asidx,
                            const char *prefix, MemoryRegion *mr);
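
/*
 * Illustrative sketch (assumed target realize code; secure_mr is a
 * hypothetical memory region, and the prefixes are just examples):
 * registering a secure and a non-secure view of memory.
 *
 *   cpu->num_ases = 2;  // must be set before the first init call
 *   cpu_address_space_init(cpu, 0, "cpu-memory", get_system_memory());
 *   cpu_address_space_init(cpu, 1, "cpu-secure-memory", secure_mr);
 */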
#endif

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
/* cputlb.c */
/**
 * tlb_init - initialize a CPU's TLB
 * @cpu: CPU whose TLB should be initialized
 */
void tlb_init(CPUState *cpu);
/**
 * tlb_flush_page:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
void tlb_flush_page(CPUState *cpu, target_ulong addr);
/**
 * tlb_flush_page_all_cpus:
 * @src: source vCPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all
 * MMU indexes.
 */
void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr);
/**
 * tlb_flush_page_all_cpus_synced:
 * @src: source vCPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all MMU indexes, like
 * tlb_flush_page_all_cpus, except that the source vCPU's work is
 * scheduled as safe work, meaning all flushes will be complete once
 * the source vCPU's safe work is complete. This will depend on when
 * the guest's translation ends the TB.
 */
void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr);
/**
 * tlb_flush:
 * @cpu: CPU whose TLB should be flushed
 *
 * Flush the entire TLB for the specified CPU. Most CPU architectures
 * allow the implementation to drop entries from the TLB at any time
 * so this is generally safe. If more selective flushing is required
 * use one of the other functions for efficiency.
 */
void tlb_flush(CPUState *cpu);
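
/*
 * For example (hypothetical target code), a full flush is the usual
 * response to an event that invalidates all cached translations at
 * once, such as a change of the page table base register:
 *
 *   tlb_flush(cs);  // drop every cached TLB entry for this vCPU
 */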
/**
 * tlb_flush_all_cpus:
 * @src_cpu: source vCPU of the flush
 *
 * Flush the entire TLB of all CPUs.
 */
void tlb_flush_all_cpus(CPUState *src_cpu);
/**
 * tlb_flush_all_cpus_synced:
 * @src_cpu: source vCPU of the flush
 *
 * Like tlb_flush_all_cpus, except that the source vCPU's work is
 * scheduled as safe work, meaning all flushes will be complete once
 * the source vCPU's safe work is complete. This will depend on when
 * the guest's translation ends the TB.
 */
void tlb_flush_all_cpus_synced(CPUState *src_cpu);
/**
 * tlb_flush_page_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr,
                              uint16_t idxmap);
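
/*
 * Example (a sketch; the index assignments are hypothetical): with
 * MMU index 0 used for kernel mode and index 2 for user mode, flushing
 * one page from just those two indexes would be:
 *
 *   tlb_flush_page_by_mmuidx(cpu, addr, (1 << 0) | (1 << 2));
 */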
/**
 * tlb_flush_page_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
                                       uint16_t idxmap);
/**
 * tlb_flush_page_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified MMU
 * indexes, like tlb_flush_page_by_mmuidx_all_cpus, except that the
 * source vCPU's work is scheduled as safe work, meaning all flushes
 * will be complete once the source vCPU's safe work is complete. This
 * will depend on when the guest's translation ends the TB.
 */
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
                                              uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes, like tlb_flush_by_mmuidx_all_cpus, except that the
 * source vCPU's work is scheduled as safe work, meaning all flushes
 * will be complete once the source vCPU's safe work is complete. This
 * will depend on when the guest's translation ends the TB.
 */
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_set_page_with_attrs:
 * @cpu: CPU to add this TLB entry for
 * @vaddr: virtual address of page to add entry for
 * @paddr: physical address of the page
 * @attrs: memory transaction attributes
 * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
 * @mmu_idx: MMU index to insert TLB entry for
 * @size: size of the page in bytes
 *
 * Add an entry to this CPU's TLB (a mapping from virtual address
 * @vaddr to physical address @paddr) with the specified memory
 * transaction attributes. This is generally called by the target CPU
 * specific code after it has been called through the tlb_fill()
 * entry point and performed a successful page table walk to find
 * the physical address and attributes for the virtual address
 * which provoked the TLB miss.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs,
                             int prot, int mmu_idx, target_ulong size);
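
/*
 * Sketch of a typical caller (hypothetical target code; the page
 * table walker and the fault path are assumptions for illustration):
 *
 *   if (walk_page_table(cs, vaddr, mmu_idx, &paddr, &prot)) {
 *       tlb_set_page_with_attrs(cs, vaddr & TARGET_PAGE_MASK,
 *                               paddr & TARGET_PAGE_MASK,
 *                               MEMTXATTRS_UNSPECIFIED, prot,
 *                               mmu_idx, TARGET_PAGE_SIZE);
 *   } else {
 *       // raise the target's page fault exception instead
 *   }
 */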
/* tlb_set_page:
 *
 * This function is equivalent to calling tlb_set_page_with_attrs()
 * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
 * as a convenience for CPUs which don't use memory transaction attributes.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
#else
static inline void tlb_init(CPUState *cpu)
{
}
static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus_synced(CPUState *src,
                                                  target_ulong addr)
{
}
static inline void tlb_flush(CPUState *cpu)
{
}
static inline void tlb_flush_all_cpus(CPUState *src_cpu)
{
}
static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
}
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            target_ulong addr, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu,
                                                     target_ulong addr,
                                                     uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                            target_ulong addr,
                                                            uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                       uint16_t idxmap)
{
}
#endif
void *probe_access(CPUArchState *env, target_ulong addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);

static inline void *probe_write(CPUArchState *env, target_ulong addr, int size,
                                int mmu_idx, uintptr_t retaddr)
{
    return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
}
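
/*
 * Hedged example: a helper that must not fault halfway through a
 * multi-byte store can probe the whole destination up front (the
 * helper name below is hypothetical):
 *
 *   void helper_store_pair(CPUArchState *env, target_ulong addr, ...)
 *   {
 *       int mmu_idx = cpu_mmu_index(env, false);
 *       probe_write(env, addr, 16, mmu_idx, GETPC());
 *       // all 16 bytes are now known to be writable
 *   }
 */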

#define CODE_GEN_ALIGN 16 /* must be >= the size of an icache line */

/* Estimated block size for TB allocation. */
/* ??? The following is based on a 2015 survey of x86_64 host output.
   Better would seem to be some sort of dynamically sized TB array,
   adapting to the block sizes actually being produced. */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 400
#else
#define CODE_GEN_AVG_BLOCK_SIZE 150
#endif

/*
 * Translation Cache-related fields of a TB.
 * This struct exists just for convenience; we keep track of TBs in a binary
 * search tree, and the only fields needed to compare TBs in the tree are
 * @ptr and @size.
 * Note: the address of search data can be obtained by adding @size to @ptr.
 */
struct tb_tc {
    void *ptr;    /* pointer to the translated code */
    size_t size;
};
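
/*
 * Concretely, given a struct tb_tc *tc, the note above means the
 * search data can be found at:
 *
 *   const uint8_t *search_data = (const uint8_t *)tc->ptr + tc->size;
 */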

struct TranslationBlock {
    target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint32_t flags; /* flags defining in which context the code was generated */
    uint16_t size;      /* size of target code for this block (1 <=
                           size <= TARGET_PAGE_SIZE) */
    uint16_t icount;
    uint32_t cflags;    /* compile flags */
#define CF_COUNT_MASK  0x00007fff
#define CF_LAST_IO     0x00008000 /* Last insn may be an IO access.  */
#define CF_NOCACHE     0x00010000 /* To be freed after execution */
#define CF_USE_ICOUNT  0x00020000
#define CF_INVALID     0x00040000 /* TB is stale. Set with @jmp_lock held */
#define CF_PARALLEL    0x00080000 /* Generate code for a parallel context */
#define CF_CLUSTER_MASK 0xff000000 /* Top 8 bits are cluster ID */
#define CF_CLUSTER_SHIFT 24
/* cflags' mask for hashing/comparison */
#define CF_HASH_MASK   \
    (CF_COUNT_MASK | CF_LAST_IO | CF_USE_ICOUNT | CF_PARALLEL | CF_CLUSTER_MASK)

    /* Per-vCPU dynamic tracing state used to generate this TB */
    uint32_t trace_vcpu_dstate;

    struct tb_tc tc;

    /* original tb when cflags has CF_NOCACHE */
    struct TranslationBlock *orig_tb;
    /* First and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[].
       The list is protected by the lock(s) of the TB's page(s). */
    uintptr_t page_next[2];
    tb_page_addr_t page_addr[2];

    /* jmp_lock placed here to fill a 4-byte hole. Its documentation is below */
    QemuSpin jmp_lock;

    /* The following data are used to directly call another TB from
     * the code of this one. This can be done either by emitting direct or
     * indirect native jump instructions. These jumps are reset so that the TB
     * just continues its execution. The TB can be linked to another one by
     * setting one of the jump targets (or patching the jump instruction). Only
     * two such jumps are supported.
     */
    uint16_t jmp_reset_offset[2]; /* offset of original jump target */
#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
    uintptr_t jmp_target_arg[2];  /* target address or offset */

    /*
     * Each TB has a NULL-terminated list (jmp_list_head) of incoming jumps.
     * Each TB can have two outgoing jumps, and therefore can participate
     * in two lists. The list entries are kept in jmp_list_next[2]. The least
     * significant bit (LSB) of the pointers in these lists is used to encode
     * which of the two list entries is to be used in the pointed TB.
     *
     * List traversals are protected by jmp_lock. The destination TB of each
     * outgoing jump is kept in jmp_dest[] so that the appropriate jmp_lock
     * can be acquired from any origin TB.
     *
     * jmp_dest[] are tagged pointers as well. The LSB is set when the TB is
     * being invalidated, so that no further outgoing jumps from it can be set.
     *
     * jmp_lock also protects the CF_INVALID cflag; a jump must not be chained
     * to a destination TB that has CF_INVALID set.
     */
    uintptr_t jmp_list_head;
    uintptr_t jmp_list_next[2];
    uintptr_t jmp_dest[2];
};

extern bool parallel_cpus;

/* Hide the atomic_read to make code a little easier on the eyes */
static inline uint32_t tb_cflags(const TranslationBlock *tb)
{
    return atomic_read(&tb->cflags);
}

/* current cflags for hashing/comparison */
static inline uint32_t curr_cflags(void)
{
    return (parallel_cpus ? CF_PARALLEL : 0)
         | (use_icount ? CF_USE_ICOUNT : 0);
}
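
/*
 * A minimal sketch: a TB lookup matching the current execution
 * context passes curr_cflags() as the cf_mask (tb_htable_lookup is
 * declared below; cluster bits are omitted for brevity):
 *
 *   TranslationBlock *tb;
 *   tb = tb_htable_lookup(cpu, pc, cs_base, flags, curr_cflags());
 */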

/* TranslationBlock invalidate API */
#if defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(target_ulong addr);
void tb_invalidate_phys_range(target_ulong start, target_ulong end);
#else
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
#endif
void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags,
                                   uint32_t cf_mask);
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);

/* GETPC is the true target of the return instruction that we'll execute. */
#if defined(CONFIG_TCG_INTERPRETER)
extern uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
#else
# define GETPC() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif
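
/*
 * Hedged example: GETPC() must be expanded in the helper that is
 * called directly from generated code, then passed down to callees
 * (the names below are hypothetical):
 *
 *   uint64_t helper_load_thing(CPUArchState *env, target_ulong addr)
 *   {
 *       uintptr_t ra = GETPC();  // capture at the outermost helper
 *       return do_load_thing(env, addr, ra);
 *   }
 */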

/* The true return address will often point to a host insn that is part of
   the next translated guest insn. Adjust the address backward to point to
   the middle of the call insn. Subtracting one would do the job except for
   several compressed mode architectures (arm, mips) which set the low bit
   to indicate the compressed mode; subtracting two works around that. It
   is also the case that there are no host ISAs that contain a call insn
   smaller than 4 bytes, so we don't worry about special-casing this. */
#define GETPC_ADJ 2

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_DEBUG_TCG)
void assert_no_pages_locked(void);
#else
static inline void assert_no_pages_locked(void)
{
}
#endif

#if !defined(CONFIG_USER_ONLY)

/**
 * iotlb_to_section:
 * @cpu: CPU performing the access
 * @index: TCG CPU IOTLB entry
 * @attrs: memory transaction attributes
 *
 * Given a TCG CPU IOTLB entry, return the MemoryRegionSection that
 * it refers to. @index will have been initially created and returned
 * by memory_region_section_get_iotlb().
 */
struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
                                             hwaddr index, MemTxAttrs attrs);
#endif

#if defined(CONFIG_USER_ONLY)
void mmap_lock(void);
void mmap_unlock(void);
bool have_mmap_lock(void);

static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1,
                                                target_ulong addr)
{
    return addr;
}
#else
static inline void mmap_lock(void) {}
static inline void mmap_unlock(void) {}

/* cputlb.c */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);

/* exec.c */
void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);

MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen,
                                  MemTxAttrs attrs, int *prot);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address);
#endif

/* vl.c */
extern int singlestep;

#endif