/*
 * emulator main execution loop
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"
#include "qemu/rcu.h"
#include "exec/tb-hash.h"
#include "exec/tb-lookup.h"
#include "exec/log.h"
#include "qemu/main-loop.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif
#include "sysemu/cpus.h"
#include "sysemu/replay.h"

/* -icount align implementation. */

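/*
 * Bookkeeping for -icount align: diff_clk tracks how far the guest
 * virtual clock is ahead of the host realtime clock (in ns; negative
 * means the guest is late), and last_cpu_icount remembers the icount
 * value seen at the previous alignment point.
 */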
typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have at most a 3ms advance (VM_CLOCK_ADVANCE is
 * in nanoseconds); the difference between the two clocks should
 * therefore oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

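/*
 * Resynchronise the guest and host clocks: if the guest's virtual
 * clock has run ahead of the host's realtime clock by more than
 * VM_CLOCK_ADVANCE, sleep the host thread until they line up again.
 */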
static void align_clocks(SyncClocks *sc, CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

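/*
 * Warn (at most every MAX_DELAY_PRINT_RATE ns, and at most
 * MAX_NB_PRINTS times in total) when the guest has fallen behind the
 * host.  threshold_delay is re-armed each time the delay grows past
 * it or shrinks by more than THRESHOLD_REDUCE seconds, so only
 * significant changes are reported.
 */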
static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

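/*
 * Capture the initial offset between the guest virtual clock and the
 * host realtime clock, and record the current icount, so that later
 * calls to align_clocks() have a baseline to work from.
 */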
static void init_delay_params(SyncClocks *sc, CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount
        = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late.  We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* !CONFIG_USER_ONLY */

/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t ret;
    TranslationBlock *last_tb;
    int tb_exit;
    uint8_t *tb_ptr = itb->tc.ptr;

    qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
                           "Trace %d: %p ["
                           TARGET_FMT_lx "/" TARGET_FMT_lx "/%#x] %s\n",
                           cpu->cpu_index, itb->tc.ptr,
                           itb->cs_base, itb->pc, itb->flags,
                           lookup_symbol(itb->pc));

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)
        && qemu_log_in_addr_range(itb->pc)) {
        qemu_log_lock();
        int flags = 0;
        if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) {
            flags |= CPU_DUMP_FPU;
        }
#if defined(TARGET_I386)
        flags |= CPU_DUMP_CCOP;
#endif
        log_cpu_state(cpu, flags);
        qemu_log_unlock();
    }
#endif /* DEBUG_DISAS */

    ret = tcg_qemu_tb_exec(env, tb_ptr);
    cpu->can_do_io = 1;
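    /*
     * The return value packs two things: the address of the last TB
     * that was executed and, in its low bits, the reason we left it.
     * For example, with the usual TB_EXIT_MASK of 3, (tb | TB_EXIT_IDX1)
     * means we left 'tb' through its goto_tb jump slot 1.
     */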
    last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    tb_exit = ret & TB_EXIT_MASK;
    trace_exec_tb_exit(last_tb, tb_exit);

    if (tb_exit > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (e.g. because the
         * instruction counter hit zero); we must restore the guest PC
         * to the address of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        qemu_log_mask_and_addr(CPU_LOG_EXEC, last_tb->pc,
                               "Stopped execution of TB chain before %p ["
                               TARGET_FMT_lx "] %s\n",
                               last_tb->tc.ptr, last_tb->pc,
                               lookup_symbol(last_tb->pc));
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, last_tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, last_tb->pc);
        }
    }
    return ret;
}

#ifndef CONFIG_USER_ONLY
/* Execute the code without caching the generated code.  An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
                             TranslationBlock *orig_tb, bool ignore_icount)
{
    TranslationBlock *tb;
    uint32_t cflags = curr_cflags() | CF_NOCACHE;

    if (ignore_icount) {
        cflags &= ~CF_USE_ICOUNT;
    }

    /* Clamping should never be needed: we only end up here when an
       existing TB is too long.  */
    cflags |= MIN(max_cycles, CF_COUNT_MASK);

    mmap_lock();
    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base,
                     orig_tb->flags, cflags);
    tb->orig_tb = orig_tb;
    mmap_unlock();

    /* execute the generated code */
    trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb);

    mmap_lock();
    tb_phys_invalidate(tb, -1);
    mmap_unlock();
    tcg_tb_remove(tb);
}
#endif

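/*
 * Execute a single guest instruction (cflags == 1 requests a
 * one-instruction TB) inside an exclusive region, so that no other
 * vCPU runs concurrently; used to emulate atomic guest instructions
 * that cannot safely execute in parallel.
 */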
void cpu_exec_step_atomic(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;
    uint32_t cflags = 1;
    uint32_t cf_mask = cflags & CF_HASH_MASK;
    /* volatile because we modify it between setjmp and longjmp */
    volatile bool in_exclusive_region = false;

    if (sigsetjmp(cpu->jmp_env, 0) == 0) {
        tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
        if (tb == NULL) {
            mmap_lock();
            tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
            mmap_unlock();
        }

        start_exclusive();

        /* Since we got here, we know that parallel_cpus must be true. */
        parallel_cpus = false;
        in_exclusive_region = true;
        cc->cpu_exec_enter(cpu);
        /* execute the generated code */
        trace_exec_tb(tb, pc);
        cpu_tb_exec(cpu, tb);
        cc->cpu_exec_exit(cpu);
    } else {
        /*
         * The mmap_lock is dropped by tb_gen_code if it runs out of
         * memory.
         */
#ifndef CONFIG_SOFTMMU
        tcg_debug_assert(!have_mmap_lock());
#endif
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
        assert_no_pages_locked();
    }

    if (in_exclusive_region) {
        /* We might longjmp out of either the codegen or the
         * execution, so must make sure we only end the exclusive
         * region if we started it.
         */
        parallel_cpus = true;
        end_exclusive();
    }
}

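/*
 * Lookup key for a TranslationBlock in the global QHT hash table; it
 * mirrors the fields that tb_lookup_cmp() compares against a
 * candidate TB.
 */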
struct tb_desc {
    target_ulong pc;
    target_ulong cs_base;
    CPUArchState *env;
    tb_page_addr_t phys_page1;
    uint32_t flags;
    uint32_t cf_mask;
    uint32_t trace_vcpu_dstate;
};

static bool tb_lookup_cmp(const void *p, const void *d)
{
    const TranslationBlock *tb = p;
    const struct tb_desc *desc = d;

    if (tb->pc == desc->pc &&
        tb->page_addr[0] == desc->phys_page1 &&
        tb->cs_base == desc->cs_base &&
        tb->flags == desc->flags &&
        tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
        (tb_cflags(tb) & (CF_HASH_MASK | CF_INVALID)) == desc->cf_mask) {
        /* check next page if needed */
        if (tb->page_addr[1] == -1) {
            return true;
        } else {
            tb_page_addr_t phys_page2;
            target_ulong virt_page2;

            virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
            phys_page2 = get_page_addr_code(desc->env, virt_page2);
            if (tb->page_addr[1] == phys_page2) {
                return true;
            }
        }
    }
    return false;
}

TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags,
                                   uint32_t cf_mask)
{
    tb_page_addr_t phys_pc;
    struct tb_desc desc;
    uint32_t h;

    desc.env = (CPUArchState *)cpu->env_ptr;
    desc.cs_base = cs_base;
    desc.flags = flags;
    desc.cf_mask = cf_mask;
    desc.trace_vcpu_dstate = *cpu->trace_dstate;
    desc.pc = pc;
    phys_pc = get_page_addr_code(desc.env, pc);
    if (phys_pc == -1) {
        return NULL;
    }
    desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_hash_func(phys_pc, pc, flags, cf_mask, *cpu->trace_dstate);
    return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
}

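/*
 * Point jump slot 'n' of 'tb' at 'addr': either by patching the
 * generated host code in place (direct jump) or by updating the
 * address that the generated code loads and jumps through.
 */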
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
{
    if (TCG_TARGET_HAS_direct_jump) {
        uintptr_t offset = tb->jmp_target_arg[n];
        uintptr_t tc_ptr = (uintptr_t)tb->tc.ptr;
        tb_target_set_jmp_target(tc_ptr, tc_ptr + offset, addr);
    } else {
        tb->jmp_target_arg[n] = addr;
    }
}

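/*
 * Chain 'tb' to 'tb_next' through jump slot 'n', so execution can
 * flow from one TB to the other without returning to the main loop.
 * The slot is claimed with a cmpxchg so that concurrent linkers
 * cannot race, and the link is dropped if 'tb_next' has already been
 * invalidated.
 */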
static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    uintptr_t old;

    assert(n < ARRAY_SIZE(tb->jmp_list_next));
    qemu_spin_lock(&tb_next->jmp_lock);

    /* make sure the destination TB is valid */
    if (tb_next->cflags & CF_INVALID) {
        goto out_unlock_next;
    }
    /* Atomically claim the jump destination slot only if it was NULL */
    old = atomic_cmpxchg(&tb->jmp_dest[n], (uintptr_t)NULL, (uintptr_t)tb_next);
    if (old) {
        goto out_unlock_next;
    }

    /* patch the native jump address */
    tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc.ptr);

    /* add in TB jmp list */
    tb->jmp_list_next[n] = tb_next->jmp_list_head;
    tb_next->jmp_list_head = (uintptr_t)tb | n;

    qemu_spin_unlock(&tb_next->jmp_lock);

    qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
                           "Linking TBs %p [" TARGET_FMT_lx
                           "] index %d -> %p [" TARGET_FMT_lx "]\n",
                           tb->tc.ptr, tb->pc, n,
                           tb_next->tc.ptr, tb_next->pc);
    return;

 out_unlock_next:
    qemu_spin_unlock(&tb_next->jmp_lock);
    return;
}

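/*
 * Look up the TB matching the current CPU state, translating it first
 * if it is not already cached, and try to chain the previously
 * executed TB (last_tb, via exit slot tb_exit) directly to it.
 */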
static inline TranslationBlock *tb_find(CPUState *cpu,
                                        TranslationBlock *last_tb,
                                        int tb_exit, uint32_t cf_mask)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;

    tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
    if (tb == NULL) {
        mmap_lock();
        tb = tb_gen_code(cpu, pc, cs_base, flags, cf_mask);
        mmap_unlock();
        /* We add the TB in the virtual pc hash table for the fast lookup */
        atomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
    }
#ifndef CONFIG_USER_ONLY
    /* We don't take care of direct jumps when address mapping changes in
     * system emulation.  So it's not safe to make a direct jump to a TB
     * spanning two pages because the mapping for the second page can change.
     */
    if (tb->page_addr[1] != -1) {
        last_tb = NULL;
    }
#endif
    /* See if we can patch the calling TB. */
    if (last_tb) {
        tb_add_jump(last_tb, tb_exit, tb);
    }
    return tb;
}

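/*
 * Return true if the CPU is halted and should stay that way (it has
 * no work pending); otherwise clear the halted state and return
 * false so execution can proceed.
 */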
static inline bool cpu_handle_halt(CPUState *cpu)
{
    if (cpu->halted) {
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
        if ((cpu->interrupt_request & CPU_INTERRUPT_POLL)
            && replay_interrupt()) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            qemu_mutex_lock_iothread();
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
            qemu_mutex_unlock_iothread();
        }
#endif
        if (!cpu_has_work(cpu)) {
            return true;
        }

        cpu->halted = 0;
    }

    return false;
}

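/*
 * On a debug exception, clear any stale watchpoint-hit flags if no
 * watchpoint is actually pending, then hand off to the per-target
 * debug exception handler.
 */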
static inline void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    cc->debug_excp_handler(cpu);
}

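/*
 * Process any pending exception.  Returns true (with *ret set) when
 * the outer loop in cpu_exec() should return to its caller, false
 * when execution can continue.
 */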
static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
{
    if (cpu->exception_index < 0) {
#ifndef CONFIG_USER_ONLY
        if (replay_has_exception()
            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) {
            /* Execute just one insn to trigger the exception that is
               pending in the replay log. */
            cpu_exec_nocache(cpu, 1, tb_find(cpu, NULL, 0, curr_cflags()), true);
        }
#endif
        if (cpu->exception_index < 0) {
            return false;
        }
    }

    if (cpu->exception_index >= EXCP_INTERRUPT) {
        /* exit request from the cpu execution loop */
        *ret = cpu->exception_index;
        if (*ret == EXCP_DEBUG) {
            cpu_handle_debug_exception(cpu);
        }
        cpu->exception_index = -1;
        return true;
    } else {
#if defined(CONFIG_USER_ONLY)
        /* In user-mode emulation we simulate a fake exception which is
           handled outside the cpu execution loop. */
#if defined(TARGET_I386)
        CPUClass *cc = CPU_GET_CLASS(cpu);
        cc->do_interrupt(cpu);
#endif
        *ret = cpu->exception_index;
        cpu->exception_index = -1;
        return true;
#else
        if (replay_exception()) {
            CPUClass *cc = CPU_GET_CLASS(cpu);
            qemu_mutex_lock_iothread();
            cc->do_interrupt(cpu);
            qemu_mutex_unlock_iothread();
            cpu->exception_index = -1;
        } else if (!replay_has_interrupt()) {
            /* give a chance to iothread in replay mode */
            *ret = EXCP_INTERRUPT;
            return true;
        }
#endif
    }

    return false;
}

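/*
 * Process pending interrupt and exit requests.  Returns true when the
 * inner execution loop should break out (an exception_index has been
 * set or an exit was requested), false to keep executing TBs.
 * *last_tb is cleared whenever chaining to the previous TB is no
 * longer valid.
 */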
static inline bool cpu_handle_interrupt(CPUState *cpu,
                                        TranslationBlock **last_tb)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    /* Clear the interrupt flag now since we're processing
     * cpu->interrupt_request and cpu->exit_request.
     * Ensure zeroing happens before reading cpu->exit_request or
     * cpu->interrupt_request (see also smp_wmb in cpu_exit())
     */
    atomic_mb_set(&cpu_neg(cpu)->icount_decr.u16.high, 0);

    if (unlikely(atomic_read(&cpu->interrupt_request))) {
        int interrupt_request;
        qemu_mutex_lock_iothread();
        interrupt_request = cpu->interrupt_request;
        if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
            /* Mask out external interrupts for this step. */
            interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
        }
        if (interrupt_request & CPU_INTERRUPT_DEBUG) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
            cpu->exception_index = EXCP_DEBUG;
            qemu_mutex_unlock_iothread();
            return true;
        }
        if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
            /* Do nothing */
        } else if (interrupt_request & CPU_INTERRUPT_HALT) {
            replay_interrupt();
            cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
            cpu->halted = 1;
            cpu->exception_index = EXCP_HLT;
            qemu_mutex_unlock_iothread();
            return true;
        }
#if defined(TARGET_I386)
        else if (interrupt_request & CPU_INTERRUPT_INIT) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUArchState *env = &x86_cpu->env;
            replay_interrupt();
            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
            do_cpu_init(x86_cpu);
            cpu->exception_index = EXCP_HALTED;
            qemu_mutex_unlock_iothread();
            return true;
        }
#else
        else if (interrupt_request & CPU_INTERRUPT_RESET) {
            replay_interrupt();
            cpu_reset(cpu);
            qemu_mutex_unlock_iothread();
            return true;
        }
#endif
        /* The target hook has three exit conditions: false when the
           interrupt isn't processed, true when it is and we should
           restart on a new TB, or a longjmp out through
           cpu_loop_exit().  */
        else {
            if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
                replay_interrupt();
                cpu->exception_index = -1;
                *last_tb = NULL;
            }
            /* The target hook may have updated the 'cpu->interrupt_request';
             * reload the 'interrupt_request' value */
            interrupt_request = cpu->interrupt_request;
        }
        if (interrupt_request & CPU_INTERRUPT_EXITTB) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
            /* ensure that no TB jump will be modified as
               the program flow was changed */
            *last_tb = NULL;
        }

        /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
        qemu_mutex_unlock_iothread();
    }

    /* Finally, check if we need to exit to the main loop. */
    if (unlikely(atomic_read(&cpu->exit_request))
        || (use_icount
            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0)) {
        atomic_set(&cpu->exit_request, 0);
        if (cpu->exception_index == -1) {
            cpu->exception_index = EXCP_INTERRUPT;
        }
        return true;
    }

    return false;
}

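/*
 * Execute one TB (and any TBs it chains to), recording the last TB
 * executed and the reason for leaving it in *last_tb and *tb_exit.
 * When icount expires mid-chain, refill the decrementer or fall back
 * to uncached execution of the remaining instructions.
 */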
static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
                                    TranslationBlock **last_tb, int *tb_exit)
{
    uintptr_t ret;
    int32_t insns_left;

    trace_exec_tb(tb, tb->pc);
    ret = cpu_tb_exec(cpu, tb);
    tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    *tb_exit = ret & TB_EXIT_MASK;
    if (*tb_exit != TB_EXIT_REQUESTED) {
        *last_tb = tb;
        return;
    }

    *last_tb = NULL;
    insns_left = atomic_read(&cpu_neg(cpu)->icount_decr.u32);
    if (insns_left < 0) {
        /* Something asked us to stop executing chained TBs; just
         * continue round the main loop.  Whatever requested the exit
         * will also have set something else (e.g. exit_request or
         * interrupt_request) which will be handled by
         * cpu_handle_interrupt.  cpu_handle_interrupt will also
         * clear cpu->icount_decr.u16.high.
         */
        return;
    }

    /* Instruction counter expired.  */
    assert(use_icount);
#ifndef CONFIG_USER_ONLY
    /* Ensure global icount has gone forward */
    cpu_update_icount(cpu);
    /* Refill decrementer and continue execution.  */
    insns_left = MIN(0xffff, cpu->icount_budget);
    cpu_neg(cpu)->icount_decr.u16.low = insns_left;
    cpu->icount_extra = cpu->icount_budget - insns_left;
    if (!cpu->icount_extra) {
        /* Execute any remaining instructions, then let the main loop
         * handle the next event.
         */
        if (insns_left > 0) {
            cpu_exec_nocache(cpu, insns_left, tb, false);
        }
    }
#endif
}

/* main execution loop */

int cpu_exec(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int ret;
    SyncClocks sc = { 0 };

    /* replay_interrupt may need current_cpu */
    current_cpu = cpu;

    if (cpu_handle_halt(cpu)) {
        return EXCP_HALTED;
    }

    rcu_read_lock();

    cc->cpu_exec_enter(cpu);

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0.  As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    /* prepare setjmp context for exception handling */
    if (sigsetjmp(cpu->jmp_env, 0) != 0) {
#if defined(__clang__) || !QEMU_GNUC_PREREQ(4, 6)
        /* Some compilers wrongly smash all local variables after
         * siglongjmp.  There were bug reports for gcc 4.5.0 and clang.
         * Reload essential local variables here for those compilers.
         * Newer versions of gcc would complain about this code (-Wclobbered). */
        cpu = current_cpu;
        cc = CPU_GET_CLASS(cpu);
#else /* buggy compiler */
        /* Assert that the compiler does not smash local variables. */
        g_assert(cpu == current_cpu);
        g_assert(cc == CPU_GET_CLASS(cpu));
#endif /* buggy compiler */
#ifndef CONFIG_SOFTMMU
        tcg_debug_assert(!have_mmap_lock());
#endif
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
        assert_no_pages_locked();
    }

    /* if an exception is pending, we execute it here */
    while (!cpu_handle_exception(cpu, &ret)) {
        TranslationBlock *last_tb = NULL;
        int tb_exit = 0;

        while (!cpu_handle_interrupt(cpu, &last_tb)) {
            uint32_t cflags = cpu->cflags_next_tb;
            TranslationBlock *tb;

            /* When requested, use an exact setting for cflags for the next
               execution.  This is used for icount, precise smc, and
               stop-after-access watchpoints.  Since this request should never
               have CF_INVALID set, -1 is a convenient invalid value that
               does not require tcg headers for cpu_common_reset.  */
            if (cflags == -1) {
                cflags = curr_cflags();
            } else {
                cpu->cflags_next_tb = -1;
            }

            tb = tb_find(cpu, last_tb, tb_exit, cflags);
            cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit);
            /* Try to align the host and virtual clocks
               if the guest is in advance */
            align_clocks(&sc, cpu);
        }
    }

    cc->cpu_exec_exit(cpu);
    rcu_read_unlock();

    return ret;
}