/*
 * QEMU monitor
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "monitor-internal.h"
#include "qapi/error.h"
#include "qapi/qapi-emit-events.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qstring.h"
#include "qemu/error-report.h"
#include "qemu/option.h"
#include "sysemu/qtest.h"
#include "sysemu/sysemu.h"
#include "trace.h"

/*
 * To prevent flooding clients, events can be throttled. The
 * throttling is calculated globally, rather than per-Monitor
 * instance.
 */
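/*
 * E.g. three RTC_CHANGE events within the one-second rate limit: the
 * first is emitted immediately, the second is stored, and the third
 * replaces it; only the third is emitted when the throttling timer
 * fires.
 */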
typedef struct MonitorQAPIEventState {
    QAPIEvent event;    /* Throttling state for this event type and... */
    QDict *data;        /* ... data, see qapi_event_throttle_equal() */
    QEMUTimer *timer;   /* Timer for handling delayed events */
    QDict *qdict;       /* Delayed event (if any) */
} MonitorQAPIEventState;

typedef struct {
    int64_t rate;       /* Minimum time (in ns) between two events */
} MonitorQAPIEventConf;

/* Shared monitor I/O thread */
IOThread *mon_iothread;

/* Bottom half to dispatch the requests received from I/O thread */
QEMUBH *qmp_dispatcher_bh;

/* Protects mon_list, monitor_qapi_event_state, monitor_destroyed. */
QemuMutex monitor_lock;
static GHashTable *monitor_qapi_event_state;

MonitorList mon_list;
int mon_refcount;
static bool monitor_destroyed;

__thread Monitor *cur_mon;

/**
 * Is the current monitor, if any, a QMP monitor?
 */
bool monitor_cur_is_qmp(void)
{
    return cur_mon && monitor_is_qmp(cur_mon);
}

/**
 * Is @mon using readline?
 * Note: not all HMP monitors use readline, e.g., gdbserver has a
 * non-interactive HMP monitor, so readline is not used there.
 */
static inline bool monitor_uses_readline(const MonitorHMP *mon)
{
    return mon->use_readline;
}

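/* True for an HMP monitor that does not use readline, false otherwise */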
static inline bool monitor_is_hmp_non_interactive(const Monitor *mon)
{
    if (monitor_is_qmp(mon)) {
        return false;
    }

    return !monitor_uses_readline(container_of(mon, MonitorHMP, common));
}

static void monitor_flush_locked(Monitor *mon);

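/*
 * Watch callback: the chardev can accept more output again.  Clear
 * the watch and retry flushing the output buffer.
 */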
static gboolean monitor_unblocked(GIOChannel *chan, GIOCondition cond,
                                  void *opaque)
{
    Monitor *mon = opaque;

    qemu_mutex_lock(&mon->mon_lock);
    mon->out_watch = 0;
    monitor_flush_locked(mon);
    qemu_mutex_unlock(&mon->mon_lock);
    return FALSE;
}

/* Caller must hold mon->mon_lock */
static void monitor_flush_locked(Monitor *mon)
{
    int rc;
    size_t len;
    const char *buf;

    if (mon->skip_flush) {
        return;
    }

    buf = qstring_get_str(mon->outbuf);
    len = qstring_get_length(mon->outbuf);

    if (len && !mon->mux_out) {
        rc = qemu_chr_fe_write(&mon->chr, (const uint8_t *) buf, len);
        if ((rc < 0 && errno != EAGAIN) || (rc == len)) {
            /* all flushed or error */
            qobject_unref(mon->outbuf);
            mon->outbuf = qstring_new();
            return;
        }
        if (rc > 0) {
            /* partial write */
            QString *tmp = qstring_from_str(buf + rc);
            qobject_unref(mon->outbuf);
            mon->outbuf = tmp;
        }
        if (mon->out_watch == 0) {
            mon->out_watch =
                qemu_chr_fe_add_watch(&mon->chr, G_IO_OUT | G_IO_HUP,
                                      monitor_unblocked, mon);
        }
    }
}

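/* Flush the monitor's output buffer to its character device */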
void monitor_flush(Monitor *mon)
{
    qemu_mutex_lock(&mon->mon_lock);
    monitor_flush_locked(mon);
    qemu_mutex_unlock(&mon->mon_lock);
}

/* flush at every end of line */
int monitor_puts(Monitor *mon, const char *str)
{
    int i;
    char c;

    qemu_mutex_lock(&mon->mon_lock);
    for (i = 0; str[i]; i++) {
        c = str[i];
        if (c == '\n') {
            qstring_append_chr(mon->outbuf, '\r');
        }
        qstring_append_chr(mon->outbuf, c);
        if (c == '\n') {
            monitor_flush_locked(mon);
        }
    }
    qemu_mutex_unlock(&mon->mon_lock);

    return i;
}

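/*
 * Print to @mon like vprintf().  Returns -1 and prints nothing if
 * @mon is NULL or a QMP monitor; QMP clients expect JSON, not
 * free-form text.
 */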
int monitor_vprintf(Monitor *mon, const char *fmt, va_list ap)
{
    char *buf;
    int n;

    if (!mon) {
        return -1;
    }

    if (monitor_is_qmp(mon)) {
        return -1;
    }

    buf = g_strdup_vprintf(fmt, ap);
    n = monitor_puts(mon, buf);
    g_free(buf);
    return n;
}

int monitor_printf(Monitor *mon, const char *fmt, ...)
{
    int ret;

    va_list ap;
    va_start(ap, fmt);
    ret = monitor_vprintf(mon, fmt, ap);
    va_end(ap);
    return ret;
}

/*
 * Print to current monitor if we have one, else to stderr.
 */
int error_vprintf(const char *fmt, va_list ap)
{
    if (cur_mon && !monitor_cur_is_qmp()) {
        return monitor_vprintf(cur_mon, fmt, ap);
    }
    return vfprintf(stderr, fmt, ap);
}

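/*
 * Like error_vprintf(), except it returns -1 and prints nothing when
 * the output would go to a QMP monitor.
 */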
int error_vprintf_unless_qmp(const char *fmt, va_list ap)
{
    if (!cur_mon) {
        return vfprintf(stderr, fmt, ap);
    }
    if (!monitor_cur_is_qmp()) {
        return monitor_vprintf(cur_mon, fmt, ap);
    }
    return -1;
}


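/*
 * Events not listed below default to a rate of 0, i.e. they are
 * never throttled.
 */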
static MonitorQAPIEventConf monitor_qapi_event_conf[QAPI_EVENT__MAX] = {
    /* Limit guest-triggerable events to 1 per second */
    [QAPI_EVENT_RTC_CHANGE]        = { 1000 * SCALE_MS },
    [QAPI_EVENT_WATCHDOG]          = { 1000 * SCALE_MS },
    [QAPI_EVENT_BALLOON_CHANGE]    = { 1000 * SCALE_MS },
    [QAPI_EVENT_QUORUM_REPORT_BAD] = { 1000 * SCALE_MS },
    [QAPI_EVENT_QUORUM_FAILURE]    = { 1000 * SCALE_MS },
    [QAPI_EVENT_VSERPORT_CHANGE]   = { 1000 * SCALE_MS },
};

/*
 * Return the clock to use for recording an event's time.
 * It's QEMU_CLOCK_REALTIME, except for qtests it's
 * QEMU_CLOCK_VIRTUAL, to support testing rate limits.
 * Beware: result is invalid before configure_accelerator().
 */
static inline QEMUClockType monitor_get_event_clock(void)
{
    return qtest_enabled() ? QEMU_CLOCK_VIRTUAL : QEMU_CLOCK_REALTIME;
}

/*
 * Broadcast an event to all monitors.
 * @qdict is the event object. Its member "event" must match @event.
 * Caller must hold monitor_lock.
 */
static void monitor_qapi_event_emit(QAPIEvent event, QDict *qdict)
{
    Monitor *mon;
    MonitorQMP *qmp_mon;

    trace_monitor_protocol_event_emit(event, qdict);
    QTAILQ_FOREACH(mon, &mon_list, entry) {
        if (!monitor_is_qmp(mon)) {
            continue;
        }

        qmp_mon = container_of(mon, MonitorQMP, common);
        if (qmp_mon->commands != &qmp_cap_negotiation_commands) {
            qmp_send_response(qmp_mon, qdict);
        }
    }
}

static void monitor_qapi_event_handler(void *opaque);

/*
 * Queue a new event for emission to Monitor instances,
 * applying any rate limiting if required.
 */
static void
monitor_qapi_event_queue_no_reenter(QAPIEvent event, QDict *qdict)
{
    MonitorQAPIEventConf *evconf;
    MonitorQAPIEventState *evstate;

    assert(event < QAPI_EVENT__MAX);
    evconf = &monitor_qapi_event_conf[event];
    trace_monitor_protocol_event_queue(event, qdict, evconf->rate);

    qemu_mutex_lock(&monitor_lock);

    if (!evconf->rate) {
        /* Unthrottled event */
        monitor_qapi_event_emit(event, qdict);
    } else {
        QDict *data = qobject_to(QDict, qdict_get(qdict, "data"));
        MonitorQAPIEventState key = { .event = event, .data = data };

        evstate = g_hash_table_lookup(monitor_qapi_event_state, &key);
        assert(!evstate || timer_pending(evstate->timer));

        if (evstate) {
            /*
             * Timer is pending for (at least) evconf->rate ns after
             * last send. Store event for sending when timer fires,
             * replacing a prior stored event if any.
             */
            qobject_unref(evstate->qdict);
            evstate->qdict = qobject_ref(qdict);
        } else {
            /*
             * Last send was (at least) evconf->rate ns ago.
             * Send immediately, and arm the timer to call
             * monitor_qapi_event_handler() in evconf->rate ns. Any
             * events arriving before then will be delayed until then.
             */
            int64_t now = qemu_clock_get_ns(monitor_get_event_clock());

            monitor_qapi_event_emit(event, qdict);

            evstate = g_new(MonitorQAPIEventState, 1);
            evstate->event = event;
            evstate->data = qobject_ref(data);
            evstate->qdict = NULL;
            evstate->timer = timer_new_ns(monitor_get_event_clock(),
                                          monitor_qapi_event_handler,
                                          evstate);
            g_hash_table_add(monitor_qapi_event_state, evstate);
            timer_mod_ns(evstate->timer, now + evconf->rate);
        }
    }

    qemu_mutex_unlock(&monitor_lock);
}

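/*
 * Emit @event to all QMP monitors.  @qdict is the event object, as
 * for monitor_qapi_event_emit().  Throttled according to
 * monitor_qapi_event_conf[].
 */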
void qapi_event_emit(QAPIEvent event, QDict *qdict)
{
    /*
     * monitor_qapi_event_queue_no_reenter() is not reentrant: it
     * would deadlock on monitor_lock. Work around by queueing
     * events in thread-local storage.
     * TODO: remove this, make it re-enter safe.
     */
    typedef struct MonitorQapiEvent {
        QAPIEvent event;
        QDict *qdict;
        QSIMPLEQ_ENTRY(MonitorQapiEvent) entry;
    } MonitorQapiEvent;
    static __thread QSIMPLEQ_HEAD(, MonitorQapiEvent) event_queue;
    static __thread bool reentered;
    MonitorQapiEvent *ev;

    if (!reentered) {
        QSIMPLEQ_INIT(&event_queue);
    }

    ev = g_new(MonitorQapiEvent, 1);
    ev->qdict = qobject_ref(qdict);
    ev->event = event;
    QSIMPLEQ_INSERT_TAIL(&event_queue, ev, entry);
    if (reentered) {
        return;
    }

    reentered = true;

    while ((ev = QSIMPLEQ_FIRST(&event_queue)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&event_queue, entry);
        monitor_qapi_event_queue_no_reenter(ev->event, ev->qdict);
        qobject_unref(ev->qdict);
        g_free(ev);
    }

    reentered = false;
}

/*
 * This function runs evconf->rate ns after sending a throttled
 * event. If another event has since been stored, send it.
 */
static void monitor_qapi_event_handler(void *opaque)
{
    MonitorQAPIEventState *evstate = opaque;
    MonitorQAPIEventConf *evconf = &monitor_qapi_event_conf[evstate->event];

    trace_monitor_protocol_event_handler(evstate->event, evstate->qdict);
    qemu_mutex_lock(&monitor_lock);

    if (evstate->qdict) {
        int64_t now = qemu_clock_get_ns(monitor_get_event_clock());

        monitor_qapi_event_emit(evstate->event, evstate->qdict);
        qobject_unref(evstate->qdict);
        evstate->qdict = NULL;
        timer_mod_ns(evstate->timer, now + evconf->rate);
    } else {
        g_hash_table_remove(monitor_qapi_event_state, evstate);
        qobject_unref(evstate->data);
        timer_free(evstate->timer);
        g_free(evstate);
    }

    qemu_mutex_unlock(&monitor_lock);
}

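/*
 * Hash and equality for the event throttling hash table: most events
 * are keyed on the event type alone; VSERPORT_CHANGE and
 * QUORUM_REPORT_BAD are additionally keyed on the data member that
 * identifies their source ("id" resp. "node-name"), so those events
 * are throttled per device / per node rather than globally.
 */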
static unsigned int qapi_event_throttle_hash(const void *key)
{
    const MonitorQAPIEventState *evstate = key;
    unsigned int hash = evstate->event * 255;

    if (evstate->event == QAPI_EVENT_VSERPORT_CHANGE) {
        hash += g_str_hash(qdict_get_str(evstate->data, "id"));
    }

    if (evstate->event == QAPI_EVENT_QUORUM_REPORT_BAD) {
        hash += g_str_hash(qdict_get_str(evstate->data, "node-name"));
    }

    return hash;
}

static gboolean qapi_event_throttle_equal(const void *a, const void *b)
{
    const MonitorQAPIEventState *eva = a;
    const MonitorQAPIEventState *evb = b;

    if (eva->event != evb->event) {
        return FALSE;
    }

    if (eva->event == QAPI_EVENT_VSERPORT_CHANGE) {
        return !strcmp(qdict_get_str(eva->data, "id"),
                       qdict_get_str(evb->data, "id"));
    }

    if (eva->event == QAPI_EVENT_QUORUM_REPORT_BAD) {
        return !strcmp(qdict_get_str(eva->data, "node-name"),
                       qdict_get_str(evb->data, "node-name"));
    }

    return TRUE;
}

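/*
 * Suspend input on @mon.  May be nested; each successful call must be
 * balanced by a monitor_resume().  Returns 0 on success, -ENOTTY if
 * @mon is a non-interactive HMP monitor, which cannot be suspended.
 */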
int monitor_suspend(Monitor *mon)
{
    if (monitor_is_hmp_non_interactive(mon)) {
        return -ENOTTY;
    }

    atomic_inc(&mon->suspend_cnt);

    if (mon->use_io_thread) {
        /*
         * Kick I/O thread to make sure this takes effect. It'll be
         * evaluated again in prepare() of the watch object.
         */
        aio_notify(iothread_get_aio_context(mon_iothread));
    }

    trace_monitor_suspend(mon, 1);
    return 0;
}

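/* Bottom half scheduled by monitor_resume() to re-enable chardev input */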
static void monitor_accept_input(void *opaque)
{
    Monitor *mon = opaque;

    qemu_chr_fe_accept_input(&mon->chr);
}

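/*
 * Resume input on @mon.  Pairs with monitor_suspend(); input is
 * accepted again once the suspend count drops back to zero.
 */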
void monitor_resume(Monitor *mon)
{
    if (monitor_is_hmp_non_interactive(mon)) {
        return;
    }

    if (atomic_dec_fetch(&mon->suspend_cnt) == 0) {
        AioContext *ctx;

        if (mon->use_io_thread) {
            ctx = iothread_get_aio_context(mon_iothread);
        } else {
            ctx = qemu_get_aio_context();
        }

        if (!monitor_is_qmp(mon)) {
            MonitorHMP *hmp_mon = container_of(mon, MonitorHMP, common);
            assert(hmp_mon->rs);
            readline_show_prompt(hmp_mon->rs);
        }

        aio_bh_schedule_oneshot(ctx, monitor_accept_input, mon);
    }

    trace_monitor_suspend(mon, -1);
}

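/* Chardev can-read handler: input is accepted only while not suspended */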
int monitor_can_read(void *opaque)
{
    Monitor *mon = opaque;

    return !atomic_mb_read(&mon->suspend_cnt);
}

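/*
 * Add @mon to mon_list.  Takes ownership: if cleanup has already
 * started, @mon is destroyed and freed instead of being inserted.
 */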
void monitor_list_append(Monitor *mon)
{
    qemu_mutex_lock(&monitor_lock);
    /*
     * This prevents inserting new monitors during monitor_cleanup().
     * A cleaner solution would involve the main thread telling other
     * threads to terminate, waiting for their termination.
     */
    if (!monitor_destroyed) {
        QTAILQ_INSERT_HEAD(&mon_list, mon, entry);
        mon = NULL;
    }
    qemu_mutex_unlock(&monitor_lock);

    if (mon) {
        monitor_data_destroy(mon);
        g_free(mon);
    }
}

static void monitor_iothread_init(void)
{
    mon_iothread = iothread_create("mon_iothread", &error_abort);
}

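/*
 * Initialize the common part of a monitor.  The shared monitor I/O
 * thread is created lazily for the first monitor that asks for it.
 */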
void monitor_data_init(Monitor *mon, bool is_qmp, bool skip_flush,
                       bool use_io_thread)
{
    if (use_io_thread && !mon_iothread) {
        monitor_iothread_init();
    }
    qemu_mutex_init(&mon->mon_lock);
    mon->is_qmp = is_qmp;
    mon->outbuf = qstring_new();
    mon->skip_flush = skip_flush;
    mon->use_io_thread = use_io_thread;
}

void monitor_data_destroy(Monitor *mon)
{
    g_free(mon->mon_cpu_path);
    qemu_chr_fe_deinit(&mon->chr, false);
    if (monitor_is_qmp(mon)) {
        monitor_data_destroy_qmp(container_of(mon, MonitorQMP, common));
    } else {
        readline_free(container_of(mon, MonitorHMP, common)->rs);
    }
    qobject_unref(mon->outbuf);
    qemu_mutex_destroy(&mon->mon_lock);
}

void monitor_cleanup(void)
{
    /*
     * The I/O thread must be stopped explicitly first (but not
     * destroyed yet), because monitor_data_destroy() below
     * unregisters from the chardev, and chardev is not thread-safe
     * yet. Only after all monitors are gone can the I/O thread be
     * destroyed.
     */
    if (mon_iothread) {
        iothread_stop(mon_iothread);
    }

    /* Flush output buffers and destroy monitors */
    qemu_mutex_lock(&monitor_lock);
    monitor_destroyed = true;
    while (!QTAILQ_EMPTY(&mon_list)) {
        Monitor *mon = QTAILQ_FIRST(&mon_list);
        QTAILQ_REMOVE(&mon_list, mon, entry);
        /* Permit QAPI event emission from character frontend release */
        qemu_mutex_unlock(&monitor_lock);
        monitor_flush(mon);
        monitor_data_destroy(mon);
        qemu_mutex_lock(&monitor_lock);
        g_free(mon);
    }
    qemu_mutex_unlock(&monitor_lock);

    /* QEMUBHs need to be deleted before destroying the I/O thread */
    qemu_bh_delete(qmp_dispatcher_bh);
    qmp_dispatcher_bh = NULL;
    if (mon_iothread) {
        iothread_destroy(mon_iothread);
        mon_iothread = NULL;
    }
}

static void monitor_qapi_event_init(void)
{
    monitor_qapi_event_state = g_hash_table_new(qapi_event_throttle_hash,
                                                qapi_event_throttle_equal);
}

void monitor_init_globals_core(void)
{
    monitor_qapi_event_init();
    qemu_mutex_init(&monitor_lock);

    /*
     * The dispatcher BH must run in the main loop thread, since we
     * have commands assuming that context. It would be nice to get
     * rid of those assumptions.
     */
    qmp_dispatcher_bh = aio_bh_new(iohandler_get_aio_context(),
                                   monitor_qmp_bh_dispatcher,
                                   NULL);
}

QemuOptsList qemu_mon_opts = {
    .name = "mon",
    .implied_opt_name = "chardev",
    .head = QTAILQ_HEAD_INITIALIZER(qemu_mon_opts.head),
    .desc = {
        {
            .name = "mode",
            .type = QEMU_OPT_STRING,
        },{
            .name = "chardev",
            .type = QEMU_OPT_STRING,
        },{
            .name = "pretty",
            .type = QEMU_OPT_BOOL,
        },
        { /* end of list */ }
    },
};