/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef QEMU_AIO_H
#define QEMU_AIO_H

#include "qemu/queue.h"
#include "qemu/event_notifier.h"
#include "qemu/thread.h"
#include "qemu/timer.h"

typedef struct BlockAIOCB BlockAIOCB;
typedef void BlockCompletionFunc(void *opaque, int ret);

typedef struct AIOCBInfo {
    void (*cancel_async)(BlockAIOCB *acb);
    AioContext *(*get_aio_context)(BlockAIOCB *acb);
    size_t aiocb_size;
} AIOCBInfo;

struct BlockAIOCB {
    const AIOCBInfo *aiocb_info;
    BlockDriverState *bs;
    BlockCompletionFunc *cb;
    void *opaque;
    int refcnt;
};

void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockCompletionFunc *cb, void *opaque);
void qemu_aio_unref(void *p);
void qemu_aio_ref(void *p);

typedef struct AioHandler AioHandler;
typedef void QEMUBHFunc(void *opaque);
typedef bool AioPollFn(void *opaque);
typedef void IOHandler(void *opaque);

struct Coroutine;
struct ThreadPool;
struct LinuxAioState;

struct AioContext {
    GSource source;

    /* Used by AioContext users to protect from multi-threaded access. */
    QemuRecMutex lock;

    /* The list of registered AIO handlers. Protected by ctx->list_lock. */
    QLIST_HEAD(, AioHandler) aio_handlers;

    /* Used to avoid unnecessary event_notifier_set calls in aio_notify;
     * accessed with atomic primitives. If this field is 0, everything
     * (file descriptors, bottom halves, timers) will be re-evaluated
     * before the next blocking poll(), thus the event_notifier_set call
     * can be skipped. If it is non-zero, you may need to wake up a
     * concurrent aio_poll or the glib main event loop, making
     * event_notifier_set necessary.
     *
     * Bit 0 is reserved for GSource usage of the AioContext, and is 1
     * between a call to aio_ctx_prepare and the next call to aio_ctx_check.
     * Bits 1-31 simply count the number of active calls to aio_poll
     * that are in the prepare or poll phase.
     *
     * The GSource and aio_poll must use different mechanisms because
     * there is no certainty that a call to GSource's prepare callback
     * (via g_main_context_prepare) is indeed followed by check and
     * dispatch. It's not clear whether this would be a bug, but let's
     * play it safe and allow it; it will just cause extra calls to
     * event_notifier_set until the next call to dispatch.
     *
     * The aio_poll calls, on the other hand, include both the prepare and
     * the dispatch phase, hence a simple counter is enough for them.
     */
    uint32_t notify_me;

    /* A lock that protects concurrent addition and deletion of QEMUBHs
     * and AioHandlers, and ensures that no callbacks are removed while
     * we're walking and dispatching them.
     */
    QemuLockCnt list_lock;

    /* Anchor of the list of Bottom Halves belonging to the context */
    struct QEMUBH *first_bh;

    /* Used by aio_notify.
     *
     * "notified" is used to avoid expensive event_notifier_test_and_clear
     * calls. When it is clear, the EventNotifier is clear, or one thread
     * is going to clear "notified" before processing more events. False
     * positives are possible, i.e. "notified" could be set even though the
     * EventNotifier is clear.
     *
     * Note that event_notifier_set *cannot* be optimized the same way. For
     * more information on the problem that would result, see "#ifdef BUG2"
     * in the docs/aio_notify_accept.promela formal model.
     */
    bool notified;
    EventNotifier notifier;

    QSLIST_HEAD(, Coroutine) scheduled_coroutines;
    QEMUBH *co_schedule_bh;

    /* Thread pool for performing work and receiving completion callbacks.
     * Has its own locking.
     */
    struct ThreadPool *thread_pool;

#ifdef CONFIG_LINUX_AIO
    /* State for native Linux AIO. Uses aio_context_acquire/release for
     * locking.
     */
    struct LinuxAioState *linux_aio;
#endif

    /* TimerLists for calling timers - one per clock type. Has its own
     * locking.
     */
    QEMUTimerListGroup tlg;

    int external_disable_cnt;

    /* Number of AioHandlers without .io_poll() */
    int poll_disable_cnt;

    /* Polling mode parameters */
    int64_t poll_ns;        /* current polling time in nanoseconds */
    int64_t poll_max_ns;    /* maximum polling time in nanoseconds */
    int64_t poll_grow;      /* polling time growth factor */
    int64_t poll_shrink;    /* polling time shrink factor */

    /* Are we in polling mode or monitoring file descriptors? */
    bool poll_started;

    /* epoll(7) state used when built with CONFIG_EPOLL */
    int epollfd;
    bool epoll_enabled;
    bool epoll_available;
};

/**
 * aio_context_new: Allocate a new AioContext.
 *
 * An AioContext provides a mini event loop that can be waited on
 * synchronously. It also provides bottom halves, a service to execute
 * a piece of code as soon as possible.
 */
AioContext *aio_context_new(Error **errp);
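
/*
 * Example (an illustrative sketch, not part of the API; error_abort is
 * assumed from "qapi/error.h"):
 *
 *     AioContext *ctx = aio_context_new(&error_abort);
 *
 *     ... attach handlers and run aio_poll() ...
 *
 *     aio_context_unref(ctx);
 *
 * aio_context_new() returns the context with one reference held; drop it
 * with aio_context_unref() when done.
 */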

/**
 * aio_context_ref:
 * @ctx: The AioContext to operate on.
 *
 * Add a reference to an AioContext.
 */
void aio_context_ref(AioContext *ctx);

/**
 * aio_context_unref:
 * @ctx: The AioContext to operate on.
 *
 * Drop a reference to an AioContext.
 */
void aio_context_unref(AioContext *ctx);

/* Take ownership of the AioContext. If the AioContext will be shared between
 * threads, and a thread does not want to be interrupted, it will have to
 * take ownership around calls to aio_poll(). Otherwise, aio_poll()
 * automatically takes care of calling aio_context_acquire and
 * aio_context_release.
 *
 * Note that this is separate from bdrv_drained_begin/bdrv_drained_end. A
 * thread still has to call those to avoid being interrupted by the guest.
 *
 * Bottom halves, timers and callbacks can be created or removed without
 * acquiring the AioContext.
 */
void aio_context_acquire(AioContext *ctx);

/* Relinquish ownership of the AioContext. */
void aio_context_release(AioContext *ctx);
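
/*
 * Example (sketch; do_work() is a hypothetical helper that touches state
 * owned by @ctx):
 *
 *     aio_context_acquire(ctx);
 *     do_work(ctx);
 *     aio_context_release(ctx);
 */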

/**
 * aio_bh_schedule_oneshot: Allocate a new bottom half structure that will run
 * only once and as soon as possible.
 */
void aio_bh_schedule_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque);
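
/*
 * Example (sketch; my_cb() and my_state are hypothetical). The bottom
 * half is allocated and freed internally, so there is nothing to delete:
 *
 *     static void my_cb(void *opaque)
 *     {
 *         ... runs once from ctx's event loop ...
 *     }
 *
 *     aio_bh_schedule_oneshot(ctx, my_cb, my_state);
 */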

/**
 * aio_bh_new: Allocate a new bottom half structure.
 *
 * Bottom halves are lightweight callbacks whose invocation is guaranteed
 * to be wait-free, thread-safe and signal-safe. The #QEMUBH structure
 * is opaque and must be allocated prior to its use.
 */
QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque);
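
/*
 * Example (sketch; bh_cb() and dev are hypothetical). qemu_bh_schedule()
 * makes bh_cb(dev) run from the context's event loop; qemu_bh_delete()
 * cancels a pending execution and frees the structure:
 *
 *     QEMUBH *bh = aio_bh_new(ctx, bh_cb, dev);
 *
 *     qemu_bh_schedule(bh);
 *     ...
 *     qemu_bh_delete(bh);
 */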

/**
 * aio_notify: Force processing of pending events.
 *
 * Similar to signaling a condition variable, aio_notify forces
 * aio_poll to exit, so that the next call will re-examine pending events.
 * The caller of aio_notify will usually call aio_poll again very soon,
 * or go through another iteration of the GLib main loop. Hence, aio_notify
 * also has the side effect of recalculating the sets of file descriptors
 * that the main loop waits for.
 *
 * Calling aio_notify is rarely necessary, because for example scheduling
 * a bottom half calls it already.
 */
void aio_notify(AioContext *ctx);

/**
 * aio_notify_accept: Acknowledge receiving an aio_notify.
 *
 * aio_notify() uses an EventNotifier in order to wake up a sleeping
 * aio_poll() or g_main_context_iteration(). Calls to aio_notify() are
 * usually rare, but the AioContext has to clear the EventNotifier on
 * every aio_poll() or g_main_context_iteration() in order to avoid
 * busy waiting. This event_notifier_test_and_clear() cannot be done
 * using the usual aio_context_set_event_notifier(), because it must
 * be done before processing all events (file descriptors, bottom halves,
 * timers).
 *
 * aio_notify_accept() is an optimized event_notifier_test_and_clear()
 * that is specific to an AioContext's notifier; it is used internally
 * to clear the EventNotifier only if aio_notify() had been called.
 */
void aio_notify_accept(AioContext *ctx);

/**
 * aio_bh_call: Execute the callback function of the specified bottom half.
 */
void aio_bh_call(QEMUBH *bh);

/**
 * aio_bh_poll: Poll bottom halves for an AioContext.
 *
 * This is an internal function used by the QEMU main loop. Note that
 * aio_bh_poll must not be called concurrently from multiple threads on
 * the same AioContext.
 */
int aio_bh_poll(AioContext *ctx);

/**
 * qemu_bh_schedule: Schedule a bottom half.
 *
 * Scheduling a bottom half interrupts the main loop and causes the
 * execution of the callback that was passed to qemu_bh_new.
 *
 * Bottom halves that are scheduled from a bottom half handler are instantly
 * invoked. This can create an infinite loop if a bottom half handler
 * schedules itself.
 *
 * @bh: The bottom half to be scheduled.
 */
void qemu_bh_schedule(QEMUBH *bh);

/**
 * qemu_bh_cancel: Cancel execution of a bottom half.
 *
 * Canceling execution of a bottom half undoes the effect of calls to
 * qemu_bh_schedule without freeing its resources yet. While cancellation
 * itself is also wait-free and thread-safe, it can of course race with the
 * loop that executes bottom halves unless you are holding the iothread
 * mutex. This makes it mostly useless if you are not holding the mutex.
 *
 * @bh: The bottom half to be canceled.
 */
void qemu_bh_cancel(QEMUBH *bh);

/**
 * qemu_bh_delete: Cancel execution of a bottom half and free its resources.
 *
 * Deleting a bottom half frees the memory that was allocated for it by
 * qemu_bh_new. It also implies canceling the bottom half if it was
 * scheduled.
 * This function is asynchronous: the bottom half is only freed once the
 * event loop has finished with it.
 *
 * @bh: The bottom half to be deleted.
 */
void qemu_bh_delete(QEMUBH *bh);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, before g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_prepare(AioContext *ctx);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, after g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_pending(AioContext *ctx);

/* Dispatch any pending callbacks from the GSource attached to the AioContext.
 *
 * This is used internally in the implementation of the GSource.
 */
void aio_dispatch(AioContext *ctx);

/* Make progress in completing AIO work. This can issue new pending
 * AIO operations as a result of executing I/O completion or bottom
 * half callbacks.
 *
 * Return whether any progress was made by executing AIO or bottom half
 * handlers. If @blocking == true, this should always be true except
 * if someone called aio_notify.
 *
 * If there are no pending bottom halves, but there are pending AIO
 * operations, it may not be possible to make any progress without
 * blocking. If @blocking is true, this function will wait until one
 * or more AIO events have completed, to ensure something has moved
 * before returning.
 */
bool aio_poll(AioContext *ctx, bool blocking);
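
/*
 * Example (sketch): a common pattern is to run the event loop until a
 * completion flag, set by some callback, becomes true:
 *
 *     while (!done) {
 *         aio_poll(ctx, true);
 *     }
 */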

/* Register a file descriptor and associated callbacks. Behaves very similarly
 * to qemu_set_fd_handler. Unlike qemu_set_fd_handler, these callbacks will
 * be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of qemu_set_fd_handler[2].
 */
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        bool is_external,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioPollFn *io_poll,
                        void *opaque);
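
/*
 * Example (sketch; fd_read_cb() and s are hypothetical). Register a read
 * handler for an external client, and later unregister it by passing NULL
 * callbacks:
 *
 *     aio_set_fd_handler(ctx, fd, true, fd_read_cb, NULL, NULL, s);
 *     ...
 *     aio_set_fd_handler(ctx, fd, true, NULL, NULL, NULL, NULL);
 */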

/* Set polling begin/end callbacks for a file descriptor that has already been
 * registered with aio_set_fd_handler. Do nothing if the file descriptor is
 * not registered.
 */
void aio_set_fd_poll(AioContext *ctx, int fd,
                     IOHandler *io_poll_begin,
                     IOHandler *io_poll_end);

/* Register an event notifier and associated callbacks. Behaves very similarly
 * to event_notifier_set_handler. Unlike event_notifier_set_handler, these
 * callbacks will be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of event_notifier_set_handler.
 */
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            bool is_external,
                            EventNotifierHandler *io_read,
                            AioPollFn *io_poll);
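
/*
 * Example (sketch; event_cb() is a hypothetical EventNotifierHandler).
 * Once registered, event_notifier_set(&e) may be called from any thread
 * and event_cb() will then run from the context's event loop;
 * event_notifier_init() comes from "qemu/event_notifier.h", included above:
 *
 *     EventNotifier e;
 *
 *     event_notifier_init(&e, 0);
 *     aio_set_event_notifier(ctx, &e, false, event_cb, NULL);
 */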

/* Set polling begin/end callbacks for an event notifier that has already been
 * registered with aio_set_event_notifier. Do nothing if the event notifier is
 * not registered.
 */
void aio_set_event_notifier_poll(AioContext *ctx,
                                 EventNotifier *notifier,
                                 EventNotifierHandler *io_poll_begin,
                                 EventNotifierHandler *io_poll_end);

/* Return a GSource that lets the main loop poll the file descriptors attached
 * to this AioContext.
 */
GSource *aio_get_g_source(AioContext *ctx);

/* Return the ThreadPool bound to this AioContext */
struct ThreadPool *aio_get_thread_pool(AioContext *ctx);

/* Setup the LinuxAioState bound to this AioContext */
struct LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp);

/* Return the LinuxAioState bound to this AioContext */
struct LinuxAioState *aio_get_linux_aio(AioContext *ctx);

/**
 * aio_timer_new_with_attrs:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @attributes: 0, or one or more OR'ed QEMU_TIMER_ATTR_<id> values
 *              to assign
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer (with attributes) attached to the context @ctx.
 * The function is responsible for the memory allocation.
 *
 * The preferred interface is aio_timer_init or aio_timer_init_with_attrs.
 * Use that unless you really need dynamic memory allocation.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new_with_attrs(AioContext *ctx,
                                                  QEMUClockType type,
                                                  int scale, int attributes,
                                                  QEMUTimerCB *cb, void *opaque)
{
    return timer_new_full(&ctx->tlg, type, scale, attributes, cb, opaque);
}

/**
 * aio_timer_new:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer attached to the context @ctx.
 * See aio_timer_new_with_attrs for details.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new(AioContext *ctx, QEMUClockType type,
                                       int scale,
                                       QEMUTimerCB *cb, void *opaque)
{
    return timer_new_full(&ctx->tlg, type, scale, 0, cb, opaque);
}
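
/*
 * Example (sketch; timer_cb() and s are hypothetical). Arm a timer to
 * fire 100 ms from now; timer_mod(), qemu_clock_get_ms(), timer_del()
 * and timer_free() come from "qemu/timer.h", included above:
 *
 *     QEMUTimer *t = aio_timer_new(ctx, QEMU_CLOCK_REALTIME, SCALE_MS,
 *                                  timer_cb, s);
 *
 *     timer_mod(t, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 100);
 *     ...
 *     timer_del(t);
 *     timer_free(t);
 */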

/**
 * aio_timer_init_with_attrs:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @attributes: 0, or one or more OR'ed QEMU_TIMER_ATTR_<id> values
 *              to assign
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer (with attributes) attached to the context @ctx.
 * The caller is responsible for memory allocation.
 */
static inline void aio_timer_init_with_attrs(AioContext *ctx,
                                             QEMUTimer *ts, QEMUClockType type,
                                             int scale, int attributes,
                                             QEMUTimerCB *cb, void *opaque)
{
    timer_init_full(ts, &ctx->tlg, type, scale, attributes, cb, opaque);
}

/**
 * aio_timer_init:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer attached to the context @ctx.
 * See aio_timer_init_with_attrs for details.
 */
static inline void aio_timer_init(AioContext *ctx,
                                  QEMUTimer *ts, QEMUClockType type,
                                  int scale,
                                  QEMUTimerCB *cb, void *opaque)
{
    timer_init_full(ts, &ctx->tlg, type, scale, 0, cb, opaque);
}

/**
 * aio_compute_timeout:
 * @ctx: the aio context
 *
 * Compute the timeout that a blocking aio_poll should use.
 */
int64_t aio_compute_timeout(AioContext *ctx);

/**
 * aio_disable_external:
 * @ctx: the aio context
 *
 * Disable further processing of events from external clients.
 */
static inline void aio_disable_external(AioContext *ctx)
{
    atomic_inc(&ctx->external_disable_cnt);
}

/**
 * aio_enable_external:
 * @ctx: the aio context
 *
 * Enable the processing of external clients.
 */
static inline void aio_enable_external(AioContext *ctx)
{
    int old;

    old = atomic_fetch_dec(&ctx->external_disable_cnt);
    assert(old > 0);
    if (old == 1) {
        /* Kick event loop so it re-arms file descriptors */
        aio_notify(ctx);
    }
}
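
/*
 * Example (sketch): disable external handlers around a critical section
 * so that, e.g., guest-triggered activity cannot submit new requests,
 * then re-enable them. The calls nest via a counter:
 *
 *     aio_disable_external(ctx);
 *     ... work that must not race with external events ...
 *     aio_enable_external(ctx);
 */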

/**
 * aio_external_disabled:
 * @ctx: the aio context
 *
 * Return true if the external clients are disabled.
 */
static inline bool aio_external_disabled(AioContext *ctx)
{
    return atomic_read(&ctx->external_disable_cnt);
}

/**
 * aio_node_check:
 * @ctx: the aio context
 * @is_external: Whether or not the checked node is an external event source.
 *
 * Check whether a handler with the given @is_external flag may be polled
 * by @ctx at this moment. Returns true if polling it is allowed.
 */
static inline bool aio_node_check(AioContext *ctx, bool is_external)
{
    return !is_external || !atomic_read(&ctx->external_disable_cnt);
}

/**
 * aio_co_schedule:
 * @ctx: the aio context
 * @co: the coroutine
 *
 * Start a coroutine on a remote AioContext.
 *
 * The coroutine must not be entered by anyone else while aio_co_schedule()
 * is active. In addition the coroutine must have yielded unless ctx
 * is the context in which the coroutine is running (i.e. the value of
 * qemu_get_current_aio_context() from the coroutine itself).
 */
void aio_co_schedule(AioContext *ctx, struct Coroutine *co);
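
/*
 * Example (sketch; co_entry() and data are hypothetical, and
 * qemu_coroutine_create() comes from "qemu/coroutine.h"):
 *
 *     Coroutine *co = qemu_coroutine_create(co_entry, data);
 *
 *     aio_co_schedule(iothread_ctx, co);
 *
 * co_entry(data) will then run from iothread_ctx's event loop.
 */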

/**
 * aio_co_wake:
 * @co: the coroutine
 *
 * Restart a coroutine on the AioContext where it was running last, thus
 * preventing coroutines from jumping from one context to another when they
 * go to sleep.
 *
 * aio_co_wake may be executed either in coroutine or non-coroutine
 * context. The coroutine must not be entered by anyone else while
 * aio_co_wake() is active.
 */
void aio_co_wake(struct Coroutine *co);

/**
 * aio_co_enter:
 * @ctx: the context to run the coroutine
 * @co: the coroutine to run
 *
 * Enter a coroutine in the specified AioContext.
 */
void aio_co_enter(AioContext *ctx, struct Coroutine *co);

/**
 * Return the AioContext whose event loop runs in the current thread.
 *
 * If called from an IOThread this will be the IOThread's AioContext. If
 * called from another thread it will be the main loop AioContext.
 */
AioContext *qemu_get_current_aio_context(void);

/**
 * in_aio_context_home_thread:
 * @ctx: the aio context
 *
 * Return whether we are running in the thread that normally runs @ctx. Note
 * that acquiring/releasing ctx does not affect the outcome, each AioContext
 * still only has one home thread that is responsible for running it.
 */
static inline bool in_aio_context_home_thread(AioContext *ctx)
{
    return ctx == qemu_get_current_aio_context();
}

/**
 * aio_context_setup:
 * @ctx: the aio context
 *
 * Initialize the aio context.
 */
void aio_context_setup(AioContext *ctx);

/**
 * aio_context_destroy:
 * @ctx: the aio context
 *
 * Destroy the aio context.
 */
void aio_context_destroy(AioContext *ctx);

/**
 * aio_context_set_poll_params:
 * @ctx: the aio context
 * @max_ns: how long to busy poll for, in nanoseconds
 * @grow: polling time growth factor
 * @shrink: polling time shrink factor
 *
 * Poll mode can be disabled by setting poll_max_ns to 0.
 */
void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
                                 int64_t grow, int64_t shrink,
                                 Error **errp);
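
/*
 * Example (sketch; the parameter values are arbitrary and error_abort is
 * assumed from "qapi/error.h"). Enable adaptive busy-polling up to 32
 * microseconds, or disable it entirely:
 *
 *     aio_context_set_poll_params(ctx, 32 * 1000, 2, 2, &error_abort);
 *
 *     aio_context_set_poll_params(ctx, 0, 0, 0, &error_abort);
 */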

#endif