/*
 * QEMU I/O channels
 *
 * Copyright (c) 2015 Red Hat, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 *
 */

#ifndef QIO_CHANNEL_H
#define QIO_CHANNEL_H

#include "qom/object.h"
#include "qemu/coroutine.h"
#include "block/aio.h"

#define TYPE_QIO_CHANNEL "qio-channel"
#define QIO_CHANNEL(obj) \
    OBJECT_CHECK(QIOChannel, (obj), TYPE_QIO_CHANNEL)
#define QIO_CHANNEL_CLASS(klass) \
    OBJECT_CLASS_CHECK(QIOChannelClass, klass, TYPE_QIO_CHANNEL)
#define QIO_CHANNEL_GET_CLASS(obj) \
    OBJECT_GET_CLASS(QIOChannelClass, obj, TYPE_QIO_CHANNEL)

typedef struct QIOChannel QIOChannel;
typedef struct QIOChannelClass QIOChannelClass;

#define QIO_CHANNEL_ERR_BLOCK -2

typedef enum QIOChannelFeature QIOChannelFeature;

enum QIOChannelFeature {
    QIO_CHANNEL_FEATURE_FD_PASS,
    QIO_CHANNEL_FEATURE_SHUTDOWN,
    QIO_CHANNEL_FEATURE_LISTEN,
};


typedef enum QIOChannelShutdown QIOChannelShutdown;

enum QIOChannelShutdown {
    QIO_CHANNEL_SHUTDOWN_READ = 1,
    QIO_CHANNEL_SHUTDOWN_WRITE = 2,
    QIO_CHANNEL_SHUTDOWN_BOTH = 3,
};

typedef gboolean (*QIOChannelFunc)(QIOChannel *ioc,
                                   GIOCondition condition,
                                   gpointer data);

/**
 * QIOChannel:
 *
 * The QIOChannel defines the core API for a generic I/O channel
 * class hierarchy. It is inspired by GIOChannel, but has the
 * following differences:
 *
 * - Use of QOM to properly support arbitrary subclassing
 * - Support for iovecs for efficient I/O with multiple blocks
 * - No character set translation; data is handled exclusively as binary
 * - Direct support for QEMU Error object reporting
 * - File descriptor passing
 *
 * This base class is abstract, so it cannot be instantiated. There
 * will be subclasses for dealing with sockets, files, and higher
 * level protocols such as TLS, WebSocket, etc.
 */

struct QIOChannel {
    Object parent;
    unsigned int features; /* bitmask of QIOChannelFeatures */
    char *name;
    AioContext *ctx;
    Coroutine *read_coroutine;
    Coroutine *write_coroutine;
#ifdef _WIN32
    HANDLE event; /* For use with GSource on Win32 */
#endif
};

/**
 * QIOChannelClass:
 *
 * This class defines the contract that all subclasses
 * must follow to provide specific channel implementations.
 * The first five callbacks are mandatory; the remainder
 * provide additional optional features.
 *
 * Consult the corresponding public API docs for a description
 * of the semantics of each callback.
 */
struct QIOChannelClass {
    ObjectClass parent;

    /* Mandatory callbacks */
    ssize_t (*io_writev)(QIOChannel *ioc,
                         const struct iovec *iov,
                         size_t niov,
                         int *fds,
                         size_t nfds,
                         Error **errp);
    ssize_t (*io_readv)(QIOChannel *ioc,
                        const struct iovec *iov,
                        size_t niov,
                        int **fds,
                        size_t *nfds,
                        Error **errp);
    int (*io_close)(QIOChannel *ioc,
                    Error **errp);
    GSource * (*io_create_watch)(QIOChannel *ioc,
                                 GIOCondition condition);
    int (*io_set_blocking)(QIOChannel *ioc,
                           bool enabled,
                           Error **errp);

    /* Optional callbacks */
    int (*io_shutdown)(QIOChannel *ioc,
                       QIOChannelShutdown how,
                       Error **errp);
    void (*io_set_cork)(QIOChannel *ioc,
                        bool enabled);
    void (*io_set_delay)(QIOChannel *ioc,
                         bool enabled);
    off_t (*io_seek)(QIOChannel *ioc,
                     off_t offset,
                     int whence,
                     Error **errp);
    void (*io_set_aio_fd_handler)(QIOChannel *ioc,
                                  AioContext *ctx,
                                  IOHandler *io_read,
                                  IOHandler *io_write,
                                  void *opaque);
};
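
/*
 * As a rough illustration (not part of this API), a channel
 * implementation might wire up the mandatory callbacks from its QOM
 * class_init function. The "QIOChannelFoo" type, "TYPE_QIO_CHANNEL_FOO"
 * and the qio_channel_foo_* callbacks below are hypothetical names:
 *
 *   static void qio_channel_foo_class_init(ObjectClass *klass,
 *                                          void *class_data)
 *   {
 *       QIOChannelClass *ioc_klass = QIO_CHANNEL_CLASS(klass);
 *
 *       ioc_klass->io_writev = qio_channel_foo_writev;
 *       ioc_klass->io_readv = qio_channel_foo_readv;
 *       ioc_klass->io_close = qio_channel_foo_close;
 *       ioc_klass->io_create_watch = qio_channel_foo_create_watch;
 *       ioc_klass->io_set_blocking = qio_channel_foo_set_blocking;
 *   }
 *
 *   static const TypeInfo qio_channel_foo_info = {
 *       .parent = TYPE_QIO_CHANNEL,
 *       .name = TYPE_QIO_CHANNEL_FOO,
 *       .instance_size = sizeof(QIOChannelFoo),
 *       .class_init = qio_channel_foo_class_init,
 *   };
 */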

/* General I/O handling functions */

/**
 * qio_channel_has_feature:
 * @ioc: the channel object
 * @feature: the feature to check support of
 *
 * Determine whether the channel implementation supports
 * the optional feature named in @feature.
 *
 * Returns: true if supported, false otherwise.
 */
bool qio_channel_has_feature(QIOChannel *ioc,
                             QIOChannelFeature feature);

/**
 * qio_channel_set_feature:
 * @ioc: the channel object
 * @feature: the feature to set support for
 *
 * Add channel support for the feature named in @feature.
 */
void qio_channel_set_feature(QIOChannel *ioc,
                             QIOChannelFeature feature);

/**
 * qio_channel_set_name:
 * @ioc: the channel object
 * @name: the name of the channel
 *
 * Sets the name of the channel, which serves as an aid
 * to debugging. The name is used when creating GSource
 * watches for this channel.
 */
void qio_channel_set_name(QIOChannel *ioc,
                          const char *name);

/**
 * qio_channel_readv_full:
 * @ioc: the channel object
 * @iov: the array of memory regions to read data into
 * @niov: the length of the @iov array
 * @fds: pointer to an array that will receive the file handles
 * @nfds: pointer filled with number of elements in @fds on return
 * @errp: pointer to a NULL-initialized error object
 *
 * Read data from the IO channel, storing it in the
 * memory regions referenced by @iov. Each element
 * in the @iov will be fully populated with data
 * before the next one is used. The @niov parameter
 * specifies the total number of elements in @iov.
 *
 * It is not required for all @iov to be filled with
 * data. If the channel is in blocking mode, at least
 * one byte of data will be read, but no more is
 * guaranteed. If the channel is non-blocking and no
 * data is available, it will return QIO_CHANNEL_ERR_BLOCK.
 *
 * If the channel has passed any file descriptors,
 * the @fds array pointer will be allocated and
 * the elements filled with the received file
 * descriptors. The @nfds pointer will be updated
 * to indicate the size of the @fds array that
 * was allocated. It is the caller's responsibility
 * to call close() on each file descriptor and to
 * call g_free() on the array pointer in @fds.
 *
 * It is an error to pass a non-NULL @fds parameter
 * unless qio_channel_has_feature() returns a true
 * value for the QIO_CHANNEL_FEATURE_FD_PASS constant.
 *
 * Returns: the number of bytes read, or -1 on error,
 * or QIO_CHANNEL_ERR_BLOCK if no data is available
 * and the channel is non-blocking
 */
ssize_t qio_channel_readv_full(QIOChannel *ioc,
                               const struct iovec *iov,
                               size_t niov,
                               int **fds,
                               size_t *nfds,
                               Error **errp);
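
/*
 * A rough usage sketch for receiving data along with any passed file
 * descriptors (illustrative only; the buffer size is arbitrary, error
 * handling is condensed, and each received descriptor is simply closed
 * here rather than put to use):
 *
 *   char buf[1024];
 *   struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *   int *fds = NULL;
 *   size_t nfds = 0;
 *   ssize_t len;
 *
 *   len = qio_channel_readv_full(ioc, &iov, 1, &fds, &nfds, errp);
 *   if (len > 0) {
 *       for (size_t i = 0; i < nfds; i++) {
 *           close(fds[i]);
 *       }
 *       g_free(fds);
 *   }
 */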


/**
 * qio_channel_writev_full:
 * @ioc: the channel object
 * @iov: the array of memory regions to write data from
 * @niov: the length of the @iov array
 * @fds: an array of file handles to send
 * @nfds: number of file handles in @fds
 * @errp: pointer to a NULL-initialized error object
 *
 * Write data to the IO channel, reading it from the
 * memory regions referenced by @iov. Each element
 * in the @iov will be fully sent, before the next
 * one is used. The @niov parameter specifies the
 * total number of elements in @iov.
 *
 * It is not required for all @iov data to be fully
 * sent. If the channel is in blocking mode, at least
 * one byte of data will be sent, but no more is
 * guaranteed. If the channel is non-blocking and no
 * data can be sent, it will return QIO_CHANNEL_ERR_BLOCK.
 *
 * If there are file descriptors to send, the @fds
 * array should be non-NULL and provide the handles.
 * All file descriptors will be sent if at least one
 * byte of data was sent.
 *
 * It is an error to pass a non-NULL @fds parameter
 * unless qio_channel_has_feature() returns a true
 * value for the QIO_CHANNEL_FEATURE_FD_PASS constant.
 *
 * Returns: the number of bytes sent, or -1 on error,
 * or QIO_CHANNEL_ERR_BLOCK if no data can be sent
 * and the channel is non-blocking
 */
ssize_t qio_channel_writev_full(QIOChannel *ioc,
                                const struct iovec *iov,
                                size_t niov,
                                int *fds,
                                size_t nfds,
                                Error **errp);
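
/*
 * A rough sketch of sending a buffer together with one file descriptor
 * (illustrative only; assumes the channel advertises
 * QIO_CHANNEL_FEATURE_FD_PASS and that "fd", "buf" and "buflen" are
 * provided by the caller):
 *
 *   struct iovec iov = { .iov_base = buf, .iov_len = buflen };
 *   int fds[1] = { fd };
 *
 *   if (qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_FD_PASS)) {
 *       qio_channel_writev_full(ioc, &iov, 1, fds, 1, errp);
 *   }
 */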

/**
 * qio_channel_readv_all_eof:
 * @ioc: the channel object
 * @iov: the array of memory regions to read data into
 * @niov: the length of the @iov array
 * @errp: pointer to a NULL-initialized error object
 *
 * Read data from the IO channel, storing it in the
 * memory regions referenced by @iov. Each element
 * in the @iov will be fully populated with data
 * before the next one is used. The @niov parameter
 * specifies the total number of elements in @iov.
 *
 * The function will wait for all requested data
 * to be read, yielding from the current coroutine
 * if required.
 *
 * If end-of-file occurs before any data is read,
 * no error is reported; otherwise, if it occurs
 * before all requested data has been read, an error
 * will be reported.
 *
 * Returns: 1 if all bytes were read, 0 if end-of-file
 * occurs without data, or -1 on error
 */
int qio_channel_readv_all_eof(QIOChannel *ioc,
                              const struct iovec *iov,
                              size_t niov,
                              Error **errp);

/**
 * qio_channel_readv_all:
 * @ioc: the channel object
 * @iov: the array of memory regions to read data into
 * @niov: the length of the @iov array
 * @errp: pointer to a NULL-initialized error object
 *
 * Read data from the IO channel, storing it in the
 * memory regions referenced by @iov. Each element
 * in the @iov will be fully populated with data
 * before the next one is used. The @niov parameter
 * specifies the total number of elements in @iov.
 *
 * The function will wait for all requested data
 * to be read, yielding from the current coroutine
 * if required.
 *
 * If end-of-file occurs before all requested data
 * has been read, an error will be reported.
 *
 * Returns: 0 if all bytes were read, or -1 on error
 */
int qio_channel_readv_all(QIOChannel *ioc,
                          const struct iovec *iov,
                          size_t niov,
                          Error **errp);


/**
 * qio_channel_writev_all:
 * @ioc: the channel object
 * @iov: the array of memory regions to write data from
 * @niov: the length of the @iov array
 * @errp: pointer to a NULL-initialized error object
 *
 * Write data to the IO channel, reading it from the
 * memory regions referenced by @iov. Each element
 * in the @iov will be fully sent, before the next
 * one is used. The @niov parameter specifies the
 * total number of elements in @iov.
 *
 * The function will wait for all requested data
 * to be written, yielding from the current coroutine
 * if required.
 *
 * Returns: 0 if all bytes were written, or -1 on error
 */
int qio_channel_writev_all(QIOChannel *ioc,
                           const struct iovec *iov,
                           size_t niov,
                           Error **errp);

/**
 * qio_channel_readv:
 * @ioc: the channel object
 * @iov: the array of memory regions to read data into
 * @niov: the length of the @iov array
 * @errp: pointer to a NULL-initialized error object
 *
 * Behaves as qio_channel_readv_full() but does not support
 * receiving of file handles.
 */
ssize_t qio_channel_readv(QIOChannel *ioc,
                          const struct iovec *iov,
                          size_t niov,
                          Error **errp);

/**
 * qio_channel_writev:
 * @ioc: the channel object
 * @iov: the array of memory regions to write data from
 * @niov: the length of the @iov array
 * @errp: pointer to a NULL-initialized error object
 *
 * Behaves as qio_channel_writev_full() but does not support
 * sending of file handles.
 */
ssize_t qio_channel_writev(QIOChannel *ioc,
                           const struct iovec *iov,
                           size_t niov,
                           Error **errp);

/**
 * qio_channel_read:
 * @ioc: the channel object
 * @buf: the memory region to read data into
 * @buflen: the length of @buf
 * @errp: pointer to a NULL-initialized error object
 *
 * Behaves as qio_channel_readv_full() but does not support
 * receiving of file handles, and only supports reading into
 * a single memory region.
 */
ssize_t qio_channel_read(QIOChannel *ioc,
                         char *buf,
                         size_t buflen,
                         Error **errp);

/**
 * qio_channel_write:
 * @ioc: the channel object
 * @buf: the memory region to send data from
 * @buflen: the length of @buf
 * @errp: pointer to a NULL-initialized error object
 *
 * Behaves as qio_channel_writev_full() but does not support
 * sending of file handles, and only supports writing from a
 * single memory region.
 */
ssize_t qio_channel_write(QIOChannel *ioc,
                          const char *buf,
                          size_t buflen,
                          Error **errp);

/**
 * qio_channel_read_all_eof:
 * @ioc: the channel object
 * @buf: the memory region to read data into
 * @buflen: the number of bytes to read into @buf
 * @errp: pointer to a NULL-initialized error object
 *
 * Reads @buflen bytes into @buf, possibly blocking or (if the
 * channel is non-blocking) yielding from the current coroutine
 * multiple times until the entire content is read. If end-of-file
 * occurs immediately it is not an error, but if it occurs after
 * data has been read it will return an error rather than a
 * short-read. Otherwise behaves as qio_channel_read().
 *
 * Returns: 1 if all bytes were read, 0 if end-of-file occurs
 * without data, or -1 on error
 */
int qio_channel_read_all_eof(QIOChannel *ioc,
                             char *buf,
                             size_t buflen,
                             Error **errp);

/**
 * qio_channel_read_all:
 * @ioc: the channel object
 * @buf: the memory region to read data into
 * @buflen: the number of bytes to read into @buf
 * @errp: pointer to a NULL-initialized error object
 *
 * Reads @buflen bytes into @buf, possibly blocking or (if the
 * channel is non-blocking) yielding from the current coroutine
 * multiple times until the entire content is read. If end-of-file
 * occurs it will return an error rather than a short-read. Otherwise
 * behaves as qio_channel_read().
 *
 * Returns: 0 if all bytes were read, or -1 on error
 */
int qio_channel_read_all(QIOChannel *ioc,
                         char *buf,
                         size_t buflen,
                         Error **errp);

/**
 * qio_channel_write_all:
 * @ioc: the channel object
 * @buf: the memory region to write data from
 * @buflen: the number of bytes to write from @buf
 * @errp: pointer to a NULL-initialized error object
 *
 * Writes @buflen bytes from @buf, possibly blocking or (if the
 * channel is non-blocking) yielding from the current coroutine
 * multiple times until the entire content is written. Otherwise
 * behaves as qio_channel_write().
 *
 * Returns: 0 if all bytes were written, or -1 on error
 */
int qio_channel_write_all(QIOChannel *ioc,
                          const char *buf,
                          size_t buflen,
                          Error **errp);
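
/*
 * A rough sketch of a blocking request/reply exchange built from the
 * *_all helpers (illustrative only; "req", "reqlen", "reply" and
 * "replylen" are hypothetical caller-provided values):
 *
 *   if (qio_channel_write_all(ioc, req, reqlen, errp) < 0) {
 *       return -1;
 *   }
 *   if (qio_channel_read_all(ioc, reply, replylen, errp) < 0) {
 *       return -1;
 *   }
 */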

/**
 * qio_channel_set_blocking:
 * @ioc: the channel object
 * @enabled: the blocking flag state
 * @errp: pointer to a NULL-initialized error object
 *
 * If @enabled is true, then the channel is put into
 * blocking mode, otherwise it will be non-blocking.
 *
 * In non-blocking mode, read/write operations may
 * return QIO_CHANNEL_ERR_BLOCK if they would otherwise
 * block on I/O.
 */
int qio_channel_set_blocking(QIOChannel *ioc,
                             bool enabled,
                             Error **errp);

/**
 * qio_channel_close:
 * @ioc: the channel object
 * @errp: pointer to a NULL-initialized error object
 *
 * Close the channel, flushing any pending I/O.
 *
 * Returns: 0 on success, -1 on error
 */
int qio_channel_close(QIOChannel *ioc,
                      Error **errp);

/**
 * qio_channel_shutdown:
 * @ioc: the channel object
 * @how: the direction to shutdown
 * @errp: pointer to a NULL-initialized error object
 *
 * Shuts down transmission and/or reception of data
 * without closing the underlying transport.
 *
 * Not all implementations will support this facility,
 * so they may report an error. To avoid errors, the
 * caller may check for the feature flag
 * QIO_CHANNEL_FEATURE_SHUTDOWN prior to calling
 * this method.
 *
 * Returns: 0 on success, -1 on error
 */
int qio_channel_shutdown(QIOChannel *ioc,
                         QIOChannelShutdown how,
                         Error **errp);
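
/*
 * A rough sketch of half-closing the write side when the channel
 * supports it, falling back to a full close otherwise (illustrative
 * only):
 *
 *   if (qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_SHUTDOWN)) {
 *       qio_channel_shutdown(ioc, QIO_CHANNEL_SHUTDOWN_WRITE, errp);
 *   } else {
 *       qio_channel_close(ioc, errp);
 *   }
 */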

/**
 * qio_channel_set_delay:
 * @ioc: the channel object
 * @enabled: the new flag state
 *
 * Controls whether the underlying transport is
 * permitted to delay writes in order to merge
 * small packets. If @enabled is true, then the
 * writes may be delayed in order to opportunistically
 * merge small packets into larger ones. If @enabled
 * is false, writes are dispatched immediately with
 * no delay.
 *
 * When @enabled is false, applications may wish to
 * use the qio_channel_set_cork() method to explicitly
 * control write merging.
 *
 * On channels which are backed by a socket, this
 * API corresponds to the inverse of the TCP_NODELAY
 * flag, controlling whether the Nagle algorithm is
 * active.
 *
 * This setting is merely a hint, so implementations are
 * free to ignore this without it being considered an
 * error.
 */
void qio_channel_set_delay(QIOChannel *ioc,
                           bool enabled);

/**
 * qio_channel_set_cork:
 * @ioc: the channel object
 * @enabled: the new flag state
 *
 * Controls whether the underlying transport is
 * permitted to dispatch data that is written.
 * If @enabled is true, then any data written will
 * be queued in local buffers until @enabled is
 * set to false once again.
 *
 * This feature is typically used when the automatic
 * write coalescing facility is disabled via the
 * qio_channel_set_delay() method.
 *
 * On channels which are backed by a socket, this
 * API corresponds to the TCP_CORK flag.
 *
 * This setting is merely a hint, so implementations are
 * free to ignore this without it being considered an
 * error.
 */
void qio_channel_set_cork(QIOChannel *ioc,
                          bool enabled);
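
/*
 * A rough sketch of batching several small writes into fewer packets
 * by corking the channel around them (illustrative only; "hdr",
 * "hdrlen", "payload" and "payloadlen" are hypothetical):
 *
 *   qio_channel_set_cork(ioc, true);
 *   qio_channel_write_all(ioc, hdr, hdrlen, errp);
 *   qio_channel_write_all(ioc, payload, payloadlen, errp);
 *   qio_channel_set_cork(ioc, false);
 */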


/**
 * qio_channel_io_seek:
 * @ioc: the channel object
 * @offset: the position to seek to, relative to @whence
 * @whence: one of the (POSIX) SEEK_* constants listed below
 * @errp: pointer to a NULL-initialized error object
 *
 * Moves the current I/O position within the channel
 * @ioc, to be @offset. The value of @offset is
 * interpreted relative to @whence:
 *
 * SEEK_SET - the position is set to @offset bytes
 * SEEK_CUR - the position is moved by @offset bytes
 * SEEK_END - the position is set to end of the file plus @offset bytes
 *
 * Not all implementations will support this facility,
 * so they may report an error.
 *
 * Returns: the new position on success, (off_t)-1 on failure
 */
off_t qio_channel_io_seek(QIOChannel *ioc,
                          off_t offset,
                          int whence,
                          Error **errp);


/**
 * qio_channel_create_watch:
 * @ioc: the channel object
 * @condition: the I/O condition to monitor
 *
 * Create a new main loop source that is used to watch
 * for the I/O condition @condition. Typically the
 * qio_channel_add_watch() method would be used instead
 * of this, since it directly attaches a callback to
 * the source.
 *
 * Returns: the new main loop source.
 */
GSource *qio_channel_create_watch(QIOChannel *ioc,
                                  GIOCondition condition);

/**
 * qio_channel_add_watch:
 * @ioc: the channel object
 * @condition: the I/O condition to monitor
 * @func: callback to invoke when the source becomes ready
 * @user_data: opaque data to pass to @func
 * @notify: callback to free @user_data
 *
 * Create a new main loop source that is used to watch
 * for the I/O condition @condition. The callback @func
 * will be registered against the source, to be invoked
 * when the source becomes ready. The optional @user_data
 * will be passed to @func when it is invoked. The @notify
 * callback will be used to free @user_data when the
 * watch is deleted.
 *
 * The returned source ID can be used with g_source_remove()
 * to remove and free the source when no longer required.
 * Alternatively the @func callback can return a FALSE
 * value.
 *
 * Returns: the source ID
 */
guint qio_channel_add_watch(QIOChannel *ioc,
                            GIOCondition condition,
                            QIOChannelFunc func,
                            gpointer user_data,
                            GDestroyNotify notify);
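
/*
 * A rough sketch of watching for readability from the main loop
 * (illustrative only; "handle_readable" is a hypothetical callback
 * that keeps the watch alive on data or a would-block result, and
 * drops it on end-of-file or error):
 *
 *   static gboolean handle_readable(QIOChannel *ioc,
 *                                   GIOCondition condition,
 *                                   gpointer opaque)
 *   {
 *       char buf[512];
 *       ssize_t len = qio_channel_read(ioc, buf, sizeof(buf), NULL);
 *       if (len <= 0 && len != QIO_CHANNEL_ERR_BLOCK) {
 *           return G_SOURCE_REMOVE;
 *       }
 *       return G_SOURCE_CONTINUE;
 *   }
 *
 *   guint tag = qio_channel_add_watch(ioc, G_IO_IN,
 *                                     handle_readable, NULL, NULL);
 */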

/**
 * qio_channel_add_watch_full:
 * @ioc: the channel object
 * @condition: the I/O condition to monitor
 * @func: callback to invoke when the source becomes ready
 * @user_data: opaque data to pass to @func
 * @notify: callback to free @user_data
 * @context: the GMainContext in which to run the watch source
 *
 * Similar to qio_channel_add_watch(), but allows the context
 * in which to run the watch source to be specified.
 *
 * Returns: the source ID
 */
guint qio_channel_add_watch_full(QIOChannel *ioc,
                                 GIOCondition condition,
                                 QIOChannelFunc func,
                                 gpointer user_data,
                                 GDestroyNotify notify,
                                 GMainContext *context);

/**
 * qio_channel_add_watch_source:
 * @ioc: the channel object
 * @condition: the I/O condition to monitor
 * @func: callback to invoke when the source becomes ready
 * @user_data: opaque data to pass to @func
 * @notify: callback to free @user_data
 * @context: the GMainContext to bind the source to
 *
 * Similar to qio_channel_add_watch(), but allows the context
 * in which to run the watch source to be specified, and returns
 * the GSource object (already referenced) rather than a tag ID.
 *
 * Note: the caller is responsible for unreffing the source when
 * it is no longer needed.
 *
 * Returns: the source pointer
 */
GSource *qio_channel_add_watch_source(QIOChannel *ioc,
                                      GIOCondition condition,
                                      QIOChannelFunc func,
                                      gpointer user_data,
                                      GDestroyNotify notify,
                                      GMainContext *context);

/**
 * qio_channel_attach_aio_context:
 * @ioc: the channel object
 * @ctx: the #AioContext to set the handlers on
 *
 * Request that qio_channel_yield() sets I/O handlers on
 * the given #AioContext. If @ctx is %NULL, qio_channel_yield()
 * uses QEMU's main thread event loop.
 *
 * You can move a #QIOChannel from one #AioContext to another even if
 * I/O handlers are set for a coroutine. However, #QIOChannel provides
 * no synchronization between the calls to qio_channel_yield() and
 * qio_channel_attach_aio_context().
 *
 * Therefore you should first call qio_channel_detach_aio_context()
 * to ensure that the coroutine is not entered concurrently. Then,
 * while the coroutine has yielded, call qio_channel_attach_aio_context(),
 * and then aio_co_schedule() to place the coroutine on the new
 * #AioContext. The calls to qio_channel_detach_aio_context()
 * and qio_channel_attach_aio_context() should be protected with
 * aio_context_acquire() and aio_context_release().
 */
void qio_channel_attach_aio_context(QIOChannel *ioc,
                                    AioContext *ctx);
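
/*
 * A rough sketch of moving a channel whose coroutine is parked in
 * qio_channel_yield() from one #AioContext to another, following the
 * sequence described above (illustrative only; "old_ctx", "new_ctx"
 * and "co" are hypothetical):
 *
 *   aio_context_acquire(old_ctx);
 *   qio_channel_detach_aio_context(ioc);
 *   aio_context_release(old_ctx);
 *
 *   aio_context_acquire(new_ctx);
 *   qio_channel_attach_aio_context(ioc, new_ctx);
 *   aio_co_schedule(new_ctx, co);
 *   aio_context_release(new_ctx);
 */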

/**
 * qio_channel_detach_aio_context:
 * @ioc: the channel object
 *
 * Disable any I/O handlers set by qio_channel_yield(). With the
 * help of aio_co_schedule(), this allows moving a coroutine that was
 * paused by qio_channel_yield() to another context.
 */
void qio_channel_detach_aio_context(QIOChannel *ioc);

/**
 * qio_channel_yield:
 * @ioc: the channel object
 * @condition: the I/O condition to wait for
 *
 * Yields execution from the current coroutine until the condition
 * indicated by @condition becomes available. @condition must
 * be either %G_IO_IN or %G_IO_OUT; it cannot contain both. In
 * addition, no two coroutines can be waiting on the same condition
 * and channel at the same time.
 *
 * This must only be called from coroutine context. It is safe to
 * reenter the coroutine externally while it is waiting; in this
 * case the function will return even if @condition is not yet
 * available.
 */
void coroutine_fn qio_channel_yield(QIOChannel *ioc,
                                    GIOCondition condition);
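
/*
 * A rough sketch of a non-blocking read inside a coroutine, yielding
 * until the channel becomes readable (illustrative only; "buf" and
 * "buflen" are hypothetical):
 *
 *   ssize_t len;
 *   for (;;) {
 *       len = qio_channel_read(ioc, buf, buflen, errp);
 *       if (len != QIO_CHANNEL_ERR_BLOCK) {
 *           break;
 *       }
 *       qio_channel_yield(ioc, G_IO_IN);
 *   }
 */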

/**
 * qio_channel_wait:
 * @ioc: the channel object
 * @condition: the I/O condition to wait for
 *
 * Block execution from the current thread until
 * the condition indicated by @condition becomes
 * available.
 *
 * This will enter a nested event loop to perform
 * the wait.
 */
void qio_channel_wait(QIOChannel *ioc,
                      GIOCondition condition);

/**
 * qio_channel_set_aio_fd_handler:
 * @ioc: the channel object
 * @ctx: the AioContext to set the handlers on
 * @io_read: the read handler
 * @io_write: the write handler
 * @opaque: the opaque value passed to the handler
 *
 * This is used internally by qio_channel_yield(). It can
 * be used by channel implementations to forward the handlers
 * to another channel (e.g. from #QIOChannelTLS to the
 * underlying socket).
 */
void qio_channel_set_aio_fd_handler(QIOChannel *ioc,
                                    AioContext *ctx,
                                    IOHandler *io_read,
                                    IOHandler *io_write,
                                    void *opaque);

#endif /* QIO_CHANNEL_H */