1 | /* |
2 | * QEMU coroutines |
3 | * |
4 | * Copyright IBM, Corp. 2011 |
5 | * |
6 | * Authors: |
7 | * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com> |
8 | * Kevin Wolf <kwolf@redhat.com> |
9 | * |
10 | * This work is licensed under the terms of the GNU LGPL, version 2 or later. |
11 | * See the COPYING.LIB file in the top-level directory. |
12 | * |
13 | */ |
14 | |
15 | #include "qemu/osdep.h" |
16 | #include "trace.h" |
17 | #include "qemu/thread.h" |
18 | #include "qemu/atomic.h" |
19 | #include "qemu/coroutine.h" |
20 | #include "qemu/coroutine_int.h" |
21 | #include "block/aio.h" |
22 | |
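/*
 * A minimal usage sketch (my_entry and my_data are hypothetical caller
 * names, not part of this file):
 *
 *   static void coroutine_fn my_entry(void *opaque)
 *   {
 *       ...
 *       qemu_coroutine_yield();       // give control back to the entrant
 *       ...                           // resumed by the next enter
 *   }
 *
 *   Coroutine *co = qemu_coroutine_create(my_entry, my_data);
 *   qemu_coroutine_enter(co);         // runs my_entry until yield or return
 *   qemu_coroutine_enter(co);         // resumes my_entry after its yield
 */
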
23 | enum { |
24 | POOL_BATCH_SIZE = 64, |
25 | }; |
26 | |
/*
 * Free lists to speed up creation: release_pool is shared by all threads and
 * manipulated only with atomic operations, while alloc_pool is a per-thread
 * cache that is refilled from release_pool in batches.
 */
static QSLIST_HEAD(, Coroutine) release_pool =
    QSLIST_HEAD_INITIALIZER(release_pool);
static unsigned int release_pool_size;
static __thread QSLIST_HEAD(, Coroutine) alloc_pool =
    QSLIST_HEAD_INITIALIZER(alloc_pool);
31 | static __thread unsigned int alloc_pool_size; |
32 | static __thread Notifier coroutine_pool_cleanup_notifier; |
33 | |
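/* Thread-exit notifier: drain this thread's alloc_pool so that pooled
 * coroutines are not leaked when the thread goes away.
 */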
34 | static void coroutine_pool_cleanup(Notifier *n, void *value) |
35 | { |
36 | Coroutine *co; |
37 | Coroutine *tmp; |
38 | |
39 | QSLIST_FOREACH_SAFE(co, &alloc_pool, pool_next, tmp) { |
40 | QSLIST_REMOVE_HEAD(&alloc_pool, pool_next); |
41 | qemu_coroutine_delete(co); |
42 | } |
43 | } |
44 | |
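/* Allocate a coroutine, preferring the thread-local alloc_pool.  When that is
 * empty and the shared release_pool has grown past POOL_BATCH_SIZE, steal the
 * whole release_pool in one atomic move; only if both pools come up empty do
 * we fall back to a fresh qemu_coroutine_new().
 */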
45 | Coroutine *qemu_coroutine_create(CoroutineEntry *entry, void *opaque) |
46 | { |
47 | Coroutine *co = NULL; |
48 | |
49 | if (CONFIG_COROUTINE_POOL) { |
50 | co = QSLIST_FIRST(&alloc_pool); |
51 | if (!co) { |
52 | if (release_pool_size > POOL_BATCH_SIZE) { |
53 | /* Slow path; a good place to register the destructor, too. */ |
54 | if (!coroutine_pool_cleanup_notifier.notify) { |
55 | coroutine_pool_cleanup_notifier.notify = coroutine_pool_cleanup; |
56 | qemu_thread_atexit_add(&coroutine_pool_cleanup_notifier); |
57 | } |
58 | |
59 | /* This is not exact; there could be a little skew between |
60 | * release_pool_size and the actual size of release_pool. But |
61 | * it is just a heuristic, it does not need to be perfect. |
62 | */ |
63 | alloc_pool_size = atomic_xchg(&release_pool_size, 0); |
64 | QSLIST_MOVE_ATOMIC(&alloc_pool, &release_pool); |
65 | co = QSLIST_FIRST(&alloc_pool); |
66 | } |
67 | } |
68 | if (co) { |
69 | QSLIST_REMOVE_HEAD(&alloc_pool, pool_next); |
70 | alloc_pool_size--; |
71 | } |
72 | } |
73 | |
74 | if (!co) { |
75 | co = qemu_coroutine_new(); |
76 | } |
77 | |
78 | co->entry = entry; |
79 | co->entry_arg = opaque; |
80 | QSIMPLEQ_INIT(&co->co_queue_wakeup); |
81 | return co; |
82 | } |
83 | |
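/* Recycle a terminated coroutine: prefer the shared release_pool (bounded at
 * POOL_BATCH_SIZE * 2), then the thread-local alloc_pool (bounded at
 * POOL_BATCH_SIZE), and destroy the coroutine for real only when both pools
 * are full.
 */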
84 | static void coroutine_delete(Coroutine *co) |
85 | { |
86 | co->caller = NULL; |
87 | |
88 | if (CONFIG_COROUTINE_POOL) { |
89 | if (release_pool_size < POOL_BATCH_SIZE * 2) { |
90 | QSLIST_INSERT_HEAD_ATOMIC(&release_pool, co, pool_next); |
91 | atomic_inc(&release_pool_size); |
92 | return; |
93 | } |
94 | if (alloc_pool_size < POOL_BATCH_SIZE) { |
95 | QSLIST_INSERT_HEAD(&alloc_pool, co, pool_next); |
96 | alloc_pool_size++; |
97 | return; |
98 | } |
99 | } |
100 | |
101 | qemu_coroutine_delete(co); |
102 | } |
103 | |
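/* Enter co in the given AioContext.  Coroutines that co wakes up are run
 * iteratively from the pending queue below rather than by recursive calls,
 * so the host stack depth stays bounded no matter how long the wakeup chain
 * gets.
 */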
104 | void qemu_aio_coroutine_enter(AioContext *ctx, Coroutine *co) |
105 | { |
106 | QSIMPLEQ_HEAD(, Coroutine) pending = QSIMPLEQ_HEAD_INITIALIZER(pending); |
107 | Coroutine *from = qemu_coroutine_self(); |
108 | |
109 | QSIMPLEQ_INSERT_TAIL(&pending, co, co_queue_next); |
110 | |
111 | /* Run co and any queued coroutines */ |
112 | while (!QSIMPLEQ_EMPTY(&pending)) { |
113 | Coroutine *to = QSIMPLEQ_FIRST(&pending); |
114 | CoroutineAction ret; |
115 | |
        /* Cannot rely on the read barrier for to->scheduled in aio_co_wake(),
         * as this function also has callers outside of aio_co_wake() */
118 | const char *scheduled = atomic_mb_read(&to->scheduled); |
119 | |
120 | QSIMPLEQ_REMOVE_HEAD(&pending, co_queue_next); |
121 | |
122 | trace_qemu_aio_coroutine_enter(ctx, from, to, to->entry_arg); |
123 | |
        /* If the coroutine has already been scheduled, entering it again will
         * cause us to enter it twice, potentially even after the coroutine
         * has been deleted */
127 | if (scheduled) { |
            fprintf(stderr,
                    "%s: Co-routine was already scheduled in '%s'\n",
                    __func__, scheduled);
131 | abort(); |
132 | } |
133 | |
134 | if (to->caller) { |
            fprintf(stderr, "Co-routine re-entered recursively\n");
136 | abort(); |
137 | } |
138 | |
139 | to->caller = from; |
140 | to->ctx = ctx; |
141 | |
        /* Store to->ctx before anything that stores the to pointer itself.
         * Matches the barrier in aio_co_wake and qemu_co_mutex_wake.
         */
145 | smp_wmb(); |
146 | |
147 | ret = qemu_coroutine_switch(from, to, COROUTINE_ENTER); |
148 | |
149 | /* Queued coroutines are run depth-first; previously pending coroutines |
150 | * run after those queued more recently. |
151 | */ |
152 | QSIMPLEQ_PREPEND(&pending, &to->co_queue_wakeup); |
153 | |
154 | switch (ret) { |
155 | case COROUTINE_YIELD: |
156 | break; |
157 | case COROUTINE_TERMINATE: |
158 | assert(!to->locks_held); |
159 | trace_qemu_coroutine_terminate(to); |
160 | coroutine_delete(to); |
161 | break; |
162 | default: |
163 | abort(); |
164 | } |
165 | } |
166 | } |
167 | |
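/* Enter co in the calling thread's current AioContext */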
168 | void qemu_coroutine_enter(Coroutine *co) |
169 | { |
170 | qemu_aio_coroutine_enter(qemu_get_current_aio_context(), co); |
171 | } |
172 | |
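/* Enter co only if it is not already active, i.e. not part of the call stack
 * of a running coroutine; otherwise do nothing.
 */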
173 | void qemu_coroutine_enter_if_inactive(Coroutine *co) |
174 | { |
175 | if (!qemu_coroutine_entered(co)) { |
176 | qemu_coroutine_enter(co); |
177 | } |
178 | } |
179 | |
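/* Transfer control from the current coroutine back to whoever entered it.
 * Yielding with no caller is a bug and aborts.
 */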
180 | void coroutine_fn qemu_coroutine_yield(void) |
181 | { |
182 | Coroutine *self = qemu_coroutine_self(); |
183 | Coroutine *to = self->caller; |
184 | |
185 | trace_qemu_coroutine_yield(self, to); |
186 | |
187 | if (!to) { |
        fprintf(stderr, "Co-routine is yielding to no one\n");
189 | abort(); |
190 | } |
191 | |
192 | self->caller = NULL; |
193 | qemu_coroutine_switch(self, to, COROUTINE_YIELD); |
194 | } |
195 | |
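/* Return true if co is active, i.e. it has been entered and has not yet
 * yielded or terminated.
 */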
196 | bool qemu_coroutine_entered(Coroutine *co) |
197 | { |
198 | return co->caller; |
199 | } |
200 | |
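/* Return the AioContext that co was most recently entered in */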
201 | AioContext *coroutine_fn qemu_coroutine_get_aio_context(Coroutine *co) |
202 | { |
203 | return co->ctx; |
204 | } |
205 | |