/*
  Copyright 2011, 2012 Kristian Nielsen and Monty Program Ab

  This file is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation; either
  version 2.1 of the License, or (at your option) any later version.

  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with this library. If not, see <http://www.gnu.org/licenses/>.
*/

/*
  Implementation of async context spawning using Posix ucontext and
  swapcontext().
*/
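
/*
  Rough usage sketch (illustrative only; my_blocking_func and arg are
  made-up names, the real callers live in the async client code):

    struct my_context ctx;
    int ret;

    if (my_context_init(&ctx, 64*1024))
      ... handle out-of-memory ...
    ret= my_context_spawn(&ctx, my_blocking_func, arg);
    while (ret > 0)
    {
      ... wait until it makes sense to resume, e.g. socket readable ...
      ret= my_context_continue(&ctx);
    }
    my_context_destroy(&ctx);

  Inside my_blocking_func(), calling my_context_yield(&ctx) suspends the
  function and returns control (with value 1) to whoever called
  my_context_spawn() or my_context_continue(); a later my_context_continue()
  resumes it where it left off. A return value of 0 means the spawned
  function has run to completion; -1 means an error.
*/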

#include "mysys_priv.h"
#include "m_string.h"
#include "my_context.h"

#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/valgrind.h>
#endif

#ifdef MY_CONTEXT_USE_UCONTEXT
/*
  makecontext() only allows passing integer arguments into the created
  context, but we want to pass a pointer. So we split the pointer into two
  ints in a union; this is somewhat hackish, but it works everywhere and
  does not break strict aliasing.
*/
union pass_void_ptr_as_2_int {
  int a[2];
  void *p;
};


/*
  We use an old-style (K&R) function definition here, as the function is
  passed to makecontext() and the declared type of the makecontext()
  argument does not match the actual type (which can differ from call to
  call).
*/
static void
my_context_spawn_internal(i0, i1)
int i0, i1;
{
  int err;
  struct my_context *c;
  union pass_void_ptr_as_2_int u;

  u.a[0]= i0;
  u.a[1]= i1;
  c= (struct my_context *)u.p;

  (*c->user_func)(c->user_data);
  c->active= 0;
  err= setcontext(&c->base_context);
  fprintf(stderr, "Aieie, setcontext() failed: %d (errno=%d)\n", err, errno);
}


int
my_context_continue(struct my_context *c)
{
  int err;

  if (!c->active)
    return 0;

  DBUG_SWAP_CODE_STATE(&c->dbug_state);
  err= swapcontext(&c->base_context, &c->spawned_context);
  DBUG_SWAP_CODE_STATE(&c->dbug_state);
  if (err)
  {
    fprintf(stderr, "Aieie, swapcontext() failed: %d (errno=%d)\n",
            err, errno);
    return -1;
  }

  return c->active;
}


int
my_context_spawn(struct my_context *c, void (*f)(void *), void *d)
{
  int err;
  union pass_void_ptr_as_2_int u;

  err= getcontext(&c->spawned_context);
  if (err)
    return -1;
  c->spawned_context.uc_stack.ss_sp= c->stack;
  c->spawned_context.uc_stack.ss_size= c->stack_size;
  c->spawned_context.uc_link= NULL;
  c->user_func= f;
  c->user_data= d;
  c->active= 1;
  u.p= c;
  makecontext(&c->spawned_context, my_context_spawn_internal, 2,
              u.a[0], u.a[1]);

  return my_context_continue(c);
}


int
my_context_yield(struct my_context *c)
{
  int err;

  if (!c->active)
    return -1;

  err= swapcontext(&c->spawned_context, &c->base_context);
  if (err)
    return -1;
  return 0;
}

int
my_context_init(struct my_context *c, size_t stack_size)
{
#if SIZEOF_CHARP > SIZEOF_INT*2
#error Error: Unable to store pointer in 2 ints on this architecture
#endif
  bzero(c, sizeof(*c));
  if (!(c->stack= malloc(stack_size)))
    return -1; /* Out of memory */
  c->stack_size= stack_size;
#ifdef HAVE_VALGRIND_MEMCHECK_H
  c->valgrind_stack_id=
    VALGRIND_STACK_REGISTER(c->stack, ((unsigned char *)(c->stack))+stack_size);
#endif
  return 0;
}

void
my_context_destroy(struct my_context *c)
{
  if (c->stack)
  {
#ifdef HAVE_VALGRIND_MEMCHECK_H
    VALGRIND_STACK_DEREGISTER(c->valgrind_stack_id);
#endif
    free(c->stack);
  }
  DBUG_FREE_CODE_STATE(&c->dbug_state);
}

#endif /* MY_CONTEXT_USE_UCONTEXT */


#ifdef MY_CONTEXT_USE_X86_64_GCC_ASM
/*
  GCC-amd64 implementation of my_context.

  This is slightly optimized in the common case where we never yield
  (e.g. fetch the next row when it is already fully received in the buffer).
  In this case we do not need to restore registers on return (though we still
  need to save them, as we cannot know in advance whether we will yield).
*/

#include <stdint.h>
#include <stdlib.h>

/*
  Layout of saved registers etc.
  Since this is accessed through gcc inline assembler, it is simpler to just
  use numbers than to try to define nice constants or structs.

  Slot  Offset  Contents
     0       0  %rsp
     1       8  %rbp
     2      16  %rbx
     3      24  %r12
     4      32  %r13
     5      40  %r14
     6      48  %r15
     7      56  %rip for done
     8      64  %rip for yield/continue
*/
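
/*
  Note (derived from the offsets above and the code below): c->save must
  provide at least nine 8-byte slots (byte offsets 0 through 64); it is
  assumed to be declared as an array of 64-bit integers in my_context.h.
*/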

int
my_context_spawn(struct my_context *c, void (*f)(void *), void *d)
{
  int ret;

  DBUG_SWAP_CODE_STATE(&c->dbug_state);

  /*
    There are 6 callee-save registers we need to save and restore when
    suspending and continuing, plus stack pointer %rsp and instruction pointer
    %rip.

    However, if we never suspend, the user-supplied function will in any case
    restore the 6 callee-save registers, so we can avoid restoring them in
    this case.
  */
  __asm__ __volatile__
    (
     "movq %%rsp, (%[save])\n\t"
     "movq %[stack], %%rsp\n\t"
#if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 4) || __clang__) && \
    !defined(__INTEL_COMPILER)
     /*
       This emits a DWARF DW_CFA_undefined directive to make the return address
       undefined. This indicates that this is the top of the stack frame, and
       helps tools that use DWARF stack unwinding to obtain stack traces.
       (I use numeric constant to avoid a dependency on libdwarf includes).
     */
     ".cfi_escape 0x07, 16\n\t"
#endif
     "movq %%rbp, 8(%[save])\n\t"
     "movq %%rbx, 16(%[save])\n\t"
     "movq %%r12, 24(%[save])\n\t"
     "movq %%r13, 32(%[save])\n\t"
     "movq %%r14, 40(%[save])\n\t"
     "movq %%r15, 48(%[save])\n\t"
     "leaq 1f(%%rip), %%rax\n\t"
     "leaq 2f(%%rip), %%rcx\n\t"
     "movq %%rax, 56(%[save])\n\t"
     "movq %%rcx, 64(%[save])\n\t"
     /*
       Constraint below puts the argument to the user function into %rdi, as
       needed for the calling convention.
     */
     "callq *%[f]\n\t"
     "jmpq *56(%[save])\n"
     /*
       Come here when operation is done.
       We do not need to restore callee-save registers, as the called function
       will do this for us if needed.
     */
     "1:\n\t"
     "movq (%[save]), %%rsp\n\t"
     "xorl %[ret], %[ret]\n\t"
     "jmp 3f\n"
     /* Come here when operation was suspended. */
     "2:\n\t"
     "movl $1, %[ret]\n"
     "3:\n"
     : [ret] "=a" (ret),
       [f] "+S" (f),
       /* Need this in %rdi to follow calling convention. */
       [d] "+D" (d)
     : [stack] "a" (c->stack_top),
       /* Need this in callee-save register to preserve in function call. */
       [save] "b" (&c->save[0])
     : "rcx", "rdx", "r8", "r9", "r10", "r11", "memory", "cc"
    );

  DBUG_SWAP_CODE_STATE(&c->dbug_state);

  return ret;
}

int
my_context_continue(struct my_context *c)
{
  int ret;

  DBUG_SWAP_CODE_STATE(&c->dbug_state);

  __asm__ __volatile__
    (
     "movq (%[save]), %%rax\n\t"
     "movq %%rsp, (%[save])\n\t"
     "movq %%rax, %%rsp\n\t"
     "movq 8(%[save]), %%rax\n\t"
     "movq %%rbp, 8(%[save])\n\t"
     "movq %%rax, %%rbp\n\t"
     "movq 24(%[save]), %%rax\n\t"
     "movq %%r12, 24(%[save])\n\t"
     "movq %%rax, %%r12\n\t"
     "movq 32(%[save]), %%rax\n\t"
     "movq %%r13, 32(%[save])\n\t"
     "movq %%rax, %%r13\n\t"
     "movq 40(%[save]), %%rax\n\t"
     "movq %%r14, 40(%[save])\n\t"
     "movq %%rax, %%r14\n\t"
     "movq 48(%[save]), %%rax\n\t"
     "movq %%r15, 48(%[save])\n\t"
     "movq %%rax, %%r15\n\t"

     "leaq 1f(%%rip), %%rax\n\t"
     "leaq 2f(%%rip), %%rcx\n\t"
     "movq %%rax, 56(%[save])\n\t"
     "movq 64(%[save]), %%rax\n\t"
     "movq %%rcx, 64(%[save])\n\t"

     "movq 16(%[save]), %%rcx\n\t"
     "movq %%rbx, 16(%[save])\n\t"
     "movq %%rcx, %%rbx\n\t"

     "jmpq *%%rax\n"
     /*
       Come here when operation is done.
       Be sure to use the same callee-save register for %[save] here and in
       my_context_spawn(), so we preserve the value correctly at this point.
     */
     "1:\n\t"
     "movq (%[save]), %%rsp\n\t"
     "movq 8(%[save]), %%rbp\n\t"
     /* %rbx is preserved from my_context_spawn() in this case. */
     "movq 24(%[save]), %%r12\n\t"
     "movq 32(%[save]), %%r13\n\t"
     "movq 40(%[save]), %%r14\n\t"
     "movq 48(%[save]), %%r15\n\t"
     "xorl %[ret], %[ret]\n\t"
     "jmp 3f\n"
     /* Come here when operation is suspended. */
     "2:\n\t"
     "movl $1, %[ret]\n"
     "3:\n"
     : [ret] "=a" (ret)
     : /* Need this in callee-save register to preserve in function call. */
       [save] "b" (&c->save[0])
     : "rcx", "rdx", "rsi", "rdi", "r8", "r9", "r10", "r11", "memory", "cc"
    );

  DBUG_SWAP_CODE_STATE(&c->dbug_state);

  return ret;
}

int
my_context_yield(struct my_context *c)
{
  uint64_t *save= &c->save[0];
  __asm__ __volatile__
    (
     "movq (%[save]), %%rax\n\t"
     "movq %%rsp, (%[save])\n\t"
     "movq %%rax, %%rsp\n\t"
     "movq 8(%[save]), %%rax\n\t"
     "movq %%rbp, 8(%[save])\n\t"
     "movq %%rax, %%rbp\n\t"
     "movq 16(%[save]), %%rax\n\t"
     "movq %%rbx, 16(%[save])\n\t"
     "movq %%rax, %%rbx\n\t"
     "movq 24(%[save]), %%rax\n\t"
     "movq %%r12, 24(%[save])\n\t"
     "movq %%rax, %%r12\n\t"
     "movq 32(%[save]), %%rax\n\t"
     "movq %%r13, 32(%[save])\n\t"
     "movq %%rax, %%r13\n\t"
     "movq 40(%[save]), %%rax\n\t"
     "movq %%r14, 40(%[save])\n\t"
     "movq %%rax, %%r14\n\t"
     "movq 48(%[save]), %%rax\n\t"
     "movq %%r15, 48(%[save])\n\t"
     "movq %%rax, %%r15\n\t"
     "movq 64(%[save]), %%rax\n\t"
     "leaq 1f(%%rip), %%rcx\n\t"
     "movq %%rcx, 64(%[save])\n\t"

     "jmpq *%%rax\n"

     "1:\n"
     : [save] "+D" (save)
     :
     : "rax", "rcx", "rdx", "rsi", "r8", "r9", "r10", "r11", "memory", "cc"
    );
  return 0;
}

int
my_context_init(struct my_context *c, size_t stack_size)
{
  bzero(c, sizeof(*c));

  if (!(c->stack_bot= malloc(stack_size)))
    return -1; /* Out of memory */
  /*
    The x86_64 ABI specifies 16-byte stack alignment.
    Also put two zero words at the top of the stack.
  */
  c->stack_top= (void *)
    (( ((intptr)c->stack_bot + stack_size) & ~(intptr)0xf) - 16);
  bzero(c->stack_top, 16);
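  /*
    Worked example of the alignment arithmetic above (made-up address): if
    c->stack_bot + stack_size == 0x7f0012345f2c, masking with ~0xf rounds it
    down to 0x7f0012345f20, and subtracting 16 gives
    c->stack_top == 0x7f0012345f10, leaving 16 zeroed bytes above the initial
    stack pointer.
  */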

#ifdef HAVE_VALGRIND_MEMCHECK_H
  c->valgrind_stack_id=
    VALGRIND_STACK_REGISTER(c->stack_bot, c->stack_top);
#endif
  return 0;
}

void
my_context_destroy(struct my_context *c)
{
  if (c->stack_bot)
  {
    free(c->stack_bot);
#ifdef HAVE_VALGRIND_MEMCHECK_H
    VALGRIND_STACK_DEREGISTER(c->valgrind_stack_id);
#endif
  }
  DBUG_FREE_CODE_STATE(&c->dbug_state);
}

#endif /* MY_CONTEXT_USE_X86_64_GCC_ASM */


#ifdef MY_CONTEXT_USE_I386_GCC_ASM
/*
  GCC-i386 implementation of my_context.

  This is slightly optimized in the common case where we never yield
  (e.g. fetch the next row when it is already fully received in the buffer).
  In this case we do not need to restore registers on return (though we still
  need to save them, as we cannot know in advance whether we will yield).
*/

#include <stdint.h>
#include <stdlib.h>

/*
  Layout of saved registers etc.
  Since this is accessed through gcc inline assembler, it is simpler to just
  use numbers than to try to define nice constants or structs.

  Slot  Offset  Contents
     0       0  %esp
     1       4  %ebp
     2       8  %ebx
     3      12  %esi
     4      16  %edi
     5      20  %eip for done
     6      24  %eip for yield/continue
*/
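
/*
  As on amd64, the numbers above are byte offsets into c->save; here each
  saved item occupies a 4-byte slot, so only offsets 0 through 24 are used.
*/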

int
my_context_spawn(struct my_context *c, void (*f)(void *), void *d)
{
  int ret;

  DBUG_SWAP_CODE_STATE(&c->dbug_state);

  /*
    There are 4 callee-save registers we need to save and restore when
    suspending and continuing, plus stack pointer %esp and instruction pointer
    %eip.

    However, if we never suspend, the user-supplied function will in any case
    restore the 4 callee-save registers, so we can avoid restoring them in
    this case.
  */
  __asm__ __volatile__
    (
     "movl %%esp, (%[save])\n\t"
     "movl %[stack], %%esp\n\t"
#if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 4)) && !defined(__INTEL_COMPILER)
     /*
       This emits a DWARF DW_CFA_undefined directive to make the return address
       undefined. This indicates that this is the top of the stack frame, and
       helps tools that use DWARF stack unwinding to obtain stack traces.
       (I use numeric constant to avoid a dependency on libdwarf includes).
     */
     ".cfi_escape 0x07, 8\n\t"
#endif
     /* Push the parameter on the stack. */
     "pushl %[d]\n\t"
     "movl %%ebp, 4(%[save])\n\t"
     "movl %%ebx, 8(%[save])\n\t"
     "movl %%esi, 12(%[save])\n\t"
     "movl %%edi, 16(%[save])\n\t"
     /* Get label addresses in -fPIC-compatible way (no pc-relative on 32bit) */
     "call 1f\n"
     "1:\n\t"
     "popl %%eax\n\t"
     "addl $(2f-1b), %%eax\n\t"
     "movl %%eax, 20(%[save])\n\t"
     "addl $(3f-2f), %%eax\n\t"
     "movl %%eax, 24(%[save])\n\t"
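     /*
       The call/popl pair above loads the address of label 1 into %eax;
       adding (2f-1b) and then (3f-2f) turns it into the addresses of label 2
       (the "done" resume point) and label 3 (the "yield/continue" resume
       point), without needing any absolute relocation.
     */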
     "call *%[f]\n\t"
     "jmp *20(%[save])\n"
     /*
       Come here when operation is done.
       We do not need to restore callee-save registers, as the called function
       will do this for us if needed.
     */
     "2:\n\t"
     "movl (%[save]), %%esp\n\t"
     "xorl %[ret], %[ret]\n\t"
     "jmp 4f\n"
     /* Come here when operation was suspended. */
     "3:\n\t"
     "movl $1, %[ret]\n"
     "4:\n"
     : [ret] "=a" (ret),
       [f] "+c" (f),
       [d] "+d" (d)
     : [stack] "a" (c->stack_top),
       /* Need this in callee-save register to preserve across function call. */
       [save] "D" (&c->save[0])
     : "memory", "cc"
    );

  DBUG_SWAP_CODE_STATE(&c->dbug_state);

  return ret;
}

int
my_context_continue(struct my_context *c)
{
  int ret;

  DBUG_SWAP_CODE_STATE(&c->dbug_state);

  __asm__ __volatile__
    (
     "movl (%[save]), %%eax\n\t"
     "movl %%esp, (%[save])\n\t"
     "movl %%eax, %%esp\n\t"
     "movl 4(%[save]), %%eax\n\t"
     "movl %%ebp, 4(%[save])\n\t"
     "movl %%eax, %%ebp\n\t"
     "movl 8(%[save]), %%eax\n\t"
     "movl %%ebx, 8(%[save])\n\t"
     "movl %%eax, %%ebx\n\t"
     "movl 12(%[save]), %%eax\n\t"
     "movl %%esi, 12(%[save])\n\t"
     "movl %%eax, %%esi\n\t"

     "movl 24(%[save]), %%eax\n\t"
     "call 1f\n"
     "1:\n\t"
     "popl %%ecx\n\t"
     "addl $(2f-1b), %%ecx\n\t"
     "movl %%ecx, 20(%[save])\n\t"
     "addl $(3f-2f), %%ecx\n\t"
     "movl %%ecx, 24(%[save])\n\t"

     /* Must restore %edi last as it is also our %[save] register. */
     "movl 16(%[save]), %%ecx\n\t"
     "movl %%edi, 16(%[save])\n\t"
     "movl %%ecx, %%edi\n\t"

     "jmp *%%eax\n"
     /*
       Come here when operation is done.
       Be sure to use the same callee-save register for %[save] here and in
       my_context_spawn(), so we preserve the value correctly at this point.
     */
     "2:\n\t"
     "movl (%[save]), %%esp\n\t"
     "movl 4(%[save]), %%ebp\n\t"
     "movl 8(%[save]), %%ebx\n\t"
     "movl 12(%[save]), %%esi\n\t"
     "movl 16(%[save]), %%edi\n\t"
     "xorl %[ret], %[ret]\n\t"
     "jmp 4f\n"
     /* Come here when operation is suspended. */
     "3:\n\t"
     "movl $1, %[ret]\n"
     "4:\n"
     : [ret] "=a" (ret)
     : /* Need this in callee-save register to preserve in function call. */
       [save] "D" (&c->save[0])
     : "ecx", "edx", "memory", "cc"
    );

  DBUG_SWAP_CODE_STATE(&c->dbug_state);

  return ret;
}

int
my_context_yield(struct my_context *c)
{
  uint64_t *save= &c->save[0];
  __asm__ __volatile__
    (
     "movl (%[save]), %%eax\n\t"
     "movl %%esp, (%[save])\n\t"
     "movl %%eax, %%esp\n\t"
     "movl 4(%[save]), %%eax\n\t"
     "movl %%ebp, 4(%[save])\n\t"
     "movl %%eax, %%ebp\n\t"
     "movl 8(%[save]), %%eax\n\t"
     "movl %%ebx, 8(%[save])\n\t"
     "movl %%eax, %%ebx\n\t"
     "movl 12(%[save]), %%eax\n\t"
     "movl %%esi, 12(%[save])\n\t"
     "movl %%eax, %%esi\n\t"
     "movl 16(%[save]), %%eax\n\t"
     "movl %%edi, 16(%[save])\n\t"
     "movl %%eax, %%edi\n\t"

     "movl 24(%[save]), %%eax\n\t"
     "call 1f\n"
     "1:\n\t"
     "popl %%ecx\n\t"
     "addl $(2f-1b), %%ecx\n\t"
     "movl %%ecx, 24(%[save])\n\t"

     "jmp *%%eax\n"

     "2:\n"
     : [save] "+d" (save)
     :
     : "eax", "ecx", "memory", "cc"
    );
  return 0;
}

int
my_context_init(struct my_context *c, size_t stack_size)
{
  bzero(c, sizeof(*c));
  if (!(c->stack_bot= malloc(stack_size)))
    return -1; /* Out of memory */
  c->stack_top= (void *)
    (( ((intptr)c->stack_bot + stack_size) & ~(intptr)0xf) - 16);
  bzero(c->stack_top, 16);

#ifdef HAVE_VALGRIND_MEMCHECK_H
  c->valgrind_stack_id=
    VALGRIND_STACK_REGISTER(c->stack_bot, c->stack_top);
#endif
  return 0;
}

void
my_context_destroy(struct my_context *c)
{
  if (c->stack_bot)
  {
    free(c->stack_bot);
#ifdef HAVE_VALGRIND_MEMCHECK_H
    VALGRIND_STACK_DEREGISTER(c->valgrind_stack_id);
#endif
  }
  DBUG_FREE_CODE_STATE(&c->dbug_state);
}

#endif /* MY_CONTEXT_USE_I386_GCC_ASM */


#ifdef MY_CONTEXT_USE_WIN32_FIBERS
int
my_context_yield(struct my_context *c)
{
  c->return_value= 1;
  SwitchToFiber(c->app_fiber);
  return 0;
}


static void WINAPI
my_context_trampoline(void *p)
{
  struct my_context *c= (struct my_context *)p;
  /*
    Reuse the fiber by looping infinitely: each time we are scheduled, we
    call the appropriate user function and switch back when it is done.

    This way we avoid the overhead of CreateFiber() for every asynchronous
    operation.
  */
  for(;;)
  {
    (*(c->user_func))(c->user_arg);
    c->return_value= 0;
    SwitchToFiber(c->app_fiber);
  }
}

int
my_context_init(struct my_context *c, size_t stack_size)
{
  bzero(c, sizeof(*c));
  c->lib_fiber= CreateFiber(stack_size, my_context_trampoline, c);
  if (c->lib_fiber)
    return 0;
  return -1;
}

void
my_context_destroy(struct my_context *c)
{
  DBUG_FREE_CODE_STATE(&c->dbug_state);
  if (c->lib_fiber)
  {
    DeleteFiber(c->lib_fiber);
    c->lib_fiber= NULL;
  }
}

int
my_context_spawn(struct my_context *c, void (*f)(void *), void *d)
{
  c->user_func= f;
  c->user_arg= d;
  return my_context_continue(c);
}

int
my_context_continue(struct my_context *c)
{
  /*
    This seems to be a common trick to call ConvertThreadToFiber() only the
    first time it is needed in a thread, in a way that works on multiple
    Windows versions.
  */
  void *current_fiber= GetCurrentFiber();
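  /*
    Note (assumption): on some Windows versions GetCurrentFiber() reportedly
    returns the magic value 0x1e00 rather than NULL for a thread that has not
    yet been converted to a fiber, hence the extra check below.
  */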
  if (current_fiber == NULL || current_fiber == (void *)0x1e00)
    current_fiber= ConvertThreadToFiber(c);
  c->app_fiber= current_fiber;
  DBUG_SWAP_CODE_STATE(&c->dbug_state);
  SwitchToFiber(c->lib_fiber);
  DBUG_SWAP_CODE_STATE(&c->dbug_state);

  return c->return_value;
}

#endif /* MY_CONTEXT_USE_WIN32_FIBERS */

#ifdef MY_CONTEXT_DISABLE
int
my_context_continue(struct my_context *c __attribute__((unused)))
{
  return -1;
}


int
my_context_spawn(struct my_context *c __attribute__((unused)),
                 void (*f)(void *) __attribute__((unused)),
                 void *d __attribute__((unused)))
{
  return -1;
}


int
my_context_yield(struct my_context *c __attribute__((unused)))
{
  return -1;
}

int
my_context_init(struct my_context *c __attribute__((unused)),
                size_t stack_size __attribute__((unused)))
{
  return -1; /* Async contexts not supported in this build */
}

void
my_context_destroy(struct my_context *c __attribute__((unused)))
{
}

#endif /* MY_CONTEXT_DISABLE */
