/* Common threading primitives definitions for both POSIX and C11.
   Copyright (C) 2017-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#ifndef _THREAD_SHARED_TYPES_H
#define _THREAD_SHARED_TYPES_H 1

/* Arch-specific definitions.  Each architecture must define the following
   macros to define the expected sizes of pthread data types:

   __SIZEOF_PTHREAD_ATTR_T        - size of pthread_attr_t.
   __SIZEOF_PTHREAD_MUTEX_T       - size of pthread_mutex_t.
   __SIZEOF_PTHREAD_MUTEXATTR_T   - size of pthread_mutexattr_t.
   __SIZEOF_PTHREAD_COND_T        - size of pthread_cond_t.
   __SIZEOF_PTHREAD_CONDATTR_T    - size of pthread_condattr_t.
   __SIZEOF_PTHREAD_RWLOCK_T      - size of pthread_rwlock_t.
   __SIZEOF_PTHREAD_RWLOCKATTR_T  - size of pthread_rwlockattr_t.
   __SIZEOF_PTHREAD_BARRIER_T     - size of pthread_barrier_t.
   __SIZEOF_PTHREAD_BARRIERATTR_T - size of pthread_barrierattr_t.

   Also, the following macros must be defined for the internal
   pthread_mutex_t struct definition (struct __pthread_mutex_s):

   __PTHREAD_COMPAT_PADDING_MID      - any additional members after '__kind'
                                       and before '__spins' (for 64 bits) or
                                       '__nusers' (for 32 bits).
   __PTHREAD_COMPAT_PADDING_END      - any additional members at the end of
                                       the internal structure.
   __PTHREAD_MUTEX_LOCK_ELISION      - 1 if the architecture supports lock
                                       elision, 0 otherwise.
   __PTHREAD_MUTEX_NUSERS_AFTER_KIND - controls where to put __nusers.  The
                                       preferred value for new architectures
                                       is 0.
   __PTHREAD_MUTEX_USE_UNION         - controls whether the internal __spins
                                       and __list are placed inside a union
                                       for linuxthreads compatibility.  The
                                       preferred value for new architectures
                                       is 0.

   For a new port the preferred values for the required defines are:

   #define __PTHREAD_COMPAT_PADDING_MID
   #define __PTHREAD_COMPAT_PADDING_END
   #define __PTHREAD_MUTEX_LOCK_ELISION 0
   #define __PTHREAD_MUTEX_NUSERS_AFTER_KIND 0
   #define __PTHREAD_MUTEX_USE_UNION 0

   __PTHREAD_MUTEX_LOCK_ELISION can be set to 1 if the architecture plans
   to eventually support lock elision using transactional memory.

   An additional macro defines any alignment constraint for the lock used
   inside the thread structures:

   __LOCK_ALIGNMENT - for internal lock/futex usage.

   Same idea, but for the once locking primitive:

   __ONCE_ALIGNMENT - for the pthread_once_t/once_flag definition.

   And finally, the internal pthread_rwlock_t (struct __pthread_rwlock_arch_t)
   must be defined.
*/
#include <bits/pthreadtypes-arch.h>
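/* As a purely illustrative sketch (not copied from any real port), the
   alignment macros and the arch rwlock type pulled in above might look
   like this; every value and member below is a hypothetical placeholder,
   not the layout of an actual architecture:

     #define __LOCK_ALIGNMENT
     #define __ONCE_ALIGNMENT

     struct __pthread_rwlock_arch_t
     {
       unsigned int __readers;
       unsigned int __writers;
       unsigned int __wrphase_futex;
       unsigned int __writers_futex;
       unsigned int __flags;
       int __shared;
     };

   An architecture needing stricter alignment could instead define, for
   example, __LOCK_ALIGNMENT as __attribute__ ((__aligned__ (4))).  See
   the existing bits/pthreadtypes-arch.h headers for real definitions.  */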

/* Common definition of pthread_mutex_t.  */

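/* The list-node types below back the __list member of struct
   __pthread_mutex_s: robust mutexes are linked through it into the
   owning thread's list of held robust mutexes so they can be recovered
   if the owner dies.  When __PTHREAD_MUTEX_USE_UNION is 0 the node is
   doubly linked; otherwise the singly-linked form preserves the
   historical linuxthreads-compatible layout.  */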
#if !__PTHREAD_MUTEX_USE_UNION
typedef struct __pthread_internal_list
{
  struct __pthread_internal_list *__prev;
  struct __pthread_internal_list *__next;
} __pthread_list_t;
#else
typedef struct __pthread_internal_slist
{
  struct __pthread_internal_slist *__next;
} __pthread_slist_t;
#endif

/* Lock elision support.  */
#if __PTHREAD_MUTEX_LOCK_ELISION
# if !__PTHREAD_MUTEX_USE_UNION
#  define __PTHREAD_SPINS_DATA \
  short __spins; \
  short __elision
#  define __PTHREAD_SPINS 0, 0
# else
#  define __PTHREAD_SPINS_DATA \
  struct \
  { \
    short __espins; \
    short __eelision; \
  } __elision_data
#  define __PTHREAD_SPINS { 0, 0 }
#  define __spins __elision_data.__espins
#  define __elision __elision_data.__eelision
# endif
#else
# define __PTHREAD_SPINS_DATA int __spins
/* Mutex __spins initializer used by PTHREAD_MUTEX_INITIALIZER.  */
# define __PTHREAD_SPINS 0
#endif
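/* Illustrative only: __PTHREAD_SPINS must expand to initializer tokens
   matching whatever __PTHREAD_SPINS_DATA declared above (a single int,
   or a two-short struct when elision is enabled).  A static initializer
   built from struct __pthread_mutex_s can then splice it in roughly like
   this; the actual PTHREAD_MUTEX_INITIALIZER lives in pthread.h and
   differs per architecture, so this is only a sketch for a layout whose
   padding macros are empty and whose __nusers precedes __kind:

     #define PTHREAD_MUTEX_INITIALIZER \
       { { 0, 0, 0, 0, 0, __PTHREAD_SPINS, { 0, 0 } } }

   Here the trailing { 0, 0 } initializes the __list member.  */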

struct __pthread_mutex_s
{
  int __lock __LOCK_ALIGNMENT;
  unsigned int __count;
  int __owner;
#if !__PTHREAD_MUTEX_NUSERS_AFTER_KIND
  unsigned int __nusers;
#endif
  /* KIND must stay at this position in the structure to maintain
     binary compatibility with static initializers.

     Concurrency notes:
     The __kind of a mutex is initialized either by the static
     PTHREAD_MUTEX_INITIALIZER or by a call to pthread_mutex_init.

     After a mutex has been initialized, the __kind of a mutex is usually
     not changed.  But it can be set to -1 in pthread_mutex_destroy, or
     elision can be enabled.  The latter is done concurrently in the
     pthread_mutex_*lock functions by using the macro FORCE_ELISION,
     which is only defined for architectures that support lock elision.

     For elision, the flags PTHREAD_MUTEX_ELISION_NP and
     PTHREAD_MUTEX_NO_ELISION_NP can be set in addition to the already
     set type of a mutex.
     Before a mutex is initialized, only PTHREAD_MUTEX_NO_ELISION_NP can
     be set with pthread_mutexattr_settype.
     After a mutex has been initialized, the pthread_mutex_*lock functions
     can enable elision - if the mutex type and the machine support it -
     by setting the flag PTHREAD_MUTEX_ELISION_NP.  This is done
     concurrently.  Afterwards the lock/unlock functions use the specific
     elision code paths.  */
  int __kind;
  __PTHREAD_COMPAT_PADDING_MID
#if __PTHREAD_MUTEX_NUSERS_AFTER_KIND
  unsigned int __nusers;
#endif
#if !__PTHREAD_MUTEX_USE_UNION
  __PTHREAD_SPINS_DATA;
  __pthread_list_t __list;
# define __PTHREAD_MUTEX_HAVE_PREV 1
#else
  __extension__ union
  {
    __PTHREAD_SPINS_DATA;
    __pthread_slist_t __list;
  };
# define __PTHREAD_MUTEX_HAVE_PREV 0
#endif
  __PTHREAD_COMPAT_PADDING_END
};
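/* A schematic sketch (not the actual glibc code) of how the
   concurrently-set elision bit described in the __kind notes above is
   consumed by the locking functions:

     int kind = mutex->__data.__kind;
     if (kind & PTHREAD_MUTEX_ELISION_NP)
       take the transactional (elided) lock path;
     else
       take the regular futex-based lock path;

   The real implementations live in pthread_mutex_lock/trylock/unlock
   and in the FORCE_ELISION macro mentioned above.  */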


/* Common definition of pthread_cond_t.  */

struct __pthread_cond_s
{
  __extension__ union
  {
    __extension__ unsigned long long int __wseq;
    struct
    {
      unsigned int __low;
      unsigned int __high;
    } __wseq32;
  };
  __extension__ union
  {
    __extension__ unsigned long long int __g1_start;
    struct
    {
      unsigned int __low;
      unsigned int __high;
    } __g1_start32;
  };
  unsigned int __g_refs[2] __LOCK_ALIGNMENT;
  unsigned int __g_size[2];
  unsigned int __g1_orig_size;
  unsigned int __wrefs;
  unsigned int __g_signals[2];
};
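/* Illustrative note: the unions above let condition-variable code treat
   __wseq and __g1_start either as single 64-bit counters (on targets
   with 64-bit atomics) or as two 32-bit halves.  A sketch of rebuilding
   the 64-bit value from the halves, assuming __low/__high hold the
   low/high-order words as their names suggest:

     unsigned long long wseq
       = ((unsigned long long) cond->__data.__wseq32.__high << 32)
         | cond->__data.__wseq32.__low;

   The actual access strategy, including the required atomic operations,
   is in glibc's internal condition-variable implementation.  */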

#endif /* _THREAD_SHARED_TYPES_H */