/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "uv.h"
#include "task.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h> /* memset */

#ifdef __POSIX__
#include <pthread.h>
#endif

struct getaddrinfo_req {
  uv_thread_t thread_id;
  unsigned int counter;
  uv_loop_t* loop;
  uv_getaddrinfo_t handle;
};


struct fs_req {
  uv_thread_t thread_id;
  unsigned int counter;
  uv_loop_t* loop;
  uv_fs_t handle;
};


struct test_thread {
  uv_thread_t thread_id;
  int thread_called;
};

static void getaddrinfo_do(struct getaddrinfo_req* req);
static void getaddrinfo_cb(uv_getaddrinfo_t* handle,
                           int status,
                           struct addrinfo* res);
static void fs_do(struct fs_req* req);
static void fs_cb(uv_fs_t* handle);

static int thread_called;
static uv_key_t tls_key;

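/* Submit a getaddrinfo request on the thread pool; getaddrinfo_cb re-submits
 * the same request until req->counter drops to zero.
 */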
static void getaddrinfo_do(struct getaddrinfo_req* req) {
  int r;

  r = uv_getaddrinfo(req->loop,
                     &req->handle,
                     getaddrinfo_cb,
                     "localhost",
                     NULL,
                     NULL);
  ASSERT(r == 0);
}


static void getaddrinfo_cb(uv_getaddrinfo_t* handle,
                           int status,
                           struct addrinfo* res) {
  struct getaddrinfo_req* req;

  ASSERT(status == 0);

  req = container_of(handle, struct getaddrinfo_req, handle);
  uv_freeaddrinfo(res);

  if (--req->counter)
    getaddrinfo_do(req);
}

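/* Same chaining pattern for fs work: stat the current directory and have
 * fs_cb re-issue the request until the counter drains.
 */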
static void fs_do(struct fs_req* req) {
  int r;

  r = uv_fs_stat(req->loop, &req->handle, ".", fs_cb);
  ASSERT(r == 0);
}


static void fs_cb(uv_fs_t* handle) {
  struct fs_req* req = container_of(handle, struct fs_req, handle);

  uv_fs_req_cleanup(handle);

  if (--req->counter)
    fs_do(req);
}

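/* Thread body for the threadpool_multiple_event_loops test: each thread runs
 * its own event loop and keeps the shared thread pool busy with chained
 * getaddrinfo and fs requests until every counter reaches zero.
 */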
static void do_work(void* arg) {
  struct getaddrinfo_req getaddrinfo_reqs[4];
  struct fs_req fs_reqs[4];
  uv_loop_t loop;
  size_t i;
  struct test_thread* thread = arg;

  ASSERT(0 == uv_loop_init(&loop));

  for (i = 0; i < ARRAY_SIZE(getaddrinfo_reqs); i++) {
    struct getaddrinfo_req* req = getaddrinfo_reqs + i;
    req->counter = 4;
    req->loop = &loop;
    getaddrinfo_do(req);
  }

  for (i = 0; i < ARRAY_SIZE(fs_reqs); i++) {
    struct fs_req* req = fs_reqs + i;
    req->counter = 4;
    req->loop = &loop;
    fs_do(req);
  }

  ASSERT(0 == uv_run(&loop, UV_RUN_DEFAULT));
  ASSERT(0 == uv_loop_close(&loop));
  thread->thread_called = 1;
}

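/* Minimal thread body for the thread_create test: check the argument passed
 * by uv_thread_create() and bump a counter the main thread verifies after
 * joining.
 */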
static void thread_entry(void* arg) {
  ASSERT(arg == (void *) 42);
  thread_called++;
}


TEST_IMPL(thread_create) {
  uv_thread_t tid;
  int r;

  r = uv_thread_create(&tid, thread_entry, (void *) 42);
  ASSERT(r == 0);

  r = uv_thread_join(&tid);
  ASSERT(r == 0);

  ASSERT(thread_called == 1);

  return 0;
}


/* Hilariously bad test name. Run a lot of tasks in the thread pool and verify
 * that each "finished" callback is run in its originating thread.
 */
TEST_IMPL(threadpool_multiple_event_loops) {
  struct test_thread threads[8];
  size_t i;
  int r;

  memset(threads, 0, sizeof(threads));

  for (i = 0; i < ARRAY_SIZE(threads); i++) {
    r = uv_thread_create(&threads[i].thread_id, do_work, &threads[i]);
    ASSERT(r == 0);
  }

  for (i = 0; i < ARRAY_SIZE(threads); i++) {
    r = uv_thread_join(&threads[i].thread_id);
    ASSERT(r == 0);
    ASSERT(threads[i].thread_called == 1);
  }

  return 0;
}

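/* Each thread gets its own slot for tls_key: the value must start out NULL,
 * and a value set in one thread must not be visible to the others or to the
 * main thread.
 */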
static void tls_thread(void* arg) {
  ASSERT(NULL == uv_key_get(&tls_key));
  uv_key_set(&tls_key, arg);
  ASSERT(arg == uv_key_get(&tls_key));
  uv_key_set(&tls_key, NULL);
  ASSERT(NULL == uv_key_get(&tls_key));
}


TEST_IMPL(thread_local_storage) {
  char name[] = "main";
  uv_thread_t threads[2];
  ASSERT(0 == uv_key_create(&tls_key));
  ASSERT(NULL == uv_key_get(&tls_key));
  uv_key_set(&tls_key, name);
  ASSERT(name == uv_key_get(&tls_key));
  ASSERT(0 == uv_thread_create(threads + 0, tls_thread, threads + 0));
  ASSERT(0 == uv_thread_create(threads + 1, tls_thread, threads + 1));
  ASSERT(0 == uv_thread_join(threads + 0));
  ASSERT(0 == uv_thread_join(threads + 1));
  uv_key_delete(&tls_key);
  return 0;
}

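/* Runs inside the spawned thread and checks that its stack is at least as
 * large as the size requested via uv_thread_options_t, or as the platform
 * default when no size was requested.
 */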
static void thread_check_stack(void* arg) {
#if defined(__APPLE__)
  size_t expected;
  expected = arg == NULL ? 0 : ((uv_thread_options_t*)arg)->stack_size;
  /* 512 kB is the default stack size of threads other than the main thread
   * on MacOS. */
  if (expected == 0)
    expected = 512 * 1024;
  ASSERT(pthread_get_stacksize_np(pthread_self()) >= expected);
#elif defined(__linux__) && defined(__GLIBC__)
  size_t expected;
  struct rlimit lim;
  size_t stack_size;
  pthread_attr_t attr;
  ASSERT(0 == getrlimit(RLIMIT_STACK, &lim));
  if (lim.rlim_cur == RLIM_INFINITY)
    lim.rlim_cur = 2 << 20;  /* glibc default. */
  ASSERT(0 == pthread_getattr_np(pthread_self(), &attr));
  ASSERT(0 == pthread_attr_getstacksize(&attr, &stack_size));
  expected = arg == NULL ? 0 : ((uv_thread_options_t*)arg)->stack_size;
  if (expected == 0)
    expected = (size_t)lim.rlim_cur;
  ASSERT(stack_size >= expected);
#endif
}


TEST_IMPL(thread_stack_size) {
  uv_thread_t thread;
  ASSERT(0 == uv_thread_create(&thread, thread_check_stack, NULL));
  ASSERT(0 == uv_thread_join(&thread));
  return 0;
}

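/* Exercise uv_thread_create_ex() with a range of explicit stack sizes,
 * including unaligned values and values below PTHREAD_STACK_MIN; the spawned
 * thread verifies it received at least the requested (or default) amount.
 */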
TEST_IMPL(thread_stack_size_explicit) {
  uv_thread_t thread;
  uv_thread_options_t options;

  options.flags = UV_THREAD_HAS_STACK_SIZE;
  options.stack_size = 1024 * 1024;
  ASSERT(0 == uv_thread_create_ex(&thread, &options,
                                  thread_check_stack, &options));
  ASSERT(0 == uv_thread_join(&thread));

  options.stack_size = 8 * 1024 * 1024;  /* larger than most default os sizes */
  ASSERT(0 == uv_thread_create_ex(&thread, &options,
                                  thread_check_stack, &options));
  ASSERT(0 == uv_thread_join(&thread));

  options.stack_size = 0;
  ASSERT(0 == uv_thread_create_ex(&thread, &options,
                                  thread_check_stack, &options));
  ASSERT(0 == uv_thread_join(&thread));

#ifdef PTHREAD_STACK_MIN
  options.stack_size = PTHREAD_STACK_MIN - 42;  /* unaligned size */
  ASSERT(0 == uv_thread_create_ex(&thread, &options,
                                  thread_check_stack, &options));
  ASSERT(0 == uv_thread_join(&thread));

  options.stack_size = PTHREAD_STACK_MIN / 2 - 42;  /* unaligned size */
  ASSERT(0 == uv_thread_create_ex(&thread, &options,
                                  thread_check_stack, &options));
  ASSERT(0 == uv_thread_join(&thread));
#endif

  /* unaligned size, should be larger than PTHREAD_STACK_MIN */
  options.stack_size = 1234567;
  ASSERT(0 == uv_thread_create_ex(&thread, &options,
                                  thread_check_stack, &options));
  ASSERT(0 == uv_thread_join(&thread));

  return 0;
}