/* Copyright (C) 2002-2020 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdio_ext.h>
#include <stdlib.h>
#include <string.h>
#include <sys/resource.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <ldsodefs.h>

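/* pthread_getattr_np initializes *ATTR to describe the current
   attributes of the already-running thread THREAD_ID, including its
   stack placement and CPU affinity.  As an illustrative sketch (not
   part of this file), a caller would typically use it to discover its
   own stack roughly like this, with error handling abbreviated:

       pthread_attr_t attr;
       void *stackaddr;
       size_t stacksize;
       if (pthread_getattr_np (pthread_self (), &attr) == 0)
         {
           pthread_attr_getstack (&attr, &stackaddr, &stacksize);
           pthread_attr_destroy (&attr);
         }

   An attribute object filled in here must be destroyed with
   pthread_attr_destroy just like one set up by pthread_attr_init.  */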
int
__pthread_getattr_np (pthread_t thread_id, pthread_attr_t *attr)
{
  struct pthread *thread = (struct pthread *) thread_id;

  /* Prepare the new thread attribute.  */
  int ret = __pthread_attr_init (attr);
  if (ret != 0)
    return ret;

  struct pthread_attr *iattr = (struct pthread_attr *) attr;

  lll_lock (thread->lock, LLL_PRIVATE);

  /* The thread library is responsible for keeping the values in the
     thread descriptor up-to-date in case the user changes them.  */
  memcpy (&iattr->schedparam, &thread->schedparam,
          sizeof (struct sched_param));
  iattr->schedpolicy = thread->schedpolicy;

  /* Start from the flags recorded in the thread descriptor; individual
     bits are adjusted below.  */
  iattr->flags = thread->flags;

  /* The thread might be detached by now.  */
  if (IS_DETACHED (thread))
    iattr->flags |= ATTR_FLAG_DETACHSTATE;

  /* This is the guardsize after adjusting it.  */
  iattr->guardsize = thread->reported_guardsize;

  /* The sizes are subject to alignment.  */
  if (__glibc_likely (thread->stackblock != NULL))
    {
      /* The stack size reported to the user should not include the
         guard size.  */
      iattr->stacksize = thread->stackblock_size - thread->guardsize;
#if _STACK_GROWS_DOWN
      iattr->stackaddr = (char *) thread->stackblock
                         + thread->stackblock_size;
#else
      iattr->stackaddr = (char *) thread->stackblock;
#endif
    }
  else
    {
      /* No stack information available.  This must be for the initial
         thread.  Get the info in some magical way.  */

      /* Stack size limit.  */
      struct rlimit rl;

      /* The safest way to get the top of the stack is to read
         /proc/self/maps and locate the line into which
         __libc_stack_end falls.  */
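      /* A maps line begins with the mapping's address range in hex,
         for example (illustrative values only):
             7ffd1c000000-7ffd1c021000 rw-p 00000000 00:00 0      [stack]
         Only the two addresses are parsed below.  The "c" and "e"
         fopen flags ask for a stream whose operations are not
         cancellation points and a descriptor opened with O_CLOEXEC.  */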
      FILE *fp = fopen ("/proc/self/maps", "rce");
      if (fp == NULL)
        ret = errno;
      /* We need the limit of the stack in any case.  */
      else
        {
          if (__getrlimit (RLIMIT_STACK, &rl) != 0)
            ret = errno;
          else
            {
              /* We consider the main process stack to have ended with
                 the page containing __libc_stack_end.  There is stuff
                 below it in the stack too, like the program arguments,
                 environment variables and auxv info, but we ignore
                 those pages when returning size so that the output is
                 consistent when the stack is marked executable due to
                 a loaded DSO requiring it.  */
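              /* For example (illustrative numbers), with a 4 KiB page
                 size a __libc_stack_end of 0x7ffd1c01f3a8 rounds down
                 to 0x7ffd1c01f000, and for a downward-growing stack one
                 page is added back, making 0x7ffd1c020000 the reported
                 end of the stack.  */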
              void *stack_end = (void *) ((uintptr_t) __libc_stack_end
                                          & -(uintptr_t) GLRO(dl_pagesize));
#if _STACK_GROWS_DOWN
              stack_end += GLRO(dl_pagesize);
#endif
              /* We need no locking.  */
              __fsetlocking (fp, FSETLOCKING_BYCALLER);

              /* Until we find an entry (which should always be the
                 case), mark the result as a failure.  */
              ret = ENOENT;

              char *line = NULL;
              size_t linelen = 0;
#if _STACK_GROWS_DOWN
              /* End address of the mapping seen just before the one
                 containing __libc_stack_end; used below to keep the
                 reported stack size from running into that mapping.  */
              uintptr_t last_to = 0;
#endif

              while (! feof_unlocked (fp))
                {
                  if (__getline (&line, &linelen, fp) <= 0)
                    break;

                  uintptr_t from;
                  uintptr_t to;
                  if (sscanf (line, "%" SCNxPTR "-%" SCNxPTR,
                              &from, &to) != 2)
                    continue;
                  if (from <= (uintptr_t) __libc_stack_end
                      && (uintptr_t) __libc_stack_end < to)
                    {
                      /* Found the entry.  Now we have the info we need.  */
                      iattr->stackaddr = stack_end;
                      iattr->stacksize =
                        rl.rlim_cur - (size_t) (to - (uintptr_t) stack_end);

                      /* Cut it down to align it to page size since
                         otherwise we risk going beyond rlimit when the
                         kernel rounds up the stack extension request.  */
                      iattr->stacksize = (iattr->stacksize
                                          & -(intptr_t) GLRO(dl_pagesize));
#if _STACK_GROWS_DOWN
                      /* The limit might be too high.  */
                      if ((size_t) iattr->stacksize
                          > (size_t) iattr->stackaddr - last_to)
                        iattr->stacksize = (size_t) iattr->stackaddr - last_to;
#else
                      /* The limit might be too high.  */
                      if ((size_t) iattr->stacksize
                          > to - (size_t) iattr->stackaddr)
                        iattr->stacksize = to - (size_t) iattr->stackaddr;
#endif
                      /* We succeeded; no need to look further.  */
                      ret = 0;
                      break;
                    }
#if _STACK_GROWS_DOWN
                  last_to = to;
#endif
                }

              free (line);
            }

          fclose (fp);
        }
    }

  iattr->flags |= ATTR_FLAG_STACKADDR;

  if (ret == 0)
    {
      size_t size = 16;
      cpu_set_t *cpuset = NULL;

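      /* The size of the CPU mask the kernel actually uses is not known
         here, so probe for it: keep doubling the buffer for as long as
         the getaffinity call reports that the buffer is too small
         (EINVAL), up to the arbitrary cap checked below.  */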
      do
        {
          size <<= 1;

          void *newp = realloc (cpuset, size);
          if (newp == NULL)
            {
              ret = ENOMEM;
              break;
            }
          cpuset = (cpu_set_t *) newp;

          ret = __pthread_getaffinity_np (thread_id, size, cpuset);
        }
      /* Pick some ridiculous upper limit: a 1 MiB mask covers about
         8 million CPUs, which should be plenty.  */
      while (ret == EINVAL && size < 1024 * 1024);

      if (ret == 0)
        ret = __pthread_attr_setaffinity_np (attr, size, cpuset);
      else if (ret == ENOSYS)
        /* There is no such functionality.  */
        ret = 0;
      free (cpuset);
    }

  lll_unlock (thread->lock, LLL_PRIVATE);

  if (ret != 0)
    __pthread_attr_destroy (attr);

  return ret;
}
versioned_symbol (libc, __pthread_getattr_np, pthread_getattr_np, GLIBC_2_32);

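/* Binaries linked against the historic GLIBC_2_2_3 symbol version keep
   resolving pthread_getattr_np to this implementation through the
   compatibility alias below; newly linked binaries get the GLIBC_2_32
   default version exported above.  */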
#if SHLIB_COMPAT (libc, GLIBC_2_2_3, GLIBC_2_32)
strong_alias (__pthread_getattr_np, __pthread_getattr_np_alias)
compat_symbol (libc, __pthread_getattr_np_alias,
               pthread_getattr_np, GLIBC_2_2_3);
#endif