| 1 | /* |
| 2 | * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved. |
| 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
| 4 | * |
| 5 | * This code is free software; you can redistribute it and/or modify it |
| 6 | * under the terms of the GNU General Public License version 2 only, as |
| 7 | * published by the Free Software Foundation. |
| 8 | * |
| 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| 12 | * version 2 for more details (a copy is included in the LICENSE file that |
| 13 | * accompanied this code). |
| 14 | * |
| 15 | * You should have received a copy of the GNU General Public License version |
| 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
| 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
| 18 | * |
| 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
| 20 | * or visit www.oracle.com if you need additional information or have any |
| 21 | * questions. |
| 22 | * |
| 23 | */ |
| 24 | |
| 25 | #include "jvm.h" |
| 26 | #include "logging/log.hpp" |
| 27 | #include "memory/allocation.inline.hpp" |
| 28 | #include "os_posix.inline.hpp" |
| 29 | #include "utilities/globalDefinitions.hpp" |
| 30 | #include "runtime/frame.inline.hpp" |
| 31 | #include "runtime/interfaceSupport.inline.hpp" |
| 32 | #include "services/memTracker.hpp" |
| 33 | #include "utilities/align.hpp" |
| 34 | #include "utilities/events.hpp" |
| 35 | #include "utilities/formatBuffer.hpp" |
| 36 | #include "utilities/macros.hpp" |
| 37 | #include "utilities/vmError.hpp" |
| 38 | |
| 39 | #include <dirent.h> |
| 40 | #include <dlfcn.h> |
| 41 | #include <grp.h> |
| 42 | #include <pwd.h> |
| 43 | #include <pthread.h> |
| 44 | #include <signal.h> |
| 45 | #include <sys/mman.h> |
| 46 | #include <sys/resource.h> |
| 47 | #include <sys/utsname.h> |
| 48 | #include <time.h> |
| 49 | #include <unistd.h> |
| 50 | |
| 51 | // Todo: provide an os::get_max_process_id() or similar. The number of processes |
| 52 | // may have been configured and can be read more accurately from the proc fs etc. |
| 53 | #ifndef MAX_PID |
| 54 | #define MAX_PID INT_MAX |
| 55 | #endif |
| 56 | #define IS_VALID_PID(p) (p > 0 && p < MAX_PID) |
| 57 | |
| 58 | #define ROOT_UID 0 |
| 59 | |
| 60 | #ifndef MAP_ANONYMOUS |
| 61 | #define MAP_ANONYMOUS MAP_ANON |
| 62 | #endif |
| 63 | |
| 64 | #define check_with_errno(check_type, cond, msg) \ |
| 65 | do { \ |
| 66 | int err = errno; \ |
| 67 | check_type(cond, "%s; error='%s' (errno=%s)", msg, os::strerror(err), \ |
| 68 | os::errno_name(err)); \ |
| 69 | } while (false) |
| 70 | |
| 71 | #define assert_with_errno(cond, msg) check_with_errno(assert, cond, msg) |
| 72 | #define guarantee_with_errno(cond, msg) check_with_errno(guarantee, cond, msg) |
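|   | // Usage sketch (illustrative): after a failing libc call, e.g. |
|   | //   int ret = ::unlink(path); |
|   | //   assert_with_errno(ret == 0, "unlink returned error"); |
|   | // the macro appends the textual errno information, producing a message such as |
|   | // "unlink returned error; error='No such file or directory' (errno=ENOENT)". |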
| 73 | |
| 74 | // Check core dump limit and report possible place where core can be found |
| 75 | void os::check_dump_limit(char* buffer, size_t bufferSize) { |
| 76 | if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) { |
| 77 | jio_snprintf(buffer, bufferSize, "CreateCoredumpOnCrash is disabled from command line" ); |
| 78 | VMError::record_coredump_status(buffer, false); |
| 79 | return; |
| 80 | } |
| 81 | |
| 82 | int n; |
| 83 | struct rlimit rlim; |
| 84 | bool success; |
| 85 | |
| 86 | char core_path[PATH_MAX]; |
| 87 | n = get_core_path(core_path, PATH_MAX); |
| 88 | |
| 89 | if (n <= 0) { |
| 90 | jio_snprintf(buffer, bufferSize, "core.%d (may not exist)" , current_process_id()); |
| 91 | success = true; |
| 92 | #ifdef LINUX |
| 93 | } else if (core_path[0] == '"') { // redirect to user process |
| 94 | jio_snprintf(buffer, bufferSize, "Core dumps may be processed with %s" , core_path); |
| 95 | success = true; |
| 96 | #endif |
| 97 | } else if (getrlimit(RLIMIT_CORE, &rlim) != 0) { |
| 98 | jio_snprintf(buffer, bufferSize, "%s (may not exist)" , core_path); |
| 99 | success = true; |
| 100 | } else { |
| 101 | switch(rlim.rlim_cur) { |
| 102 | case RLIM_INFINITY: |
| 103 | jio_snprintf(buffer, bufferSize, "%s" , core_path); |
| 104 | success = true; |
| 105 | break; |
| 106 | case 0: |
| 107 | jio_snprintf(buffer, bufferSize, "Core dumps have been disabled. To enable core dumping, try \"ulimit -c unlimited\" before starting Java again" ); |
| 108 | success = false; |
| 109 | break; |
| 110 | default: |
| 111 | jio_snprintf(buffer, bufferSize, "%s (max size " UINT64_FORMAT " kB). To ensure a full core dump, try \"ulimit -c unlimited\" before starting Java again" , core_path, uint64_t(rlim.rlim_cur) / 1024); |
| 112 | success = true; |
| 113 | break; |
| 114 | } |
| 115 | } |
| 116 | |
| 117 | VMError::record_coredump_status(buffer, success); |
| 118 | } |
| 119 | |
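|   | // Captures up to 'frames' native (C) return addresses into 'stack', skipping the |
|   | // first 'toSkip' frames, and NULL-fills any unused slots. Returns the number of |
|   | // frames actually captured. The walk stops when it reaches generated (code-blob) |
|   | // code or the first C frame that cannot be walked safely. |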
| 120 | int os::get_native_stack(address* stack, int frames, int toSkip) { |
| 121 | int frame_idx = 0; |
| 122 | int num_of_frames; // number of frames captured |
| 123 | frame fr = os::current_frame(); |
| 124 | while (fr.pc() && frame_idx < frames) { |
| 125 | if (toSkip > 0) { |
| 126 | toSkip --; |
| 127 | } else { |
| 128 | stack[frame_idx ++] = fr.pc(); |
| 129 | } |
| 130 | if (fr.fp() == NULL || fr.cb() != NULL || |
| 131 | fr.sender_pc() == NULL || os::is_first_C_frame(&fr)) break; |
| 132 | |
| 133 | if (fr.sender_pc() && !os::is_first_C_frame(&fr)) { |
| 134 | fr = os::get_sender_for_C_frame(&fr); |
| 135 | } else { |
| 136 | break; |
| 137 | } |
| 138 | } |
| 139 | num_of_frames = frame_idx; |
| 140 | for (; frame_idx < frames; frame_idx ++) { |
| 141 | stack[frame_idx] = NULL; |
| 142 | } |
| 143 | |
| 144 | return num_of_frames; |
| 145 | } |
| 146 | |
| 147 | |
| 148 | bool os::unsetenv(const char* name) { |
| 149 | assert(name != NULL, "Null pointer" ); |
| 150 | return (::unsetenv(name) == 0); |
| 151 | } |
| 152 | |
| 153 | int os::get_last_error() { |
| 154 | return errno; |
| 155 | } |
| 156 | |
| 157 | size_t os::lasterror(char *buf, size_t len) { |
| 158 | if (errno == 0) return 0; |
| 159 | |
| 160 | const char *s = os::strerror(errno); |
| 161 | size_t n = ::strlen(s); |
| 162 | if (n >= len) { |
| 163 | n = len - 1; |
| 164 | } |
| 165 | ::strncpy(buf, s, n); |
| 166 | buf[n] = '\0'; |
| 167 | return n; |
| 168 | } |
| 169 | |
| 170 | bool os::is_debugger_attached() { |
| 171 | // not implemented |
| 172 | return false; |
| 173 | } |
| 174 | |
| 175 | void os::wait_for_keypress_at_exit(void) { |
| 176 | // don't do anything on posix platforms |
| 177 | return; |
| 178 | } |
| 179 | |
| 180 | int os::create_file_for_heap(const char* dir) { |
| 181 | |
| 182 | const char name_template[] = "/jvmheap.XXXXXX" ; |
| 183 | |
| 184 | size_t fullname_len = strlen(dir) + strlen(name_template); |
| 185 | char *fullname = (char*)os::malloc(fullname_len + 1, mtInternal); |
| 186 | if (fullname == NULL) { |
| 187 | vm_exit_during_initialization(err_msg("Malloc failed during creation of backing file for heap (%s)" , os::strerror(errno))); |
| 188 | return -1; |
| 189 | } |
| 190 | int n = snprintf(fullname, fullname_len + 1, "%s%s" , dir, name_template); |
| 191 | assert((size_t)n == fullname_len, "Unexpected number of characters in string" ); |
| 192 | |
| 193 | os::native_path(fullname); |
| 194 | |
| 195 | // set the file creation mask. |
| 196 | mode_t file_mode = S_IRUSR | S_IWUSR; |
| 197 | |
| 198 | // create a new file. |
| 199 | int fd = mkstemp(fullname); |
| 200 | |
| 201 | if (fd < 0) { |
| 202 | warning("Could not create file for heap with template %s" , fullname); |
| 203 | os::free(fullname); |
| 204 | return -1; |
| 205 | } |
| 206 | |
| 207 | // delete the name from the filesystem. When 'fd' is closed, the file (and space) will be deleted. |
| 208 | int ret = unlink(fullname); |
| 209 | assert_with_errno(ret == 0, "unlink returned error" ); |
| 210 | |
| 211 | os::free(fullname); |
| 212 | return fd; |
| 213 | } |
| 214 | |
| 215 | static char* reserve_mmapped_memory(size_t bytes, char* requested_addr) { |
| 216 | char * addr; |
| 217 | int flags = MAP_PRIVATE NOT_AIX( | MAP_NORESERVE ) | MAP_ANONYMOUS; |
| 218 | if (requested_addr != NULL) { |
| 219 | assert((uintptr_t)requested_addr % os::vm_page_size() == 0, "Requested address should be aligned to OS page size" ); |
| 220 | flags |= MAP_FIXED; |
| 221 | } |
| 222 | |
| 223 | // Map reserved/uncommitted pages PROT_NONE so we fail early if we |
| 224 | // touch an uncommitted page. Otherwise, the read/write might |
| 225 | // succeed if we have enough swap space to back the physical page. |
| 226 | addr = (char*)::mmap(requested_addr, bytes, PROT_NONE, |
| 227 | flags, -1, 0); |
| 228 | |
| 229 | if (addr != MAP_FAILED) { |
| 230 | MemTracker::record_virtual_memory_reserve((address)addr, bytes, CALLER_PC); |
| 231 | return addr; |
| 232 | } |
| 233 | return NULL; |
| 234 | } |
| 235 | |
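|   | // Portability wrapper around file preallocation: macOS has no posix_fallocate(), |
|   | // so fcntl(F_PREALLOCATE) is used instead (first requesting a contiguous range, |
|   | // then falling back to a non-contiguous one), followed by ftruncate() to set the |
|   | // file size; elsewhere posix_fallocate() is called directly. |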
| 236 | static int util_posix_fallocate(int fd, off_t offset, off_t len) { |
| 237 | #ifdef __APPLE__ |
| 238 | fstore_t store = { F_ALLOCATECONTIG, F_PEOFPOSMODE, 0, len }; |
| 239 | // First we try to get a continuous chunk of disk space |
| 240 | int ret = fcntl(fd, F_PREALLOCATE, &store); |
| 241 | if (ret == -1) { |
| 242 | // Maybe we are too fragmented, try to allocate non-continuous range |
| 243 | store.fst_flags = F_ALLOCATEALL; |
| 244 | ret = fcntl(fd, F_PREALLOCATE, &store); |
| 245 | } |
| 246 | if(ret != -1) { |
| 247 | return ftruncate(fd, len); |
| 248 | } |
| 249 | return -1; |
| 250 | #else |
| 251 | return posix_fallocate(fd, offset, len); |
| 252 | #endif |
| 253 | } |
| 254 | |
| 255 | // Map the given address range to the provided file descriptor. |
| 256 | char* os::map_memory_to_file(char* base, size_t size, int fd) { |
| 257 | assert(fd != -1, "File descriptor is not valid" ); |
| 258 | |
| 259 | // allocate space for the file |
| 260 | int ret = util_posix_fallocate(fd, 0, (off_t)size); |
| 261 | if (ret != 0) { |
| 262 | vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory. error(%d)" , ret)); |
| 263 | return NULL; |
| 264 | } |
| 265 | |
| 266 | int prot = PROT_READ | PROT_WRITE; |
| 267 | int flags = MAP_SHARED; |
| 268 | if (base != NULL) { |
| 269 | flags |= MAP_FIXED; |
| 270 | } |
| 271 | char* addr = (char*)mmap(base, size, prot, flags, fd, 0); |
| 272 | |
| 273 | if (addr == MAP_FAILED) { |
| 274 | warning("Failed mmap to file. (%s)" , os::strerror(errno)); |
| 275 | return NULL; |
| 276 | } |
| 277 | if (base != NULL && addr != base) { |
| 278 | if (!os::release_memory(addr, size)) { |
| 279 | warning("Could not release memory on unsuccessful file mapping" ); |
| 280 | } |
| 281 | return NULL; |
| 282 | } |
| 283 | return addr; |
| 284 | } |
| 285 | |
| 286 | char* os::replace_existing_mapping_with_file_mapping(char* base, size_t size, int fd) { |
| 287 | assert(fd != -1, "File descriptor is not valid" ); |
| 288 | assert(base != NULL, "Base cannot be NULL" ); |
| 289 | |
| 290 | return map_memory_to_file(base, size, fd); |
| 291 | } |
| 292 | |
| 293 | // Multiple threads can race in this code, and can remap over each other with MAP_FIXED, |
| 294 | // so on posix, unmap the section at the start and at the end of the chunk that we mapped |
| 295 | // rather than unmapping and remapping the whole chunk to get requested alignment. |
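|   | // Illustrative example (assuming page-aligned inputs): for size = 4M and |
|   | // alignment = 1M we reserve extra_size = 5M, align the base up to the next 1M |
|   | // boundary, then release the unused head (begin_offset bytes) and tail |
|   | // (end_offset bytes) so that exactly 4M remains mapped at the aligned base. |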
| 296 | char* os::reserve_memory_aligned(size_t size, size_t alignment, int file_desc) { |
| 297 | assert((alignment & (os::vm_allocation_granularity() - 1)) == 0, |
| 298 | "Alignment must be a multiple of allocation granularity (page size)" ); |
| 299 | assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned" ); |
| 300 | |
| 301 | size_t extra_size = size + alignment; |
| 302 | assert(extra_size >= size, "overflow, size is too large to allow alignment" ); |
| 303 | |
| 304 | char* extra_base; |
| 305 | if (file_desc != -1) { |
| 306 | // For file mapping, we do not call os::reserve_memory(extra_size, NULL, alignment, file_desc) because |
| 307 | // we need to deal with shrinking of the file space later when we release extra memory after alignment. |
| 308 | // We also cannot call os::reserve_memory() with file_desc set to -1, because on AIX we might get SHM memory. |
| 309 | // So we call a helper function here to reserve the memory for us. After we have an aligned base, |
| 310 | // we will replace the anonymous mapping with a file mapping. |
| 311 | extra_base = reserve_mmapped_memory(extra_size, NULL); |
| 312 | if (extra_base != NULL) { |
| 313 | MemTracker::record_virtual_memory_reserve((address)extra_base, extra_size, CALLER_PC); |
| 314 | } |
| 315 | } else { |
| 316 | extra_base = os::reserve_memory(extra_size, NULL, alignment); |
| 317 | } |
| 318 | |
| 319 | if (extra_base == NULL) { |
| 320 | return NULL; |
| 321 | } |
| 322 | |
| 323 | // Do manual alignment |
| 324 | char* aligned_base = align_up(extra_base, alignment); |
| 325 | |
| 326 | // [ | | ] |
| 327 | // ^ extra_base |
| 328 | // ^ extra_base + begin_offset == aligned_base |
| 329 | // extra_base + begin_offset + size ^ |
| 330 | // extra_base + extra_size ^ |
| 331 | // |<>| == begin_offset |
| 332 | // end_offset == |<>| |
| 333 | size_t begin_offset = aligned_base - extra_base; |
| 334 | size_t end_offset = (extra_base + extra_size) - (aligned_base + size); |
| 335 | |
| 336 | if (begin_offset > 0) { |
| 337 | os::release_memory(extra_base, begin_offset); |
| 338 | } |
| 339 | |
| 340 | if (end_offset > 0) { |
| 341 | os::release_memory(extra_base + begin_offset + size, end_offset); |
| 342 | } |
| 343 | |
| 344 | if (file_desc != -1) { |
| 345 | // After we have an aligned address, we can replace anonymous mapping with file mapping |
| 346 | if (replace_existing_mapping_with_file_mapping(aligned_base, size, file_desc) == NULL) { |
| 347 | vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory" )); |
| 348 | } |
| 349 | MemTracker::record_virtual_memory_commit((address)aligned_base, size, CALLER_PC); |
| 350 | } |
| 351 | return aligned_base; |
| 352 | } |
| 353 | |
| 354 | int os::vsnprintf(char* buf, size_t len, const char* fmt, va_list args) { |
| 355 | // All supported POSIX platforms provide C99 semantics. |
| 356 | int result = ::vsnprintf(buf, len, fmt, args); |
| 357 | // If an encoding error occurred (result < 0) then it's not clear |
| 358 | // whether the buffer is NUL terminated, so ensure it is. |
| 359 | if ((result < 0) && (len > 0)) { |
| 360 | buf[len - 1] = '\0'; |
| 361 | } |
| 362 | return result; |
| 363 | } |
| 364 | |
| 365 | int os::get_fileno(FILE* fp) { |
| 366 | return NOT_AIX(::)fileno(fp); |
| 367 | } |
| 368 | |
| 369 | struct tm* os::gmtime_pd(const time_t* clock, struct tm* res) { |
| 370 | return gmtime_r(clock, res); |
| 371 | } |
| 372 | |
| 373 | void os::Posix::print_load_average(outputStream* st) { |
| 374 | st->print("load average:" ); |
| 375 | double loadavg[3]; |
| 376 | os::loadavg(loadavg, 3); |
| 377 | st->print("%0.02f %0.02f %0.02f" , loadavg[0], loadavg[1], loadavg[2]); |
| 378 | st->cr(); |
| 379 | } |
| 380 | |
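|   | // Prints the current soft resource limits on one line, e.g. (illustrative): |
|   | //   rlimit: STACK 8192k, CORE 0k, NPROC 63610, NOFILE 65536, AS infinity, DATA infinity, FSIZE infinity |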
| 381 | void os::Posix::print_rlimit_info(outputStream* st) { |
| 382 | st->print("rlimit:" ); |
| 383 | struct rlimit rlim; |
| 384 | |
| 385 | st->print(" STACK " ); |
| 386 | getrlimit(RLIMIT_STACK, &rlim); |
| 387 | if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity" ); |
| 388 | else st->print(UINT64_FORMAT "k" , uint64_t(rlim.rlim_cur) / 1024); |
| 389 | |
| 390 | st->print(", CORE " ); |
| 391 | getrlimit(RLIMIT_CORE, &rlim); |
| 392 | if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity" ); |
| 393 | else st->print(UINT64_FORMAT "k" , uint64_t(rlim.rlim_cur) / 1024); |
| 394 | |
| 395 | // RLIMIT_NPROC isn't there on Solaris |
| 396 | #if defined(AIX) |
| 397 | st->print(", NPROC " ); |
| 398 | st->print("%d" , sysconf(_SC_CHILD_MAX)); |
| 399 | #elif !defined(SOLARIS) |
| 400 | st->print(", NPROC " ); |
| 401 | getrlimit(RLIMIT_NPROC, &rlim); |
| 402 | if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity" ); |
| 403 | else st->print(UINT64_FORMAT, uint64_t(rlim.rlim_cur)); |
| 404 | #endif |
| 405 | |
| 406 | st->print(", NOFILE " ); |
| 407 | getrlimit(RLIMIT_NOFILE, &rlim); |
| 408 | if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity" ); |
| 409 | else st->print(UINT64_FORMAT, uint64_t(rlim.rlim_cur)); |
| 410 | |
| 411 | st->print(", AS " ); |
| 412 | getrlimit(RLIMIT_AS, &rlim); |
| 413 | if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity" ); |
| 414 | else st->print(UINT64_FORMAT "k" , uint64_t(rlim.rlim_cur) / 1024); |
| 415 | |
| 416 | st->print(", DATA " ); |
| 417 | getrlimit(RLIMIT_DATA, &rlim); |
| 418 | if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity" ); |
| 419 | else st->print(UINT64_FORMAT "k" , uint64_t(rlim.rlim_cur) / 1024); |
| 420 | |
| 421 | st->print(", FSIZE " ); |
| 422 | getrlimit(RLIMIT_FSIZE, &rlim); |
| 423 | if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity" ); |
| 424 | else st->print(UINT64_FORMAT "k" , uint64_t(rlim.rlim_cur) / 1024); |
| 425 | |
| 426 | st->cr(); |
| 427 | } |
| 428 | |
| 429 | void os::Posix::print_uname_info(outputStream* st) { |
| 430 | // kernel |
| 431 | st->print("uname:" ); |
| 432 | struct utsname name; |
| 433 | uname(&name); |
| 434 | st->print("%s " , name.sysname); |
| 435 | #ifdef ASSERT |
| 436 | st->print("%s " , name.nodename); |
| 437 | #endif |
| 438 | st->print("%s " , name.release); |
| 439 | st->print("%s " , name.version); |
| 440 | st->print("%s" , name.machine); |
| 441 | st->cr(); |
| 442 | } |
| 443 | |
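|   | // Prints the given umask in rwxrwxrwx form (user/group/other). Note that a set |
|   | // bit in the umask means the corresponding permission will be masked off for |
|   | // newly created files, so the letters shown are the permissions being denied. |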
| 444 | void os::Posix::print_umask(outputStream* st, mode_t umsk) { |
| 445 | st->print((umsk & S_IRUSR) ? "r" : "-" ); |
| 446 | st->print((umsk & S_IWUSR) ? "w" : "-" ); |
| 447 | st->print((umsk & S_IXUSR) ? "x" : "-" ); |
| 448 | st->print((umsk & S_IRGRP) ? "r" : "-" ); |
| 449 | st->print((umsk & S_IWGRP) ? "w" : "-" ); |
| 450 | st->print((umsk & S_IXGRP) ? "x" : "-" ); |
| 451 | st->print((umsk & S_IROTH) ? "r" : "-" ); |
| 452 | st->print((umsk & S_IWOTH) ? "w" : "-" ); |
| 453 | st->print((umsk & S_IXOTH) ? "x" : "-" ); |
| 454 | } |
| 455 | |
| 456 | void os::Posix::print_user_info(outputStream* st) { |
| 457 | unsigned id = (unsigned) ::getuid(); |
| 458 | st->print("uid : %u " , id); |
| 459 | id = (unsigned) ::geteuid(); |
| 460 | st->print("euid : %u " , id); |
| 461 | id = (unsigned) ::getgid(); |
| 462 | st->print("gid : %u " , id); |
| 463 | id = (unsigned) ::getegid(); |
| 464 | st->print_cr("egid : %u" , id); |
| 465 | st->cr(); |
| 466 | |
| 467 | mode_t umsk = ::umask(0); |
| 468 | ::umask(umsk); |
| 469 | st->print("umask: %04o (" , (unsigned) umsk); |
| 470 | print_umask(st, umsk); |
| 471 | st->print_cr(")" ); |
| 472 | st->cr(); |
| 473 | } |
| 474 | |
| 475 | |
| 476 | bool os::get_host_name(char* buf, size_t buflen) { |
| 477 | struct utsname name; |
| 478 | uname(&name); |
| 479 | jio_snprintf(buf, buflen, "%s" , name.nodename); |
| 480 | return true; |
| 481 | } |
| 482 | |
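|   | // Determines an upper bound on allocatable virtual memory and stores it in |
|   | // *limit. Returns false if no limit could be determined (e.g. RLIMIT_AS is |
|   | // RLIM_INFINITY on 64-bit); on 32-bit platforms a limit is always established, |
|   | // bounded further by probing what is actually allocatable, and true is returned. |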
| 483 | bool os::has_allocatable_memory_limit(julong* limit) { |
| 484 | struct rlimit rlim; |
| 485 | int getrlimit_res = getrlimit(RLIMIT_AS, &rlim); |
| 486 | // if there was an error when calling getrlimit, assume that there is no limitation |
| 487 | // on virtual memory. |
| 488 | bool result; |
| 489 | if ((getrlimit_res != 0) || (rlim.rlim_cur == RLIM_INFINITY)) { |
| 490 | result = false; |
| 491 | } else { |
| 492 | *limit = (julong)rlim.rlim_cur; |
| 493 | result = true; |
| 494 | } |
| 495 | #ifdef _LP64 |
| 496 | return result; |
| 497 | #else |
| 498 | // arbitrary virtual space limit for 32 bit Unices found by testing. If |
| 499 | // getrlimit above returned a limit, bound it with this limit. Otherwise |
| 500 | // directly use it. |
| 501 | const julong max_virtual_limit = (julong)3800*M; |
| 502 | if (result) { |
| 503 | *limit = MIN2(*limit, max_virtual_limit); |
| 504 | } else { |
| 505 | *limit = max_virtual_limit; |
| 506 | } |
| 507 | |
| 508 | // bound by actually allocatable memory. The algorithm uses two bounds, an |
| 509 | // upper and a lower limit. The upper limit is the current highest amount of |
| 510 | // memory that could not be allocated, the lower limit is the current highest |
| 511 | // amount of memory that could be allocated. |
| 512 | // The algorithm iteratively refines the result by halving the difference |
| 513 | // between these limits, updating either the upper limit (if that value could |
| 514 | // not be allocated) or the lower limit (if that value could be allocated) |
| 515 | // until the difference between these limits is "small". |
| 516 | |
| 517 | // the minimum amount of memory we care about allocating. |
| 518 | const julong min_allocation_size = M; |
| 519 | |
| 520 | julong upper_limit = *limit; |
| 521 | |
| 522 | // first check a few trivial cases |
| 523 | if (is_allocatable(upper_limit) || (upper_limit <= min_allocation_size)) { |
| 524 | *limit = upper_limit; |
| 525 | } else if (!is_allocatable(min_allocation_size)) { |
| 526 | // we found that not even min_allocation_size is allocatable. Return it |
| 527 | // anyway. There is no point in searching for a better value any more. |
| 528 | *limit = min_allocation_size; |
| 529 | } else { |
| 530 | // perform the binary search. |
| 531 | julong lower_limit = min_allocation_size; |
| 532 | while ((upper_limit - lower_limit) > min_allocation_size) { |
| 533 | julong temp_limit = ((upper_limit - lower_limit) / 2) + lower_limit; |
| 534 | temp_limit = align_down(temp_limit, min_allocation_size); |
| 535 | if (is_allocatable(temp_limit)) { |
| 536 | lower_limit = temp_limit; |
| 537 | } else { |
| 538 | upper_limit = temp_limit; |
| 539 | } |
| 540 | } |
| 541 | *limit = lower_limit; |
| 542 | } |
| 543 | return true; |
| 544 | #endif |
| 545 | } |
| 546 | |
| 547 | const char* os::get_current_directory(char *buf, size_t buflen) { |
| 548 | return getcwd(buf, buflen); |
| 549 | } |
| 550 | |
| 551 | FILE* os::open(int fd, const char* mode) { |
| 552 | return ::fdopen(fd, mode); |
| 553 | } |
| 554 | |
| 555 | ssize_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) { |
| 556 | return ::pread(fd, buf, nBytes, offset); |
| 557 | } |
| 558 | |
| 559 | void os::flockfile(FILE* fp) { |
| 560 | ::flockfile(fp); |
| 561 | } |
| 562 | |
| 563 | void os::funlockfile(FILE* fp) { |
| 564 | ::funlockfile(fp); |
| 565 | } |
| 566 | |
| 567 | DIR* os::opendir(const char* dirname) { |
| 568 | assert(dirname != NULL, "just checking" ); |
| 569 | return ::opendir(dirname); |
| 570 | } |
| 571 | |
| 572 | struct dirent* os::readdir(DIR* dirp) { |
| 573 | assert(dirp != NULL, "just checking" ); |
| 574 | return ::readdir(dirp); |
| 575 | } |
| 576 | |
| 577 | int os::closedir(DIR *dirp) { |
| 578 | assert(dirp != NULL, "just checking" ); |
| 579 | return ::closedir(dirp); |
| 580 | } |
| 581 | |
| 582 | // Builds a platform dependent Agent_OnLoad_<lib_name> function name |
| 583 | // which is used to find statically linked in agents. |
| 584 | // Parameters: |
| 585 | // sym_name: Symbol in library we are looking for |
| 586 | // lib_name: Name of library to look in, NULL for shared libs. |
| 587 | // is_absolute_path == true if lib_name is absolute path to agent |
| 588 | // such as "/a/b/libL.so" |
| 589 | // == false if only the base name of the library is passed in |
| 590 | // such as "L" |
| 591 | char* os::build_agent_function_name(const char *sym_name, const char *lib_name, |
| 592 | bool is_absolute_path) { |
| 593 | char *agent_entry_name; |
| 594 | size_t len; |
| 595 | size_t name_len; |
| 596 | size_t prefix_len = strlen(JNI_LIB_PREFIX); |
| 597 | size_t suffix_len = strlen(JNI_LIB_SUFFIX); |
| 598 | const char *start; |
| 599 | |
| 600 | if (lib_name != NULL) { |
| 601 | name_len = strlen(lib_name); |
| 602 | if (is_absolute_path) { |
| 603 | // Need to strip path, prefix and suffix |
| 604 | if ((start = strrchr(lib_name, *os::file_separator())) != NULL) { |
| 605 | lib_name = ++start; |
| 606 | } |
| 607 | if (strlen(lib_name) <= (prefix_len + suffix_len)) { |
| 608 | return NULL; |
| 609 | } |
| 610 | lib_name += prefix_len; |
| 611 | name_len = strlen(lib_name) - suffix_len; |
| 612 | } |
| 613 | } |
| 614 | len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2; |
| 615 | agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread); |
| 616 | if (agent_entry_name == NULL) { |
| 617 | return NULL; |
| 618 | } |
| 619 | strcpy(agent_entry_name, sym_name); |
| 620 | if (lib_name != NULL) { |
| 621 | strcat(agent_entry_name, "_" ); |
| 622 | strncat(agent_entry_name, lib_name, name_len); |
| 623 | } |
| 624 | return agent_entry_name; |
| 625 | } |
| 626 | |
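|   | // Sleeps for approximately 'millis' milliseconds using the thread's _SleepEvent. |
|   | // In the interruptible case the wait is performed in blocked state, honors the |
|   | // suspend protocol, and returns OS_INTRPT if the thread was interrupted; the |
|   | // non-interruptible case simply re-parks until the time has elapsed and returns OS_OK. |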
| 627 | int os::sleep(Thread* thread, jlong millis, bool interruptible) { |
| 628 | assert(thread == Thread::current(), "thread consistency check" ); |
| 629 | |
| 630 | ParkEvent * const slp = thread->_SleepEvent ; |
| 631 | slp->reset() ; |
| 632 | OrderAccess::fence() ; |
| 633 | |
| 634 | if (interruptible) { |
| 635 | jlong prevtime = javaTimeNanos(); |
| 636 | |
| 637 | for (;;) { |
| 638 | if (os::is_interrupted(thread, true)) { |
| 639 | return OS_INTRPT; |
| 640 | } |
| 641 | |
| 642 | jlong newtime = javaTimeNanos(); |
| 643 | |
| 644 | if (newtime - prevtime < 0) { |
| 645 | // time moving backwards, should only happen if no monotonic clock |
| 646 | // not a guarantee() because JVM should not abort on kernel/glibc bugs |
| 647 | assert(!os::supports_monotonic_clock(), "unexpected time moving backwards detected in os::sleep(interruptible)" ); |
| 648 | } else { |
| 649 | millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC; |
| 650 | } |
| 651 | |
| 652 | if (millis <= 0) { |
| 653 | return OS_OK; |
| 654 | } |
| 655 | |
| 656 | prevtime = newtime; |
| 657 | |
| 658 | { |
| 659 | assert(thread->is_Java_thread(), "sanity check" ); |
| 660 | JavaThread *jt = (JavaThread *) thread; |
| 661 | ThreadBlockInVM tbivm(jt); |
| 662 | OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */); |
| 663 | |
| 664 | jt->set_suspend_equivalent(); |
| 665 | // cleared by handle_special_suspend_equivalent_condition() or |
| 666 | // java_suspend_self() via check_and_wait_while_suspended() |
| 667 | |
| 668 | slp->park(millis); |
| 669 | |
| 670 | // were we externally suspended while we were waiting? |
| 671 | jt->check_and_wait_while_suspended(); |
| 672 | } |
| 673 | } |
| 674 | } else { |
| 675 | OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */); |
| 676 | jlong prevtime = javaTimeNanos(); |
| 677 | |
| 678 | for (;;) { |
| 679 | // It'd be nice to avoid the back-to-back javaTimeNanos() calls on |
| 680 | // the 1st iteration ... |
| 681 | jlong newtime = javaTimeNanos(); |
| 682 | |
| 683 | if (newtime - prevtime < 0) { |
| 684 | // time moving backwards, should only happen if no monotonic clock |
| 685 | // not a guarantee() because JVM should not abort on kernel/glibc bugs |
| 686 | assert(!os::supports_monotonic_clock(), "unexpected time moving backwards detected on os::sleep(!interruptible)" ); |
| 687 | } else { |
| 688 | millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC; |
| 689 | } |
| 690 | |
| 691 | if (millis <= 0) break ; |
| 692 | |
| 693 | prevtime = newtime; |
| 694 | slp->park(millis); |
| 695 | } |
| 696 | return OS_OK ; |
| 697 | } |
| 698 | } |
| 699 | |
| 700 | void os::naked_short_nanosleep(jlong ns) { |
| 701 | struct timespec req; |
| 702 | assert(ns > -1 && ns < NANOUNITS, "Un-interruptible sleep, short time use only"); |
| 703 | req.tv_sec = 0; |
| 704 | req.tv_nsec = ns; |
| 705 | ::nanosleep(&req, NULL); |
| 706 | return; |
| 707 | } |
| 708 | |
| 709 | void os::naked_short_sleep(jlong ms) { |
| 710 | assert(ms < MILLIUNITS, "Un-interruptible sleep, short time use only"); |
| 711 | os::naked_short_nanosleep(ms * (NANOUNITS / MILLIUNITS)); |
| 712 | return; |
| 713 | } |
| 714 | |
| 715 | //////////////////////////////////////////////////////////////////////////////// |
| 716 | // interrupt support |
| 717 | |
| 718 | void os::interrupt(Thread* thread) { |
| 719 | debug_only(Thread::check_for_dangling_thread_pointer(thread);) |
| 720 | |
| 721 | OSThread* osthread = thread->osthread(); |
| 722 | |
| 723 | if (!osthread->interrupted()) { |
| 724 | osthread->set_interrupted(true); |
| 725 | // More than one thread can get here with the same value of osthread, |
| 726 | // resulting in multiple notifications. We do, however, want the store |
| 727 | // to interrupted() to be visible to other threads before we execute unpark(). |
| 728 | OrderAccess::fence(); |
| 729 | ParkEvent * const slp = thread->_SleepEvent ; |
| 730 | if (slp != NULL) slp->unpark() ; |
| 731 | } |
| 732 | |
| 733 | // For JSR166. Unpark even if interrupt status already was set |
| 734 | if (thread->is_Java_thread()) |
| 735 | ((JavaThread*)thread)->parker()->unpark(); |
| 736 | |
| 737 | ParkEvent * ev = thread->_ParkEvent ; |
| 738 | if (ev != NULL) ev->unpark() ; |
| 739 | } |
| 740 | |
| 741 | bool os::is_interrupted(Thread* thread, bool clear_interrupted) { |
| 742 | debug_only(Thread::check_for_dangling_thread_pointer(thread);) |
| 743 | |
| 744 | OSThread* osthread = thread->osthread(); |
| 745 | |
| 746 | bool interrupted = osthread->interrupted(); |
| 747 | |
| 748 | // NOTE that since there is no "lock" around the interrupt and |
| 749 | // is_interrupted operations, there is the possibility that the |
| 750 | // interrupted flag (in osThread) will be "false" but that the |
| 751 | // low-level events will be in the signaled state. This is |
| 752 | // intentional. The effect of this is that Object.wait() and |
| 753 | // LockSupport.park() will appear to have a spurious wakeup, which |
| 754 | // is allowed and not harmful, and the possibility is so rare that |
| 755 | // it is not worth the added complexity to add yet another lock. |
| 756 | // For the sleep event an explicit reset is performed on entry |
| 757 | // to os::sleep, so there is no early return. It has also been |
| 758 | // recommended not to put the interrupted flag into the "event" |
| 759 | // structure because it hides the issue. |
| 760 | if (interrupted && clear_interrupted) { |
| 761 | osthread->set_interrupted(false); |
| 762 | // consider thread->_SleepEvent->reset() ... optional optimization |
| 763 | } |
| 764 | |
| 765 | return interrupted; |
| 766 | } |
| 767 | |
| 768 | |
| 769 | |
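|   | // Table mapping signal numbers to their conventional names. Signals that only |
|   | // exist on some platforms are guarded by #ifdef; the table is terminated by a |
|   | // { -1, NULL } sentinel and is searched linearly by get_signal_name() and |
|   | // get_signal_number() below. |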
| 770 | static const struct { |
| 771 | int sig; const char* name; |
| 772 | } |
| 773 | g_signal_info[] = |
| 774 | { |
| 775 | { SIGABRT, "SIGABRT" }, |
| 776 | #ifdef SIGAIO |
| 777 | { SIGAIO, "SIGAIO" }, |
| 778 | #endif |
| 779 | { SIGALRM, "SIGALRM" }, |
| 780 | #ifdef SIGALRM1 |
| 781 | { SIGALRM1, "SIGALRM1" }, |
| 782 | #endif |
| 783 | { SIGBUS, "SIGBUS" }, |
| 784 | #ifdef SIGCANCEL |
| 785 | { SIGCANCEL, "SIGCANCEL" }, |
| 786 | #endif |
| 787 | { SIGCHLD, "SIGCHLD" }, |
| 788 | #ifdef SIGCLD |
| 789 | { SIGCLD, "SIGCLD" }, |
| 790 | #endif |
| 791 | { SIGCONT, "SIGCONT" }, |
| 792 | #ifdef SIGCPUFAIL |
| 793 | { SIGCPUFAIL, "SIGCPUFAIL" }, |
| 794 | #endif |
| 795 | #ifdef SIGDANGER |
| 796 | { SIGDANGER, "SIGDANGER" }, |
| 797 | #endif |
| 798 | #ifdef SIGDIL |
| 799 | { SIGDIL, "SIGDIL" }, |
| 800 | #endif |
| 801 | #ifdef SIGEMT |
| 802 | { SIGEMT, "SIGEMT" }, |
| 803 | #endif |
| 804 | { SIGFPE, "SIGFPE" }, |
| 805 | #ifdef SIGFREEZE |
| 806 | { SIGFREEZE, "SIGFREEZE" }, |
| 807 | #endif |
| 808 | #ifdef SIGGFAULT |
| 809 | { SIGGFAULT, "SIGGFAULT" }, |
| 810 | #endif |
| 811 | #ifdef SIGGRANT |
| 812 | { SIGGRANT, "SIGGRANT" }, |
| 813 | #endif |
| 814 | { SIGHUP, "SIGHUP" }, |
| 815 | { SIGILL, "SIGILL" }, |
| 816 | { SIGINT, "SIGINT" }, |
| 817 | #ifdef SIGIO |
| 818 | { SIGIO, "SIGIO" }, |
| 819 | #endif |
| 820 | #ifdef SIGIOINT |
| 821 | { SIGIOINT, "SIGIOINT" }, |
| 822 | #endif |
| 823 | #ifdef SIGIOT |
| 824 | // SIGIOT is there for BSD compatibility, but on most Unices just a |
| 825 | // synonym for SIGABRT. The result should be "SIGABRT", not |
| 826 | // "SIGIOT". |
| 827 | #if (SIGIOT != SIGABRT ) |
| 828 | { SIGIOT, "SIGIOT" }, |
| 829 | #endif |
| 830 | #endif |
| 831 | #ifdef SIGKAP |
| 832 | { SIGKAP, "SIGKAP" }, |
| 833 | #endif |
| 834 | { SIGKILL, "SIGKILL" }, |
| 835 | #ifdef SIGLOST |
| 836 | { SIGLOST, "SIGLOST" }, |
| 837 | #endif |
| 838 | #ifdef SIGLWP |
| 839 | { SIGLWP, "SIGLWP" }, |
| 840 | #endif |
| 841 | #ifdef SIGLWPTIMER |
| 842 | { SIGLWPTIMER, "SIGLWPTIMER" }, |
| 843 | #endif |
| 844 | #ifdef SIGMIGRATE |
| 845 | { SIGMIGRATE, "SIGMIGRATE" }, |
| 846 | #endif |
| 847 | #ifdef SIGMSG |
| 848 | { SIGMSG, "SIGMSG" }, |
| 849 | #endif |
| 850 | { SIGPIPE, "SIGPIPE" }, |
| 851 | #ifdef SIGPOLL |
| 852 | { SIGPOLL, "SIGPOLL" }, |
| 853 | #endif |
| 854 | #ifdef SIGPRE |
| 855 | { SIGPRE, "SIGPRE" }, |
| 856 | #endif |
| 857 | { SIGPROF, "SIGPROF" }, |
| 858 | #ifdef SIGPTY |
| 859 | { SIGPTY, "SIGPTY" }, |
| 860 | #endif |
| 861 | #ifdef SIGPWR |
| 862 | { SIGPWR, "SIGPWR" }, |
| 863 | #endif |
| 864 | { SIGQUIT, "SIGQUIT" }, |
| 865 | #ifdef SIGRECONFIG |
| 866 | { SIGRECONFIG, "SIGRECONFIG" }, |
| 867 | #endif |
| 868 | #ifdef SIGRECOVERY |
| 869 | { SIGRECOVERY, "SIGRECOVERY" }, |
| 870 | #endif |
| 871 | #ifdef SIGRESERVE |
| 872 | { SIGRESERVE, "SIGRESERVE" }, |
| 873 | #endif |
| 874 | #ifdef SIGRETRACT |
| 875 | { SIGRETRACT, "SIGRETRACT" }, |
| 876 | #endif |
| 877 | #ifdef SIGSAK |
| 878 | { SIGSAK, "SIGSAK" }, |
| 879 | #endif |
| 880 | { SIGSEGV, "SIGSEGV" }, |
| 881 | #ifdef SIGSOUND |
| 882 | { SIGSOUND, "SIGSOUND" }, |
| 883 | #endif |
| 884 | #ifdef SIGSTKFLT |
| 885 | { SIGSTKFLT, "SIGSTKFLT" }, |
| 886 | #endif |
| 887 | { SIGSTOP, "SIGSTOP" }, |
| 888 | { SIGSYS, "SIGSYS" }, |
| 889 | #ifdef SIGSYSERROR |
| 890 | { SIGSYSERROR, "SIGSYSERROR" }, |
| 891 | #endif |
| 892 | #ifdef SIGTALRM |
| 893 | { SIGTALRM, "SIGTALRM" }, |
| 894 | #endif |
| 895 | { SIGTERM, "SIGTERM" }, |
| 896 | #ifdef SIGTHAW |
| 897 | { SIGTHAW, "SIGTHAW" }, |
| 898 | #endif |
| 899 | { SIGTRAP, "SIGTRAP" }, |
| 900 | #ifdef SIGTSTP |
| 901 | { SIGTSTP, "SIGTSTP" }, |
| 902 | #endif |
| 903 | { SIGTTIN, "SIGTTIN" }, |
| 904 | { SIGTTOU, "SIGTTOU" }, |
| 905 | #ifdef SIGURG |
| 906 | { SIGURG, "SIGURG" }, |
| 907 | #endif |
| 908 | { SIGUSR1, "SIGUSR1" }, |
| 909 | { SIGUSR2, "SIGUSR2" }, |
| 910 | #ifdef SIGVIRT |
| 911 | { SIGVIRT, "SIGVIRT" }, |
| 912 | #endif |
| 913 | { SIGVTALRM, "SIGVTALRM" }, |
| 914 | #ifdef SIGWAITING |
| 915 | { SIGWAITING, "SIGWAITING" }, |
| 916 | #endif |
| 917 | #ifdef SIGWINCH |
| 918 | { SIGWINCH, "SIGWINCH" }, |
| 919 | #endif |
| 920 | #ifdef SIGWINDOW |
| 921 | { SIGWINDOW, "SIGWINDOW" }, |
| 922 | #endif |
| 923 | { SIGXCPU, "SIGXCPU" }, |
| 924 | { SIGXFSZ, "SIGXFSZ" }, |
| 925 | #ifdef SIGXRES |
| 926 | { SIGXRES, "SIGXRES" }, |
| 927 | #endif |
| 928 | { -1, NULL } |
| 929 | }; |
| 930 | |
| 931 | // Writes the signal name into the caller-provided buffer and returns it. For unknown signals "UNKNOWN" is returned. |
| 932 | const char* os::Posix::get_signal_name(int sig, char* out, size_t outlen) { |
| 933 | |
| 934 | const char* ret = NULL; |
| 935 | |
| 936 | #ifdef SIGRTMIN |
| 937 | if (sig >= SIGRTMIN && sig <= SIGRTMAX) { |
| 938 | if (sig == SIGRTMIN) { |
| 939 | ret = "SIGRTMIN" ; |
| 940 | } else if (sig == SIGRTMAX) { |
| 941 | ret = "SIGRTMAX" ; |
| 942 | } else { |
| 943 | jio_snprintf(out, outlen, "SIGRTMIN+%d" , sig - SIGRTMIN); |
| 944 | return out; |
| 945 | } |
| 946 | } |
| 947 | #endif |
| 948 | |
| 949 | if (sig > 0) { |
| 950 | for (int idx = 0; g_signal_info[idx].sig != -1; idx ++) { |
| 951 | if (g_signal_info[idx].sig == sig) { |
| 952 | ret = g_signal_info[idx].name; |
| 953 | break; |
| 954 | } |
| 955 | } |
| 956 | } |
| 957 | |
| 958 | if (!ret) { |
| 959 | if (!is_valid_signal(sig)) { |
| 960 | ret = "INVALID" ; |
| 961 | } else { |
| 962 | ret = "UNKNOWN" ; |
| 963 | } |
| 964 | } |
| 965 | |
| 966 | if (out && outlen > 0) { |
| 967 | strncpy(out, ret, outlen); |
| 968 | out[outlen - 1] = '\0'; |
| 969 | } |
| 970 | return out; |
| 971 | } |
| 972 | |
| 973 | int os::Posix::get_signal_number(const char* signal_name) { |
| 974 | char tmp[30]; |
| 975 | const char* s = signal_name; |
| 976 | if (s[0] != 'S' || s[1] != 'I' || s[2] != 'G') { |
| 977 | jio_snprintf(tmp, sizeof(tmp), "SIG%s" , signal_name); |
| 978 | s = tmp; |
| 979 | } |
| 980 | for (int idx = 0; g_signal_info[idx].sig != -1; idx ++) { |
| 981 | if (strcmp(g_signal_info[idx].name, s) == 0) { |
| 982 | return g_signal_info[idx].sig; |
| 983 | } |
| 984 | } |
| 985 | return -1; |
| 986 | } |
| 987 | |
| 988 | int os::get_signal_number(const char* signal_name) { |
| 989 | return os::Posix::get_signal_number(signal_name); |
| 990 | } |
| 991 | |
| 992 | // Returns true if signal number is valid. |
| 993 | bool os::Posix::is_valid_signal(int sig) { |
| 994 | // MacOS is not really POSIX compliant: sigaddset does not return |
| 995 | // an error for invalid signal numbers. However, MacOS does not |
| 996 | // support real time signals and simply seems to have just 33 |
| 997 | // signals with no holes in the signal range. |
| 998 | #ifdef __APPLE__ |
| 999 | return sig >= 1 && sig < NSIG; |
| 1000 | #else |
| 1001 | // Use sigaddset to check for signal validity. |
| 1002 | sigset_t set; |
| 1003 | sigemptyset(&set); |
| 1004 | if (sigaddset(&set, sig) == -1 && errno == EINVAL) { |
| 1005 | return false; |
| 1006 | } |
| 1007 | return true; |
| 1008 | #endif |
| 1009 | } |
| 1010 | |
| 1011 | bool os::Posix::is_sig_ignored(int sig) { |
| 1012 | struct sigaction oact; |
| 1013 | sigaction(sig, (struct sigaction*)NULL, &oact); |
| 1014 | void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction) |
| 1015 | : CAST_FROM_FN_PTR(void*, oact.sa_handler); |
| 1016 | if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) { |
| 1017 | return true; |
| 1018 | } else { |
| 1019 | return false; |
| 1020 | } |
| 1021 | } |
| 1022 | |
| 1023 | // Returns: |
| 1024 | // NULL for an invalid signal number |
| 1025 | // "SIG<num>" for a valid but unknown signal number |
| 1026 | // signal name otherwise. |
| 1027 | const char* os::exception_name(int sig, char* buf, size_t size) { |
| 1028 | if (!os::Posix::is_valid_signal(sig)) { |
| 1029 | return NULL; |
| 1030 | } |
| 1031 | const char* const name = os::Posix::get_signal_name(sig, buf, size); |
| 1032 | if (strcmp(name, "UNKNOWN" ) == 0) { |
| 1033 | jio_snprintf(buf, size, "SIG%d" , sig); |
| 1034 | } |
| 1035 | return buf; |
| 1036 | } |
| 1037 | |
| 1038 | #define NUM_IMPORTANT_SIGS 32 |
| 1039 | // Returns one-line short description of a signal set in a user provided buffer. |
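|   | // The result is a string of NUM_IMPORTANT_SIGS (32) characters, one per signal 1..32: |
|   | // '1' if the signal is in the set, '0' if it is not, and '?' if membership could not |
|   | // be determined (sigismember() failed with EINVAL). |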
| 1040 | const char* os::Posix::describe_signal_set_short(const sigset_t* set, char* buffer, size_t buf_size) { |
| 1041 | assert(buf_size == (NUM_IMPORTANT_SIGS + 1), "wrong buffer size" ); |
| 1042 | // Note: for shortness, just print out the first 32. That should |
| 1043 | // cover most of the useful ones, apart from realtime signals. |
| 1044 | for (int sig = 1; sig <= NUM_IMPORTANT_SIGS; sig++) { |
| 1045 | const int rc = sigismember(set, sig); |
| 1046 | if (rc == -1 && errno == EINVAL) { |
| 1047 | buffer[sig-1] = '?'; |
| 1048 | } else { |
| 1049 | buffer[sig-1] = rc == 0 ? '0' : '1'; |
| 1050 | } |
| 1051 | } |
| 1052 | buffer[NUM_IMPORTANT_SIGS] = 0; |
| 1053 | return buffer; |
| 1054 | } |
| 1055 | |
| 1056 | // Prints one-line description of a signal set. |
| 1057 | void os::Posix::print_signal_set_short(outputStream* st, const sigset_t* set) { |
| 1058 | char buf[NUM_IMPORTANT_SIGS + 1]; |
| 1059 | os::Posix::describe_signal_set_short(set, buf, sizeof(buf)); |
| 1060 | st->print("%s" , buf); |
| 1061 | } |
| 1062 | |
| 1063 | // Writes one-line description of a combination of sigaction.sa_flags into a user |
| 1064 | // provided buffer. Returns that buffer. |
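|   | // Example output (illustrative): "SA_RESTART|SA_SIGINFO", or "none" if no known |
|   | // flag bits are set. |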
| 1065 | const char* os::Posix::describe_sa_flags(int flags, char* buffer, size_t size) { |
| 1066 | char* p = buffer; |
| 1067 | size_t remaining = size; |
| 1068 | bool first = true; |
| 1069 | int idx = 0; |
| 1070 | |
| 1071 | assert(buffer, "invalid argument" ); |
| 1072 | |
| 1073 | if (size == 0) { |
| 1074 | return buffer; |
| 1075 | } |
| 1076 | |
| 1077 | strncpy(buffer, "none" , size); |
| 1078 | |
| 1079 | const struct { |
| 1080 | // NB: i is an unsigned int here because SA_RESETHAND is on some |
| 1081 | // systems 0x80000000, which is implicitly unsigned. Assigning |
| 1082 | // it to an int field would be an overflow in unsigned-to-signed |
| 1083 | // conversion. |
| 1084 | unsigned int i; |
| 1085 | const char* s; |
| 1086 | } flaginfo [] = { |
| 1087 | { SA_NOCLDSTOP, "SA_NOCLDSTOP" }, |
| 1088 | { SA_ONSTACK, "SA_ONSTACK" }, |
| 1089 | { SA_RESETHAND, "SA_RESETHAND" }, |
| 1090 | { SA_RESTART, "SA_RESTART" }, |
| 1091 | { SA_SIGINFO, "SA_SIGINFO" }, |
| 1092 | { SA_NOCLDWAIT, "SA_NOCLDWAIT" }, |
| 1093 | { SA_NODEFER, "SA_NODEFER" }, |
| 1094 | #ifdef AIX |
| 1095 | { SA_ONSTACK, "SA_ONSTACK" }, |
| 1096 | { SA_OLDSTYLE, "SA_OLDSTYLE" }, |
| 1097 | #endif |
| 1098 | { 0, NULL } |
| 1099 | }; |
| 1100 | |
| 1101 | for (idx = 0; flaginfo[idx].s && remaining > 1; idx++) { |
| 1102 | if (flags & flaginfo[idx].i) { |
| 1103 | if (first) { |
| 1104 | jio_snprintf(p, remaining, "%s" , flaginfo[idx].s); |
| 1105 | first = false; |
| 1106 | } else { |
| 1107 | jio_snprintf(p, remaining, "|%s" , flaginfo[idx].s); |
| 1108 | } |
| 1109 | const size_t len = strlen(p); |
| 1110 | p += len; |
| 1111 | remaining -= len; |
| 1112 | } |
| 1113 | } |
| 1114 | |
| 1115 | buffer[size - 1] = '\0'; |
| 1116 | |
| 1117 | return buffer; |
| 1118 | } |
| 1119 | |
| 1120 | // Prints one-line description of a combination of sigaction.sa_flags. |
| 1121 | void os::Posix::print_sa_flags(outputStream* st, int flags) { |
| 1122 | char buffer[0x100]; |
| 1123 | os::Posix::describe_sa_flags(flags, buffer, sizeof(buffer)); |
| 1124 | st->print("%s" , buffer); |
| 1125 | } |
| 1126 | |
| 1127 | // Helper function for os::Posix::print_siginfo_...(): |
| 1128 | // return a textual description for signal code. |
| 1129 | struct enum_sigcode_desc_t { |
| 1130 | const char* s_name; |
| 1131 | const char* s_desc; |
| 1132 | }; |
| 1133 | |
| 1134 | static bool get_signal_code_description(const siginfo_t* si, enum_sigcode_desc_t* out) { |
| 1135 | |
| 1136 | const struct { |
| 1137 | int sig; int code; const char* s_code; const char* s_desc; |
| 1138 | } t1 [] = { |
| 1139 | { SIGILL, ILL_ILLOPC, "ILL_ILLOPC" , "Illegal opcode." }, |
| 1140 | { SIGILL, ILL_ILLOPN, "ILL_ILLOPN" , "Illegal operand." }, |
| 1141 | { SIGILL, ILL_ILLADR, "ILL_ILLADR" , "Illegal addressing mode." }, |
| 1142 | { SIGILL, ILL_ILLTRP, "ILL_ILLTRP" , "Illegal trap." }, |
| 1143 | { SIGILL, ILL_PRVOPC, "ILL_PRVOPC" , "Privileged opcode." }, |
| 1144 | { SIGILL, ILL_PRVREG, "ILL_PRVREG" , "Privileged register." }, |
| 1145 | { SIGILL, ILL_COPROC, "ILL_COPROC" , "Coprocessor error." }, |
| 1146 | { SIGILL, ILL_BADSTK, "ILL_BADSTK" , "Internal stack error." }, |
| 1147 | #if defined(IA64) && defined(LINUX) |
| 1148 | { SIGILL, ILL_BADIADDR, "ILL_BADIADDR" , "Unimplemented instruction address" }, |
| 1149 | { SIGILL, ILL_BREAK, "ILL_BREAK" , "Application Break instruction" }, |
| 1150 | #endif |
| 1151 | { SIGFPE, FPE_INTDIV, "FPE_INTDIV" , "Integer divide by zero." }, |
| 1152 | { SIGFPE, FPE_INTOVF, "FPE_INTOVF" , "Integer overflow." }, |
| 1153 | { SIGFPE, FPE_FLTDIV, "FPE_FLTDIV" , "Floating-point divide by zero." }, |
| 1154 | { SIGFPE, FPE_FLTOVF, "FPE_FLTOVF" , "Floating-point overflow." }, |
| 1155 | { SIGFPE, FPE_FLTUND, "FPE_FLTUND" , "Floating-point underflow." }, |
| 1156 | { SIGFPE, FPE_FLTRES, "FPE_FLTRES" , "Floating-point inexact result." }, |
| 1157 | { SIGFPE, FPE_FLTINV, "FPE_FLTINV" , "Invalid floating-point operation." }, |
| 1158 | { SIGFPE, FPE_FLTSUB, "FPE_FLTSUB" , "Subscript out of range." }, |
| 1159 | { SIGSEGV, SEGV_MAPERR, "SEGV_MAPERR" , "Address not mapped to object." }, |
| 1160 | { SIGSEGV, SEGV_ACCERR, "SEGV_ACCERR" , "Invalid permissions for mapped object." }, |
| 1161 | #ifdef AIX |
| 1162 | // No explanation found for what keyerr would be. |
| 1163 | { SIGSEGV, SEGV_KEYERR, "SEGV_KEYERR" , "key error" }, |
| 1164 | #endif |
| 1165 | #if defined(IA64) && !defined(AIX) |
| 1166 | { SIGSEGV, SEGV_PSTKOVF, "SEGV_PSTKOVF" , "Paragraph stack overflow" }, |
| 1167 | #endif |
| 1168 | #if defined(__sparc) && defined(SOLARIS) |
| 1169 | // define Solaris Sparc M7 ADI SEGV signals |
| 1170 | #if !defined(SEGV_ACCADI) |
| 1171 | #define SEGV_ACCADI 3 |
| 1172 | #endif |
| 1173 | { SIGSEGV, SEGV_ACCADI, "SEGV_ACCADI" , "ADI not enabled for mapped object." }, |
| 1174 | #if !defined(SEGV_ACCDERR) |
| 1175 | #define SEGV_ACCDERR 4 |
| 1176 | #endif |
| 1177 | { SIGSEGV, SEGV_ACCDERR, "SEGV_ACCDERR" , "ADI disrupting exception." }, |
| 1178 | #if !defined(SEGV_ACCPERR) |
| 1179 | #define SEGV_ACCPERR 5 |
| 1180 | #endif |
| 1181 | { SIGSEGV, SEGV_ACCPERR, "SEGV_ACCPERR" , "ADI precise exception." }, |
| 1182 | #endif // defined(__sparc) && defined(SOLARIS) |
| 1183 | { SIGBUS, BUS_ADRALN, "BUS_ADRALN" , "Invalid address alignment." }, |
| 1184 | { SIGBUS, BUS_ADRERR, "BUS_ADRERR" , "Nonexistent physical address." }, |
| 1185 | { SIGBUS, BUS_OBJERR, "BUS_OBJERR" , "Object-specific hardware error." }, |
| 1186 | { SIGTRAP, TRAP_BRKPT, "TRAP_BRKPT" , "Process breakpoint." }, |
| 1187 | { SIGTRAP, TRAP_TRACE, "TRAP_TRACE" , "Process trace trap." }, |
| 1188 | { SIGCHLD, CLD_EXITED, "CLD_EXITED" , "Child has exited." }, |
| 1189 | { SIGCHLD, CLD_KILLED, "CLD_KILLED" , "Child has terminated abnormally and did not create a core file." }, |
| 1190 | { SIGCHLD, CLD_DUMPED, "CLD_DUMPED" , "Child has terminated abnormally and created a core file." }, |
| 1191 | { SIGCHLD, CLD_TRAPPED, "CLD_TRAPPED" , "Traced child has trapped." }, |
| 1192 | { SIGCHLD, CLD_STOPPED, "CLD_STOPPED" , "Child has stopped." }, |
| 1193 | { SIGCHLD, CLD_CONTINUED,"CLD_CONTINUED" ,"Stopped child has continued." }, |
| 1194 | #ifdef SIGPOLL |
| 1195 | { SIGPOLL, POLL_OUT, "POLL_OUT" , "Output buffers available." }, |
| 1196 | { SIGPOLL, POLL_MSG, "POLL_MSG" , "Input message available." }, |
| 1197 | { SIGPOLL, POLL_ERR, "POLL_ERR" , "I/O error." }, |
| 1198 | { SIGPOLL, POLL_PRI, "POLL_PRI" , "High priority input available." }, |
| 1199 | { SIGPOLL, POLL_HUP, "POLL_HUP", "Device disconnected." }, |
| 1200 | #endif |
| 1201 | { -1, -1, NULL, NULL } |
| 1202 | }; |
| 1203 | |
| 1204 | // Codes valid in any signal context. |
| 1205 | const struct { |
| 1206 | int code; const char* s_code; const char* s_desc; |
| 1207 | } t2 [] = { |
| 1208 | { SI_USER, "SI_USER" , "Signal sent by kill()." }, |
| 1209 | { SI_QUEUE, "SI_QUEUE" , "Signal sent by the sigqueue()." }, |
| 1210 | { SI_TIMER, "SI_TIMER" , "Signal generated by expiration of a timer set by timer_settime()." }, |
| 1211 | { SI_ASYNCIO, "SI_ASYNCIO" , "Signal generated by completion of an asynchronous I/O request." }, |
| 1212 | { SI_MESGQ, "SI_MESGQ" , "Signal generated by arrival of a message on an empty message queue." }, |
| 1213 | // Linux specific |
| 1214 | #ifdef SI_TKILL |
| 1215 | { SI_TKILL, "SI_TKILL" , "Signal sent by tkill (pthread_kill)" }, |
| 1216 | #endif |
| 1217 | #ifdef SI_DETHREAD |
| 1218 | { SI_DETHREAD, "SI_DETHREAD" , "Signal sent by execve() killing subsidiary threads" }, |
| 1219 | #endif |
| 1220 | #ifdef SI_KERNEL |
| 1221 | { SI_KERNEL, "SI_KERNEL" , "Signal sent by kernel." }, |
| 1222 | #endif |
| 1223 | #ifdef SI_SIGIO |
| 1224 | { SI_SIGIO, "SI_SIGIO" , "Signal sent by queued SIGIO" }, |
| 1225 | #endif |
| 1226 | |
| 1227 | #ifdef AIX |
| 1228 | { SI_UNDEFINED, "SI_UNDEFINED" ,"siginfo contains partial information" }, |
| 1229 | { SI_EMPTY, "SI_EMPTY" , "siginfo contains no useful information" }, |
| 1230 | #endif |
| 1231 | |
| 1232 | #ifdef __sun |
| 1233 | { SI_NOINFO, "SI_NOINFO" , "No signal information" }, |
| 1234 | { SI_RCTL, "SI_RCTL" , "kernel generated signal via rctl action" }, |
| 1235 | { SI_LWP, "SI_LWP" , "Signal sent via lwp_kill" }, |
| 1236 | #endif |
| 1237 | |
| 1238 | { -1, NULL, NULL } |
| 1239 | }; |
| 1240 | |
| 1241 | const char* s_code = NULL; |
| 1242 | const char* s_desc = NULL; |
| 1243 | |
| 1244 | for (int i = 0; t1[i].sig != -1; i ++) { |
| 1245 | if (t1[i].sig == si->si_signo && t1[i].code == si->si_code) { |
| 1246 | s_code = t1[i].s_code; |
| 1247 | s_desc = t1[i].s_desc; |
| 1248 | break; |
| 1249 | } |
| 1250 | } |
| 1251 | |
| 1252 | if (s_code == NULL) { |
| 1253 | for (int i = 0; t2[i].s_code != NULL; i ++) { |
| 1254 | if (t2[i].code == si->si_code) { |
| 1255 | s_code = t2[i].s_code; |
| 1256 | s_desc = t2[i].s_desc; |
| 1257 | } |
| 1258 | } |
| 1259 | } |
| 1260 | |
| 1261 | if (s_code == NULL) { |
| 1262 | out->s_name = "unknown" ; |
| 1263 | out->s_desc = "unknown" ; |
| 1264 | return false; |
| 1265 | } |
| 1266 | |
| 1267 | out->s_name = s_code; |
| 1268 | out->s_desc = s_desc; |
| 1269 | |
| 1270 | return true; |
| 1271 | } |
| 1272 | |
| 1273 | bool os::signal_sent_by_kill(const void* siginfo) { |
| 1274 | const siginfo_t* const si = (const siginfo_t*)siginfo; |
| 1275 | return si->si_code == SI_USER || si->si_code == SI_QUEUE |
| 1276 | #ifdef SI_TKILL |
| 1277 | || si->si_code == SI_TKILL |
| 1278 | #endif |
| 1279 | ; |
| 1280 | } |
| 1281 | |
| 1282 | void os::print_siginfo(outputStream* os, const void* si0) { |
| 1283 | |
| 1284 | const siginfo_t* const si = (const siginfo_t*) si0; |
| 1285 | |
| 1286 | char buf[20]; |
| 1287 | os->print("siginfo:" ); |
| 1288 | |
| 1289 | if (!si) { |
| 1290 | os->print(" <null>" ); |
| 1291 | return; |
| 1292 | } |
| 1293 | |
| 1294 | const int sig = si->si_signo; |
| 1295 | |
| 1296 | os->print(" si_signo: %d (%s)" , sig, os::Posix::get_signal_name(sig, buf, sizeof(buf))); |
| 1297 | |
| 1298 | enum_sigcode_desc_t ed; |
| 1299 | get_signal_code_description(si, &ed); |
| 1300 | os->print(", si_code: %d (%s)" , si->si_code, ed.s_name); |
| 1301 | |
| 1302 | if (si->si_errno) { |
| 1303 | os->print(", si_errno: %d" , si->si_errno); |
| 1304 | } |
| 1305 | |
| 1306 | // Output additional information depending on the signal code. |
| 1307 | |
| 1308 | // Note: Many implementations lump si_addr, si_pid, si_uid etc. together as unions, |
| 1309 | // so it depends on the context which member to use. For synchronous error signals, |
| 1310 | // we print si_addr, unless the signal was sent by another process or thread, in |
| 1311 | // which case we print out pid or tid of the sender. |
| 1312 | if (signal_sent_by_kill(si)) { |
| 1313 | const pid_t pid = si->si_pid; |
| 1314 | os->print(", si_pid: %ld" , (long) pid); |
| 1315 | if (IS_VALID_PID(pid)) { |
| 1316 | const pid_t me = getpid(); |
| 1317 | if (me == pid) { |
| 1318 | os->print(" (current process)" ); |
| 1319 | } |
| 1320 | } else { |
| 1321 | os->print(" (invalid)" ); |
| 1322 | } |
| 1323 | os->print(", si_uid: %ld" , (long) si->si_uid); |
| 1324 | if (sig == SIGCHLD) { |
| 1325 | os->print(", si_status: %d" , si->si_status); |
| 1326 | } |
| 1327 | } else if (sig == SIGSEGV || sig == SIGBUS || sig == SIGILL || |
| 1328 | sig == SIGTRAP || sig == SIGFPE) { |
| 1329 | os->print(", si_addr: " PTR_FORMAT, p2i(si->si_addr)); |
| 1330 | #ifdef SIGPOLL |
| 1331 | } else if (sig == SIGPOLL) { |
| 1332 | os->print(", si_band: %ld" , si->si_band); |
| 1333 | #endif |
| 1334 | } |
| 1335 | |
| 1336 | } |
| 1337 | |
| 1338 | bool os::signal_thread(Thread* thread, int sig, const char* reason) { |
| 1339 | OSThread* osthread = thread->osthread(); |
| 1340 | if (osthread) { |
| 1341 | #if defined (SOLARIS) |
| 1342 | // Note: we cannot use pthread_kill on Solaris - not because |
| 1343 | // it's missing, but because we do not have the pthread_t id. |
| 1344 | int status = thr_kill(osthread->thread_id(), sig); |
| 1345 | #else |
| 1346 | int status = pthread_kill(osthread->pthread_id(), sig); |
| 1347 | #endif |
| 1348 | if (status == 0) { |
| 1349 | Events::log(Thread::current(), "sent signal %d to Thread " INTPTR_FORMAT " because %s." , |
| 1350 | sig, p2i(thread), reason); |
| 1351 | return true; |
| 1352 | } |
| 1353 | } |
| 1354 | return false; |
| 1355 | } |
| 1356 | |
| 1357 | int os::Posix::unblock_thread_signal_mask(const sigset_t *set) { |
| 1358 | return pthread_sigmask(SIG_UNBLOCK, set, NULL); |
| 1359 | } |
| 1360 | |
| 1361 | address os::Posix::ucontext_get_pc(const ucontext_t* ctx) { |
| 1362 | #if defined(AIX) |
| 1363 | return Aix::ucontext_get_pc(ctx); |
| 1364 | #elif defined(BSD) |
| 1365 | return Bsd::ucontext_get_pc(ctx); |
| 1366 | #elif defined(LINUX) |
| 1367 | return Linux::ucontext_get_pc(ctx); |
| 1368 | #elif defined(SOLARIS) |
| 1369 | return Solaris::ucontext_get_pc(ctx); |
| 1370 | #else |
| 1371 | VMError::report_and_die("unimplemented ucontext_get_pc" ); |
| 1372 | #endif |
| 1373 | } |
| 1374 | |
| 1375 | void os::Posix::ucontext_set_pc(ucontext_t* ctx, address pc) { |
| 1376 | #if defined(AIX) |
| 1377 | Aix::ucontext_set_pc(ctx, pc); |
| 1378 | #elif defined(BSD) |
| 1379 | Bsd::ucontext_set_pc(ctx, pc); |
| 1380 | #elif defined(LINUX) |
| 1381 | Linux::ucontext_set_pc(ctx, pc); |
| 1382 | #elif defined(SOLARIS) |
| 1383 | Solaris::ucontext_set_pc(ctx, pc); |
| 1384 | #else |
| 1385 | VMError::report_and_die("unimplemented ucontext_get_pc" ); |
| 1386 | #endif |
| 1387 | } |
| 1388 | |
| 1389 | char* os::Posix::describe_pthread_attr(char* buf, size_t buflen, const pthread_attr_t* attr) { |
| 1390 | size_t stack_size = 0; |
| 1391 | size_t guard_size = 0; |
| 1392 | int detachstate = 0; |
| 1393 | pthread_attr_getstacksize(attr, &stack_size); |
| 1394 | pthread_attr_getguardsize(attr, &guard_size); |
| 1395 | // Work around linux NPTL implementation error, see also os::create_thread() in os_linux.cpp. |
| 1396 | LINUX_ONLY(stack_size -= guard_size); |
| 1397 | pthread_attr_getdetachstate(attr, &detachstate); |
| 1398 | jio_snprintf(buf, buflen, "stacksize: " SIZE_FORMAT "k, guardsize: " SIZE_FORMAT "k, %s" , |
| 1399 | stack_size / 1024, guard_size / 1024, |
| 1400 | (detachstate == PTHREAD_CREATE_DETACHED ? "detached" : "joinable" )); |
| 1401 | return buf; |
| 1402 | } |
| 1403 | |
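|   | // Canonicalizes 'filename' into the caller-provided buffer 'outbuf' of size |
|   | // 'outbuflen'. Returns outbuf on success; on failure returns NULL with errno set |
|   | // (ENAMETOOLONG if the resolved path does not fit into the buffer). |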
| 1404 | char* os::Posix::realpath(const char* filename, char* outbuf, size_t outbuflen) { |
| 1405 | |
| 1406 | if (filename == NULL || outbuf == NULL || outbuflen < 1) { |
| 1407 | assert(false, "os::Posix::realpath: invalid arguments." ); |
| 1408 | errno = EINVAL; |
| 1409 | return NULL; |
| 1410 | } |
| 1411 | |
| 1412 | char* result = NULL; |
| 1413 | |
| 1414 | // This assumes platform realpath() is implemented according to POSIX.1-2008. |
| 1415 | // POSIX.1-2008 allows specifying NULL for the output buffer, in which case the |
| 1416 | // output buffer is dynamically allocated and must be ::free()'d by the caller. |
| 1417 | char* p = ::realpath(filename, NULL); |
| 1418 | if (p != NULL) { |
| 1419 | if (strlen(p) < outbuflen) { |
| 1420 | strcpy(outbuf, p); |
| 1421 | result = outbuf; |
| 1422 | } else { |
| 1423 | errno = ENAMETOOLONG; |
| 1424 | } |
| 1425 | ::free(p); // *not* os::free |
| 1426 | } else { |
| 1427 | // Fallback for platforms struggling with modern Posix standards (AIX 5.3, 6.1). If realpath |
| 1428 | // returns EINVAL, this may indicate that realpath is not POSIX.1-2008 compatible and |
| 1429 | // that it complains about the NULL we handed down as user buffer. |
| 1430 | // In this case, use the user provided buffer but at least check whether realpath caused |
| 1431 | // a memory overwrite. |
| 1432 | if (errno == EINVAL) { |
| 1433 | outbuf[outbuflen - 1] = '\0'; |
| 1434 | p = ::realpath(filename, outbuf); |
| 1435 | if (p != NULL) { |
| 1436 | guarantee(outbuf[outbuflen - 1] == '\0', "realpath buffer overwrite detected." ); |
| 1437 | result = p; |
| 1438 | } |
| 1439 | } |
| 1440 | } |
| 1441 | return result; |
| 1442 | |
| 1443 | } |
| 1444 | |
| 1445 | int os::stat(const char *path, struct stat *sbuf) { |
| 1446 | return ::stat(path, sbuf); |
| 1447 | } |
| 1448 | |
| 1449 | char * os::native_path(char *path) { |
| 1450 | return path; |
| 1451 | } |
| 1452 | |
| 1453 | // Check minimum allowable stack sizes for thread creation and to initialize |
| 1454 | // the java system classes, including StackOverflowError - depends on page |
| 1455 | // size. |
| 1456 | // The space needed for frames during startup is platform dependent. It |
| 1457 | // depends on word size, platform calling conventions, C frame layout and |
| 1458 | // interpreter/C1/C2 design decisions. Therefore this is given in a |
| 1459 | // platform (os/cpu) dependent constant. |
| 1460 | // To this, space for guard mechanisms is added, which depends on the |
| 1461 | // page size which again depends on the concrete system the VM is running |
| 1462 | // on. Space for libc guard pages is not included in this size. |
| 1463 | jint os::Posix::set_minimum_stack_sizes() { |
| 1464 | size_t os_min_stack_allowed = SOLARIS_ONLY(thr_min_stack()) NOT_SOLARIS(PTHREAD_STACK_MIN); |
| 1465 | |
| 1466 | _java_thread_min_stack_allowed = _java_thread_min_stack_allowed + |
| 1467 | JavaThread::stack_guard_zone_size() + |
| 1468 | JavaThread::stack_shadow_zone_size(); |
| 1469 | |
| 1470 | _java_thread_min_stack_allowed = align_up(_java_thread_min_stack_allowed, vm_page_size()); |
| 1471 | _java_thread_min_stack_allowed = MAX2(_java_thread_min_stack_allowed, os_min_stack_allowed); |
| 1472 | |
| 1473 | size_t stack_size_in_bytes = ThreadStackSize * K; |
| 1474 | if (stack_size_in_bytes != 0 && |
| 1475 | stack_size_in_bytes < _java_thread_min_stack_allowed) { |
| 1476 | // The '-Xss' and '-XX:ThreadStackSize=N' options both set |
| 1477 | // ThreadStackSize so we go with "Java thread stack size" instead |
| 1478 | // of "ThreadStackSize" to be more friendly. |
| 1479 | tty->print_cr("\nThe Java thread stack size specified is too small. " |
| 1480 | "Specify at least " SIZE_FORMAT "k" , |
| 1481 | _java_thread_min_stack_allowed / K); |
| 1482 | return JNI_ERR; |
| 1483 | } |
| 1484 | |
| 1485 | // Make the stack size a multiple of the page size so that |
| 1486 | // the yellow/red zones can be guarded. |
| 1487 | JavaThread::set_stack_size_at_create(align_up(stack_size_in_bytes, vm_page_size())); |
| 1488 | |
| 1489 | // Reminder: a compiler thread is a Java thread. |
| 1490 | _compiler_thread_min_stack_allowed = _compiler_thread_min_stack_allowed + |
| 1491 | JavaThread::stack_guard_zone_size() + |
| 1492 | JavaThread::stack_shadow_zone_size(); |
| 1493 | |
| 1494 | _compiler_thread_min_stack_allowed = align_up(_compiler_thread_min_stack_allowed, vm_page_size()); |
| 1495 | _compiler_thread_min_stack_allowed = MAX2(_compiler_thread_min_stack_allowed, os_min_stack_allowed); |
| 1496 | |
| 1497 | stack_size_in_bytes = CompilerThreadStackSize * K; |
| 1498 | if (stack_size_in_bytes != 0 && |
| 1499 | stack_size_in_bytes < _compiler_thread_min_stack_allowed) { |
| 1500 | tty->print_cr("\nThe CompilerThreadStackSize specified is too small. " |
| 1501 | "Specify at least " SIZE_FORMAT "k" , |
| 1502 | _compiler_thread_min_stack_allowed / K); |
| 1503 | return JNI_ERR; |
| 1504 | } |
| 1505 | |
| 1506 | _vm_internal_thread_min_stack_allowed = align_up(_vm_internal_thread_min_stack_allowed, vm_page_size()); |
| 1507 | _vm_internal_thread_min_stack_allowed = MAX2(_vm_internal_thread_min_stack_allowed, os_min_stack_allowed); |
| 1508 | |
| 1509 | stack_size_in_bytes = VMThreadStackSize * K; |
| 1510 | if (stack_size_in_bytes != 0 && |
| 1511 | stack_size_in_bytes < _vm_internal_thread_min_stack_allowed) { |
| 1512 | tty->print_cr("\nThe VMThreadStackSize specified is too small. " |
| 1513 | "Specify at least " SIZE_FORMAT "k" , |
| 1514 | _vm_internal_thread_min_stack_allowed / K); |
| 1515 | return JNI_ERR; |
| 1516 | } |
| 1517 | return JNI_OK; |
| 1518 | } |
| 1519 | |
| 1520 | // Called when creating the thread. The minimum stack sizes have already been calculated.
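| | // The effective size is chosen as follows (a sketch of the logic below):
| | //   1. an explicitly requested size (req_stack_size), if non-zero;
| | //   2. otherwise the type-specific flag (ThreadStackSize, CompilerThreadStackSize
| | //      or VMThreadStackSize), if set;
| | //   3. otherwise the platform default for that thread type.
| | // The result is then raised to the per-type minimum and page-aligned.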
| 1521 | size_t os::Posix::get_initial_stack_size(ThreadType thr_type, size_t req_stack_size) { |
| 1522 | size_t stack_size; |
| 1523 | if (req_stack_size == 0) { |
| 1524 | stack_size = default_stack_size(thr_type); |
| 1525 | } else { |
| 1526 | stack_size = req_stack_size; |
| 1527 | } |
| 1528 | |
| 1529 | switch (thr_type) { |
| 1530 | case os::java_thread: |
| 1531 | // Java threads use ThreadStackSize, whose default value can be
| 1532 | // changed with the -Xss flag.
| 1533 | if (req_stack_size == 0 && JavaThread::stack_size_at_create() > 0) { |
| 1534 | // no requested size and we have a more specific default value |
| 1535 | stack_size = JavaThread::stack_size_at_create(); |
| 1536 | } |
| 1537 | stack_size = MAX2(stack_size, |
| 1538 | _java_thread_min_stack_allowed); |
| 1539 | break; |
| 1540 | case os::compiler_thread: |
| 1541 | if (req_stack_size == 0 && CompilerThreadStackSize > 0) { |
| 1542 | // no requested size and we have a more specific default value |
| 1543 | stack_size = (size_t)(CompilerThreadStackSize * K); |
| 1544 | } |
| 1545 | stack_size = MAX2(stack_size, |
| 1546 | _compiler_thread_min_stack_allowed); |
| 1547 | break; |
| 1548 | case os::vm_thread: |
| 1549 | case os::pgc_thread: |
| 1550 | case os::cgc_thread: |
| 1551 | case os::watcher_thread: |
| 1552 | default: // presume the unknown thr_type is a VM internal |
| 1553 | if (req_stack_size == 0 && VMThreadStackSize > 0) { |
| 1554 | // no requested size and we have a more specific default value |
| 1555 | stack_size = (size_t)(VMThreadStackSize * K); |
| 1556 | } |
| 1557 | |
| 1558 | stack_size = MAX2(stack_size, |
| 1559 | _vm_internal_thread_min_stack_allowed); |
| 1560 | break; |
| 1561 | } |
| 1562 | |
| 1563 | // pthread_attr_setstacksize() may require that the size be rounded up to the OS page size. |
| 1564 | // Be careful not to round up to 0. Align down in that case. |
| 1565 | if (stack_size <= SIZE_MAX - vm_page_size()) { |
| 1566 | stack_size = align_up(stack_size, vm_page_size()); |
| 1567 | } else { |
| 1568 | stack_size = align_down(stack_size, vm_page_size()); |
| 1569 | } |
| 1570 | |
| 1571 | return stack_size; |
| 1572 | } |
| 1573 | |
| 1574 | bool os::Posix::is_root(uid_t uid) {
| 1575 | return ROOT_UID == uid; |
| 1576 | } |
| 1577 | |
| 1578 | bool os::Posix::matches_effective_uid_or_root(uid_t uid) { |
| 1579 | return is_root(uid) || geteuid() == uid; |
| 1580 | } |
| 1581 | |
| 1582 | bool os::Posix::matches_effective_uid_and_gid_or_root(uid_t uid, gid_t gid) { |
| 1583 | return is_root(uid) || (geteuid() == uid && getegid() == gid); |
| 1584 | } |
| 1585 | |
| 1586 | Thread* os::ThreadCrashProtection::_protected_thread = NULL; |
| 1587 | os::ThreadCrashProtection* os::ThreadCrashProtection::_crash_protection = NULL; |
| 1588 | volatile intptr_t os::ThreadCrashProtection::_crash_mux = 0; |
| 1589 | |
| 1590 | os::ThreadCrashProtection::ThreadCrashProtection() { |
| 1591 | } |
| 1592 | |
| 1593 | /* |
| 1594 | * See the caveats for this class in os_posix.hpp |
| 1595 |  * Protects the callback call so that a raised SIGSEGV / SIGBUS jumps back
| 1596 |  * into this method, which then returns false. If neither signal is raised,
| 1597 |  * it returns true. The callback supplies the code that should be protected.
| 1598 | */ |
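| |
| | // Illustrative usage sketch (the callback class here is hypothetical):
| | //
| | //   class DummyCallback : public os::CrashProtectionCallback {
| | //    public:
| | //     virtual void call() { /* code that may raise SIGSEGV/SIGBUS */ }
| | //   };
| | //
| | //   DummyCallback cb;
| | //   os::ThreadCrashProtection crash_protection;
| | //   if (!crash_protection.call(cb)) {
| | //     // the protected code crashed; control returned here via siglongjmp
| | //   }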
| 1599 | bool os::ThreadCrashProtection::call(os::CrashProtectionCallback& cb) { |
| 1600 | sigset_t saved_sig_mask; |
| 1601 | |
| 1602 | Thread::muxAcquire(&_crash_mux, "CrashProtection" ); |
| 1603 | |
| 1604 | _protected_thread = Thread::current_or_null(); |
| 1605 | assert(_protected_thread != NULL, "Cannot crash protect a NULL thread" ); |
| 1606 | |
| 1607 | // we cannot rely on sigsetjmp/siglongjmp to save/restore the signal mask |
| 1608 | // since on at least some systems (OS X) siglongjmp will restore the mask |
| 1609 | // for the process, not the thread |
| 1610 | pthread_sigmask(0, NULL, &saved_sig_mask); |
| 1611 | if (sigsetjmp(_jmpbuf, 0) == 0) { |
| 1612 | // make sure we can see in the signal handler that we have crash protection |
| 1613 | // installed |
| 1614 | _crash_protection = this; |
| 1615 | cb.call(); |
| 1616 | // and clear the crash protection |
| 1617 | _crash_protection = NULL; |
| 1618 | _protected_thread = NULL; |
| 1619 | Thread::muxRelease(&_crash_mux); |
| 1620 | return true; |
| 1621 | } |
| 1622 | // this happens when we siglongjmp() back |
| 1623 | pthread_sigmask(SIG_SETMASK, &saved_sig_mask, NULL); |
| 1624 | _crash_protection = NULL; |
| 1625 | _protected_thread = NULL; |
| 1626 | Thread::muxRelease(&_crash_mux); |
| 1627 | return false; |
| 1628 | } |
| 1629 | |
| 1630 | void os::ThreadCrashProtection::restore() { |
| 1631 | assert(_crash_protection != NULL, "must have crash protection" ); |
| 1632 | siglongjmp(_jmpbuf, 1); |
| 1633 | } |
| 1634 | |
| 1635 | void os::ThreadCrashProtection::check_crash_protection(int sig, |
| 1636 | Thread* thread) { |
| 1637 | |
| 1638 | if (thread != NULL && |
| 1639 | thread == _protected_thread && |
| 1640 | _crash_protection != NULL) { |
| 1641 | |
| 1642 | if (sig == SIGSEGV || sig == SIGBUS) { |
| 1643 | _crash_protection->restore(); |
| 1644 | } |
| 1645 | } |
| 1646 | } |
| 1647 | |
| 1648 | // Shared clock/time and other supporting routines for pthread_mutex/cond |
| 1649 | // initialization. This is enabled on Solaris but only some of the clock/time |
| 1650 | // functionality is actually used there. |
| 1651 | |
| 1652 | // Shared condattr object for use with relative timed-waits. Will be associated |
| 1653 | // with CLOCK_MONOTONIC if available to avoid issues with time-of-day changes, |
| 1654 | // but otherwise whatever default is used by the platform - generally the |
| 1655 | // time-of-day clock. |
| 1656 | static pthread_condattr_t _condAttr[1]; |
| 1657 | |
| 1658 | // Shared mutexattr to explicitly set the type to PTHREAD_MUTEX_NORMAL as not |
| 1659 | // all systems (e.g. FreeBSD) map the default to "normal". |
| 1660 | static pthread_mutexattr_t _mutexAttr[1]; |
| 1661 | |
| 1662 | // common basic initialization that is always supported |
| 1663 | static void pthread_init_common(void) { |
| 1664 | int status; |
| 1665 | if ((status = pthread_condattr_init(_condAttr)) != 0) { |
| 1666 | fatal("pthread_condattr_init: %s" , os::strerror(status)); |
| 1667 | } |
| 1668 | if ((status = pthread_mutexattr_init(_mutexAttr)) != 0) { |
| 1669 | fatal("pthread_mutexattr_init: %s" , os::strerror(status)); |
| 1670 | } |
| 1671 | if ((status = pthread_mutexattr_settype(_mutexAttr, PTHREAD_MUTEX_NORMAL)) != 0) { |
| 1672 | fatal("pthread_mutexattr_settype: %s" , os::strerror(status)); |
| 1673 | } |
| 1674 | // Solaris has its own PlatformMonitor, distinct from the one for POSIX.
| 1675 | NOT_SOLARIS(os::PlatformMonitor::init();) |
| 1676 | } |
| 1677 | |
| 1678 | #ifndef SOLARIS |
| 1679 | sigset_t sigs; |
| 1680 | struct sigaction sigact[NSIG]; |
| 1681 | |
| 1682 | struct sigaction* os::Posix::get_preinstalled_handler(int sig) { |
| 1683 | if (sigismember(&sigs, sig)) { |
| 1684 | return &sigact[sig]; |
| 1685 | } |
| 1686 | return NULL; |
| 1687 | } |
| 1688 | |
| 1689 | void os::Posix::save_preinstalled_handler(int sig, struct sigaction& oldAct) { |
| 1690 | assert(sig > 0 && sig < NSIG, "vm signal out of expected range" ); |
| 1691 | sigact[sig] = oldAct; |
| 1692 | sigaddset(&sigs, sig); |
| 1693 | } |
| 1694 | #endif |
| 1695 | |
| 1696 | // Not all POSIX types and APIs are available on all notionally "POSIX"
| 1697 | // platforms. If we have build-time support then we will check for actual |
| 1698 | // runtime support via dlopen/dlsym lookup. This allows for running on an |
| 1699 | // older OS version compared to the build platform. But if there is no |
| 1700 | // build time support then there cannot be any runtime support as we do not |
| 1701 | // know what the runtime types would be (for example clockid_t might be an |
| 1702 | // int or int64_t). |
| 1703 | // |
| 1704 | #ifdef SUPPORTS_CLOCK_MONOTONIC |
| 1705 | |
| 1706 | // This means we have clockid_t, clock_gettime et al and CLOCK_MONOTONIC |
| 1707 | |
| 1708 | int (*os::Posix::_clock_gettime)(clockid_t, struct timespec *) = NULL; |
| 1709 | int (*os::Posix::_clock_getres)(clockid_t, struct timespec *) = NULL; |
| 1710 | |
| 1711 | static int (*_pthread_condattr_setclock)(pthread_condattr_t *, clockid_t) = NULL; |
| 1712 | |
| 1713 | static bool _use_clock_monotonic_condattr = false; |
| 1714 | |
| 1715 | // Determine which POSIX APIs are present and do appropriate
| 1716 | // configuration. |
| 1717 | void os::Posix::init(void) { |
| 1718 | |
| 1719 | // NOTE: no logging available when this is called. Put logging |
| 1720 | // statements in init_2(). |
| 1721 | |
| 1722 | // 1. Check for CLOCK_MONOTONIC support. |
| 1723 | |
| 1724 | void* handle = NULL; |
| 1725 | |
| 1726 | // For Linux we need librt; on other OSes these functions
| 1727 | // can be found in the regular libc.
| 1728 | #ifdef NEEDS_LIBRT |
| 1729 | // We do the dlopens in this particular order due to a bug in the Linux
| 1730 | // dynamic loader (see 6348968) that leads to a crash on exit.
| 1731 | handle = dlopen("librt.so.1" , RTLD_LAZY); |
| 1732 | if (handle == NULL) { |
| 1733 | handle = dlopen("librt.so" , RTLD_LAZY); |
| 1734 | } |
| 1735 | #endif |
| 1736 | |
| 1737 | if (handle == NULL) { |
| 1738 | handle = RTLD_DEFAULT; |
| 1739 | } |
| 1740 | |
| 1741 | int (*clock_getres_func)(clockid_t, struct timespec*) = |
| 1742 | (int(*)(clockid_t, struct timespec*))dlsym(handle, "clock_getres" ); |
| 1743 | int (*clock_gettime_func)(clockid_t, struct timespec*) = |
| 1744 | (int(*)(clockid_t, struct timespec*))dlsym(handle, "clock_gettime" ); |
| 1745 | if (clock_getres_func != NULL && clock_gettime_func != NULL) { |
| 1746 | // We assume that if both clock_gettime and clock_getres support |
| 1747 | // CLOCK_MONOTONIC then the OS provides true high-res monotonic clock. |
| 1748 | struct timespec res; |
| 1749 | struct timespec tp; |
| 1750 | if (clock_getres_func(CLOCK_MONOTONIC, &res) == 0 && |
| 1751 | clock_gettime_func(CLOCK_MONOTONIC, &tp) == 0) { |
| 1752 | // Yes, monotonic clock is supported. |
| 1753 | _clock_gettime = clock_gettime_func; |
| 1754 | _clock_getres = clock_getres_func; |
| 1755 | } else { |
| 1756 | #ifdef NEEDS_LIBRT |
| 1757 | // Close librt if there is no monotonic clock. |
| 1758 | if (handle != RTLD_DEFAULT) { |
| 1759 | dlclose(handle); |
| 1760 | } |
| 1761 | #endif |
| 1762 | } |
| 1763 | } |
| 1764 | |
| 1765 | // 2. Check for pthread_condattr_setclock support. |
| 1766 | |
| 1767 | // libpthread is already loaded. |
| 1768 | int (*condattr_setclock_func)(pthread_condattr_t*, clockid_t) = |
| 1769 | (int (*)(pthread_condattr_t*, clockid_t))dlsym(RTLD_DEFAULT, |
| 1770 | "pthread_condattr_setclock" ); |
| 1771 | if (condattr_setclock_func != NULL) { |
| 1772 | _pthread_condattr_setclock = condattr_setclock_func; |
| 1773 | } |
| 1774 | |
| 1775 | // Now do general initialization. |
| 1776 | |
| 1777 | pthread_init_common(); |
| 1778 | |
| 1779 | #ifndef SOLARIS |
| 1780 | int status; |
| 1781 | if (_pthread_condattr_setclock != NULL && _clock_gettime != NULL) { |
| 1782 | if ((status = _pthread_condattr_setclock(_condAttr, CLOCK_MONOTONIC)) != 0) { |
| 1783 | if (status == EINVAL) { |
| 1784 | _use_clock_monotonic_condattr = false; |
| 1785 | warning("Unable to use monotonic clock with relative timed-waits" \ |
| 1786 | " - changes to the time-of-day clock may have adverse affects" ); |
| 1787 | } else { |
| 1788 | fatal("pthread_condattr_setclock: %s" , os::strerror(status)); |
| 1789 | } |
| 1790 | } else { |
| 1791 | _use_clock_monotonic_condattr = true; |
| 1792 | } |
| 1793 | } |
| 1794 | #endif // !SOLARIS |
| 1795 | |
| 1796 | } |
| 1797 | |
| 1798 | void os::Posix::init_2(void) { |
| 1799 | #ifndef SOLARIS |
| 1800 | log_info(os)("Use of CLOCK_MONOTONIC is%s supported" , |
| 1801 | (_clock_gettime != NULL ? "" : " not" )); |
| 1802 | log_info(os)("Use of pthread_condattr_setclock is%s supported" , |
| 1803 | (_pthread_condattr_setclock != NULL ? "" : " not" )); |
| 1804 | log_info(os)("Relative timed-wait using pthread_cond_timedwait is associated with %s" , |
| 1805 | _use_clock_monotonic_condattr ? "CLOCK_MONOTONIC" : "the default clock" ); |
| 1806 | sigemptyset(&sigs); |
| 1807 | #endif // !SOLARIS |
| 1808 | } |
| 1809 | |
| 1810 | #else // !SUPPORTS_CLOCK_MONOTONIC |
| 1811 | |
| 1812 | void os::Posix::init(void) { |
| 1813 | pthread_init_common(); |
| 1814 | } |
| 1815 | |
| 1816 | void os::Posix::init_2(void) { |
| 1817 | #ifndef SOLARIS |
| 1818 | log_info(os)("Use of CLOCK_MONOTONIC is not supported" ); |
| 1819 | log_info(os)("Use of pthread_condattr_setclock is not supported" ); |
| 1820 | log_info(os)("Relative timed-wait using pthread_cond_timedwait is associated with the default clock" ); |
| 1821 | sigemptyset(&sigs); |
| 1822 | #endif // !SOLARIS |
| 1823 | } |
| 1824 | |
| 1825 | #endif // SUPPORTS_CLOCK_MONOTONIC |
| 1826 | |
| 1827 | // Utility to convert the given timeout to an absolute timespec |
| 1828 | // (based on the appropriate clock) to use with pthread_cond_timedwait()
| 1829 | // and sem_timedwait(). |
| 1830 | // The clock queried here must be the clock used to manage the |
| 1831 | // timeout of the condition variable or semaphore. |
| 1832 | // |
| 1833 | // The passed in timeout value is either a relative time in nanoseconds |
| 1834 | // or an absolute time in milliseconds. A relative timeout will be |
| 1835 | // associated with CLOCK_MONOTONIC if available, unless the real-time clock |
| 1836 | // is explicitly requested; otherwise, or if absolute, |
| 1837 | // the default time-of-day clock will be used. |
| 1838 | |
| 1839 | // The given time is a 64-bit value, while the time_t used in the timespec is
| 1840 | // sometimes a signed 32-bit value, so we have to watch for overflow if times
| 1841 | // far in the future are given. Further, on Solaris versions
| 1842 | // prior to 10 there is a restriction (see cond_timedwait) that the specified
| 1843 | // number of seconds, in abstime, must be less than current_time + 100000000.
| 1844 | // As it will be over 20 years before "now + 100000000" overflows, we can
| 1845 | // ignore overflow and just impose a hard limit on the seconds using the value
| 1846 | // of "now + 100000000". This places a limit on the timeout of about 3.17
| 1847 | // years from "now".
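| | // (100000000 seconds is roughly 100000000 / (365 * 24 * 3600) ~= 3.17 years,
| | // which is where the 3.17 year figure above comes from.)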
| 1848 | // |
| 1849 | #define MAX_SECS 100000000 |
| 1850 | |
| 1851 | // Calculate a new absolute time that is "timeout" nanoseconds from "now". |
| 1852 | // "unit" indicates the unit of "now_part_sec" (may be nanos or micros depending |
| 1853 | // on which clock API is being used). |
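| | // Illustrative example (hypothetical values): with now_sec = 1000,
| | // now_part_sec = 300000 microseconds (unit = MICROUNITS) and timeout =
| | // 2500000000 ns, the result is tv_sec = 1002 and tv_nsec = 800000000.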
| 1854 | static void calc_rel_time(timespec* abstime, jlong timeout, jlong now_sec, |
| 1855 | jlong now_part_sec, jlong unit) { |
| 1856 | time_t max_secs = now_sec + MAX_SECS; |
| 1857 | |
| 1858 | jlong seconds = timeout / NANOUNITS; |
| 1859 | timeout %= NANOUNITS; // remaining nanos |
| 1860 | |
| 1861 | if (seconds >= MAX_SECS) { |
| 1862 | // More seconds than we can add, so pin to max_secs. |
| 1863 | abstime->tv_sec = max_secs; |
| 1864 | abstime->tv_nsec = 0; |
| 1865 | } else { |
| 1866 | abstime->tv_sec = now_sec + seconds; |
| 1867 | long nanos = (now_part_sec * (NANOUNITS / unit)) + timeout; |
| 1868 | if (nanos >= NANOUNITS) { // overflow |
| 1869 | abstime->tv_sec += 1; |
| 1870 | nanos -= NANOUNITS; |
| 1871 | } |
| 1872 | abstime->tv_nsec = nanos; |
| 1873 | } |
| 1874 | } |
| 1875 | |
| 1876 | // Unpack the given deadline in milliseconds since the epoch, into the given timespec. |
| 1877 | // The current time in seconds is also passed in to enforce an upper bound as discussed above. |
| 1878 | // This is only used with gettimeofday, when clock_gettime is not available. |
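| | // Illustrative example (hypothetical values): a deadline of 1500000123456 ms
| | // unpacks to tv_sec = 1500000123 and tv_nsec = 456000000, provided that does
| | // not exceed the now_sec + MAX_SECS cap.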
| 1879 | static void unpack_abs_time(timespec* abstime, jlong deadline, jlong now_sec) { |
| 1880 | time_t max_secs = now_sec + MAX_SECS; |
| 1881 | |
| 1882 | jlong seconds = deadline / MILLIUNITS; |
| 1883 | jlong millis = deadline % MILLIUNITS; |
| 1884 | |
| 1885 | if (seconds >= max_secs) { |
| 1886 | // Absolute seconds exceeds allowed max, so pin to max_secs. |
| 1887 | abstime->tv_sec = max_secs; |
| 1888 | abstime->tv_nsec = 0; |
| 1889 | } else { |
| 1890 | abstime->tv_sec = seconds; |
| 1891 | abstime->tv_nsec = millis * (NANOUNITS / MILLIUNITS); |
| 1892 | } |
| 1893 | } |
| 1894 | |
| 1895 | static jlong millis_to_nanos(jlong millis) { |
| 1896 | // We have to watch for overflow when converting millis to nanos, |
| 1897 | // but if millis is that large then we will end up limiting to |
| 1898 | // MAX_SECS anyway, so just do that here. |
| 1899 | if (millis / MILLIUNITS > MAX_SECS) { |
| 1900 | millis = jlong(MAX_SECS) * MILLIUNITS; |
| 1901 | } |
| 1902 | return millis * (NANOUNITS / MILLIUNITS); |
| 1903 | } |
| 1904 | |
| 1905 | static void to_abstime(timespec* abstime, jlong timeout, |
| 1906 | bool isAbsolute, bool isRealtime) { |
| 1907 | DEBUG_ONLY(int max_secs = MAX_SECS;) |
| 1908 | |
| 1909 | if (timeout < 0) { |
| 1910 | timeout = 0; |
| 1911 | } |
| 1912 | |
| 1913 | #ifdef SUPPORTS_CLOCK_MONOTONIC |
| 1914 | |
| 1915 | clockid_t clock = CLOCK_MONOTONIC; |
| 1916 | // need to ensure we have a runtime check for clock_gettime support |
| 1917 | if (!isAbsolute && os::Posix::supports_monotonic_clock()) { |
| 1918 | if (!_use_clock_monotonic_condattr || isRealtime) { |
| 1919 | clock = CLOCK_REALTIME; |
| 1920 | } |
| 1921 | struct timespec now; |
| 1922 | int status = os::Posix::clock_gettime(clock, &now); |
| 1923 | assert_status(status == 0, status, "clock_gettime" ); |
| 1924 | calc_rel_time(abstime, timeout, now.tv_sec, now.tv_nsec, NANOUNITS); |
| 1925 | DEBUG_ONLY(max_secs += now.tv_sec;) |
| 1926 | } else { |
| 1927 | |
| 1928 | #else |
| 1929 | |
| 1930 | { // Match the block scope. |
| 1931 | |
| 1932 | #endif // SUPPORTS_CLOCK_MONOTONIC |
| 1933 | |
| 1934 | // Time-of-day clock is all we can reliably use. |
| 1935 | struct timeval now; |
| 1936 | int status = gettimeofday(&now, NULL); |
| 1937 | assert_status(status == 0, errno, "gettimeofday" ); |
| 1938 | if (isAbsolute) { |
| 1939 | unpack_abs_time(abstime, timeout, now.tv_sec); |
| 1940 | } else { |
| 1941 | calc_rel_time(abstime, timeout, now.tv_sec, now.tv_usec, MICROUNITS); |
| 1942 | } |
| 1943 | DEBUG_ONLY(max_secs += now.tv_sec;) |
| 1944 | } |
| 1945 | |
| 1946 | assert(abstime->tv_sec >= 0, "tv_sec < 0" ); |
| 1947 | assert(abstime->tv_sec <= max_secs, "tv_sec > max_secs" ); |
| 1948 | assert(abstime->tv_nsec >= 0, "tv_nsec < 0" ); |
| 1949 | assert(abstime->tv_nsec < NANOUNITS, "tv_nsec >= NANOUNITS" ); |
| 1950 | } |
| 1951 | |
| 1952 | // Create an absolute time 'millis' milliseconds in the future, using the |
| 1953 | // real-time (time-of-day) clock. Used by PosixSemaphore. |
| 1954 | void os::Posix::to_RTC_abstime(timespec* abstime, int64_t millis) { |
| 1955 | to_abstime(abstime, millis_to_nanos(millis), |
| 1956 | false /* not absolute */, |
| 1957 | true /* use real-time clock */); |
| 1958 | } |
| 1959 | |
| 1960 | // Shared pthread_mutex/cond based PlatformEvent implementation. |
| 1961 | // Not currently usable by Solaris. |
| 1962 | |
| 1963 | #ifndef SOLARIS |
| 1964 | |
| 1965 | // PlatformEvent |
| 1966 | // |
| 1967 | // Assumption: |
| 1968 | // Only one parker can exist on an event, which is why we allocate |
| 1969 | // them per-thread. Multiple unparkers can coexist. |
| 1970 | // |
| 1971 | // _event serves as a restricted-range semaphore. |
| 1972 | // -1 : thread is blocked, i.e. there is a waiter |
| 1973 | // 0 : neutral: thread is running or ready, |
| 1974 | // could have been signaled after a wait started |
| 1975 | // 1 : signaled - thread is running or ready |
| 1976 | // |
| 1977 | // Having three states allows for some detection of bad usage - see |
| 1978 | // comments on unpark(). |
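| | //
| | // Illustrative sequences (see also the transition comments in park()/unpark()):
| | //   unpark(); park()  - _event goes 0 -> 1 -> 0 and park() returns immediately.
| | //   park(); unpark()  - _event goes 0 -> -1 and the caller blocks; unpark()
| | //                       sets it to 1 and signals; the waiter resets it to 0.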
| 1979 | |
| 1980 | os::PlatformEvent::PlatformEvent() { |
| 1981 | int status = pthread_cond_init(_cond, _condAttr); |
| 1982 | assert_status(status == 0, status, "cond_init" ); |
| 1983 | status = pthread_mutex_init(_mutex, _mutexAttr); |
| 1984 | assert_status(status == 0, status, "mutex_init" ); |
| 1985 | _event = 0; |
| 1986 | _nParked = 0; |
| 1987 | } |
| 1988 | |
| 1989 | void os::PlatformEvent::park() { // AKA "down()" |
| 1990 | // Transitions for _event: |
| 1991 | // -1 => -1 : illegal |
| 1992 | // 1 => 0 : pass - return immediately |
| 1993 | // 0 => -1 : block; then set _event to 0 before returning |
| 1994 | |
| 1995 | // Invariant: Only the thread associated with the PlatformEvent |
| 1996 | // may call park(). |
| 1997 | assert(_nParked == 0, "invariant" ); |
| 1998 | |
| 1999 | int v; |
| 2000 | |
| 2001 | // atomically decrement _event |
| 2002 | for (;;) { |
| 2003 | v = _event; |
| 2004 | if (Atomic::cmpxchg(v - 1, &_event, v) == v) break; |
| 2005 | } |
| 2006 | guarantee(v >= 0, "invariant" ); |
| 2007 | |
| 2008 | if (v == 0) { // Do this the hard way by blocking ... |
| 2009 | int status = pthread_mutex_lock(_mutex); |
| 2010 | assert_status(status == 0, status, "mutex_lock" ); |
| 2011 | guarantee(_nParked == 0, "invariant" ); |
| 2012 | ++_nParked; |
| 2013 | while (_event < 0) { |
| 2014 | // OS-level "spurious wakeups" are ignored |
| 2015 | status = pthread_cond_wait(_cond, _mutex); |
| 2016 | assert_status(status == 0, status, "cond_wait" ); |
| 2017 | } |
| 2018 | --_nParked; |
| 2019 | |
| 2020 | _event = 0; |
| 2021 | status = pthread_mutex_unlock(_mutex); |
| 2022 | assert_status(status == 0, status, "mutex_unlock" ); |
| 2023 | // Paranoia to ensure our locked and lock-free paths interact |
| 2024 | // correctly with each other. |
| 2025 | OrderAccess::fence(); |
| 2026 | } |
| 2027 | guarantee(_event >= 0, "invariant" ); |
| 2028 | } |
| 2029 | |
| 2030 | int os::PlatformEvent::park(jlong millis) { |
| 2031 | // Transitions for _event: |
| 2032 | // -1 => -1 : illegal |
| 2033 | // 1 => 0 : pass - return immediately |
| 2034 | // 0 => -1 : block; then set _event to 0 before returning |
| 2035 | |
| 2036 | // Invariant: Only the thread associated with the Event/PlatformEvent |
| 2037 | // may call park(). |
| 2038 | assert(_nParked == 0, "invariant" ); |
| 2039 | |
| 2040 | int v; |
| 2041 | // atomically decrement _event |
| 2042 | for (;;) { |
| 2043 | v = _event; |
| 2044 | if (Atomic::cmpxchg(v - 1, &_event, v) == v) break; |
| 2045 | } |
| 2046 | guarantee(v >= 0, "invariant" ); |
| 2047 | |
| 2048 | if (v == 0) { // Do this the hard way by blocking ... |
| 2049 | struct timespec abst; |
| 2050 | to_abstime(&abst, millis_to_nanos(millis), false, false); |
| 2051 | |
| 2052 | int ret = OS_TIMEOUT; |
| 2053 | int status = pthread_mutex_lock(_mutex); |
| 2054 | assert_status(status == 0, status, "mutex_lock" ); |
| 2055 | guarantee(_nParked == 0, "invariant" ); |
| 2056 | ++_nParked; |
| 2057 | |
| 2058 | while (_event < 0) { |
| 2059 | status = pthread_cond_timedwait(_cond, _mutex, &abst); |
| 2060 | assert_status(status == 0 || status == ETIMEDOUT, |
| 2061 | status, "cond_timedwait" ); |
| 2062 | // OS-level "spurious wakeups" are ignored unless the archaic |
| 2063 | // FilterSpuriousWakeups is set false. That flag should be obsoleted. |
| 2064 | if (!FilterSpuriousWakeups) break; |
| 2065 | if (status == ETIMEDOUT) break; |
| 2066 | } |
| 2067 | --_nParked; |
| 2068 | |
| 2069 | if (_event >= 0) { |
| 2070 | ret = OS_OK; |
| 2071 | } |
| 2072 | |
| 2073 | _event = 0; |
| 2074 | status = pthread_mutex_unlock(_mutex); |
| 2075 | assert_status(status == 0, status, "mutex_unlock" ); |
| 2076 | // Paranoia to ensure our locked and lock-free paths interact |
| 2077 | // correctly with each other. |
| 2078 | OrderAccess::fence(); |
| 2079 | return ret; |
| 2080 | } |
| 2081 | return OS_OK; |
| 2082 | } |
| 2083 | |
| 2084 | void os::PlatformEvent::unpark() { |
| 2085 | // Transitions for _event: |
| 2086 | // 0 => 1 : just return |
| 2087 | // 1 => 1 : just return |
| 2088 | // -1 => either 0 or 1; must signal target thread |
| 2089 | // That is, we can safely transition _event from -1 to either |
| 2090 | // 0 or 1. |
| 2091 | // See also: "Semaphores in Plan 9" by Mullender & Cox |
| 2092 | // |
| 2093 | // Note: Forcing a transition from "-1" to "1" on an unpark() means |
| 2094 | // that it will take two back-to-back park() calls for the owning |
| 2095 | // thread to block. This has the benefit of forcing a spurious return |
| 2096 | // from the first park() call after an unpark() call which will help |
| 2097 | // shake out uses of park() and unpark() without checking state conditions |
| 2098 | // properly. This spurious return doesn't manifest itself in any user code |
| 2099 | // but only in the correctly written condition checking loops of ObjectMonitor, |
| 2100 | // Mutex/Monitor, Thread::muxAcquire and os::sleep.
| 2101 | |
| 2102 | if (Atomic::xchg(1, &_event) >= 0) return; |
| 2103 | |
| 2104 | int status = pthread_mutex_lock(_mutex); |
| 2105 | assert_status(status == 0, status, "mutex_lock" ); |
| 2106 | int anyWaiters = _nParked; |
| 2107 | assert(anyWaiters == 0 || anyWaiters == 1, "invariant" ); |
| 2108 | status = pthread_mutex_unlock(_mutex); |
| 2109 | assert_status(status == 0, status, "mutex_unlock" ); |
| 2110 | |
| 2111 | // Note that we signal() *after* dropping the lock for "immortal" Events. |
| 2112 | // This is safe and avoids a common class of futile wakeups. In rare |
| 2113 | // circumstances this can cause a thread to return prematurely from |
| 2114 | // cond_{timed}wait() but the spurious wakeup is benign and the victim |
| 2115 | // will simply re-test the condition and re-park itself. |
| 2116 | // This provides particular benefit if the underlying platform does not |
| 2117 | // provide wait morphing. |
| 2118 | |
| 2119 | if (anyWaiters != 0) { |
| 2120 | status = pthread_cond_signal(_cond); |
| 2121 | assert_status(status == 0, status, "cond_signal" ); |
| 2122 | } |
| 2123 | } |
| 2124 | |
| 2125 | // JSR166 support |
| 2126 | |
| 2127 | os::PlatformParker::PlatformParker() { |
| 2128 | int status; |
| 2129 | status = pthread_cond_init(&_cond[REL_INDEX], _condAttr); |
| 2130 | assert_status(status == 0, status, "cond_init rel" ); |
| 2131 | status = pthread_cond_init(&_cond[ABS_INDEX], NULL); |
| 2132 | assert_status(status == 0, status, "cond_init abs" ); |
| 2133 | status = pthread_mutex_init(_mutex, _mutexAttr); |
| 2134 | assert_status(status == 0, status, "mutex_init" ); |
| 2135 | _cur_index = -1; // mark as unused |
| 2136 | } |
| 2137 | |
| 2138 | // Parker::park decrements count if > 0, else does a condvar wait. Unpark |
| 2139 | // sets count to 1 and signals condvar. Only one thread ever waits |
| 2140 | // on the condvar. Contention seen when trying to park implies that someone |
| 2141 | // is unparking you, so don't wait. And spurious returns are fine, so there |
| 2142 | // is no need to track notifications. |
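| |
| | // Note that two condition variables are used: _cond[REL_INDEX] is initialized
| | // with _condAttr and so may be associated with CLOCK_MONOTONIC for relative and
| | // untimed waits, while _cond[ABS_INDEX] uses the default attributes so that
| | // absolute deadlines are measured against the time-of-day clock.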
| 2143 | |
| 2144 | void Parker::park(bool isAbsolute, jlong time) { |
| 2145 | |
| 2146 | // Optional fast-path check: |
| 2147 | // Return immediately if a permit is available. |
| 2148 | // We depend on Atomic::xchg() having full barrier semantics |
| 2149 | // since we are doing a lock-free update to _counter. |
| 2150 | if (Atomic::xchg(0, &_counter) > 0) return; |
| 2151 | |
| 2152 | Thread* thread = Thread::current(); |
| 2153 | assert(thread->is_Java_thread(), "Must be JavaThread" ); |
| 2154 | JavaThread *jt = (JavaThread *)thread; |
| 2155 | |
| 2156 | // Optional optimization -- avoid state transitions if there's |
| 2157 | // an interrupt pending. |
| 2158 | if (Thread::is_interrupted(thread, false)) { |
| 2159 | return; |
| 2160 | } |
| 2161 | |
| 2162 | // Next, demultiplex/decode time arguments |
| 2163 | struct timespec absTime; |
| 2164 | if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all |
| 2165 | return; |
| 2166 | } |
| 2167 | if (time > 0) { |
| 2168 | to_abstime(&absTime, time, isAbsolute, false); |
| 2169 | } |
| 2170 | |
| 2171 | // Enter safepoint region |
| 2172 | // Beware of deadlocks such as 6317397. |
| 2173 | // The per-thread Parker:: mutex is a classic leaf-lock. |
| 2174 | // In particular a thread must never block on the Threads_lock while |
| 2175 | // holding the Parker:: mutex. If safepoints are pending, both the
| 2176 | // ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
| 2177 | ThreadBlockInVM tbivm(jt); |
| 2178 | |
| 2179 | // Don't wait if we cannot get the lock, since interference arises from
| 2180 | // unparking. Also re-check the interrupt before trying to wait.
| 2181 | if (Thread::is_interrupted(thread, false) || |
| 2182 | pthread_mutex_trylock(_mutex) != 0) { |
| 2183 | return; |
| 2184 | } |
| 2185 | |
| 2186 | int status; |
| 2187 | if (_counter > 0) { // no wait needed |
| 2188 | _counter = 0; |
| 2189 | status = pthread_mutex_unlock(_mutex); |
| 2190 | assert_status(status == 0, status, "invariant" ); |
| 2191 | // Paranoia to ensure our locked and lock-free paths interact |
| 2192 | // correctly with each other and Java-level accesses. |
| 2193 | OrderAccess::fence(); |
| 2194 | return; |
| 2195 | } |
| 2196 | |
| 2197 | OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */); |
| 2198 | jt->set_suspend_equivalent(); |
| 2199 | // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self() |
| 2200 | |
| 2201 | assert(_cur_index == -1, "invariant" ); |
| 2202 | if (time == 0) { |
| 2203 | _cur_index = REL_INDEX; // arbitrary choice when not timed |
| 2204 | status = pthread_cond_wait(&_cond[_cur_index], _mutex); |
| 2205 | assert_status(status == 0, status, "cond_wait" );
| 2206 | } |
| 2207 | else { |
| 2208 | _cur_index = isAbsolute ? ABS_INDEX : REL_INDEX; |
| 2209 | status = pthread_cond_timedwait(&_cond[_cur_index], _mutex, &absTime); |
| 2210 | assert_status(status == 0 || status == ETIMEDOUT, |
| 2211 | status, "cond_timedwait" ); |
| 2212 | } |
| 2213 | _cur_index = -1; |
| 2214 | |
| 2215 | _counter = 0; |
| 2216 | status = pthread_mutex_unlock(_mutex); |
| 2217 | assert_status(status == 0, status, "invariant" ); |
| 2218 | // Paranoia to ensure our locked and lock-free paths interact |
| 2219 | // correctly with each other and Java-level accesses. |
| 2220 | OrderAccess::fence(); |
| 2221 | |
| 2222 | // If externally suspended while waiting, re-suspend |
| 2223 | if (jt->handle_special_suspend_equivalent_condition()) { |
| 2224 | jt->java_suspend_self(); |
| 2225 | } |
| 2226 | } |
| 2227 | |
| 2228 | void Parker::unpark() { |
| 2229 | int status = pthread_mutex_lock(_mutex); |
| 2230 | assert_status(status == 0, status, "invariant" ); |
| 2231 | const int s = _counter; |
| 2232 | _counter = 1; |
| 2233 | // must capture correct index before unlocking |
| 2234 | int index = _cur_index; |
| 2235 | status = pthread_mutex_unlock(_mutex); |
| 2236 | assert_status(status == 0, status, "invariant" ); |
| 2237 | |
| 2238 | // Note that we signal() *after* dropping the lock for "immortal" Events. |
| 2239 | // This is safe and avoids a common class of futile wakeups. In rare |
| 2240 | // circumstances this can cause a thread to return prematurely from |
| 2241 | // cond_{timed}wait() but the spurious wakeup is benign and the victim |
| 2242 | // will simply re-test the condition and re-park itself. |
| 2243 | // This provides particular benefit if the underlying platform does not |
| 2244 | // provide wait morphing. |
| 2245 | |
| 2246 | if (s < 1 && index != -1) { |
| 2247 | // thread is definitely parked |
| 2248 | status = pthread_cond_signal(&_cond[index]); |
| 2249 | assert_status(status == 0, status, "invariant" ); |
| 2250 | } |
| 2251 | } |
| 2252 | |
| 2253 | // Platform Monitor implementation |
| 2254 | |
| 2255 | os::PlatformMonitor::Impl::Impl() : _next(NULL) { |
| 2256 | int status = pthread_cond_init(&_cond, _condAttr); |
| 2257 | assert_status(status == 0, status, "cond_init" ); |
| 2258 | status = pthread_mutex_init(&_mutex, _mutexAttr); |
| 2259 | assert_status(status == 0, status, "mutex_init" ); |
| 2260 | } |
| 2261 | |
| 2262 | os::PlatformMonitor::Impl::~Impl() { |
| 2263 | int status = pthread_cond_destroy(&_cond); |
| 2264 | assert_status(status == 0, status, "cond_destroy" ); |
| 2265 | status = pthread_mutex_destroy(&_mutex); |
| 2266 | assert_status(status == 0, status, "mutex_destroy" ); |
| 2267 | } |
| 2268 | |
| 2269 | #if PLATFORM_MONITOR_IMPL_INDIRECT |
| 2270 | |
| 2271 | pthread_mutex_t os::PlatformMonitor::_freelist_lock; |
| 2272 | os::PlatformMonitor::Impl* os::PlatformMonitor::_freelist = NULL; |
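| |
| | // In the indirect implementation each monitor's pthread mutex/condvar pair
| | // lives in a separately allocated Impl object. A destroyed monitor pushes its
| | // Impl onto this global freelist rather than deleting it, and construction
| | // pops from the freelist before allocating a new Impl, so Impl objects are
| | // recycled rather than destroyed.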
| 2273 | |
| 2274 | void os::PlatformMonitor::init() { |
| 2275 | int status = pthread_mutex_init(&_freelist_lock, _mutexAttr); |
| 2276 | assert_status(status == 0, status, "freelist lock init" ); |
| 2277 | } |
| 2278 | |
| 2279 | struct os::PlatformMonitor::WithFreeListLocked : public StackObj { |
| 2280 | WithFreeListLocked() { |
| 2281 | int status = pthread_mutex_lock(&_freelist_lock); |
| 2282 | assert_status(status == 0, status, "freelist lock" ); |
| 2283 | } |
| 2284 | |
| 2285 | ~WithFreeListLocked() { |
| 2286 | int status = pthread_mutex_unlock(&_freelist_lock); |
| 2287 | assert_status(status == 0, status, "freelist unlock" ); |
| 2288 | } |
| 2289 | }; |
| 2290 | |
| 2291 | os::PlatformMonitor::PlatformMonitor() { |
| 2292 | { |
| 2293 | WithFreeListLocked wfl; |
| 2294 | _impl = _freelist; |
| 2295 | if (_impl != NULL) { |
| 2296 | _freelist = _impl->_next; |
| 2297 | _impl->_next = NULL; |
| 2298 | return; |
| 2299 | } |
| 2300 | } |
| 2301 | _impl = new Impl(); |
| 2302 | } |
| 2303 | |
| 2304 | os::PlatformMonitor::~PlatformMonitor() { |
| 2305 | WithFreeListLocked wfl; |
| 2306 | assert(_impl->_next == NULL, "invariant" ); |
| 2307 | _impl->_next = _freelist; |
| 2308 | _freelist = _impl; |
| 2309 | } |
| 2310 | |
| 2311 | #endif // PLATFORM_MONITOR_IMPL_INDIRECT |
| 2312 | |
| 2313 | // Must already be locked |
| 2314 | int os::PlatformMonitor::wait(jlong millis) { |
| 2315 | assert(millis >= 0, "negative timeout" ); |
| 2316 | if (millis > 0) { |
| 2317 | struct timespec abst; |
| 2318 | // We have to watch for overflow when converting millis to nanos, |
| 2319 | // but if millis is that large then we will end up limiting to |
| 2320 | // MAX_SECS anyway, so just do that here. |
| 2321 | if (millis / MILLIUNITS > MAX_SECS) { |
| 2322 | millis = jlong(MAX_SECS) * MILLIUNITS; |
| 2323 | } |
| 2324 | to_abstime(&abst, millis * (NANOUNITS / MILLIUNITS), false, false); |
| 2325 | |
| 2326 | int ret = OS_TIMEOUT; |
| 2327 | int status = pthread_cond_timedwait(cond(), mutex(), &abst); |
| 2328 | assert_status(status == 0 || status == ETIMEDOUT, |
| 2329 | status, "cond_timedwait" ); |
| 2330 | if (status == 0) { |
| 2331 | ret = OS_OK; |
| 2332 | } |
| 2333 | return ret; |
| 2334 | } else { |
| 2335 | int status = pthread_cond_wait(cond(), mutex()); |
| 2336 | assert_status(status == 0, status, "cond_wait" ); |
| 2337 | return OS_OK; |
| 2338 | } |
| 2339 | } |
| 2340 | |
| 2341 | #endif // !SOLARIS |
| 2342 | |