/*
 * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// no precompiled headers
#include "jvm.h"
#include "asm/macroAssembler.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "os_share_linux.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/osThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/timer.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"

// put OS-includes here
# include <sys/types.h>
# include <sys/mman.h>
# include <pthread.h>
# include <signal.h>
# include <errno.h>
# include <dlfcn.h>
# include <stdlib.h>
# include <stdio.h>
# include <unistd.h>
# include <sys/resource.h>
69# include <pthread.h>
70# include <sys/stat.h>
71# include <sys/time.h>
72# include <sys/utsname.h>
73# include <sys/socket.h>
74# include <sys/wait.h>
75# include <pwd.h>
76# include <poll.h>
77# include <ucontext.h>
78#ifndef AMD64
79# include <fpu_control.h>
80#endif
81
82#ifdef AMD64
83#define REG_SP REG_RSP
84#define REG_PC REG_RIP
85#define REG_FP REG_RBP
86#define SPELL_REG_SP "rsp"
87#define SPELL_REG_FP "rbp"
88#else
89#define REG_SP REG_UESP
90#define REG_PC REG_EIP
91#define REG_FP REG_EBP
92#define SPELL_REG_SP "esp"
93#define SPELL_REG_FP "ebp"
94#endif // AMD64
95
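// A note on the three compiler-specific variants below (our reading of the
// toolchain differences, not compiler documentation): the Oracle Studio
// (SPARC_WORKS) and clang branches copy the register out with an explicit
// mov because those compilers do not reliably support GCC's explicit
// register variables ("register ... __asm__(reg)").  The SPELL_REG_*
// strings above supply the register's textual name ("rsp"/"esp") for these
// asm templates, while the REG_* macros index into the ucontext gregs
// array and are unrelated to asm syntax.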
address os::current_stack_pointer() {
#ifdef SPARC_WORKS
  void *esp;
  __asm__("mov %%" SPELL_REG_SP ", %0":"=r"(esp));
  return (address) ((char*)esp + sizeof(long)*2);
#elif defined(__clang__)
  void* esp;
  __asm__ __volatile__ ("mov %%" SPELL_REG_SP ", %0":"=r"(esp):);
  return (address) esp;
#else
  register void *esp __asm__ (SPELL_REG_SP);
  return (address) esp;
#endif
}

char* os::non_memory_address_word() {
  // Must never look like an address returned by reserve_memory,
  // even in its subfields (as defined by the CPU immediate fields,
  // if the CPU splits constants across multiple instructions).

  return (char*) -1;
}

address os::Linux::ucontext_get_pc(const ucontext_t * uc) {
  return (address)uc->uc_mcontext.gregs[REG_PC];
}

void os::Linux::ucontext_set_pc(ucontext_t * uc, address pc) {
  uc->uc_mcontext.gregs[REG_PC] = (intptr_t)pc;
}

intptr_t* os::Linux::ucontext_get_sp(const ucontext_t * uc) {
  return (intptr_t*)uc->uc_mcontext.gregs[REG_SP];
}

intptr_t* os::Linux::ucontext_get_fp(const ucontext_t * uc) {
  return (intptr_t*)uc->uc_mcontext.gregs[REG_FP];
}
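
// Illustrative sketch (nothing in this file installs such a handler
// directly): a SA_SIGINFO signal handler receives the interrupted context
// as its third argument and can recover PC/SP/FP with the accessors above:
//
//   void handler(int sig, siginfo_t* info, void* ucVoid) {
//     ucontext_t* uc = (ucontext_t*)ucVoid;
//     address   pc = os::Linux::ucontext_get_pc(uc);
//     intptr_t* sp = os::Linux::ucontext_get_sp(uc);
//     intptr_t* fp = os::Linux::ucontext_get_fp(uc);
//     // ... inspect, or build a frame(sp, fp, pc) ...
//   }
//
// JVM_handle_linux_signal() below follows this pattern.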

// For Forte Analyzer AsyncGetCallTrace profiling support - thread
// is currently interrupted by SIGPROF.
// os::Solaris::fetch_frame_from_ucontext() tries to skip nested signal
// frames. Currently we don't do that on Linux, so it's the same as
// os::fetch_frame_from_context().
// This method is also used for stack overflow signal handling.
ExtendedPC os::Linux::fetch_frame_from_ucontext(Thread* thread,
  const ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp) {

  assert(thread != NULL, "just checking");
  assert(ret_sp != NULL, "just checking");
  assert(ret_fp != NULL, "just checking");

  return os::fetch_frame_from_context(uc, ret_sp, ret_fp);
}

ExtendedPC os::fetch_frame_from_context(const void* ucVoid,
  intptr_t** ret_sp, intptr_t** ret_fp) {

  ExtendedPC epc;
  const ucontext_t* uc = (const ucontext_t*)ucVoid;

  if (uc != NULL) {
    epc = ExtendedPC(os::Linux::ucontext_get_pc(uc));
    if (ret_sp) *ret_sp = os::Linux::ucontext_get_sp(uc);
    if (ret_fp) *ret_fp = os::Linux::ucontext_get_fp(uc);
  } else {
    // construct empty ExtendedPC for return value checking
    epc = ExtendedPC(NULL);
    if (ret_sp) *ret_sp = (intptr_t *)NULL;
    if (ret_fp) *ret_fp = (intptr_t *)NULL;
  }

  return epc;
}

frame os::fetch_frame_from_context(const void* ucVoid) {
  intptr_t* sp;
  intptr_t* fp;
  ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp);
  return frame(sp, fp, epc.pc());
}

frame os::fetch_frame_from_ucontext(Thread* thread, void* ucVoid) {
  intptr_t* sp;
  intptr_t* fp;
  ExtendedPC epc = os::Linux::fetch_frame_from_ucontext(thread, (ucontext_t*)ucVoid, &sp, &fp);
  return frame(sp, fp, epc.pc());
}

bool os::Linux::get_frame_at_stack_banging_point(JavaThread* thread, ucontext_t* uc, frame* fr) {
  address pc = (address) os::Linux::ucontext_get_pc(uc);
  if (Interpreter::contains(pc)) {
    // interpreter performs stack banging after the fixed frame header has
    // been generated while the compilers perform it before. To maintain
    // semantic consistency between interpreted and compiled frames, the
    // method returns the Java sender of the current frame.
    *fr = os::fetch_frame_from_ucontext(thread, uc);
    if (!fr->is_first_java_frame()) {
      // get_frame_at_stack_banging_point() is only called when we
      // have well defined stacks so java_sender() calls do not need
      // to assert safe_for_sender() first.
      *fr = fr->java_sender();
    }
  } else {
    // more complex code with compiled code
    assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
    CodeBlob* cb = CodeCache::find_blob(pc);
    if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
      // Not sure where the pc points to, fall back to the default
      // stack overflow handling
      return false;
    } else {
      // in compiled code, the stack banging is performed just after the return pc
      // has been pushed on the stack
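      // (at that point *sp is the return address into the sender and
      // sp + 1 is the sender's stack pointer, so the frame(sp + 1, fp,
      // (address)*sp) constructed below reconstructs the caller's frame)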
      intptr_t* fp = os::Linux::ucontext_get_fp(uc);
      intptr_t* sp = os::Linux::ucontext_get_sp(uc);
      *fr = frame(sp + 1, fp, (address)*sp);
      if (!fr->is_java_frame()) {
        assert(!fr->is_first_frame(), "Safety check");
        // See java_sender() comment above.
        *fr = fr->java_sender();
      }
    }
  }
  assert(fr->is_java_frame(), "Safety check");
  return true;
}

// By default, gcc always saves the frame pointer (%ebp/%rbp) on the stack.
// It may be turned off by -fomit-frame-pointer.
frame os::get_sender_for_C_frame(frame* fr) {
  return frame(fr->sender_sp(), fr->link(), fr->sender_pc());
}
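
// The x86 C frame layout this relies on (see frame_x86.hpp for the
// authoritative offsets): fp[0] holds the saved frame pointer (link()),
// fp[1] holds the return address (sender_pc()), and fp + 2 is the
// sender's stack pointer (sender_sp()).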

intptr_t* _get_previous_fp() {
#ifdef SPARC_WORKS
  intptr_t **ebp;
  __asm__("mov %%" SPELL_REG_FP ", %0":"=r"(ebp));
#elif defined(__clang__)
  intptr_t **ebp;
  __asm__ __volatile__ ("mov %%" SPELL_REG_FP ", %0":"=r"(ebp):);
#else
  register intptr_t **ebp __asm__ (SPELL_REG_FP);
#endif
  // ebp is for this frame (_get_previous_fp). We want the ebp for the
  // caller of os::current_frame*(), so go up two frames. However, for
  // optimized builds, _get_previous_fp() will be inlined, so only go
  // up 1 frame in that case.
#ifdef _NMT_NOINLINE_
  return **(intptr_t***)ebp;
#else
  return *ebp;
#endif
}


frame os::current_frame() {
  intptr_t* fp = _get_previous_fp();
  frame myframe((intptr_t*)os::current_stack_pointer(),
                (intptr_t*)fp,
                CAST_FROM_FN_PTR(address, os::current_frame));
  if (os::is_first_C_frame(&myframe)) {
    // stack is not walkable
    return frame();
  } else {
    return os::get_sender_for_C_frame(&myframe);
  }
}

// Utility functions

// From IA32 System Programming Guide
enum {
  trap_page_fault = 0xE
};
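
// Vector 0xE is the #PF (page-fault) exception in the IA32 vector table;
// the kernel exposes the vector number through uc_mcontext.gregs[REG_TRAPNO],
// which the 32-bit execution-protection code below compares against
// trap_page_fault.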

extern "C" JNIEXPORT int
JVM_handle_linux_signal(int sig,
                        siginfo_t* info,
                        void* ucVoid,
                        int abort_if_unrecognized) {
  ucontext_t* uc = (ucontext_t*) ucVoid;

  Thread* t = Thread::current_or_null_safe();

  // Must do this before SignalHandlerMark, if crash protection installed we will longjmp away
  // (no destructors can be run)
  os::ThreadCrashProtection::check_crash_protection(sig, t);

  SignalHandlerMark shm(t);

  // Note: it's not uncommon that JNI code uses signal/sigset to install
  // and then restore certain signal handlers (e.g. to temporarily block
  // SIGPIPE, or to have a SIGILL handler when detecting CPU type). When
  // that happens, JVM_handle_linux_signal() might be invoked with junk
  // info/ucVoid. To avoid an unnecessary crash when libjsig is not
  // preloaded, try to handle signals that do not require siginfo/ucontext first.

  if (sig == SIGPIPE || sig == SIGXFSZ) {
    // allow chained handler to go first
    if (os::Linux::chained_handler(sig, info, ucVoid)) {
      return true;
    } else {
      // Ignoring SIGPIPE/SIGXFSZ - see bugs 4229104 or 6499219
      return true;
    }
  }

#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
  if ((sig == SIGSEGV || sig == SIGBUS) && info != NULL && info->si_addr == g_assert_poison) {
    handle_assert_poison_fault(ucVoid, info->si_addr);
    return 1;
  }
#endif

  JavaThread* thread = NULL;
  VMThread* vmthread = NULL;
  if (os::Linux::signal_handlers_are_installed) {
    if (t != NULL) {
      if (t->is_Java_thread()) {
        thread = (JavaThread*)t;
      } else if (t->is_VM_thread()) {
        vmthread = (VMThread *)t;
      }
    }
  }
/*
  NOTE: does not seem to work on linux.
  if (info == NULL || info->si_code <= 0 || info->si_code == SI_NOINFO) {
    // can't decode this kind of signal
    info = NULL;
  } else {
    assert(sig == info->si_signo, "bad siginfo");
  }
*/
  // decide if this trap can be handled by a stub
  address stub = NULL;

  address pc = NULL;

  //%note os_trap_1
  if (info != NULL && uc != NULL && thread != NULL) {
    pc = (address) os::Linux::ucontext_get_pc(uc);

    if (StubRoutines::is_safefetch_fault(pc)) {
      os::Linux::ucontext_set_pc(uc, StubRoutines::continuation_for_safefetch_fault(pc));
      return 1;
    }

#ifndef AMD64
    // Halt if SI_KERNEL before more crashes get misdiagnosed as Java bugs
    // This can happen in any running code (currently more frequently in
    // interpreter code but has been seen in compiled code)
    if (sig == SIGSEGV && info->si_addr == 0 && info->si_code == SI_KERNEL) {
      fatal("An irrecoverable SI_KERNEL SIGSEGV has occurred due "
            "to unstable signal handling in this distribution.");
    }
#endif // AMD64

    // Handle ALL stack overflow variations here
    if (sig == SIGSEGV) {
      address addr = (address) info->si_addr;

      // check if fault address is within thread stack
      if (thread->on_local_stack(addr)) {
        // stack overflow
        if (thread->in_stack_yellow_reserved_zone(addr)) {
          if (thread->thread_state() == _thread_in_Java) {
            if (thread->in_stack_reserved_zone(addr)) {
              frame fr;
              if (os::Linux::get_frame_at_stack_banging_point(thread, uc, &fr)) {
                assert(fr.is_java_frame(), "Must be a Java frame");
                frame activation =
                  SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
                if (activation.sp() != NULL) {
                  thread->disable_stack_reserved_zone();
                  if (activation.is_interpreted_frame()) {
                    thread->set_reserved_stack_activation((address)(
                      activation.fp() + frame::interpreter_frame_initial_sp_offset));
                  } else {
                    thread->set_reserved_stack_activation((address)activation.unextended_sp());
                  }
                  return 1;
                }
              }
            }
            // Throw a stack overflow exception.  Guard pages will be reenabled
            // while unwinding the stack.
            thread->disable_stack_yellow_reserved_zone();
            stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
          } else {
            // Thread was in the vm or native code.  Return and try to finish.
            thread->disable_stack_yellow_reserved_zone();
            return 1;
          }
        } else if (thread->in_stack_red_zone(addr)) {
          // Fatal red zone violation.  Disable the guard pages and fall through
          // to handle_unexpected_exception way down below.
          thread->disable_stack_red_zone();
          tty->print_raw_cr("An irrecoverable stack overflow has occurred.");

          // This is a likely cause, but hard to verify. Let's just print
          // it as a hint.
          tty->print_raw_cr("Please check if any of your loaded .so files has "
                            "enabled executable stack (see man page execstack(8))");
        } else {
          // Accessing stack address below sp may cause SEGV if current
          // thread has MAP_GROWSDOWN stack. This should only happen when
          // current thread was created by user code with MAP_GROWSDOWN flag
          // and then attached to VM. See notes in os_linux.cpp.
          if (thread->osthread()->expanding_stack() == 0) {
            thread->osthread()->set_expanding_stack();
            if (os::Linux::manually_expand_stack(thread, addr)) {
              thread->osthread()->clear_expanding_stack();
              return 1;
            }
            thread->osthread()->clear_expanding_stack();
          } else {
            fatal("recursive segv. expanding stack.");
          }
        }
      }
    }

    if ((sig == SIGSEGV) && VM_Version::is_cpuinfo_segv_addr(pc)) {
      // Verify that the OS saves/restores AVX registers.
      stub = VM_Version::cpuinfo_cont_addr();
    }

    if (thread->thread_state() == _thread_in_Java) {
      // Java thread running in Java code => find exception handler if any
      // a fault inside compiled code, the interpreter, or a stub

      if (sig == SIGSEGV && os::is_poll_address((address)info->si_addr)) {
        stub = SharedRuntime::get_poll_stub(pc);
      } else if (sig == SIGBUS /* && info->si_code == BUS_OBJERR */) {
        // BugId 4454115: A read from a MappedByteBuffer can fault
        // here if the underlying file has been truncated.
        // Do not crash the VM in such a case.
        CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
        CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
        if (nm != NULL && nm->has_unsafe_access()) {
          address next_pc = Assembler::locate_next_instruction(pc);
          stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
        }
      }
      else

#ifdef AMD64
      if (sig == SIGFPE &&
          (info->si_code == FPE_INTDIV || info->si_code == FPE_FLTDIV)) {
        stub =
          SharedRuntime::
          continuation_for_implicit_exception(thread,
                                              pc,
                                              SharedRuntime::
                                              IMPLICIT_DIVIDE_BY_ZERO);
#else
      if (sig == SIGFPE /* && info->si_code == FPE_INTDIV */) {
        // HACK: si_code does not work on linux 2.2.12-20!!!
        int op = pc[0];
        if (op == 0xDB) {
          // FIST
          // TODO: The encoding of D2I in i486.ad can cause an exception
          // prior to the fist instruction if there was an invalid operation
          // pending. We want to dismiss that exception. From the win_32
          // side it also seems that if it really was the fist causing
          // the exception that we do the d2i by hand with different
          // rounding. Seems kind of weird.
          // NOTE: that we take the exception at the NEXT floating point instruction.
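          // For reference, the three bytes checked below decode as DB /2
          // with ModRM 0x14 and SIB 0x24, i.e. "fist DWORD PTR [esp]",
          // the D2I pattern from i486.ad mentioned above.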
          assert(pc[0] == 0xDB, "not a FIST opcode");
          assert(pc[1] == 0x14, "not a FIST opcode");
          assert(pc[2] == 0x24, "not a FIST opcode");
          return true;
        } else if (op == 0xF7) {
          // IDIV
          stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO);
        } else {
          // TODO: handle more cases if we are using other x86 instructions
          //   that can generate SIGFPE signal on linux.
          tty->print_cr("unknown opcode 0x%X with SIGFPE.", op);
          fatal("please update this code.");
        }
#endif // AMD64
      } else if (sig == SIGSEGV &&
                 MacroAssembler::uses_implicit_null_check(info->si_addr)) {
        // Determination of interpreter/vtable stub/compiled code null exception
        stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
      }
    } else if (thread->thread_state() == _thread_in_vm &&
               sig == SIGBUS && /* info->si_code == BUS_OBJERR && */
               thread->doing_unsafe_access()) {
      address next_pc = Assembler::locate_next_instruction(pc);
      stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
    }

    // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in
    // and the heap gets shrunk before the field access.
    if ((sig == SIGSEGV) || (sig == SIGBUS)) {
      address addr = JNI_FastGetField::find_slowcase_pc(pc);
      if (addr != (address)-1) {
        stub = addr;
      }
    }
  }

#ifndef AMD64
  // Execution protection violation
  //
  // This should be kept as the last step in the triage.  We don't
  // have a dedicated trap number for a no-execute fault, so be
  // conservative and allow other handlers the first shot.
  //
  // Note: We don't test that info->si_code == SEGV_ACCERR here.
  // That si_code is so generic that it is almost meaningless, and
  // the si_code for this condition may change in the future.
  // Furthermore, a false positive should be harmless.
  if (UnguardOnExecutionViolation > 0 &&
      (sig == SIGSEGV || sig == SIGBUS) &&
      uc->uc_mcontext.gregs[REG_TRAPNO] == trap_page_fault) {
    int page_size = os::vm_page_size();
    address addr = (address) info->si_addr;
    address pc = os::Linux::ucontext_get_pc(uc);
    // Make sure the pc and the faulting address are sane.
    //
    // If an instruction spans a page boundary, and the page containing
    // the beginning of the instruction is executable but the following
    // page is not, the pc and the faulting address might be slightly
    // different - we still want to unguard the 2nd page in this case.
    //
    // 15 bytes seems to be a (very) safe value for max instruction size.
    bool pc_is_near_addr =
      (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
    bool instr_spans_page_boundary =
      (align_down((intptr_t) pc ^ (intptr_t) addr,
                  (intptr_t) page_size) > 0);
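    // Worked example, assuming 4K pages: pc = 0x1000ffe, addr = 0x1001002.
    // pointer_delta(addr, pc) = 4 < 15, so pc_is_near_addr holds, and
    // pc ^ addr = 0x1ffc with align_down(0x1ffc, 0x1000) = 0x1000 > 0,
    // so the two addresses are on different pages: the instruction spans
    // the boundary and the page holding addr (not pc) is the one to unguard.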

    if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
      static volatile address last_addr =
        (address) os::non_memory_address_word();

      // In conservative mode, don't unguard unless the address is in the VM
      if (addr != last_addr &&
          (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {

        // Set memory to RWX and retry
        address page_start = align_down(addr, page_size);
        bool res = os::protect_memory((char*) page_start, page_size,
                                      os::MEM_PROT_RWX);

        log_debug(os)("Execution protection violation "
                      "at " INTPTR_FORMAT
                      ", unguarding " INTPTR_FORMAT ": %s, errno=%d", p2i(addr),
                      p2i(page_start), (res ? "success" : "failed"), errno);
        stub = pc;

        // Set last_addr so if we fault again at the same address, we don't end
        // up in an endless loop.
        //
        // There are two potential complications here.  Two threads trapping
        // at the same address at the same time could cause one of the
        // threads to think the page was already unguarded, and abort the
        // VM.  Likely very rare.
        //
        // The other race involves two threads alternately trapping at
        // different addresses and failing to unguard the page, resulting in
        // an endless loop.  This condition is probably even more unlikely
        // than the first.
        //
        // Although both cases could be avoided by using locks or thread local
        // last_addr, these solutions are unnecessary complication: this
        // handler is a best-effort safety net, not a complete solution.  It is
        // disabled by default and should only be used as a workaround in case
        // we missed any no-execute-unsafe VM code.

        last_addr = addr;
      }
    }
  }
#endif // !AMD64

  if (stub != NULL) {
    // save all thread context in case we need to restore it
    if (thread != NULL) thread->set_saved_exception_pc(pc);

    os::Linux::ucontext_set_pc(uc, stub);
    return true;
  }

  // signal-chaining
  if (os::Linux::chained_handler(sig, info, ucVoid)) {
    return true;
  }

  if (!abort_if_unrecognized) {
    // caller wants another chance, so give it one
    return false;
  }

  if (pc == NULL && uc != NULL) {
    pc = os::Linux::ucontext_get_pc(uc);
  }

  // unmask current signal
  sigset_t newset;
  sigemptyset(&newset);
  sigaddset(&newset, sig);
  sigprocmask(SIG_UNBLOCK, &newset, NULL);

  VMError::report_and_die(t, sig, pc, info, ucVoid);

  ShouldNotReachHere();
  return true; // Mute compiler
}

void os::Linux::init_thread_fpu_state(void) {
#ifndef AMD64
  // set fpu to 53 bit precision
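  // 0x27f decomposes as follows: the low bits mask all x87 exceptions,
  // precision-control bits 8..9 = 10b select 53-bit (double) precision,
  // and rounding-control bits 10..11 = 00b select round-to-nearest,
  // matching Java's double semantics on the x87 stack.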
  set_fpu_control_word(0x27f);
#endif // !AMD64
}

int os::Linux::get_fpu_control_word(void) {
#ifdef AMD64
  return 0;
#else
  int fpu_control;
  _FPU_GETCW(fpu_control);
  return fpu_control & 0xffff;
#endif // AMD64
}

void os::Linux::set_fpu_control_word(int fpu_control) {
#ifndef AMD64
  _FPU_SETCW(fpu_control);
#endif // !AMD64
}

// Check that the linux kernel version is 2.4 or higher since earlier
// versions do not support SSE without patches.
bool os::supports_sse() {
#ifdef AMD64
  return true;
#else
  struct utsname uts;
  if (uname(&uts) != 0) return false; // uname fails?
  char *minor_string;
  int major = strtol(uts.release, &minor_string, 10);
  int minor = strtol(minor_string + 1, NULL, 10);
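  // Example: a release string of "2.6.32-431.el6.i686" parses as
  // major = 2, minor = 6 (supported); "2.2.19" parses as 2.2 (not).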
  bool result = (major > 2 || (major == 2 && minor >= 4));
  log_info(os)("OS version is %d.%d, which %s support SSE/SSE2",
               major, minor, result ? "DOES" : "does NOT");
  return result;
#endif // AMD64
}

bool os::is_allocatable(size_t bytes) {
#ifdef AMD64
  // unused on amd64?
  return true;
#else

  if (bytes < 2 * G) {
    return true;
  }

  char* addr = reserve_memory(bytes, NULL);

  if (addr != NULL) {
    release_memory(addr, bytes);
  }

  return addr != NULL;
#endif // AMD64
}

////////////////////////////////////////////////////////////////////////////////
// thread stack

// Minimum usable stack sizes required to get to user code. Space for
// HotSpot guard pages is added later.
size_t os::Posix::_compiler_thread_min_stack_allowed = 48 * K;
size_t os::Posix::_java_thread_min_stack_allowed = 40 * K;
#ifdef _LP64
size_t os::Posix::_vm_internal_thread_min_stack_allowed = 64 * K;
#else
size_t os::Posix::_vm_internal_thread_min_stack_allowed = (48 DEBUG_ONLY(+ 4)) * K;
#endif // _LP64

// return default stack size for thr_type
size_t os::Posix::default_stack_size(os::ThreadType thr_type) {
  // default stack size (compiler thread needs larger stack)
#ifdef AMD64
  size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M);
#else
  size_t s = (thr_type == os::compiler_thread ? 2 * M : 512 * K);
#endif // AMD64
  return s;
}

/////////////////////////////////////////////////////////////////////////////
// helper functions for fatal error handler

void os::print_context(outputStream *st, const void *context) {
  if (context == NULL) return;

  const ucontext_t *uc = (const ucontext_t*)context;
  st->print_cr("Registers:");
#ifdef AMD64
  st->print(  "RAX=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_RAX]);
  st->print(", RBX=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_RBX]);
  st->print(", RCX=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_RCX]);
  st->print(", RDX=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_RDX]);
  st->cr();
  st->print(  "RSP=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_RSP]);
  st->print(", RBP=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_RBP]);
  st->print(", RSI=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_RSI]);
  st->print(", RDI=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_RDI]);
  st->cr();
  st->print(  "R8 =" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_R8]);
  st->print(", R9 =" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_R9]);
  st->print(", R10=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_R10]);
  st->print(", R11=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_R11]);
  st->cr();
  st->print(  "R12=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_R12]);
  st->print(", R13=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_R13]);
  st->print(", R14=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_R14]);
  st->print(", R15=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_R15]);
  st->cr();
  st->print(  "RIP=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_RIP]);
  st->print(", EFLAGS=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_EFL]);
  st->print(", CSGSFS=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_CSGSFS]);
  st->print(", ERR=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_ERR]);
  st->cr();
  st->print("  TRAPNO=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_TRAPNO]);
#else
  st->print(  "EAX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EAX]);
  st->print(", EBX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EBX]);
  st->print(", ECX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_ECX]);
  st->print(", EDX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EDX]);
  st->cr();
  st->print(  "ESP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_UESP]);
  st->print(", EBP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EBP]);
  st->print(", ESI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_ESI]);
  st->print(", EDI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EDI]);
  st->cr();
  st->print(  "EIP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EIP]);
  st->print(", EFLAGS=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EFL]);
  st->print(", CR2=" PTR64_FORMAT, (uint64_t)uc->uc_mcontext.cr2);
#endif // AMD64
  st->cr();
  st->cr();

  intptr_t *sp = (intptr_t *)os::Linux::ucontext_get_sp(uc);
  st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", p2i(sp));
  print_hex_dump(st, (address)sp, (address)(sp + 8), sizeof(intptr_t));
  st->cr();

  // Note: it may be unsafe to inspect memory near pc. For example, pc may
  // point to garbage if entry point in an nmethod is corrupted. Leave
  // this at the end, and hope for the best.
  address pc = os::Linux::ucontext_get_pc(uc);
  print_instructions(st, pc, sizeof(char));
  st->cr();
}

void os::print_register_info(outputStream *st, const void *context) {
  if (context == NULL) return;

  const ucontext_t *uc = (const ucontext_t*)context;

  st->print_cr("Register to memory mapping:");
  st->cr();

  // this is horrendously verbose but the layout of the registers in the
  // context does not match how we defined our abstract Register set, so
  // we can't just iterate through the gregs area

  // this is only for the "general purpose" registers

#ifdef AMD64
  st->print("RAX="); print_location(st, uc->uc_mcontext.gregs[REG_RAX]);
  st->print("RBX="); print_location(st, uc->uc_mcontext.gregs[REG_RBX]);
  st->print("RCX="); print_location(st, uc->uc_mcontext.gregs[REG_RCX]);
  st->print("RDX="); print_location(st, uc->uc_mcontext.gregs[REG_RDX]);
  st->print("RSP="); print_location(st, uc->uc_mcontext.gregs[REG_RSP]);
  st->print("RBP="); print_location(st, uc->uc_mcontext.gregs[REG_RBP]);
  st->print("RSI="); print_location(st, uc->uc_mcontext.gregs[REG_RSI]);
  st->print("RDI="); print_location(st, uc->uc_mcontext.gregs[REG_RDI]);
  st->print("R8 ="); print_location(st, uc->uc_mcontext.gregs[REG_R8]);
  st->print("R9 ="); print_location(st, uc->uc_mcontext.gregs[REG_R9]);
  st->print("R10="); print_location(st, uc->uc_mcontext.gregs[REG_R10]);
  st->print("R11="); print_location(st, uc->uc_mcontext.gregs[REG_R11]);
  st->print("R12="); print_location(st, uc->uc_mcontext.gregs[REG_R12]);
  st->print("R13="); print_location(st, uc->uc_mcontext.gregs[REG_R13]);
  st->print("R14="); print_location(st, uc->uc_mcontext.gregs[REG_R14]);
  st->print("R15="); print_location(st, uc->uc_mcontext.gregs[REG_R15]);
#else
  st->print("EAX="); print_location(st, uc->uc_mcontext.gregs[REG_EAX]);
  st->print("EBX="); print_location(st, uc->uc_mcontext.gregs[REG_EBX]);
  st->print("ECX="); print_location(st, uc->uc_mcontext.gregs[REG_ECX]);
  st->print("EDX="); print_location(st, uc->uc_mcontext.gregs[REG_EDX]);
  st->print("ESP="); print_location(st, uc->uc_mcontext.gregs[REG_ESP]);
  st->print("EBP="); print_location(st, uc->uc_mcontext.gregs[REG_EBP]);
  st->print("ESI="); print_location(st, uc->uc_mcontext.gregs[REG_ESI]);
  st->print("EDI="); print_location(st, uc->uc_mcontext.gregs[REG_EDI]);
#endif // AMD64

  st->cr();
}

void os::setup_fpu() {
#ifndef AMD64
  address fpu_cntrl = StubRoutines::addr_fpu_cntrl_wrd_std();
  __asm__ volatile (  "fldcw (%0)" :
                      : "r" (fpu_cntrl) : "memory");
#endif // !AMD64
}

#ifndef PRODUCT
void os::verify_stack_alignment() {
#ifdef AMD64
  assert(((intptr_t)os::current_stack_pointer() & (StackAlignmentInBytes-1)) == 0, "incorrect stack alignment");
#endif
}
#endif


/*
 * IA32 only: execute code at a high address in case buggy NX emulation is present. I.e. avoid CS limit
 * updates (JDK-8023956).
 */
void os::workaround_expand_exec_shield_cs_limit() {
#if defined(IA32)
  assert(Linux::initial_thread_stack_bottom() != NULL, "sanity");
  size_t page_size = os::vm_page_size();

  /*
   * JDK-8197429
   *
   * Expand the stack mapping to the end of the initial stack before
   * attempting to install the codebuf.  This is needed because newer
   * Linux kernels impose a distance of a megabyte between stack
   * memory and other memory regions.  If we try to install the
   * codebuf before expanding the stack the installation will appear
   * to succeed but we'll get a segfault later if we expand the stack
   * in Java code.
   *
   */
  if (os::is_primordial_thread()) {
    address limit = Linux::initial_thread_stack_bottom();
    if (! DisablePrimordialThreadGuardPages) {
      limit += JavaThread::stack_red_zone_size() +
               JavaThread::stack_yellow_zone_size();
    }
    os::Linux::expand_stack_to(limit);
  }

  /*
   * Take the highest VA the OS will give us and exec
   *
   * Although using -(pagesz) as mmap hint works on newer kernel as you would
   * think, older variants affected by this work-around don't (search forward only).
   *
   * On the affected distributions, we understand the memory layout to be:
   *
   * TASK_LIMIT = 3G, main stack base close to TASK_LIMIT.
   *
   * A few pages south of the main stack will do it.
   *
   * If we are embedded in an app other than launcher (initial != main stack),
   * we don't have much control or understanding of the address space, just let it slide.
   */
  char* hint = (char*)(Linux::initial_thread_stack_bottom() -
                       (JavaThread::stack_guard_zone_size() + page_size));
  char* codebuf = os::attempt_reserve_memory_at(page_size, hint);

  if (codebuf == NULL) {
    // JDK-8197429: There may be a stack gap of one megabyte between
    // the limit of the stack and the nearest memory region: this is a
    // Linux kernel workaround for CVE-2017-1000364.  If we failed to
    // map our codebuf, try again at an address one megabyte lower.
    hint -= 1 * M;
    codebuf = os::attempt_reserve_memory_at(page_size, hint);
  }

  if ((codebuf == NULL) || (!os::commit_memory(codebuf, page_size, true))) {
    return; // No matter, we tried, best effort.
  }

  MemTracker::record_virtual_memory_type((address)codebuf, mtInternal);

  log_info(os)("[CS limit NX emulation work-around, exec code at: %p]", codebuf);

  // Some code to exec: the 'ret' instruction
  codebuf[0] = 0xC3;

  // Call the code in the codebuf
  __asm__ volatile("call *%0" : : "r"(codebuf));

  // keep the page mapped so CS limit isn't reduced.
#endif
}

int os::extra_bang_size_in_bytes() {
  // JDK-8050147 requires the full cache line bang for x86.
  return VM_Version::L1_line_size();
}