1 | /* |
2 | * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. |
3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 | * |
5 | * This code is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 only, as |
7 | * published by the Free Software Foundation. |
8 | * |
9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
12 | * version 2 for more details (a copy is included in the LICENSE file that |
13 | * accompanied this code). |
14 | * |
15 | * You should have received a copy of the GNU General Public License version |
16 | * 2 along with this work; if not, write to the Free Software Foundation, |
17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
18 | * |
19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
20 | * or visit www.oracle.com if you need additional information or have any |
21 | * questions. |
22 | * |
23 | */ |
24 | |
25 | #include "precompiled.hpp" |
26 | #include "jvm.h" |
27 | #include "classfile/classLoader.hpp" |
28 | #include "classfile/javaClasses.hpp" |
29 | #include "classfile/moduleEntry.hpp" |
30 | #include "classfile/systemDictionary.hpp" |
31 | #include "classfile/vmSymbols.hpp" |
32 | #include "code/codeCache.hpp" |
33 | #include "code/icBuffer.hpp" |
34 | #include "code/vtableStubs.hpp" |
35 | #include "gc/shared/gcVMOperations.hpp" |
37 | #include "interpreter/interpreter.hpp" |
38 | #include "logging/log.hpp" |
39 | #include "logging/logStream.hpp" |
40 | #include "memory/allocation.inline.hpp" |
41 | #include "memory/guardedMemory.hpp" |
42 | #include "memory/resourceArea.hpp" |
43 | #include "memory/universe.hpp" |
44 | #include "oops/compressedOops.inline.hpp" |
45 | #include "oops/oop.inline.hpp" |
46 | #include "prims/jvm_misc.hpp" |
47 | #include "runtime/arguments.hpp" |
48 | #include "runtime/atomic.hpp" |
49 | #include "runtime/frame.inline.hpp" |
50 | #include "runtime/handles.inline.hpp" |
51 | #include "runtime/interfaceSupport.inline.hpp" |
52 | #include "runtime/java.hpp" |
53 | #include "runtime/javaCalls.hpp" |
54 | #include "runtime/mutexLocker.hpp" |
55 | #include "runtime/os.inline.hpp" |
56 | #include "runtime/sharedRuntime.hpp" |
57 | #include "runtime/stubRoutines.hpp" |
58 | #include "runtime/thread.inline.hpp" |
59 | #include "runtime/threadSMR.hpp" |
60 | #include "runtime/vm_version.hpp" |
61 | #include "services/attachListener.hpp" |
62 | #include "services/mallocTracker.hpp" |
63 | #include "services/memTracker.hpp" |
64 | #include "services/nmtCommon.hpp" |
65 | #include "services/threadService.hpp" |
66 | #include "utilities/align.hpp" |
67 | #include "utilities/defaultStream.hpp" |
68 | #include "utilities/events.hpp" |
69 | |
70 | # include <signal.h> |
71 | # include <errno.h> |
72 | |
73 | OSThread* os::_starting_thread = NULL; |
74 | address os::_polling_page = NULL; |
75 | volatile unsigned int os::_rand_seed = 1; |
76 | int os::_processor_count = 0; |
77 | int os::_initial_active_processor_count = 0; |
78 | size_t os::_page_sizes[os::page_sizes_max]; |
79 | |
80 | #ifndef PRODUCT |
81 | julong os::num_mallocs = 0; // # of calls to malloc/realloc |
82 | julong os::alloc_bytes = 0; // # of bytes allocated |
83 | julong os::num_frees = 0; // # of calls to free |
84 | julong os::free_bytes = 0; // # of bytes freed |
85 | #endif |
86 | |
87 | static size_t cur_malloc_words = 0; // current size for MallocMaxTestWords |
88 | |
89 | DEBUG_ONLY(bool os::_mutex_init_done = false;) |
90 | |
91 | void os_init_globals() { |
92 | // Called from init_globals(). |
93 | // See Threads::create_vm() in thread.cpp, and init.cpp. |
94 | os::init_globals(); |
95 | } |
96 | |
97 | static time_t get_timezone(const struct tm* time_struct) { |
98 | #if defined(_ALLBSD_SOURCE) |
99 | return time_struct->tm_gmtoff; |
100 | #elif defined(_WINDOWS) |
101 | long zone; |
102 | _get_timezone(&zone); |
103 | return static_cast<time_t>(zone); |
104 | #else |
105 | return timezone; |
106 | #endif |
107 | } |
108 | |
109 | int os::snprintf(char* buf, size_t len, const char* fmt, ...) { |
110 | va_list args; |
111 | va_start(args, fmt); |
112 | int result = os::vsnprintf(buf, len, fmt, args); |
113 | va_end(args); |
114 | return result; |
115 | } |
116 | |
117 | // Fill in buffer with current local time as an ISO-8601 string. |
118 | // E.g., yyyy-mm-ddThh:mm:ss-zzzz. |
119 | // Returns buffer, or NULL if it failed. |
120 | // This would mostly be a call to |
121 | // strftime(...., "%Y-%m-%d" "T" "%H:%M:%S" "%z", ....) |
122 | // except that on Windows the %z behaves badly, so we do it ourselves. |
123 | // Also, people wanted milliseconds on there, |
124 | // and strftime doesn't do milliseconds. |
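// For illustration, a local-time result might look like
// "2019-05-17T14:30:15.123-0700" (assuming a US Pacific daylight-time offset).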
125 | char* os::iso8601_time(char* buffer, size_t buffer_length, bool utc) { |
  // Output will be of the form "YYYY-MM-DDThh:mm:ss.mmm+zzzz\0",
  // i.e. 29 characters including the terminating NUL.
129 | // format string: "%04d-%02d-%02dT%02d:%02d:%02d.%03d%c%02d%02d" |
130 | static const size_t needed_buffer = 29; |
131 | |
132 | // Sanity check the arguments |
133 | if (buffer == NULL) { |
134 | assert(false, "NULL buffer" ); |
135 | return NULL; |
136 | } |
137 | if (buffer_length < needed_buffer) { |
138 | assert(false, "buffer_length too small" ); |
139 | return NULL; |
140 | } |
141 | // Get the current time |
142 | jlong milliseconds_since_19700101 = javaTimeMillis(); |
  const int milliseconds_per_second = 1000;
  const time_t seconds_since_19700101 =
    milliseconds_since_19700101 / milliseconds_per_second;
  const int milliseconds_after_second =
    milliseconds_since_19700101 % milliseconds_per_second;
148 | // Convert the time value to a tm and timezone variable |
149 | struct tm time_struct; |
150 | if (utc) { |
151 | if (gmtime_pd(&seconds_since_19700101, &time_struct) == NULL) { |
152 | assert(false, "Failed gmtime_pd" ); |
153 | return NULL; |
154 | } |
155 | } else { |
156 | if (localtime_pd(&seconds_since_19700101, &time_struct) == NULL) { |
157 | assert(false, "Failed localtime_pd" ); |
158 | return NULL; |
159 | } |
160 | } |
161 | const time_t zone = get_timezone(&time_struct); |
162 | |
163 | // If daylight savings time is in effect, |
164 | // we are 1 hour East of our time zone |
165 | const time_t seconds_per_minute = 60; |
166 | const time_t minutes_per_hour = 60; |
167 | const time_t seconds_per_hour = seconds_per_minute * minutes_per_hour; |
168 | time_t UTC_to_local = zone; |
169 | if (time_struct.tm_isdst > 0) { |
170 | UTC_to_local = UTC_to_local - seconds_per_hour; |
171 | } |
172 | |
173 | // No offset when dealing with UTC |
174 | if (utc) { |
175 | UTC_to_local = 0; |
176 | } |
177 | |
  // Compute the time zone offset.
  // localtime_pd() sets timezone to the difference (in seconds)
  // between UTC and local time.
  // ISO 8601 says we need the difference between local time and UTC,
  // so we change the sign of the localtime_pd() result.
  const time_t local_to_UTC = -(UTC_to_local);
  // Then we have to figure out if we are ahead (+) or behind (-) UTC.
185 | char sign_local_to_UTC = '+'; |
186 | time_t abs_local_to_UTC = local_to_UTC; |
187 | if (local_to_UTC < 0) { |
188 | sign_local_to_UTC = '-'; |
189 | abs_local_to_UTC = -(abs_local_to_UTC); |
190 | } |
191 | // Convert time zone offset seconds to hours and minutes. |
192 | const time_t zone_hours = (abs_local_to_UTC / seconds_per_hour); |
193 | const time_t zone_min = |
194 | ((abs_local_to_UTC % seconds_per_hour) / seconds_per_minute); |
195 | |
196 | // Print an ISO 8601 date and time stamp into the buffer |
197 | const int year = 1900 + time_struct.tm_year; |
198 | const int month = 1 + time_struct.tm_mon; |
199 | const int printed = jio_snprintf(buffer, buffer_length, |
200 | "%04d-%02d-%02dT%02d:%02d:%02d.%03d%c%02d%02d" , |
201 | year, |
202 | month, |
203 | time_struct.tm_mday, |
204 | time_struct.tm_hour, |
205 | time_struct.tm_min, |
206 | time_struct.tm_sec, |
207 | milliseconds_after_second, |
208 | sign_local_to_UTC, |
209 | zone_hours, |
210 | zone_min); |
211 | if (printed == 0) { |
    assert(false, "Failed jio_snprintf");
213 | return NULL; |
214 | } |
215 | return buffer; |
216 | } |
217 | |
218 | OSReturn os::set_priority(Thread* thread, ThreadPriority p) { |
219 | debug_only(Thread::check_for_dangling_thread_pointer(thread);) |
220 | |
221 | if ((p >= MinPriority && p <= MaxPriority) || |
222 | (p == CriticalPriority && thread->is_ConcurrentGC_thread())) { |
223 | int priority = java_to_os_priority[p]; |
224 | return set_native_priority(thread, priority); |
225 | } else { |
226 | assert(false, "Should not happen" ); |
227 | return OS_ERR; |
228 | } |
229 | } |
230 | |
// The mapping from OS priority back to Java priority may be inexact because
// Java priorities can map M:1 with native priorities. If you want the definitive
// Java priority then use JavaThread::java_priority().
234 | OSReturn os::get_priority(const Thread* const thread, ThreadPriority& priority) { |
235 | int p; |
236 | int os_prio; |
237 | OSReturn ret = get_native_priority(thread, &os_prio); |
238 | if (ret != OS_OK) return ret; |
239 | |
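  // Note that java_to_os_priority[] may ascend or descend with Java priority:
  // on platforms where a larger native value means a higher priority the table
  // ascends, while on platforms using niceness values (smaller value == higher
  // priority) it descends, hence the two search directions below.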
240 | if (java_to_os_priority[MaxPriority] > java_to_os_priority[MinPriority]) { |
241 | for (p = MaxPriority; p > MinPriority && java_to_os_priority[p] > os_prio; p--) ; |
242 | } else { |
243 | // niceness values are in reverse order |
244 | for (p = MaxPriority; p > MinPriority && java_to_os_priority[p] < os_prio; p--) ; |
245 | } |
246 | priority = (ThreadPriority)p; |
247 | return OS_OK; |
248 | } |
249 | |
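// Builds a platform-specific library file name from a base name. For example
// (illustrative), a base name of "zip" becomes "libzip.so" on Linux and
// "zip.dll" on Windows, following JNI_LIB_PREFIX/JNI_LIB_SUFFIX.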
250 | bool os::dll_build_name(char* buffer, size_t size, const char* fname) { |
251 | int n = jio_snprintf(buffer, size, "%s%s%s" , JNI_LIB_PREFIX, fname, JNI_LIB_SUFFIX); |
252 | return (n != -1); |
253 | } |
254 | |
255 | #if !defined(LINUX) && !defined(_WINDOWS) |
256 | bool os::committed_in_range(address start, size_t size, address& committed_start, size_t& committed_size) { |
257 | committed_start = start; |
258 | committed_size = size; |
259 | return true; |
260 | } |
261 | #endif |
262 | |
263 | // Helper for dll_locate_lib. |
264 | // Pass buffer and printbuffer as we already printed the path to buffer |
265 | // when we called get_current_directory. This way we avoid another buffer |
266 | // of size MAX_PATH. |
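// For example (illustrative): if buffer already holds "/home/user" from
// get_current_directory(), printbuffer can point at buffer + strlen(buffer) so
// that "/libfoo.so" is appended in place and os::stat() sees the full path.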
267 | static bool conc_path_file_and_check(char *buffer, char *printbuffer, size_t printbuflen, |
268 | const char* pname, char lastchar, const char* fname) { |
269 | |
270 | // Concatenate path and file name, but don't print double path separators. |
271 | const char *filesep = (WINDOWS_ONLY(lastchar == ':' ||) lastchar == os::file_separator()[0]) ? |
272 | "" : os::file_separator(); |
273 | int ret = jio_snprintf(printbuffer, printbuflen, "%s%s%s" , pname, filesep, fname); |
274 | // Check whether file exists. |
275 | if (ret != -1) { |
276 | struct stat statbuf; |
277 | return os::stat(buffer, &statbuf) == 0; |
278 | } |
279 | return false; |
280 | } |
281 | |
282 | bool os::dll_locate_lib(char *buffer, size_t buflen, |
283 | const char* pname, const char* fname) { |
284 | bool retval = false; |
285 | |
286 | size_t fullfnamelen = strlen(JNI_LIB_PREFIX) + strlen(fname) + strlen(JNI_LIB_SUFFIX); |
287 | char* fullfname = (char*)NEW_C_HEAP_ARRAY(char, fullfnamelen + 1, mtInternal); |
288 | if (dll_build_name(fullfname, fullfnamelen + 1, fname)) { |
289 | const size_t pnamelen = pname ? strlen(pname) : 0; |
290 | |
291 | if (pnamelen == 0) { |
292 | // If no path given, use current working directory. |
293 | const char* p = get_current_directory(buffer, buflen); |
294 | if (p != NULL) { |
295 | const size_t plen = strlen(buffer); |
296 | const char lastchar = buffer[plen - 1]; |
297 | retval = conc_path_file_and_check(buffer, &buffer[plen], buflen - plen, |
298 | "" , lastchar, fullfname); |
299 | } |
300 | } else if (strchr(pname, *os::path_separator()) != NULL) { |
301 | // A list of paths. Search for the path that contains the library. |
302 | int n; |
303 | char** pelements = split_path(pname, &n); |
304 | if (pelements != NULL) { |
305 | for (int i = 0; i < n; i++) { |
306 | char* path = pelements[i]; |
307 | // Really shouldn't be NULL, but check can't hurt. |
308 | size_t plen = (path == NULL) ? 0 : strlen(path); |
309 | if (plen == 0) { |
310 | continue; // Skip the empty path values. |
311 | } |
312 | const char lastchar = path[plen - 1]; |
313 | retval = conc_path_file_and_check(buffer, buffer, buflen, path, lastchar, fullfname); |
314 | if (retval) break; |
315 | } |
316 | // Release the storage allocated by split_path. |
317 | for (int i = 0; i < n; i++) { |
318 | if (pelements[i] != NULL) { |
319 | FREE_C_HEAP_ARRAY(char, pelements[i]); |
320 | } |
321 | } |
322 | FREE_C_HEAP_ARRAY(char*, pelements); |
323 | } |
324 | } else { |
325 | // A definite path. |
326 | const char lastchar = pname[pnamelen-1]; |
327 | retval = conc_path_file_and_check(buffer, buffer, buflen, pname, lastchar, fullfname); |
328 | } |
329 | } |
330 | |
  FREE_C_HEAP_ARRAY(char, fullfname);
332 | return retval; |
333 | } |
334 | |
335 | // --------------------- sun.misc.Signal (optional) --------------------- |
336 | |
337 | |
338 | // SIGBREAK is sent by the keyboard to query the VM state |
339 | #ifndef SIGBREAK |
340 | #define SIGBREAK SIGQUIT |
341 | #endif |
342 | |
343 | // sigexitnum_pd is a platform-specific special signal used for terminating the Signal thread. |
344 | |
345 | |
346 | static void signal_thread_entry(JavaThread* thread, TRAPS) { |
347 | os::set_priority(thread, NearMaxPriority); |
348 | while (true) { |
349 | int sig; |
350 | { |
351 | // FIXME : Currently we have not decided what should be the status |
352 | // for this java thread blocked here. Once we decide about |
353 | // that we should fix this. |
354 | sig = os::signal_wait(); |
355 | } |
356 | if (sig == os::sigexitnum_pd()) { |
357 | // Terminate the signal thread |
358 | return; |
359 | } |
360 | |
361 | switch (sig) { |
362 | case SIGBREAK: { |
363 | // Check if the signal is a trigger to start the Attach Listener - in that |
364 | // case don't print stack traces. |
365 | if (!DisableAttachMechanism && AttachListener::is_init_trigger()) { |
366 | continue; |
367 | } |
368 | // Print stack traces |
369 | // Any SIGBREAK operations added here should make sure to flush |
370 | // the output stream (e.g. tty->flush()) after output. See 4803766. |
371 | // Each module also prints an extra carriage return after its output. |
372 | VM_PrintThreads op; |
373 | VMThread::execute(&op); |
374 | VM_PrintJNI jni_op; |
375 | VMThread::execute(&jni_op); |
376 | VM_FindDeadlocks op1(tty); |
377 | VMThread::execute(&op1); |
378 | Universe::print_heap_at_SIGBREAK(); |
379 | if (PrintClassHistogram) { |
380 | VM_GC_HeapInspection op1(tty, true /* force full GC before heap inspection */); |
381 | VMThread::execute(&op1); |
382 | } |
383 | if (JvmtiExport::should_post_data_dump()) { |
384 | JvmtiExport::post_data_dump(); |
385 | } |
386 | break; |
387 | } |
388 | default: { |
389 | // Dispatch the signal to java |
390 | HandleMark hm(THREAD); |
391 | Klass* klass = SystemDictionary::resolve_or_null(vmSymbols::jdk_internal_misc_Signal(), THREAD); |
392 | if (klass != NULL) { |
393 | JavaValue result(T_VOID); |
394 | JavaCallArguments args; |
395 | args.push_int(sig); |
396 | JavaCalls::call_static( |
397 | &result, |
398 | klass, |
399 | vmSymbols::dispatch_name(), |
400 | vmSymbols::int_void_signature(), |
401 | &args, |
402 | THREAD |
403 | ); |
404 | } |
405 | if (HAS_PENDING_EXCEPTION) { |
406 | // tty is initialized early so we don't expect it to be null, but |
407 | // if it is we can't risk doing an initialization that might |
408 | // trigger additional out-of-memory conditions |
409 | if (tty != NULL) { |
410 | char klass_name[256]; |
411 | char tmp_sig_name[16]; |
412 | const char* sig_name = "UNKNOWN" ; |
413 | InstanceKlass::cast(PENDING_EXCEPTION->klass())-> |
414 | name()->as_klass_external_name(klass_name, 256); |
415 | if (os::exception_name(sig, tmp_sig_name, 16) != NULL) |
416 | sig_name = tmp_sig_name; |
          warning("Exception %s occurred dispatching signal %s to handler"
                  " - the VM may need to be forcibly terminated",
                  klass_name, sig_name);
420 | } |
421 | CLEAR_PENDING_EXCEPTION; |
422 | } |
423 | } |
424 | } |
425 | } |
426 | } |
427 | |
428 | void os::init_before_ergo() { |
429 | initialize_initial_active_processor_count(); |
430 | // We need to initialize large page support here because ergonomics takes some |
431 | // decisions depending on large page support and the calculated large page size. |
432 | large_page_init(); |
433 | |
434 | // We need to adapt the configured number of stack protection pages given |
435 | // in 4K pages to the actual os page size. We must do this before setting |
436 | // up minimal stack sizes etc. in os::init_2(). |
437 | JavaThread::set_stack_red_zone_size (align_up(StackRedPages * 4 * K, vm_page_size())); |
438 | JavaThread::set_stack_yellow_zone_size (align_up(StackYellowPages * 4 * K, vm_page_size())); |
439 | JavaThread::set_stack_reserved_zone_size(align_up(StackReservedPages * 4 * K, vm_page_size())); |
440 | JavaThread::set_stack_shadow_zone_size (align_up(StackShadowPages * 4 * K, vm_page_size())); |
441 | |
442 | // VM version initialization identifies some characteristics of the |
443 | // platform that are used during ergonomic decisions. |
444 | VM_Version::init_before_ergo(); |
445 | } |
446 | |
447 | void os::initialize_jdk_signal_support(TRAPS) { |
448 | if (!ReduceSignalUsage) { |
449 | // Setup JavaThread for processing signals |
450 | const char thread_name[] = "Signal Dispatcher" ; |
451 | Handle string = java_lang_String::create_from_str(thread_name, CHECK); |
452 | |
453 | // Initialize thread_oop to put it into the system threadGroup |
454 | Handle thread_group (THREAD, Universe::system_thread_group()); |
455 | Handle thread_oop = JavaCalls::construct_new_instance(SystemDictionary::Thread_klass(), |
456 | vmSymbols::threadgroup_string_void_signature(), |
457 | thread_group, |
458 | string, |
459 | CHECK); |
460 | |
461 | Klass* group = SystemDictionary::ThreadGroup_klass(); |
462 | JavaValue result(T_VOID); |
463 | JavaCalls::call_special(&result, |
464 | thread_group, |
465 | group, |
466 | vmSymbols::add_method_name(), |
467 | vmSymbols::thread_void_signature(), |
468 | thread_oop, |
469 | CHECK); |
470 | |
471 | { MutexLocker mu(Threads_lock); |
472 | JavaThread* signal_thread = new JavaThread(&signal_thread_entry); |
473 | |
474 | // At this point it may be possible that no osthread was created for the |
475 | // JavaThread due to lack of memory. We would have to throw an exception |
476 | // in that case. However, since this must work and we do not allow |
477 | // exceptions anyway, check and abort if this fails. |
478 | if (signal_thread == NULL || signal_thread->osthread() == NULL) { |
479 | vm_exit_during_initialization("java.lang.OutOfMemoryError" , |
480 | os::native_thread_creation_failed_msg()); |
481 | } |
482 | |
483 | java_lang_Thread::set_thread(thread_oop(), signal_thread); |
484 | java_lang_Thread::set_priority(thread_oop(), NearMaxPriority); |
485 | java_lang_Thread::set_daemon(thread_oop()); |
486 | |
487 | signal_thread->set_threadObj(thread_oop()); |
488 | Threads::add(signal_thread); |
489 | Thread::start(signal_thread); |
490 | } |
491 | // Handle ^BREAK |
492 | os::signal(SIGBREAK, os::user_handler()); |
493 | } |
494 | } |
495 | |
496 | |
497 | void os::terminate_signal_thread() { |
498 | if (!ReduceSignalUsage) |
499 | signal_notify(sigexitnum_pd()); |
500 | } |
501 | |
502 | |
503 | // --------------------- loading libraries --------------------- |
504 | |
505 | typedef jint (JNICALL *JNI_OnLoad_t)(JavaVM *, void *); |
506 | extern struct JavaVM_ main_vm; |
507 | |
508 | static void* _native_java_library = NULL; |
509 | |
510 | void* os::native_java_library() { |
511 | if (_native_java_library == NULL) { |
512 | char buffer[JVM_MAXPATHLEN]; |
513 | char ebuf[1024]; |
514 | |
515 | // Try to load verify dll first. In 1.3 java dll depends on it and is not |
516 | // always able to find it when the loading executable is outside the JDK. |
517 | // In order to keep working with 1.2 we ignore any loading errors. |
518 | if (dll_locate_lib(buffer, sizeof(buffer), Arguments::get_dll_dir(), |
519 | "verify" )) { |
520 | dll_load(buffer, ebuf, sizeof(ebuf)); |
521 | } |
522 | |
523 | // Load java dll |
524 | if (dll_locate_lib(buffer, sizeof(buffer), Arguments::get_dll_dir(), |
525 | "java" )) { |
526 | _native_java_library = dll_load(buffer, ebuf, sizeof(ebuf)); |
527 | } |
528 | if (_native_java_library == NULL) { |
529 | vm_exit_during_initialization("Unable to load native library" , ebuf); |
530 | } |
531 | |
532 | #if defined(__OpenBSD__) |
533 | // Work-around OpenBSD's lack of $ORIGIN support by pre-loading libnet.so |
534 | // ignore errors |
535 | if (dll_locate_lib(buffer, sizeof(buffer), Arguments::get_dll_dir(), |
536 | "net" )) { |
537 | dll_load(buffer, ebuf, sizeof(ebuf)); |
538 | } |
539 | #endif |
540 | } |
541 | return _native_java_library; |
542 | } |
543 | |
544 | /* |
545 | * Support for finding Agent_On(Un)Load/Attach<_lib_name> if it exists. |
546 | * If check_lib == true then we are looking for an |
547 | * Agent_OnLoad_lib_name or Agent_OnAttach_lib_name function to determine if |
548 | * this library is statically linked into the image. |
549 | * If check_lib == false then we will look for the appropriate symbol in the |
550 | * executable if agent_lib->is_static_lib() == true or in the shared library |
551 | * referenced by 'handle'. |
552 | */ |
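// For example (illustrative): for an agent library named "foo", a statically
// linked agent is expected to export Agent_OnLoad_foo / Agent_OnAttach_foo,
// which is what the _lib_name suffix mentioned above refers to.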
553 | void* os::find_agent_function(AgentLibrary *agent_lib, bool check_lib, |
554 | const char *syms[], size_t syms_len) { |
555 | assert(agent_lib != NULL, "sanity check" ); |
556 | const char *lib_name; |
557 | void *handle = agent_lib->os_lib(); |
558 | void *entryName = NULL; |
559 | char *agent_function_name; |
560 | size_t i; |
561 | |
  // If checking, use the agent name; otherwise test is_static_lib() to
  // see how to process this lookup.
564 | lib_name = ((check_lib || agent_lib->is_static_lib()) ? agent_lib->name() : NULL); |
565 | for (i = 0; i < syms_len; i++) { |
566 | agent_function_name = build_agent_function_name(syms[i], lib_name, agent_lib->is_absolute_path()); |
567 | if (agent_function_name == NULL) { |
568 | break; |
569 | } |
570 | entryName = dll_lookup(handle, agent_function_name); |
571 | FREE_C_HEAP_ARRAY(char, agent_function_name); |
572 | if (entryName != NULL) { |
573 | break; |
574 | } |
575 | } |
576 | return entryName; |
577 | } |
578 | |
579 | // See if the passed in agent is statically linked into the VM image. |
580 | bool os::find_builtin_agent(AgentLibrary *agent_lib, const char *syms[], |
581 | size_t syms_len) { |
582 | void *ret; |
583 | void *proc_handle; |
584 | void *save_handle; |
585 | |
586 | assert(agent_lib != NULL, "sanity check" ); |
587 | if (agent_lib->name() == NULL) { |
588 | return false; |
589 | } |
590 | proc_handle = get_default_process_handle(); |
591 | // Check for Agent_OnLoad/Attach_lib_name function |
592 | save_handle = agent_lib->os_lib(); |
593 | // We want to look in this process' symbol table. |
594 | agent_lib->set_os_lib(proc_handle); |
595 | ret = find_agent_function(agent_lib, true, syms, syms_len); |
596 | if (ret != NULL) { |
597 | // Found an entry point like Agent_OnLoad_lib_name so we have a static agent |
598 | agent_lib->set_valid(); |
599 | agent_lib->set_static_lib(true); |
600 | return true; |
601 | } |
602 | agent_lib->set_os_lib(save_handle); |
603 | return false; |
604 | } |
605 | |
606 | // --------------------- heap allocation utilities --------------------- |
607 | |
608 | char *os::strdup(const char *str, MEMFLAGS flags) { |
609 | size_t size = strlen(str); |
610 | char *dup_str = (char *)malloc(size + 1, flags); |
611 | if (dup_str == NULL) return NULL; |
612 | strcpy(dup_str, str); |
613 | return dup_str; |
614 | } |
615 | |
616 | char* os::strdup_check_oom(const char* str, MEMFLAGS flags) { |
617 | char* p = os::strdup(str, flags); |
618 | if (p == NULL) { |
619 | vm_exit_out_of_memory(strlen(str) + 1, OOM_MALLOC_ERROR, "os::strdup_check_oom" ); |
620 | } |
621 | return p; |
622 | } |
623 | |
624 | |
625 | #define paranoid 0 /* only set to 1 if you suspect checking code has bug */ |
626 | |
627 | #ifdef ASSERT |
628 | |
629 | static void verify_memory(void* ptr) { |
630 | GuardedMemory guarded(ptr); |
631 | if (!guarded.verify_guards()) { |
632 | LogTarget(Warning, malloc, free) lt; |
633 | ResourceMark rm; |
634 | LogStream ls(lt); |
635 | ls.print_cr("## nof_mallocs = " UINT64_FORMAT ", nof_frees = " UINT64_FORMAT, os::num_mallocs, os::num_frees); |
636 | ls.print_cr("## memory stomp:" ); |
637 | guarded.print_on(&ls); |
638 | fatal("memory stomping error" ); |
639 | } |
640 | } |
641 | |
642 | #endif |
643 | |
644 | // |
645 | // This function supports testing of the malloc out of memory |
646 | // condition without really running the system out of memory. |
647 | // |
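// For example (illustrative), running with -XX:MallocMaxTestWords=100000 makes
// os::malloc() start returning NULL once roughly 100000 words have been
// handed out, without the process actually exhausting native memory.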
648 | static bool has_reached_max_malloc_test_peak(size_t alloc_size) { |
649 | if (MallocMaxTestWords > 0) { |
650 | size_t words = (alloc_size / BytesPerWord); |
651 | |
652 | if ((cur_malloc_words + words) > MallocMaxTestWords) { |
653 | return true; |
654 | } |
655 | Atomic::add(words, &cur_malloc_words); |
656 | } |
657 | return false; |
658 | } |
659 | |
660 | void* os::malloc(size_t size, MEMFLAGS flags) { |
661 | return os::malloc(size, flags, CALLER_PC); |
662 | } |
663 | |
664 | void* os::malloc(size_t size, MEMFLAGS memflags, const NativeCallStack& stack) { |
665 | NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1)); |
666 | NOT_PRODUCT(inc_stat_counter(&alloc_bytes, size)); |
667 | |
668 | // Since os::malloc can be called when the libjvm.{dll,so} is |
669 | // first loaded and we don't have a thread yet we must accept NULL also here. |
670 | assert(!os::ThreadCrashProtection::is_crash_protected(Thread::current_or_null()), |
671 | "malloc() not allowed when crash protection is set" ); |
672 | |
673 | if (size == 0) { |
674 | // return a valid pointer if size is zero |
675 | // if NULL is returned the calling functions assume out of memory. |
676 | size = 1; |
677 | } |
678 | |
679 | // NMT support |
680 | NMT_TrackingLevel level = MemTracker::tracking_level(); |
  size_t nmt_header_size = MemTracker::malloc_header_size(level);
682 | |
683 | #ifndef ASSERT |
684 | const size_t alloc_size = size + nmt_header_size; |
685 | #else |
686 | const size_t alloc_size = GuardedMemory::get_total_size(size + nmt_header_size); |
687 | if (size + nmt_header_size > alloc_size) { // Check for rollover. |
688 | return NULL; |
689 | } |
690 | #endif |
691 | |
692 | // For the test flag -XX:MallocMaxTestWords |
693 | if (has_reached_max_malloc_test_peak(size)) { |
694 | return NULL; |
695 | } |
696 | |
697 | u_char* ptr; |
698 | ptr = (u_char*)::malloc(alloc_size); |
699 | |
700 | #ifdef ASSERT |
701 | if (ptr == NULL) { |
702 | return NULL; |
703 | } |
704 | // Wrap memory with guard |
705 | GuardedMemory guarded(ptr, size + nmt_header_size); |
706 | ptr = guarded.get_user_ptr(); |
707 | |
708 | if ((intptr_t)ptr == (intptr_t)MallocCatchPtr) { |
709 | log_warning(malloc, free)("os::malloc caught, " SIZE_FORMAT " bytes --> " PTR_FORMAT, size, p2i(ptr)); |
710 | breakpoint(); |
711 | } |
712 | if (paranoid) { |
713 | verify_memory(ptr); |
714 | } |
715 | #endif |
716 | |
717 | // we do not track guard memory |
718 | return MemTracker::record_malloc((address)ptr, size, memflags, stack, level); |
719 | } |
720 | |
721 | void* os::realloc(void *memblock, size_t size, MEMFLAGS flags) { |
722 | return os::realloc(memblock, size, flags, CALLER_PC); |
723 | } |
724 | |
725 | void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, const NativeCallStack& stack) { |
726 | |
727 | // For the test flag -XX:MallocMaxTestWords |
728 | if (has_reached_max_malloc_test_peak(size)) { |
729 | return NULL; |
730 | } |
731 | |
732 | if (size == 0) { |
733 | // return a valid pointer if size is zero |
734 | // if NULL is returned the calling functions assume out of memory. |
735 | size = 1; |
736 | } |
737 | |
738 | #ifndef ASSERT |
739 | NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1)); |
740 | NOT_PRODUCT(inc_stat_counter(&alloc_bytes, size)); |
741 | // NMT support |
742 | void* membase = MemTracker::record_free(memblock); |
743 | NMT_TrackingLevel level = MemTracker::tracking_level(); |
  size_t nmt_header_size = MemTracker::malloc_header_size(level);
745 | void* ptr = ::realloc(membase, size + nmt_header_size); |
746 | return MemTracker::record_malloc(ptr, size, memflags, stack, level); |
747 | #else |
748 | if (memblock == NULL) { |
749 | return os::malloc(size, memflags, stack); |
750 | } |
751 | if ((intptr_t)memblock == (intptr_t)MallocCatchPtr) { |
752 | log_warning(malloc, free)("os::realloc caught " PTR_FORMAT, p2i(memblock)); |
753 | breakpoint(); |
754 | } |
755 | // NMT support |
756 | void* membase = MemTracker::malloc_base(memblock); |
757 | verify_memory(membase); |
758 | // always move the block |
759 | void* ptr = os::malloc(size, memflags, stack); |
760 | // Copy to new memory if malloc didn't fail |
761 | if (ptr != NULL ) { |
762 | GuardedMemory guarded(MemTracker::malloc_base(memblock)); |
763 | // Guard's user data contains NMT header |
764 | size_t memblock_size = guarded.get_user_size() - MemTracker::malloc_header_size(memblock); |
765 | memcpy(ptr, memblock, MIN2(size, memblock_size)); |
766 | if (paranoid) { |
767 | verify_memory(MemTracker::malloc_base(ptr)); |
768 | } |
769 | os::free(memblock); |
770 | } |
771 | return ptr; |
772 | #endif |
773 | } |
774 | |
775 | |
776 | void os::free(void *memblock) { |
777 | NOT_PRODUCT(inc_stat_counter(&num_frees, 1)); |
778 | #ifdef ASSERT |
779 | if (memblock == NULL) return; |
780 | if ((intptr_t)memblock == (intptr_t)MallocCatchPtr) { |
781 | log_warning(malloc, free)("os::free caught " PTR_FORMAT, p2i(memblock)); |
782 | breakpoint(); |
783 | } |
784 | void* membase = MemTracker::record_free(memblock); |
785 | verify_memory(membase); |
786 | |
787 | GuardedMemory guarded(membase); |
788 | size_t size = guarded.get_user_size(); |
789 | inc_stat_counter(&free_bytes, size); |
790 | membase = guarded.release_for_freeing(); |
791 | ::free(membase); |
792 | #else |
793 | void* membase = MemTracker::record_free(memblock); |
794 | ::free(membase); |
795 | #endif |
796 | } |
797 | |
798 | void os::init_random(unsigned int initval) { |
799 | _rand_seed = initval; |
800 | } |
801 | |
802 | |
803 | static int random_helper(unsigned int rand_seed) { |
804 | /* standard, well-known linear congruential random generator with |
805 | * next_rand = (16807*seed) mod (2**31-1) |
806 | * see |
807 | * (1) "Random Number Generators: Good Ones Are Hard to Find", |
808 | * S.K. Park and K.W. Miller, Communications of the ACM 31:10 (Oct 1988), |
809 | * (2) "Two Fast Implementations of the 'Minimal Standard' Random |
810 | * Number Generator", David G. Carta, Comm. ACM 33, 1 (Jan 1990), pp. 87-88. |
811 | */ |
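  // Worked example (illustrative): with a seed of 1 the next value is
  // 16807 * 1 mod (2^31 - 1) = 16807.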
812 | const unsigned int a = 16807; |
813 | const unsigned int m = 2147483647; |
814 | const int q = m / a; assert(q == 127773, "weird math" ); |
815 | const int r = m % a; assert(r == 2836, "weird math" ); |
816 | |
817 | // compute az=2^31p+q |
818 | unsigned int lo = a * (rand_seed & 0xFFFF); |
819 | unsigned int hi = a * (rand_seed >> 16); |
820 | lo += (hi & 0x7FFF) << 16; |
821 | |
822 | // if q overflowed, ignore the overflow and increment q |
823 | if (lo > m) { |
824 | lo &= m; |
825 | ++lo; |
826 | } |
827 | lo += hi >> 15; |
828 | |
829 | // if (p+q) overflowed, ignore the overflow and increment (p+q) |
830 | if (lo > m) { |
831 | lo &= m; |
832 | ++lo; |
833 | } |
834 | return lo; |
835 | } |
836 | |
837 | int os::random() { |
838 | // Make updating the random seed thread safe. |
839 | while (true) { |
840 | unsigned int seed = _rand_seed; |
841 | unsigned int rand = random_helper(seed); |
842 | if (Atomic::cmpxchg(rand, &_rand_seed, seed) == seed) { |
843 | return static_cast<int>(rand); |
844 | } |
845 | } |
846 | } |
847 | |
848 | // The INITIALIZED state is distinguished from the SUSPENDED state because the |
849 | // conditions in which a thread is first started are different from those in which |
850 | // a suspension is resumed. These differences make it hard for us to apply the |
851 | // tougher checks when starting threads that we want to do when resuming them. |
852 | // However, when start_thread is called as a result of Thread.start, on a Java |
853 | // thread, the operation is synchronized on the Java Thread object. So there |
854 | // cannot be a race to start the thread and hence for the thread to exit while |
855 | // we are working on it. Non-Java threads that start Java threads either have |
856 | // to do so in a context in which races are impossible, or should do appropriate |
857 | // locking. |
858 | |
859 | void os::start_thread(Thread* thread) { |
860 | // guard suspend/resume |
861 | MutexLocker ml(thread->SR_lock(), Mutex::_no_safepoint_check_flag); |
862 | OSThread* osthread = thread->osthread(); |
863 | osthread->set_state(RUNNABLE); |
864 | pd_start_thread(thread); |
865 | } |
866 | |
867 | void os::abort(bool dump_core) { |
868 | abort(dump_core && CreateCoredumpOnCrash, NULL, NULL); |
869 | } |
870 | |
871 | //--------------------------------------------------------------------------- |
872 | // Helper functions for fatal error handler |
873 | |
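// For illustration, with unitsize == 4 each line of print_hex_dump output has
// the form
//   0x00007f1234567890: 00000001 0000002a 00000000 deadbeef
// (addresses and values are made up); unreadable memory prints as "????????".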
874 | void os::print_hex_dump(outputStream* st, address start, address end, int unitsize) { |
875 | assert(unitsize == 1 || unitsize == 2 || unitsize == 4 || unitsize == 8, "just checking" ); |
876 | |
877 | start = align_down(start, unitsize); |
878 | |
879 | int cols = 0; |
880 | int cols_per_line = 0; |
881 | switch (unitsize) { |
882 | case 1: cols_per_line = 16; break; |
883 | case 2: cols_per_line = 8; break; |
884 | case 4: cols_per_line = 4; break; |
885 | case 8: cols_per_line = 2; break; |
886 | default: return; |
887 | } |
888 | |
889 | address p = start; |
890 | st->print(PTR_FORMAT ": " , p2i(start)); |
891 | while (p < end) { |
892 | if (is_readable_pointer(p)) { |
893 | switch (unitsize) { |
894 | case 1: st->print("%02x" , *(u1*)p); break; |
895 | case 2: st->print("%04x" , *(u2*)p); break; |
896 | case 4: st->print("%08x" , *(u4*)p); break; |
897 | case 8: st->print("%016" FORMAT64_MODIFIER "x" , *(u8*)p); break; |
898 | } |
899 | } else { |
900 | st->print("%*.*s" , 2*unitsize, 2*unitsize, "????????????????" ); |
901 | } |
902 | p += unitsize; |
903 | cols++; |
904 | if (cols >= cols_per_line && p < end) { |
905 | cols = 0; |
906 | st->cr(); |
907 | st->print(PTR_FORMAT ": " , p2i(p)); |
908 | } else { |
909 | st->print(" " ); |
910 | } |
911 | } |
912 | st->cr(); |
913 | } |
914 | |
915 | void os::print_instructions(outputStream* st, address pc, int unitsize) { |
916 | st->print_cr("Instructions: (pc=" PTR_FORMAT ")" , p2i(pc)); |
917 | print_hex_dump(st, pc - 256, pc + 256, unitsize); |
918 | } |
919 | |
920 | void os::print_environment_variables(outputStream* st, const char** env_list) { |
921 | if (env_list) { |
922 | st->print_cr("Environment Variables:" ); |
923 | |
924 | for (int i = 0; env_list[i] != NULL; i++) { |
925 | char *envvar = ::getenv(env_list[i]); |
926 | if (envvar != NULL) { |
927 | st->print("%s" , env_list[i]); |
928 | st->print("=" ); |
929 | st->print_cr("%s" , envvar); |
930 | } |
931 | } |
932 | } |
933 | } |
934 | |
935 | void os::print_cpu_info(outputStream* st, char* buf, size_t buflen) { |
936 | // cpu |
937 | st->print("CPU:" ); |
938 | st->print("total %d" , os::processor_count()); |
939 | // It's not safe to query number of active processors after crash |
940 | // st->print("(active %d)", os::active_processor_count()); but we can |
941 | // print the initial number of active processors. |
942 | // We access the raw value here because the assert in the accessor will |
943 | // fail if the crash occurs before initialization of this value. |
944 | st->print(" (initial active %d)" , _initial_active_processor_count); |
945 | st->print(" %s" , VM_Version::features_string()); |
946 | st->cr(); |
947 | pd_print_cpu_info(st, buf, buflen); |
948 | } |
949 | |
950 | // Print a one line string summarizing the cpu, number of cores, memory, and operating system version |
951 | void os::print_summary_info(outputStream* st, char* buf, size_t buflen) { |
952 | st->print("Host: " ); |
953 | #ifndef PRODUCT |
954 | if (get_host_name(buf, buflen)) { |
955 | st->print("%s, " , buf); |
956 | } |
957 | #endif // PRODUCT |
958 | get_summary_cpu_info(buf, buflen); |
959 | st->print("%s, " , buf); |
960 | size_t mem = physical_memory()/G; |
961 | if (mem == 0) { // for low memory systems |
962 | mem = physical_memory()/M; |
963 | st->print("%d cores, " SIZE_FORMAT "M, " , processor_count(), mem); |
964 | } else { |
965 | st->print("%d cores, " SIZE_FORMAT "G, " , processor_count(), mem); |
966 | } |
967 | get_summary_os_info(buf, buflen); |
968 | st->print_raw(buf); |
969 | st->cr(); |
970 | } |
971 | |
972 | void os::print_date_and_time(outputStream *st, char* buf, size_t buflen) { |
973 | const int secs_per_day = 86400; |
974 | const int secs_per_hour = 3600; |
975 | const int secs_per_min = 60; |
976 | |
977 | time_t tloc; |
978 | (void)time(&tloc); |
979 | char* timestring = ctime(&tloc); // ctime adds newline. |
980 | // edit out the newline |
981 | char* nl = strchr(timestring, '\n'); |
982 | if (nl != NULL) { |
983 | *nl = '\0'; |
984 | } |
985 | |
986 | struct tm tz; |
987 | if (localtime_pd(&tloc, &tz) != NULL) { |
988 | ::strftime(buf, buflen, "%Z" , &tz); |
989 | st->print("Time: %s %s" , timestring, buf); |
990 | } else { |
991 | st->print("Time: %s" , timestring); |
992 | } |
993 | |
994 | double t = os::elapsedTime(); |
995 | // NOTE: It tends to crash after a SEGV if we want to printf("%f",...) in |
996 | // Linux. Must be a bug in glibc ? Workaround is to round "t" to int |
997 | // before printf. We lost some precision, but who cares? |
998 | int eltime = (int)t; // elapsed time in seconds |
999 | |
1000 | // print elapsed time in a human-readable format: |
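  // Worked example (illustrative): eltime == 90061 is reported as
  // "90061 seconds (1d 1h 1m 1s)".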
1001 | int eldays = eltime / secs_per_day; |
1002 | int day_secs = eldays * secs_per_day; |
1003 | int elhours = (eltime - day_secs) / secs_per_hour; |
1004 | int hour_secs = elhours * secs_per_hour; |
1005 | int elmins = (eltime - day_secs - hour_secs) / secs_per_min; |
1006 | int minute_secs = elmins * secs_per_min; |
1007 | int elsecs = (eltime - day_secs - hour_secs - minute_secs); |
1008 | st->print_cr(" elapsed time: %d seconds (%dd %dh %dm %ds)" , eltime, eldays, elhours, elmins, elsecs); |
1009 | } |
1010 | |
1011 | |
1012 | // Check if pointer can be read from (4-byte read access). |
1013 | // Helps to prove validity of a not-NULL pointer. |
1014 | // Returns true in very early stages of VM life when stub is not yet generated. |
1015 | #define SAFEFETCH_DEFAULT true |
1016 | bool os::is_readable_pointer(const void* p) { |
1017 | if (!CanUseSafeFetch32()) { |
1018 | return SAFEFETCH_DEFAULT; |
1019 | } |
1020 | int* const aligned = (int*) align_down((intptr_t)p, 4); |
1021 | int cafebabe = 0xcafebabe; // tester value 1 |
1022 | int deadbeef = 0xdeadbeef; // tester value 2 |
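  // Two different tester values are used so that memory which happens to
  // contain one of them cannot masquerade as an unreadable location.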
1023 | return (SafeFetch32(aligned, cafebabe) != cafebabe) || (SafeFetch32(aligned, deadbeef) != deadbeef); |
1024 | } |
1025 | |
1026 | bool os::is_readable_range(const void* from, const void* to) { |
1027 | if ((uintptr_t)from >= (uintptr_t)to) return false; |
1028 | for (uintptr_t p = align_down((uintptr_t)from, min_page_size()); p < (uintptr_t)to; p += min_page_size()) { |
1029 | if (!is_readable_pointer((const void*)p)) { |
1030 | return false; |
1031 | } |
1032 | } |
1033 | return true; |
1034 | } |
1035 | |
1036 | |
1037 | // moved from debug.cpp (used to be find()) but still called from there |
1038 | // The verbose parameter is only set by the debug code in one case |
1039 | void os::print_location(outputStream* st, intptr_t x, bool verbose) { |
1040 | address addr = (address)x; |
1041 | // Handle NULL first, so later checks don't need to protect against it. |
1042 | if (addr == NULL) { |
1043 | st->print_cr("0x0 is NULL" ); |
1044 | return; |
1045 | } |
1046 | |
1047 | // Check if addr points into a code blob. |
1048 | CodeBlob* b = CodeCache::find_blob_unsafe(addr); |
1049 | if (b != NULL) { |
1050 | b->dump_for_addr(addr, st, verbose); |
1051 | return; |
1052 | } |
1053 | |
1054 | // Check if addr points into Java heap. |
1055 | if (Universe::heap()->is_in(addr)) { |
1056 | oop o = oopDesc::oop_or_null(addr); |
1057 | if (o != NULL) { |
1058 | if ((HeapWord*)o == (HeapWord*)addr) { |
1059 | st->print(INTPTR_FORMAT " is an oop: " , p2i(addr)); |
1060 | } else { |
1061 | st->print(INTPTR_FORMAT " is pointing into object: " , p2i(addr)); |
1062 | } |
1063 | o->print_on(st); |
1064 | return; |
1065 | } |
1066 | } else if (Universe::heap()->is_in_reserved(addr)) { |
1067 | st->print_cr(INTPTR_FORMAT " is an unallocated location in the heap" , p2i(addr)); |
1068 | return; |
1069 | } |
1070 | |
1071 | // Compressed oop needs to be decoded first. |
1072 | #ifdef _LP64 |
1073 | if (UseCompressedOops && ((uintptr_t)addr &~ (uintptr_t)max_juint) == 0) { |
1074 | narrowOop narrow_oop = (narrowOop)(uintptr_t)addr; |
1075 | oop o = CompressedOops::decode_raw(narrow_oop); |
1076 | |
1077 | if (oopDesc::is_valid(o)) { |
1078 | st->print(UINT32_FORMAT " is a compressed pointer to object: " , narrow_oop); |
1079 | o->print_on(st); |
1080 | return; |
1081 | } |
1082 | } |
1083 | #endif |
1084 | |
1085 | bool accessible = is_readable_pointer(addr); |
1086 | |
1087 | // Check if addr is a JNI handle. |
1088 | if (align_down((intptr_t)addr, sizeof(intptr_t)) != 0 && accessible) { |
1089 | if (JNIHandles::is_global_handle((jobject) addr)) { |
1090 | st->print_cr(INTPTR_FORMAT " is a global jni handle" , p2i(addr)); |
1091 | return; |
1092 | } |
1093 | if (JNIHandles::is_weak_global_handle((jobject) addr)) { |
1094 | st->print_cr(INTPTR_FORMAT " is a weak global jni handle" , p2i(addr)); |
1095 | return; |
1096 | } |
1097 | #ifndef PRODUCT |
1098 | // we don't keep the block list in product mode |
1099 | if (JNIHandles::is_local_handle((jobject) addr)) { |
1100 | st->print_cr(INTPTR_FORMAT " is a local jni handle" , p2i(addr)); |
1101 | return; |
1102 | } |
1103 | #endif |
1104 | } |
1105 | |
1106 | // Check if addr belongs to a Java thread. |
1107 | for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) { |
1108 | // If the addr is a java thread print information about that. |
1109 | if (addr == (address)thread) { |
1110 | if (verbose) { |
1111 | thread->print_on(st); |
1112 | } else { |
1113 | st->print_cr(INTPTR_FORMAT " is a thread" , p2i(addr)); |
1114 | } |
1115 | return; |
1116 | } |
1117 | // If the addr is in the stack region for this thread then report that |
1118 | // and print thread info |
1119 | if (thread->on_local_stack(addr)) { |
1120 | st->print_cr(INTPTR_FORMAT " is pointing into the stack for thread: " |
1121 | INTPTR_FORMAT, p2i(addr), p2i(thread)); |
1122 | if (verbose) thread->print_on(st); |
1123 | return; |
1124 | } |
1125 | } |
1126 | |
1127 | // Check if in metaspace and print types that have vptrs |
1128 | if (Metaspace::contains(addr)) { |
1129 | if (Klass::is_valid((Klass*)addr)) { |
1130 | st->print_cr(INTPTR_FORMAT " is a pointer to class: " , p2i(addr)); |
1131 | ((Klass*)addr)->print_on(st); |
1132 | } else if (Method::is_valid_method((const Method*)addr)) { |
1133 | ((Method*)addr)->print_value_on(st); |
1134 | st->cr(); |
1135 | } else { |
1136 | // Use addr->print() from the debugger instead (not here) |
1137 | st->print_cr(INTPTR_FORMAT " is pointing into metadata" , p2i(addr)); |
1138 | } |
1139 | return; |
1140 | } |
1141 | |
1142 | // Compressed klass needs to be decoded first. |
1143 | #ifdef _LP64 |
1144 | if (UseCompressedClassPointers && ((uintptr_t)addr &~ (uintptr_t)max_juint) == 0) { |
1145 | narrowKlass narrow_klass = (narrowKlass)(uintptr_t)addr; |
1146 | Klass* k = CompressedKlassPointers::decode_raw(narrow_klass); |
1147 | |
1148 | if (Klass::is_valid(k)) { |
1149 | st->print_cr(UINT32_FORMAT " is a compressed pointer to class: " INTPTR_FORMAT, narrow_klass, p2i((HeapWord*)k)); |
1150 | k->print_on(st); |
1151 | return; |
1152 | } |
1153 | } |
1154 | #endif |
1155 | |
1156 | // Try an OS specific find |
1157 | if (os::find(addr, st)) { |
1158 | return; |
1159 | } |
1160 | |
1161 | if (accessible) { |
1162 | st->print(INTPTR_FORMAT " points into unknown readable memory:" , p2i(addr)); |
1163 | for (address p = addr; p < align_up(addr + 1, sizeof(intptr_t)); ++p) { |
1164 | st->print(" %02x" , *(u1*)p); |
1165 | } |
1166 | st->cr(); |
1167 | return; |
1168 | } |
1169 | |
1170 | st->print_cr(INTPTR_FORMAT " is an unknown value" , p2i(addr)); |
1171 | } |
1172 | |
1173 | // Looks like all platforms can use the same function to check if C |
1174 | // stack is walkable beyond current frame. The check for fp() is not |
1175 | // necessary on Sparc, but it's harmless. |
1176 | bool os::is_first_C_frame(frame* fr) { |
1177 | // Load up sp, fp, sender sp and sender fp, check for reasonable values. |
1178 | // Check usp first, because if that's bad the other accessors may fault |
1179 | // on some architectures. Ditto ufp second, etc. |
1180 | uintptr_t fp_align_mask = (uintptr_t)(sizeof(address)-1); |
1181 | // sp on amd can be 32 bit aligned. |
1182 | uintptr_t sp_align_mask = (uintptr_t)(sizeof(int)-1); |
1183 | |
1184 | uintptr_t usp = (uintptr_t)fr->sp(); |
1185 | if ((usp & sp_align_mask) != 0) return true; |
1186 | |
1187 | uintptr_t ufp = (uintptr_t)fr->fp(); |
1188 | if ((ufp & fp_align_mask) != 0) return true; |
1189 | |
1190 | uintptr_t old_sp = (uintptr_t)fr->sender_sp(); |
1191 | if ((old_sp & sp_align_mask) != 0) return true; |
1192 | if (old_sp == 0 || old_sp == (uintptr_t)-1) return true; |
1193 | |
1194 | uintptr_t old_fp = (uintptr_t)fr->link(); |
1195 | if ((old_fp & fp_align_mask) != 0) return true; |
1196 | if (old_fp == 0 || old_fp == (uintptr_t)-1 || old_fp == ufp) return true; |
1197 | |
1198 | // stack grows downwards; if old_fp is below current fp or if the stack |
1199 | // frame is too large, either the stack is corrupted or fp is not saved |
1200 | // on stack (i.e. on x86, ebp may be used as general register). The stack |
1201 | // is not walkable beyond current frame. |
1202 | if (old_fp < ufp) return true; |
1203 | if (old_fp - ufp > 64 * K) return true; |
1204 | |
1205 | return false; |
1206 | } |
1207 | |
1208 | |
1209 | // Set up the boot classpath. |
1210 | |
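// For example (illustrative): format_boot_path("%/lib/modules", "/opt/jdk", 8, '/', ':')
// yields "/opt/jdk/lib/modules", with '%' expanded to the JDK home directory and
// '/' and ':' rewritten to the platform's separator characters.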
1211 | char* os::format_boot_path(const char* format_string, |
1212 | const char* home, |
1213 | int home_len, |
1214 | char fileSep, |
1215 | char pathSep) { |
1216 | assert((fileSep == '/' && pathSep == ':') || |
1217 | (fileSep == '\\' && pathSep == ';'), "unexpected separator chars" ); |
1218 | |
1219 | // Scan the format string to determine the length of the actual |
1220 | // boot classpath, and handle platform dependencies as well. |
1221 | int formatted_path_len = 0; |
1222 | const char* p; |
1223 | for (p = format_string; *p != 0; ++p) { |
1224 | if (*p == '%') formatted_path_len += home_len - 1; |
1225 | ++formatted_path_len; |
1226 | } |
1227 | |
1228 | char* formatted_path = NEW_C_HEAP_ARRAY(char, formatted_path_len + 1, mtInternal); |
1229 | if (formatted_path == NULL) { |
1230 | return NULL; |
1231 | } |
1232 | |
1233 | // Create boot classpath from format, substituting separator chars and |
1234 | // java home directory. |
1235 | char* q = formatted_path; |
1236 | for (p = format_string; *p != 0; ++p) { |
1237 | switch (*p) { |
1238 | case '%': |
1239 | strcpy(q, home); |
1240 | q += home_len; |
1241 | break; |
1242 | case '/': |
1243 | *q++ = fileSep; |
1244 | break; |
1245 | case ':': |
1246 | *q++ = pathSep; |
1247 | break; |
1248 | default: |
1249 | *q++ = *p; |
1250 | } |
1251 | } |
1252 | *q = '\0'; |
1253 | |
1254 | assert((q - formatted_path) == formatted_path_len, "formatted_path size botched" ); |
1255 | return formatted_path; |
1256 | } |
1257 | |
// This function is a proxy for fopen; it tries to add a non-standard flag ('e' or 'N')
// that ensures the file is automatically closed on exec. If it cannot find support in
// the underlying C library, it makes an extra system call (fcntl) to ensure the file
// is closed on exec.
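// For example (illustrative): os::fopen(path, "r") behaves like ::fopen(path, "re")
// on Linux/BSD and like ::fopen(path, "rN") on Windows.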
1262 | FILE* os::fopen(const char* path, const char* mode) { |
1263 | char modified_mode[20]; |
1264 | assert(strlen(mode) + 1 < sizeof(modified_mode), "mode chars plus one extra must fit in buffer" ); |
1265 | sprintf(modified_mode, "%s" LINUX_ONLY("e" ) BSD_ONLY("e" ) WINDOWS_ONLY("N" ), mode); |
1266 | FILE* file = ::fopen(path, modified_mode); |
1267 | |
1268 | #if !(defined LINUX || defined BSD || defined _WINDOWS) |
1269 | // assume fcntl FD_CLOEXEC support as a backup solution when 'e' or 'N' |
1270 | // is not supported as mode in fopen |
1271 | if (file != NULL) { |
1272 | int fd = fileno(file); |
1273 | if (fd != -1) { |
1274 | int fd_flags = fcntl(fd, F_GETFD); |
1275 | if (fd_flags != -1) { |
1276 | fcntl(fd, F_SETFD, fd_flags | FD_CLOEXEC); |
1277 | } |
1278 | } |
1279 | } |
1280 | #endif |
1281 | |
1282 | return file; |
1283 | } |
1284 | |
1285 | bool os::set_boot_path(char fileSep, char pathSep) { |
1286 | const char* home = Arguments::get_java_home(); |
1287 | int home_len = (int)strlen(home); |
1288 | |
1289 | struct stat st; |
1290 | |
1291 | // modular image if "modules" jimage exists |
1292 | char* jimage = format_boot_path("%/lib/" MODULES_IMAGE_NAME, home, home_len, fileSep, pathSep); |
1293 | if (jimage == NULL) return false; |
1294 | bool has_jimage = (os::stat(jimage, &st) == 0); |
1295 | if (has_jimage) { |
1296 | Arguments::set_sysclasspath(jimage, true); |
1297 | FREE_C_HEAP_ARRAY(char, jimage); |
1298 | return true; |
1299 | } |
1300 | FREE_C_HEAP_ARRAY(char, jimage); |
1301 | |
1302 | // check if developer build with exploded modules |
1303 | char* base_classes = format_boot_path("%/modules/" JAVA_BASE_NAME, home, home_len, fileSep, pathSep); |
1304 | if (base_classes == NULL) return false; |
1305 | if (os::stat(base_classes, &st) == 0) { |
1306 | Arguments::set_sysclasspath(base_classes, false); |
1307 | FREE_C_HEAP_ARRAY(char, base_classes); |
1308 | return true; |
1309 | } |
1310 | FREE_C_HEAP_ARRAY(char, base_classes); |
1311 | |
1312 | return false; |
1313 | } |
1314 | |
/*
 * Splits a path based on its separator; the number of elements is returned in n.
 * It is the caller's responsibility to:
 *   a> check the value of n, which may be 0,
 *   b> ignore any empty path elements,
 *   c> free up the data.
 */
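// For example (illustrative), on a platform whose path separator is ':',
// split_path("/lib/a::/lib/b", &n) sets n to 3 and returns {"/lib/a", "", "/lib/b"};
// the empty middle element is the caller's to skip.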
1323 | char** os::split_path(const char* path, int* n) { |
1324 | *n = 0; |
1325 | if (path == NULL || strlen(path) == 0) { |
1326 | return NULL; |
1327 | } |
1328 | const char psepchar = *os::path_separator(); |
1329 | char* inpath = (char*)NEW_C_HEAP_ARRAY(char, strlen(path) + 1, mtInternal); |
1330 | if (inpath == NULL) { |
1331 | return NULL; |
1332 | } |
1333 | strcpy(inpath, path); |
1334 | int count = 1; |
1335 | char* p = strchr(inpath, psepchar); |
1336 | // Get a count of elements to allocate memory |
1337 | while (p != NULL) { |
1338 | count++; |
1339 | p++; |
1340 | p = strchr(p, psepchar); |
1341 | } |
1342 | char** opath = (char**) NEW_C_HEAP_ARRAY(char*, count, mtInternal); |
1343 | if (opath == NULL) { |
1344 | return NULL; |
1345 | } |
1346 | |
1347 | // do the actual splitting |
1348 | p = inpath; |
1349 | for (int i = 0 ; i < count ; i++) { |
1350 | size_t len = strcspn(p, os::path_separator()); |
1351 | if (len > JVM_MAXPATHLEN) { |
1352 | return NULL; |
1353 | } |
1354 | // allocate the string and add terminator storage |
1355 | char* s = (char*)NEW_C_HEAP_ARRAY(char, len + 1, mtInternal); |
1356 | if (s == NULL) { |
1357 | return NULL; |
1358 | } |
1359 | strncpy(s, p, len); |
1360 | s[len] = '\0'; |
1361 | opath[i] = s; |
1362 | p += len + 1; |
1363 | } |
1364 | FREE_C_HEAP_ARRAY(char, inpath); |
1365 | *n = count; |
1366 | return opath; |
1367 | } |
1368 | |
1369 | // Returns true if the current stack pointer is above the stack shadow |
1370 | // pages, false otherwise. |
1371 | bool os::stack_shadow_pages_available(Thread *thread, const methodHandle& method, address sp) { |
1372 | if (!thread->is_Java_thread()) return false; |
1373 | // Check if we have StackShadowPages above the yellow zone. This parameter |
1374 | // is dependent on the depth of the maximum VM call stack possible from |
1375 | // the handler for stack overflow. 'instanceof' in the stack overflow |
1376 | // handler or a println uses at least 8k stack of VM and native code |
1377 | // respectively. |
1378 | const int framesize_in_bytes = |
1379 | Interpreter::size_top_interpreter_activation(method()) * wordSize; |
1380 | |
1381 | address limit = ((JavaThread*)thread)->stack_end() + |
1382 | (JavaThread::stack_guard_zone_size() + JavaThread::stack_shadow_zone_size()); |
1383 | |
1384 | return sp > (limit + framesize_in_bytes); |
1385 | } |
1386 | |
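// For example (illustrative): with UseLargePages on and _page_sizes containing
// {2M, 4K}, page_size_for_region(8*M, 2, true) returns 2M because 8M / 2 leaves
// room for a 2M page and 8M is 2M-aligned; an unaligned or too-small region
// falls back to vm_page_size().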
1387 | size_t os::page_size_for_region(size_t region_size, size_t min_pages, bool must_be_aligned) { |
1388 | assert(min_pages > 0, "sanity" ); |
1389 | if (UseLargePages) { |
1390 | const size_t max_page_size = region_size / min_pages; |
1391 | |
1392 | for (size_t i = 0; _page_sizes[i] != 0; ++i) { |
1393 | const size_t page_size = _page_sizes[i]; |
1394 | if (page_size <= max_page_size) { |
1395 | if (!must_be_aligned || is_aligned(region_size, page_size)) { |
1396 | return page_size; |
1397 | } |
1398 | } |
1399 | } |
1400 | } |
1401 | |
1402 | return vm_page_size(); |
1403 | } |
1404 | |
1405 | size_t os::page_size_for_region_aligned(size_t region_size, size_t min_pages) { |
1406 | return page_size_for_region(region_size, min_pages, true); |
1407 | } |
1408 | |
1409 | size_t os::page_size_for_region_unaligned(size_t region_size, size_t min_pages) { |
1410 | return page_size_for_region(region_size, min_pages, false); |
1411 | } |

static const char* errno_to_string (int e, bool short_text) {
#define ALL_SHARED_ENUMS(X) \
  X(E2BIG, "Argument list too long") \
  X(EACCES, "Permission denied") \
  X(EADDRINUSE, "Address in use") \
  X(EADDRNOTAVAIL, "Address not available") \
  X(EAFNOSUPPORT, "Address family not supported") \
  X(EAGAIN, "Resource unavailable, try again") \
  X(EALREADY, "Connection already in progress") \
  X(EBADF, "Bad file descriptor") \
  X(EBADMSG, "Bad message") \
  X(EBUSY, "Device or resource busy") \
  X(ECANCELED, "Operation canceled") \
  X(ECHILD, "No child processes") \
  X(ECONNABORTED, "Connection aborted") \
  X(ECONNREFUSED, "Connection refused") \
  X(ECONNRESET, "Connection reset") \
  X(EDEADLK, "Resource deadlock would occur") \
  X(EDESTADDRREQ, "Destination address required") \
  X(EDOM, "Mathematics argument out of domain of function") \
  X(EEXIST, "File exists") \
  X(EFAULT, "Bad address") \
  X(EFBIG, "File too large") \
  X(EHOSTUNREACH, "Host is unreachable") \
  X(EIDRM, "Identifier removed") \
  X(EILSEQ, "Illegal byte sequence") \
  X(EINPROGRESS, "Operation in progress") \
  X(EINTR, "Interrupted function") \
  X(EINVAL, "Invalid argument") \
  X(EIO, "I/O error") \
  X(EISCONN, "Socket is connected") \
  X(EISDIR, "Is a directory") \
  X(ELOOP, "Too many levels of symbolic links") \
  X(EMFILE, "Too many open files") \
  X(EMLINK, "Too many links") \
  X(EMSGSIZE, "Message too large") \
  X(ENAMETOOLONG, "Filename too long") \
  X(ENETDOWN, "Network is down") \
  X(ENETRESET, "Connection aborted by network") \
  X(ENETUNREACH, "Network unreachable") \
  X(ENFILE, "Too many files open in system") \
  X(ENOBUFS, "No buffer space available") \
  X(ENODATA, "No message is available on the STREAM head read queue") \
  X(ENODEV, "No such device") \
  X(ENOENT, "No such file or directory") \
  X(ENOEXEC, "Executable file format error") \
  X(ENOLCK, "No locks available") \
  X(ENOLINK, "Reserved") \
  X(ENOMEM, "Not enough space") \
  X(ENOMSG, "No message of the desired type") \
  X(ENOPROTOOPT, "Protocol not available") \
  X(ENOSPC, "No space left on device") \
  X(ENOSR, "No STREAM resources") \
  X(ENOSTR, "Not a STREAM") \
  X(ENOSYS, "Function not supported") \
  X(ENOTCONN, "The socket is not connected") \
  X(ENOTDIR, "Not a directory") \
  X(ENOTEMPTY, "Directory not empty") \
  X(ENOTSOCK, "Not a socket") \
  X(ENOTSUP, "Not supported") \
  X(ENOTTY, "Inappropriate I/O control operation") \
  X(ENXIO, "No such device or address") \
  X(EOPNOTSUPP, "Operation not supported on socket") \
  X(EOVERFLOW, "Value too large to be stored in data type") \
  X(EPERM, "Operation not permitted") \
  X(EPIPE, "Broken pipe") \
  X(EPROTO, "Protocol error") \
  X(EPROTONOSUPPORT, "Protocol not supported") \
  X(EPROTOTYPE, "Protocol wrong type for socket") \
  X(ERANGE, "Result too large") \
  X(EROFS, "Read-only file system") \
  X(ESPIPE, "Invalid seek") \
  X(ESRCH, "No such process") \
  X(ETIME, "Stream ioctl() timeout") \
  X(ETIMEDOUT, "Connection timed out") \
  X(ETXTBSY, "Text file busy") \
  X(EWOULDBLOCK, "Operation would block") \
  X(EXDEV, "Cross-device link")

#define DEFINE_ENTRY(e, text) { e, #e, text },

  static const struct {
    int v;
    const char* short_text;
    const char* long_text;
  } table [] = {

    ALL_SHARED_ENUMS(DEFINE_ENTRY)

    // The following enums are not defined on all platforms.
#ifdef ESTALE
    DEFINE_ENTRY(ESTALE, "Reserved")
#endif
#ifdef EDQUOT
    DEFINE_ENTRY(EDQUOT, "Reserved")
#endif
#ifdef EMULTIHOP
    DEFINE_ENTRY(EMULTIHOP, "Reserved")
#endif

    // End marker.
    { -1, "Unknown errno", "Unknown error" }

  };

#undef DEFINE_ENTRY
#undef ALL_SHARED_ENUMS

  int i = 0;
  while (table[i].v != -1 && table[i].v != e) {
    i++;
  }

  return short_text ? table[i].short_text : table[i].long_text;

}

const char* os::strerror(int e) {
  return errno_to_string(e, false);
}

const char* os::errno_name(int e) {
  return errno_to_string(e, true);
}
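
// Illustrative use (a sketch): os::errno_name() returns the symbolic constant
// and os::strerror() the descriptive text; both return pointers to static
// strings, so unlike libc strerror() they are safe to call from any thread.
//
//   if (::close(fd) == -1) {
//     log_warning(os)("close failed: %s (%s)", os::strerror(errno), os::errno_name(errno));
//   }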

void os::trace_page_sizes(const char* str, const size_t* page_sizes, int count) {
  LogTarget(Info, pagesize) log;
  if (log.is_enabled()) {
    LogStream out(log);

    out.print("%s: ", str);
    for (int i = 0; i < count; ++i) {
      out.print(" " SIZE_FORMAT, page_sizes[i]);
    }
    out.cr();
  }
}

#define trace_page_size_params(size) byte_size_in_exact_unit(size), exact_unit_for_byte_size(size)

void os::trace_page_sizes(const char* str,
                          const size_t region_min_size,
                          const size_t region_max_size,
                          const size_t page_size,
                          const char* base,
                          const size_t size) {

  log_info(pagesize)("%s: "
                     " min=" SIZE_FORMAT "%s"
                     " max=" SIZE_FORMAT "%s"
                     " base=" PTR_FORMAT
                     " page_size=" SIZE_FORMAT "%s"
                     " size=" SIZE_FORMAT "%s",
                     str,
                     trace_page_size_params(region_min_size),
                     trace_page_size_params(region_max_size),
                     p2i(base),
                     trace_page_size_params(page_size),
                     trace_page_size_params(size));
}

void os::trace_page_sizes_for_requested_size(const char* str,
                                             const size_t requested_size,
                                             const size_t page_size,
                                             const size_t alignment,
                                             const char* base,
                                             const size_t size) {

  log_info(pagesize)("%s:"
                     " req_size=" SIZE_FORMAT "%s"
                     " base=" PTR_FORMAT
                     " page_size=" SIZE_FORMAT "%s"
                     " alignment=" SIZE_FORMAT "%s"
                     " size=" SIZE_FORMAT "%s",
                     str,
                     trace_page_size_params(requested_size),
                     p2i(base),
                     trace_page_size_params(page_size),
                     trace_page_size_params(alignment),
                     trace_page_size_params(size));
}
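
// Note: all of the tracing above goes to the "pagesize" log tag at info level,
// so it becomes visible once unified logging is enabled for that tag, e.g. (an
// illustrative invocation):
//
//   java -Xlog:pagesize -version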


// This is the working definition of a server class machine:
// >= 2 physical CPUs and >= 2GB of memory, with some fuzz
// because graphics memory sometimes masks part of the physical memory.
// If you want to change the definition of a server class machine
// on some OS or platform, e.g., >= 4GB on Windows platforms,
// then you'll have to parameterize this method based on that state,
// as was done for logical processors here, or replicate and
// specialize this method for each platform. (Or fix os to have
// some inheritance structure and use subclassing. Sigh.)
// If you want some platform to always or never behave as a server
// class machine, change the setting of AlwaysActAsServerClassMachine
// and NeverActAsServerClassMachine in globals*.hpp.
bool os::is_server_class_machine() {
  // First check for the early returns
  if (NeverActAsServerClassMachine) {
    return false;
  }
  if (AlwaysActAsServerClassMachine) {
    return true;
  }
  // Then actually look at the machine
  bool result = false;
  const unsigned int server_processors = 2;
  const julong server_memory = 2UL * G;
  // The reported amount of physical memory often falls a bit short of the
  // nominal amount, so allow up to 256M (1/8 of the 2G threshold) to be
  // "missing", e.g. memory claimed by the DIMM layout or by graphics cards.
  const julong missing_memory = 256UL * M;

  // Is this a server class machine?
  if ((os::active_processor_count() >= (int)server_processors) &&
      (os::physical_memory() >= (server_memory - missing_memory))) {
    const unsigned int logical_processors =
      VM_Version::logical_processors_per_package();
    if (logical_processors > 1) {
      const unsigned int physical_packages =
        os::active_processor_count() / logical_processors;
      if (physical_packages >= server_processors) {
        result = true;
      }
    } else {
      result = true;
    }
  }
  return result;
}
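
// Worked example (illustrative numbers, not VM constants): a machine with
// 4 active processors, 2 logical processors per package and 8G of physical
// memory passes the first test (4 >= 2 and 8G >= 2G - 256M) and has
// 4 / 2 = 2 physical packages >= 2, so it is treated as a server class
// machine; a single-CPU machine with 1G of memory is not.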

void os::initialize_initial_active_processor_count() {
  assert(_initial_active_processor_count == 0, "Initial active processor count already set.");
  _initial_active_processor_count = active_processor_count();
  log_debug(os)("Initial active processor count set to %d", _initial_active_processor_count);
}

void os::SuspendedThreadTask::run() {
  internal_do_task();
  _done = true;
}

bool os::create_stack_guard_pages(char* addr, size_t bytes) {
  return os::pd_create_stack_guard_pages(addr, bytes);
}

char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint, int file_desc) {
  char* result = NULL;

  if (file_desc != -1) {
    // We could have called pd_reserve_memory() followed by replace_existing_mapping_with_file_mapping(),
    // but on AIX the reservation may use SHM, in which case it is more trouble to detach the segment
    // and remap the memory to the file.
    result = os::map_memory_to_file(addr, bytes, file_desc);
    if (result != NULL) {
      MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC);
    }
  } else {
    result = pd_reserve_memory(bytes, addr, alignment_hint);
    if (result != NULL) {
      MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
    }
  }

  return result;
}
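
// Illustrative use (a sketch; where the descriptor comes from is an assumption):
// when the caller has opened a backing file for the heap, passing its descriptor
// turns the reservation into a file mapping that is both reserved and committed;
// passing file_desc == -1 yields a plain virtual-address reservation that still
// has to be committed before use.
//
//   char* base = os::reserve_memory(reservation_size, NULL, 0, heap_backing_fd);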

char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint,
                         MEMFLAGS flags) {
  char* result = pd_reserve_memory(bytes, addr, alignment_hint);
  if (result != NULL) {
    MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
    MemTracker::record_virtual_memory_type((address)result, flags);
  }

  return result;
}

char* os::attempt_reserve_memory_at(size_t bytes, char* addr, int file_desc) {
  char* result = NULL;
  if (file_desc != -1) {
    result = pd_attempt_reserve_memory_at(bytes, addr, file_desc);
    if (result != NULL) {
      MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC);
    }
  } else {
    result = pd_attempt_reserve_memory_at(bytes, addr);
    if (result != NULL) {
      MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
    }
  }
  return result;
}

void os::split_reserved_memory(char *base, size_t size,
                               size_t split, bool realloc) {
  pd_split_reserved_memory(base, size, split, realloc);
}

bool os::commit_memory(char* addr, size_t bytes, bool executable) {
  bool res = pd_commit_memory(addr, bytes, executable);
  if (res) {
    MemTracker::record_virtual_memory_commit((address)addr, bytes, CALLER_PC);
  }
  return res;
}

bool os::commit_memory(char* addr, size_t size, size_t alignment_hint,
                       bool executable) {
  bool res = os::pd_commit_memory(addr, size, alignment_hint, executable);
  if (res) {
    MemTracker::record_virtual_memory_commit((address)addr, size, CALLER_PC);
  }
  return res;
}

void os::commit_memory_or_exit(char* addr, size_t bytes, bool executable,
                               const char* mesg) {
  pd_commit_memory_or_exit(addr, bytes, executable, mesg);
  MemTracker::record_virtual_memory_commit((address)addr, bytes, CALLER_PC);
}

void os::commit_memory_or_exit(char* addr, size_t size, size_t alignment_hint,
                               bool executable, const char* mesg) {
  os::pd_commit_memory_or_exit(addr, size, alignment_hint, executable, mesg);
  MemTracker::record_virtual_memory_commit((address)addr, size, CALLER_PC);
}

bool os::uncommit_memory(char* addr, size_t bytes) {
  bool res;
  if (MemTracker::tracking_level() > NMT_minimal) {
    Tracker tkr(Tracker::uncommit);
    res = pd_uncommit_memory(addr, bytes);
    if (res) {
      tkr.record((address)addr, bytes);
    }
  } else {
    res = pd_uncommit_memory(addr, bytes);
  }
  return res;
}

bool os::release_memory(char* addr, size_t bytes) {
  bool res;
  if (MemTracker::tracking_level() > NMT_minimal) {
    Tracker tkr(Tracker::release);
    res = pd_release_memory(addr, bytes);
    if (res) {
      tkr.record((address)addr, bytes);
    }
  } else {
    res = pd_release_memory(addr, bytes);
  }
  return res;
}
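
// Illustrative lifecycle sketch (the size and flags chosen here are assumptions):
// a range is reserved, committed before use, and later uncommitted and released;
// each step above records the corresponding NMT event when tracking is enabled.
//
//   char* base = os::reserve_memory(4 * M, NULL, 0, mtInternal);
//   if (base != NULL && os::commit_memory(base, 4 * M, !ExecMem)) {
//     // ... use [base, base + 4 * M) ...
//     os::uncommit_memory(base, 4 * M);
//   }
//   if (base != NULL) {
//     os::release_memory(base, 4 * M);
//   }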

void os::pretouch_memory(void* start, void* end, size_t page_size) {
  for (volatile char *p = (char*)start; p < (char*)end; p += page_size) {
    *p = 0;
  }
}
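
// The loop above touches one byte in every page of [start, end) so that the OS
// backs the whole range with memory up front rather than faulting pages in on
// first use. Illustrative call (a sketch, not taken from a VM caller):
//
//   os::pretouch_memory(base, base + committed_size, os::vm_page_size());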

char* os::map_memory(int fd, const char* file_name, size_t file_offset,
                     char *addr, size_t bytes, bool read_only,
                     bool allow_exec) {
  char* result = pd_map_memory(fd, file_name, file_offset, addr, bytes, read_only, allow_exec);
  if (result != NULL) {
    MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC);
  }
  return result;
}

char* os::remap_memory(int fd, const char* file_name, size_t file_offset,
                       char *addr, size_t bytes, bool read_only,
                       bool allow_exec) {
  return pd_remap_memory(fd, file_name, file_offset, addr, bytes,
                         read_only, allow_exec);
}

bool os::unmap_memory(char *addr, size_t bytes) {
  bool result;
  if (MemTracker::tracking_level() > NMT_minimal) {
    Tracker tkr(Tracker::release);
    result = pd_unmap_memory(addr, bytes);
    if (result) {
      tkr.record((address)addr, bytes);
    }
  } else {
    result = pd_unmap_memory(addr, bytes);
  }
  return result;
}

void os::free_memory(char *addr, size_t bytes, size_t alignment_hint) {
  pd_free_memory(addr, bytes, alignment_hint);
}

void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
  pd_realign_memory(addr, bytes, alignment_hint);
}
#ifndef _WINDOWS
// Try to switch state from "from" to "to". Returns the state that is actually
// in place when the method completes: "to" if the transition succeeded,
// otherwise the state observed by the failed compare-and-exchange.
os::SuspendResume::State os::SuspendResume::switch_state(os::SuspendResume::State from,
                                                         os::SuspendResume::State to)
{
  os::SuspendResume::State result = Atomic::cmpxchg(to, &_state, from);
  if (result == from) {
    // success
    return to;
  }
  return result;
}
#endif
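
// Illustrative transition (a sketch; "sr" is a hypothetical os::SuspendResume
// instance and the enumerator names are those declared for it in os.hpp):
// requesting a suspend moves SR_RUNNING to SR_SUSPEND_REQUEST. On success the
// new state is returned; if another thread raced ahead, the state it installed
// is returned instead and nothing changes.
//
//   os::SuspendResume::State result =
//       sr.switch_state(os::SuspendResume::SR_RUNNING, os::SuspendResume::SR_SUSPEND_REQUEST);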