Fri, 06 Jul 2018 18:50:13 +0000
8146115: Improve docker container detection and resource configuration usage
Reviewed-by: bobv, dbuck
1 /*
2 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 // Must be at least Windows 2000 or XP to use IsDebuggerPresent
26 #define _WIN32_WINNT 0x500
28 // no precompiled headers
29 #include "classfile/classLoader.hpp"
30 #include "classfile/systemDictionary.hpp"
31 #include "classfile/vmSymbols.hpp"
32 #include "code/icBuffer.hpp"
33 #include "code/vtableStubs.hpp"
34 #include "compiler/compileBroker.hpp"
35 #include "compiler/disassembler.hpp"
36 #include "interpreter/interpreter.hpp"
37 #include "jvm_windows.h"
38 #include "memory/allocation.inline.hpp"
39 #include "memory/filemap.hpp"
40 #include "mutex_windows.inline.hpp"
41 #include "oops/oop.inline.hpp"
42 #include "os_share_windows.hpp"
43 #include "prims/jniFastGetField.hpp"
44 #include "prims/jvm.h"
45 #include "prims/jvm_misc.hpp"
46 #include "runtime/arguments.hpp"
47 #include "runtime/extendedPC.hpp"
48 #include "runtime/globals.hpp"
49 #include "runtime/interfaceSupport.hpp"
50 #include "runtime/java.hpp"
51 #include "runtime/javaCalls.hpp"
52 #include "runtime/mutexLocker.hpp"
53 #include "runtime/objectMonitor.hpp"
54 #include "runtime/orderAccess.inline.hpp"
55 #include "runtime/osThread.hpp"
56 #include "runtime/perfMemory.hpp"
57 #include "runtime/sharedRuntime.hpp"
58 #include "runtime/statSampler.hpp"
59 #include "runtime/stubRoutines.hpp"
60 #include "runtime/thread.inline.hpp"
61 #include "runtime/threadCritical.hpp"
62 #include "runtime/timer.hpp"
63 #include "services/attachListener.hpp"
64 #include "services/memTracker.hpp"
65 #include "services/runtimeService.hpp"
66 #include "utilities/decoder.hpp"
67 #include "utilities/defaultStream.hpp"
68 #include "utilities/events.hpp"
69 #include "utilities/growableArray.hpp"
70 #include "utilities/vmError.hpp"
72 #ifdef _DEBUG
73 #include <crtdbg.h>
74 #endif
77 #include <windows.h>
78 #include <sys/types.h>
79 #include <sys/stat.h>
80 #include <sys/timeb.h>
81 #include <objidl.h>
82 #include <shlobj.h>
84 #include <malloc.h>
85 #include <signal.h>
86 #include <direct.h>
87 #include <errno.h>
88 #include <fcntl.h>
89 #include <io.h>
90 #include <process.h> // For _beginthreadex(), _endthreadex()
91 #include <imagehlp.h> // For os::dll_address_to_function_name
92 /* for enumerating dll libraries */
93 #include <vdmdbg.h>
95 // for timer info max values which include all bits
96 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
98 // For DLL loading/load error detection
99 // Values of PE COFF
100 #define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c
101 #define IMAGE_FILE_SIGNATURE_LENGTH 4
// Handle/id of the primordial process and thread.
// NOTE(review): presumably initialized during VM startup (os::init) — the
// assignment is outside this chunk; confirm before relying on it.
static HANDLE main_process;
static HANDLE main_thread;
static int main_thread_id;

// Process FILETIME samples (100ns units); consumers are outside this chunk.
static FILETIME process_creation_time;
static FILETIME process_exit_time;
static FILETIME process_user_time;
static FILETIME process_kernel_time;

// Short architecture tag, selected from the MSVC target-architecture macros.
#ifdef _M_IA64
#define __CPU__ ia64
#else
#ifdef _M_AMD64
#define __CPU__ amd64
#else
#define __CPU__ i486
#endif
#endif
// Handle of the DLL containing the VM, recorded at process attach so that
// GetModuleFileName can later locate the jvm image on disk.
HINSTANCE vm_lib_handle;

// DLL entry point: remembers the module handle and, when the user asked for
// high-resolution timing (ForceTimeHighResolution), raises/restores the
// system timer interrupt period for the lifetime of the process.
BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
  if (reason == DLL_PROCESS_ATTACH) {
    vm_lib_handle = hinst;
    if (ForceTimeHighResolution) {
      timeBeginPeriod(1L);
    }
  } else if (reason == DLL_PROCESS_DETACH) {
    if (ForceTimeHighResolution) {
      timeEndPeriod(1L);
    }
  }
  return true;
}
// Convert a FILETIME (100ns ticks split across two 32-bit halves) into
// seconds as a double. The high half is scaled by (2^32 - 1)/10^7, exactly
// as the original ((unsigned int)~0) constant dictates.
static inline double fileTimeAsDouble(FILETIME* time) {
  const double kSplit = 10000000.0;
  const double kHigh  = (double) ((unsigned int) ~0);
  return (time->dwLowDateTime / kSplit) +
         time->dwHighDateTime * (kHigh / kSplit);
}
// Implementation of os

// Copy environment variable 'name' into 'buffer' (capacity 'len').
// GetEnvironmentVariable returns 0 on failure, and the required length when
// the buffer is too small — only a value strictly inside (0, len) means the
// value was fully copied.
bool os::getenv(const char* name, char* buffer, int len) {
  const int written = GetEnvironmentVariable(name, buffer, len);
  return written > 0 && written < len;
}
// Remove 'name' from the process environment. Passing NULL as the value
// deletes the variable.
bool os::unsetenv(const char* name) {
  assert(name != NULL, "Null pointer");
  return SetEnvironmentVariable(name, NULL) == TRUE;
}
164 // No setuid programs under Windows.
165 bool os::have_special_privileges() {
166 return false;
167 }
170 // This method is a periodic task to check for misbehaving JNI applications
171 // under CheckJNI, we can add any periodic checks here.
172 // For Windows at the moment does nothing
173 void os::run_periodic_checks() {
174 return;
175 }
#ifndef _WIN64
// previous UnhandledExceptionFilter, if there is one; saved so the 32-bit
// FP-exception filter installed below can chain to it.
static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;

// Forward declaration; installed via SetUnhandledExceptionFilter in
// os::init_system_properties_values() below.
LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);
#endif
// Compute and publish the path-related system properties: java.home,
// sun.boot.library.path (dll dir), the boot class path, java.library.path,
// java.ext.dirs and java.endorsed.dirs. Also installs the 32-bit FP
// exception filter. Allocation failures cause a silent early return,
// leaving later properties unset.
void os::init_system_properties_values() {
  /* sysclasspath, java_home, dll_dir */
  {
    char *home_path;
    char *dll_path;
    char *pslash;
    char *bin = "\\bin";
    char home_dir[MAX_PATH];

    // _ALT_JAVA_HOME_DIR overrides the home derived from jvm.dll's location.
    if (!getenv("_ALT_JAVA_HOME_DIR", home_dir, MAX_PATH)) {
      os::jvm_path(home_dir, sizeof(home_dir));
      // Found the full path to jvm.dll.
      // Now cut the path to <java_home>/jre if we can.
      *(strrchr(home_dir, '\\')) = '\0'; /* get rid of \jvm.dll */
      pslash = strrchr(home_dir, '\\');
      if (pslash != NULL) {
        *pslash = '\0'; /* get rid of \{client|server} */
        pslash = strrchr(home_dir, '\\');
        if (pslash != NULL)
          *pslash = '\0'; /* get rid of \bin */
      }
    }

    home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
    if (home_path == NULL)
      return;
    strcpy(home_path, home_dir);
    Arguments::set_java_home(home_path);

    dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1, mtInternal);
    if (dll_path == NULL)
      return;
    strcpy(dll_path, home_dir);
    strcat(dll_path, bin);
    Arguments::set_dll_dir(dll_path);

    if (!set_boot_path('\\', ';'))
      return;
  }

  /* library_path */
#define EXT_DIR "\\lib\\ext"
#define BIN_DIR "\\bin"
#define PACKAGE_DIR "\\Sun\\Java"
  {
    /* Win32 library search order (See the documentation for LoadLibrary):
     *
     * 1. The directory from which application is loaded.
     * 2. The system wide Java Extensions directory (Java only)
     * 3. System directory (GetSystemDirectory)
     * 4. Windows directory (GetWindowsDirectory)
     * 5. The PATH environment variable
     * 6. The current directory
     */

    char *library_path;
    char tmp[MAX_PATH];
    char *path_str = ::getenv("PATH");

    // Sizing: up to five MAX_PATH components plus the fixed suffixes,
    // separators and the user PATH.
    library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
                                    sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal);

    library_path[0] = '\0';

    // 1. directory of the launcher executable
    GetModuleFileName(NULL, tmp, sizeof(tmp));
    *(strrchr(tmp, '\\')) = '\0';
    strcat(library_path, tmp);

    // 2. system-wide Java Extensions directory
    GetWindowsDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);
    strcat(library_path, PACKAGE_DIR BIN_DIR);

    // 3. system directory
    GetSystemDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);

    // 4. Windows directory
    GetWindowsDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);

    // 5. user PATH, if any
    if (path_str) {
      strcat(library_path, ";");
      strcat(library_path, path_str);
    }

    // 6. current directory
    strcat(library_path, ";.");

    // set_library_path copies the string, so the scratch buffer is freed here.
    Arguments::set_library_path(library_path);
    FREE_C_HEAP_ARRAY(char, library_path, mtInternal);
  }

  /* Default extensions directory */
  {
    char path[MAX_PATH];
    char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1];
    GetWindowsDirectory(path, MAX_PATH);
    sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR,
            path, PACKAGE_DIR, EXT_DIR);
    Arguments::set_ext_dirs(buf);
  }
#undef EXT_DIR
#undef BIN_DIR
#undef PACKAGE_DIR

  /* Default endorsed standards directory. */
  {
#define ENDORSED_DIR "\\lib\\endorsed"
    // sizeof(ENDORSED_DIR) includes the NUL, so 'len' covers the terminator.
    size_t len = strlen(Arguments::get_java_home()) + sizeof(ENDORSED_DIR);
    char * buf = NEW_C_HEAP_ARRAY(char, len, mtInternal);
    sprintf(buf, "%s%s", Arguments::get_java_home(), ENDORSED_DIR);
    Arguments::set_endorsed_dirs(buf);
#undef ENDORSED_DIR
  }

#ifndef _WIN64
  // set our UnhandledExceptionFilter and save any previous one
  prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
#endif

  // Done
  return;
}
// Trap into an attached debugger (raises a breakpoint exception otherwise).
void os::breakpoint() {
  DebugBreak();
}

// Invoked from the BREAKPOINT macro; plain C linkage so it is easy to set
// a debugger breakpoint on by name.
extern "C" void breakpoint() {
  os::breakpoint();
}
/*
 * RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP.
 * So far, this method is only used by Native Memory Tracking, which is
 * only supported on Windows XP or later.
 */
// Capture up to 'frames' return addresses of the current thread into
// 'stack', skipping 'toSkip' callers (plus this frame itself); the unused
// tail of the buffer is NULL-filled. Returns the number of frames captured.
int os::get_native_stack(address* stack, int frames, int toSkip) {
#ifdef _NMT_NOINLINE_
  toSkip ++;
#endif
  int captured = Kernel32Dll::RtlCaptureStackBackTrace(toSkip + 1, frames,
                                                       (PVOID*)stack, NULL);
  int i = captured;
  while (i < frames) {
    stack[i] = NULL;
    i++;
  }
  return captured;
}
// os::current_stack_base()
//
// Returns the base of the stack, which is the stack's
// starting address. This function must be called
// while running on the stack of the thread being queried.

address os::current_stack_base() {
  MEMORY_BASIC_INFORMATION minfo;
  address stack_bottom;
  size_t stack_size;

  // VirtualQuery on the address of a local variable yields the memory
  // region containing this thread's stack; its AllocationBase is the
  // lowest reserved address of the stack.
  VirtualQuery(&minfo, &minfo, sizeof(minfo));
  stack_bottom = (address)minfo.AllocationBase;
  stack_size = minfo.RegionSize;

  // Add up the sizes of all the regions with the same
  // AllocationBase (the stack is reserved as one allocation but committed
  // in multiple regions).
  while( 1 )
  {
    VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo));
    if ( stack_bottom == (address)minfo.AllocationBase )
      stack_size += minfo.RegionSize;
    else
      break;
  }

#ifdef _M_IA64
  // IA64 has memory and register stacks
  //
  // This is the stack layout you get on NT/IA64 if you specify 1MB stack limit
  // at thread creation (1MB backing store growing upwards, 1MB memory stack
  // growing downwards, 2MB summed up)
  //
  // ...
  // ------- top of stack (high address) -----
  // |
  // |      1MB
  // |      Backing Store (Register Stack)
  // |
  // |         / \
  // |          |
  // |          |
  // |          |
  // ------------------------ stack base -----
  // |      1MB
  // |      Memory Stack
  // |
  // |          |
  // |          |
  // |          |
  // |         \ /
  // |
  // ----- bottom of stack (low address) -----
  // ...

  // Only the lower (memory-stack) half belongs to the conventional stack.
  stack_size = stack_size / 2;
#endif
  return stack_bottom + stack_size;
}
// Total reserved size of the current thread's stack: the distance from the
// stack's AllocationBase (found by querying the region containing a local
// variable) up to the base returned by current_stack_base().
size_t os::current_stack_size() {
  MEMORY_BASIC_INFORMATION info;
  VirtualQuery(&info, &info, sizeof(info));
  return (size_t)os::current_stack_base() - (size_t)info.AllocationBase;
}
403 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
404 const struct tm* time_struct_ptr = localtime(clock);
405 if (time_struct_ptr != NULL) {
406 *res = *time_struct_ptr;
407 return res;
408 }
409 return NULL;
410 }
// Forward declaration of the VM's top-level SEH filter (defined elsewhere).
LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);

// Thread start routine for all new Java threads
static unsigned __stdcall java_start(Thread* thread) {
  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.
  static int counter = 0;
  int pid = os::current_process_id();
  // Shift the stack pointer by 0..7 * 128 bytes before running the thread.
  _alloca(((pid ^ counter++) & 7) * 128);

  OSThread* osthr = thread->osthread();
  assert(osthr->get_state() == RUNNABLE, "invalid os thread state");

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // Install a win32 structured exception handler around every thread created
  // by VM, so VM can generate error dump when an exception occurred in non-
  // Java thread (e.g. VM thread).
  __try {
    thread->run();
  } __except(topLevelExceptionFilter(
             (_EXCEPTION_POINTERS*)_exception_info())) {
    // Nothing to do.
  }

  // One less thread is executing
  // When the VMThread gets here, the main thread may have already exited
  // which frees the CodeHeap containing the Atomic::add code
  if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
    Atomic::dec_ptr((intptr_t*)&os::win32::_os_thread_count);
  }

  return 0;
}
// Wrap an existing Win32 thread (handle + id) in a new OSThread object.
// Returns NULL if allocation or interrupt-event creation fails.
static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle, int thread_id) {
  OSThread* result = new OSThread(NULL, NULL);
  if (result == NULL) {
    return NULL;
  }

  // Manual-reset, initially unsignalled event used for Java interrupts.
  HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
  if (interrupt_event == NULL) {
    delete result;
    return NULL;
  }
  result->set_interrupt_event(interrupt_event);

  // Record the Win32 identity of the thread.
  result->set_thread_handle(thread_handle);
  result->set_thread_id(thread_id);

  if (UseNUMA) {
    int lgrp = os::numa_get_group_id();
    if (lgrp != -1) {
      thread->set_lgrp_id(lgrp);
    }
  }

  // Initial thread state is INITIALIZED, not SUSPENDED
  result->set_state(INITIALIZED);

  return result;
}
// Attach an externally-created, already-running thread to the VM.
// Returns false if the OSThread wrapper cannot be created.
bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif
  // GetCurrentThread() returns a pseudo-handle; duplicate it into a real
  // handle the VM can keep beyond this call.
  HANDLE real_handle;
  if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(),
                       &real_handle, THREAD_ALL_ACCESS, false, 0)) {
    fatal("DuplicateHandle failed\n");
  }
  OSThread* osthread = create_os_thread(thread, real_handle,
                                        (int)current_thread_id());
  if (osthread == NULL) {
    return false;
  }

  // The attached thread is already running, so promote it straight from
  // the INITIALIZED state that create_os_thread() set to RUNNABLE.
  osthread->set_state(RUNNABLE);

  thread->set_osthread(osthread);
  return true;
}
509 bool os::create_main_thread(JavaThread* thread) {
510 #ifdef ASSERT
511 thread->verify_not_published();
512 #endif
513 if (_starting_thread == NULL) {
514 _starting_thread = create_os_thread(thread, main_thread, main_thread_id);
515 if (_starting_thread == NULL) {
516 return false;
517 }
518 }
520 // The primordial thread is runnable from the start)
521 _starting_thread->set_state(RUNNABLE);
523 thread->set_osthread(_starting_thread);
524 return true;
525 }
// Allocate and initialize a new OSThread and start a new Win32 thread for
// it. The thread is created suspended (state INITIALIZED) and started
// higher up in the call chain. Returns false on any failure.
bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
  unsigned thread_id;

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  // Initialize support for Java interrupts
  HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
  if (interrupt_event == NULL) {
    delete osthread;
    return false;  // fixed: was 'return NULL' in a bool-returning function
  }
  osthread->set_interrupt_event(interrupt_event);
  osthread->set_interrupted(false);

  thread->set_osthread(osthread);

  if (stack_size == 0) {
    switch (thr_type) {
    case os::java_thread:
      // Java threads use ThreadStackSize which default value can be changed with the flag -Xss
      if (JavaThread::stack_size_at_create() > 0) {
        stack_size = JavaThread::stack_size_at_create();
      }
      break;
    case os::compiler_thread:
      if (CompilerThreadStackSize > 0) {
        stack_size = (size_t)(CompilerThreadStackSize * K);
        break;
      } // else fall through:
        // use VMThreadStackSize if CompilerThreadStackSize is not defined
    case os::vm_thread:
    case os::pgc_thread:
    case os::cgc_thread:
    case os::watcher_thread:
      if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
      break;
    }
  }

  // Create the Win32 thread
  //
  // Contrary to what MSDN document says, "stack_size" in _beginthreadex()
  // does not specify stack size. Instead, it specifies the size of
  // initially committed space. The stack size is determined by
  // PE header in the executable. If the committed "stack_size" is larger
  // than default value in the PE header, the stack is rounded up to the
  // nearest multiple of 1MB. For example if the launcher has default
  // stack size of 320k, specifying any size less than 320k does not
  // affect the actual stack size at all, it only affects the initial
  // commitment. On the other hand, specifying 'stack_size' larger than
  // default value may cause significant increase in memory usage, because
  // not only the stack space will be rounded up to MB, but also the
  // entire space is committed upfront.
  //
  // Finally Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION'
  // for CreateThread() that can treat 'stack_size' as stack size. However we
  // are not supposed to call CreateThread() directly according to MSDN
  // document because JVM uses C runtime library. The good news is that the
  // flag appears to work with _beginthredex() as well.

#ifndef STACK_SIZE_PARAM_IS_A_RESERVATION
#define STACK_SIZE_PARAM_IS_A_RESERVATION  (0x10000)
#endif

  HANDLE thread_handle =
    (HANDLE)_beginthreadex(NULL,
                           (unsigned)stack_size,
                           (unsigned (__stdcall *)(void*)) java_start,
                           thread,
                           CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION,
                           &thread_id);
  if (thread_handle == NULL) {
    // perhaps STACK_SIZE_PARAM_IS_A_RESERVATION is not supported, try again
    // without the flag.
    thread_handle =
      (HANDLE)_beginthreadex(NULL,
                             (unsigned)stack_size,
                             (unsigned (__stdcall *)(void*)) java_start,
                             thread,
                             CREATE_SUSPENDED,
                             &thread_id);
  }
  if (thread_handle == NULL) {
    // Need to clean up stuff we've allocated so far
    CloseHandle(osthread->interrupt_event());
    thread->set_osthread(NULL);
    delete osthread;
    return false;  // fixed: was 'return NULL' in a bool-returning function
  }

  Atomic::inc_ptr((intptr_t*)&os::win32::_os_thread_count);

  // Store info on the Win32 thread into the OSThread
  osthread->set_thread_handle(thread_handle);
  osthread->set_thread_id(thread_id);

  // Initial thread state is INITIALIZED, not SUSPENDED
  osthread->set_state(INITIALIZED);

  // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
  return true;
}
// Free Win32 resources related to the OSThread: both kernel handles it
// owns, then the object itself.
void os::free_thread(OSThread* osthread) {
  assert(osthread != NULL, "osthread not set");
  CloseHandle(osthread->thread_handle());
  CloseHandle(osthread->interrupt_event());
  delete osthread;
}
// Timer state set once by initialize_performance_counter():
// non-zero when QueryPerformanceCounter is available.
static int has_performance_count = 0;
// FILETIME at startup (fallback clock base) when QPC is unavailable.
static jlong first_filetime;
// QPC value at startup, so elapsed_counter() can be relative.
static jlong initial_performance_count;
// QPC ticks per second.
static jlong performance_frequency;
// Pack the two 32-bit halves of a LARGE_INTEGER into a jlong.
jlong as_long(LARGE_INTEGER x) {
  jlong packed = 0; // initialization to avoid warning
  set_high(&packed, x.HighPart);
  set_low(&packed, x.LowPart);
  return packed;
}
// Ticks elapsed since VM start. Uses the high-resolution performance
// counter when available, otherwise the 100ns-granularity system FILETIME
// clock; both are measured relative to the value recorded at startup.
jlong os::elapsed_counter() {
  if (has_performance_count) {
    LARGE_INTEGER now;
    QueryPerformanceCounter(&now);
    return as_long(now) - initial_performance_count;
  }
  FILETIME wt;
  GetSystemTimeAsFileTime(&wt);
  return jlong_from(wt.dwHighDateTime, wt.dwLowDateTime) - first_filetime;
}
671 jlong os::elapsed_frequency() {
672 if (has_performance_count) {
673 return performance_frequency;
674 } else {
675 // the FILETIME time is the number of 100-nanosecond intervals since January 1,1601.
676 return 10000000;
677 }
678 }
// Platform-independent entry point; defers to the win32 helper.
julong os::available_memory() {
  return win32::available_memory();
}

// Currently available physical memory in bytes.
julong os::win32::available_memory() {
  // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
  // value if total memory is larger than 4GB
  MEMORYSTATUSEX status;
  status.dwLength = sizeof(status);
  GlobalMemoryStatusEx(&status);
  return (julong)status.ullAvailPhys;
}

// Platform-independent entry point; defers to the win32 helper.
julong os::physical_memory() {
  return win32::physical_memory();
}
// Report the amount of virtual address space still allocatable.
// Always returns true on Windows; *limit receives the cap in bytes.
bool os::has_allocatable_memory_limit(julong* limit) {
  MEMORYSTATUSEX status;
  status.dwLength = sizeof(status);
  GlobalMemoryStatusEx(&status);
#ifdef _LP64
  // 64-bit: available virtual address space is the only cap.
  *limit = (julong)status.ullAvailVirtual;
#else
  // Limit to 1400m because of the 2gb address space wall
  *limit = MIN2((julong)1400*M, (julong)status.ullAvailVirtual);
#endif
  return true;
}
// VC6 lacks DWORD_PTR; provide it for pre-VS2003 (_MSC_VER < 1300)
// compilers so the affinity-mask code below compiles everywhere.
#if _MSC_VER < 1300
typedef UINT_PTR DWORD_PTR;
#endif
// Number of processors this process may run on: the user override
// (-XX:ActiveProcessorCount) if set, else the population count of the
// process affinity mask, else the total processor count.
int os::active_processor_count() {
  // User has overridden the number of active processors
  if (ActiveProcessorCount > 0) {
    if (PrintActiveCpus) {
      tty->print_cr("active_processor_count: "
                    "active processor count set by user : %d",
                    ActiveProcessorCount);
    }
    return ActiveProcessorCount;
  }

  DWORD_PTR process_mask = 0;
  DWORD_PTR system_mask = 0;
  int proc_count = processor_count();
  if (proc_count <= sizeof(UINT_PTR) * BitsPerByte &&
      GetProcessAffinityMask(GetCurrentProcess(), &process_mask, &system_mask)) {
    // Count set bits in the affinity mask by repeatedly clearing the
    // lowest set bit.
    int active = 0;
    for (; process_mask != 0; process_mask &= process_mask - 1) {
      active++;
    }
    return active;
  }
  // Mask cannot represent all processors, or the query failed: fall back
  // to the raw processor count.
  return proc_count;
}
746 void os::set_native_thread_name(const char *name) {
747 // Not yet implemented.
748 return;
749 }
751 bool os::distribute_processes(uint length, uint* distribution) {
752 // Not yet implemented.
753 return false;
754 }
756 bool os::bind_to_processor(uint processor_id) {
757 // Not yet implemented.
758 return false;
759 }
// Probe for the high-resolution performance counter. On success, record
// its frequency and current value; otherwise fall back to the system
// FILETIME clock and record the start time, so elapsed_counter() can
// always report relative ticks.
static void initialize_performance_counter() {
  LARGE_INTEGER value;
  if (QueryPerformanceFrequency(&value)) {
    has_performance_count = 1;
    performance_frequency = as_long(value);
    QueryPerformanceCounter(&value);
    initial_performance_count = as_long(value);
  } else {
    has_performance_count = 0;
    FILETIME wt;
    GetSystemTimeAsFileTime(&wt);
    first_filetime = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
  }
}
777 double os::elapsedTime() {
778 return (double) elapsed_counter() / (double) elapsed_frequency();
779 }
// Windows format:
// The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601.
// Java format:
// Java standards require the number of milliseconds since 1/1/1970

// Constant offset - calculated using offset(); number of 100ns intervals
// between the Windows epoch (1601) and the Java epoch (1970).
static jlong _offset = 116444736000000000;
// Fake time counter for reproducible results when debugging
static jlong fake_time = 0;
#ifdef ASSERT
// Just to be safe, recalculate the offset in debug mode and assert it
// matches the hard-coded _offset constant above.
static jlong _calculated_offset = 0;
static int _has_calculated_offset = 0;

jlong offset() {
  if (_has_calculated_offset) return _calculated_offset;
  // Build the Java epoch (1970-01-01 00:00:00.000) as a SYSTEMTIME and
  // convert it to FILETIME ticks to obtain the epoch offset.
  SYSTEMTIME java_origin;
  java_origin.wYear          = 1970;
  java_origin.wMonth         = 1;
  java_origin.wDayOfWeek     = 0; // ignored
  java_origin.wDay           = 1;
  java_origin.wHour          = 0;
  java_origin.wMinute        = 0;
  java_origin.wSecond        = 0;
  java_origin.wMilliseconds  = 0;
  FILETIME jot;
  if (!SystemTimeToFileTime(&java_origin, &jot)) {
    fatal(err_msg("Error = %d\nWindows error", GetLastError()));
  }
  _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
  _has_calculated_offset = 1;
  assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal");
  return _calculated_offset;
}
#else
// Product builds trust the precomputed constant.
jlong offset() {
  return _offset;
}
#endif
// Re-base a FILETIME from the Windows epoch (1601) to the Java epoch
// (1970) and convert 100ns ticks to milliseconds.
jlong windows_to_java_time(FILETIME wt) {
  jlong ticks = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
  return (ticks - offset()) / 10000;
}
// Inverse of windows_to_java_time(): milliseconds since 1970 back to
// 100ns FILETIME ticks since 1601.
FILETIME java_to_windows_time(jlong l) {
  jlong ticks = (l * 10000) + offset();
  FILETIME ft;
  ft.dwHighDateTime = high(ticks);
  ft.dwLowDateTime  = low(ticks);
  return ft;
}
836 bool os::supports_vtime() { return true; }
837 bool os::enable_vtime() { return false; }
838 bool os::vtime_enabled() { return false; }
// CPU time (kernel + user, in seconds) consumed by the current thread.
// Falls back to wall-clock elapsed time if GetThreadTimes fails.
double os::elapsedVTime() {
  FILETIME created, exited, kernel, user;
  if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) == 0) {
    return elapsedTime();
  }
  // the resolution of windows_to_java_time() should be sufficient (ms)
  return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS;
}
// Milliseconds since the Java epoch (1970). With -XX:+UseFakeTimers a
// monotonically incrementing counter is returned instead, for
// reproducible debugging runs.
jlong os::javaTimeMillis() {
  if (UseFakeTimers) {
    return fake_time++;
  }
  FILETIME wt;
  GetSystemTimeAsFileTime(&wt);
  return windows_to_java_time(wt);
}
// Nanosecond-resolution timer based on the performance counter; falls back
// to millisecond resolution when no performance counter is available.
jlong os::javaTimeNanos() {
  if (!has_performance_count) {
    return javaTimeMillis() * NANOSECS_PER_MILLISEC; // the best we can do.
  } else {
    LARGE_INTEGER current_count;
    // Fixed: the argument had been corrupted to '¤t_count' (an HTML
    // entity mangling of '&current_count'), which does not compile.
    QueryPerformanceCounter(&current_count);
    double current = as_long(current_count);
    double freq = performance_frequency;
    jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC);
    return time;
  }
}
876 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
877 if (!has_performance_count) {
878 // javaTimeMillis() doesn't have much percision,
879 // but it is not going to wrap -- so all 64 bits
880 info_ptr->max_value = ALL_64_BITS;
882 // this is a wall clock timer, so may skip
883 info_ptr->may_skip_backward = true;
884 info_ptr->may_skip_forward = true;
885 } else {
886 jlong freq = performance_frequency;
887 if (freq < NANOSECS_PER_SEC) {
888 // the performance counter is 64 bits and we will
889 // be multiplying it -- so no wrap in 64 bits
890 info_ptr->max_value = ALL_64_BITS;
891 } else if (freq > NANOSECS_PER_SEC) {
892 // use the max value the counter can reach to
893 // determine the max value which could be returned
894 julong max_counter = (julong)ALL_64_BITS;
895 info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC));
896 } else {
897 // the performance counter is 64 bits and we will
898 // be using it directly -- so no wrap in 64 bits
899 info_ptr->max_value = ALL_64_BITS;
900 }
902 // using a counter, so no skipping
903 info_ptr->may_skip_backward = false;
904 info_ptr->may_skip_forward = false;
905 }
906 info_ptr->kind = JVMTI_TIMER_ELAPSED; // elapsed not CPU time
907 }
// Format the local wall-clock time into 'buf' as "YYYY-MM-DD HH:MM:SS"
// and return 'buf'.
char* os::local_time_string(char *buf, size_t buflen) {
  SYSTEMTIME now;
  GetLocalTime(&now);
  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
               now.wYear, now.wMonth, now.wDay,
               now.wHour, now.wMinute, now.wSecond);
  return buf;
}
// Report real (wall-clock), user-CPU and system-CPU time in seconds.
// Returns false if GetProcessTimes fails, leaving the outputs untouched.
bool os::getTimesSecs(double* process_real_time,
                      double* process_user_time,
                      double* process_system_time) {
  FILETIME create_time, exit_time, kernel_time, user_time;
  if (GetProcessTimes(GetCurrentProcess(),
                      &create_time, &exit_time,
                      &kernel_time, &user_time) == 0) {
    return false;
  }
  // Real time is taken from the current wall clock, not the create time.
  FILETIME wt;
  GetSystemTimeAsFileTime(&wt);
  jlong rtc_millis = windows_to_java_time(wt);
  jlong user_millis = windows_to_java_time(user_time);
  jlong system_millis = windows_to_java_time(kernel_time);
  *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS);
  *process_user_time = ((double) user_millis) / ((double) MILLIUNITS);
  *process_system_time = ((double) system_millis) / ((double) MILLIUNITS);
  return true;
}
942 void os::shutdown() {
944 // allow PerfMemory to attempt cleanup of any persistent resources
945 perfMemory_exit();
947 // flush buffered output, finish log files
948 ostream_abort();
950 // Check for abort hook
951 abort_hook_t abort_hook = Arguments::abort_hook();
952 if (abort_hook != NULL) {
953 abort_hook();
954 }
955 }
// Pointer to dbghelp.dll's MiniDumpWriteDump(), resolved dynamically in
// os::check_or_create_dump() below (dbghelp is loaded on demand).
static BOOL (WINAPI *_MiniDumpWriteDump) ( HANDLE, DWORD, HANDLE, MINIDUMP_TYPE, PMINIDUMP_EXCEPTION_INFORMATION,
                                            PMINIDUMP_USER_STREAM_INFORMATION, PMINIDUMP_CALLBACK_INFORMATION);
// Write a Windows minidump (hs_err_pid<pid>.mdmp in the current directory)
// for a crashing VM. 'buffer' receives either the dump path or a failure
// message; status is reported through VMError::report_coredump_status.
void os::check_or_create_dump(void* exceptionRecord, void* contextRecord, char* buffer, size_t bufferSize) {
  HINSTANCE dbghelp;
  EXCEPTION_POINTERS ep;
  MINIDUMP_EXCEPTION_INFORMATION mei;
  MINIDUMP_EXCEPTION_INFORMATION* pmei;

  HANDLE hProcess = GetCurrentProcess();
  DWORD processId = GetCurrentProcessId();
  HANDLE dumpFile;
  MINIDUMP_TYPE dumpType;
  static const char* cwd;

// Default is to always create dump for debug builds, on product builds only dump on server versions of Windows.
#ifndef ASSERT
  // If running on a client version of Windows and user has not explicitly enabled dumping
  if (!os::win32::is_windows_server() && !CreateMinidumpOnCrash) {
    VMError::report_coredump_status("Minidumps are not enabled by default on client versions of Windows", false);
    return;
    // If running on a server version of Windows and user has explictly disabled dumping
  } else if (os::win32::is_windows_server() && !FLAG_IS_DEFAULT(CreateMinidumpOnCrash) && !CreateMinidumpOnCrash) {
    VMError::report_coredump_status("Minidump has been disabled from the command line", false);
    return;
  }
#else
  if (!FLAG_IS_DEFAULT(CreateMinidumpOnCrash) && !CreateMinidumpOnCrash) {
    VMError::report_coredump_status("Minidump has been disabled from the command line", false);
    return;
  }
#endif

  // Load dbghelp.dll lazily; it is only needed on the crash path.
  dbghelp = os::win32::load_Windows_dll("DBGHELP.DLL", NULL, 0);

  if (dbghelp == NULL) {
    VMError::report_coredump_status("Failed to load dbghelp.dll", false);
    return;
  }

  _MiniDumpWriteDump = CAST_TO_FN_PTR(
      BOOL(WINAPI *)( HANDLE, DWORD, HANDLE, MINIDUMP_TYPE, PMINIDUMP_EXCEPTION_INFORMATION,
                      PMINIDUMP_USER_STREAM_INFORMATION, PMINIDUMP_CALLBACK_INFORMATION),
      GetProcAddress(dbghelp, "MiniDumpWriteDump"));

  if (_MiniDumpWriteDump == NULL) {
    VMError::report_coredump_status("Failed to find MiniDumpWriteDump() in module dbghelp.dll", false);
    return;
  }

  dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData);

// Older versions of dbghelp.h doesn't contain all the dumptypes we want, dbghelp.h with
// API_VERSION_NUMBER 11 or higher contains the ones we want though
#if API_VERSION_NUMBER >= 11
  dumpType = (MINIDUMP_TYPE)(dumpType | MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo |
                             MiniDumpWithUnloadedModules);
#endif

  cwd = get_current_directory(NULL, 0);
  jio_snprintf(buffer, bufferSize, "%s\\hs_err_pid%u.mdmp",cwd, current_process_id());
  dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);

  if (dumpFile == INVALID_HANDLE_VALUE) {
    VMError::report_coredump_status("Failed to create file for dumping", false);
    return;
  }
  // Pass the exception context to the dump writer when the caller has one,
  // so the crashing frame is recorded; otherwise write a plain dump.
  if (exceptionRecord != NULL && contextRecord != NULL) {
    ep.ContextRecord = (PCONTEXT) contextRecord;
    ep.ExceptionRecord = (PEXCEPTION_RECORD) exceptionRecord;

    mei.ThreadId = GetCurrentThreadId();
    mei.ExceptionPointers = &ep;
    pmei = &mei;
  } else {
    pmei = NULL;
  }

  // Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all
  // the dump types we really want. If first call fails, lets fall back to just use MiniDumpWithFullMemory then.
  if (_MiniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) == false &&
      _MiniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL) == false) {
    DWORD error = GetLastError();
    LPTSTR msgbuf = NULL;

    if (FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
                      FORMAT_MESSAGE_FROM_SYSTEM |
                      FORMAT_MESSAGE_IGNORE_INSERTS,
                      NULL, error, 0, (LPTSTR)&msgbuf, 0, NULL) != 0) {

      jio_snprintf(buffer, bufferSize, "Call to MiniDumpWriteDump() failed (Error 0x%x: %s)", error, msgbuf);
      LocalFree(msgbuf);
    } else {
      // Call to FormatMessage failed, just include the result from GetLastError
      jio_snprintf(buffer, bufferSize, "Call to MiniDumpWriteDump() failed (Error 0x%x)", error);
    }
    VMError::report_coredump_status(buffer, false);
  } else {
    VMError::report_coredump_status(buffer, true);
  }

  CloseHandle(dumpFile);
}
// Terminate the VM after running shutdown hooks/cleanup.
// Note: the dump_core request is ignored on Windows -- no core (minidump)
// is produced here; minidumps are written from the fatal-error path instead.
void os::abort(bool dump_core)
{
  os::shutdown();
  // no core dump on Windows
  ::exit(1);
}
// Die immediately, no exit hook, no abort hook, no cleanup.
// _exit bypasses atexit handlers and C runtime teardown entirely.
void os::die() {
  _exit(-1);
}
1077 // Directory routines copied from src/win32/native/java/io/dirent_md.c
1078 // * dirent_md.c 1.15 00/02/02
1079 //
1080 // The declarations for DIR and struct dirent are in jvm_win32.h.
/* Caller must have already run dirname through JVM_NativePath, which removes
   duplicate slashes and converts all instances of '/' into '\\'.
   Opens a directory stream for iteration with os::readdir(); returns NULL (0)
   with errno set on failure.  Caller releases the stream with os::closedir(). */

DIR *
os::opendir(const char *dirname)
{
  assert(dirname != NULL, "just checking"); // hotspot change
  DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
  DWORD fattr; // hotspot change
  char alt_dirname[4] = { 0, 0, 0, 0 };

  if (dirp == 0) {
    errno = ENOMEM;
    return 0;
  }

  /*
   * Win32 accepts "\" in its POSIX stat(), but refuses to treat it
   * as a directory in FindFirstFile(). We detect this case here and
   * prepend the current drive name.
   */
  if (dirname[1] == '\0' && dirname[0] == '\\') {
    alt_dirname[0] = _getdrive() + 'A' - 1;  // _getdrive() is 1-based (A: == 1)
    alt_dirname[1] = ':';
    alt_dirname[2] = '\\';
    alt_dirname[3] = '\0';
    dirname = alt_dirname;
  }

  /* +5 leaves room for the "\\*.*" wildcard suffix plus the trailing NUL */
  dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
  if (dirp->path == 0) {
    free(dirp, mtInternal);
    errno = ENOMEM;
    return 0;
  }
  strcpy(dirp->path, dirname);

  fattr = GetFileAttributes(dirp->path);
  if (fattr == 0xffffffff) {  /* 0xffffffff == INVALID_FILE_ATTRIBUTES */
    free(dirp->path, mtInternal);
    free(dirp, mtInternal);
    errno = ENOENT;
    return 0;
  } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
    free(dirp->path, mtInternal);
    free(dirp, mtInternal);
    errno = ENOTDIR;
    return 0;
  }

  /* Append "*.*", or possibly "\\*.*", to path */
  if (dirp->path[1] == ':'
      && (dirp->path[2] == '\0'
          || (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
    /* No '\\' needed for cases like "Z:" or "Z:\" */
    strcat(dirp->path, "*.*");
  } else {
    strcat(dirp->path, "\\*.*");
  }

  dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
  if (dirp->handle == INVALID_HANDLE_VALUE) {
    /* An empty directory yields ERROR_FILE_NOT_FOUND; that is not an error
       here -- readdir() will simply report no entries. */
    if (GetLastError() != ERROR_FILE_NOT_FOUND) {
      free(dirp->path, mtInternal);
      free(dirp, mtInternal);
      errno = EACCES;
      return 0;
    }
  }
  return dirp;
}
/* parameter dbuf unused on Windows */

/* Return the next entry of dirp, or NULL (0) when the stream is exhausted.
   The result points into dirp itself and is invalidated by the next
   readdir()/closedir() call on the same stream. */
struct dirent *
os::readdir(DIR *dirp, dirent *dbuf)
{
  assert(dirp != NULL, "just checking"); // hotspot change
  if (dirp->handle == INVALID_HANDLE_VALUE) {
    // Directory was empty, or enumeration already finished.
    return 0;
  }

  // Hand out the entry pre-fetched by FindFirstFile/the previous FindNextFile.
  strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);

  // Pre-fetch the next entry; close the handle once we run out.
  if (!FindNextFile(dirp->handle, &dirp->find_data)) {
    if (GetLastError() == ERROR_INVALID_HANDLE) {
      errno = EBADF;
      return 0;
    }
    FindClose(dirp->handle);
    dirp->handle = INVALID_HANDLE_VALUE;
  }

  return &dirp->dirent;
}
// Release a DIR allocated by os::opendir().  Returns 0 on success, or -1
// with errno = EBADF if the underlying find handle cannot be closed.
int
os::closedir(DIR *dirp)
{
  assert(dirp != NULL, "just checking"); // hotspot change
  if (dirp->handle != INVALID_HANDLE_VALUE) {
    if (!FindClose(dirp->handle)) {
      errno = EBADF;
      return -1;
    }
    dirp->handle = INVALID_HANDLE_VALUE;
  }
  free(dirp->path, mtInternal);
  free(dirp, mtInternal);
  return 0;
}
1194 // This must be hard coded because it's the system's temporary
1195 // directory not the java application's temp directory, ala java.io.tmpdir.
1196 const char* os::get_temp_directory() {
1197 static char path_buf[MAX_PATH];
1198 if (GetTempPath(MAX_PATH, path_buf)>0)
1199 return path_buf;
1200 else{
1201 path_buf[0]='\0';
1202 return path_buf;
1203 }
1204 }
1206 static bool file_exists(const char* filename) {
1207 if (filename == NULL || strlen(filename) == 0) {
1208 return false;
1209 }
1210 return GetFileAttributes(filename) != INVALID_FILE_ATTRIBUTES;
1211 }
// Build a platform-dependent library file name "<dir>\<fname>.dll" into
// 'buffer' from directory (or search-path) 'pname' and base name 'fname'.
// When 'pname' is a path-separator-delimited list, the first element whose
// candidate file exists wins.  Returns false on potential buffer overflow
// or when no candidate on a search path exists.
bool os::dll_build_name(char *buffer, size_t buflen,
                        const char* pname, const char* fname) {
  bool retval = false;
  const size_t pnamelen = pname ? strlen(pname) : 0;
  // Last character of the prefix; decides whether a '\\' must be inserted.
  const char c = (pnamelen > 0) ? pname[pnamelen-1] : 0;

  // Return error on buffer overflow.
  if (pnamelen + strlen(fname) + 10 > buflen) {
    return retval;
  }

  if (pnamelen == 0) {
    jio_snprintf(buffer, buflen, "%s.dll", fname);
    retval = true;
  } else if (c == ':' || c == '\\') {
    // Prefix already ends in a drive or path separator; no extra '\\'.
    jio_snprintf(buffer, buflen, "%s%s.dll", pname, fname);
    retval = true;
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    // 'pname' is a search path: probe each element in order.
    int n;
    char** pelements = split_path(pname, &n);
    if (pelements == NULL) {
      return false;
    }
    for (int i = 0 ; i < n ; i++) {
      char* path = pelements[i];
      // Really shouldn't be NULL, but check can't hurt
      size_t plen = (path == NULL) ? 0 : strlen(path);
      if (plen == 0) {
        continue; // skip the empty path values
      }
      const char lastchar = path[plen - 1];
      if (lastchar == ':' || lastchar == '\\') {
        jio_snprintf(buffer, buflen, "%s%s.dll", path, fname);
      } else {
        jio_snprintf(buffer, buflen, "%s\\%s.dll", path, fname);
      }
      if (file_exists(buffer)) {
        retval = true;
        break;
      }
    }
    // release the storage
    for (int i = 0 ; i < n ; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
    }
  } else {
    // Single directory without a trailing separator.
    jio_snprintf(buffer, buflen, "%s\\%s.dll", pname, fname);
    retval = true;
  }
  return retval;
}
1270 // Needs to be in os specific directory because windows requires another
1271 // header file <direct.h>
1272 const char* os::get_current_directory(char *buf, size_t buflen) {
1273 int n = static_cast<int>(buflen);
1274 if (buflen > INT_MAX) n = INT_MAX;
1275 return _getcwd(buf, n);
1276 }
1278 //-----------------------------------------------------------
1279 // Helper functions for fatal error handler
1280 #ifdef _WIN64
1281 // Helper routine which returns true if address in
1282 // within the NTDLL address space.
1283 //
1284 static bool _addr_in_ntdll( address addr )
1285 {
1286 HMODULE hmod;
1287 MODULEINFO minfo;
1289 hmod = GetModuleHandle("NTDLL.DLL");
1290 if ( hmod == NULL ) return false;
1291 if ( !os::PSApiDll::GetModuleInformation( GetCurrentProcess(), hmod,
1292 &minfo, sizeof(MODULEINFO)) )
1293 return false;
1295 if ( (addr >= minfo.lpBaseOfDll) &&
1296 (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage)))
1297 return true;
1298 else
1299 return false;
1300 }
1301 #endif
1304 // Enumerate all modules for a given process ID
1305 //
1306 // Notice that Windows 95/98/Me and Windows NT/2000/XP have
1307 // different API for doing this. We use PSAPI.DLL on NT based
1308 // Windows and ToolHelp on 95/98/Me.
1310 // Callback function that is called by enumerate_modules() on
1311 // every DLL module.
1312 // Input parameters:
1313 // int pid,
1314 // char* module_file_name,
1315 // address module_base_addr,
1316 // unsigned module_size,
1317 // void* param
// Signature of the per-module callback invoked by enumerate_modules();
// the parameters are described in the comment block above.  A non-zero
// return value stops the enumeration.
typedef int (*EnumModulesCallbackFunc)(int, char *, address, unsigned, void *);
// enumerate_modules for Windows NT, using PSAPI
// Invokes 'func' for each loaded module of process 'pid'; stops early when
// the callback returns non-zero and returns that value.  Returns 0 when
// PSAPI is unavailable, the process cannot be opened/queried, or no
// callback returned non-zero.
static int _enumerate_modules_winnt( int pid, EnumModulesCallbackFunc func, void * param)
{
  HANDLE hProcess ;

# define MAX_NUM_MODULES 128
  HMODULE modules[MAX_NUM_MODULES];
  static char filename[ MAX_PATH ];   // shared scratch buffer for module paths
  int result = 0;

  if (!os::PSApiDll::PSApiAvailable()) {
    return 0;
  }

  hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
                         FALSE, pid ) ;
  if (hProcess == NULL) return 0;

  DWORD size_needed;
  if (!os::PSApiDll::EnumProcessModules(hProcess, modules,
                                        sizeof(modules), &size_needed)) {
    CloseHandle( hProcess );
    return 0;
  }

  // number of modules that are currently loaded
  int num_modules = size_needed / sizeof(HMODULE);

  // NOTE: modules beyond the first MAX_NUM_MODULES are silently ignored.
  for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
    // Get Full pathname:
    if(!os::PSApiDll::GetModuleFileNameEx(hProcess, modules[i],
                                          filename, sizeof(filename))) {
      filename[0] = '\0';   // callback still runs, with an empty name
    }

    MODULEINFO modinfo;
    if (!os::PSApiDll::GetModuleInformation(hProcess, modules[i],
                                            &modinfo, sizeof(modinfo))) {
      modinfo.lpBaseOfDll = NULL;
      modinfo.SizeOfImage = 0;
    }

    // Invoke callback function
    result = func(pid, filename, (address)modinfo.lpBaseOfDll,
                  modinfo.SizeOfImage, param);
    if (result) break;
  }

  CloseHandle( hProcess ) ;
  return result;
}
// enumerate_modules for Windows 95/98/ME, using TOOLHELP
// Same contract as _enumerate_modules_winnt(), but walks the module list
// via a ToolHelp snapshot.  Returns 0 (FALSE) when ToolHelp is unavailable
// or the snapshot cannot be created.
static int _enumerate_modules_windows( int pid, EnumModulesCallbackFunc func, void *param)
{
  HANDLE hSnapShot ;
  static MODULEENTRY32 modentry ;   // static: shared scratch, not reentrant
  int result = 0;

  if (!os::Kernel32Dll::HelpToolsAvailable()) {
    return 0;
  }

  // Get a handle to a Toolhelp snapshot of the system
  hSnapShot = os::Kernel32Dll::CreateToolhelp32Snapshot(TH32CS_SNAPMODULE, pid ) ;
  if( hSnapShot == INVALID_HANDLE_VALUE ) {
    return FALSE ;
  }

  // iterate through all modules
  modentry.dwSize = sizeof(MODULEENTRY32) ;
  bool not_done = os::Kernel32Dll::Module32First( hSnapShot, &modentry ) != 0;

  while( not_done ) {
    // invoke the callback
    result=func(pid, modentry.szExePath, (address)modentry.modBaseAddr,
                modentry.modBaseSize, param);
    if (result) break;

    // dwSize must be reset before every Module32Next call
    modentry.dwSize = sizeof(MODULEENTRY32) ;
    not_done = os::Kernel32Dll::Module32Next( hSnapShot, &modentry ) != 0;
  }

  CloseHandle(hSnapShot);
  return result;
}
1408 int enumerate_modules( int pid, EnumModulesCallbackFunc func, void * param )
1409 {
1410 // Get current process ID if caller doesn't provide it.
1411 if (!pid) pid = os::current_process_id();
1413 if (os::win32::is_nt()) return _enumerate_modules_winnt (pid, func, param);
1414 else return _enumerate_modules_windows(pid, func, param);
1415 }
// Parameter block for _locate_module_by_addr(): 'addr' is the query input,
// 'full_path' and 'base_addr' receive the answer.
struct _modinfo {
  address addr;      // in:  address to locate
  char* full_path;   // out: point to a char buffer (may be NULL)
  int buflen;        // in:  size of the buffer
  address base_addr; // out: base address of the module containing 'addr'
};
1424 static int _locate_module_by_addr(int pid, char * mod_fname, address base_addr,
1425 unsigned size, void * param) {
1426 struct _modinfo *pmod = (struct _modinfo *)param;
1427 if (!pmod) return -1;
1429 if (base_addr <= pmod->addr &&
1430 base_addr+size > pmod->addr) {
1431 // if a buffer is provided, copy path name to the buffer
1432 if (pmod->full_path) {
1433 jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
1434 }
1435 pmod->base_addr = base_addr;
1436 return 1;
1437 }
1438 return 0;
1439 }
1441 bool os::dll_address_to_library_name(address addr, char* buf,
1442 int buflen, int* offset) {
1443 // buf is not optional, but offset is optional
1444 assert(buf != NULL, "sanity check");
1446 // NOTE: the reason we don't use SymGetModuleInfo() is it doesn't always
1447 // return the full path to the DLL file, sometimes it returns path
1448 // to the corresponding PDB file (debug info); sometimes it only
1449 // returns partial path, which makes life painful.
1451 struct _modinfo mi;
1452 mi.addr = addr;
1453 mi.full_path = buf;
1454 mi.buflen = buflen;
1455 int pid = os::current_process_id();
1456 if (enumerate_modules(pid, _locate_module_by_addr, (void *)&mi)) {
1457 // buf already contains path name
1458 if (offset) *offset = addr - mi.base_addr;
1459 return true;
1460 }
1462 buf[0] = '\0';
1463 if (offset) *offset = -1;
1464 return false;
1465 }
1467 bool os::dll_address_to_function_name(address addr, char *buf,
1468 int buflen, int *offset) {
1469 // buf is not optional, but offset is optional
1470 assert(buf != NULL, "sanity check");
1472 if (Decoder::decode(addr, buf, buflen, offset)) {
1473 return true;
1474 }
1475 if (offset != NULL) *offset = -1;
1476 buf[0] = '\0';
1477 return false;
1478 }
1480 // save the start and end address of jvm.dll into param[0] and param[1]
1481 static int _locate_jvm_dll(int pid, char* mod_fname, address base_addr,
1482 unsigned size, void * param) {
1483 if (!param) return -1;
1485 if (base_addr <= (address)_locate_jvm_dll &&
1486 base_addr+size > (address)_locate_jvm_dll) {
1487 ((address*)param)[0] = base_addr;
1488 ((address*)param)[1] = base_addr + size;
1489 return 1;
1490 }
1491 return 0;
1492 }
// Cached [start, end) address range of jvm.dll; lazily filled in by
// os::address_is_in_vm() via _locate_jvm_dll().
address vm_lib_location[2]; // start and end address of jvm.dll
1496 // check if addr is inside jvm.dll
1497 bool os::address_is_in_vm(address addr) {
1498 if (!vm_lib_location[0] || !vm_lib_location[1]) {
1499 int pid = os::current_process_id();
1500 if (!enumerate_modules(pid, _locate_jvm_dll, (void *)vm_lib_location)) {
1501 assert(false, "Can't find jvm module.");
1502 return false;
1503 }
1504 }
1506 return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
1507 }
1509 // print module info; param is outputStream*
1510 static int _print_module(int pid, char* fname, address base,
1511 unsigned size, void* param) {
1512 if (!param) return -1;
1514 outputStream* st = (outputStream*)param;
1516 address end_addr = base + size;
1517 st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base, end_addr, fname);
1518 return 0;
1519 }
// Loads .dll/.so and
// in case of error it checks if .dll/.so was built for the
// same architecture as Hotspot is running on
// Returns the module handle on success; on failure returns NULL with a
// human-readable diagnostic written into 'ebuf'.
void * os::dll_load(const char *name, char *ebuf, int ebuflen)
{
  void * result = LoadLibrary(name);
  if (result != NULL)
  {
    return result;
  }

  DWORD errcode = GetLastError();
  if (errcode == ERROR_MOD_NOT_FOUND) {
    strncpy(ebuf, "Can't find dependent libraries", ebuflen-1);
    ebuf[ebuflen-1]='\0';
    return NULL;
  }

  // Parsing dll below
  // If we can read dll-info and find that dll was built
  // for an architecture other than Hotspot is running in
  // - then print to buffer "DLL was built for a different architecture"
  // else call os::lasterror to obtain system error message

  // Read system error message into ebuf
  // It may or may not be overwritten below (in the for loop and just above)
  lasterror(ebuf, (size_t) ebuflen);
  ebuf[ebuflen-1]='\0';
  int file_descriptor=::open(name, O_RDONLY | O_BINARY, 0);
  if (file_descriptor<0)
  {
    return NULL;
  }

  // Read the PE COFF machine field to learn the dll's target architecture.
  uint32_t signature_offset;
  uint16_t lib_arch=0;
  bool failed_to_get_lib_arch=
  (
    //Go to position 3c in the dll
    (os::seek_to_file_offset(file_descriptor,IMAGE_FILE_PTR_TO_SIGNATURE)<0)
    ||
    // Read loacation of signature
    (sizeof(signature_offset)!=
    (os::read(file_descriptor, (void*)&signature_offset,sizeof(signature_offset))))
    ||
    //Go to COFF File Header in dll
    //that is located after"signature" (4 bytes long)
    (os::seek_to_file_offset(file_descriptor,
    signature_offset+IMAGE_FILE_SIGNATURE_LENGTH)<0)
    ||
    //Read field that contains code of architecture
    // that dll was build for
    (sizeof(lib_arch)!=
    (os::read(file_descriptor, (void*)&lib_arch,sizeof(lib_arch))))
  );

  ::close(file_descriptor);
  if (failed_to_get_lib_arch)
  {
    // file i/o error - report os::lasterror(...) msg
    return NULL;
  }

  // Table mapping PE machine codes to printable architecture names.
  typedef struct
  {
    uint16_t arch_code;
    char* arch_name;
  } arch_t;

  static const arch_t arch_array[]={
    {IMAGE_FILE_MACHINE_I386, (char*)"IA 32"},
    {IMAGE_FILE_MACHINE_AMD64, (char*)"AMD 64"},
    {IMAGE_FILE_MACHINE_IA64, (char*)"IA 64"}
  };
#if (defined _M_IA64)
  static const uint16_t running_arch=IMAGE_FILE_MACHINE_IA64;
#elif (defined _M_AMD64)
  static const uint16_t running_arch=IMAGE_FILE_MACHINE_AMD64;
#elif (defined _M_IX86)
  static const uint16_t running_arch=IMAGE_FILE_MACHINE_I386;
#else
  #error Method os::dll_load requires that one of following \
  is defined :_M_IA64,_M_AMD64 or _M_IX86
#endif


  // Obtain a string for printf operation
  // lib_arch_str shall contain string what platform this .dll was built for
  // running_arch_str shall string contain what platform Hotspot was built for
  char *running_arch_str=NULL,*lib_arch_str=NULL;
  for (unsigned int i=0;i<ARRAY_SIZE(arch_array);i++)
  {
    if (lib_arch==arch_array[i].arch_code)
      lib_arch_str=arch_array[i].arch_name;
    if (running_arch==arch_array[i].arch_code)
      running_arch_str=arch_array[i].arch_name;
  }

  assert(running_arch_str,
  "Didn't find runing architecture code in arch_array");

  // If the architure is right
  // but some other error took place - report os::lasterror(...) msg
  if (lib_arch == running_arch)
  {
    return NULL;
  }

  if (lib_arch_str!=NULL)
  {
    ::_snprintf(ebuf, ebuflen-1,
    "Can't load %s-bit .dll on a %s-bit platform",
    lib_arch_str,running_arch_str);
  }
  else
  {
    // don't know what architecture this dll was build for
    ::_snprintf(ebuf, ebuflen-1,
    "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
    lib_arch,running_arch_str);
  }

  // NOTE: ebuf[ebuflen-1] still holds the '\0' stored above, so the message
  // stays NUL-terminated even if _snprintf fills the buffer exactly.
  return NULL;
}
1647 void os::print_dll_info(outputStream *st) {
1648 int pid = os::current_process_id();
1649 st->print_cr("Dynamic libraries:");
1650 enumerate_modules(pid, _print_module, (void *)st);
1651 }
// On Windows the brief OS description is the same as the full one.
void os::print_os_info_brief(outputStream* st) {
  os::print_os_info(st);
}
// Print the OS identification line ("OS: Windows ...") to 'st'.
void os::print_os_info(outputStream* st) {
  st->print("OS:");

  os::win32::print_windows_version(st);
}
// Print the Windows product name, bitness and build number to 'st'.
// The version is derived from kernel32.dll's file-version resource rather
// than GetVersionEx alone, because GetVersionEx lies from Windows 8.1 on.
void os::win32::print_windows_version(outputStream* st) {
  OSVERSIONINFOEX osvi;
  VS_FIXEDFILEINFO *file_info;
  TCHAR kernel32_path[MAX_PATH];
  UINT len, ret;

  // Use the GetVersionEx information to see if we're on a server or
  // workstation edition of Windows. Starting with Windows 8.1 we can't
  // trust the OS version information returned by this API.
  ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
  osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
  if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
    st->print_cr("Call to GetVersionEx failed");
    return;
  }
  bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION);

  // Get the full path to \Windows\System32\kernel32.dll and use that for
  // determining what version of Windows we're running on.
  len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1;
  ret = GetSystemDirectory(kernel32_path, len);
  if (ret == 0 || ret > len) {
    st->print_cr("Call to GetSystemDirectory failed");
    return;
  }
  strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret);

  DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL);
  if (version_size == 0) {
    st->print_cr("Call to GetFileVersionInfoSize failed");
    return;
  }

  LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal);
  if (version_info == NULL) {
    st->print_cr("Failed to allocate version_info");
    return;
  }

  if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) {
    os::free(version_info);
    st->print_cr("Call to GetFileVersionInfo failed");
    return;
  }

  // Query the root block to get the fixed product version numbers.
  if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) {
    os::free(version_info);
    st->print_cr("Call to VerQueryValue failed");
    return;
  }

  int major_version = HIWORD(file_info->dwProductVersionMS);
  int minor_version = LOWORD(file_info->dwProductVersionMS);
  int build_number = HIWORD(file_info->dwProductVersionLS);
  int build_minor = LOWORD(file_info->dwProductVersionLS);
  int os_vers = major_version * 1000 + minor_version;
  os::free(version_info);

  st->print(" Windows ");
  // Map (major*1000 + minor) onto the marketing name; workstation and
  // server editions share version numbers but have different names.
  switch (os_vers) {

  case 6000:
    if (is_workstation) {
      st->print("Vista");
    } else {
      st->print("Server 2008");
    }
    break;

  case 6001:
    if (is_workstation) {
      st->print("7");
    } else {
      st->print("Server 2008 R2");
    }
    break;

  case 6002:
    if (is_workstation) {
      st->print("8");
    } else {
      st->print("Server 2012");
    }
    break;

  case 6003:
    if (is_workstation) {
      st->print("8.1");
    } else {
      st->print("Server 2012 R2");
    }
    break;

  case 6004:
    if (is_workstation) {
      st->print("10");
    } else {
      st->print("Server 2016");
    }
    break;

  default:
    // Unrecognized windows, print out its major and minor versions
    st->print("%d.%d", major_version, minor_version);
    break;
  }

  // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could
  // find out whether we are running on 64 bit processor or not
  SYSTEM_INFO si;
  ZeroMemory(&si, sizeof(SYSTEM_INFO));
  os::Kernel32Dll::GetNativeSystemInfo(&si);
  if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
    st->print(" , 64 bit");
  }

  st->print(" Build %d", build_number);
  st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor);
  st->cr();
}
// Platform-dependent CPU description; intentionally empty on Windows.
void os::pd_print_cpu_info(outputStream* st) {
  // Nothing to do for now.
}
1788 void os::print_memory_info(outputStream* st) {
1789 st->print("Memory:");
1790 st->print(" %dk page", os::vm_page_size()>>10);
1792 // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
1793 // value if total memory is larger than 4GB
1794 MEMORYSTATUSEX ms;
1795 ms.dwLength = sizeof(ms);
1796 GlobalMemoryStatusEx(&ms);
1798 st->print(", physical %uk", os::physical_memory() >> 10);
1799 st->print("(%uk free)", os::available_memory() >> 10);
1801 st->print(", swap %uk", ms.ullTotalPageFile >> 10);
1802 st->print("(%uk free)", ms.ullAvailPageFile >> 10);
1803 st->cr();
1804 }
// Print a one-line description of a Windows EXCEPTION_RECORD (passed as
// 'siginfo') to 'st', decoding access violations and CDS map failures.
void os::print_siginfo(outputStream *st, void *siginfo) {
  EXCEPTION_RECORD* er = (EXCEPTION_RECORD*)siginfo;
  st->print("siginfo:");
  st->print(" ExceptionCode=0x%x", er->ExceptionCode);

  if (er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
      er->NumberParameters >= 2) {
    // For access violations, ExceptionInformation[0] is the access kind
    // and ExceptionInformation[1] is the faulting address.
    switch (er->ExceptionInformation[0]) {
    case 0: st->print(", reading address"); break;
    case 1: st->print(", writing address"); break;
    default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
                       er->ExceptionInformation[0]);
    }
    st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
  } else if (er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR &&
             er->NumberParameters >= 2 && UseSharedSpaces) {
    // Paging failure inside the CDS archive mapping: most likely the
    // backing file became unreachable (disk/network problem).
    FileMapInfo* mapinfo = FileMapInfo::current_info();
    if (mapinfo->is_in_shared_space((void*)er->ExceptionInformation[1])) {
      st->print("\n\nError accessing class data sharing archive." \
                " Mapped file inaccessible during execution, " \
                " possible disk/network problem.");
    }
  } else {
    // Unknown exception code: dump the raw parameters.
    int num = er->NumberParameters;
    if (num > 0) {
      st->print(", ExceptionInformation=");
      for (int i = 0; i < num; i++) {
        st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
      }
    }
  }
  st->cr();
}
// Signal handler reporting is a no-op on Windows (no POSIX handler table).
void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
  // do nothing
}
// Lazily-initialized cache of the full path to jvm.dll (see os::jvm_path).
static char saved_jvm_path[MAX_PATH] = {0};
1846 // Find the full path to the current module, jvm.dll
1847 void os::jvm_path(char *buf, jint buflen) {
1848 // Error checking.
1849 if (buflen < MAX_PATH) {
1850 assert(false, "must use a large-enough buffer");
1851 buf[0] = '\0';
1852 return;
1853 }
1854 // Lazy resolve the path to current module.
1855 if (saved_jvm_path[0] != 0) {
1856 strcpy(buf, saved_jvm_path);
1857 return;
1858 }
1860 buf[0] = '\0';
1861 if (Arguments::created_by_gamma_launcher()) {
1862 // Support for the gamma launcher. Check for an
1863 // JAVA_HOME environment variable
1864 // and fix up the path so it looks like
1865 // libjvm.so is installed there (append a fake suffix
1866 // hotspot/libjvm.so).
1867 char* java_home_var = ::getenv("JAVA_HOME");
1868 if (java_home_var != NULL && java_home_var[0] != 0 &&
1869 strlen(java_home_var) < (size_t)buflen) {
1871 strncpy(buf, java_home_var, buflen);
1873 // determine if this is a legacy image or modules image
1874 // modules image doesn't have "jre" subdirectory
1875 size_t len = strlen(buf);
1876 char* jrebin_p = buf + len;
1877 jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
1878 if (0 != _access(buf, 0)) {
1879 jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
1880 }
1881 len = strlen(buf);
1882 jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
1883 }
1884 }
1886 if(buf[0] == '\0') {
1887 GetModuleFileName(vm_lib_handle, buf, buflen);
1888 }
1889 strncpy(saved_jvm_path, buf, MAX_PATH);
1890 }
// Print the platform JNI symbol prefix: 32-bit stdcall names carry a
// leading underscore; 64-bit names have no prefix.
void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  st->print("_");
#endif
}
// Print the platform JNI symbol suffix: 32-bit stdcall names end in
// "@<bytes-of-arguments>"; 64-bit names have no suffix.
void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  st->print("@%d", args_size * sizeof(int));
#endif
}
// This method is a copy of JDK's sysGetLastErrorString
// from src/windows/hpi/src/system_md.c
// Formats the most recent OS (GetLastError) or CRT (errno) error message
// into 'buf' and returns its length; returns 0 when no error is pending.

size_t os::lasterror(char* buf, size_t len) {
  DWORD errval;

  if ((errval = GetLastError()) != 0) {
    // DOS error
    size_t n = (size_t)FormatMessage(
          FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
          NULL,
          errval,
          0,
          buf,
          (DWORD)len,
          NULL);
    if (n > 3) {
      // Drop final '.', CR, LF
      if (buf[n - 1] == '\n') n--;
      if (buf[n - 1] == '\r') n--;
      if (buf[n - 1] == '.') n--;
      buf[n] = '\0';
    }
    return n;
  }

  if (errno != 0) {
    // C runtime error that has no corresponding DOS error code
    const char* s = strerror(errno);
    size_t n = strlen(s);
    if (n >= len) n = len - 1;   // truncate to fit, leaving room for NUL
    strncpy(buf, s, n);
    buf[n] = '\0';
    return n;
  }

  return 0;
}
1945 int os::get_last_error() {
1946 DWORD error = GetLastError();
1947 if (error == 0)
1948 error = errno;
1949 return (int)error;
1950 }
// sun.misc.Signal
// NOTE that this is a workaround for an apparent kernel bug where if
// a signal handler for SIGBREAK is installed then that signal handler
// takes priority over the console control handler for CTRL_CLOSE_EVENT.
// See bug 4416763.
// The SIGBREAK handler is therefore kept here instead of in the CRT's
// handler table, and is dispatched manually from consoleHandler().
static void (*sigbreakHandler)(int) = NULL;
// Handler installed for Java-visible signals: records the signal for the
// signal-dispatcher thread via signal_notify().
static void UserHandler(int sig, void *siginfo, void *context) {
  os::signal_notify(sig);
  // We need to reinstate the signal handler each time...
  os::signal(sig, (void*)UserHandler);
}
// Return the address of the VM's user signal handler (see UserHandler above).
void* os::user_handler() {
  return (void*) UserHandler;
}
1969 void* os::signal(int signal_number, void* handler) {
1970 if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
1971 void (*oldHandler)(int) = sigbreakHandler;
1972 sigbreakHandler = (void (*)(int)) handler;
1973 return (void*) oldHandler;
1974 } else {
1975 return (void*)::signal(signal_number, (void (*)(int))handler);
1976 }
1977 }
// Raise 'signal_number' in the current process via the C runtime.
void os::signal_raise(int signal_number) {
  raise(signal_number);
}
// The Win32 C runtime library maps all console control events other than ^C
// into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
// logoff, and shutdown events. We therefore install our own console handler
// that raises SIGTERM for the latter cases.
//
// Returns TRUE when the event was handled here, FALSE to let the next
// (OS default) handler process it.
static BOOL WINAPI consoleHandler(DWORD event) {
  switch(event) {
    case CTRL_C_EVENT:
      if (is_error_reported()) {
        // Ctrl-C is pressed during error reporting, likely because the error
        // handler fails to abort. Let VM die immediately.
        os::die();
      }

      os::signal_raise(SIGINT);
      return TRUE;
      break;
    case CTRL_BREAK_EVENT:
      // Dispatch to the manually-tracked SIGBREAK handler (see
      // sigbreakHandler workaround above).
      if (sigbreakHandler != NULL) {
        (*sigbreakHandler)(SIGBREAK);
      }
      return TRUE;
      break;
    case CTRL_LOGOFF_EVENT: {
      // Don't terminate JVM if it is running in a non-interactive session,
      // such as a service process.
      USEROBJECTFLAGS flags;
      HANDLE handle = GetProcessWindowStation();
      if (handle != NULL &&
          GetUserObjectInformation(handle, UOI_FLAGS, &flags,
          sizeof( USEROBJECTFLAGS), NULL)) {
        // If it is a non-interactive session, let next handler to deal
        // with it.
        if ((flags.dwFlags & WSF_VISIBLE) == 0) {
          return FALSE;
        }
      }
    }
    // NOTE: intentional fall-through -- an interactive logoff is handled
    // exactly like close/shutdown: raise SIGTERM.
    case CTRL_CLOSE_EVENT:
    case CTRL_SHUTDOWN_EVENT:
      os::signal_raise(SIGTERM);
      return TRUE;
      break;
    default:
      break;
  }
  return FALSE;
}
2032 /*
2033 * The following code is moved from os.cpp for making this
2034 * code platform specific, which it is by its very nature.
2035 */
// Return maximum OS signal used + 1 for internal use only
// Used as exit signal for signal_thread
int os::sigexitnum_pd(){
  return NSIG;
}
// a counter for each possible signal value, including signal_thread exit signal
static volatile jint pending_signals[NSIG+1] = { 0 };
// Semaphore released once per recorded signal; the signal dispatcher thread
// blocks on it in check_pending_signals().
static HANDLE sig_sem = NULL;
// Platform-dependent part of signal subsystem initialization: set up the
// pending-signal counters/semaphore and (unless -Xrs) the console handler.
void os::signal_init_pd() {
  // Initialize signal structures
  memset((void*)pending_signals, 0, sizeof(pending_signals));

  // Max count NSIG+1 matches the number of signal slots, including the
  // signal-thread exit signal.
  sig_sem = ::CreateSemaphore(NULL, 0, NSIG+1, NULL);

  // Programs embedding the VM do not want it to attempt to receive
  // events like CTRL_LOGOFF_EVENT, which are used to implement the
  // shutdown hooks mechanism introduced in 1.3. For example, when
  // the VM is run as part of a Windows NT service (i.e., a servlet
  // engine in a web server), the correct behavior is for any console
  // control handler to return FALSE, not TRUE, because the OS's
  // "final" handler for such events allows the process to continue if
  // it is a service (while terminating it if it is not a service).
  // To make this behavior uniform and the mechanism simpler, we
  // completely disable the VM's usage of these console events if -Xrs
  // (=ReduceSignalUsage) is specified. This means, for example, that
  // the CTRL-BREAK thread dump mechanism is also disabled in this
  // case. See bugs 4323062, 4345157, and related bugs.

  if (!ReduceSignalUsage) {
    // Add a CTRL-C handler
    SetConsoleCtrlHandler(consoleHandler, TRUE);
  }
}
2073 void os::signal_notify(int signal_number) {
2074 BOOL ret;
2075 if (sig_sem != NULL) {
2076 Atomic::inc(&pending_signals[signal_number]);
2077 ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
2078 assert(ret != 0, "ReleaseSemaphore() failed");
2079 }
2080 }
// Scan pending_signals for a posted signal and atomically claim one,
// returning its number. If none is pending: return -1 when
// wait_for_signal is false, otherwise block on sig_sem until
// os::signal_notify() posts a signal. The blocking path must run on a
// JavaThread because it participates in the external-suspension protocol.
static int check_pending_signals(bool wait_for_signal) {
  DWORD ret;
  while (true) {
    // Try to claim any pending signal via a lock-free decrement.
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    if (!wait_for_signal) {
      return -1;
    }

    JavaThread *thread = JavaThread::current();

    // Transition to blocked state so safepoints are not held up.
    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
      ret = ::WaitForSingleObject(sig_sem, INFINITE);
      assert(ret == WAIT_OBJECT_0, "WaitForSingleObject() failed");

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        //
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us. We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        //
        // Re-post the count we consumed so the signal is not lost, then
        // self-suspend; re-wait for the semaphore after resuming.
        ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
        assert(ret != 0, "ReleaseSemaphore() failed");

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}
// Non-blocking poll for a pending signal; returns the signal number or -1.
int os::signal_lookup() {
  return check_pending_signals(false);
}
// Block until a signal is pending, then return its number.
int os::signal_wait() {
  return check_pending_signals(true);
}
// Implicit OS exception handling

// Redirect execution to 'handler': records the faulting pc in the current
// JavaThread (when one exists) and rewrites the saved instruction pointer
// in the exception context, then tells SEH to resume execution there.
LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo, address handler) {
  JavaThread* thread = JavaThread::current();
  // Save pc in thread
#ifdef _M_IA64
  // Do not blow up if no thread info available.
  if (thread) {
    // Saving PRECISE pc (with slot information) in thread.
    uint64_t precise_pc = (uint64_t) exceptionInfo->ExceptionRecord->ExceptionAddress;
    // Convert precise PC into "Unix" format
    precise_pc = (precise_pc & 0xFFFFFFFFFFFFFFF0) | ((precise_pc & 0xF) >> 2);
    thread->set_saved_exception_pc((address)precise_pc);
  }
  // Set pc to handler
  exceptionInfo->ContextRecord->StIIP = (DWORD64)handler;
  // Clear out psr.ri (= Restart Instruction) in order to continue
  // at the beginning of the target bundle.
  exceptionInfo->ContextRecord->StIPSR &= 0xFFFFF9FFFFFFFFFF;
  assert(((DWORD64)handler & 0xF) == 0, "Target address must point to the beginning of a bundle!");
#else
#ifdef _M_AMD64
  // Do not blow up if no thread info available.
  if (thread) {
    thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip);
  }
  // Set pc to handler
  exceptionInfo->ContextRecord->Rip = (DWORD64)handler;
#else
  // Do not blow up if no thread info available.
  if (thread) {
    thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip);
  }
  // Set pc to handler
  exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler;
#endif
#endif

  // Continue the execution
  return EXCEPTION_CONTINUE_EXECUTION;
}
// Used for PostMortemDump
// Forward declarations of debug helpers that can be invoked from a native
// debugger while inspecting a crashed VM.
extern "C" void safepoints();
extern "C" void find(int x);
extern "C" void events();
// According to Windows API documentation, an illegal instruction sequence should generate
// the 0xC000001C exception code. However, real world experience shows that occasionally
// the execution of an illegal instruction can generate the exception code 0xC000001E. This
// seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems).

#define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E

// From "Execution Protection in the Windows Operating System" draft 0.35
// Once a system header becomes available, the "real" define should be
// included or copied here.
#define EXCEPTION_INFO_EXEC_VIOLATION 0x08

// Handle NAT Bit consumption on IA64.
#ifdef _M_IA64
#define EXCEPTION_REG_NAT_CONSUMPTION STATUS_REG_NAT_CONSUMPTION
#endif

// Windows Vista/2008 heap corruption check
#define EXCEPTION_HEAP_CORRUPTION 0xC0000374

// All Visual C++ exceptions thrown from code generated by the Microsoft Visual
// C++ compiler contain this error code. Because this is a compiler-generated
// error, the code is not listed in the Win32 API header files.
// The code is actually a cryptic mnemonic device, with the initial "E"
// standing for "exception" and the final 3 bytes (0x6D7363) representing the
// ASCII values of "msc".

#define EXCEPTION_UNCAUGHT_CXX_EXCEPTION 0xE06D7363
2209 #define def_excpt(val) { #val, (val) }
2211 static const struct { char* name; uint number; } exceptlabels[] = {
2212 def_excpt(EXCEPTION_ACCESS_VIOLATION),
2213 def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT),
2214 def_excpt(EXCEPTION_BREAKPOINT),
2215 def_excpt(EXCEPTION_SINGLE_STEP),
2216 def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED),
2217 def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND),
2218 def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO),
2219 def_excpt(EXCEPTION_FLT_INEXACT_RESULT),
2220 def_excpt(EXCEPTION_FLT_INVALID_OPERATION),
2221 def_excpt(EXCEPTION_FLT_OVERFLOW),
2222 def_excpt(EXCEPTION_FLT_STACK_CHECK),
2223 def_excpt(EXCEPTION_FLT_UNDERFLOW),
2224 def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO),
2225 def_excpt(EXCEPTION_INT_OVERFLOW),
2226 def_excpt(EXCEPTION_PRIV_INSTRUCTION),
2227 def_excpt(EXCEPTION_IN_PAGE_ERROR),
2228 def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION),
2229 def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2),
2230 def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION),
2231 def_excpt(EXCEPTION_STACK_OVERFLOW),
2232 def_excpt(EXCEPTION_INVALID_DISPOSITION),
2233 def_excpt(EXCEPTION_GUARD_PAGE),
2234 def_excpt(EXCEPTION_INVALID_HANDLE),
2235 def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION),
2236 def_excpt(EXCEPTION_HEAP_CORRUPTION)
2237 #ifdef _M_IA64
2238 , def_excpt(EXCEPTION_REG_NAT_CONSUMPTION)
2239 #endif
2240 };
2242 const char* os::exception_name(int exception_code, char *buf, size_t size) {
2243 uint code = static_cast<uint>(exception_code);
2244 for (uint i = 0; i < ARRAY_SIZE(exceptlabels); ++i) {
2245 if (exceptlabels[i].number == code) {
2246 jio_snprintf(buf, size, "%s", exceptlabels[i].name);
2247 return buf;
2248 }
2249 }
2251 return NULL;
2252 }
//-----------------------------------------------------------------------------
// Fix up state after a hardware trap from integer division: on x86,
// min_jint / -1 overflows and raises EXCEPTION_INT_OVERFLOW. Emulate the
// Java-specified result (quotient min_jint, remainder 0) and resume just
// past the 2-byte idiv instruction.
LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
  // handle exception caused by idiv; should only happen for -MinInt/-1
  // (division by zero is handled explicitly)
#ifdef _M_IA64
  assert(0, "Fix Handle_IDiv_Exception");
#else
#ifdef _M_AMD64
  PCONTEXT ctx = exceptionInfo->ContextRecord;
  address pc = (address)ctx->Rip;
  assert(pc[0] == 0xF7, "not an idiv opcode");
  assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
  assert(ctx->Rax == min_jint, "unexpected idiv exception");
  // set correct result values and continue after idiv instruction
  ctx->Rip = (DWORD64)pc + 2;    // idiv reg, reg is 2 bytes
  ctx->Rax = (DWORD64)min_jint;  // result
  ctx->Rdx = (DWORD64)0;         // remainder
  // Continue the execution
#else
  PCONTEXT ctx = exceptionInfo->ContextRecord;
  address pc = (address)ctx->Eip;
  assert(pc[0] == 0xF7, "not an idiv opcode");
  assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
  assert(ctx->Eax == min_jint, "unexpected idiv exception");
  // set correct result values and continue after idiv instruction
  ctx->Eip = (DWORD)pc + 2;      // idiv reg, reg is 2 bytes
  ctx->Eax = (DWORD)min_jint;    // result
  ctx->Edx = (DWORD)0;           // remainder
  // Continue the execution
#endif
#endif
  return EXCEPTION_CONTINUE_EXECUTION;
}
#ifndef _WIN64
//-----------------------------------------------------------------------------
// Handle an x87 floating-point exception caused by native code having
// modified the FPU control word: restore the VM's standard control word
// (with FLT exceptions masked) and resume. Anything we don't handle is
// forwarded to the previously installed unhandled-exception filter.
LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
  // handle exception caused by native method modifying control word
  PCONTEXT ctx = exceptionInfo->ContextRecord;
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;

  switch (exception_code) {
  case EXCEPTION_FLT_DENORMAL_OPERAND:
  case EXCEPTION_FLT_DIVIDE_BY_ZERO:
  case EXCEPTION_FLT_INEXACT_RESULT:
  case EXCEPTION_FLT_INVALID_OPERATION:
  case EXCEPTION_FLT_OVERFLOW:
  case EXCEPTION_FLT_STACK_CHECK:
  case EXCEPTION_FLT_UNDERFLOW:
    // All FLT cases share this body: only resume if the control word
    // actually differs from the VM's expected standard value.
    jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std());
    if (fp_control_word != ctx->FloatSave.ControlWord) {
      // Restore FPCW and mask out FLT exceptions
      ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
      // Mask out pending FLT exceptions
      ctx->FloatSave.StatusWord &= 0xffffff00;
      return EXCEPTION_CONTINUE_EXECUTION;
    }
  }

  if (prev_uef_handler != NULL) {
    // We didn't handle this exception so pass it to the previous
    // UnhandledExceptionFilter.
    return (prev_uef_handler)(exceptionInfo);
  }

  return EXCEPTION_CONTINUE_SEARCH;
}
#else //_WIN64
/*
  On Windows, the mxcsr control bits are non-volatile across calls
  See also CR 6192333
  If EXCEPTION_FLT_* happened after some native method modified
  mxcsr - it is not a jvm fault.
  However should we decide to restore of mxcsr after a faulty
  native method we can uncomment following code
      jint MxCsr = INITIAL_MXCSR;
        // we can't use StubRoutines::addr_mxcsr_std()
        // because in Win64 mxcsr is not saved there
      if (MxCsr != ctx->MxCsr) {
        ctx->MxCsr = MxCsr;
        return EXCEPTION_CONTINUE_EXECUTION;
      }

*/
#endif // _WIN64
2341 static inline void report_error(Thread* t, DWORD exception_code,
2342 address addr, void* siginfo, void* context) {
2343 VMError err(t, exception_code, addr, siginfo, context);
2344 err.report_and_die();
2346 // If UseOsErrorReporting, this will return here and save the error file
2347 // somewhere where we can find it in the minidump.
2348 }
//-----------------------------------------------------------------------------
// Top-level structured exception filter installed around VM and compiled Java
// code. Classifies hardware exceptions (access violations, stack overflow,
// integer traps, illegal instructions, FP faults) and either resumes at a
// generated continuation stub or hands off to the fatal error reporter.
LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
  if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
#ifdef _M_IA64
  // On Itanium, we need the "precise pc", which has the slot number coded
  // into the least 4 bits: 0000=slot0, 0100=slot1, 1000=slot2 (Windows format).
  address pc = (address) exceptionInfo->ExceptionRecord->ExceptionAddress;
  // Convert the pc to "Unix format", which has the slot number coded
  // into the least 2 bits: 0000=slot0, 0001=slot1, 0010=slot2
  // This is needed for IA64 because "relocation" / "implicit null check" / "poll instruction"
  // information is saved in the Unix format.
  address pc_unix_format = (address) ((((uint64_t)pc) & 0xFFFFFFFFFFFFFFF0) | ((((uint64_t)pc) & 0xF) >> 2));
#else
#ifdef _M_AMD64
  address pc = (address) exceptionInfo->ContextRecord->Rip;
#else
  address pc = (address) exceptionInfo->ContextRecord->Eip;
#endif
#endif
  Thread* t = ThreadLocalStorage::get_thread_slow();          // slow & steady

  // Handle SafeFetch32 and SafeFetchN exceptions.
  if (StubRoutines::is_safefetch_fault(pc)) {
    return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
  }

#ifndef _WIN64
  // Execution protection violation - win32 running on AMD64 only
  // Handled first to avoid misdiagnosis as a "normal" access violation;
  // This is safe to do because we have a new/unique ExceptionInformation
  // code for this condition.
  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
    int exception_subcode = (int) exceptionRecord->ExceptionInformation[0];
    address addr = (address) exceptionRecord->ExceptionInformation[1];

    if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
      int page_size = os::vm_page_size();

      // Make sure the pc and the faulting address are sane.
      //
      // If an instruction spans a page boundary, and the page containing
      // the beginning of the instruction is executable but the following
      // page is not, the pc and the faulting address might be slightly
      // different - we still want to unguard the 2nd page in this case.
      //
      // 15 bytes seems to be a (very) safe value for max instruction size.
      bool pc_is_near_addr =
        (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
      bool instr_spans_page_boundary =
        (align_size_down((intptr_t) pc ^ (intptr_t) addr,
                         (intptr_t) page_size) > 0);

      if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
        static volatile address last_addr =
          (address) os::non_memory_address_word();

        // In conservative mode, don't unguard unless the address is in the VM
        if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
            (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {

          // Set memory to RWX and retry
          address page_start =
            (address) align_size_down((intptr_t) addr, (intptr_t) page_size);
          bool res = os::protect_memory((char*) page_start, page_size,
                                        os::MEM_PROT_RWX);

          if (PrintMiscellaneous && Verbose) {
            char buf[256];
            jio_snprintf(buf, sizeof(buf), "Execution protection violation "
                         "at " INTPTR_FORMAT
                         ", unguarding " INTPTR_FORMAT ": %s", addr,
                         page_start, (res ? "success" : strerror(errno)));
            tty->print_raw_cr(buf);
          }

          // Set last_addr so if we fault again at the same address, we don't
          // end up in an endless loop.
          //
          // There are two potential complications here. Two threads trapping
          // at the same address at the same time could cause one of the
          // threads to think it already unguarded, and abort the VM. Likely
          // very rare.
          //
          // The other race involves two threads alternately trapping at
          // different addresses and failing to unguard the page, resulting in
          // an endless loop. This condition is probably even more unlikely
          // than the first.
          //
          // Although both cases could be avoided by using locks or thread
          // local last_addr, these solutions are unnecessary complication:
          // this handler is a best-effort safety net, not a complete solution.
          // It is disabled by default and should only be used as a workaround
          // in case we missed any no-execute-unsafe VM code.

          last_addr = addr;

          return EXCEPTION_CONTINUE_EXECUTION;
        }
      }

      // Last unguard failed or not unguarding
      tty->print_raw_cr("Execution protection violation");
      report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord,
                   exceptionInfo->ContextRecord);
      return EXCEPTION_CONTINUE_SEARCH;
    }
  }
#endif // _WIN64

  // Check to see if we caught the safepoint code in the
  // process of write protecting the memory serialization page.
  // It write enables the page immediately after protecting it
  // so just return.
  if ( exception_code == EXCEPTION_ACCESS_VIOLATION ) {
    JavaThread* thread = (JavaThread*) t;
    PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
    address addr = (address) exceptionRecord->ExceptionInformation[1];
    if ( os::is_memory_serialize_page(thread, addr) ) {
      // Block current thread until the memory serialize page permission restored.
      os::block_on_serialize_page_trap();
      return EXCEPTION_CONTINUE_EXECUTION;
    }
  }

  if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
      VM_Version::is_cpuinfo_segv_addr(pc)) {
    // Verify that OS save/restore AVX registers.
    return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
  }

  if (t != NULL && t->is_Java_thread()) {
    JavaThread* thread = (JavaThread*) t;
    bool in_java = thread->thread_state() == _thread_in_Java;

    // Handle potential stack overflows up front.
    if (exception_code == EXCEPTION_STACK_OVERFLOW) {
      if (os::uses_stack_guard_pages()) {
#ifdef _M_IA64
        // Use guard page for register stack.
        PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
        address addr = (address) exceptionRecord->ExceptionInformation[1];
        // Check for a register stack overflow on Itanium
        if (thread->addr_inside_register_stack_red_zone(addr)) {
          // Fatal red zone violation happens if the Java program
          // catches a StackOverflow error and does so much processing
          // that it runs beyond the unprotected yellow guard zone. As
          // a result, we are out of here.
          fatal("ERROR: Unrecoverable stack overflow happened. JVM will exit.");
        } else if(thread->addr_inside_register_stack(addr)) {
          // Disable the yellow zone which sets the state that
          // we've got a stack overflow problem.
          if (thread->stack_yellow_zone_enabled()) {
            thread->disable_stack_yellow_zone();
          }
          // Give us some room to process the exception.
          thread->disable_register_stack_guard();
          // Tracing with +Verbose.
          if (Verbose) {
            tty->print_cr("SOF Compiled Register Stack overflow at " INTPTR_FORMAT " (SIGSEGV)", pc);
            tty->print_cr("Register Stack access at " INTPTR_FORMAT, addr);
            tty->print_cr("Register Stack base " INTPTR_FORMAT, thread->register_stack_base());
            tty->print_cr("Register Stack [" INTPTR_FORMAT "," INTPTR_FORMAT "]",
                          thread->register_stack_base(),
                          thread->register_stack_base() + thread->stack_size());
          }

          // Reguard the permanent register stack red zone just to be sure.
          // We saw Windows silently disabling this without telling us.
          thread->enable_register_stack_red_zone();

          return Handle_Exception(exceptionInfo,
                                  SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
        }
#endif
        if (thread->stack_yellow_zone_enabled()) {
          // Yellow zone violation. The o/s has unprotected the first yellow
          // zone page for us. Note: must call disable_stack_yellow_zone to
          // update the enabled status, even if the zone contains only one page.
          thread->disable_stack_yellow_zone();
          // If not in java code, return and hope for the best.
          return in_java ? Handle_Exception(exceptionInfo,
                                            SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
                         : EXCEPTION_CONTINUE_EXECUTION;
        } else {
          // Fatal red zone violation.
          thread->disable_stack_red_zone();
          tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
          report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                       exceptionInfo->ContextRecord);
          return EXCEPTION_CONTINUE_SEARCH;
        }
      } else if (in_java) {
        // JVM-managed guard pages cannot be used on win95/98. The o/s provides
        // a one-time-only guard page, which it has released to us. The next
        // stack overflow on this thread will result in an ACCESS_VIOLATION.
        return Handle_Exception(exceptionInfo,
                                SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
      } else {
        // Can only return and hope for the best. Further stack growth will
        // result in an ACCESS_VIOLATION.
        return EXCEPTION_CONTINUE_EXECUTION;
      }
    } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
      // Either stack overflow or null pointer exception.
      if (in_java) {
        PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
        address addr = (address) exceptionRecord->ExceptionInformation[1];
        address stack_end = thread->stack_base() - thread->stack_size();
        if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
          // Stack overflow.
          assert(!os::uses_stack_guard_pages(),
                 "should be caught by red zone code above.");
          return Handle_Exception(exceptionInfo,
                                  SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
        }
        //
        // Check for safepoint polling and implicit null
        // We only expect null pointers in the stubs (vtable)
        // the rest are checked explicitly now.
        //
        CodeBlob* cb = CodeCache::find_blob(pc);
        if (cb != NULL) {
          if (os::is_poll_address(addr)) {
            address stub = SharedRuntime::get_poll_stub(pc);
            return Handle_Exception(exceptionInfo, stub);
          }
        }
        {
#ifdef _WIN64
          //
          // If it's a legal stack address map the entire region in
          //
          PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
          address addr = (address) exceptionRecord->ExceptionInformation[1];
          if (addr > thread->stack_yellow_zone_base() && addr < thread->stack_base() ) {
            // Round down to a page boundary before committing up to the base.
            addr = (address)((uintptr_t)addr &
                             (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
            os::commit_memory((char *)addr, thread->stack_base() - addr,
                              !ExecMem);
            return EXCEPTION_CONTINUE_EXECUTION;
          }
          else
#endif
          {
            // Null pointer exception.
#ifdef _M_IA64
            // Process implicit null checks in compiled code. Note: Implicit null checks
            // can happen even if "ImplicitNullChecks" is disabled, e.g. in vtable stubs.
            if (CodeCache::contains((void*) pc_unix_format) && !MacroAssembler::needs_explicit_null_check((intptr_t) addr)) {
              CodeBlob *cb = CodeCache::find_blob_unsafe(pc_unix_format);
              // Handle implicit null check in UEP method entry
              if (cb && (cb->is_frame_complete_at(pc) ||
                         (cb->is_nmethod() && ((nmethod *)cb)->inlinecache_check_contains(pc)))) {
                if (Verbose) {
                  intptr_t *bundle_start = (intptr_t*) ((intptr_t) pc_unix_format & 0xFFFFFFFFFFFFFFF0);
                  tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGSEGV)", pc_unix_format);
                  tty->print_cr("      to addr " INTPTR_FORMAT, addr);
                  tty->print_cr("      bundle is " INTPTR_FORMAT " (high), " INTPTR_FORMAT " (low)",
                                *(bundle_start + 1), *bundle_start);
                }
                return Handle_Exception(exceptionInfo,
                                        SharedRuntime::continuation_for_implicit_exception(thread, pc_unix_format, SharedRuntime::IMPLICIT_NULL));
              }
            }

            // Implicit null checks were processed above. Hence, we should not reach
            // here in the usual case => die!
            if (Verbose) tty->print_raw_cr("Access violation, possible null pointer exception");
            report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                         exceptionInfo->ContextRecord);
            return EXCEPTION_CONTINUE_SEARCH;

#else // !IA64

            // Windows 98 reports faulting addresses incorrectly
            if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr) ||
                !os::win32::is_nt()) {
              address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
              if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
            }
            report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                         exceptionInfo->ContextRecord);
            return EXCEPTION_CONTINUE_SEARCH;
#endif
          }
        }
      }

#ifdef _WIN64
      // Special care for fast JNI field accessors.
      // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
      // in and the heap gets shrunk before the field access.
      if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
        address addr = JNI_FastGetField::find_slowcase_pc(pc);
        if (addr != (address)-1) {
          return Handle_Exception(exceptionInfo, addr);
        }
      }
#endif

      // Stack overflow or null pointer exception in native code.
      report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                   exceptionInfo->ContextRecord);
      return EXCEPTION_CONTINUE_SEARCH;
    } // /EXCEPTION_ACCESS_VIOLATION
    // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#if defined _M_IA64
    else if ((exception_code == EXCEPTION_ILLEGAL_INSTRUCTION ||
              exception_code == EXCEPTION_ILLEGAL_INSTRUCTION_2)) {
      M37 handle_wrong_method_break(0, NativeJump::HANDLE_WRONG_METHOD, PR0);

      // Compiled method patched to be non entrant? Following conditions must apply:
      // 1. must be first instruction in bundle
      // 2. must be a break instruction with appropriate code
      if((((uint64_t) pc & 0x0F) == 0) &&
         (((IPF_Bundle*) pc)->get_slot0() == handle_wrong_method_break.bits())) {
        return Handle_Exception(exceptionInfo,
                                (address)SharedRuntime::get_handle_wrong_method_stub());
      }
    } // /EXCEPTION_ILLEGAL_INSTRUCTION
#endif

    if (in_java) {
      switch (exception_code) {
      case EXCEPTION_INT_DIVIDE_BY_ZERO:
        return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));

      case EXCEPTION_INT_OVERFLOW:
        return Handle_IDiv_Exception(exceptionInfo);

      } // switch
    }
#ifndef _WIN64
    if (((thread->thread_state() == _thread_in_Java) ||
         (thread->thread_state() == _thread_in_native)) &&
        exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION)
    {
      LONG result=Handle_FLT_Exception(exceptionInfo);
      if (result==EXCEPTION_CONTINUE_EXECUTION) return result;
    }
#endif //_WIN64
  }

  if (exception_code != EXCEPTION_BREAKPOINT) {
    report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                 exceptionInfo->ContextRecord);
  }
  return EXCEPTION_CONTINUE_SEARCH;
}
#ifndef _WIN64
// Special care for fast JNI accessors.
// jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
// the heap gets shrunk before the field access.
// Need to install our own structured exception handler since native code may
// install its own.
LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    address pc = (address) exceptionInfo->ContextRecord->Eip;
    // Did the fault occur inside a fast accessor with a registered slow path?
    address addr = JNI_FastGetField::find_slowcase_pc(pc);
    if (addr != (address)-1) {
      // Resume execution at the accessor's slow-case re-entry point.
      return Handle_Exception(exceptionInfo, addr);
    }
  }
  // Not ours; keep searching for a handler.
  return EXCEPTION_CONTINUE_SEARCH;
}
// Wrap each generated fast JNI field accessor in a structured exception
// handler: a recoverable access violation (e.g. heap shrunk by a GC) is
// redirected to the accessor's slow path by the filter above; anything
// swallowed here falls through and returns 0.
#define DEFINE_FAST_GETFIELD(Return,Fieldname,Result) \
Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env, jobject obj, jfieldID fieldID) { \
  __try { \
    return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env, obj, fieldID); \
  } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)_exception_info())) { \
  } \
  return 0; \
}

DEFINE_FAST_GETFIELD(jboolean, bool,   Boolean)
DEFINE_FAST_GETFIELD(jbyte,    byte,   Byte)
DEFINE_FAST_GETFIELD(jchar,    char,   Char)
DEFINE_FAST_GETFIELD(jshort,   short,  Short)
DEFINE_FAST_GETFIELD(jint,     int,    Int)
DEFINE_FAST_GETFIELD(jlong,    long,   Long)
DEFINE_FAST_GETFIELD(jfloat,   float,  Float)
DEFINE_FAST_GETFIELD(jdouble,  double, Double)
// Map a Java BasicType to the address of the corresponding SEH-wrapped
// fast JNI field accessor generated by DEFINE_FAST_GETFIELD above.
address os::win32::fast_jni_accessor_wrapper(BasicType type) {
  switch (type) {
  case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
  case T_BYTE:    return (address)jni_fast_GetByteField_wrapper;
  case T_CHAR:    return (address)jni_fast_GetCharField_wrapper;
  case T_SHORT:   return (address)jni_fast_GetShortField_wrapper;
  case T_INT:     return (address)jni_fast_GetIntField_wrapper;
  case T_LONG:    return (address)jni_fast_GetLongField_wrapper;
  case T_FLOAT:   return (address)jni_fast_GetFloatField_wrapper;
  case T_DOUBLE:  return (address)jni_fast_GetDoubleField_wrapper;
  default:        ShouldNotReachHere();
  }
  // Unreachable; keeps the compiler happy about a missing return.
  return (address)-1;
}
#endif
// Run funcPtr under the VM's top-level structured exception filter so a
// crash inside the (internal VM) test function produces a normal hs_err
// error dump instead of a bare process abort.
void os::win32::call_test_func_with_wrapper(void (*funcPtr)(void)) {
  // Install a win32 structured exception handler around the test
  // function call so the VM can generate an error dump if needed.
  __try {
    (*funcPtr)();
  } __except(topLevelExceptionFilter(
             (_EXCEPTION_POINTERS*)_exception_info())) {
    // Nothing to do.
  }
}
// Virtual Memory

// Page size used by the VM's virtual-memory layer (cached in os::win32).
int os::vm_page_size() { return os::win32::vm_page_size(); }
// Granularity at which VirtualAlloc reserves address space (cached in os::win32).
int os::vm_allocation_granularity() {
  return os::win32::vm_allocation_granularity();
}
2773 // Windows large page support is available on Windows 2003. In order to use
2774 // large page memory, the administrator must first assign additional privilege
2775 // to the user:
2776 // + select Control Panel -> Administrative Tools -> Local Security Policy
2777 // + select Local Policies -> User Rights Assignment
2778 // + double click "Lock pages in memory", add users and/or groups
2779 // + reboot
2780 // Note the above steps are needed for administrator as well, as administrators
2781 // by default do not have the privilege to lock pages in memory.
2782 //
2783 // Note about Windows 2003: although the API supports committing large page
2784 // memory on a page-by-page basis and VirtualAlloc() returns success under this
2785 // scenario, I found through experiment it only uses large page if the entire
2786 // memory region is reserved and committed in a single VirtualAlloc() call.
2787 // This makes Windows large page support more or less like Solaris ISM, in
2788 // that the entire heap must be committed upfront. This probably will change
2789 // in the future, if so the code below needs to be revisited.
// Define MEM_LARGE_PAGES ourselves when building against an SDK that
// predates large-page support.
#ifndef MEM_LARGE_PAGES
#define MEM_LARGE_PAGES 0x20000000
#endif

// Process and token handles used while acquiring SeLockMemoryPrivilege;
// only valid between request_lock_memory_privilege() and
// cleanup_after_large_page_init().
static HANDLE _hProcess;
static HANDLE _hToken;
2798 // Container for NUMA node list info
2799 class NUMANodeListHolder {
2800 private:
2801 int *_numa_used_node_list; // allocated below
2802 int _numa_used_node_count;
2804 void free_node_list() {
2805 if (_numa_used_node_list != NULL) {
2806 FREE_C_HEAP_ARRAY(int, _numa_used_node_list, mtInternal);
2807 }
2808 }
2810 public:
2811 NUMANodeListHolder() {
2812 _numa_used_node_count = 0;
2813 _numa_used_node_list = NULL;
2814 // do rest of initialization in build routine (after function pointers are set up)
2815 }
2817 ~NUMANodeListHolder() {
2818 free_node_list();
2819 }
2821 bool build() {
2822 DWORD_PTR proc_aff_mask;
2823 DWORD_PTR sys_aff_mask;
2824 if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
2825 ULONG highest_node_number;
2826 if (!os::Kernel32Dll::GetNumaHighestNodeNumber(&highest_node_number)) return false;
2827 free_node_list();
2828 _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
2829 for (unsigned int i = 0; i <= highest_node_number; i++) {
2830 ULONGLONG proc_mask_numa_node;
2831 if (!os::Kernel32Dll::GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
2832 if ((proc_aff_mask & proc_mask_numa_node)!=0) {
2833 _numa_used_node_list[_numa_used_node_count++] = i;
2834 }
2835 }
2836 return (_numa_used_node_count > 1);
2837 }
2839 int get_count() {return _numa_used_node_count;}
2840 int get_node_list_entry(int n) {
2841 // for indexes out of range, returns -1
2842 return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
2843 }
2845 } numa_node_list_holder;
// Minimum large-page size reported by the OS; stays 0 until large-page
// support is successfully initialized.
static size_t _large_page_size = 0;

// True iff the kernel32/advapi32 entry points needed for large-page
// initialization could all be resolved at runtime.
static bool resolve_functions_for_large_page_init() {
  return os::Kernel32Dll::GetLargePageMinimumAvailable() &&
         os::Advapi32Dll::AdvapiAvailable();
}
// Try to enable SeLockMemoryPrivilege for this process, which Windows
// requires before VirtualAlloc(..., MEM_LARGE_PAGES, ...) can succeed.
// Returns true on success. On any failure the partially opened _hProcess
// and _hToken handles are left for cleanup_after_large_page_init() to close.
static bool request_lock_memory_privilege() {
  _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
                          os::current_process_id());

  LUID luid;
  if (_hProcess != NULL &&
      os::Advapi32Dll::OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) &&
      os::Advapi32Dll::LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {

    TOKEN_PRIVILEGES tp;
    tp.PrivilegeCount = 1;
    tp.Privileges[0].Luid = luid;
    tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;

    // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
    // privilege. Check GetLastError() too. See MSDN document.
    if (os::Advapi32Dll::AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) &&
        (GetLastError() == ERROR_SUCCESS)) {
      return true;
    }
  }

  return false;
}
2881 static void cleanup_after_large_page_init() {
2882 if (_hProcess) CloseHandle(_hProcess);
2883 _hProcess = NULL;
2884 if (_hToken) CloseHandle(_hToken);
2885 _hToken = NULL;
2886 }
// Probe OS support for NUMA interleaving and build the used-node list.
// Returns true iff interleaving can be used (NUMA APIs present and the
// process spans more than one node). Warnings are printed only when the
// user explicitly set -XX:+UseNUMAInterleaving on the command line.
static bool numa_interleaving_init() {
  bool success = false;
  bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving);

  // print a warning if UseNUMAInterleaving flag is specified on command line
  bool warn_on_failure = use_numa_interleaving_specified;
# define WARN(msg) if (warn_on_failure) { warning(msg); }

  // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
  size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
  NUMAInterleaveGranularity = align_size_up(NUMAInterleaveGranularity, min_interleave_granularity);

  if (os::Kernel32Dll::NumaCallsAvailable()) {
    if (numa_node_list_holder.build()) {
      if (PrintMiscellaneous && Verbose) {
        tty->print("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
        for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
          tty->print("%d ", numa_node_list_holder.get_node_list_entry(i));
        }
        tty->print("\n");
      }
      success = true;
    } else {
      WARN("Process does not cover multiple NUMA nodes.");
    }
  } else {
    WARN("NUMA Interleaving is not supported by the operating system.");
  }
  if (!success) {
    if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag.");
  }
  return success;
#undef WARN
}
// this routine is used whenever we need to reserve a contiguous VA range
// but we need to make separate VirtualAlloc calls for each piece of the range
// Reasons for doing this:
// * UseLargePagesIndividualAllocation was set (normally only needed on WS2003 but possible to be set otherwise)
// * UseNUMAInterleaving requires a separate node for each piece
//
// On failure the whole range is released and NULL is returned; on success the
// contiguous range is recorded with NMT as a single block.
static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags, DWORD prot,
                                         bool should_inject_error=false) {
  char * p_buf;
  // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
  size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
  size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;

  // first reserve enough address space in advance since we want to be
  // able to break a single contiguous virtual address range into multiple
  // large page commits but WS2003 does not allow reserving large page space
  // so we just use 4K pages for reserve, this gives us a legal contiguous
  // address space. then we will deallocate that reservation, and re alloc
  // using large pages
  const size_t size_of_reserve = bytes + chunk_size;
  if (bytes > size_of_reserve) {
    // Overflowed.
    return NULL;
  }
  p_buf = (char *) VirtualAlloc(addr,
                                size_of_reserve,  // size of Reserve
                                MEM_RESERVE,
                                PAGE_READWRITE);
  // If reservation failed, return NULL
  if (p_buf == NULL) return NULL;
  MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
  // Probe reservation done: give it back, then re-allocate the same range
  // piecewise below. Another thread could in principle grab the range in
  // between; the caller handles a NULL result.
  os::release_memory(p_buf, bytes + chunk_size);

  // we still need to round up to a page boundary (in case we are using large pages)
  // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size)
  // instead we handle this in the bytes_to_rq computation below
  p_buf = (char *) align_size_up((size_t)p_buf, page_size);

  // now go through and allocate one chunk at a time until all bytes are
  // allocated
  size_t bytes_remaining = bytes;
  // An overflow of align_size_up() would have been caught above
  // in the calculation of size_of_reserve.
  char * next_alloc_addr = p_buf;
  HANDLE hProc = GetCurrentProcess();

#ifdef ASSERT
  // Variable for the failure injection
  long ran_num = os::random();
  size_t fail_after = ran_num % bytes;
#endif

  int count=0;
  while (bytes_remaining) {
    // select bytes_to_rq to get to the next chunk_size boundary

    size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size));
    // Note allocate and commit
    char * p_new;

#ifdef ASSERT
    bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after);
#else
    const bool inject_error_now = false;
#endif

    if (inject_error_now) {
      p_new = NULL;
    } else {
      if (!UseNUMAInterleaving) {
        p_new = (char *) VirtualAlloc(next_alloc_addr,
                                      bytes_to_rq,
                                      flags,
                                      prot);
      } else {
        // get the next node to use from the used_node_list
        assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
        DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
        p_new = (char *)os::Kernel32Dll::VirtualAllocExNuma(hProc,
                                                            next_alloc_addr,
                                                            bytes_to_rq,
                                                            flags,
                                                            prot,
                                                            node);
      }
    }

    if (p_new == NULL) {
      // Free any allocated pages
      if (next_alloc_addr > p_buf) {
        // Some memory was committed so release it.
        size_t bytes_to_release = bytes - bytes_remaining;
        // NMT has yet to record any individual blocks, so it
        // need to create a dummy 'reserve' record to match
        // the release.
        MemTracker::record_virtual_memory_reserve((address)p_buf,
                                                  bytes_to_release, CALLER_PC);
        os::release_memory(p_buf, bytes_to_release);
      }
#ifdef ASSERT
      if (should_inject_error) {
        if (TracePageSizes && Verbose) {
          tty->print_cr("Reserving pages individually failed.");
        }
      }
#endif
      return NULL;
    }

    bytes_remaining -= bytes_to_rq;
    next_alloc_addr += bytes_to_rq;
    count++;
  }
  // Although the memory is allocated individually, it is returned as one.
  // NMT records it as one block.
  if ((flags & MEM_COMMIT) != 0) {
    MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
  } else {
    MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
  }

  // made it this far, success
  return p_buf;
}
// Resolve APIs, acquire SeLockMemoryPrivilege, and determine the large page
// size. On any failure UseLargePages is turned off; warnings are only emitted
// when the user explicitly set a large-page flag on the command line.
void os::large_page_init() {
  if (!UseLargePages) return;

  // print a warning if any large page related flag is specified on command line
  bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
                         !FLAG_IS_DEFAULT(LargePageSizeInBytes);
  bool success = false;

# define WARN(msg) if (warn_on_failure) { warning(msg); }
  if (resolve_functions_for_large_page_init()) {
    if (request_lock_memory_privilege()) {
      size_t s = os::Kernel32Dll::GetLargePageMinimum();
      if (s) {
#if defined(IA32) || defined(AMD64)
        if (s > 4*M || LargePageSizeInBytes > 4*M) {
          WARN("JVM cannot use large pages bigger than 4mb.");
        } else {
#endif
          // Honor LargePageSizeInBytes only if it is a multiple of the
          // hardware minimum; otherwise fall back to the minimum itself.
          if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) {
            _large_page_size = LargePageSizeInBytes;
          } else {
            _large_page_size = s;
          }
          success = true;
#if defined(IA32) || defined(AMD64)
        }
#endif
      } else {
        WARN("Large page is not supported by the processor.");
      }
    } else {
      WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
    }
  } else {
    WARN("Large page is not supported by the operating system.");
  }
#undef WARN

  // Publish the large page size ahead of the default page size so callers
  // prefer it when both are usable.
  const size_t default_page_size = (size_t) vm_page_size();
  if (success && _large_page_size > default_page_size) {
    _page_sizes[0] = _large_page_size;
    _page_sizes[1] = default_page_size;
    _page_sizes[2] = 0;
  }

  cleanup_after_large_page_init();
  UseLargePages = success;
}
3098 // On win32, one cannot release just a part of reserved memory, it's an
3099 // all or nothing deal. When we split a reservation, we must break the
3100 // reservation into two reservations.
3101 void os::pd_split_reserved_memory(char *base, size_t size, size_t split,
3102 bool realloc) {
3103 if (size > 0) {
3104 release_memory(base, size);
3105 if (realloc) {
3106 reserve_memory(split, base);
3107 }
3108 if (size != split) {
3109 reserve_memory(size - split, base + split);
3110 }
3111 }
3112 }
// Multiple threads can race in this code but it's not possible to unmap small sections of
// virtual space to get requested alignment, like posix-like os's.
// Windows prevents multiple thread from remapping over each other so this loop is thread-safe.
char* os::reserve_memory_aligned(size_t size, size_t alignment) {
  assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
         "Alignment must be a multiple of allocation granularity (page size)");
  assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");

  size_t extra_size = size + alignment;
  assert(extra_size >= size, "overflow, size is too large to allow alignment");

  char* aligned_base = NULL;

  do {
    // Over-reserve so an aligned address is guaranteed to lie within the range.
    char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
    if (extra_base == NULL) {
      return NULL;
    }
    // Do manual alignment
    aligned_base = (char*) align_size_up((uintptr_t) extra_base, alignment);

    // Release the oversized range, then re-reserve exactly at the aligned
    // address. Another thread may grab the range between the release and the
    // re-reserve, in which case the re-reserve returns NULL and we retry.
    os::release_memory(extra_base, extra_size);

    aligned_base = os::reserve_memory(size, aligned_base);

  } while (aligned_base == NULL);

  return aligned_base;
}
// Reserve (but do not commit) 'bytes' of address space, optionally at 'addr'.
// With NUMA interleaving (and small pages) the range is reserved chunk-wise
// via allocate_pages_individually(); otherwise a single VirtualAlloc is used.
char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
  assert((size_t)addr % os::vm_allocation_granularity() == 0,
         "reserve alignment");
  assert(bytes % os::vm_allocation_granularity() == 0, "reserve block size");
  char* res;
  // note that if UseLargePages is on, all the areas that require interleaving
  // will go thru reserve_memory_special rather than thru here.
  bool use_individual = (UseNUMAInterleaving && !UseLargePages);
  if (!use_individual) {
    res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
  } else {
    elapsedTimer reserveTimer;
    if( Verbose && PrintMiscellaneous ) reserveTimer.start();
    // in numa interleaving, we have to allocate pages individually
    // (well really chunks of NUMAInterleaveGranularity size)
    res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
    if (res == NULL) {
      warning("NUMA page allocation failed");
    }
    if( Verbose && PrintMiscellaneous ) {
      reserveTimer.stop();
      tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
                    reserveTimer.milliseconds(), reserveTimer.ticks());
    }
  }
  // If a specific address was requested, we either got it or got NULL.
  assert(res == NULL || addr == NULL || addr == res,
         "Unexpected address from reserve.");

  return res;
}
// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).
char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  // Windows os::reserve_memory() fails if the requested address range is
  // not available.
  return reserve_memory(bytes, requested_addr);
}
// Size of a single large page; 0 until large_page_init() succeeds.
size_t os::large_page_size() {
  return _large_page_size;
}

bool os::can_commit_large_page_memory() {
  // Windows only uses large page memory when the entire region is reserved
  // and committed in a single VirtualAlloc() call. This may change in the
  // future, but with Windows 2003 it's not possible to commit on demand.
  return false;
}

bool os::can_execute_large_page_memory() {
  // Executable large-page mappings are supported: reserve_memory_special()
  // passes PAGE_EXECUTE_READWRITE when exec is requested.
  return true;
}
3198 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr, bool exec) {
3199 assert(UseLargePages, "only for large pages");
3201 if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
3202 return NULL; // Fallback to small pages.
3203 }
3205 const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
3206 const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3208 // with large pages, there are two cases where we need to use Individual Allocation
3209 // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
3210 // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
3211 if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
3212 if (TracePageSizes && Verbose) {
3213 tty->print_cr("Reserving large pages individually.");
3214 }
3215 char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
3216 if (p_buf == NULL) {
3217 // give an appropriate warning message
3218 if (UseNUMAInterleaving) {
3219 warning("NUMA large page allocation failed, UseLargePages flag ignored");
3220 }
3221 if (UseLargePagesIndividualAllocation) {
3222 warning("Individually allocated large pages failed, "
3223 "use -XX:-UseLargePagesIndividualAllocation to turn off");
3224 }
3225 return NULL;
3226 }
3228 return p_buf;
3230 } else {
3231 if (TracePageSizes && Verbose) {
3232 tty->print_cr("Reserving large pages in a single large chunk.");
3233 }
3234 // normal policy just allocate it all at once
3235 DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3236 char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
3237 if (res != NULL) {
3238 MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC);
3239 }
3241 return res;
3242 }
3243 }
3245 bool os::release_memory_special(char* base, size_t bytes) {
3246 assert(base != NULL, "Sanity check");
3247 return release_memory(base, bytes);
3248 }
// No Windows-specific statistics to print at VM shutdown.
void os::print_statistics() {
}

// Emit a warning describing a failed commit, including the OS error text.
// Used by the commit paths below (non-product builds only at some call sites).
static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
  int err = os::get_last_error();
  char buf[256];
  size_t buf_len = os::lasterror(buf, sizeof(buf));
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
          exec, buf_len != 0 ? buf : "<no_error_string>", err);
}
// Commit [addr, addr+bytes), optionally executable. Returns false on failure.
// Without NUMA interleaving this is a single VirtualAlloc(MEM_COMMIT); with
// interleaving the range may span multiple underlying reservations and must
// be committed region by region (see below).
bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
  if (bytes == 0) {
    // Don't bother the OS with noops.
    return true;
  }
  assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
  assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
  // Don't attempt to print anything if the OS call fails. We're
  // probably low on resources, so the print itself may cause crashes.

  // unless we have NUMAInterleaving enabled, the range of a commit
  // is always within a reserve covered by a single VirtualAlloc
  // in that case we can just do a single commit for the requested size
  if (!UseNUMAInterleaving) {
    if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
      NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
      return false;
    }
    if (exec) {
      DWORD oldprot;
      // Windows doc says to use VirtualProtect to get execute permissions
      if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
        NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
        return false;
      }
    }
    return true;
  } else {

    // when NUMAInterleaving is enabled, the commit might cover a range that
    // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
    // VirtualQuery can help us determine that. The RegionSize that VirtualQuery
    // returns represents the number of bytes that can be committed in one step.
    size_t bytes_remaining = bytes;
    char * next_alloc_addr = addr;
    while (bytes_remaining > 0) {
      MEMORY_BASIC_INFORMATION alloc_info;
      VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
      size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
      if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
                       PAGE_READWRITE) == NULL) {
        NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
                                            exec);)
        return false;
      }
      if (exec) {
        DWORD oldprot;
        if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
                            PAGE_EXECUTE_READWRITE, &oldprot)) {
          NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
                                              exec);)
          return false;
        }
      }
      bytes_remaining -= bytes_to_rq;
      next_alloc_addr += bytes_to_rq;
    }
  }
  // if we made it this far, return true
  return true;
}
// alignment_hint variant: Windows makes no use of the hint; forward to the
// basic commit above.
bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
                          bool exec) {
  // alignment_hint is ignored on this OS
  return pd_commit_memory(addr, size, exec);
}

// Commit or abort the VM with an out-of-memory error; 'mesg' is mandatory
// and becomes part of the OOM report.
void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  if (!pd_commit_memory(addr, size, exec)) {
    warn_fail_commit_memory(addr, size, exec);
    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
  }
}

// Hint-taking variant of the above; the hint is ignored on Windows.
void os::pd_commit_memory_or_exit(char* addr, size_t size,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  // alignment_hint is ignored on this OS
  pd_commit_memory_or_exit(addr, size, exec, mesg);
}
3346 bool os::pd_uncommit_memory(char* addr, size_t bytes) {
3347 if (bytes == 0) {
3348 // Don't bother the OS with noops.
3349 return true;
3350 }
3351 assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
3352 assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
3353 return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0);
3354 }
bool os::pd_release_memory(char* addr, size_t bytes) {
  // MEM_RELEASE requires dwSize == 0 and releases the entire original
  // reservation starting at 'addr'; 'bytes' is therefore unused here.
  return VirtualFree(addr, 0, MEM_RELEASE) != 0;
}

// Stack guard pages are created by committing the range non-executable;
// the guard protection itself is applied separately by callers.
bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
  return os::commit_memory(addr, size, !ExecMem);
}

// Removing guard pages just uncommits the range.
bool os::remove_stack_guard_pages(char* addr, size_t size) {
  return os::uncommit_memory(addr, size);
}
// Set protections specified
// Maps the platform-independent ProtType onto Win32 page-protection flags and
// applies it with VirtualProtect. Commits the range first if necessary, since
// Win32 only allows changing protection on committed memory.
bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
                        bool is_committed) {
  unsigned int p = 0;
  switch (prot) {
  case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
  case MEM_PROT_READ: p = PAGE_READONLY; break;
  case MEM_PROT_RW:   p = PAGE_READWRITE; break;
  case MEM_PROT_RWX:  p = PAGE_EXECUTE_READWRITE; break;
  default:
    ShouldNotReachHere();
  }

  DWORD old_status;

  // Strange enough, but on Win32 one can change protection only for committed
  // memory, not a big deal anyway, as bytes less or equal than 64K
  if (!is_committed) {
    commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
                          "cannot commit protection page");
  }
  // One cannot use os::guard_memory() here, as on Win32 guard page
  // have different (one-shot) semantics, from MSDN on PAGE_GUARD:
  //
  // Pages in the region become guard pages. Any attempt to access a guard page
  // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
  // the guard page status. Guard pages thus act as a one-time access alarm.
  return VirtualProtect(addr, bytes, p, &old_status) != 0;
}
3398 bool os::guard_memory(char* addr, size_t bytes) {
3399 DWORD old_status;
3400 return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
3401 }
3403 bool os::unguard_memory(char* addr, size_t bytes) {
3404 DWORD old_status;
3405 return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
3406 }
// Memory realign/free hints and NUMA placement hints are not acted on here;
// these are intentional no-ops on Windows.
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::numa_make_global(char *addr, size_t bytes) { }
void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) { }
bool os::numa_topology_changed() { return false; }
// Report at least one group so UMA systems are handled uniformly by callers.
size_t os::numa_get_groups_num() { return MAX2(numa_node_list_holder.get_count(), 1); }
int os::numa_get_group_id() { return 0; }
// Fill 'ids' with up to 'size' leaf NUMA group ids; returns how many were
// written. On a UMA system (empty node list) a single group id 0 is reported.
size_t os::numa_get_leaf_groups(int *ids, size_t size) {
  if (numa_node_list_holder.get_count() == 0 && size > 0) {
    // Provide an answer for UMA systems
    ids[0] = 0;
    return 1;
  } else {
    // check for size bigger than actual groups_num
    size = MIN2(size, numa_get_groups_num());
    for (int i = 0; i < (int)size; i++) {
      ids[i] = numa_node_list_holder.get_node_list_entry(i);
    }
    return size;
  }
}
// Page residency queries are not implemented on Windows.
bool os::get_page_info(char *start, page_info* info) {
  return false;
}

// No page scanning either: report the whole range as scanned by returning 'end'.
char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
  return end;
}

char* os::non_memory_address_word() {
  // Must never look like an address returned by reserve_memory,
  // even in its subfields (as defined by the CPU immediate fields,
  // if the CPU splits constants across multiple instructions).
  return (char*)-1;
}

#define MAX_ERROR_COUNT 100
#define SYS_THREAD_ERROR 0xffffffffUL
// Release a thread that was created suspended so it begins execution.
void os::pd_start_thread(Thread* thread) {
  DWORD ret = ResumeThread(thread->osthread()->thread_handle());
  // Returns previous suspend state:
  // 0: Thread was not suspended
  // 1: Thread is running now
  // >1: Thread is still suspended.
  assert(ret != SYS_THREAD_ERROR, "StartThread failed"); // should propagate back
}
class HighResolutionInterval : public CHeapObj<mtThread> {
  // The default timer resolution seems to be 10 milliseconds.
  // (Where is this written down?)
  // If someone wants to sleep for only a fraction of the default,
  // then we set the timer resolution down to 1 millisecond for
  // the duration of their interval.
  // We carefully set the resolution back, since otherwise we
  // seem to incur an overhead (3%?) that we don't need.
  // CONSIDER: if ms is small, say 3, then we should run with a high resolution time.
  // Buf if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
  // Alternatively, we could compute the relative error (503/500 = .6%) and only use
  // timeBeginPeriod() if the relative error exceeded some threshold.
  // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
  // to decreased efficiency related to increased timer "tick" rates. We want to minimize
  // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
  // resolution timers running.
 private:
  jlong resolution;  // non-zero iff this interval bumped the timer resolution
 public:
  // Raise the timer resolution to 1ms only when 'ms' is not a multiple of the
  // default 10ms tick (otherwise the default resolution is accurate enough).
  HighResolutionInterval(jlong ms) {
    resolution = ms % 10L;
    if (resolution != 0) {
      MMRESULT result = timeBeginPeriod(1L);
    }
  }
  // Restore the default resolution if (and only if) the constructor changed it.
  ~HighResolutionInterval() {
    if (resolution != 0) {
      MMRESULT result = timeEndPeriod(1L);
    }
    resolution = 0L;
  }
};
// Sleep for 'ms' milliseconds on behalf of 'thread'.
// interruptable == true : must be a Java thread; waits on its interrupt event
//   so Thread.interrupt() can wake the sleep early (returns OS_INTRPT).
// interruptable == false : plain Sleep(); always returns OS_TIMEOUT.
int os::sleep(Thread* thread, jlong ms, bool interruptable) {
  jlong limit = (jlong) MAXDWORD;

  // The Win32 wait primitives take a DWORD timeout, so chop very long sleeps
  // into MAXDWORD-sized pieces.
  while (ms > limit) {
    int res;
    if ((res = sleep(thread, limit, interruptable)) != OS_TIMEOUT)
      return res;
    ms -= limit;
  }

  assert(thread == Thread::current(), "thread consistency check");
  OSThread* osthread = thread->osthread();
  OSThreadWaitState osts(osthread, false /* not Object.wait() */);
  int result;
  if (interruptable) {
    assert(thread->is_Java_thread(), "must be java thread");
    JavaThread *jt = (JavaThread *) thread;
    ThreadBlockInVM tbivm(jt);

    jt->set_suspend_equivalent();
    // cleared by handle_special_suspend_equivalent_condition() or
    // java_suspend_self() via check_and_wait_while_suspended()

    HANDLE events[1];
    events[0] = osthread->interrupt_event();
    // Temporarily raise the OS timer resolution for sub-tick accuracy,
    // unless the VM is configured to keep it high all the time.
    HighResolutionInterval *phri = NULL;
    if (!ForceTimeHighResolution)
      phri = new HighResolutionInterval(ms);
    if (WaitForMultipleObjects(1, events, FALSE, (DWORD)ms) == WAIT_TIMEOUT) {
      result = OS_TIMEOUT;
    } else {
      // Wait was cut short by the interrupt event: consume it.
      ResetEvent(osthread->interrupt_event());
      osthread->set_interrupted(false);
      result = OS_INTRPT;
    }
    delete phri; //if it is NULL, harmless

    // were we externally suspended while we were waiting?
    jt->check_and_wait_while_suspended();
  } else {
    assert(!thread->is_Java_thread(), "must not be java thread");
    Sleep((long) ms);
    result = OS_TIMEOUT;
  }
  return result;
}
//
// Short sleep, direct OS call.
//
// ms = 0, means allow others (if any) to run.
//
void os::naked_short_sleep(jlong ms) {
  assert(ms < 1000, "Un-interruptable sleep, short time use only");
  Sleep(ms);
}

// Sleep forever; naked call to OS-specific sleep; use with CAUTION
void os::infinite_sleep() {
  while (true) {    // sleep forever ...
    Sleep(100000);  // ... 100 seconds at a time
  }
}
// Signature of kernel32!SwitchToThread (resolved dynamically elsewhere).
typedef BOOL (WINAPI * STTSignature)(void);

// Yield the processor without any VM state transitions.
os::YieldResult os::NakedYield() {
  // Use either SwitchToThread() or Sleep(0)
  // Consider passing back the return value from SwitchToThread().
  if (os::Kernel32Dll::SwitchToThreadAvailable()) {
    return SwitchToThread() ? os::YIELD_SWITCHED : os::YIELD_NONEREADY;
  } else {
    // Sleep(0) yields only to ready threads of equal or higher priority.
    Sleep(0);
  }
  return os::YIELD_UNKNOWN;
}

void os::yield() { os::NakedYield(); }

void os::yield_all(int attempts) {
  // Yields to all threads, including threads with lower priorities
  Sleep(1);
}
// Win32 only gives you access to seven real priorities at a time,
// so we compress Java's ten down to seven. It would be better
// if we dynamically adjusted relative priorities.

// Default mapping from Java thread priorities (index) to Win32 priorities.
int os::java_to_os_priority[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,          // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,        // 1  MinPriority
  THREAD_PRIORITY_LOWEST,        // 2
  THREAD_PRIORITY_BELOW_NORMAL,  // 3
  THREAD_PRIORITY_BELOW_NORMAL,  // 4
  THREAD_PRIORITY_NORMAL,        // 5  NormPriority
  THREAD_PRIORITY_NORMAL,        // 6
  THREAD_PRIORITY_ABOVE_NORMAL,  // 7
  THREAD_PRIORITY_ABOVE_NORMAL,  // 8
  THREAD_PRIORITY_HIGHEST,       // 9  NearMaxPriority
  THREAD_PRIORITY_HIGHEST,       // 10 MaxPriority
  THREAD_PRIORITY_HIGHEST        // 11 CriticalPriority
};

// Alternative mapping installed by prio_init() when ThreadPriorityPolicy == 1;
// spreads the Java range more aggressively, up to TIME_CRITICAL.
int prio_policy1[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,          // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,        // 1  MinPriority
  THREAD_PRIORITY_LOWEST,        // 2
  THREAD_PRIORITY_BELOW_NORMAL,  // 3
  THREAD_PRIORITY_BELOW_NORMAL,  // 4
  THREAD_PRIORITY_NORMAL,        // 5  NormPriority
  THREAD_PRIORITY_ABOVE_NORMAL,  // 6
  THREAD_PRIORITY_ABOVE_NORMAL,  // 7
  THREAD_PRIORITY_HIGHEST,       // 8
  THREAD_PRIORITY_HIGHEST,       // 9  NearMaxPriority
  THREAD_PRIORITY_TIME_CRITICAL, // 10 MaxPriority
  THREAD_PRIORITY_TIME_CRITICAL  // 11 CriticalPriority
};
3608 static int prio_init() {
3609 // If ThreadPriorityPolicy is 1, switch tables
3610 if (ThreadPriorityPolicy == 1) {
3611 int i;
3612 for (i = 0; i < CriticalPriority + 1; i++) {
3613 os::java_to_os_priority[i] = prio_policy1[i];
3614 }
3615 }
3616 if (UseCriticalJavaThreadPriority) {
3617 os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority] ;
3618 }
3619 return 0;
3620 }
3622 OSReturn os::set_native_priority(Thread* thread, int priority) {
3623 if (!UseThreadPriorities) return OS_OK;
3624 bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
3625 return ret ? OS_OK : OS_ERR;
3626 }
3628 OSReturn os::get_native_priority(const Thread* const thread, int* priority_ptr) {
3629 if ( !UseThreadPriorities ) {
3630 *priority_ptr = java_to_os_priority[NormPriority];
3631 return OS_OK;
3632 }
3633 int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
3634 if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
3635 assert(false, "GetThreadPriority failed");
3636 return OS_ERR;
3637 }
3638 *priority_ptr = os_prio;
3639 return OS_OK;
3640 }
// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
void os::hint_no_preempt() {}  // no-op on Windows
// Deliver an interrupt to 'thread': set the interrupted flag, signal the
// interrupt event, and unpark any parked JSR166/ParkEvent waiters.
void os::interrupt(Thread* thread) {
  assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
         "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();
  osthread->set_interrupted(true);
  // More than one thread can get here with the same value of osthread,
  // resulting in multiple notifications.  We do, however, want the store
  // to interrupted() to be visible to other threads before we post
  // the interrupt event.
  OrderAccess::release();
  SetEvent(osthread->interrupt_event());
  // For JSR166:  unpark after setting status
  if (thread->is_Java_thread())
    ((JavaThread*)thread)->parker()->unpark();

  ParkEvent * ev = thread->_ParkEvent;
  if (ev != NULL) ev->unpark();

}
// Query (and optionally clear) the interrupt state of 'thread'.
bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
  assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
         "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();
  // There is no synchronization between the setting of the interrupt
  // and it being cleared here. It is critical - see 6535709 - that
  // we only clear the interrupt state, and reset the interrupt event,
  // if we are going to report that we were indeed interrupted - else
  // an interrupt can be "lost", leading to spurious wakeups or lost wakeups
  // depending on the timing. By checking thread interrupt event to see
  // if the thread gets real interrupt thus prevent spurious wakeup.
  bool interrupted = osthread->interrupted() && (WaitForSingleObject(osthread->interrupt_event(), 0) == WAIT_OBJECT_0);
  if (interrupted && clear_interrupted) {
    osthread->set_interrupted(false);
    ResetEvent(osthread->interrupt_event());
  } // Otherwise leave the interrupted state alone

  return interrupted;
}
// Get's a pc (hint) for a running thread. Currently used only for profiling.
ExtendedPC os::get_thread_pc(Thread* thread) {
  CONTEXT context;
  context.ContextFlags = CONTEXT_CONTROL;
  HANDLE handle = thread->osthread()->thread_handle();
#ifdef _M_IA64
  // Itanium: not implemented.
  assert(0, "Fix get_thread_pc");
  return ExtendedPC(NULL);
#else
  if (GetThreadContext(handle, &context)) {
#ifdef _M_AMD64
    // x64: instruction pointer lives in Rip.
    return ExtendedPC((address) context.Rip);
#else
    // x86: instruction pointer lives in Eip.
    return ExtendedPC((address) context.Eip);
#endif
  } else {
    // GetThreadContext failed; report "no pc available".
    return ExtendedPC(NULL);
  }
#endif
}
// GetCurrentThreadId() returns DWORD
intx os::current_thread_id() { return GetCurrentThreadId(); }

// Cached process id; 0 means "not cached yet, ask the C runtime".
// NOTE(review): the assignment to _initial_pid happens outside this view —
// presumably during early VM startup; verify against the initializer.
static int _initial_pid = 0;

int os::current_process_id()
{
  return (_initial_pid ? _initial_pid : _getpid());
}
// Definitions of os::win32 statics. Most are filled in by
// initialize_system_info() below; the thread limit/count are maintained
// elsewhere (outside this view).
int os::win32::_vm_page_size = 0;
int os::win32::_vm_allocation_granularity = 0;
int os::win32::_processor_type = 0;
// Processor level is not available on non-NT systems, use vm_version instead
int os::win32::_processor_level = 0;
julong os::win32::_physical_memory = 0;
size_t os::win32::_default_stack_size = 0;

intx os::win32::_os_thread_limit = 0;
volatile intx os::win32::_os_thread_count = 0;

bool os::win32::_is_nt = false;
bool os::win32::_is_windows_2003 = false;
bool os::win32::_is_windows_server = false;
// Queries and caches basic machine geometry (page size, allocation
// granularity, processor info), physical memory size, and the Windows
// flavor flags used elsewhere for feature/workaround decisions.
void os::win32::initialize_system_info() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  _vm_page_size = si.dwPageSize;
  _vm_allocation_granularity = si.dwAllocationGranularity;
  _processor_type = si.dwProcessorType;
  _processor_level = si.wProcessorLevel;
  set_processor_count(si.dwNumberOfProcessors);

  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);

  // also returns dwAvailPhys (free physical memory bytes), dwTotalVirtual, dwAvailVirtual,
  // dwMemoryLoad (% of memory in use)
  GlobalMemoryStatusEx(&ms);
  _physical_memory = ms.ullTotalPhys;

  OSVERSIONINFOEX oi;
  oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
  GetVersionEx((OSVERSIONINFO*)&oi);
  switch(oi.dwPlatformId) {
  case VER_PLATFORM_WIN32_WINDOWS: _is_nt = false; break;
  case VER_PLATFORM_WIN32_NT:
    _is_nt = true;
    {
      // Encode major.minor as major*1000+minor; 5002 == version 5.2
      // (the Windows Server 2003 family).
      int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
      if (os_vers == 5002) {
        _is_windows_2003 = true;
      }
      if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
          oi.wProductType == VER_NT_SERVER) {
        _is_windows_server = true;
      }
    }
    break;
  default: fatal("Unknown platform");
  }

  // Use the main thread's current stack size as the default for new threads.
  _default_stack_size = os::current_stack_size();
  assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
  assert((_default_stack_size & (_vm_page_size - 1)) == 0,
         "stack size not a multiple of page size");

  initialize_performance_counter();

  // Win95/Win98 scheduler bug work-around. The Win95/98 scheduler is
  // known to deadlock the system, if the VM issues to thread operations with
  // a too high frequency, e.g., such as changing the priorities.
  // The 6000 seems to work well - no deadlocks has been notices on the test
  // programs that we have seen experience this problem.
  if (!os::win32::is_nt()) {
    StarvationMonitorInterval = 6000;
  }
}
3792 HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf, int ebuflen) {
3793 char path[MAX_PATH];
3794 DWORD size;
3795 DWORD pathLen = (DWORD)sizeof(path);
3796 HINSTANCE result = NULL;
3798 // only allow library name without path component
3799 assert(strchr(name, '\\') == NULL, "path not allowed");
3800 assert(strchr(name, ':') == NULL, "path not allowed");
3801 if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
3802 jio_snprintf(ebuf, ebuflen,
3803 "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
3804 return NULL;
3805 }
3807 // search system directory
3808 if ((size = GetSystemDirectory(path, pathLen)) > 0) {
3809 strcat(path, "\\");
3810 strcat(path, name);
3811 if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3812 return result;
3813 }
3814 }
3816 // try Windows directory
3817 if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
3818 strcat(path, "\\");
3819 strcat(path, name);
3820 if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3821 return result;
3822 }
3823 }
3825 jio_snprintf(ebuf, ebuflen,
3826 "os::win32::load_windows_dll() cannot load %s from system directories.", name);
3827 return NULL;
3828 }
3830 void os::win32::setmode_streams() {
3831 _setmode(_fileno(stdin), _O_BINARY);
3832 _setmode(_fileno(stdout), _O_BINARY);
3833 _setmode(_fileno(stderr), _O_BINARY);
3834 }
3837 bool os::is_debugger_attached() {
3838 return IsDebuggerPresent() ? true : false;
3839 }
3842 void os::wait_for_keypress_at_exit(void) {
3843 if (PauseAtExit) {
3844 fprintf(stderr, "Press any key to continue...\n");
3845 fgetc(stdin);
3846 }
3847 }
3850 int os::message_box(const char* title, const char* message) {
3851 int result = MessageBox(NULL, message, title,
3852 MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY);
3853 return result == IDYES;
3854 }
3856 int os::allocate_thread_local_storage() {
3857 return TlsAlloc();
3858 }
// Releases the TLS slot previously returned by allocate_thread_local_storage().
void os::free_thread_local_storage(int index) {
  TlsFree(index);
}
// Stores value into the given TLS slot for the calling thread, and asserts
// the value can be read back.
void os::thread_local_storage_at_put(int index, void* value) {
  TlsSetValue(index, value);
  assert(thread_local_storage_at(index) == value, "Just checking");
}
// Returns the calling thread's value for the given TLS slot.
void* os::thread_local_storage_at(int index) {
  return TlsGetValue(index);
}
3877 #ifndef PRODUCT
3878 #ifndef _WIN64
3879 // Helpers to check whether NX protection is enabled
3880 int nx_exception_filter(_EXCEPTION_POINTERS *pex) {
3881 if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
3882 pex->ExceptionRecord->NumberParameters > 0 &&
3883 pex->ExceptionRecord->ExceptionInformation[0] ==
3884 EXCEPTION_INFO_EXEC_VIOLATION) {
3885 return EXCEPTION_EXECUTE_HANDLER;
3886 }
3887 return EXCEPTION_CONTINUE_SEARCH;
3888 }
// Probes whether NX/DEP protection is active by executing a one-byte "ret"
// placed on the stack: if NX is enabled the call faults and the SEH filter
// above catches it.
void nx_check_protection() {
  // If NX is enabled we'll get an exception calling into code on the stack
  char code[] = { (char)0xC3 }; // ret
  void *code_ptr = (void *)code;
  __try {
    __asm call code_ptr
  } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) {
    tty->print_raw_cr("NX protection detected.");
  }
}
3900 #endif // _WIN64
3901 #endif // PRODUCT
// this is called _before_ the global arguments have been parsed
void os::init(void) {
  // Cache the pid so current_process_id() returns a stable value.
  _initial_pid = _getpid();

  init_random(1234567);

  // Order matters: system info must be queried before page sizes and the
  // MP check below can be set up.
  win32::initialize_system_info();
  win32::setmode_streams();
  init_page_sizes((size_t) win32::vm_page_size());

  // For better scalability on MP systems (must be called after initialize_system_info)
#ifndef PRODUCT
  if (is_MP()) {
    NoYieldsInMicrolock = true;
  }
#endif
  // This may be overridden later when argument processing is done.
  FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation,
    os::win32::is_windows_2003());

  // Initialize main_process and main_thread
  main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
  // GetCurrentThread() is also a pseudo handle; duplicate it into a real
  // handle so the main thread can be manipulated from other threads.
  if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
                       &main_thread, THREAD_ALL_ACCESS, false, 0)) {
    fatal("DuplicateHandle failed\n");
  }
  main_thread_id = (int) GetCurrentThreadId();
}
// To install functions for atexit processing
extern "C" {
  // atexit hook: tears down the performance-data memory region at process
  // exit (registered conditionally in os::init_2).
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}
3939 static jint initSock();
// this is called _after_ the global arguments have been parsed
jint os::init_2(void) {
  // Allocate a single page and mark it as readable for safepoint polling;
  // it is later flipped to PAGE_NOACCESS to stop threads at a safepoint.
  address polling_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READONLY);
  guarantee( polling_page != NULL, "Reserve Failed for polling page");

  address return_page = (address)VirtualAlloc(polling_page, os::vm_page_size(), MEM_COMMIT, PAGE_READONLY);
  guarantee( return_page != NULL, "Commit Failed for polling page");

  os::set_polling_page( polling_page );

#ifndef PRODUCT
  if( Verbose && PrintMiscellaneous )
    tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
#endif

  if (!UseMembar) {
    // Without UseMembar, a dedicated read-write page is used for
    // serializing thread state transitions instead of memory barriers.
    address mem_serialize_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READWRITE);
    guarantee( mem_serialize_page != NULL, "Reserve Failed for memory serialize page");

    return_page = (address)VirtualAlloc(mem_serialize_page, os::vm_page_size(), MEM_COMMIT, PAGE_READWRITE);
    guarantee( return_page != NULL, "Commit Failed for memory serialize page");

    os::set_memory_serialize_page( mem_serialize_page );

#ifndef PRODUCT
    if(Verbose && PrintMiscellaneous)
      tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
#endif
  }

  // Setup Windows Exceptions

  // for debugging float code generation bugs
  if (ForceFloatExceptions) {
#ifndef _WIN64
    static long fp_control_word = 0;
    __asm { fstcw fp_control_word }
    // see Intel PPro Manual, Vol. 2, p 7-16
    const long precision = 0x20;
    const long underflow = 0x10;
    const long overflow = 0x08;
    const long zero_div = 0x04;
    const long denorm = 0x02;
    const long invalid = 0x01;
    // Only unmask the "invalid operation" exception here.
    fp_control_word |= invalid;
    __asm { fldcw fp_control_word }
#endif
  }

  // If stack_commit_size is 0, windows will reserve the default size,
  // but only commit a small portion of it.
  size_t stack_commit_size = round_to(ThreadStackSize*K, os::vm_page_size());
  size_t default_reserve_size = os::win32::default_stack_size();
  size_t actual_reserve_size = stack_commit_size;
  if (stack_commit_size < default_reserve_size) {
    // If stack_commit_size == 0, we want this too
    actual_reserve_size = default_reserve_size;
  }

  // Check minimum allowable stack size for thread creation and to initialize
  // the java system classes, including StackOverflowError - depends on page
  // size.  Add a page for compiler2 recursion in main thread.
  // Add in 2*BytesPerWord times page size to account for VM stack during
  // class initialization depending on 32 or 64 bit VM.
  size_t min_stack_allowed =
            (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
            2*BytesPerWord COMPILER2_PRESENT(+1)) * os::vm_page_size();
  if (actual_reserve_size < min_stack_allowed) {
    tty->print_cr("\nThe stack size specified is too small, "
                  "Specify at least %dk",
                  min_stack_allowed / K);
    return JNI_ERR;
  }

  JavaThread::set_stack_size_at_create(stack_commit_size);

  // Calculate theoretical max. size of Threads to guard gainst artifical
  // out-of-memory situations, where all available address-space has been
  // reserved by thread stacks.
  assert(actual_reserve_size != 0, "Must have a stack");

  // Calculate the thread limit when we should start doing Virtual Memory
  // banging. Currently when the threads will have used all but 200Mb of space.
  //
  // TODO: consider performing a similar calculation for commit size instead
  // as reserve size, since on a 64-bit platform we'll run into that more
  // often than running out of virtual memory space.  We can use the
  // lower value of the two calculations as the os_thread_limit.
  size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
  win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);

  // at exit methods are called in the reverse order of their registration.
  // there is no limit to the number of functions registered. atexit does
  // not set errno.

  if (PerfAllowAtExitRegistration) {
    // only register atexit functions if PerfAllowAtExitRegistration is set.
    // atexit functions can be delayed until process exit time, which
    // can be problematic for embedded VM situations. Embedded VMs should
    // call DestroyJavaVM() to assure that VM resources are released.

    // note: perfMemory_exit_helper atexit function may be removed in
    // the future if the appropriate cleanup code can be added to the
    // VM_Exit VMOperation's doit method.
    if (atexit(perfMemory_exit_helper) != 0) {
      warning("os::init_2 atexit(perfMemory_exit_helper) failed");
    }
  }

#ifndef _WIN64
  // Print something if NX is enabled (win32 on AMD64)
  NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
#endif

  // initialize thread priority policy
  prio_init();

  if (UseNUMA && !ForceNUMA) {
    UseNUMA = false; // We don't fully support this yet
  }

  if (UseNUMAInterleaving) {
    // first check whether this Windows OS supports VirtualAllocExNuma, if not ignore this flag
    bool success = numa_interleaving_init();
    if (!success) UseNUMAInterleaving = false;
  }

  // Socket library initialization failure is fatal for VM startup.
  if (initSock() != JNI_OK) {
    return JNI_ERR;
  }

  return JNI_OK;
}
4076 // Mark the polling page as unreadable
4077 void os::make_polling_page_unreadable(void) {
4078 DWORD old_status;
4079 if( !VirtualProtect((char *)_polling_page, os::vm_page_size(), PAGE_NOACCESS, &old_status) )
4080 fatal("Could not disable polling page");
4081 };
4083 // Mark the polling page as readable
4084 void os::make_polling_page_readable(void) {
4085 DWORD old_status;
4086 if( !VirtualProtect((char *)_polling_page, os::vm_page_size(), PAGE_READONLY, &old_status) )
4087 fatal("Could not enable polling page");
4088 };
// stat() wrapper: normalizes the path to native form, then optionally
// adjusts st_mtime to be timezone-independent (see comment below).
// Returns the underlying ::stat() result (-1 with errno on error).
int os::stat(const char *path, struct stat *sbuf) {
  char pathbuf[MAX_PATH];
  if (strlen(path) > MAX_PATH - 1) {
    errno = ENAMETOOLONG;
    return -1;
  }
  os::native_path(strcpy(pathbuf, path));
  int ret = ::stat(pathbuf, sbuf);
  if (sbuf != NULL && UseUTCFileTimestamp) {
    // Fix for 6539723.  St_mtime returned from stat() is dependent on
    // the system timezone and so can return different values for the
    // same file if/when daylight savings time changes.  This adjustment
    // makes sure the same timestamp is returned regardless of the TZ.
    //
    // See:
    // http://msdn.microsoft.com/library/
    //   default.asp?url=/library/en-us/sysinfo/base/
    //   time_zone_information_str.asp
    // and
    // http://msdn.microsoft.com/library/default.asp?url=
    //   /library/en-us/sysinfo/base/settimezoneinformation.asp
    //
    // NOTE: there is a insidious bug here:  If the timezone is changed
    // after the call to stat() but before 'GetTimeZoneInformation()', then
    // the adjustment we do here will be wrong and we'll return the wrong
    // value (which will likely end up creating an invalid class data
    // archive).  Absent a better API for this, or some time zone locking
    // mechanism, we'll have to live with this risk.
    TIME_ZONE_INFORMATION tz;
    DWORD tzid = GetTimeZoneInformation(&tz);
    int daylightBias =
      (tzid == TIME_ZONE_ID_DAYLIGHT) ? tz.DaylightBias : tz.StandardBias;
    // Bias values are in minutes; st_mtime is in seconds.
    sbuf->st_mtime += (tz.Bias + daylightBias) * 60;
  }
  return ret;
}
// Combine the two 32-bit halves of a FILETIME into one signed 64-bit value
// (units of 100ns).
#define FT2INT64(ft) \
  ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime))
4133 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
4134 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
4135 // of a thread.
4136 //
4137 // current_thread_cpu_time() and thread_cpu_time(Thread*) returns
4138 // the fast estimate available on the platform.
4140 // current_thread_cpu_time() is not optimized for Windows yet
4141 jlong os::current_thread_cpu_time() {
4142 // return user + sys since the cost is the same
4143 return os::thread_cpu_time(Thread::current(), true /* user+sys */);
4144 }
4146 jlong os::thread_cpu_time(Thread* thread) {
4147 // consistent with what current_thread_cpu_time() returns.
4148 return os::thread_cpu_time(thread, true /* user+sys */);
4149 }
4151 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
4152 return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
4153 }
4155 jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) {
4156 // This code is copy from clasic VM -> hpi::sysThreadCPUTime
4157 // If this function changes, os::is_thread_cpu_time_supported() should too
4158 if (os::win32::is_nt()) {
4159 FILETIME CreationTime;
4160 FILETIME ExitTime;
4161 FILETIME KernelTime;
4162 FILETIME UserTime;
4164 if ( GetThreadTimes(thread->osthread()->thread_handle(),
4165 &CreationTime, &ExitTime, &KernelTime, &UserTime) == 0)
4166 return -1;
4167 else
4168 if (user_sys_cpu_time) {
4169 return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100;
4170 } else {
4171 return FT2INT64(UserTime) * 100;
4172 }
4173 } else {
4174 return (jlong) timeGetTime() * 1000000;
4175 }
4176 }
// Describes the timer backing current_thread_cpu_time() for JVMTI clients.
void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
  info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
  info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
}
// Describes the timer backing thread_cpu_time() for JVMTI clients
// (same characteristics as current_thread_cpu_time_info above).
void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
  info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
  info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
}
4192 bool os::is_thread_cpu_time_supported() {
4193 // see os::thread_cpu_time
4194 if (os::win32::is_nt()) {
4195 FILETIME CreationTime;
4196 FILETIME ExitTime;
4197 FILETIME KernelTime;
4198 FILETIME UserTime;
4200 if ( GetThreadTimes(GetCurrentThread(),
4201 &CreationTime, &ExitTime, &KernelTime, &UserTime) == 0)
4202 return false;
4203 else
4204 return true;
4205 } else {
4206 return false;
4207 }
4208 }
4210 // Windows does't provide a loadavg primitive so this is stubbed out for now.
4211 // It does have primitives (PDH API) to get CPU usage and run queue length.
4212 // "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length"
4213 // If we wanted to implement loadavg on Windows, we have a few options:
4214 //
4215 // a) Query CPU usage and run queue length and "fake" an answer by
4216 // returning the CPU usage if it's under 100%, and the run queue
4217 // length otherwise. It turns out that querying is pretty slow
4218 // on Windows, on the order of 200 microseconds on a fast machine.
4219 // Note that on the Windows the CPU usage value is the % usage
4220 // since the last time the API was called (and the first call
4221 // returns 100%), so we'd have to deal with that as well.
4222 //
4223 // b) Sample the "fake" answer using a sampling thread and store
4224 // the answer in a global variable. The call to loadavg would
4225 // just return the value of the global, avoiding the slow query.
4226 //
4227 // c) Sample a better answer using exponential decay to smooth the
4228 // value. This is basically the algorithm used by UNIX kernels.
4229 //
4230 // Note that sampling thread starvation could affect both (b) and (c).
4231 int os::loadavg(double loadavg[], int nelem) {
4232 return -1;
4233 }
// DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield()
// When the flag is set, JVM_Yield() requests are suppressed.
bool os::dont_yield() {
  return DontYieldALot;
}
4241 // This method is a slightly reworked copy of JDK's sysOpen
4242 // from src/windows/hpi/src/sys_api_md.c
4244 int os::open(const char *path, int oflag, int mode) {
4245 char pathbuf[MAX_PATH];
4247 if (strlen(path) > MAX_PATH - 1) {
4248 errno = ENAMETOOLONG;
4249 return -1;
4250 }
4251 os::native_path(strcpy(pathbuf, path));
4252 return ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode);
4253 }
// Wraps an already-open file descriptor in a stdio FILE stream.
FILE* os::open(int fd, const char* mode) {
  return ::_fdopen(fd, mode);
}
// Is a (classpath) directory empty?
bool os::dir_is_empty(const char* path) {
  WIN32_FIND_DATA fd;
  HANDLE f = FindFirstFile(path, &fd);
  if (f == INVALID_HANDLE_VALUE) {
    // Nothing matched the path: report "empty".
    return true;
  }
  // NOTE(review): FindFirstFile is invoked on the path itself rather than
  // on "path\\*"; presumably callers pass a wildcard pattern or depend on
  // the resulting behavior -- verify against callers before changing.
  FindClose(f);
  return false;
}
4270 // create binary file, rewriting existing file if required
4271 int os::create_binary_file(const char* path, bool rewrite_existing) {
4272 int oflags = _O_CREAT | _O_WRONLY | _O_BINARY;
4273 if (!rewrite_existing) {
4274 oflags |= _O_EXCL;
4275 }
4276 return ::open(path, oflags, _S_IREAD | _S_IWRITE);
4277 }
// return current position of file pointer
// Uses the 64-bit seek so offsets beyond 2GB are handled.
jlong os::current_file_offset(int fd) {
  return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR);
}
// move file pointer to the specified offset
// Returns the resulting absolute offset, or -1 on failure.
jlong os::seek_to_file_offset(int fd, jlong offset) {
  return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET);
}
// 64-bit lseek wrapper: repositions fd according to whence and returns the
// resulting offset, or -1 on failure.
jlong os::lseek(int fd, jlong offset, int whence) {
  return (jlong) ::_lseeki64(fd, offset, whence);
}
4294 // This method is a slightly reworked copy of JDK's sysNativePath
4295 // from src/windows/hpi/src/path_md.c
4297 /* Convert a pathname to native format. On win32, this involves forcing all
4298 separators to be '\\' rather than '/' (both are legal inputs, but Win95
4299 sometimes rejects '/') and removing redundant separators. The input path is
4300 assumed to have been converted into the character encoding used by the local
4301 system. Because this might be a double-byte encoding, care is taken to
4302 treat double-byte lead characters correctly.
4304 This procedure modifies the given path in place, as the result is never
4305 longer than the original. There is no error return; this operation always
4306 succeeds. */
// In-place normalization: collapses redundant separators, forces '\\',
// strips illegal trailing spaces/separators; 'dst' writes never pass 'src'
// reads, and 'end' tracks the last legal ending position.
char * os::native_path(char *path) {
  char *src = path, *dst = path, *end = path;
  char *colon = NULL;  /* If a drive specifier is found, this will
                          point to the colon following the drive
                          letter */

  /* Assumption: '/', '\\', ':', and drive letters are never lead bytes */
  assert(((!::IsDBCSLeadByte('/'))
    && (!::IsDBCSLeadByte('\\'))
    && (!::IsDBCSLeadByte(':'))),
    "Illegal lead byte");

  /* Check for leading separators */
#define isfilesep(c) ((c) == '/' || (c) == '\\')
  while (isfilesep(*src)) {
    src++;
  }

  if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') {
    /* Remove leading separators if followed by drive specifier.  This
      hack is necessary to support file URLs containing drive
      specifiers (e.g., "file://c:/path").  As a side effect,
      "/c:/path" can be used as an alternative to "c:/path". */
    *dst++ = *src++;
    colon = dst;
    *dst++ = ':';
    src++;
  } else {
    src = path;
    if (isfilesep(src[0]) && isfilesep(src[1])) {
      /* UNC pathname: Retain first separator; leave src pointed at
        second separator so that further separators will be collapsed
        into the second separator.  The result will be a pathname
        beginning with "\\\\" followed (most likely) by a host name. */
      src = dst = path + 1;
      path[0] = '\\'; /* Force first separator to '\\' */
    }
  }

  end = dst;

  /* Remove redundant separators from remainder of path, forcing all
    separators to be '\\' rather than '/'. Also, single byte space
    characters are removed from the end of the path because those
    are not legal ending characters on this operating system.
  */
  while (*src != '\0') {
    if (isfilesep(*src)) {
      *dst++ = '\\'; src++;
      /* Collapse runs of separators into a single '\\'. */
      while (isfilesep(*src)) src++;
      if (*src == '\0') {
        /* Check for trailing separator */
        end = dst;
        if (colon == dst - 2) break;  /* "z:\\" */
        if (dst == path + 1) break;   /* "\\" */
        if (dst == path + 2 && isfilesep(path[0])) {
          /* "\\\\" is not collapsed to "\\" because "\\\\" marks the
            beginning of a UNC pathname.  Even though it is not, by
            itself, a valid UNC pathname, we leave it as is in order
            to be consistent with the path canonicalizer as well
            as the win32 APIs, which treat this case as an invalid
            UNC pathname rather than as an alias for the root
            directory of the current drive. */
          break;
        }
        end = --dst;  /* Path does not denote a root directory, so
                        remove trailing separator */
        break;
      }
      end = dst;
    } else {
      if (::IsDBCSLeadByte(*src)) {  /* Copy a double-byte character */
        *dst++ = *src++;
        if (*src) *dst++ = *src++;
        end = dst;
      } else {  /* Copy a single-byte character */
        char c = *src++;
        *dst++ = c;
        /* Space is not a legal ending character */
        if (c != ' ') end = dst;
      }
    }
  }

  *end = '\0';

  /* For "z:", add "." to work around a bug in the C runtime library */
  if (colon == dst - 1) {
    path[2] = '.';
    path[3] = '\0';
  }

  return path;
}
4402 // This code is a copy of JDK's sysSetLength
4403 // from src/windows/hpi/src/sys_api_md.c
// Sets the length of the file underlying fd to 'length' (copied from JDK's
// sysSetLength).  Returns 0 on success, -1 on failure.
int os::ftruncate(int fd, jlong length) {
  HANDLE h = (HANDLE)::_get_osfhandle(fd);
  long high = (long)(length >> 32);
  DWORD ret;

  if (h == (HANDLE)(-1)) {
    // _get_osfhandle failed: fd is not a valid descriptor.
    return -1;
  }

  ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN);
  // 0xFFFFFFFF is a legal low dword for large offsets, so failure must also
  // be confirmed via GetLastError().
  if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) {
    return -1;
  }

  // Truncate (or extend) the file at the new position.
  if (::SetEndOfFile(h) == FALSE) {
    return -1;
  }

  return 0;
}
4427 // This code is a copy of JDK's sysSync
4428 // from src/windows/hpi/src/sys_api_md.c
4429 // except for the legacy workaround for a bug in Win 98
4431 int os::fsync(int fd) {
4432 HANDLE handle = (HANDLE)::_get_osfhandle(fd);
4434 if ( (!::FlushFileBuffers(handle)) &&
4435 (GetLastError() != ERROR_ACCESS_DENIED) ) {
4436 /* from winerror.h */
4437 return -1;
4438 }
4439 return 0;
4440 }
4442 static int nonSeekAvailable(int, long *);
4443 static int stdinAvailable(int, long *);
// Emulate the POSIX file-type predicates using the MSVC CRT mode bits.
#define S_ISCHR(mode)   (((mode) & _S_IFCHR) == _S_IFCHR)
#define S_ISFIFO(mode)  (((mode) & _S_IFIFO) == _S_IFIFO)
4448 // This code is a copy of JDK's sysAvailable
4449 // from src/windows/hpi/src/sys_api_md.c
// Stores in *bytes how many bytes can be read from fd without blocking;
// returns TRUE on success, FALSE on failure (copied from JDK's sysAvailable).
int os::available(int fd, jlong *bytes) {
  jlong cur, end;
  struct _stati64 stbuf64;

  if (::_fstati64(fd, &stbuf64) >= 0) {
    int mode = stbuf64.st_mode;
    if (S_ISCHR(mode) || S_ISFIFO(mode)) {
      // Character devices and pipes are not seekable; ask the device how
      // much is buffered.  Standard input (fd 0) needs console handling.
      int ret;
      long lpbytes;
      if (fd == 0) {
        ret = stdinAvailable(fd, &lpbytes);
      } else {
        ret = nonSeekAvailable(fd, &lpbytes);
      }
      (*bytes) = (jlong)(lpbytes);
      return ret;
    }
    // Seekable file: available = (size from current position), computed by
    // seeking to the end and restoring the original position.
    if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) {
      return FALSE;
    } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) {
      return FALSE;
    } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) {
      return FALSE;
    }
    *bytes = end - cur;
    return TRUE;
  } else {
    return FALSE;
  }
}
4482 // This code is a copy of JDK's nonSeekAvailable
4483 // from src/windows/hpi/src/sys_api_md.c
4485 static int nonSeekAvailable(int fd, long *pbytes) {
4486 /* This is used for available on non-seekable devices
4487 * (like both named and anonymous pipes, such as pipes
4488 * connected to an exec'd process).
4489 * Standard Input is a special case.
4490 *
4491 */
4492 HANDLE han;
4494 if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) {
4495 return FALSE;
4496 }
4498 if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) {
4499 /* PeekNamedPipe fails when at EOF. In that case we
4500 * simply make *pbytes = 0 which is consistent with the
4501 * behavior we get on Solaris when an fd is at EOF.
4502 * The only alternative is to raise an Exception,
4503 * which isn't really warranted.
4504 */
4505 if (::GetLastError() != ERROR_BROKEN_PIPE) {
4506 return FALSE;
4507 }
4508 *pbytes = 0;
4509 }
4510 return TRUE;
4511 }
4513 #define MAX_INPUT_EVENTS 2000
4515 // This code is a copy of JDK's stdinAvailable
4516 // from src/windows/hpi/src/sys_api_md.c
4518 static int stdinAvailable(int fd, long *pbytes) {
4519 HANDLE han;
4520 DWORD numEventsRead = 0; /* Number of events read from buffer */
4521 DWORD numEvents = 0; /* Number of events in buffer */
4522 DWORD i = 0; /* Loop index */
4523 DWORD curLength = 0; /* Position marker */
4524 DWORD actualLength = 0; /* Number of bytes readable */
4525 BOOL error = FALSE; /* Error holder */
4526 INPUT_RECORD *lpBuffer; /* Pointer to records of input events */
4528 if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) {
4529 return FALSE;
4530 }
4532 /* Construct an array of input records in the console buffer */
4533 error = ::GetNumberOfConsoleInputEvents(han, &numEvents);
4534 if (error == 0) {
4535 return nonSeekAvailable(fd, pbytes);
4536 }
4538 /* lpBuffer must fit into 64K or else PeekConsoleInput fails */
4539 if (numEvents > MAX_INPUT_EVENTS) {
4540 numEvents = MAX_INPUT_EVENTS;
4541 }
4543 lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal);
4544 if (lpBuffer == NULL) {
4545 return FALSE;
4546 }
4548 error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead);
4549 if (error == 0) {
4550 os::free(lpBuffer, mtInternal);
4551 return FALSE;
4552 }
4554 /* Examine input records for the number of bytes available */
4555 for(i=0; i<numEvents; i++) {
4556 if (lpBuffer[i].EventType == KEY_EVENT) {
4558 KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *)
4559 &(lpBuffer[i].Event);
4560 if (keyRecord->bKeyDown == TRUE) {
4561 CHAR *keyPressed = (CHAR *) &(keyRecord->uChar);
4562 curLength++;
4563 if (*keyPressed == '\r') {
4564 actualLength = curLength;
4565 }
4566 }
4567 }
4568 }
4570 if(lpBuffer != NULL) {
4571 os::free(lpBuffer, mtInternal);
4572 }
4574 *pbytes = (long) actualLength;
4575 return TRUE;
4576 }
4578 // Map a block of memory.
4579 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4580 char *addr, size_t bytes, bool read_only,
4581 bool allow_exec) {
4582 HANDLE hFile;
4583 char* base;
4585 hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL,
4586 OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
4587 if (hFile == NULL) {
4588 if (PrintMiscellaneous && Verbose) {
4589 DWORD err = GetLastError();
4590 tty->print_cr("CreateFile() failed: GetLastError->%ld.", err);
4591 }
4592 return NULL;
4593 }
4595 if (allow_exec) {
4596 // CreateFileMapping/MapViewOfFileEx can't map executable memory
4597 // unless it comes from a PE image (which the shared archive is not.)
4598 // Even VirtualProtect refuses to give execute access to mapped memory
4599 // that was not previously executable.
4600 //
4601 // Instead, stick the executable region in anonymous memory. Yuck.
4602 // Penalty is that ~4 pages will not be shareable - in the future
4603 // we might consider DLLizing the shared archive with a proper PE
4604 // header so that mapping executable + sharing is possible.
4606 base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE,
4607 PAGE_READWRITE);
4608 if (base == NULL) {
4609 if (PrintMiscellaneous && Verbose) {
4610 DWORD err = GetLastError();
4611 tty->print_cr("VirtualAlloc() failed: GetLastError->%ld.", err);
4612 }
4613 CloseHandle(hFile);
4614 return NULL;
4615 }
4617 DWORD bytes_read;
4618 OVERLAPPED overlapped;
4619 overlapped.Offset = (DWORD)file_offset;
4620 overlapped.OffsetHigh = 0;
4621 overlapped.hEvent = NULL;
4622 // ReadFile guarantees that if the return value is true, the requested
4623 // number of bytes were read before returning.
4624 bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0;
4625 if (!res) {
4626 if (PrintMiscellaneous && Verbose) {
4627 DWORD err = GetLastError();
4628 tty->print_cr("ReadFile() failed: GetLastError->%ld.", err);
4629 }
4630 release_memory(base, bytes);
4631 CloseHandle(hFile);
4632 return NULL;
4633 }
4634 } else {
4635 HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0,
4636 NULL /*file_name*/);
4637 if (hMap == NULL) {
4638 if (PrintMiscellaneous && Verbose) {
4639 DWORD err = GetLastError();
4640 tty->print_cr("CreateFileMapping() failed: GetLastError->%ld.", err);
4641 }
4642 CloseHandle(hFile);
4643 return NULL;
4644 }
4646 DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY;
4647 base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset,
4648 (DWORD)bytes, addr);
4649 if (base == NULL) {
4650 if (PrintMiscellaneous && Verbose) {
4651 DWORD err = GetLastError();
4652 tty->print_cr("MapViewOfFileEx() failed: GetLastError->%ld.", err);
4653 }
4654 CloseHandle(hMap);
4655 CloseHandle(hFile);
4656 return NULL;
4657 }
4659 if (CloseHandle(hMap) == 0) {
4660 if (PrintMiscellaneous && Verbose) {
4661 DWORD err = GetLastError();
4662 tty->print_cr("CloseHandle(hMap) failed: GetLastError->%ld.", err);
4663 }
4664 CloseHandle(hFile);
4665 return base;
4666 }
4667 }
4669 if (allow_exec) {
4670 DWORD old_protect;
4671 DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE;
4672 bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0;
4674 if (!res) {
4675 if (PrintMiscellaneous && Verbose) {
4676 DWORD err = GetLastError();
4677 tty->print_cr("VirtualProtect() failed: GetLastError->%ld.", err);
4678 }
4679 // Don't consider this a hard error, on IA32 even if the
4680 // VirtualProtect fails, we should still be able to execute
4681 CloseHandle(hFile);
4682 return base;
4683 }
4684 }
4686 if (CloseHandle(hFile) == 0) {
4687 if (PrintMiscellaneous && Verbose) {
4688 DWORD err = GetLastError();
4689 tty->print_cr("CloseHandle(hFile) failed: GetLastError->%ld.", err);
4690 }
4691 return base;
4692 }
4694 return base;
4695 }
4698 // Remap a block of memory.
4699 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
4700 char *addr, size_t bytes, bool read_only,
4701 bool allow_exec) {
4702 // This OS does not allow existing memory maps to be remapped so we
4703 // have to unmap the memory before we remap it.
4704 if (!os::unmap_memory(addr, bytes)) {
4705 return NULL;
4706 }
4708 // There is a very small theoretical window between the unmap_memory()
4709 // call above and the map_memory() call below where a thread in native
4710 // code may be able to access an address that is no longer mapped.
4712 return os::map_memory(fd, file_name, file_offset, addr, bytes,
4713 read_only, allow_exec);
4714 }
4717 // Unmap a block of memory.
4718 // Returns true=success, otherwise false.
4720 bool os::pd_unmap_memory(char* addr, size_t bytes) {
4721 BOOL result = UnmapViewOfFile(addr);
4722 if (result == 0) {
4723 if (PrintMiscellaneous && Verbose) {
4724 DWORD err = GetLastError();
4725 tty->print_cr("UnmapViewOfFile() failed: GetLastError->%ld.", err);
4726 }
4727 return false;
4728 }
4729 return true;
4730 }
4732 void os::pause() {
4733 char filename[MAX_PATH];
4734 if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4735 jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
4736 } else {
4737 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4738 }
4740 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4741 if (fd != -1) {
4742 struct stat buf;
4743 ::close(fd);
4744 while (::stat(filename, &buf) == 0) {
4745 Sleep(100);
4746 }
4747 } else {
4748 jio_fprintf(stderr,
4749 "Could not open pause file '%s', continuing immediately.\n", filename);
4750 }
4751 }
// Crash protection may only be armed by the WatcherThread; enforce that at
// construction time.
os::WatcherThreadCrashProtection::WatcherThreadCrashProtection() {
  assert(Thread::current()->is_Watcher_thread(), "Must be WatcherThread");
}
/*
 * See the caveats for this class in os_windows.hpp
 * Protects the callback call so that a raised OS EXCEPTION causes a jump
 * back into this method and returns false.  If no OS EXCEPTION was raised,
 * returns true.
 * The callback provides the code that should be protected.
 */
bool os::WatcherThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
  assert(Thread::current()->is_Watcher_thread(), "Only for WatcherThread");
  assert(!WatcherThread::watcher_thread()->has_crash_protection(),
      "crash_protection already set?");

  bool success = true;
  __try {
    // Arm protection before running the callback so the VM's exception
    // filter knows a fault here should be swallowed, not treated as a crash.
    WatcherThread::watcher_thread()->set_crash_protection(this);
    cb.call();
  } __except(EXCEPTION_EXECUTE_HANDLER) {
    // only for protection, nothing to do
    success = false;
  }
  // Always disarm, whether or not an exception fired.
  WatcherThread::watcher_thread()->set_crash_protection(NULL);
  return success;
}
4781 // An Event wraps a win32 "CreateEvent" kernel handle.
4782 //
4783 // We have a number of choices regarding "CreateEvent" win32 handle leakage:
4784 //
4785 // 1: When a thread dies return the Event to the EventFreeList, clear the ParkHandle
4786 // field, and call CloseHandle() on the win32 event handle. Unpark() would
4787 // need to be modified to tolerate finding a NULL (invalid) win32 event handle.
4788 // In addition, an unpark() operation might fetch the handle field, but the
4789 // event could recycle between the fetch and the SetEvent() operation.
4790 // SetEvent() would either fail because the handle was invalid, or inadvertently work,
4791 // as the win32 handle value had been recycled. In an ideal world calling SetEvent()
// on a stale but recycled handle would be harmless, but in practice this might
4793 // confuse other non-Sun code, so it's not a viable approach.
4794 //
4795 // 2: Once a win32 event handle is associated with an Event, it remains associated
4796 // with the Event. The event handle is never closed. This could be construed
4797 // as handle leakage, but only up to the maximum # of threads that have been extant
4798 // at any one time. This shouldn't be an issue, as windows platforms typically
4799 // permit a process to have hundreds of thousands of open handles.
4800 //
4801 // 3: Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
4802 // and release unused handles.
4803 //
4804 // 4: Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
4805 // It's not clear, however, that we wouldn't be trading one type of leak for another.
4806 //
4807 // 5. Use an RCU-like mechanism (Read-Copy Update).
4808 // Or perhaps something similar to Maged Michael's "Hazard pointers".
4809 //
4810 // We use (2).
4811 //
4812 // TODO-FIXME:
4813 // 1. Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
4814 // 2. Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
4815 // to recover from (or at least detect) the dreaded Windows 841176 bug.
4816 // 3. Collapse the interrupt_event, the JSR166 parker event, and the objectmonitor ParkEvent
4817 // into a single win32 CreateEvent() handle.
4818 //
4819 // _Event transitions in park()
4820 // -1 => -1 : illegal
4821 // 1 => 0 : pass - return immediately
4822 // 0 => -1 : block
4823 //
4824 // _Event serves as a restricted-range semaphore :
4825 // -1 : thread is blocked
4826 // 0 : neutral - thread is running or ready
4827 // 1 : signaled - thread is running or ready
4828 //
4829 // Another possible encoding of _Event would be
4830 // with explicit "PARKED" and "SIGNALED" bits.
// Timed park: block the owning thread for up to Millis milliseconds, or
// until unpark()ed.  Returns OS_OK if awoken by unpark (or if already
// signaled), OS_TIMEOUT on timeout.
int os::PlatformEvent::park (jlong Millis) {
    guarantee (_ParkHandle != NULL , "Invariant") ;
    guarantee (Millis > 0          , "Invariant") ;
    int v ;

    // CONSIDER: defer assigning a CreateEvent() handle to the Event until
    // the initial park() operation.

    // Atomically decrement _Event (1 -> 0: consume permit, 0 -> -1: block).
    for (;;) {
        v = _Event ;
        if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
    }
    guarantee ((v == 0) || (v == 1), "invariant") ;
    if (v != 0) return OS_OK ;

    // Do this the hard way by blocking ...
    // TODO: consider a brief spin here, gated on the success of recent
    // spin attempts by this thread.
    //
    // We decompose long timeouts into series of shorter timed waits.
    // Evidently large timo values passed in WaitForSingleObject() are problematic on some
    // versions of Windows. See EventWait() for details. This may be superstition. Or not.
    // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
    // with os::javaTimeNanos(). Furthermore, we assume that spurious returns from
    // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend
    // to happen early in the wait interval. Specifically, after a spurious wakeup (rv ==
    // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
    // for the already waited time. This policy does not admit any new outcomes.
    // In the future, however, we might want to track the accumulated wait time and
    // adjust Millis accordingly if we encounter a spurious wakeup.

    const int MAXTIMEOUT = 0x10000000 ;
    DWORD rv = WAIT_TIMEOUT ;
    while (_Event < 0 && Millis > 0) {
       DWORD prd = Millis ;     // set prd = MIN (Millis, MAXTIMEOUT)
       if (Millis > MAXTIMEOUT) {
          prd = MAXTIMEOUT ;
       }
       rv = ::WaitForSingleObject (_ParkHandle, prd) ;
       assert (rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed") ;
       if (rv == WAIT_TIMEOUT) {
           Millis -= prd ;
       }
    }
    v = _Event ;
    _Event = 0 ;
    // see comment at end of os::PlatformEvent::park() below:
    OrderAccess::fence() ;
    // If we encounter a nearly simultanous timeout expiry and unpark()
    // we return OS_OK indicating we awoke via unpark().
    // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
    return (v >= 0) ? OS_OK : OS_TIMEOUT ;
}
// Untimed park: block the owning thread until unpark()ed (or return
// immediately if a permit is already available).
void os::PlatformEvent::park () {
    guarantee (_ParkHandle != NULL, "Invariant") ;
    // Invariant: Only the thread associated with the Event/PlatformEvent
    // may call park().
    int v ;
    // Atomically decrement _Event (1 -> 0: consume permit, 0 -> -1: block).
    for (;;) {
        v = _Event ;
        if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
    }
    guarantee ((v == 0) || (v == 1), "invariant") ;
    if (v != 0) return ;

    // Do this the hard way by blocking ...
    // TODO: consider a brief spin here, gated on the success of recent
    // spin attempts by this thread.
    // Loop guards against spurious wakeups from WaitForSingleObject().
    while (_Event < 0) {
       DWORD rv = ::WaitForSingleObject (_ParkHandle, INFINITE) ;
       assert (rv == WAIT_OBJECT_0, "WaitForSingleObject failed") ;
    }

    // Usually we'll find _Event == 0 at this point, but as
    // an optional optimization we clear it, just in case multiple
    // unpark() operations drove _Event up to 1.
    _Event = 0 ;
    OrderAccess::fence() ;
    guarantee (_Event >= 0, "invariant") ;
}
// Make a permit available, waking the parked owner thread if necessary.
void os::PlatformEvent::unpark() {
  guarantee (_ParkHandle != NULL, "Invariant") ;

  // Transitions for _Event:
  //    0 :=> 1
  //    1 :=> 1
  //   -1 :=> either 0 or 1; must signal target thread
  //         That is, we can safely transition _Event from -1 to either
  //         0 or 1.  Forcing 1 is slightly more efficient for back-to-back
  //         unpark() calls.
  // See also: "Semaphores in Plan 9" by Mullender & Cox
  //
  // Note: Forcing a transition from "-1" to "1" on an unpark() means
  // that it will take two back-to-back park() calls for the owning
  // thread to block. This has the benefit of forcing a spurious return
  // from the first park() call after an unpark() call which will help
  // shake out uses of park() and unpark() without condition variables.

  // Only signal the kernel event when the previous state was -1 (parked);
  // otherwise the xchg alone suffices.
  if (Atomic::xchg(1, &_Event) >= 0) return;

  ::SetEvent(_ParkHandle);
}
4938 // JSR166
4939 // -------------------------------------------------------
4941 /*
4942 * The Windows implementation of Park is very straightforward: Basic
4943 * operations on Win32 Events turn out to have the right semantics to
 * use them directly. We opportunistically reuse the event inherited
4945 * from Monitor.
4946 */
// JSR166 park support: block the current JavaThread for the requested time
// (absolute deadline in millis, or relative nanos), honoring interrupts and
// the external-suspension protocol.
void Parker::park(bool isAbsolute, jlong time) {
  guarantee (_ParkEvent != NULL, "invariant") ;
  // First, demultiplex/decode time arguments
  if (time < 0) { // don't wait
    return;
  }
  else if (time == 0 && !isAbsolute) {
    time = INFINITE;
  }
  else if  (isAbsolute) {
    time -= os::javaTimeMillis(); // convert to relative time
    if (time <= 0) // already elapsed
      return;
  }
  else { // relative
    time /= 1000000; // Must coarsen from nanos to millis
    if (time == 0)   // Wait for the minimal time unit if zero
      time = 1;
  }

  JavaThread* thread = (JavaThread*)(Thread::current());
  assert(thread->is_Java_thread(), "Must be JavaThread");
  JavaThread *jt = (JavaThread *)thread;

  // Don't wait if interrupted or already triggered
  // (a zero-timeout wait consumes a pending permit without blocking).
  if (Thread::is_interrupted(thread, false) ||
    WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
    ResetEvent(_ParkEvent);
    return;
  }
  else {
    // Transition to blocked state so safepoints/GC are not held up while
    // we wait on the kernel event.
    ThreadBlockInVM tbivm(jt);
    OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
    jt->set_suspend_equivalent();

    WaitForSingleObject(_ParkEvent,  time);
    ResetEvent(_ParkEvent);

    // If externally suspended while waiting, re-suspend
    if (jt->handle_special_suspend_equivalent_condition()) {
      jt->java_suspend_self();
    }
  }
}
// JSR166 unpark: make the permit available; safe to call from any thread.
void Parker::unpark() {
  guarantee (_ParkEvent != NULL, "invariant") ;
  SetEvent(_ParkEvent);
}
4999 // Run the specified command in a separate process. Return its exit value,
5000 // or -1 on failure (e.g. can't create a new process).
5001 int os::fork_and_exec(char* cmd) {
5002 STARTUPINFO si;
5003 PROCESS_INFORMATION pi;
5005 memset(&si, 0, sizeof(si));
5006 si.cb = sizeof(si);
5007 memset(&pi, 0, sizeof(pi));
5008 BOOL rslt = CreateProcess(NULL, // executable name - use command line
5009 cmd, // command line
5010 NULL, // process security attribute
5011 NULL, // thread security attribute
5012 TRUE, // inherits system handles
5013 0, // no creation flags
5014 NULL, // use parent's environment block
5015 NULL, // use parent's starting directory
5016 &si, // (in) startup information
5017 &pi); // (out) process information
5019 if (rslt) {
5020 // Wait until child process exits.
5021 WaitForSingleObject(pi.hProcess, INFINITE);
5023 DWORD exit_code;
5024 GetExitCodeProcess(pi.hProcess, &exit_code);
5026 // Close process and thread handles.
5027 CloseHandle(pi.hProcess);
5028 CloseHandle(pi.hThread);
5030 return (int)exit_code;
5031 } else {
5032 return -1;
5033 }
5034 }
//--------------------------------------------------------------------------------------------------
// Non-product code

// Counters driving the MallocVerifyStart/MallocVerifyInterval heap-check
// cadence below.
static int mallocDebugIntervalCounter = 0;
static int mallocDebugCounter = 0;
// Walk the C heap with the Win32 debug-heap API and abort the VM if a busy
// block fails validation.  Returns true when no corruption is detected
// (corruption is fatal, so the function never returns false).
bool os::check_heap(bool force) {
  if (++mallocDebugCounter < MallocVerifyStart && !force) return true;
  if (++mallocDebugIntervalCounter >= MallocVerifyInterval || force) {
    // Note: HeapValidate executes two hardware breakpoints when it finds something
    // wrong; at these points, eax contains the address of the offending block (I think).
    // To get to the explicit error message(s) below, just continue twice.
    HANDLE heap = GetProcessHeap();
    // HeapLock/HeapUnlock bracket the walk so other threads cannot mutate
    // the heap mid-iteration.
    { HeapLock(heap);
      PROCESS_HEAP_ENTRY phe;
      phe.lpData = NULL;
      while (HeapWalk(heap, &phe) != 0) {
        if ((phe.wFlags & PROCESS_HEAP_ENTRY_BUSY) &&
            !HeapValidate(heap, 0, phe.lpData)) {
          tty->print_cr("C heap has been corrupted (time: %d allocations)", mallocDebugCounter);
          // NOTE(review): "%#x" truncates phe.lpData on 64-bit Windows;
          // PTR_FORMAT would print the full pointer — confirm before changing.
          tty->print_cr("corrupted block near address %#x, length %d", phe.lpData, phe.cbData);
          fatal("corrupted C heap");
        }
      }
      // HeapWalk terminates with ERROR_NO_MORE_ITEMS on a clean finish.
      DWORD err = GetLastError();
      if (err != ERROR_NO_MORE_ITEMS && err != ERROR_CALL_NOT_IMPLEMENTED) {
        fatal(err_msg("heap walk aborted with error %d", err));
      }
      HeapUnlock(heap);
    }
    mallocDebugIntervalCounter = 0;
  }
  return true;
}
// Symbol lookup for an arbitrary address: not implemented on Windows.
// Always returns false (nothing printed to 'st').
bool os::find(address addr, outputStream* st) {
  // Nothing yet
  return false;
}
5076 LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) {
5077 DWORD exception_code = e->ExceptionRecord->ExceptionCode;
5079 if ( exception_code == EXCEPTION_ACCESS_VIOLATION ) {
5080 JavaThread* thread = (JavaThread*)ThreadLocalStorage::get_thread_slow();
5081 PEXCEPTION_RECORD exceptionRecord = e->ExceptionRecord;
5082 address addr = (address) exceptionRecord->ExceptionInformation[1];
5084 if (os::is_memory_serialize_page(thread, addr))
5085 return EXCEPTION_CONTINUE_EXECUTION;
5086 }
5088 return EXCEPTION_CONTINUE_SEARCH;
5089 }
// We don't build a headless jre for Windows
bool os::is_headless_jre() { return false; }
5094 static jint initSock() {
5095 WSADATA wsadata;
5097 if (!os::WinSock2Dll::WinSock2Available()) {
5098 jio_fprintf(stderr, "Could not load Winsock (error: %d)\n",
5099 ::GetLastError());
5100 return JNI_ERR;
5101 }
5103 if (os::WinSock2Dll::WSAStartup(MAKEWORD(2,2), &wsadata) != 0) {
5104 jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
5105 ::GetLastError());
5106 return JNI_ERR;
5107 }
5108 return JNI_OK;
5109 }
// Resolve a host name via the dynamically-loaded Winsock gethostbyname.
struct hostent* os::get_host_by_name(char* name) {
  return (struct hostent*)os::WinSock2Dll::gethostbyname(name);
}
// Thin wrapper over the Winsock closesocket() call.
int os::socket_close(int fd) {
  return ::closesocket(fd);
}
// Query bytes available for reading on 'fd' into *pbytes (FIONREAD).
// Returns 1 on success, 0 on failure — note this differs from the
// usual 0/-1 convention of the other socket wrappers.
int os::socket_available(int fd, jint *pbytes) {
  int ret = ::ioctlsocket(fd, FIONREAD, (u_long*)pbytes);
  return (ret < 0) ? 0 : 1;
}
// Thin wrapper over the Winsock socket() call.
int os::socket(int domain, int type, int protocol) {
  return ::socket(domain, type, protocol);
}
// Thin wrapper over the Winsock listen() call.
int os::listen(int fd, int count) {
  return ::listen(fd, count);
}
// Thin wrapper over the Winsock connect() call.
int os::connect(int fd, struct sockaddr* him, socklen_t len) {
  return ::connect(fd, him, len);
}
// Thin wrapper over the Winsock accept() call.
int os::accept(int fd, struct sockaddr* him, socklen_t* len) {
  return ::accept(fd, him, len);
}
// Thin wrapper over sendto(); narrows size_t len to the int Winsock expects.
int os::sendto(int fd, char* buf, size_t len, uint flags,
               struct sockaddr* to, socklen_t tolen) {

  return ::sendto(fd, buf, (int)len, flags, to, tolen);
}
// Thin wrapper over recvfrom(); narrows size_t nBytes to the int Winsock expects.
int os::recvfrom(int fd, char *buf, size_t nBytes, uint flags,
                 sockaddr* from, socklen_t* fromlen) {

  return ::recvfrom(fd, buf, (int)nBytes, flags, from, fromlen);
}
// Thin wrapper over recv(); narrows size_t nBytes to the int Winsock expects.
int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
  return ::recv(fd, buf, (int)nBytes, flags);
}
// Thin wrapper over send(); narrows size_t nBytes to the int Winsock expects.
int os::send(int fd, char* buf, size_t nBytes, uint flags) {
  return ::send(fd, buf, (int)nBytes, flags);
}
// On Windows raw_send is identical to send(); other platforms differ.
int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
  return ::send(fd, buf, (int)nBytes, flags);
}
// Wait up to 'timeout' milliseconds for 'fd' to become readable.
// Returns select()'s result: >0 readable, 0 timed out, SOCKET_ERROR on error.
int os::timeout(int fd, long timeout) {
  fd_set tbl;
  struct timeval t;

  t.tv_sec  = timeout / 1000;
  t.tv_usec = (timeout % 1000) * 1000;

  // Build the fd_set directly via the Winsock fd_count/fd_array layout
  // instead of FD_ZERO/FD_SET.
  tbl.fd_count    = 1;
  tbl.fd_array[0] = fd;

  // The first argument to Winsock select() is ignored.
  return ::select(1, &tbl, 0, 0, &t);
}
// Thin wrapper over the Winsock gethostname() call.
int os::get_host_name(char* name, int namelen) {
  return ::gethostname(name, namelen);
}
// Thin wrapper over the Winsock shutdown() call.
int os::socket_shutdown(int fd, int howto) {
  return ::shutdown(fd, howto);
}
// Thin wrapper over the Winsock bind() call.
int os::bind(int fd, struct sockaddr* him, socklen_t len) {
  return ::bind(fd, him, len);
}
// Thin wrapper over the Winsock getsockname() call.
int os::get_sock_name(int fd, struct sockaddr* him, socklen_t* len) {
  return ::getsockname(fd, him, len);
}
// Thin wrapper over the Winsock getsockopt() call.
int os::get_sock_opt(int fd, int level, int optname,
                     char* optval, socklen_t* optlen) {
  return ::getsockopt(fd, level, optname, optval, optlen);
}
// Thin wrapper over the Winsock setsockopt() call.
int os::set_sock_opt(int fd, int level, int optname,
                     const char* optval, socklen_t optlen) {
  return ::setsockopt(fd, level, optname, optval, optlen);
}
// WINDOWS CONTEXT Flags for THREAD_SAMPLING
// Selects which register state GetThreadContext() should capture when a
// thread is sampled; x87/MMX extended registers exist only on IA32.
#if defined(IA32)
# define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
#elif defined (AMD64)
# define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
#endif
5210 // returns true if thread could be suspended,
5211 // false otherwise
5212 static bool do_suspend(HANDLE* h) {
5213 if (h != NULL) {
5214 if (SuspendThread(*h) != ~0) {
5215 return true;
5216 }
5217 }
5218 return false;
5219 }
5221 // resume the thread
5222 // calling resume on an active thread is a no-op
5223 static void do_resume(HANDLE* h) {
5224 if (h != NULL) {
5225 ResumeThread(*h);
5226 }
5227 }
5229 // retrieve a suspend/resume context capable handle
5230 // from the tid. Caller validates handle return value.
5231 void get_thread_handle_for_extended_context(HANDLE* h, OSThread::thread_id_t tid) {
5232 if (h != NULL) {
5233 *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
5234 }
5235 }
//
// Thread sampling implementation
//
// Suspend the target thread, capture its register context, run do_task()
// on it, then resume the thread.  Silently bails out if a usable thread
// handle cannot be obtained.
void os::SuspendedThreadTask::internal_do_task() {
  CONTEXT ctxt;
  HANDLE h = NULL;

  // get context capable handle for thread
  get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());

  // sanity
  if (h == NULL || h == INVALID_HANDLE_VALUE) {
    return;
  }

  // suspend the thread
  if (do_suspend(&h)) {
    ctxt.ContextFlags = sampling_context_flags;
    // get thread context
    GetThreadContext(h, &ctxt);
    SuspendedThreadTaskContext context(_thread, &ctxt);
    // pass context to Thread Sampling impl
    do_task(context);
    // resume thread
    do_resume(&h);
  }

  // close handle
  CloseHandle(h);
}
// Kernel32 API
// Function-pointer types for Kernel32 entry points that are resolved at
// runtime because they are not present on all supported Windows versions.
typedef SIZE_T (WINAPI* GetLargePageMinimum_Fn)(void);
typedef LPVOID (WINAPI *VirtualAllocExNuma_Fn) (HANDLE, LPVOID, SIZE_T, DWORD, DWORD, DWORD);
typedef BOOL (WINAPI *GetNumaHighestNodeNumber_Fn) (PULONG);
typedef BOOL (WINAPI *GetNumaNodeProcessorMask_Fn) (UCHAR, PULONGLONG);
typedef USHORT (WINAPI* RtlCaptureStackBackTrace_Fn)(ULONG, ULONG, PVOID*, PULONG);

// Resolved lazily by initialize()/initializeCommon(); NULL means the entry
// point is unavailable on this OS.
GetLargePageMinimum_Fn os::Kernel32Dll::_GetLargePageMinimum = NULL;
VirtualAllocExNuma_Fn os::Kernel32Dll::_VirtualAllocExNuma = NULL;
GetNumaHighestNodeNumber_Fn os::Kernel32Dll::_GetNumaHighestNodeNumber = NULL;
GetNumaNodeProcessorMask_Fn os::Kernel32Dll::_GetNumaNodeProcessorMask = NULL;
RtlCaptureStackBackTrace_Fn os::Kernel32Dll::_RtlCaptureStackBackTrace = NULL;


BOOL os::Kernel32Dll::initialized = FALSE;
// Forward to the resolved entry point; callers must first have checked
// GetLargePageMinimumAvailable().
SIZE_T os::Kernel32Dll::GetLargePageMinimum() {
  assert(initialized && _GetLargePageMinimum != NULL,
    "GetLargePageMinimumAvailable() not yet called");
  return _GetLargePageMinimum();
}
// Lazily resolve Kernel32 entry points, then report whether
// GetLargePageMinimum is available on this OS.
BOOL os::Kernel32Dll::GetLargePageMinimumAvailable() {
  if (!initialized) {
    initialize();
  }
  return _GetLargePageMinimum != NULL;
}
// Lazily resolve Kernel32 entry points, then report whether the NUMA
// family of calls is available (VirtualAllocExNuma is the representative).
BOOL os::Kernel32Dll::NumaCallsAvailable() {
  if (!initialized) {
    initialize();
  }
  return _VirtualAllocExNuma != NULL;
}
// Forward to the resolved entry point; callers must first have checked
// NumaCallsAvailable().
LPVOID os::Kernel32Dll::VirtualAllocExNuma(HANDLE hProc, LPVOID addr, SIZE_T bytes, DWORD flags, DWORD prot, DWORD node) {
  assert(initialized && _VirtualAllocExNuma != NULL,
    "NUMACallsAvailable() not yet called");

  return _VirtualAllocExNuma(hProc, addr, bytes, flags, prot, node);
}
// Forward to the resolved entry point; callers must first have checked
// NumaCallsAvailable().
BOOL os::Kernel32Dll::GetNumaHighestNodeNumber(PULONG ptr_highest_node_number) {
  assert(initialized && _GetNumaHighestNodeNumber != NULL,
    "NUMACallsAvailable() not yet called");

  return _GetNumaHighestNodeNumber(ptr_highest_node_number);
}
// Forward to the resolved entry point; callers must first have checked
// NumaCallsAvailable().
BOOL os::Kernel32Dll::GetNumaNodeProcessorMask(UCHAR node, PULONGLONG proc_mask) {
  assert(initialized && _GetNumaNodeProcessorMask != NULL,
    "NUMACallsAvailable() not yet called");

  return _GetNumaNodeProcessorMask(node, proc_mask);
}
5325 USHORT os::Kernel32Dll::RtlCaptureStackBackTrace(ULONG FrameToSkip,
5326 ULONG FrameToCapture, PVOID* BackTrace, PULONG BackTraceHash) {
5327 if (!initialized) {
5328 initialize();
5329 }
5331 if (_RtlCaptureStackBackTrace != NULL) {
5332 return _RtlCaptureStackBackTrace(FrameToSkip, FrameToCapture,
5333 BackTrace, BackTraceHash);
5334 } else {
5335 return 0;
5336 }
5337 }
// Resolve the Kernel32 entry points that every build variant needs.
// GetProcAddress() yields NULL for entry points absent on this OS, which
// the *Available() queries above interpret as "feature unsupported".
void os::Kernel32Dll::initializeCommon() {
  if (!initialized) {
    HMODULE handle = ::GetModuleHandle("Kernel32.dll");
    assert(handle != NULL, "Just check");
    _GetLargePageMinimum = (GetLargePageMinimum_Fn)::GetProcAddress(handle, "GetLargePageMinimum");
    _VirtualAllocExNuma = (VirtualAllocExNuma_Fn)::GetProcAddress(handle, "VirtualAllocExNuma");
    _GetNumaHighestNodeNumber = (GetNumaHighestNodeNumber_Fn)::GetProcAddress(handle, "GetNumaHighestNodeNumber");
    _GetNumaNodeProcessorMask = (GetNumaNodeProcessorMask_Fn)::GetProcAddress(handle, "GetNumaNodeProcessorMask");
    _RtlCaptureStackBackTrace = (RtlCaptureStackBackTrace_Fn)::GetProcAddress(handle, "RtlCaptureStackBackTrace");
    initialized = TRUE;
  }
}
5354 #ifndef JDK6_OR_EARLIER
// JDK7+ build: every optional entry point is handled by initializeCommon();
// the statically-linked calls below need no dynamic resolution.
void os::Kernel32Dll::initialize() {
  initializeCommon();
}
// Kernel32 API
// Statically linked on JDK7+ targets (always present on supported Windows).
inline BOOL os::Kernel32Dll::SwitchToThread() {
  return ::SwitchToThread();
}
// Always available when statically linked.
inline BOOL os::Kernel32Dll::SwitchToThreadAvailable() {
  return true;
}
// Help tools
// Toolhelp32 snapshot APIs are statically linked on JDK7+ targets.
inline BOOL os::Kernel32Dll::HelpToolsAvailable() {
  return true;
}
// Statically linked wrapper over CreateToolhelp32Snapshot().
inline HANDLE os::Kernel32Dll::CreateToolhelp32Snapshot(DWORD dwFlags,DWORD th32ProcessId) {
  return ::CreateToolhelp32Snapshot(dwFlags, th32ProcessId);
}
// Statically linked wrapper over Module32First().
inline BOOL os::Kernel32Dll::Module32First(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
  return ::Module32First(hSnapshot, lpme);
}
// Statically linked wrapper over Module32Next().
inline BOOL os::Kernel32Dll::Module32Next(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
  return ::Module32Next(hSnapshot, lpme);
}
// Statically linked wrapper over GetNativeSystemInfo().
inline void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo) {
  ::GetNativeSystemInfo(lpSystemInfo);
}
// PSAPI API
// Statically linked wrapper over EnumProcessModules() (JDK7+ build).
inline BOOL os::PSApiDll::EnumProcessModules(HANDLE hProcess, HMODULE *lpModule, DWORD cb, LPDWORD lpcbNeeded) {
  return ::EnumProcessModules(hProcess, lpModule, cb, lpcbNeeded);
}
// Statically linked wrapper over GetModuleFileNameEx() (JDK7+ build).
inline DWORD os::PSApiDll::GetModuleFileNameEx(HANDLE hProcess, HMODULE hModule, LPTSTR lpFilename, DWORD nSize) {
  return ::GetModuleFileNameEx(hProcess, hModule, lpFilename, nSize);
}
// Statically linked wrapper over GetModuleInformation() (JDK7+ build).
inline BOOL os::PSApiDll::GetModuleInformation(HANDLE hProcess, HMODULE hModule, LPMODULEINFO lpmodinfo, DWORD cb) {
  return ::GetModuleInformation(hProcess, hModule, lpmodinfo, cb);
}
// PSAPI is always available when statically linked.
inline BOOL os::PSApiDll::PSApiAvailable() {
  return true;
}
// WinSock2 API
// Statically linked wrapper over WSAStartup() (JDK7+ build).
inline BOOL os::WinSock2Dll::WSAStartup(WORD wVersionRequested, LPWSADATA lpWSAData) {
  return ::WSAStartup(wVersionRequested, lpWSAData);
}
// Statically linked wrapper over gethostbyname() (JDK7+ build).
inline struct hostent* os::WinSock2Dll::gethostbyname(const char *name) {
  return ::gethostbyname(name);
}
// WinSock2 is always available when statically linked.
inline BOOL os::WinSock2Dll::WinSock2Available() {
  return true;
}
// Advapi API
// Statically linked wrapper over AdjustTokenPrivileges() (JDK7+ build).
inline BOOL os::Advapi32Dll::AdjustTokenPrivileges(HANDLE TokenHandle,
   BOOL DisableAllPrivileges, PTOKEN_PRIVILEGES NewState, DWORD BufferLength,
   PTOKEN_PRIVILEGES PreviousState, PDWORD ReturnLength) {
   return ::AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState,
     BufferLength, PreviousState, ReturnLength);
}
// Statically linked wrapper over OpenProcessToken() (JDK7+ build).
inline BOOL os::Advapi32Dll::OpenProcessToken(HANDLE ProcessHandle, DWORD DesiredAccess,
   PHANDLE TokenHandle) {
   return ::OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle);
}
// Statically linked wrapper over LookupPrivilegeValue() (JDK7+ build).
inline BOOL os::Advapi32Dll::LookupPrivilegeValue(LPCTSTR lpSystemName, LPCTSTR lpName, PLUID lpLuid) {
   return ::LookupPrivilegeValue(lpSystemName, lpName, lpLuid);
}
// Advapi32 is always available when statically linked.
inline BOOL os::Advapi32Dll::AdvapiAvailable() {
  return true;
}
// Handle of the executable module itself (GetModuleHandle(NULL)), used for
// looking up statically linked agent entry points.
void* os::get_default_process_handle() {
  return (void*)GetModuleHandle(NULL);
}
// Builds a platform dependent Agent_OnLoad_<lib_name> function name
// which is used to find statically linked in agents.
// Additionally for windows, takes into account __stdcall names.
// Parameters:
//            sym_name: Symbol in library we are looking for
//            lib_name: Name of library to look in, NULL for shared libs.
//            is_absolute_path == true if lib_name is absolute path to agent
//                                     such as "C:/a/b/L.dll"
//                            == false if only the base name of the library is passed in
//                                     such as "L"
// Returns a C-heap allocated string (caller frees), or NULL on allocation
// failure or if the stripped name would be empty.
char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
                                    bool is_absolute_path) {
  char *agent_entry_name;
  size_t len;
  size_t name_len;
  size_t prefix_len = strlen(JNI_LIB_PREFIX);
  size_t suffix_len = strlen(JNI_LIB_SUFFIX);
  const char *start;

  if (lib_name != NULL) {
    len = name_len = strlen(lib_name);
    if (is_absolute_path) {
      // Need to strip path, prefix and suffix
      if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
        lib_name = ++start;
      } else {
        // Need to check for drive prefix
        if ((start = strchr(lib_name, ':')) != NULL) {
          lib_name = ++start;
        }
      }
      // 'len' still counts the full path here, so this also rejects paths
      // that are nothing but prefix+suffix.
      if (len <= (prefix_len + suffix_len)) {
        return NULL;
      }
      lib_name += prefix_len;
      name_len = strlen(lib_name) - suffix_len;
    }
  }
  // +2: one for the '_' separator, one for the terminating NUL.
  len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
  agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
  if (agent_entry_name == NULL) {
    return NULL;
  }
  if (lib_name != NULL) {
    // __stdcall decorated names carry an "@NN" argument-size suffix which
    // must stay at the end: _Agent_OnLoad@XX -> _Agent_OnLoad_libname@XX.
    const char *p = strrchr(sym_name, '@');
    if (p != NULL && p != sym_name) {
      // sym_name == _Agent_OnLoad@XX
      strncpy(agent_entry_name, sym_name, (p - sym_name));
      agent_entry_name[(p-sym_name)] = '\0';
      // agent_entry_name == _Agent_OnLoad
      strcat(agent_entry_name, "_");
      strncat(agent_entry_name, lib_name, name_len);
      strcat(agent_entry_name, p);
      // agent_entry_name == _Agent_OnLoad_lib_name@XX
    } else {
      strcpy(agent_entry_name, sym_name);
      strcat(agent_entry_name, "_");
      strncat(agent_entry_name, lib_name, name_len);
    }
  } else {
    strcpy(agent_entry_name, sym_name);
  }
  return agent_entry_name;
}
5512 #else
// Kernel32 API
// JDK6-or-earlier build: these entry points may be missing on very old
// Windows versions, so they are resolved dynamically at runtime.
typedef BOOL (WINAPI* SwitchToThread_Fn)(void);
typedef HANDLE (WINAPI* CreateToolhelp32Snapshot_Fn)(DWORD,DWORD);
typedef BOOL (WINAPI* Module32First_Fn)(HANDLE,LPMODULEENTRY32);
typedef BOOL (WINAPI* Module32Next_Fn)(HANDLE,LPMODULEENTRY32);
typedef void (WINAPI* GetNativeSystemInfo_Fn)(LPSYSTEM_INFO);

// Resolved lazily by initialize(); NULL means unavailable on this OS.
SwitchToThread_Fn os::Kernel32Dll::_SwitchToThread = NULL;
CreateToolhelp32Snapshot_Fn os::Kernel32Dll::_CreateToolhelp32Snapshot = NULL;
Module32First_Fn os::Kernel32Dll::_Module32First = NULL;
Module32Next_Fn os::Kernel32Dll::_Module32Next = NULL;
GetNativeSystemInfo_Fn os::Kernel32Dll::_GetNativeSystemInfo = NULL;
// JDK6-or-earlier build: resolve this branch's optional entry points, then
// the common set shared with the JDK7+ build.
void os::Kernel32Dll::initialize() {
  if (!initialized) {
    HMODULE handle = ::GetModuleHandle("Kernel32.dll");
    assert(handle != NULL, "Just check");

    _SwitchToThread = (SwitchToThread_Fn)::GetProcAddress(handle, "SwitchToThread");
    _CreateToolhelp32Snapshot = (CreateToolhelp32Snapshot_Fn)
      ::GetProcAddress(handle, "CreateToolhelp32Snapshot");
    _Module32First = (Module32First_Fn)::GetProcAddress(handle, "Module32First");
    _Module32Next = (Module32Next_Fn)::GetProcAddress(handle, "Module32Next");
    _GetNativeSystemInfo = (GetNativeSystemInfo_Fn)::GetProcAddress(handle, "GetNativeSystemInfo");
    initializeCommon();  // resolve the functions that always need resolving

    initialized = TRUE;
  }
}
// Forward to the resolved entry point; callers must first have checked
// SwitchToThreadAvailable().
BOOL os::Kernel32Dll::SwitchToThread() {
  assert(initialized && _SwitchToThread != NULL,
    "SwitchToThreadAvailable() not yet called");
  return _SwitchToThread();
}
// Lazily resolve, then report whether SwitchToThread() exists on this OS.
BOOL os::Kernel32Dll::SwitchToThreadAvailable() {
  if (!initialized) {
    initialize();
  }
  return _SwitchToThread != NULL;
}
// Help tools
// All three Toolhelp32 entry points must be present for snapshot walking.
BOOL os::Kernel32Dll::HelpToolsAvailable() {
  if (!initialized) {
    initialize();
  }
  return _CreateToolhelp32Snapshot != NULL &&
         _Module32First != NULL &&
         _Module32Next != NULL;
}
// Forward to the resolved entry point; callers must first have checked
// HelpToolsAvailable().
HANDLE os::Kernel32Dll::CreateToolhelp32Snapshot(DWORD dwFlags,DWORD th32ProcessId) {
  assert(initialized && _CreateToolhelp32Snapshot != NULL,
    "HelpToolsAvailable() not yet called");

  return _CreateToolhelp32Snapshot(dwFlags, th32ProcessId);
}
// Forward to the resolved entry point; callers must first have checked
// HelpToolsAvailable().
BOOL os::Kernel32Dll::Module32First(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
  assert(initialized && _Module32First != NULL,
    "HelpToolsAvailable() not yet called");

  return _Module32First(hSnapshot, lpme);
}
5581 inline BOOL os::Kernel32Dll::Module32Next(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
5582 assert(initialized && _Module32Next != NULL,
5583 "HelpToolsAvailable() not yet called");
5585 return _Module32Next(hSnapshot, lpme);
5586 }
// Lazily resolve, then report whether GetNativeSystemInfo() exists on this OS.
BOOL os::Kernel32Dll::GetNativeSystemInfoAvailable() {
  if (!initialized) {
    initialize();
  }
  return _GetNativeSystemInfo != NULL;
}
// Forward to the resolved entry point; callers must first have checked
// GetNativeSystemInfoAvailable().
void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo) {
  assert(initialized && _GetNativeSystemInfo != NULL,
    "GetNativeSystemInfoAvailable() not yet called");

  _GetNativeSystemInfo(lpSystemInfo);
}
5603 // PSAPI API
5606 typedef BOOL (WINAPI *EnumProcessModules_Fn)(HANDLE, HMODULE *, DWORD, LPDWORD);
5607 typedef BOOL (WINAPI *GetModuleFileNameEx_Fn)(HANDLE, HMODULE, LPTSTR, DWORD);;
5608 typedef BOOL (WINAPI *GetModuleInformation_Fn)(HANDLE, HMODULE, LPMODULEINFO, DWORD);
5610 EnumProcessModules_Fn os::PSApiDll::_EnumProcessModules = NULL;
5611 GetModuleFileNameEx_Fn os::PSApiDll::_GetModuleFileNameEx = NULL;
5612 GetModuleInformation_Fn os::PSApiDll::_GetModuleInformation = NULL;
5613 BOOL os::PSApiDll::initialized = FALSE;
5615 void os::PSApiDll::initialize() {
5616 if (!initialized) {
5617 HMODULE handle = os::win32::load_Windows_dll("PSAPI.DLL", NULL, 0);
5618 if (handle != NULL) {
5619 _EnumProcessModules = (EnumProcessModules_Fn)::GetProcAddress(handle,
5620 "EnumProcessModules");
5621 _GetModuleFileNameEx = (GetModuleFileNameEx_Fn)::GetProcAddress(handle,
5622 "GetModuleFileNameExA");
5623 _GetModuleInformation = (GetModuleInformation_Fn)::GetProcAddress(handle,
5624 "GetModuleInformation");
5625 }
5626 initialized = TRUE;
5627 }
5628 }
5632 BOOL os::PSApiDll::EnumProcessModules(HANDLE hProcess, HMODULE *lpModule, DWORD cb, LPDWORD lpcbNeeded) {
5633 assert(initialized && _EnumProcessModules != NULL,
5634 "PSApiAvailable() not yet called");
5635 return _EnumProcessModules(hProcess, lpModule, cb, lpcbNeeded);
5636 }
5638 DWORD os::PSApiDll::GetModuleFileNameEx(HANDLE hProcess, HMODULE hModule, LPTSTR lpFilename, DWORD nSize) {
5639 assert(initialized && _GetModuleFileNameEx != NULL,
5640 "PSApiAvailable() not yet called");
5641 return _GetModuleFileNameEx(hProcess, hModule, lpFilename, nSize);
5642 }
5644 BOOL os::PSApiDll::GetModuleInformation(HANDLE hProcess, HMODULE hModule, LPMODULEINFO lpmodinfo, DWORD cb) {
5645 assert(initialized && _GetModuleInformation != NULL,
5646 "PSApiAvailable() not yet called");
5647 return _GetModuleInformation(hProcess, hModule, lpmodinfo, cb);
5648 }
5650 BOOL os::PSApiDll::PSApiAvailable() {
5651 if (!initialized) {
5652 initialize();
5653 }
5654 return _EnumProcessModules != NULL &&
5655 _GetModuleFileNameEx != NULL &&
5656 _GetModuleInformation != NULL;
5657 }
// WinSock2 API

// Signatures of the ws2_32.dll entry points resolved at runtime.
typedef int (PASCAL FAR* WSAStartup_Fn)(WORD, LPWSADATA);
// NOTE(review): declared variadic '(...)' rather than '(const char*)';
// presumably to avoid pulling winsock headers in here -- confirm before
// tightening the signature.
typedef struct hostent *(PASCAL FAR *gethostbyname_Fn)(...);

// Filled in by WinSock2Dll::initialize(); each stays NULL if ws2_32.dll
// or the corresponding export is missing.
WSAStartup_Fn os::WinSock2Dll::_WSAStartup = NULL;
gethostbyname_Fn os::WinSock2Dll::_gethostbyname = NULL;
BOOL os::WinSock2Dll::initialized = FALSE;
5668 void os::WinSock2Dll::initialize() {
5669 if (!initialized) {
5670 HMODULE handle = os::win32::load_Windows_dll("ws2_32.dll", NULL, 0);
5671 if (handle != NULL) {
5672 _WSAStartup = (WSAStartup_Fn)::GetProcAddress(handle, "WSAStartup");
5673 _gethostbyname = (gethostbyname_Fn)::GetProcAddress(handle, "gethostbyname");
5674 }
5675 initialized = TRUE;
5676 }
5677 }
5680 BOOL os::WinSock2Dll::WSAStartup(WORD wVersionRequested, LPWSADATA lpWSAData) {
5681 assert(initialized && _WSAStartup != NULL,
5682 "WinSock2Available() not yet called");
5683 return _WSAStartup(wVersionRequested, lpWSAData);
5684 }
5686 struct hostent* os::WinSock2Dll::gethostbyname(const char *name) {
5687 assert(initialized && _gethostbyname != NULL,
5688 "WinSock2Available() not yet called");
5689 return _gethostbyname(name);
5690 }
5692 BOOL os::WinSock2Dll::WinSock2Available() {
5693 if (!initialized) {
5694 initialize();
5695 }
5696 return _WSAStartup != NULL &&
5697 _gethostbyname != NULL;
5698 }
// Signatures of the advapi32.dll entry points resolved at runtime.
typedef BOOL (WINAPI *AdjustTokenPrivileges_Fn)(HANDLE, BOOL, PTOKEN_PRIVILEGES, DWORD, PTOKEN_PRIVILEGES, PDWORD);
typedef BOOL (WINAPI *OpenProcessToken_Fn)(HANDLE, DWORD, PHANDLE);
typedef BOOL (WINAPI *LookupPrivilegeValue_Fn)(LPCTSTR, LPCTSTR, PLUID);

// Filled in by Advapi32Dll::initialize(); each stays NULL if advapi32.dll
// or the corresponding export is missing.
AdjustTokenPrivileges_Fn os::Advapi32Dll::_AdjustTokenPrivileges = NULL;
OpenProcessToken_Fn os::Advapi32Dll::_OpenProcessToken = NULL;
LookupPrivilegeValue_Fn os::Advapi32Dll::_LookupPrivilegeValue = NULL;
BOOL os::Advapi32Dll::initialized = FALSE;
5709 void os::Advapi32Dll::initialize() {
5710 if (!initialized) {
5711 HMODULE handle = os::win32::load_Windows_dll("advapi32.dll", NULL, 0);
5712 if (handle != NULL) {
5713 _AdjustTokenPrivileges = (AdjustTokenPrivileges_Fn)::GetProcAddress(handle,
5714 "AdjustTokenPrivileges");
5715 _OpenProcessToken = (OpenProcessToken_Fn)::GetProcAddress(handle,
5716 "OpenProcessToken");
5717 _LookupPrivilegeValue = (LookupPrivilegeValue_Fn)::GetProcAddress(handle,
5718 "LookupPrivilegeValueA");
5719 }
5720 initialized = TRUE;
5721 }
5722 }
5724 BOOL os::Advapi32Dll::AdjustTokenPrivileges(HANDLE TokenHandle,
5725 BOOL DisableAllPrivileges, PTOKEN_PRIVILEGES NewState, DWORD BufferLength,
5726 PTOKEN_PRIVILEGES PreviousState, PDWORD ReturnLength) {
5727 assert(initialized && _AdjustTokenPrivileges != NULL,
5728 "AdvapiAvailable() not yet called");
5729 return _AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState,
5730 BufferLength, PreviousState, ReturnLength);
5731 }
5733 BOOL os::Advapi32Dll::OpenProcessToken(HANDLE ProcessHandle, DWORD DesiredAccess,
5734 PHANDLE TokenHandle) {
5735 assert(initialized && _OpenProcessToken != NULL,
5736 "AdvapiAvailable() not yet called");
5737 return _OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle);
5738 }
5740 BOOL os::Advapi32Dll::LookupPrivilegeValue(LPCTSTR lpSystemName, LPCTSTR lpName, PLUID lpLuid) {
5741 assert(initialized && _LookupPrivilegeValue != NULL,
5742 "AdvapiAvailable() not yet called");
5743 return _LookupPrivilegeValue(lpSystemName, lpName, lpLuid);
5744 }
5746 BOOL os::Advapi32Dll::AdvapiAvailable() {
5747 if (!initialized) {
5748 initialize();
5749 }
5750 return _AdjustTokenPrivileges != NULL &&
5751 _OpenProcessToken != NULL &&
5752 _LookupPrivilegeValue != NULL;
5753 }
5755 #endif
5757 #ifndef PRODUCT
5759 // test the code path in reserve_memory_special() that tries to allocate memory in a single
5760 // contiguous memory block at a particular address.
5761 // The test first tries to find a good approximate address to allocate at by using the same
5762 // method to allocate some memory at any address. The test then tries to allocate memory in
5763 // the vicinity (not directly after it to avoid possible by-chance use of that location)
5764 // This is of course only some dodgy assumption, there is no guarantee that the vicinity of
5765 // the previously allocated memory is available for allocation. The only actual failure
5766 // that is reported is when the test tries to allocate at a particular location but gets a
5767 // different valid one. A NULL return value at this point is not considered an error but may
5768 // be legitimate.
5769 // If -XX:+VerboseInternalVMTests is enabled, print some explanatory messages.
5770 void TestReserveMemorySpecial_test() {
5771 if (!UseLargePages) {
5772 if (VerboseInternalVMTests) {
5773 gclog_or_tty->print("Skipping test because large pages are disabled");
5774 }
5775 return;
5776 }
5777 // save current value of globals
5778 bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation;
5779 bool old_use_numa_interleaving = UseNUMAInterleaving;
5781 // set globals to make sure we hit the correct code path
5782 UseLargePagesIndividualAllocation = UseNUMAInterleaving = false;
5784 // do an allocation at an address selected by the OS to get a good one.
5785 const size_t large_allocation_size = os::large_page_size() * 4;
5786 char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false);
5787 if (result == NULL) {
5788 if (VerboseInternalVMTests) {
5789 gclog_or_tty->print("Failed to allocate control block with size " SIZE_FORMAT ". Skipping remainder of test.",
5790 large_allocation_size);
5791 }
5792 } else {
5793 os::release_memory_special(result, large_allocation_size);
5795 // allocate another page within the recently allocated memory area which seems to be a good location. At least
5796 // we managed to get it once.
5797 const size_t expected_allocation_size = os::large_page_size();
5798 char* expected_location = result + os::large_page_size();
5799 char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false);
5800 if (actual_location == NULL) {
5801 if (VerboseInternalVMTests) {
5802 gclog_or_tty->print("Failed to allocate any memory at " PTR_FORMAT " size " SIZE_FORMAT ". Skipping remainder of test.",
5803 expected_location, large_allocation_size);
5804 }
5805 } else {
5806 // release memory
5807 os::release_memory_special(actual_location, expected_allocation_size);
5808 // only now check, after releasing any memory to avoid any leaks.
5809 assert(actual_location == expected_location,
5810 err_msg("Failed to allocate memory at requested location " PTR_FORMAT " of size " SIZE_FORMAT ", is " PTR_FORMAT " instead",
5811 expected_location, expected_allocation_size, actual_location));
5812 }
5813 }
5815 // restore globals
5816 UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation;
5817 UseNUMAInterleaving = old_use_numa_interleaving;
5818 }
5819 #endif // PRODUCT