Thu, 14 Jun 2018 09:15:08 -0700
8081202: Hotspot compile warning: "Invalid suffix on literal; C++11 requires a space between literal and identifier"
Summary: Need to add a space between macro identifier and string literal
Reviewed-by: bpittore, stefank, dholmes, kbarrett
1 /*
2 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 // Must be at least Windows 2000 or XP to use IsDebuggerPresent
26 #define _WIN32_WINNT 0x500
28 // no precompiled headers
29 #include "classfile/classLoader.hpp"
30 #include "classfile/systemDictionary.hpp"
31 #include "classfile/vmSymbols.hpp"
32 #include "code/icBuffer.hpp"
33 #include "code/vtableStubs.hpp"
34 #include "compiler/compileBroker.hpp"
35 #include "compiler/disassembler.hpp"
36 #include "interpreter/interpreter.hpp"
37 #include "jvm_windows.h"
38 #include "memory/allocation.inline.hpp"
39 #include "memory/filemap.hpp"
40 #include "mutex_windows.inline.hpp"
41 #include "oops/oop.inline.hpp"
42 #include "os_share_windows.hpp"
43 #include "prims/jniFastGetField.hpp"
44 #include "prims/jvm.h"
45 #include "prims/jvm_misc.hpp"
46 #include "runtime/arguments.hpp"
47 #include "runtime/extendedPC.hpp"
48 #include "runtime/globals.hpp"
49 #include "runtime/interfaceSupport.hpp"
50 #include "runtime/java.hpp"
51 #include "runtime/javaCalls.hpp"
52 #include "runtime/mutexLocker.hpp"
53 #include "runtime/objectMonitor.hpp"
54 #include "runtime/orderAccess.inline.hpp"
55 #include "runtime/osThread.hpp"
56 #include "runtime/perfMemory.hpp"
57 #include "runtime/sharedRuntime.hpp"
58 #include "runtime/statSampler.hpp"
59 #include "runtime/stubRoutines.hpp"
60 #include "runtime/thread.inline.hpp"
61 #include "runtime/threadCritical.hpp"
62 #include "runtime/timer.hpp"
63 #include "services/attachListener.hpp"
64 #include "services/memTracker.hpp"
65 #include "services/runtimeService.hpp"
66 #include "utilities/decoder.hpp"
67 #include "utilities/defaultStream.hpp"
68 #include "utilities/events.hpp"
69 #include "utilities/growableArray.hpp"
70 #include "utilities/vmError.hpp"
72 #ifdef _DEBUG
73 #include <crtdbg.h>
74 #endif
77 #include <windows.h>
78 #include <sys/types.h>
79 #include <sys/stat.h>
80 #include <sys/timeb.h>
81 #include <objidl.h>
82 #include <shlobj.h>
84 #include <malloc.h>
85 #include <signal.h>
86 #include <direct.h>
87 #include <errno.h>
88 #include <fcntl.h>
89 #include <io.h>
90 #include <process.h> // For _beginthreadex(), _endthreadex()
91 #include <imagehlp.h> // For os::dll_address_to_function_name
92 /* for enumerating dll libraries */
93 #include <vdmdbg.h>
95 // for timer info max values which include all bits
96 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
98 // For DLL loading/load error detection
99 // Values of PE COFF
100 #define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c
101 #define IMAGE_FILE_SIGNATURE_LENGTH 4
103 static HANDLE main_process;
104 static HANDLE main_thread;
105 static int main_thread_id;
107 static FILETIME process_creation_time;
108 static FILETIME process_exit_time;
109 static FILETIME process_user_time;
110 static FILETIME process_kernel_time;
112 #ifdef _M_IA64
113 #define __CPU__ ia64
114 #else
115 #ifdef _M_AMD64
116 #define __CPU__ amd64
117 #else
118 #define __CPU__ i486
119 #endif
120 #endif
122 // save DLL module handle, used by GetModuleFileName
124 HINSTANCE vm_lib_handle;
126 BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
127 switch (reason) {
128 case DLL_PROCESS_ATTACH:
129 vm_lib_handle = hinst;
130 if(ForceTimeHighResolution)
131 timeBeginPeriod(1L);
132 break;
133 case DLL_PROCESS_DETACH:
134 if(ForceTimeHighResolution)
135 timeEndPeriod(1L);
137 break;
138 default:
139 break;
140 }
141 return true;
142 }
144 static inline double fileTimeAsDouble(FILETIME* time) {
145 const double high = (double) ((unsigned int) ~0);
146 const double split = 10000000.0;
147 double result = (time->dwLowDateTime / split) +
148 time->dwHighDateTime * (high/split);
149 return result;
150 }
152 // Implementation of os
154 bool os::getenv(const char* name, char* buffer, int len) {
155 int result = GetEnvironmentVariable(name, buffer, len);
156 return result > 0 && result < len;
157 }
159 bool os::unsetenv(const char* name) {
160 assert(name != NULL, "Null pointer");
161 return (SetEnvironmentVariable(name, NULL) == TRUE);
162 }
// No setuid programs under Windows, so the VM can never be running
// with privileges acquired that way.
bool os::have_special_privileges() {
  return false;
}
170 // This method is a periodic task to check for misbehaving JNI applications
171 // under CheckJNI, we can add any periodic checks here.
172 // For Windows at the moment does nothing
173 void os::run_periodic_checks() {
174 return;
175 }
177 #ifndef _WIN64
178 // previous UnhandledExceptionFilter, if there is one
179 static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;
181 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);
182 #endif
// Compute and publish the path-related system properties: java.home,
// the DLL directory, java.library.path, the extensions directories and
// the endorsed-standards directory. Also installs the VM's
// UnhandledExceptionFilter on 32-bit Windows. Strings handed to
// Arguments::set_* are allocated from the C heap.
void os::init_system_properties_values() {
  /* sysclasspath, java_home, dll_dir */
  {
    char *home_path;
    char *dll_path;
    char *pslash;
    char *bin = "\\bin";
    char home_dir[MAX_PATH];

    // _ALT_JAVA_HOME_DIR overrides the home derived from jvm.dll's path.
    if (!getenv("_ALT_JAVA_HOME_DIR", home_dir, MAX_PATH)) {
      os::jvm_path(home_dir, sizeof(home_dir));
      // Found the full path to jvm.dll.
      // Now cut the path to <java_home>/jre if we can.
      *(strrchr(home_dir, '\\')) = '\0'; /* get rid of \jvm.dll */
      pslash = strrchr(home_dir, '\\');
      if (pslash != NULL) {
        *pslash = '\0'; /* get rid of \{client|server} */
        pslash = strrchr(home_dir, '\\');
        if (pslash != NULL)
          *pslash = '\0'; /* get rid of \bin */
      }
    }

    home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
    if (home_path == NULL)
      return;
    strcpy(home_path, home_dir);
    Arguments::set_java_home(home_path);

    dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1, mtInternal);
    if (dll_path == NULL)
      return;
    strcpy(dll_path, home_dir);
    strcat(dll_path, bin);
    Arguments::set_dll_dir(dll_path);

    if (!set_boot_path('\\', ';'))
      return;
  }

  /* library_path */
#define EXT_DIR "\\lib\\ext"
#define BIN_DIR "\\bin"
#define PACKAGE_DIR "\\Sun\\Java"
  {
    /* Win32 library search order (See the documentation for LoadLibrary):
     *
     * 1. The directory from which application is loaded.
     * 2. The system wide Java Extensions directory (Java only)
     * 3. System directory (GetSystemDirectory)
     * 4. Windows directory (GetWindowsDirectory)
     * 5. The PATH environment variable
     * 6. The current directory
     */

    char *library_path;
    char tmp[MAX_PATH];
    char *path_str = ::getenv("PATH");

    // Worst-case capacity: five MAX_PATH components, the fixed suffixes,
    // the user PATH, plus separators and terminator slack.
    library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
        sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal);

    library_path[0] = '\0';

    // 1. Directory containing the launcher executable.
    GetModuleFileName(NULL, tmp, sizeof(tmp));
    *(strrchr(tmp, '\\')) = '\0';
    strcat(library_path, tmp);

    // 2. System-wide Java Extensions directory under the Windows dir.
    GetWindowsDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);
    strcat(library_path, PACKAGE_DIR BIN_DIR);

    // 3. System directory.
    GetSystemDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);

    // 4. Windows directory.
    GetWindowsDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);

    // 5. User PATH, if any.
    if (path_str) {
      strcat(library_path, ";");
      strcat(library_path, path_str);
    }

    // 6. Current directory.
    strcat(library_path, ";.");

    Arguments::set_library_path(library_path);
    FREE_C_HEAP_ARRAY(char, library_path, mtInternal);
  }

  /* Default extensions directory */
  {
    char path[MAX_PATH];
    char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1];
    GetWindowsDirectory(path, MAX_PATH);
    sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR,
        path, PACKAGE_DIR, EXT_DIR);
    Arguments::set_ext_dirs(buf);
  }
#undef EXT_DIR
#undef BIN_DIR
#undef PACKAGE_DIR

  /* Default endorsed standards directory. */
  {
#define ENDORSED_DIR "\\lib\\endorsed"
    // sizeof(ENDORSED_DIR) includes the NUL, so no extra +1 is needed.
    size_t len = strlen(Arguments::get_java_home()) + sizeof(ENDORSED_DIR);
    char * buf = NEW_C_HEAP_ARRAY(char, len, mtInternal);
    sprintf(buf, "%s%s", Arguments::get_java_home(), ENDORSED_DIR);
    Arguments::set_endorsed_dirs(buf);
#undef ENDORSED_DIR
  }

#ifndef _WIN64
  // set our UnhandledExceptionFilter and save any previous one
  prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
#endif

  // Done
  return;
}
// Trigger a debugger breakpoint trap in the current process.
void os::breakpoint() {
  DebugBreak();
}
// Invoked from the BREAKPOINT macro: C-linkage entry point that
// simply forwards to os::breakpoint().
extern "C" void breakpoint() {
  os::breakpoint();
}
316 /*
317 * RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP.
318 * So far, this method is only used by Native Memory Tracking, which is
319 * only supported on Windows XP or later.
320 */
322 int os::get_native_stack(address* stack, int frames, int toSkip) {
323 #ifdef _NMT_NOINLINE_
324 toSkip ++;
325 #endif
326 int captured = Kernel32Dll::RtlCaptureStackBackTrace(toSkip + 1, frames,
327 (PVOID*)stack, NULL);
328 for (int index = captured; index < frames; index ++) {
329 stack[index] = NULL;
330 }
331 return captured;
332 }
// os::current_stack_base()
//
// Returns the base of the stack, which is the stack's
// starting address. This function must be called
// while running on the stack of the thread being queried.
address os::current_stack_base() {
  MEMORY_BASIC_INFORMATION minfo;
  address stack_bottom;
  size_t stack_size;

  // Query the region containing a stack local (&minfo itself);
  // AllocationBase is the lowest address of the stack reservation.
  VirtualQuery(&minfo, &minfo, sizeof(minfo));
  stack_bottom = (address)minfo.AllocationBase;
  stack_size = minfo.RegionSize;

  // Add up the sizes of all the regions with the same
  // AllocationBase.
  while( 1 )
  {
    VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo));
    if ( stack_bottom == (address)minfo.AllocationBase )
      stack_size += minfo.RegionSize;
    else
      break;
  }

#ifdef _M_IA64
  // IA64 has memory and register stacks
  //
  // This is the stack layout you get on NT/IA64 if you specify 1MB stack limit
  // at thread creation (1MB backing store growing upwards, 1MB memory stack
  // growing downwards, 2MB summed up)
  //
  // ...
  // ------- top of stack (high address) -----
  // |
  // |      1MB
  // |      Backing Store (Register Stack)
  // |
  // |         / \
  // |          |
  // |          |
  // |          |
  // ------------------------ stack base -----
  // |      1MB
  // |      Memory Stack
  // |
  // |          |
  // |          |
  // |          |
  // |         \ /
  // |
  // ----- bottom of stack (low address) -----
  // ...

  // Only the lower (memory stack) half counts as "the stack" here.
  stack_size = stack_size / 2;
#endif
  return stack_bottom + stack_size;
}
395 size_t os::current_stack_size() {
396 size_t sz;
397 MEMORY_BASIC_INFORMATION minfo;
398 VirtualQuery(&minfo, &minfo, sizeof(minfo));
399 sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase;
400 return sz;
401 }
403 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
404 const struct tm* time_struct_ptr = localtime(clock);
405 if (time_struct_ptr != NULL) {
406 *res = *time_struct_ptr;
407 return res;
408 }
409 return NULL;
410 }
412 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
// Thread start routine for all new Java threads
static unsigned __stdcall java_start(Thread* thread) {
  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.
  static int counter = 0;
  int pid = os::current_process_id();
  // Shift this frame by a pseudo-random multiple of 128 bytes.
  _alloca(((pid ^ counter++) & 7) * 128);

  OSThread* osthr = thread->osthread();
  assert(osthr->get_state() == RUNNABLE, "invalid os thread state");

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // Install a win32 structured exception handler around every thread created
  // by VM, so VM can generate error dump when an exception occurred in non-
  // Java thread (e.g. VM thread).
  __try {
    thread->run();
  } __except(topLevelExceptionFilter(
      (_EXCEPTION_POINTERS*)_exception_info())) {
    // Nothing to do.
  }

  // One less thread is executing
  // When the VMThread gets here, the main thread may have already exited
  // which frees the CodeHeap containing the Atomic::add code
  if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
    Atomic::dec_ptr((intptr_t*)&os::win32::_os_thread_count);
  }

  return 0;
}
456 static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle, int thread_id) {
457 // Allocate the OSThread object
458 OSThread* osthread = new OSThread(NULL, NULL);
459 if (osthread == NULL) return NULL;
461 // Initialize support for Java interrupts
462 HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
463 if (interrupt_event == NULL) {
464 delete osthread;
465 return NULL;
466 }
467 osthread->set_interrupt_event(interrupt_event);
469 // Store info on the Win32 thread into the OSThread
470 osthread->set_thread_handle(thread_handle);
471 osthread->set_thread_id(thread_id);
473 if (UseNUMA) {
474 int lgrp_id = os::numa_get_group_id();
475 if (lgrp_id != -1) {
476 thread->set_lgrp_id(lgrp_id);
477 }
478 }
480 // Initial thread state is INITIALIZED, not SUSPENDED
481 osthread->set_state(INITIALIZED);
483 return osthread;
484 }
// Attach an externally created thread to the VM: duplicate a real
// handle for the current thread (GetCurrentThread() returns only a
// pseudo-handle) and wrap it in an OSThread in state RUNNABLE.
// Returns false if the OSThread could not be created.
bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif
  HANDLE thread_h;
  if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(),
                       &thread_h, THREAD_ALL_ACCESS, false, 0)) {
    fatal("DuplicateHandle failed\n");
  }
  OSThread* osthread = create_os_thread(thread, thread_h,
                                        (int)current_thread_id());
  if (osthread == NULL) {
    return false;
  }

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);

  thread->set_osthread(osthread);
  return true;
}
// Wrap the primordial (main) thread in an OSThread, reusing
// _starting_thread if it was already created. Returns false only if
// OSThread creation fails.
bool os::create_main_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif
  if (_starting_thread == NULL) {
    _starting_thread = create_os_thread(thread, main_thread, main_thread_id);
    if (_starting_thread == NULL) {
      return false;
    }
  }

  // The primordial thread is runnable from the start)
  _starting_thread->set_state(RUNNABLE);

  thread->set_osthread(_starting_thread);
  return true;
}
527 // Allocate and initialize a new OSThread
528 bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
529 unsigned thread_id;
531 // Allocate the OSThread object
532 OSThread* osthread = new OSThread(NULL, NULL);
533 if (osthread == NULL) {
534 return false;
535 }
537 // Initialize support for Java interrupts
538 HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
539 if (interrupt_event == NULL) {
540 delete osthread;
541 return NULL;
542 }
543 osthread->set_interrupt_event(interrupt_event);
544 osthread->set_interrupted(false);
546 thread->set_osthread(osthread);
548 if (stack_size == 0) {
549 switch (thr_type) {
550 case os::java_thread:
551 // Java threads use ThreadStackSize which default value can be changed with the flag -Xss
552 if (JavaThread::stack_size_at_create() > 0)
553 stack_size = JavaThread::stack_size_at_create();
554 break;
555 case os::compiler_thread:
556 if (CompilerThreadStackSize > 0) {
557 stack_size = (size_t)(CompilerThreadStackSize * K);
558 break;
559 } // else fall through:
560 // use VMThreadStackSize if CompilerThreadStackSize is not defined
561 case os::vm_thread:
562 case os::pgc_thread:
563 case os::cgc_thread:
564 case os::watcher_thread:
565 if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
566 break;
567 }
568 }
570 // Create the Win32 thread
571 //
572 // Contrary to what MSDN document says, "stack_size" in _beginthreadex()
573 // does not specify stack size. Instead, it specifies the size of
574 // initially committed space. The stack size is determined by
575 // PE header in the executable. If the committed "stack_size" is larger
576 // than default value in the PE header, the stack is rounded up to the
577 // nearest multiple of 1MB. For example if the launcher has default
578 // stack size of 320k, specifying any size less than 320k does not
579 // affect the actual stack size at all, it only affects the initial
580 // commitment. On the other hand, specifying 'stack_size' larger than
581 // default value may cause significant increase in memory usage, because
582 // not only the stack space will be rounded up to MB, but also the
583 // entire space is committed upfront.
584 //
585 // Finally Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION'
586 // for CreateThread() that can treat 'stack_size' as stack size. However we
587 // are not supposed to call CreateThread() directly according to MSDN
588 // document because JVM uses C runtime library. The good news is that the
589 // flag appears to work with _beginthredex() as well.
591 #ifndef STACK_SIZE_PARAM_IS_A_RESERVATION
592 #define STACK_SIZE_PARAM_IS_A_RESERVATION (0x10000)
593 #endif
595 HANDLE thread_handle =
596 (HANDLE)_beginthreadex(NULL,
597 (unsigned)stack_size,
598 (unsigned (__stdcall *)(void*)) java_start,
599 thread,
600 CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION,
601 &thread_id);
602 if (thread_handle == NULL) {
603 // perhaps STACK_SIZE_PARAM_IS_A_RESERVATION is not supported, try again
604 // without the flag.
605 thread_handle =
606 (HANDLE)_beginthreadex(NULL,
607 (unsigned)stack_size,
608 (unsigned (__stdcall *)(void*)) java_start,
609 thread,
610 CREATE_SUSPENDED,
611 &thread_id);
612 }
613 if (thread_handle == NULL) {
614 // Need to clean up stuff we've allocated so far
615 CloseHandle(osthread->interrupt_event());
616 thread->set_osthread(NULL);
617 delete osthread;
618 return NULL;
619 }
621 Atomic::inc_ptr((intptr_t*)&os::win32::_os_thread_count);
623 // Store info on the Win32 thread into the OSThread
624 osthread->set_thread_handle(thread_handle);
625 osthread->set_thread_id(thread_id);
627 // Initial thread state is INITIALIZED, not SUSPENDED
628 osthread->set_state(INITIALIZED);
630 // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
631 return true;
632 }
// Free Win32 resources related to the OSThread: both kernel handles
// (thread handle and interrupt event) and the OSThread object itself.
void os::free_thread(OSThread* osthread) {
  assert(osthread != NULL, "osthread not set");
  CloseHandle(osthread->thread_handle());
  CloseHandle(osthread->interrupt_event());
  delete osthread;
}
644 static int has_performance_count = 0;
645 static jlong first_filetime;
646 static jlong initial_performance_count;
647 static jlong performance_frequency;
// Assemble a jlong from the high and low 32-bit halves of a
// LARGE_INTEGER.
jlong as_long(LARGE_INTEGER x) {
  jlong result = 0; // initialization to avoid warning
  set_high(&result, x.HighPart);
  set_low(&result, x.LowPart);
  return result;
}
// Current elapsed-time counter value in ticks: the performance
// counter relative to VM start when available, otherwise the raw
// wall-clock FILETIME delta since initialization.
jlong os::elapsed_counter() {
  LARGE_INTEGER count;
  if (has_performance_count) {
    QueryPerformanceCounter(&count);
    return as_long(count) - initial_performance_count;
  } else {
    FILETIME wt;
    GetSystemTimeAsFileTime(&wt);
    return (jlong_from(wt.dwHighDateTime, wt.dwLowDateTime) - first_filetime);
  }
}
// Ticks per second of the counter used by elapsed_counter().
jlong os::elapsed_frequency() {
  if (has_performance_count) {
    return performance_frequency;
  } else {
    // the FILETIME time is the number of 100-nanosecond intervals since January 1,1601.
    return 10000000;
  }
}
// Bytes of physical memory currently available, delegated to the
// win32 platform helper.
julong os::available_memory() {
  return win32::available_memory();
}
685 julong os::win32::available_memory() {
686 // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
687 // value if total memory is larger than 4GB
688 MEMORYSTATUSEX ms;
689 ms.dwLength = sizeof(ms);
690 GlobalMemoryStatusEx(&ms);
692 return (julong)ms.ullAvailPhys;
693 }
// Total bytes of physical memory, delegated to the win32 platform
// helper.
julong os::physical_memory() {
  return win32::physical_memory();
}
// Report the upper bound on allocatable virtual memory in *limit.
// Always returns true on Windows: available virtual address space is
// always bounded here.
bool os::has_allocatable_memory_limit(julong* limit) {
  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);
  GlobalMemoryStatusEx(&ms);
#ifdef _LP64
  *limit = (julong)ms.ullAvailVirtual;
  return true;
#else
  // Limit to 1400m because of the 2gb address space wall
  *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual);
  return true;
#endif
}
713 // VC6 lacks DWORD_PTR
714 #if _MSC_VER < 1300
715 typedef UINT_PTR DWORD_PTR;
716 #endif
718 int os::active_processor_count() {
719 DWORD_PTR lpProcessAffinityMask = 0;
720 DWORD_PTR lpSystemAffinityMask = 0;
721 int proc_count = processor_count();
722 if (proc_count <= sizeof(UINT_PTR) * BitsPerByte &&
723 GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) {
724 // Nof active processors is number of bits in process affinity mask
725 int bitcount = 0;
726 while (lpProcessAffinityMask != 0) {
727 lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1);
728 bitcount++;
729 }
730 return bitcount;
731 } else {
732 return proc_count;
733 }
734 }
// Set the OS-level name of the current thread.
void os::set_native_thread_name(const char *name) {
  // Not yet implemented.
  return;
}
// Distribute processes over processor groups; unsupported here.
bool os::distribute_processes(uint length, uint* distribution) {
  // Not yet implemented.
  return false;
}
// Bind the current thread to a specific processor; unsupported here.
bool os::bind_to_processor(uint processor_id) {
  // Not yet implemented.
  return false;
}
// Probe for a high-resolution performance counter. On success record
// its frequency and starting value; otherwise fall back to
// FILETIME-based timing and record the start wall-clock time.
static void initialize_performance_counter() {
  LARGE_INTEGER count;
  if (QueryPerformanceFrequency(&count)) {
    has_performance_count = 1;
    performance_frequency = as_long(count);
    QueryPerformanceCounter(&count);
    initial_performance_count = as_long(count);
  } else {
    has_performance_count = 0;
    FILETIME wt;
    GetSystemTimeAsFileTime(&wt);
    first_filetime = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
  }
}
// Elapsed time in seconds since counter initialization.
double os::elapsedTime() {
  return (double) elapsed_counter() / (double) elapsed_frequency();
}
772 // Windows format:
773 // The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601.
774 // Java format:
775 // Java standards require the number of milliseconds since 1/1/1970
777 // Constant offset - calculated using offset()
778 static jlong _offset = 116444736000000000;
779 // Fake time counter for reproducible results when debugging
780 static jlong fake_time = 0;
#ifdef ASSERT
// Just to be safe, recalculate the offset in debug mode
static jlong _calculated_offset = 0;
static int _has_calculated_offset = 0;

// Offset (in 100ns FILETIME units) between the Windows epoch
// (1601-01-01) and the Java epoch (1970-01-01). Debug builds compute
// it once via SystemTimeToFileTime and assert it matches the
// hard-coded constant _offset; product builds return the constant.
jlong offset() {
  if (_has_calculated_offset) return _calculated_offset;
  SYSTEMTIME java_origin;
  java_origin.wYear = 1970;
  java_origin.wMonth = 1;
  java_origin.wDayOfWeek = 0; // ignored
  java_origin.wDay = 1;
  java_origin.wHour = 0;
  java_origin.wMinute = 0;
  java_origin.wSecond = 0;
  java_origin.wMilliseconds = 0;
  FILETIME jot;
  if (!SystemTimeToFileTime(&java_origin, &jot)) {
    fatal(err_msg("Error = %d\nWindows error", GetLastError()));
  }
  _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
  _has_calculated_offset = 1;
  assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal");
  return _calculated_offset;
}
#else
jlong offset() {
  return _offset;
}
#endif
813 jlong windows_to_java_time(FILETIME wt) {
814 jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
815 return (a - offset()) / 10000;
816 }
818 FILETIME java_to_windows_time(jlong l) {
819 jlong a = (l * 10000) + offset();
820 FILETIME result;
821 result.dwHighDateTime = high(a);
822 result.dwLowDateTime = low(a);
823 return result;
824 }
// Virtual (thread CPU) time is supported via GetThreadTimes, but
// there is no separate enable step and it is never reported as
// "enabled".
bool os::supports_vtime() { return true; }
bool os::enable_vtime() { return false; }
bool os::vtime_enabled() { return false; }
830 double os::elapsedVTime() {
831 FILETIME created;
832 FILETIME exited;
833 FILETIME kernel;
834 FILETIME user;
835 if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) {
836 // the resolution of windows_to_java_time() should be sufficient (ms)
837 return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS;
838 } else {
839 return elapsedTime();
840 }
841 }
843 jlong os::javaTimeMillis() {
844 if (UseFakeTimers) {
845 return fake_time++;
846 } else {
847 FILETIME wt;
848 GetSystemTimeAsFileTime(&wt);
849 return windows_to_java_time(wt);
850 }
851 }
853 jlong os::javaTimeNanos() {
854 if (!has_performance_count) {
855 return javaTimeMillis() * NANOSECS_PER_MILLISEC; // the best we can do.
856 } else {
857 LARGE_INTEGER current_count;
858 QueryPerformanceCounter(¤t_count);
859 double current = as_long(current_count);
860 double freq = performance_frequency;
861 jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC);
862 return time;
863 }
864 }
// Fill in JVMTI timer characteristics for the javaTimeNanos() source:
// maximum representable value and whether the timer may skip.
void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  if (!has_performance_count) {
    // javaTimeMillis() doesn't have much precision,
    // but it is not going to wrap -- so all 64 bits
    info_ptr->max_value = ALL_64_BITS;

    // this is a wall clock timer, so may skip
    info_ptr->may_skip_backward = true;
    info_ptr->may_skip_forward = true;
  } else {
    jlong freq = performance_frequency;
    if (freq < NANOSECS_PER_SEC) {
      // the performance counter is 64 bits and we will
      // be multiplying it -- so no wrap in 64 bits
      info_ptr->max_value = ALL_64_BITS;
    } else if (freq > NANOSECS_PER_SEC) {
      // use the max value the counter can reach to
      // determine the max value which could be returned
      julong max_counter = (julong)ALL_64_BITS;
      info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC));
    } else {
      // the performance counter is 64 bits and we will
      // be using it directly -- so no wrap in 64 bits
      info_ptr->max_value = ALL_64_BITS;
    }

    // using a counter, so no skipping
    info_ptr->may_skip_backward = false;
    info_ptr->may_skip_forward = false;
  }
  info_ptr->kind = JVMTI_TIMER_ELAPSED;    // elapsed not CPU time
}
899 char* os::local_time_string(char *buf, size_t buflen) {
900 SYSTEMTIME st;
901 GetLocalTime(&st);
902 jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
903 st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond);
904 return buf;
905 }
907 bool os::getTimesSecs(double* process_real_time,
908 double* process_user_time,
909 double* process_system_time) {
910 HANDLE h_process = GetCurrentProcess();
911 FILETIME create_time, exit_time, kernel_time, user_time;
912 BOOL result = GetProcessTimes(h_process,
913 &create_time,
914 &exit_time,
915 &kernel_time,
916 &user_time);
917 if (result != 0) {
918 FILETIME wt;
919 GetSystemTimeAsFileTime(&wt);
920 jlong rtc_millis = windows_to_java_time(wt);
921 jlong user_millis = windows_to_java_time(user_time);
922 jlong system_millis = windows_to_java_time(kernel_time);
923 *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS);
924 *process_user_time = ((double) user_millis) / ((double) MILLIUNITS);
925 *process_system_time = ((double) system_millis) / ((double) MILLIUNITS);
926 return true;
927 } else {
928 return false;
929 }
930 }
// Orderly VM shutdown: release PerfMemory resources, flush and close
// log streams, then run any registered abort hook.
void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}
948 static BOOL (WINAPI *_MiniDumpWriteDump) ( HANDLE, DWORD, HANDLE, MINIDUMP_TYPE, PMINIDUMP_EXCEPTION_INFORMATION,
949 PMINIDUMP_USER_STREAM_INFORMATION, PMINIDUMP_CALLBACK_INFORMATION);
// Write a Windows minidump (hs_err_pid<pid>.mdmp in the current
// directory) for the given exception, when minidumps are enabled for
// this build/OS combination. 'buffer' receives a status message and
// is reported through VMError::report_coredump_status.
void os::check_or_create_dump(void* exceptionRecord, void* contextRecord, char* buffer, size_t bufferSize) {
  HINSTANCE dbghelp;
  EXCEPTION_POINTERS ep;
  MINIDUMP_EXCEPTION_INFORMATION mei;
  MINIDUMP_EXCEPTION_INFORMATION* pmei;

  HANDLE hProcess = GetCurrentProcess();
  DWORD processId = GetCurrentProcessId();
  HANDLE dumpFile;
  MINIDUMP_TYPE dumpType;
  static const char* cwd;

// Default is to always create dump for debug builds, on product builds only dump on server versions of Windows.
#ifndef ASSERT
  // If running on a client version of Windows and user has not explicitly enabled dumping
  if (!os::win32::is_windows_server() && !CreateMinidumpOnCrash) {
    VMError::report_coredump_status("Minidumps are not enabled by default on client versions of Windows", false);
    return;
    // If running on a server version of Windows and user has explicitly disabled dumping
  } else if (os::win32::is_windows_server() && !FLAG_IS_DEFAULT(CreateMinidumpOnCrash) && !CreateMinidumpOnCrash) {
    VMError::report_coredump_status("Minidump has been disabled from the command line", false);
    return;
  }
#else
  if (!FLAG_IS_DEFAULT(CreateMinidumpOnCrash) && !CreateMinidumpOnCrash) {
    VMError::report_coredump_status("Minidump has been disabled from the command line", false);
    return;
  }
#endif

  // MiniDumpWriteDump lives in dbghelp.dll; resolve it dynamically.
  dbghelp = os::win32::load_Windows_dll("DBGHELP.DLL", NULL, 0);

  if (dbghelp == NULL) {
    VMError::report_coredump_status("Failed to load dbghelp.dll", false);
    return;
  }

  _MiniDumpWriteDump = CAST_TO_FN_PTR(
      BOOL(WINAPI *)( HANDLE, DWORD, HANDLE, MINIDUMP_TYPE, PMINIDUMP_EXCEPTION_INFORMATION,
      PMINIDUMP_USER_STREAM_INFORMATION, PMINIDUMP_CALLBACK_INFORMATION),
      GetProcAddress(dbghelp, "MiniDumpWriteDump"));

  if (_MiniDumpWriteDump == NULL) {
    VMError::report_coredump_status("Failed to find MiniDumpWriteDump() in module dbghelp.dll", false);
    return;
  }

  dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData);

// Older versions of dbghelp.h doesn't contain all the dumptypes we want, dbghelp.h with
// API_VERSION_NUMBER 11 or higher contains the ones we want though
#if API_VERSION_NUMBER >= 11
  dumpType = (MINIDUMP_TYPE)(dumpType | MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo |
      MiniDumpWithUnloadedModules);
#endif

  cwd = get_current_directory(NULL, 0);
  jio_snprintf(buffer, bufferSize, "%s\\hs_err_pid%u.mdmp",cwd, current_process_id());
  dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);

  if (dumpFile == INVALID_HANDLE_VALUE) {
    VMError::report_coredump_status("Failed to create file for dumping", false);
    return;
  }
  // Include the faulting exception context in the dump when provided.
  if (exceptionRecord != NULL && contextRecord != NULL) {
    ep.ContextRecord = (PCONTEXT) contextRecord;
    ep.ExceptionRecord = (PEXCEPTION_RECORD) exceptionRecord;

    mei.ThreadId = GetCurrentThreadId();
    mei.ExceptionPointers = &ep;
    pmei = &mei;
  } else {
    pmei = NULL;
  }

  // Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all
  // the dump types we really want. If first call fails, lets fall back to just use MiniDumpWithFullMemory then.
  if (_MiniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) == false &&
      _MiniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL) == false) {
    DWORD error = GetLastError();
    LPTSTR msgbuf = NULL;

    if (FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
        FORMAT_MESSAGE_FROM_SYSTEM |
        FORMAT_MESSAGE_IGNORE_INSERTS,
        NULL, error, 0, (LPTSTR)&msgbuf, 0, NULL) != 0) {

      jio_snprintf(buffer, bufferSize, "Call to MiniDumpWriteDump() failed (Error 0x%x: %s)", error, msgbuf);
      LocalFree(msgbuf);
    } else {
      // Call to FormatMessage failed, just include the result from GetLastError
      jio_snprintf(buffer, bufferSize, "Call to MiniDumpWriteDump() failed (Error 0x%x)", error);
    }
    VMError::report_coredump_status(buffer, false);
  } else {
    VMError::report_coredump_status(buffer, true);
  }

  CloseHandle(dumpFile);
}
1055 void os::abort(bool dump_core)
1056 {
1057 os::shutdown();
1058 // no core dump on Windows
1059 ::exit(1);
1060 }
1062 // Die immediately, no exit hook, no abort hook, no cleanup.
1063 void os::die() {
1064 _exit(-1);
1065 }
1067 // Directory routines copied from src/win32/native/java/io/dirent_md.c
1068 // * dirent_md.c 1.15 00/02/02
1069 //
1070 // The declarations for DIR and struct dirent are in jvm_win32.h.
1072 /* Caller must have already run dirname through JVM_NativePath, which removes
1073 duplicate slashes and converts all instances of '/' into '\\'. */
// Open a directory stream for 'dirname' (Windows emulation of POSIX
// opendir(), adapted from JDK's dirent_md.c). The caller must already have
// run dirname through JVM_NativePath, so it contains only '\\' separators.
// Returns a heap-allocated DIR*, or NULL with errno set to ENOMEM
// (allocation failure), ENOENT (path not found), ENOTDIR (not a directory)
// or EACCES (FindFirstFile failed for a reason other than an empty
// directory).
DIR *
os::opendir(const char *dirname)
{
  assert(dirname != NULL, "just checking");      // hotspot change
  DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
  DWORD fattr;                                   // hotspot change
  char alt_dirname[4] = { 0, 0, 0, 0 };

  if (dirp == 0) {
    errno = ENOMEM;
    return 0;
  }

  /*
   * Win32 accepts "\" in its POSIX stat(), but refuses to treat it
   * as a directory in FindFirstFile(). We detect this case here and
   * prepend the current drive name.
   */
  if (dirname[1] == '\0' && dirname[0] == '\\') {
    alt_dirname[0] = _getdrive() + 'A' - 1;
    alt_dirname[1] = ':';
    alt_dirname[2] = '\\';
    alt_dirname[3] = '\0';
    dirname = alt_dirname;
  }

  // Copy the path; +5 leaves room for the "\\*.*" wildcard appended below.
  dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
  if (dirp->path == 0) {
    free(dirp, mtInternal);
    errno = ENOMEM;
    return 0;
  }
  strcpy(dirp->path, dirname);

  fattr = GetFileAttributes(dirp->path);
  if (fattr == 0xffffffff) {
    // 0xffffffff is INVALID_FILE_ATTRIBUTES: the path could not be queried.
    free(dirp->path, mtInternal);
    free(dirp, mtInternal);
    errno = ENOENT;
    return 0;
  } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
    free(dirp->path, mtInternal);
    free(dirp, mtInternal);
    errno = ENOTDIR;
    return 0;
  }

  /* Append "*.*", or possibly "\\*.*", to path */
  if (dirp->path[1] == ':'
      && (dirp->path[2] == '\0'
          || (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
    /* No '\\' needed for cases like "Z:" or "Z:\" */
    strcat(dirp->path, "*.*");
  } else {
    strcat(dirp->path, "\\*.*");
  }

  dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
  if (dirp->handle == INVALID_HANDLE_VALUE) {
    // ERROR_FILE_NOT_FOUND just means the directory is empty; any other
    // failure is reported to the caller as EACCES.
    if (GetLastError() != ERROR_FILE_NOT_FOUND) {
      free(dirp->path, mtInternal);
      free(dirp, mtInternal);
      errno = EACCES;
      return 0;
    }
  }
  return dirp;
}
/* parameter dbuf unused on Windows */

// Return the next entry of the directory stream, or NULL when the stream is
// exhausted. The returned dirent points into storage owned by 'dirp' and is
// overwritten by the next call. Sets errno to EBADF if the underlying find
// handle has become invalid.
struct dirent *
os::readdir(DIR *dirp, dirent *dbuf)
{
  assert(dirp != NULL, "just checking");      // hotspot change
  if (dirp->handle == INVALID_HANDLE_VALUE) {
    // Stream exhausted (or the directory was empty from the start).
    return 0;
  }

  // Hand out the entry that the previous FindFirstFile/FindNextFile fetched.
  strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);

  // Pre-fetch the next entry; on exhaustion close the handle so the next
  // call returns NULL.
  if (!FindNextFile(dirp->handle, &dirp->find_data)) {
    if (GetLastError() == ERROR_INVALID_HANDLE) {
      errno = EBADF;
      return 0;
    }
    FindClose(dirp->handle);
    dirp->handle = INVALID_HANDLE_VALUE;
  }

  return &dirp->dirent;
}
1168 int
1169 os::closedir(DIR *dirp)
1170 {
1171 assert(dirp != NULL, "just checking"); // hotspot change
1172 if (dirp->handle != INVALID_HANDLE_VALUE) {
1173 if (!FindClose(dirp->handle)) {
1174 errno = EBADF;
1175 return -1;
1176 }
1177 dirp->handle = INVALID_HANDLE_VALUE;
1178 }
1179 free(dirp->path, mtInternal);
1180 free(dirp, mtInternal);
1181 return 0;
1182 }
1184 // This must be hard coded because it's the system's temporary
1185 // directory not the java application's temp directory, ala java.io.tmpdir.
1186 const char* os::get_temp_directory() {
1187 static char path_buf[MAX_PATH];
1188 if (GetTempPath(MAX_PATH, path_buf)>0)
1189 return path_buf;
1190 else{
1191 path_buf[0]='\0';
1192 return path_buf;
1193 }
1194 }
1196 static bool file_exists(const char* filename) {
1197 if (filename == NULL || strlen(filename) == 0) {
1198 return false;
1199 }
1200 return GetFileAttributes(filename) != INVALID_FILE_ATTRIBUTES;
1201 }
// Build a full DLL path name into 'buffer' from a directory part 'pname'
// and a base name 'fname' (without the ".dll" suffix):
//   - empty pname:                      "fname.dll"
//   - pname ending in ':' or '\\':      "pnamefname.dll"
//   - pname containing the path separator: each element is tried in turn and
//     the first candidate that exists on disk is kept
//   - otherwise:                        "pname\\fname.dll"
// Returns true if buffer holds a usable name, false on potential overflow or
// when no candidate along a search path exists.
bool os::dll_build_name(char *buffer, size_t buflen,
                        const char* pname, const char* fname) {
  bool retval = false;
  const size_t pnamelen = pname ? strlen(pname) : 0;
  // Last character of the directory part decides how to join the pieces.
  const char c = (pnamelen > 0) ? pname[pnamelen-1] : 0;

  // Return error on buffer overflow.
  if (pnamelen + strlen(fname) + 10 > buflen) {
    return retval;
  }

  if (pnamelen == 0) {
    jio_snprintf(buffer, buflen, "%s.dll", fname);
    retval = true;
  } else if (c == ':' || c == '\\') {
    jio_snprintf(buffer, buflen, "%s%s.dll", pname, fname);
    retval = true;
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    // pname is a search path: probe each element for an existing file.
    int n;
    char** pelements = split_path(pname, &n);
    if (pelements == NULL) {
      return false;
    }
    for (int i = 0 ; i < n ; i++) {
      char* path = pelements[i];
      // Really shouldn't be NULL, but check can't hurt
      size_t plen = (path == NULL) ? 0 : strlen(path);
      if (plen == 0) {
        continue; // skip the empty path values
      }
      const char lastchar = path[plen - 1];
      if (lastchar == ':' || lastchar == '\\') {
        jio_snprintf(buffer, buflen, "%s%s.dll", path, fname);
      } else {
        jio_snprintf(buffer, buflen, "%s\\%s.dll", path, fname);
      }
      if (file_exists(buffer)) {
        retval = true;
        break;
      }
    }
    // release the storage
    for (int i = 0 ; i < n ; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
    }
  } else {
    jio_snprintf(buffer, buflen, "%s\\%s.dll", pname, fname);
    retval = true;
  }
  return retval;
}
1260 // Needs to be in os specific directory because windows requires another
1261 // header file <direct.h>
1262 const char* os::get_current_directory(char *buf, size_t buflen) {
1263 int n = static_cast<int>(buflen);
1264 if (buflen > INT_MAX) n = INT_MAX;
1265 return _getcwd(buf, n);
1266 }
1268 //-----------------------------------------------------------
1269 // Helper functions for fatal error handler
#ifdef _WIN64
// Helper routine which returns true if address in
// within the NTDLL address space.
// Uses PSAPI's GetModuleInformation to obtain ntdll's load base and image
// size; returns false if ntdll cannot be located or queried.
//
static bool _addr_in_ntdll( address addr )
{
  HMODULE hmod;
  MODULEINFO minfo;

  hmod = GetModuleHandle("NTDLL.DLL");
  if ( hmod == NULL ) return false;     // ntdll not mapped — cannot decide
  if ( !os::PSApiDll::GetModuleInformation( GetCurrentProcess(), hmod,
                                            &minfo, sizeof(MODULEINFO)) )
    return false;                       // PSAPI unavailable or query failed

  // In range iff base <= addr < base + image size.
  if ( (addr >= minfo.lpBaseOfDll) &&
       (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage)))
    return true;
  else
    return false;
}
#endif
// Enumerate all modules for a given process ID
//
// Notice that Windows 95/98/Me and Windows NT/2000/XP have
// different API for doing this. We use PSAPI.DLL on NT based
// Windows and ToolHelp on 95/98/Me.

// Callback function that is called by enumerate_modules() on
// every DLL module.
// Input parameters:
//    int       pid,
//    char*     module_file_name,
//    address   module_base_addr,
//    unsigned  module_size,
//    void*     param
// A non-zero return value stops the enumeration and is propagated back to
// the caller of enumerate_modules().
typedef int (*EnumModulesCallbackFunc)(int, char *, address, unsigned, void *);
// enumerate_modules for Windows NT, using PSAPI
// Invokes 'func' for each loaded module of process 'pid' until the callback
// returns non-zero; that value (or 0) is returned. Returns 0 if PSAPI is
// unavailable or the process cannot be opened/queried.
static int _enumerate_modules_winnt( int pid, EnumModulesCallbackFunc func, void * param)
{
  HANDLE   hProcess ;

# define MAX_NUM_MODULES 128
  HMODULE     modules[MAX_NUM_MODULES];
  static char filename[ MAX_PATH ];
  int         result = 0;

  // Bail out early if PSAPI.DLL could not be loaded.
  if (!os::PSApiDll::PSApiAvailable()) {
    return 0;
  }

  hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
                         FALSE, pid ) ;
  if (hProcess == NULL) return 0;

  DWORD size_needed;
  if (!os::PSApiDll::EnumProcessModules(hProcess, modules,
                                        sizeof(modules), &size_needed)) {
    CloseHandle( hProcess );
    return 0;
  }

  // number of modules that are currently loaded
  int num_modules = size_needed / sizeof(HMODULE);

  // Only the first MAX_NUM_MODULES entries fit into the fixed array above.
  for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
    // Get Full pathname:
    if(!os::PSApiDll::GetModuleFileNameEx(hProcess, modules[i],
                                          filename, sizeof(filename))) {
      filename[0] = '\0';
    }

    MODULEINFO modinfo;
    if (!os::PSApiDll::GetModuleInformation(hProcess, modules[i],
                                            &modinfo, sizeof(modinfo))) {
      modinfo.lpBaseOfDll = NULL;
      modinfo.SizeOfImage = 0;
    }

    // Invoke callback function
    result = func(pid, filename, (address)modinfo.lpBaseOfDll,
                  modinfo.SizeOfImage, param);
    if (result) break;   // non-zero return stops the enumeration
  }

  CloseHandle( hProcess ) ;
  return result;
}
// enumerate_modules for Windows 95/98/ME, using TOOLHELP
// Invokes 'func' for each module in a ToolHelp snapshot of process 'pid'
// until the callback returns non-zero; that value (or 0) is returned.
// Returns 0 if the ToolHelp API is unavailable, FALSE if the snapshot
// cannot be created.
static int _enumerate_modules_windows( int pid, EnumModulesCallbackFunc func, void *param)
{
  HANDLE                hSnapShot ;
  static MODULEENTRY32  modentry ;
  int                   result = 0;

  // Bail out early if kernel32 does not export the ToolHelp functions.
  if (!os::Kernel32Dll::HelpToolsAvailable()) {
    return 0;
  }

  // Get a handle to a Toolhelp snapshot of the system
  hSnapShot = os::Kernel32Dll::CreateToolhelp32Snapshot(TH32CS_SNAPMODULE, pid ) ;
  if( hSnapShot == INVALID_HANDLE_VALUE ) {
    return FALSE ;
  }

  // iterate through all modules
  modentry.dwSize = sizeof(MODULEENTRY32) ;
  bool not_done = os::Kernel32Dll::Module32First( hSnapShot, &modentry ) != 0;

  while( not_done ) {
    // invoke the callback
    result=func(pid, modentry.szExePath, (address)modentry.modBaseAddr,
                modentry.modBaseSize, param);
    if (result) break;   // non-zero return stops the enumeration

    // dwSize must be re-initialized before every Module32Next call.
    modentry.dwSize = sizeof(MODULEENTRY32) ;
    not_done = os::Kernel32Dll::Module32Next( hSnapShot, &modentry ) != 0;
  }

  CloseHandle(hSnapShot);
  return result;
}
1398 int enumerate_modules( int pid, EnumModulesCallbackFunc func, void * param )
1399 {
1400 // Get current process ID if caller doesn't provide it.
1401 if (!pid) pid = os::current_process_id();
1403 if (os::win32::is_nt()) return _enumerate_modules_winnt (pid, func, param);
1404 else return _enumerate_modules_windows(pid, func, param);
1405 }
// Parameter block for _locate_module_by_addr(): 'addr' is the probe address
// (input); 'full_path'/'buflen' describe an optional output buffer for the
// module file name; 'base_addr' receives the matching module's base address.
struct _modinfo {
  address addr;
  char* full_path;   // point to a char buffer
  int buflen;        // size of the buffer
  address base_addr;
};
1414 static int _locate_module_by_addr(int pid, char * mod_fname, address base_addr,
1415 unsigned size, void * param) {
1416 struct _modinfo *pmod = (struct _modinfo *)param;
1417 if (!pmod) return -1;
1419 if (base_addr <= pmod->addr &&
1420 base_addr+size > pmod->addr) {
1421 // if a buffer is provided, copy path name to the buffer
1422 if (pmod->full_path) {
1423 jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
1424 }
1425 pmod->base_addr = base_addr;
1426 return 1;
1427 }
1428 return 0;
1429 }
1431 bool os::dll_address_to_library_name(address addr, char* buf,
1432 int buflen, int* offset) {
1433 // buf is not optional, but offset is optional
1434 assert(buf != NULL, "sanity check");
1436 // NOTE: the reason we don't use SymGetModuleInfo() is it doesn't always
1437 // return the full path to the DLL file, sometimes it returns path
1438 // to the corresponding PDB file (debug info); sometimes it only
1439 // returns partial path, which makes life painful.
1441 struct _modinfo mi;
1442 mi.addr = addr;
1443 mi.full_path = buf;
1444 mi.buflen = buflen;
1445 int pid = os::current_process_id();
1446 if (enumerate_modules(pid, _locate_module_by_addr, (void *)&mi)) {
1447 // buf already contains path name
1448 if (offset) *offset = addr - mi.base_addr;
1449 return true;
1450 }
1452 buf[0] = '\0';
1453 if (offset) *offset = -1;
1454 return false;
1455 }
1457 bool os::dll_address_to_function_name(address addr, char *buf,
1458 int buflen, int *offset) {
1459 // buf is not optional, but offset is optional
1460 assert(buf != NULL, "sanity check");
1462 if (Decoder::decode(addr, buf, buflen, offset)) {
1463 return true;
1464 }
1465 if (offset != NULL) *offset = -1;
1466 buf[0] = '\0';
1467 return false;
1468 }
1470 // save the start and end address of jvm.dll into param[0] and param[1]
1471 static int _locate_jvm_dll(int pid, char* mod_fname, address base_addr,
1472 unsigned size, void * param) {
1473 if (!param) return -1;
1475 if (base_addr <= (address)_locate_jvm_dll &&
1476 base_addr+size > (address)_locate_jvm_dll) {
1477 ((address*)param)[0] = base_addr;
1478 ((address*)param)[1] = base_addr + size;
1479 return 1;
1480 }
1481 return 0;
1482 }
1484 address vm_lib_location[2]; // start and end address of jvm.dll
1486 // check if addr is inside jvm.dll
1487 bool os::address_is_in_vm(address addr) {
1488 if (!vm_lib_location[0] || !vm_lib_location[1]) {
1489 int pid = os::current_process_id();
1490 if (!enumerate_modules(pid, _locate_jvm_dll, (void *)vm_lib_location)) {
1491 assert(false, "Can't find jvm module.");
1492 return false;
1493 }
1494 }
1496 return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
1497 }
1499 // print module info; param is outputStream*
1500 static int _print_module(int pid, char* fname, address base,
1501 unsigned size, void* param) {
1502 if (!param) return -1;
1504 outputStream* st = (outputStream*)param;
1506 address end_addr = base + size;
1507 st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base, end_addr, fname);
1508 return 0;
1509 }
// Loads .dll/.so and
// in case of error it checks if .dll/.so was built for the
// same architecture as Hotspot is running on
// On success, returns the module handle; on failure, returns NULL with a
// diagnostic message written into 'ebuf'.
void * os::dll_load(const char *name, char *ebuf, int ebuflen)
{
  void * result = LoadLibrary(name);
  if (result != NULL)
  {
    return result;   // loaded successfully
  }

  DWORD errcode = GetLastError();
  if (errcode == ERROR_MOD_NOT_FOUND) {
    strncpy(ebuf, "Can't find dependent libraries", ebuflen-1);
    ebuf[ebuflen-1]='\0';
    return NULL;
  }

  // Parsing dll below
  // If we can read dll-info and find that dll was built
  // for an architecture other than Hotspot is running in
  // - then print to buffer "DLL was built for a different architecture"
  // else call os::lasterror to obtain system error message

  // Read system error message into ebuf
  // It may or may not be overwritten below (in the for loop and just above)
  lasterror(ebuf, (size_t) ebuflen);
  ebuf[ebuflen-1]='\0';
  int file_descriptor=::open(name, O_RDONLY | O_BINARY, 0);
  if (file_descriptor<0)
  {
    return NULL;
  }

  // Read the machine field of the PE/COFF header to learn what architecture
  // the DLL was built for.
  uint32_t signature_offset;
  uint16_t lib_arch=0;
  bool failed_to_get_lib_arch=
  (
    //Go to position 3c in the dll
    (os::seek_to_file_offset(file_descriptor,IMAGE_FILE_PTR_TO_SIGNATURE)<0)
    ||
    // Read location of signature
    (sizeof(signature_offset)!=
      (os::read(file_descriptor, (void*)&signature_offset,sizeof(signature_offset))))
    ||
    //Go to COFF File Header in dll
    //that is located after "signature" (4 bytes long)
    (os::seek_to_file_offset(file_descriptor,
      signature_offset+IMAGE_FILE_SIGNATURE_LENGTH)<0)
    ||
    //Read field that contains code of architecture
    // that dll was build for
    (sizeof(lib_arch)!=
      (os::read(file_descriptor, (void*)&lib_arch,sizeof(lib_arch))))
  );

  ::close(file_descriptor);
  if (failed_to_get_lib_arch)
  {
    // file i/o error - report os::lasterror(...) msg
    return NULL;
  }

  // Table mapping PE machine codes to human-readable architecture names.
  typedef struct
  {
    uint16_t arch_code;
    char* arch_name;
  } arch_t;

  static const arch_t arch_array[]={
    {IMAGE_FILE_MACHINE_I386,      (char*)"IA 32"},
    {IMAGE_FILE_MACHINE_AMD64,     (char*)"AMD 64"},
    {IMAGE_FILE_MACHINE_IA64,      (char*)"IA 64"}
  };
#if   (defined _M_IA64)
  static const uint16_t running_arch=IMAGE_FILE_MACHINE_IA64;
#elif (defined _M_AMD64)
  static const uint16_t running_arch=IMAGE_FILE_MACHINE_AMD64;
#elif (defined _M_IX86)
  static const uint16_t running_arch=IMAGE_FILE_MACHINE_I386;
#else
  #error Method os::dll_load requires that one of following \
         is defined :_M_IA64,_M_AMD64 or _M_IX86
#endif


  // Obtain a string for printf operation
  // lib_arch_str shall contain string what platform this .dll was built for
  // running_arch_str shall string contain what platform Hotspot was built for
  char *running_arch_str=NULL,*lib_arch_str=NULL;
  for (unsigned int i=0;i<ARRAY_SIZE(arch_array);i++)
  {
    if (lib_arch==arch_array[i].arch_code)
      lib_arch_str=arch_array[i].arch_name;
    if (running_arch==arch_array[i].arch_code)
      running_arch_str=arch_array[i].arch_name;
  }

  assert(running_arch_str,
         "Didn't find runing architecture code in arch_array");

  // If the architecture matches but some other error took place,
  // keep and report the os::lasterror(...) msg already in ebuf.
  if (lib_arch == running_arch)
  {
    return NULL;
  }

  // Architecture mismatch: overwrite ebuf with a specific message.
  if (lib_arch_str!=NULL)
  {
    ::_snprintf(ebuf, ebuflen-1,
                "Can't load %s-bit .dll on a %s-bit platform",
                lib_arch_str,running_arch_str);
  }
  else
  {
    // don't know what architecture this dll was build for
    ::_snprintf(ebuf, ebuflen-1,
                "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
                lib_arch,running_arch_str);
  }

  return NULL;
}
1637 void os::print_dll_info(outputStream *st) {
1638 int pid = os::current_process_id();
1639 st->print_cr("Dynamic libraries:");
1640 enumerate_modules(pid, _print_module, (void *)st);
1641 }
// The brief OS report is the same as the full one on Windows.
void os::print_os_info_brief(outputStream* st) {
  os::print_os_info(st);
}
// Print "OS:" followed by the detected Windows version and build details.
void os::print_os_info(outputStream* st) {
  st->print("OS:");

  os::win32::print_windows_version(st);
}
// Print "Windows <product> [, 64 bit] Build N (maj.min.build.minor)".
// The version numbers are taken from kernel32.dll's file version resource
// because GetVersionEx() can no longer be trusted starting with Windows 8.1;
// GetVersionEx() is used only to distinguish workstation from server
// editions.
void os::win32::print_windows_version(outputStream* st) {
  OSVERSIONINFOEX osvi;
  VS_FIXEDFILEINFO *file_info;
  TCHAR kernel32_path[MAX_PATH];
  UINT len, ret;

  // Use the GetVersionEx information to see if we're on a server or
  // workstation edition of Windows. Starting with Windows 8.1 we can't
  // trust the OS version information returned by this API.
  ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
  osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
  if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
    st->print_cr("Call to GetVersionEx failed");
    return;
  }
  bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION);

  // Get the full path to \Windows\System32\kernel32.dll and use that for
  // determining what version of Windows we're running on.
  len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1;
  ret = GetSystemDirectory(kernel32_path, len);
  if (ret == 0 || ret > len) {
    st->print_cr("Call to GetSystemDirectory failed");
    return;
  }
  strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret);

  DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL);
  if (version_size == 0) {
    st->print_cr("Call to GetFileVersionInfoSize failed");
    return;
  }

  LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal);
  if (version_info == NULL) {
    st->print_cr("Failed to allocate version_info");
    return;
  }

  if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) {
    os::free(version_info);
    st->print_cr("Call to GetFileVersionInfo failed");
    return;
  }

  // Query the root block of the resource for the fixed file info, which
  // carries the product version numbers.
  if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) {
    os::free(version_info);
    st->print_cr("Call to VerQueryValue failed");
    return;
  }

  int major_version = HIWORD(file_info->dwProductVersionMS);
  int minor_version = LOWORD(file_info->dwProductVersionMS);
  int build_number = HIWORD(file_info->dwProductVersionLS);
  int build_minor = LOWORD(file_info->dwProductVersionLS);
  int os_vers = major_version * 1000 + minor_version;
  os::free(version_info);

  st->print(" Windows ");
  switch (os_vers) {

  case 6000:
    if (is_workstation) {
      st->print("Vista");
    } else {
      st->print("Server 2008");
    }
    break;

  case 6001:
    if (is_workstation) {
      st->print("7");
    } else {
      st->print("Server 2008 R2");
    }
    break;

  case 6002:
    if (is_workstation) {
      st->print("8");
    } else {
      st->print("Server 2012");
    }
    break;

  case 6003:
    if (is_workstation) {
      st->print("8.1");
    } else {
      st->print("Server 2012 R2");
    }
    break;

  case 6004:
    if (is_workstation) {
      st->print("10");
    } else {
      st->print("Server 2016");
    }
    break;

  default:
    // Unrecognized windows, print out its major and minor versions
    st->print("%d.%d", major_version, minor_version);
    break;
  }

  // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could
  // find out whether we are running on 64 bit processor or not
  SYSTEM_INFO si;
  ZeroMemory(&si, sizeof(SYSTEM_INFO));
  os::Kernel32Dll::GetNativeSystemInfo(&si);
  if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
    st->print(" , 64 bit");
  }

  st->print(" Build %d", build_number);
  st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor);
  st->cr();
}
// Platform-dependent CPU details; nothing extra to add on Windows.
void os::pd_print_cpu_info(outputStream* st) {
  // Nothing to do for now.
}
1778 void os::print_memory_info(outputStream* st) {
1779 st->print("Memory:");
1780 st->print(" %dk page", os::vm_page_size()>>10);
1782 // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
1783 // value if total memory is larger than 4GB
1784 MEMORYSTATUSEX ms;
1785 ms.dwLength = sizeof(ms);
1786 GlobalMemoryStatusEx(&ms);
1788 st->print(", physical %uk", os::physical_memory() >> 10);
1789 st->print("(%uk free)", os::available_memory() >> 10);
1791 st->print(", swap %uk", ms.ullTotalPageFile >> 10);
1792 st->print("(%uk free)", ms.ullAvailPageFile >> 10);
1793 st->cr();
1794 }
// Print a description of a Windows exception record ('siginfo') on 'st':
// the exception code plus, where available, decoded access-violation
// details, a class-data-sharing hint for in-page errors, or the raw
// ExceptionInformation words.
void os::print_siginfo(outputStream *st, void *siginfo) {
  EXCEPTION_RECORD* er = (EXCEPTION_RECORD*)siginfo;
  st->print("siginfo:");
  st->print(" ExceptionCode=0x%x", er->ExceptionCode);

  if (er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
      er->NumberParameters >= 2) {
    // ExceptionInformation[0]: 0 = read fault, 1 = write fault;
    // ExceptionInformation[1]: the faulting address.
    switch (er->ExceptionInformation[0]) {
    case 0: st->print(", reading address"); break;
    case 1: st->print(", writing address"); break;
    default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
                       er->ExceptionInformation[0]);
    }
    st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
  } else if (er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR &&
             er->NumberParameters >= 2 && UseSharedSpaces) {
    // A paging failure inside the CDS archive mapping usually means the
    // mapped file became inaccessible while the VM was running.
    FileMapInfo* mapinfo = FileMapInfo::current_info();
    if (mapinfo->is_in_shared_space((void*)er->ExceptionInformation[1])) {
      st->print("\n\nError accessing class data sharing archive." \
                " Mapped file inaccessible during execution, " \
                " possible disk/network problem.");
    }
  } else {
    // Unrecognized exception: dump all information words verbatim.
    int num = er->NumberParameters;
    if (num > 0) {
      st->print(", ExceptionInformation=");
      for (int i = 0; i < num; i++) {
        st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
      }
    }
  }
  st->cr();
}
// Signal handlers are not reportable in the POSIX sense on Windows, so
// there is nothing to print here.
void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
  // do nothing
}
// Cached result of os::jvm_path(), filled in on the first call.
static char saved_jvm_path[MAX_PATH] = {0};

// Find the full path to the current module, jvm.dll
// Copies the path into 'buf' (must be at least MAX_PATH bytes) and caches
// it in saved_jvm_path for subsequent calls.
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAX_PATH) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  buf[0] = '\0';
  if (Arguments::created_by_gamma_launcher()) {
     // Support for the gamma launcher. Check for an
     // JAVA_HOME environment variable
     // and fix up the path so it looks like
     // libjvm.so is installed there (append a fake suffix
     // hotspot/libjvm.so).
     char* java_home_var = ::getenv("JAVA_HOME");
     if (java_home_var != NULL && java_home_var[0] != 0 &&
         strlen(java_home_var) < (size_t)buflen) {

        strncpy(buf, java_home_var, buflen);

        // determine if this is a legacy image or modules image
        // modules image doesn't have "jre" subdirectory
        size_t len = strlen(buf);
        char* jrebin_p = buf + len;
        jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
        if (0 != _access(buf, 0)) {
          // "jre" subdirectory absent => modules image layout
          jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
        }
        len = strlen(buf);
        jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
     }
  }

  if(buf[0] == '\0') {
    // Normal case: ask Windows for the file name of this module.
    GetModuleFileName(vm_lib_handle, buf, buflen);
  }
  strncpy(saved_jvm_path, buf, MAX_PATH);
}
// On 32-bit Windows, decorated JNI entry point names carry a leading
// underscore; 64-bit Windows uses no prefix decoration.
void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  st->print("_");
#endif
}
// On 32-bit Windows, decorated JNI entry point names carry an "@<bytes>"
// suffix giving the argument-list size; 64-bit Windows uses no suffix.
void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  st->print("@%d", args_size * sizeof(int));
#endif
}
// This method is a copy of JDK's sysGetLastErrorString
// from src/windows/hpi/src/system_md.c
// Format the most recent OS error into 'buf': first GetLastError() via
// FormatMessage, falling back to the CRT's errno/strerror. Returns the
// message length, or 0 if no error is pending.
size_t os::lasterror(char* buf, size_t len) {
  DWORD errval;

  if ((errval = GetLastError()) != 0) {
    // DOS error
    size_t n = (size_t)FormatMessage(
          FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
          NULL,
          errval,
          0,
          buf,
          (DWORD)len,
          NULL);
    if (n > 3) {
      // Drop final '.', CR, LF
      if (buf[n - 1] == '\n') n--;
      if (buf[n - 1] == '\r') n--;
      if (buf[n - 1] == '.') n--;
      buf[n] = '\0';
    }
    return n;
  }

  if (errno != 0) {
    // C runtime error that has no corresponding DOS error code
    const char* s = strerror(errno);
    size_t n = strlen(s);
    if (n >= len) n = len - 1;   // truncate to fit the caller's buffer
    strncpy(buf, s, n);
    buf[n] = '\0';
    return n;
  }

  return 0;
}
1935 int os::get_last_error() {
1936 DWORD error = GetLastError();
1937 if (error == 0)
1938 error = errno;
1939 return (int)error;
1940 }
// sun.misc.Signal
// NOTE that this is a workaround for an apparent kernel bug where if
// a signal handler for SIGBREAK is installed then that signal handler
// takes priority over the console control handler for CTRL_CLOSE_EVENT.
// See bug 4416763.
// The SIGBREAK handler registered through os::signal(); invoked manually
// from consoleHandler() rather than being installed via ::signal().
static void (*sigbreakHandler)(int) = NULL;
// Handler installed by os::signal() for signals forwarded to Java: notifies
// the VM's signal dispatch machinery, then re-registers itself.
static void UserHandler(int sig, void *siginfo, void *context) {
  os::signal_notify(sig);
  // We need to reinstate the signal handler each time...
  os::signal(sig, (void*)UserHandler);
}
// Expose UserHandler to shared code as an opaque pointer.
void* os::user_handler() {
  return (void*) UserHandler;
}
// Install 'handler' for 'signal_number' and return the previous handler.
// SIGBREAK is special-cased (unless -Xrs/ReduceSignalUsage): its handler is
// stored in sigbreakHandler and dispatched from our console control handler
// instead of via ::signal() — see the kernel-bug note above sigbreakHandler
// (bug 4416763).
void* os::signal(int signal_number, void* handler) {
  if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
    void (*oldHandler)(int) = sigbreakHandler;
    sigbreakHandler = (void (*)(int)) handler;
    return (void*) oldHandler;
  } else {
    return (void*)::signal(signal_number, (void (*)(int))handler);
  }
}
// Deliver 'signal_number' to this process via the C runtime.
void os::signal_raise(int signal_number) {
  raise(signal_number);
}
// The Win32 C runtime library maps all console control events other than ^C
// into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
// logoff, and shutdown events. We therefore install our own console handler
// that raises SIGTERM for the latter cases.
//
static BOOL WINAPI consoleHandler(DWORD event) {
  switch(event) {
    case CTRL_C_EVENT:
      if (is_error_reported()) {
        // Ctrl-C is pressed during error reporting, likely because the error
        // handler fails to abort. Let VM die immediately.
        os::die();
      }

      os::signal_raise(SIGINT);
      return TRUE;
      break;
    case CTRL_BREAK_EVENT:
      // Dispatch to the handler registered via os::signal() — see the
      // SIGBREAK workaround note above sigbreakHandler.
      if (sigbreakHandler != NULL) {
        (*sigbreakHandler)(SIGBREAK);
      }
      return TRUE;
      break;
    case CTRL_LOGOFF_EVENT: {
      // Don't terminate JVM if it is running in a non-interactive session,
      // such as a service process.
      USEROBJECTFLAGS flags;
      HANDLE handle = GetProcessWindowStation();
      if (handle != NULL &&
          GetUserObjectInformation(handle, UOI_FLAGS, &flags,
          sizeof( USEROBJECTFLAGS), NULL)) {
        // If it is a non-interactive session, let next handler to deal
        // with it.
        if ((flags.dwFlags & WSF_VISIBLE) == 0) {
          return FALSE;
        }
      }
    }
    // Intentional fall through: an interactive logoff is handled exactly
    // like a console close or a system shutdown.
    case CTRL_CLOSE_EVENT:
    case CTRL_SHUTDOWN_EVENT:
      os::signal_raise(SIGTERM);
      return TRUE;
      break;
    default:
      break;
  }
  return FALSE;
}
/*
 * The following code is moved from os.cpp for making this
 * code platform specific, which it is by its very nature.
 */

// Return maximum OS signal used + 1 for internal use only
// Used as exit signal for signal_thread
int os::sigexitnum_pd(){
  return NSIG;
}
// a counter for each possible signal value, including signal_thread exit signal
static volatile jint pending_signals[NSIG+1] = { 0 };
// Counting semaphore released once per signal_notify(); waited on by
// check_pending_signals(). NULL until signal_init_pd() runs.
static HANDLE sig_sem = NULL;
// Platform-specific part of signal machinery initialization: zero the
// pending-signal counters, create the wakeup semaphore, and (unless -Xrs)
// install the console control handler.
void os::signal_init_pd() {
  // Initialize signal structures
  memset((void*)pending_signals, 0, sizeof(pending_signals));

  sig_sem = ::CreateSemaphore(NULL, 0, NSIG+1, NULL);

  // Programs embedding the VM do not want it to attempt to receive
  // events like CTRL_LOGOFF_EVENT, which are used to implement the
  // shutdown hooks mechanism introduced in 1.3. For example, when
  // the VM is run as part of a Windows NT service (i.e., a servlet
  // engine in a web server), the correct behavior is for any console
  // control handler to return FALSE, not TRUE, because the OS's
  // "final" handler for such events allows the process to continue if
  // it is a service (while terminating it if it is not a service).
  // To make this behavior uniform and the mechanism simpler, we
  // completely disable the VM's usage of these console events if -Xrs
  // (=ReduceSignalUsage) is specified. This means, for example, that
  // the CTRL-BREAK thread dump mechanism is also disabled in this
  // case. See bugs 4323062, 4345157, and related bugs.

  if (!ReduceSignalUsage) {
    // Add a CTRL-C handler
    SetConsoleCtrlHandler(consoleHandler, TRUE);
  }
}
// Record one pending occurrence of 'signal_number' and wake the waiting
// signal thread. A no-op before signal_init_pd() (while sig_sem is NULL).
void os::signal_notify(int signal_number) {
  BOOL ret;
  if (sig_sem != NULL) {
    Atomic::inc(&pending_signals[signal_number]);
    ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
    assert(ret != 0, "ReleaseSemaphore() failed");
  }
}
// Scan pending_signals for a raised signal and return its number, atomically
// decrementing that signal's counter. With wait_for_signal == false, returns
// -1 when nothing is pending; with true, blocks on sig_sem (as a blocked
// JavaThread) until a signal arrives, cooperating with external suspension.
static int check_pending_signals(bool wait_for_signal) {
  DWORD ret;
  while (true) {
    // Claim one pending occurrence of any signal via CAS on its counter.
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    if (!wait_for_signal) {
      return -1;
    }

    JavaThread *thread = JavaThread::current();

    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
      ret = ::WaitForSingleObject(sig_sem, INFINITE);
      assert(ret == WAIT_OBJECT_0, "WaitForSingleObject() failed");

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        //
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us. We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        //
        // Return the permit so no notification is lost, then self-suspend.
        ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
        assert(ret != 0, "ReleaseSemaphore() failed");

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}
// Non-blocking poll: returns a pending signal number, or -1 if none.
int os::signal_lookup() {
  return check_pending_signals(false);
}
// Blocks until a signal is pending, then returns its number.
int os::signal_wait() {
  return check_pending_signals(true);
}
// Implicit OS exception handling

// Redirect execution to the given handler: records the faulting pc in the
// current JavaThread (so the continuation stub can find it) and rewrites the
// continuation pc in the Windows CONTEXT record for the current platform.
// Returning EXCEPTION_CONTINUE_EXECUTION makes the OS resume at the handler.
LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo, address handler) {
  JavaThread* thread = JavaThread::current();
  // Save pc in thread
#ifdef _M_IA64
  // Do not blow up if no thread info available.
  if (thread) {
    // Saving PRECISE pc (with slot information) in thread.
    uint64_t precise_pc = (uint64_t) exceptionInfo->ExceptionRecord->ExceptionAddress;
    // Convert precise PC into "Unix" format
    precise_pc = (precise_pc & 0xFFFFFFFFFFFFFFF0) | ((precise_pc & 0xF) >> 2);
    thread->set_saved_exception_pc((address)precise_pc);
  }
  // Set pc to handler
  exceptionInfo->ContextRecord->StIIP = (DWORD64)handler;
  // Clear out psr.ri (= Restart Instruction) in order to continue
  // at the beginning of the target bundle.
  exceptionInfo->ContextRecord->StIPSR &= 0xFFFFF9FFFFFFFFFF;
  assert(((DWORD64)handler & 0xF) == 0, "Target address must point to the beginning of a bundle!");
#else
#ifdef _M_AMD64
  // Do not blow up if no thread info available.
  if (thread) {
    thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip);
  }
  // Set pc to handler
  exceptionInfo->ContextRecord->Rip = (DWORD64)handler;
#else
  // Do not blow up if no thread info available.
  if (thread) {
    thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip);
  }
  // Set pc to handler
  exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler;
#endif
#endif

  // Continue the execution
  return EXCEPTION_CONTINUE_EXECUTION;
}
// Used for PostMortemDump
// Forward declarations of debug helpers (defined in shared debug code) so
// they can be invoked from a native debugger after a crash.
extern "C" void safepoints();
extern "C" void find(int x);
extern "C" void events();
2170 // According to Windows API documentation, an illegal instruction sequence should generate
// the 0xC000001C exception code. However, real world experience shows that occasionally
2172 // the execution of an illegal instruction can generate the exception code 0xC000001E. This
2173 // seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems).
2175 #define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E
2177 // From "Execution Protection in the Windows Operating System" draft 0.35
2178 // Once a system header becomes available, the "real" define should be
2179 // included or copied here.
2180 #define EXCEPTION_INFO_EXEC_VIOLATION 0x08
2182 // Handle NAT Bit consumption on IA64.
2183 #ifdef _M_IA64
2184 #define EXCEPTION_REG_NAT_CONSUMPTION STATUS_REG_NAT_CONSUMPTION
2185 #endif
2187 // Windows Vista/2008 heap corruption check
2188 #define EXCEPTION_HEAP_CORRUPTION 0xC0000374
2190 #define def_excpt(val) #val, val
// Maps an exception code to its printable name.  The exceptlabels[] table
// below is built from this via the def_excpt macro; name is const because
// entries point at string literals (converting a string literal to a
// non-const char* is ill-formed in C++11).
struct siglabel {
  const char *name;
  int number;
};
2197 // All Visual C++ exceptions thrown from code generated by the Microsoft Visual
2198 // C++ compiler contain this error code. Because this is a compiler-generated
2199 // error, the code is not listed in the Win32 API header files.
2200 // The code is actually a cryptic mnemonic device, with the initial "E"
2201 // standing for "exception" and the final 3 bytes (0x6D7363) representing the
2202 // ASCII values of "msc".
2204 #define EXCEPTION_UNCAUGHT_CXX_EXCEPTION 0xE06D7363
// Table mapping Win32/VC++ exception codes to printable names, terminated by
// a NULL-named sentinel entry; searched linearly by os::exception_name().
struct siglabel exceptlabels[] = {
    def_excpt(EXCEPTION_ACCESS_VIOLATION),
    def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT),
    def_excpt(EXCEPTION_BREAKPOINT),
    def_excpt(EXCEPTION_SINGLE_STEP),
    def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED),
    def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND),
    def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO),
    def_excpt(EXCEPTION_FLT_INEXACT_RESULT),
    def_excpt(EXCEPTION_FLT_INVALID_OPERATION),
    def_excpt(EXCEPTION_FLT_OVERFLOW),
    def_excpt(EXCEPTION_FLT_STACK_CHECK),
    def_excpt(EXCEPTION_FLT_UNDERFLOW),
    def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO),
    def_excpt(EXCEPTION_INT_OVERFLOW),
    def_excpt(EXCEPTION_PRIV_INSTRUCTION),
    def_excpt(EXCEPTION_IN_PAGE_ERROR),
    def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION),
    def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2),
    def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION),
    def_excpt(EXCEPTION_STACK_OVERFLOW),
    def_excpt(EXCEPTION_INVALID_DISPOSITION),
    def_excpt(EXCEPTION_GUARD_PAGE),
    def_excpt(EXCEPTION_INVALID_HANDLE),
    def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION),
    def_excpt(EXCEPTION_HEAP_CORRUPTION),
#ifdef _M_IA64
    def_excpt(EXCEPTION_REG_NAT_CONSUMPTION),
#endif
    NULL, 0
};
2239 const char* os::exception_name(int exception_code, char *buf, size_t size) {
2240 for (int i = 0; exceptlabels[i].name != NULL; i++) {
2241 if (exceptlabels[i].number == exception_code) {
2242 jio_snprintf(buf, size, "%s", exceptlabels[i].name);
2243 return buf;
2244 }
2245 }
2247 return NULL;
2248 }
2250 //-----------------------------------------------------------------------------
2251 LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2252 // handle exception caused by idiv; should only happen for -MinInt/-1
2253 // (division by zero is handled explicitly)
2254 #ifdef _M_IA64
2255 assert(0, "Fix Handle_IDiv_Exception");
2256 #else
2257 #ifdef _M_AMD64
2258 PCONTEXT ctx = exceptionInfo->ContextRecord;
2259 address pc = (address)ctx->Rip;
2260 assert(pc[0] == 0xF7, "not an idiv opcode");
2261 assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2262 assert(ctx->Rax == min_jint, "unexpected idiv exception");
2263 // set correct result values and continue after idiv instruction
2264 ctx->Rip = (DWORD)pc + 2; // idiv reg, reg is 2 bytes
2265 ctx->Rax = (DWORD)min_jint; // result
2266 ctx->Rdx = (DWORD)0; // remainder
2267 // Continue the execution
2268 #else
2269 PCONTEXT ctx = exceptionInfo->ContextRecord;
2270 address pc = (address)ctx->Eip;
2271 assert(pc[0] == 0xF7, "not an idiv opcode");
2272 assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2273 assert(ctx->Eax == min_jint, "unexpected idiv exception");
2274 // set correct result values and continue after idiv instruction
2275 ctx->Eip = (DWORD)pc + 2; // idiv reg, reg is 2 bytes
2276 ctx->Eax = (DWORD)min_jint; // result
2277 ctx->Edx = (DWORD)0; // remainder
2278 // Continue the execution
2279 #endif
2280 #endif
2281 return EXCEPTION_CONTINUE_EXECUTION;
2282 }
2284 #ifndef _WIN64
//-----------------------------------------------------------------------------
// 32-bit only: repairs the x87 control word after a native method has
// changed it.  On an FLT_* exception, if the FPCW differs from the VM's
// standard value it is restored (with FLT exceptions masked) and execution
// continues; otherwise the exception is passed on to the previously
// installed unhandled-exception filter, if any.
LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
  // handle exception caused by native method modifying control word
  PCONTEXT ctx = exceptionInfo->ContextRecord;
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;

  switch (exception_code) {
  case EXCEPTION_FLT_DENORMAL_OPERAND:
  case EXCEPTION_FLT_DIVIDE_BY_ZERO:
  case EXCEPTION_FLT_INEXACT_RESULT:
  case EXCEPTION_FLT_INVALID_OPERATION:
  case EXCEPTION_FLT_OVERFLOW:
  case EXCEPTION_FLT_STACK_CHECK:
  case EXCEPTION_FLT_UNDERFLOW:
    // All FLT codes share this one handler body; any other code falls
    // through to the prev_uef_handler dispatch below.
    jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std());
    if (fp_control_word != ctx->FloatSave.ControlWord) {
      // Restore FPCW and mask out FLT exceptions
      ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
      // Mask out pending FLT exceptions
      ctx->FloatSave.StatusWord &= 0xffffff00;
      return EXCEPTION_CONTINUE_EXECUTION;
    }
  }

  if (prev_uef_handler != NULL) {
    // We didn't handle this exception so pass it to the previous
    // UnhandledExceptionFilter.
    return (prev_uef_handler)(exceptionInfo);
  }

  return EXCEPTION_CONTINUE_SEARCH;
}
2317 #else //_WIN64
2318 /*
2319 On Windows, the mxcsr control bits are non-volatile across calls
2320 See also CR 6192333
2321 If EXCEPTION_FLT_* happened after some native method modified
2322 mxcsr - it is not a jvm fault.
However, should we decide to restore the mxcsr after a faulty
native method, we can uncomment the following code
2325 jint MxCsr = INITIAL_MXCSR;
2326 // we can't use StubRoutines::addr_mxcsr_std()
2327 // because in Win64 mxcsr is not saved there
2328 if (MxCsr != ctx->MxCsr) {
2329 ctx->MxCsr = MxCsr;
2330 return EXCEPTION_CONTINUE_EXECUTION;
2331 }
2333 */
2334 #endif // _WIN64
// Funnel a fatal Windows exception into the shared VM error reporting
// machinery (hs_err file generation etc.).
static inline void report_error(Thread* t, DWORD exception_code,
                                address addr, void* siginfo, void* context) {
  VMError err(t, exception_code, addr, siginfo, context);
  err.report_and_die();

  // If UseOsErrorReporting, this will return here and save the error file
  // somewhere where we can find it in the minidump.
}
//-----------------------------------------------------------------------------
// Top-level structured-exception filter for the whole VM.  Decides, based on
// the exception code and thread state, whether the fault is one the VM
// provokes deliberately (implicit null checks, stack banging, safepoint
// polls, idiv overflow, ...) and can resume from via Handle_Exception(), or
// a genuine crash that must be reported via report_error().
LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
  if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
#ifdef _M_IA64
  // On Itanium, we need the "precise pc", which has the slot number coded
  // into the least 4 bits: 0000=slot0, 0100=slot1, 1000=slot2 (Windows format).
  address pc = (address) exceptionInfo->ExceptionRecord->ExceptionAddress;
  // Convert the pc to "Unix format", which has the slot number coded
  // into the least 2 bits: 0000=slot0, 0001=slot1, 0010=slot2
  // This is needed for IA64 because "relocation" / "implicit null check" / "poll instruction"
  // information is saved in the Unix format.
  address pc_unix_format = (address) ((((uint64_t)pc) & 0xFFFFFFFFFFFFFFF0) | ((((uint64_t)pc) & 0xF) >> 2));
#else
#ifdef _M_AMD64
  address pc = (address) exceptionInfo->ContextRecord->Rip;
#else
  address pc = (address) exceptionInfo->ContextRecord->Eip;
#endif
#endif
  Thread* t = ThreadLocalStorage::get_thread_slow(); // slow & steady

  // Handle SafeFetch32 and SafeFetchN exceptions.
  if (StubRoutines::is_safefetch_fault(pc)) {
    return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
  }

#ifndef _WIN64
  // Execution protection violation - win32 running on AMD64 only
  // Handled first to avoid misdiagnosis as a "normal" access violation;
  // This is safe to do because we have a new/unique ExceptionInformation
  // code for this condition.
  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
    int exception_subcode = (int) exceptionRecord->ExceptionInformation[0];
    address addr = (address) exceptionRecord->ExceptionInformation[1];

    if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
      int page_size = os::vm_page_size();

      // Make sure the pc and the faulting address are sane.
      //
      // If an instruction spans a page boundary, and the page containing
      // the beginning of the instruction is executable but the following
      // page is not, the pc and the faulting address might be slightly
      // different - we still want to unguard the 2nd page in this case.
      //
      // 15 bytes seems to be a (very) safe value for max instruction size.
      bool pc_is_near_addr =
        (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
      bool instr_spans_page_boundary =
        (align_size_down((intptr_t) pc ^ (intptr_t) addr,
                         (intptr_t) page_size) > 0);

      if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
        static volatile address last_addr =
          (address) os::non_memory_address_word();

        // In conservative mode, don't unguard unless the address is in the VM
        if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
            (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {

          // Set memory to RWX and retry
          address page_start =
            (address) align_size_down((intptr_t) addr, (intptr_t) page_size);
          bool res = os::protect_memory((char*) page_start, page_size,
                                        os::MEM_PROT_RWX);

          if (PrintMiscellaneous && Verbose) {
            char buf[256];
            jio_snprintf(buf, sizeof(buf), "Execution protection violation "
                         "at " INTPTR_FORMAT
                         ", unguarding " INTPTR_FORMAT ": %s", addr,
                         page_start, (res ? "success" : strerror(errno)));
            tty->print_raw_cr(buf);
          }

          // Set last_addr so if we fault again at the same address, we don't
          // end up in an endless loop.
          //
          // There are two potential complications here.  Two threads trapping
          // at the same address at the same time could cause one of the
          // threads to think it already unguarded, and abort the VM.  Likely
          // very rare.
          //
          // The other race involves two threads alternately trapping at
          // different addresses and failing to unguard the page, resulting in
          // an endless loop.  This condition is probably even more unlikely
          // than the first.
          //
          // Although both cases could be avoided by using locks or thread
          // local last_addr, these solutions are unnecessary complication:
          // this handler is a best-effort safety net, not a complete solution.
          // It is disabled by default and should only be used as a workaround
          // in case we missed any no-execute-unsafe VM code.

          last_addr = addr;

          return EXCEPTION_CONTINUE_EXECUTION;
        }
      }

      // Last unguard failed or not unguarding
      tty->print_raw_cr("Execution protection violation");
      report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord,
                   exceptionInfo->ContextRecord);
      return EXCEPTION_CONTINUE_SEARCH;
    }
  }
#endif // _WIN64

  // Check to see if we caught the safepoint code in the
  // process of write protecting the memory serialization page.
  // It write enables the page immediately after protecting it
  // so just return.
  if ( exception_code == EXCEPTION_ACCESS_VIOLATION ) {
    JavaThread* thread = (JavaThread*) t;
    PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
    address addr = (address) exceptionRecord->ExceptionInformation[1];
    if ( os::is_memory_serialize_page(thread, addr) ) {
      // Block current thread until the memory serialize page permission restored.
      os::block_on_serialize_page_trap();
      return EXCEPTION_CONTINUE_EXECUTION;
    }
  }

  if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
      VM_Version::is_cpuinfo_segv_addr(pc)) {
    // Verify that OS save/restore AVX registers.
    return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
  }

  if (t != NULL && t->is_Java_thread()) {
    JavaThread* thread = (JavaThread*) t;
    bool in_java = thread->thread_state() == _thread_in_Java;

    // Handle potential stack overflows up front.
    if (exception_code == EXCEPTION_STACK_OVERFLOW) {
      if (os::uses_stack_guard_pages()) {
#ifdef _M_IA64
        // Use guard page for register stack.
        PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
        address addr = (address) exceptionRecord->ExceptionInformation[1];
        // Check for a register stack overflow on Itanium
        if (thread->addr_inside_register_stack_red_zone(addr)) {
          // Fatal red zone violation happens if the Java program
          // catches a StackOverflow error and does so much processing
          // that it runs beyond the unprotected yellow guard zone. As
          // a result, we are out of here.
          fatal("ERROR: Unrecoverable stack overflow happened. JVM will exit.");
        } else if(thread->addr_inside_register_stack(addr)) {
          // Disable the yellow zone which sets the state that
          // we've got a stack overflow problem.
          if (thread->stack_yellow_zone_enabled()) {
            thread->disable_stack_yellow_zone();
          }
          // Give us some room to process the exception.
          thread->disable_register_stack_guard();
          // Tracing with +Verbose.
          if (Verbose) {
            tty->print_cr("SOF Compiled Register Stack overflow at " INTPTR_FORMAT " (SIGSEGV)", pc);
            tty->print_cr("Register Stack access at " INTPTR_FORMAT, addr);
            tty->print_cr("Register Stack base " INTPTR_FORMAT, thread->register_stack_base());
            tty->print_cr("Register Stack [" INTPTR_FORMAT "," INTPTR_FORMAT "]",
                          thread->register_stack_base(),
                          thread->register_stack_base() + thread->stack_size());
          }

          // Reguard the permanent register stack red zone just to be sure.
          // We saw Windows silently disabling this without telling us.
          thread->enable_register_stack_red_zone();

          return Handle_Exception(exceptionInfo,
                                  SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
        }
#endif
        if (thread->stack_yellow_zone_enabled()) {
          // Yellow zone violation.  The o/s has unprotected the first yellow
          // zone page for us.  Note: must call disable_stack_yellow_zone to
          // update the enabled status, even if the zone contains only one page.
          thread->disable_stack_yellow_zone();
          // If not in java code, return and hope for the best.
          return in_java ? Handle_Exception(exceptionInfo,
                                            SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
                         : EXCEPTION_CONTINUE_EXECUTION;
        } else {
          // Fatal red zone violation.
          thread->disable_stack_red_zone();
          tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
          report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                       exceptionInfo->ContextRecord);
          return EXCEPTION_CONTINUE_SEARCH;
        }
      } else if (in_java) {
        // JVM-managed guard pages cannot be used on win95/98.  The o/s provides
        // a one-time-only guard page, which it has released to us.  The next
        // stack overflow on this thread will result in an ACCESS_VIOLATION.
        return Handle_Exception(exceptionInfo,
                                SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
      } else {
        // Can only return and hope for the best.  Further stack growth will
        // result in an ACCESS_VIOLATION.
        return EXCEPTION_CONTINUE_EXECUTION;
      }
    } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
      // Either stack overflow or null pointer exception.
      if (in_java) {
        PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
        address addr = (address) exceptionRecord->ExceptionInformation[1];
        address stack_end = thread->stack_base() - thread->stack_size();
        if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
          // Stack overflow.
          assert(!os::uses_stack_guard_pages(),
                 "should be caught by red zone code above.");
          return Handle_Exception(exceptionInfo,
                                  SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
        }
        //
        // Check for safepoint polling and implicit null
        // We only expect null pointers in the stubs (vtable)
        // the rest are checked explicitly now.
        //
        CodeBlob* cb = CodeCache::find_blob(pc);
        if (cb != NULL) {
          if (os::is_poll_address(addr)) {
            address stub = SharedRuntime::get_poll_stub(pc);
            return Handle_Exception(exceptionInfo, stub);
          }
        }
        {
#ifdef _WIN64
          //
          // If it's a legal stack address map the entire region in
          //
          PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
          address addr = (address) exceptionRecord->ExceptionInformation[1];
          if (addr > thread->stack_yellow_zone_base() && addr < thread->stack_base() ) {
            addr = (address)((uintptr_t)addr &
                             (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
            os::commit_memory((char *)addr, thread->stack_base() - addr,
                              !ExecMem);
            return EXCEPTION_CONTINUE_EXECUTION;
          }
          else
#endif
          {
            // Null pointer exception.
#ifdef _M_IA64
            // Process implicit null checks in compiled code. Note: Implicit null checks
            // can happen even if "ImplicitNullChecks" is disabled, e.g. in vtable stubs.
            if (CodeCache::contains((void*) pc_unix_format) && !MacroAssembler::needs_explicit_null_check((intptr_t) addr)) {
              CodeBlob *cb = CodeCache::find_blob_unsafe(pc_unix_format);
              // Handle implicit null check in UEP method entry
              if (cb && (cb->is_frame_complete_at(pc) ||
                         (cb->is_nmethod() && ((nmethod *)cb)->inlinecache_check_contains(pc)))) {
                if (Verbose) {
                  intptr_t *bundle_start = (intptr_t*) ((intptr_t) pc_unix_format & 0xFFFFFFFFFFFFFFF0);
                  tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGSEGV)", pc_unix_format);
                  tty->print_cr(" to addr " INTPTR_FORMAT, addr);
                  tty->print_cr(" bundle is " INTPTR_FORMAT " (high), " INTPTR_FORMAT " (low)",
                                *(bundle_start + 1), *bundle_start);
                }
                return Handle_Exception(exceptionInfo,
                                        SharedRuntime::continuation_for_implicit_exception(thread, pc_unix_format, SharedRuntime::IMPLICIT_NULL));
              }
            }

            // Implicit null checks were processed above.  Hence, we should not reach
            // here in the usual case => die!
            if (Verbose) tty->print_raw_cr("Access violation, possible null pointer exception");
            report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                         exceptionInfo->ContextRecord);
            return EXCEPTION_CONTINUE_SEARCH;

#else // !IA64

            // Windows 98 reports faulting addresses incorrectly
            if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr) ||
                !os::win32::is_nt()) {
              address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
              if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
            }
            report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                         exceptionInfo->ContextRecord);
            return EXCEPTION_CONTINUE_SEARCH;
#endif
          }
        }
      }

#ifdef _WIN64
      // Special care for fast JNI field accessors.
      // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
      // in and the heap gets shrunk before the field access.
      if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
        address addr = JNI_FastGetField::find_slowcase_pc(pc);
        if (addr != (address)-1) {
          return Handle_Exception(exceptionInfo, addr);
        }
      }
#endif

      // Stack overflow or null pointer exception in native code.
      report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                   exceptionInfo->ContextRecord);
      return EXCEPTION_CONTINUE_SEARCH;
    } // /EXCEPTION_ACCESS_VIOLATION
    // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#if defined _M_IA64
    else if ((exception_code == EXCEPTION_ILLEGAL_INSTRUCTION ||
              exception_code == EXCEPTION_ILLEGAL_INSTRUCTION_2)) {
      M37 handle_wrong_method_break(0, NativeJump::HANDLE_WRONG_METHOD, PR0);

      // Compiled method patched to be non entrant? Following conditions must apply:
      // 1. must be first instruction in bundle
      // 2. must be a break instruction with appropriate code
      if((((uint64_t) pc & 0x0F) == 0) &&
         (((IPF_Bundle*) pc)->get_slot0() == handle_wrong_method_break.bits())) {
        return Handle_Exception(exceptionInfo,
                                (address)SharedRuntime::get_handle_wrong_method_stub());
      }
    } // /EXCEPTION_ILLEGAL_INSTRUCTION
#endif


    if (in_java) {
      switch (exception_code) {
      case EXCEPTION_INT_DIVIDE_BY_ZERO:
        return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));

      case EXCEPTION_INT_OVERFLOW:
        return Handle_IDiv_Exception(exceptionInfo);

      } // switch
    }
#ifndef _WIN64
    if (((thread->thread_state() == _thread_in_Java) ||
         (thread->thread_state() == _thread_in_native)) &&
        exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION)
    {
      LONG result=Handle_FLT_Exception(exceptionInfo);
      if (result==EXCEPTION_CONTINUE_EXECUTION) return result;
    }
#endif //_WIN64
  }

  // EXCEPTION_BREAKPOINT is deliberately not reported: a debugger (or
  // os::breakpoint()) may legitimately raise it.
  if (exception_code != EXCEPTION_BREAKPOINT) {
    report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                 exceptionInfo->ContextRecord);
  }
  return EXCEPTION_CONTINUE_SEARCH;
}
2699 #ifndef _WIN64
// Special care for fast JNI accessors.
// jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
// the heap gets shrunk before the field access.
// Need to install our own structured exception handler since native code may
// install its own.
LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    // If the faulting pc lies inside a fast accessor stub, resume at its
    // slow-case re-entry point instead of crashing.
    address pc = (address) exceptionInfo->ContextRecord->Eip;
    address addr = JNI_FastGetField::find_slowcase_pc(pc);
    if (addr != (address)-1) {
      return Handle_Exception(exceptionInfo, addr);
    }
  }
  return EXCEPTION_CONTINUE_SEARCH;
}
// Generates an SEH-protected wrapper around a fast JNI field accessor: on an
// access violation inside the accessor, the filter above redirects to the
// slow path; if the filter does not continue execution, the wrapper
// returns 0.
#define DEFINE_FAST_GETFIELD(Return,Fieldname,Result) \
Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env, jobject obj, jfieldID fieldID) { \
  __try { \
    return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env, obj, fieldID); \
  } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)_exception_info())) { \
  } \
  return 0; \
}

// One wrapper per primitive type.
DEFINE_FAST_GETFIELD(jboolean, bool, Boolean)
DEFINE_FAST_GETFIELD(jbyte, byte, Byte)
DEFINE_FAST_GETFIELD(jchar, char, Char)
DEFINE_FAST_GETFIELD(jshort, short, Short)
DEFINE_FAST_GETFIELD(jint, int, Int)
DEFINE_FAST_GETFIELD(jlong, long, Long)
DEFINE_FAST_GETFIELD(jfloat, float, Float)
DEFINE_FAST_GETFIELD(jdouble, double, Double)
// Returns the SEH-protected wrapper for the fast JNI getter of the given
// primitive type; asserts (ShouldNotReachHere) for any other BasicType.
address os::win32::fast_jni_accessor_wrapper(BasicType type) {
  switch (type) {
  case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
  case T_BYTE:    return (address)jni_fast_GetByteField_wrapper;
  case T_CHAR:    return (address)jni_fast_GetCharField_wrapper;
  case T_SHORT:   return (address)jni_fast_GetShortField_wrapper;
  case T_INT:     return (address)jni_fast_GetIntField_wrapper;
  case T_LONG:    return (address)jni_fast_GetLongField_wrapper;
  case T_FLOAT:   return (address)jni_fast_GetFloatField_wrapper;
  case T_DOUBLE:  return (address)jni_fast_GetDoubleField_wrapper;
  default:        ShouldNotReachHere();
  }
  // Not reached in product builds; keeps the compiler happy.
  return (address)-1;
}
2749 #endif
// Run an internal VM test function under the VM's top-level SEH filter.
void os::win32::call_test_func_with_wrapper(void (*funcPtr)(void)) {
  // Install a win32 structured exception handler around the test
  // function call so the VM can generate an error dump if needed.
  __try {
    (*funcPtr)();
  } __except(topLevelExceptionFilter(
                 (_EXCEPTION_POINTERS*)_exception_info())) {
    // Nothing to do.
  }
}
// Virtual Memory

// OS page size, cached by os::win32 at VM startup.
int os::vm_page_size() { return os::win32::vm_page_size(); }
// Granularity at which VirtualAlloc reserves address space (typically 64K),
// cached by os::win32 at VM startup.
int os::vm_allocation_granularity() {
  return os::win32::vm_allocation_granularity();
}
2769 // Windows large page support is available on Windows 2003. In order to use
2770 // large page memory, the administrator must first assign additional privilege
2771 // to the user:
2772 // + select Control Panel -> Administrative Tools -> Local Security Policy
2773 // + select Local Policies -> User Rights Assignment
2774 // + double click "Lock pages in memory", add users and/or groups
2775 // + reboot
2776 // Note the above steps are needed for administrator as well, as administrators
2777 // by default do not have the privilege to lock pages in memory.
2778 //
2779 // Note about Windows 2003: although the API supports committing large page
2780 // memory on a page-by-page basis and VirtualAlloc() returns success under this
2781 // scenario, I found through experiment it only uses large page if the entire
2782 // memory region is reserved and committed in a single VirtualAlloc() call.
2783 // This makes Windows large page support more or less like Solaris ISM, in
2784 // that the entire heap must be committed upfront. This probably will change
2785 // in the future, if so the code below needs to be revisited.
2787 #ifndef MEM_LARGE_PAGES
2788 #define MEM_LARGE_PAGES 0x20000000
2789 #endif
// Process and token handles held while acquiring the SeLockMemoryPrivilege
// for large pages; released by cleanup_after_large_page_init().
static HANDLE _hProcess;
static HANDLE _hToken;
2794 // Container for NUMA node list info
2795 class NUMANodeListHolder {
2796 private:
2797 int *_numa_used_node_list; // allocated below
2798 int _numa_used_node_count;
2800 void free_node_list() {
2801 if (_numa_used_node_list != NULL) {
2802 FREE_C_HEAP_ARRAY(int, _numa_used_node_list, mtInternal);
2803 }
2804 }
2806 public:
2807 NUMANodeListHolder() {
2808 _numa_used_node_count = 0;
2809 _numa_used_node_list = NULL;
2810 // do rest of initialization in build routine (after function pointers are set up)
2811 }
2813 ~NUMANodeListHolder() {
2814 free_node_list();
2815 }
2817 bool build() {
2818 DWORD_PTR proc_aff_mask;
2819 DWORD_PTR sys_aff_mask;
2820 if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
2821 ULONG highest_node_number;
2822 if (!os::Kernel32Dll::GetNumaHighestNodeNumber(&highest_node_number)) return false;
2823 free_node_list();
2824 _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
2825 for (unsigned int i = 0; i <= highest_node_number; i++) {
2826 ULONGLONG proc_mask_numa_node;
2827 if (!os::Kernel32Dll::GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
2828 if ((proc_aff_mask & proc_mask_numa_node)!=0) {
2829 _numa_used_node_list[_numa_used_node_count++] = i;
2830 }
2831 }
2832 return (_numa_used_node_count > 1);
2833 }
2835 int get_count() {return _numa_used_node_count;}
2836 int get_node_list_entry(int n) {
2837 // for indexes out of range, returns -1
2838 return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
2839 }
2841 } numa_node_list_holder;
// Large page size in bytes; 0 until large page support has been initialized.
static size_t _large_page_size = 0;
2847 static bool resolve_functions_for_large_page_init() {
2848 return os::Kernel32Dll::GetLargePageMinimumAvailable() &&
2849 os::Advapi32Dll::AdvapiAvailable();
2850 }
// Attempts to enable the SeLockMemoryPrivilege ("Lock pages in memory") on
// the current process token, which is required to allocate large pages.
// Returns true only when the privilege was actually granted.  The handles
// opened here (_hProcess, _hToken) are released later by
// cleanup_after_large_page_init().
static bool request_lock_memory_privilege() {
  _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
                          os::current_process_id());

  LUID luid;
  if (_hProcess != NULL &&
      os::Advapi32Dll::OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) &&
      os::Advapi32Dll::LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {

    TOKEN_PRIVILEGES tp;
    tp.PrivilegeCount = 1;
    tp.Privileges[0].Luid = luid;
    tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;

    // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
    // privilege. Check GetLastError() too. See MSDN document.
    if (os::Advapi32Dll::AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) &&
        (GetLastError() == ERROR_SUCCESS)) {
      return true;
    }
  }

  return false;
}
2877 static void cleanup_after_large_page_init() {
2878 if (_hProcess) CloseHandle(_hProcess);
2879 _hProcess = NULL;
2880 if (_hToken) CloseHandle(_hToken);
2881 _hToken = NULL;
2882 }
// Decide whether NUMA interleaving can be used: requires the NUMA kernel32
// entry points and a process affinity mask spanning more than one node.
// Side effects: rounds NUMAInterleaveGranularity up to a legal value and
// (via numa_node_list_holder.build()) records the usable node list.
// Warnings are only printed if the user set -XX:+UseNUMAInterleaving.
static bool numa_interleaving_init() {
  bool success = false;
  bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving);

  // print a warning if UseNUMAInterleaving flag is specified on command line
  bool warn_on_failure = use_numa_interleaving_specified;
# define WARN(msg) if (warn_on_failure) { warning(msg); }

  // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
  size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
  NUMAInterleaveGranularity = align_size_up(NUMAInterleaveGranularity, min_interleave_granularity);

  if (os::Kernel32Dll::NumaCallsAvailable()) {
    if (numa_node_list_holder.build()) {
      if (PrintMiscellaneous && Verbose) {
        tty->print("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
        for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
          tty->print("%d ", numa_node_list_holder.get_node_list_entry(i));
        }
        tty->print("\n");
      }
      success = true;
    } else {
      WARN("Process does not cover multiple NUMA nodes.");
    }
  } else {
    WARN("NUMA Interleaving is not supported by the operating system.");
  }
  if (!success) {
    if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag.");
  }
  return success;
#undef WARN
}
// this routine is used whenever we need to reserve a contiguous VA range
// but we need to make separate VirtualAlloc calls for each piece of the range
// Reasons for doing this:
//  * UseLargePagesIndividualAllocation was set (normally only needed on WS2003 but possible to be set otherwise)
//  * UseNUMAInterleaving requires a separate node for each piece
//
// Returns the start of the allocated range, or NULL on any failure (in which
// case all partially-allocated pieces have been released). NMT records the
// whole range as a single block on success.
static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags, DWORD prot,
                                         bool should_inject_error=false) {
  char * p_buf;
  // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
  size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
  size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;

  // first reserve enough address space in advance since we want to be
  // able to break a single contiguous virtual address range into multiple
  // large page commits but WS2003 does not allow reserving large page space
  // so we just use 4K pages for reserve, this gives us a legal contiguous
  // address space. then we will deallocate that reservation, and re alloc
  // using large pages
  const size_t size_of_reserve = bytes + chunk_size;
  if (bytes > size_of_reserve) {
    // Overflowed.
    return NULL;
  }
  p_buf = (char *) VirtualAlloc(addr,
                                size_of_reserve,  // size of Reserve
                                MEM_RESERVE,
                                PAGE_READWRITE);
  // If reservation failed, return NULL
  if (p_buf == NULL) return NULL;
  // The probe reservation is immediately dropped again; only the address it
  // yielded is kept. The piecewise allocations below re-claim that range.
  MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
  os::release_memory(p_buf, bytes + chunk_size);

  // we still need to round up to a page boundary (in case we are using large pages)
  // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size)
  // instead we handle this in the bytes_to_rq computation below
  p_buf = (char *) align_size_up((size_t)p_buf, page_size);

  // now go through and allocate one chunk at a time until all bytes are
  // allocated
  size_t  bytes_remaining = bytes;
  // An overflow of align_size_up() would have been caught above
  // in the calculation of size_of_reserve.
  char * next_alloc_addr = p_buf;
  HANDLE hProc = GetCurrentProcess();

#ifdef ASSERT
  // Variable for the failure injection
  long ran_num = os::random();
  size_t fail_after = ran_num % bytes;
#endif

  int count=0;
  while (bytes_remaining) {
    // select bytes_to_rq to get to the next chunk_size boundary

    size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size));
    // Note allocate and commit
    char * p_new;

#ifdef ASSERT
    bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after);
#else
    const bool inject_error_now = false;
#endif

    if (inject_error_now) {
      p_new = NULL;
    } else {
      if (!UseNUMAInterleaving) {
        p_new = (char *) VirtualAlloc(next_alloc_addr,
                                      bytes_to_rq,
                                      flags,
                                      prot);
      } else {
        // get the next node to use from the used_node_list
        assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
        DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
        p_new = (char *)os::Kernel32Dll::VirtualAllocExNuma(hProc,
                                                            next_alloc_addr,
                                                            bytes_to_rq,
                                                            flags,
                                                            prot,
                                                            node);
      }
    }

    if (p_new == NULL) {
      // Free any allocated pages
      if (next_alloc_addr > p_buf) {
        // Some memory was committed so release it.
        size_t bytes_to_release = bytes - bytes_remaining;
        // NMT has yet to record any individual blocks, so it
        // need to create a dummy 'reserve' record to match
        // the release.
        MemTracker::record_virtual_memory_reserve((address)p_buf,
                                                  bytes_to_release, CALLER_PC);
        os::release_memory(p_buf, bytes_to_release);
      }
#ifdef ASSERT
      if (should_inject_error) {
        if (TracePageSizes && Verbose) {
          tty->print_cr("Reserving pages individually failed.");
        }
      }
#endif
      return NULL;
    }

    bytes_remaining -= bytes_to_rq;
    next_alloc_addr += bytes_to_rq;
    count++;
  }
  // Although the memory is allocated individually, it is returned as one.
  // NMT records it as one block.
  if ((flags & MEM_COMMIT) != 0) {
    MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
  } else {
    MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
  }

  // made it this far, success
  return p_buf;
}
// Determine whether large pages can be used and pick the large-page size.
// On success, sets _large_page_size (honoring -XX:LargePageSizeInBytes when it
// is a multiple of the OS minimum) and updates _page_sizes. On any failure,
// UseLargePages is turned back off. Warnings print only if the user set a
// large-page flag explicitly.
void os::large_page_init() {
  if (!UseLargePages) return;

  // print a warning if any large page related flag is specified on command line
  bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
                         !FLAG_IS_DEFAULT(LargePageSizeInBytes);
  bool success = false;

# define WARN(msg) if (warn_on_failure) { warning(msg); }
  if (resolve_functions_for_large_page_init()) {
    if (request_lock_memory_privilege()) {
      size_t s = os::Kernel32Dll::GetLargePageMinimum();
      if (s) {
#if defined(IA32) || defined(AMD64)
        if (s > 4*M || LargePageSizeInBytes > 4*M) {
          WARN("JVM cannot use large pages bigger than 4mb.");
        } else {
#endif
          if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) {
            _large_page_size = LargePageSizeInBytes;
          } else {
            _large_page_size = s;
          }
          success = true;
#if defined(IA32) || defined(AMD64)
        }
#endif
      } else {
        WARN("Large page is not supported by the processor.");
      }
    } else {
      WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
    }
  } else {
    WARN("Large page is not supported by the operating system.");
  }
#undef WARN

  const size_t default_page_size = (size_t) vm_page_size();
  if (success && _large_page_size > default_page_size) {
    _page_sizes[0] = _large_page_size;
    _page_sizes[1] = default_page_size;
    _page_sizes[2] = 0;
  }

  cleanup_after_large_page_init();
  UseLargePages = success;
}
3094 // On win32, one cannot release just a part of reserved memory, it's an
3095 // all or nothing deal. When we split a reservation, we must break the
3096 // reservation into two reservations.
3097 void os::pd_split_reserved_memory(char *base, size_t size, size_t split,
3098 bool realloc) {
3099 if (size > 0) {
3100 release_memory(base, size);
3101 if (realloc) {
3102 reserve_memory(split, base);
3103 }
3104 if (size != split) {
3105 reserve_memory(size - split, base + split);
3106 }
3107 }
3108 }
// Multiple threads can race in this code but it's not possible to unmap small sections of
// virtual space to get requested alignment, like posix-like os's.
// Windows prevents multiple thread from remapping over each other so this loop is thread-safe.
char* os::reserve_memory_aligned(size_t size, size_t alignment) {
  assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
      "Alignment must be a multiple of allocation granularity (page size)");
  assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");

  size_t extra_size = size + alignment;
  assert(extra_size >= size, "overflow, size is too large to allow alignment");

  char* aligned_base = NULL;

  do {
    // Over-reserve, find an aligned address inside the range, then release
    // and re-reserve exactly at that address. Another thread may grab the
    // address between the release and re-reserve, in which case the
    // re-reserve fails and we retry the whole sequence.
    char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
    if (extra_base == NULL) {
      return NULL;
    }
    // Do manual alignment
    aligned_base = (char*) align_size_up((uintptr_t) extra_base, alignment);

    os::release_memory(extra_base, extra_size);

    aligned_base = os::reserve_memory(size, aligned_base);

  } while (aligned_base == NULL);

  return aligned_base;
}
// Reserve (not commit) 'bytes' of virtual address space, optionally at a
// requested address. With NUMA interleaving on (and large pages off) the
// range is reserved chunk-by-chunk across nodes; otherwise a single
// VirtualAlloc(MEM_RESERVE) is used. Returns NULL on failure.
char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
  assert((size_t)addr % os::vm_allocation_granularity() == 0,
         "reserve alignment");
  assert(bytes % os::vm_allocation_granularity() == 0, "reserve block size");
  char* res;
  // note that if UseLargePages is on, all the areas that require interleaving
  // will go thru reserve_memory_special rather than thru here.
  bool use_individual = (UseNUMAInterleaving && !UseLargePages);
  if (!use_individual) {
    res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
  } else {
    elapsedTimer reserveTimer;
    if( Verbose && PrintMiscellaneous ) reserveTimer.start();
    // in numa interleaving, we have to allocate pages individually
    // (well really chunks of NUMAInterleaveGranularity size)
    res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
    if (res == NULL) {
      warning("NUMA page allocation failed");
    }
    if( Verbose && PrintMiscellaneous ) {
      reserveTimer.stop();
      tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
                    reserveTimer.milliseconds(), reserveTimer.ticks());
    }
  }
  assert(res == NULL || addr == NULL || addr == res,
         "Unexpected address from reserve.");

  return res;
}
// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).
char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  // Windows os::reserve_memory() fails if the requested address range is
  // not available, so no extra checking is needed here.
  return reserve_memory(bytes, requested_addr);
}
// Large-page size chosen by large_page_init(); 0 if large pages are unusable.
size_t os::large_page_size() {
  return _large_page_size;
}

bool os::can_commit_large_page_memory() {
  // Windows only uses large page memory when the entire region is reserved
  // and committed in a single VirtualAlloc() call. This may change in the
  // future, but with Windows 2003 it's not possible to commit on demand.
  return false;
}

bool os::can_execute_large_page_memory() {
  return true;
}
// Reserve AND commit a large-page region in one step (Windows cannot commit
// large pages on demand; see can_commit_large_page_memory()). Falls back to
// NULL (caller uses small pages) when the size/alignment don't fit the
// large-page size. May allocate piecewise for WS2003 or NUMA interleaving.
char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr, bool exec) {
  assert(UseLargePages, "only for large pages");

  if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
    return NULL; // Fallback to small pages.
  }

  const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
  const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;

  // with large pages, there are two cases where we need to use Individual Allocation
  // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
  // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
  if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
    if (TracePageSizes && Verbose) {
      tty->print_cr("Reserving large pages individually.");
    }
    char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
    if (p_buf == NULL) {
      // give an appropriate warning message
      if (UseNUMAInterleaving) {
        warning("NUMA large page allocation failed, UseLargePages flag ignored");
      }
      if (UseLargePagesIndividualAllocation) {
        warning("Individually allocated large pages failed, "
                "use -XX:-UseLargePagesIndividualAllocation to turn off");
      }
      return NULL;
    }

    return p_buf;

  } else {
    if (TracePageSizes && Verbose) {
      tty->print_cr("Reserving large pages in a single large chunk.");
    }
    // normal policy just allocate it all at once
    DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
    char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
    if (res != NULL) {
      MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC);
    }

    return res;
  }
}
// Release a region obtained from reserve_memory_special(); on Windows this
// is the same operation as releasing ordinary reserved memory.
bool os::release_memory_special(char* base, size_t bytes) {
  assert(base != NULL, "Sanity check");
  return release_memory(base, bytes);
}

// No per-platform statistics are reported on Windows.
void os::print_statistics() {
}
// Log a warning (with the OS error code and message text) for a failed
// commit_memory() call. Reads GetLastError via os::get_last_error(), so it
// must be called before any other Windows API call clobbers the error state.
static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
  int err = os::get_last_error();
  char buf[256];
  size_t buf_len = os::lasterror(buf, sizeof(buf));
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
          exec, buf_len != 0 ? buf : "<no_error_string>", err);
}
// Commit previously reserved pages, optionally with execute permission.
// Returns false on failure (caller decides whether that is fatal). With NUMA
// interleaving the range may span several VirtualAlloc reservations and is
// committed region-by-region as reported by VirtualQuery.
bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
  if (bytes == 0) {
    // Don't bother the OS with noops.
    return true;
  }
  assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
  assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
  // Don't attempt to print anything if the OS call fails. We're
  // probably low on resources, so the print itself may cause crashes.

  // unless we have NUMAInterleaving enabled, the range of a commit
  // is always within a reserve covered by a single VirtualAlloc
  // in that case we can just do a single commit for the requested size
  if (!UseNUMAInterleaving) {
    if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
      NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
      return false;
    }
    if (exec) {
      DWORD oldprot;
      // Windows doc says to use VirtualProtect to get execute permissions
      if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
        NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
        return false;
      }
    }
    return true;
  } else {

    // when NUMAInterleaving is enabled, the commit might cover a range that
    // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
    // VirtualQuery can help us determine that.  The RegionSize that VirtualQuery
    // returns represents the number of bytes that can be committed in one step.
    size_t bytes_remaining = bytes;
    char * next_alloc_addr = addr;
    while (bytes_remaining > 0) {
      MEMORY_BASIC_INFORMATION alloc_info;
      VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
      size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
      if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
                       PAGE_READWRITE) == NULL) {
        NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
                                            exec);)
        return false;
      }
      if (exec) {
        DWORD oldprot;
        if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
                            PAGE_EXECUTE_READWRITE, &oldprot)) {
          NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
                                              exec);)
          return false;
        }
      }
      bytes_remaining -= bytes_to_rq;
      next_alloc_addr += bytes_to_rq;
    }
  }
  // if we made it this far, return true
  return true;
}
// Alignment-hint overload; Windows commit granularity is fixed, so the hint
// is dropped and the basic commit path is used.
bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
                          bool exec) {
  // alignment_hint is ignored on this OS
  return pd_commit_memory(addr, size, exec);
}

// Commit or die: on failure, logs the OS error and aborts the VM with an
// out-of-memory error carrying the caller-supplied message.
void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  if (!pd_commit_memory(addr, size, exec)) {
    warn_fail_commit_memory(addr, size, exec);
    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
  }
}

// Alignment-hint overload of commit-or-die; hint is ignored (see above).
void os::pd_commit_memory_or_exit(char* addr, size_t size,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  // alignment_hint is ignored on this OS
  pd_commit_memory_or_exit(addr, size, exec, mesg);
}
// Decommit pages (keep the address range reserved). Page-aligned only.
bool os::pd_uncommit_memory(char* addr, size_t bytes) {
  if (bytes == 0) {
    // Don't bother the OS with noops.
    return true;
  }
  assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
  assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
  return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0);
}

// Release a whole reservation. VirtualFree(MEM_RELEASE) requires size 0 and
// frees the entire region that was reserved at 'addr'; 'bytes' is unused.
bool os::pd_release_memory(char* addr, size_t bytes) {
  return VirtualFree(addr, 0, MEM_RELEASE) != 0;
}

// Stack guard pages on Windows are just committed, non-executable pages;
// the actual protection is applied separately via protect_memory().
bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
  return os::commit_memory(addr, size, !ExecMem);
}

// Undo pd_create_stack_guard_pages() by decommitting the range.
bool os::remove_stack_guard_pages(char* addr, size_t size) {
  return os::uncommit_memory(addr, size);
}
// Set protections specified. Maps the VM's ProtType onto Win32 page
// protection constants and applies them with VirtualProtect. If the range
// is not yet committed it is committed first (or the VM exits), because
// Win32 cannot change protection on uncommitted pages.
bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
                        bool is_committed) {
  unsigned int p = 0;
  switch (prot) {
  case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
  case MEM_PROT_READ: p = PAGE_READONLY; break;
  case MEM_PROT_RW:   p = PAGE_READWRITE; break;
  case MEM_PROT_RWX:  p = PAGE_EXECUTE_READWRITE; break;
  default:
    ShouldNotReachHere();
  }

  DWORD old_status;

  // Strange enough, but on Win32 one can change protection only for committed
  // memory, not a big deal anyway, as bytes less or equal than 64K
  if (!is_committed) {
    commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
                          "cannot commit protection page");
  }
  // One cannot use os::guard_memory() here, as on Win32 guard page
  // have different (one-shot) semantics, from MSDN on PAGE_GUARD:
  //
  // Pages in the region become guard pages. Any attempt to access a guard page
  // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
  // the guard page status. Guard pages thus act as a one-time access alarm.
  return VirtualProtect(addr, bytes, p, &old_status) != 0;
}
// Mark pages as Win32 guard pages (one-shot access alarm; the PAGE_GUARD
// flag auto-clears on the first touch — see the note in protect_memory()).
bool os::guard_memory(char* addr, size_t bytes) {
  DWORD old_status;
  return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
}

// Remove the guard status by restoring plain read-write protection.
bool os::unguard_memory(char* addr, size_t bytes) {
  DWORD old_status;
  return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
}
// NUMA/paging hooks that are no-ops (or trivial) on Windows. The group count
// comes from numa_node_list_holder, clamped to at least 1 for UMA machines.
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::numa_make_global(char *addr, size_t bytes) { }
void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) { }
bool os::numa_topology_changed() { return false; }
size_t os::numa_get_groups_num() { return MAX2(numa_node_list_holder.get_count(), 1); }
int os::numa_get_group_id() { return 0; }
3411 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
3412 if (numa_node_list_holder.get_count() == 0 && size > 0) {
3413 // Provide an answer for UMA systems
3414 ids[0] = 0;
3415 return 1;
3416 } else {
3417 // check for size bigger than actual groups_num
3418 size = MIN2(size, numa_get_groups_num());
3419 for (int i = 0; i < (int)size; i++) {
3420 ids[i] = numa_node_list_holder.get_node_list_entry(i);
3421 }
3422 return size;
3423 }
3424 }
// Per-page residency info is not supported on Windows.
bool os::get_page_info(char *start, page_info* info) {
  return false;
}

// No page scanning on Windows: report the whole range as already scanned.
char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
  return end;
}

char* os::non_memory_address_word() {
  // Must never look like an address returned by reserve_memory,
  // even in its subfields (as defined by the CPU immediate fields,
  // if the CPU splits constants across multiple instructions).
  return (char*)-1;
}
#define MAX_ERROR_COUNT 100
// ResumeThread()'s failure return value ((DWORD)-1).
#define SYS_THREAD_ERROR 0xffffffffUL

// Start a thread created in the suspended state by resuming it.
void os::pd_start_thread(Thread* thread) {
  DWORD ret = ResumeThread(thread->osthread()->thread_handle());
  // Returns previous suspend state:
  // 0: Thread was not suspended
  // 1: Thread is running now
  // >1: Thread is still suspended.
  assert(ret != SYS_THREAD_ERROR, "StartThread failed"); // should propagate back
}
3453 class HighResolutionInterval : public CHeapObj<mtThread> {
3454 // The default timer resolution seems to be 10 milliseconds.
3455 // (Where is this written down?)
3456 // If someone wants to sleep for only a fraction of the default,
3457 // then we set the timer resolution down to 1 millisecond for
3458 // the duration of their interval.
3459 // We carefully set the resolution back, since otherwise we
3460 // seem to incur an overhead (3%?) that we don't need.
3461 // CONSIDER: if ms is small, say 3, then we should run with a high resolution time.
3462 // Buf if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
3463 // Alternatively, we could compute the relative error (503/500 = .6%) and only use
3464 // timeBeginPeriod() if the relative error exceeded some threshold.
3465 // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
3466 // to decreased efficiency related to increased timer "tick" rates. We want to minimize
3467 // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
3468 // resolution timers running.
3469 private:
3470 jlong resolution;
3471 public:
3472 HighResolutionInterval(jlong ms) {
3473 resolution = ms % 10L;
3474 if (resolution != 0) {
3475 MMRESULT result = timeBeginPeriod(1L);
3476 }
3477 }
3478 ~HighResolutionInterval() {
3479 if (resolution != 0) {
3480 MMRESULT result = timeEndPeriod(1L);
3481 }
3482 resolution = 0L;
3483 }
3484 };
// Sleep for 'ms' milliseconds. Interruptible sleeps (Java threads only) wait
// on the thread's interrupt event and return OS_INTRPT if it fires, honoring
// the external-suspend protocol; non-interruptible sleeps (non-Java threads)
// just call Sleep() and return OS_TIMEOUT. Intervals longer than MAXDWORD
// are chopped into MAXDWORD-sized pieces by the leading loop.
int os::sleep(Thread* thread, jlong ms, bool interruptable) {
  jlong limit = (jlong) MAXDWORD;

  while(ms > limit) {
    int res;
    if ((res = sleep(thread, limit, interruptable)) != OS_TIMEOUT)
      return res;
    ms -= limit;
  }

  assert(thread == Thread::current(), "thread consistency check");
  OSThread* osthread = thread->osthread();
  OSThreadWaitState osts(osthread, false /* not Object.wait() */);
  int result;
  if (interruptable) {
    assert(thread->is_Java_thread(), "must be java thread");
    JavaThread *jt = (JavaThread *) thread;
    ThreadBlockInVM tbivm(jt);

    jt->set_suspend_equivalent();
    // cleared by handle_special_suspend_equivalent_condition() or
    // java_suspend_self() via check_and_wait_while_suspended()

    HANDLE events[1];
    events[0] = osthread->interrupt_event();
    HighResolutionInterval *phri=NULL;
    // Temporarily raise the timer resolution so short/odd sleep intervals
    // are honored more precisely (unless the user forced it on globally).
    if(!ForceTimeHighResolution)
      phri = new HighResolutionInterval( ms );
    if (WaitForMultipleObjects(1, events, FALSE, (DWORD)ms) == WAIT_TIMEOUT) {
      result = OS_TIMEOUT;
    } else {
      // Interrupt event was signaled: consume it and report interruption.
      ResetEvent(osthread->interrupt_event());
      osthread->set_interrupted(false);
      result = OS_INTRPT;
    }
    delete phri; //if it is NULL, harmless

    // were we externally suspended while we were waiting?
    jt->check_and_wait_while_suspended();
  } else {
    assert(!thread->is_Java_thread(), "must not be java thread");
    Sleep((long) ms);
    result = OS_TIMEOUT;
  }
  return result;
}
//
// Short sleep, direct OS call.
//
// ms = 0, means allow others (if any) to run.
//
void os::naked_short_sleep(jlong ms) {
  assert(ms < 1000, "Un-interruptable sleep, short time use only");
  Sleep(ms);
}

// Sleep forever; naked call to OS-specific sleep; use with CAUTION
void os::infinite_sleep() {
  while (true) {    // sleep forever ...
    Sleep(100000);  // ... 100 seconds at a time
  }
}
// Signature of kernel32's SwitchToThread entry point.
typedef BOOL (WINAPI * STTSignature)(void) ;

// Yield the processor once. Prefers SwitchToThread() when available (its
// return value says whether another thread actually ran); falls back to
// Sleep(0), whose effect on other threads cannot be observed.
os::YieldResult os::NakedYield() {
  // Use either SwitchToThread() or Sleep(0)
  // Consider passing back the return value from SwitchToThread().
  if (os::Kernel32Dll::SwitchToThreadAvailable()) {
    return SwitchToThread() ? os::YIELD_SWITCHED : os::YIELD_NONEREADY ;
  } else {
    Sleep(0);
  }
  return os::YIELD_UNKNOWN ;
}

void os::yield() { os::NakedYield(); }

void os::yield_all(int attempts) {
  // Yields to all threads, including threads with lower priorities
  Sleep(1);
}
// Win32 only gives you access to seven real priorities at a time,
// so we compress Java's ten down to seven.  It would be better
// if we dynamically adjusted relative priorities.

// Default mapping from Java thread priority (index) to Win32 priority.
int os::java_to_os_priority[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
  THREAD_PRIORITY_LOWEST,                       // 2
  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
  THREAD_PRIORITY_NORMAL,                       // 6
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
  THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
  THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
};

// Alternative, more spread-out mapping installed by prio_init() when
// -XX:ThreadPriorityPolicy=1 is set.
int prio_policy1[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
  THREAD_PRIORITY_LOWEST,                       // 2
  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 6
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
  THREAD_PRIORITY_HIGHEST,                      // 8
  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
  THREAD_PRIORITY_TIME_CRITICAL,                // 10 MaxPriority
  THREAD_PRIORITY_TIME_CRITICAL                 // 11 CriticalPriority
};
3604 static int prio_init() {
3605 // If ThreadPriorityPolicy is 1, switch tables
3606 if (ThreadPriorityPolicy == 1) {
3607 int i;
3608 for (i = 0; i < CriticalPriority + 1; i++) {
3609 os::java_to_os_priority[i] = prio_policy1[i];
3610 }
3611 }
3612 if (UseCriticalJavaThreadPriority) {
3613 os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority] ;
3614 }
3615 return 0;
3616 }
// Apply an OS-level priority to a thread; a no-op (reported as success)
// when -XX:-UseThreadPriorities.
OSReturn os::set_native_priority(Thread* thread, int priority) {
  if (!UseThreadPriorities) return OS_OK;
  bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
  return ret ? OS_OK : OS_ERR;
}

// Query a thread's OS-level priority into *priority_ptr. When priorities are
// disabled, reports the OS value mapped from NormPriority instead.
OSReturn os::get_native_priority(const Thread* const thread, int* priority_ptr) {
  if ( !UseThreadPriorities ) {
    *priority_ptr = java_to_os_priority[NormPriority];
    return OS_OK;
  }
  int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
  if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
    assert(false, "GetThreadPriority failed");
    return OS_ERR;
  }
  *priority_ptr = os_prio;
  return OS_OK;
}
// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail. Not implemented on Windows.
void os::hint_no_preempt() {}
// Interrupt a thread: set the interrupted flag, signal the interrupt event
// (wakes os::sleep), and unpark both the JSR-166 parker and the ParkEvent so
// any form of parked wait is released.
void os::interrupt(Thread* thread) {
  assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
         "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();
  osthread->set_interrupted(true);
  // More than one thread can get here with the same value of osthread,
  // resulting in multiple notifications.  We do, however, want the store
  // to interrupted() to be visible to other threads before we post
  // the interrupt event.
  OrderAccess::release();
  SetEvent(osthread->interrupt_event());
  // For JSR166:  unpark after setting status
  if (thread->is_Java_thread())
    ((JavaThread*)thread)->parker()->unpark();

  ParkEvent * ev = thread->_ParkEvent ;
  if (ev != NULL) ev->unpark() ;

}
// Test (and optionally clear) a thread's interrupted state. The state is
// only cleared when it will actually be reported, to avoid losing an
// interrupt — see the race discussion below.
bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
  assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
         "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();
  // There is no synchronization between the setting of the interrupt
  // and it being cleared here. It is critical - see 6535709 - that
  // we only clear the interrupt state, and reset the interrupt event,
  // if we are going to report that we were indeed interrupted - else
  // an interrupt can be "lost", leading to spurious wakeups or lost wakeups
  // depending on the timing. By checking thread interrupt event to see
  // if the thread gets real interrupt thus prevent spurious wakeup.
  bool interrupted = osthread->interrupted() && (WaitForSingleObject(osthread->interrupt_event(), 0) == WAIT_OBJECT_0);
  if (interrupted && clear_interrupted) {
    osthread->set_interrupted(false);
    ResetEvent(osthread->interrupt_event());
  } // Otherwise leave the interrupted state alone

  return interrupted;
}
// Gets a pc (hint) for a running thread via GetThreadContext. Currently used
// only for profiling; returns ExtendedPC(NULL) on failure or on IA64 (where
// this is unimplemented).
ExtendedPC os::get_thread_pc(Thread* thread) {
  CONTEXT context;
  context.ContextFlags = CONTEXT_CONTROL;
  HANDLE handle = thread->osthread()->thread_handle();
#ifdef _M_IA64
  assert(0, "Fix get_thread_pc");
  return ExtendedPC(NULL);
#else
  if (GetThreadContext(handle, &context)) {
#ifdef _M_AMD64
    return ExtendedPC((address) context.Rip);
#else
    return ExtendedPC((address) context.Eip);
#endif
  } else {
    return ExtendedPC(NULL);
  }
#endif
}
// GetCurrentThreadId() returns DWORD
intx os::current_thread_id() { return GetCurrentThreadId(); }

// Pid captured early in startup; 0 until then (defined before this chunk's
// view sets it — falls back to _getpid() while still 0).
static int _initial_pid = 0;

int os::current_process_id()
{
  return (_initial_pid ? _initial_pid : _getpid());
}
// Storage for os::win32 platform-query state. Most of these are filled in
// by os::win32::initialize_system_info() during VM startup.
int os::win32::_vm_page_size = 0;
int os::win32::_vm_allocation_granularity = 0;
int os::win32::_processor_type = 0;
// Processor level is not available on non-NT systems, use vm_version instead
int os::win32::_processor_level = 0;
julong os::win32::_physical_memory = 0;
size_t os::win32::_default_stack_size = 0;

intx os::win32::_os_thread_limit = 0;
volatile intx os::win32::_os_thread_count = 0;

bool os::win32::_is_nt = false;
bool os::win32::_is_windows_2003 = false;
bool os::win32::_is_windows_server = false;
// Query the OS for basic system facts (page size, allocation granularity,
// processor info, physical memory, Windows flavor/version) and cache them in
// the os::win32 statics above. Also captures the default stack size and
// starts the performance counter. Runs once during VM startup.
void os::win32::initialize_system_info() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  _vm_page_size = si.dwPageSize;
  _vm_allocation_granularity = si.dwAllocationGranularity;
  _processor_type = si.dwProcessorType;
  _processor_level = si.wProcessorLevel;
  set_processor_count(si.dwNumberOfProcessors);

  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);

  // also returns dwAvailPhys (free physical memory bytes), dwTotalVirtual, dwAvailVirtual,
  // dwMemoryLoad (% of memory in use)
  GlobalMemoryStatusEx(&ms);
  _physical_memory = ms.ullTotalPhys;

  OSVERSIONINFOEX oi;
  oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
  GetVersionEx((OSVERSIONINFO*)&oi);
  switch(oi.dwPlatformId) {
    case VER_PLATFORM_WIN32_WINDOWS: _is_nt = false; break;
    case VER_PLATFORM_WIN32_NT:
      _is_nt = true;
      {
        // 5002 == major 5, minor 2 == Windows Server 2003 / XP x64.
        int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
        if (os_vers == 5002) {
          _is_windows_2003 = true;
        }
        if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
            oi.wProductType == VER_NT_SERVER) {
          _is_windows_server = true;
        }
      }
      break;
    default: fatal("Unknown platform");
  }

  _default_stack_size = os::current_stack_size();
  assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
  assert((_default_stack_size & (_vm_page_size - 1)) == 0,
         "stack size not a multiple of page size");

  initialize_performance_counter();

  // Win95/Win98 scheduler bug work-around. The Win95/98 scheduler is
  // known to deadlock the system, if the VM issues to thread operations with
  // a too high frequency, e.g., such as changing the priorities.
  // The 6000 seems to work well - no deadlocks has been notices on the test
  // programs that we have seen experience this problem.
  if (!os::win32::is_nt()) {
    StarvationMonitorInterval = 6000;
  }
}
3788 HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf, int ebuflen) {
3789 char path[MAX_PATH];
3790 DWORD size;
3791 DWORD pathLen = (DWORD)sizeof(path);
3792 HINSTANCE result = NULL;
3794 // only allow library name without path component
3795 assert(strchr(name, '\\') == NULL, "path not allowed");
3796 assert(strchr(name, ':') == NULL, "path not allowed");
3797 if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
3798 jio_snprintf(ebuf, ebuflen,
3799 "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
3800 return NULL;
3801 }
3803 // search system directory
3804 if ((size = GetSystemDirectory(path, pathLen)) > 0) {
3805 strcat(path, "\\");
3806 strcat(path, name);
3807 if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3808 return result;
3809 }
3810 }
3812 // try Windows directory
3813 if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
3814 strcat(path, "\\");
3815 strcat(path, name);
3816 if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3817 return result;
3818 }
3819 }
3821 jio_snprintf(ebuf, ebuflen,
3822 "os::win32::load_windows_dll() cannot load %s from system directories.", name);
3823 return NULL;
3824 }
3826 void os::win32::setmode_streams() {
3827 _setmode(_fileno(stdin), _O_BINARY);
3828 _setmode(_fileno(stdout), _O_BINARY);
3829 _setmode(_fileno(stderr), _O_BINARY);
3830 }
3833 bool os::is_debugger_attached() {
3834 return IsDebuggerPresent() ? true : false;
3835 }
3838 void os::wait_for_keypress_at_exit(void) {
3839 if (PauseAtExit) {
3840 fprintf(stderr, "Press any key to continue...\n");
3841 fgetc(stdin);
3842 }
3843 }
3846 int os::message_box(const char* title, const char* message) {
3847 int result = MessageBox(NULL, message, title,
3848 MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY);
3849 return result == IDYES;
3850 }
3852 int os::allocate_thread_local_storage() {
3853 return TlsAlloc();
3854 }
// Releases a TLS slot previously returned by allocate_thread_local_storage().
void os::free_thread_local_storage(int index) {
  TlsFree(index);
}
// Stores 'value' in the given TLS slot for the calling thread.
void os::thread_local_storage_at_put(int index, void* value) {
  TlsSetValue(index, value);
  // Read back in debug builds to catch a bad index or a failed store.
  assert(thread_local_storage_at(index) == value, "Just checking");
}
3868 void* os::thread_local_storage_at(int index) {
3869 return TlsGetValue(index);
3870 }
#ifndef PRODUCT
#ifndef _WIN64
// Helpers to check whether NX protection is enabled

// SEH filter: handle the exception only when it is an access violation
// caused specifically by an execute attempt (NX/DEP fault).
int nx_exception_filter(_EXCEPTION_POINTERS *pex) {
  if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
      pex->ExceptionRecord->NumberParameters > 0 &&
      pex->ExceptionRecord->ExceptionInformation[0] ==
      EXCEPTION_INFO_EXEC_VIOLATION) {
    return EXCEPTION_EXECUTE_HANDLER;
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

// Probes for NX/DEP by calling a one-byte 'ret' placed on the stack;
// if NX is on, the call faults and the filter above reports it.
void nx_check_protection() {
  // If NX is enabled we'll get an exception calling into code on the stack
  char code[] = { (char)0xC3 }; // ret
  void *code_ptr = (void *)code;
  __try {
    __asm call code_ptr
  } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) {
    tty->print_raw_cr("NX protection detected.");
  }
}
#endif // _WIN64
#endif // PRODUCT
// this is called _before_ the global arguments have been parsed
void os::init(void) {
  _initial_pid = _getpid();  // cache pid; see os::current_process_id()

  init_random(1234567);

  // System info must come first: later steps read page size etc.
  win32::initialize_system_info();
  win32::setmode_streams();
  init_page_sizes((size_t) win32::vm_page_size());

  // For better scalability on MP systems (must be called after initialize_system_info)
#ifndef PRODUCT
  if (is_MP()) {
    NoYieldsInMicrolock = true;
  }
#endif
  // This may be overridden later when argument processing is done.
  FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation,
    os::win32::is_windows_2003());

  // Initialize main_process and main_thread
  main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
  // Duplicate to obtain a real (non-pseudo) handle for the main thread.
  if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
                       &main_thread, THREAD_ALL_ACCESS, false, 0)) {
    fatal("DuplicateHandle failed\n");
  }
  main_thread_id = (int) GetCurrentThreadId();
}
// To install functions for atexit processing
extern "C" {
  // atexit() needs a plain C function pointer; this thunk forwards to
  // the C++ perfMemory_exit().
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}

// Winsock initialization, defined later in this file; called from init_2().
static jint initSock();
// this is called _after_ the global arguments have been parsed
// Second-stage initialization: safepoint polling page, memory serialize
// page, FP exception setup, thread stack sizing, atexit hooks, NUMA and
// Winsock setup. Returns JNI_OK, or JNI_ERR on unrecoverable failure.
jint os::init_2(void) {
  // Allocate a single page and mark it as readable for safepoint polling
  address polling_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READONLY);
  guarantee( polling_page != NULL, "Reserve Failed for polling page");

  address return_page = (address)VirtualAlloc(polling_page, os::vm_page_size(), MEM_COMMIT, PAGE_READONLY);
  guarantee( return_page != NULL, "Commit Failed for polling page");

  os::set_polling_page( polling_page );

#ifndef PRODUCT
  if( Verbose && PrintMiscellaneous )
    tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
#endif

  if (!UseMembar) {
    // Page written by threads to serialize memory with the VM; only
    // needed when real memory barriers are not used.
    address mem_serialize_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READWRITE);
    guarantee( mem_serialize_page != NULL, "Reserve Failed for memory serialize page");

    return_page = (address)VirtualAlloc(mem_serialize_page, os::vm_page_size(), MEM_COMMIT, PAGE_READWRITE);
    guarantee( return_page != NULL, "Commit Failed for memory serialize page");

    os::set_memory_serialize_page( mem_serialize_page );

#ifndef PRODUCT
    if(Verbose && PrintMiscellaneous)
      tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
#endif
  }

  // Setup Windows Exceptions

  // for debugging float code generation bugs
  if (ForceFloatExceptions) {
#ifndef _WIN64
    static long fp_control_word = 0;
    __asm { fstcw fp_control_word }
    // see Intel PPro Manual, Vol. 2, p 7-16
    const long precision = 0x20;
    const long underflow = 0x10;
    const long overflow = 0x08;
    const long zero_div = 0x04;
    const long denorm = 0x02;
    const long invalid = 0x01;
    fp_control_word |= invalid;  // unmask only the invalid-operation exception
    __asm { fldcw fp_control_word }
#endif
  }

  // If stack_commit_size is 0, windows will reserve the default size,
  // but only commit a small portion of it.
  size_t stack_commit_size = round_to(ThreadStackSize*K, os::vm_page_size());
  size_t default_reserve_size = os::win32::default_stack_size();
  size_t actual_reserve_size = stack_commit_size;
  if (stack_commit_size < default_reserve_size) {
    // If stack_commit_size == 0, we want this too
    actual_reserve_size = default_reserve_size;
  }

  // Check minimum allowable stack size for thread creation and to initialize
  // the java system classes, including StackOverflowError - depends on page
  // size. Add a page for compiler2 recursion in main thread.
  // Add in 2*BytesPerWord times page size to account for VM stack during
  // class initialization depending on 32 or 64 bit VM.
  size_t min_stack_allowed =
            (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
            2*BytesPerWord COMPILER2_PRESENT(+1)) * os::vm_page_size();
  if (actual_reserve_size < min_stack_allowed) {
    tty->print_cr("\nThe stack size specified is too small, "
                  "Specify at least %dk",
                  min_stack_allowed / K);
    return JNI_ERR;
  }

  JavaThread::set_stack_size_at_create(stack_commit_size);

  // Calculate theoretical max. size of Threads to guard against artificial
  // out-of-memory situations, where all available address-space has been
  // reserved by thread stacks.
  assert(actual_reserve_size != 0, "Must have a stack");

  // Calculate the thread limit when we should start doing Virtual Memory
  // banging. Currently when the threads will have used all but 200Mb of space.
  //
  // TODO: consider performing a similar calculation for commit size instead
  // as reserve size, since on a 64-bit platform we'll run into that more
  // often than running out of virtual memory space. We can use the
  // lower value of the two calculations as the os_thread_limit.
  size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
  win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);

  // at exit methods are called in the reverse order of their registration.
  // there is no limit to the number of functions registered. atexit does
  // not set errno.

  if (PerfAllowAtExitRegistration) {
    // only register atexit functions if PerfAllowAtExitRegistration is set.
    // atexit functions can be delayed until process exit time, which
    // can be problematic for embedded VM situations. Embedded VMs should
    // call DestroyJavaVM() to assure that VM resources are released.

    // note: perfMemory_exit_helper atexit function may be removed in
    // the future if the appropriate cleanup code can be added to the
    // VM_Exit VMOperation's doit method.
    if (atexit(perfMemory_exit_helper) != 0) {
      warning("os::init_2 atexit(perfMemory_exit_helper) failed");
    }
  }

#ifndef _WIN64
  // Print something if NX is enabled (win32 on AMD64)
  NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
#endif

  // initialize thread priority policy
  prio_init();

  if (UseNUMA && !ForceNUMA) {
    UseNUMA = false; // We don't fully support this yet
  }

  if (UseNUMAInterleaving) {
    // first check whether this Windows OS supports VirtualAllocExNuma, if not ignore this flag
    bool success = numa_interleaving_init();
    if (!success) UseNUMAInterleaving = false;
  }

  if (initSock() != JNI_OK) {
    return JNI_ERR;
  }

  return JNI_OK;
}
4072 // Mark the polling page as unreadable
4073 void os::make_polling_page_unreadable(void) {
4074 DWORD old_status;
4075 if( !VirtualProtect((char *)_polling_page, os::vm_page_size(), PAGE_NOACCESS, &old_status) )
4076 fatal("Could not disable polling page");
4077 };
4079 // Mark the polling page as readable
4080 void os::make_polling_page_readable(void) {
4081 DWORD old_status;
4082 if( !VirtualProtect((char *)_polling_page, os::vm_page_size(), PAGE_READONLY, &old_status) )
4083 fatal("Could not enable polling page");
4084 };
// stat() wrapper that converts 'path' to native form first and, with
// -XX:+UseUTCFileTimestamp, normalizes st_mtime to be timezone-independent.
// Returns the ::stat() result (-1 with errno on failure).
int os::stat(const char *path, struct stat *sbuf) {
  char pathbuf[MAX_PATH];
  if (strlen(path) > MAX_PATH - 1) {
    errno = ENAMETOOLONG;
    return -1;
  }
  os::native_path(strcpy(pathbuf, path));
  int ret = ::stat(pathbuf, sbuf);
  if (sbuf != NULL && UseUTCFileTimestamp) {
    // Fix for 6539723. st_mtime returned from stat() is dependent on
    // the system timezone and so can return different values for the
    // same file if/when daylight savings time changes. This adjustment
    // makes sure the same timestamp is returned regardless of the TZ.
    //
    // See:
    // http://msdn.microsoft.com/library/
    // default.asp?url=/library/en-us/sysinfo/base/
    // time_zone_information_str.asp
    // and
    // http://msdn.microsoft.com/library/default.asp?url=
    // /library/en-us/sysinfo/base/settimezoneinformation.asp
    //
    // NOTE: there is a insidious bug here: If the timezone is changed
    // after the call to stat() but before 'GetTimeZoneInformation()', then
    // the adjustment we do here will be wrong and we'll return the wrong
    // value (which will likely end up creating an invalid class data
    // archive). Absent a better API for this, or some time zone locking
    // mechanism, we'll have to live with this risk.
    TIME_ZONE_INFORMATION tz;
    DWORD tzid = GetTimeZoneInformation(&tz);
    // Bias values are minutes west of UTC; convert to seconds.
    int daylightBias =
      (tzid == TIME_ZONE_ID_DAYLIGHT) ? tz.DaylightBias : tz.StandardBias;
    sbuf->st_mtime += (tz.Bias + daylightBias) * 60;
  }
  return ret;
}
// Combines the two 32-bit halves of a Win32 FILETIME (100ns units)
// into one signed 64-bit value.
#define FT2INT64(ft) \
  ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime))
// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
// are used by JVM M&M and JVMTI to get user+sys or user CPU time
// of a thread.
//
// current_thread_cpu_time() and thread_cpu_time(Thread*) returns
// the fast estimate available on the platform.

// current_thread_cpu_time() is not optimized for Windows yet
jlong os::current_thread_cpu_time() {
  // return user + sys since the cost is the same
  return os::thread_cpu_time(Thread::current(), true /* user+sys */);
}
// Fast-estimate CPU time for an arbitrary thread, in nanoseconds.
jlong os::thread_cpu_time(Thread* thread) {
  // consistent with what current_thread_cpu_time() returns.
  return os::thread_cpu_time(thread, true /* user+sys */);
}
// CPU time of the calling thread; user+sys or user-only per the flag.
jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
  return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
}
4151 jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) {
4152 // This code is copy from clasic VM -> hpi::sysThreadCPUTime
4153 // If this function changes, os::is_thread_cpu_time_supported() should too
4154 if (os::win32::is_nt()) {
4155 FILETIME CreationTime;
4156 FILETIME ExitTime;
4157 FILETIME KernelTime;
4158 FILETIME UserTime;
4160 if ( GetThreadTimes(thread->osthread()->thread_handle(),
4161 &CreationTime, &ExitTime, &KernelTime, &UserTime) == 0)
4162 return -1;
4163 else
4164 if (user_sys_cpu_time) {
4165 return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100;
4166 } else {
4167 return FT2INT64(UserTime) * 100;
4168 }
4169 } else {
4170 return (jlong) timeGetTime() * 1000000;
4171 }
4172 }
4174 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4175 info_ptr->max_value = ALL_64_BITS; // the max value -- all 64 bits
4176 info_ptr->may_skip_backward = false; // GetThreadTimes returns absolute time
4177 info_ptr->may_skip_forward = false; // GetThreadTimes returns absolute time
4178 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned
4179 }
4181 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4182 info_ptr->max_value = ALL_64_BITS; // the max value -- all 64 bits
4183 info_ptr->may_skip_backward = false; // GetThreadTimes returns absolute time
4184 info_ptr->may_skip_forward = false; // GetThreadTimes returns absolute time
4185 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned
4186 }
4188 bool os::is_thread_cpu_time_supported() {
4189 // see os::thread_cpu_time
4190 if (os::win32::is_nt()) {
4191 FILETIME CreationTime;
4192 FILETIME ExitTime;
4193 FILETIME KernelTime;
4194 FILETIME UserTime;
4196 if ( GetThreadTimes(GetCurrentThread(),
4197 &CreationTime, &ExitTime, &KernelTime, &UserTime) == 0)
4198 return false;
4199 else
4200 return true;
4201 } else {
4202 return false;
4203 }
4204 }
// Windows doesn't provide a loadavg primitive so this is stubbed out for now.
// It does have primitives (PDH API) to get CPU usage and run queue length.
// "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length"
// If we wanted to implement loadavg on Windows, we have a few options:
//
// a) Query CPU usage and run queue length and "fake" an answer by
//    returning the CPU usage if it's under 100%, and the run queue
//    length otherwise.  It turns out that querying is pretty slow
//    on Windows, on the order of 200 microseconds on a fast machine.
//    Note that on the Windows the CPU usage value is the % usage
//    since the last time the API was called (and the first call
//    returns 100%), so we'd have to deal with that as well.
//
// b) Sample the "fake" answer using a sampling thread and store
//    the answer in a global variable.  The call to loadavg would
//    just return the value of the global, avoiding the slow query.
//
// c) Sample a better answer using exponential decay to smooth the
//    value.  This is basically the algorithm used by UNIX kernels.
//
// Note that sampling thread starvation could affect both (b) and (c).
// Stub: always reports "not available".
int os::loadavg(double loadavg[], int nelem) {
  return -1;
}
// DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield()
// When the flag is set, JVM_Yield() requests are suppressed.
bool os::dont_yield() {
  return DontYieldALot;
}
4237 // This method is a slightly reworked copy of JDK's sysOpen
4238 // from src/windows/hpi/src/sys_api_md.c
4240 int os::open(const char *path, int oflag, int mode) {
4241 char pathbuf[MAX_PATH];
4243 if (strlen(path) > MAX_PATH - 1) {
4244 errno = ENAMETOOLONG;
4245 return -1;
4246 }
4247 os::native_path(strcpy(pathbuf, path));
4248 return ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode);
4249 }
4251 FILE* os::open(int fd, const char* mode) {
4252 return ::_fdopen(fd, mode);
4253 }
4255 // Is a (classpath) directory empty?
4256 bool os::dir_is_empty(const char* path) {
4257 WIN32_FIND_DATA fd;
4258 HANDLE f = FindFirstFile(path, &fd);
4259 if (f == INVALID_HANDLE_VALUE) {
4260 return true;
4261 }
4262 FindClose(f);
4263 return false;
4264 }
4266 // create binary file, rewriting existing file if required
4267 int os::create_binary_file(const char* path, bool rewrite_existing) {
4268 int oflags = _O_CREAT | _O_WRONLY | _O_BINARY;
4269 if (!rewrite_existing) {
4270 oflags |= _O_EXCL;
4271 }
4272 return ::open(path, oflags, _S_IREAD | _S_IWRITE);
4273 }
4275 // return current position of file pointer
4276 jlong os::current_file_offset(int fd) {
4277 return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR);
4278 }
4280 // move file pointer to the specified offset
4281 jlong os::seek_to_file_offset(int fd, jlong offset) {
4282 return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET);
4283 }
4286 jlong os::lseek(int fd, jlong offset, int whence) {
4287 return (jlong) ::_lseeki64(fd, offset, whence);
4288 }
// This method is a slightly reworked copy of JDK's sysNativePath
// from src/windows/hpi/src/path_md.c

/* Convert a pathname to native format.  On win32, this involves forcing all
   separators to be '\\' rather than '/' (both are legal inputs, but Win95
   sometimes rejects '/') and removing redundant separators.  The input path is
   assumed to have been converted into the character encoding used by the local
   system.  Because this might be a double-byte encoding, care is taken to
   treat double-byte lead characters correctly.

   This procedure modifies the given path in place, as the result is never
   longer than the original.  There is no error return; this operation always
   succeeds. */
char * os::native_path(char *path) {
  char *src = path, *dst = path, *end = path;
  char *colon = NULL;  /* If a drive specifier is found, this will
                          point to the colon following the drive
                          letter */

  /* Assumption: '/', '\\', ':', and drive letters are never lead bytes */
  assert(((!::IsDBCSLeadByte('/'))
    && (!::IsDBCSLeadByte('\\'))
    && (!::IsDBCSLeadByte(':'))),
    "Illegal lead byte");

  /* Check for leading separators */
#define isfilesep(c) ((c) == '/' || (c) == '\\')
  while (isfilesep(*src)) {
    src++;
  }

  if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') {
    /* Remove leading separators if followed by drive specifier.  This
      hack is necessary to support file URLs containing drive
      specifiers (e.g., "file://c:/path").  As a side effect,
      "/c:/path" can be used as an alternative to "c:/path". */
    *dst++ = *src++;
    colon = dst;
    *dst++ = ':';
    src++;
  } else {
    src = path;
    if (isfilesep(src[0]) && isfilesep(src[1])) {
      /* UNC pathname: Retain first separator; leave src pointed at
        second separator so that further separators will be collapsed
        into the second separator.  The result will be a pathname
        beginning with "\\\\" followed (most likely) by a host name. */
      src = dst = path + 1;
      path[0] = '\\';     /* Force first separator to '\\' */
    }
  }

  end = dst;

  /* Remove redundant separators from remainder of path, forcing all
      separators to be '\\' rather than '/'. Also, single byte space
      characters are removed from the end of the path because those
      are not legal ending characters on this operating system.
  */
  while (*src != '\0') {
    if (isfilesep(*src)) {
      *dst++ = '\\'; src++;
      while (isfilesep(*src)) src++;
      if (*src == '\0') {
        /* Check for trailing separator */
        end = dst;
        if (colon == dst - 2) break;                      /* "z:\\" */
        if (dst == path + 1) break;                       /* "\\" */
        if (dst == path + 2 && isfilesep(path[0])) {
          /* "\\\\" is not collapsed to "\\" because "\\\\" marks the
            beginning of a UNC pathname.  Even though it is not, by
            itself, a valid UNC pathname, we leave it as is in order
            to be consistent with the path canonicalizer as well
            as the win32 APIs, which treat this case as an invalid
            UNC pathname rather than as an alias for the root
            directory of the current drive. */
          break;
        }
        end = --dst;  /* Path does not denote a root directory, so
                                    remove trailing separator */
        break;
      }
      end = dst;
    } else {
      if (::IsDBCSLeadByte(*src)) { /* Copy a double-byte character */
        *dst++ = *src++;
        if (*src) *dst++ = *src++;
        end = dst;
      } else {  /* Copy a single-byte character */
        char c = *src++;
        *dst++ = c;
        /* Space is not a legal ending character */
        if (c != ' ') end = dst;
      }
    }
  }

  *end = '\0';

  /* For "z:", add "." to work around a bug in the C runtime library */
  if (colon == dst - 1) {
    path[2] = '.';
    path[3] = '\0';
  }

  return path;
}
4398 // This code is a copy of JDK's sysSetLength
4399 // from src/windows/hpi/src/sys_api_md.c
4401 int os::ftruncate(int fd, jlong length) {
4402 HANDLE h = (HANDLE)::_get_osfhandle(fd);
4403 long high = (long)(length >> 32);
4404 DWORD ret;
4406 if (h == (HANDLE)(-1)) {
4407 return -1;
4408 }
4410 ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN);
4411 if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) {
4412 return -1;
4413 }
4415 if (::SetEndOfFile(h) == FALSE) {
4416 return -1;
4417 }
4419 return 0;
4420 }
4423 // This code is a copy of JDK's sysSync
4424 // from src/windows/hpi/src/sys_api_md.c
4425 // except for the legacy workaround for a bug in Win 98
4427 int os::fsync(int fd) {
4428 HANDLE handle = (HANDLE)::_get_osfhandle(fd);
4430 if ( (!::FlushFileBuffers(handle)) &&
4431 (GetLastError() != ERROR_ACCESS_DENIED) ) {
4432 /* from winerror.h */
4433 return -1;
4434 }
4435 return 0;
4436 }
// Forward declarations for the "bytes available" helpers defined below.
static int nonSeekAvailable(int, long *);
static int stdinAvailable(int, long *);

// Character-device and pipe tests over the _stati64 st_mode bits.
#define S_ISCHR(mode) (((mode) & _S_IFCHR) == _S_IFCHR)
#define S_ISFIFO(mode) (((mode) & _S_IFIFO) == _S_IFIFO)
4444 // This code is a copy of JDK's sysAvailable
4445 // from src/windows/hpi/src/sys_api_md.c
4447 int os::available(int fd, jlong *bytes) {
4448 jlong cur, end;
4449 struct _stati64 stbuf64;
4451 if (::_fstati64(fd, &stbuf64) >= 0) {
4452 int mode = stbuf64.st_mode;
4453 if (S_ISCHR(mode) || S_ISFIFO(mode)) {
4454 int ret;
4455 long lpbytes;
4456 if (fd == 0) {
4457 ret = stdinAvailable(fd, &lpbytes);
4458 } else {
4459 ret = nonSeekAvailable(fd, &lpbytes);
4460 }
4461 (*bytes) = (jlong)(lpbytes);
4462 return ret;
4463 }
4464 if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) {
4465 return FALSE;
4466 } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) {
4467 return FALSE;
4468 } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) {
4469 return FALSE;
4470 }
4471 *bytes = end - cur;
4472 return TRUE;
4473 } else {
4474 return FALSE;
4475 }
4476 }
4478 // This code is a copy of JDK's nonSeekAvailable
4479 // from src/windows/hpi/src/sys_api_md.c
4481 static int nonSeekAvailable(int fd, long *pbytes) {
4482 /* This is used for available on non-seekable devices
4483 * (like both named and anonymous pipes, such as pipes
4484 * connected to an exec'd process).
4485 * Standard Input is a special case.
4486 *
4487 */
4488 HANDLE han;
4490 if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) {
4491 return FALSE;
4492 }
4494 if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) {
4495 /* PeekNamedPipe fails when at EOF. In that case we
4496 * simply make *pbytes = 0 which is consistent with the
4497 * behavior we get on Solaris when an fd is at EOF.
4498 * The only alternative is to raise an Exception,
4499 * which isn't really warranted.
4500 */
4501 if (::GetLastError() != ERROR_BROKEN_PIPE) {
4502 return FALSE;
4503 }
4504 *pbytes = 0;
4505 }
4506 return TRUE;
4507 }
4509 #define MAX_INPUT_EVENTS 2000
4511 // This code is a copy of JDK's stdinAvailable
4512 // from src/windows/hpi/src/sys_api_md.c
4514 static int stdinAvailable(int fd, long *pbytes) {
4515 HANDLE han;
4516 DWORD numEventsRead = 0; /* Number of events read from buffer */
4517 DWORD numEvents = 0; /* Number of events in buffer */
4518 DWORD i = 0; /* Loop index */
4519 DWORD curLength = 0; /* Position marker */
4520 DWORD actualLength = 0; /* Number of bytes readable */
4521 BOOL error = FALSE; /* Error holder */
4522 INPUT_RECORD *lpBuffer; /* Pointer to records of input events */
4524 if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) {
4525 return FALSE;
4526 }
4528 /* Construct an array of input records in the console buffer */
4529 error = ::GetNumberOfConsoleInputEvents(han, &numEvents);
4530 if (error == 0) {
4531 return nonSeekAvailable(fd, pbytes);
4532 }
4534 /* lpBuffer must fit into 64K or else PeekConsoleInput fails */
4535 if (numEvents > MAX_INPUT_EVENTS) {
4536 numEvents = MAX_INPUT_EVENTS;
4537 }
4539 lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal);
4540 if (lpBuffer == NULL) {
4541 return FALSE;
4542 }
4544 error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead);
4545 if (error == 0) {
4546 os::free(lpBuffer, mtInternal);
4547 return FALSE;
4548 }
4550 /* Examine input records for the number of bytes available */
4551 for(i=0; i<numEvents; i++) {
4552 if (lpBuffer[i].EventType == KEY_EVENT) {
4554 KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *)
4555 &(lpBuffer[i].Event);
4556 if (keyRecord->bKeyDown == TRUE) {
4557 CHAR *keyPressed = (CHAR *) &(keyRecord->uChar);
4558 curLength++;
4559 if (*keyPressed == '\r') {
4560 actualLength = curLength;
4561 }
4562 }
4563 }
4564 }
4566 if(lpBuffer != NULL) {
4567 os::free(lpBuffer, mtInternal);
4568 }
4570 *pbytes = (long) actualLength;
4571 return TRUE;
4572 }
4574 // Map a block of memory.
4575 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4576 char *addr, size_t bytes, bool read_only,
4577 bool allow_exec) {
4578 HANDLE hFile;
4579 char* base;
4581 hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL,
4582 OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
4583 if (hFile == NULL) {
4584 if (PrintMiscellaneous && Verbose) {
4585 DWORD err = GetLastError();
4586 tty->print_cr("CreateFile() failed: GetLastError->%ld.", err);
4587 }
4588 return NULL;
4589 }
4591 if (allow_exec) {
4592 // CreateFileMapping/MapViewOfFileEx can't map executable memory
4593 // unless it comes from a PE image (which the shared archive is not.)
4594 // Even VirtualProtect refuses to give execute access to mapped memory
4595 // that was not previously executable.
4596 //
4597 // Instead, stick the executable region in anonymous memory. Yuck.
4598 // Penalty is that ~4 pages will not be shareable - in the future
4599 // we might consider DLLizing the shared archive with a proper PE
4600 // header so that mapping executable + sharing is possible.
4602 base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE,
4603 PAGE_READWRITE);
4604 if (base == NULL) {
4605 if (PrintMiscellaneous && Verbose) {
4606 DWORD err = GetLastError();
4607 tty->print_cr("VirtualAlloc() failed: GetLastError->%ld.", err);
4608 }
4609 CloseHandle(hFile);
4610 return NULL;
4611 }
4613 DWORD bytes_read;
4614 OVERLAPPED overlapped;
4615 overlapped.Offset = (DWORD)file_offset;
4616 overlapped.OffsetHigh = 0;
4617 overlapped.hEvent = NULL;
4618 // ReadFile guarantees that if the return value is true, the requested
4619 // number of bytes were read before returning.
4620 bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0;
4621 if (!res) {
4622 if (PrintMiscellaneous && Verbose) {
4623 DWORD err = GetLastError();
4624 tty->print_cr("ReadFile() failed: GetLastError->%ld.", err);
4625 }
4626 release_memory(base, bytes);
4627 CloseHandle(hFile);
4628 return NULL;
4629 }
4630 } else {
4631 HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0,
4632 NULL /*file_name*/);
4633 if (hMap == NULL) {
4634 if (PrintMiscellaneous && Verbose) {
4635 DWORD err = GetLastError();
4636 tty->print_cr("CreateFileMapping() failed: GetLastError->%ld.", err);
4637 }
4638 CloseHandle(hFile);
4639 return NULL;
4640 }
4642 DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY;
4643 base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset,
4644 (DWORD)bytes, addr);
4645 if (base == NULL) {
4646 if (PrintMiscellaneous && Verbose) {
4647 DWORD err = GetLastError();
4648 tty->print_cr("MapViewOfFileEx() failed: GetLastError->%ld.", err);
4649 }
4650 CloseHandle(hMap);
4651 CloseHandle(hFile);
4652 return NULL;
4653 }
4655 if (CloseHandle(hMap) == 0) {
4656 if (PrintMiscellaneous && Verbose) {
4657 DWORD err = GetLastError();
4658 tty->print_cr("CloseHandle(hMap) failed: GetLastError->%ld.", err);
4659 }
4660 CloseHandle(hFile);
4661 return base;
4662 }
4663 }
4665 if (allow_exec) {
4666 DWORD old_protect;
4667 DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE;
4668 bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0;
4670 if (!res) {
4671 if (PrintMiscellaneous && Verbose) {
4672 DWORD err = GetLastError();
4673 tty->print_cr("VirtualProtect() failed: GetLastError->%ld.", err);
4674 }
4675 // Don't consider this a hard error, on IA32 even if the
4676 // VirtualProtect fails, we should still be able to execute
4677 CloseHandle(hFile);
4678 return base;
4679 }
4680 }
4682 if (CloseHandle(hFile) == 0) {
4683 if (PrintMiscellaneous && Verbose) {
4684 DWORD err = GetLastError();
4685 tty->print_cr("CloseHandle(hFile) failed: GetLastError->%ld.", err);
4686 }
4687 return base;
4688 }
4690 return base;
4691 }
// Remap a block of memory.
// Returns the address the file was mapped at, or NULL on failure (including
// failure of the initial unmap). Arguments have the same meaning as for
// os::map_memory().
char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
                          char *addr, size_t bytes, bool read_only,
                          bool allow_exec) {
  // This OS does not allow existing memory maps to be remapped so we
  // have to unmap the memory before we remap it.
  if (!os::unmap_memory(addr, bytes)) {
    return NULL;
  }

  // There is a very small theoretical window between the unmap_memory()
  // call above and the map_memory() call below where a thread in native
  // code may be able to access an address that is no longer mapped.

  return os::map_memory(fd, file_name, file_offset, addr, bytes,
                        read_only, allow_exec);
}
4713 // Unmap a block of memory.
4714 // Returns true=success, otherwise false.
4716 bool os::pd_unmap_memory(char* addr, size_t bytes) {
4717 BOOL result = UnmapViewOfFile(addr);
4718 if (result == 0) {
4719 if (PrintMiscellaneous && Verbose) {
4720 DWORD err = GetLastError();
4721 tty->print_cr("UnmapViewOfFile() failed: GetLastError->%ld.", err);
4722 }
4723 return false;
4724 }
4725 return true;
4726 }
4728 void os::pause() {
4729 char filename[MAX_PATH];
4730 if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4731 jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
4732 } else {
4733 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4734 }
4736 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4737 if (fd != -1) {
4738 struct stat buf;
4739 ::close(fd);
4740 while (::stat(filename, &buf) == 0) {
4741 Sleep(100);
4742 }
4743 } else {
4744 jio_fprintf(stderr,
4745 "Could not open pause file '%s', continuing immediately.\n", filename);
4746 }
4747 }
// May only be constructed on the WatcherThread; construction alone does not
// arm the protection -- call() does.
os::WatcherThreadCrashProtection::WatcherThreadCrashProtection() {
  assert(Thread::current()->is_Watcher_thread(), "Must be WatcherThread");
}

/*
 * See the caveats for this class in os_windows.hpp
 * Protects the callback call so that raised OS EXCEPTIONS causes a jump back
 * into this method and returns false. If no OS EXCEPTION was raised, returns
 * true.
 * The callback is supposed to provide the method that should be protected.
 */
bool os::WatcherThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
  assert(Thread::current()->is_Watcher_thread(), "Only for WatcherThread");
  assert(!WatcherThread::watcher_thread()->has_crash_protection(),
         "crash_protection already set?");

  bool success = true;
  // SEH: any structured exception raised inside cb.call() lands in the
  // __except filter below instead of crashing the VM.
  __try {
    WatcherThread::watcher_thread()->set_crash_protection(this);
    cb.call();
  } __except(EXCEPTION_EXECUTE_HANDLER) {
    // only for protection, nothing to do
    success = false;
  }
  // Always disarm, whether or not an exception fired.
  WatcherThread::watcher_thread()->set_crash_protection(NULL);
  return success;
}
4777 // An Event wraps a win32 "CreateEvent" kernel handle.
4778 //
4779 // We have a number of choices regarding "CreateEvent" win32 handle leakage:
4780 //
4781 // 1: When a thread dies return the Event to the EventFreeList, clear the ParkHandle
4782 // field, and call CloseHandle() on the win32 event handle. Unpark() would
4783 // need to be modified to tolerate finding a NULL (invalid) win32 event handle.
4784 // In addition, an unpark() operation might fetch the handle field, but the
4785 // event could recycle between the fetch and the SetEvent() operation.
4786 // SetEvent() would either fail because the handle was invalid, or inadvertently work,
4787 // as the win32 handle value had been recycled. In an ideal world calling SetEvent()
// on a stale but recycled handle would be harmless, but in practice this might
4789 // confuse other non-Sun code, so it's not a viable approach.
4790 //
4791 // 2: Once a win32 event handle is associated with an Event, it remains associated
4792 // with the Event. The event handle is never closed. This could be construed
4793 // as handle leakage, but only up to the maximum # of threads that have been extant
4794 // at any one time. This shouldn't be an issue, as windows platforms typically
4795 // permit a process to have hundreds of thousands of open handles.
4796 //
4797 // 3: Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
4798 // and release unused handles.
4799 //
4800 // 4: Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
4801 // It's not clear, however, that we wouldn't be trading one type of leak for another.
4802 //
4803 // 5. Use an RCU-like mechanism (Read-Copy Update).
4804 // Or perhaps something similar to Maged Michael's "Hazard pointers".
4805 //
4806 // We use (2).
4807 //
4808 // TODO-FIXME:
4809 // 1. Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
4810 // 2. Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
4811 // to recover from (or at least detect) the dreaded Windows 841176 bug.
4812 // 3. Collapse the interrupt_event, the JSR166 parker event, and the objectmonitor ParkEvent
4813 // into a single win32 CreateEvent() handle.
4814 //
4815 // _Event transitions in park()
4816 // -1 => -1 : illegal
4817 // 1 => 0 : pass - return immediately
4818 // 0 => -1 : block
4819 //
4820 // _Event serves as a restricted-range semaphore :
4821 // -1 : thread is blocked
4822 // 0 : neutral - thread is running or ready
4823 // 1 : signaled - thread is running or ready
4824 //
4825 // Another possible encoding of _Event would be
4826 // with explicit "PARKED" and "SIGNALED" bits.
// Block the calling thread for at most Millis milliseconds, or until
// unpark()ed. Returns OS_OK if a permit was consumed (already signaled or
// unparked while waiting), OS_TIMEOUT if the timeout elapsed.
int os::PlatformEvent::park (jlong Millis) {
  guarantee (_ParkHandle != NULL , "Invariant") ;
  guarantee (Millis > 0 , "Invariant") ;
  int v ;

  // CONSIDER: defer assigning a CreateEvent() handle to the Event until
  // the initial park() operation.

  // Atomically decrement _Event: 1 -> 0 consumes the permit and returns
  // immediately below; 0 -> -1 marks this thread as blocked.
  for (;;) {
    v = _Event ;
    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
  }
  guarantee ((v == 0) || (v == 1), "invariant") ;
  if (v != 0) return OS_OK ;

  // Do this the hard way by blocking ...
  // TODO: consider a brief spin here, gated on the success of recent
  // spin attempts by this thread.
  //
  // We decompose long timeouts into series of shorter timed waits.
  // Evidently large timo values passed in WaitForSingleObject() are problematic on some
  // versions of Windows. See EventWait() for details. This may be superstition. Or not.
  // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
  // with os::javaTimeNanos(). Furthermore, we assume that spurious returns from
  // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend
  // to happen early in the wait interval. Specifically, after a spurious wakeup (rv ==
  // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
  // for the already waited time. This policy does not admit any new outcomes.
  // In the future, however, we might want to track the accumulated wait time and
  // adjust Millis accordingly if we encounter a spurious wakeup.

  const int MAXTIMEOUT = 0x10000000 ;
  DWORD rv = WAIT_TIMEOUT ;
  while (_Event < 0 && Millis > 0) {
    DWORD prd = Millis ; // set prd = MIN(Millis, MAXTIMEOUT)
    if (Millis > MAXTIMEOUT) {
      prd = MAXTIMEOUT ;
    }
    rv = ::WaitForSingleObject (_ParkHandle, prd) ;
    assert (rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed") ;
    if (rv == WAIT_TIMEOUT) {
      Millis -= prd ;
    }
  }
  v = _Event ;
  _Event = 0 ;
  // see comment at end of os::PlatformEvent::park() below:
  OrderAccess::fence() ;
  // If we encounter a nearly simultaneous timeout expiry and unpark()
  // we return OS_OK indicating we awoke via unpark().
  // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
  return (v >= 0) ? OS_OK : OS_TIMEOUT ;
}
// Block the calling thread indefinitely until unpark()ed (returns
// immediately if a permit is already available).
void os::PlatformEvent::park () {
  guarantee (_ParkHandle != NULL, "Invariant") ;
  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  int v ;
  // Atomically decrement _Event: 1 -> 0 consumes the permit; 0 -> -1
  // marks this thread as blocked.
  for (;;) {
    v = _Event ;
    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
  }
  guarantee ((v == 0) || (v == 1), "invariant") ;
  if (v != 0) return ;

  // Do this the hard way by blocking ...
  // TODO: consider a brief spin here, gated on the success of recent
  // spin attempts by this thread.
  // Re-check _Event after each wakeup: spurious or stale SetEvent()
  // wakeups are tolerated by looping.
  while (_Event < 0) {
    DWORD rv = ::WaitForSingleObject (_ParkHandle, INFINITE) ;
    assert (rv == WAIT_OBJECT_0, "WaitForSingleObject failed") ;
  }

  // Usually we'll find _Event == 0 at this point, but as
  // an optional optimization we clear it, just in case
  // multiple unpark() operations drove _Event up to 1.
  _Event = 0 ;
  OrderAccess::fence() ;
  guarantee (_Event >= 0, "invariant") ;
}
// Make a permit available: force _Event to 1 and, if a thread was parked
// (previous value < 0), signal the win32 event to wake it.
void os::PlatformEvent::unpark() {
  guarantee (_ParkHandle != NULL, "Invariant") ;

  // Transitions for _Event:
  //    0 :=> 1
  //    1 :=> 1
  //   -1 :=> either 0 or 1; must signal target thread
  //          That is, we can safely transition _Event from -1 to either
  //          0 or 1. Forcing 1 is slightly more efficient for back-to-back
  //          unpark() calls.
  // See also: "Semaphores in Plan 9" by Mullender & Cox
  //
  // Note: Forcing a transition from "-1" to "1" on an unpark() means
  // that it will take two back-to-back park() calls for the owning
  // thread to block. This has the benefit of forcing a spurious return
  // from the first park() call after an unpark() call which will help
  // shake out uses of park() and unpark() without condition variables.

  // xchg returns the previous value; only a parked thread (< 0) needs
  // the kernel-level wakeup.
  if (Atomic::xchg(1, &_Event) >= 0) return;

  ::SetEvent(_ParkHandle);
}
4934 // JSR166
4935 // -------------------------------------------------------
4937 /*
4938 * The Windows implementation of Park is very straightforward: Basic
4939 * operations on Win32 Events turn out to have the right semantics to
 * use them directly. We opportunistically reuse the event inherited
4941 * from Monitor.
4942 */
// JSR166 park on _ParkEvent.
// time < 0            : return immediately (don't wait)
// time == 0, relative : wait forever
// isAbsolute          : 'time' is a deadline in millis since the epoch
// otherwise           : 'time' is a relative timeout in nanos
void Parker::park(bool isAbsolute, jlong time) {
  guarantee (_ParkEvent != NULL, "invariant") ;
  // First, demultiplex/decode time arguments
  if (time < 0) { // don't wait
    return;
  }
  else if (time == 0 && !isAbsolute) {
    time = INFINITE;
  }
  else if  (isAbsolute) {
    time -= os::javaTimeMillis(); // convert to relative time
    if (time <= 0) // already elapsed
      return;
  }
  else { // relative
    time /= 1000000; // Must coarsen from nanos to millis
    if (time == 0)   // Wait for the minimal time unit if zero
      time = 1;
  }

  JavaThread* thread = (JavaThread*)(Thread::current());
  assert(thread->is_Java_thread(), "Must be JavaThread");
  JavaThread *jt = (JavaThread *)thread;

  // Don't wait if interrupted or already triggered; a 0-timeout wait on the
  // event doubles as a "was it already signaled?" test, and consumes it.
  if (Thread::is_interrupted(thread, false) ||
      WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
    ResetEvent(_ParkEvent);
    return;
  }
  else {
    // Publish the blocked state to the VM before waiting.
    ThreadBlockInVM tbivm(jt);
    OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
    jt->set_suspend_equivalent();

    WaitForSingleObject(_ParkEvent, time);
    ResetEvent(_ParkEvent);

    // If externally suspended while waiting, re-suspend
    if (jt->handle_special_suspend_equivalent_condition()) {
      jt->java_suspend_self();
    }
  }
}
// Release the parked thread (or pre-signal the event so the next park()
// returns immediately). Signaling an already-signaled event is a no-op.
void Parker::unpark() {
  guarantee (_ParkEvent != NULL, "invariant") ;
  SetEvent(_ParkEvent);
}
// Run the specified command in a separate process. Return its exit value,
// or -1 on failure (e.g. can't create a new process).
// Note: blocks the calling thread until the child exits.
int os::fork_and_exec(char* cmd) {
  STARTUPINFO si;
  PROCESS_INFORMATION pi;

  memset(&si, 0, sizeof(si));
  si.cb = sizeof(si);
  memset(&pi, 0, sizeof(pi));
  BOOL rslt = CreateProcess(NULL,   // executable name - use command line
                            cmd,    // command line
                            NULL,   // process security attribute
                            NULL,   // thread security attribute
                            TRUE,   // inherits system handles
                            0,      // no creation flags
                            NULL,   // use parent's environment block
                            NULL,   // use parent's starting directory
                            &si,    // (in) startup information
                            &pi);   // (out) process information

  if (rslt) {
    // Wait until child process exits.
    WaitForSingleObject(pi.hProcess, INFINITE);

    DWORD exit_code;
    GetExitCodeProcess(pi.hProcess, &exit_code);

    // Close process and thread handles so the kernel objects are released.
    CloseHandle(pi.hProcess);
    CloseHandle(pi.hThread);

    return (int)exit_code;
  } else {
    return -1;
  }
}
5032 //--------------------------------------------------------------------------------------------------
5033 // Non-product code
// Counters gating how often check_heap() really walks the C heap: validation
// starts after MallocVerifyStart calls and then repeats every
// MallocVerifyInterval calls (unless 'force' is set).
static int mallocDebugIntervalCounter = 0;
static int mallocDebugCounter = 0;
// Walk and validate the process heap; calls fatal() on corruption.
// Always returns true when it returns at all.
bool os::check_heap(bool force) {
  if (++mallocDebugCounter < MallocVerifyStart && !force) return true;
  if (++mallocDebugIntervalCounter >= MallocVerifyInterval || force) {
    // Note: HeapValidate executes two hardware breakpoints when it finds something
    // wrong; at these points, eax contains the address of the offending block (I think).
    // To get to the explicit error message(s) below, just continue twice.
    HANDLE heap = GetProcessHeap();
    { HeapLock(heap);
      PROCESS_HEAP_ENTRY phe;
      phe.lpData = NULL;
      while (HeapWalk(heap, &phe) != 0) {
        if ((phe.wFlags & PROCESS_HEAP_ENTRY_BUSY) &&
            !HeapValidate(heap, 0, phe.lpData)) {
          tty->print_cr("C heap has been corrupted (time: %d allocations)", mallocDebugCounter);
          // NOTE(review): "%#x" likely truncates phe.lpData on 64-bit
          // Windows -- confirm and consider PTR_FORMAT.
          tty->print_cr("corrupted block near address %#x, length %d", phe.lpData, phe.cbData);
          fatal("corrupted C heap");
        }
      }
      DWORD err = GetLastError();
      if (err != ERROR_NO_MORE_ITEMS && err != ERROR_CALL_NOT_IMPLEMENTED) {
        fatal(err_msg("heap walk aborted with error %d", err));
      }
      HeapUnlock(heap);
    }
    mallocDebugIntervalCounter = 0;
  }
  return true;
}
// Symbol lookup for an arbitrary address; unimplemented on Windows, so it
// always reports "not found".
bool os::find(address addr, outputStream* st) {
  // Nothing yet
  return false;
}
// SEH filter for faults on the memory-serialize page: an access violation on
// that page is expected (it is how the VM serializes thread state) and
// execution simply continues; anything else is passed on to other handlers.
LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) {
  DWORD exception_code = e->ExceptionRecord->ExceptionCode;

  if ( exception_code == EXCEPTION_ACCESS_VIOLATION ) {
    JavaThread* thread = (JavaThread*)ThreadLocalStorage::get_thread_slow();
    PEXCEPTION_RECORD exceptionRecord = e->ExceptionRecord;
    // ExceptionInformation[1] is the faulting data address for an AV.
    address addr = (address) exceptionRecord->ExceptionInformation[1];

    if (os::is_memory_serialize_page(thread, addr))
      return EXCEPTION_CONTINUE_EXECUTION;
  }

  return EXCEPTION_CONTINUE_SEARCH;
}
// We don't build a headless jre for Windows
bool os::is_headless_jre() { return false; }
// Load and initialize Winsock (version 2.2). Returns JNI_OK on success,
// JNI_ERR (with a message on stderr) if the DLL is unavailable or
// WSAStartup fails.
static jint initSock() {
  WSADATA wsadata;

  if (!os::WinSock2Dll::WinSock2Available()) {
    jio_fprintf(stderr, "Could not load Winsock (error: %d)\n",
      ::GetLastError());
    return JNI_ERR;
  }

  if (os::WinSock2Dll::WSAStartup(MAKEWORD(2,2), &wsadata) != 0) {
    jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
      ::GetLastError());
    return JNI_ERR;
  }
  return JNI_OK;
}
// ---- Thin wrappers forwarding the platform-independent os:: socket API to
// ---- the corresponding winsock calls. size_t lengths are narrowed to int
// ---- because the winsock prototypes take int lengths.

struct hostent* os::get_host_by_name(char* name) {
  return (struct hostent*)os::WinSock2Dll::gethostbyname(name);
}

int os::socket_close(int fd) {
  return ::closesocket(fd);
}

// Stores the number of immediately readable bytes into *pbytes via
// FIONREAD; returns 1 on success, 0 on ioctl failure.
int os::socket_available(int fd, jint *pbytes) {
  int ret = ::ioctlsocket(fd, FIONREAD, (u_long*)pbytes);
  return (ret < 0) ? 0 : 1;
}

int os::socket(int domain, int type, int protocol) {
  return ::socket(domain, type, protocol);
}

int os::listen(int fd, int count) {
  return ::listen(fd, count);
}

int os::connect(int fd, struct sockaddr* him, socklen_t len) {
  return ::connect(fd, him, len);
}

int os::accept(int fd, struct sockaddr* him, socklen_t* len) {
  return ::accept(fd, him, len);
}

int os::sendto(int fd, char* buf, size_t len, uint flags,
               struct sockaddr* to, socklen_t tolen) {

  return ::sendto(fd, buf, (int)len, flags, to, tolen);
}

int os::recvfrom(int fd, char *buf, size_t nBytes, uint flags,
                 sockaddr* from, socklen_t* fromlen) {

  return ::recvfrom(fd, buf, (int)nBytes, flags, from, fromlen);
}

int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
  return ::recv(fd, buf, (int)nBytes, flags);
}

int os::send(int fd, char* buf, size_t nBytes, uint flags) {
  return ::send(fd, buf, (int)nBytes, flags);
}

// On Windows raw_send is identical to send().
int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
  return ::send(fd, buf, (int)nBytes, flags);
}
// Wait up to 'timeout' milliseconds for fd to become readable.
// Returns the result of ::select() (number of ready sockets, or an error
// indication).
int os::timeout(int fd, long timeout) {
  fd_set tbl;
  struct timeval t;

  // Split the millisecond timeout into the seconds/microseconds pair
  // select() expects.
  t.tv_sec  = timeout / 1000;
  t.tv_usec = (timeout % 1000) * 1000;

  // Windows fd_set is a counted array; fill it directly with the single fd.
  tbl.fd_count    = 1;
  tbl.fd_array[0] = fd;

  return ::select(1, &tbl, 0, 0, &t);
}
// ---- More thin winsock wrappers (see the group above). Each simply
// ---- delegates to the global winsock entry point of the same purpose.

int os::get_host_name(char* name, int namelen) {
  return ::gethostname(name, namelen);
}

int os::socket_shutdown(int fd, int howto) {
  return ::shutdown(fd, howto);
}

int os::bind(int fd, struct sockaddr* him, socklen_t len) {
  return ::bind(fd, him, len);
}

int os::get_sock_name(int fd, struct sockaddr* him, socklen_t* len) {
  return ::getsockname(fd, him, len);
}

int os::get_sock_opt(int fd, int level, int optname,
                     char* optval, socklen_t* optlen) {
  return ::getsockopt(fd, level, optname, optval, optlen);
}

int os::set_sock_opt(int fd, int level, int optname,
                     const char* optval, socklen_t optlen) {
  return ::setsockopt(fd, level, optname, optval, optlen);
}
5199 // WINDOWS CONTEXT Flags for THREAD_SAMPLING
5200 #if defined(IA32)
5201 # define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
5202 #elif defined (AMD64)
5203 # define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
5204 #endif
// returns true if thread could be suspended,
// false otherwise
static bool do_suspend(HANDLE* h) {
  if (h != NULL) {
    // SuspendThread returns (DWORD)-1 (compared here as ~0) on failure.
    if (SuspendThread(*h) != ~0) {
      return true;
    }
  }
  return false;
}
5217 // resume the thread
5218 // calling resume on an active thread is a no-op
5219 static void do_resume(HANDLE* h) {
5220 if (h != NULL) {
5221 ResumeThread(*h);
5222 }
5223 }
// retrieve a suspend/resume context capable handle
// from the tid. Caller validates handle return value (OpenThread may
// return NULL) and is responsible for closing it.
void get_thread_handle_for_extended_context(HANDLE* h, OSThread::thread_id_t tid) {
  if (h != NULL) {
    *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
  }
}
//
// Thread sampling implementation
//
// Suspend the target thread, capture its register context, hand the context
// to do_task(), then resume the thread and close the handle.
void os::SuspendedThreadTask::internal_do_task() {
  CONTEXT ctxt;
  HANDLE h = NULL;

  // get context capable handle for thread
  get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());

  // sanity
  if (h == NULL || h == INVALID_HANDLE_VALUE) {
    return;
  }

  // suspend the thread
  if (do_suspend(&h)) {
    ctxt.ContextFlags = sampling_context_flags;
    // get thread context
    // NOTE(review): the GetThreadContext return value is not checked; on
    // failure ctxt may be partially uninitialized.
    GetThreadContext(h, &ctxt);
    SuspendedThreadTaskContext context(_thread, &ctxt);
    // pass context to Thread Sampling impl
    do_task(context);
    // resume thread
    do_resume(&h);
  }

  // close handle
  CloseHandle(h);
}
// Kernel32 API
// Signatures of Kernel32 entry points that are looked up dynamically via
// GetProcAddress (see initializeCommon() below).
typedef SIZE_T (WINAPI* GetLargePageMinimum_Fn)(void);
typedef LPVOID (WINAPI *VirtualAllocExNuma_Fn) (HANDLE, LPVOID, SIZE_T, DWORD, DWORD, DWORD);
typedef BOOL (WINAPI *GetNumaHighestNodeNumber_Fn) (PULONG);
typedef BOOL (WINAPI *GetNumaNodeProcessorMask_Fn) (UCHAR, PULONGLONG);
typedef USHORT (WINAPI* RtlCaptureStackBackTrace_Fn)(ULONG, ULONG, PVOID*, PULONG);

// Function pointers filled in by initializeCommon(); remain NULL when the
// entry point could not be resolved.
GetLargePageMinimum_Fn os::Kernel32Dll::_GetLargePageMinimum = NULL;
VirtualAllocExNuma_Fn os::Kernel32Dll::_VirtualAllocExNuma = NULL;
GetNumaHighestNodeNumber_Fn os::Kernel32Dll::_GetNumaHighestNodeNumber = NULL;
GetNumaNodeProcessorMask_Fn os::Kernel32Dll::_GetNumaNodeProcessorMask = NULL;
RtlCaptureStackBackTrace_Fn os::Kernel32Dll::_RtlCaptureStackBackTrace = NULL;

// One-time-resolution flag for the function pointers above.
BOOL os::Kernel32Dll::initialized = FALSE;
// Call the resolved GetLargePageMinimum; callers must first confirm
// availability via GetLargePageMinimumAvailable().
SIZE_T os::Kernel32Dll::GetLargePageMinimum() {
  assert(initialized && _GetLargePageMinimum != NULL,
         "GetLargePageMinimumAvailable() not yet called");
  return _GetLargePageMinimum();
}

// Lazily resolves the entry points, then reports availability.
BOOL os::Kernel32Dll::GetLargePageMinimumAvailable() {
  if (!initialized) {
    initialize();
  }
  return _GetLargePageMinimum != NULL;
}

// Availability of the NUMA allocation entry points (keyed off
// VirtualAllocExNuma alone).
BOOL os::Kernel32Dll::NumaCallsAvailable() {
  if (!initialized) {
    initialize();
  }
  return _VirtualAllocExNuma != NULL;
}

LPVOID os::Kernel32Dll::VirtualAllocExNuma(HANDLE hProc, LPVOID addr, SIZE_T bytes, DWORD flags, DWORD prot, DWORD node) {
  assert(initialized && _VirtualAllocExNuma != NULL,
         "NUMACallsAvailable() not yet called");

  return _VirtualAllocExNuma(hProc, addr, bytes, flags, prot, node);
}

BOOL os::Kernel32Dll::GetNumaHighestNodeNumber(PULONG ptr_highest_node_number) {
  assert(initialized && _GetNumaHighestNodeNumber != NULL,
         "NUMACallsAvailable() not yet called");

  return _GetNumaHighestNodeNumber(ptr_highest_node_number);
}

BOOL os::Kernel32Dll::GetNumaNodeProcessorMask(UCHAR node, PULONGLONG proc_mask) {
  assert(initialized && _GetNumaNodeProcessorMask != NULL,
         "NUMACallsAvailable() not yet called");

  return _GetNumaNodeProcessorMask(node, proc_mask);
}
// Capture a native stack trace via the resolved RtlCaptureStackBackTrace;
// returns 0 (no frames) when the entry point is unavailable.
USHORT os::Kernel32Dll::RtlCaptureStackBackTrace(ULONG FrameToSkip,
  ULONG FrameToCapture, PVOID* BackTrace, PULONG BackTraceHash) {
  if (!initialized) {
    initialize();
  }

  if (_RtlCaptureStackBackTrace != NULL) {
    return _RtlCaptureStackBackTrace(FrameToSkip, FrameToCapture,
      BackTrace, BackTraceHash);
  } else {
    return 0;
  }
}

// Resolve the Kernel32 entry points shared by all configurations.
// Idempotent: only the first call does the GetProcAddress lookups.
void os::Kernel32Dll::initializeCommon() {
  if (!initialized) {
    HMODULE handle = ::GetModuleHandle("Kernel32.dll");
    assert(handle != NULL, "Just check");
    _GetLargePageMinimum = (GetLargePageMinimum_Fn)::GetProcAddress(handle, "GetLargePageMinimum");
    _VirtualAllocExNuma = (VirtualAllocExNuma_Fn)::GetProcAddress(handle, "VirtualAllocExNuma");
    _GetNumaHighestNodeNumber = (GetNumaHighestNodeNumber_Fn)::GetProcAddress(handle, "GetNumaHighestNodeNumber");
    _GetNumaNodeProcessorMask = (GetNumaNodeProcessorMask_Fn)::GetProcAddress(handle, "GetNumaNodeProcessorMask");
    _RtlCaptureStackBackTrace = (RtlCaptureStackBackTrace_Fn)::GetProcAddress(handle, "RtlCaptureStackBackTrace");
    initialized = TRUE;
  }
}
5350 #ifndef JDK6_OR_EARLIER
// In this (non-JDK6) configuration the Kernel32/Toolhelp entry points below
// are linked directly rather than resolved with GetProcAddress, so the
// wrappers forward straight to the OS and the *Available() probes are
// constant true.
void os::Kernel32Dll::initialize() {
  initializeCommon();
}


// Kernel32 API
inline BOOL os::Kernel32Dll::SwitchToThread() {
  return ::SwitchToThread();
}

inline BOOL os::Kernel32Dll::SwitchToThreadAvailable() {
  return true;
}

// Help tools
inline BOOL os::Kernel32Dll::HelpToolsAvailable() {
  return true;
}

inline HANDLE os::Kernel32Dll::CreateToolhelp32Snapshot(DWORD dwFlags,DWORD th32ProcessId) {
  return ::CreateToolhelp32Snapshot(dwFlags, th32ProcessId);
}

inline BOOL os::Kernel32Dll::Module32First(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
  return ::Module32First(hSnapshot, lpme);
}

inline BOOL os::Kernel32Dll::Module32Next(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
  return ::Module32Next(hSnapshot, lpme);
}

inline void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo) {
  ::GetNativeSystemInfo(lpSystemInfo);
}
// PSAPI API
// Directly-linked PSAPI wrappers (non-JDK6 configuration); always available.
inline BOOL os::PSApiDll::EnumProcessModules(HANDLE hProcess, HMODULE *lpModule, DWORD cb, LPDWORD lpcbNeeded) {
  return ::EnumProcessModules(hProcess, lpModule, cb, lpcbNeeded);
}

inline DWORD os::PSApiDll::GetModuleFileNameEx(HANDLE hProcess, HMODULE hModule, LPTSTR lpFilename, DWORD nSize) {
  return ::GetModuleFileNameEx(hProcess, hModule, lpFilename, nSize);
}

inline BOOL os::PSApiDll::GetModuleInformation(HANDLE hProcess, HMODULE hModule, LPMODULEINFO lpmodinfo, DWORD cb) {
  return ::GetModuleInformation(hProcess, hModule, lpmodinfo, cb);
}

inline BOOL os::PSApiDll::PSApiAvailable() {
  return true;
}
// WinSock2 API
// Directly-linked WinSock2 wrappers (non-JDK6 configuration); always available.
inline BOOL os::WinSock2Dll::WSAStartup(WORD wVersionRequested, LPWSADATA lpWSAData) {
  return ::WSAStartup(wVersionRequested, lpWSAData);
}

inline struct hostent* os::WinSock2Dll::gethostbyname(const char *name) {
  return ::gethostbyname(name);
}

inline BOOL os::WinSock2Dll::WinSock2Available() {
  return true;
}
5418 // Advapi API
5419 inline BOOL os::Advapi32Dll::AdjustTokenPrivileges(HANDLE TokenHandle,
5420 BOOL DisableAllPrivileges, PTOKEN_PRIVILEGES NewState, DWORD BufferLength,
5421 PTOKEN_PRIVILEGES PreviousState, PDWORD ReturnLength) {
5422 return ::AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState,
5423 BufferLength, PreviousState, ReturnLength);
5424 }
5426 inline BOOL os::Advapi32Dll::OpenProcessToken(HANDLE ProcessHandle, DWORD DesiredAccess,
5427 PHANDLE TokenHandle) {
5428 return ::OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle);
5429 }
5431 inline BOOL os::Advapi32Dll::LookupPrivilegeValue(LPCTSTR lpSystemName, LPCTSTR lpName, PLUID lpLuid) {
5432 return ::LookupPrivilegeValue(lpSystemName, lpName, lpLuid);
5433 }
5435 inline BOOL os::Advapi32Dll::AdvapiAvailable() {
5436 return true;
5437 }
5439 void* os::get_default_process_handle() {
5440 return (void*)GetModuleHandle(NULL);
5441 }
// Builds a platform dependent Agent_OnLoad_<lib_name> function name
// which is used to find statically linked in agents.
// Additionally for windows, takes into account __stdcall names.
// Parameters:
//            sym_name: Symbol in library we are looking for
//            lib_name: Name of library to look in, NULL for shared libs.
//            is_absolute_path == true if lib_name is absolute path to agent
//                                     such as "C:/a/b/L.dll"
//                             == false if only the base name of the library is passed in
//                                      such as "L"
// Returns a NEW_C_HEAP_ARRAY-allocated string (caller frees), or NULL if the
// name cannot be formed or allocation fails.
char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
                                    bool is_absolute_path) {
  char *agent_entry_name;
  size_t len;
  size_t name_len;
  size_t prefix_len = strlen(JNI_LIB_PREFIX);
  size_t suffix_len = strlen(JNI_LIB_SUFFIX);
  const char *start;

  if (lib_name != NULL) {
    len = name_len = strlen(lib_name);
    if (is_absolute_path) {
      // Need to strip path, prefix and suffix
      if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
        lib_name = ++start;
      } else {
        // Need to check for drive prefix
        if ((start = strchr(lib_name, ':')) != NULL) {
          lib_name = ++start;
        }
      }
      // A name shorter than prefix+suffix cannot contain a base name.
      if (len <= (prefix_len + suffix_len)) {
        return NULL;
      }
      lib_name += prefix_len;
      name_len = strlen(lib_name) - suffix_len;
    }
  }
  // +2: one for the '_' separator, one for the terminating NUL.
  len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
  agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
  if (agent_entry_name == NULL) {
    return NULL;
  }
  if (lib_name != NULL) {
    // __stdcall decoration: insert "_<lib_name>" before a trailing "@XX".
    const char *p = strrchr(sym_name, '@');
    if (p != NULL && p != sym_name) {
      // sym_name == _Agent_OnLoad@XX
      strncpy(agent_entry_name, sym_name, (p - sym_name));
      agent_entry_name[(p-sym_name)] = '\0';
      // agent_entry_name == _Agent_OnLoad
      strcat(agent_entry_name, "_");
      strncat(agent_entry_name, lib_name, name_len);
      strcat(agent_entry_name, p);
      // agent_entry_name == _Agent_OnLoad_lib_name@XX
    } else {
      strcpy(agent_entry_name, sym_name);
      strcat(agent_entry_name, "_");
      strncat(agent_entry_name, lib_name, name_len);
    }
  } else {
    strcpy(agent_entry_name, sym_name);
  }
  return agent_entry_name;
}
5508 #else
// Kernel32 API
// JDK6-or-earlier configuration: these Kernel32/Toolhelp entry points are
// resolved dynamically with GetProcAddress, so availability must be probed.
typedef BOOL (WINAPI* SwitchToThread_Fn)(void);
typedef HANDLE (WINAPI* CreateToolhelp32Snapshot_Fn)(DWORD,DWORD);
typedef BOOL (WINAPI* Module32First_Fn)(HANDLE,LPMODULEENTRY32);
typedef BOOL (WINAPI* Module32Next_Fn)(HANDLE,LPMODULEENTRY32);
typedef void (WINAPI* GetNativeSystemInfo_Fn)(LPSYSTEM_INFO);

// Resolved function pointers; NULL when the entry point is not present.
SwitchToThread_Fn os::Kernel32Dll::_SwitchToThread = NULL;
CreateToolhelp32Snapshot_Fn os::Kernel32Dll::_CreateToolhelp32Snapshot = NULL;
Module32First_Fn os::Kernel32Dll::_Module32First = NULL;
Module32Next_Fn os::Kernel32Dll::_Module32Next = NULL;
GetNativeSystemInfo_Fn os::Kernel32Dll::_GetNativeSystemInfo = NULL;

// Resolve both the branch-specific and the common Kernel32 entry points.
void os::Kernel32Dll::initialize() {
  if (!initialized) {
    HMODULE handle = ::GetModuleHandle("Kernel32.dll");
    assert(handle != NULL, "Just check");

    _SwitchToThread = (SwitchToThread_Fn)::GetProcAddress(handle, "SwitchToThread");
    _CreateToolhelp32Snapshot = (CreateToolhelp32Snapshot_Fn)
      ::GetProcAddress(handle, "CreateToolhelp32Snapshot");
    _Module32First = (Module32First_Fn)::GetProcAddress(handle, "Module32First");
    _Module32Next = (Module32Next_Fn)::GetProcAddress(handle, "Module32Next");
    _GetNativeSystemInfo = (GetNativeSystemInfo_Fn)::GetProcAddress(handle, "GetNativeSystemInfo");
    initializeCommon();  // resolve the functions that always need resolving

    initialized = TRUE;
  }
}

BOOL os::Kernel32Dll::SwitchToThread() {
  assert(initialized && _SwitchToThread != NULL,
         "SwitchToThreadAvailable() not yet called");
  return _SwitchToThread();
}


BOOL os::Kernel32Dll::SwitchToThreadAvailable() {
  if (!initialized) {
    initialize();
  }
  return _SwitchToThread != NULL;
}

// Help tools
// All three Toolhelp entry points must have resolved for module iteration.
BOOL os::Kernel32Dll::HelpToolsAvailable() {
  if (!initialized) {
    initialize();
  }
  return _CreateToolhelp32Snapshot != NULL &&
         _Module32First != NULL &&
         _Module32Next != NULL;
}

HANDLE os::Kernel32Dll::CreateToolhelp32Snapshot(DWORD dwFlags,DWORD th32ProcessId) {
  assert(initialized && _CreateToolhelp32Snapshot != NULL,
         "HelpToolsAvailable() not yet called");

  return _CreateToolhelp32Snapshot(dwFlags, th32ProcessId);
}

BOOL os::Kernel32Dll::Module32First(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
  assert(initialized && _Module32First != NULL,
         "HelpToolsAvailable() not yet called");

  return _Module32First(hSnapshot, lpme);
}
5577 inline BOOL os::Kernel32Dll::Module32Next(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
5578 assert(initialized && _Module32Next != NULL,
5579 "HelpToolsAvailable() not yet called");
5581 return _Module32Next(hSnapshot, lpme);
5582 }
// Lazily resolve, then report whether GetNativeSystemInfo is present.
BOOL os::Kernel32Dll::GetNativeSystemInfoAvailable() {
  if (!initialized) {
    initialize();
  }
  return _GetNativeSystemInfo != NULL;
}

// Call the resolved GetNativeSystemInfo; callers must first confirm
// availability via GetNativeSystemInfoAvailable().
void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo) {
  assert(initialized && _GetNativeSystemInfo != NULL,
         "GetNativeSystemInfoAvailable() not yet called");

  _GetNativeSystemInfo(lpSystemInfo);
}
5599 // PSAPI API
5602 typedef BOOL (WINAPI *EnumProcessModules_Fn)(HANDLE, HMODULE *, DWORD, LPDWORD);
5603 typedef BOOL (WINAPI *GetModuleFileNameEx_Fn)(HANDLE, HMODULE, LPTSTR, DWORD);;
5604 typedef BOOL (WINAPI *GetModuleInformation_Fn)(HANDLE, HMODULE, LPMODULEINFO, DWORD);
5606 EnumProcessModules_Fn os::PSApiDll::_EnumProcessModules = NULL;
5607 GetModuleFileNameEx_Fn os::PSApiDll::_GetModuleFileNameEx = NULL;
5608 GetModuleInformation_Fn os::PSApiDll::_GetModuleInformation = NULL;
5609 BOOL os::PSApiDll::initialized = FALSE;
5611 void os::PSApiDll::initialize() {
5612 if (!initialized) {
5613 HMODULE handle = os::win32::load_Windows_dll("PSAPI.DLL", NULL, 0);
5614 if (handle != NULL) {
5615 _EnumProcessModules = (EnumProcessModules_Fn)::GetProcAddress(handle,
5616 "EnumProcessModules");
5617 _GetModuleFileNameEx = (GetModuleFileNameEx_Fn)::GetProcAddress(handle,
5618 "GetModuleFileNameExA");
5619 _GetModuleInformation = (GetModuleInformation_Fn)::GetProcAddress(handle,
5620 "GetModuleInformation");
5621 }
5622 initialized = TRUE;
5623 }
5624 }
// Thin forwarder to the dynamically resolved EnumProcessModules.
// Precondition: PSApiAvailable() has been called and returned TRUE.
BOOL os::PSApiDll::EnumProcessModules(HANDLE hProcess, HMODULE *lpModule, DWORD cb, LPDWORD lpcbNeeded) {
  assert(initialized && _EnumProcessModules != NULL,
         "PSApiAvailable() not yet called");
  return _EnumProcessModules(hProcess, lpModule, cb, lpcbNeeded);
}
// Thin forwarder to the dynamically resolved GetModuleFileNameExA.
// Precondition: PSApiAvailable() has been called and returned TRUE.
// NOTE(review): returns DWORD although GetModuleFileNameEx_Fn is typedef'd
// with a BOOL return -- presumably the underlying API returns a length;
// confirm against the typedef above.
DWORD os::PSApiDll::GetModuleFileNameEx(HANDLE hProcess, HMODULE hModule, LPTSTR lpFilename, DWORD nSize) {
  assert(initialized && _GetModuleFileNameEx != NULL,
         "PSApiAvailable() not yet called");
  return _GetModuleFileNameEx(hProcess, hModule, lpFilename, nSize);
}
// Thin forwarder to the dynamically resolved GetModuleInformation.
// Precondition: PSApiAvailable() has been called and returned TRUE.
BOOL os::PSApiDll::GetModuleInformation(HANDLE hProcess, HMODULE hModule, LPMODULEINFO lpmodinfo, DWORD cb) {
  assert(initialized && _GetModuleInformation != NULL,
         "PSApiAvailable() not yet called");
  return _GetModuleInformation(hProcess, hModule, lpmodinfo, cb);
}
5646 BOOL os::PSApiDll::PSApiAvailable() {
5647 if (!initialized) {
5648 initialize();
5649 }
5650 return _EnumProcessModules != NULL &&
5651 _GetModuleFileNameEx != NULL &&
5652 _GetModuleInformation != NULL;
5653 }
// WinSock2 API

// Signatures of the ws2_32.dll entry points that are resolved at runtime.
typedef int (PASCAL FAR* WSAStartup_Fn)(WORD, LPWSADATA);
// NOTE(review): declared variadic '(...)' rather than with gethostbyname's
// actual single-pointer parameter -- presumably to sidestep const/signature
// differences across SDK versions; confirm before tightening.
typedef struct hostent *(PASCAL FAR *gethostbyname_Fn)(...);

// Lazily resolved function pointers; NULL until initialize() has run.
WSAStartup_Fn os::WinSock2Dll::_WSAStartup = NULL;
gethostbyname_Fn os::WinSock2Dll::_gethostbyname = NULL;
BOOL os::WinSock2Dll::initialized = FALSE;
5664 void os::WinSock2Dll::initialize() {
5665 if (!initialized) {
5666 HMODULE handle = os::win32::load_Windows_dll("ws2_32.dll", NULL, 0);
5667 if (handle != NULL) {
5668 _WSAStartup = (WSAStartup_Fn)::GetProcAddress(handle, "WSAStartup");
5669 _gethostbyname = (gethostbyname_Fn)::GetProcAddress(handle, "gethostbyname");
5670 }
5671 initialized = TRUE;
5672 }
5673 }
// Thin forwarder to the dynamically resolved WSAStartup.
// Precondition: WinSock2Available() has been called and returned TRUE.
BOOL os::WinSock2Dll::WSAStartup(WORD wVersionRequested, LPWSADATA lpWSAData) {
  assert(initialized && _WSAStartup != NULL,
         "WinSock2Available() not yet called");
  return _WSAStartup(wVersionRequested, lpWSAData);
}
// Thin forwarder to the dynamically resolved gethostbyname.
// Precondition: WinSock2Available() has been called and returned TRUE.
struct hostent* os::WinSock2Dll::gethostbyname(const char *name) {
  assert(initialized && _gethostbyname != NULL,
         "WinSock2Available() not yet called");
  return _gethostbyname(name);
}
5688 BOOL os::WinSock2Dll::WinSock2Available() {
5689 if (!initialized) {
5690 initialize();
5691 }
5692 return _WSAStartup != NULL &&
5693 _gethostbyname != NULL;
5694 }
// Signatures of the advapi32.dll entry points that are resolved at runtime.
typedef BOOL (WINAPI *AdjustTokenPrivileges_Fn)(HANDLE, BOOL, PTOKEN_PRIVILEGES, DWORD, PTOKEN_PRIVILEGES, PDWORD);
typedef BOOL (WINAPI *OpenProcessToken_Fn)(HANDLE, DWORD, PHANDLE);
typedef BOOL (WINAPI *LookupPrivilegeValue_Fn)(LPCTSTR, LPCTSTR, PLUID);

// Lazily resolved function pointers; NULL until initialize() has run.
AdjustTokenPrivileges_Fn os::Advapi32Dll::_AdjustTokenPrivileges = NULL;
OpenProcessToken_Fn os::Advapi32Dll::_OpenProcessToken = NULL;
LookupPrivilegeValue_Fn os::Advapi32Dll::_LookupPrivilegeValue = NULL;
BOOL os::Advapi32Dll::initialized = FALSE;
5705 void os::Advapi32Dll::initialize() {
5706 if (!initialized) {
5707 HMODULE handle = os::win32::load_Windows_dll("advapi32.dll", NULL, 0);
5708 if (handle != NULL) {
5709 _AdjustTokenPrivileges = (AdjustTokenPrivileges_Fn)::GetProcAddress(handle,
5710 "AdjustTokenPrivileges");
5711 _OpenProcessToken = (OpenProcessToken_Fn)::GetProcAddress(handle,
5712 "OpenProcessToken");
5713 _LookupPrivilegeValue = (LookupPrivilegeValue_Fn)::GetProcAddress(handle,
5714 "LookupPrivilegeValueA");
5715 }
5716 initialized = TRUE;
5717 }
5718 }
// Thin forwarder to the dynamically resolved AdjustTokenPrivileges.
// Precondition: AdvapiAvailable() has been called and returned TRUE.
BOOL os::Advapi32Dll::AdjustTokenPrivileges(HANDLE TokenHandle,
    BOOL DisableAllPrivileges, PTOKEN_PRIVILEGES NewState, DWORD BufferLength,
    PTOKEN_PRIVILEGES PreviousState, PDWORD ReturnLength) {
  assert(initialized && _AdjustTokenPrivileges != NULL,
         "AdvapiAvailable() not yet called");
  return _AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState,
                                BufferLength, PreviousState, ReturnLength);
}
// Thin forwarder to the dynamically resolved OpenProcessToken.
// Precondition: AdvapiAvailable() has been called and returned TRUE.
BOOL os::Advapi32Dll::OpenProcessToken(HANDLE ProcessHandle, DWORD DesiredAccess,
    PHANDLE TokenHandle) {
  assert(initialized && _OpenProcessToken != NULL,
         "AdvapiAvailable() not yet called");
  return _OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle);
}
// Thin forwarder to the dynamically resolved LookupPrivilegeValueA.
// Precondition: AdvapiAvailable() has been called and returned TRUE.
BOOL os::Advapi32Dll::LookupPrivilegeValue(LPCTSTR lpSystemName, LPCTSTR lpName, PLUID lpLuid) {
  assert(initialized && _LookupPrivilegeValue != NULL,
         "AdvapiAvailable() not yet called");
  return _LookupPrivilegeValue(lpSystemName, lpName, lpLuid);
}
5742 BOOL os::Advapi32Dll::AdvapiAvailable() {
5743 if (!initialized) {
5744 initialize();
5745 }
5746 return _AdjustTokenPrivileges != NULL &&
5747 _OpenProcessToken != NULL &&
5748 _LookupPrivilegeValue != NULL;
5749 }
5751 #endif
5753 #ifndef PRODUCT
5755 // test the code path in reserve_memory_special() that tries to allocate memory in a single
5756 // contiguous memory block at a particular address.
5757 // The test first tries to find a good approximate address to allocate at by using the same
5758 // method to allocate some memory at any address. The test then tries to allocate memory in
// the vicinity (not directly after it, to avoid possible by-chance use of that location).
// This is of course only an approximation; there is no guarantee that the vicinity of
5761 // the previously allocated memory is available for allocation. The only actual failure
5762 // that is reported is when the test tries to allocate at a particular location but gets a
5763 // different valid one. A NULL return value at this point is not considered an error but may
5764 // be legitimate.
5765 // If -XX:+VerboseInternalVMTests is enabled, print some explanatory messages.
5766 void TestReserveMemorySpecial_test() {
5767 if (!UseLargePages) {
5768 if (VerboseInternalVMTests) {
5769 gclog_or_tty->print("Skipping test because large pages are disabled");
5770 }
5771 return;
5772 }
5773 // save current value of globals
5774 bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation;
5775 bool old_use_numa_interleaving = UseNUMAInterleaving;
5777 // set globals to make sure we hit the correct code path
5778 UseLargePagesIndividualAllocation = UseNUMAInterleaving = false;
5780 // do an allocation at an address selected by the OS to get a good one.
5781 const size_t large_allocation_size = os::large_page_size() * 4;
5782 char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false);
5783 if (result == NULL) {
5784 if (VerboseInternalVMTests) {
5785 gclog_or_tty->print("Failed to allocate control block with size " SIZE_FORMAT ". Skipping remainder of test.",
5786 large_allocation_size);
5787 }
5788 } else {
5789 os::release_memory_special(result, large_allocation_size);
5791 // allocate another page within the recently allocated memory area which seems to be a good location. At least
5792 // we managed to get it once.
5793 const size_t expected_allocation_size = os::large_page_size();
5794 char* expected_location = result + os::large_page_size();
5795 char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false);
5796 if (actual_location == NULL) {
5797 if (VerboseInternalVMTests) {
5798 gclog_or_tty->print("Failed to allocate any memory at " PTR_FORMAT " size " SIZE_FORMAT ". Skipping remainder of test.",
5799 expected_location, large_allocation_size);
5800 }
5801 } else {
5802 // release memory
5803 os::release_memory_special(actual_location, expected_allocation_size);
5804 // only now check, after releasing any memory to avoid any leaks.
5805 assert(actual_location == expected_location,
5806 err_msg("Failed to allocate memory at requested location " PTR_FORMAT " of size " SIZE_FORMAT ", is " PTR_FORMAT " instead",
5807 expected_location, expected_allocation_size, actual_location));
5808 }
5809 }
5811 // restore globals
5812 UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation;
5813 UseNUMAInterleaving = old_use_numa_interleaving;
5814 }
5815 #endif // PRODUCT