src/os/windows/vm/os_windows.cpp

changeset 0:f90c822e73f8, child 6876:710a3c8b516e
1 /*
2 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 // Must be at least Windows 2000 or XP to use IsDebuggerPresent
26 #define _WIN32_WINNT 0x500
27
28 // no precompiled headers
29 #include "classfile/classLoader.hpp"
30 #include "classfile/systemDictionary.hpp"
31 #include "classfile/vmSymbols.hpp"
32 #include "code/icBuffer.hpp"
33 #include "code/vtableStubs.hpp"
34 #include "compiler/compileBroker.hpp"
35 #include "compiler/disassembler.hpp"
36 #include "interpreter/interpreter.hpp"
37 #include "jvm_windows.h"
38 #include "memory/allocation.inline.hpp"
39 #include "memory/filemap.hpp"
40 #include "mutex_windows.inline.hpp"
41 #include "oops/oop.inline.hpp"
42 #include "os_share_windows.hpp"
43 #include "prims/jniFastGetField.hpp"
44 #include "prims/jvm.h"
45 #include "prims/jvm_misc.hpp"
46 #include "runtime/arguments.hpp"
47 #include "runtime/extendedPC.hpp"
48 #include "runtime/globals.hpp"
49 #include "runtime/interfaceSupport.hpp"
50 #include "runtime/java.hpp"
51 #include "runtime/javaCalls.hpp"
52 #include "runtime/mutexLocker.hpp"
53 #include "runtime/objectMonitor.hpp"
54 #include "runtime/osThread.hpp"
55 #include "runtime/perfMemory.hpp"
56 #include "runtime/sharedRuntime.hpp"
57 #include "runtime/statSampler.hpp"
58 #include "runtime/stubRoutines.hpp"
59 #include "runtime/thread.inline.hpp"
60 #include "runtime/threadCritical.hpp"
61 #include "runtime/timer.hpp"
62 #include "services/attachListener.hpp"
63 #include "services/memTracker.hpp"
64 #include "services/runtimeService.hpp"
65 #include "utilities/decoder.hpp"
66 #include "utilities/defaultStream.hpp"
67 #include "utilities/events.hpp"
68 #include "utilities/growableArray.hpp"
69 #include "utilities/vmError.hpp"
70
71 #ifdef _DEBUG
72 #include <crtdbg.h>
73 #endif
74
75
76 #include <windows.h>
77 #include <sys/types.h>
78 #include <sys/stat.h>
79 #include <sys/timeb.h>
80 #include <objidl.h>
81 #include <shlobj.h>
82
83 #include <malloc.h>
84 #include <signal.h>
85 #include <direct.h>
86 #include <errno.h>
87 #include <fcntl.h>
88 #include <io.h>
89 #include <process.h> // For _beginthreadex(), _endthreadex()
90 #include <imagehlp.h> // For os::dll_address_to_function_name
91 /* for enumerating dll libraries */
92 #include <vdmdbg.h>
93
94 // for timer info max values which include all bits
95 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
96
97 // For DLL loading/load error detection
98 // Values of PE COFF
99 #define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c
100 #define IMAGE_FILE_SIGNATURE_LENGTH 4
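// Background for the two constants above: in the PE/COFF layout, the 32-bit value at
// offset 0x3c of the DOS header holds the file offset of the 4-byte "PE\0\0" signature,
// and the IMAGE_FILE_HEADER (whose Machine field identifies the target architecture)
// immediately follows that signature. os::dll_load() uses these constants to check a
// DLL's architecture when loading fails.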
101
102 static HANDLE main_process;
103 static HANDLE main_thread;
104 static int main_thread_id;
105
106 static FILETIME process_creation_time;
107 static FILETIME process_exit_time;
108 static FILETIME process_user_time;
109 static FILETIME process_kernel_time;
110
111 #ifdef _M_IA64
112 #define __CPU__ ia64
113 #elif _M_AMD64
114 #define __CPU__ amd64
115 #else
116 #define __CPU__ i486
117 #endif
118
119 // save DLL module handle, used by GetModuleFileName
120
121 HINSTANCE vm_lib_handle;
122
123 BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
124 switch (reason) {
125 case DLL_PROCESS_ATTACH:
126 vm_lib_handle = hinst;
127 if(ForceTimeHighResolution)
128 timeBeginPeriod(1L);
129 break;
130 case DLL_PROCESS_DETACH:
131 if(ForceTimeHighResolution)
132 timeEndPeriod(1L);
133 break;
134 default:
135 break;
136 }
137 return true;
138 }
139
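// Converts a FILETIME (a 64-bit count of 100-nanosecond intervals, split across two
// 32-bit words) into seconds as a double: low/10^7 plus high scaled by ~2^32/10^7.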
140 static inline double fileTimeAsDouble(FILETIME* time) {
141 const double high = (double) ((unsigned int) ~0);
142 const double split = 10000000.0;
143 double result = (time->dwLowDateTime / split) +
144 time->dwHighDateTime * (high/split);
145 return result;
146 }
147
148 // Implementation of os
149
150 bool os::getenv(const char* name, char* buffer, int len) {
151 int result = GetEnvironmentVariable(name, buffer, len);
152 return result > 0 && result < len;
153 }
154
155
156 // No setuid programs under Windows.
157 bool os::have_special_privileges() {
158 return false;
159 }
160
161
162 // This method is a periodic task to check for misbehaving JNI applications
163 // under CheckJNI; we can add any periodic checks here.
164 // On Windows it currently does nothing.
165 void os::run_periodic_checks() {
166 return;
167 }
168
169 #ifndef _WIN64
170 // previous UnhandledExceptionFilter, if there is one
171 static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;
172
173 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);
174 #endif
175 void os::init_system_properties_values() {
176 /* sysclasspath, java_home, dll_dir */
177 {
178 char *home_path;
179 char *dll_path;
180 char *pslash;
181 char *bin = "\\bin";
182 char home_dir[MAX_PATH];
183
184 if (!getenv("_ALT_JAVA_HOME_DIR", home_dir, MAX_PATH)) {
185 os::jvm_path(home_dir, sizeof(home_dir));
186 // Found the full path to jvm.dll.
187 // Now cut the path to <java_home>/jre if we can.
188 *(strrchr(home_dir, '\\')) = '\0'; /* get rid of \jvm.dll */
189 pslash = strrchr(home_dir, '\\');
190 if (pslash != NULL) {
191 *pslash = '\0'; /* get rid of \{client|server} */
192 pslash = strrchr(home_dir, '\\');
193 if (pslash != NULL)
194 *pslash = '\0'; /* get rid of \bin */
195 }
196 }
197
198 home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
199 if (home_path == NULL)
200 return;
201 strcpy(home_path, home_dir);
202 Arguments::set_java_home(home_path);
203
204 dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1, mtInternal);
205 if (dll_path == NULL)
206 return;
207 strcpy(dll_path, home_dir);
208 strcat(dll_path, bin);
209 Arguments::set_dll_dir(dll_path);
210
211 if (!set_boot_path('\\', ';'))
212 return;
213 }
214
215 /* library_path */
216 #define EXT_DIR "\\lib\\ext"
217 #define BIN_DIR "\\bin"
218 #define PACKAGE_DIR "\\Sun\\Java"
219 {
220 /* Win32 library search order (See the documentation for LoadLibrary):
221 *
222 * 1. The directory from which the application is loaded.
223 * 2. The system wide Java Extensions directory (Java only)
224 * 3. System directory (GetSystemDirectory)
225 * 4. Windows directory (GetWindowsDirectory)
226 * 5. The PATH environment variable
227 * 6. The current directory
228 */
229
230 char *library_path;
231 char tmp[MAX_PATH];
232 char *path_str = ::getenv("PATH");
233
234 library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
235 sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal);
236
237 library_path[0] = '\0';
238
239 GetModuleFileName(NULL, tmp, sizeof(tmp));
240 *(strrchr(tmp, '\\')) = '\0';
241 strcat(library_path, tmp);
242
243 GetWindowsDirectory(tmp, sizeof(tmp));
244 strcat(library_path, ";");
245 strcat(library_path, tmp);
246 strcat(library_path, PACKAGE_DIR BIN_DIR);
247
248 GetSystemDirectory(tmp, sizeof(tmp));
249 strcat(library_path, ";");
250 strcat(library_path, tmp);
251
252 GetWindowsDirectory(tmp, sizeof(tmp));
253 strcat(library_path, ";");
254 strcat(library_path, tmp);
255
256 if (path_str) {
257 strcat(library_path, ";");
258 strcat(library_path, path_str);
259 }
260
261 strcat(library_path, ";.");
262
263 Arguments::set_library_path(library_path);
264 FREE_C_HEAP_ARRAY(char, library_path, mtInternal);
265 }
266
267 /* Default extensions directory */
268 {
269 char path[MAX_PATH];
270 char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1];
271 GetWindowsDirectory(path, MAX_PATH);
272 sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR,
273 path, PACKAGE_DIR, EXT_DIR);
274 Arguments::set_ext_dirs(buf);
275 }
276 #undef EXT_DIR
277 #undef BIN_DIR
278 #undef PACKAGE_DIR
279
280 /* Default endorsed standards directory. */
281 {
282 #define ENDORSED_DIR "\\lib\\endorsed"
283 size_t len = strlen(Arguments::get_java_home()) + sizeof(ENDORSED_DIR);
284 char * buf = NEW_C_HEAP_ARRAY(char, len, mtInternal);
285 sprintf(buf, "%s%s", Arguments::get_java_home(), ENDORSED_DIR);
286 Arguments::set_endorsed_dirs(buf);
287 #undef ENDORSED_DIR
288 }
289
290 #ifndef _WIN64
291 // set our UnhandledExceptionFilter and save any previous one
292 prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
293 #endif
294
295 // Done
296 return;
297 }
298
299 void os::breakpoint() {
300 DebugBreak();
301 }
302
303 // Invoked from the BREAKPOINT Macro
304 extern "C" void breakpoint() {
305 os::breakpoint();
306 }
307
308 /*
309 * RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP.
310 * So far, this method is only used by Native Memory Tracking, which is
311 * only supported on Windows XP or later.
312 */
313 address os::get_caller_pc(int n) {
314 #ifdef _NMT_NOINLINE_
315 n ++;
316 #endif
317 address pc;
318 if (os::Kernel32Dll::RtlCaptureStackBackTrace(n + 1, 1, (PVOID*)&pc, NULL) == 1) {
319 return pc;
320 }
321 return NULL;
322 }
323
324
325 // os::current_stack_base()
326 //
327 // Returns the base of the stack, which is the stack's
328 // starting address. This function must be called
329 // while running on the stack of the thread being queried.
330
331 address os::current_stack_base() {
332 MEMORY_BASIC_INFORMATION minfo;
333 address stack_bottom;
334 size_t stack_size;
335
336 VirtualQuery(&minfo, &minfo, sizeof(minfo));
337 stack_bottom = (address)minfo.AllocationBase;
338 stack_size = minfo.RegionSize;
339
340 // Add up the sizes of all the regions with the same
341 // AllocationBase.
342 while( 1 )
343 {
344 VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo));
345 if ( stack_bottom == (address)minfo.AllocationBase )
346 stack_size += minfo.RegionSize;
347 else
348 break;
349 }
350
351 #ifdef _M_IA64
352 // IA64 has memory and register stacks
353 //
354 // This is the stack layout you get on NT/IA64 if you specify 1MB stack limit
355 // at thread creation (1MB backing store growing upwards, 1MB memory stack
356 // growing downwards, 2MB summed up)
357 //
358 // ...
359 // ------- top of stack (high address) -----
360 // |
361 // | 1MB
362 // | Backing Store (Register Stack)
363 // |
364 // | / \
365 // | |
366 // | |
367 // | |
368 // ------------------------ stack base -----
369 // | 1MB
370 // | Memory Stack
371 // |
372 // | |
373 // | |
374 // | |
375 // | \ /
376 // |
377 // ----- bottom of stack (low address) -----
378 // ...
379
380 stack_size = stack_size / 2;
381 #endif
382 return stack_bottom + stack_size;
383 }
384
385 size_t os::current_stack_size() {
386 size_t sz;
387 MEMORY_BASIC_INFORMATION minfo;
388 VirtualQuery(&minfo, &minfo, sizeof(minfo));
389 sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase;
390 return sz;
391 }
392
393 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
394 const struct tm* time_struct_ptr = localtime(clock);
395 if (time_struct_ptr != NULL) {
396 *res = *time_struct_ptr;
397 return res;
398 }
399 return NULL;
400 }
401
402 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
403
404 // Thread start routine for all new Java threads
405 static unsigned __stdcall java_start(Thread* thread) {
406 // Try to randomize the cache line index of hot stack frames.
407 // This helps when threads with the same stack traces evict each other's
408 // cache lines. The threads can be either from the same JVM instance, or
409 // from different JVM instances. The benefit is especially true for
410 // processors with hyperthreading technology.
411 static int counter = 0;
412 int pid = os::current_process_id();
413 _alloca(((pid ^ counter++) & 7) * 128);
414
415 OSThread* osthr = thread->osthread();
416 assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
417
418 if (UseNUMA) {
419 int lgrp_id = os::numa_get_group_id();
420 if (lgrp_id != -1) {
421 thread->set_lgrp_id(lgrp_id);
422 }
423 }
424
425
426 // Install a win32 structured exception handler around every thread created
427 // by the VM, so the VM can generate an error dump when an exception occurs in a
428 // non-Java thread (e.g. the VM thread).
429 __try {
430 thread->run();
431 } __except(topLevelExceptionFilter(
432 (_EXCEPTION_POINTERS*)_exception_info())) {
433 // Nothing to do.
434 }
435
436 // One less thread is executing
437 // When the VMThread gets here, the main thread may have already exited
438 // which frees the CodeHeap containing the Atomic::add code
439 if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
440 Atomic::dec_ptr((intptr_t*)&os::win32::_os_thread_count);
441 }
442
443 return 0;
444 }
445
446 static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle, int thread_id) {
447 // Allocate the OSThread object
448 OSThread* osthread = new OSThread(NULL, NULL);
449 if (osthread == NULL) return NULL;
450
451 // Initialize support for Java interrupts
452 HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
453 if (interrupt_event == NULL) {
454 delete osthread;
455 return NULL;
456 }
457 osthread->set_interrupt_event(interrupt_event);
458
459 // Store info on the Win32 thread into the OSThread
460 osthread->set_thread_handle(thread_handle);
461 osthread->set_thread_id(thread_id);
462
463 if (UseNUMA) {
464 int lgrp_id = os::numa_get_group_id();
465 if (lgrp_id != -1) {
466 thread->set_lgrp_id(lgrp_id);
467 }
468 }
469
470 // Initial thread state is INITIALIZED, not SUSPENDED
471 osthread->set_state(INITIALIZED);
472
473 return osthread;
474 }
475
476
477 bool os::create_attached_thread(JavaThread* thread) {
478 #ifdef ASSERT
479 thread->verify_not_published();
480 #endif
481 HANDLE thread_h;
482 if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(),
483 &thread_h, THREAD_ALL_ACCESS, false, 0)) {
484 fatal("DuplicateHandle failed\n");
485 }
486 OSThread* osthread = create_os_thread(thread, thread_h,
487 (int)current_thread_id());
488 if (osthread == NULL) {
489 return false;
490 }
491
492 // Initial thread state is RUNNABLE
493 osthread->set_state(RUNNABLE);
494
495 thread->set_osthread(osthread);
496 return true;
497 }
498
499 bool os::create_main_thread(JavaThread* thread) {
500 #ifdef ASSERT
501 thread->verify_not_published();
502 #endif
503 if (_starting_thread == NULL) {
504 _starting_thread = create_os_thread(thread, main_thread, main_thread_id);
505 if (_starting_thread == NULL) {
506 return false;
507 }
508 }
509
510 // The primordial thread is runnable from the start
511 _starting_thread->set_state(RUNNABLE);
512
513 thread->set_osthread(_starting_thread);
514 return true;
515 }
516
517 // Allocate and initialize a new OSThread
518 bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
519 unsigned thread_id;
520
521 // Allocate the OSThread object
522 OSThread* osthread = new OSThread(NULL, NULL);
523 if (osthread == NULL) {
524 return false;
525 }
526
527 // Initialize support for Java interrupts
528 HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
529 if (interrupt_event == NULL) {
530 delete osthread;
531 return false;
532 }
533 osthread->set_interrupt_event(interrupt_event);
534 osthread->set_interrupted(false);
535
536 thread->set_osthread(osthread);
537
538 if (stack_size == 0) {
539 switch (thr_type) {
540 case os::java_thread:
541 // Java threads use ThreadStackSize, whose default value can be changed with the -Xss flag
542 if (JavaThread::stack_size_at_create() > 0)
543 stack_size = JavaThread::stack_size_at_create();
544 break;
545 case os::compiler_thread:
546 if (CompilerThreadStackSize > 0) {
547 stack_size = (size_t)(CompilerThreadStackSize * K);
548 break;
549 } // else fall through:
550 // use VMThreadStackSize if CompilerThreadStackSize is not defined
551 case os::vm_thread:
552 case os::pgc_thread:
553 case os::cgc_thread:
554 case os::watcher_thread:
555 if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
556 break;
557 }
558 }
559
560 // Create the Win32 thread
561 //
562 // Contrary to what the MSDN documentation says, "stack_size" in _beginthreadex()
563 // does not specify stack size. Instead, it specifies the size of
564 // initially committed space. The stack size is determined by
565 // the PE header in the executable. If the committed "stack_size" is larger
566 // than default value in the PE header, the stack is rounded up to the
567 // nearest multiple of 1MB. For example if the launcher has default
568 // stack size of 320k, specifying any size less than 320k does not
569 // affect the actual stack size at all, it only affects the initial
570 // commitment. On the other hand, specifying 'stack_size' larger than
571 // default value may cause significant increase in memory usage, because
572 // not only the stack space will be rounded up to MB, but also the
573 // entire space is committed upfront.
574 //
575 // Finally Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION'
576 // for CreateThread() that can treat 'stack_size' as stack size. However we
577 // are not supposed to call CreateThread() directly according to MSDN
578 // document because JVM uses C runtime library. The good news is that the
579 // flag appears to work with _beginthreadex() as well.
580
581 #ifndef STACK_SIZE_PARAM_IS_A_RESERVATION
582 #define STACK_SIZE_PARAM_IS_A_RESERVATION (0x10000)
583 #endif
584
585 HANDLE thread_handle =
586 (HANDLE)_beginthreadex(NULL,
587 (unsigned)stack_size,
588 (unsigned (__stdcall *)(void*)) java_start,
589 thread,
590 CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION,
591 &thread_id);
592 if (thread_handle == NULL) {
593 // perhaps STACK_SIZE_PARAM_IS_A_RESERVATION is not supported, try again
594 // without the flag.
595 thread_handle =
596 (HANDLE)_beginthreadex(NULL,
597 (unsigned)stack_size,
598 (unsigned (__stdcall *)(void*)) java_start,
599 thread,
600 CREATE_SUSPENDED,
601 &thread_id);
602 }
603 if (thread_handle == NULL) {
604 // Need to clean up stuff we've allocated so far
605 CloseHandle(osthread->interrupt_event());
606 thread->set_osthread(NULL);
607 delete osthread;
608 return false;
609 }
610
611 Atomic::inc_ptr((intptr_t*)&os::win32::_os_thread_count);
612
613 // Store info on the Win32 thread into the OSThread
614 osthread->set_thread_handle(thread_handle);
615 osthread->set_thread_id(thread_id);
616
617 // Initial thread state is INITIALIZED, not SUSPENDED
618 osthread->set_state(INITIALIZED);
619
620 // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
621 return true;
622 }
623
624
625 // Free Win32 resources related to the OSThread
626 void os::free_thread(OSThread* osthread) {
627 assert(osthread != NULL, "osthread not set");
628 CloseHandle(osthread->thread_handle());
629 CloseHandle(osthread->interrupt_event());
630 delete osthread;
631 }
632
633
634 static int has_performance_count = 0;
635 static jlong first_filetime;
636 static jlong initial_performance_count;
637 static jlong performance_frequency;
638
639
640 jlong as_long(LARGE_INTEGER x) {
641 jlong result = 0; // initialization to avoid warning
642 set_high(&result, x.HighPart);
643 set_low(&result, x.LowPart);
644 return result;
645 }
646
647
648 jlong os::elapsed_counter() {
649 LARGE_INTEGER count;
650 if (has_performance_count) {
651 QueryPerformanceCounter(&count);
652 return as_long(count) - initial_performance_count;
653 } else {
654 FILETIME wt;
655 GetSystemTimeAsFileTime(&wt);
656 return (jlong_from(wt.dwHighDateTime, wt.dwLowDateTime) - first_filetime);
657 }
658 }
659
660
661 jlong os::elapsed_frequency() {
662 if (has_performance_count) {
663 return performance_frequency;
664 } else {
665 // FILETIME counts the number of 100-nanosecond intervals since January 1, 1601.
666 return 10000000;
667 }
668 }
669
670
671 julong os::available_memory() {
672 return win32::available_memory();
673 }
674
675 julong os::win32::available_memory() {
676 // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return an incorrect
677 // value if total memory is larger than 4GB
678 MEMORYSTATUSEX ms;
679 ms.dwLength = sizeof(ms);
680 GlobalMemoryStatusEx(&ms);
681
682 return (julong)ms.ullAvailPhys;
683 }
684
685 julong os::physical_memory() {
686 return win32::physical_memory();
687 }
688
689 bool os::has_allocatable_memory_limit(julong* limit) {
690 MEMORYSTATUSEX ms;
691 ms.dwLength = sizeof(ms);
692 GlobalMemoryStatusEx(&ms);
693 #ifdef _LP64
694 *limit = (julong)ms.ullAvailVirtual;
695 return true;
696 #else
697 // Limit to 1400m because of the 2gb address space wall
698 *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual);
699 return true;
700 #endif
701 }
702
703 // VC6 lacks DWORD_PTR
704 #if _MSC_VER < 1300
705 typedef UINT_PTR DWORD_PTR;
706 #endif
707
708 int os::active_processor_count() {
709 DWORD_PTR lpProcessAffinityMask = 0;
710 DWORD_PTR lpSystemAffinityMask = 0;
711 int proc_count = processor_count();
712 if (proc_count <= sizeof(UINT_PTR) * BitsPerByte &&
713 GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) {
714 // The number of active processors is the number of bits set in the process affinity mask
715 int bitcount = 0;
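// x & (x-1) clears the lowest set bit, so this loop runs once per bit set in the
// affinity mask (Kernighan's population-count idiom).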
716 while (lpProcessAffinityMask != 0) {
717 lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1);
718 bitcount++;
719 }
720 return bitcount;
721 } else {
722 return proc_count;
723 }
724 }
725
726 void os::set_native_thread_name(const char *name) {
727 // Not yet implemented.
728 return;
729 }
730
731 bool os::distribute_processes(uint length, uint* distribution) {
732 // Not yet implemented.
733 return false;
734 }
735
736 bool os::bind_to_processor(uint processor_id) {
737 // Not yet implemented.
738 return false;
739 }
740
741 static void initialize_performance_counter() {
742 LARGE_INTEGER count;
743 if (QueryPerformanceFrequency(&count)) {
744 has_performance_count = 1;
745 performance_frequency = as_long(count);
746 QueryPerformanceCounter(&count);
747 initial_performance_count = as_long(count);
748 } else {
749 has_performance_count = 0;
750 FILETIME wt;
751 GetSystemTimeAsFileTime(&wt);
752 first_filetime = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
753 }
754 }
755
756
757 double os::elapsedTime() {
758 return (double) elapsed_counter() / (double) elapsed_frequency();
759 }
760
761
762 // Windows format:
763 // The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601.
764 // Java format:
765 // Java standards require the number of milliseconds since 1/1/1970
766
767 // Constant offset - calculated using offset()
768 static jlong _offset = 116444736000000000;
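// Sanity check on the constant: 1601-01-01 to 1970-01-01 spans 134774 days
// (369 years, 89 of them leap years) = 11,644,473,600 seconds; at 10,000,000
// 100-ns ticks per second that is 116,444,736,000,000,000 ticks, which matches
// _offset. The debug-only offset() below recomputes the same value.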
769 // Fake time counter for reproducible results when debugging
770 static jlong fake_time = 0;
771
772 #ifdef ASSERT
773 // Just to be safe, recalculate the offset in debug mode
774 static jlong _calculated_offset = 0;
775 static int _has_calculated_offset = 0;
776
777 jlong offset() {
778 if (_has_calculated_offset) return _calculated_offset;
779 SYSTEMTIME java_origin;
780 java_origin.wYear = 1970;
781 java_origin.wMonth = 1;
782 java_origin.wDayOfWeek = 0; // ignored
783 java_origin.wDay = 1;
784 java_origin.wHour = 0;
785 java_origin.wMinute = 0;
786 java_origin.wSecond = 0;
787 java_origin.wMilliseconds = 0;
788 FILETIME jot;
789 if (!SystemTimeToFileTime(&java_origin, &jot)) {
790 fatal(err_msg("Error = %d\nWindows error", GetLastError()));
791 }
792 _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
793 _has_calculated_offset = 1;
794 assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal");
795 return _calculated_offset;
796 }
797 #else
798 jlong offset() {
799 return _offset;
800 }
801 #endif
802
803 jlong windows_to_java_time(FILETIME wt) {
804 jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
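// 'a' counts 100-ns ticks since 1601-01-01; subtracting offset() rebases it to the
// Java epoch (1970-01-01), and dividing by 10,000 converts 100-ns ticks to milliseconds.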
805 return (a - offset()) / 10000;
806 }
807
808 FILETIME java_to_windows_time(jlong l) {
809 jlong a = (l * 10000) + offset();
810 FILETIME result;
811 result.dwHighDateTime = high(a);
812 result.dwLowDateTime = low(a);
813 return result;
814 }
815
816 bool os::supports_vtime() { return true; }
817 bool os::enable_vtime() { return false; }
818 bool os::vtime_enabled() { return false; }
819
820 double os::elapsedVTime() {
821 FILETIME created;
822 FILETIME exited;
823 FILETIME kernel;
824 FILETIME user;
825 if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) {
826 // the resolution of windows_to_java_time() should be sufficient (ms)
827 return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS;
828 } else {
829 return elapsedTime();
830 }
831 }
832
833 jlong os::javaTimeMillis() {
834 if (UseFakeTimers) {
835 return fake_time++;
836 } else {
837 FILETIME wt;
838 GetSystemTimeAsFileTime(&wt);
839 return windows_to_java_time(wt);
840 }
841 }
842
843 jlong os::javaTimeNanos() {
844 if (!has_performance_count) {
845 return javaTimeMillis() * NANOSECS_PER_MILLISEC; // the best we can do.
846 } else {
847 LARGE_INTEGER current_count;
848 QueryPerformanceCounter(&current_count);
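// Convert the raw counter value to nanoseconds: counter / frequency gives seconds
// (from an arbitrary origin), and scaling by NANOSECS_PER_SEC gives nanoseconds.
// The arithmetic is done in double precision.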
849 double current = as_long(current_count);
850 double freq = performance_frequency;
851 jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC);
852 return time;
853 }
854 }
855
856 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
857 if (!has_performance_count) {
858 // javaTimeMillis() doesn't have much precision,
859 // but it is not going to wrap -- so all 64 bits
860 info_ptr->max_value = ALL_64_BITS;
861
862 // this is a wall clock timer, so may skip
863 info_ptr->may_skip_backward = true;
864 info_ptr->may_skip_forward = true;
865 } else {
866 jlong freq = performance_frequency;
867 if (freq < NANOSECS_PER_SEC) {
868 // the performance counter is 64 bits and we will
869 // be multiplying it -- so no wrap in 64 bits
870 info_ptr->max_value = ALL_64_BITS;
871 } else if (freq > NANOSECS_PER_SEC) {
872 // use the max value the counter can reach to
873 // determine the max value which could be returned
874 julong max_counter = (julong)ALL_64_BITS;
875 info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC));
876 } else {
877 // the performance counter is 64 bits and we will
878 // be using it directly -- so no wrap in 64 bits
879 info_ptr->max_value = ALL_64_BITS;
880 }
881
882 // using a counter, so no skipping
883 info_ptr->may_skip_backward = false;
884 info_ptr->may_skip_forward = false;
885 }
886 info_ptr->kind = JVMTI_TIMER_ELAPSED; // elapsed not CPU time
887 }
888
889 char* os::local_time_string(char *buf, size_t buflen) {
890 SYSTEMTIME st;
891 GetLocalTime(&st);
892 jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
893 st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond);
894 return buf;
895 }
896
897 bool os::getTimesSecs(double* process_real_time,
898 double* process_user_time,
899 double* process_system_time) {
900 HANDLE h_process = GetCurrentProcess();
901 FILETIME create_time, exit_time, kernel_time, user_time;
902 BOOL result = GetProcessTimes(h_process,
903 &create_time,
904 &exit_time,
905 &kernel_time,
906 &user_time);
907 if (result != 0) {
908 FILETIME wt;
909 GetSystemTimeAsFileTime(&wt);
910 jlong rtc_millis = windows_to_java_time(wt);
911 jlong user_millis = windows_to_java_time(user_time);
912 jlong system_millis = windows_to_java_time(kernel_time);
913 *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS);
914 *process_user_time = ((double) user_millis) / ((double) MILLIUNITS);
915 *process_system_time = ((double) system_millis) / ((double) MILLIUNITS);
916 return true;
917 } else {
918 return false;
919 }
920 }
921
922 void os::shutdown() {
923
924 // allow PerfMemory to attempt cleanup of any persistent resources
925 perfMemory_exit();
926
927 // flush buffered output, finish log files
928 ostream_abort();
929
930 // Check for abort hook
931 abort_hook_t abort_hook = Arguments::abort_hook();
932 if (abort_hook != NULL) {
933 abort_hook();
934 }
935 }
936
937
938 static BOOL (WINAPI *_MiniDumpWriteDump) ( HANDLE, DWORD, HANDLE, MINIDUMP_TYPE, PMINIDUMP_EXCEPTION_INFORMATION,
939 PMINIDUMP_USER_STREAM_INFORMATION, PMINIDUMP_CALLBACK_INFORMATION);
940
941 void os::check_or_create_dump(void* exceptionRecord, void* contextRecord, char* buffer, size_t bufferSize) {
942 HINSTANCE dbghelp;
943 EXCEPTION_POINTERS ep;
944 MINIDUMP_EXCEPTION_INFORMATION mei;
945 MINIDUMP_EXCEPTION_INFORMATION* pmei;
946
947 HANDLE hProcess = GetCurrentProcess();
948 DWORD processId = GetCurrentProcessId();
949 HANDLE dumpFile;
950 MINIDUMP_TYPE dumpType;
951 static const char* cwd;
952
953 // Default is to always create a dump for debug builds; on product builds, only dump on server versions of Windows.
954 #ifndef ASSERT
955 // If running on a client version of Windows and user has not explicitly enabled dumping
956 if (!os::win32::is_windows_server() && !CreateMinidumpOnCrash) {
957 VMError::report_coredump_status("Minidumps are not enabled by default on client versions of Windows", false);
958 return;
959 // If running on a server version of Windows and the user has explicitly disabled dumping
960 } else if (os::win32::is_windows_server() && !FLAG_IS_DEFAULT(CreateMinidumpOnCrash) && !CreateMinidumpOnCrash) {
961 VMError::report_coredump_status("Minidump has been disabled from the command line", false);
962 return;
963 }
964 #else
965 if (!FLAG_IS_DEFAULT(CreateMinidumpOnCrash) && !CreateMinidumpOnCrash) {
966 VMError::report_coredump_status("Minidump has been disabled from the command line", false);
967 return;
968 }
969 #endif
970
971 dbghelp = os::win32::load_Windows_dll("DBGHELP.DLL", NULL, 0);
972
973 if (dbghelp == NULL) {
974 VMError::report_coredump_status("Failed to load dbghelp.dll", false);
975 return;
976 }
977
978 _MiniDumpWriteDump = CAST_TO_FN_PTR(
979 BOOL(WINAPI *)( HANDLE, DWORD, HANDLE, MINIDUMP_TYPE, PMINIDUMP_EXCEPTION_INFORMATION,
980 PMINIDUMP_USER_STREAM_INFORMATION, PMINIDUMP_CALLBACK_INFORMATION),
981 GetProcAddress(dbghelp, "MiniDumpWriteDump"));
982
983 if (_MiniDumpWriteDump == NULL) {
984 VMError::report_coredump_status("Failed to find MiniDumpWriteDump() in module dbghelp.dll", false);
985 return;
986 }
987
988 dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData);
989
990 // Older versions of dbghelp.h don't contain all the dump types we want; dbghelp.h with
991 // API_VERSION_NUMBER 11 or higher contains the ones we want though
992 #if API_VERSION_NUMBER >= 11
993 dumpType = (MINIDUMP_TYPE)(dumpType | MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo |
994 MiniDumpWithUnloadedModules);
995 #endif
996
997 cwd = get_current_directory(NULL, 0);
998 jio_snprintf(buffer, bufferSize, "%s\\hs_err_pid%u.mdmp",cwd, current_process_id());
999 dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
1000
1001 if (dumpFile == INVALID_HANDLE_VALUE) {
1002 VMError::report_coredump_status("Failed to create file for dumping", false);
1003 return;
1004 }
1005 if (exceptionRecord != NULL && contextRecord != NULL) {
1006 ep.ContextRecord = (PCONTEXT) contextRecord;
1007 ep.ExceptionRecord = (PEXCEPTION_RECORD) exceptionRecord;
1008
1009 mei.ThreadId = GetCurrentThreadId();
1010 mei.ExceptionPointers = &ep;
1011 pmei = &mei;
1012 } else {
1013 pmei = NULL;
1014 }
1015
1016
1017 // Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all
1018 // the dump types we really want. If the first call fails, fall back to just using MiniDumpWithFullMemory.
1019 if (_MiniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) == false &&
1020 _MiniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL) == false) {
1021 DWORD error = GetLastError();
1022 LPTSTR msgbuf = NULL;
1023
1024 if (FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
1025 FORMAT_MESSAGE_FROM_SYSTEM |
1026 FORMAT_MESSAGE_IGNORE_INSERTS,
1027 NULL, error, 0, (LPTSTR)&msgbuf, 0, NULL) != 0) {
1028
1029 jio_snprintf(buffer, bufferSize, "Call to MiniDumpWriteDump() failed (Error 0x%x: %s)", error, msgbuf);
1030 LocalFree(msgbuf);
1031 } else {
1032 // Call to FormatMessage failed, just include the result from GetLastError
1033 jio_snprintf(buffer, bufferSize, "Call to MiniDumpWriteDump() failed (Error 0x%x)", error);
1034 }
1035 VMError::report_coredump_status(buffer, false);
1036 } else {
1037 VMError::report_coredump_status(buffer, true);
1038 }
1039
1040 CloseHandle(dumpFile);
1041 }
1042
1043
1044
1045 void os::abort(bool dump_core)
1046 {
1047 os::shutdown();
1048 // no core dump on Windows
1049 ::exit(1);
1050 }
1051
1052 // Die immediately, no exit hook, no abort hook, no cleanup.
1053 void os::die() {
1054 _exit(-1);
1055 }
1056
1057 // Directory routines copied from src/win32/native/java/io/dirent_md.c
1058 // * dirent_md.c 1.15 00/02/02
1059 //
1060 // The declarations for DIR and struct dirent are in jvm_win32.h.
1061
1062 /* Caller must have already run dirname through JVM_NativePath, which removes
1063 duplicate slashes and converts all instances of '/' into '\\'. */
1064
1065 DIR *
1066 os::opendir(const char *dirname)
1067 {
1068 assert(dirname != NULL, "just checking"); // hotspot change
1069 DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
1070 DWORD fattr; // hotspot change
1071 char alt_dirname[4] = { 0, 0, 0, 0 };
1072
1073 if (dirp == 0) {
1074 errno = ENOMEM;
1075 return 0;
1076 }
1077
1078 /*
1079 * Win32 accepts "\" in its POSIX stat(), but refuses to treat it
1080 * as a directory in FindFirstFile(). We detect this case here and
1081 * prepend the current drive name.
1082 */
1083 if (dirname[1] == '\0' && dirname[0] == '\\') {
1084 alt_dirname[0] = _getdrive() + 'A' - 1;
1085 alt_dirname[1] = ':';
1086 alt_dirname[2] = '\\';
1087 alt_dirname[3] = '\0';
1088 dirname = alt_dirname;
1089 }
1090
1091 dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
1092 if (dirp->path == 0) {
1093 free(dirp, mtInternal);
1094 errno = ENOMEM;
1095 return 0;
1096 }
1097 strcpy(dirp->path, dirname);
1098
1099 fattr = GetFileAttributes(dirp->path);
1100 if (fattr == 0xffffffff) {
1101 free(dirp->path, mtInternal);
1102 free(dirp, mtInternal);
1103 errno = ENOENT;
1104 return 0;
1105 } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
1106 free(dirp->path, mtInternal);
1107 free(dirp, mtInternal);
1108 errno = ENOTDIR;
1109 return 0;
1110 }
1111
1112 /* Append "*.*", or possibly "\\*.*", to path */
1113 if (dirp->path[1] == ':'
1114 && (dirp->path[2] == '\0'
1115 || (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
1116 /* No '\\' needed for cases like "Z:" or "Z:\" */
1117 strcat(dirp->path, "*.*");
1118 } else {
1119 strcat(dirp->path, "\\*.*");
1120 }
1121
1122 dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
1123 if (dirp->handle == INVALID_HANDLE_VALUE) {
1124 if (GetLastError() != ERROR_FILE_NOT_FOUND) {
1125 free(dirp->path, mtInternal);
1126 free(dirp, mtInternal);
1127 errno = EACCES;
1128 return 0;
1129 }
1130 }
1131 return dirp;
1132 }
1133
1134 /* parameter dbuf unused on Windows */
1135
1136 struct dirent *
1137 os::readdir(DIR *dirp, dirent *dbuf)
1138 {
1139 assert(dirp != NULL, "just checking"); // hotspot change
1140 if (dirp->handle == INVALID_HANDLE_VALUE) {
1141 return 0;
1142 }
1143
1144 strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);
1145
1146 if (!FindNextFile(dirp->handle, &dirp->find_data)) {
1147 if (GetLastError() == ERROR_INVALID_HANDLE) {
1148 errno = EBADF;
1149 return 0;
1150 }
1151 FindClose(dirp->handle);
1152 dirp->handle = INVALID_HANDLE_VALUE;
1153 }
1154
1155 return &dirp->dirent;
1156 }
1157
1158 int
1159 os::closedir(DIR *dirp)
1160 {
1161 assert(dirp != NULL, "just checking"); // hotspot change
1162 if (dirp->handle != INVALID_HANDLE_VALUE) {
1163 if (!FindClose(dirp->handle)) {
1164 errno = EBADF;
1165 return -1;
1166 }
1167 dirp->handle = INVALID_HANDLE_VALUE;
1168 }
1169 free(dirp->path, mtInternal);
1170 free(dirp, mtInternal);
1171 return 0;
1172 }
1173
1174 // This must be hard coded because it's the system's temporary
1175 // directory, not the Java application's temp directory (a la java.io.tmpdir).
1176 const char* os::get_temp_directory() {
1177 static char path_buf[MAX_PATH];
1178 if (GetTempPath(MAX_PATH, path_buf)>0)
1179 return path_buf;
1180 else{
1181 path_buf[0]='\0';
1182 return path_buf;
1183 }
1184 }
1185
1186 static bool file_exists(const char* filename) {
1187 if (filename == NULL || strlen(filename) == 0) {
1188 return false;
1189 }
1190 return GetFileAttributes(filename) != INVALID_FILE_ATTRIBUTES;
1191 }
1192
1193 bool os::dll_build_name(char *buffer, size_t buflen,
1194 const char* pname, const char* fname) {
1195 bool retval = false;
1196 const size_t pnamelen = pname ? strlen(pname) : 0;
1197 const char c = (pnamelen > 0) ? pname[pnamelen-1] : 0;
1198
1199 // Return error on buffer overflow.
1200 if (pnamelen + strlen(fname) + 10 > buflen) {
1201 return retval;
1202 }
1203
1204 if (pnamelen == 0) {
1205 jio_snprintf(buffer, buflen, "%s.dll", fname);
1206 retval = true;
1207 } else if (c == ':' || c == '\\') {
1208 jio_snprintf(buffer, buflen, "%s%s.dll", pname, fname);
1209 retval = true;
1210 } else if (strchr(pname, *os::path_separator()) != NULL) {
1211 int n;
1212 char** pelements = split_path(pname, &n);
1213 if (pelements == NULL) {
1214 return false;
1215 }
1216 for (int i = 0 ; i < n ; i++) {
1217 char* path = pelements[i];
1218 // Really shouldn't be NULL, but check can't hurt
1219 size_t plen = (path == NULL) ? 0 : strlen(path);
1220 if (plen == 0) {
1221 continue; // skip the empty path values
1222 }
1223 const char lastchar = path[plen - 1];
1224 if (lastchar == ':' || lastchar == '\\') {
1225 jio_snprintf(buffer, buflen, "%s%s.dll", path, fname);
1226 } else {
1227 jio_snprintf(buffer, buflen, "%s\\%s.dll", path, fname);
1228 }
1229 if (file_exists(buffer)) {
1230 retval = true;
1231 break;
1232 }
1233 }
1234 // release the storage
1235 for (int i = 0 ; i < n ; i++) {
1236 if (pelements[i] != NULL) {
1237 FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
1238 }
1239 }
1240 if (pelements != NULL) {
1241 FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
1242 }
1243 } else {
1244 jio_snprintf(buffer, buflen, "%s\\%s.dll", pname, fname);
1245 retval = true;
1246 }
1247 return retval;
1248 }
1249
1250 // Needs to be in the os-specific directory because Windows requires another
1251 // header file, <direct.h>.
1252 const char* os::get_current_directory(char *buf, size_t buflen) {
1253 int n = static_cast<int>(buflen);
1254 if (buflen > INT_MAX) n = INT_MAX;
1255 return _getcwd(buf, n);
1256 }
1257
1258 //-----------------------------------------------------------
1259 // Helper functions for fatal error handler
1260 #ifdef _WIN64
1261 // Helper routine which returns true if the address is
1262 // within the NTDLL address space.
1263 //
1264 static bool _addr_in_ntdll( address addr )
1265 {
1266 HMODULE hmod;
1267 MODULEINFO minfo;
1268
1269 hmod = GetModuleHandle("NTDLL.DLL");
1270 if ( hmod == NULL ) return false;
1271 if ( !os::PSApiDll::GetModuleInformation( GetCurrentProcess(), hmod,
1272 &minfo, sizeof(MODULEINFO)) )
1273 return false;
1274
1275 if ( (addr >= minfo.lpBaseOfDll) &&
1276 (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage)))
1277 return true;
1278 else
1279 return false;
1280 }
1281 #endif
1282
1283
1284 // Enumerate all modules for a given process ID
1285 //
1286 // Notice that Windows 95/98/Me and Windows NT/2000/XP have
1287 // different API for doing this. We use PSAPI.DLL on NT based
1288 // Windows and ToolHelp on 95/98/Me.
1289
1290 // Callback function that is called by enumerate_modules() on
1291 // every DLL module.
1292 // Input parameters:
1293 // int pid,
1294 // char* module_file_name,
1295 // address module_base_addr,
1296 // unsigned module_size,
1297 // void* param
1298 typedef int (*EnumModulesCallbackFunc)(int, char *, address, unsigned, void *);
1299
1300 // enumerate_modules for Windows NT, using PSAPI
1301 static int _enumerate_modules_winnt( int pid, EnumModulesCallbackFunc func, void * param)
1302 {
1303 HANDLE hProcess ;
1304
1305 # define MAX_NUM_MODULES 128
1306 HMODULE modules[MAX_NUM_MODULES];
1307 static char filename[ MAX_PATH ];
1308 int result = 0;
1309
1310 if (!os::PSApiDll::PSApiAvailable()) {
1311 return 0;
1312 }
1313
1314 hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
1315 FALSE, pid ) ;
1316 if (hProcess == NULL) return 0;
1317
1318 DWORD size_needed;
1319 if (!os::PSApiDll::EnumProcessModules(hProcess, modules,
1320 sizeof(modules), &size_needed)) {
1321 CloseHandle( hProcess );
1322 return 0;
1323 }
1324
1325 // number of modules that are currently loaded
1326 int num_modules = size_needed / sizeof(HMODULE);
1327
1328 for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
1329 // Get Full pathname:
1330 if(!os::PSApiDll::GetModuleFileNameEx(hProcess, modules[i],
1331 filename, sizeof(filename))) {
1332 filename[0] = '\0';
1333 }
1334
1335 MODULEINFO modinfo;
1336 if (!os::PSApiDll::GetModuleInformation(hProcess, modules[i],
1337 &modinfo, sizeof(modinfo))) {
1338 modinfo.lpBaseOfDll = NULL;
1339 modinfo.SizeOfImage = 0;
1340 }
1341
1342 // Invoke callback function
1343 result = func(pid, filename, (address)modinfo.lpBaseOfDll,
1344 modinfo.SizeOfImage, param);
1345 if (result) break;
1346 }
1347
1348 CloseHandle( hProcess ) ;
1349 return result;
1350 }
1351
1352
1353 // enumerate_modules for Windows 95/98/ME, using TOOLHELP
1354 static int _enumerate_modules_windows( int pid, EnumModulesCallbackFunc func, void *param)
1355 {
1356 HANDLE hSnapShot ;
1357 static MODULEENTRY32 modentry ;
1358 int result = 0;
1359
1360 if (!os::Kernel32Dll::HelpToolsAvailable()) {
1361 return 0;
1362 }
1363
1364 // Get a handle to a Toolhelp snapshot of the system
1365 hSnapShot = os::Kernel32Dll::CreateToolhelp32Snapshot(TH32CS_SNAPMODULE, pid ) ;
1366 if( hSnapShot == INVALID_HANDLE_VALUE ) {
1367 return FALSE ;
1368 }
1369
1370 // iterate through all modules
1371 modentry.dwSize = sizeof(MODULEENTRY32) ;
1372 bool not_done = os::Kernel32Dll::Module32First( hSnapShot, &modentry ) != 0;
1373
1374 while( not_done ) {
1375 // invoke the callback
1376 result=func(pid, modentry.szExePath, (address)modentry.modBaseAddr,
1377 modentry.modBaseSize, param);
1378 if (result) break;
1379
1380 modentry.dwSize = sizeof(MODULEENTRY32) ;
1381 not_done = os::Kernel32Dll::Module32Next( hSnapShot, &modentry ) != 0;
1382 }
1383
1384 CloseHandle(hSnapShot);
1385 return result;
1386 }
1387
1388 int enumerate_modules( int pid, EnumModulesCallbackFunc func, void * param )
1389 {
1390 // Get current process ID if caller doesn't provide it.
1391 if (!pid) pid = os::current_process_id();
1392
1393 if (os::win32::is_nt()) return _enumerate_modules_winnt (pid, func, param);
1394 else return _enumerate_modules_windows(pid, func, param);
1395 }
1396
1397 struct _modinfo {
1398 address addr;
1399 char* full_path; // point to a char buffer
1400 int buflen; // size of the buffer
1401 address base_addr;
1402 };
1403
1404 static int _locate_module_by_addr(int pid, char * mod_fname, address base_addr,
1405 unsigned size, void * param) {
1406 struct _modinfo *pmod = (struct _modinfo *)param;
1407 if (!pmod) return -1;
1408
1409 if (base_addr <= pmod->addr &&
1410 base_addr+size > pmod->addr) {
1411 // if a buffer is provided, copy path name to the buffer
1412 if (pmod->full_path) {
1413 jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
1414 }
1415 pmod->base_addr = base_addr;
1416 return 1;
1417 }
1418 return 0;
1419 }
1420
1421 bool os::dll_address_to_library_name(address addr, char* buf,
1422 int buflen, int* offset) {
1423 // buf is not optional, but offset is optional
1424 assert(buf != NULL, "sanity check");
1425
1426 // NOTE: the reason we don't use SymGetModuleInfo() is it doesn't always
1427 // return the full path to the DLL file, sometimes it returns path
1428 // to the corresponding PDB file (debug info); sometimes it only
1429 // returns partial path, which makes life painful.
1430
1431 struct _modinfo mi;
1432 mi.addr = addr;
1433 mi.full_path = buf;
1434 mi.buflen = buflen;
1435 int pid = os::current_process_id();
1436 if (enumerate_modules(pid, _locate_module_by_addr, (void *)&mi)) {
1437 // buf already contains path name
1438 if (offset) *offset = addr - mi.base_addr;
1439 return true;
1440 }
1441
1442 buf[0] = '\0';
1443 if (offset) *offset = -1;
1444 return false;
1445 }
1446
1447 bool os::dll_address_to_function_name(address addr, char *buf,
1448 int buflen, int *offset) {
1449 // buf is not optional, but offset is optional
1450 assert(buf != NULL, "sanity check");
1451
1452 if (Decoder::decode(addr, buf, buflen, offset)) {
1453 return true;
1454 }
1455 if (offset != NULL) *offset = -1;
1456 buf[0] = '\0';
1457 return false;
1458 }
1459
1460 // save the start and end address of jvm.dll into param[0] and param[1]
1461 static int _locate_jvm_dll(int pid, char* mod_fname, address base_addr,
1462 unsigned size, void * param) {
1463 if (!param) return -1;
1464
1465 if (base_addr <= (address)_locate_jvm_dll &&
1466 base_addr+size > (address)_locate_jvm_dll) {
1467 ((address*)param)[0] = base_addr;
1468 ((address*)param)[1] = base_addr + size;
1469 return 1;
1470 }
1471 return 0;
1472 }
1473
1474 address vm_lib_location[2]; // start and end address of jvm.dll
1475
1476 // check if addr is inside jvm.dll
1477 bool os::address_is_in_vm(address addr) {
1478 if (!vm_lib_location[0] || !vm_lib_location[1]) {
1479 int pid = os::current_process_id();
1480 if (!enumerate_modules(pid, _locate_jvm_dll, (void *)vm_lib_location)) {
1481 assert(false, "Can't find jvm module.");
1482 return false;
1483 }
1484 }
1485
1486 return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
1487 }
1488
1489 // print module info; param is outputStream*
1490 static int _print_module(int pid, char* fname, address base,
1491 unsigned size, void* param) {
1492 if (!param) return -1;
1493
1494 outputStream* st = (outputStream*)param;
1495
1496 address end_addr = base + size;
1497 st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base, end_addr, fname);
1498 return 0;
1499 }
1500
1501 // Loads .dll/.so and
1502 // in case of error it checks if .dll/.so was built for the
1503 // same architecture as Hotspot is running on
1504 void * os::dll_load(const char *name, char *ebuf, int ebuflen)
1505 {
1506 void * result = LoadLibrary(name);
1507 if (result != NULL)
1508 {
1509 return result;
1510 }
1511
1512 DWORD errcode = GetLastError();
1513 if (errcode == ERROR_MOD_NOT_FOUND) {
1514 strncpy(ebuf, "Can't find dependent libraries", ebuflen-1);
1515 ebuf[ebuflen-1]='\0';
1516 return NULL;
1517 }
1518
1519 // Parsing dll below
1520 // If we can read dll-info and find that dll was built
1521 // for an architecture other than the one Hotspot is running on
1522 // - then print to buffer "DLL was built for a different architecture"
1523 // else call os::lasterror to obtain system error message
1524
1525 // Read system error message into ebuf
1526 // It may or may not be overwritten below (in the for loop and just above)
1527 lasterror(ebuf, (size_t) ebuflen);
1528 ebuf[ebuflen-1]='\0';
1529 int file_descriptor=::open(name, O_RDONLY | O_BINARY, 0);
1530 if (file_descriptor<0)
1531 {
1532 return NULL;
1533 }
1534
1535 uint32_t signature_offset;
1536 uint16_t lib_arch=0;
1537 bool failed_to_get_lib_arch=
1538 (
1539 //Go to position 3c in the dll
1540 (os::seek_to_file_offset(file_descriptor,IMAGE_FILE_PTR_TO_SIGNATURE)<0)
1541 ||
1542 // Read location of signature
1543 (sizeof(signature_offset)!=
1544 (os::read(file_descriptor, (void*)&signature_offset,sizeof(signature_offset))))
1545 ||
1546 //Go to COFF File Header in dll
1547 // that is located after "signature" (4 bytes long)
1548 (os::seek_to_file_offset(file_descriptor,
1549 signature_offset+IMAGE_FILE_SIGNATURE_LENGTH)<0)
1550 ||
1551 //Read field that contains code of architecture
1552 // that the dll was built for
1553 (sizeof(lib_arch)!=
1554 (os::read(file_descriptor, (void*)&lib_arch,sizeof(lib_arch))))
1555 );
1556
1557 ::close(file_descriptor);
1558 if (failed_to_get_lib_arch)
1559 {
1560 // file i/o error - report os::lasterror(...) msg
1561 return NULL;
1562 }
1563
1564 typedef struct
1565 {
1566 uint16_t arch_code;
1567 char* arch_name;
1568 } arch_t;
1569
1570 static const arch_t arch_array[]={
1571 {IMAGE_FILE_MACHINE_I386, (char*)"IA 32"},
1572 {IMAGE_FILE_MACHINE_AMD64, (char*)"AMD 64"},
1573 {IMAGE_FILE_MACHINE_IA64, (char*)"IA 64"}
1574 };
1575 #if (defined _M_IA64)
1576 static const uint16_t running_arch=IMAGE_FILE_MACHINE_IA64;
1577 #elif (defined _M_AMD64)
1578 static const uint16_t running_arch=IMAGE_FILE_MACHINE_AMD64;
1579 #elif (defined _M_IX86)
1580 static const uint16_t running_arch=IMAGE_FILE_MACHINE_I386;
1581 #else
1582 #error Method os::dll_load requires that one of following \
1583 is defined :_M_IA64,_M_AMD64 or _M_IX86
1584 #endif
1585
1586
1587 // Obtain strings for the printf operation below:
1588 // lib_arch_str names the platform this .dll was built for,
1589 // running_arch_str names the platform Hotspot was built for.
1590 char *running_arch_str=NULL,*lib_arch_str=NULL;
1591 for (unsigned int i=0;i<ARRAY_SIZE(arch_array);i++)
1592 {
1593 if (lib_arch==arch_array[i].arch_code)
1594 lib_arch_str=arch_array[i].arch_name;
1595 if (running_arch==arch_array[i].arch_code)
1596 running_arch_str=arch_array[i].arch_name;
1597 }
1598
1599 assert(running_arch_str,
1600 "Didn't find runing architecture code in arch_array");
1601
1602 // If the architecture is right
1603 // but some other error took place - report os::lasterror(...) msg
1604 if (lib_arch == running_arch)
1605 {
1606 return NULL;
1607 }
1608
1609 if (lib_arch_str!=NULL)
1610 {
1611 ::_snprintf(ebuf, ebuflen-1,
1612 "Can't load %s-bit .dll on a %s-bit platform",
1613 lib_arch_str,running_arch_str);
1614 }
1615 else
1616 {
1617 // don't know what architecture this dll was built for
1618 ::_snprintf(ebuf, ebuflen-1,
1619 "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
1620 lib_arch,running_arch_str);
1621 }
1622
1623 return NULL;
1624 }
1625
1626
1627 void os::print_dll_info(outputStream *st) {
1628 int pid = os::current_process_id();
1629 st->print_cr("Dynamic libraries:");
1630 enumerate_modules(pid, _print_module, (void *)st);
1631 }
1632
1633 void os::print_os_info_brief(outputStream* st) {
1634 os::print_os_info(st);
1635 }
1636
1637 void os::print_os_info(outputStream* st) {
1638 st->print("OS:");
1639
1640 os::win32::print_windows_version(st);
1641 }
1642
1643 void os::win32::print_windows_version(outputStream* st) {
1644 OSVERSIONINFOEX osvi;
1645 SYSTEM_INFO si;
1646
1647 ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
1648 osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
1649
1650 if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
1651 st->print_cr("N/A");
1652 return;
1653 }
1654
1655 int os_vers = osvi.dwMajorVersion * 1000 + osvi.dwMinorVersion;
1656
1657 ZeroMemory(&si, sizeof(SYSTEM_INFO));
1658 if (os_vers >= 5002) {
1659 // Retrieve SYSTEM_INFO from the GetNativeSystemInfo call so that we can
1660 // find out whether we are running on a 64-bit processor.
1661 if (os::Kernel32Dll::GetNativeSystemInfoAvailable()) {
1662 os::Kernel32Dll::GetNativeSystemInfo(&si);
1663 } else {
1664 GetSystemInfo(&si);
1665 }
1666 }
1667
1668 if (osvi.dwPlatformId == VER_PLATFORM_WIN32_NT) {
1669 switch (os_vers) {
1670 case 3051: st->print(" Windows NT 3.51"); break;
1671 case 4000: st->print(" Windows NT 4.0"); break;
1672 case 5000: st->print(" Windows 2000"); break;
1673 case 5001: st->print(" Windows XP"); break;
1674 case 5002:
1675 if (osvi.wProductType == VER_NT_WORKSTATION &&
1676 si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
1677 st->print(" Windows XP x64 Edition");
1678 } else {
1679 st->print(" Windows Server 2003 family");
1680 }
1681 break;
1682
1683 case 6000:
1684 if (osvi.wProductType == VER_NT_WORKSTATION) {
1685 st->print(" Windows Vista");
1686 } else {
1687 st->print(" Windows Server 2008");
1688 }
1689 break;
1690
1691 case 6001:
1692 if (osvi.wProductType == VER_NT_WORKSTATION) {
1693 st->print(" Windows 7");
1694 } else {
1695 st->print(" Windows Server 2008 R2");
1696 }
1697 break;
1698
1699 case 6002:
1700 if (osvi.wProductType == VER_NT_WORKSTATION) {
1701 st->print(" Windows 8");
1702 } else {
1703 st->print(" Windows Server 2012");
1704 }
1705 break;
1706
1707 case 6003:
1708 if (osvi.wProductType == VER_NT_WORKSTATION) {
1709 st->print(" Windows 8.1");
1710 } else {
1711 st->print(" Windows Server 2012 R2");
1712 }
1713 break;
1714
1715 default: // future os
1716 // Unrecognized Windows version; print its major and minor versions
1717 st->print(" Windows NT %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
1718 }
1719 } else {
1720 switch (os_vers) {
1721 case 4000: st->print(" Windows 95"); break;
1722 case 4010: st->print(" Windows 98"); break;
1723 case 4090: st->print(" Windows Me"); break;
1724 default: // future Windows version; print its major and minor versions
1725 st->print(" Windows %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
1726 }
1727 }
1728
1729 if (os_vers >= 6000 && si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
1730 st->print(" , 64 bit");
1731 }
1732
1733 st->print(" Build %d", osvi.dwBuildNumber);
1734 st->print(" %s", osvi.szCSDVersion); // service pack
1735 st->cr();
1736 }
1737
1738 void os::pd_print_cpu_info(outputStream* st) {
1739 // Nothing to do for now.
1740 }
1741
1742 void os::print_memory_info(outputStream* st) {
1743 st->print("Memory:");
1744 st->print(" %dk page", os::vm_page_size()>>10);
1745
1746 // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return an incorrect
1747 // value if total memory is larger than 4GB
1748 MEMORYSTATUSEX ms;
1749 ms.dwLength = sizeof(ms);
1750 GlobalMemoryStatusEx(&ms);
1751
1752 st->print(", physical %uk", os::physical_memory() >> 10);
1753 st->print("(%uk free)", os::available_memory() >> 10);
1754
1755 st->print(", swap %uk", ms.ullTotalPageFile >> 10);
1756 st->print("(%uk free)", ms.ullAvailPageFile >> 10);
1757 st->cr();
1758 }
1759
1760 void os::print_siginfo(outputStream *st, void *siginfo) {
1761 EXCEPTION_RECORD* er = (EXCEPTION_RECORD*)siginfo;
1762 st->print("siginfo:");
1763 st->print(" ExceptionCode=0x%x", er->ExceptionCode);
1764
1765 if (er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
1766 er->NumberParameters >= 2) {
1767 switch (er->ExceptionInformation[0]) {
1768 case 0: st->print(", reading address"); break;
1769 case 1: st->print(", writing address"); break;
1770 default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
1771 er->ExceptionInformation[0]);
1772 }
1773 st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
1774 } else if (er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR &&
1775 er->NumberParameters >= 2 && UseSharedSpaces) {
1776 FileMapInfo* mapinfo = FileMapInfo::current_info();
1777 if (mapinfo->is_in_shared_space((void*)er->ExceptionInformation[1])) {
1778 st->print("\n\nError accessing class data sharing archive." \
1779 " Mapped file inaccessible during execution, " \
1780 " possible disk/network problem.");
1781 }
1782 } else {
1783 int num = er->NumberParameters;
1784 if (num > 0) {
1785 st->print(", ExceptionInformation=");
1786 for (int i = 0; i < num; i++) {
1787 st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
1788 }
1789 }
1790 }
1791 st->cr();
1792 }
1793
1794 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1795 // do nothing
1796 }
1797
1798 static char saved_jvm_path[MAX_PATH] = {0};
1799
1800 // Find the full path to the current module, jvm.dll
1801 void os::jvm_path(char *buf, jint buflen) {
1802 // Error checking.
1803 if (buflen < MAX_PATH) {
1804 assert(false, "must use a large-enough buffer");
1805 buf[0] = '\0';
1806 return;
1807 }
1808 // Lazy resolve the path to current module.
1809 if (saved_jvm_path[0] != 0) {
1810 strcpy(buf, saved_jvm_path);
1811 return;
1812 }
1813
1814 buf[0] = '\0';
1815 if (Arguments::created_by_gamma_launcher()) {
1816 // Support for the gamma launcher. Check for a
1817 // JAVA_HOME environment variable
1818 // and fix up the path so it looks like
1819 // jvm.dll is installed there (append a fake suffix
1820 // hotspot\jvm.dll).
1821 char* java_home_var = ::getenv("JAVA_HOME");
1822 if (java_home_var != NULL && java_home_var[0] != 0 &&
1823 strlen(java_home_var) < (size_t)buflen) {
1824
1825 strncpy(buf, java_home_var, buflen);
1826
1827 // determine if this is a legacy image or modules image
1828 // modules image doesn't have "jre" subdirectory
1829 size_t len = strlen(buf);
1830 char* jrebin_p = buf + len;
1831 jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
1832 if (0 != _access(buf, 0)) {
1833 jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
1834 }
1835 len = strlen(buf);
1836 jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
1837 }
1838 }
1839
1840 if(buf[0] == '\0') {
1841 GetModuleFileName(vm_lib_handle, buf, buflen);
1842 }
1843 strncpy(saved_jvm_path, buf, MAX_PATH);
1844 }
1845
1846
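// On 32-bit Windows, native JNI entry points use __stdcall name decoration:
// a leading underscore and a trailing "@<size of arguments in bytes>",
// which the two helpers below emit.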
1847 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
1848 #ifndef _WIN64
1849 st->print("_");
1850 #endif
1851 }
1852
1853
1854 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
1855 #ifndef _WIN64
1856 st->print("@%d", args_size * sizeof(int));
1857 #endif
1858 }
1859
1860 // This method is a copy of JDK's sysGetLastErrorString
1861 // from src/windows/hpi/src/system_md.c
1862
1863 size_t os::lasterror(char* buf, size_t len) {
1864 DWORD errval;
1865
1866 if ((errval = GetLastError()) != 0) {
1867 // DOS error
1868 size_t n = (size_t)FormatMessage(
1869 FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
1870 NULL,
1871 errval,
1872 0,
1873 buf,
1874 (DWORD)len,
1875 NULL);
1876 if (n > 3) {
1877 // Drop final '.', CR, LF
1878 if (buf[n - 1] == '\n') n--;
1879 if (buf[n - 1] == '\r') n--;
1880 if (buf[n - 1] == '.') n--;
1881 buf[n] = '\0';
1882 }
1883 return n;
1884 }
1885
1886 if (errno != 0) {
1887 // C runtime error that has no corresponding DOS error code
1888 const char* s = strerror(errno);
1889 size_t n = strlen(s);
1890 if (n >= len) n = len - 1;
1891 strncpy(buf, s, n);
1892 buf[n] = '\0';
1893 return n;
1894 }
1895
1896 return 0;
1897 }
1898
1899 int os::get_last_error() {
1900 DWORD error = GetLastError();
1901 if (error == 0)
1902 error = errno;
1903 return (int)error;
1904 }
1905
1906 // sun.misc.Signal
1907 // NOTE that this is a workaround for an apparent kernel bug where if
1908 // a signal handler for SIGBREAK is installed then that signal handler
1909 // takes priority over the console control handler for CTRL_CLOSE_EVENT.
1910 // See bug 4416763.
1911 static void (*sigbreakHandler)(int) = NULL;
1912
1913 static void UserHandler(int sig, void *siginfo, void *context) {
1914 os::signal_notify(sig);
1915 // We need to reinstate the signal handler each time...
1916 os::signal(sig, (void*)UserHandler);
1917 }
1918
1919 void* os::user_handler() {
1920 return (void*) UserHandler;
1921 }
1922
1923 void* os::signal(int signal_number, void* handler) {
1924 if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
1925 void (*oldHandler)(int) = sigbreakHandler;
1926 sigbreakHandler = (void (*)(int)) handler;
1927 return (void*) oldHandler;
1928 } else {
1929 return (void*)::signal(signal_number, (void (*)(int))handler);
1930 }
1931 }
1932
1933 void os::signal_raise(int signal_number) {
1934 raise(signal_number);
1935 }
1936
1937 // The Win32 C runtime library maps all console control events other than ^C
1938 // into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
1939 // logoff, and shutdown events. We therefore install our own console handler
1940 // that raises SIGTERM for the latter cases.
1941 //
1942 static BOOL WINAPI consoleHandler(DWORD event) {
1943 switch(event) {
1944 case CTRL_C_EVENT:
1945 if (is_error_reported()) {
1946 // Ctrl-C was pressed during error reporting, likely because the error
1947 // handler failed to abort. Let the VM die immediately.
1948 os::die();
1949 }
1950
1951 os::signal_raise(SIGINT);
1952 return TRUE;
1953 break;
1954 case CTRL_BREAK_EVENT:
1955 if (sigbreakHandler != NULL) {
1956 (*sigbreakHandler)(SIGBREAK);
1957 }
1958 return TRUE;
1959 break;
1960 case CTRL_LOGOFF_EVENT: {
1961 // Don't terminate the JVM if it is running in a non-interactive session,
1962 // such as a service process.
1963 USEROBJECTFLAGS flags;
1964 HANDLE handle = GetProcessWindowStation();
1965 if (handle != NULL &&
1966 GetUserObjectInformation(handle, UOI_FLAGS, &flags,
1967 sizeof( USEROBJECTFLAGS), NULL)) {
1968 // If it is a non-interactive session, let the next handler
1969 // deal with it.
1970 if ((flags.dwFlags & WSF_VISIBLE) == 0) {
1971 return FALSE;
1972 }
1973 }
1974 }
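// Interactive logoff: fall through and treat it like close/shutdown below.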
1975 case CTRL_CLOSE_EVENT:
1976 case CTRL_SHUTDOWN_EVENT:
1977 os::signal_raise(SIGTERM);
1978 return TRUE;
1979 break;
1980 default:
1981 break;
1982 }
1983 return FALSE;
1984 }
1985
1986 /*
1987 * The following code was moved from os.cpp to make this
1988 * code platform-specific, which it is by its very nature.
1989 */
1990
1991 // Return the maximum OS signal used + 1 for internal use only.
1992 // Used as the exit signal for signal_thread.
1993 int os::sigexitnum_pd() {
1994 return NSIG;
1995 }
1996
1997 // a counter for each possible signal value, including signal_thread exit signal
1998 static volatile jint pending_signals[NSIG+1] = { 0 };
1999 static HANDLE sig_sem = NULL;
2000
2001 void os::signal_init_pd() {
2002 // Initialize signal structures
2003 memset((void*)pending_signals, 0, sizeof(pending_signals));
2004
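// Counting semaphore: signal_notify() releases it once per pending signal and
// check_pending_signals() waits on it when blocking for the next signal.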
2005 sig_sem = ::CreateSemaphore(NULL, 0, NSIG+1, NULL);
2006
2007 // Programs embedding the VM do not want it to attempt to receive
2008 // events like CTRL_LOGOFF_EVENT, which are used to implement the
2009 // shutdown hooks mechanism introduced in 1.3. For example, when
2010 // the VM is run as part of a Windows NT service (e.g., a servlet
2011 // engine in a web server), the correct behavior is for any console
2012 // control handler to return FALSE, not TRUE, because the OS's
2013 // "final" handler for such events allows the process to continue if
2014 // it is a service (while terminating it if it is not a service).
2015 // To make this behavior uniform and the mechanism simpler, we
2016 // completely disable the VM's usage of these console events if -Xrs
2017 // (=ReduceSignalUsage) is specified. This means, for example, that
2018 // the CTRL-BREAK thread dump mechanism is also disabled in this
2019 // case. See bugs 4323062, 4345157, and related bugs.
2020
2021 if (!ReduceSignalUsage) {
2022 // Add a CTRL-C handler
2023 SetConsoleCtrlHandler(consoleHandler, TRUE);
2024 }
2025 }
2026
2027 void os::signal_notify(int signal_number) {
2028 BOOL ret;
2029 if (sig_sem != NULL) {
2030 Atomic::inc(&pending_signals[signal_number]);
2031 ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
2032 assert(ret != 0, "ReleaseSemaphore() failed");
2033 }
2034 }
2035
2036 static int check_pending_signals(bool wait_for_signal) {
2037 DWORD ret;
2038 while (true) {
2039 for (int i = 0; i < NSIG + 1; i++) {
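// Atomically claim one pending occurrence of signal i: only the thread
// whose cmpxchg successfully decrements the counter reports this signal.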
2040 jint n = pending_signals[i];
2041 if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
2042 return i;
2043 }
2044 }
2045 if (!wait_for_signal) {
2046 return -1;
2047 }
2048
2049 JavaThread *thread = JavaThread::current();
2050
2051 ThreadBlockInVM tbivm(thread);
2052
2053 bool threadIsSuspended;
2054 do {
2055 thread->set_suspend_equivalent();
2056 // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
2057 ret = ::WaitForSingleObject(sig_sem, INFINITE);
2058 assert(ret == WAIT_OBJECT_0, "WaitForSingleObject() failed");
2059
2060 // were we externally suspended while we were waiting?
2061 threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
2062 if (threadIsSuspended) {
2063 //
2064 // The semaphore has been incremented, but while we were waiting
2065 // another thread suspended us. We don't want to continue running
2066 // while suspended because that would surprise the thread that
2067 // suspended us.
2068 //
2069 ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
2070 assert(ret != 0, "ReleaseSemaphore() failed");
2071
2072 thread->java_suspend_self();
2073 }
2074 } while (threadIsSuspended);
2075 }
2076 }
2077
2078 int os::signal_lookup() {
2079 return check_pending_signals(false);
2080 }
2081
2082 int os::signal_wait() {
2083 return check_pending_signals(true);
2084 }
2085
2086 // Implicit OS exception handling
2087
2088 LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo, address handler) {
2089 JavaThread* thread = JavaThread::current();
2090 // Save pc in thread
2091 #ifdef _M_IA64
2092 // Do not blow up if no thread info available.
2093 if (thread) {
2094 // Saving PRECISE pc (with slot information) in thread.
2095 uint64_t precise_pc = (uint64_t) exceptionInfo->ExceptionRecord->ExceptionAddress;
2096 // Convert precise PC into "Unix" format
2097 precise_pc = (precise_pc & 0xFFFFFFFFFFFFFFF0) | ((precise_pc & 0xF) >> 2);
2098 thread->set_saved_exception_pc((address)precise_pc);
2099 }
2100 // Set pc to handler
2101 exceptionInfo->ContextRecord->StIIP = (DWORD64)handler;
2102 // Clear out psr.ri (= Restart Instruction) in order to continue
2103 // at the beginning of the target bundle.
2104 exceptionInfo->ContextRecord->StIPSR &= 0xFFFFF9FFFFFFFFFF;
2105 assert(((DWORD64)handler & 0xF) == 0, "Target address must point to the beginning of a bundle!");
2106 #elif _M_AMD64
2107 // Do not blow up if no thread info available.
2108 if (thread) {
2109 thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip);
2110 }
2111 // Set pc to handler
2112 exceptionInfo->ContextRecord->Rip = (DWORD64)handler;
2113 #else
2114 // Do not blow up if no thread info available.
2115 if (thread) {
2116 thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip);
2117 }
2118 // Set pc to handler
2119 exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler;
2120 #endif
2121
2122 // Continue the execution
2123 return EXCEPTION_CONTINUE_EXECUTION;
2124 }
2125
2126
2127 // Used for PostMortemDump
2128 extern "C" void safepoints();
2129 extern "C" void find(int x);
2130 extern "C" void events();
2131
2132 // According to the Windows API documentation, an illegal instruction sequence should generate
2133 // the 0xC000001D exception code. However, real-world experience shows that occasionally
2134 // the execution of an illegal instruction can generate the exception code 0xC000001E. This
2135 // seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems).
2136
2137 #define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E
2138
2139 // From "Execution Protection in the Windows Operating System" draft 0.35
2140 // Once a system header becomes available, the "real" define should be
2141 // included or copied here.
2142 #define EXCEPTION_INFO_EXEC_VIOLATION 0x08
2143
2144 // Handle NAT Bit consumption on IA64.
2145 #ifdef _M_IA64
2146 #define EXCEPTION_REG_NAT_CONSUMPTION STATUS_REG_NAT_CONSUMPTION
2147 #endif
2148
2149 // Windows Vista/2008 heap corruption check
2150 #define EXCEPTION_HEAP_CORRUPTION 0xC0000374
2151
2152 #define def_excpt(val) #val, val
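// The stringizing macro above yields a name/number initializer pair; for example,
// def_excpt(EXCEPTION_BREAKPOINT) expands to:
//   "EXCEPTION_BREAKPOINT", EXCEPTION_BREAKPOINT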
2153
2154 struct siglabel {
2155 char *name;
2156 int number;
2157 };
2158
2159 // All C++ exceptions thrown from code generated by the Microsoft Visual
2160 // C++ compiler contain this error code. Because this is a compiler-generated
2161 // error, the code is not listed in the Win32 API header files.
2162 // The code is actually a cryptic mnemonic device, with the initial "E"
2163 // standing for "exception" and the final 3 bytes (0x6D7363) representing the
2164 // ASCII values of "msc".
2165
2166 #define EXCEPTION_UNCAUGHT_CXX_EXCEPTION 0xE06D7363
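// Illustrative decomposition of the mnemonic (assuming ASCII):
//   0xE06D7363 == (0xE0 << 24) | ('m' << 16) | ('s' << 8) | 'c'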
2167
2168
2169 struct siglabel exceptlabels[] = {
2170 def_excpt(EXCEPTION_ACCESS_VIOLATION),
2171 def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT),
2172 def_excpt(EXCEPTION_BREAKPOINT),
2173 def_excpt(EXCEPTION_SINGLE_STEP),
2174 def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED),
2175 def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND),
2176 def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO),
2177 def_excpt(EXCEPTION_FLT_INEXACT_RESULT),
2178 def_excpt(EXCEPTION_FLT_INVALID_OPERATION),
2179 def_excpt(EXCEPTION_FLT_OVERFLOW),
2180 def_excpt(EXCEPTION_FLT_STACK_CHECK),
2181 def_excpt(EXCEPTION_FLT_UNDERFLOW),
2182 def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO),
2183 def_excpt(EXCEPTION_INT_OVERFLOW),
2184 def_excpt(EXCEPTION_PRIV_INSTRUCTION),
2185 def_excpt(EXCEPTION_IN_PAGE_ERROR),
2186 def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION),
2187 def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2),
2188 def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION),
2189 def_excpt(EXCEPTION_STACK_OVERFLOW),
2190 def_excpt(EXCEPTION_INVALID_DISPOSITION),
2191 def_excpt(EXCEPTION_GUARD_PAGE),
2192 def_excpt(EXCEPTION_INVALID_HANDLE),
2193 def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION),
2194 def_excpt(EXCEPTION_HEAP_CORRUPTION),
2195 #ifdef _M_IA64
2196 def_excpt(EXCEPTION_REG_NAT_CONSUMPTION),
2197 #endif
2198 NULL, 0
2199 };
2200
2201 const char* os::exception_name(int exception_code, char *buf, size_t size) {
2202 for (int i = 0; exceptlabels[i].name != NULL; i++) {
2203 if (exceptlabels[i].number == exception_code) {
2204 jio_snprintf(buf, size, "%s", exceptlabels[i].name);
2205 return buf;
2206 }
2207 }
2208
2209 return NULL;
2210 }
2211
2212 //-----------------------------------------------------------------------------
2213 LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2214 // handle exception caused by idiv; should only happen for -MinInt/-1
2215 // (division by zero is handled explicitly)
2216 #ifdef _M_IA64
2217 assert(0, "Fix Handle_IDiv_Exception");
2218 #elif _M_AMD64
2219 PCONTEXT ctx = exceptionInfo->ContextRecord;
2220 address pc = (address)ctx->Rip;
2221 assert(pc[0] == 0xF7, "not an idiv opcode");
2222 assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2223 assert(ctx->Rax == min_jint, "unexpected idiv exception");
2224 // set correct result values and continue after idiv instruction
2225 ctx->Rip = (DWORD64)pc + 2; // idiv reg, reg is 2 bytes
2226 ctx->Rax = (DWORD)min_jint; // result
2227 ctx->Rdx = (DWORD)0; // remainder
2228 // Continue the execution
2229 #else
2230 PCONTEXT ctx = exceptionInfo->ContextRecord;
2231 address pc = (address)ctx->Eip;
2232 assert(pc[0] == 0xF7, "not an idiv opcode");
2233 assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2234 assert(ctx->Eax == min_jint, "unexpected idiv exception");
2235 // set correct result values and continue after idiv instruction
2236 ctx->Eip = (DWORD)pc + 2; // idiv reg, reg is 2 bytes
2237 ctx->Eax = (DWORD)min_jint; // result
2238 ctx->Edx = (DWORD)0; // remainder
2239 // Continue the execution
2240 #endif
2241 return EXCEPTION_CONTINUE_EXECUTION;
2242 }
2243
2244 #ifndef _WIN64
2245 //-----------------------------------------------------------------------------
2246 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2247 // handle exception caused by native method modifying control word
2248 PCONTEXT ctx = exceptionInfo->ContextRecord;
2249 DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2250
2251 switch (exception_code) {
2252 case EXCEPTION_FLT_DENORMAL_OPERAND:
2253 case EXCEPTION_FLT_DIVIDE_BY_ZERO:
2254 case EXCEPTION_FLT_INEXACT_RESULT:
2255 case EXCEPTION_FLT_INVALID_OPERATION:
2256 case EXCEPTION_FLT_OVERFLOW:
2257 case EXCEPTION_FLT_STACK_CHECK:
2258 case EXCEPTION_FLT_UNDERFLOW:
2259 jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std());
2260 if (fp_control_word != ctx->FloatSave.ControlWord) {
2261 // Restore FPCW and mask out FLT exceptions
2262 ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
2263 // Mask out pending FLT exceptions
2264 ctx->FloatSave.StatusWord &= 0xffffff00;
2265 return EXCEPTION_CONTINUE_EXECUTION;
2266 }
2267 }
2268
2269 if (prev_uef_handler != NULL) {
2270 // We didn't handle this exception so pass it to the previous
2271 // UnhandledExceptionFilter.
2272 return (prev_uef_handler)(exceptionInfo);
2273 }
2274
2275 return EXCEPTION_CONTINUE_SEARCH;
2276 }
2277 #else //_WIN64
2278 /*
2279 On Windows, the mxcsr control bits are non-volatile across calls.
2280 See also CR 6192333.
2281 If an EXCEPTION_FLT_* happened after some native method modified
2282 mxcsr, it is not a JVM fault.
2283 However, should we decide to restore mxcsr after a faulty
2284 native method, we can uncomment the following code:
2285 jint MxCsr = INITIAL_MXCSR;
2286 // we can't use StubRoutines::addr_mxcsr_std()
2287 // because in Win64 mxcsr is not saved there
2288 if (MxCsr != ctx->MxCsr) {
2289 ctx->MxCsr = MxCsr;
2290 return EXCEPTION_CONTINUE_EXECUTION;
2291 }
2292
2293 */
2294 #endif // _WIN64
2295
2296
2297 static inline void report_error(Thread* t, DWORD exception_code,
2298 address addr, void* siginfo, void* context) {
2299 VMError err(t, exception_code, addr, siginfo, context);
2300 err.report_and_die();
2301
2302 // If UseOsErrorReporting, this will return here and save the error file
2303 // somewhere where we can find it in the minidump.
2304 }
2305
2306 //-----------------------------------------------------------------------------
2307 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2308 if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
2309 DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2310 #ifdef _M_IA64
2311 // On Itanium, we need the "precise pc", which has the slot number coded
2312 // into the least 4 bits: 0000=slot0, 0100=slot1, 1000=slot2 (Windows format).
2313 address pc = (address) exceptionInfo->ExceptionRecord->ExceptionAddress;
2314 // Convert the pc to "Unix format", which has the slot number coded
2315 // into the least 2 bits: 0000=slot0, 0001=slot1, 0010=slot2
2316 // This is needed for IA64 because "relocation" / "implicit null check" / "poll instruction"
2317 // information is saved in the Unix format.
2318 address pc_unix_format = (address) ((((uint64_t)pc) & 0xFFFFFFFFFFFFFFF0) | ((((uint64_t)pc) & 0xF) >> 2));
2319 #elif _M_AMD64
2320 address pc = (address) exceptionInfo->ContextRecord->Rip;
2321 #else
2322 address pc = (address) exceptionInfo->ContextRecord->Eip;
2323 #endif
2324 Thread* t = ThreadLocalStorage::get_thread_slow(); // slow & steady
2325
2326 // Handle SafeFetch32 and SafeFetchN exceptions.
2327 if (StubRoutines::is_safefetch_fault(pc)) {
2328 return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
2329 }
2330
2331 #ifndef _WIN64
2332 // Execution protection violation - win32 running on AMD64 only
2333 // Handled first to avoid misdiagnosis as a "normal" access violation;
2334 // This is safe to do because we have a new/unique ExceptionInformation
2335 // code for this condition.
2336 if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2337 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2338 int exception_subcode = (int) exceptionRecord->ExceptionInformation[0];
2339 address addr = (address) exceptionRecord->ExceptionInformation[1];
2340
2341 if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
2342 int page_size = os::vm_page_size();
2343
2344 // Make sure the pc and the faulting address are sane.
2345 //
2346 // If an instruction spans a page boundary, and the page containing
2347 // the beginning of the instruction is executable but the following
2348 // page is not, the pc and the faulting address might be slightly
2349 // different - we still want to unguard the 2nd page in this case.
2350 //
2351 // 15 bytes seems to be a (very) safe value for max instruction size.
2352 bool pc_is_near_addr =
2353 (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
2354 bool instr_spans_page_boundary =
2355 (align_size_down((intptr_t) pc ^ (intptr_t) addr,
2356 (intptr_t) page_size) > 0);
2357
2358 if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
2359 static volatile address last_addr =
2360 (address) os::non_memory_address_word();
2361
2362 // In conservative mode, don't unguard unless the address is in the VM
2363 if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
2364 (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
2365
2366 // Set memory to RWX and retry
2367 address page_start =
2368 (address) align_size_down((intptr_t) addr, (intptr_t) page_size);
2369 bool res = os::protect_memory((char*) page_start, page_size,
2370 os::MEM_PROT_RWX);
2371
2372 if (PrintMiscellaneous && Verbose) {
2373 char buf[256];
2374 jio_snprintf(buf, sizeof(buf), "Execution protection violation "
2375 "at " INTPTR_FORMAT
2376 ", unguarding " INTPTR_FORMAT ": %s", addr,
2377 page_start, (res ? "success" : strerror(errno)));
2378 tty->print_raw_cr(buf);
2379 }
2380
2381 // Set last_addr so if we fault again at the same address, we don't
2382 // end up in an endless loop.
2383 //
2384 // There are two potential complications here. Two threads trapping
2385 // at the same address at the same time could cause one of the
2386 // threads to think it already unguarded, and abort the VM. Likely
2387 // very rare.
2388 //
2389 // The other race involves two threads alternately trapping at
2390 // different addresses and failing to unguard the page, resulting in
2391 // an endless loop. This condition is probably even more unlikely
2392 // than the first.
2393 //
2394 // Although both cases could be avoided by using locks or thread
2395 // local last_addr, these solutions are an unnecessary complication:
2396 // this handler is a best-effort safety net, not a complete solution.
2397 // It is disabled by default and should only be used as a workaround
2398 // in case we missed any no-execute-unsafe VM code.
2399
2400 last_addr = addr;
2401
2402 return EXCEPTION_CONTINUE_EXECUTION;
2403 }
2404 }
2405
2406 // Last unguard failed or not unguarding
2407 tty->print_raw_cr("Execution protection violation");
2408 report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord,
2409 exceptionInfo->ContextRecord);
2410 return EXCEPTION_CONTINUE_SEARCH;
2411 }
2412 }
2413 #endif // _WIN64
2414
2415 // Check to see if we caught the safepoint code in the
2416 // process of write-protecting the memory serialization page.
2417 // It write-enables the page immediately after protecting it,
2418 // so just return.
2419 if ( exception_code == EXCEPTION_ACCESS_VIOLATION ) {
2420 JavaThread* thread = (JavaThread*) t;
2421 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2422 address addr = (address) exceptionRecord->ExceptionInformation[1];
2423 if ( os::is_memory_serialize_page(thread, addr) ) {
2424 // Block the current thread until permission on the memory serialize page is restored.
2425 os::block_on_serialize_page_trap();
2426 return EXCEPTION_CONTINUE_EXECUTION;
2427 }
2428 }
2429
2430 if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
2431 VM_Version::is_cpuinfo_segv_addr(pc)) {
2432 // Verify that the OS saves/restores AVX registers.
2433 return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
2434 }
2435
2436 if (t != NULL && t->is_Java_thread()) {
2437 JavaThread* thread = (JavaThread*) t;
2438 bool in_java = thread->thread_state() == _thread_in_Java;
2439
2440 // Handle potential stack overflows up front.
2441 if (exception_code == EXCEPTION_STACK_OVERFLOW) {
2442 if (os::uses_stack_guard_pages()) {
2443 #ifdef _M_IA64
2444 // Use guard page for register stack.
2445 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2446 address addr = (address) exceptionRecord->ExceptionInformation[1];
2447 // Check for a register stack overflow on Itanium
2448 if (thread->addr_inside_register_stack_red_zone(addr)) {
2449 // Fatal red zone violation happens if the Java program
2450 // catches a StackOverflow error and does so much processing
2451 // that it runs beyond the unprotected yellow guard zone. As
2452 // a result, we are out of here.
2453 fatal("ERROR: Unrecoverable stack overflow happened. JVM will exit.");
2454 } else if(thread->addr_inside_register_stack(addr)) {
2455 // Disable the yellow zone which sets the state that
2456 // we've got a stack overflow problem.
2457 if (thread->stack_yellow_zone_enabled()) {
2458 thread->disable_stack_yellow_zone();
2459 }
2460 // Give us some room to process the exception.
2461 thread->disable_register_stack_guard();
2462 // Tracing with +Verbose.
2463 if (Verbose) {
2464 tty->print_cr("SOF Compiled Register Stack overflow at " INTPTR_FORMAT " (SIGSEGV)", pc);
2465 tty->print_cr("Register Stack access at " INTPTR_FORMAT, addr);
2466 tty->print_cr("Register Stack base " INTPTR_FORMAT, thread->register_stack_base());
2467 tty->print_cr("Register Stack [" INTPTR_FORMAT "," INTPTR_FORMAT "]",
2468 thread->register_stack_base(),
2469 thread->register_stack_base() + thread->stack_size());
2470 }
2471
2472 // Reguard the permanent register stack red zone just to be sure.
2473 // We saw Windows silently disabling this without telling us.
2474 thread->enable_register_stack_red_zone();
2475
2476 return Handle_Exception(exceptionInfo,
2477 SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
2478 }
2479 #endif
2480 if (thread->stack_yellow_zone_enabled()) {
2481 // Yellow zone violation. The o/s has unprotected the first yellow
2482 // zone page for us. Note: must call disable_stack_yellow_zone to
2483 // update the enabled status, even if the zone contains only one page.
2484 thread->disable_stack_yellow_zone();
2485 // If not in java code, return and hope for the best.
2486 return in_java ? Handle_Exception(exceptionInfo,
2487 SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
2488 : EXCEPTION_CONTINUE_EXECUTION;
2489 } else {
2490 // Fatal red zone violation.
2491 thread->disable_stack_red_zone();
2492 tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
2493 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2494 exceptionInfo->ContextRecord);
2495 return EXCEPTION_CONTINUE_SEARCH;
2496 }
2497 } else if (in_java) {
2498 // JVM-managed guard pages cannot be used on win95/98. The o/s provides
2499 // a one-time-only guard page, which it has released to us. The next
2500 // stack overflow on this thread will result in an ACCESS_VIOLATION.
2501 return Handle_Exception(exceptionInfo,
2502 SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
2503 } else {
2504 // Can only return and hope for the best. Further stack growth will
2505 // result in an ACCESS_VIOLATION.
2506 return EXCEPTION_CONTINUE_EXECUTION;
2507 }
2508 } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2509 // Either stack overflow or null pointer exception.
2510 if (in_java) {
2511 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2512 address addr = (address) exceptionRecord->ExceptionInformation[1];
2513 address stack_end = thread->stack_base() - thread->stack_size();
2514 if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
2515 // Stack overflow.
2516 assert(!os::uses_stack_guard_pages(),
2517 "should be caught by red zone code above.");
2518 return Handle_Exception(exceptionInfo,
2519 SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
2520 }
2521 //
2522 // Check for safepoint polling and implicit null checks.
2523 // We only expect null pointers in the stubs (vtable);
2524 // the rest are checked explicitly now.
2525 //
2526 CodeBlob* cb = CodeCache::find_blob(pc);
2527 if (cb != NULL) {
2528 if (os::is_poll_address(addr)) {
2529 address stub = SharedRuntime::get_poll_stub(pc);
2530 return Handle_Exception(exceptionInfo, stub);
2531 }
2532 }
2533 {
2534 #ifdef _WIN64
2535 //
2536 // If it's a legal stack address, map the entire region in.
2537 //
2538 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2539 address addr = (address) exceptionRecord->ExceptionInformation[1];
2540 if (addr > thread->stack_yellow_zone_base() && addr < thread->stack_base() ) {
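// Round the faulting address down to a page boundary and commit
// everything from there up to the stack base.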
2541 addr = (address)((uintptr_t)addr &
2542 (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
2543 os::commit_memory((char *)addr, thread->stack_base() - addr,
2544 !ExecMem);
2545 return EXCEPTION_CONTINUE_EXECUTION;
2546 }
2547 else
2548 #endif
2549 {
2550 // Null pointer exception.
2551 #ifdef _M_IA64
2552 // Process implicit null checks in compiled code. Note: Implicit null checks
2553 // can happen even if "ImplicitNullChecks" is disabled, e.g. in vtable stubs.
2554 if (CodeCache::contains((void*) pc_unix_format) && !MacroAssembler::needs_explicit_null_check((intptr_t) addr)) {
2555 CodeBlob *cb = CodeCache::find_blob_unsafe(pc_unix_format);
2556 // Handle implicit null check in UEP method entry
2557 if (cb && (cb->is_frame_complete_at(pc) ||
2558 (cb->is_nmethod() && ((nmethod *)cb)->inlinecache_check_contains(pc)))) {
2559 if (Verbose) {
2560 intptr_t *bundle_start = (intptr_t*) ((intptr_t) pc_unix_format & 0xFFFFFFFFFFFFFFF0);
2561 tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGSEGV)", pc_unix_format);
2562 tty->print_cr(" to addr " INTPTR_FORMAT, addr);
2563 tty->print_cr(" bundle is " INTPTR_FORMAT " (high), " INTPTR_FORMAT " (low)",
2564 *(bundle_start + 1), *bundle_start);
2565 }
2566 return Handle_Exception(exceptionInfo,
2567 SharedRuntime::continuation_for_implicit_exception(thread, pc_unix_format, SharedRuntime::IMPLICIT_NULL));
2568 }
2569 }
2570
2571 // Implicit null checks were processed above. Hence, we should not reach
2572 // here in the usual case => die!
2573 if (Verbose) tty->print_raw_cr("Access violation, possible null pointer exception");
2574 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2575 exceptionInfo->ContextRecord);
2576 return EXCEPTION_CONTINUE_SEARCH;
2577
2578 #else // !IA64
2579
2580 // Windows 98 reports faulting addresses incorrectly
2581 if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr) ||
2582 !os::win32::is_nt()) {
2583 address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
2584 if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
2585 }
2586 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2587 exceptionInfo->ContextRecord);
2588 return EXCEPTION_CONTINUE_SEARCH;
2589 #endif
2590 }
2591 }
2592 }
2593
2594 #ifdef _WIN64
2595 // Special care for fast JNI field accessors.
2596 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
2597 // in and the heap gets shrunk before the field access.
2598 if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2599 address addr = JNI_FastGetField::find_slowcase_pc(pc);
2600 if (addr != (address)-1) {
2601 return Handle_Exception(exceptionInfo, addr);
2602 }
2603 }
2604 #endif
2605
2606 // Stack overflow or null pointer exception in native code.
2607 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2608 exceptionInfo->ContextRecord);
2609 return EXCEPTION_CONTINUE_SEARCH;
2610 } // /EXCEPTION_ACCESS_VIOLATION
2611 // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2612 #if defined _M_IA64
2613 else if ((exception_code == EXCEPTION_ILLEGAL_INSTRUCTION ||
2614 exception_code == EXCEPTION_ILLEGAL_INSTRUCTION_2)) {
2615 M37 handle_wrong_method_break(0, NativeJump::HANDLE_WRONG_METHOD, PR0);
2616
2617 // Compiled method patched to be non-entrant? The following conditions must apply:
2618 // 1. must be first instruction in bundle
2619 // 2. must be a break instruction with appropriate code
2620 if((((uint64_t) pc & 0x0F) == 0) &&
2621 (((IPF_Bundle*) pc)->get_slot0() == handle_wrong_method_break.bits())) {
2622 return Handle_Exception(exceptionInfo,
2623 (address)SharedRuntime::get_handle_wrong_method_stub());
2624 }
2625 } // /EXCEPTION_ILLEGAL_INSTRUCTION
2626 #endif
2627
2628
2629 if (in_java) {
2630 switch (exception_code) {
2631 case EXCEPTION_INT_DIVIDE_BY_ZERO:
2632 return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));
2633
2634 case EXCEPTION_INT_OVERFLOW:
2635 return Handle_IDiv_Exception(exceptionInfo);
2636
2637 } // switch
2638 }
2639 #ifndef _WIN64
2640 if (((thread->thread_state() == _thread_in_Java) ||
2641 (thread->thread_state() == _thread_in_native)) &&
2642 exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION)
2643 {
2644 LONG result=Handle_FLT_Exception(exceptionInfo);
2645 if (result==EXCEPTION_CONTINUE_EXECUTION) return result;
2646 }
2647 #endif //_WIN64
2648 }
2649
2650 if (exception_code != EXCEPTION_BREAKPOINT) {
2651 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2652 exceptionInfo->ContextRecord);
2653 }
2654 return EXCEPTION_CONTINUE_SEARCH;
2655 }
2656
2657 #ifndef _WIN64
2658 // Special care for fast JNI accessors.
2659 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
2660 // the heap gets shrunk before the field access.
2661 // Need to install our own structured exception handler since native code may
2662 // install its own.
2663 LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2664 DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2665 if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2666 address pc = (address) exceptionInfo->ContextRecord->Eip;
2667 address addr = JNI_FastGetField::find_slowcase_pc(pc);
2668 if (addr != (address)-1) {
2669 return Handle_Exception(exceptionInfo, addr);
2670 }
2671 }
2672 return EXCEPTION_CONTINUE_SEARCH;
2673 }
2674
2675 #define DEFINE_FAST_GETFIELD(Return,Fieldname,Result) \
2676 Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env, jobject obj, jfieldID fieldID) { \
2677 __try { \
2678 return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env, obj, fieldID); \
2679 } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)_exception_info())) { \
2680 } \
2681 return 0; \
2682 }
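// For example, DEFINE_FAST_GETFIELD(jint, int, Int) defines
// jni_fast_GetIntField_wrapper, which forwards to
// JNI_FastGetField::jni_fast_GetIntField_fp under the SEH filter above.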
2683
2684 DEFINE_FAST_GETFIELD(jboolean, bool, Boolean)
2685 DEFINE_FAST_GETFIELD(jbyte, byte, Byte)
2686 DEFINE_FAST_GETFIELD(jchar, char, Char)
2687 DEFINE_FAST_GETFIELD(jshort, short, Short)
2688 DEFINE_FAST_GETFIELD(jint, int, Int)
2689 DEFINE_FAST_GETFIELD(jlong, long, Long)
2690 DEFINE_FAST_GETFIELD(jfloat, float, Float)
2691 DEFINE_FAST_GETFIELD(jdouble, double, Double)
2692
2693 address os::win32::fast_jni_accessor_wrapper(BasicType type) {
2694 switch (type) {
2695 case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
2696 case T_BYTE: return (address)jni_fast_GetByteField_wrapper;
2697 case T_CHAR: return (address)jni_fast_GetCharField_wrapper;
2698 case T_SHORT: return (address)jni_fast_GetShortField_wrapper;
2699 case T_INT: return (address)jni_fast_GetIntField_wrapper;
2700 case T_LONG: return (address)jni_fast_GetLongField_wrapper;
2701 case T_FLOAT: return (address)jni_fast_GetFloatField_wrapper;
2702 case T_DOUBLE: return (address)jni_fast_GetDoubleField_wrapper;
2703 default: ShouldNotReachHere();
2704 }
2705 return (address)-1;
2706 }
2707 #endif
2708
2709 void os::win32::call_test_func_with_wrapper(void (*funcPtr)(void)) {
2710 // Install a win32 structured exception handler around the test
2711 // function call so the VM can generate an error dump if needed.
2712 __try {
2713 (*funcPtr)();
2714 } __except(topLevelExceptionFilter(
2715 (_EXCEPTION_POINTERS*)_exception_info())) {
2716 // Nothing to do.
2717 }
2718 }
2719
2720 // Virtual Memory
2721
2722 int os::vm_page_size() { return os::win32::vm_page_size(); }
2723 int os::vm_allocation_granularity() {
2724 return os::win32::vm_allocation_granularity();
2725 }
2726
2727 // Windows large page support is available on Windows 2003. In order to use
2728 // large page memory, the administrator must first assign additional privilege
2729 // to the user:
2730 // + select Control Panel -> Administrative Tools -> Local Security Policy
2731 // + select Local Policies -> User Rights Assignment
2732 // + double click "Lock pages in memory", add users and/or groups
2733 // + reboot
2734 // Note that the above steps are needed for administrators as well, as
2735 // administrators by default do not have the privilege to lock pages in memory.
2736 //
2737 // Note about Windows 2003: although the API supports committing large page
2738 // memory on a page-by-page basis and VirtualAlloc() returns success under this
2739 // scenario, experiments show that it only uses large pages if the entire
2740 // memory region is reserved and committed in a single VirtualAlloc() call.
2741 // This makes Windows large page support more or less like Solaris ISM, in
2742 // that the entire heap must be committed upfront. This will probably change
2743 // in the future; if so, the code below needs to be revisited.
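//
// For reference, a minimal sketch of such a single-shot large page allocation
// (privilege setup and error handling omitted; the VM's actual logic is in
// os::reserve_memory_special() below):
//
//   size_t lp_size = GetLargePageMinimum();
//   char*  lp_mem  = (char*) VirtualAlloc(NULL, lp_size,
//                                         MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES,
//                                         PAGE_READWRITE);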
2744
2745 #ifndef MEM_LARGE_PAGES
2746 #define MEM_LARGE_PAGES 0x20000000
2747 #endif
2748
2749 static HANDLE _hProcess;
2750 static HANDLE _hToken;
2751
2752 // Container for NUMA node list info
2753 class NUMANodeListHolder {
2754 private:
2755 int *_numa_used_node_list; // allocated below
2756 int _numa_used_node_count;
2757
2758 void free_node_list() {
2759 if (_numa_used_node_list != NULL) {
2760 FREE_C_HEAP_ARRAY(int, _numa_used_node_list, mtInternal);
2761 }
2762 }
2763
2764 public:
2765 NUMANodeListHolder() {
2766 _numa_used_node_count = 0;
2767 _numa_used_node_list = NULL;
2768 // do rest of initialization in build routine (after function pointers are set up)
2769 }
2770
2771 ~NUMANodeListHolder() {
2772 free_node_list();
2773 }
2774
2775 bool build() {
2776 DWORD_PTR proc_aff_mask;
2777 DWORD_PTR sys_aff_mask;
2778 if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
2779 ULONG highest_node_number;
2780 if (!os::Kernel32Dll::GetNumaHighestNodeNumber(&highest_node_number)) return false;
2781 free_node_list();
2782 _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
2783 for (unsigned int i = 0; i <= highest_node_number; i++) {
2784 ULONGLONG proc_mask_numa_node;
2785 if (!os::Kernel32Dll::GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
2786 if ((proc_aff_mask & proc_mask_numa_node)!=0) {
2787 _numa_used_node_list[_numa_used_node_count++] = i;
2788 }
2789 }
2790 return (_numa_used_node_count > 1);
2791 }
2792
2793 int get_count() {return _numa_used_node_count;}
2794 int get_node_list_entry(int n) {
2795 // for indexes out of range, returns -1
2796 return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
2797 }
2798
2799 } numa_node_list_holder;
2800
2801
2802
2803 static size_t _large_page_size = 0;
2804
2805 static bool resolve_functions_for_large_page_init() {
2806 return os::Kernel32Dll::GetLargePageMinimumAvailable() &&
2807 os::Advapi32Dll::AdvapiAvailable();
2808 }
2809
2810 static bool request_lock_memory_privilege() {
2811 _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
2812 os::current_process_id());
2813
2814 LUID luid;
2815 if (_hProcess != NULL &&
2816 os::Advapi32Dll::OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) &&
2817 os::Advapi32Dll::LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {
2818
2819 TOKEN_PRIVILEGES tp;
2820 tp.PrivilegeCount = 1;
2821 tp.Privileges[0].Luid = luid;
2822 tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
2823
2824 // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
2825 // privilege. Check GetLastError() too. See MSDN document.
2826 if (os::Advapi32Dll::AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) &&
2827 (GetLastError() == ERROR_SUCCESS)) {
2828 return true;
2829 }
2830 }
2831
2832 return false;
2833 }
2834
2835 static void cleanup_after_large_page_init() {
2836 if (_hProcess) CloseHandle(_hProcess);
2837 _hProcess = NULL;
2838 if (_hToken) CloseHandle(_hToken);
2839 _hToken = NULL;
2840 }
2841
2842 static bool numa_interleaving_init() {
2843 bool success = false;
2844 bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving);
2845
2846 // print a warning if UseNUMAInterleaving flag is specified on command line
2847 bool warn_on_failure = use_numa_interleaving_specified;
2848 # define WARN(msg) if (warn_on_failure) { warning(msg); }
2849
2850 // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
2851 size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2852 NUMAInterleaveGranularity = align_size_up(NUMAInterleaveGranularity, min_interleave_granularity);
2853
2854 if (os::Kernel32Dll::NumaCallsAvailable()) {
2855 if (numa_node_list_holder.build()) {
2856 if (PrintMiscellaneous && Verbose) {
2857 tty->print("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
2858 for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
2859 tty->print("%d ", numa_node_list_holder.get_node_list_entry(i));
2860 }
2861 tty->print("\n");
2862 }
2863 success = true;
2864 } else {
2865 WARN("Process does not cover multiple NUMA nodes.");
2866 }
2867 } else {
2868 WARN("NUMA Interleaving is not supported by the operating system.");
2869 }
2870 if (!success) {
2871 if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag.");
2872 }
2873 return success;
2874 #undef WARN
2875 }
2876
2877 // This routine is used whenever we need to reserve a contiguous VA range
2878 // but must make separate VirtualAlloc calls for each piece of the range.
2879 // Reasons for doing this:
2880 // * UseLargePagesIndividualAllocation was set (normally only needed on WS2003 but possible to be set otherwise)
2881 // * UseNUMAInterleaving requires a separate node for each piece
2882 static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags, DWORD prot,
2883 bool should_inject_error=false) {
2884 char * p_buf;
2885 // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
2886 size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2887 size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;
2888
2889 // First reserve enough address space in advance, since we want to be
2890 // able to break a single contiguous virtual address range into multiple
2891 // large page commits, but WS2003 does not allow reserving large page space.
2892 // So we just use 4K pages for the reserve; this gives us a legal contiguous
2893 // address space. Then we deallocate that reservation and re-allocate
2894 // using large pages.
2895 const size_t size_of_reserve = bytes + chunk_size;
2896 if (bytes > size_of_reserve) {
2897 // Overflowed.
2898 return NULL;
2899 }
2900 p_buf = (char *) VirtualAlloc(addr,
2901 size_of_reserve, // size of Reserve
2902 MEM_RESERVE,
2903 PAGE_READWRITE);
2904 // If reservation failed, return NULL
2905 if (p_buf == NULL) return NULL;
2906 MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, mtNone, CALLER_PC);
2907 os::release_memory(p_buf, bytes + chunk_size);
2908
2909 // We still need to round up to a page boundary (in case we are using large pages),
2910 // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size);
2911 // instead we handle this in the bytes_to_rq computation below.
2912 p_buf = (char *) align_size_up((size_t)p_buf, page_size);
2913
2914 // now go through and allocate one chunk at a time until all bytes are
2915 // allocated
2916 size_t bytes_remaining = bytes;
2917 // An overflow of align_size_up() would have been caught above
2918 // in the calculation of size_of_reserve.
2919 char * next_alloc_addr = p_buf;
2920 HANDLE hProc = GetCurrentProcess();
2921
2922 #ifdef ASSERT
2923 // Variable for the failure injection
2924 long ran_num = os::random();
2925 size_t fail_after = ran_num % bytes;
2926 #endif
2927
2928 int count=0;
2929 while (bytes_remaining) {
2930 // select bytes_to_rq to get to the next chunk_size boundary
2931
2932 size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size));
2933 // Note allocate and commit
2934 char * p_new;
2935
2936 #ifdef ASSERT
2937 bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after);
2938 #else
2939 const bool inject_error_now = false;
2940 #endif
2941
2942 if (inject_error_now) {
2943 p_new = NULL;
2944 } else {
2945 if (!UseNUMAInterleaving) {
2946 p_new = (char *) VirtualAlloc(next_alloc_addr,
2947 bytes_to_rq,
2948 flags,
2949 prot);
2950 } else {
2951 // get the next node to use from the used_node_list
2952 assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
2953 DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
2954 p_new = (char *)os::Kernel32Dll::VirtualAllocExNuma(hProc,
2955 next_alloc_addr,
2956 bytes_to_rq,
2957 flags,
2958 prot,
2959 node);
2960 }
2961 }
2962
2963 if (p_new == NULL) {
2964 // Free any allocated pages
2965 if (next_alloc_addr > p_buf) {
2966 // Some memory was committed so release it.
2967 size_t bytes_to_release = bytes - bytes_remaining;
2968 // NMT has yet to record any individual blocks, so it
2969 // needs to create a dummy 'reserve' record to match
2970 // the release.
2971 MemTracker::record_virtual_memory_reserve((address)p_buf,
2972 bytes_to_release, mtNone, CALLER_PC);
2973 os::release_memory(p_buf, bytes_to_release);
2974 }
2975 #ifdef ASSERT
2976 if (should_inject_error) {
2977 if (TracePageSizes && Verbose) {
2978 tty->print_cr("Reserving pages individually failed.");
2979 }
2980 }
2981 #endif
2982 return NULL;
2983 }
2984
2985 bytes_remaining -= bytes_to_rq;
2986 next_alloc_addr += bytes_to_rq;
2987 count++;
2988 }
2989 // Although the memory is allocated individually, it is returned as one.
2990 // NMT records it as one block.
2991 address pc = CALLER_PC;
2992 if ((flags & MEM_COMMIT) != 0) {
2993 MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, mtNone, pc);
2994 } else {
2995 MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, mtNone, pc);
2996 }
2997
2998 // made it this far, success
2999 return p_buf;
3000 }
3001
3002
3003
3004 void os::large_page_init() {
3005 if (!UseLargePages) return;
3006
3007 // print a warning if any large page related flag is specified on command line
3008 bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
3009 !FLAG_IS_DEFAULT(LargePageSizeInBytes);
3010 bool success = false;
3011
3012 # define WARN(msg) if (warn_on_failure) { warning(msg); }
3013 if (resolve_functions_for_large_page_init()) {
3014 if (request_lock_memory_privilege()) {
3015 size_t s = os::Kernel32Dll::GetLargePageMinimum();
3016 if (s) {
3017 #if defined(IA32) || defined(AMD64)
3018 if (s > 4*M || LargePageSizeInBytes > 4*M) {
3019 WARN("JVM cannot use large pages bigger than 4mb.");
3020 } else {
3021 #endif
3022 if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) {
3023 _large_page_size = LargePageSizeInBytes;
3024 } else {
3025 _large_page_size = s;
3026 }
3027 success = true;
3028 #if defined(IA32) || defined(AMD64)
3029 }
3030 #endif
3031 } else {
3032 WARN("Large page is not supported by the processor.");
3033 }
3034 } else {
3035 WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
3036 }
3037 } else {
3038 WARN("Large page is not supported by the operating system.");
3039 }
3040 #undef WARN
3041
3042 const size_t default_page_size = (size_t) vm_page_size();
3043 if (success && _large_page_size > default_page_size) {
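// Expose the large page size as the preferred entry in _page_sizes,
// followed by the default page size and a 0 terminator.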
3044 _page_sizes[0] = _large_page_size;
3045 _page_sizes[1] = default_page_size;
3046 _page_sizes[2] = 0;
3047 }
3048
3049 cleanup_after_large_page_init();
3050 UseLargePages = success;
3051 }
3052
3053 // On win32, one cannot release just a part of reserved memory; it's an
3054 // all-or-nothing deal. When we split a reservation, we must break the
3055 // reservation into two reservations.
3056 void os::pd_split_reserved_memory(char *base, size_t size, size_t split,
3057 bool realloc) {
3058 if (size > 0) {
3059 release_memory(base, size);
3060 if (realloc) {
3061 reserve_memory(split, base);
3062 }
3063 if (size != split) {
3064 reserve_memory(size - split, base + split);
3065 }
3066 }
3067 }
3068
3069 // Multiple threads can race in this code, but unlike POSIX-like OSes it is not
3070 // possible to unmap small sections of virtual space to get the requested alignment.
3071 // Windows prevents multiple threads from remapping over each other, so this loop is thread-safe.
3072 char* os::reserve_memory_aligned(size_t size, size_t alignment) {
3073 assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
3074 "Alignment must be a multiple of allocation granularity (page size)");
3075 assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
3076
3077 size_t extra_size = size + alignment;
3078 assert(extra_size >= size, "overflow, size is too large to allow alignment");
3079
3080 char* aligned_base = NULL;
3081
3082 do {
3083 char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
3084 if (extra_base == NULL) {
3085 return NULL;
3086 }
3087 // Do manual alignment
3088 aligned_base = (char*) align_size_up((uintptr_t) extra_base, alignment);
3089
3090 os::release_memory(extra_base, extra_size);
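// Another thread may reserve this range between the release above and the
// re-reserve below; in that case reserve_memory() returns NULL and we retry.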
3091
3092 aligned_base = os::reserve_memory(size, aligned_base);
3093
3094 } while (aligned_base == NULL);
3095
3096 return aligned_base;
3097 }
3098
3099 char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
3100 assert((size_t)addr % os::vm_allocation_granularity() == 0,
3101 "reserve alignment");
3102 assert(bytes % os::vm_allocation_granularity() == 0, "reserve block size");
3103 char* res;
3104 // Note that if UseLargePages is on, all the areas that require interleaving
3105 // will go through reserve_memory_special rather than through here.
3106 bool use_individual = (UseNUMAInterleaving && !UseLargePages);
3107 if (!use_individual) {
3108 res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
3109 } else {
3110 elapsedTimer reserveTimer;
3111 if( Verbose && PrintMiscellaneous ) reserveTimer.start();
3112 // in numa interleaving, we have to allocate pages individually
3113 // (well really chunks of NUMAInterleaveGranularity size)
3114 res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
3115 if (res == NULL) {
3116 warning("NUMA page allocation failed");
3117 }
3118 if( Verbose && PrintMiscellaneous ) {
3119 reserveTimer.stop();
3120 tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
3121 reserveTimer.milliseconds(), reserveTimer.ticks());
3122 }
3123 }
3124 assert(res == NULL || addr == NULL || addr == res,
3125 "Unexpected address from reserve.");
3126
3127 return res;
3128 }
3129
3130 // Reserve memory at an arbitrary address, only if that area is
3131 // available (and not reserved for something else).
3132 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
3133 // Windows os::reserve_memory() fails if the requested address range is
3134 // not available.
3135 return reserve_memory(bytes, requested_addr);
3136 }
3137
3138 size_t os::large_page_size() {
3139 return _large_page_size;
3140 }
3141
3142 bool os::can_commit_large_page_memory() {
3143 // Windows only uses large page memory when the entire region is reserved
3144 // and committed in a single VirtualAlloc() call. This may change in the
3145 // future, but with Windows 2003 it's not possible to commit on demand.
3146 return false;
3147 }
3148
3149 bool os::can_execute_large_page_memory() {
3150 return true;
3151 }
3152
3153 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr, bool exec) {
3154 assert(UseLargePages, "only for large pages");
3155
3156 if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
3157 return NULL; // Fallback to small pages.
3158 }
3159
3160 const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
3161 const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3162
3163 // with large pages, there are two cases where we need to use Individual Allocation
3164 // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
3165 // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
3166 if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
3167 if (TracePageSizes && Verbose) {
3168 tty->print_cr("Reserving large pages individually.");
3169 }
3170 char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
3171 if (p_buf == NULL) {
3172 // give an appropriate warning message
3173 if (UseNUMAInterleaving) {
3174 warning("NUMA large page allocation failed, UseLargePages flag ignored");
3175 }
3176 if (UseLargePagesIndividualAllocation) {
3177 warning("Individually allocated large pages failed, "
3178 "use -XX:-UseLargePagesIndividualAllocation to turn off");
3179 }
3180 return NULL;
3181 }
3182
3183 return p_buf;
3184
3185 } else {
3186 if (TracePageSizes && Verbose) {
3187 tty->print_cr("Reserving large pages in a single large chunk.");
3188 }
3189 // Normal policy: just allocate it all at once.
3190 DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3191 char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
3192 if (res != NULL) {
3193 address pc = CALLER_PC;
3194 MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, mtNone, pc);
3195 }
3196
3197 return res;
3198 }
3199 }
3200
3201 bool os::release_memory_special(char* base, size_t bytes) {
3202 assert(base != NULL, "Sanity check");
3203 return release_memory(base, bytes);
3204 }
3205
3206 void os::print_statistics() {
3207 }
3208
3209 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
3210 int err = os::get_last_error();
3211 char buf[256];
3212 size_t buf_len = os::lasterror(buf, sizeof(buf));
3213 warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
3214 ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3215 exec, buf_len != 0 ? buf : "<no_error_string>", err);
3216 }
3217
3218 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
3219 if (bytes == 0) {
3220 // Don't bother the OS with noops.
3221 return true;
3222 }
3223 assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
3224 assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
3225 // Don't attempt to print anything if the OS call fails. We're
3226 // probably low on resources, so the print itself may cause crashes.
3227
3228 // Unless we have NUMAInterleaving enabled, the range of a commit
3229 // is always within a reserve covered by a single VirtualAlloc;
3230 // in that case we can just do a single commit for the requested size.
3231 if (!UseNUMAInterleaving) {
3232 if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
3233 NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3234 return false;
3235 }
3236 if (exec) {
3237 DWORD oldprot;
3238 // Windows doc says to use VirtualProtect to get execute permissions
3239 if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
3240 NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3241 return false;
3242 }
3243 }
3244 return true;
3245 } else {
3246
3247 // when NUMAInterleaving is enabled, the commit might cover a range that
3248 // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
3249 // VirtualQuery can help us determine that. The RegionSize that VirtualQuery
3250 // returns represents the number of bytes that can be committed in one step.
3251 size_t bytes_remaining = bytes;
3252 char * next_alloc_addr = addr;
3253 while (bytes_remaining > 0) {
3254 MEMORY_BASIC_INFORMATION alloc_info;
3255 VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
3256 size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3257 if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
3258 PAGE_READWRITE) == NULL) {
3259 NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3260 exec);)
3261 return false;
3262 }
3263 if (exec) {
3264 DWORD oldprot;
3265 if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
3266 PAGE_EXECUTE_READWRITE, &oldprot)) {
3267 NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3268 exec);)
3269 return false;
3270 }
3271 }
3272 bytes_remaining -= bytes_to_rq;
3273 next_alloc_addr += bytes_to_rq;
3274 }
3275 }
3276 // if we made it this far, return true
3277 return true;
3278 }
3279
3280 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
3281 bool exec) {
3282 // alignment_hint is ignored on this OS
3283 return pd_commit_memory(addr, size, exec);
3284 }
3285
3286 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
3287 const char* mesg) {
3288 assert(mesg != NULL, "mesg must be specified");
3289 if (!pd_commit_memory(addr, size, exec)) {
3290 warn_fail_commit_memory(addr, size, exec);
3291 vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
3292 }
3293 }
3294
3295 void os::pd_commit_memory_or_exit(char* addr, size_t size,
3296 size_t alignment_hint, bool exec,
3297 const char* mesg) {
3298 // alignment_hint is ignored on this OS
3299 pd_commit_memory_or_exit(addr, size, exec, mesg);
3300 }
3301
3302 bool os::pd_uncommit_memory(char* addr, size_t bytes) {
3303 if (bytes == 0) {
3304 // Don't bother the OS with noops.
3305 return true;
3306 }
3307 assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
3308 assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
3309 return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0);
3310 }
3311
3312 bool os::pd_release_memory(char* addr, size_t bytes) {
3313 return VirtualFree(addr, 0, MEM_RELEASE) != 0;
3314 }
3315
3316 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
3317 return os::commit_memory(addr, size, !ExecMem);
3318 }
3319
3320 bool os::remove_stack_guard_pages(char* addr, size_t size) {
3321 return os::uncommit_memory(addr, size);
3322 }
3323
3324 // Set protections specified
3325 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
3326 bool is_committed) {
3327 unsigned int p = 0;
3328 switch (prot) {
3329 case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
3330 case MEM_PROT_READ: p = PAGE_READONLY; break;
3331 case MEM_PROT_RW: p = PAGE_READWRITE; break;
3332 case MEM_PROT_RWX: p = PAGE_EXECUTE_READWRITE; break;
3333 default:
3334 ShouldNotReachHere();
3335 }
3336
3337 DWORD old_status;
3338
3339 // Strangely enough, on Win32 one can change protection only for committed
3340 // memory; not a big deal anyway, as the region here is at most 64K.
3341 if (!is_committed) {
3342 commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
3343 "cannot commit protection page");
3344 }
3345 // One cannot use os::guard_memory() here, as on Win32 guard pages
3346 // have different (one-shot) semantics; from MSDN on PAGE_GUARD:
3347 //
3348 // Pages in the region become guard pages. Any attempt to access a guard page
3349 // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
3350 // the guard page status. Guard pages thus act as a one-time access alarm.
3351 return VirtualProtect(addr, bytes, p, &old_status) != 0;
3352 }
3353
3354 bool os::guard_memory(char* addr, size_t bytes) {
3355 DWORD old_status;
3356 return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
3357 }
3358
3359 bool os::unguard_memory(char* addr, size_t bytes) {
3360 DWORD old_status;
3361 return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
3362 }
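// Illustrative sketch (not part of the build) of the difference between the
// two protection flavors above, assuming 'p' is a committed page:
//
//   os::protect_memory(p, os::vm_page_size(), os::MEM_PROT_NONE, true);
//     - every subsequent access faults until the protection is changed back.
//
//   os::guard_memory(p, os::vm_page_size());
//     - the first access raises STATUS_GUARD_PAGE and clears the PAGE_GUARD
//       bit; later accesses succeed unless guard_memory() is called again.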
3363
3364 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3365 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3366 void os::numa_make_global(char *addr, size_t bytes) { }
3367 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) { }
3368 bool os::numa_topology_changed() { return false; }
3369 size_t os::numa_get_groups_num() { return MAX2(numa_node_list_holder.get_count(), 1); }
3370 int os::numa_get_group_id() { return 0; }
3371 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
3372 if (numa_node_list_holder.get_count() == 0 && size > 0) {
3373 // Provide an answer for UMA systems
3374 ids[0] = 0;
3375 return 1;
3376 } else {
3377 // clamp size if it is bigger than the actual number of groups
3378 size = MIN2(size, numa_get_groups_num());
3379 for (int i = 0; i < (int)size; i++) {
3380 ids[i] = numa_node_list_holder.get_node_list_entry(i);
3381 }
3382 return size;
3383 }
3384 }
3385
3386 bool os::get_page_info(char *start, page_info* info) {
3387 return false;
3388 }
3389
3390 char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
3391 return end;
3392 }
3393
3394 char* os::non_memory_address_word() {
3395 // Must never look like an address returned by reserve_memory,
3396 // even in its subfields (as defined by the CPU immediate fields,
3397 // if the CPU splits constants across multiple instructions).
3398 return (char*)-1;
3399 }
3400
3401 #define MAX_ERROR_COUNT 100
3402 #define SYS_THREAD_ERROR 0xffffffffUL
3403
3404 void os::pd_start_thread(Thread* thread) {
3405 DWORD ret = ResumeThread(thread->osthread()->thread_handle());
3406 // Returns previous suspend state:
3407 // 0: Thread was not suspended
3408 // 1: Thread is running now
3409 // >1: Thread is still suspended.
3410 assert(ret != SYS_THREAD_ERROR, "ResumeThread failed"); // should propagate back
3411 }
3412
3413 class HighResolutionInterval : public CHeapObj<mtThread> {
3414 // The default timer resolution seems to be 10 milliseconds.
3415 // (Where is this written down?)
3416 // If someone wants to sleep for only a fraction of the default,
3417 // then we set the timer resolution down to 1 millisecond for
3418 // the duration of their interval.
3419 // We carefully set the resolution back, since otherwise we
3420 // seem to incur an overhead (3%?) that we don't need.
3421 // CONSIDER: if ms is small, say 3, then we should run with a high resolution timer.
3422 // But if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
3423 // Alternatively, we could compute the relative error (503/500 = .6%) and only use
3424 // timeBeginPeriod() if the relative error exceeded some threshold.
3425 // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
3426 // to decreased efficiency related to increased timer "tick" rates. We want to minimize
3427 // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
3428 // resolution timers running.
3429 private:
3430 jlong resolution;
3431 public:
3432 HighResolutionInterval(jlong ms) {
3433 resolution = ms % 10L;
3434 if (resolution != 0) {
3435 MMRESULT result = timeBeginPeriod(1L);
3436 }
3437 }
3438 ~HighResolutionInterval() {
3439 if (resolution != 0) {
3440 MMRESULT result = timeEndPeriod(1L);
3441 }
3442 resolution = 0L;
3443 }
3444 };
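// Worked example of the resolution heuristic above (illustrative only):
//   ms = 3   -> 3 % 10 != 0   -> timeBeginPeriod(1) for the interval
//   ms = 500 -> 500 % 10 == 0 -> leave the default timer resolution alone
//   ms = 503 -> 503 % 10 != 0 -> resolution is raised even though the relative
//               error versus 500 is small (see the CONSIDER note above)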
3445
3446 int os::sleep(Thread* thread, jlong ms, bool interruptable) {
3447 jlong limit = (jlong) MAXDWORD;
3448
3449 while(ms > limit) {
3450 int res;
3451 if ((res = sleep(thread, limit, interruptable)) != OS_TIMEOUT)
3452 return res;
3453 ms -= limit;
3454 }
3455
3456 assert(thread == Thread::current(), "thread consistency check");
3457 OSThread* osthread = thread->osthread();
3458 OSThreadWaitState osts(osthread, false /* not Object.wait() */);
3459 int result;
3460 if (interruptable) {
3461 assert(thread->is_Java_thread(), "must be java thread");
3462 JavaThread *jt = (JavaThread *) thread;
3463 ThreadBlockInVM tbivm(jt);
3464
3465 jt->set_suspend_equivalent();
3466 // cleared by handle_special_suspend_equivalent_condition() or
3467 // java_suspend_self() via check_and_wait_while_suspended()
3468
3469 HANDLE events[1];
3470 events[0] = osthread->interrupt_event();
3471 HighResolutionInterval *phri=NULL;
3472 if(!ForceTimeHighResolution)
3473 phri = new HighResolutionInterval( ms );
3474 if (WaitForMultipleObjects(1, events, FALSE, (DWORD)ms) == WAIT_TIMEOUT) {
3475 result = OS_TIMEOUT;
3476 } else {
3477 ResetEvent(osthread->interrupt_event());
3478 osthread->set_interrupted(false);
3479 result = OS_INTRPT;
3480 }
3481 delete phri; //if it is NULL, harmless
3482
3483 // were we externally suspended while we were waiting?
3484 jt->check_and_wait_while_suspended();
3485 } else {
3486 assert(!thread->is_Java_thread(), "must not be java thread");
3487 Sleep((long) ms);
3488 result = OS_TIMEOUT;
3489 }
3490 return result;
3491 }
3492
3493 //
3494 // Short sleep, direct OS call.
3495 //
3496 // ms = 0, means allow others (if any) to run.
3497 //
3498 void os::naked_short_sleep(jlong ms) {
3499 assert(ms < 1000, "Un-interruptable sleep, short time use only");
3500 Sleep(ms);
3501 }
3502
3503 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
3504 void os::infinite_sleep() {
3505 while (true) { // sleep forever ...
3506 Sleep(100000); // ... 100 seconds at a time
3507 }
3508 }
3509
3510 typedef BOOL (WINAPI * STTSignature)(void) ;
3511
3512 os::YieldResult os::NakedYield() {
3513 // Use either SwitchToThread() or Sleep(0)
3514 // Consider passing back the return value from SwitchToThread().
3515 if (os::Kernel32Dll::SwitchToThreadAvailable()) {
3516 return SwitchToThread() ? os::YIELD_SWITCHED : os::YIELD_NONEREADY ;
3517 } else {
3518 Sleep(0);
3519 }
3520 return os::YIELD_UNKNOWN ;
3521 }
3522
3523 void os::yield() { os::NakedYield(); }
3524
3525 void os::yield_all(int attempts) {
3526 // Yields to all threads, including threads with lower priorities
3527 Sleep(1);
3528 }
3529
3530 // Win32 only gives you access to seven real priorities at a time,
3531 // so we compress Java's ten down to seven. It would be better
3532 // if we dynamically adjusted relative priorities.
3533
3534 int os::java_to_os_priority[CriticalPriority + 1] = {
3535 THREAD_PRIORITY_IDLE, // 0 Entry should never be used
3536 THREAD_PRIORITY_LOWEST, // 1 MinPriority
3537 THREAD_PRIORITY_LOWEST, // 2
3538 THREAD_PRIORITY_BELOW_NORMAL, // 3
3539 THREAD_PRIORITY_BELOW_NORMAL, // 4
3540 THREAD_PRIORITY_NORMAL, // 5 NormPriority
3541 THREAD_PRIORITY_NORMAL, // 6
3542 THREAD_PRIORITY_ABOVE_NORMAL, // 7
3543 THREAD_PRIORITY_ABOVE_NORMAL, // 8
3544 THREAD_PRIORITY_HIGHEST, // 9 NearMaxPriority
3545 THREAD_PRIORITY_HIGHEST, // 10 MaxPriority
3546 THREAD_PRIORITY_HIGHEST // 11 CriticalPriority
3547 };
3548
3549 int prio_policy1[CriticalPriority + 1] = {
3550 THREAD_PRIORITY_IDLE, // 0 Entry should never be used
3551 THREAD_PRIORITY_LOWEST, // 1 MinPriority
3552 THREAD_PRIORITY_LOWEST, // 2
3553 THREAD_PRIORITY_BELOW_NORMAL, // 3
3554 THREAD_PRIORITY_BELOW_NORMAL, // 4
3555 THREAD_PRIORITY_NORMAL, // 5 NormPriority
3556 THREAD_PRIORITY_ABOVE_NORMAL, // 6
3557 THREAD_PRIORITY_ABOVE_NORMAL, // 7
3558 THREAD_PRIORITY_HIGHEST, // 8
3559 THREAD_PRIORITY_HIGHEST, // 9 NearMaxPriority
3560 THREAD_PRIORITY_TIME_CRITICAL, // 10 MaxPriority
3561 THREAD_PRIORITY_TIME_CRITICAL // 11 CriticalPriority
3562 };
3563
3564 static int prio_init() {
3565 // If ThreadPriorityPolicy is 1, switch tables
3566 if (ThreadPriorityPolicy == 1) {
3567 int i;
3568 for (i = 0; i < CriticalPriority + 1; i++) {
3569 os::java_to_os_priority[i] = prio_policy1[i];
3570 }
3571 }
3572 if (UseCriticalJavaThreadPriority) {
3573 os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority] ;
3574 }
3575 return 0;
3576 }
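// Example of the effect of the tables above (illustrative): with the default
// table, Java priorities 9 and 10 both map to THREAD_PRIORITY_HIGHEST. With
// -XX:ThreadPriorityPolicy=1 the prio_policy1 table is used instead, so Java
// priority 10 maps to THREAD_PRIORITY_TIME_CRITICAL, and with
// -XX:+UseCriticalJavaThreadPriority, MaxPriority additionally borrows the
// CriticalPriority mapping.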
3577
3578 OSReturn os::set_native_priority(Thread* thread, int priority) {
3579 if (!UseThreadPriorities) return OS_OK;
3580 bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
3581 return ret ? OS_OK : OS_ERR;
3582 }
3583
3584 OSReturn os::get_native_priority(const Thread* const thread, int* priority_ptr) {
3585 if ( !UseThreadPriorities ) {
3586 *priority_ptr = java_to_os_priority[NormPriority];
3587 return OS_OK;
3588 }
3589 int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
3590 if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
3591 assert(false, "GetThreadPriority failed");
3592 return OS_ERR;
3593 }
3594 *priority_ptr = os_prio;
3595 return OS_OK;
3596 }
3597
3598
3599 // Hint to the underlying OS that a task switch would not be good.
3600 // Void return because it's a hint and can fail.
3601 void os::hint_no_preempt() {}
3602
3603 void os::interrupt(Thread* thread) {
3604 assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
3605 "possibility of dangling Thread pointer");
3606
3607 OSThread* osthread = thread->osthread();
3608 osthread->set_interrupted(true);
3609 // More than one thread can get here with the same value of osthread,
3610 // resulting in multiple notifications. We do, however, want the store
3611 // to interrupted() to be visible to other threads before we post
3612 // the interrupt event.
3613 OrderAccess::release();
3614 SetEvent(osthread->interrupt_event());
3615 // For JSR166: unpark after setting status
3616 if (thread->is_Java_thread())
3617 ((JavaThread*)thread)->parker()->unpark();
3618
3619 ParkEvent * ev = thread->_ParkEvent ;
3620 if (ev != NULL) ev->unpark() ;
3621
3622 }
3623
3624
3625 bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
3626 assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
3627 "possibility of dangling Thread pointer");
3628
3629 OSThread* osthread = thread->osthread();
3630 // There is no synchronization between the setting of the interrupt
3631 // and it being cleared here. It is critical - see 6535709 - that
3632 // we only clear the interrupt state, and reset the interrupt event,
3633 // if we are going to report that we were indeed interrupted - else
3634 // an interrupt can be "lost", leading to spurious wakeups or lost wakeups
3635 // depending on the timing. Checking the thread's interrupt event confirms
3636 // that it received a real interrupt, thus preventing spurious wakeups.
3637 bool interrupted = osthread->interrupted() && (WaitForSingleObject(osthread->interrupt_event(), 0) == WAIT_OBJECT_0);
3638 if (interrupted && clear_interrupted) {
3639 osthread->set_interrupted(false);
3640 ResetEvent(osthread->interrupt_event());
3641 } // Otherwise leave the interrupted state alone
3642
3643 return interrupted;
3644 }
3645
3646 // Gets a pc (hint) for a running thread. Currently used only for profiling.
3647 ExtendedPC os::get_thread_pc(Thread* thread) {
3648 CONTEXT context;
3649 context.ContextFlags = CONTEXT_CONTROL;
3650 HANDLE handle = thread->osthread()->thread_handle();
3651 #ifdef _M_IA64
3652 assert(0, "Fix get_thread_pc");
3653 return ExtendedPC(NULL);
3654 #else
3655 if (GetThreadContext(handle, &context)) {
3656 #ifdef _M_AMD64
3657 return ExtendedPC((address) context.Rip);
3658 #else
3659 return ExtendedPC((address) context.Eip);
3660 #endif
3661 } else {
3662 return ExtendedPC(NULL);
3663 }
3664 #endif
3665 }
3666
3667 // GetCurrentThreadId() returns DWORD
3668 intx os::current_thread_id() { return GetCurrentThreadId(); }
3669
3670 static int _initial_pid = 0;
3671
3672 int os::current_process_id()
3673 {
3674 return (_initial_pid ? _initial_pid : _getpid());
3675 }
3676
3677 int os::win32::_vm_page_size = 0;
3678 int os::win32::_vm_allocation_granularity = 0;
3679 int os::win32::_processor_type = 0;
3680 // Processor level is not available on non-NT systems, use vm_version instead
3681 int os::win32::_processor_level = 0;
3682 julong os::win32::_physical_memory = 0;
3683 size_t os::win32::_default_stack_size = 0;
3684
3685 intx os::win32::_os_thread_limit = 0;
3686 volatile intx os::win32::_os_thread_count = 0;
3687
3688 bool os::win32::_is_nt = false;
3689 bool os::win32::_is_windows_2003 = false;
3690 bool os::win32::_is_windows_server = false;
3691
3692 void os::win32::initialize_system_info() {
3693 SYSTEM_INFO si;
3694 GetSystemInfo(&si);
3695 _vm_page_size = si.dwPageSize;
3696 _vm_allocation_granularity = si.dwAllocationGranularity;
3697 _processor_type = si.dwProcessorType;
3698 _processor_level = si.wProcessorLevel;
3699 set_processor_count(si.dwNumberOfProcessors);
3700
3701 MEMORYSTATUSEX ms;
3702 ms.dwLength = sizeof(ms);
3703
3704 // MEMORYSTATUSEX also provides ullAvailPhys (free physical memory bytes),
3705 // ullTotalVirtual, ullAvailVirtual, and dwMemoryLoad (% of memory in use)
3706 GlobalMemoryStatusEx(&ms);
3707 _physical_memory = ms.ullTotalPhys;
3708
3709 OSVERSIONINFOEX oi;
3710 oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
3711 GetVersionEx((OSVERSIONINFO*)&oi);
3712 switch(oi.dwPlatformId) {
3713 case VER_PLATFORM_WIN32_WINDOWS: _is_nt = false; break;
3714 case VER_PLATFORM_WIN32_NT:
3715 _is_nt = true;
3716 {
3717 int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
3718 if (os_vers == 5002) {
3719 _is_windows_2003 = true;
3720 }
3721 if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
3722 oi.wProductType == VER_NT_SERVER) {
3723 _is_windows_server = true;
3724 }
3725 }
3726 break;
3727 default: fatal("Unknown platform");
3728 }
3729
3730 _default_stack_size = os::current_stack_size();
3731 assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
3732 assert((_default_stack_size & (_vm_page_size - 1)) == 0,
3733 "stack size not a multiple of page size");
3734
3735 initialize_performance_counter();
3736
3737 // Win95/Win98 scheduler bug work-around. The Win95/98 scheduler is
3738 // known to deadlock the system if the VM issues thread operations at
3739 // too high a frequency, e.g., when changing thread priorities.
3740 // The value 6000 seems to work well - no deadlocks have been noticed on the
3741 // test programs that we have seen experience this problem.
3742 if (!os::win32::is_nt()) {
3743 StarvationMonitorInterval = 6000;
3744 }
3745 }
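// Example of the version decoding above (illustrative): GetVersionEx() on
// Windows Server 2003 reports dwMajorVersion == 5 and dwMinorVersion == 2,
// so os_vers == 5 * 1000 + 2 == 5002 and _is_windows_2003 becomes true;
// _is_windows_server is derived from wProductType rather than the version.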
3746
3747
3748 HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf, int ebuflen) {
3749 char path[MAX_PATH];
3750 DWORD size;
3751 DWORD pathLen = (DWORD)sizeof(path);
3752 HINSTANCE result = NULL;
3753
3754 // only allow library name without path component
3755 assert(strchr(name, '\\') == NULL, "path not allowed");
3756 assert(strchr(name, ':') == NULL, "path not allowed");
3757 if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
3758 jio_snprintf(ebuf, ebuflen,
3759 "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
3760 return NULL;
3761 }
3762
3763 // search system directory
3764 if ((size = GetSystemDirectory(path, pathLen)) > 0) {
3765 strcat(path, "\\");
3766 strcat(path, name);
3767 if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3768 return result;
3769 }
3770 }
3771
3772 // try Windows directory
3773 if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
3774 strcat(path, "\\");
3775 strcat(path, name);
3776 if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3777 return result;
3778 }
3779 }
3780
3781 jio_snprintf(ebuf, ebuflen,
3782 "os::win32::load_windows_dll() cannot load %s from system directories.", name);
3783 return NULL;
3784 }
3785
3786 void os::win32::setmode_streams() {
3787 _setmode(_fileno(stdin), _O_BINARY);
3788 _setmode(_fileno(stdout), _O_BINARY);
3789 _setmode(_fileno(stderr), _O_BINARY);
3790 }
3791
3792
3793 bool os::is_debugger_attached() {
3794 return IsDebuggerPresent() ? true : false;
3795 }
3796
3797
3798 void os::wait_for_keypress_at_exit(void) {
3799 if (PauseAtExit) {
3800 fprintf(stderr, "Press any key to continue...\n");
3801 fgetc(stdin);
3802 }
3803 }
3804
3805
3806 int os::message_box(const char* title, const char* message) {
3807 int result = MessageBox(NULL, message, title,
3808 MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY);
3809 return result == IDYES;
3810 }
3811
3812 int os::allocate_thread_local_storage() {
3813 return TlsAlloc();
3814 }
3815
3816
3817 void os::free_thread_local_storage(int index) {
3818 TlsFree(index);
3819 }
3820
3821
3822 void os::thread_local_storage_at_put(int index, void* value) {
3823 TlsSetValue(index, value);
3824 assert(thread_local_storage_at(index) == value, "Just checking");
3825 }
3826
3827
3828 void* os::thread_local_storage_at(int index) {
3829 return TlsGetValue(index);
3830 }
3831
3832
3833 #ifndef PRODUCT
3834 #ifndef _WIN64
3835 // Helpers to check whether NX protection is enabled
3836 int nx_exception_filter(_EXCEPTION_POINTERS *pex) {
3837 if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
3838 pex->ExceptionRecord->NumberParameters > 0 &&
3839 pex->ExceptionRecord->ExceptionInformation[0] ==
3840 EXCEPTION_INFO_EXEC_VIOLATION) {
3841 return EXCEPTION_EXECUTE_HANDLER;
3842 }
3843 return EXCEPTION_CONTINUE_SEARCH;
3844 }
3845
3846 void nx_check_protection() {
3847 // If NX is enabled we'll get an exception calling into code on the stack
3848 char code[] = { (char)0xC3 }; // ret
3849 void *code_ptr = (void *)code;
3850 __try {
3851 __asm call code_ptr
3852 } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) {
3853 tty->print_raw_cr("NX protection detected.");
3854 }
3855 }
3856 #endif // _WIN64
3857 #endif // PRODUCT
3858
3859 // this is called _before_ the global arguments have been parsed
3860 void os::init(void) {
3861 _initial_pid = _getpid();
3862
3863 init_random(1234567);
3864
3865 win32::initialize_system_info();
3866 win32::setmode_streams();
3867 init_page_sizes((size_t) win32::vm_page_size());
3868
3869 // For better scalability on MP systems (must be called after initialize_system_info)
3870 #ifndef PRODUCT
3871 if (is_MP()) {
3872 NoYieldsInMicrolock = true;
3873 }
3874 #endif
3875 // This may be overridden later when argument processing is done.
3876 FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation,
3877 os::win32::is_windows_2003());
3878
3879 // Initialize main_process and main_thread
3880 main_process = GetCurrentProcess(); // Remember main_process is a pseudo handle
3881 if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
3882 &main_thread, THREAD_ALL_ACCESS, false, 0)) {
3883 fatal("DuplicateHandle failed\n");
3884 }
3885 main_thread_id = (int) GetCurrentThreadId();
3886 }
3887
3888 // To install functions for atexit processing
3889 extern "C" {
3890 static void perfMemory_exit_helper() {
3891 perfMemory_exit();
3892 }
3893 }
3894
3895 static jint initSock();
3896
3897 // this is called _after_ the global arguments have been parsed
3898 jint os::init_2(void) {
3899 // Allocate a single page and mark it as readable for safepoint polling
3900 address polling_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READONLY);
3901 guarantee( polling_page != NULL, "Reserve Failed for polling page");
3902
3903 address return_page = (address)VirtualAlloc(polling_page, os::vm_page_size(), MEM_COMMIT, PAGE_READONLY);
3904 guarantee( return_page != NULL, "Commit Failed for polling page");
3905
3906 os::set_polling_page( polling_page );
3907
3908 #ifndef PRODUCT
3909 if( Verbose && PrintMiscellaneous )
3910 tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
3911 #endif
3912
3913 if (!UseMembar) {
3914 address mem_serialize_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READWRITE);
3915 guarantee( mem_serialize_page != NULL, "Reserve Failed for memory serialize page");
3916
3917 return_page = (address)VirtualAlloc(mem_serialize_page, os::vm_page_size(), MEM_COMMIT, PAGE_READWRITE);
3918 guarantee( return_page != NULL, "Commit Failed for memory serialize page");
3919
3920 os::set_memory_serialize_page( mem_serialize_page );
3921
3922 #ifndef PRODUCT
3923 if(Verbose && PrintMiscellaneous)
3924 tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
3925 #endif
3926 }
3927
3928 // Setup Windows Exceptions
3929
3930 // for debugging float code generation bugs
3931 if (ForceFloatExceptions) {
3932 #ifndef _WIN64
3933 static long fp_control_word = 0;
3934 __asm { fstcw fp_control_word }
3935 // see Intel PPro Manual, Vol. 2, p 7-16
3936 const long precision = 0x20;
3937 const long underflow = 0x10;
3938 const long overflow = 0x08;
3939 const long zero_div = 0x04;
3940 const long denorm = 0x02;
3941 const long invalid = 0x01;
3942 fp_control_word |= invalid;
3943 __asm { fldcw fp_control_word }
3944 #endif
3945 }
3946
3947 // If stack_commit_size is 0, Windows will reserve the default size,
3948 // but only commit a small portion of it.
3949 size_t stack_commit_size = round_to(ThreadStackSize*K, os::vm_page_size());
3950 size_t default_reserve_size = os::win32::default_stack_size();
3951 size_t actual_reserve_size = stack_commit_size;
3952 if (stack_commit_size < default_reserve_size) {
3953 // If stack_commit_size == 0, we want this too
3954 actual_reserve_size = default_reserve_size;
3955 }
3956
3957 // Check minimum allowable stack size for thread creation and to initialize
3958 // the java system classes, including StackOverflowError - depends on page
3959 // size. Add a page for compiler2 recursion in main thread.
3960 // Add in 2*BytesPerWord times page size to account for VM stack during
3961 // class initialization depending on 32 or 64 bit VM.
3962 size_t min_stack_allowed =
3963 (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
3964 2*BytesPerWord COMPILER2_PRESENT(+1)) * os::vm_page_size();
3965 if (actual_reserve_size < min_stack_allowed) {
3966 tty->print_cr("\nThe stack size specified is too small. "
3967 "Specify at least %dk",
3968 min_stack_allowed / K);
3969 return JNI_ERR;
3970 }
3971
3972 JavaThread::set_stack_size_at_create(stack_commit_size);
3973
3974 // Calculate theoretical max. size of Threads to guard against artificial
3975 // out-of-memory situations, where all available address-space has been
3976 // reserved by thread stacks.
3977 assert(actual_reserve_size != 0, "Must have a stack");
3978
3979 // Calculate the thread limit when we should start doing Virtual Memory
3980 // banging. Currently this is when the threads have used all but 200Mb of space.
3981 //
3982 // TODO: consider performing a similar calculation for commit size instead
3983 // of reserve size, since on a 64-bit platform we'll run into that more
3984 // often than running out of virtual memory space. We can use the
3985 // lower value of the two calculations as the os_thread_limit.
3986 size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
3987 win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);
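// Illustrative arithmetic: on a 32-bit VM (BitsPerWord == 32) with 1M thread
// stacks this is (2048M - 200M) / 1M, i.e. roughly 1848 threads before the
// limit is reached.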
3988
3989 // At-exit methods are called in the reverse order of their registration.
3990 // There is no limit to the number of functions registered. atexit() does
3991 // not set errno.
3992
3993 if (PerfAllowAtExitRegistration) {
3994 // only register atexit functions if PerfAllowAtExitRegistration is set.
3995 // atexit functions can be delayed until process exit time, which
3996 // can be problematic for embedded VM situations. Embedded VMs should
3997 // call DestroyJavaVM() to assure that VM resources are released.
3998
3999 // note: perfMemory_exit_helper atexit function may be removed in
4000 // the future if the appropriate cleanup code can be added to the
4001 // VM_Exit VMOperation's doit method.
4002 if (atexit(perfMemory_exit_helper) != 0) {
4003 warning("os::init_2 atexit(perfMemory_exit_helper) failed");
4004 }
4005 }
4006
4007 #ifndef _WIN64
4008 // Print something if NX is enabled (win32 on AMD64)
4009 NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
4010 #endif
4011
4012 // initialize thread priority policy
4013 prio_init();
4014
4015 if (UseNUMA && !ForceNUMA) {
4016 UseNUMA = false; // We don't fully support this yet
4017 }
4018
4019 if (UseNUMAInterleaving) {
4020 // first check whether this Windows OS supports VirtualAllocExNuma, if not ignore this flag
4021 bool success = numa_interleaving_init();
4022 if (!success) UseNUMAInterleaving = false;
4023 }
4024
4025 if (initSock() != JNI_OK) {
4026 return JNI_ERR;
4027 }
4028
4029 return JNI_OK;
4030 }
4031
4032 void os::init_3(void) {
4033 return;
4034 }
4035
4036 // Mark the polling page as unreadable
4037 void os::make_polling_page_unreadable(void) {
4038 DWORD old_status;
4039 if( !VirtualProtect((char *)_polling_page, os::vm_page_size(), PAGE_NOACCESS, &old_status) )
4040 fatal("Could not disable polling page");
4041 };
4042
4043 // Mark the polling page as readable
4044 void os::make_polling_page_readable(void) {
4045 DWORD old_status;
4046 if( !VirtualProtect((char *)_polling_page, os::vm_page_size(), PAGE_READONLY, &old_status) )
4047 fatal("Could not enable polling page");
4048 };
4049
4050
4051 int os::stat(const char *path, struct stat *sbuf) {
4052 char pathbuf[MAX_PATH];
4053 if (strlen(path) > MAX_PATH - 1) {
4054 errno = ENAMETOOLONG;
4055 return -1;
4056 }
4057 os::native_path(strcpy(pathbuf, path));
4058 int ret = ::stat(pathbuf, sbuf);
4059 if (sbuf != NULL && UseUTCFileTimestamp) {
4060 // Fix for 6539723. st_mtime returned from stat() is dependent on
4061 // the system timezone and so can return different values for the
4062 // same file if/when daylight savings time changes. This adjustment
4063 // makes sure the same timestamp is returned regardless of the TZ.
4064 //
4065 // See:
4066 // http://msdn.microsoft.com/library/
4067 // default.asp?url=/library/en-us/sysinfo/base/
4068 // time_zone_information_str.asp
4069 // and
4070 // http://msdn.microsoft.com/library/default.asp?url=
4071 // /library/en-us/sysinfo/base/settimezoneinformation.asp
4072 //
4073 // NOTE: there is an insidious bug here: If the timezone is changed
4074 // after the call to stat() but before 'GetTimeZoneInformation()', then
4075 // the adjustment we do here will be wrong and we'll return the wrong
4076 // value (which will likely end up creating an invalid class data
4077 // archive). Absent a better API for this, or some time zone locking
4078 // mechanism, we'll have to live with this risk.
4079 TIME_ZONE_INFORMATION tz;
4080 DWORD tzid = GetTimeZoneInformation(&tz);
4081 int daylightBias =
4082 (tzid == TIME_ZONE_ID_DAYLIGHT) ? tz.DaylightBias : tz.StandardBias;
4083 sbuf->st_mtime += (tz.Bias + daylightBias) * 60;
4084 }
4085 return ret;
4086 }
4087
4088
4089 #define FT2INT64(ft) \
4090 ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime))
4091
4092
4093 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
4094 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
4095 // of a thread.
4096 //
4097 // current_thread_cpu_time() and thread_cpu_time(Thread*) returns
4098 // the fast estimate available on the platform.
4099
4100 // current_thread_cpu_time() is not optimized for Windows yet
4101 jlong os::current_thread_cpu_time() {
4102 // return user + sys since the cost is the same
4103 return os::thread_cpu_time(Thread::current(), true /* user+sys */);
4104 }
4105
4106 jlong os::thread_cpu_time(Thread* thread) {
4107 // consistent with what current_thread_cpu_time() returns.
4108 return os::thread_cpu_time(thread, true /* user+sys */);
4109 }
4110
4111 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
4112 return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
4113 }
4114
4115 jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) {
4116 // This code is copied from the classic VM -> hpi::sysThreadCPUTime.
4117 // If this function changes, os::is_thread_cpu_time_supported() should too.
4118 if (os::win32::is_nt()) {
4119 FILETIME CreationTime;
4120 FILETIME ExitTime;
4121 FILETIME KernelTime;
4122 FILETIME UserTime;
4123
4124 if ( GetThreadTimes(thread->osthread()->thread_handle(),
4125 &CreationTime, &ExitTime, &KernelTime, &UserTime) == 0)
4126 return -1;
4127 else
4128 if (user_sys_cpu_time) {
4129 return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100;
4130 } else {
4131 return FT2INT64(UserTime) * 100;
4132 }
4133 } else {
4134 return (jlong) timeGetTime() * 1000000;
4135 }
4136 }
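// Illustrative arithmetic for the conversion above: GetThreadTimes() reports
// FILETIME values in 100ns units, so FT2INT64(UserTime) * 100 yields
// nanoseconds; e.g. 1,500,000 units == 150ms == 150,000,000ns. The non-NT
// fallback converts timeGetTime()'s milliseconds to nanoseconds instead.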
4137
4138 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4139 info_ptr->max_value = ALL_64_BITS; // the max value -- all 64 bits
4140 info_ptr->may_skip_backward = false; // GetThreadTimes returns absolute time
4141 info_ptr->may_skip_forward = false; // GetThreadTimes returns absolute time
4142 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned
4143 }
4144
4145 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4146 info_ptr->max_value = ALL_64_BITS; // the max value -- all 64 bits
4147 info_ptr->may_skip_backward = false; // GetThreadTimes returns absolute time
4148 info_ptr->may_skip_forward = false; // GetThreadTimes returns absolute time
4149 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned
4150 }
4151
4152 bool os::is_thread_cpu_time_supported() {
4153 // see os::thread_cpu_time
4154 if (os::win32::is_nt()) {
4155 FILETIME CreationTime;
4156 FILETIME ExitTime;
4157 FILETIME KernelTime;
4158 FILETIME UserTime;
4159
4160 if ( GetThreadTimes(GetCurrentThread(),
4161 &CreationTime, &ExitTime, &KernelTime, &UserTime) == 0)
4162 return false;
4163 else
4164 return true;
4165 } else {
4166 return false;
4167 }
4168 }
4169
4170 // Windows doesn't provide a loadavg primitive so this is stubbed out for now.
4171 // It does have primitives (PDH API) to get CPU usage and run queue length.
4172 // "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length"
4173 // If we wanted to implement loadavg on Windows, we have a few options:
4174 //
4175 // a) Query CPU usage and run queue length and "fake" an answer by
4176 // returning the CPU usage if it's under 100%, and the run queue
4177 // length otherwise. It turns out that querying is pretty slow
4178 // on Windows, on the order of 200 microseconds on a fast machine.
4179 // Note that on Windows the CPU usage value is the % usage
4180 // since the last time the API was called (and the first call
4181 // returns 100%), so we'd have to deal with that as well.
4182 //
4183 // b) Sample the "fake" answer using a sampling thread and store
4184 // the answer in a global variable. The call to loadavg would
4185 // just return the value of the global, avoiding the slow query.
4186 //
4187 // c) Sample a better answer using exponential decay to smooth the
4188 // value. This is basically the algorithm used by UNIX kernels.
4189 //
4190 // Note that sampling thread starvation could affect both (b) and (c).
4191 int os::loadavg(double loadavg[], int nelem) {
4192 return -1;
4193 }
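// Illustrative sketch of option (a) above (not compiled into the VM; assumes
// the PDH API, i.e. #include <pdh.h> and linking against pdh.lib):
//
//   PDH_HQUERY query;
//   PDH_HCOUNTER cpu;
//   PdhOpenQuery(NULL, 0, &query);
//   PdhAddCounter(query, "\\Processor(_Total)\\% Processor Time", 0, &cpu);
//   PdhCollectQueryData(query);     // rate counters need two samples
//   Sleep(1000);
//   PdhCollectQueryData(query);
//   PDH_FMT_COUNTERVALUE val;
//   PdhGetFormattedCounterValue(cpu, PDH_FMT_DOUBLE, NULL, &val);
//   // val.doubleValue is the CPU usage (%) since the previous sample; the
//   // run queue length counter would be sampled the same way and reported
//   // when usage is pegged at 100%.
//   PdhCloseQuery(query);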
4194
4195
4196 // DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield()
4197 bool os::dont_yield() {
4198 return DontYieldALot;
4199 }
4200
4201 // This method is a slightly reworked copy of JDK's sysOpen
4202 // from src/windows/hpi/src/sys_api_md.c
4203
4204 int os::open(const char *path, int oflag, int mode) {
4205 char pathbuf[MAX_PATH];
4206
4207 if (strlen(path) > MAX_PATH - 1) {
4208 errno = ENAMETOOLONG;
4209 return -1;
4210 }
4211 os::native_path(strcpy(pathbuf, path));
4212 return ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode);
4213 }
4214
4215 FILE* os::open(int fd, const char* mode) {
4216 return ::_fdopen(fd, mode);
4217 }
4218
4219 // Is a (classpath) directory empty?
4220 bool os::dir_is_empty(const char* path) {
4221 WIN32_FIND_DATA fd;
4222 HANDLE f = FindFirstFile(path, &fd);
4223 if (f == INVALID_HANDLE_VALUE) {
4224 return true;
4225 }
4226 FindClose(f);
4227 return false;
4228 }
4229
4230 // create binary file, rewriting existing file if required
4231 int os::create_binary_file(const char* path, bool rewrite_existing) {
4232 int oflags = _O_CREAT | _O_WRONLY | _O_BINARY;
4233 if (!rewrite_existing) {
4234 oflags |= _O_EXCL;
4235 }
4236 return ::open(path, oflags, _S_IREAD | _S_IWRITE);
4237 }
4238
4239 // return current position of file pointer
4240 jlong os::current_file_offset(int fd) {
4241 return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR);
4242 }
4243
4244 // move file pointer to the specified offset
4245 jlong os::seek_to_file_offset(int fd, jlong offset) {
4246 return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET);
4247 }
4248
4249
4250 jlong os::lseek(int fd, jlong offset, int whence) {
4251 return (jlong) ::_lseeki64(fd, offset, whence);
4252 }
4253
4254 // This method is a slightly reworked copy of JDK's sysNativePath
4255 // from src/windows/hpi/src/path_md.c
4256
4257 /* Convert a pathname to native format. On win32, this involves forcing all
4258 separators to be '\\' rather than '/' (both are legal inputs, but Win95
4259 sometimes rejects '/') and removing redundant separators. The input path is
4260 assumed to have been converted into the character encoding used by the local
4261 system. Because this might be a double-byte encoding, care is taken to
4262 treat double-byte lead characters correctly.
4263
4264 This procedure modifies the given path in place, as the result is never
4265 longer than the original. There is no error return; this operation always
4266 succeeds. */
4267 char * os::native_path(char *path) {
4268 char *src = path, *dst = path, *end = path;
4269 char *colon = NULL; /* If a drive specifier is found, this will
4270 point to the colon following the drive
4271 letter */
4272
4273 /* Assumption: '/', '\\', ':', and drive letters are never lead bytes */
4274 assert(((!::IsDBCSLeadByte('/'))
4275 && (!::IsDBCSLeadByte('\\'))
4276 && (!::IsDBCSLeadByte(':'))),
4277 "Illegal lead byte");
4278
4279 /* Check for leading separators */
4280 #define isfilesep(c) ((c) == '/' || (c) == '\\')
4281 while (isfilesep(*src)) {
4282 src++;
4283 }
4284
4285 if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') {
4286 /* Remove leading separators if followed by drive specifier. This
4287 hack is necessary to support file URLs containing drive
4288 specifiers (e.g., "file://c:/path"). As a side effect,
4289 "/c:/path" can be used as an alternative to "c:/path". */
4290 *dst++ = *src++;
4291 colon = dst;
4292 *dst++ = ':';
4293 src++;
4294 } else {
4295 src = path;
4296 if (isfilesep(src[0]) && isfilesep(src[1])) {
4297 /* UNC pathname: Retain first separator; leave src pointed at
4298 second separator so that further separators will be collapsed
4299 into the second separator. The result will be a pathname
4300 beginning with "\\\\" followed (most likely) by a host name. */
4301 src = dst = path + 1;
4302 path[0] = '\\'; /* Force first separator to '\\' */
4303 }
4304 }
4305
4306 end = dst;
4307
4308 /* Remove redundant separators from remainder of path, forcing all
4309 separators to be '\\' rather than '/'. Also, single byte space
4310 characters are removed from the end of the path because those
4311 are not legal ending characters on this operating system.
4312 */
4313 while (*src != '\0') {
4314 if (isfilesep(*src)) {
4315 *dst++ = '\\'; src++;
4316 while (isfilesep(*src)) src++;
4317 if (*src == '\0') {
4318 /* Check for trailing separator */
4319 end = dst;
4320 if (colon == dst - 2) break; /* "z:\\" */
4321 if (dst == path + 1) break; /* "\\" */
4322 if (dst == path + 2 && isfilesep(path[0])) {
4323 /* "\\\\" is not collapsed to "\\" because "\\\\" marks the
4324 beginning of a UNC pathname. Even though it is not, by
4325 itself, a valid UNC pathname, we leave it as is in order
4326 to be consistent with the path canonicalizer as well
4327 as the win32 APIs, which treat this case as an invalid
4328 UNC pathname rather than as an alias for the root
4329 directory of the current drive. */
4330 break;
4331 }
4332 end = --dst; /* Path does not denote a root directory, so
4333 remove trailing separator */
4334 break;
4335 }
4336 end = dst;
4337 } else {
4338 if (::IsDBCSLeadByte(*src)) { /* Copy a double-byte character */
4339 *dst++ = *src++;
4340 if (*src) *dst++ = *src++;
4341 end = dst;
4342 } else { /* Copy a single-byte character */
4343 char c = *src++;
4344 *dst++ = c;
4345 /* Space is not a legal ending character */
4346 if (c != ' ') end = dst;
4347 }
4348 }
4349 }
4350
4351 *end = '\0';
4352
4353 /* For "z:", add "." to work around a bug in the C runtime library */
4354 if (colon == dst - 1) {
4355 path[2] = '.';
4356 path[3] = '\0';
4357 }
4358
4359 return path;
4360 }
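// Examples of the conversion above (illustrative inputs):
//   "/c:/path"       -> "c:\path"       leading separator dropped before a drive
//   "a//b///c"       -> "a\b\c"         redundant separators collapsed
//   "//host/share/"  -> "\\host\share"  UNC prefix kept, trailing separator dropped
//   "dir/file  "     -> "dir\file"      trailing spaces removed
//   "z:"             -> "z:."           C runtime workaround described above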
4361
4362 // This code is a copy of JDK's sysSetLength
4363 // from src/windows/hpi/src/sys_api_md.c
4364
4365 int os::ftruncate(int fd, jlong length) {
4366 HANDLE h = (HANDLE)::_get_osfhandle(fd);
4367 long high = (long)(length >> 32);
4368 DWORD ret;
4369
4370 if (h == (HANDLE)(-1)) {
4371 return -1;
4372 }
4373
4374 ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN);
4375 if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) {
4376 return -1;
4377 }
4378
4379 if (::SetEndOfFile(h) == FALSE) {
4380 return -1;
4381 }
4382
4383 return 0;
4384 }
4385
4386
4387 // This code is a copy of JDK's sysSync
4388 // from src/windows/hpi/src/sys_api_md.c
4389 // except for the legacy workaround for a bug in Win 98
4390
4391 int os::fsync(int fd) {
4392 HANDLE handle = (HANDLE)::_get_osfhandle(fd);
4393
4394 if ( (!::FlushFileBuffers(handle)) &&
4395 (GetLastError() != ERROR_ACCESS_DENIED) ) {
4396 /* from winerror.h */
4397 return -1;
4398 }
4399 return 0;
4400 }
4401
4402 static int nonSeekAvailable(int, long *);
4403 static int stdinAvailable(int, long *);
4404
4405 #define S_ISCHR(mode) (((mode) & _S_IFCHR) == _S_IFCHR)
4406 #define S_ISFIFO(mode) (((mode) & _S_IFIFO) == _S_IFIFO)
4407
4408 // This code is a copy of JDK's sysAvailable
4409 // from src/windows/hpi/src/sys_api_md.c
4410
4411 int os::available(int fd, jlong *bytes) {
4412 jlong cur, end;
4413 struct _stati64 stbuf64;
4414
4415 if (::_fstati64(fd, &stbuf64) >= 0) {
4416 int mode = stbuf64.st_mode;
4417 if (S_ISCHR(mode) || S_ISFIFO(mode)) {
4418 int ret;
4419 long lpbytes;
4420 if (fd == 0) {
4421 ret = stdinAvailable(fd, &lpbytes);
4422 } else {
4423 ret = nonSeekAvailable(fd, &lpbytes);
4424 }
4425 (*bytes) = (jlong)(lpbytes);
4426 return ret;
4427 }
4428 if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) {
4429 return FALSE;
4430 } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) {
4431 return FALSE;
4432 } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) {
4433 return FALSE;
4434 }
4435 *bytes = end - cur;
4436 return TRUE;
4437 } else {
4438 return FALSE;
4439 }
4440 }
4441
4442 // This code is a copy of JDK's nonSeekAvailable
4443 // from src/windows/hpi/src/sys_api_md.c
4444
4445 static int nonSeekAvailable(int fd, long *pbytes) {
4446 /* This is used for available on non-seekable devices
4447 * (like both named and anonymous pipes, such as pipes
4448 * connected to an exec'd process).
4449 * Standard Input is a special case.
4450 *
4451 */
4452 HANDLE han;
4453
4454 if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) {
4455 return FALSE;
4456 }
4457
4458 if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) {
4459 /* PeekNamedPipe fails when at EOF. In that case we
4460 * simply make *pbytes = 0 which is consistent with the
4461 * behavior we get on Solaris when an fd is at EOF.
4462 * The only alternative is to raise an Exception,
4463 * which isn't really warranted.
4464 */
4465 if (::GetLastError() != ERROR_BROKEN_PIPE) {
4466 return FALSE;
4467 }
4468 *pbytes = 0;
4469 }
4470 return TRUE;
4471 }
4472
4473 #define MAX_INPUT_EVENTS 2000
4474
4475 // This code is a copy of JDK's stdinAvailable
4476 // from src/windows/hpi/src/sys_api_md.c
4477
4478 static int stdinAvailable(int fd, long *pbytes) {
4479 HANDLE han;
4480 DWORD numEventsRead = 0; /* Number of events read from buffer */
4481 DWORD numEvents = 0; /* Number of events in buffer */
4482 DWORD i = 0; /* Loop index */
4483 DWORD curLength = 0; /* Position marker */
4484 DWORD actualLength = 0; /* Number of bytes readable */
4485 BOOL error = FALSE; /* Error holder */
4486 INPUT_RECORD *lpBuffer; /* Pointer to records of input events */
4487
4488 if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) {
4489 return FALSE;
4490 }
4491
4492 /* Construct an array of input records in the console buffer */
4493 error = ::GetNumberOfConsoleInputEvents(han, &numEvents);
4494 if (error == 0) {
4495 return nonSeekAvailable(fd, pbytes);
4496 }
4497
4498 /* lpBuffer must fit into 64K or else PeekConsoleInput fails */
4499 if (numEvents > MAX_INPUT_EVENTS) {
4500 numEvents = MAX_INPUT_EVENTS;
4501 }
4502
4503 lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal);
4504 if (lpBuffer == NULL) {
4505 return FALSE;
4506 }
4507
4508 error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead);
4509 if (error == 0) {
4510 os::free(lpBuffer, mtInternal);
4511 return FALSE;
4512 }
4513
4514 /* Examine input records for the number of bytes available */
4515 for(i=0; i<numEvents; i++) {
4516 if (lpBuffer[i].EventType == KEY_EVENT) {
4517
4518 KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *)
4519 &(lpBuffer[i].Event);
4520 if (keyRecord->bKeyDown == TRUE) {
4521 CHAR *keyPressed = (CHAR *) &(keyRecord->uChar);
4522 curLength++;
4523 if (*keyPressed == '\r') {
4524 actualLength = curLength;
4525 }
4526 }
4527 }
4528 }
4529
4530 if(lpBuffer != NULL) {
4531 os::free(lpBuffer, mtInternal);
4532 }
4533
4534 *pbytes = (long) actualLength;
4535 return TRUE;
4536 }
4537
4538 // Map a block of memory.
4539 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4540 char *addr, size_t bytes, bool read_only,
4541 bool allow_exec) {
4542 HANDLE hFile;
4543 char* base;
4544
4545 hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL,
4546 OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
4547 if (hFile == NULL) {
4548 if (PrintMiscellaneous && Verbose) {
4549 DWORD err = GetLastError();
4550 tty->print_cr("CreateFile() failed: GetLastError->%ld.", err);
4551 }
4552 return NULL;
4553 }
4554
4555 if (allow_exec) {
4556 // CreateFileMapping/MapViewOfFileEx can't map executable memory
4557 // unless it comes from a PE image (which the shared archive is not.)
4558 // Even VirtualProtect refuses to give execute access to mapped memory
4559 // that was not previously executable.
4560 //
4561 // Instead, stick the executable region in anonymous memory. Yuck.
4562 // Penalty is that ~4 pages will not be shareable - in the future
4563 // we might consider DLLizing the shared archive with a proper PE
4564 // header so that mapping executable + sharing is possible.
4565
4566 base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE,
4567 PAGE_READWRITE);
4568 if (base == NULL) {
4569 if (PrintMiscellaneous && Verbose) {
4570 DWORD err = GetLastError();
4571 tty->print_cr("VirtualAlloc() failed: GetLastError->%ld.", err);
4572 }
4573 CloseHandle(hFile);
4574 return NULL;
4575 }
4576
4577 DWORD bytes_read;
4578 OVERLAPPED overlapped;
4579 overlapped.Offset = (DWORD)file_offset;
4580 overlapped.OffsetHigh = 0;
4581 overlapped.hEvent = NULL;
4582 // ReadFile guarantees that if the return value is true, the requested
4583 // number of bytes were read before returning.
4584 bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0;
4585 if (!res) {
4586 if (PrintMiscellaneous && Verbose) {
4587 DWORD err = GetLastError();
4588 tty->print_cr("ReadFile() failed: GetLastError->%ld.", err);
4589 }
4590 release_memory(base, bytes);
4591 CloseHandle(hFile);
4592 return NULL;
4593 }
4594 } else {
4595 HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0,
4596 NULL /*file_name*/);
4597 if (hMap == NULL) {
4598 if (PrintMiscellaneous && Verbose) {
4599 DWORD err = GetLastError();
4600 tty->print_cr("CreateFileMapping() failed: GetLastError->%ld.", err);
4601 }
4602 CloseHandle(hFile);
4603 return NULL;
4604 }
4605
4606 DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY;
4607 base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset,
4608 (DWORD)bytes, addr);
4609 if (base == NULL) {
4610 if (PrintMiscellaneous && Verbose) {
4611 DWORD err = GetLastError();
4612 tty->print_cr("MapViewOfFileEx() failed: GetLastError->%ld.", err);
4613 }
4614 CloseHandle(hMap);
4615 CloseHandle(hFile);
4616 return NULL;
4617 }
4618
4619 if (CloseHandle(hMap) == 0) {
4620 if (PrintMiscellaneous && Verbose) {
4621 DWORD err = GetLastError();
4622 tty->print_cr("CloseHandle(hMap) failed: GetLastError->%ld.", err);
4623 }
4624 CloseHandle(hFile);
4625 return base;
4626 }
4627 }
4628
4629 if (allow_exec) {
4630 DWORD old_protect;
4631 DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE;
4632 bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0;
4633
4634 if (!res) {
4635 if (PrintMiscellaneous && Verbose) {
4636 DWORD err = GetLastError();
4637 tty->print_cr("VirtualProtect() failed: GetLastError->%ld.", err);
4638 }
4639 // Don't consider this a hard error, on IA32 even if the
4640 // VirtualProtect fails, we should still be able to execute
4641 CloseHandle(hFile);
4642 return base;
4643 }
4644 }
4645
4646 if (CloseHandle(hFile) == 0) {
4647 if (PrintMiscellaneous && Verbose) {
4648 DWORD err = GetLastError();
4649 tty->print_cr("CloseHandle(hFile) failed: GetLastError->%ld.", err);
4650 }
4651 return base;
4652 }
4653
4654 return base;
4655 }
4656
4657
4658 // Remap a block of memory.
4659 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
4660 char *addr, size_t bytes, bool read_only,
4661 bool allow_exec) {
4662 // This OS does not allow existing memory maps to be remapped so we
4663 // have to unmap the memory before we remap it.
4664 if (!os::unmap_memory(addr, bytes)) {
4665 return NULL;
4666 }
4667
4668 // There is a very small theoretical window between the unmap_memory()
4669 // call above and the map_memory() call below where a thread in native
4670 // code may be able to access an address that is no longer mapped.
4671
4672 return os::map_memory(fd, file_name, file_offset, addr, bytes,
4673 read_only, allow_exec);
4674 }
4675
4676
4677 // Unmap a block of memory.
4678 // Returns true=success, otherwise false.
4679
4680 bool os::pd_unmap_memory(char* addr, size_t bytes) {
4681 BOOL result = UnmapViewOfFile(addr);
4682 if (result == 0) {
4683 if (PrintMiscellaneous && Verbose) {
4684 DWORD err = GetLastError();
4685 tty->print_cr("UnmapViewOfFile() failed: GetLastError->%ld.", err);
4686 }
4687 return false;
4688 }
4689 return true;
4690 }
4691
4692 void os::pause() {
4693 char filename[MAX_PATH];
4694 if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4695 jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
4696 } else {
4697 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4698 }
4699
4700 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4701 if (fd != -1) {
4702 struct stat buf;
4703 ::close(fd);
4704 while (::stat(filename, &buf) == 0) {
4705 Sleep(100);
4706 }
4707 } else {
4708 jio_fprintf(stderr,
4709 "Could not open pause file '%s', continuing immediately.\n", filename);
4710 }
4711 }
4712
4713 os::WatcherThreadCrashProtection::WatcherThreadCrashProtection() {
4714 assert(Thread::current()->is_Watcher_thread(), "Must be WatcherThread");
4715 }
4716
4717 /*
4718 * See the caveats for this class in os_windows.hpp
4719 * Protects the callback call so that a raised OS EXCEPTION causes a jump back
4720 * into this method and returns false. If no OS EXCEPTION was raised, returns
4721 * true.
4722 * The callback is supposed to provide the method that should be protected.
4723 */
4724 bool os::WatcherThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
4725 assert(Thread::current()->is_Watcher_thread(), "Only for WatcherThread");
4726 assert(!WatcherThread::watcher_thread()->has_crash_protection(),
4727 "crash_protection already set?");
4728
4729 bool success = true;
4730 __try {
4731 WatcherThread::watcher_thread()->set_crash_protection(this);
4732 cb.call();
4733 } __except(EXCEPTION_EXECUTE_HANDLER) {
4734 // only for protection, nothing to do
4735 success = false;
4736 }
4737 WatcherThread::watcher_thread()->set_crash_protection(NULL);
4738 return success;
4739 }
4740
4741 // An Event wraps a win32 "CreateEvent" kernel handle.
4742 //
4743 // We have a number of choices regarding "CreateEvent" win32 handle leakage:
4744 //
4745 // 1: When a thread dies return the Event to the EventFreeList, clear the ParkHandle
4746 // field, and call CloseHandle() on the win32 event handle. Unpark() would
4747 // need to be modified to tolerate finding a NULL (invalid) win32 event handle.
4748 // In addition, an unpark() operation might fetch the handle field, but the
4749 // event could recycle between the fetch and the SetEvent() operation.
4750 // SetEvent() would either fail because the handle was invalid, or inadvertently work,
4751 // as the win32 handle value had been recycled. In an ideal world calling SetEvent()
4752 // on a stale but recycled handle would be harmless, but in practice this might
4753 // confuse other non-Sun code, so it's not a viable approach.
4754 //
4755 // 2: Once a win32 event handle is associated with an Event, it remains associated
4756 // with the Event. The event handle is never closed. This could be construed
4757 // as handle leakage, but only up to the maximum # of threads that have been extant
4758 // at any one time. This shouldn't be an issue, as windows platforms typically
4759 // permit a process to have hundreds of thousands of open handles.
4760 //
4761 // 3: Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
4762 // and release unused handles.
4763 //
4764 // 4: Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
4765 // It's not clear, however, that we wouldn't be trading one type of leak for another.
4766 //
4767 // 5. Use an RCU-like mechanism (Read-Copy Update).
4768 // Or perhaps something similar to Maged Michael's "Hazard pointers".
4769 //
4770 // We use (2).
4771 //
4772 // TODO-FIXME:
4773 // 1. Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
4774 // 2. Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
4775 // to recover from (or at least detect) the dreaded Windows 841176 bug.
4776 // 3. Collapse the interrupt_event, the JSR166 parker event, and the objectmonitor ParkEvent
4777 // into a single win32 CreateEvent() handle.
4778 //
4779 // _Event transitions in park()
4780 // -1 => -1 : illegal
4781 // 1 => 0 : pass - return immediately
4782 // 0 => -1 : block
4783 //
4784 // _Event serves as a restricted-range semaphore :
4785 // -1 : thread is blocked
4786 // 0 : neutral - thread is running or ready
4787 // 1 : signaled - thread is running or ready
4788 //
4789 // Another possible encoding of _Event would be
4790 // with explicit "PARKED" and "SIGNALED" bits.
4791
4792 int os::PlatformEvent::park (jlong Millis) {
4793 guarantee (_ParkHandle != NULL , "Invariant") ;
4794 guarantee (Millis > 0 , "Invariant") ;
4795 int v ;
4796
4797 // CONSIDER: defer assigning a CreateEvent() handle to the Event until
4798 // the initial park() operation.
4799
4800 for (;;) {
4801 v = _Event ;
4802 if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
4803 }
4804 guarantee ((v == 0) || (v == 1), "invariant") ;
4805 if (v != 0) return OS_OK ;
4806
4807 // Do this the hard way by blocking ...
4808 // TODO: consider a brief spin here, gated on the success of recent
4809 // spin attempts by this thread.
4810 //
4811 // We decompose long timeouts into series of shorter timed waits.
4812 // Evidently large timeout values passed in WaitForSingleObject() are problematic on some
4813 // versions of Windows. See EventWait() for details. This may be superstition. Or not.
4814 // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
4815 // with os::javaTimeNanos(). Furthermore, we assume that spurious returns from
4816 // ::WaitForSingleObject() caused by latent ::SetEvent() operations will tend
4817 // to happen early in the wait interval. Specifically, after a spurious wakeup (rv ==
4818 // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
4819 // for the already waited time. This policy does not admit any new outcomes.
4820 // In the future, however, we might want to track the accumulated wait time and
4821 // adjust Millis accordingly if we encounter a spurious wakeup.
4822
4823 const int MAXTIMEOUT = 0x10000000 ;
4824 DWORD rv = WAIT_TIMEOUT ;
4825 while (_Event < 0 && Millis > 0) {
4826 DWORD prd = Millis ; // set prd = MIN(Millis, MAXTIMEOUT)
4827 if (Millis > MAXTIMEOUT) {
4828 prd = MAXTIMEOUT ;
4829 }
4830 rv = ::WaitForSingleObject (_ParkHandle, prd) ;
4831 assert (rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed") ;
4832 if (rv == WAIT_TIMEOUT) {
4833 Millis -= prd ;
4834 }
4835 }
4836 v = _Event ;
4837 _Event = 0 ;
4838 // see comment at end of os::PlatformEvent::park() below:
4839 OrderAccess::fence() ;
4840 // If we encounter a nearly simultaneous timeout expiry and unpark()
4841 // we return OS_OK indicating we awoke via unpark().
4842 // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
4843 return (v >= 0) ? OS_OK : OS_TIMEOUT ;
4844 }
4845
4846 void os::PlatformEvent::park () {
4847 guarantee (_ParkHandle != NULL, "Invariant") ;
4848 // Invariant: Only the thread associated with the Event/PlatformEvent
4849 // may call park().
4850 int v ;
4851 for (;;) {
4852 v = _Event ;
4853 if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
4854 }
4855 guarantee ((v == 0) || (v == 1), "invariant") ;
4856 if (v != 0) return ;
4857
4858 // Do this the hard way by blocking ...
4859 // TODO: consider a brief spin here, gated on the success of recent
4860 // spin attempts by this thread.
4861 while (_Event < 0) {
4862 DWORD rv = ::WaitForSingleObject (_ParkHandle, INFINITE) ;
4863 assert (rv == WAIT_OBJECT_0, "WaitForSingleObject failed") ;
4864 }
4865
4866 // Usually we'll find _Event == 0 at this point, but as
4867 // an optional optimization we clear it, just in case
4868 // multiple unpark() operations drove _Event up to 1.
4869 _Event = 0 ;
4870 OrderAccess::fence() ;
4871 guarantee (_Event >= 0, "invariant") ;
4872 }
4873
4874 void os::PlatformEvent::unpark() {
4875 guarantee (_ParkHandle != NULL, "Invariant") ;
4876
4877 // Transitions for _Event:
4878 // 0 :=> 1
4879 // 1 :=> 1
4880 // -1 :=> either 0 or 1; must signal target thread
4881 // That is, we can safely transition _Event from -1 to either
4882 // 0 or 1. Forcing 1 is slightly more efficient for back-to-back
4883 // unpark() calls.
4884 // See also: "Semaphores in Plan 9" by Mullender & Cox
4885 //
4886 // Note: Forcing a transition from "-1" to "1" on an unpark() means
4887 // that it will take two back-to-back park() calls for the owning
4888 // thread to block. This has the benefit of forcing a spurious return
4889 // from the first park() call after an unpark() call which will help
4890 // shake out uses of park() and unpark() without condition variables.
4891
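// Atomically publish the signaled state. If the previous value was 0 or 1 the
// associated thread is not blocked, so no kernel-level SetEvent() is needed;
// only the -1 => 1 transition must wake a waiter.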
4892 if (Atomic::xchg(1, &_Event) >= 0) return;
4893
4894 ::SetEvent(_ParkHandle);
4895 }
4896
4897
4898 // JSR166
4899 // -------------------------------------------------------
4900
4901 /*
4902 * The Windows implementation of Park is very straightforward: Basic
4903 * operations on Win32 Events turn out to have the right semantics to
4904 * use them directly. We opportunistically reuse the event inherited
4905 * from Monitor.
4906 */
4907
4908
4909 void Parker::park(bool isAbsolute, jlong time) {
4910 guarantee (_ParkEvent != NULL, "invariant") ;
4911 // First, demultiplex/decode time arguments
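// Examples (illustrative):
//   isAbsolute == false, time == 0         => wait forever (INFINITE)
//   isAbsolute == false, time == 2500000   => 2 ms relative wait (nanos coarsened to millis)
//   isAbsolute == true,  time in the past  => return immediately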
4912 if (time < 0) { // don't wait
4913 return;
4914 }
4915 else if (time == 0 && !isAbsolute) {
4916 time = INFINITE;
4917 }
4918 else if (isAbsolute) {
4919 time -= os::javaTimeMillis(); // convert to relative time
4920 if (time <= 0) // already elapsed
4921 return;
4922 }
4923 else { // relative
4924 time /= 1000000; // Must coarsen from nanos to millis
4925 if (time == 0) // Wait for the minimal time unit if zero
4926 time = 1;
4927 }
4928
4929 JavaThread* thread = (JavaThread*)(Thread::current());
4930 assert(thread->is_Java_thread(), "Must be JavaThread");
4931 JavaThread *jt = (JavaThread *)thread;
4932
4933 // Don't wait if interrupted or already triggered
4934 if (Thread::is_interrupted(thread, false) ||
4935 WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
4936 ResetEvent(_ParkEvent);
4937 return;
4938 }
4939 else {
4940 ThreadBlockInVM tbivm(jt);
4941 OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
4942 jt->set_suspend_equivalent();
4943
4944 WaitForSingleObject(_ParkEvent, time);
4945 ResetEvent(_ParkEvent);
4946
4947 // If externally suspended while waiting, re-suspend
4948 if (jt->handle_special_suspend_equivalent_condition()) {
4949 jt->java_suspend_self();
4950 }
4951 }
4952 }
4953
4954 void Parker::unpark() {
4955 guarantee (_ParkEvent != NULL, "invariant") ;
4956 SetEvent(_ParkEvent);
4957 }
4958
4959 // Run the specified command in a separate process. Return its exit value,
4960 // or -1 on failure (e.g. can't create a new process).
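// Typical use (illustrative): os::fork_and_exec("cmd.exe /c echo hello") blocks until
// the child exits and returns its exit code. Passing NULL as the application name
// lets CreateProcess() resolve the executable from the command line itself.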
4961 int os::fork_and_exec(char* cmd) {
4962 STARTUPINFO si;
4963 PROCESS_INFORMATION pi;
4964
4965 memset(&si, 0, sizeof(si));
4966 si.cb = sizeof(si);
4967 memset(&pi, 0, sizeof(pi));
4968 BOOL rslt = CreateProcess(NULL, // executable name - use command line
4969 cmd, // command line
4970 NULL, // process security attribute
4971 NULL, // thread security attribute
4972 TRUE, // inherits system handles
4973 0, // no creation flags
4974 NULL, // use parent's environment block
4975 NULL, // use parent's starting directory
4976 &si, // (in) startup information
4977 &pi); // (out) process information
4978
4979 if (rslt) {
4980 // Wait until child process exits.
4981 WaitForSingleObject(pi.hProcess, INFINITE);
4982
4983 DWORD exit_code;
4984 GetExitCodeProcess(pi.hProcess, &exit_code);
4985
4986 // Close process and thread handles.
4987 CloseHandle(pi.hProcess);
4988 CloseHandle(pi.hThread);
4989
4990 return (int)exit_code;
4991 } else {
4992 return -1;
4993 }
4994 }
4995
4996 //--------------------------------------------------------------------------------------------------
4997 // Non-product code
4998
4999 static int mallocDebugIntervalCounter = 0;
5000 static int mallocDebugCounter = 0;
5001 bool os::check_heap(bool force) {
5002 if (++mallocDebugCounter < MallocVerifyStart && !force) return true;
5003 if (++mallocDebugIntervalCounter >= MallocVerifyInterval || force) {
5004 // Note: HeapValidate executes two hardware breakpoints when it finds something
5005 // wrong; at these points, eax contains the address of the offending block (I think).
5006 // To get to the explicit error message(s) below, just continue twice.
5007 HANDLE heap = GetProcessHeap();
5008 { HeapLock(heap);
5009 PROCESS_HEAP_ENTRY phe;
5010 phe.lpData = NULL;
5011 while (HeapWalk(heap, &phe) != 0) {
5012 if ((phe.wFlags & PROCESS_HEAP_ENTRY_BUSY) &&
5013 !HeapValidate(heap, 0, phe.lpData)) {
5014 tty->print_cr("C heap has been corrupted (time: %d allocations)", mallocDebugCounter);
5015 tty->print_cr("corrupted block near address %#x, length %d", phe.lpData, phe.cbData);
5016 fatal("corrupted C heap");
5017 }
5018 }
5019 DWORD err = GetLastError();
5020 if (err != ERROR_NO_MORE_ITEMS && err != ERROR_CALL_NOT_IMPLEMENTED) {
5021 fatal(err_msg("heap walk aborted with error %d", err));
5022 }
5023 HeapUnlock(heap);
5024 }
5025 mallocDebugIntervalCounter = 0;
5026 }
5027 return true;
5028 }
5029
5030
5031 bool os::find(address addr, outputStream* st) {
5032 // Nothing yet
5033 return false;
5034 }
5035
5036 LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) {
5037 DWORD exception_code = e->ExceptionRecord->ExceptionCode;
5038
5039 if ( exception_code == EXCEPTION_ACCESS_VIOLATION ) {
5040 JavaThread* thread = (JavaThread*)ThreadLocalStorage::get_thread_slow();
5041 PEXCEPTION_RECORD exceptionRecord = e->ExceptionRecord;
5042 address addr = (address) exceptionRecord->ExceptionInformation[1];
5043
5044 if (os::is_memory_serialize_page(thread, addr))
5045 return EXCEPTION_CONTINUE_EXECUTION;
5046 }
5047
5048 return EXCEPTION_CONTINUE_SEARCH;
5049 }
5050
5051 // We don't build a headless jre for Windows
5052 bool os::is_headless_jre() { return false; }
5053
5054 static jint initSock() {
5055 WSADATA wsadata;
5056
5057 if (!os::WinSock2Dll::WinSock2Available()) {
5058 jio_fprintf(stderr, "Could not load Winsock (error: %d)\n",
5059 ::GetLastError());
5060 return JNI_ERR;
5061 }
5062
5063 if (os::WinSock2Dll::WSAStartup(MAKEWORD(2,2), &wsadata) != 0) {
5064 jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
5065 ::GetLastError());
5066 return JNI_ERR;
5067 }
5068 return JNI_OK;
5069 }
5070
5071 struct hostent* os::get_host_by_name(char* name) {
5072 return (struct hostent*)os::WinSock2Dll::gethostbyname(name);
5073 }
5074
5075 int os::socket_close(int fd) {
5076 return ::closesocket(fd);
5077 }
5078
5079 int os::socket_available(int fd, jint *pbytes) {
5080 int ret = ::ioctlsocket(fd, FIONREAD, (u_long*)pbytes);
5081 return (ret < 0) ? 0 : 1;
5082 }
5083
5084 int os::socket(int domain, int type, int protocol) {
5085 return ::socket(domain, type, protocol);
5086 }
5087
5088 int os::listen(int fd, int count) {
5089 return ::listen(fd, count);
5090 }
5091
5092 int os::connect(int fd, struct sockaddr* him, socklen_t len) {
5093 return ::connect(fd, him, len);
5094 }
5095
5096 int os::accept(int fd, struct sockaddr* him, socklen_t* len) {
5097 return ::accept(fd, him, len);
5098 }
5099
5100 int os::sendto(int fd, char* buf, size_t len, uint flags,
5101 struct sockaddr* to, socklen_t tolen) {
5102
5103 return ::sendto(fd, buf, (int)len, flags, to, tolen);
5104 }
5105
5106 int os::recvfrom(int fd, char *buf, size_t nBytes, uint flags,
5107 sockaddr* from, socklen_t* fromlen) {
5108
5109 return ::recvfrom(fd, buf, (int)nBytes, flags, from, fromlen);
5110 }
5111
5112 int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
5113 return ::recv(fd, buf, (int)nBytes, flags);
5114 }
5115
5116 int os::send(int fd, char* buf, size_t nBytes, uint flags) {
5117 return ::send(fd, buf, (int)nBytes, flags);
5118 }
5119
5120 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
5121 return ::send(fd, buf, (int)nBytes, flags);
5122 }
5123
5124 int os::timeout(int fd, long timeout) {
5125 fd_set tbl;
5126 struct timeval t;
5127
5128 t.tv_sec = timeout / 1000;
5129 t.tv_usec = (timeout % 1000) * 1000;
5130
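// Winsock's fd_set is a counted array of SOCKETs, so it can be filled in directly
// here instead of via FD_ZERO/FD_SET; the first argument to select() (nfds) is
// ignored on Windows and is present only for source compatibility.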
5131 tbl.fd_count = 1;
5132 tbl.fd_array[0] = fd;
5133
5134 return ::select(1, &tbl, 0, 0, &t);
5135 }
5136
5137 int os::get_host_name(char* name, int namelen) {
5138 return ::gethostname(name, namelen);
5139 }
5140
5141 int os::socket_shutdown(int fd, int howto) {
5142 return ::shutdown(fd, howto);
5143 }
5144
5145 int os::bind(int fd, struct sockaddr* him, socklen_t len) {
5146 return ::bind(fd, him, len);
5147 }
5148
5149 int os::get_sock_name(int fd, struct sockaddr* him, socklen_t* len) {
5150 return ::getsockname(fd, him, len);
5151 }
5152
5153 int os::get_sock_opt(int fd, int level, int optname,
5154 char* optval, socklen_t* optlen) {
5155 return ::getsockopt(fd, level, optname, optval, optlen);
5156 }
5157
5158 int os::set_sock_opt(int fd, int level, int optname,
5159 const char* optval, socklen_t optlen) {
5160 return ::setsockopt(fd, level, optname, optval, optlen);
5161 }
5162
5163 // WINDOWS CONTEXT Flags for THREAD_SAMPLING
5164 #if defined(IA32)
5165 # define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
5166 #elif defined (AMD64)
5167 # define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
5168 #endif
5169
5170 // returns true if thread could be suspended,
5171 // false otherwise
5172 static bool do_suspend(HANDLE* h) {
5173 if (h != NULL) {
5174 if (SuspendThread(*h) != ~0) {
5175 return true;
5176 }
5177 }
5178 return false;
5179 }
5180
5181 // resume the thread
5182 // calling resume on an active thread is a no-op
5183 static void do_resume(HANDLE* h) {
5184 if (h != NULL) {
5185 ResumeThread(*h);
5186 }
5187 }
5188
5189 // retrieve a suspend/resume context capable handle
5190 // from the tid. Caller validates handle return value.
5191 void get_thread_handle_for_extended_context(HANDLE* h, OSThread::thread_id_t tid) {
5192 if (h != NULL) {
5193 *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
5194 }
5195 }
5196
5197 //
5198 // Thread sampling implementation
5199 //
5200 void os::SuspendedThreadTask::internal_do_task() {
5201 CONTEXT ctxt;
5202 HANDLE h = NULL;
5203
5204 // get context capable handle for thread
5205 get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());
5206
5207 // sanity
5208 if (h == NULL || h == INVALID_HANDLE_VALUE) {
5209 return;
5210 }
5211
5212 // suspend the thread
5213 if (do_suspend(&h)) {
5214 ctxt.ContextFlags = sampling_context_flags;
5215 // get thread context
5216 GetThreadContext(h, &ctxt);
5217 SuspendedThreadTaskContext context(_thread, &ctxt);
5218 // pass context to Thread Sampling impl
5219 do_task(context);
5220 // resume thread
5221 do_resume(&h);
5222 }
5223
5224 // close handle
5225 CloseHandle(h);
5226 }
5227
5228
5229 // Kernel32 API
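// The entry points below may be absent on older Windows versions, so they are
// resolved lazily with GetProcAddress() and guarded by *Available() queries
// rather than being linked against directly.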
5230 typedef SIZE_T (WINAPI* GetLargePageMinimum_Fn)(void);
5231 typedef LPVOID (WINAPI *VirtualAllocExNuma_Fn) (HANDLE, LPVOID, SIZE_T, DWORD, DWORD, DWORD);
5232 typedef BOOL (WINAPI *GetNumaHighestNodeNumber_Fn) (PULONG);
5233 typedef BOOL (WINAPI *GetNumaNodeProcessorMask_Fn) (UCHAR, PULONGLONG);
5234 typedef USHORT (WINAPI* RtlCaptureStackBackTrace_Fn)(ULONG, ULONG, PVOID*, PULONG);
5235
5236 GetLargePageMinimum_Fn os::Kernel32Dll::_GetLargePageMinimum = NULL;
5237 VirtualAllocExNuma_Fn os::Kernel32Dll::_VirtualAllocExNuma = NULL;
5238 GetNumaHighestNodeNumber_Fn os::Kernel32Dll::_GetNumaHighestNodeNumber = NULL;
5239 GetNumaNodeProcessorMask_Fn os::Kernel32Dll::_GetNumaNodeProcessorMask = NULL;
5240 RtlCaptureStackBackTrace_Fn os::Kernel32Dll::_RtlCaptureStackBackTrace = NULL;
5241
5242
5243 BOOL os::Kernel32Dll::initialized = FALSE;
5244 SIZE_T os::Kernel32Dll::GetLargePageMinimum() {
5245 assert(initialized && _GetLargePageMinimum != NULL,
5246 "GetLargePageMinimumAvailable() not yet called");
5247 return _GetLargePageMinimum();
5248 }
5249
5250 BOOL os::Kernel32Dll::GetLargePageMinimumAvailable() {
5251 if (!initialized) {
5252 initialize();
5253 }
5254 return _GetLargePageMinimum != NULL;
5255 }
5256
5257 BOOL os::Kernel32Dll::NumaCallsAvailable() {
5258 if (!initialized) {
5259 initialize();
5260 }
5261 return _VirtualAllocExNuma != NULL;
5262 }
5263
5264 LPVOID os::Kernel32Dll::VirtualAllocExNuma(HANDLE hProc, LPVOID addr, SIZE_T bytes, DWORD flags, DWORD prot, DWORD node) {
5265 assert(initialized && _VirtualAllocExNuma != NULL,
5266 "NUMACallsAvailable() not yet called");
5267
5268 return _VirtualAllocExNuma(hProc, addr, bytes, flags, prot, node);
5269 }
5270
5271 BOOL os::Kernel32Dll::GetNumaHighestNodeNumber(PULONG ptr_highest_node_number) {
5272 assert(initialized && _GetNumaHighestNodeNumber != NULL,
5273 "NUMACallsAvailable() not yet called");
5274
5275 return _GetNumaHighestNodeNumber(ptr_highest_node_number);
5276 }
5277
5278 BOOL os::Kernel32Dll::GetNumaNodeProcessorMask(UCHAR node, PULONGLONG proc_mask) {
5279 assert(initialized && _GetNumaNodeProcessorMask != NULL,
5280 "NUMACallsAvailable() not yet called");
5281
5282 return _GetNumaNodeProcessorMask(node, proc_mask);
5283 }
5284
5285 USHORT os::Kernel32Dll::RtlCaptureStackBackTrace(ULONG FrameToSkip,
5286 ULONG FrameToCapture, PVOID* BackTrace, PULONG BackTraceHash) {
5287 if (!initialized) {
5288 initialize();
5289 }
5290
5291 if (_RtlCaptureStackBackTrace != NULL) {
5292 return _RtlCaptureStackBackTrace(FrameToSkip, FrameToCapture,
5293 BackTrace, BackTraceHash);
5294 } else {
5295 return 0;
5296 }
5297 }
5298
5299 void os::Kernel32Dll::initializeCommon() {
5300 if (!initialized) {
5301 HMODULE handle = ::GetModuleHandle("Kernel32.dll");
5302 assert(handle != NULL, "Just check");
5303 _GetLargePageMinimum = (GetLargePageMinimum_Fn)::GetProcAddress(handle, "GetLargePageMinimum");
5304 _VirtualAllocExNuma = (VirtualAllocExNuma_Fn)::GetProcAddress(handle, "VirtualAllocExNuma");
5305 _GetNumaHighestNodeNumber = (GetNumaHighestNodeNumber_Fn)::GetProcAddress(handle, "GetNumaHighestNodeNumber");
5306 _GetNumaNodeProcessorMask = (GetNumaNodeProcessorMask_Fn)::GetProcAddress(handle, "GetNumaNodeProcessorMask");
5307 _RtlCaptureStackBackTrace = (RtlCaptureStackBackTrace_Fn)::GetProcAddress(handle, "RtlCaptureStackBackTrace");
5308 initialized = TRUE;
5309 }
5310 }
5311
5312
5313
5314 #ifndef JDK6_OR_EARLIER
5315
5316 void os::Kernel32Dll::initialize() {
5317 initializeCommon();
5318 }
5319
5320
5321 // Kernel32 API
5322 inline BOOL os::Kernel32Dll::SwitchToThread() {
5323 return ::SwitchToThread();
5324 }
5325
5326 inline BOOL os::Kernel32Dll::SwitchToThreadAvailable() {
5327 return true;
5328 }
5329
5330 // Help tools
5331 inline BOOL os::Kernel32Dll::HelpToolsAvailable() {
5332 return true;
5333 }
5334
5335 inline HANDLE os::Kernel32Dll::CreateToolhelp32Snapshot(DWORD dwFlags,DWORD th32ProcessId) {
5336 return ::CreateToolhelp32Snapshot(dwFlags, th32ProcessId);
5337 }
5338
5339 inline BOOL os::Kernel32Dll::Module32First(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
5340 return ::Module32First(hSnapshot, lpme);
5341 }
5342
5343 inline BOOL os::Kernel32Dll::Module32Next(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
5344 return ::Module32Next(hSnapshot, lpme);
5345 }
5346
5347
5348 inline BOOL os::Kernel32Dll::GetNativeSystemInfoAvailable() {
5349 return true;
5350 }
5351
5352 inline void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo) {
5353 ::GetNativeSystemInfo(lpSystemInfo);
5354 }
5355
5356 // PSAPI API
5357 inline BOOL os::PSApiDll::EnumProcessModules(HANDLE hProcess, HMODULE *lpModule, DWORD cb, LPDWORD lpcbNeeded) {
5358 return ::EnumProcessModules(hProcess, lpModule, cb, lpcbNeeded);
5359 }
5360
5361 inline DWORD os::PSApiDll::GetModuleFileNameEx(HANDLE hProcess, HMODULE hModule, LPTSTR lpFilename, DWORD nSize) {
5362 return ::GetModuleFileNameEx(hProcess, hModule, lpFilename, nSize);
5363 }
5364
5365 inline BOOL os::PSApiDll::GetModuleInformation(HANDLE hProcess, HMODULE hModule, LPMODULEINFO lpmodinfo, DWORD cb) {
5366 return ::GetModuleInformation(hProcess, hModule, lpmodinfo, cb);
5367 }
5368
5369 inline BOOL os::PSApiDll::PSApiAvailable() {
5370 return true;
5371 }
5372
5373
5374 // WinSock2 API
5375 inline BOOL os::WinSock2Dll::WSAStartup(WORD wVersionRequested, LPWSADATA lpWSAData) {
5376 return ::WSAStartup(wVersionRequested, lpWSAData);
5377 }
5378
5379 inline struct hostent* os::WinSock2Dll::gethostbyname(const char *name) {
5380 return ::gethostbyname(name);
5381 }
5382
5383 inline BOOL os::WinSock2Dll::WinSock2Available() {
5384 return true;
5385 }
5386
5387 // Advapi API
5388 inline BOOL os::Advapi32Dll::AdjustTokenPrivileges(HANDLE TokenHandle,
5389 BOOL DisableAllPrivileges, PTOKEN_PRIVILEGES NewState, DWORD BufferLength,
5390 PTOKEN_PRIVILEGES PreviousState, PDWORD ReturnLength) {
5391 return ::AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState,
5392 BufferLength, PreviousState, ReturnLength);
5393 }
5394
5395 inline BOOL os::Advapi32Dll::OpenProcessToken(HANDLE ProcessHandle, DWORD DesiredAccess,
5396 PHANDLE TokenHandle) {
5397 return ::OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle);
5398 }
5399
5400 inline BOOL os::Advapi32Dll::LookupPrivilegeValue(LPCTSTR lpSystemName, LPCTSTR lpName, PLUID lpLuid) {
5401 return ::LookupPrivilegeValue(lpSystemName, lpName, lpLuid);
5402 }
5403
5404 inline BOOL os::Advapi32Dll::AdvapiAvailable() {
5405 return true;
5406 }
5407
5408 void* os::get_default_process_handle() {
5409 return (void*)GetModuleHandle(NULL);
5410 }
5411
5412 // Builds a platform-dependent Agent_OnLoad_<lib_name> function name
5413 // which is used to find statically linked-in agents.
5414 // Additionally, for Windows, __stdcall name decoration is taken into account.
5415 // Parameters:
5416 // sym_name: Symbol in library we are looking for
5417 // lib_name: Name of library to look in, NULL for shared libs.
5418 // is_absolute_path == true if lib_name is absolute path to agent
5419 // such as "C:/a/b/L.dll"
5420 // == false if only the base name of the library is passed in
5421 // such as "L"
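// Examples (illustrative):
//   sym_name = "_Agent_OnLoad@16", lib_name = "L"   =>  "_Agent_OnLoad_L@16"
//   sym_name = "Agent_OnLoad",     lib_name = "L"   =>  "Agent_OnLoad_L"
//   sym_name = "Agent_OnLoad",     lib_name = NULL  =>  "Agent_OnLoad"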
5422 char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
5423 bool is_absolute_path) {
5424 char *agent_entry_name;
5425 size_t len;
5426 size_t name_len;
5427 size_t prefix_len = strlen(JNI_LIB_PREFIX);
5428 size_t suffix_len = strlen(JNI_LIB_SUFFIX);
5429 const char *start;
5430
5431 if (lib_name != NULL) {
5432 len = name_len = strlen(lib_name);
5433 if (is_absolute_path) {
5434 // Need to strip path, prefix and suffix
5435 if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
5436 lib_name = ++start;
5437 } else {
5438 // Need to check for drive prefix
5439 if ((start = strchr(lib_name, ':')) != NULL) {
5440 lib_name = ++start;
5441 }
5442 }
5443 if (len <= (prefix_len + suffix_len)) {
5444 return NULL;
5445 }
5446 lib_name += prefix_len;
5447 name_len = strlen(lib_name) - suffix_len;
5448 }
5449 }
5450 len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
5451 agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
5452 if (agent_entry_name == NULL) {
5453 return NULL;
5454 }
5455 if (lib_name != NULL) {
5456 const char *p = strrchr(sym_name, '@');
5457 if (p != NULL && p != sym_name) {
5458 // sym_name == _Agent_OnLoad@XX
5459 strncpy(agent_entry_name, sym_name, (p - sym_name));
5460 agent_entry_name[(p-sym_name)] = '\0';
5461 // agent_entry_name == _Agent_OnLoad
5462 strcat(agent_entry_name, "_");
5463 strncat(agent_entry_name, lib_name, name_len);
5464 strcat(agent_entry_name, p);
5465 // agent_entry_name == _Agent_OnLoad_lib_name@XX
5466 } else {
5467 strcpy(agent_entry_name, sym_name);
5468 strcat(agent_entry_name, "_");
5469 strncat(agent_entry_name, lib_name, name_len);
5470 }
5471 } else {
5472 strcpy(agent_entry_name, sym_name);
5473 }
5474 return agent_entry_name;
5475 }
5476
5477 #else
5478 // Kernel32 API
5479 typedef BOOL (WINAPI* SwitchToThread_Fn)(void);
5480 typedef HANDLE (WINAPI* CreateToolhelp32Snapshot_Fn)(DWORD,DWORD);
5481 typedef BOOL (WINAPI* Module32First_Fn)(HANDLE,LPMODULEENTRY32);
5482 typedef BOOL (WINAPI* Module32Next_Fn)(HANDLE,LPMODULEENTRY32);
5483 typedef void (WINAPI* GetNativeSystemInfo_Fn)(LPSYSTEM_INFO);
5484
5485 SwitchToThread_Fn os::Kernel32Dll::_SwitchToThread = NULL;
5486 CreateToolhelp32Snapshot_Fn os::Kernel32Dll::_CreateToolhelp32Snapshot = NULL;
5487 Module32First_Fn os::Kernel32Dll::_Module32First = NULL;
5488 Module32Next_Fn os::Kernel32Dll::_Module32Next = NULL;
5489 GetNativeSystemInfo_Fn os::Kernel32Dll::_GetNativeSystemInfo = NULL;
5490
5491 void os::Kernel32Dll::initialize() {
5492 if (!initialized) {
5493 HMODULE handle = ::GetModuleHandle("Kernel32.dll");
5494 assert(handle != NULL, "Just check");
5495
5496 _SwitchToThread = (SwitchToThread_Fn)::GetProcAddress(handle, "SwitchToThread");
5497 _CreateToolhelp32Snapshot = (CreateToolhelp32Snapshot_Fn)
5498 ::GetProcAddress(handle, "CreateToolhelp32Snapshot");
5499 _Module32First = (Module32First_Fn)::GetProcAddress(handle, "Module32First");
5500 _Module32Next = (Module32Next_Fn)::GetProcAddress(handle, "Module32Next");
5501 _GetNativeSystemInfo = (GetNativeSystemInfo_Fn)::GetProcAddress(handle, "GetNativeSystemInfo");
5502 initializeCommon(); // resolve the functions that always need resolving
5503
5504 initialized = TRUE;
5505 }
5506 }
5507
5508 BOOL os::Kernel32Dll::SwitchToThread() {
5509 assert(initialized && _SwitchToThread != NULL,
5510 "SwitchToThreadAvailable() not yet called");
5511 return _SwitchToThread();
5512 }
5513
5514
5515 BOOL os::Kernel32Dll::SwitchToThreadAvailable() {
5516 if (!initialized) {
5517 initialize();
5518 }
5519 return _SwitchToThread != NULL;
5520 }
5521
5522 // Help tools
5523 BOOL os::Kernel32Dll::HelpToolsAvailable() {
5524 if (!initialized) {
5525 initialize();
5526 }
5527 return _CreateToolhelp32Snapshot != NULL &&
5528 _Module32First != NULL &&
5529 _Module32Next != NULL;
5530 }
5531
5532 HANDLE os::Kernel32Dll::CreateToolhelp32Snapshot(DWORD dwFlags,DWORD th32ProcessId) {
5533 assert(initialized && _CreateToolhelp32Snapshot != NULL,
5534 "HelpToolsAvailable() not yet called");
5535
5536 return _CreateToolhelp32Snapshot(dwFlags, th32ProcessId);
5537 }
5538
5539 BOOL os::Kernel32Dll::Module32First(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
5540 assert(initialized && _Module32First != NULL,
5541 "HelpToolsAvailable() not yet called");
5542
5543 return _Module32First(hSnapshot, lpme);
5544 }
5545
5546 BOOL os::Kernel32Dll::Module32Next(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
5547 assert(initialized && _Module32Next != NULL,
5548 "HelpToolsAvailable() not yet called");
5549
5550 return _Module32Next(hSnapshot, lpme);
5551 }
5552
5553
5554 BOOL os::Kernel32Dll::GetNativeSystemInfoAvailable() {
5555 if (!initialized) {
5556 initialize();
5557 }
5558 return _GetNativeSystemInfo != NULL;
5559 }
5560
5561 void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo) {
5562 assert(initialized && _GetNativeSystemInfo != NULL,
5563 "GetNativeSystemInfoAvailable() not yet called");
5564
5565 _GetNativeSystemInfo(lpSystemInfo);
5566 }
5567
5568 // PSAPI API
5569
5570
5571 typedef BOOL (WINAPI *EnumProcessModules_Fn)(HANDLE, HMODULE *, DWORD, LPDWORD);
5572 typedef DWORD (WINAPI *GetModuleFileNameEx_Fn)(HANDLE, HMODULE, LPTSTR, DWORD);
5573 typedef BOOL (WINAPI *GetModuleInformation_Fn)(HANDLE, HMODULE, LPMODULEINFO, DWORD);
5574
5575 EnumProcessModules_Fn os::PSApiDll::_EnumProcessModules = NULL;
5576 GetModuleFileNameEx_Fn os::PSApiDll::_GetModuleFileNameEx = NULL;
5577 GetModuleInformation_Fn os::PSApiDll::_GetModuleInformation = NULL;
5578 BOOL os::PSApiDll::initialized = FALSE;
5579
5580 void os::PSApiDll::initialize() {
5581 if (!initialized) {
5582 HMODULE handle = os::win32::load_Windows_dll("PSAPI.DLL", NULL, 0);
5583 if (handle != NULL) {
5584 _EnumProcessModules = (EnumProcessModules_Fn)::GetProcAddress(handle,
5585 "EnumProcessModules");
5586 _GetModuleFileNameEx = (GetModuleFileNameEx_Fn)::GetProcAddress(handle,
5587 "GetModuleFileNameExA");
5588 _GetModuleInformation = (GetModuleInformation_Fn)::GetProcAddress(handle,
5589 "GetModuleInformation");
5590 }
5591 initialized = TRUE;
5592 }
5593 }
5594
5595
5596
5597 BOOL os::PSApiDll::EnumProcessModules(HANDLE hProcess, HMODULE *lpModule, DWORD cb, LPDWORD lpcbNeeded) {
5598 assert(initialized && _EnumProcessModules != NULL,
5599 "PSApiAvailable() not yet called");
5600 return _EnumProcessModules(hProcess, lpModule, cb, lpcbNeeded);
5601 }
5602
5603 DWORD os::PSApiDll::GetModuleFileNameEx(HANDLE hProcess, HMODULE hModule, LPTSTR lpFilename, DWORD nSize) {
5604 assert(initialized && _GetModuleFileNameEx != NULL,
5605 "PSApiAvailable() not yet called");
5606 return _GetModuleFileNameEx(hProcess, hModule, lpFilename, nSize);
5607 }
5608
5609 BOOL os::PSApiDll::GetModuleInformation(HANDLE hProcess, HMODULE hModule, LPMODULEINFO lpmodinfo, DWORD cb) {
5610 assert(initialized && _GetModuleInformation != NULL,
5611 "PSApiAvailable() not yet called");
5612 return _GetModuleInformation(hProcess, hModule, lpmodinfo, cb);
5613 }
5614
5615 BOOL os::PSApiDll::PSApiAvailable() {
5616 if (!initialized) {
5617 initialize();
5618 }
5619 return _EnumProcessModules != NULL &&
5620 _GetModuleFileNameEx != NULL &&
5621 _GetModuleInformation != NULL;
5622 }
5623
5624
5625 // WinSock2 API
5626 typedef int (PASCAL FAR* WSAStartup_Fn)(WORD, LPWSADATA);
5627 typedef struct hostent *(PASCAL FAR *gethostbyname_Fn)(...);
5628
5629 WSAStartup_Fn os::WinSock2Dll::_WSAStartup = NULL;
5630 gethostbyname_Fn os::WinSock2Dll::_gethostbyname = NULL;
5631 BOOL os::WinSock2Dll::initialized = FALSE;
5632
5633 void os::WinSock2Dll::initialize() {
5634 if (!initialized) {
5635 HMODULE handle = os::win32::load_Windows_dll("ws2_32.dll", NULL, 0);
5636 if (handle != NULL) {
5637 _WSAStartup = (WSAStartup_Fn)::GetProcAddress(handle, "WSAStartup");
5638 _gethostbyname = (gethostbyname_Fn)::GetProcAddress(handle, "gethostbyname");
5639 }
5640 initialized = TRUE;
5641 }
5642 }
5643
5644
5645 BOOL os::WinSock2Dll::WSAStartup(WORD wVersionRequested, LPWSADATA lpWSAData) {
5646 assert(initialized && _WSAStartup != NULL,
5647 "WinSock2Available() not yet called");
5648 return _WSAStartup(wVersionRequested, lpWSAData);
5649 }
5650
5651 struct hostent* os::WinSock2Dll::gethostbyname(const char *name) {
5652 assert(initialized && _gethostbyname != NULL,
5653 "WinSock2Available() not yet called");
5654 return _gethostbyname(name);
5655 }
5656
5657 BOOL os::WinSock2Dll::WinSock2Available() {
5658 if (!initialized) {
5659 initialize();
5660 }
5661 return _WSAStartup != NULL &&
5662 _gethostbyname != NULL;
5663 }
5664
5665 typedef BOOL (WINAPI *AdjustTokenPrivileges_Fn)(HANDLE, BOOL, PTOKEN_PRIVILEGES, DWORD, PTOKEN_PRIVILEGES, PDWORD);
5666 typedef BOOL (WINAPI *OpenProcessToken_Fn)(HANDLE, DWORD, PHANDLE);
5667 typedef BOOL (WINAPI *LookupPrivilegeValue_Fn)(LPCTSTR, LPCTSTR, PLUID);
5668
5669 AdjustTokenPrivileges_Fn os::Advapi32Dll::_AdjustTokenPrivileges = NULL;
5670 OpenProcessToken_Fn os::Advapi32Dll::_OpenProcessToken = NULL;
5671 LookupPrivilegeValue_Fn os::Advapi32Dll::_LookupPrivilegeValue = NULL;
5672 BOOL os::Advapi32Dll::initialized = FALSE;
5673
5674 void os::Advapi32Dll::initialize() {
5675 if (!initialized) {
5676 HMODULE handle = os::win32::load_Windows_dll("advapi32.dll", NULL, 0);
5677 if (handle != NULL) {
5678 _AdjustTokenPrivileges = (AdjustTokenPrivileges_Fn)::GetProcAddress(handle,
5679 "AdjustTokenPrivileges");
5680 _OpenProcessToken = (OpenProcessToken_Fn)::GetProcAddress(handle,
5681 "OpenProcessToken");
5682 _LookupPrivilegeValue = (LookupPrivilegeValue_Fn)::GetProcAddress(handle,
5683 "LookupPrivilegeValueA");
5684 }
5685 initialized = TRUE;
5686 }
5687 }
5688
5689 BOOL os::Advapi32Dll::AdjustTokenPrivileges(HANDLE TokenHandle,
5690 BOOL DisableAllPrivileges, PTOKEN_PRIVILEGES NewState, DWORD BufferLength,
5691 PTOKEN_PRIVILEGES PreviousState, PDWORD ReturnLength) {
5692 assert(initialized && _AdjustTokenPrivileges != NULL,
5693 "AdvapiAvailable() not yet called");
5694 return _AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState,
5695 BufferLength, PreviousState, ReturnLength);
5696 }
5697
5698 BOOL os::Advapi32Dll::OpenProcessToken(HANDLE ProcessHandle, DWORD DesiredAccess,
5699 PHANDLE TokenHandle) {
5700 assert(initialized && _OpenProcessToken != NULL,
5701 "AdvapiAvailable() not yet called");
5702 return _OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle);
5703 }
5704
5705 BOOL os::Advapi32Dll::LookupPrivilegeValue(LPCTSTR lpSystemName, LPCTSTR lpName, PLUID lpLuid) {
5706 assert(initialized && _LookupPrivilegeValue != NULL,
5707 "AdvapiAvailable() not yet called");
5708 return _LookupPrivilegeValue(lpSystemName, lpName, lpLuid);
5709 }
5710
5711 BOOL os::Advapi32Dll::AdvapiAvailable() {
5712 if (!initialized) {
5713 initialize();
5714 }
5715 return _AdjustTokenPrivileges != NULL &&
5716 _OpenProcessToken != NULL &&
5717 _LookupPrivilegeValue != NULL;
5718 }
5719
5720 #endif
5721
5722 #ifndef PRODUCT
5723
5724 // test the code path in reserve_memory_special() that tries to allocate memory in a single
5725 // contiguous memory block at a particular address.
5726 // The test first tries to find a good approximate address to allocate at by using the same
5727 // method to allocate some memory at any address. The test then tries to allocate memory in
5728 // the vicinity of that address (not directly after it, to avoid by-chance reuse of that location).
5729 // This is of course only a heuristic; there is no guarantee that the vicinity of
5730 // the previously allocated memory is available for allocation. The only actual failure
5731 // that is reported is when the test tries to allocate at a particular location but gets a
5732 // different valid one. A NULL return value at this point is not considered an error but may
5733 // be legitimate.
5734 // If -XX:+VerboseInternalVMTests is enabled, print some explanatory messages.
5735 void TestReserveMemorySpecial_test() {
5736 if (!UseLargePages) {
5737 if (VerboseInternalVMTests) {
5738 gclog_or_tty->print("Skipping test because large pages are disabled");
5739 }
5740 return;
5741 }
5742 // save current value of globals
5743 bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation;
5744 bool old_use_numa_interleaving = UseNUMAInterleaving;
5745
5746 // set globals to make sure we hit the correct code path
5747 UseLargePagesIndividualAllocation = UseNUMAInterleaving = false;
5748
5749 // do an allocation at an address selected by the OS to get a good one.
5750 const size_t large_allocation_size = os::large_page_size() * 4;
5751 char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false);
5752 if (result == NULL) {
5753 if (VerboseInternalVMTests) {
5754 gclog_or_tty->print("Failed to allocate control block with size "SIZE_FORMAT". Skipping remainder of test.",
5755 large_allocation_size);
5756 }
5757 } else {
5758 os::release_memory_special(result, large_allocation_size);
5759
5760 // allocate another page within the recently allocated memory area which seems to be a good location. At least
5761 // we managed to get it once.
5762 const size_t expected_allocation_size = os::large_page_size();
5763 char* expected_location = result + os::large_page_size();
5764 char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false);
5765 if (actual_location == NULL) {
5766 if (VerboseInternalVMTests) {
5767 gclog_or_tty->print("Failed to allocate any memory at "PTR_FORMAT" size "SIZE_FORMAT". Skipping remainder of test.",
5768 expected_location, expected_allocation_size);
5769 }
5770 } else {
5771 // release memory
5772 os::release_memory_special(actual_location, expected_allocation_size);
5773 // only now check, after releasing any memory to avoid any leaks.
5774 assert(actual_location == expected_location,
5775 err_msg("Failed to allocate memory at requested location "PTR_FORMAT" of size "SIZE_FORMAT", is "PTR_FORMAT" instead",
5776 expected_location, expected_allocation_size, actual_location));
5777 }
5778 }
5779
5780 // restore globals
5781 UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation;
5782 UseNUMAInterleaving = old_use_numa_interleaving;
5783 }
5784 #endif // PRODUCT
5785
