Merge

author      trims
date        Thu, 04 Nov 2010 16:17:54 -0700
changeset   2247   4ac698856c43
parent      2215   08f0f4a3ddd6
parent      2246   9de67bf4244d
child       2248   698b7b727e12
child       2277   5caa30ea147b

Merge

src/os/linux/vm/objectMonitor_linux.cpp
src/os/linux/vm/objectMonitor_linux.hpp
src/os/linux/vm/objectMonitor_linux.inline.hpp
src/os/solaris/vm/objectMonitor_solaris.cpp
src/os/solaris/vm/objectMonitor_solaris.hpp
src/os/solaris/vm/objectMonitor_solaris.inline.hpp
src/os/windows/vm/objectMonitor_windows.cpp
src/os/windows/vm/objectMonitor_windows.hpp
src/os/windows/vm/objectMonitor_windows.inline.hpp
     1.1 --- a/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp	Thu Nov 04 15:19:16 2010 -0700
     1.2 +++ b/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp	Thu Nov 04 16:17:54 2010 -0700
     1.3 @@ -664,7 +664,7 @@
     1.4    // Use temps to avoid kills
     1.5    LIR_Opr t1 = FrameMap::G1_opr;
     1.6    LIR_Opr t2 = FrameMap::G3_opr;
     1.7 -  LIR_Opr addr = (type == objectType) ? new_register(T_OBJECT) : new_pointer_register();
     1.8 +  LIR_Opr addr = new_pointer_register();
     1.9  
    1.10    // get address of field
    1.11    obj.load_item();
     2.1 --- a/src/cpu/sparc/vm/globals_sparc.hpp	Thu Nov 04 15:19:16 2010 -0700
     2.2 +++ b/src/cpu/sparc/vm/globals_sparc.hpp	Thu Nov 04 16:17:54 2010 -0700
     2.3 @@ -62,3 +62,5 @@
     2.4  
     2.5  define_pd_global(bool, RewriteBytecodes,     true);
     2.6  define_pd_global(bool, RewriteFrequentPairs, true);
     2.7 +
     2.8 +define_pd_global(bool, UseMembar,            false);
     3.1 --- a/src/cpu/x86/vm/c1_CodeStubs_x86.cpp	Thu Nov 04 15:19:16 2010 -0700
     3.2 +++ b/src/cpu/x86/vm/c1_CodeStubs_x86.cpp	Thu Nov 04 16:17:54 2010 -0700
     3.3 @@ -499,7 +499,7 @@
     3.4    Register new_val_reg = new_val()->as_register();
     3.5    __ cmpptr(new_val_reg, (int32_t) NULL_WORD);
     3.6    __ jcc(Assembler::equal, _continuation);
     3.7 -  ce->store_parameter(addr()->as_register(), 0);
     3.8 +  ce->store_parameter(addr()->as_pointer_register(), 0);
     3.9    __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_post_barrier_slow_id)));
    3.10    __ jmp(_continuation);
    3.11  }
     4.1 --- a/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Thu Nov 04 15:19:16 2010 -0700
     4.2 +++ b/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Thu Nov 04 16:17:54 2010 -0700
     4.3 @@ -765,7 +765,7 @@
     4.4      ShouldNotReachHere();
     4.5    }
     4.6  
     4.7 -  LIR_Opr addr = (type == objectType) ? new_register(T_OBJECT) : new_pointer_register();
     4.8 +  LIR_Opr addr = new_pointer_register();
     4.9    LIR_Address* a;
    4.10    if(offset.result()->is_constant()) {
    4.11      a = new LIR_Address(obj.result(),
     5.1 --- a/src/cpu/x86/vm/globals_x86.hpp	Thu Nov 04 15:19:16 2010 -0700
     5.2 +++ b/src/cpu/x86/vm/globals_x86.hpp	Thu Nov 04 16:17:54 2010 -0700
     5.3 @@ -63,3 +63,5 @@
     5.4  
     5.5  define_pd_global(bool, RewriteBytecodes,     true);
     5.6  define_pd_global(bool, RewriteFrequentPairs, true);
     5.7 +
     5.8 +define_pd_global(bool, UseMembar,            false);
     6.1 --- a/src/cpu/zero/vm/globals_zero.hpp	Thu Nov 04 15:19:16 2010 -0700
     6.2 +++ b/src/cpu/zero/vm/globals_zero.hpp	Thu Nov 04 16:17:54 2010 -0700
     6.3 @@ -45,3 +45,5 @@
     6.4  
     6.5  define_pd_global(bool,  RewriteBytecodes,     true);
     6.6  define_pd_global(bool,  RewriteFrequentPairs, true);
     6.7 +
     6.8 +define_pd_global(bool,  UseMembar,            false);
     7.1 --- a/src/os/linux/vm/attachListener_linux.cpp	Thu Nov 04 15:19:16 2010 -0700
     7.2 +++ b/src/os/linux/vm/attachListener_linux.cpp	Thu Nov 04 16:17:54 2010 -0700
     7.3 @@ -176,10 +176,10 @@
     7.4  
     7.5    int n = snprintf(path, UNIX_PATH_MAX, "%s/.java_pid%d",
     7.6                     os::get_temp_directory(), os::current_process_id());
     7.7 -  if (n <= (int)UNIX_PATH_MAX) {
     7.8 +  if (n < (int)UNIX_PATH_MAX) {
     7.9      n = snprintf(initial_path, UNIX_PATH_MAX, "%s.tmp", path);
    7.10    }
    7.11 -  if (n > (int)UNIX_PATH_MAX) {
    7.12 +  if (n >= (int)UNIX_PATH_MAX) {
    7.13      return -1;
    7.14    }
    7.15  
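
Note: the corrected bounds checks above rely on the C99 snprintf contract:
the return value is the length the formatted string would have required, not
the number of bytes actually stored, so a result >= the buffer size means the
path was truncated.  A minimal standalone sketch of that check (UNIX_PATH_MAX
is an assumed stand-in for the real sockaddr_un limit; this is not HotSpot
code):

    #include <cstdio>

    enum { UNIX_PATH_MAX = 108 };  // assumed value, for illustration only

    // Returns true only if the formatted path fit without truncation,
    // mirroring the '>=' test introduced in the hunk above.
    bool format_socket_path(char* path, const char* tmpdir, int pid) {
      int n = std::snprintf(path, UNIX_PATH_MAX, "%s/.java_pid%d", tmpdir, pid);
      return n >= 0 && n < (int)UNIX_PATH_MAX;
    }
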
     8.1 --- a/src/os/linux/vm/objectMonitor_linux.cpp	Thu Nov 04 15:19:16 2010 -0700
     8.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     8.3 @@ -1,24 +0,0 @@
     8.4 -
     8.5 -/*
     8.6 - * Copyright (c) 1999, 2005, Oracle and/or its affiliates. All rights reserved.
     8.7 - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     8.8 - *
     8.9 - * This code is free software; you can redistribute it and/or modify it
    8.10 - * under the terms of the GNU General Public License version 2 only, as
    8.11 - * published by the Free Software Foundation.
    8.12 - *
    8.13 - * This code is distributed in the hope that it will be useful, but WITHOUT
    8.14 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    8.15 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    8.16 - * version 2 for more details (a copy is included in the LICENSE file that
    8.17 - * accompanied this code).
    8.18 - *
    8.19 - * You should have received a copy of the GNU General Public License version
    8.20 - * 2 along with this work; if not, write to the Free Software Foundation,
    8.21 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    8.22 - *
    8.23 - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    8.24 - * or visit www.oracle.com if you need additional information or have any
    8.25 - * questions.
    8.26 - *
    8.27 - */
     9.1 --- a/src/os/linux/vm/objectMonitor_linux.hpp	Thu Nov 04 15:19:16 2010 -0700
     9.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     9.3 @@ -1,25 +0,0 @@
     9.4 -/*
     9.5 - * Copyright (c) 1999, 2005, Oracle and/or its affiliates. All rights reserved.
     9.6 - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     9.7 - *
     9.8 - * This code is free software; you can redistribute it and/or modify it
     9.9 - * under the terms of the GNU General Public License version 2 only, as
    9.10 - * published by the Free Software Foundation.
    9.11 - *
    9.12 - * This code is distributed in the hope that it will be useful, but WITHOUT
    9.13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    9.14 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    9.15 - * version 2 for more details (a copy is included in the LICENSE file that
    9.16 - * accompanied this code).
    9.17 - *
    9.18 - * You should have received a copy of the GNU General Public License version
    9.19 - * 2 along with this work; if not, write to the Free Software Foundation,
    9.20 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    9.21 - *
    9.22 - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    9.23 - * or visit www.oracle.com if you need additional information or have any
    9.24 - * questions.
    9.25 - *
    9.26 - */
    9.27 -
    9.28 - private:
    10.1 --- a/src/os/linux/vm/objectMonitor_linux.inline.hpp	Thu Nov 04 15:19:16 2010 -0700
    10.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    10.3 @@ -1,23 +0,0 @@
    10.4 -/*
    10.5 - * Copyright (c) 1999, 2005, Oracle and/or its affiliates. All rights reserved.
    10.6 - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    10.7 - *
    10.8 - * This code is free software; you can redistribute it and/or modify it
    10.9 - * under the terms of the GNU General Public License version 2 only, as
   10.10 - * published by the Free Software Foundation.
   10.11 - *
   10.12 - * This code is distributed in the hope that it will be useful, but WITHOUT
   10.13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   10.14 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   10.15 - * version 2 for more details (a copy is included in the LICENSE file that
   10.16 - * accompanied this code).
   10.17 - *
   10.18 - * You should have received a copy of the GNU General Public License version
   10.19 - * 2 along with this work; if not, write to the Free Software Foundation,
   10.20 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   10.21 - *
   10.22 - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   10.23 - * or visit www.oracle.com if you need additional information or have any
   10.24 - * questions.
   10.25 - *
   10.26 - */
    11.1 --- a/src/os/linux/vm/os_linux.cpp	Thu Nov 04 15:19:16 2010 -0700
    11.2 +++ b/src/os/linux/vm/os_linux.cpp	Thu Nov 04 16:17:54 2010 -0700
    11.3 @@ -1,5 +1,5 @@
    11.4  /*
    11.5 - * Copyright (c) 1999, 2009, Oracle and/or its affiliates. All rights reserved.
    11.6 + * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
    11.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    11.8   *
    11.9   * This code is free software; you can redistribute it and/or modify it
   11.10 @@ -827,8 +827,10 @@
   11.11  
   11.12        switch (thr_type) {
   11.13        case os::java_thread:
   11.14 -        // Java threads use ThreadStackSize which default value can be changed with the flag -Xss
   11.15 -        if (JavaThread::stack_size_at_create() > 0) stack_size = JavaThread::stack_size_at_create();
   11.16 +        // Java threads use ThreadStackSize which default value can be
   11.17 +        // changed with the flag -Xss
   11.18 +        assert (JavaThread::stack_size_at_create() > 0, "this should be set");
   11.19 +        stack_size = JavaThread::stack_size_at_create();
   11.20          break;
   11.21        case os::compiler_thread:
   11.22          if (CompilerThreadStackSize > 0) {
   11.23 @@ -3922,12 +3924,21 @@
   11.24    Linux::signal_sets_init();
   11.25    Linux::install_signal_handlers();
   11.26  
   11.27 +  // Check minimum allowable stack size for thread creation and to initialize
   11.28 +  // the java system classes, including StackOverflowError - depends on page
   11.29 +  // size.  Add a page for compiler2 recursion in main thread.
   11.30 +  // Add in 2*BytesPerWord times page size to account for VM stack during
   11.31 +  // class initialization depending on 32 or 64 bit VM.
   11.32 +  os::Linux::min_stack_allowed = MAX2(os::Linux::min_stack_allowed,
   11.33 +            (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
   11.34 +                    2*BytesPerWord COMPILER2_PRESENT(+1)) * Linux::page_size());
   11.35 +
   11.36    size_t threadStackSizeInBytes = ThreadStackSize * K;
   11.37    if (threadStackSizeInBytes != 0 &&
   11.38 -      threadStackSizeInBytes < Linux::min_stack_allowed) {
   11.39 +      threadStackSizeInBytes < os::Linux::min_stack_allowed) {
   11.40          tty->print_cr("\nThe stack size specified is too small, "
   11.41                        "Specify at least %dk",
   11.42 -                      Linux::min_stack_allowed / K);
   11.43 +                      os::Linux::min_stack_allowed/ K);
   11.44          return JNI_ERR;
   11.45    }
   11.46  
   11.47 @@ -4839,7 +4850,7 @@
   11.48  
   11.49    // Next, demultiplex/decode time arguments
   11.50    timespec absTime;
   11.51 -  if (time < 0) { // don't wait at all
   11.52 +  if (time < 0 || (isAbsolute && time == 0) ) { // don't wait at all
   11.53      return;
   11.54    }
   11.55    if (time > 0) {
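
Note: the new minimum-stack check above derives the floor from the guard and
shadow page counts plus a word-size term, all scaled by the page size.  A
rough worked example with assumed values (the page-count defaults below are
illustrative guesses, not the actual globals.hpp settings):

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t page_size        = 4096;  // assumed 4 KB pages
      const size_t StackYellowPages = 2;     // assumed default
      const size_t StackRedPages    = 1;     // assumed default
      const size_t StackShadowPages = 20;    // assumed default
      const size_t BytesPerWord     = 8;     // 64-bit VM
      const size_t compiler2_extra  = 1;     // COMPILER2_PRESENT(+1)

      // Same shape as the expression added to os::init_2() above:
      size_t min_stack_allowed =
          (StackYellowPages + StackRedPages + StackShadowPages +
           2 * BytesPerWord + compiler2_extra) * page_size;

      // With these assumptions: (2+1+20+16+1) = 40 pages = 160 KB, so a
      // -Xss value below that would now be rejected with JNI_ERR.
      std::printf("min stack: %zuk\n", min_stack_allowed / 1024);
      return 0;
    }
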
    12.1 --- a/src/os/solaris/vm/objectMonitor_solaris.cpp	Thu Nov 04 15:19:16 2010 -0700
    12.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    12.3 @@ -1,23 +0,0 @@
    12.4 -/*
    12.5 - * Copyright (c) 1998, 2005, Oracle and/or its affiliates. All rights reserved.
    12.6 - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    12.7 - *
    12.8 - * This code is free software; you can redistribute it and/or modify it
    12.9 - * under the terms of the GNU General Public License version 2 only, as
   12.10 - * published by the Free Software Foundation.
   12.11 - *
   12.12 - * This code is distributed in the hope that it will be useful, but WITHOUT
   12.13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   12.14 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   12.15 - * version 2 for more details (a copy is included in the LICENSE file that
   12.16 - * accompanied this code).
   12.17 - *
   12.18 - * You should have received a copy of the GNU General Public License version
   12.19 - * 2 along with this work; if not, write to the Free Software Foundation,
   12.20 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   12.21 - *
   12.22 - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   12.23 - * or visit www.oracle.com if you need additional information or have any
   12.24 - * questions.
   12.25 - *
   12.26 - */
    13.1 --- a/src/os/solaris/vm/objectMonitor_solaris.hpp	Thu Nov 04 15:19:16 2010 -0700
    13.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    13.3 @@ -1,25 +0,0 @@
    13.4 -/*
    13.5 - * Copyright (c) 1998, 2005, Oracle and/or its affiliates. All rights reserved.
    13.6 - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    13.7 - *
    13.8 - * This code is free software; you can redistribute it and/or modify it
    13.9 - * under the terms of the GNU General Public License version 2 only, as
   13.10 - * published by the Free Software Foundation.
   13.11 - *
   13.12 - * This code is distributed in the hope that it will be useful, but WITHOUT
   13.13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   13.14 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   13.15 - * version 2 for more details (a copy is included in the LICENSE file that
   13.16 - * accompanied this code).
   13.17 - *
   13.18 - * You should have received a copy of the GNU General Public License version
   13.19 - * 2 along with this work; if not, write to the Free Software Foundation,
   13.20 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   13.21 - *
   13.22 - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   13.23 - * or visit www.oracle.com if you need additional information or have any
   13.24 - * questions.
   13.25 - *
   13.26 - */
   13.27 -
   13.28 - private:
    14.1 --- a/src/os/solaris/vm/objectMonitor_solaris.inline.hpp	Thu Nov 04 15:19:16 2010 -0700
    14.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    14.3 @@ -1,23 +0,0 @@
    14.4 -/*
    14.5 - * Copyright (c) 1998, 2005, Oracle and/or its affiliates. All rights reserved.
    14.6 - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    14.7 - *
    14.8 - * This code is free software; you can redistribute it and/or modify it
    14.9 - * under the terms of the GNU General Public License version 2 only, as
   14.10 - * published by the Free Software Foundation.
   14.11 - *
   14.12 - * This code is distributed in the hope that it will be useful, but WITHOUT
   14.13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   14.14 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   14.15 - * version 2 for more details (a copy is included in the LICENSE file that
   14.16 - * accompanied this code).
   14.17 - *
   14.18 - * You should have received a copy of the GNU General Public License version
   14.19 - * 2 along with this work; if not, write to the Free Software Foundation,
   14.20 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   14.21 - *
   14.22 - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   14.23 - * or visit www.oracle.com if you need additional information or have any
   14.24 - * questions.
   14.25 - *
   14.26 - */
    15.1 --- a/src/os/solaris/vm/os_solaris.cpp	Thu Nov 04 15:19:16 2010 -0700
    15.2 +++ b/src/os/solaris/vm/os_solaris.cpp	Thu Nov 04 16:17:54 2010 -0700
    15.3 @@ -1,5 +1,5 @@
    15.4  /*
    15.5 - * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
    15.6 + * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
    15.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    15.8   *
    15.9   * This code is free software; you can redistribute it and/or modify it
   15.10 @@ -4878,18 +4878,17 @@
   15.11    // Check minimum allowable stack size for thread creation and to initialize
   15.12    // the java system classes, including StackOverflowError - depends on page
   15.13    // size.  Add a page for compiler2 recursion in main thread.
   15.14 -  // Add in BytesPerWord times page size to account for VM stack during
   15.15 +  // Add in 2*BytesPerWord times page size to account for VM stack during
   15.16    // class initialization depending on 32 or 64 bit VM.
   15.17 -  guarantee((Solaris::min_stack_allowed >=
   15.18 -    (StackYellowPages+StackRedPages+StackShadowPages+BytesPerWord
   15.19 -     COMPILER2_PRESENT(+1)) * page_size),
   15.20 -    "need to increase Solaris::min_stack_allowed on this platform");
   15.21 +  os::Solaris::min_stack_allowed = MAX2(os::Solaris::min_stack_allowed,
   15.22 +            (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
   15.23 +                    2*BytesPerWord COMPILER2_PRESENT(+1)) * page_size);
   15.24  
   15.25    size_t threadStackSizeInBytes = ThreadStackSize * K;
   15.26    if (threadStackSizeInBytes != 0 &&
   15.27 -    threadStackSizeInBytes < Solaris::min_stack_allowed) {
   15.28 +    threadStackSizeInBytes < os::Solaris::min_stack_allowed) {
   15.29      tty->print_cr("\nThe stack size specified is too small, Specify at least %dk",
   15.30 -                  Solaris::min_stack_allowed/K);
   15.31 +                  os::Solaris::min_stack_allowed/K);
   15.32      return JNI_ERR;
   15.33    }
   15.34  
   15.35 @@ -5837,7 +5836,7 @@
   15.36  
   15.37    // First, demultiplex/decode time arguments
   15.38    timespec absTime;
   15.39 -  if (time < 0) { // don't wait at all
   15.40 +  if (time < 0 || (isAbsolute && time == 0) ) { // don't wait at all
   15.41      return;
   15.42    }
   15.43    if (time > 0) {
    16.1 --- a/src/os/windows/vm/objectMonitor_windows.cpp	Thu Nov 04 15:19:16 2010 -0700
    16.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    16.3 @@ -1,25 +0,0 @@
    16.4 -/*
    16.5 - * Copyright (c) 1998, 2005, Oracle and/or its affiliates. All rights reserved.
    16.6 - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    16.7 - *
    16.8 - * This code is free software; you can redistribute it and/or modify it
    16.9 - * under the terms of the GNU General Public License version 2 only, as
   16.10 - * published by the Free Software Foundation.
   16.11 - *
   16.12 - * This code is distributed in the hope that it will be useful, but WITHOUT
   16.13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   16.14 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   16.15 - * version 2 for more details (a copy is included in the LICENSE file that
   16.16 - * accompanied this code).
   16.17 - *
   16.18 - * You should have received a copy of the GNU General Public License version
   16.19 - * 2 along with this work; if not, write to the Free Software Foundation,
   16.20 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   16.21 - *
   16.22 - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   16.23 - * or visit www.oracle.com if you need additional information or have any
   16.24 - * questions.
   16.25 - *
   16.26 - */
   16.27 -
   16.28 -#include "incls/_precompiled.incl"
    17.1 --- a/src/os/windows/vm/objectMonitor_windows.hpp	Thu Nov 04 15:19:16 2010 -0700
    17.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    17.3 @@ -1,25 +0,0 @@
    17.4 -/*
    17.5 - * Copyright (c) 1998, 2005, Oracle and/or its affiliates. All rights reserved.
    17.6 - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    17.7 - *
    17.8 - * This code is free software; you can redistribute it and/or modify it
    17.9 - * under the terms of the GNU General Public License version 2 only, as
   17.10 - * published by the Free Software Foundation.
   17.11 - *
   17.12 - * This code is distributed in the hope that it will be useful, but WITHOUT
   17.13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   17.14 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   17.15 - * version 2 for more details (a copy is included in the LICENSE file that
   17.16 - * accompanied this code).
   17.17 - *
   17.18 - * You should have received a copy of the GNU General Public License version
   17.19 - * 2 along with this work; if not, write to the Free Software Foundation,
   17.20 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   17.21 - *
   17.22 - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   17.23 - * or visit www.oracle.com if you need additional information or have any
   17.24 - * questions.
   17.25 - *
   17.26 - */
   17.27 -
   17.28 - private:
    18.1 --- a/src/os/windows/vm/objectMonitor_windows.inline.hpp	Thu Nov 04 15:19:16 2010 -0700
    18.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    18.3 @@ -1,23 +0,0 @@
    18.4 -/*
    18.5 - * Copyright (c) 1998, 2005, Oracle and/or its affiliates. All rights reserved.
    18.6 - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    18.7 - *
    18.8 - * This code is free software; you can redistribute it and/or modify it
    18.9 - * under the terms of the GNU General Public License version 2 only, as
   18.10 - * published by the Free Software Foundation.
   18.11 - *
   18.12 - * This code is distributed in the hope that it will be useful, but WITHOUT
   18.13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   18.14 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   18.15 - * version 2 for more details (a copy is included in the LICENSE file that
   18.16 - * accompanied this code).
   18.17 - *
   18.18 - * You should have received a copy of the GNU General Public License version
   18.19 - * 2 along with this work; if not, write to the Free Software Foundation,
   18.20 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   18.21 - *
   18.22 - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   18.23 - * or visit www.oracle.com if you need additional information or have any
   18.24 - * questions.
   18.25 - *
   18.26 - */
    19.1 --- a/src/os/windows/vm/os_windows.cpp	Thu Nov 04 15:19:16 2010 -0700
    19.2 +++ b/src/os/windows/vm/os_windows.cpp	Thu Nov 04 16:17:54 2010 -0700
    19.3 @@ -3311,7 +3311,6 @@
    19.4    }
    19.5  }
    19.6  
    19.7 -
    19.8  // this is called _after_ the global arguments have been parsed
    19.9  jint os::init_2(void) {
   19.10    // Allocate a single page and mark it as readable for safepoint polling
   19.11 @@ -3390,6 +3389,21 @@
   19.12      actual_reserve_size = default_reserve_size;
   19.13    }
   19.14  
   19.15 +  // Check minimum allowable stack size for thread creation and to initialize
   19.16 +  // the java system classes, including StackOverflowError - depends on page
   19.17 +  // size.  Add a page for compiler2 recursion in main thread.
   19.18 +  // Add in 2*BytesPerWord times page size to account for VM stack during
   19.19 +  // class initialization depending on 32 or 64 bit VM.
   19.20 +  size_t min_stack_allowed =
   19.21 +            (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
   19.22 +            2*BytesPerWord COMPILER2_PRESENT(+1)) * os::vm_page_size();
   19.23 +  if (actual_reserve_size < min_stack_allowed) {
   19.24 +    tty->print_cr("\nThe stack size specified is too small, "
   19.25 +                  "Specify at least %dk",
   19.26 +                  min_stack_allowed / K);
   19.27 +    return JNI_ERR;
   19.28 +  }
   19.29 +
   19.30    JavaThread::set_stack_size_at_create(stack_commit_size);
   19.31  
   19.32    // Calculate theoretical max. size of Threads to guard gainst artifical
   19.33 @@ -3992,7 +4006,7 @@
   19.34    if (time < 0) { // don't wait
   19.35      return;
   19.36    }
   19.37 -  else if (time == 0) {
   19.38 +  else if (time == 0 && !isAbsolute) {
   19.39      time = INFINITE;
   19.40    }
   19.41    else if  (isAbsolute) {
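
Note: the three park() changes above (Linux, Solaris, Windows) align on the
same corner case of Unsafe.park: with isAbsolute set, 'time' is a millisecond
deadline since the epoch, so zero is a deadline that has already passed and
the caller must return immediately rather than being treated as "no timeout"
(which is what Windows previously did by mapping it to INFINITE).  A small
sketch of the shared decoding rule (illustrative only, not the HotSpot code):

    // isAbsolute == true  : 'time' is an absolute deadline in milliseconds.
    // isAbsolute == false : 'time' is a relative timeout in nanoseconds,
    //                       where 0 means "park with no timeout".
    enum class ParkAction { ReturnImmediately, WaitForever, WaitWithDeadline };

    ParkAction decode_park_time(bool isAbsolute, long long time) {
      if (time < 0 || (isAbsolute && time == 0)) {
        return ParkAction::ReturnImmediately;   // deadline already in the past
      }
      if (time == 0) {
        return ParkAction::WaitForever;         // relative zero: no timeout
      }
      return ParkAction::WaitWithDeadline;
    }
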
    20.1 --- a/src/share/vm/c1/c1_LIRGenerator.cpp	Thu Nov 04 15:19:16 2010 -0700
    20.2 +++ b/src/share/vm/c1/c1_LIRGenerator.cpp	Thu Nov 04 16:17:54 2010 -0700
    20.3 @@ -1350,7 +1350,6 @@
    20.4      addr = ptr;
    20.5    }
    20.6    assert(addr->is_register(), "must be a register at this point");
    20.7 -  assert(addr->type() == T_OBJECT, "addr should point to an object");
    20.8  
    20.9    LIR_Opr xor_res = new_pointer_register();
   20.10    LIR_Opr xor_shift_res = new_pointer_register();
    21.1 --- a/src/share/vm/classfile/classFileParser.cpp	Thu Nov 04 15:19:16 2010 -0700
    21.2 +++ b/src/share/vm/classfile/classFileParser.cpp	Thu Nov 04 16:17:54 2010 -0700
    21.3 @@ -4309,20 +4309,21 @@
    21.4  }
    21.5  
    21.6  
    21.7 -// Unqualified names may not contain the characters '.', ';', or '/'.
    21.8 -// Method names also may not contain the characters '<' or '>', unless <init> or <clinit>.
    21.9 -// Note that method names may not be <init> or <clinit> in this method.
   21.10 -// Because these names have been checked as special cases before calling this method
   21.11 -// in verify_legal_method_name.
   21.12 -bool ClassFileParser::verify_unqualified_name(char* name, unsigned int length, int type) {
   21.13 +// Unqualified names may not contain the characters '.', ';', '[', or '/'.
   21.14 +// Method names also may not contain the characters '<' or '>', unless <init>
   21.15 +// or <clinit>.  Note that method names may not be <init> or <clinit> in this
   21.16 +// method.  Because these names have been checked as special cases before
   21.17 +// calling this method in verify_legal_method_name.
   21.18 +bool ClassFileParser::verify_unqualified_name(
   21.19 +    char* name, unsigned int length, int type) {
   21.20    jchar ch;
   21.21  
   21.22    for (char* p = name; p != name + length; ) {
   21.23      ch = *p;
   21.24      if (ch < 128) {
   21.25        p++;
   21.26 -      if (ch == '.' || ch == ';') {
   21.27 -        return false;   // do not permit '.' or ';'
   21.28 +      if (ch == '.' || ch == ';' || ch == '[' ) {
   21.29 +        return false;   // do not permit '.', ';', or '['
   21.30        }
   21.31        if (type != LegalClass && ch == '/') {
   21.32          return false;   // do not permit '/' unless it's class name
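
Note: the stricter check above matches the JVMS rule that unqualified names
may not contain '.', ';', '[', or '/' ('/' being legal only as the package
separator inside class names); adding '[' keeps array-descriptor syntax out
of field and method names.  A standalone sketch of just the ASCII branch
(simplified; the real method also walks UTF-8 sequences and is reached only
after <init>/<clinit> have been handled as special cases):

    // Hypothetical helper, not the parser itself.
    static bool legal_unqualified_ascii(const char* name, bool is_class_name) {
      for (const char* p = name; *p != '\0'; ++p) {
        const char ch = *p;
        if (ch == '.' || ch == ';' || ch == '[') return false;  // new '[' case
        if (!is_class_name && ch == '/') return false;  // '/' only in class names
      }
      return true;
    }
    // legal_unqualified_ascii("value", false)    -> true
    // legal_unqualified_ascii("bad[name", false) -> false
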
    22.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    22.2 +++ b/src/share/vm/classfile/stackMapTableFormat.hpp	Thu Nov 04 16:17:54 2010 -0700
    22.3 @@ -0,0 +1,916 @@
    22.4 +/*
    22.5 + * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
    22.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    22.7 + *
    22.8 + * This code is free software; you can redistribute it and/or modify it
    22.9 + * under the terms of the GNU General Public License version 2 only, as
   22.10 + * published by the Free Software Foundation.
   22.11 + *
   22.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   22.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   22.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   22.15 + * version 2 for more details (a copy is included in the LICENSE file that
   22.16 + * accompanied this code).
   22.17 + *
   22.18 + * You should have received a copy of the GNU General Public License version
   22.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   22.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   22.21 + *
   22.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   22.23 + * or visit www.oracle.com if you need additional information or have any
   22.24 + * questions.
   22.25 + *
   22.26 + */
   22.27 +
   22.28 +// These classes represent the stack-map substructures described in the JVMS
   22.29 +// (hence the non-conforming naming scheme).
   22.30 +
   22.31 +// These classes work with the types in their compressed form in-place (as they
   22.32 +// would appear in the classfile).  No virtual methods or fields allowed.
   22.33 +
   22.34 +class verification_type_info {
   22.35 + private:
   22.36 +  // u1 tag
   22.37 +  // u2 cpool_index || u2 bci (for ITEM_Object & ITEM_Uninitailized only)
   22.38 +
   22.39 +  address tag_addr() const { return (address)this; }
   22.40 +  address cpool_index_addr() const { return tag_addr() + sizeof(u1); }
   22.41 +  address bci_addr() const { return cpool_index_addr(); }
   22.42 +
   22.43 + protected:
   22.44 +  // No constructors  - should be 'private', but GCC issues a warning if it is
   22.45 +  verification_type_info() {}
   22.46 +  verification_type_info(const verification_type_info&) {}
   22.47 +
   22.48 + public:
   22.49 +
   22.50 +  static verification_type_info* at(address addr) {
   22.51 +    return (verification_type_info*)addr;
   22.52 +  }
   22.53 +
   22.54 +  static verification_type_info* create_at(address addr, u1 tag) {
   22.55 +    verification_type_info* vti = (verification_type_info*)addr;
   22.56 +    vti->set_tag(tag);
   22.57 +    return vti;
   22.58 +  }
   22.59 +
   22.60 +  static verification_type_info* create_object_at(address addr, u2 cp_idx) {
   22.61 +    verification_type_info* vti = (verification_type_info*)addr;
   22.62 +    vti->set_tag(ITEM_Object);
   22.63 +    vti->set_cpool_index(cp_idx);
   22.64 +    return vti;
   22.65 +  }
   22.66 +
   22.67 +  static verification_type_info* create_uninit_at(address addr, u2 bci) {
   22.68 +    verification_type_info* vti = (verification_type_info*)addr;
   22.69 +    vti->set_tag(ITEM_Uninitialized);
   22.70 +    vti->set_bci(bci);
   22.71 +    return vti;
   22.72 +  }
   22.73 +
   22.74 +  static size_t calculate_size(u1 tag) {
   22.75 +    if (tag == ITEM_Object || tag == ITEM_Uninitialized) {
   22.76 +      return sizeof(u1) + sizeof(u2);
   22.77 +    } else {
   22.78 +      return sizeof(u1);
   22.79 +    }
   22.80 +  }
   22.81 +
   22.82 +  static size_t max_size() { return sizeof(u1) + sizeof(u2); }
   22.83 +
   22.84 +  u1 tag() const { return *(u1*)tag_addr(); }
   22.85 +  void set_tag(u1 tag) { *((u1*)tag_addr()) = tag; }
   22.86 +
   22.87 +  bool is_object() const { return tag() == ITEM_Object; }
   22.88 +  bool is_uninitialized() const { return tag() == ITEM_Uninitialized; }
   22.89 +
   22.90 +  u2 cpool_index() const {
   22.91 +    assert(is_object(), "This type has no cp_index");
   22.92 +    return Bytes::get_Java_u2(cpool_index_addr());
   22.93 +  }
   22.94 +  void set_cpool_index(u2 idx) {
   22.95 +    assert(is_object(), "This type has no cp_index");
   22.96 +    Bytes::put_Java_u2(cpool_index_addr(), idx);
   22.97 +  }
   22.98 +
   22.99 +  u2 bci() const {
  22.100 +    assert(is_uninitialized(), "This type has no bci");
  22.101 +    return Bytes::get_Java_u2(bci_addr());
  22.102 +  }
  22.103 +
  22.104 +  void set_bci(u2 bci) {
  22.105 +    assert(is_uninitialized(), "This type has no bci");
  22.106 +    Bytes::put_Java_u2(bci_addr(), bci);
  22.107 +  }
  22.108 +
  22.109 +  void copy_from(verification_type_info* from) {
  22.110 +    set_tag(from->tag());
  22.111 +    if (from->is_object()) {
  22.112 +      set_cpool_index(from->cpool_index());
  22.113 +    } else if (from->is_uninitialized()) {
  22.114 +      set_bci(from->bci());
  22.115 +    }
  22.116 +  }
  22.117 +
  22.118 +  size_t size() const {
  22.119 +    return calculate_size(tag());
  22.120 +  }
  22.121 +
  22.122 +  verification_type_info* next() {
  22.123 +    return (verification_type_info*)((address)this + size());
  22.124 +  }
  22.125 +
  22.126 +  // This method is used when reading unverified data in order to ensure
  22.127 +  // that we don't read past a particular memory limit.  It returns false
  22.128 +  // if any part of the data structure is outside the specified memory bounds.
  22.129 +  bool verify(address start, address end) {
  22.130 +    return ((address)this >= start &&
  22.131 +            (address)this < end &&
  22.132 +            (bci_addr() + sizeof(u2) <= end ||
  22.133 +                !is_object() && !is_uninitialized()));
  22.134 +  }
  22.135 +
  22.136 +#ifdef ASSERT
  22.137 +  void print_on(outputStream* st) {
  22.138 +    switch (tag()) {
  22.139 +      case ITEM_Top: st->print("Top"); break;
  22.140 +      case ITEM_Integer: st->print("Integer"); break;
  22.141 +      case ITEM_Float: st->print("Float"); break;
  22.142 +      case ITEM_Double: st->print("Double"); break;
  22.143 +      case ITEM_Long: st->print("Long"); break;
  22.144 +      case ITEM_Null: st->print("Null"); break;
  22.145 +      case ITEM_UninitializedThis:
  22.146 +        st->print("UninitializedThis"); break;
  22.147 +      case ITEM_Uninitialized:
  22.148 +        st->print("Uninitialized[#%d]", bci()); break;
  22.149 +      case ITEM_Object:
  22.150 +        st->print("Object[#%d]", cpool_index()); break;
  22.151 +      default:
  22.152 +        assert(false, "Bad verification_type_info");
  22.153 +    }
  22.154 +  }
  22.155 +#endif
  22.156 +};
  22.157 +
  22.158 +#define FOR_EACH_STACKMAP_FRAME_TYPE(macro, arg1, arg2) \
  22.159 +  macro(same_frame, arg1, arg2) \
  22.160 +  macro(same_frame_extended, arg1, arg2) \
  22.161 +  macro(same_frame_1_stack_item_frame, arg1, arg2) \
  22.162 +  macro(same_frame_1_stack_item_extended, arg1, arg2) \
  22.163 +  macro(chop_frame, arg1, arg2) \
  22.164 +  macro(append_frame, arg1, arg2) \
  22.165 +  macro(full_frame, arg1, arg2)
  22.166 +
  22.167 +#define SM_FORWARD_DECL(type, arg1, arg2) class type;
  22.168 +FOR_EACH_STACKMAP_FRAME_TYPE(SM_FORWARD_DECL, x, x)
  22.169 +#undef SM_FORWARD_DECL
  22.170 +
  22.171 +class stack_map_frame {
  22.172 + protected:
  22.173 +  address frame_type_addr() const { return (address)this; }
  22.174 +
  22.175 +  // No constructors  - should be 'private', but GCC issues a warning if it is
  22.176 +  stack_map_frame() {}
  22.177 +  stack_map_frame(const stack_map_frame&) {}
  22.178 +
  22.179 + public:
  22.180 +
  22.181 +  static stack_map_frame* at(address addr) {
  22.182 +    return (stack_map_frame*)addr;
  22.183 +  }
  22.184 +
  22.185 +  stack_map_frame* next() const {
  22.186 +    return at((address)this + size());
  22.187 +  }
  22.188 +
  22.189 +  u1 frame_type() const { return *(u1*)frame_type_addr(); }
  22.190 +  void set_frame_type(u1 type) { *((u1*)frame_type_addr()) = type; }
  22.191 +
  22.192 +  // pseudo-virtual methods
  22.193 +  inline size_t size() const;
  22.194 +  inline int offset_delta() const;
  22.195 +  inline void set_offset_delta(int offset_delta);
  22.196 +  inline int number_of_types() const; // number of types contained in the frame
  22.197 +  inline verification_type_info* types() const; // pointer to first type
  22.198 +  inline bool is_valid_offset(int offset_delta) const;
  22.199 +
  22.200 +  // This method must be used when reading unverified data in order to ensure
  22.201 +  // that we don't read past a particular memory limit.  It returns false
  22.202 +  // if any part of the data structure is outside the specified memory bounds.
  22.203 +  inline bool verify(address start, address end) const;
  22.204 +#ifdef ASSERT
  22.205 +  inline void print_on(outputStream* st) const;
  22.206 +#endif
  22.207 +
  22.208 +  // Create as_xxx and is_xxx methods for the subtypes
  22.209 +#define FRAME_TYPE_DECL(stackmap_frame_type, arg1, arg2) \
  22.210 +  inline stackmap_frame_type* as_##stackmap_frame_type() const; \
  22.211 +  bool is_##stackmap_frame_type() { \
  22.212 +    return as_##stackmap_frame_type() != NULL; \
  22.213 +  }
  22.214 +
  22.215 +  FOR_EACH_STACKMAP_FRAME_TYPE(FRAME_TYPE_DECL, x, x)
  22.216 +#undef FRAME_TYPE_DECL
  22.217 +};
  22.218 +
  22.219 +class same_frame : public stack_map_frame {
  22.220 + private:
  22.221 +  static int frame_type_to_offset_delta(u1 frame_type) {
  22.222 +      return frame_type + 1; }
  22.223 +  static u1 offset_delta_to_frame_type(int offset_delta) {
  22.224 +      return (u1)(offset_delta - 1); }
  22.225 +
  22.226 + public:
  22.227 +
  22.228 +  static bool is_frame_type(u1 tag) {
  22.229 +    return tag < 64;
  22.230 +  }
  22.231 +
  22.232 +  static same_frame* at(address addr) {
  22.233 +    assert(is_frame_type(*addr), "Wrong frame id");
  22.234 +    return (same_frame*)addr;
  22.235 +  }
  22.236 +
  22.237 +  static same_frame* create_at(address addr, int offset_delta) {
  22.238 +    same_frame* sm = (same_frame*)addr;
  22.239 +    sm->set_offset_delta(offset_delta);
  22.240 +    return sm;
  22.241 +  }
  22.242 +
  22.243 +  static size_t calculate_size() { return sizeof(u1); }
  22.244 +
  22.245 +  size_t size() const { return calculate_size(); }
  22.246 +  int offset_delta() const { return frame_type_to_offset_delta(frame_type()); }
  22.247 +
  22.248 +  void set_offset_delta(int offset_delta) {
  22.249 +    assert(offset_delta <= 64, "Offset too large for same_frame");
  22.250 +    set_frame_type(offset_delta_to_frame_type(offset_delta));
  22.251 +  }
  22.252 +
  22.253 +  int number_of_types() const { return 0; }
  22.254 +  verification_type_info* types() const { return NULL; }
  22.255 +
  22.256 +  bool is_valid_offset(int offset_delta) const {
  22.257 +    return is_frame_type(offset_delta_to_frame_type(offset_delta));
  22.258 +  }
  22.259 +
  22.260 +  bool verify_subtype(address start, address end) const {
  22.261 +    return true;
  22.262 +  }
  22.263 +
  22.264 +#ifdef ASSERT
  22.265 +  void print_on(outputStream* st) const {
  22.266 +    st->print("same_frame(%d)", offset_delta());
  22.267 +  }
  22.268 +#endif
  22.269 +};
  22.270 +
  22.271 +class same_frame_extended : public stack_map_frame {
  22.272 + private:
  22.273 +  enum { _frame_id = 251 };
  22.274 +  address offset_delta_addr() const { return frame_type_addr() + sizeof(u1); }
  22.275 +
  22.276 + public:
  22.277 +  static bool is_frame_type(u1 tag) {
  22.278 +    return tag == _frame_id;
  22.279 +  }
  22.280 +
  22.281 +  static same_frame_extended* at(address addr) {
  22.282 +    assert(is_frame_type(*addr), "Wrong frame type");
  22.283 +    return (same_frame_extended*)addr;
  22.284 +  }
  22.285 +
  22.286 +  static same_frame_extended* create_at(address addr, u2 offset_delta) {
  22.287 +    same_frame_extended* sm = (same_frame_extended*)addr;
  22.288 +    sm->set_frame_type(_frame_id);
  22.289 +    sm->set_offset_delta(offset_delta);
  22.290 +    return sm;
  22.291 +  }
  22.292 +
  22.293 +  static size_t calculate_size() { return sizeof(u1) + sizeof(u2); }
  22.294 +
  22.295 +  size_t size() const { return calculate_size(); }
  22.296 +  int offset_delta() const {
  22.297 +    return Bytes::get_Java_u2(offset_delta_addr()) + 1;
  22.298 +  }
  22.299 +
  22.300 +  void set_offset_delta(int offset_delta) {
  22.301 +    Bytes::put_Java_u2(offset_delta_addr(), offset_delta - 1);
  22.302 +  }
  22.303 +
  22.304 +  int number_of_types() const { return 0; }
  22.305 +  verification_type_info* types() const { return NULL; }
  22.306 +  bool is_valid_offset(int offset) const { return true; }
  22.307 +
  22.308 +  bool verify_subtype(address start, address end) const {
  22.309 +    return frame_type_addr() + size() <= end;
  22.310 +  }
  22.311 +
  22.312 +#ifdef ASSERT
  22.313 +  void print_on(outputStream* st) const {
  22.314 +    st->print("same_frame_extended(%d)", offset_delta());
  22.315 +  }
  22.316 +#endif
  22.317 +};
  22.318 +
  22.319 +class same_frame_1_stack_item_frame : public stack_map_frame {
  22.320 + private:
  22.321 +  address type_addr() const { return frame_type_addr() + sizeof(u1); }
  22.322 +
  22.323 +  static int frame_type_to_offset_delta(u1 frame_type) {
  22.324 +      return frame_type - 63; }
  22.325 +  static u1 offset_delta_to_frame_type(int offset_delta) {
  22.326 +      return (u1)(offset_delta + 63); }
  22.327 +
  22.328 + public:
  22.329 +  static bool is_frame_type(u1 tag) {
  22.330 +    return tag >= 64 && tag < 128;
  22.331 +  }
  22.332 +
  22.333 +  static same_frame_1_stack_item_frame* at(address addr) {
  22.334 +    assert(is_frame_type(*addr), "Wrong frame id");
  22.335 +    return (same_frame_1_stack_item_frame*)addr;
  22.336 +  }
  22.337 +
  22.338 +  static same_frame_1_stack_item_frame* create_at(
  22.339 +      address addr, int offset_delta, verification_type_info* vti) {
  22.340 +    same_frame_1_stack_item_frame* sm = (same_frame_1_stack_item_frame*)addr;
  22.341 +    sm->set_offset_delta(offset_delta);
  22.342 +    if (vti != NULL) {
  22.343 +      sm->set_type(vti);
  22.344 +    }
  22.345 +    return sm;
  22.346 +  }
  22.347 +
  22.348 +  static size_t calculate_size(verification_type_info* vti) {
  22.349 +    return sizeof(u1) + vti->size();
  22.350 +  }
  22.351 +
  22.352 +  static size_t max_size() {
  22.353 +    return sizeof(u1) + verification_type_info::max_size();
  22.354 +  }
  22.355 +
  22.356 +  size_t size() const { return calculate_size(types()); }
  22.357 +  int offset_delta() const { return frame_type_to_offset_delta(frame_type()); }
  22.358 +
  22.359 +  void set_offset_delta(int offset_delta) {
  22.360 +    assert(offset_delta > 0 && offset_delta <= 64,
  22.361 +           "Offset too large for this frame type");
  22.362 +    set_frame_type(offset_delta_to_frame_type(offset_delta));
  22.363 +  }
  22.364 +
  22.365 +  void set_type(verification_type_info* vti) {
  22.366 +    verification_type_info* cur = types();
  22.367 +    cur->copy_from(vti);
  22.368 +  }
  22.369 +
  22.370 +  int number_of_types() const { return 1; }
  22.371 +  verification_type_info* types() const {
  22.372 +    return verification_type_info::at(type_addr());
  22.373 +  }
  22.374 +
  22.375 +  bool is_valid_offset(int offset_delta) const {
  22.376 +    return is_frame_type(offset_delta_to_frame_type(offset_delta));
  22.377 +  }
  22.378 +
  22.379 +  bool verify_subtype(address start, address end) const {
  22.380 +    return types()->verify(start, end);
  22.381 +  }
  22.382 +
  22.383 +#ifdef ASSERT
  22.384 +  void print_on(outputStream* st) const {
  22.385 +    st->print("same_frame_1_stack_item_frame(%d,", offset_delta());
  22.386 +    types()->print_on(st);
  22.387 +    st->print(")");
  22.388 +  }
  22.389 +#endif
  22.390 +};
  22.391 +
  22.392 +class same_frame_1_stack_item_extended : public stack_map_frame {
  22.393 + private:
  22.394 +  address offset_delta_addr() const { return frame_type_addr() + sizeof(u1); }
  22.395 +  address type_addr() const { return offset_delta_addr() + sizeof(u2); }
  22.396 +
  22.397 +  enum { _frame_id = 247 };
  22.398 +
  22.399 + public:
  22.400 +  static bool is_frame_type(u1 tag) {
  22.401 +    return tag == _frame_id;
  22.402 +  }
  22.403 +
  22.404 +  static same_frame_1_stack_item_extended* at(address addr) {
  22.405 +    assert(is_frame_type(*addr), "Wrong frame id");
  22.406 +    return (same_frame_1_stack_item_extended*)addr;
  22.407 +  }
  22.408 +
  22.409 +  static same_frame_1_stack_item_extended* create_at(
  22.410 +      address addr, int offset_delta, verification_type_info* vti) {
  22.411 +    same_frame_1_stack_item_extended* sm =
  22.412 +       (same_frame_1_stack_item_extended*)addr;
  22.413 +    sm->set_frame_type(_frame_id);
  22.414 +    sm->set_offset_delta(offset_delta);
  22.415 +    if (vti != NULL) {
  22.416 +      sm->set_type(vti);
  22.417 +    }
  22.418 +    return sm;
  22.419 +  }
  22.420 +
  22.421 +  static size_t calculate_size(verification_type_info* vti) {
  22.422 +    return sizeof(u1) + sizeof(u2) + vti->size();
  22.423 +  }
  22.424 +
  22.425 +  size_t size() const { return calculate_size(types()); }
  22.426 +  int offset_delta() const {
  22.427 +    return Bytes::get_Java_u2(offset_delta_addr()) + 1;
  22.428 +  }
  22.429 +
  22.430 +  void set_offset_delta(int offset_delta) {
  22.431 +    Bytes::put_Java_u2(offset_delta_addr(), offset_delta - 1);
  22.432 +  }
  22.433 +
  22.434 +  void set_type(verification_type_info* vti) {
  22.435 +    verification_type_info* cur = types();
  22.436 +    cur->copy_from(vti);
  22.437 +  }
  22.438 +
  22.439 +  int number_of_types() const { return 1; }
  22.440 +  verification_type_info* types() const {
  22.441 +    return verification_type_info::at(type_addr());
  22.442 +  }
  22.443 +  bool is_valid_offset(int offset) { return true; }
  22.444 +
  22.445 +  bool verify_subtype(address start, address end) const {
  22.446 +    return type_addr() < end && types()->verify(start, end);
  22.447 +  }
  22.448 +
  22.449 +#ifdef ASSERT
  22.450 +  void print_on(outputStream* st) const {
  22.451 +    st->print("same_frame_1_stack_item_extended(%d,", offset_delta());
  22.452 +    types()->print_on(st);
  22.453 +    st->print(")");
  22.454 +  }
  22.455 +#endif
  22.456 +};
  22.457 +
  22.458 +class chop_frame : public stack_map_frame {
  22.459 + private:
  22.460 +  address offset_delta_addr() const { return frame_type_addr() + sizeof(u1); }
  22.461 +
  22.462 +  static int frame_type_to_chops(u1 frame_type) {
  22.463 +    int chop = 251 - frame_type;
  22.464 +    return chop;
  22.465 +  }
  22.466 +
  22.467 +  static u1 chops_to_frame_type(int chop) {
  22.468 +    return 251 - chop;
  22.469 +  }
  22.470 +
  22.471 + public:
  22.472 +  static bool is_frame_type(u1 tag) {
  22.473 +    return frame_type_to_chops(tag) > 0 && frame_type_to_chops(tag) < 4;
  22.474 +  }
  22.475 +
  22.476 +  static chop_frame* at(address addr) {
  22.477 +    assert(is_frame_type(*addr), "Wrong frame id");
  22.478 +    return (chop_frame*)addr;
  22.479 +  }
  22.480 +
  22.481 +  static chop_frame* create_at(address addr, int offset_delta, int chops) {
  22.482 +    chop_frame* sm = (chop_frame*)addr;
  22.483 +    sm->set_chops(chops);
  22.484 +    sm->set_offset_delta(offset_delta);
  22.485 +    return sm;
  22.486 +  }
  22.487 +
  22.488 +  static size_t calculate_size() {
  22.489 +    return sizeof(u1) + sizeof(u2);
  22.490 +  }
  22.491 +
  22.492 +  size_t size() const { return calculate_size(); }
  22.493 +  int offset_delta() const {
  22.494 +    return Bytes::get_Java_u2(offset_delta_addr()) + 1;
  22.495 +  }
  22.496 +  void set_offset_delta(int offset_delta) {
  22.497 +    Bytes::put_Java_u2(offset_delta_addr(), offset_delta - 1);
  22.498 +  }
  22.499 +
  22.500 +  int chops() const {
  22.501 +    int chops = frame_type_to_chops(frame_type());
  22.502 +    assert(chops > 0 && chops < 4, "Invalid number of chops in frame");
  22.503 +    return chops;
  22.504 +  }
  22.505 +  void set_chops(int chops) {
  22.506 +    assert(chops > 0 && chops <= 3, "Bad number of chops");
  22.507 +    set_frame_type(chops_to_frame_type(chops));
  22.508 +  }
  22.509 +
  22.510 +  int number_of_types() const { return 0; }
  22.511 +  verification_type_info* types() const { return NULL; }
  22.512 +  bool is_valid_offset(int offset) { return true; }
  22.513 +
  22.514 +  bool verify_subtype(address start, address end) const {
  22.515 +    return frame_type_addr() + size() <= end;
  22.516 +  }
  22.517 +
  22.518 +#ifdef ASSERT
  22.519 +  void print_on(outputStream* st) const {
  22.520 +    st->print("chop_frame(%d,%d)", offset_delta(), chops());
  22.521 +  }
  22.522 +#endif
  22.523 +};
  22.524 +
  22.525 +class append_frame : public stack_map_frame {
  22.526 + private:
  22.527 +  address offset_delta_addr() const { return frame_type_addr() + sizeof(u1); }
  22.528 +  address types_addr() const { return offset_delta_addr() + sizeof(u2); }
  22.529 +
  22.530 +  static int frame_type_to_appends(u1 frame_type) {
  22.531 +    int append = frame_type - 251;
  22.532 +    return append;
  22.533 +  }
  22.534 +
  22.535 +  static u1 appends_to_frame_type(int appends) {
  22.536 +    assert(appends > 0 && appends < 4, "Invalid append amount");
  22.537 +    return 251 + appends;
  22.538 +  }
  22.539 +
  22.540 + public:
  22.541 +  static bool is_frame_type(u1 tag) {
  22.542 +    return frame_type_to_appends(tag) > 0 && frame_type_to_appends(tag) < 4;
  22.543 +  }
  22.544 +
  22.545 +  static append_frame* at(address addr) {
  22.546 +    assert(is_frame_type(*addr), "Wrong frame id");
  22.547 +    return (append_frame*)addr;
  22.548 +  }
  22.549 +
  22.550 +  static append_frame* create_at(
  22.551 +      address addr, int offset_delta, int appends,
  22.552 +      verification_type_info* types) {
  22.553 +    append_frame* sm = (append_frame*)addr;
  22.554 +    sm->set_appends(appends);
  22.555 +    sm->set_offset_delta(offset_delta);
  22.556 +    if (types != NULL) {
  22.557 +      verification_type_info* cur = sm->types();
  22.558 +      for (int i = 0; i < appends; ++i) {
  22.559 +        cur->copy_from(types);
  22.560 +        cur = cur->next();
  22.561 +        types = types->next();
  22.562 +      }
  22.563 +    }
  22.564 +    return sm;
  22.565 +  }
  22.566 +
  22.567 +  static size_t calculate_size(int appends, verification_type_info* types) {
  22.568 +    size_t sz = sizeof(u1) + sizeof(u2);
  22.569 +    for (int i = 0; i < appends; ++i) {
  22.570 +      sz += types->size();
  22.571 +      types = types->next();
  22.572 +    }
  22.573 +    return sz;
  22.574 +  }
  22.575 +
  22.576 +  static size_t max_size() {
  22.577 +    return sizeof(u1) + sizeof(u2) + 3 * verification_type_info::max_size();
  22.578 +  }
  22.579 +
  22.580 +  size_t size() const { return calculate_size(number_of_types(), types()); }
  22.581 +  int offset_delta() const {
  22.582 +    return Bytes::get_Java_u2(offset_delta_addr()) + 1;
  22.583 +  }
  22.584 +
  22.585 +  void set_offset_delta(int offset_delta) {
  22.586 +    Bytes::put_Java_u2(offset_delta_addr(), offset_delta - 1);
  22.587 +  }
  22.588 +
  22.589 +  void set_appends(int appends) {
  22.590 +    assert(appends > 0 && appends < 4, "Bad number of appends");
  22.591 +    set_frame_type(appends_to_frame_type(appends));
  22.592 +  }
  22.593 +
  22.594 +  int number_of_types() const {
  22.595 +    int appends = frame_type_to_appends(frame_type());
  22.596 +    assert(appends > 0 && appends < 4, "Invalid number of appends in frame");
  22.597 +    return appends;
  22.598 +  }
  22.599 +  verification_type_info* types() const {
  22.600 +    return verification_type_info::at(types_addr());
  22.601 +  }
  22.602 +  bool is_valid_offset(int offset) const { return true; }
  22.603 +
  22.604 +  bool verify_subtype(address start, address end) const {
  22.605 +    verification_type_info* vti = types();
  22.606 +    if ((address)vti < end && vti->verify(start, end)) {
  22.607 +      int nof = number_of_types();
  22.608 +      vti = vti->next();
  22.609 +      if (nof < 2 || vti->verify(start, end)) {
  22.610 +        vti = vti->next();
  22.611 +        if (nof < 3 || vti->verify(start, end)) {
  22.612 +          return true;
  22.613 +        }
  22.614 +      }
  22.615 +    }
  22.616 +    return false;
  22.617 +  }
  22.618 +
  22.619 +#ifdef ASSERT
  22.620 +  void print_on(outputStream* st) const {
  22.621 +    st->print("append_frame(%d,", offset_delta());
  22.622 +    verification_type_info* vti = types();
  22.623 +    for (int i = 0; i < number_of_types(); ++i) {
  22.624 +      vti->print_on(st);
  22.625 +      if (i != number_of_types() - 1) {
  22.626 +        st->print(",");
  22.627 +      }
  22.628 +      vti = vti->next();
  22.629 +    }
  22.630 +    st->print(")");
  22.631 +  }
  22.632 +#endif
  22.633 +};
  22.634 +
  22.635 +class full_frame : public stack_map_frame {
  22.636 + private:
  22.637 +  address offset_delta_addr() const { return frame_type_addr() + sizeof(u1); }
  22.638 +  address num_locals_addr() const { return offset_delta_addr() + sizeof(u2); }
  22.639 +  address locals_addr() const { return num_locals_addr() + sizeof(u2); }
  22.640 +  address stack_slots_addr(address end_of_locals) const {
  22.641 +      return end_of_locals; }
  22.642 +  address stack_addr(address end_of_locals) const {
  22.643 +      return stack_slots_addr(end_of_locals) + sizeof(u2); }
  22.644 +
  22.645 +  enum { _frame_id = 255 };
  22.646 +
  22.647 + public:
  22.648 +  static bool is_frame_type(u1 tag) {
  22.649 +    return tag == _frame_id;
  22.650 +  }
  22.651 +
  22.652 +  static full_frame* at(address addr) {
  22.653 +    assert(is_frame_type(*addr), "Wrong frame id");
  22.654 +    return (full_frame*)addr;
  22.655 +  }
  22.656 +
  22.657 +  static full_frame* create_at(
  22.658 +      address addr, int offset_delta, int num_locals,
  22.659 +      verification_type_info* locals,
  22.660 +      int stack_slots, verification_type_info* stack) {
  22.661 +    full_frame* sm = (full_frame*)addr;
  22.662 +    sm->set_frame_type(_frame_id);
  22.663 +    sm->set_offset_delta(offset_delta);
  22.664 +    sm->set_num_locals(num_locals);
  22.665 +    if (locals != NULL) {
  22.666 +      verification_type_info* cur = sm->locals();
  22.667 +      for (int i = 0; i < num_locals; ++i) {
  22.668 +        cur->copy_from(locals);
  22.669 +        cur = cur->next();
  22.670 +        locals = locals->next();
  22.671 +      }
  22.672 +      address end_of_locals = (address)cur;
  22.673 +      sm->set_stack_slots(end_of_locals, stack_slots);
  22.674 +      cur = sm->stack(end_of_locals);
  22.675 +      for (int i = 0; i < stack_slots; ++i) {
  22.676 +        cur->copy_from(stack);
  22.677 +        cur = cur->next();
  22.678 +        stack = stack->next();
  22.679 +      }
  22.680 +    }
  22.681 +    return sm;
  22.682 +  }
  22.683 +
  22.684 +  static size_t calculate_size(
  22.685 +      int num_locals, verification_type_info* locals,
  22.686 +      int stack_slots, verification_type_info* stack) {
  22.687 +    size_t sz = sizeof(u1) + sizeof(u2) + sizeof(u2) + sizeof(u2);
  22.688 +    verification_type_info* vti = locals;
  22.689 +    for (int i = 0; i < num_locals; ++i) {
  22.690 +      sz += vti->size();
  22.691 +      vti = vti->next();
  22.692 +    }
  22.693 +    vti = stack;
  22.694 +    for (int i = 0; i < stack_slots; ++i) {
  22.695 +      sz += vti->size();
  22.696 +      vti = vti->next();
  22.697 +    }
  22.698 +    return sz;
  22.699 +  }
  22.700 +
  22.701 +  static size_t max_size(int locals, int stack) {
  22.702 +    return sizeof(u1) + 3 * sizeof(u2) +
  22.703 +        (locals + stack) * verification_type_info::max_size();
  22.704 +  }
  22.705 +
  22.706 +  size_t size() const {
  22.707 +    address eol = end_of_locals();
  22.708 +    return calculate_size(num_locals(), locals(), stack_slots(eol), stack(eol));
  22.709 +  }
  22.710 +
  22.711 +  int offset_delta() const {
  22.712 +    return Bytes::get_Java_u2(offset_delta_addr()) + 1;
  22.713 +  }
  22.714 +  int num_locals() const { return Bytes::get_Java_u2(num_locals_addr()); }
  22.715 +  verification_type_info* locals() const {
  22.716 +    return verification_type_info::at(locals_addr());
  22.717 +  }
  22.718 +  address end_of_locals() const {
  22.719 +    verification_type_info* vti = locals();
  22.720 +    for (int i = 0; i < num_locals(); ++i) {
  22.721 +      vti = vti->next();
  22.722 +    }
  22.723 +    return (address)vti;
  22.724 +  }
  22.725 +  int stack_slots(address end_of_locals) const {
  22.726 +    return Bytes::get_Java_u2(stack_slots_addr(end_of_locals));
  22.727 +  }
  22.728 +  verification_type_info* stack(address end_of_locals) const {
  22.729 +    return verification_type_info::at(stack_addr(end_of_locals));
  22.730 +  }
  22.731 +
  22.732 +  void set_offset_delta(int offset_delta) {
  22.733 +    Bytes::put_Java_u2(offset_delta_addr(), offset_delta - 1);
  22.734 +  }
  22.735 +  void set_num_locals(int num_locals) {
  22.736 +    Bytes::put_Java_u2(num_locals_addr(), num_locals);
  22.737 +  }
  22.738 +  void set_stack_slots(address end_of_locals, int stack_slots) {
  22.739 +    Bytes::put_Java_u2(stack_slots_addr(end_of_locals), stack_slots);
  22.740 +  }
  22.741 +
  22.742 +  // These return only the locals.  Extra processing is required for stack
  22.743 +  // types of full frames.
  22.744 +  int number_of_types() const { return num_locals(); }
  22.745 +  verification_type_info* types() const { return locals(); }
  22.746 +  bool is_valid_offset(int offset) { return true; }
  22.747 +
  22.748 +  bool verify_subtype(address start, address end) const {
  22.749 +    verification_type_info* vti = types();
  22.750 +    if ((address)vti >= end) {
  22.751 +      return false;
  22.752 +    }
  22.753 +    int count = number_of_types();
  22.754 +    for (int i = 0; i < count; ++i) {
  22.755 +      if (!vti->verify(start, end)) {
  22.756 +        return false;
  22.757 +      }
  22.758 +      vti = vti->next();
  22.759 +    }
  22.760 +    address eol = (address)vti;
  22.761 +    if (eol + sizeof(u2) > end) {
  22.762 +      return false;
  22.763 +    }
  22.764 +    count = stack_slots(eol);
  22.765 +    vti = stack(eol);
   22.766 +    for (int i = 0; i < count; ++i) {
  22.767 +      if (!vti->verify(start, end)) {
  22.768 +        return false;
  22.769 +      }
  22.770 +      vti = vti->next();
  22.771 +    }
  22.772 +    return true;
  22.773 +  }
  22.774 +
  22.775 +#ifdef ASSERT
  22.776 +  void print_on(outputStream* st) const {
  22.777 +    st->print("full_frame(%d,{", offset_delta());
  22.778 +    verification_type_info* vti = locals();
  22.779 +    for (int i = 0; i < num_locals(); ++i) {
  22.780 +      vti->print_on(st);
  22.781 +      if (i != num_locals() - 1) {
  22.782 +        st->print(",");
  22.783 +      }
  22.784 +      vti = vti->next();
  22.785 +    }
  22.786 +    st->print("},{");
  22.787 +    address end_of_locals = (address)vti;
  22.788 +    vti = stack(end_of_locals);
  22.789 +    int ss = stack_slots(end_of_locals);
  22.790 +    for (int i = 0; i < ss; ++i) {
  22.791 +      vti->print_on(st);
  22.792 +      if (i != ss - 1) {
  22.793 +        st->print(",");
  22.794 +      }
  22.795 +      vti = vti->next();
  22.796 +    }
  22.797 +    st->print("})");
  22.798 +  }
  22.799 +#endif
  22.800 +};
  22.801 +
  22.802 +#define VIRTUAL_DISPATCH(stack_frame_type, func_name, args) \
  22.803 +  stack_frame_type* item_##stack_frame_type = as_##stack_frame_type(); \
  22.804 +  if (item_##stack_frame_type != NULL) { \
  22.805 +    return item_##stack_frame_type->func_name args;  \
  22.806 +  }
  22.807 +
  22.808 +#define VOID_VIRTUAL_DISPATCH(stack_frame_type, func_name, args) \
  22.809 +  stack_frame_type* item_##stack_frame_type = as_##stack_frame_type(); \
  22.810 +  if (item_##stack_frame_type != NULL) { \
  22.811 +    item_##stack_frame_type->func_name args;  \
  22.812 +    return; \
  22.813 +  }
  22.814 +
  22.815 +size_t stack_map_frame::size() const {
  22.816 +  FOR_EACH_STACKMAP_FRAME_TYPE(VIRTUAL_DISPATCH, size, ());
  22.817 +  return 0;
  22.818 +}
  22.819 +
  22.820 +int stack_map_frame::offset_delta() const {
  22.821 +  FOR_EACH_STACKMAP_FRAME_TYPE(VIRTUAL_DISPATCH, offset_delta, ());
  22.822 +  return 0;
  22.823 +}
  22.824 +
  22.825 +void stack_map_frame::set_offset_delta(int offset_delta) {
  22.826 +  FOR_EACH_STACKMAP_FRAME_TYPE(
  22.827 +      VOID_VIRTUAL_DISPATCH, set_offset_delta, (offset_delta));
  22.828 +}
  22.829 +
  22.830 +int stack_map_frame::number_of_types() const {
  22.831 +  FOR_EACH_STACKMAP_FRAME_TYPE(VIRTUAL_DISPATCH, number_of_types, ());
  22.832 +  return 0;
  22.833 +}
  22.834 +
  22.835 +verification_type_info* stack_map_frame::types() const {
  22.836 +  FOR_EACH_STACKMAP_FRAME_TYPE(VIRTUAL_DISPATCH, types, ());
  22.837 +  return NULL;
  22.838 +}
  22.839 +
  22.840 +bool stack_map_frame::is_valid_offset(int offset) const {
  22.841 +  FOR_EACH_STACKMAP_FRAME_TYPE(VIRTUAL_DISPATCH, is_valid_offset, (offset));
  22.842 +  return true;
  22.843 +}
  22.844 +
  22.845 +bool stack_map_frame::verify(address start, address end) const {
  22.846 +  if (frame_type_addr() >= start && frame_type_addr() < end) {
  22.847 +    FOR_EACH_STACKMAP_FRAME_TYPE(
  22.848 +       VIRTUAL_DISPATCH, verify_subtype, (start, end));
  22.849 +  }
  22.850 +  return false;
  22.851 +}
  22.852 +
  22.853 +#ifdef ASSERT
  22.854 +void stack_map_frame::print_on(outputStream* st) const {
  22.855 +  FOR_EACH_STACKMAP_FRAME_TYPE(VOID_VIRTUAL_DISPATCH, print_on, (st));
  22.856 +}
  22.857 +#endif
  22.858 +
  22.859 +#undef VIRTUAL_DISPATCH
  22.860 +#undef VOID_VIRTUAL_DISPATCH
  22.861 +
  22.862 +#define AS_SUBTYPE_DEF(stack_frame_type, arg1, arg2) \
  22.863 +stack_frame_type* stack_map_frame::as_##stack_frame_type() const { \
  22.864 +  if (stack_frame_type::is_frame_type(frame_type())) { \
  22.865 +    return (stack_frame_type*)this; \
  22.866 +  } else { \
  22.867 +    return NULL; \
  22.868 +  } \
  22.869 +}
  22.870 +
  22.871 +FOR_EACH_STACKMAP_FRAME_TYPE(AS_SUBTYPE_DEF, x, x)
  22.872 +#undef AS_SUBTYPE_DEF
  22.873 +
  22.874 +class stack_map_table_attribute {
  22.875 + private:
  22.876 +  address name_index_addr() const {
  22.877 +      return (address)this; }
  22.878 +  address attribute_length_addr() const {
  22.879 +      return name_index_addr() + sizeof(u2); }
  22.880 +  address number_of_entries_addr() const {
  22.881 +      return attribute_length_addr() + sizeof(u4); }
  22.882 +  address entries_addr() const {
  22.883 +      return number_of_entries_addr() + sizeof(u2); }
  22.884 +
  22.885 + protected:
  22.886 +  // No constructors  - should be 'private', but GCC issues a warning if it is
  22.887 +  stack_map_table_attribute() {}
  22.888 +  stack_map_table_attribute(const stack_map_table_attribute&) {}
  22.889 +
  22.890 + public:
  22.891 +
  22.892 +  static stack_map_table_attribute* at(address addr) {
  22.893 +    return (stack_map_table_attribute*)addr;
  22.894 +  }
  22.895 +
  22.896 +  u2 name_index() const {
  22.897 +       return Bytes::get_Java_u2(name_index_addr()); }
  22.898 +  u4 attribute_length() const {
  22.899 +      return Bytes::get_Java_u4(attribute_length_addr()); }
  22.900 +  u2 number_of_entries() const {
  22.901 +      return Bytes::get_Java_u2(number_of_entries_addr()); }
  22.902 +  stack_map_frame* entries() const {
  22.903 +    return stack_map_frame::at(entries_addr());
  22.904 +  }
  22.905 +
  22.906 +  static size_t header_size() {
  22.907 +      return sizeof(u2) + sizeof(u4);
  22.908 +  }
  22.909 +
  22.910 +  void set_name_index(u2 idx) {
  22.911 +    Bytes::put_Java_u2(name_index_addr(), idx);
  22.912 +  }
  22.913 +  void set_attribute_length(u4 len) {
  22.914 +    Bytes::put_Java_u4(attribute_length_addr(), len);
  22.915 +  }
  22.916 +  void set_number_of_entries(u2 num) {
  22.917 +    Bytes::put_Java_u2(number_of_entries_addr(), num);
  22.918 +  }
  22.919 +};
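
For context, the new stack_map_frame, full_frame and stack_map_table_attribute classes form a read/write overlay on the raw StackMapTable attribute bytes (u2 name index, u4 length, u2 entry count, then variable-sized frames). A minimal sketch of how the accessors compose; the walk_frames helper is hypothetical and not part of the change, only the accessors it calls are:

  // Illustrative only: iterate the frames of a StackMapTable attribute
  // using the overlay classes added above.  Frames are variable-sized,
  // so the walk advances by each frame's size().
  void walk_frames(address attr_start) {
    stack_map_table_attribute* attr = stack_map_table_attribute::at(attr_start);
    stack_map_frame* frame = attr->entries();
    for (int i = 0; i < attr->number_of_entries(); ++i) {
      // inspect frame->offset_delta(), frame->number_of_types(), ...
      frame = stack_map_frame::at((address)frame + frame->size());
    }
  }
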
    23.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Thu Nov 04 15:19:16 2010 -0700
    23.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Thu Nov 04 16:17:54 2010 -0700
    23.3 @@ -354,12 +354,8 @@
    23.4  double CMSStats::time_until_cms_gen_full() const {
    23.5    size_t cms_free = _cms_gen->cmsSpace()->free();
    23.6    GenCollectedHeap* gch = GenCollectedHeap::heap();
    23.7 -  size_t expected_promotion = gch->get_gen(0)->capacity();
    23.8 -  if (HandlePromotionFailure) {
    23.9 -    expected_promotion = MIN2(
   23.10 -        (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average(),
   23.11 -        expected_promotion);
   23.12 -  }
   23.13 +  size_t expected_promotion = MIN2(gch->get_gen(0)->capacity(),
   23.14 +                                   (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
   23.15    if (cms_free > expected_promotion) {
   23.16      // Start a cms collection if there isn't enough space to promote
   23.17      // for the next minor collection.  Use the padded average as
   23.18 @@ -865,57 +861,18 @@
   23.19    return free() + _virtual_space.uncommitted_size();
   23.20  }
   23.21  
   23.22 -bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(
   23.23 -    size_t max_promotion_in_bytes,
   23.24 -    bool younger_handles_promotion_failure) const {
   23.25 -
   23.26 -  // This is the most conservative test.  Full promotion is
   23.27 -  // guaranteed if this is used. The multiplicative factor is to
   23.28 -  // account for the worst case "dilatation".
   23.29 -  double adjusted_max_promo_bytes = _dilatation_factor * max_promotion_in_bytes;
   23.30 -  if (adjusted_max_promo_bytes > (double)max_uintx) { // larger than size_t
   23.31 -    adjusted_max_promo_bytes = (double)max_uintx;
   23.32 -  }
   23.33 -  bool result = (max_contiguous_available() >= (size_t)adjusted_max_promo_bytes);
   23.34 -
   23.35 -  if (younger_handles_promotion_failure && !result) {
   23.36 -    // Full promotion is not guaranteed because fragmentation
   23.37 -    // of the cms generation can prevent the full promotion.
   23.38 -    result = (max_available() >= (size_t)adjusted_max_promo_bytes);
   23.39 -
   23.40 -    if (!result) {
   23.41 -      // With promotion failure handling the test for the ability
   23.42 -      // to support the promotion does not have to be guaranteed.
   23.43 -      // Use an average of the amount promoted.
   23.44 -      result = max_available() >= (size_t)
   23.45 -        gc_stats()->avg_promoted()->padded_average();
   23.46 -      if (PrintGC && Verbose && result) {
   23.47 -        gclog_or_tty->print_cr(
   23.48 -          "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
   23.49 -          " max_available: " SIZE_FORMAT
   23.50 -          " avg_promoted: " SIZE_FORMAT,
   23.51 -          max_available(), (size_t)
   23.52 -          gc_stats()->avg_promoted()->padded_average());
   23.53 -      }
   23.54 -    } else {
   23.55 -      if (PrintGC && Verbose) {
   23.56 -        gclog_or_tty->print_cr(
   23.57 -          "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
   23.58 -          " max_available: " SIZE_FORMAT
   23.59 -          " adj_max_promo_bytes: " SIZE_FORMAT,
   23.60 -          max_available(), (size_t)adjusted_max_promo_bytes);
   23.61 -      }
   23.62 -    }
   23.63 -  } else {
   23.64 -    if (PrintGC && Verbose) {
   23.65 -      gclog_or_tty->print_cr(
   23.66 -        "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
   23.67 -        " contiguous_available: " SIZE_FORMAT
   23.68 -        " adj_max_promo_bytes: " SIZE_FORMAT,
   23.69 -        max_contiguous_available(), (size_t)adjusted_max_promo_bytes);
   23.70 -    }
   23.71 -  }
   23.72 -  return result;
   23.73 +bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
   23.74 +  size_t available = max_available();
   23.75 +  size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
   23.76 +  bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
   23.77 +  if (PrintGC && Verbose) {
   23.78 +    gclog_or_tty->print_cr(
   23.79 +      "CMS: promo attempt is%s safe: available("SIZE_FORMAT") %s av_promo("SIZE_FORMAT"),"
   23.80 +      "max_promo("SIZE_FORMAT")",
   23.81 +      res? "":" not", available, res? ">=":"<",
   23.82 +      av_promo, max_promotion_in_bytes);
   23.83 +  }
   23.84 +  return res;
   23.85  }
   23.86  
   23.87  // At a promotion failure dump information on block layout in heap
   23.88 @@ -6091,23 +6048,14 @@
   23.89    assert(_collectorState == Resizing, "Change of collector state to"
   23.90      " Resizing must be done under the freelistLocks (plural)");
   23.91  
   23.92 -  // Now that sweeping has been completed, if the GCH's
   23.93 -  // incremental_collection_will_fail flag is set, clear it,
   23.94 +  // Now that sweeping has been completed, we clear
   23.95 +  // the incremental_collection_failed flag,
   23.96    // thus inviting a younger gen collection to promote into
   23.97    // this generation. If such a promotion may still fail,
   23.98    // the flag will be set again when a young collection is
   23.99    // attempted.
  23.100 -  // I think the incremental_collection_will_fail flag's use
  23.101 -  // is specific to a 2 generation collection policy, so i'll
  23.102 -  // assert that that's the configuration we are operating within.
  23.103 -  // The use of the flag can and should be generalized appropriately
  23.104 -  // in the future to deal with a general n-generation system.
  23.105 -
  23.106    GenCollectedHeap* gch = GenCollectedHeap::heap();
  23.107 -  assert(gch->collector_policy()->is_two_generation_policy(),
  23.108 -         "Resetting of incremental_collection_will_fail flag"
  23.109 -         " may be incorrect otherwise");
  23.110 -  gch->clear_incremental_collection_will_fail();
  23.111 +  gch->clear_incremental_collection_failed();  // Worth retrying as fresh space may have been freed up
  23.112    gch->update_full_collections_completed(_collection_count_start);
  23.113  }
  23.114  
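
The rewritten promotion_attempt_is_safe drops the HandlePromotionFailure special case: a young collection is treated as safe when the CMS generation's max_available() covers either the padded average of recent promotions or the worst case (the whole young generation). With purely hypothetical sizes, the relaxed test reads:

  // Hypothetical numbers only, to illustrate the simplified predicate.
  size_t available = 120*M;  // max_available() in the CMS generation
  size_t av_promo  =  40*M;  // padded average of recent promotion volume
  size_t max_promo = 200*M;  // worst case: young gen capacity
  bool safe = (available >= av_promo) || (available >= max_promo);
  // -> true: the average fits, even though the worst case would not.
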
    24.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Thu Nov 04 15:19:16 2010 -0700
    24.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Thu Nov 04 16:17:54 2010 -0700
    24.3 @@ -1185,8 +1185,7 @@
    24.4    virtual void par_promote_alloc_done(int thread_num);
    24.5    virtual void par_oop_since_save_marks_iterate_done(int thread_num);
    24.6  
    24.7 -  virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes,
    24.8 -    bool younger_handles_promotion_failure) const;
    24.9 +  virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes) const;
   24.10  
   24.11    // Inform this (non-young) generation that a promotion failure was
   24.12    // encountered during a collection of a younger generation that
    25.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp	Thu Nov 04 15:19:16 2010 -0700
    25.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp	Thu Nov 04 16:17:54 2010 -0700
    25.3 @@ -1,5 +1,5 @@
    25.4  /*
    25.5 - * Copyright (c) 2001, 2006, Oracle and/or its affiliates. All rights reserved.
    25.6 + * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
    25.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    25.8   *
    25.9   * This code is free software; you can redistribute it and/or modify it
   25.10 @@ -272,12 +272,16 @@
   25.11    }
   25.12  }
   25.13  
   25.14 -// Wait until the next synchronous GC or a timeout, whichever is earlier.
   25.15 -void ConcurrentMarkSweepThread::wait_on_cms_lock(long t) {
   25.16 +// Wait until the next synchronous GC, a concurrent full gc request,
   25.17 +// or a timeout, whichever is earlier.
   25.18 +void ConcurrentMarkSweepThread::wait_on_cms_lock(long t_millis) {
   25.19    MutexLockerEx x(CGC_lock,
   25.20                    Mutex::_no_safepoint_check_flag);
   25.21 +  if (_should_terminate || _collector->_full_gc_requested) {
   25.22 +    return;
   25.23 +  }
   25.24    set_CMS_flag(CMS_cms_wants_token);   // to provoke notifies
   25.25 -  CGC_lock->wait(Mutex::_no_safepoint_check_flag, t);
   25.26 +  CGC_lock->wait(Mutex::_no_safepoint_check_flag, t_millis);
   25.27    clear_CMS_flag(CMS_cms_wants_token);
   25.28    assert(!CMS_flag_is_set(CMS_cms_has_token | CMS_cms_wants_token),
   25.29           "Should not be set");
   25.30 @@ -289,7 +293,8 @@
   25.31        icms_wait();
   25.32        return;
   25.33      } else {
   25.34 -      // Wait until the next synchronous GC or a timeout, whichever is earlier
   25.35 +      // Wait until the next synchronous GC, a concurrent full gc
   25.36 +      // request or a timeout, whichever is earlier.
   25.37        wait_on_cms_lock(CMSWaitDuration);
   25.38      }
   25.39      // Check if we should start a CMS collection cycle
    26.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp	Thu Nov 04 15:19:16 2010 -0700
    26.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp	Thu Nov 04 16:17:54 2010 -0700
    26.3 @@ -120,8 +120,10 @@
    26.4    }
    26.5  
    26.6    // Wait on CMS lock until the next synchronous GC
    26.7 -  // or given timeout, whichever is earlier.
    26.8 -  void    wait_on_cms_lock(long t); // milliseconds
    26.9 +  // or given timeout, whichever is earlier. A timeout value
   26.10 +  // of 0 indicates that there is no upper bound on the wait time.
   26.11 +  // A concurrent full gc request terminates the wait.
   26.12 +  void wait_on_cms_lock(long t_millis);
   26.13  
   26.14    // The CMS thread will yield during the work portion of its cycle
   26.15    // only when requested to.  Both synchronous and asychronous requests
    27.1 --- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Thu Nov 04 15:19:16 2010 -0700
    27.2 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Thu Nov 04 16:17:54 2010 -0700
    27.3 @@ -2418,6 +2418,8 @@
    27.4    for (int i = 0; i < (int)_max_task_num; ++i) {
    27.5      OopTaskQueue* queue = _task_queues->queue(i);
    27.6      queue->set_empty();
    27.7 +    // Clear any partial regions from the CMTasks
    27.8 +    _tasks[i]->clear_aborted_region();
    27.9    }
   27.10  }
   27.11  
   27.12 @@ -2706,7 +2708,6 @@
   27.13    clear_marking_state();
   27.14    for (int i = 0; i < (int)_max_task_num; ++i) {
   27.15      _tasks[i]->clear_region_fields();
   27.16 -    _tasks[i]->clear_aborted_region();
   27.17    }
   27.18    _has_aborted = true;
   27.19  
   27.20 @@ -2985,7 +2986,7 @@
   27.21  
   27.22    _nextMarkBitMap                = nextMarkBitMap;
   27.23    clear_region_fields();
   27.24 -  clear_aborted_region();
   27.25 +  assert(_aborted_region.is_empty(), "should have been cleared");
   27.26  
   27.27    _calls                         = 0;
   27.28    _elapsed_time_ms               = 0.0;
    28.1 --- a/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp	Thu Nov 04 15:19:16 2010 -0700
    28.2 +++ b/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp	Thu Nov 04 16:17:54 2010 -0700
    28.3 @@ -175,7 +175,7 @@
    28.4    }
    28.5    assert(start_card > _array->index_for(_bottom), "Cannot be first card");
    28.6    assert(_array->offset_array(start_card-1) <= N_words,
    28.7 -    "Offset card has an unexpected value");
    28.8 +         "Offset card has an unexpected value");
    28.9    size_t start_card_for_region = start_card;
   28.10    u_char offset = max_jubyte;
   28.11    for (int i = 0; i < BlockOffsetArray::N_powers; i++) {
   28.12 @@ -577,6 +577,16 @@
   28.13  #endif
   28.14  }
   28.15  
   28.16 +void
   28.17 +G1BlockOffsetArray::set_for_starts_humongous(HeapWord* new_end) {
   28.18 +  assert(_end ==  new_end, "_end should have already been updated");
   28.19 +
   28.20 +  // The first BOT entry should have offset 0.
   28.21 +  _array->set_offset_array(_array->index_for(_bottom), 0);
   28.22 +  // The rest should point to the first one.
   28.23 +  set_remainder_to_point_to_start(_bottom + N_words, new_end);
   28.24 +}
   28.25 +
   28.26  //////////////////////////////////////////////////////////////////////
   28.27  // G1BlockOffsetArrayContigSpace
   28.28  //////////////////////////////////////////////////////////////////////
   28.29 @@ -626,3 +636,12 @@
   28.30           "Precondition of call");
   28.31    _array->set_offset_array(bottom_index, 0);
   28.32  }
   28.33 +
   28.34 +void
   28.35 +G1BlockOffsetArrayContigSpace::set_for_starts_humongous(HeapWord* new_end) {
   28.36 +  G1BlockOffsetArray::set_for_starts_humongous(new_end);
   28.37 +
   28.38 +  // Make sure _next_offset_threshold and _next_offset_index point to new_end.
   28.39 +  _next_offset_threshold = new_end;
   28.40 +  _next_offset_index     = _array->index_for(new_end);
   28.41 +}
    29.1 --- a/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp	Thu Nov 04 15:19:16 2010 -0700
    29.2 +++ b/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp	Thu Nov 04 16:17:54 2010 -0700
    29.3 @@ -436,6 +436,8 @@
    29.4    }
    29.5  
    29.6    void check_all_cards(size_t left_card, size_t right_card) const;
    29.7 +
    29.8 +  virtual void set_for_starts_humongous(HeapWord* new_end);
    29.9  };
   29.10  
   29.11  // A subtype of BlockOffsetArray that takes advantage of the fact
   29.12 @@ -484,4 +486,6 @@
   29.13  
   29.14    HeapWord* block_start_unsafe(const void* addr);
   29.15    HeapWord* block_start_unsafe_const(const void* addr) const;
   29.16 +
   29.17 +  virtual void set_for_starts_humongous(HeapWord* new_end);
   29.18  };
    30.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Nov 04 15:19:16 2010 -0700
    30.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Nov 04 16:17:54 2010 -0700
    30.3 @@ -791,7 +791,7 @@
    30.4    int                _worker_i;
    30.5  public:
    30.6    RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) :
    30.7 -    _cl(g1->g1_rem_set()->as_HRInto_G1RemSet(), worker_i),
    30.8 +    _cl(g1->g1_rem_set(), worker_i),
    30.9      _worker_i(worker_i),
   30.10      _g1h(g1)
   30.11    { }
   30.12 @@ -890,7 +890,7 @@
   30.13      abandon_cur_alloc_region();
   30.14      abandon_gc_alloc_regions();
   30.15      assert(_cur_alloc_region == NULL, "Invariant.");
   30.16 -    g1_rem_set()->as_HRInto_G1RemSet()->cleanupHRRS();
   30.17 +    g1_rem_set()->cleanupHRRS();
   30.18      tear_down_region_lists();
   30.19      set_used_regions_to_need_zero_fill();
   30.20  
   30.21 @@ -1506,15 +1506,11 @@
   30.22    }
   30.23  
   30.24    // Also create a G1 rem set.
   30.25 -  if (G1UseHRIntoRS) {
   30.26 -    if (mr_bs()->is_a(BarrierSet::CardTableModRef)) {
   30.27 -      _g1_rem_set = new HRInto_G1RemSet(this, (CardTableModRefBS*)mr_bs());
   30.28 -    } else {
   30.29 -      vm_exit_during_initialization("G1 requires a cardtable mod ref bs.");
   30.30 -      return JNI_ENOMEM;
   30.31 -    }
   30.32 +  if (mr_bs()->is_a(BarrierSet::CardTableModRef)) {
   30.33 +    _g1_rem_set = new G1RemSet(this, (CardTableModRefBS*)mr_bs());
   30.34    } else {
   30.35 -    _g1_rem_set = new StupidG1RemSet(this);
   30.36 +    vm_exit_during_initialization("G1 requires a cardtable mod ref bs.");
   30.37 +    return JNI_ENOMEM;
   30.38    }
   30.39  
   30.40    // Carve out the G1 part of the heap.
   30.41 @@ -2706,8 +2702,7 @@
   30.42  }
   30.43  
   30.44  size_t G1CollectedHeap::cards_scanned() {
   30.45 -  HRInto_G1RemSet* g1_rset = (HRInto_G1RemSet*) g1_rem_set();
   30.46 -  return g1_rset->cardsScanned();
   30.47 +  return g1_rem_set()->cardsScanned();
   30.48  }
   30.49  
   30.50  void
   30.51 @@ -3850,6 +3845,54 @@
   30.52                 undo_waste() * HeapWordSize / K);
   30.53  }
   30.54  
   30.55 +#ifdef ASSERT
   30.56 +bool G1ParScanThreadState::verify_ref(narrowOop* ref) const {
   30.57 +  assert(ref != NULL, "invariant");
   30.58 +  assert(UseCompressedOops, "sanity");
   30.59 +  assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, ref));
   30.60 +  oop p = oopDesc::load_decode_heap_oop(ref);
   30.61 +  assert(_g1h->is_in_g1_reserved(p),
   30.62 +         err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
   30.63 +  return true;
   30.64 +}
   30.65 +
   30.66 +bool G1ParScanThreadState::verify_ref(oop* ref) const {
   30.67 +  assert(ref != NULL, "invariant");
   30.68 +  if (has_partial_array_mask(ref)) {
   30.69 +    // Must be in the collection set--it's already been copied.
   30.70 +    oop p = clear_partial_array_mask(ref);
   30.71 +    assert(_g1h->obj_in_cs(p),
   30.72 +           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
   30.73 +  } else {
   30.74 +    oop p = oopDesc::load_decode_heap_oop(ref);
   30.75 +    assert(_g1h->is_in_g1_reserved(p),
   30.76 +           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
   30.77 +  }
   30.78 +  return true;
   30.79 +}
   30.80 +
   30.81 +bool G1ParScanThreadState::verify_task(StarTask ref) const {
   30.82 +  if (ref.is_narrow()) {
   30.83 +    return verify_ref((narrowOop*) ref);
   30.84 +  } else {
   30.85 +    return verify_ref((oop*) ref);
   30.86 +  }
   30.87 +}
   30.88 +#endif // ASSERT
   30.89 +
   30.90 +void G1ParScanThreadState::trim_queue() {
   30.91 +  StarTask ref;
   30.92 +  do {
   30.93 +    // Drain the overflow stack first, so other threads can steal.
   30.94 +    while (refs()->pop_overflow(ref)) {
   30.95 +      deal_with_reference(ref);
   30.96 +    }
   30.97 +    while (refs()->pop_local(ref)) {
   30.98 +      deal_with_reference(ref);
   30.99 +    }
  30.100 +  } while (!refs()->is_empty());
  30.101 +}
  30.102 +
  30.103  G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
  30.104    _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()),
  30.105    _par_scan_state(par_scan_state) { }
  30.106 @@ -4052,38 +4095,43 @@
  30.107      : _g1h(g1h), _par_scan_state(par_scan_state),
  30.108        _queues(queues), _terminator(terminator) {}
  30.109  
  30.110 -  void do_void() {
  30.111 -    G1ParScanThreadState* pss = par_scan_state();
  30.112 -    while (true) {
  30.113 +  void do_void();
  30.114 +
  30.115 +private:
  30.116 +  inline bool offer_termination();
  30.117 +};
  30.118 +
  30.119 +bool G1ParEvacuateFollowersClosure::offer_termination() {
  30.120 +  G1ParScanThreadState* const pss = par_scan_state();
  30.121 +  pss->start_term_time();
  30.122 +  const bool res = terminator()->offer_termination();
  30.123 +  pss->end_term_time();
  30.124 +  return res;
  30.125 +}
  30.126 +
  30.127 +void G1ParEvacuateFollowersClosure::do_void() {
  30.128 +  StarTask stolen_task;
  30.129 +  G1ParScanThreadState* const pss = par_scan_state();
  30.130 +  pss->trim_queue();
  30.131 +
  30.132 +  do {
  30.133 +    while (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) {
  30.134 +      assert(pss->verify_task(stolen_task), "sanity");
  30.135 +      if (stolen_task.is_narrow()) {
  30.136 +        pss->deal_with_reference((narrowOop*) stolen_task);
  30.137 +      } else {
  30.138 +        pss->deal_with_reference((oop*) stolen_task);
  30.139 +      }
  30.140 +
  30.141 +      // We've just processed a reference and we might have made
  30.142 +      // available new entries on the queues. So we have to make sure
  30.143 +      // we drain the queues as necessary.
  30.144        pss->trim_queue();
  30.145 -
  30.146 -      StarTask stolen_task;
  30.147 -      if (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) {
  30.148 -        // slightly paranoid tests; I'm trying to catch potential
  30.149 -        // problems before we go into push_on_queue to know where the
  30.150 -        // problem is coming from
  30.151 -        assert((oop*)stolen_task != NULL, "Error");
  30.152 -        if (stolen_task.is_narrow()) {
  30.153 -          assert(UseCompressedOops, "Error");
  30.154 -          narrowOop* p = (narrowOop*) stolen_task;
  30.155 -          assert(has_partial_array_mask(p) ||
  30.156 -                 _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "Error");
  30.157 -          pss->push_on_queue(p);
  30.158 -        } else {
  30.159 -          oop* p = (oop*) stolen_task;
  30.160 -          assert(has_partial_array_mask(p) || _g1h->is_in_g1_reserved(*p), "Error");
  30.161 -          pss->push_on_queue(p);
  30.162 -        }
  30.163 -        continue;
  30.164 -      }
  30.165 -      pss->start_term_time();
  30.166 -      if (terminator()->offer_termination()) break;
  30.167 -      pss->end_term_time();
  30.168      }
  30.169 -    pss->end_term_time();
  30.170 -    pss->retire_alloc_buffers();
  30.171 -  }
  30.172 -};
  30.173 +  } while (!offer_termination());
  30.174 +
  30.175 +  pss->retire_alloc_buffers();
  30.176 +}
  30.177  
  30.178  class G1ParTask : public AbstractGangTask {
  30.179  protected:
  30.180 @@ -4182,8 +4230,7 @@
  30.181        pss.print_termination_stats(i);
  30.182      }
  30.183  
  30.184 -    assert(pss.refs_to_scan() == 0, "Task queue should be empty");
  30.185 -    assert(pss.overflowed_refs_to_scan() == 0, "Overflow queue should be empty");
  30.186 +    assert(pss.refs()->is_empty(), "should be empty");
  30.187      double end_time_ms = os::elapsedTime() * 1000.0;
  30.188      _g1h->g1_policy()->record_gc_worker_end_time(i, end_time_ms);
  30.189    }
    31.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Nov 04 15:19:16 2010 -0700
    31.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Nov 04 16:17:54 2010 -0700
    31.3 @@ -1651,49 +1651,17 @@
    31.4    size_t alloc_buffer_waste() const              { return _alloc_buffer_waste; }
    31.5    size_t undo_waste() const                      { return _undo_waste; }
    31.6  
    31.7 +#ifdef ASSERT
    31.8 +  bool verify_ref(narrowOop* ref) const;
    31.9 +  bool verify_ref(oop* ref) const;
   31.10 +  bool verify_task(StarTask ref) const;
   31.11 +#endif // ASSERT
   31.12 +
   31.13    template <class T> void push_on_queue(T* ref) {
   31.14 -    assert(ref != NULL, "invariant");
   31.15 -    assert(has_partial_array_mask(ref) ||
   31.16 -           _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(ref)), "invariant");
   31.17 -#ifdef ASSERT
   31.18 -    if (has_partial_array_mask(ref)) {
   31.19 -      oop p = clear_partial_array_mask(ref);
   31.20 -      // Verify that we point into the CS
   31.21 -      assert(_g1h->obj_in_cs(p), "Should be in CS");
   31.22 -    }
   31.23 -#endif
   31.24 +    assert(verify_ref(ref), "sanity");
   31.25      refs()->push(ref);
   31.26    }
   31.27  
   31.28 -  void pop_from_queue(StarTask& ref) {
   31.29 -    if (refs()->pop_local(ref)) {
   31.30 -      assert((oop*)ref != NULL, "pop_local() returned true");
   31.31 -      assert(UseCompressedOops || !ref.is_narrow(), "Error");
   31.32 -      assert(has_partial_array_mask((oop*)ref) ||
   31.33 -             _g1h->is_in_g1_reserved(ref.is_narrow() ? oopDesc::load_decode_heap_oop((narrowOop*)ref)
   31.34 -                                                     : oopDesc::load_decode_heap_oop((oop*)ref)),
   31.35 -              "invariant");
   31.36 -    } else {
   31.37 -      StarTask null_task;
   31.38 -      ref = null_task;
   31.39 -    }
   31.40 -  }
   31.41 -
   31.42 -  void pop_from_overflow_queue(StarTask& ref) {
   31.43 -    StarTask new_ref;
   31.44 -    refs()->pop_overflow(new_ref);
   31.45 -    assert((oop*)new_ref != NULL, "pop() from a local non-empty stack");
   31.46 -    assert(UseCompressedOops || !new_ref.is_narrow(), "Error");
   31.47 -    assert(has_partial_array_mask((oop*)new_ref) ||
   31.48 -           _g1h->is_in_g1_reserved(new_ref.is_narrow() ? oopDesc::load_decode_heap_oop((narrowOop*)new_ref)
   31.49 -                                                       : oopDesc::load_decode_heap_oop((oop*)new_ref)),
   31.50 -           "invariant");
   31.51 -    ref = new_ref;
   31.52 -  }
   31.53 -
   31.54 -  int refs_to_scan()            { return (int)refs()->size(); }
   31.55 -  int overflowed_refs_to_scan() { return (int)refs()->overflow_stack()->size(); }
   31.56 -
   31.57    template <class T> void update_rs(HeapRegion* from, T* p, int tid) {
   31.58      if (G1DeferredRSUpdate) {
   31.59        deferred_rs_update(from, p, tid);
   31.60 @@ -1804,7 +1772,6 @@
   31.61      }
   31.62    }
   31.63  
   31.64 -private:
   31.65    template <class T> void deal_with_reference(T* ref_to_scan) {
   31.66      if (has_partial_array_mask(ref_to_scan)) {
   31.67        _partial_scan_cl->do_oop_nv(ref_to_scan);
   31.68 @@ -1818,59 +1785,15 @@
   31.69      }
   31.70    }
   31.71  
   31.72 -public:
   31.73 -  void trim_queue() {
   31.74 -    // I've replicated the loop twice, first to drain the overflow
   31.75 -    // queue, second to drain the task queue. This is better than
   31.76 -    // having a single loop, which checks both conditions and, inside
   31.77 -    // it, either pops the overflow queue or the task queue, as each
   31.78 -    // loop is tighter. Also, the decision to drain the overflow queue
   31.79 -    // first is not arbitrary, as the overflow queue is not visible
   31.80 -    // to the other workers, whereas the task queue is. So, we want to
   31.81 -    // drain the "invisible" entries first, while allowing the other
   31.82 -    // workers to potentially steal the "visible" entries.
   31.83 -
   31.84 -    while (refs_to_scan() > 0 || overflowed_refs_to_scan() > 0) {
   31.85 -      while (overflowed_refs_to_scan() > 0) {
   31.86 -        StarTask ref_to_scan;
   31.87 -        assert((oop*)ref_to_scan == NULL, "Constructed above");
   31.88 -        pop_from_overflow_queue(ref_to_scan);
   31.89 -        // We shouldn't have pushed it on the queue if it was not
   31.90 -        // pointing into the CSet.
   31.91 -        assert((oop*)ref_to_scan != NULL, "Follows from inner loop invariant");
   31.92 -        if (ref_to_scan.is_narrow()) {
   31.93 -          assert(UseCompressedOops, "Error");
   31.94 -          narrowOop* p = (narrowOop*)ref_to_scan;
   31.95 -          assert(!has_partial_array_mask(p) &&
   31.96 -                 _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
   31.97 -          deal_with_reference(p);
   31.98 -        } else {
   31.99 -          oop* p = (oop*)ref_to_scan;
  31.100 -          assert((has_partial_array_mask(p) && _g1h->is_in_g1_reserved(clear_partial_array_mask(p))) ||
  31.101 -                 _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
  31.102 -          deal_with_reference(p);
  31.103 -        }
  31.104 -      }
  31.105 -
  31.106 -      while (refs_to_scan() > 0) {
  31.107 -        StarTask ref_to_scan;
  31.108 -        assert((oop*)ref_to_scan == NULL, "Constructed above");
  31.109 -        pop_from_queue(ref_to_scan);
  31.110 -        if ((oop*)ref_to_scan != NULL) {
  31.111 -          if (ref_to_scan.is_narrow()) {
  31.112 -            assert(UseCompressedOops, "Error");
  31.113 -            narrowOop* p = (narrowOop*)ref_to_scan;
  31.114 -            assert(!has_partial_array_mask(p) &&
  31.115 -                    _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
  31.116 -            deal_with_reference(p);
  31.117 -          } else {
  31.118 -            oop* p = (oop*)ref_to_scan;
  31.119 -            assert((has_partial_array_mask(p) && _g1h->obj_in_cs(clear_partial_array_mask(p))) ||
  31.120 -                   _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
  31.121 -            deal_with_reference(p);
  31.122 -          }
  31.123 -        }
  31.124 -      }
  31.125 +  void deal_with_reference(StarTask ref) {
  31.126 +    assert(verify_task(ref), "sanity");
  31.127 +    if (ref.is_narrow()) {
  31.128 +      deal_with_reference((narrowOop*)ref);
  31.129 +    } else {
  31.130 +      deal_with_reference((oop*)ref);
  31.131      }
  31.132    }
  31.133 +
  31.134 +public:
  31.135 +  void trim_queue();
  31.136  };
    32.1 --- a/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Thu Nov 04 15:19:16 2010 -0700
    32.2 +++ b/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Thu Nov 04 16:17:54 2010 -0700
    32.3 @@ -25,8 +25,6 @@
    32.4  class HeapRegion;
    32.5  class G1CollectedHeap;
    32.6  class G1RemSet;
    32.7 -class HRInto_G1RemSet;
    32.8 -class G1RemSet;
    32.9  class ConcurrentMark;
   32.10  class DirtyCardToOopClosure;
   32.11  class CMBitMap;
    33.1 --- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Thu Nov 04 15:19:16 2010 -0700
    33.2 +++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Thu Nov 04 16:17:54 2010 -0700
    33.3 @@ -97,13 +97,6 @@
    33.4    }
    33.5  };
    33.6  
    33.7 -void
    33.8 -StupidG1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
    33.9 -                                            int worker_i) {
   33.10 -  IntoCSRegionClosure rc(_g1, oc);
   33.11 -  _g1->heap_region_iterate(&rc);
   33.12 -}
   33.13 -
   33.14  class VerifyRSCleanCardOopClosure: public OopClosure {
   33.15    G1CollectedHeap* _g1;
   33.16  public:
   33.17 @@ -119,8 +112,9 @@
   33.18    }
   33.19  };
   33.20  
   33.21 -HRInto_G1RemSet::HRInto_G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
   33.22 -  : G1RemSet(g1), _ct_bs(ct_bs), _g1p(_g1->g1_policy()),
   33.23 +G1RemSet::G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
   33.24 +  : _g1(g1), _conc_refine_cards(0),
   33.25 +    _ct_bs(ct_bs), _g1p(_g1->g1_policy()),
   33.26      _cg1r(g1->concurrent_g1_refine()),
   33.27      _traversal_in_progress(false),
   33.28      _cset_rs_update_cl(NULL),
   33.29 @@ -134,7 +128,7 @@
   33.30    }
   33.31  }
   33.32  
   33.33 -HRInto_G1RemSet::~HRInto_G1RemSet() {
   33.34 +G1RemSet::~G1RemSet() {
   33.35    delete _seq_task;
   33.36    for (uint i = 0; i < n_workers(); i++) {
   33.37      assert(_cset_rs_update_cl[i] == NULL, "it should be");
   33.38 @@ -277,7 +271,7 @@
   33.39  //          p threads
   33.40  // Then thread t will start at region t * floor (n/p)
   33.41  
   33.42 -HeapRegion* HRInto_G1RemSet::calculateStartRegion(int worker_i) {
   33.43 +HeapRegion* G1RemSet::calculateStartRegion(int worker_i) {
   33.44    HeapRegion* result = _g1p->collection_set();
   33.45    if (ParallelGCThreads > 0) {
   33.46      size_t cs_size = _g1p->collection_set_size();
   33.47 @@ -290,7 +284,7 @@
   33.48    return result;
   33.49  }
   33.50  
   33.51 -void HRInto_G1RemSet::scanRS(OopsInHeapRegionClosure* oc, int worker_i) {
   33.52 +void G1RemSet::scanRS(OopsInHeapRegionClosure* oc, int worker_i) {
   33.53    double rs_time_start = os::elapsedTime();
   33.54    HeapRegion *startRegion = calculateStartRegion(worker_i);
   33.55  
   33.56 @@ -340,7 +334,7 @@
   33.57    }
   33.58  };
   33.59  
   33.60 -void HRInto_G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, int worker_i) {
   33.61 +void G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, int worker_i) {
   33.62    double start = os::elapsedTime();
   33.63    // Apply the given closure to all remaining log entries.
   33.64    RefineRecordRefsIntoCSCardTableEntryClosure into_cset_update_rs_cl(_g1, into_cset_dcq);
   33.65 @@ -439,12 +433,11 @@
   33.66    }
   33.67  };
   33.68  
   33.69 -void HRInto_G1RemSet::cleanupHRRS() {
   33.70 +void G1RemSet::cleanupHRRS() {
   33.71    HeapRegionRemSet::cleanup();
   33.72  }
   33.73  
   33.74 -void
   33.75 -HRInto_G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
   33.76 +void G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
   33.77                                               int worker_i) {
   33.78  #if CARD_REPEAT_HISTO
   33.79    ct_freq_update_histo_and_reset();
   33.80 @@ -508,8 +501,7 @@
   33.81    _cset_rs_update_cl[worker_i] = NULL;
   33.82  }
   33.83  
   33.84 -void HRInto_G1RemSet::
   33.85 -prepare_for_oops_into_collection_set_do() {
   33.86 +void G1RemSet::prepare_for_oops_into_collection_set_do() {
   33.87  #if G1_REM_SET_LOGGING
   33.88    PrintRSClosure cl;
   33.89    _g1->collection_set_iterate(&cl);
   33.90 @@ -581,7 +573,7 @@
   33.91      //   RSet updating,
   33.92      // * the post-write barrier shouldn't be logging updates to young
   33.93      //   regions (but there is a situation where this can happen - see
   33.94 -    //   the comment in HRInto_G1RemSet::concurrentRefineOneCard below -
   33.95 +    //   the comment in G1RemSet::concurrentRefineOneCard below -
   33.96      //   that should not be applicable here), and
   33.97      // * during actual RSet updating, the filtering of cards in young
   33.98      //   regions in HeapRegion::oops_on_card_seq_iterate_careful is
   33.99 @@ -601,7 +593,7 @@
  33.100    }
  33.101  };
  33.102  
  33.103 -void HRInto_G1RemSet::cleanup_after_oops_into_collection_set_do() {
  33.104 +void G1RemSet::cleanup_after_oops_into_collection_set_do() {
  33.105    guarantee( _cards_scanned != NULL, "invariant" );
  33.106    _total_cards_scanned = 0;
  33.107    for (uint i = 0; i < n_workers(); ++i)
  33.108 @@ -692,12 +684,12 @@
  33.109    }
  33.110  };
  33.111  
  33.112 -void HRInto_G1RemSet::scrub(BitMap* region_bm, BitMap* card_bm) {
  33.113 +void G1RemSet::scrub(BitMap* region_bm, BitMap* card_bm) {
  33.114    ScrubRSClosure scrub_cl(region_bm, card_bm);
  33.115    _g1->heap_region_iterate(&scrub_cl);
  33.116  }
  33.117  
  33.118 -void HRInto_G1RemSet::scrub_par(BitMap* region_bm, BitMap* card_bm,
  33.119 +void G1RemSet::scrub_par(BitMap* region_bm, BitMap* card_bm,
  33.120                                  int worker_num, int claim_val) {
  33.121    ScrubRSClosure scrub_cl(region_bm, card_bm);
  33.122    _g1->heap_region_par_iterate_chunked(&scrub_cl, worker_num, claim_val);
  33.123 @@ -741,7 +733,7 @@
  33.124    virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
  33.125  };
  33.126  
  33.127 -bool HRInto_G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
  33.128 +bool G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
  33.129                                                     bool check_for_refs_into_cset) {
  33.130    // Construct the region representing the card.
  33.131    HeapWord* start = _ct_bs->addr_for(card_ptr);
  33.132 @@ -820,7 +812,7 @@
  33.133    return trigger_cl.value();
  33.134  }
  33.135  
  33.136 -bool HRInto_G1RemSet::concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
  33.137 +bool G1RemSet::concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
  33.138                                                bool check_for_refs_into_cset) {
  33.139    // If the card is no longer dirty, nothing to do.
  33.140    if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
  33.141 @@ -995,7 +987,7 @@
  33.142    }
  33.143  };
  33.144  
  33.145 -void HRInto_G1RemSet::print_summary_info() {
  33.146 +void G1RemSet::print_summary_info() {
  33.147    G1CollectedHeap* g1 = G1CollectedHeap::heap();
  33.148  
  33.149  #if CARD_REPEAT_HISTO
  33.150 @@ -1029,30 +1021,26 @@
  33.151    g1->concurrent_g1_refine()->threads_do(&p);
  33.152    gclog_or_tty->print_cr("");
  33.153  
  33.154 -  if (G1UseHRIntoRS) {
  33.155 -    HRRSStatsIter blk;
  33.156 -    g1->heap_region_iterate(&blk);
  33.157 -    gclog_or_tty->print_cr("  Total heap region rem set sizes = " SIZE_FORMAT "K."
  33.158 -                           "  Max = " SIZE_FORMAT "K.",
  33.159 -                           blk.total_mem_sz()/K, blk.max_mem_sz()/K);
  33.160 -    gclog_or_tty->print_cr("  Static structures = " SIZE_FORMAT "K,"
  33.161 -                           " free_lists = " SIZE_FORMAT "K.",
  33.162 -                           HeapRegionRemSet::static_mem_size()/K,
  33.163 -                           HeapRegionRemSet::fl_mem_size()/K);
  33.164 -    gclog_or_tty->print_cr("    %d occupied cards represented.",
  33.165 -                           blk.occupied());
  33.166 -    gclog_or_tty->print_cr("    Max sz region = [" PTR_FORMAT ", " PTR_FORMAT " )"
  33.167 -                           ", cap = " SIZE_FORMAT "K, occ = " SIZE_FORMAT "K.",
  33.168 -                           blk.max_mem_sz_region()->bottom(), blk.max_mem_sz_region()->end(),
  33.169 -                           (blk.max_mem_sz_region()->rem_set()->mem_size() + K - 1)/K,
  33.170 -                           (blk.max_mem_sz_region()->rem_set()->occupied() + K - 1)/K);
  33.171 -    gclog_or_tty->print_cr("    Did %d coarsenings.",
  33.172 -                  HeapRegionRemSet::n_coarsenings());
  33.173 -
  33.174 -  }
  33.175 +  HRRSStatsIter blk;
  33.176 +  g1->heap_region_iterate(&blk);
  33.177 +  gclog_or_tty->print_cr("  Total heap region rem set sizes = " SIZE_FORMAT "K."
  33.178 +                         "  Max = " SIZE_FORMAT "K.",
  33.179 +                         blk.total_mem_sz()/K, blk.max_mem_sz()/K);
  33.180 +  gclog_or_tty->print_cr("  Static structures = " SIZE_FORMAT "K,"
  33.181 +                         " free_lists = " SIZE_FORMAT "K.",
  33.182 +                         HeapRegionRemSet::static_mem_size()/K,
  33.183 +                         HeapRegionRemSet::fl_mem_size()/K);
  33.184 +  gclog_or_tty->print_cr("    %d occupied cards represented.",
  33.185 +                         blk.occupied());
  33.186 +  gclog_or_tty->print_cr("    Max sz region = [" PTR_FORMAT ", " PTR_FORMAT " )"
  33.187 +                         ", cap = " SIZE_FORMAT "K, occ = " SIZE_FORMAT "K.",
  33.188 +                         blk.max_mem_sz_region()->bottom(), blk.max_mem_sz_region()->end(),
  33.189 +                         (blk.max_mem_sz_region()->rem_set()->mem_size() + K - 1)/K,
  33.190 +                         (blk.max_mem_sz_region()->rem_set()->occupied() + K - 1)/K);
  33.191 +  gclog_or_tty->print_cr("    Did %d coarsenings.", HeapRegionRemSet::n_coarsenings());
  33.192  }
  33.193  
  33.194 -void HRInto_G1RemSet::prepare_for_verify() {
  33.195 +void G1RemSet::prepare_for_verify() {
  33.196    if (G1HRRSFlushLogBuffersOnVerify &&
  33.197        (VerifyBeforeGC || VerifyAfterGC)
  33.198        &&  !_g1->full_collection()) {
    34.1 --- a/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Thu Nov 04 15:19:16 2010 -0700
    34.2 +++ b/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Thu Nov 04 16:17:54 2010 -0700
    34.3 @@ -27,107 +27,18 @@
    34.4  
    34.5  class G1CollectedHeap;
    34.6  class CardTableModRefBarrierSet;
    34.7 -class HRInto_G1RemSet;
    34.8  class ConcurrentG1Refine;
    34.9  
   34.10 +// A G1RemSet in which each heap region has a rem set that records the
   34.11 +// external heap references into it.  Uses a mod ref bs to track updates,
   34.12 +// so that they can be used to update the individual region remsets.
   34.13 +
   34.14  class G1RemSet: public CHeapObj {
   34.15  protected:
   34.16    G1CollectedHeap* _g1;
   34.17    unsigned _conc_refine_cards;
   34.18    size_t n_workers();
   34.19  
   34.20 -public:
   34.21 -  G1RemSet(G1CollectedHeap* g1) :
   34.22 -    _g1(g1), _conc_refine_cards(0)
   34.23 -  {}
   34.24 -
   34.25 -  // Invoke "blk->do_oop" on all pointers into the CS in object in regions
   34.26 -  // outside the CS (having invoked "blk->set_region" to set the "from"
   34.27 -  // region correctly beforehand.) The "worker_i" param is for the
   34.28 -  // parallel case where the number of the worker thread calling this
   34.29 -  // function can be helpful in partitioning the work to be done. It
   34.30 -  // should be the same as the "i" passed to the calling thread's
   34.31 -  // work(i) function. In the sequential case this param will be ingored.
   34.32 -  virtual void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
   34.33 -                                           int worker_i) = 0;
   34.34 -
   34.35 -  // Prepare for and cleanup after an oops_into_collection_set_do
   34.36 -  // call.  Must call each of these once before and after (in sequential
   34.37 -  // code) any threads call oops into collection set do.  (This offers an
   34.38 -  // opportunity to sequential setup and teardown of structures needed by a
   34.39 -  // parallel iteration over the CS's RS.)
   34.40 -  virtual void prepare_for_oops_into_collection_set_do() = 0;
   34.41 -  virtual void cleanup_after_oops_into_collection_set_do() = 0;
   34.42 -
   34.43 -  // If "this" is of the given subtype, return "this", else "NULL".
   34.44 -  virtual HRInto_G1RemSet* as_HRInto_G1RemSet() { return NULL; }
   34.45 -
   34.46 -  // Record, if necessary, the fact that *p (where "p" is in region "from",
   34.47 -  // and is, a fortiori, required to be non-NULL) has changed to its new value.
   34.48 -  virtual void write_ref(HeapRegion* from, oop* p) = 0;
   34.49 -  virtual void write_ref(HeapRegion* from, narrowOop* p) = 0;
   34.50 -  virtual void par_write_ref(HeapRegion* from, oop* p, int tid) = 0;
   34.51 -  virtual void par_write_ref(HeapRegion* from, narrowOop* p, int tid) = 0;
   34.52 -
   34.53 -  // Requires "region_bm" and "card_bm" to be bitmaps with 1 bit per region
   34.54 -  // or card, respectively, such that a region or card with a corresponding
   34.55 -  // 0 bit contains no part of any live object.  Eliminates any remembered
   34.56 -  // set entries that correspond to dead heap ranges.
   34.57 -  virtual void scrub(BitMap* region_bm, BitMap* card_bm) = 0;
   34.58 -  // Like the above, but assumes is called in parallel: "worker_num" is the
   34.59 -  // parallel thread id of the current thread, and "claim_val" is the
   34.60 -  // value that should be used to claim heap regions.
   34.61 -  virtual void scrub_par(BitMap* region_bm, BitMap* card_bm,
   34.62 -                         int worker_num, int claim_val) = 0;
   34.63 -
   34.64 -  // Refine the card corresponding to "card_ptr".  If "sts" is non-NULL,
   34.65 -  // join and leave around parts that must be atomic wrt GC.  (NULL means
   34.66 -  // being done at a safepoint.)
   34.67 -  // With some implementations of this routine, when check_for_refs_into_cset
   34.68 -  // is true, a true result may be returned if the given card contains oops
   34.69 -  // that have references into the current collection set.
   34.70 -  virtual bool concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
   34.71 -                                       bool check_for_refs_into_cset) {
   34.72 -    return false;
   34.73 -  }
   34.74 -
   34.75 -  // Print any relevant summary info.
   34.76 -  virtual void print_summary_info() {}
   34.77 -
   34.78 -  // Prepare remebered set for verification.
   34.79 -  virtual void prepare_for_verify() {};
   34.80 -};
   34.81 -
   34.82 -
   34.83 -// The simplest possible G1RemSet: iterates over all objects in non-CS
   34.84 -// regions, searching for pointers into the CS.
   34.85 -class StupidG1RemSet: public G1RemSet {
   34.86 -public:
   34.87 -  StupidG1RemSet(G1CollectedHeap* g1) : G1RemSet(g1) {}
   34.88 -
   34.89 -  void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
   34.90 -                                   int worker_i);
   34.91 -
   34.92 -  void prepare_for_oops_into_collection_set_do() {}
   34.93 -  void cleanup_after_oops_into_collection_set_do() {}
   34.94 -
   34.95 -  // Nothing is necessary in the version below.
   34.96 -  void write_ref(HeapRegion* from, oop* p) {}
   34.97 -  void write_ref(HeapRegion* from, narrowOop* p) {}
   34.98 -  void par_write_ref(HeapRegion* from, oop* p, int tid) {}
   34.99 -  void par_write_ref(HeapRegion* from, narrowOop* p, int tid) {}
  34.100 -
  34.101 -  void scrub(BitMap* region_bm, BitMap* card_bm) {}
  34.102 -  void scrub_par(BitMap* region_bm, BitMap* card_bm,
  34.103 -                 int worker_num, int claim_val) {}
  34.104 -
  34.105 -};
  34.106 -
  34.107 -// A G1RemSet in which each heap region has a rem set that records the
  34.108 -// external heap references into it.  Uses a mod ref bs to track updates,
  34.109 -// so that they can be used to update the individual region remsets.
  34.110 -
  34.111 -class HRInto_G1RemSet: public G1RemSet {
  34.112  protected:
  34.113    enum SomePrivateConstants {
  34.114      UpdateRStoMergeSync  = 0,
  34.115 @@ -175,28 +86,32 @@
  34.116    // scanned.
  34.117    void cleanupHRRS();
  34.118  
  34.119 -  HRInto_G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs);
  34.120 -  ~HRInto_G1RemSet();
  34.121 +  G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs);
  34.122 +  ~G1RemSet();
  34.123  
  34.124 +  // Invoke "blk->do_oop" on all pointers into the CS in objects in regions
  34.125 +  // outside the CS (having invoked "blk->set_region" to set the "from"
  34.126 +  // region correctly beforehand.) The "worker_i" param is for the
  34.127 +  // parallel case where the number of the worker thread calling this
  34.128 +  // function can be helpful in partitioning the work to be done. It
  34.129 +  // should be the same as the "i" passed to the calling thread's
   34.130 +  // work(i) function. In the sequential case this param will be ignored.
  34.131    void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
  34.132                                     int worker_i);
  34.133  
  34.134 +  // Prepare for and cleanup after an oops_into_collection_set_do
  34.135 +  // call.  Must call each of these once before and after (in sequential
  34.136 +  // code) any threads call oops_into_collection_set_do.  (This offers an
   34.137 +  // opportunity for sequential setup and teardown of structures needed by a
  34.138 +  // parallel iteration over the CS's RS.)
  34.139    void prepare_for_oops_into_collection_set_do();
  34.140    void cleanup_after_oops_into_collection_set_do();
  34.141 +
  34.142    void scanRS(OopsInHeapRegionClosure* oc, int worker_i);
  34.143 -  template <class T> void scanNewRefsRS_work(OopsInHeapRegionClosure* oc, int worker_i);
  34.144 -  void scanNewRefsRS(OopsInHeapRegionClosure* oc, int worker_i) {
  34.145 -    if (UseCompressedOops) {
  34.146 -      scanNewRefsRS_work<narrowOop>(oc, worker_i);
  34.147 -    } else {
  34.148 -      scanNewRefsRS_work<oop>(oc, worker_i);
  34.149 -    }
  34.150 -  }
  34.151    void updateRS(DirtyCardQueue* into_cset_dcq, int worker_i);
  34.152 +
  34.153    HeapRegion* calculateStartRegion(int i);
  34.154  
  34.155 -  HRInto_G1RemSet* as_HRInto_G1RemSet() { return this; }
  34.156 -
  34.157    CardTableModRefBS* ct_bs() { return _ct_bs; }
  34.158    size_t cardsScanned() { return _total_cards_scanned; }
  34.159  
  34.160 @@ -219,17 +134,31 @@
  34.161  
  34.162    bool self_forwarded(oop obj);
  34.163  
  34.164 +  // Requires "region_bm" and "card_bm" to be bitmaps with 1 bit per region
  34.165 +  // or card, respectively, such that a region or card with a corresponding
  34.166 +  // 0 bit contains no part of any live object.  Eliminates any remembered
  34.167 +  // set entries that correspond to dead heap ranges.
  34.168    void scrub(BitMap* region_bm, BitMap* card_bm);
  34.169 +
   34.170 +  // Like the above, but assumes it is called in parallel: "worker_num" is the
  34.171 +  // parallel thread id of the current thread, and "claim_val" is the
  34.172 +  // value that should be used to claim heap regions.
  34.173    void scrub_par(BitMap* region_bm, BitMap* card_bm,
  34.174                   int worker_num, int claim_val);
  34.175  
  34.176 -  // If check_for_refs_into_cset is true then a true result is returned
  34.177 -  // if the card contains oops that have references into the current
  34.178 -  // collection set.
  34.179 +  // Refine the card corresponding to "card_ptr".  If "sts" is non-NULL,
  34.180 +  // join and leave around parts that must be atomic wrt GC.  (NULL means
  34.181 +  // being done at a safepoint.)
  34.182 +  // If check_for_refs_into_cset is true, a true result is returned
  34.183 +  // if the given card contains oops that have references into the
  34.184 +  // current collection set.
  34.185    virtual bool concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
  34.186                                         bool check_for_refs_into_cset);
  34.187  
  34.188 +  // Print any relevant summary info.
  34.189    virtual void print_summary_info();
  34.190 +
  34.191 +  // Prepare remembered set for verification.
  34.192    virtual void prepare_for_verify();
  34.193  };
  34.194  
  34.195 @@ -250,13 +179,13 @@
  34.196  
  34.197  class UpdateRSOopClosure: public OopClosure {
  34.198    HeapRegion* _from;
  34.199 -  HRInto_G1RemSet* _rs;
  34.200 +  G1RemSet* _rs;
  34.201    int _worker_i;
  34.202  
  34.203    template <class T> void do_oop_work(T* p);
  34.204  
  34.205  public:
  34.206 -  UpdateRSOopClosure(HRInto_G1RemSet* rs, int worker_i = 0) :
  34.207 +  UpdateRSOopClosure(G1RemSet* rs, int worker_i = 0) :
  34.208      _from(NULL), _rs(rs), _worker_i(worker_i) {
  34.209      guarantee(_rs != NULL, "Requires an HRIntoG1RemSet");
  34.210    }
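
The comment added above prescribes a sequential-prepare / parallel-iterate / sequential-cleanup discipline around the collection-set remembered-set scan. Below is a minimal sketch of that calling order only; the driver function is hypothetical and the signature of oops_into_collection_set_do is assumed, so the point is just how the two entry points bracket the parallel phase.

    // Hypothetical driver (illustration only, not part of this changeset).
    void scan_cset_rem_set_sketch(G1RemSet* rs, OopsInHeapRegionClosure* oc) {
      rs->prepare_for_oops_into_collection_set_do();    // sequential setup
      // ... each parallel GC worker i then calls:
      //       rs->oops_into_collection_set_do(oc, i);  // assumed signature
      rs->cleanup_after_oops_into_collection_set_do();  // sequential teardown
    }
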
    35.1 --- a/src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp	Thu Nov 04 15:19:16 2010 -0700
    35.2 +++ b/src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp	Thu Nov 04 16:17:54 2010 -0700
    35.3 @@ -30,16 +30,18 @@
    35.4    }
    35.5  }
    35.6  
    35.7 -template <class T> inline void HRInto_G1RemSet::write_ref_nv(HeapRegion* from, T* p) {
    35.8 +template <class T>
    35.9 +inline void G1RemSet::write_ref_nv(HeapRegion* from, T* p) {
   35.10    par_write_ref_nv(from, p, 0);
   35.11  }
   35.12  
   35.13 -inline bool HRInto_G1RemSet::self_forwarded(oop obj) {
   35.14 +inline bool G1RemSet::self_forwarded(oop obj) {
   35.15    bool result =  (obj->is_forwarded() && (obj->forwardee()== obj));
   35.16    return result;
   35.17  }
   35.18  
   35.19 -template <class T> inline void HRInto_G1RemSet::par_write_ref_nv(HeapRegion* from, T* p, int tid) {
   35.20 +template <class T>
   35.21 +inline void G1RemSet::par_write_ref_nv(HeapRegion* from, T* p, int tid) {
   35.22    oop obj = oopDesc::load_decode_heap_oop(p);
   35.23  #ifdef ASSERT
   35.24    // can't do because of races
   35.25 @@ -77,7 +79,7 @@
   35.26        // Deferred updates to the CSet are either discarded (in the normal case),
   35.27        // or processed (if an evacuation failure occurs) at the end
   35.28        // of the collection.
   35.29 -      // See HRInto_G1RemSet::cleanup_after_oops_into_collection_set_do().
   35.30 +      // See G1RemSet::cleanup_after_oops_into_collection_set_do().
   35.31      } else {
   35.32  #if G1_REM_SET_LOGGING
   35.33        gclog_or_tty->print_cr("Adding " PTR_FORMAT " (" PTR_FORMAT ") to RS"
   35.34 @@ -91,12 +93,14 @@
   35.35    }
   35.36  }
   35.37  
   35.38 -template <class T> inline void UpdateRSOopClosure::do_oop_work(T* p) {
   35.39 +template <class T>
   35.40 +inline void UpdateRSOopClosure::do_oop_work(T* p) {
   35.41    assert(_from != NULL, "from region must be non-NULL");
   35.42    _rs->par_write_ref(_from, p, _worker_i);
   35.43  }
   35.44  
   35.45 -template <class T> inline void UpdateRSetImmediate::do_oop_work(T* p) {
   35.46 +template <class T>
   35.47 +inline void UpdateRSetImmediate::do_oop_work(T* p) {
   35.48    assert(_from->is_in_reserved(p), "paranoia");
   35.49    T heap_oop = oopDesc::load_heap_oop(p);
   35.50    if (!oopDesc::is_null(heap_oop) && !_from->is_survivor()) {
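
The write_ref_nv/par_write_ref_nv templates above are instantiated for both narrowOop* and oop*; callers pick the instantiation at run time from UseCompressedOops, just as the scanNewRefsRS wrapper removed from g1RemSet.hpp did. A minimal caller-side sketch of that idiom; the helper name is invented for illustration.

    // Illustration only: dispatch on UseCompressedOops before the template call.
    inline void write_ref_dispatch(G1RemSet* rs, HeapRegion* from, void* field) {
      if (UseCompressedOops) {
        rs->write_ref_nv(from, (narrowOop*) field);
      } else {
        rs->write_ref_nv(from, (oop*) field);
      }
    }
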
    36.1 --- a/src/share/vm/gc_implementation/g1/g1_globals.hpp	Thu Nov 04 15:19:16 2010 -0700
    36.2 +++ b/src/share/vm/gc_implementation/g1/g1_globals.hpp	Thu Nov 04 16:17:54 2010 -0700
    36.3 @@ -40,9 +40,6 @@
    36.4    develop(intx, G1PolicyVerbose, 0,                                         \
    36.5            "The verbosity level on G1 policy decisions")                     \
    36.6                                                                              \
    36.7 -  develop(bool, G1UseHRIntoRS, true,                                        \
    36.8 -          "Determines whether the 'advanced' HR Into rem set is used.")     \
    36.9 -                                                                            \
   36.10    develop(intx, G1MarkingVerboseLevel, 0,                                   \
   36.11            "Level (0-4) of verboseness of the marking code")                 \
   36.12                                                                              \
    37.1 --- a/src/share/vm/gc_implementation/g1/heapRegion.cpp	Thu Nov 04 15:19:16 2010 -0700
    37.2 +++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp	Thu Nov 04 16:17:54 2010 -0700
    37.3 @@ -377,10 +377,26 @@
    37.4  }
    37.5  // </PREDICTION>
    37.6  
    37.7 -void HeapRegion::set_startsHumongous() {
    37.8 +void HeapRegion::set_startsHumongous(HeapWord* new_end) {
    37.9 +  assert(end() == _orig_end,
   37.10 +         "Should be normal before the humongous object allocation");
   37.11 +  assert(top() == bottom(), "should be empty");
   37.12 +
   37.13    _humongous_type = StartsHumongous;
   37.14    _humongous_start_region = this;
   37.15 -  assert(end() == _orig_end, "Should be normal before alloc.");
   37.16 +
   37.17 +  set_end(new_end);
   37.18 +  _offsets.set_for_starts_humongous(new_end);
   37.19 +}
   37.20 +
   37.21 +void HeapRegion::set_continuesHumongous(HeapRegion* start) {
   37.22 +  assert(end() == _orig_end,
   37.23 +         "Should be normal before the humongous object allocation");
   37.24 +  assert(top() == bottom(), "should be empty");
   37.25 +  assert(start->startsHumongous(), "pre-condition");
   37.26 +
   37.27 +  _humongous_type = ContinuesHumongous;
   37.28 +  _humongous_start_region = start;
   37.29  }
   37.30  
   37.31  bool HeapRegion::claimHeapRegion(jint claimValue) {
   37.32 @@ -500,23 +516,6 @@
   37.33    return blk.result();
   37.34  }
   37.35  
   37.36 -void HeapRegion::set_continuesHumongous(HeapRegion* start) {
   37.37 -  // The order is important here.
   37.38 -  start->add_continuingHumongousRegion(this);
   37.39 -  _humongous_type = ContinuesHumongous;
   37.40 -  _humongous_start_region = start;
   37.41 -}
   37.42 -
   37.43 -void HeapRegion::add_continuingHumongousRegion(HeapRegion* cont) {
   37.44 -  // Must join the blocks of the current H region seq with the block of the
   37.45 -  // added region.
   37.46 -  offsets()->join_blocks(bottom(), cont->bottom());
   37.47 -  arrayOop obj = (arrayOop)(bottom());
   37.48 -  obj->set_length((int) (obj->length() + cont->capacity()/jintSize));
   37.49 -  set_end(cont->end());
   37.50 -  set_top(cont->end());
   37.51 -}
   37.52 -
   37.53  void HeapRegion::save_marks() {
   37.54    set_saved_mark();
   37.55  }
    38.1 --- a/src/share/vm/gc_implementation/g1/heapRegion.hpp	Thu Nov 04 15:19:16 2010 -0700
    38.2 +++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp	Thu Nov 04 16:17:54 2010 -0700
    38.3 @@ -395,14 +395,12 @@
    38.4  
    38.5    // Causes the current region to represent a humongous object spanning "n"
    38.6    // regions.
    38.7 -  virtual void set_startsHumongous();
    38.8 +  void set_startsHumongous(HeapWord* new_end);
    38.9  
   38.10    // The regions that continue a humongous sequence should be added using
   38.11    // this method, in increasing address order.
   38.12    void set_continuesHumongous(HeapRegion* start);
   38.13  
   38.14 -  void add_continuingHumongousRegion(HeapRegion* cont);
   38.15 -
   38.16    // If the region has a remembered set, return a pointer to it.
   38.17    HeapRegionRemSet* rem_set() const {
   38.18      return _rem_set;
   38.19 @@ -733,13 +731,6 @@
   38.20                                     FilterOutOfRegionClosure* cl,
   38.21                                     bool filter_young);
   38.22  
   38.23 -  // The region "mr" is entirely in "this", and starts and ends at block
   38.24 -  // boundaries. The caller declares that all the contained blocks are
   38.25 -  // coalesced into one.
   38.26 -  void declare_filled_region_to_BOT(MemRegion mr) {
   38.27 -    _offsets.single_block(mr.start(), mr.end());
   38.28 -  }
   38.29 -
   38.30    // A version of block start that is guaranteed to find *some* block
   38.31    // boundary at or before "p", but does not object iteration, and may
   38.32    // therefore be used safely when the heap is unparseable.
    39.1 --- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Thu Nov 04 15:19:16 2010 -0700
    39.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Thu Nov 04 16:17:54 2010 -0700
    39.3 @@ -1159,9 +1159,7 @@
    39.4    _hrrs(NULL),
    39.5    _g1h(G1CollectedHeap::heap()),
    39.6    _bosa(NULL),
    39.7 -  _sparse_iter(size_t(G1CollectedHeap::heap()->reserved_region().start())
    39.8 -               >> CardTableModRefBS::card_shift)
    39.9 -{}
   39.10 +  _sparse_iter() { }
   39.11  
   39.12  void HeapRegionRemSetIterator::initialize(const HeapRegionRemSet* hrrs) {
   39.13    _hrrs = hrrs;
    40.1 --- a/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Thu Nov 04 15:19:16 2010 -0700
    40.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Thu Nov 04 16:17:54 2010 -0700
    40.3 @@ -91,34 +91,118 @@
    40.4    }
    40.5    if (sumSizes >= word_size) {
    40.6      _alloc_search_start = cur;
    40.7 -    // Mark the allocated regions as allocated.
    40.8 +
    40.9 +    // We need to initialize the region(s) we just discovered. This is
   40.10 +    // a bit tricky given that it can happen concurrently with
   40.11 +    // refinement threads refining cards on these regions and
   40.12 +    // potentially wanting to refine the BOT as they are scanning
   40.13 +    // those cards (this can happen shortly after a cleanup; see CR
   40.14 +    // 6991377). So we have to set up the region(s) carefully and in
   40.15 +    // a specific order.
   40.16 +
   40.17 +    // Currently, allocs_are_zero_filled() returns false. The zero
   40.18 +    // filling infrastructure will be going away soon (see CR 6977804).
   40.19 +    // So no need to do anything else here.
   40.20      bool zf = G1CollectedHeap::heap()->allocs_are_zero_filled();
   40.21 +    assert(!zf, "not supported");
   40.22 +
   40.23 +    // This will be the "starts humongous" region.
   40.24      HeapRegion* first_hr = _regions.at(first);
   40.25 -    for (int i = first; i < cur; i++) {
   40.26 -      HeapRegion* hr = _regions.at(i);
   40.27 -      if (zf)
   40.28 -        hr->ensure_zero_filled();
   40.29 +    {
   40.30 +      MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
   40.31 +      first_hr->set_zero_fill_allocated();
   40.32 +    }
   40.33 +    // The header of the new object will be placed at the bottom of
   40.34 +    // the first region.
   40.35 +    HeapWord* new_obj = first_hr->bottom();
   40.36 +    // This will be the new end of the first region in the series that
   40.37 +    // should also match the end of the last region in the series.
   40.38 +    // (Note: sumSizes = "region size" x "number of regions we found").
   40.39 +    HeapWord* new_end = new_obj + sumSizes;
   40.40 +    // This will be the new top of the first region that will reflect
   40.41 +    // this allocation.
   40.42 +    HeapWord* new_top = new_obj + word_size;
   40.43 +
   40.44 +    // First, we need to zero the header of the space that we will be
   40.45 +    // allocating. When we update top further down, some refinement
   40.46 +    // threads might try to scan the region. By zeroing the header we
   40.47 +    // ensure that any thread that will try to scan the region will
   40.48 +    // come across the zero klass word and bail out.
   40.49 +    //
   40.50 +    // NOTE: It would not have been correct to have used
   40.51 +    // CollectedHeap::fill_with_object() and make the space look like
   40.52 +    // an int array. The thread that is doing the allocation will
   40.53 +    // later update the object header to a potentially different array
   40.54 +    // type and, for a very short period of time, the klass and length
   40.55 +    // fields will be inconsistent. This could cause a refinement
   40.56 +    // thread to calculate the object size incorrectly.
   40.57 +    Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
   40.58 +
   40.59 +    // We will set up the first region as "starts humongous". This
   40.60 +    // will also update the BOT covering all the regions to reflect
   40.61 +    // that there is a single object that starts at the bottom of the
   40.62 +    // first region.
   40.63 +    first_hr->set_startsHumongous(new_end);
   40.64 +
   40.65 +    // Then, if there are any, we will set up the "continues
   40.66 +    // humongous" regions.
   40.67 +    HeapRegion* hr = NULL;
   40.68 +    for (int i = first + 1; i < cur; ++i) {
   40.69 +      hr = _regions.at(i);
   40.70        {
   40.71          MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
   40.72          hr->set_zero_fill_allocated();
   40.73        }
   40.74 -      size_t sz = hr->capacity() / HeapWordSize;
   40.75 -      HeapWord* tmp = hr->allocate(sz);
   40.76 -      assert(tmp != NULL, "Humongous allocation failure");
   40.77 -      MemRegion mr = MemRegion(tmp, sz);
   40.78 -      CollectedHeap::fill_with_object(mr);
   40.79 -      hr->declare_filled_region_to_BOT(mr);
   40.80 -      if (i == first) {
   40.81 -        first_hr->set_startsHumongous();
   40.82 +      hr->set_continuesHumongous(first_hr);
   40.83 +    }
   40.84 +    // If we have "continues humongous" regions (hr != NULL), then the
   40.85 +    // end of the last one should match new_end.
   40.86 +    assert(hr == NULL || hr->end() == new_end, "sanity");
   40.87 +
   40.88 +    // Up to this point no concurrent thread would have been able to
   40.89 +    // do any scanning on any region in this series. All the top
   40.90 +    // fields still point to bottom, so the intersection between
   40.91 +    // [bottom,top] and [card_start,card_end] will be empty. Before we
   40.92 +    // update the top fields, we'll do a storestore to make sure that
   40.93 +    // no thread sees the update to top before the zeroing of the
   40.94 +    // object header and the BOT initialization.
   40.95 +    OrderAccess::storestore();
   40.96 +
   40.97 +    // Now that the BOT and the object header have been initialized,
   40.98 +    // we can update top of the "starts humongous" region.
   40.99 +    assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
  40.100 +           "new_top should be in this region");
  40.101 +    first_hr->set_top(new_top);
  40.102 +
  40.103 +    // Now, we will update the top fields of the "continues humongous"
  40.104 +    // regions. The reason we need to do this is that, otherwise,
  40.105 +    // these regions would look empty and this will confuse parts of
  40.106 +    // G1. For example, the code that looks for a consecutive number
  40.107 +    // of empty regions will consider them empty and try to
  40.108 +    // re-allocate them. We can extend is_empty() to also include
  40.109 +    // !continuesHumongous(), but it is easier to just update the top
  40.110 +    // fields here.
  40.111 +    hr = NULL;
  40.112 +    for (int i = first + 1; i < cur; ++i) {
  40.113 +      hr = _regions.at(i);
  40.114 +      if ((i + 1) == cur) {
  40.115 +        // last continues humongous region
  40.116 +        assert(hr->bottom() < new_top && new_top <= hr->end(),
  40.117 +               "new_top should fall on this region");
  40.118 +        hr->set_top(new_top);
  40.119        } else {
  40.120 -        assert(i > first, "sanity");
  40.121 -        hr->set_continuesHumongous(first_hr);
  40.122 +        // not last one
  40.123 +        assert(new_top > hr->end(), "new_top should be above this region");
  40.124 +        hr->set_top(hr->end());
  40.125        }
  40.126      }
  40.127 -    HeapWord* first_hr_bot = first_hr->bottom();
  40.128 -    HeapWord* obj_end = first_hr_bot + word_size;
  40.129 -    first_hr->set_top(obj_end);
  40.130 -    return first_hr_bot;
  40.131 +    // If we have "continues humongous" regions (hr != NULL), then the
  40.132 +    // end of the last one should match new_end and its top should
  40.133 +    // match new_top.
  40.134 +    assert(hr == NULL ||
  40.135 +           (hr->end() == new_end && hr->top() == new_top), "sanity");
  40.136 +
  40.137 +    return new_obj;
  40.138    } else {
  40.139      // If we started from the beginning, we want to know why we can't alloc.
  40.140      return NULL;
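
The humongous-allocation path above depends on a specific publication order so that concurrent refinement threads never see a partially set-up object: zero the klass word, install the region metadata and BOT entries, issue a store-store barrier, and only then raise top. A condensed excerpt of just that ordering, with the locking, zero-fill bookkeeping, and per-region top updates omitted:

    // Condensed from the code above; shows the ordering only.
    HeapWord* new_obj = first_hr->bottom();
    Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);  // 1. zero the klass word
    first_hr->set_startsHumongous(new_end);                   // 2. region metadata + BOT
    OrderAccess::storestore();                                // 3. publish barrier
    first_hr->set_top(new_top);                               // 4. refiners may now see top > bottom
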
    41.1 --- a/src/share/vm/gc_implementation/g1/sparsePRT.cpp	Thu Nov 04 15:19:16 2010 -0700
    41.2 +++ b/src/share/vm/gc_implementation/g1/sparsePRT.cpp	Thu Nov 04 16:17:54 2010 -0700
    41.3 @@ -308,7 +308,7 @@
    41.4    assert(e2->num_valid_cards() > 0, "Postcondition.");
    41.5  }
    41.6  
    41.7 -CardIdx_t /* RSHashTable:: */ RSHashTableIter::find_first_card_in_list() {
    41.8 +CardIdx_t RSHashTableIter::find_first_card_in_list() {
    41.9    CardIdx_t res;
   41.10    while (_bl_ind != RSHashTable::NullEntry) {
   41.11      res = _rsht->entry(_bl_ind)->card(0);
   41.12 @@ -322,14 +322,11 @@
   41.13    return SparsePRTEntry::NullEntry;
   41.14  }
   41.15  
   41.16 -size_t /* RSHashTable:: */ RSHashTableIter::compute_card_ind(CardIdx_t ci) {
   41.17 -  return
   41.18 -    _heap_bot_card_ind
   41.19 -    + (_rsht->entry(_bl_ind)->r_ind() * HeapRegion::CardsPerRegion)
   41.20 -    + ci;
   41.21 +size_t RSHashTableIter::compute_card_ind(CardIdx_t ci) {
   41.22 +  return (_rsht->entry(_bl_ind)->r_ind() * HeapRegion::CardsPerRegion) + ci;
   41.23  }
   41.24  
   41.25 -bool /* RSHashTable:: */ RSHashTableIter::has_next(size_t& card_index) {
   41.26 +bool RSHashTableIter::has_next(size_t& card_index) {
   41.27    _card_ind++;
   41.28    CardIdx_t ci;
   41.29    if (_card_ind < SparsePRTEntry::cards_num() &&
    42.1 --- a/src/share/vm/gc_implementation/g1/sparsePRT.hpp	Thu Nov 04 15:19:16 2010 -0700
    42.2 +++ b/src/share/vm/gc_implementation/g1/sparsePRT.hpp	Thu Nov 04 16:17:54 2010 -0700
    42.3 @@ -169,7 +169,6 @@
    42.4    int _bl_ind;          // [-1, 0.._rsht->_capacity)
    42.5    short _card_ind;      // [0..SparsePRTEntry::cards_num())
    42.6    RSHashTable* _rsht;
    42.7 -  size_t _heap_bot_card_ind;
    42.8  
    42.9    // If the bucket list pointed to by _bl_ind contains a card, sets
   42.10    // _bl_ind to the index of that entry, and returns the card.
   42.11 @@ -183,13 +182,11 @@
   42.12    size_t compute_card_ind(CardIdx_t ci);
   42.13  
   42.14  public:
   42.15 -  RSHashTableIter(size_t heap_bot_card_ind) :
   42.16 +  RSHashTableIter() :
   42.17      _tbl_ind(RSHashTable::NullEntry),
   42.18      _bl_ind(RSHashTable::NullEntry),
   42.19      _card_ind((SparsePRTEntry::cards_num() - 1)),
   42.20 -    _rsht(NULL),
   42.21 -    _heap_bot_card_ind(heap_bot_card_ind)
   42.22 -  {}
   42.23 +    _rsht(NULL) {}
   42.24  
   42.25    void init(RSHashTable* rsht) {
   42.26      _rsht = rsht;
   42.27 @@ -280,20 +277,11 @@
   42.28    bool contains_card(RegionIdx_t region_id, CardIdx_t card_index) const {
   42.29      return _next->contains_card(region_id, card_index);
   42.30    }
   42.31 -
   42.32 -#if 0
   42.33 -  void verify_is_cleared();
   42.34 -  void print();
   42.35 -#endif
   42.36  };
   42.37  
   42.38  
   42.39 -class SparsePRTIter: public /* RSHashTable:: */RSHashTableIter {
   42.40 +class SparsePRTIter: public RSHashTableIter {
   42.41  public:
   42.42 -  SparsePRTIter(size_t heap_bot_card_ind) :
   42.43 -    /* RSHashTable:: */RSHashTableIter(heap_bot_card_ind)
   42.44 -  {}
   42.45 -
   42.46    void init(const SparsePRT* sprt) {
   42.47      RSHashTableIter::init(sprt->cur());
   42.48    }
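
With _heap_bot_card_ind removed, compute_card_ind() now yields a card index relative to the heap's first card rather than one biased by the card index of the heap's bottom address. A small worked example; the region size is an illustrative value, not taken from the sources:

    // Suppose HeapRegion::CardsPerRegion == 2048 (illustration only), and the current
    // sparse entry covers region 5 with card offset ci == 100 within that region:
    //   old: _heap_bot_card_ind + 5 * 2048 + 100
    //   new:                      5 * 2048 + 100  == 10340
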
    43.1 --- a/src/share/vm/gc_implementation/includeDB_gc_g1	Thu Nov 04 15:19:16 2010 -0700
    43.2 +++ b/src/share/vm/gc_implementation/includeDB_gc_g1	Thu Nov 04 16:17:54 2010 -0700
    43.3 @@ -310,10 +310,16 @@
    43.4  
    43.5  heapRegionSeq.inline.hpp                heapRegionSeq.hpp
    43.6  
    43.7 +instanceKlass.cpp                       g1RemSet.inline.hpp
    43.8 +
    43.9 +instanceRefKlass.cpp                    g1RemSet.inline.hpp
   43.10 +
   43.11  klass.hpp				g1OopClosures.hpp
   43.12  
   43.13  memoryService.cpp                       g1MemoryPool.hpp
   43.14  
   43.15 +objArrayKlass.cpp                       g1RemSet.inline.hpp
   43.16 +
   43.17  ptrQueue.cpp                            allocation.hpp
   43.18  ptrQueue.cpp                            allocation.inline.hpp
   43.19  ptrQueue.cpp                            mutex.hpp
    44.1 --- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Thu Nov 04 15:19:16 2010 -0700
    44.2 +++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Thu Nov 04 16:17:54 2010 -0700
    44.3 @@ -846,7 +846,7 @@
    44.4    // from this generation, pass on collection; let the next generation
    44.5    // do it.
    44.6    if (!collection_attempt_is_safe()) {
    44.7 -    gch->set_incremental_collection_will_fail();
    44.8 +    gch->set_incremental_collection_failed();  // slight lie, in that we did not even attempt one
    44.9      return;
   44.10    }
   44.11    assert(to()->is_empty(), "Else not collection_attempt_is_safe");
   44.12 @@ -935,8 +935,6 @@
   44.13  
   44.14      assert(to()->is_empty(), "to space should be empty now");
   44.15    } else {
   44.16 -    assert(HandlePromotionFailure,
   44.17 -      "Should only be here if promotion failure handling is on");
   44.18      assert(_promo_failure_scan_stack.is_empty(), "post condition");
   44.19      _promo_failure_scan_stack.clear(true); // Clear cached segments.
   44.20  
   44.21 @@ -947,7 +945,7 @@
   44.22      // All the spaces are in play for mark-sweep.
   44.23      swap_spaces();  // Make life simpler for CMS || rescan; see 6483690.
   44.24      from()->set_next_compaction_space(to());
   44.25 -    gch->set_incremental_collection_will_fail();
   44.26 +    gch->set_incremental_collection_failed();
   44.27      // Inform the next generation that a promotion failure occurred.
   44.28      _next_gen->promotion_failure_occurred();
   44.29  
   44.30 @@ -1092,11 +1090,6 @@
   44.31                                         old, m, sz);
   44.32  
   44.33      if (new_obj == NULL) {
   44.34 -      if (!HandlePromotionFailure) {
   44.35 -        // A failed promotion likely means the MaxLiveObjectEvacuationRatio flag
   44.36 -        // is incorrectly set. In any case, its seriously wrong to be here!
   44.37 -        vm_exit_out_of_memory(sz*wordSize, "promotion");
   44.38 -      }
   44.39        // promotion failed, forward to self
   44.40        _promotion_failed = true;
   44.41        new_obj = old;
   44.42 @@ -1206,12 +1199,6 @@
   44.43                                         old, m, sz);
   44.44  
   44.45      if (new_obj == NULL) {
   44.46 -      if (!HandlePromotionFailure) {
   44.47 -        // A failed promotion likely means the MaxLiveObjectEvacuationRatio
   44.48 -        // flag is incorrectly set. In any case, its seriously wrong to be
   44.49 -        // here!
   44.50 -        vm_exit_out_of_memory(sz*wordSize, "promotion");
   44.51 -      }
   44.52        // promotion failed, forward to self
   44.53        forward_ptr = old->forward_to_atomic(old);
   44.54        new_obj = old;
    45.1 --- a/src/share/vm/includeDB_compiler1	Thu Nov 04 15:19:16 2010 -0700
    45.2 +++ b/src/share/vm/includeDB_compiler1	Thu Nov 04 16:17:54 2010 -0700
    45.3 @@ -301,6 +301,7 @@
    45.4  c1_MacroAssembler.hpp                   assembler_<arch>.inline.hpp
    45.5  
    45.6  c1_MacroAssembler_<arch>.cpp            arrayOop.hpp
    45.7 +c1_MacroAssembler_<arch>.cpp            basicLock.hpp
    45.8  c1_MacroAssembler_<arch>.cpp            biasedLocking.hpp
    45.9  c1_MacroAssembler_<arch>.cpp            c1_MacroAssembler.hpp
   45.10  c1_MacroAssembler_<arch>.cpp            c1_Runtime1.hpp
   45.11 @@ -309,7 +310,6 @@
   45.12  c1_MacroAssembler_<arch>.cpp            markOop.hpp
   45.13  c1_MacroAssembler_<arch>.cpp            os.hpp
   45.14  c1_MacroAssembler_<arch>.cpp            stubRoutines.hpp
   45.15 -c1_MacroAssembler_<arch>.cpp            synchronizer.hpp
   45.16  c1_MacroAssembler_<arch>.cpp            systemDictionary.hpp
   45.17  
   45.18  c1_MacroAssembler_<arch>.hpp            generate_platform_dependent_include
    46.1 --- a/src/share/vm/includeDB_core	Thu Nov 04 15:19:16 2010 -0700
    46.2 +++ b/src/share/vm/includeDB_core	Thu Nov 04 16:17:54 2010 -0700
    46.3 @@ -300,10 +300,17 @@
    46.4  barrierSet.inline.hpp                   barrierSet.hpp
    46.5  barrierSet.inline.hpp                   cardTableModRefBS.hpp
    46.6  
    46.7 +basicLock.cpp                           basicLock.hpp
    46.8 +basicLock.cpp                           synchronizer.hpp
    46.9 +
   46.10 +basicLock.hpp                           handles.hpp
   46.11 +basicLock.hpp                           markOop.hpp
   46.12 +basicLock.hpp                           top.hpp
   46.13 +
   46.14 +biasedLocking.cpp                       basicLock.hpp
   46.15  biasedLocking.cpp                       biasedLocking.hpp
   46.16  biasedLocking.cpp                       klass.inline.hpp
   46.17  biasedLocking.cpp                       markOop.hpp
   46.18 -biasedLocking.cpp                       synchronizer.hpp
   46.19  biasedLocking.cpp                       task.hpp
   46.20  biasedLocking.cpp                       vframe.hpp
   46.21  biasedLocking.cpp                       vmThread.hpp
   46.22 @@ -404,13 +411,13 @@
   46.23  bytecodeInterpreterWithChecks.cpp       bytecodeInterpreter.cpp
   46.24  
   46.25  bytecodeInterpreter.hpp                 allocation.hpp
   46.26 +bytecodeInterpreter.hpp                 basicLock.hpp
   46.27  bytecodeInterpreter.hpp                 bytes_<arch>.hpp
   46.28  bytecodeInterpreter.hpp                 frame.hpp
   46.29  bytecodeInterpreter.hpp                 globalDefinitions.hpp
   46.30  bytecodeInterpreter.hpp                 globals.hpp
   46.31  bytecodeInterpreter.hpp                 methodDataOop.hpp
   46.32  bytecodeInterpreter.hpp                 methodOop.hpp
   46.33 -bytecodeInterpreter.hpp                 synchronizer.hpp
   46.34  
   46.35  bytecodeInterpreter.inline.hpp          bytecodeInterpreter.hpp
   46.36  bytecodeInterpreter.inline.hpp          stubRoutines.hpp
   46.37 @@ -1667,10 +1674,10 @@
   46.38  frame.cpp                               universe.inline.hpp
   46.39  
   46.40  frame.hpp                               assembler.hpp
   46.41 +frame.hpp                               basicLock.hpp
   46.42  frame.hpp                               methodOop.hpp
   46.43  frame.hpp                               monitorChunk.hpp
   46.44  frame.hpp                               registerMap.hpp
   46.45 -frame.hpp                               synchronizer.hpp
   46.46  frame.hpp                               top.hpp
   46.47  
   46.48  frame.inline.hpp                        bytecodeInterpreter.hpp
   46.49 @@ -2120,6 +2127,7 @@
   46.50  interfaceSupport_<os_family>.hpp        generate_platform_dependent_include
   46.51  
   46.52  interp_masm_<arch_model>.cpp            arrayOop.hpp
   46.53 +interp_masm_<arch_model>.cpp            basicLock.hpp
   46.54  interp_masm_<arch_model>.cpp            biasedLocking.hpp
   46.55  interp_masm_<arch_model>.cpp            interp_masm_<arch_model>.hpp
   46.56  interp_masm_<arch_model>.cpp            interpreterRuntime.hpp
   46.57 @@ -2131,7 +2139,6 @@
   46.58  interp_masm_<arch_model>.cpp            methodDataOop.hpp
   46.59  interp_masm_<arch_model>.cpp            methodOop.hpp
   46.60  interp_masm_<arch_model>.cpp            sharedRuntime.hpp
   46.61 -interp_masm_<arch_model>.cpp            synchronizer.hpp
   46.62  interp_masm_<arch_model>.cpp            thread_<os_family>.inline.hpp
   46.63  
   46.64  interp_masm_<arch_model>.hpp            assembler_<arch>.inline.hpp
   46.65 @@ -3094,25 +3101,26 @@
   46.66  
   46.67  objArrayOop.hpp                         arrayOop.hpp
   46.68  
   46.69 +objectMonitor.cpp                       dtrace.hpp
   46.70 +objectMonitor.cpp                       handles.inline.hpp
   46.71 +objectMonitor.cpp                       interfaceSupport.hpp
   46.72 +objectMonitor.cpp                       markOop.hpp
   46.73 +objectMonitor.cpp                       mutexLocker.hpp
   46.74 +objectMonitor.cpp                       objectMonitor.hpp
   46.75 +objectMonitor.cpp                       objectMonitor.inline.hpp
   46.76 +objectMonitor.cpp                       oop.inline.hpp
   46.77 +objectMonitor.cpp                       osThread.hpp
   46.78 +objectMonitor.cpp                       os_<os_family>.inline.hpp
   46.79 +objectMonitor.cpp                       preserveException.hpp
   46.80 +objectMonitor.cpp                       resourceArea.hpp
   46.81 +objectMonitor.cpp                       stubRoutines.hpp
   46.82 +objectMonitor.cpp                       thread.hpp
   46.83 +objectMonitor.cpp                       thread_<os_family>.inline.hpp
   46.84 +objectMonitor.cpp                       threadService.hpp
   46.85 +objectMonitor.cpp                       vmSymbols.hpp
   46.86 +
   46.87  objectMonitor.hpp                       os.hpp
   46.88 -
   46.89 -objectMonitor_<os_family>.cpp           dtrace.hpp
   46.90 -objectMonitor_<os_family>.cpp           interfaceSupport.hpp
   46.91 -objectMonitor_<os_family>.cpp           objectMonitor.hpp
   46.92 -objectMonitor_<os_family>.cpp           objectMonitor.inline.hpp
   46.93 -objectMonitor_<os_family>.cpp           oop.inline.hpp
   46.94 -objectMonitor_<os_family>.cpp           osThread.hpp
   46.95 -objectMonitor_<os_family>.cpp           os_<os_family>.inline.hpp
   46.96 -objectMonitor_<os_family>.cpp           threadService.hpp
   46.97 -objectMonitor_<os_family>.cpp           thread_<os_family>.inline.hpp
   46.98 -objectMonitor_<os_family>.cpp           vmSymbols.hpp
   46.99 -
  46.100 -objectMonitor_<os_family>.hpp           generate_platform_dependent_include
  46.101 -objectMonitor_<os_family>.hpp           os_<os_family>.inline.hpp
  46.102 -objectMonitor_<os_family>.hpp           thread_<os_family>.inline.hpp
  46.103 -objectMonitor_<os_family>.hpp           top.hpp
  46.104 -
  46.105 -objectMonitor_<os_family>.inline.hpp    generate_platform_dependent_include
  46.106 +objectMonitor.hpp                       perfData.hpp
  46.107  
  46.108  oop.cpp                                 copy.hpp
  46.109  oop.cpp                                 handles.inline.hpp
  46.110 @@ -3231,6 +3239,7 @@
  46.111  orderAccess.hpp                         os.hpp
  46.112  
  46.113  orderAccess_<os_arch>.inline.hpp        orderAccess.hpp
  46.114 +orderAccess_<os_arch>.inline.hpp        vm_version_<arch>.hpp
  46.115  
  46.116  os.cpp                                  allocation.inline.hpp
  46.117  os.cpp                                  arguments.hpp
  46.118 @@ -3328,7 +3337,6 @@
  46.119  os_<os_family>.cpp                      nativeInst_<arch>.hpp
  46.120  os_<os_family>.cpp                      no_precompiled_headers
  46.121  os_<os_family>.cpp                      objectMonitor.hpp
  46.122 -os_<os_family>.cpp                      objectMonitor.inline.hpp
  46.123  os_<os_family>.cpp                      oop.inline.hpp
  46.124  os_<os_family>.cpp                      osThread.hpp
  46.125  os_<os_family>.cpp                      os_share_<os_family>.hpp
  46.126 @@ -3388,6 +3396,12 @@
  46.127  ostream.hpp                             allocation.hpp
  46.128  ostream.hpp                             timer.hpp
  46.129  
  46.130 +// include thread.hpp to prevent cyclic includes
  46.131 +park.cpp                                thread.hpp
  46.132 +
  46.133 +park.hpp                                debug.hpp
  46.134 +park.hpp                                globalDefinitions.hpp
  46.135 +
  46.136  pcDesc.cpp                              debugInfoRec.hpp
  46.137  pcDesc.cpp                              nmethod.hpp
  46.138  pcDesc.cpp                              pcDesc.hpp
  46.139 @@ -3600,7 +3614,9 @@
  46.140  relocator.cpp                           bytecodes.hpp
  46.141  relocator.cpp                           handles.inline.hpp
  46.142  relocator.cpp                           oop.inline.hpp
  46.143 +relocator.cpp                           oopFactory.hpp
  46.144  relocator.cpp                           relocator.hpp
  46.145 +relocator.cpp                           stackMapTableFormat.hpp
  46.146  relocator.cpp                           universe.inline.hpp
  46.147  
  46.148  relocator.hpp                           bytecodes.hpp
  46.149 @@ -3907,6 +3923,8 @@
  46.150  stackMapTable.hpp                       methodOop.hpp
  46.151  stackMapTable.hpp                       stackMapFrame.hpp
  46.152  
  46.153 +stackMapTableFormat.hpp                 verificationType.hpp
  46.154 +
  46.155  stackValue.cpp                          debugInfo.hpp
  46.156  stackValue.cpp                          frame.inline.hpp
  46.157  stackValue.cpp                          handles.inline.hpp
  46.158 @@ -4062,10 +4080,10 @@
  46.159  synchronizer.cpp                        resourceArea.hpp
  46.160  synchronizer.cpp                        stubRoutines.hpp
  46.161  synchronizer.cpp                        synchronizer.hpp
  46.162 -synchronizer.cpp                        threadService.hpp
  46.163  synchronizer.cpp                        thread_<os_family>.inline.hpp
  46.164  synchronizer.cpp                        vmSymbols.hpp
  46.165  
  46.166 +synchronizer.hpp                        basicLock.hpp
  46.167  synchronizer.hpp                        handles.hpp
  46.168  synchronizer.hpp                        markOop.hpp
  46.169  synchronizer.hpp                        perfData.hpp
  46.170 @@ -4237,7 +4255,6 @@
  46.171  thread.cpp                              mutexLocker.hpp
  46.172  thread.cpp                              objArrayOop.hpp
  46.173  thread.cpp                              objectMonitor.hpp
  46.174 -thread.cpp                              objectMonitor.inline.hpp
  46.175  thread.cpp                              oop.inline.hpp
  46.176  thread.cpp                              oopFactory.hpp
  46.177  thread.cpp                              osThread.hpp
  46.178 @@ -4275,6 +4292,7 @@
  46.179  thread.hpp                              oop.hpp
  46.180  thread.hpp                              os.hpp
  46.181  thread.hpp                              osThread.hpp
  46.182 +thread.hpp                              park.hpp
  46.183  thread.hpp                              safepoint.hpp
  46.184  thread.hpp                              stubRoutines.hpp
  46.185  thread.hpp                              threadLocalAllocBuffer.hpp
  46.186 @@ -4586,6 +4604,7 @@
  46.187  vframeArray.hpp                         growableArray.hpp
  46.188  vframeArray.hpp                         monitorChunk.hpp
  46.189  
  46.190 +vframe_hp.cpp                           basicLock.hpp
  46.191  vframe_hp.cpp                           codeCache.hpp
  46.192  vframe_hp.cpp                           debugInfoRec.hpp
  46.193  vframe_hp.cpp                           handles.inline.hpp
  46.194 @@ -4599,7 +4618,6 @@
  46.195  vframe_hp.cpp                           scopeDesc.hpp
  46.196  vframe_hp.cpp                           signature.hpp
  46.197  vframe_hp.cpp                           stubRoutines.hpp
  46.198 -vframe_hp.cpp                           synchronizer.hpp
  46.199  vframe_hp.cpp                           vframeArray.hpp
  46.200  vframe_hp.cpp                           vframe_hp.hpp
  46.201  
  46.202 @@ -4751,6 +4769,7 @@
  46.203  workgroup.cpp                           workgroup.hpp
  46.204  
  46.205  workgroup.hpp                           taskqueue.hpp
  46.206 +
  46.207  workgroup.hpp                           thread_<os_family>.inline.hpp
  46.208  
  46.209  xmlstream.cpp                           allocation.hpp
    47.1 --- a/src/share/vm/includeDB_features	Thu Nov 04 15:19:16 2010 -0700
    47.2 +++ b/src/share/vm/includeDB_features	Thu Nov 04 16:17:54 2010 -0700
    47.3 @@ -184,6 +184,13 @@
    47.4  jvmtiImpl.hpp                           systemDictionary.hpp
    47.5  jvmtiImpl.hpp                           vm_operations.hpp
    47.6  
    47.7 +jvmtiRawMonitor.cpp                     interfaceSupport.hpp
    47.8 +jvmtiRawMonitor.cpp                     jvmtiRawMonitor.hpp
    47.9 +jvmtiRawMonitor.cpp                     thread.hpp
   47.10 +
   47.11 +jvmtiRawMonitor.hpp                     growableArray.hpp
   47.12 +jvmtiRawMonitor.hpp                     objectMonitor.hpp
   47.13 +
   47.14  jvmtiTagMap.cpp                         biasedLocking.hpp
   47.15  jvmtiTagMap.cpp                         javaCalls.hpp
   47.16  jvmtiTagMap.cpp                         jniHandles.hpp
    48.1 --- a/src/share/vm/includeDB_jvmti	Thu Nov 04 15:19:16 2010 -0700
    48.2 +++ b/src/share/vm/includeDB_jvmti	Thu Nov 04 16:17:54 2010 -0700
    48.3 @@ -35,6 +35,7 @@
    48.4  // jvmtiCodeBlobEvents is jck optional, please put deps in includeDB_features
    48.5  
    48.6  jvmtiEnter.cpp                          jvmtiEnter.hpp
    48.7 +jvmtiEnter.cpp                          jvmtiRawMonitor.hpp
    48.8  jvmtiEnter.cpp                          jvmtiUtil.hpp
    48.9  
   48.10  jvmtiEnter.hpp                          interfaceSupport.hpp
   48.11 @@ -44,6 +45,7 @@
   48.12  jvmtiEnter.hpp                          systemDictionary.hpp
   48.13  
   48.14  jvmtiEnterTrace.cpp                     jvmtiEnter.hpp
   48.15 +jvmtiEnterTrace.cpp                     jvmtiRawMonitor.hpp
   48.16  jvmtiEnterTrace.cpp                     jvmtiUtil.hpp
   48.17  
   48.18  jvmtiEnv.cpp                            arguments.hpp
   48.19 @@ -66,11 +68,11 @@
   48.20  jvmtiEnv.cpp                            jvmtiGetLoadedClasses.hpp
   48.21  jvmtiEnv.cpp                            jvmtiImpl.hpp
   48.22  jvmtiEnv.cpp                            jvmtiManageCapabilities.hpp
   48.23 +jvmtiEnv.cpp                            jvmtiRawMonitor.hpp
   48.24  jvmtiEnv.cpp                            jvmtiRedefineClasses.hpp
   48.25  jvmtiEnv.cpp                            jvmtiTagMap.hpp
   48.26  jvmtiEnv.cpp                            jvmtiThreadState.inline.hpp
   48.27  jvmtiEnv.cpp                            jvmtiUtil.hpp
   48.28 -jvmtiEnv.cpp                            objectMonitor.inline.hpp
   48.29  jvmtiEnv.cpp                            osThread.hpp
   48.30  jvmtiEnv.cpp                            preserveException.hpp
   48.31  jvmtiEnv.cpp                            reflectionUtils.hpp
   48.32 @@ -178,11 +180,13 @@
   48.33  jvmtiExport.cpp                         jvmtiExport.hpp
   48.34  jvmtiExport.cpp                         jvmtiImpl.hpp
   48.35  jvmtiExport.cpp                         jvmtiManageCapabilities.hpp
   48.36 +jvmtiExport.cpp                         jvmtiRawMonitor.hpp
   48.37  jvmtiExport.cpp                         jvmtiTagMap.hpp
   48.38  jvmtiExport.cpp                         jvmtiThreadState.inline.hpp
   48.39  jvmtiExport.cpp                         nmethod.hpp
   48.40  jvmtiExport.cpp                         objArrayKlass.hpp
   48.41  jvmtiExport.cpp                         objArrayOop.hpp
   48.42 +jvmtiExport.cpp                         objectMonitor.hpp
   48.43  jvmtiExport.cpp                         objectMonitor.inline.hpp
   48.44  jvmtiExport.cpp                         pcDesc.hpp
   48.45  jvmtiExport.cpp                         resourceArea.hpp
   48.46 @@ -210,6 +214,8 @@
   48.47  jvmtiManageCapabilities.hpp             allocation.hpp
   48.48  jvmtiManageCapabilities.hpp             jvmti.h
   48.49  
   48.50 +// jvmtiRawMonitor is jck optional, please put deps in includeDB_features
   48.51 +
   48.52  jvmtiRedefineClasses.cpp                bitMap.inline.hpp
   48.53  jvmtiRedefineClasses.cpp                codeCache.hpp
   48.54  jvmtiRedefineClasses.cpp                deoptimization.hpp
    49.1 --- a/src/share/vm/memory/collectorPolicy.cpp	Thu Nov 04 15:19:16 2010 -0700
    49.2 +++ b/src/share/vm/memory/collectorPolicy.cpp	Thu Nov 04 16:17:54 2010 -0700
    49.3 @@ -659,9 +659,6 @@
    49.4      }
    49.5      return result;   // could be null if we are out of space
    49.6    } else if (!gch->incremental_collection_will_fail()) {
    49.7 -    // The gc_prologues have not executed yet.  The value
    49.8 -    // for incremental_collection_will_fail() is the remanent
    49.9 -    // of the last collection.
   49.10      // Do an incremental collection.
   49.11      gch->do_collection(false            /* full */,
   49.12                         false            /* clear_all_soft_refs */,
   49.13 @@ -739,9 +736,8 @@
   49.14    GenCollectedHeap* gch = GenCollectedHeap::heap();
   49.15    size_t gen0_capacity = gch->get_gen(0)->capacity_before_gc();
   49.16    return    (word_size > heap_word_size(gen0_capacity))
   49.17 -         || (GC_locker::is_active_and_needs_gc())
   49.18 -         || (   gch->last_incremental_collection_failed()
   49.19 -             && gch->incremental_collection_will_fail());
   49.20 +         || GC_locker::is_active_and_needs_gc()
   49.21 +         || gch->incremental_collection_failed();
   49.22  }
   49.23  
   49.24  
    50.1 --- a/src/share/vm/memory/defNewGeneration.cpp	Thu Nov 04 15:19:16 2010 -0700
    50.2 +++ b/src/share/vm/memory/defNewGeneration.cpp	Thu Nov 04 16:17:54 2010 -0700
    50.3 @@ -510,7 +510,7 @@
    50.4    // from this generation, pass on collection; let the next generation
    50.5    // do it.
    50.6    if (!collection_attempt_is_safe()) {
    50.7 -    gch->set_incremental_collection_will_fail();
    50.8 +    gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
    50.9      return;
   50.10    }
   50.11    assert(to()->is_empty(), "Else not collection_attempt_is_safe");
   50.12 @@ -596,9 +596,8 @@
   50.13      if (PrintGC && !PrintGCDetails) {
   50.14        gch->print_heap_change(gch_prev_used);
   50.15      }
   50.16 +    assert(!gch->incremental_collection_failed(), "Should be clear");
   50.17    } else {
   50.18 -    assert(HandlePromotionFailure,
   50.19 -      "Should not be here unless promotion failure handling is on");
   50.20      assert(_promo_failure_scan_stack.is_empty(), "post condition");
   50.21      _promo_failure_scan_stack.clear(true); // Clear cached segments.
   50.22  
   50.23 @@ -613,7 +612,7 @@
   50.24      // and from-space.
   50.25      swap_spaces();   // For uniformity wrt ParNewGeneration.
   50.26      from()->set_next_compaction_space(to());
   50.27 -    gch->set_incremental_collection_will_fail();
   50.28 +    gch->set_incremental_collection_failed();
   50.29  
   50.30      // Inform the next generation that a promotion failure occurred.
   50.31      _next_gen->promotion_failure_occurred();
   50.32 @@ -700,12 +699,6 @@
   50.33    if (obj == NULL) {
   50.34      obj = _next_gen->promote(old, s);
   50.35      if (obj == NULL) {
   50.36 -      if (!HandlePromotionFailure) {
   50.37 -        // A failed promotion likely means the MaxLiveObjectEvacuationRatio flag
   50.38 -        // is incorrectly set. In any case, its seriously wrong to be here!
   50.39 -        vm_exit_out_of_memory(s*wordSize, "promotion");
   50.40 -      }
   50.41 -
   50.42        handle_promotion_failure(old);
   50.43        return old;
   50.44      }
   50.45 @@ -812,47 +805,43 @@
   50.46      assert(_next_gen != NULL,
   50.47             "This must be the youngest gen, and not the only gen");
   50.48    }
   50.49 -
   50.50 -  // Decide if there's enough room for a full promotion
   50.51 -  // When using extremely large edens, we effectively lose a
   50.52 -  // large amount of old space.  Use the "MaxLiveObjectEvacuationRatio"
   50.53 -  // flag to reduce the minimum evacuation space requirements. If
   50.54 -  // there is not enough space to evacuate eden during a scavenge,
   50.55 -  // the VM will immediately exit with an out of memory error.
   50.56 -  // This flag has not been tested
   50.57 -  // with collectors other than simple mark & sweep.
   50.58 -  //
   50.59 -  // Note that with the addition of promotion failure handling, the
   50.60 -  // VM will not immediately exit but will undo the young generation
   50.61 -  // collection.  The parameter is left here for compatibility.
   50.62 -  const double evacuation_ratio = MaxLiveObjectEvacuationRatio / 100.0;
   50.63 -
   50.64 -  // worst_case_evacuation is based on "used()".  For the case where this
   50.65 -  // method is called after a collection, this is still appropriate because
   50.66 -  // the case that needs to be detected is one in which a full collection
   50.67 -  // has been done and has overflowed into the young generation.  In that
   50.68 -  // case a minor collection will fail (the overflow of the full collection
   50.69 -  // means there is no space in the old generation for any promotion).
   50.70 -  size_t worst_case_evacuation = (size_t)(used() * evacuation_ratio);
   50.71 -
   50.72 -  return _next_gen->promotion_attempt_is_safe(worst_case_evacuation,
   50.73 -                                              HandlePromotionFailure);
   50.74 +  return _next_gen->promotion_attempt_is_safe(used());
   50.75  }
   50.76  
   50.77  void DefNewGeneration::gc_epilogue(bool full) {
   50.78 +  DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)
   50.79 +
   50.80 +  assert(!GC_locker::is_active(), "We should not be executing here");
   50.81    // Check if the heap is approaching full after a collection has
   50.82    // been done.  Generally the young generation is empty at
   50.83    // a minimum at the end of a collection.  If it is not, then
   50.84    // the heap is approaching full.
   50.85    GenCollectedHeap* gch = GenCollectedHeap::heap();
   50.86 -  clear_should_allocate_from_space();
   50.87 -  if (collection_attempt_is_safe()) {
   50.88 -    gch->clear_incremental_collection_will_fail();
   50.89 +  if (full) {
   50.90 +    DEBUG_ONLY(seen_incremental_collection_failed = false;)
   50.91 +    if (!collection_attempt_is_safe()) {
   50.92 +      gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
   50.93 +      set_should_allocate_from_space(); // we seem to be running out of space
   50.94 +    } else {
   50.95 +      gch->clear_incremental_collection_failed(); // We just did a full collection
   50.96 +      clear_should_allocate_from_space(); // if set
   50.97 +    }
   50.98    } else {
   50.99 -    gch->set_incremental_collection_will_fail();
  50.100 -    if (full) { // we seem to be running out of space
  50.101 -      set_should_allocate_from_space();
  50.102 +#ifdef ASSERT
  50.103 +    // It is possible that incremental_collection_failed() == true
  50.104 +    // here, because an attempted scavenge did not succeed. The policy
  50.105 +    // is normally expected to cause a full collection which should
  50.106 +    // clear that condition, so we should not be here twice in a row
  50.107 +    // with incremental_collection_failed() == true without having done
  50.108 +    // a full collection in between.
  50.109 +    if (!seen_incremental_collection_failed &&
  50.110 +        gch->incremental_collection_failed()) {
  50.111 +      seen_incremental_collection_failed = true;
  50.112 +    } else if (seen_incremental_collection_failed) {
  50.113 +      assert(!gch->incremental_collection_failed(), "Twice in a row");
  50.114 +      seen_incremental_collection_failed = false;
  50.115      }
  50.116 +#endif // ASSERT
  50.117    }
  50.118  
  50.119    if (ZapUnusedHeapArea) {
    51.1 --- a/src/share/vm/memory/defNewGeneration.hpp	Thu Nov 04 15:19:16 2010 -0700
    51.2 +++ b/src/share/vm/memory/defNewGeneration.hpp	Thu Nov 04 16:17:54 2010 -0700
    51.3 @@ -82,12 +82,6 @@
    51.4    Stack<oop>     _objs_with_preserved_marks;
    51.5    Stack<markOop> _preserved_marks_of_objs;
    51.6  
    51.7 -  // Returns true if the collection can be safely attempted.
    51.8 -  // If this method returns false, a collection is not
    51.9 -  // guaranteed to fail but the system may not be able
   51.10 -  // to recover from the failure.
   51.11 -  bool collection_attempt_is_safe();
   51.12 -
   51.13    // Promotion failure handling
   51.14    OopClosure *_promo_failure_scan_stack_closure;
   51.15    void set_promo_failure_scan_stack_closure(OopClosure *scan_stack_closure) {
   51.16 @@ -304,6 +298,14 @@
   51.17  
   51.18    // GC support
   51.19    virtual void compute_new_size();
   51.20 +
   51.21 +  // Returns true if the collection is likely to be safely
   51.22 +  // completed. Even if this method returns true, a collection
   51.23 +  // may not be guaranteed to succeed, and the system should be
   51.24 +  // able to safely unwind and recover from that failure, albeit
   51.25 +  // at some additional cost. Override superclass's implementation.
   51.26 +  virtual bool collection_attempt_is_safe();
   51.27 +
   51.28    virtual void collect(bool   full,
   51.29                         bool   clear_all_soft_refs,
   51.30                         size_t size,
    52.1 --- a/src/share/vm/memory/genCollectedHeap.cpp	Thu Nov 04 15:19:16 2010 -0700
    52.2 +++ b/src/share/vm/memory/genCollectedHeap.cpp	Thu Nov 04 16:17:54 2010 -0700
    52.3 @@ -142,8 +142,7 @@
    52.4    }
    52.5    _perm_gen = perm_gen_spec->init(heap_rs, PermSize, rem_set());
    52.6  
    52.7 -  clear_incremental_collection_will_fail();
    52.8 -  clear_last_incremental_collection_failed();
    52.9 +  clear_incremental_collection_failed();
   52.10  
   52.11  #ifndef SERIALGC
   52.12    // If we are running CMS, create the collector responsible
   52.13 @@ -1347,17 +1346,6 @@
   52.14  };
   52.15  
   52.16  void GenCollectedHeap::gc_epilogue(bool full) {
   52.17 -  // Remember if a partial collection of the heap failed, and
   52.18 -  // we did a complete collection.
   52.19 -  if (full && incremental_collection_will_fail()) {
   52.20 -    set_last_incremental_collection_failed();
   52.21 -  } else {
   52.22 -    clear_last_incremental_collection_failed();
   52.23 -  }
   52.24 -  // Clear the flag, if set; the generation gc_epilogues will set the
   52.25 -  // flag again if the condition persists despite the collection.
   52.26 -  clear_incremental_collection_will_fail();
   52.27 -
   52.28  #ifdef COMPILER2
   52.29    assert(DerivedPointerTable::is_empty(), "derived pointer present");
   52.30    size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr()));
    53.1 --- a/src/share/vm/memory/genCollectedHeap.hpp	Thu Nov 04 15:19:16 2010 -0700
    53.2 +++ b/src/share/vm/memory/genCollectedHeap.hpp	Thu Nov 04 16:17:54 2010 -0700
    53.3 @@ -62,11 +62,10 @@
    53.4    // The generational collector policy.
    53.5    GenCollectorPolicy* _gen_policy;
    53.6  
    53.7 -  // If a generation would bail out of an incremental collection,
    53.8 -  // it sets this flag.  If the flag is set, satisfy_failed_allocation
    53.9 -  // will attempt allocating in all generations before doing a full GC.
   53.10 -  bool _incremental_collection_will_fail;
   53.11 -  bool _last_incremental_collection_failed;
   53.12 +  // Indicates that the most recent previous incremental collection failed.
   53.13 +  // The flag is cleared when an action is taken that might clear the
   53.14 +  // condition that caused that incremental collection to fail.
   53.15 +  bool _incremental_collection_failed;
   53.16  
   53.17    // In support of ExplicitGCInvokesConcurrent functionality
   53.18    unsigned int _full_collections_completed;
   53.19 @@ -469,26 +468,26 @@
   53.20    // call to "save_marks".
   53.21    bool no_allocs_since_save_marks(int level);
   53.22  
   53.23 +  // Returns true if an incremental collection is likely to fail.
   53.24 +  bool incremental_collection_will_fail() {
   53.25 +    // Assumes a 2-generation system; the first disjunct remembers if an
   53.26 +    // incremental collection failed, even when we thought (second disjunct)
   53.27 +    // that it would not.
   53.28 +    assert(heap()->collector_policy()->is_two_generation_policy(),
   53.29 +           "the following definition may not be suitable for an n(>2)-generation system");
   53.30 +    return incremental_collection_failed() || !get_gen(0)->collection_attempt_is_safe();
   53.31 +  }
   53.32 +
   53.33    // If a generation bails out of an incremental collection,
   53.34    // it sets this flag.
   53.35 -  bool incremental_collection_will_fail() {
   53.36 -    return _incremental_collection_will_fail;
   53.37 +  bool incremental_collection_failed() const {
   53.38 +    return _incremental_collection_failed;
   53.39    }
   53.40 -  void set_incremental_collection_will_fail() {
   53.41 -    _incremental_collection_will_fail = true;
   53.42 +  void set_incremental_collection_failed() {
   53.43 +    _incremental_collection_failed = true;
   53.44    }
   53.45 -  void clear_incremental_collection_will_fail() {
   53.46 -    _incremental_collection_will_fail = false;
   53.47 -  }
   53.48 -
   53.49 -  bool last_incremental_collection_failed() const {
   53.50 -    return _last_incremental_collection_failed;
   53.51 -  }
   53.52 -  void set_last_incremental_collection_failed() {
   53.53 -    _last_incremental_collection_failed = true;
   53.54 -  }
   53.55 -  void clear_last_incremental_collection_failed() {
   53.56 -    _last_incremental_collection_failed = false;
   53.57 +  void clear_incremental_collection_failed() {
   53.58 +    _incremental_collection_failed = false;
   53.59    }
   53.60  
   53.61    // Promotion of obj into gen failed.  Try to promote obj to higher non-perm
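
The reworked interface above separates a sticky, history-based flag from a forward-looking estimate; a rough summary of how the two compose (wording mine, not from the sources):

    // incremental_collection_failed()           -- a previous minor collection actually
    //                                              failed (e.g. promotion failure); stays
    //                                              set until something addresses the cause.
    // get_gen(0)->collection_attempt_is_safe()  -- asks the young generation whether the
    //                                              next attempt is likely to complete.
    // incremental_collection_will_fail()
    //     == incremental_collection_failed() || !get_gen(0)->collection_attempt_is_safe()
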
    54.1 --- a/src/share/vm/memory/generation.cpp	Thu Nov 04 15:19:16 2010 -0700
    54.2 +++ b/src/share/vm/memory/generation.cpp	Thu Nov 04 16:17:54 2010 -0700
    54.3 @@ -165,15 +165,16 @@
    54.4    return max;
    54.5  }
    54.6  
    54.7 -bool Generation::promotion_attempt_is_safe(size_t promotion_in_bytes,
    54.8 -                                           bool not_used) const {
    54.9 +bool Generation::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
   54.10 +  size_t available = max_contiguous_available();
   54.11 +  bool   res = (available >= max_promotion_in_bytes);
   54.12    if (PrintGC && Verbose) {
   54.13 -    gclog_or_tty->print_cr("Generation::promotion_attempt_is_safe"
   54.14 -                " contiguous_available: " SIZE_FORMAT
   54.15 -                " promotion_in_bytes: " SIZE_FORMAT,
   54.16 -                max_contiguous_available(), promotion_in_bytes);
   54.17 +    gclog_or_tty->print_cr(
   54.18 +      "Generation: promo attempt is%s safe: available("SIZE_FORMAT") %s max_promo("SIZE_FORMAT")",
   54.19 +      res? "":" not", available, res? ">=":"<",
   54.20 +      max_promotion_in_bytes);
   54.21    }
   54.22 -  return max_contiguous_available() >= promotion_in_bytes;
   54.23 +  return res;
   54.24  }
   54.25  
   54.26  // Ignores "ref" and calls allocate().
    55.1 --- a/src/share/vm/memory/generation.hpp	Thu Nov 04 15:19:16 2010 -0700
    55.2 +++ b/src/share/vm/memory/generation.hpp	Thu Nov 04 16:17:54 2010 -0700
    55.3 @@ -173,15 +173,11 @@
    55.4    // The largest number of contiguous free bytes in this or any higher generation.
    55.5    virtual size_t max_contiguous_available() const;
    55.6  
    55.7 -  // Returns true if promotions of the specified amount can
    55.8 -  // be attempted safely (without a vm failure).
    55.9 +  // Returns true if promotions of the specified amount are
   55.10 +  // likely to succeed without a promotion failure.
   55.11    // Promotion of the full amount is not guaranteed but
   55.12 -  // can be attempted.
   55.13 -  //   younger_handles_promotion_failure
   55.14 -  // is true if the younger generation handles a promotion
   55.15 -  // failure.
   55.16 -  virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes,
   55.17 -    bool younger_handles_promotion_failure) const;
   55.18 +  // might be attempted in the worst case.
   55.19 +  virtual bool promotion_attempt_is_safe(size_t max_promotion_in_bytes) const;
   55.20  
   55.21    // For a non-young generation, this interface can be used to inform a
   55.22    // generation that a promotion attempt into that generation failed.
   55.23 @@ -358,6 +354,16 @@
   55.24      return (full || should_allocate(word_size, is_tlab));
   55.25    }
   55.26  
   55.27 +  // Returns true if the collection is likely to be safely
   55.28 +  // completed. Even if this method returns true, a collection
   55.29 +  // may not be guaranteed to succeed, and the system should be
   55.30 +  // able to safely unwind and recover from that failure, albeit
   55.31 +  // at some additional cost.
   55.32 +  virtual bool collection_attempt_is_safe() {
   55.33 +    guarantee(false, "Are you sure you want to call this method?");
   55.34 +    return true;
   55.35 +  }
   55.36 +
   55.37    // Perform a garbage collection.
   55.38    // If full is true attempt a full garbage collection of this generation.
   55.39    // Otherwise, attempting to (at least) free enough space to support an
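
For context, the collection_attempt_is_safe hook added above introduces a deliberate base-class trap: only generations that opt in override it, and the default implementation guarantees against stray calls. The following is a self-contained sketch of that contract, not HotSpot code; the class names and the promotion heuristic are illustrative only:

    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>

    class GenerationSketch {
     public:
      virtual ~GenerationSketch() {}
      // Default: trap unexpected callers, mirroring guarantee(false, ...).
      virtual bool collection_attempt_is_safe() {
        std::fprintf(stderr, "Are you sure you want to call this method?\n");
        std::abort();
      }
    };

    class YoungGenSketch : public GenerationSketch {
      size_t _used;             // bytes currently live in this generation
      size_t _older_available;  // contiguous space available in the next generation
     public:
      YoungGenSketch(size_t used, size_t older_available)
        : _used(used), _older_available(older_available) {}
      // "Likely safe" if the older generation could absorb everything live
      // here in the worst case; a failed collection must still be recoverable.
      virtual bool collection_attempt_is_safe() {
        return _older_available >= _used;
      }
    };
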
    56.1 --- a/src/share/vm/memory/tenuredGeneration.cpp	Thu Nov 04 15:19:16 2010 -0700
    56.2 +++ b/src/share/vm/memory/tenuredGeneration.cpp	Thu Nov 04 16:17:54 2010 -0700
    56.3 @@ -419,29 +419,16 @@
    56.4  void TenuredGeneration::verify_alloc_buffers_clean() {}
    56.5  #endif // SERIALGC
    56.6  
    56.7 -bool TenuredGeneration::promotion_attempt_is_safe(
    56.8 -    size_t max_promotion_in_bytes,
    56.9 -    bool younger_handles_promotion_failure) const {
   56.10 -
   56.11 -  bool result = max_contiguous_available() >= max_promotion_in_bytes;
   56.12 -
   56.13 -  if (younger_handles_promotion_failure && !result) {
   56.14 -    result = max_contiguous_available() >=
   56.15 -      (size_t) gc_stats()->avg_promoted()->padded_average();
   56.16 -    if (PrintGC && Verbose && result) {
   56.17 -      gclog_or_tty->print_cr("TenuredGeneration::promotion_attempt_is_safe"
   56.18 -                  " contiguous_available: " SIZE_FORMAT
   56.19 -                  " avg_promoted: " SIZE_FORMAT,
   56.20 -                  max_contiguous_available(),
   56.21 -                  gc_stats()->avg_promoted()->padded_average());
   56.22 -    }
   56.23 -  } else {
   56.24 -    if (PrintGC && Verbose) {
   56.25 -      gclog_or_tty->print_cr("TenuredGeneration::promotion_attempt_is_safe"
   56.26 -                  " contiguous_available: " SIZE_FORMAT
   56.27 -                  " promotion_in_bytes: " SIZE_FORMAT,
   56.28 -                  max_contiguous_available(), max_promotion_in_bytes);
   56.29 -    }
   56.30 +bool TenuredGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
   56.31 +  size_t available = max_contiguous_available();
   56.32 +  size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
   56.33 +  bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
   56.34 +  if (PrintGC && Verbose) {
   56.35 +    gclog_or_tty->print_cr(
   56.36 +      "Tenured: promo attempt is%s safe: available("SIZE_FORMAT") %s av_promo("SIZE_FORMAT"),"
   56.37 +      "max_promo("SIZE_FORMAT")",
   56.38 +      res? "":" not", available, res? ">=":"<",
   56.39 +      av_promo, max_promotion_in_bytes);
   56.40    }
   56.41 -  return result;
   56.42 +  return res;
   56.43  }
    57.1 --- a/src/share/vm/memory/tenuredGeneration.hpp	Thu Nov 04 15:19:16 2010 -0700
    57.2 +++ b/src/share/vm/memory/tenuredGeneration.hpp	Thu Nov 04 16:17:54 2010 -0700
    57.3 @@ -101,8 +101,7 @@
    57.4  
    57.5    virtual void update_gc_stats(int level, bool full);
    57.6  
    57.7 -  virtual bool promotion_attempt_is_safe(size_t max_promoted_in_bytes,
    57.8 -    bool younger_handles_promotion_failure) const;
    57.9 +  virtual bool promotion_attempt_is_safe(size_t max_promoted_in_bytes) const;
   57.10  
   57.11    void verify_alloc_buffers_clean();
   57.12  };
    58.1 --- a/src/share/vm/oops/methodOop.hpp	Thu Nov 04 15:19:16 2010 -0700
    58.2 +++ b/src/share/vm/oops/methodOop.hpp	Thu Nov 04 16:17:54 2010 -0700
    58.3 @@ -247,6 +247,10 @@
    58.4      return constMethod()->stackmap_data();
    58.5    }
    58.6  
    58.7 +  void set_stackmap_data(typeArrayOop sd) {
    58.8 +    constMethod()->set_stackmap_data(sd);
    58.9 +  }
   58.10 +
   58.11    // exception handler table
   58.12    typeArrayOop exception_table() const
   58.13                                     { return constMethod()->exception_table(); }
    59.1 --- a/src/share/vm/prims/jvmtiImpl.cpp	Thu Nov 04 15:19:16 2010 -0700
    59.2 +++ b/src/share/vm/prims/jvmtiImpl.cpp	Thu Nov 04 16:17:54 2010 -0700
    59.3 @@ -25,26 +25,6 @@
    59.4  # include "incls/_precompiled.incl"
    59.5  # include "incls/_jvmtiImpl.cpp.incl"
    59.6  
    59.7 -GrowableArray<JvmtiRawMonitor*> *JvmtiPendingMonitors::_monitors = new (ResourceObj::C_HEAP) GrowableArray<JvmtiRawMonitor*>(1,true);
    59.8 -
    59.9 -void JvmtiPendingMonitors::transition_raw_monitors() {
   59.10 -  assert((Threads::number_of_threads()==1),
   59.11 -         "Java thread has not created yet or more than one java thread \
   59.12 -is running. Raw monitor transition will not work");
   59.13 -  JavaThread *current_java_thread = JavaThread::current();
   59.14 -  assert(current_java_thread->thread_state() == _thread_in_vm, "Must be in vm");
   59.15 -  {
   59.16 -    ThreadBlockInVM __tbivm(current_java_thread);
   59.17 -    for(int i=0; i< count(); i++) {
   59.18 -      JvmtiRawMonitor *rmonitor = monitors()->at(i);
   59.19 -      int r = rmonitor->raw_enter(current_java_thread);
   59.20 -      assert(r == ObjectMonitor::OM_OK, "raw_enter should have worked");
   59.21 -    }
   59.22 -  }
   59.23 -  // pending monitors are converted to real monitor so delete them all.
   59.24 -  dispose();
   59.25 -}
   59.26 -
   59.27  //
   59.28  // class JvmtiAgentThread
   59.29  //
   59.30 @@ -216,57 +196,6 @@
   59.31    }
   59.32  }
   59.33  
   59.34 -
   59.35 -//
   59.36 -// class JvmtiRawMonitor
   59.37 -//
   59.38 -
   59.39 -JvmtiRawMonitor::JvmtiRawMonitor(const char *name) {
   59.40 -#ifdef ASSERT
   59.41 -  _name = strcpy(NEW_C_HEAP_ARRAY(char, strlen(name) + 1), name);
   59.42 -#else
   59.43 -  _name = NULL;
   59.44 -#endif
   59.45 -  _magic = JVMTI_RM_MAGIC;
   59.46 -}
   59.47 -
   59.48 -JvmtiRawMonitor::~JvmtiRawMonitor() {
   59.49 -#ifdef ASSERT
   59.50 -  FreeHeap(_name);
   59.51 -#endif
   59.52 -  _magic = 0;
   59.53 -}
   59.54 -
   59.55 -
   59.56 -bool
   59.57 -JvmtiRawMonitor::is_valid() {
   59.58 -  int value = 0;
   59.59 -
   59.60 -  // This object might not be a JvmtiRawMonitor so we can't assume
   59.61 -  // the _magic field is properly aligned. Get the value in a safe
   59.62 -  // way and then check against JVMTI_RM_MAGIC.
   59.63 -
   59.64 -  switch (sizeof(_magic)) {
   59.65 -  case 2:
   59.66 -    value = Bytes::get_native_u2((address)&_magic);
   59.67 -    break;
   59.68 -
   59.69 -  case 4:
   59.70 -    value = Bytes::get_native_u4((address)&_magic);
   59.71 -    break;
   59.72 -
   59.73 -  case 8:
   59.74 -    value = Bytes::get_native_u8((address)&_magic);
   59.75 -    break;
   59.76 -
   59.77 -  default:
   59.78 -    guarantee(false, "_magic field is an unexpected size");
   59.79 -  }
   59.80 -
   59.81 -  return value == JVMTI_RM_MAGIC;
   59.82 -}
   59.83 -
   59.84 -
   59.85  //
   59.86  // class JvmtiBreakpoint
   59.87  //
    60.1 --- a/src/share/vm/prims/jvmtiImpl.hpp	Thu Nov 04 15:19:16 2010 -0700
    60.2 +++ b/src/share/vm/prims/jvmtiImpl.hpp	Thu Nov 04 16:17:54 2010 -0700
    60.3 @@ -26,7 +26,6 @@
    60.4  // Forward Declarations
    60.5  //
    60.6  
    60.7 -class JvmtiRawMonitor;
    60.8  class JvmtiBreakpoint;
    60.9  class JvmtiBreakpoints;
   60.10  
   60.11 @@ -327,76 +326,6 @@
   60.12      return false;
   60.13  }
   60.14  
   60.15 -
   60.16 -///////////////////////////////////////////////////////////////
   60.17 -//
   60.18 -// class JvmtiRawMonitor
   60.19 -//
   60.20 -// Used by JVMTI methods: All RawMonitor methods (CreateRawMonitor, EnterRawMonitor, etc.)
   60.21 -//
   60.22 -// Wrapper for ObjectMonitor class that saves the Monitor's name
   60.23 -//
   60.24 -
   60.25 -class JvmtiRawMonitor : public ObjectMonitor  {
   60.26 -private:
   60.27 -  int           _magic;
   60.28 -  char *        _name;
   60.29 -  // JVMTI_RM_MAGIC is set in contructor and unset in destructor.
   60.30 -  enum { JVMTI_RM_MAGIC = (int)(('T' << 24) | ('I' << 16) | ('R' << 8) | 'M') };
   60.31 -
   60.32 -public:
   60.33 -  JvmtiRawMonitor(const char *name);
   60.34 -  ~JvmtiRawMonitor();
   60.35 -  int            magic()   { return _magic;  }
   60.36 -  const char *get_name()   { return _name; }
   60.37 -  bool        is_valid();
   60.38 -};
   60.39 -
   60.40 -// Onload pending raw monitors
   60.41 -// Class is used to cache onload or onstart monitor enter
   60.42 -// which will transition into real monitor when
   60.43 -// VM is fully initialized.
   60.44 -class JvmtiPendingMonitors : public AllStatic {
   60.45 -
   60.46 -private:
   60.47 -  static GrowableArray<JvmtiRawMonitor*> *_monitors; // Cache raw monitor enter
   60.48 -
   60.49 -  inline static GrowableArray<JvmtiRawMonitor*>* monitors() { return _monitors; }
   60.50 -
   60.51 -  static void dispose() {
   60.52 -    delete monitors();
   60.53 -  }
   60.54 -
   60.55 -public:
   60.56 -  static void enter(JvmtiRawMonitor *monitor) {
   60.57 -    monitors()->append(monitor);
   60.58 -  }
   60.59 -
   60.60 -  static int count() {
   60.61 -    return monitors()->length();
   60.62 -  }
   60.63 -
   60.64 -  static void destroy(JvmtiRawMonitor *monitor) {
   60.65 -    while (monitors()->contains(monitor)) {
   60.66 -      monitors()->remove(monitor);
   60.67 -    }
   60.68 -  }
   60.69 -
   60.70 -  // Return false if monitor is not found in the list.
   60.71 -  static bool exit(JvmtiRawMonitor *monitor) {
   60.72 -    if (monitors()->contains(monitor)) {
   60.73 -      monitors()->remove(monitor);
   60.74 -      return true;
   60.75 -    } else {
   60.76 -      return false;
   60.77 -    }
   60.78 -  }
   60.79 -
   60.80 -  static void transition_raw_monitors();
   60.81 -};
   60.82 -
   60.83 -
   60.84 -
   60.85  ///////////////////////////////////////////////////////////////
   60.86  // The get/set local operations must only be done by the VM thread
   60.87  // because the interpreter version needs to access oop maps, which can
    61.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    61.2 +++ b/src/share/vm/prims/jvmtiRawMonitor.cpp	Thu Nov 04 16:17:54 2010 -0700
    61.3 @@ -0,0 +1,420 @@
    61.4 +/*
    61.5 + * Copyright (c) 2003, 2007, Oracle and/or its affiliates. All rights reserved.
    61.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    61.7 + *
    61.8 + * This code is free software; you can redistribute it and/or modify it
    61.9 + * under the terms of the GNU General Public License version 2 only, as
   61.10 + * published by the Free Software Foundation.
   61.11 + *
   61.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   61.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   61.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   61.15 + * version 2 for more details (a copy is included in the LICENSE file that
   61.16 + * accompanied this code).
   61.17 + *
   61.18 + * You should have received a copy of the GNU General Public License version
   61.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   61.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   61.21 + *
   61.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   61.23 + * or visit www.oracle.com if you need additional information or have any
   61.24 + * questions.
   61.25 + *
   61.26 + */
   61.27 +
   61.28 +# include "incls/_precompiled.incl"
   61.29 +# include "incls/_jvmtiRawMonitor.cpp.incl"
   61.30 +
   61.31 +GrowableArray<JvmtiRawMonitor*> *JvmtiPendingMonitors::_monitors = new (ResourceObj::C_HEAP) GrowableArray<JvmtiRawMonitor*>(1,true);
   61.32 +
   61.33 +void JvmtiPendingMonitors::transition_raw_monitors() {
   61.34 +  assert((Threads::number_of_threads()==1),
   61.35 +         "Java thread has not created yet or more than one java thread \
   61.36 +is running. Raw monitor transition will not work");
   61.37 +  JavaThread *current_java_thread = JavaThread::current();
   61.38 +  assert(current_java_thread->thread_state() == _thread_in_vm, "Must be in vm");
   61.39 +  {
   61.40 +    ThreadBlockInVM __tbivm(current_java_thread);
   61.41 +    for(int i=0; i< count(); i++) {
   61.42 +      JvmtiRawMonitor *rmonitor = monitors()->at(i);
   61.43 +      int r = rmonitor->raw_enter(current_java_thread);
   61.44 +      assert(r == ObjectMonitor::OM_OK, "raw_enter should have worked");
   61.45 +    }
   61.46 +  }
   61.47 +  // pending monitors are converted to real monitor so delete them all.
   61.48 +  dispose();
   61.49 +}
   61.50 +
   61.51 +//
   61.52 +// class JvmtiRawMonitor
   61.53 +//
   61.54 +
   61.55 +JvmtiRawMonitor::JvmtiRawMonitor(const char *name) {
   61.56 +#ifdef ASSERT
   61.57 +  _name = strcpy(NEW_C_HEAP_ARRAY(char, strlen(name) + 1), name);
   61.58 +#else
   61.59 +  _name = NULL;
   61.60 +#endif
   61.61 +  _magic = JVMTI_RM_MAGIC;
   61.62 +}
   61.63 +
   61.64 +JvmtiRawMonitor::~JvmtiRawMonitor() {
   61.65 +#ifdef ASSERT
   61.66 +  FreeHeap(_name);
   61.67 +#endif
   61.68 +  _magic = 0;
   61.69 +}
   61.70 +
   61.71 +
   61.72 +bool
   61.73 +JvmtiRawMonitor::is_valid() {
   61.74 +  int value = 0;
   61.75 +
   61.76 +  // This object might not be a JvmtiRawMonitor so we can't assume
   61.77 +  // the _magic field is properly aligned. Get the value in a safe
   61.78 +  // way and then check against JVMTI_RM_MAGIC.
   61.79 +
   61.80 +  switch (sizeof(_magic)) {
   61.81 +  case 2:
   61.82 +    value = Bytes::get_native_u2((address)&_magic);
   61.83 +    break;
   61.84 +
   61.85 +  case 4:
   61.86 +    value = Bytes::get_native_u4((address)&_magic);
   61.87 +    break;
   61.88 +
   61.89 +  case 8:
   61.90 +    value = Bytes::get_native_u8((address)&_magic);
   61.91 +    break;
   61.92 +
   61.93 +  default:
   61.94 +    guarantee(false, "_magic field is an unexpected size");
   61.95 +  }
   61.96 +
   61.97 +  return value == JVMTI_RM_MAGIC;
   61.98 +}
   61.99 +
  61.100 +// -------------------------------------------------------------------------
  61.101 +// The raw monitor subsystem is entirely distinct from normal
  61.102 +// java-synchronization or jni-synchronization.  raw monitors are not
  61.103 +// associated with objects.  They can be implemented in any manner
  61.104 +// that makes sense.  The original implementors decided to piggy-back
  61.105 +// the raw-monitor implementation on the existing Java objectMonitor mechanism.
   61.106 +// This flaw needs to be fixed.  We should reimplement raw monitors as sui generis.
  61.107 +// Specifically, we should not implement raw monitors via java monitors.
  61.108 +// Time permitting, we should disentangle and deconvolve the two implementations
  61.109 +// and move the resulting raw monitor implementation over to the JVMTI directories.
  61.110 +// Ideally, the raw monitor implementation would be built on top of
  61.111 +// park-unpark and nothing else.
  61.112 +//
  61.113 +// raw monitors are used mainly by JVMTI
  61.114 +// The raw monitor implementation borrows the ObjectMonitor structure,
  61.115 +// but the operators are degenerate and extremely simple.
  61.116 +//
  61.117 +// Mixed use of a single objectMonitor instance -- as both a raw monitor
  61.118 +// and a normal java monitor -- is not permissible.
  61.119 +//
  61.120 +// Note that we use the single RawMonitor_lock to protect queue operations for
  61.121 +// _all_ raw monitors.  This is a scalability impediment, but since raw monitor usage
  61.122 +// is deprecated and rare, this is not of concern.  The RawMonitor_lock can not
  61.123 +// be held indefinitely.  The critical sections must be short and bounded.
  61.124 +//
  61.125 +// -------------------------------------------------------------------------
  61.126 +
  61.127 +int JvmtiRawMonitor::SimpleEnter (Thread * Self) {
  61.128 +  for (;;) {
  61.129 +    if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
  61.130 +       return OS_OK ;
  61.131 +    }
  61.132 +
  61.133 +    ObjectWaiter Node (Self) ;
  61.134 +    Self->_ParkEvent->reset() ;     // strictly optional
  61.135 +    Node.TState = ObjectWaiter::TS_ENTER ;
  61.136 +
  61.137 +    RawMonitor_lock->lock_without_safepoint_check() ;
  61.138 +    Node._next  = _EntryList ;
  61.139 +    _EntryList  = &Node ;
  61.140 +    OrderAccess::fence() ;
  61.141 +    if (_owner == NULL && Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
  61.142 +        _EntryList = Node._next ;
  61.143 +        RawMonitor_lock->unlock() ;
  61.144 +        return OS_OK ;
  61.145 +    }
  61.146 +    RawMonitor_lock->unlock() ;
  61.147 +    while (Node.TState == ObjectWaiter::TS_ENTER) {
  61.148 +       Self->_ParkEvent->park() ;
  61.149 +    }
  61.150 +  }
  61.151 +}
  61.152 +
  61.153 +int JvmtiRawMonitor::SimpleExit (Thread * Self) {
  61.154 +  guarantee (_owner == Self, "invariant") ;
  61.155 +  OrderAccess::release_store_ptr (&_owner, NULL) ;
  61.156 +  OrderAccess::fence() ;
  61.157 +  if (_EntryList == NULL) return OS_OK ;
  61.158 +  ObjectWaiter * w ;
  61.159 +
  61.160 +  RawMonitor_lock->lock_without_safepoint_check() ;
  61.161 +  w = _EntryList ;
  61.162 +  if (w != NULL) {
  61.163 +      _EntryList = w->_next ;
  61.164 +  }
  61.165 +  RawMonitor_lock->unlock() ;
  61.166 +  if (w != NULL) {
  61.167 +      guarantee (w ->TState == ObjectWaiter::TS_ENTER, "invariant") ;
  61.168 +      ParkEvent * ev = w->_event ;
  61.169 +      w->TState = ObjectWaiter::TS_RUN ;
  61.170 +      OrderAccess::fence() ;
  61.171 +      ev->unpark() ;
  61.172 +  }
  61.173 +  return OS_OK ;
  61.174 +}
  61.175 +
  61.176 +int JvmtiRawMonitor::SimpleWait (Thread * Self, jlong millis) {
  61.177 +  guarantee (_owner == Self  , "invariant") ;
  61.178 +  guarantee (_recursions == 0, "invariant") ;
  61.179 +
  61.180 +  ObjectWaiter Node (Self) ;
  61.181 +  Node._notified = 0 ;
  61.182 +  Node.TState    = ObjectWaiter::TS_WAIT ;
  61.183 +
  61.184 +  RawMonitor_lock->lock_without_safepoint_check() ;
  61.185 +  Node._next     = _WaitSet ;
  61.186 +  _WaitSet       = &Node ;
  61.187 +  RawMonitor_lock->unlock() ;
  61.188 +
  61.189 +  SimpleExit (Self) ;
  61.190 +  guarantee (_owner != Self, "invariant") ;
  61.191 +
  61.192 +  int ret = OS_OK ;
  61.193 +  if (millis <= 0) {
  61.194 +    Self->_ParkEvent->park();
  61.195 +  } else {
  61.196 +    ret = Self->_ParkEvent->park(millis);
  61.197 +  }
  61.198 +
  61.199 +  // If thread still resides on the waitset then unlink it.
  61.200 +  // Double-checked locking -- the usage is safe in this context
   61.201 +  // as TState is volatile and the lock-unlock operators are
  61.202 +  // serializing (barrier-equivalent).
  61.203 +
  61.204 +  if (Node.TState == ObjectWaiter::TS_WAIT) {
  61.205 +    RawMonitor_lock->lock_without_safepoint_check() ;
  61.206 +    if (Node.TState == ObjectWaiter::TS_WAIT) {
  61.207 +      // Simple O(n) unlink, but performance isn't critical here.
  61.208 +      ObjectWaiter * p ;
  61.209 +      ObjectWaiter * q = NULL ;
  61.210 +      for (p = _WaitSet ; p != &Node; p = p->_next) {
  61.211 +         q = p ;
  61.212 +      }
  61.213 +      guarantee (p == &Node, "invariant") ;
  61.214 +      if (q == NULL) {
  61.215 +        guarantee (p == _WaitSet, "invariant") ;
  61.216 +        _WaitSet = p->_next ;
  61.217 +      } else {
  61.218 +        guarantee (p == q->_next, "invariant") ;
  61.219 +        q->_next = p->_next ;
  61.220 +      }
  61.221 +      Node.TState = ObjectWaiter::TS_RUN ;
  61.222 +    }
  61.223 +    RawMonitor_lock->unlock() ;
  61.224 +  }
  61.225 +
  61.226 +  guarantee (Node.TState == ObjectWaiter::TS_RUN, "invariant") ;
  61.227 +  SimpleEnter (Self) ;
  61.228 +
  61.229 +  guarantee (_owner == Self, "invariant") ;
  61.230 +  guarantee (_recursions == 0, "invariant") ;
  61.231 +  return ret ;
  61.232 +}
  61.233 +
  61.234 +int JvmtiRawMonitor::SimpleNotify (Thread * Self, bool All) {
  61.235 +  guarantee (_owner == Self, "invariant") ;
  61.236 +  if (_WaitSet == NULL) return OS_OK ;
  61.237 +
  61.238 +  // We have two options:
  61.239 +  // A. Transfer the threads from the WaitSet to the EntryList
  61.240 +  // B. Remove the thread from the WaitSet and unpark() it.
  61.241 +  //
  61.242 +  // We use (B), which is crude and results in lots of futile
  61.243 +  // context switching.  In particular (B) induces lots of contention.
  61.244 +
  61.245 +  ParkEvent * ev = NULL ;       // consider using a small auto array ...
  61.246 +  RawMonitor_lock->lock_without_safepoint_check() ;
  61.247 +  for (;;) {
  61.248 +      ObjectWaiter * w = _WaitSet ;
  61.249 +      if (w == NULL) break ;
  61.250 +      _WaitSet = w->_next ;
  61.251 +      if (ev != NULL) { ev->unpark(); ev = NULL; }
  61.252 +      ev = w->_event ;
  61.253 +      OrderAccess::loadstore() ;
  61.254 +      w->TState = ObjectWaiter::TS_RUN ;
  61.255 +      OrderAccess::storeload();
  61.256 +      if (!All) break ;
  61.257 +  }
  61.258 +  RawMonitor_lock->unlock() ;
  61.259 +  if (ev != NULL) ev->unpark();
  61.260 +  return OS_OK ;
  61.261 +}
  61.262 +
  61.263 +// Any JavaThread will enter here with state _thread_blocked
  61.264 +int JvmtiRawMonitor::raw_enter(TRAPS) {
  61.265 +  TEVENT (raw_enter) ;
  61.266 +  void * Contended ;
  61.267 +
  61.268 +  // don't enter raw monitor if thread is being externally suspended, it will
  61.269 +  // surprise the suspender if a "suspended" thread can still enter monitor
  61.270 +  JavaThread * jt = (JavaThread *)THREAD;
  61.271 +  if (THREAD->is_Java_thread()) {
  61.272 +    jt->SR_lock()->lock_without_safepoint_check();
  61.273 +    while (jt->is_external_suspend()) {
  61.274 +      jt->SR_lock()->unlock();
  61.275 +      jt->java_suspend_self();
  61.276 +      jt->SR_lock()->lock_without_safepoint_check();
  61.277 +    }
  61.278 +    // guarded by SR_lock to avoid racing with new external suspend requests.
  61.279 +    Contended = Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) ;
  61.280 +    jt->SR_lock()->unlock();
  61.281 +  } else {
  61.282 +    Contended = Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) ;
  61.283 +  }
  61.284 +
  61.285 +  if (Contended == THREAD) {
  61.286 +     _recursions ++ ;
  61.287 +     return OM_OK ;
  61.288 +  }
  61.289 +
  61.290 +  if (Contended == NULL) {
  61.291 +     guarantee (_owner == THREAD, "invariant") ;
  61.292 +     guarantee (_recursions == 0, "invariant") ;
  61.293 +     return OM_OK ;
  61.294 +  }
  61.295 +
  61.296 +  THREAD->set_current_pending_monitor(this);
  61.297 +
  61.298 +  if (!THREAD->is_Java_thread()) {
  61.299 +     // No other non-Java threads besides VM thread would acquire
  61.300 +     // a raw monitor.
  61.301 +     assert(THREAD->is_VM_thread(), "must be VM thread");
  61.302 +     SimpleEnter (THREAD) ;
  61.303 +   } else {
  61.304 +     guarantee (jt->thread_state() == _thread_blocked, "invariant") ;
  61.305 +     for (;;) {
  61.306 +       jt->set_suspend_equivalent();
  61.307 +       // cleared by handle_special_suspend_equivalent_condition() or
  61.308 +       // java_suspend_self()
  61.309 +       SimpleEnter (THREAD) ;
  61.310 +
  61.311 +       // were we externally suspended while we were waiting?
  61.312 +       if (!jt->handle_special_suspend_equivalent_condition()) break ;
  61.313 +
  61.314 +       // This thread was externally suspended
  61.315 +       //
  61.316 +       // This logic isn't needed for JVMTI raw monitors,
  61.317 +       // but doesn't hurt just in case the suspend rules change. This
   61.318 +       // logic is needed for the JvmtiRawMonitor.wait() reentry phase.
   61.319 +       // We have reentered the contended monitor, but while we were
   61.320 +       // waiting another thread suspended us. We don't want to reenter
   61.321 +       // the monitor while suspended because that would surprise the
   61.322 +       // thread that suspended us.
   61.323 +       //
   61.324 +       // Drop the lock -
   61.325 +       SimpleExit (THREAD) ;
   61.326 +
   61.327 +       jt->java_suspend_self();
   61.328 +     }
  61.329 +
  61.330 +     assert(_owner == THREAD, "Fatal error with monitor owner!");
  61.331 +     assert(_recursions == 0, "Fatal error with monitor recursions!");
  61.332 +  }
  61.333 +
  61.334 +  THREAD->set_current_pending_monitor(NULL);
  61.335 +  guarantee (_recursions == 0, "invariant") ;
  61.336 +  return OM_OK;
  61.337 +}
  61.338 +
  61.339 +// Used mainly for JVMTI raw monitor implementation
  61.340 +// Also used for JvmtiRawMonitor::wait().
  61.341 +int JvmtiRawMonitor::raw_exit(TRAPS) {
  61.342 +  TEVENT (raw_exit) ;
  61.343 +  if (THREAD != _owner) {
  61.344 +    return OM_ILLEGAL_MONITOR_STATE;
  61.345 +  }
  61.346 +  if (_recursions > 0) {
  61.347 +    --_recursions ;
  61.348 +    return OM_OK ;
  61.349 +  }
  61.350 +
  61.351 +  void * List = _EntryList ;
  61.352 +  SimpleExit (THREAD) ;
  61.353 +
  61.354 +  return OM_OK;
  61.355 +}
  61.356 +
  61.357 +// Used for JVMTI raw monitor implementation.
  61.358 +// All JavaThreads will enter here with state _thread_blocked
  61.359 +
  61.360 +int JvmtiRawMonitor::raw_wait(jlong millis, bool interruptible, TRAPS) {
  61.361 +  TEVENT (raw_wait) ;
  61.362 +  if (THREAD != _owner) {
  61.363 +    return OM_ILLEGAL_MONITOR_STATE;
  61.364 +  }
  61.365 +
  61.366 +  // To avoid spurious wakeups we reset the parkevent -- This is strictly optional.
  61.367 +  // The caller must be able to tolerate spurious returns from raw_wait().
  61.368 +  THREAD->_ParkEvent->reset() ;
  61.369 +  OrderAccess::fence() ;
  61.370 +
  61.371 +  // check interrupt event
  61.372 +  if (interruptible && Thread::is_interrupted(THREAD, true)) {
  61.373 +    return OM_INTERRUPTED;
  61.374 +  }
  61.375 +
  61.376 +  intptr_t save = _recursions ;
  61.377 +  _recursions = 0 ;
  61.378 +  _waiters ++ ;
  61.379 +  if (THREAD->is_Java_thread()) {
  61.380 +    guarantee (((JavaThread *) THREAD)->thread_state() == _thread_blocked, "invariant") ;
  61.381 +    ((JavaThread *)THREAD)->set_suspend_equivalent();
  61.382 +  }
  61.383 +  int rv = SimpleWait (THREAD, millis) ;
  61.384 +  _recursions = save ;
  61.385 +  _waiters -- ;
  61.386 +
  61.387 +  guarantee (THREAD == _owner, "invariant") ;
  61.388 +  if (THREAD->is_Java_thread()) {
  61.389 +     JavaThread * jSelf = (JavaThread *) THREAD ;
  61.390 +     for (;;) {
  61.391 +        if (!jSelf->handle_special_suspend_equivalent_condition()) break ;
  61.392 +        SimpleExit (THREAD) ;
  61.393 +        jSelf->java_suspend_self();
  61.394 +        SimpleEnter (THREAD) ;
  61.395 +        jSelf->set_suspend_equivalent() ;
  61.396 +     }
  61.397 +  }
  61.398 +  guarantee (THREAD == _owner, "invariant") ;
  61.399 +
  61.400 +  if (interruptible && Thread::is_interrupted(THREAD, true)) {
  61.401 +    return OM_INTERRUPTED;
  61.402 +  }
  61.403 +  return OM_OK ;
  61.404 +}
  61.405 +
  61.406 +int JvmtiRawMonitor::raw_notify(TRAPS) {
  61.407 +  TEVENT (raw_notify) ;
  61.408 +  if (THREAD != _owner) {
  61.409 +    return OM_ILLEGAL_MONITOR_STATE;
  61.410 +  }
  61.411 +  SimpleNotify (THREAD, false) ;
  61.412 +  return OM_OK;
  61.413 +}
  61.414 +
  61.415 +int JvmtiRawMonitor::raw_notifyAll(TRAPS) {
  61.416 +  TEVENT (raw_notifyAll) ;
  61.417 +  if (THREAD != _owner) {
  61.418 +    return OM_ILLEGAL_MONITOR_STATE;
  61.419 +  }
  61.420 +  SimpleNotify (THREAD, true) ;
  61.421 +  return OM_OK;
  61.422 +}
  61.423 +
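
These ObjectMonitor-backed raw monitors are what the JVMTI RawMonitor functions hand to agents. For context only, a minimal agent-side sketch of that API using standard JVMTI calls; error handling is omitted and the monitor name is arbitrary:

    #include <jvmti.h>

    static jrawMonitorID g_lock;

    JNIEXPORT jint JNICALL Agent_OnLoad(JavaVM *vm, char *options, void *reserved) {
      jvmtiEnv *jvmti = NULL;
      if (vm->GetEnv((void **)&jvmti, JVMTI_VERSION_1_0) != JNI_OK) return JNI_ERR;

      // Raw monitors may be created and entered during OnLoad; enters taken this
      // early are cached by JvmtiPendingMonitors and transitioned to real monitor
      // enters once the VM is fully initialized (see transition_raw_monitors above).
      jvmti->CreateRawMonitor("agent state lock", &g_lock);

      jvmti->RawMonitorEnter(g_lock);
      // ... initialize agent-global state ...
      jvmti->RawMonitorExit(g_lock);
      return JNI_OK;
    }
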
    62.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    62.2 +++ b/src/share/vm/prims/jvmtiRawMonitor.hpp	Thu Nov 04 16:17:54 2010 -0700
    62.3 @@ -0,0 +1,99 @@
    62.4 +/*
    62.5 + * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
    62.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    62.7 + *
    62.8 + * This code is free software; you can redistribute it and/or modify it
    62.9 + * under the terms of the GNU General Public License version 2 only, as
   62.10 + * published by the Free Software Foundation.
   62.11 + *
   62.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   62.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   62.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   62.15 + * version 2 for more details (a copy is included in the LICENSE file that
   62.16 + * accompanied this code).
   62.17 + *
   62.18 + * You should have received a copy of the GNU General Public License version
   62.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   62.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   62.21 + *
   62.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   62.23 + * or visit www.oracle.com if you need additional information or have any
   62.24 + * questions.
   62.25 + *
   62.26 + */
   62.27 +
   62.28 +//
   62.29 +// class JvmtiRawMonitor
   62.30 +//
   62.31 +// Used by JVMTI methods: All RawMonitor methods (CreateRawMonitor, EnterRawMonitor, etc.)
   62.32 +//
   62.33 +// Wrapper for ObjectMonitor class that saves the Monitor's name
   62.34 +//
   62.35 +
   62.36 +class JvmtiRawMonitor : public ObjectMonitor  {
   62.37 +private:
   62.38 +  int           _magic;
   62.39 +  char *        _name;
    62.40 +  // JVMTI_RM_MAGIC is set in constructor and unset in destructor.
   62.41 +  enum { JVMTI_RM_MAGIC = (int)(('T' << 24) | ('I' << 16) | ('R' << 8) | 'M') };
   62.42 +
   62.43 +  int       SimpleEnter (Thread * Self) ;
   62.44 +  int       SimpleExit  (Thread * Self) ;
   62.45 +  int       SimpleWait  (Thread * Self, jlong millis) ;
   62.46 +  int       SimpleNotify (Thread * Self, bool All) ;
   62.47 +
   62.48 +public:
   62.49 +  JvmtiRawMonitor(const char *name);
   62.50 +  ~JvmtiRawMonitor();
   62.51 +  int       raw_enter(TRAPS);
   62.52 +  int       raw_exit(TRAPS);
   62.53 +  int       raw_wait(jlong millis, bool interruptable, TRAPS);
   62.54 +  int       raw_notify(TRAPS);
   62.55 +  int       raw_notifyAll(TRAPS);
   62.56 +  int            magic()   { return _magic;  }
   62.57 +  const char *get_name()   { return _name; }
   62.58 +  bool        is_valid();
   62.59 +};
   62.60 +
   62.61 +// Onload pending raw monitors
   62.62 +// Class is used to cache onload or onstart monitor enter
   62.63 +// which will transition into real monitor when
   62.64 +// VM is fully initialized.
   62.65 +class JvmtiPendingMonitors : public AllStatic {
   62.66 +
   62.67 +private:
   62.68 +  static GrowableArray<JvmtiRawMonitor*> *_monitors; // Cache raw monitor enter
   62.69 +
   62.70 +  inline static GrowableArray<JvmtiRawMonitor*>* monitors() { return _monitors; }
   62.71 +
   62.72 +  static void dispose() {
   62.73 +    delete monitors();
   62.74 +  }
   62.75 +
   62.76 +public:
   62.77 +  static void enter(JvmtiRawMonitor *monitor) {
   62.78 +    monitors()->append(monitor);
   62.79 +  }
   62.80 +
   62.81 +  static int count() {
   62.82 +    return monitors()->length();
   62.83 +  }
   62.84 +
   62.85 +  static void destroy(JvmtiRawMonitor *monitor) {
   62.86 +    while (monitors()->contains(monitor)) {
   62.87 +      monitors()->remove(monitor);
   62.88 +    }
   62.89 +  }
   62.90 +
   62.91 +  // Return false if monitor is not found in the list.
   62.92 +  static bool exit(JvmtiRawMonitor *monitor) {
   62.93 +    if (monitors()->contains(monitor)) {
   62.94 +      monitors()->remove(monitor);
   62.95 +      return true;
   62.96 +    } else {
   62.97 +      return false;
   62.98 +    }
   62.99 +  }
  62.100 +
  62.101 +  static void transition_raw_monitors();
  62.102 +};
    63.1 --- a/src/share/vm/runtime/arguments.cpp	Thu Nov 04 15:19:16 2010 -0700
    63.2 +++ b/src/share/vm/runtime/arguments.cpp	Thu Nov 04 16:17:54 2010 -0700
    63.3 @@ -119,11 +119,8 @@
    63.4    PropertyList_add(&_system_properties, new SystemProperty("java.vm.specification.version", "1.0", false));
    63.5    PropertyList_add(&_system_properties, new SystemProperty("java.vm.specification.name",
    63.6                                                                   "Java Virtual Machine Specification",  false));
    63.7 -  PropertyList_add(&_system_properties, new SystemProperty("java.vm.specification.vendor",
    63.8 -        JDK_Version::is_gte_jdk17x_version() ? "Oracle Corporation" : "Sun Microsystems Inc.", false));
    63.9    PropertyList_add(&_system_properties, new SystemProperty("java.vm.version", VM_Version::vm_release(),  false));
   63.10    PropertyList_add(&_system_properties, new SystemProperty("java.vm.name", VM_Version::vm_name(),  false));
   63.11 -  PropertyList_add(&_system_properties, new SystemProperty("java.vm.vendor", VM_Version::vm_vendor(),  false));
   63.12    PropertyList_add(&_system_properties, new SystemProperty("java.vm.info", VM_Version::vm_info_string(),  true));
   63.13  
   63.14    // following are JVMTI agent writeable properties.
   63.15 @@ -151,6 +148,14 @@
   63.16    os::init_system_properties_values();
   63.17  }
   63.18  
   63.19 +
   63.20 +  // Update/Initialize System properties after JDK version number is known
   63.21 +void Arguments::init_version_specific_system_properties() {
   63.22 +  PropertyList_add(&_system_properties, new SystemProperty("java.vm.specification.vendor",
   63.23 +        JDK_Version::is_gte_jdk17x_version() ? "Oracle Corporation" : "Sun Microsystems Inc.", false));
   63.24 +  PropertyList_add(&_system_properties, new SystemProperty("java.vm.vendor", VM_Version::vm_vendor(),  false));
   63.25 +}
   63.26 +
   63.27  /**
   63.28   * Provide a slightly more user-friendly way of eliminating -XX flags.
   63.29   * When a flag is eliminated, it can be added to this list in order to
   63.30 @@ -185,6 +190,10 @@
   63.31                             JDK_Version::jdk_update(6,18), JDK_Version::jdk(7) },
   63.32    { "UseDepthFirstScavengeOrder",
   63.33                             JDK_Version::jdk_update(6,22), JDK_Version::jdk(7) },
   63.34 +  { "HandlePromotionFailure",
   63.35 +                           JDK_Version::jdk_update(6,24), JDK_Version::jdk(8) },
   63.36 +  { "MaxLiveObjectEvacuationRatio",
   63.37 +                           JDK_Version::jdk_update(6,24), JDK_Version::jdk(8) },
   63.38    { NULL, JDK_Version(0), JDK_Version(0) }
   63.39  };
   63.40  
   63.41 @@ -948,26 +957,65 @@
   63.42    }
   63.43  }
   63.44  
   63.45 +void Arguments::check_compressed_oops_compat() {
   63.46 +#ifdef _LP64
   63.47 +  assert(UseCompressedOops, "Precondition");
   63.48 +#  if defined(COMPILER1) && !defined(TIERED)
   63.49 +  // Until c1 supports compressed oops turn them off.
   63.50 +  FLAG_SET_DEFAULT(UseCompressedOops, false);
   63.51 +#  else
   63.52 +  // Is it on by default or set on ergonomically
   63.53 +  bool is_on_by_default = FLAG_IS_DEFAULT(UseCompressedOops) || FLAG_IS_ERGO(UseCompressedOops);
   63.54 +
   63.55 +  // Tiered currently doesn't work with compressed oops
   63.56 +  if (TieredCompilation) {
   63.57 +    if (is_on_by_default) {
   63.58 +      FLAG_SET_DEFAULT(UseCompressedOops, false);
   63.59 +      return;
   63.60 +    } else {
   63.61 +      vm_exit_during_initialization(
   63.62 +        "Tiered compilation is not supported with compressed oops yet", NULL);
   63.63 +    }
   63.64 +  }
   63.65 +
   63.66 +  // XXX JSR 292 currently does not support compressed oops
   63.67 +  if (EnableMethodHandles) {
   63.68 +    if (is_on_by_default) {
   63.69 +      FLAG_SET_DEFAULT(UseCompressedOops, false);
   63.70 +      return;
   63.71 +    } else {
   63.72 +      vm_exit_during_initialization(
   63.73 +        "JSR292 is not supported with compressed oops yet", NULL);
   63.74 +    }
   63.75 +  }
   63.76 +
   63.77 +  // If dumping an archive or forcing its use, disable compressed oops if possible
   63.78 +  if (DumpSharedSpaces || RequireSharedSpaces) {
   63.79 +    if (is_on_by_default) {
   63.80 +      FLAG_SET_DEFAULT(UseCompressedOops, false);
   63.81 +      return;
   63.82 +    } else {
   63.83 +      vm_exit_during_initialization(
   63.84 +        "Class Data Sharing is not supported with compressed oops yet", NULL);
   63.85 +    }
   63.86 +  } else if (UseSharedSpaces) {
   63.87 +    // UseSharedSpaces is on by default. With compressed oops, we turn it off.
   63.88 +    FLAG_SET_DEFAULT(UseSharedSpaces, false);
   63.89 +  }
   63.90 +
   63.91 +#  endif // defined(COMPILER1) && !defined(TIERED)
   63.92 +#endif // _LP64
   63.93 +}
   63.94 +
   63.95  void Arguments::set_tiered_flags() {
   63.96    if (FLAG_IS_DEFAULT(CompilationPolicyChoice)) {
   63.97      FLAG_SET_DEFAULT(CompilationPolicyChoice, 2);
   63.98    }
   63.99 -
  63.100    if (CompilationPolicyChoice < 2) {
  63.101      vm_exit_during_initialization(
  63.102        "Incompatible compilation policy selected", NULL);
  63.103    }
  63.104 -
  63.105 -#ifdef _LP64
  63.106 -  if (FLAG_IS_DEFAULT(UseCompressedOops) || FLAG_IS_ERGO(UseCompressedOops)) {
  63.107 -    UseCompressedOops = false;
  63.108 -  }
  63.109 -  if (UseCompressedOops) {
  63.110 -    vm_exit_during_initialization(
  63.111 -      "Tiered compilation is not supported with compressed oops yet", NULL);
  63.112 -  }
  63.113 -#endif
  63.114 - // Increase the code cache size - tiered compiles a lot more.
  63.115 +  // Increase the code cache size - tiered compiles a lot more.
  63.116    if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) {
  63.117      FLAG_SET_DEFAULT(ReservedCodeCacheSize, ReservedCodeCacheSize * 2);
  63.118    }
  63.119 @@ -1676,7 +1724,8 @@
  63.120    bool status = true;
  63.121    status = status && verify_min_value(StackYellowPages, 1, "StackYellowPages");
  63.122    status = status && verify_min_value(StackRedPages, 1, "StackRedPages");
  63.123 -  status = status && verify_min_value(StackShadowPages, 1, "StackShadowPages");
   63.124 +  // with too many stack shadow pages the VM can't generate an instruction to bang the stack
  63.125 +  status = status && verify_interval(StackShadowPages, 1, 50, "StackShadowPages");
  63.126    return status;
  63.127  }
  63.128  
  63.129 @@ -1722,8 +1771,6 @@
  63.130      status = false;
  63.131    }
  63.132  
  63.133 -  status = status && verify_percentage(MaxLiveObjectEvacuationRatio,
  63.134 -                              "MaxLiveObjectEvacuationRatio");
  63.135    status = status && verify_percentage(AdaptiveSizePolicyWeight,
  63.136                                "AdaptiveSizePolicyWeight");
  63.137    status = status && verify_percentage(AdaptivePermSizeWeight, "AdaptivePermSizeWeight");
  63.138 @@ -2827,6 +2874,7 @@
  63.139    return JNI_OK;
  63.140  }
  63.141  
  63.142 +
  63.143  // Parse entry point called from JNI_CreateJavaVM
  63.144  
  63.145  jint Arguments::parse(const JavaVMInitArgs* args) {
  63.146 @@ -2969,10 +3017,6 @@
  63.147      PrintGC = true;
  63.148    }
  63.149  
  63.150 -#if defined(_LP64) && defined(COMPILER1) && !defined(TIERED)
  63.151 -  UseCompressedOops = false;
  63.152 -#endif
  63.153 -
  63.154    // Set object alignment values.
  63.155    set_object_alignment();
  63.156  
  63.157 @@ -2987,13 +3031,10 @@
  63.158    set_ergonomics_flags();
  63.159  
  63.160  #ifdef _LP64
  63.161 -  // XXX JSR 292 currently does not support compressed oops.
  63.162 -  if (EnableMethodHandles && UseCompressedOops) {
  63.163 -    if (FLAG_IS_DEFAULT(UseCompressedOops) || FLAG_IS_ERGO(UseCompressedOops)) {
  63.164 -      UseCompressedOops = false;
  63.165 -    }
  63.166 +  if (UseCompressedOops) {
  63.167 +    check_compressed_oops_compat();
  63.168    }
  63.169 -#endif // _LP64
  63.170 +#endif
  63.171  
  63.172    // Check the GC selections again.
  63.173    if (!check_gc_consistency()) {
    64.1 --- a/src/share/vm/runtime/arguments.hpp	Thu Nov 04 15:19:16 2010 -0700
    64.2 +++ b/src/share/vm/runtime/arguments.hpp	Thu Nov 04 16:17:54 2010 -0700
    64.3 @@ -291,6 +291,8 @@
    64.4  
    64.5    // Tiered
    64.6    static void set_tiered_flags();
    64.7 +  // Check compressed oops compatibility with other flags
    64.8 +  static void check_compressed_oops_compat();
    64.9    // CMS/ParNew garbage collectors
   64.10    static void set_parnew_gc_flags();
   64.11    static void set_cms_and_parnew_gc_flags();
   64.12 @@ -484,6 +486,9 @@
   64.13    // System properties
   64.14    static void init_system_properties();
   64.15  
   64.16 +  // Update/Initialize System properties after JDK version number is known
   64.17 +  static void init_version_specific_system_properties();
   64.18 +
   64.19    // Property List manipulation
   64.20    static void PropertyList_add(SystemProperty** plist, SystemProperty *element);
   64.21    static void PropertyList_add(SystemProperty** plist, const char* k, char* v);
    65.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    65.2 +++ b/src/share/vm/runtime/basicLock.cpp	Thu Nov 04 16:17:54 2010 -0700
    65.3 @@ -0,0 +1,76 @@
    65.4 +/*
    65.5 + * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
    65.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    65.7 + *
    65.8 + * This code is free software; you can redistribute it and/or modify it
    65.9 + * under the terms of the GNU General Public License version 2 only, as
   65.10 + * published by the Free Software Foundation.
   65.11 + *
   65.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   65.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   65.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   65.15 + * version 2 for more details (a copy is included in the LICENSE file that
   65.16 + * accompanied this code).
   65.17 + *
   65.18 + * You should have received a copy of the GNU General Public License version
   65.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   65.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   65.21 + *
   65.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   65.23 + * or visit www.oracle.com if you need additional information or have any
   65.24 + * questions.
   65.25 + *
   65.26 + */
   65.27 +
   65.28 +# include "incls/_precompiled.incl"
   65.29 +# include "incls/_basicLock.cpp.incl"
   65.30 +
   65.31 +void BasicLock::print_on(outputStream* st) const {
   65.32 +  st->print("monitor");
   65.33 +}
   65.34 +
   65.35 +void BasicLock::move_to(oop obj, BasicLock* dest) {
   65.36 +  // Check to see if we need to inflate the lock. This is only needed
   65.37 +  // if an object is locked using "this" lightweight monitor. In that
   65.38 +  // case, the displaced_header() is unlocked, because the
   65.39 +  // displaced_header() contains the header for the originally unlocked
   65.40 +  // object. However the object could have already been inflated. But it
    65.41 + * does not matter, the inflation will just be a no-op. For other cases,
   65.42 +  // the displaced header will be either 0x0 or 0x3, which are location
   65.43 +  // independent, therefore the BasicLock is free to move.
   65.44 +  //
   65.45 +  // During OSR we may need to relocate a BasicLock (which contains a
   65.46 +  // displaced word) from a location in an interpreter frame to a
   65.47 +  // new location in a compiled frame.  "this" refers to the source
   65.48 +  // basiclock in the interpreter frame.  "dest" refers to the destination
   65.49 +  // basiclock in the new compiled frame.  We *always* inflate in move_to().
   65.50 +  // The always-Inflate policy works properly, but in 1.5.0 it can sometimes
   65.51 +  // cause performance problems in code that makes heavy use of a small # of
   65.52 +  // uncontended locks.   (We'd inflate during OSR, and then sync performance
   65.53 +  // would subsequently plummet because the thread would be forced thru the slow-path).
   65.54 +  // This problem has been made largely moot on IA32 by inlining the inflated fast-path
   65.55 +  // operations in Fast_Lock and Fast_Unlock in i486.ad.
   65.56 +  //
   65.57 +  // Note that there is a way to safely swing the object's markword from
   65.58 +  // one stack location to another.  This avoids inflation.  Obviously,
   65.59 +  // we need to ensure that both locations refer to the current thread's stack.
    65.60 + * There are some subtle concurrency issues, however, and since the benefit
    65.61 + * is small (given the support for inflated fast-path locking in the fast_lock, etc)
   65.62 +  // we'll leave that optimization for another time.
   65.63 +
   65.64 +  if (displaced_header()->is_neutral()) {
   65.65 +    ObjectSynchronizer::inflate_helper(obj);
   65.66 +    // WARNING: We can not put check here, because the inflation
   65.67 +    // will not update the displaced header. Once BasicLock is inflated,
   65.68 +    // no one should ever look at its content.
   65.69 +  } else {
   65.70 +    // Typically the displaced header will be 0 (recursive stack lock) or
   65.71 +    // unused_mark.  Naively we'd like to assert that the displaced mark
   65.72 +    // value is either 0, neutral, or 3.  But with the advent of the
   65.73 +    // store-before-CAS avoidance in fast_lock/compiler_lock_object
   65.74 +    // we can find any flavor mark in the displaced mark.
   65.75 +  }
   65.76 +// [RGV] The next line appears to do nothing!
   65.77 +  intptr_t dh = (intptr_t) displaced_header();
   65.78 +  dest->set_displaced_header(displaced_header());
   65.79 +}
    66.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    66.2 +++ b/src/share/vm/runtime/basicLock.hpp	Thu Nov 04 16:17:54 2010 -0700
    66.3 @@ -0,0 +1,72 @@
    66.4 +/*
    66.5 + * Copyright (c) 1998, 2007, Oracle and/or its affiliates. All rights reserved.
    66.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    66.7 + *
    66.8 + * This code is free software; you can redistribute it and/or modify it
    66.9 + * under the terms of the GNU General Public License version 2 only, as
   66.10 + * published by the Free Software Foundation.
   66.11 + *
   66.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   66.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   66.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   66.15 + * version 2 for more details (a copy is included in the LICENSE file that
   66.16 + * accompanied this code).
   66.17 + *
   66.18 + * You should have received a copy of the GNU General Public License version
   66.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   66.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   66.21 + *
   66.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   66.23 + * or visit www.oracle.com if you need additional information or have any
   66.24 + * questions.
   66.25 + *
   66.26 + */
   66.27 +
   66.28 +class BasicLock VALUE_OBJ_CLASS_SPEC {
   66.29 +  friend class VMStructs;
   66.30 + private:
   66.31 +  volatile markOop _displaced_header;
   66.32 + public:
   66.33 +  markOop      displaced_header() const               { return _displaced_header; }
   66.34 +  void         set_displaced_header(markOop header)   { _displaced_header = header; }
   66.35 +
   66.36 +  void print_on(outputStream* st) const;
   66.37 +
    66.38 +  // move a basic lock (used during deoptimization)
   66.39 +  void move_to(oop obj, BasicLock* dest);
   66.40 +
   66.41 +  static int displaced_header_offset_in_bytes()       { return offset_of(BasicLock, _displaced_header); }
   66.42 +};
   66.43 +
   66.44 +// A BasicObjectLock associates a specific Java object with a BasicLock.
   66.45 +// It is currently embedded in an interpreter frame.
   66.46 +
   66.47 +// Because some machines have alignment restrictions on the control stack,
   66.48 +// the actual space allocated by the interpreter may include padding words
   66.49 +// after the end of the BasicObjectLock.  Also, in order to guarantee
   66.50 +// alignment of the embedded BasicLock objects on such machines, we
   66.51 +// put the embedded BasicLock at the beginning of the struct.
   66.52 +
   66.53 +class BasicObjectLock VALUE_OBJ_CLASS_SPEC {
   66.54 +  friend class VMStructs;
   66.55 + private:
   66.56 +  BasicLock _lock;                                    // the lock, must be double word aligned
   66.57 +  oop       _obj;                                     // object holds the lock;
   66.58 +
   66.59 + public:
   66.60 +  // Manipulation
   66.61 +  oop      obj() const                                { return _obj;  }
   66.62 +  void set_obj(oop obj)                               { _obj = obj; }
   66.63 +  BasicLock* lock()                                   { return &_lock; }
   66.64 +
   66.65 +  // Note: Use frame::interpreter_frame_monitor_size() for the size of BasicObjectLocks
   66.66 +  //       in interpreter activation frames since it includes machine-specific padding.
   66.67 +  static int size()                                   { return sizeof(BasicObjectLock)/wordSize; }
   66.68 +
   66.69 +  // GC support
   66.70 +  void oops_do(OopClosure* f) { f->do_oop(&_obj); }
   66.71 +
   66.72 +  static int obj_offset_in_bytes()                    { return offset_of(BasicObjectLock, _obj);  }
   66.73 +  static int lock_offset_in_bytes()                   { return offset_of(BasicObjectLock, _lock); }
   66.74 +};
   66.75 +
    67.1 --- a/src/share/vm/runtime/globals.hpp	Thu Nov 04 15:19:16 2010 -0700
    67.2 +++ b/src/share/vm/runtime/globals.hpp	Thu Nov 04 16:17:54 2010 -0700
    67.3 @@ -327,10 +327,10 @@
    67.4    /* UseMembar is theoretically a temp flag used for memory barrier         \
    67.5     * removal testing.  It was supposed to be removed before FCS but has     \
    67.6     * been re-added (see 6401008) */                                         \
    67.7 -  product(bool, UseMembar, false,                                           \
    67.8 +  product_pd(bool, UseMembar,                                               \
    67.9            "(Unstable) Issues membars on thread state transitions")          \
   67.10                                                                              \
   67.11 -  /* Temporary: See 6948537 */                                             \
   67.12 +  /* Temporary: See 6948537 */                                              \
   67.13    experimental(bool, UseMemSetInBOT, true,                                  \
   67.14            "(Unstable) uses memset in BOT updates in GC code")               \
   67.15                                                                              \
   67.16 @@ -822,6 +822,9 @@
   67.17    develop(bool, PrintJVMWarnings, false,                                    \
   67.18            "Prints warnings for unimplemented JVM functions")                \
   67.19                                                                              \
   67.20 +  product(bool, PrintWarnings, true,                                        \
   67.21 +          "Prints JVM warnings to output stream")                           \
   67.22 +                                                                            \
   67.23    notproduct(uintx, WarnOnStalledSpinLock, 0,                               \
   67.24            "Prints warnings for stalled SpinLocks")                          \
   67.25                                                                              \
   67.26 @@ -1585,7 +1588,7 @@
   67.27            "(Temporary, subject to experimentation)"                         \
   67.28            "Nominal minimum work per abortable preclean iteration")          \
   67.29                                                                              \
   67.30 -  product(intx, CMSAbortablePrecleanWaitMillis, 100,                        \
   67.31 +  manageable(intx, CMSAbortablePrecleanWaitMillis, 100,                     \
   67.32            "(Temporary, subject to experimentation)"                         \
   67.33            " Time that we sleep between iterations when not given"           \
   67.34            " enough work per iteration")                                     \
   67.35 @@ -1677,7 +1680,7 @@
   67.36    product(uintx, CMSWorkQueueDrainThreshold, 10,                            \
   67.37            "Don't drain below this size per parallel worker/thief")          \
   67.38                                                                              \
   67.39 -  product(intx, CMSWaitDuration, 2000,                                      \
   67.40 +  manageable(intx, CMSWaitDuration, 2000,                                   \
   67.41            "Time in milliseconds that CMS thread waits for young GC")        \
   67.42                                                                              \
   67.43    product(bool, CMSYield, true,                                             \
   67.44 @@ -1786,10 +1789,6 @@
   67.45    notproduct(bool, GCALotAtAllSafepoints, false,                            \
   67.46            "Enforce ScavengeALot/GCALot at all potential safepoints")        \
   67.47                                                                              \
   67.48 -  product(bool, HandlePromotionFailure, true,                               \
   67.49 -          "The youngest generation collection does not require "            \
   67.50 -          "a guarantee of full promotion of all live objects.")             \
   67.51 -                                                                            \
   67.52    product(bool, PrintPromotionFailure, false,                               \
   67.53            "Print additional diagnostic information following "              \
   67.54            " promotion failure")                                             \
   67.55 @@ -3003,9 +3002,6 @@
   67.56    product(intx, NewRatio, 2,                                                \
   67.57            "Ratio of new/old generation sizes")                              \
   67.58                                                                              \
   67.59 -  product(uintx, MaxLiveObjectEvacuationRatio, 100,                         \
   67.60 -          "Max percent of eden objects that will be live at scavenge")      \
   67.61 -                                                                            \
   67.62    product_pd(uintx, NewSizeThreadIncrease,                                  \
   67.63            "Additional size added to desired new generation size per "       \
   67.64            "non-daemon thread (in bytes)")                                   \
   67.65 @@ -3542,7 +3538,7 @@
   67.66    product(uintx, SharedDummyBlockSize, 512*M,                               \
   67.67            "Size of dummy block used to shift heap addresses (in bytes)")    \
   67.68                                                                              \
   67.69 -  product(uintx, SharedReadWriteSize,  12*M,                                \
   67.70 +  product(uintx, SharedReadWriteSize,  NOT_LP64(12*M) LP64_ONLY(13*M),      \
   67.71            "Size of read-write space in permanent generation (in bytes)")    \
   67.72                                                                              \
   67.73    product(uintx, SharedReadOnlySize,   10*M,                                \
    68.1 --- a/src/share/vm/runtime/mutex.hpp	Thu Nov 04 15:19:16 2010 -0700
    68.2 +++ b/src/share/vm/runtime/mutex.hpp	Thu Nov 04 16:17:54 2010 -0700
    68.3 @@ -265,48 +265,3 @@
    68.4     }
    68.5  };
    68.6  
    68.7 -/*
    68.8 - * Per-thread blocking support for JSR166. See the Java-level
    68.9 - * Documentation for rationale. Basically, park acts like wait, unpark
   68.10 - * like notify.
   68.11 - *
   68.12 - * 6271289 --
   68.13 - * To avoid errors where an os thread expires but the JavaThread still
   68.14 - * exists, Parkers are immortal (type-stable) and are recycled across
   68.15 - * new threads.  This parallels the ParkEvent implementation.
   68.16 - * Because park-unpark allow spurious wakeups it is harmless if an
   68.17 - * unpark call unparks a new thread using the old Parker reference.
   68.18 - *
   68.19 - * In the future we'll want to think about eliminating Parker and using
   68.20 - * ParkEvent instead.  There's considerable duplication between the two
   68.21 - * services.
   68.22 - *
   68.23 - */
   68.24 -
   68.25 -class Parker : public os::PlatformParker {
   68.26 -private:
   68.27 -  volatile int _counter ;
   68.28 -  Parker * FreeNext ;
   68.29 -  JavaThread * AssociatedWith ; // Current association
   68.30 -
   68.31 -public:
   68.32 -  Parker() : PlatformParker() {
   68.33 -    _counter       = 0 ;
   68.34 -    FreeNext       = NULL ;
   68.35 -    AssociatedWith = NULL ;
   68.36 -  }
   68.37 -protected:
   68.38 -  ~Parker() { ShouldNotReachHere(); }
   68.39 -public:
   68.40 -  // For simplicity of interface with Java, all forms of park (indefinite,
   68.41 -  // relative, and absolute) are multiplexed into one call.
   68.42 -  void park(bool isAbsolute, jlong time);
   68.43 -  void unpark();
   68.44 -
   68.45 -  // Lifecycle operators
   68.46 -  static Parker * Allocate (JavaThread * t) ;
   68.47 -  static void Release (Parker * e) ;
   68.48 -private:
   68.49 -  static Parker * volatile FreeList ;
   68.50 -  static volatile int ListLock ;
   68.51 -};
    69.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    69.2 +++ b/src/share/vm/runtime/objectMonitor.cpp	Thu Nov 04 16:17:54 2010 -0700
    69.3 @@ -0,0 +1,2421 @@
    69.4 +/*
    69.5 + * Copyright (c) 1998, 2009, Oracle and/or its affiliates. All rights reserved.
    69.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    69.7 + *
    69.8 + * This code is free software; you can redistribute it and/or modify it
    69.9 + * under the terms of the GNU General Public License version 2 only, as
   69.10 + * published by the Free Software Foundation.
   69.11 + *
   69.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   69.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   69.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   69.15 + * version 2 for more details (a copy is included in the LICENSE file that
   69.16 + * accompanied this code).
   69.17 + *
   69.18 + * You should have received a copy of the GNU General Public License version
   69.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   69.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   69.21 + *
   69.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   69.23 + * or visit www.oracle.com if you need additional information or have any
   69.24 + * questions.
   69.25 + *
   69.26 + */
   69.27 +
   69.28 +# include "incls/_precompiled.incl"
   69.29 +# include "incls/_objectMonitor.cpp.incl"
   69.30 +
   69.31 +#if defined(__GNUC__) && !defined(IA64)
   69.32 +  // Need to inhibit inlining for older versions of GCC to avoid build-time failures
   69.33 +  #define ATTR __attribute__((noinline))
   69.34 +#else
   69.35 +  #define ATTR
   69.36 +#endif
   69.37 +
   69.38 +
   69.39 +#ifdef DTRACE_ENABLED
   69.40 +
   69.41 +// Only bother with this argument setup if dtrace is available
   69.42 +// TODO-FIXME: probes should not fire when caller is _blocked.  assert() accordingly.
   69.43 +
   69.44 +HS_DTRACE_PROBE_DECL4(hotspot, monitor__notify,
   69.45 +  jlong, uintptr_t, char*, int);
   69.46 +HS_DTRACE_PROBE_DECL4(hotspot, monitor__notifyAll,
   69.47 +  jlong, uintptr_t, char*, int);
   69.48 +HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__enter,
   69.49 +  jlong, uintptr_t, char*, int);
   69.50 +HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__entered,
   69.51 +  jlong, uintptr_t, char*, int);
   69.52 +HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__exit,
   69.53 +  jlong, uintptr_t, char*, int);
   69.54 +
   69.55 +#define DTRACE_MONITOR_PROBE_COMMON(klassOop, thread)                      \
   69.56 +  char* bytes = NULL;                                                      \
   69.57 +  int len = 0;                                                             \
   69.58 +  jlong jtid = SharedRuntime::get_java_tid(thread);                        \
   69.59 +  symbolOop klassname = ((oop)(klassOop))->klass()->klass_part()->name();  \
   69.60 +  if (klassname != NULL) {                                                 \
   69.61 +    bytes = (char*)klassname->bytes();                                     \
   69.62 +    len = klassname->utf8_length();                                        \
   69.63 +  }
   69.64 +
   69.65 +#define DTRACE_MONITOR_WAIT_PROBE(monitor, klassOop, thread, millis)       \
   69.66 +  {                                                                        \
   69.67 +    if (DTraceMonitorProbes) {                                            \
   69.68 +      DTRACE_MONITOR_PROBE_COMMON(klassOop, thread);                       \
   69.69 +      HS_DTRACE_PROBE5(hotspot, monitor__wait, jtid,                       \
   69.70 +                       (monitor), bytes, len, (millis));                   \
   69.71 +    }                                                                      \
   69.72 +  }
   69.73 +
   69.74 +#define DTRACE_MONITOR_PROBE(probe, monitor, klassOop, thread)             \
   69.75 +  {                                                                        \
   69.76 +    if (DTraceMonitorProbes) {                                            \
   69.77 +      DTRACE_MONITOR_PROBE_COMMON(klassOop, thread);                       \
   69.78 +      HS_DTRACE_PROBE4(hotspot, monitor__##probe, jtid,                    \
   69.79 +                       (uintptr_t)(monitor), bytes, len);                  \
   69.80 +    }                                                                      \
   69.81 +  }
   69.82 +
   69.83 +#else //  ndef DTRACE_ENABLED
   69.84 +
   69.85 +#define DTRACE_MONITOR_WAIT_PROBE(klassOop, thread, millis, mon)    {;}
   69.86 +#define DTRACE_MONITOR_PROBE(probe, klassOop, thread, mon)          {;}
   69.87 +
   69.88 +#endif // ndef DTRACE_ENABLED
   69.89 +
   69.90 +// Tunables ...
   69.91 +// The knob* variables are effectively final.  Once set they should
    69.92 +// never be modified thereafter.  Consider using __read_mostly with GCC.
   69.93 +
   69.94 +int ObjectMonitor::Knob_Verbose    = 0 ;
   69.95 +int ObjectMonitor::Knob_SpinLimit  = 5000 ;    // derived by an external tool -
   69.96 +static int Knob_LogSpins           = 0 ;       // enable jvmstat tally for spins
   69.97 +static int Knob_HandOff            = 0 ;
   69.98 +static int Knob_ReportSettings     = 0 ;
   69.99 +
  69.100 +static int Knob_SpinBase           = 0 ;       // Floor AKA SpinMin
  69.101 +static int Knob_SpinBackOff        = 0 ;       // spin-loop backoff
  69.102 +static int Knob_CASPenalty         = -1 ;      // Penalty for failed CAS
  69.103 +static int Knob_OXPenalty          = -1 ;      // Penalty for observed _owner change
  69.104 +static int Knob_SpinSetSucc        = 1 ;       // spinners set the _succ field
  69.105 +static int Knob_SpinEarly          = 1 ;
  69.106 +static int Knob_SuccEnabled        = 1 ;       // futile wake throttling
  69.107 +static int Knob_SuccRestrict       = 0 ;       // Limit successors + spinners to at-most-one
  69.108 +static int Knob_MaxSpinners        = -1 ;      // Should be a function of # CPUs
  69.109 +static int Knob_Bonus              = 100 ;     // spin success bonus
  69.110 +static int Knob_BonusB             = 100 ;     // spin success bonus
  69.111 +static int Knob_Penalty            = 200 ;     // spin failure penalty
  69.112 +static int Knob_Poverty            = 1000 ;
  69.113 +static int Knob_SpinAfterFutile    = 1 ;       // Spin after returning from park()
  69.114 +static int Knob_FixedSpin          = 0 ;
  69.115 +static int Knob_OState             = 3 ;       // Spinner checks thread state of _owner
  69.116 +static int Knob_UsePause           = 1 ;
  69.117 +static int Knob_ExitPolicy         = 0 ;
  69.118 +static int Knob_PreSpin            = 10 ;      // 20-100 likely better
  69.119 +static int Knob_ResetEvent         = 0 ;
  69.120 +static int BackOffMask             = 0 ;
  69.121 +
  69.122 +static int Knob_FastHSSEC          = 0 ;
  69.123 +static int Knob_MoveNotifyee       = 2 ;       // notify() - disposition of notifyee
  69.124 +static int Knob_QMode              = 0 ;       // EntryList-cxq policy - queue discipline
  69.125 +static volatile int InitDone       = 0 ;
  69.126 +
  69.127 +#define TrySpin TrySpin_VaryDuration
  69.128 +
  69.129 +// -----------------------------------------------------------------------------
  69.130 +// Theory of operations -- Monitors lists, thread residency, etc:
  69.131 +//
  69.132 +// * A thread acquires ownership of a monitor by successfully
  69.133 +//   CAS()ing the _owner field from null to non-null.
  69.134 +//
  69.135 +// * Invariant: A thread appears on at most one monitor list --
  69.136 +//   cxq, EntryList or WaitSet -- at any one time.
  69.137 +//
  69.138 +// * Contending threads "push" themselves onto the cxq with CAS
  69.139 +//   and then spin/park.
  69.140 +//
  69.141 +// * After a contending thread eventually acquires the lock it must
  69.142 +//   dequeue itself from either the EntryList or the cxq.
  69.143 +//
  69.144 +// * The exiting thread identifies and unparks an "heir presumptive"
  69.145 +//   tentative successor thread on the EntryList.  Critically, the
  69.146 +//   exiting thread doesn't unlink the successor thread from the EntryList.
  69.147 +//   After having been unparked, the wakee will recontend for ownership of
  69.148 +//   the monitor.   The successor (wakee) will either acquire the lock or
  69.149 +//   re-park itself.
  69.150 +//
  69.151 +//   Succession is provided for by a policy of competitive handoff.
  69.152 +//   The exiting thread does _not_ grant or pass ownership to the
   69.153 +//   successor thread.  (This is also referred to as "handoff" succession).
  69.154 +//   Instead the exiting thread releases ownership and possibly wakes
  69.155 +//   a successor, so the successor can (re)compete for ownership of the lock.
  69.156 +//   If the EntryList is empty but the cxq is populated the exiting
   69.157 +//   thread will drain the cxq into the EntryList.  It does so by
   69.158 +//   detaching the cxq (installing null with CAS) and folding
  69.159 +//   the threads from the cxq into the EntryList.  The EntryList is
  69.160 +//   doubly linked, while the cxq is singly linked because of the
  69.161 +//   CAS-based "push" used to enqueue recently arrived threads (RATs).
  69.162 +//
  69.163 +// * Concurrency invariants:
  69.164 +//
  69.165 +//   -- only the monitor owner may access or mutate the EntryList.
  69.166 +//      The mutex property of the monitor itself protects the EntryList
  69.167 +//      from concurrent interference.
  69.168 +//   -- Only the monitor owner may detach the cxq.
  69.169 +//
  69.170 +// * The monitor entry list operations avoid locks, but strictly speaking
  69.171 +//   they're not lock-free.  Enter is lock-free, exit is not.
  69.172 +//   See http://j2se.east/~dice/PERSIST/040825-LockFreeQueues.html
  69.173 +//
  69.174 +// * The cxq can have multiple concurrent "pushers" but only one concurrent
   69.175 +//   detaching thread.  This mechanism is immune to ABA corruption.
  69.176 +//   More precisely, the CAS-based "push" onto cxq is ABA-oblivious.
  69.177 +//
  69.178 +// * Taken together, the cxq and the EntryList constitute or form a
  69.179 +//   single logical queue of threads stalled trying to acquire the lock.
  69.180 +//   We use two distinct lists to improve the odds of a constant-time
  69.181 +//   dequeue operation after acquisition (in the ::enter() epilog) and
  69.182 +//   to reduce heat on the list ends.  (c.f. Michael Scott's "2Q" algorithm).
  69.183 +//   A key desideratum is to minimize queue & monitor metadata manipulation
  69.184 +//   that occurs while holding the monitor lock -- that is, we want to
   69.185 +//   minimize monitor lock hold times.  Note that even a small amount of
  69.186 +//   fixed spinning will greatly reduce the # of enqueue-dequeue operations
  69.187 +//   on EntryList|cxq.  That is, spinning relieves contention on the "inner"
  69.188 +//   locks and monitor metadata.
  69.189 +//
   69.190 +//   Cxq points to the set of Recently Arrived Threads attempting entry.
  69.191 +//   Because we push threads onto _cxq with CAS, the RATs must take the form of
  69.192 +//   a singly-linked LIFO.  We drain _cxq into EntryList  at unlock-time when
  69.193 +//   the unlocking thread notices that EntryList is null but _cxq is != null.
  69.194 +//
  69.195 +//   The EntryList is ordered by the prevailing queue discipline and
  69.196 +//   can be organized in any convenient fashion, such as a doubly-linked list or
  69.197 +//   a circular doubly-linked list.  Critically, we want insert and delete operations
  69.198 +//   to operate in constant-time.  If we need a priority queue then something akin
  69.199 +//   to Solaris' sleepq would work nicely.  Viz.,
  69.200 +//   http://agg.eng/ws/on10_nightly/source/usr/src/uts/common/os/sleepq.c.
  69.201 +//   Queue discipline is enforced at ::exit() time, when the unlocking thread
  69.202 +//   drains the cxq into the EntryList, and orders or reorders the threads on the
  69.203 +//   EntryList accordingly.
  69.204 +//
  69.205 +//   Barring "lock barging", this mechanism provides fair cyclic ordering,
  69.206 +//   somewhat similar to an elevator-scan.
  69.207 +//
  69.208 +// * The monitor synchronization subsystem avoids the use of native
  69.209 +//   synchronization primitives except for the narrow platform-specific
  69.210 +//   park-unpark abstraction.  See the comments in os_solaris.cpp regarding
  69.211 +//   the semantics of park-unpark.  Put another way, this monitor implementation
  69.212 +//   depends only on atomic operations and park-unpark.  The monitor subsystem
  69.213 +//   manages all RUNNING->BLOCKED and BLOCKED->READY transitions while the
  69.214 +//   underlying OS manages the READY<->RUN transitions.
  69.215 +//
  69.216 +// * Waiting threads reside on the WaitSet list -- wait() puts
  69.217 +//   the caller onto the WaitSet.
  69.218 +//
  69.219 +// * notify() or notifyAll() simply transfers threads from the WaitSet to
  69.220 +//   either the EntryList or cxq.  Subsequent exit() operations will
   69.221 +//   unpark the notifyee.  Unparking a notifyee in notify() is inefficient -
  69.222 +//   it's likely the notifyee would simply impale itself on the lock held
  69.223 +//   by the notifier.
  69.224 +//
  69.225 +// * An interesting alternative is to encode cxq as (List,LockByte) where
  69.226 +//   the LockByte is 0 iff the monitor is owned.  _owner is simply an auxiliary
  69.227 +//   variable, like _recursions, in the scheme.  The threads or Events that form
  69.228 +//   the list would have to be aligned in 256-byte addresses.  A thread would
  69.229 +//   try to acquire the lock or enqueue itself with CAS, but exiting threads
  69.230 +//   could use a 1-0 protocol and simply STB to set the LockByte to 0.
   69.231 +//   Note that this is *not* word-tearing, but it does presume that full-word
   69.232 +//   CAS operations are coherent when intermixed with STB operations.  That's true
  69.233 +//   on most common processors.
  69.234 +//
  69.235 +// * See also http://blogs.sun.com/dave
  69.236 +
  69.237 +
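
Editor's sketch: the cxq mechanics described above reduce to two operations -- a CAS-based LIFO "push" performed by any number of arriving threads, and a single-owner detach performed at exit time.  The following minimal, self-contained illustration uses std::atomic rather than the VM's Atomic:: wrappers; the Node type and function names are hypothetical and are not part of this changeset.

    #include <atomic>

    struct Node { Node* next; };                  // stand-in for ObjectWaiter

    std::atomic<Node*> cxq{nullptr};              // recently-arrived threads (LIFO)

    // Any number of contending threads may push concurrently (ABA-oblivious).
    void cxq_push(Node* n) {
      Node* head = cxq.load(std::memory_order_relaxed);
      do {
        n->next = head;                           // link to the observed head
      } while (!cxq.compare_exchange_weak(head, n,
                                          std::memory_order_release,
                                          std::memory_order_relaxed));
    }

    // Only the monitor owner detaches; one atomic exchange takes the whole
    // chain, which the owner can then fold into its privately owned EntryList.
    Node* cxq_detach_all() {
      return cxq.exchange(nullptr, std::memory_order_acquire);
    }
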
  69.238 +// -----------------------------------------------------------------------------
  69.239 +// Enter support
  69.240 +
  69.241 +bool ObjectMonitor::try_enter(Thread* THREAD) {
  69.242 +  if (THREAD != _owner) {
  69.243 +    if (THREAD->is_lock_owned ((address)_owner)) {
  69.244 +       assert(_recursions == 0, "internal state error");
  69.245 +       _owner = THREAD ;
  69.246 +       _recursions = 1 ;
  69.247 +       OwnerIsThread = 1 ;
  69.248 +       return true;
  69.249 +    }
  69.250 +    if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
  69.251 +      return false;
  69.252 +    }
  69.253 +    return true;
  69.254 +  } else {
  69.255 +    _recursions++;
  69.256 +    return true;
  69.257 +  }
  69.258 +}
  69.259 +
  69.260 +void ATTR ObjectMonitor::enter(TRAPS) {
  69.261 +  // The following code is ordered to check the most common cases first
  69.262 +  // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
  69.263 +  Thread * const Self = THREAD ;
  69.264 +  void * cur ;
  69.265 +
  69.266 +  cur = Atomic::cmpxchg_ptr (Self, &_owner, NULL) ;
  69.267 +  if (cur == NULL) {
  69.268 +     // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
  69.269 +     assert (_recursions == 0   , "invariant") ;
  69.270 +     assert (_owner      == Self, "invariant") ;
  69.271 +     // CONSIDER: set or assert OwnerIsThread == 1
  69.272 +     return ;
  69.273 +  }
  69.274 +
  69.275 +  if (cur == Self) {
  69.276 +     // TODO-FIXME: check for integer overflow!  BUGID 6557169.
  69.277 +     _recursions ++ ;
  69.278 +     return ;
  69.279 +  }
  69.280 +
  69.281 +  if (Self->is_lock_owned ((address)cur)) {
  69.282 +    assert (_recursions == 0, "internal state error");
  69.283 +    _recursions = 1 ;
  69.284 +    // Commute owner from a thread-specific on-stack BasicLockObject address to
  69.285 +    // a full-fledged "Thread *".
  69.286 +    _owner = Self ;
  69.287 +    OwnerIsThread = 1 ;
  69.288 +    return ;
  69.289 +  }
  69.290 +
  69.291 +  // We've encountered genuine contention.
  69.292 +  assert (Self->_Stalled == 0, "invariant") ;
  69.293 +  Self->_Stalled = intptr_t(this) ;
  69.294 +
  69.295 +  // Try one round of spinning *before* enqueueing Self
  69.296 +  // and before going through the awkward and expensive state
  69.297 +  // transitions.  The following spin is strictly optional ...
  69.298 +  // Note that if we acquire the monitor from an initial spin
  69.299 +  // we forgo posting JVMTI events and firing DTRACE probes.
  69.300 +  if (Knob_SpinEarly && TrySpin (Self) > 0) {
  69.301 +     assert (_owner == Self      , "invariant") ;
  69.302 +     assert (_recursions == 0    , "invariant") ;
  69.303 +     assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
  69.304 +     Self->_Stalled = 0 ;
  69.305 +     return ;
  69.306 +  }
  69.307 +
  69.308 +  assert (_owner != Self          , "invariant") ;
  69.309 +  assert (_succ  != Self          , "invariant") ;
  69.310 +  assert (Self->is_Java_thread()  , "invariant") ;
  69.311 +  JavaThread * jt = (JavaThread *) Self ;
  69.312 +  assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ;
  69.313 +  assert (jt->thread_state() != _thread_blocked   , "invariant") ;
  69.314 +  assert (this->object() != NULL  , "invariant") ;
  69.315 +  assert (_count >= 0, "invariant") ;
  69.316 +
  69.317 +  // Prevent deflation at STW-time.  See deflate_idle_monitors() and is_busy().
  69.318 +  // Ensure the object-monitor relationship remains stable while there's contention.
  69.319 +  Atomic::inc_ptr(&_count);
  69.320 +
  69.321 +  { // Change java thread status to indicate blocked on monitor enter.
  69.322 +    JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
  69.323 +
  69.324 +    DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
  69.325 +    if (JvmtiExport::should_post_monitor_contended_enter()) {
  69.326 +      JvmtiExport::post_monitor_contended_enter(jt, this);
  69.327 +    }
  69.328 +
  69.329 +    OSThreadContendState osts(Self->osthread());
  69.330 +    ThreadBlockInVM tbivm(jt);
  69.331 +
  69.332 +    Self->set_current_pending_monitor(this);
  69.333 +
  69.334 +    // TODO-FIXME: change the following for(;;) loop to straight-line code.
  69.335 +    for (;;) {
  69.336 +      jt->set_suspend_equivalent();
  69.337 +      // cleared by handle_special_suspend_equivalent_condition()
  69.338 +      // or java_suspend_self()
  69.339 +
  69.340 +      EnterI (THREAD) ;
  69.341 +
  69.342 +      if (!ExitSuspendEquivalent(jt)) break ;
  69.343 +
  69.344 +      //
  69.345 +      // We have acquired the contended monitor, but while we were
  69.346 +      // waiting another thread suspended us. We don't want to enter
  69.347 +      // the monitor while suspended because that would surprise the
  69.348 +      // thread that suspended us.
  69.349 +      //
   69.350 +      _recursions = 0 ;
  69.351 +      _succ = NULL ;
  69.352 +      exit (Self) ;
  69.353 +
  69.354 +      jt->java_suspend_self();
  69.355 +    }
  69.356 +    Self->set_current_pending_monitor(NULL);
  69.357 +  }
  69.358 +
  69.359 +  Atomic::dec_ptr(&_count);
  69.360 +  assert (_count >= 0, "invariant") ;
  69.361 +  Self->_Stalled = 0 ;
  69.362 +
  69.363 +  // Must either set _recursions = 0 or ASSERT _recursions == 0.
  69.364 +  assert (_recursions == 0     , "invariant") ;
  69.365 +  assert (_owner == Self       , "invariant") ;
  69.366 +  assert (_succ  != Self       , "invariant") ;
  69.367 +  assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
  69.368 +
  69.369 +  // The thread -- now the owner -- is back in vm mode.
  69.370 +  // Report the glorious news via TI,DTrace and jvmstat.
  69.371 +  // The probe effect is non-trivial.  All the reportage occurs
  69.372 +  // while we hold the monitor, increasing the length of the critical
  69.373 +  // section.  Amdahl's parallel speedup law comes vividly into play.
  69.374 +  //
  69.375 +  // Another option might be to aggregate the events (thread local or
  69.376 +  // per-monitor aggregation) and defer reporting until a more opportune
  69.377 +  // time -- such as next time some thread encounters contention but has
   69.378 +  // yet to acquire the lock.  While that thread is spinning we could
   69.379 +  // increment JVMStat counters, etc.
  69.380 +
  69.381 +  DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt);
  69.382 +  if (JvmtiExport::should_post_monitor_contended_entered()) {
  69.383 +    JvmtiExport::post_monitor_contended_entered(jt, this);
  69.384 +  }
  69.385 +  if (ObjectMonitor::_sync_ContendedLockAttempts != NULL) {
  69.386 +     ObjectMonitor::_sync_ContendedLockAttempts->inc() ;
  69.387 +  }
  69.388 +}
  69.389 +
  69.390 +
  69.391 +// Caveat: TryLock() is not necessarily serializing if it returns failure.
  69.392 +// Callers must compensate as needed.
  69.393 +
  69.394 +int ObjectMonitor::TryLock (Thread * Self) {
  69.395 +   for (;;) {
  69.396 +      void * own = _owner ;
  69.397 +      if (own != NULL) return 0 ;
  69.398 +      if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
  69.399 +         // Either guarantee _recursions == 0 or set _recursions = 0.
  69.400 +         assert (_recursions == 0, "invariant") ;
  69.401 +         assert (_owner == Self, "invariant") ;
  69.402 +         // CONSIDER: set or assert that OwnerIsThread == 1
  69.403 +         return 1 ;
  69.404 +      }
  69.405 +      // The lock had been free momentarily, but we lost the race to the lock.
  69.406 +      // Interference -- the CAS failed.
  69.407 +      // We can either return -1 or retry.
  69.408 +      // Retry doesn't make as much sense because the lock was just acquired.
  69.409 +      if (true) return -1 ;
  69.410 +   }
  69.411 +}
  69.412 +
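
Editor's sketch: TryLock() above is a test-and-test-and-set step that reports three outcomes (>0 acquired, 0 observed busy, -1 lost the CAS race).  A minimal standalone equivalent using std::atomic, with hypothetical names, is shown below; as the caveat above says, the failure paths by themselves make no serialization guarantee.

    #include <atomic>

    std::atomic<void*> owner{nullptr};            // stand-in for _owner

    // Tri-state try-lock:  1 = acquired, 0 = observed busy, -1 = lost the race.
    int try_lock(void* self) {
      void* cur = owner.load(std::memory_order_relaxed);    // test ...
      if (cur != nullptr) return 0;                          // someone owns it
      if (owner.compare_exchange_strong(cur, self,
                                        std::memory_order_acquire,
                                        std::memory_order_relaxed))
        return 1;                                            // ... and set
      return -1;                                             // CAS interference
    }

A caller that sees 0 or -1 typically spins briefly and then enqueues and parks, which is exactly what EnterI() below does.
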
  69.413 +void ATTR ObjectMonitor::EnterI (TRAPS) {
  69.414 +    Thread * Self = THREAD ;
  69.415 +    assert (Self->is_Java_thread(), "invariant") ;
  69.416 +    assert (((JavaThread *) Self)->thread_state() == _thread_blocked   , "invariant") ;
  69.417 +
  69.418 +    // Try the lock - TATAS
  69.419 +    if (TryLock (Self) > 0) {
  69.420 +        assert (_succ != Self              , "invariant") ;
  69.421 +        assert (_owner == Self             , "invariant") ;
  69.422 +        assert (_Responsible != Self       , "invariant") ;
  69.423 +        return ;
  69.424 +    }
  69.425 +
  69.426 +    DeferredInitialize () ;
  69.427 +
  69.428 +    // We try one round of spinning *before* enqueueing Self.
  69.429 +    //
  69.430 +    // If the _owner is ready but OFFPROC we could use a YieldTo()
  69.431 +    // operation to donate the remainder of this thread's quantum
  69.432 +    // to the owner.  This has subtle but beneficial affinity
  69.433 +    // effects.
  69.434 +
  69.435 +    if (TrySpin (Self) > 0) {
  69.436 +        assert (_owner == Self        , "invariant") ;
  69.437 +        assert (_succ != Self         , "invariant") ;
  69.438 +        assert (_Responsible != Self  , "invariant") ;
  69.439 +        return ;
  69.440 +    }
  69.441 +
  69.442 +    // The Spin failed -- Enqueue and park the thread ...
  69.443 +    assert (_succ  != Self            , "invariant") ;
  69.444 +    assert (_owner != Self            , "invariant") ;
  69.445 +    assert (_Responsible != Self      , "invariant") ;
  69.446 +
  69.447 +    // Enqueue "Self" on ObjectMonitor's _cxq.
  69.448 +    //
  69.449 +    // Node acts as a proxy for Self.
   69.450 +    // As an aside, if we were to ever rewrite the synchronization code mostly
  69.451 +    // in Java, WaitNodes, ObjectMonitors, and Events would become 1st-class
  69.452 +    // Java objects.  This would avoid awkward lifecycle and liveness issues,
  69.453 +    // as well as eliminate a subset of ABA issues.
  69.454 +    // TODO: eliminate ObjectWaiter and enqueue either Threads or Events.
  69.455 +    //
  69.456 +
  69.457 +    ObjectWaiter node(Self) ;
  69.458 +    Self->_ParkEvent->reset() ;
  69.459 +    node._prev   = (ObjectWaiter *) 0xBAD ;
  69.460 +    node.TState  = ObjectWaiter::TS_CXQ ;
  69.461 +
  69.462 +    // Push "Self" onto the front of the _cxq.
  69.463 +    // Once on cxq/EntryList, Self stays on-queue until it acquires the lock.
  69.464 +    // Note that spinning tends to reduce the rate at which threads
  69.465 +    // enqueue and dequeue on EntryList|cxq.
  69.466 +    ObjectWaiter * nxt ;
  69.467 +    for (;;) {
  69.468 +        node._next = nxt = _cxq ;
  69.469 +        if (Atomic::cmpxchg_ptr (&node, &_cxq, nxt) == nxt) break ;
  69.470 +
  69.471 +        // Interference - the CAS failed because _cxq changed.  Just retry.
  69.472 +        // As an optional optimization we retry the lock.
  69.473 +        if (TryLock (Self) > 0) {
  69.474 +            assert (_succ != Self         , "invariant") ;
  69.475 +            assert (_owner == Self        , "invariant") ;
  69.476 +            assert (_Responsible != Self  , "invariant") ;
  69.477 +            return ;
  69.478 +        }
  69.479 +    }
  69.480 +
  69.481 +    // Check for cxq|EntryList edge transition to non-null.  This indicates
  69.482 +    // the onset of contention.  While contention persists exiting threads
  69.483 +    // will use a ST:MEMBAR:LD 1-1 exit protocol.  When contention abates exit
  69.484 +    // operations revert to the faster 1-0 mode.  This enter operation may interleave
  69.485 +    // (race) a concurrent 1-0 exit operation, resulting in stranding, so we
   69.486 +    // arrange for one of the contending threads to use a timed park() operation
   69.487 +    // to detect and recover from the race.  (Stranding is a form of progress failure
  69.488 +    // where the monitor is unlocked but all the contending threads remain parked).
  69.489 +    // That is, at least one of the contended threads will periodically poll _owner.
  69.490 +    // One of the contending threads will become the designated "Responsible" thread.
  69.491 +    // The Responsible thread uses a timed park instead of a normal indefinite park
  69.492 +    // operation -- it periodically wakes and checks for and recovers from potential
  69.493 +    // strandings admitted by 1-0 exit operations.   We need at most one Responsible
  69.494 +    // thread per-monitor at any given moment.  Only threads on cxq|EntryList may
  69.495 +    // be responsible for a monitor.
  69.496 +    //
  69.497 +    // Currently, one of the contended threads takes on the added role of "Responsible".
  69.498 +    // A viable alternative would be to use a dedicated "stranding checker" thread
  69.499 +    // that periodically iterated over all the threads (or active monitors) and unparked
  69.500 +    // successors where there was risk of stranding.  This would help eliminate the
  69.501 +    // timer scalability issues we see on some platforms as we'd only have one thread
  69.502 +    // -- the checker -- parked on a timer.
  69.503 +
  69.504 +    if ((SyncFlags & 16) == 0 && nxt == NULL && _EntryList == NULL) {
  69.505 +        // Try to assume the role of responsible thread for the monitor.
  69.506 +        // CONSIDER:  ST vs CAS vs { if (Responsible==null) Responsible=Self }
  69.507 +        Atomic::cmpxchg_ptr (Self, &_Responsible, NULL) ;
  69.508 +    }
  69.509 +
   69.510 +    // The lock might have been released while this thread was occupied queueing
  69.511 +    // itself onto _cxq.  To close the race and avoid "stranding" and
  69.512 +    // progress-liveness failure we must resample-retry _owner before parking.
  69.513 +    // Note the Dekker/Lamport duality: ST cxq; MEMBAR; LD Owner.
  69.514 +    // In this case the ST-MEMBAR is accomplished with CAS().
  69.515 +    //
  69.516 +    // TODO: Defer all thread state transitions until park-time.
  69.517 +    // Since state transitions are heavy and inefficient we'd like
  69.518 +    // to defer the state transitions until absolutely necessary,
  69.519 +    // and in doing so avoid some transitions ...
  69.520 +
  69.521 +    TEVENT (Inflated enter - Contention) ;
  69.522 +    int nWakeups = 0 ;
  69.523 +    int RecheckInterval = 1 ;
  69.524 +
  69.525 +    for (;;) {
  69.526 +
  69.527 +        if (TryLock (Self) > 0) break ;
  69.528 +        assert (_owner != Self, "invariant") ;
  69.529 +
  69.530 +        if ((SyncFlags & 2) && _Responsible == NULL) {
  69.531 +           Atomic::cmpxchg_ptr (Self, &_Responsible, NULL) ;
  69.532 +        }
  69.533 +
  69.534 +        // park self
  69.535 +        if (_Responsible == Self || (SyncFlags & 1)) {
  69.536 +            TEVENT (Inflated enter - park TIMED) ;
  69.537 +            Self->_ParkEvent->park ((jlong) RecheckInterval) ;
  69.538 +            // Increase the RecheckInterval, but clamp the value.
  69.539 +            RecheckInterval *= 8 ;
  69.540 +            if (RecheckInterval > 1000) RecheckInterval = 1000 ;
  69.541 +        } else {
  69.542 +            TEVENT (Inflated enter - park UNTIMED) ;
  69.543 +            Self->_ParkEvent->park() ;
  69.544 +        }
  69.545 +
  69.546 +        if (TryLock(Self) > 0) break ;
  69.547 +
  69.548 +        // The lock is still contested.
  69.549 +        // Keep a tally of the # of futile wakeups.
  69.550 +        // Note that the counter is not protected by a lock or updated by atomics.
  69.551 +        // That is by design - we trade "lossy" counters which are exposed to
  69.552 +        // races during updates for a lower probe effect.
  69.553 +        TEVENT (Inflated enter - Futile wakeup) ;
  69.554 +        if (ObjectMonitor::_sync_FutileWakeups != NULL) {
  69.555 +           ObjectMonitor::_sync_FutileWakeups->inc() ;
  69.556 +        }
  69.557 +        ++ nWakeups ;
  69.558 +
  69.559 +        // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
  69.560 +        // We can defer clearing _succ until after the spin completes
  69.561 +        // TrySpin() must tolerate being called with _succ == Self.
  69.562 +        // Try yet another round of adaptive spinning.
  69.563 +        if ((Knob_SpinAfterFutile & 1) && TrySpin (Self) > 0) break ;
  69.564 +
  69.565 +        // We can find that we were unpark()ed and redesignated _succ while
  69.566 +        // we were spinning.  That's harmless.  If we iterate and call park(),
  69.567 +        // park() will consume the event and return immediately and we'll
  69.568 +        // just spin again.  This pattern can repeat, leaving _succ to simply
  69.569 +        // spin on a CPU.  Enable Knob_ResetEvent to clear pending unparks().
  69.570 +        // Alternately, we can sample fired() here, and if set, forgo spinning
  69.571 +        // in the next iteration.
  69.572 +
  69.573 +        if ((Knob_ResetEvent & 1) && Self->_ParkEvent->fired()) {
  69.574 +           Self->_ParkEvent->reset() ;
  69.575 +           OrderAccess::fence() ;
  69.576 +        }
  69.577 +        if (_succ == Self) _succ = NULL ;
  69.578 +
  69.579 +        // Invariant: after clearing _succ a thread *must* retry _owner before parking.
  69.580 +        OrderAccess::fence() ;
  69.581 +    }
  69.582 +
  69.583 +    // Egress :
  69.584 +    // Self has acquired the lock -- Unlink Self from the cxq or EntryList.
  69.585 +    // Normally we'll find Self on the EntryList .
  69.586 +    // From the perspective of the lock owner (this thread), the
  69.587 +    // EntryList is stable and cxq is prepend-only.
  69.588 +    // The head of cxq is volatile but the interior is stable.
  69.589 +    // In addition, Self.TState is stable.
  69.590 +
  69.591 +    assert (_owner == Self      , "invariant") ;
  69.592 +    assert (object() != NULL    , "invariant") ;
  69.593 +    // I'd like to write:
  69.594 +    //   guarantee (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
  69.595 +    // but as we're at a safepoint that's not safe.
  69.596 +
  69.597 +    UnlinkAfterAcquire (Self, &node) ;
  69.598 +    if (_succ == Self) _succ = NULL ;
  69.599 +
  69.600 +    assert (_succ != Self, "invariant") ;
  69.601 +    if (_Responsible == Self) {
  69.602 +        _Responsible = NULL ;
  69.603 +        // Dekker pivot-point.
  69.604 +        // Consider OrderAccess::storeload() here
  69.605 +
  69.606 +        // We may leave threads on cxq|EntryList without a designated
  69.607 +        // "Responsible" thread.  This is benign.  When this thread subsequently
  69.608 +        // exits the monitor it can "see" such preexisting "old" threads --
  69.609 +        // threads that arrived on the cxq|EntryList before the fence, above --
  69.610 +        // by LDing cxq|EntryList.  Newly arrived threads -- that is, threads
  69.611 +        // that arrive on cxq after the ST:MEMBAR, above -- will set Responsible
  69.612 +        // non-null and elect a new "Responsible" timer thread.
  69.613 +        //
  69.614 +        // This thread executes:
  69.615 +        //    ST Responsible=null; MEMBAR    (in enter epilog - here)
  69.616 +        //    LD cxq|EntryList               (in subsequent exit)
  69.617 +        //
  69.618 +        // Entering threads in the slow/contended path execute:
  69.619 +        //    ST cxq=nonnull; MEMBAR; LD Responsible (in enter prolog)
  69.620 +        //    The (ST cxq; MEMBAR) is accomplished with CAS().
  69.621 +        //
  69.622 +        // The MEMBAR, above, prevents the LD of cxq|EntryList in the subsequent
  69.623 +        // exit operation from floating above the ST Responsible=null.
  69.624 +        //
  69.625 +        // In *practice* however, EnterI() is always followed by some atomic
  69.626 +        // operation such as the decrement of _count in ::enter().  Those atomics
  69.627 +        // obviate the need for the explicit MEMBAR, above.
  69.628 +    }
  69.629 +
  69.630 +    // We've acquired ownership with CAS().
  69.631 +    // CAS is serializing -- it has MEMBAR/FENCE-equivalent semantics.
  69.632 +    // But since the CAS() this thread may have also stored into _succ,
  69.633 +    // EntryList, cxq or Responsible.  These meta-data updates must be
  69.634 +    // visible __before this thread subsequently drops the lock.
  69.635 +    // Consider what could occur if we didn't enforce this constraint --
  69.636 +    // STs to monitor meta-data and user-data could reorder with (become
  69.637 +    // visible after) the ST in exit that drops ownership of the lock.
  69.638 +    // Some other thread could then acquire the lock, but observe inconsistent
  69.639 +    // or old monitor meta-data and heap data.  That violates the JMM.
  69.640 +    // To that end, the 1-0 exit() operation must have at least STST|LDST
  69.641 +    // "release" barrier semantics.  Specifically, there must be at least a
  69.642 +    // STST|LDST barrier in exit() before the ST of null into _owner that drops
  69.643 +    // the lock.   The barrier ensures that changes to monitor meta-data and data
  69.644 +    // protected by the lock will be visible before we release the lock, and
  69.645 +    // therefore before some other thread (CPU) has a chance to acquire the lock.
  69.646 +    // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
  69.647 +    //
  69.648 +    // Critically, any prior STs to _succ or EntryList must be visible before
  69.649 +    // the ST of null into _owner in the *subsequent* (following) corresponding
  69.650 +    // monitorexit.  Recall too, that in 1-0 mode monitorexit does not necessarily
  69.651 +    // execute a serializing instruction.
  69.652 +
  69.653 +    if (SyncFlags & 8) {
  69.654 +       OrderAccess::fence() ;
  69.655 +    }
  69.656 +    return ;
  69.657 +}
  69.658 +
  69.659 +// ReenterI() is a specialized inline form of the latter half of the
  69.660 +// contended slow-path from EnterI().  We use ReenterI() only for
  69.661 +// monitor reentry in wait().
  69.662 +//
  69.663 +// In the future we should reconcile EnterI() and ReenterI(), adding
  69.664 +// Knob_Reset and Knob_SpinAfterFutile support and restructuring the
  69.665 +// loop accordingly.
  69.666 +
  69.667 +void ATTR ObjectMonitor::ReenterI (Thread * Self, ObjectWaiter * SelfNode) {
  69.668 +    assert (Self != NULL                , "invariant") ;
  69.669 +    assert (SelfNode != NULL            , "invariant") ;
  69.670 +    assert (SelfNode->_thread == Self   , "invariant") ;
  69.671 +    assert (_waiters > 0                , "invariant") ;
  69.672 +    assert (((oop)(object()))->mark() == markOopDesc::encode(this) , "invariant") ;
  69.673 +    assert (((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ;
  69.674 +    JavaThread * jt = (JavaThread *) Self ;
  69.675 +
  69.676 +    int nWakeups = 0 ;
  69.677 +    for (;;) {
  69.678 +        ObjectWaiter::TStates v = SelfNode->TState ;
  69.679 +        guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ;
  69.680 +        assert    (_owner != Self, "invariant") ;
  69.681 +
  69.682 +        if (TryLock (Self) > 0) break ;
  69.683 +        if (TrySpin (Self) > 0) break ;
  69.684 +
  69.685 +        TEVENT (Wait Reentry - parking) ;
  69.686 +
  69.687 +        // State transition wrappers around park() ...
  69.688 +        // ReenterI() wisely defers state transitions until
  69.689 +        // it's clear we must park the thread.
  69.690 +        {
  69.691 +           OSThreadContendState osts(Self->osthread());
  69.692 +           ThreadBlockInVM tbivm(jt);
  69.693 +
  69.694 +           // cleared by handle_special_suspend_equivalent_condition()
  69.695 +           // or java_suspend_self()
  69.696 +           jt->set_suspend_equivalent();
  69.697 +           if (SyncFlags & 1) {
  69.698 +              Self->_ParkEvent->park ((jlong)1000) ;
  69.699 +           } else {
  69.700 +              Self->_ParkEvent->park () ;
  69.701 +           }
  69.702 +
  69.703 +           // were we externally suspended while we were waiting?
  69.704 +           for (;;) {
  69.705 +              if (!ExitSuspendEquivalent (jt)) break ;
  69.706 +              if (_succ == Self) { _succ = NULL; OrderAccess::fence(); }
  69.707 +              jt->java_suspend_self();
  69.708 +              jt->set_suspend_equivalent();
  69.709 +           }
  69.710 +        }
  69.711 +
  69.712 +        // Try again, but just so we distinguish between futile wakeups and
  69.713 +        // successful wakeups.  The following test isn't algorithmically
  69.714 +        // necessary, but it helps us maintain sensible statistics.
  69.715 +        if (TryLock(Self) > 0) break ;
  69.716 +
  69.717 +        // The lock is still contested.
  69.718 +        // Keep a tally of the # of futile wakeups.
  69.719 +        // Note that the counter is not protected by a lock or updated by atomics.
  69.720 +        // That is by design - we trade "lossy" counters which are exposed to
  69.721 +        // races during updates for a lower probe effect.
  69.722 +        TEVENT (Wait Reentry - futile wakeup) ;
  69.723 +        ++ nWakeups ;
  69.724 +
  69.725 +        // Assuming this is not a spurious wakeup we'll normally
  69.726 +        // find that _succ == Self.
  69.727 +        if (_succ == Self) _succ = NULL ;
  69.728 +
  69.729 +        // Invariant: after clearing _succ a contending thread
  69.730 +        // *must* retry  _owner before parking.
  69.731 +        OrderAccess::fence() ;
  69.732 +
  69.733 +        if (ObjectMonitor::_sync_FutileWakeups != NULL) {
  69.734 +          ObjectMonitor::_sync_FutileWakeups->inc() ;
  69.735 +        }
  69.736 +    }
  69.737 +
  69.738 +    // Self has acquired the lock -- Unlink Self from the cxq or EntryList .
  69.739 +    // Normally we'll find Self on the EntryList.
  69.740 +    // Unlinking from the EntryList is constant-time and atomic-free.
  69.741 +    // From the perspective of the lock owner (this thread), the
  69.742 +    // EntryList is stable and cxq is prepend-only.
  69.743 +    // The head of cxq is volatile but the interior is stable.
  69.744 +    // In addition, Self.TState is stable.
  69.745 +
  69.746 +    assert (_owner == Self, "invariant") ;
  69.747 +    assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
  69.748 +    UnlinkAfterAcquire (Self, SelfNode) ;
  69.749 +    if (_succ == Self) _succ = NULL ;
  69.750 +    assert (_succ != Self, "invariant") ;
  69.751 +    SelfNode->TState = ObjectWaiter::TS_RUN ;
  69.752 +    OrderAccess::fence() ;      // see comments at the end of EnterI()
  69.753 +}
  69.754 +
   69.755 +// By convention we unlink a contending thread from EntryList|cxq immediately after the thread acquires the lock in ::enter().  Equally, we could defer
  69.756 +// unlinking the thread until ::exit()-time.
  69.757 +
  69.758 +void ObjectMonitor::UnlinkAfterAcquire (Thread * Self, ObjectWaiter * SelfNode)
  69.759 +{
  69.760 +    assert (_owner == Self, "invariant") ;
  69.761 +    assert (SelfNode->_thread == Self, "invariant") ;
  69.762 +
  69.763 +    if (SelfNode->TState == ObjectWaiter::TS_ENTER) {
  69.764 +        // Normal case: remove Self from the DLL EntryList .
  69.765 +        // This is a constant-time operation.
  69.766 +        ObjectWaiter * nxt = SelfNode->_next ;
  69.767 +        ObjectWaiter * prv = SelfNode->_prev ;
  69.768 +        if (nxt != NULL) nxt->_prev = prv ;
  69.769 +        if (prv != NULL) prv->_next = nxt ;
  69.770 +        if (SelfNode == _EntryList ) _EntryList = nxt ;
  69.771 +        assert (nxt == NULL || nxt->TState == ObjectWaiter::TS_ENTER, "invariant") ;
  69.772 +        assert (prv == NULL || prv->TState == ObjectWaiter::TS_ENTER, "invariant") ;
  69.773 +        TEVENT (Unlink from EntryList) ;
  69.774 +    } else {
  69.775 +        guarantee (SelfNode->TState == ObjectWaiter::TS_CXQ, "invariant") ;
  69.776 +        // Inopportune interleaving -- Self is still on the cxq.
  69.777 +        // This usually means the enqueue of self raced an exiting thread.
  69.778 +        // Normally we'll find Self near the front of the cxq, so
   69.779 +        // dequeueing is typically fast.  If need be we can accelerate
  69.780 +        // this with some MCS/CHL-like bidirectional list hints and advisory
  69.781 +        // back-links so dequeueing from the interior will normally operate
  69.782 +        // in constant-time.
  69.783 +        // Dequeue Self from either the head (with CAS) or from the interior
  69.784 +        // with a linear-time scan and normal non-atomic memory operations.
  69.785 +        // CONSIDER: if Self is on the cxq then simply drain cxq into EntryList
  69.786 +        // and then unlink Self from EntryList.  We have to drain eventually,
  69.787 +        // so it might as well be now.
  69.788 +
  69.789 +        ObjectWaiter * v = _cxq ;
  69.790 +        assert (v != NULL, "invariant") ;
  69.791 +        if (v != SelfNode || Atomic::cmpxchg_ptr (SelfNode->_next, &_cxq, v) != v) {
  69.792 +            // The CAS above can fail from interference IFF a "RAT" arrived.
  69.793 +            // In that case Self must be in the interior and can no longer be
  69.794 +            // at the head of cxq.
  69.795 +            if (v == SelfNode) {
  69.796 +                assert (_cxq != v, "invariant") ;
  69.797 +                v = _cxq ;          // CAS above failed - start scan at head of list
  69.798 +            }
  69.799 +            ObjectWaiter * p ;
  69.800 +            ObjectWaiter * q = NULL ;
  69.801 +            for (p = v ; p != NULL && p != SelfNode; p = p->_next) {
  69.802 +                q = p ;
  69.803 +                assert (p->TState == ObjectWaiter::TS_CXQ, "invariant") ;
  69.804 +            }
  69.805 +            assert (v != SelfNode,  "invariant") ;
  69.806 +            assert (p == SelfNode,  "Node not found on cxq") ;
  69.807 +            assert (p != _cxq,      "invariant") ;
  69.808 +            assert (q != NULL,      "invariant") ;
  69.809 +            assert (q->_next == p,  "invariant") ;
  69.810 +            q->_next = p->_next ;
  69.811 +        }
  69.812 +        TEVENT (Unlink from cxq) ;
  69.813 +    }
  69.814 +
  69.815 +    // Diagnostic hygiene ...
  69.816 +    SelfNode->_prev  = (ObjectWaiter *) 0xBAD ;
  69.817 +    SelfNode->_next  = (ObjectWaiter *) 0xBAD ;
  69.818 +    SelfNode->TState = ObjectWaiter::TS_RUN ;
  69.819 +}
  69.820 +
  69.821 +// -----------------------------------------------------------------------------
  69.822 +// Exit support
  69.823 +//
  69.824 +// exit()
  69.825 +// ~~~~~~
  69.826 +// Note that the collector can't reclaim the objectMonitor or deflate
  69.827 +// the object out from underneath the thread calling ::exit() as the
  69.828 +// thread calling ::exit() never transitions to a stable state.
  69.829 +// This inhibits GC, which in turn inhibits asynchronous (and
  69.830 +// inopportune) reclamation of "this".
  69.831 +//
  69.832 +// We'd like to assert that: (THREAD->thread_state() != _thread_blocked) ;
  69.833 +// There's one exception to the claim above, however.  EnterI() can call
  69.834 +// exit() to drop a lock if the acquirer has been externally suspended.
  69.835 +// In that case exit() is called with _thread_state as _thread_blocked,
  69.836 +// but the monitor's _count field is > 0, which inhibits reclamation.
  69.837 +//
  69.838 +// 1-0 exit
  69.839 +// ~~~~~~~~
  69.840 +// ::exit() uses a canonical 1-1 idiom with a MEMBAR although some of
  69.841 +// the fast-path operators have been optimized so the common ::exit()
  69.842 +// operation is 1-0.  See i486.ad fast_unlock(), for instance.
  69.843 +// The code emitted by fast_unlock() elides the usual MEMBAR.  This
  69.844 +// greatly improves latency -- MEMBAR and CAS having considerable local
  69.845 +// latency on modern processors -- but at the cost of "stranding".  Absent the
  69.846 +// MEMBAR, a thread in fast_unlock() can race a thread in the slow
   69.847 +// ::enter() path, resulting in the entering thread being stranded
  69.848 +// and a progress-liveness failure.   Stranding is extremely rare.
  69.849 +// We use timers (timed park operations) & periodic polling to detect
  69.850 +// and recover from stranding.  Potentially stranded threads periodically
  69.851 +// wake up and poll the lock.  See the usage of the _Responsible variable.
  69.852 +//
  69.853 +// The CAS() in enter provides for safety and exclusion, while the CAS or
  69.854 +// MEMBAR in exit provides for progress and avoids stranding.  1-0 locking
   69.855 +// eliminates the CAS/MEMBAR from the exit path, but it admits stranding.
  69.856 +// We detect and recover from stranding with timers.
  69.857 +//
  69.858 +// If a thread transiently strands it'll park until (a) another
  69.859 +// thread acquires the lock and then drops the lock, at which time the
  69.860 +// exiting thread will notice and unpark the stranded thread, or, (b)
  69.861 +// the timer expires.  If the lock is high traffic then the stranding latency
  69.862 +// will be low due to (a).  If the lock is low traffic then the odds of
  69.863 +// stranding are lower, although the worst-case stranding latency
  69.864 +// is longer.  Critically, we don't want to put excessive load in the
  69.865 +// platform's timer subsystem.  We want to minimize both the timer injection
  69.866 +// rate (timers created/sec) as well as the number of timers active at
  69.867 +// any one time.  (more precisely, we want to minimize timer-seconds, which is
  69.868 +// the integral of the # of active timers at any instant over time).
  69.869 +// Both impinge on OS scalability.  Given that, at most one thread parked on
  69.870 +// a monitor will use a timer.
  69.871 +
  69.872 +void ATTR ObjectMonitor::exit(TRAPS) {
  69.873 +   Thread * Self = THREAD ;
  69.874 +   if (THREAD != _owner) {
  69.875 +     if (THREAD->is_lock_owned((address) _owner)) {
  69.876 +       // Transmute _owner from a BasicLock pointer to a Thread address.
  69.877 +       // We don't need to hold _mutex for this transition.
  69.878 +       // Non-null to Non-null is safe as long as all readers can
  69.879 +       // tolerate either flavor.
  69.880 +       assert (_recursions == 0, "invariant") ;
  69.881 +       _owner = THREAD ;
  69.882 +       _recursions = 0 ;
  69.883 +       OwnerIsThread = 1 ;
  69.884 +     } else {
  69.885 +       // NOTE: we need to handle unbalanced monitor enter/exit
  69.886 +       // in native code by throwing an exception.
  69.887 +       // TODO: Throw an IllegalMonitorStateException ?
  69.888 +       TEVENT (Exit - Throw IMSX) ;
  69.889 +       assert(false, "Non-balanced monitor enter/exit!");
  69.890 +       if (false) {
  69.891 +          THROW(vmSymbols::java_lang_IllegalMonitorStateException());
  69.892 +       }
  69.893 +       return;
  69.894 +     }
  69.895 +   }
  69.896 +
  69.897 +   if (_recursions != 0) {
  69.898 +     _recursions--;        // this is simple recursive enter
  69.899 +     TEVENT (Inflated exit - recursive) ;
  69.900 +     return ;
  69.901 +   }
  69.902 +
   69.903 +   // Invariant: after setting Responsible=null a thread must execute
  69.904 +   // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
  69.905 +   if ((SyncFlags & 4) == 0) {
  69.906 +      _Responsible = NULL ;
  69.907 +   }
  69.908 +
  69.909 +   for (;;) {
  69.910 +      assert (THREAD == _owner, "invariant") ;
  69.911 +
  69.912 +
  69.913 +      if (Knob_ExitPolicy == 0) {
  69.914 +         // release semantics: prior loads and stores from within the critical section
  69.915 +         // must not float (reorder) past the following store that drops the lock.
  69.916 +         // On SPARC that requires MEMBAR #loadstore|#storestore.
  69.917 +         // But of course in TSO #loadstore|#storestore is not required.
  69.918 +         // I'd like to write one of the following:
  69.919 +         // A.  OrderAccess::release() ; _owner = NULL
  69.920 +         // B.  OrderAccess::loadstore(); OrderAccess::storestore(); _owner = NULL;
  69.921 +         // Unfortunately OrderAccess::release() and OrderAccess::loadstore() both
  69.922 +         // store into a _dummy variable.  That store is not needed, but can result
  69.923 +         // in massive wasteful coherency traffic on classic SMP systems.
  69.924 +         // Instead, I use release_store(), which is implemented as just a simple
  69.925 +         // ST on x64, x86 and SPARC.
  69.926 +         OrderAccess::release_store_ptr (&_owner, NULL) ;   // drop the lock
  69.927 +         OrderAccess::storeload() ;                         // See if we need to wake a successor
  69.928 +         if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
  69.929 +            TEVENT (Inflated exit - simple egress) ;
  69.930 +            return ;
  69.931 +         }
  69.932 +         TEVENT (Inflated exit - complex egress) ;
  69.933 +
  69.934 +         // Normally the exiting thread is responsible for ensuring succession,
  69.935 +         // but if other successors are ready or other entering threads are spinning
  69.936 +         // then this thread can simply store NULL into _owner and exit without
  69.937 +         // waking a successor.  The existence of spinners or ready successors
  69.938 +         // guarantees proper succession (liveness).  Responsibility passes to the
  69.939 +         // ready or running successors.  The exiting thread delegates the duty.
  69.940 +         // More precisely, if a successor already exists this thread is absolved
  69.941 +         // of the responsibility of waking (unparking) one.
  69.942 +         //
  69.943 +         // The _succ variable is critical to reducing futile wakeup frequency.
  69.944 +         // _succ identifies the "heir presumptive" thread that has been made
  69.945 +         // ready (unparked) but that has not yet run.  We need only one such
  69.946 +         // successor thread to guarantee progress.
  69.947 +         // See http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
  69.948 +         // section 3.3 "Futile Wakeup Throttling" for details.
  69.949 +         //
  69.950 +         // Note that spinners in Enter() also set _succ non-null.
  69.951 +         // In the current implementation spinners opportunistically set
  69.952 +         // _succ so that exiting threads might avoid waking a successor.
  69.953 +         // Another less appealing alternative would be for the exiting thread
  69.954 +         // to drop the lock and then spin briefly to see if a spinner managed
  69.955 +         // to acquire the lock.  If so, the exiting thread could exit
  69.956 +         // immediately without waking a successor, otherwise the exiting
  69.957 +         // thread would need to dequeue and wake a successor.
  69.958 +         // (Note that we'd need to make the post-drop spin short, but no
  69.959 +         // shorter than the worst-case round-trip cache-line migration time.
  69.960 +         // The dropped lock needs to become visible to the spinner, and then
  69.961 +         // the acquisition of the lock by the spinner must become visible to
  69.962 +         // the exiting thread).
  69.963 +         //
  69.964 +
  69.965 +         // It appears that an heir-presumptive (successor) must be made ready.
  69.966 +         // Only the current lock owner can manipulate the EntryList or
  69.967 +         // drain _cxq, so we need to reacquire the lock.  If we fail
  69.968 +         // to reacquire the lock the responsibility for ensuring succession
  69.969 +         // falls to the new owner.
  69.970 +         //
  69.971 +         if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
  69.972 +            return ;
  69.973 +         }
  69.974 +         TEVENT (Exit - Reacquired) ;
  69.975 +      } else {
  69.976 +         if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
  69.977 +            OrderAccess::release_store_ptr (&_owner, NULL) ;   // drop the lock
  69.978 +            OrderAccess::storeload() ;
  69.979 +            // Ratify the previously observed values.
  69.980 +            if (_cxq == NULL || _succ != NULL) {
  69.981 +                TEVENT (Inflated exit - simple egress) ;
  69.982 +                return ;
  69.983 +            }
  69.984 +
  69.985 +            // inopportune interleaving -- the exiting thread (this thread)
  69.986 +            // in the fast-exit path raced an entering thread in the slow-enter
  69.987 +            // path.
  69.988 +            // We have two choices:
  69.989 +            // A.  Try to reacquire the lock.
  69.990 +            //     If the CAS() fails return immediately, otherwise
  69.991 +            //     we either restart/rerun the exit operation, or simply
  69.992 +            //     fall-through into the code below which wakes a successor.
  69.993 +            // B.  If the elements forming the EntryList|cxq are TSM
  69.994 +            //     we could simply unpark() the lead thread and return
  69.995 +            //     without having set _succ.
  69.996 +            if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
  69.997 +               TEVENT (Inflated exit - reacquired succeeded) ;
  69.998 +               return ;
  69.999 +            }
 69.1000 +            TEVENT (Inflated exit - reacquired failed) ;
 69.1001 +         } else {
 69.1002 +            TEVENT (Inflated exit - complex egress) ;
 69.1003 +         }
 69.1004 +      }
 69.1005 +
 69.1006 +      guarantee (_owner == THREAD, "invariant") ;
 69.1007 +
 69.1008 +      ObjectWaiter * w = NULL ;
 69.1009 +      int QMode = Knob_QMode ;
 69.1010 +
 69.1011 +      if (QMode == 2 && _cxq != NULL) {
 69.1012 +          // QMode == 2 : cxq has precedence over EntryList.
 69.1013 +          // Try to directly wake a successor from the cxq.
 69.1014 +          // If successful, the successor will need to unlink itself from cxq.
 69.1015 +          w = _cxq ;
 69.1016 +          assert (w != NULL, "invariant") ;
 69.1017 +          assert (w->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
 69.1018 +          ExitEpilog (Self, w) ;
 69.1019 +          return ;
 69.1020 +      }
 69.1021 +
 69.1022 +      if (QMode == 3 && _cxq != NULL) {
 69.1023 +          // Aggressively drain cxq into EntryList at the first opportunity.
 69.1024 +          // This policy ensures that recently-run threads live at the head of EntryList.
 69.1025 +          // Drain _cxq into EntryList - bulk transfer.
 69.1026 +          // First, detach _cxq.
 69.1027 +          // The following loop is tantamount to: w = swap (&cxq, NULL)
 69.1028 +          w = _cxq ;
 69.1029 +          for (;;) {
 69.1030 +             assert (w != NULL, "Invariant") ;
 69.1031 +             ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
 69.1032 +             if (u == w) break ;
 69.1033 +             w = u ;
 69.1034 +          }
 69.1035 +          assert (w != NULL              , "invariant") ;
 69.1036 +
 69.1037 +          ObjectWaiter * q = NULL ;
 69.1038 +          ObjectWaiter * p ;
 69.1039 +          for (p = w ; p != NULL ; p = p->_next) {
 69.1040 +              guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
 69.1041 +              p->TState = ObjectWaiter::TS_ENTER ;
 69.1042 +              p->_prev = q ;
 69.1043 +              q = p ;
 69.1044 +          }
 69.1045 +
 69.1046 +          // Append the RATs to the EntryList
 69.1047 +          // TODO: organize EntryList as a CDLL so we can locate the tail in constant-time.
 69.1048 +          ObjectWaiter * Tail ;
 69.1049 +          for (Tail = _EntryList ; Tail != NULL && Tail->_next != NULL ; Tail = Tail->_next) ;
 69.1050 +          if (Tail == NULL) {
 69.1051 +              _EntryList = w ;
 69.1052 +          } else {
 69.1053 +              Tail->_next = w ;
 69.1054 +              w->_prev = Tail ;
 69.1055 +          }
 69.1056 +
 69.1057 +          // Fall thru into code that tries to wake a successor from EntryList
 69.1058 +      }
 69.1059 +
 69.1060 +      if (QMode == 4 && _cxq != NULL) {
 69.1061 +          // Aggressively drain cxq into EntryList at the first opportunity.
 69.1062 +          // This policy ensures that recently-run threads live at the head of EntryList.
 69.1063 +
 69.1064 +          // Drain _cxq into EntryList - bulk transfer.
 69.1065 +          // First, detach _cxq.
 69.1066 +          // The following loop is tantamount to: w = swap (&cxq, NULL)
 69.1067 +          w = _cxq ;
 69.1068 +          for (;;) {
 69.1069 +             assert (w != NULL, "Invariant") ;
 69.1070 +             ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
 69.1071 +             if (u == w) break ;
 69.1072 +             w = u ;
 69.1073 +          }
 69.1074 +          assert (w != NULL              , "invariant") ;
 69.1075 +
 69.1076 +          ObjectWaiter * q = NULL ;
 69.1077 +          ObjectWaiter * p ;
 69.1078 +          for (p = w ; p != NULL ; p = p->_next) {
 69.1079 +              guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
 69.1080 +              p->TState = ObjectWaiter::TS_ENTER ;
 69.1081 +              p->_prev = q ;
 69.1082 +              q = p ;
 69.1083 +          }
 69.1084 +
 69.1085 +          // Prepend the RATs to the EntryList
 69.1086 +          if (_EntryList != NULL) {
 69.1087 +              q->_next = _EntryList ;
 69.1088 +              _EntryList->_prev = q ;
 69.1089 +          }
 69.1090 +          _EntryList = w ;
 69.1091 +
 69.1092 +          // Fall thru into code that tries to wake a successor from EntryList
 69.1093 +      }
 69.1094 +
 69.1095 +      w = _EntryList  ;
 69.1096 +      if (w != NULL) {
 69.1097 +          // I'd like to write: guarantee (w->_thread != Self).
 69.1098 +          // But in practice an exiting thread may find itself on the EntryList.
 69.1099 +          // Let's say thread T1 calls O.wait().  Wait() enqueues T1 on O's waitset and
 69.1100 +          // then calls exit().  Exit() releases the lock by setting O._owner to NULL.
 69.1101 +          // Let's say T1 then stalls.  T2 acquires O and calls O.notify().  The
 69.1102 +          // notify() operation moves T1 from O's waitset to O's EntryList.  T2 then
 69.1103 +          // releases the lock "O".  T1 resumes immediately after the ST of null into
 69.1104 +          // _owner, above.  T1 notices that the EntryList is populated, so it
 69.1105 +          // reacquires the lock and then finds itself on the EntryList.
 69.1106 +          // Given all that, we have to tolerate the circumstance where "w" is
 69.1107 +          // associated with Self.
 69.1108 +          assert (w->TState == ObjectWaiter::TS_ENTER, "invariant") ;
 69.1109 +          ExitEpilog (Self, w) ;
 69.1110 +          return ;
 69.1111 +      }
 69.1112 +
 69.1113 +      // If we find that both _cxq and EntryList are null then just
 69.1114 +      // re-run the exit protocol from the top.
 69.1115 +      w = _cxq ;
 69.1116 +      if (w == NULL) continue ;
 69.1117 +
 69.1118 +      // Drain _cxq into EntryList - bulk transfer.
 69.1119 +      // First, detach _cxq.
 69.1120 +      // The following loop is tantamount to: w = swap (&cxq, NULL)
 69.1121 +      for (;;) {
 69.1122 +          assert (w != NULL, "Invariant") ;
 69.1123 +          ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
 69.1124 +          if (u == w) break ;
 69.1125 +          w = u ;
 69.1126 +      }
 69.1127 +      TEVENT (Inflated exit - drain cxq into EntryList) ;
 69.1128 +
 69.1129 +      assert (w != NULL              , "invariant") ;
 69.1130 +      assert (_EntryList  == NULL    , "invariant") ;
 69.1131 +
 69.1132 +      // Convert the LIFO SLL anchored by _cxq into a DLL.
 69.1133 +      // The list reorganization step operates in O(LENGTH(w)) time.
 69.1134 +      // It's critical that this step operate quickly as
 69.1135 +      // "Self" still holds the outer-lock, restricting parallelism
 69.1136 +      // and effectively lengthening the critical section.
 69.1137 +      // Invariant: s chases t chases u.
 69.1138 +      // TODO-FIXME: consider changing EntryList from a DLL to a CDLL so
 69.1139 +      // we have faster access to the tail.
 69.1140 +
 69.1141 +      if (QMode == 1) {
 69.1142 +         // QMode == 1 : drain cxq to EntryList, reversing order
 69.1143 +         // We also reverse the order of the list.
 69.1144 +         ObjectWaiter * s = NULL ;
 69.1145 +         ObjectWaiter * t = w ;
 69.1146 +         ObjectWaiter * u = NULL ;
 69.1147 +         while (t != NULL) {
 69.1148 +             guarantee (t->TState == ObjectWaiter::TS_CXQ, "invariant") ;
 69.1149 +             t->TState = ObjectWaiter::TS_ENTER ;
 69.1150 +             u = t->_next ;
 69.1151 +             t->_prev = u ;
 69.1152 +             t->_next = s ;
 69.1153 +             s = t;
 69.1154 +             t = u ;
 69.1155 +         }
 69.1156 +         _EntryList  = s ;
 69.1157 +         assert (s != NULL, "invariant") ;
 69.1158 +      } else {
 69.1159 +         // QMode == 0 or QMode == 2
 69.1160 +         _EntryList = w ;
 69.1161 +         ObjectWaiter * q = NULL ;
 69.1162 +         ObjectWaiter * p ;
 69.1163 +         for (p = w ; p != NULL ; p = p->_next) {
 69.1164 +             guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
 69.1165 +             p->TState = ObjectWaiter::TS_ENTER ;
 69.1166 +             p->_prev = q ;
 69.1167 +             q = p ;
 69.1168 +         }
 69.1169 +      }
 69.1170 +
 69.1171 +      // In 1-0 mode we need: ST EntryList; MEMBAR #storestore; ST _owner = NULL
 69.1172 +      // The MEMBAR is satisfied by the release_store() operation in ExitEpilog().
 69.1173 +
 69.1174 +      // See if we can abdicate to a spinner instead of waking a thread.
 69.1175 +      // A primary goal of the implementation is to reduce the
 69.1176 +      // context-switch rate.
 69.1177 +      if (_succ != NULL) continue;
 69.1178 +
 69.1179 +      w = _EntryList  ;
 69.1180 +      if (w != NULL) {
 69.1181 +          guarantee (w->TState == ObjectWaiter::TS_ENTER, "invariant") ;
 69.1182 +          ExitEpilog (Self, w) ;
 69.1183 +          return ;
 69.1184 +      }
 69.1185 +   }
 69.1186 +}
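The QMode-dependent rethreading above is easier to follow in isolation.  The
following is a minimal sketch -- using a simplified Waiter struct rather than
the real ObjectWaiter -- of the two conversions: the QMode == 1 path reverses
the LIFO cxq so the oldest arrival ends up at the head of the EntryList, while
the QMode == 0/2 path keeps cxq (most-recent-first) order and merely rebuilds
the back links.

   struct Waiter { Waiter * _next ; Waiter * _prev ; } ;

   // QMode == 0/2 style: preserve cxq order, rebuild the _prev links.
   static Waiter * ThreadForward (Waiter * w) {
     Waiter * q = NULL ;
     for (Waiter * p = w ; p != NULL ; p = p->_next) {
       p->_prev = q ;
       q = p ;
     }
     return w ;                 // head is the most recent cxq arrival
   }

   // QMode == 1 style: reverse while rebuilding links, so the oldest
   // cxq arrival becomes the head of the resulting EntryList.
   static Waiter * ThreadReversed (Waiter * w) {
     Waiter * s = NULL ;
     Waiter * t = w ;
     while (t != NULL) {
       Waiter * u = t->_next ;
       t->_prev = u ;           // u precedes t in the reversed list
       t->_next = s ;
       s = t ;
       t = u ;
     }
     return s ;                 // head is the oldest cxq arrival
   }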
 69.1187 +
 69.1188 +// ExitSuspendEquivalent:
 69.1189 +// A faster alternate to handle_special_suspend_equivalent_condition()
 69.1190 +//
 69.1191 +// handle_special_suspend_equivalent_condition() unconditionally
 69.1192 +// acquires the SR_lock.  On some platforms uncontended MutexLocker()
 69.1193 +// operations have high latency.  Note that in ::enter() we call HSSEC
 69.1194 +// while holding the monitor, so we effectively lengthen the critical sections.
 69.1195 +//
 69.1196 +// There are a number of possible solutions:
 69.1197 +//
 69.1198 +// A.  To ameliorate the problem we might also defer state transitions
 69.1199 +//     to as late as possible -- just prior to parking.
 69.1200 +//     Given that, we'd call HSSEC after having returned from park(),
 69.1201 +//     but before attempting to acquire the monitor.  This is only a
 69.1202 +//     partial solution.  It avoids calling HSSEC while holding the
 69.1203 +//     monitor (good), but it still increases successor reacquisition latency --
 69.1204 +//     the interval between unparking a successor and the time the successor
 69.1205 +//     resumes and retries the lock.  See ReenterI(), which defers state transitions.
 69.1206 +//     If we use this technique we can also avoid EnterI()-exit() loop
 69.1207 +//     in ::enter() where we iteratively drop the lock and then attempt
 69.1208 +//     to reacquire it after suspending.
 69.1209 +//
 69.1210 +// B.  In the future we might fold all the suspend bits into a
 69.1211 +//     composite per-thread suspend flag and then update it with CAS().
 69.1212 +//     Alternately, a Dekker-like mechanism with multiple variables
 69.1213 +//     would suffice:
 69.1214 +//       ST Self->_suspend_equivalent = false
 69.1215 +//       MEMBAR
 69.1216 +//       LD Self->_suspend_flags
 69.1217 +//
 69.1218 +
 69.1219 +
 69.1220 +bool ObjectMonitor::ExitSuspendEquivalent (JavaThread * jSelf) {
 69.1221 +   int Mode = Knob_FastHSSEC ;
 69.1222 +   if (Mode && !jSelf->is_external_suspend()) {
 69.1223 +      assert (jSelf->is_suspend_equivalent(), "invariant") ;
 69.1224 +      jSelf->clear_suspend_equivalent() ;
 69.1225 +      if (2 == Mode) OrderAccess::storeload() ;
 69.1226 +      if (!jSelf->is_external_suspend()) return false ;
 69.1227 +      // We raced a suspension -- fall thru into the slow path
 69.1228 +      TEVENT (ExitSuspendEquivalent - raced) ;
 69.1229 +      jSelf->set_suspend_equivalent() ;
 69.1230 +   }
 69.1231 +   return jSelf->handle_special_suspend_equivalent_condition() ;
 69.1232 +}
 69.1233 +
 69.1234 +
 69.1235 +void ObjectMonitor::ExitEpilog (Thread * Self, ObjectWaiter * Wakee) {
 69.1236 +   assert (_owner == Self, "invariant") ;
 69.1237 +
 69.1238 +   // Exit protocol:
 69.1239 +   // 1. ST _succ = wakee
 69.1240 +   // 2. membar #loadstore|#storestore;
 69.1241 +   // 3. ST _owner = NULL
 69.1242 +   // 4. unpark(wakee)
 69.1243 +
 69.1244 +   _succ = Knob_SuccEnabled ? Wakee->_thread : NULL ;
 69.1245 +   ParkEvent * Trigger = Wakee->_event ;
 69.1246 +
 69.1247 +   // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again.
 69.1248 +   // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
 69.1249 +   // out-of-scope (non-extant).
 69.1250 +   Wakee  = NULL ;
 69.1251 +
 69.1252 +   // Drop the lock
 69.1253 +   OrderAccess::release_store_ptr (&_owner, NULL) ;
 69.1254 +   OrderAccess::fence() ;                               // ST _owner vs LD in unpark()
 69.1255 +
 69.1256 +   if (SafepointSynchronize::do_call_back()) {
 69.1257 +      TEVENT (unpark before SAFEPOINT) ;
 69.1258 +   }
 69.1259 +
 69.1260 +   DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
 69.1261 +   Trigger->unpark() ;
 69.1262 +
 69.1263 +   // Maintain stats and report events to JVMTI
 69.1264 +   if (ObjectMonitor::_sync_Parks != NULL) {
 69.1265 +      ObjectMonitor::_sync_Parks->inc() ;
 69.1266 +   }
 69.1267 +}
 69.1268 +
 69.1269 +
 69.1270 +// -----------------------------------------------------------------------------
 69.1271 +// Class Loader deadlock handling.
 69.1272 +//
 69.1273 +// complete_exit exits a lock returning recursion count
 69.1274 +// complete_exit/reenter operate as a wait without waiting
 69.1275 +// complete_exit requires an inflated monitor
 69.1276 +// The _owner field is not always the Thread addr even with an
 69.1277 +// inflated monitor, e.g. the monitor can be inflated by a non-owning
 69.1278 +// thread due to contention.
 69.1279 +intptr_t ObjectMonitor::complete_exit(TRAPS) {
 69.1280 +   Thread * const Self = THREAD;
 69.1281 +   assert(Self->is_Java_thread(), "Must be Java thread!");
 69.1282 +   JavaThread *jt = (JavaThread *)THREAD;
 69.1283 +
 69.1284 +   DeferredInitialize();
 69.1285 +
 69.1286 +   if (THREAD != _owner) {
 69.1287 +    if (THREAD->is_lock_owned ((address)_owner)) {
 69.1288 +       assert(_recursions == 0, "internal state error");
 69.1289 +       _owner = THREAD ;   /* Convert from basiclock addr to Thread addr */
 69.1290 +       _recursions = 0 ;
 69.1291 +       OwnerIsThread = 1 ;
 69.1292 +    }
 69.1293 +   }
 69.1294 +
 69.1295 +   guarantee(Self == _owner, "complete_exit not owner");
 69.1296 +   intptr_t save = _recursions; // record the old recursion count
 69.1297 +   _recursions = 0;        // set the recursion level to be 0
 69.1298 +   exit (Self) ;           // exit the monitor
 69.1299 +   guarantee (_owner != Self, "invariant");
 69.1300 +   return save;
 69.1301 +}
 69.1302 +
 69.1303 +// reenter() enters a lock and sets recursion count
 69.1304 +// complete_exit/reenter operate as a wait without waiting
 69.1305 +void ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
 69.1306 +   Thread * const Self = THREAD;
 69.1307 +   assert(Self->is_Java_thread(), "Must be Java thread!");
 69.1308 +   JavaThread *jt = (JavaThread *)THREAD;
 69.1309 +
 69.1310 +   guarantee(_owner != Self, "reenter already owner");
 69.1311 +   enter (THREAD);       // enter the monitor
 69.1312 +   guarantee (_recursions == 0, "reenter recursion");
 69.1313 +   _recursions = recursions;
 69.1314 +   return;
 69.1315 +}
 69.1316 +
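A usage sketch of the complete_exit()/reenter() pairing defined above -- the
monitor pointer "mon" and the blocking step are placeholders, not part of this
change:

   intptr_t saved = mon->complete_exit(THREAD) ;   // drops the lock, returns the old recursion count
   BlockWithoutHoldingTheMonitor() ;               // hypothetical operation that must not hold the monitor
   mon->reenter(saved, THREAD) ;                   // reacquires the lock and restores _recursions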
 69.1317 +
 69.1318 +// -----------------------------------------------------------------------------
 69.1319 +// A macro is used below because there may already be a pending
 69.1320 +// exception which should not abort the execution of the routines
 69.1321 +// which use this (which is why we don't put this into check_slow and
 69.1322 +// call it with a CHECK argument).
 69.1323 +
 69.1324 +#define CHECK_OWNER()                                                             \
 69.1325 +  do {                                                                            \
 69.1326 +    if (THREAD != _owner) {                                                       \
 69.1327 +      if (THREAD->is_lock_owned((address) _owner)) {                              \
 69.1328 +        _owner = THREAD ;  /* Convert from basiclock addr to Thread addr */       \
 69.1329 +        _recursions = 0;                                                          \
 69.1330 +        OwnerIsThread = 1 ;                                                       \
 69.1331 +      } else {                                                                    \
 69.1332 +        TEVENT (Throw IMSX) ;                                                     \
 69.1333 +        THROW(vmSymbols::java_lang_IllegalMonitorStateException());               \
 69.1334 +      }                                                                           \
 69.1335 +    }                                                                             \
 69.1336 +  } while (false)
 69.1337 +
 69.1338 +// check_slow() is a misnomer.  It's called simply to throw an IMSX exception.
 69.1339 +// TODO-FIXME: remove check_slow() -- it's likely dead.
 69.1340 +
 69.1341 +void ObjectMonitor::check_slow(TRAPS) {
 69.1342 +  TEVENT (check_slow - throw IMSX) ;
 69.1343 +  assert(THREAD != _owner && !THREAD->is_lock_owned((address) _owner), "must not be owner");
 69.1344 +  THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), "current thread not owner");
 69.1345 +}
 69.1346 +
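// Adjust() -- lock-free add: atomically adds dx to *adr via a CAS retry loop
// and returns the value observed just before the successful update.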
 69.1347 +static int Adjust (volatile int * adr, int dx) {
 69.1348 +  int v ;
 69.1349 +  for (v = *adr ; Atomic::cmpxchg (v + dx, adr, v) != v; v = *adr) ;
 69.1350 +  return v ;
 69.1351 +}
 69.1352 +// -----------------------------------------------------------------------------
 69.1353 +// Wait/Notify/NotifyAll
 69.1354 +//
 69.1355 +// Note: a subset of changes to ObjectMonitor::wait()
 69.1356 +// will need to be replicated in complete_exit above
 69.1357 +void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
 69.1358 +   Thread * const Self = THREAD ;
 69.1359 +   assert(Self->is_Java_thread(), "Must be Java thread!");
 69.1360 +   JavaThread *jt = (JavaThread *)THREAD;
 69.1361 +
 69.1362 +   DeferredInitialize () ;
 69.1363 +
 69.1364 +   // Throw IMSX or IEX.
 69.1365 +   CHECK_OWNER();
 69.1366 +
 69.1367 +   // check for a pending interrupt
 69.1368 +   if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
 69.1369 +     // post monitor waited event.  Note that this is past-tense, we are done waiting.
 69.1370 +     if (JvmtiExport::should_post_monitor_waited()) {
 69.1371 +        // Note: 'false' is passed here because the wait did not time out --
 69.1372 +        // it is ending early due to the thread interrupt.
 69.1373 +        JvmtiExport::post_monitor_waited(jt, this, false);
 69.1374 +     }
 69.1375 +     TEVENT (Wait - Throw IEX) ;
 69.1376 +     THROW(vmSymbols::java_lang_InterruptedException());
 69.1377 +     return ;
 69.1378 +   }
 69.1379 +   TEVENT (Wait) ;
 69.1380 +
 69.1381 +   assert (Self->_Stalled == 0, "invariant") ;
 69.1382 +   Self->_Stalled = intptr_t(this) ;
 69.1383 +   jt->set_current_waiting_monitor(this);
 69.1384 +
 69.1385 +   // create a node to be put into the queue
 69.1386 +   // Critically, after we reset() the event but prior to park(), we must check
 69.1387 +   // for a pending interrupt.
 69.1388 +   ObjectWaiter node(Self);
 69.1389 +   node.TState = ObjectWaiter::TS_WAIT ;
 69.1390 +   Self->_ParkEvent->reset() ;
 69.1391 +   OrderAccess::fence();          // ST into Event; membar ; LD interrupted-flag
 69.1392 +
 69.1393 +   // Enter the waiting queue, which is a circular doubly linked list in this case
 69.1394 +   // but it could be a priority queue or any data structure.
 69.1395 +   // _WaitSetLock protects the wait queue.  Normally the wait queue is accessed only
 69.1396 +   // by the owner of the monitor *except* in the case where park()
 69.1397 +   // returns because of a timeout or interrupt.  Contention is exceptionally rare
 69.1398 +   // so we use a simple spin-lock instead of a heavier-weight blocking lock.
 69.1399 +
 69.1400 +   Thread::SpinAcquire (&_WaitSetLock, "WaitSet - add") ;
 69.1401 +   AddWaiter (&node) ;
 69.1402 +   Thread::SpinRelease (&_WaitSetLock) ;
 69.1403 +
 69.1404 +   if ((SyncFlags & 4) == 0) {
 69.1405 +      _Responsible = NULL ;
 69.1406 +   }
 69.1407 +   intptr_t save = _recursions; // record the old recursion count
 69.1408 +   _waiters++;                  // increment the number of waiters
 69.1409 +   _recursions = 0;             // set the recursion level to be 0
 69.1410 +   exit (Self) ;                    // exit the monitor
 69.1411 +   guarantee (_owner != Self, "invariant") ;
 69.1412 +
 69.1413 +   // As soon as the ObjectMonitor's ownership is dropped in the exit()
 69.1414 +   // call above, another thread can enter() the ObjectMonitor, do the
 69.1415 +   // notify(), and exit() the ObjectMonitor. If the other thread's
 69.1416 +   // exit() call chooses this thread as the successor and the unpark()
 69.1417 +   // call happens to occur while this thread is posting a
 69.1418 +   // MONITOR_CONTENDED_EXIT event, then we run the risk of the event
 69.1419 +   // handler using RawMonitors and consuming the unpark().
 69.1420 +   //
 69.1421 +   // To avoid the problem, we re-post the event. This does no harm
 69.1422 +   // even if the original unpark() was not consumed because we are the
 69.1423 +   // chosen successor for this monitor.
 69.1424 +   if (node._notified != 0 && _succ == Self) {
 69.1425 +      node._event->unpark();
 69.1426 +   }
 69.1427 +
 69.1428 +   // The thread is on the WaitSet list - now park() it.
 69.1429 +   // On MP systems it's conceivable that a brief spin before we park
 69.1430 +   // could be profitable.
 69.1431 +   //
 69.1432 +   // TODO-FIXME: change the following logic to a loop of the form
 69.1433 +   //   while (!timeout && !interrupted && _notified == 0) park()
 69.1434 +
 69.1435 +   int ret = OS_OK ;
 69.1436 +   int WasNotified = 0 ;
 69.1437 +   { // State transition wrappers
 69.1438 +     OSThread* osthread = Self->osthread();
 69.1439 +     OSThreadWaitState osts(osthread, true);
 69.1440 +     {
 69.1441 +       ThreadBlockInVM tbivm(jt);
 69.1442 +       // Thread is in thread_blocked state and oop access is unsafe.
 69.1443 +       jt->set_suspend_equivalent();
 69.1444 +
 69.1445 +       if (interruptible && (Thread::is_interrupted(THREAD, false) || HAS_PENDING_EXCEPTION)) {
 69.1446 +           // Intentionally empty
 69.1447 +       } else
 69.1448 +       if (node._notified == 0) {
 69.1449 +         if (millis <= 0) {
 69.1450 +            Self->_ParkEvent->park () ;
 69.1451 +         } else {
 69.1452 +            ret = Self->_ParkEvent->park (millis) ;
 69.1453 +         }
 69.1454 +       }
 69.1455 +
 69.1456 +       // were we externally suspended while we were waiting?
 69.1457 +       if (ExitSuspendEquivalent (jt)) {
 69.1458 +          // TODO-FIXME: add -- if succ == Self then succ = null.
 69.1459 +          jt->java_suspend_self();
 69.1460 +       }
 69.1461 +
 69.1462 +     } // Exit thread safepoint: transition _thread_blocked -> _thread_in_vm
 69.1463 +
 69.1464 +
 69.1465 +     // Node may be on the WaitSet, the EntryList (or cxq), or in transition
 69.1466 +     // from the WaitSet to the EntryList.
 69.1467 +     // See if we need to remove Node from the WaitSet.
 69.1468 +     // We use double-checked locking to avoid grabbing _WaitSetLock
 69.1469 +     // if the thread is not on the wait queue.
 69.1470 +     //
 69.1471 +     // Note that we don't need a fence before the fetch of TState.
 69.1472 +     // In the worst case we'll fetch an old, stale value of TS_WAIT previously
 69.1473 +     // written by this thread. (perhaps the fetch might even be satisfied
 69.1474 +     // by a look-aside into the processor's own store buffer, although given
 69.1475 +     // the length of the code path between the prior ST and this load that's
 69.1476 +     // highly unlikely).  If the following LD fetches a stale TS_WAIT value
 69.1477 +     // then we'll acquire the lock and then re-fetch a fresh TState value.
 69.1478 +     // That is, we fail toward safety.
 69.1479 +
 69.1480 +     if (node.TState == ObjectWaiter::TS_WAIT) {
 69.1481 +         Thread::SpinAcquire (&_WaitSetLock, "WaitSet - unlink") ;
 69.1482 +         if (node.TState == ObjectWaiter::TS_WAIT) {
 69.1483 +            DequeueSpecificWaiter (&node) ;       // unlink from WaitSet
 69.1484 +            assert(node._notified == 0, "invariant");
 69.1485 +            node.TState = ObjectWaiter::TS_RUN ;
 69.1486 +         }
 69.1487 +         Thread::SpinRelease (&_WaitSetLock) ;
 69.1488 +     }
 69.1489 +
 69.1490 +     // The thread is now either off-list (TS_RUN),
 69.1491 +     // on the EntryList (TS_ENTER), or on the cxq (TS_CXQ).
 69.1492 +     // The Node's TState variable is stable from the perspective of this thread.
 69.1493 +     // No other threads will asynchronously modify TState.
 69.1494 +     guarantee (node.TState != ObjectWaiter::TS_WAIT, "invariant") ;
 69.1495 +     OrderAccess::loadload() ;
 69.1496 +     if (_succ == Self) _succ = NULL ;
 69.1497 +     WasNotified = node._notified ;
 69.1498 +
 69.1499 +     // Reentry phase -- reacquire the monitor.
 69.1500 +     // re-enter contended monitor after object.wait().
 69.1501 +     // retain OBJECT_WAIT state until re-enter successfully completes
 69.1502 +     // Thread state is thread_in_vm and oop access is again safe,
 69.1503 +     // although the raw address of the object may have changed.
 69.1504 +     // (Don't cache naked oops over safepoints, of course).
 69.1505 +
 69.1506 +     // post monitor waited event. Note that this is past-tense, we are done waiting.
 69.1507 +     if (JvmtiExport::should_post_monitor_waited()) {
 69.1508 +       JvmtiExport::post_monitor_waited(jt, this, ret == OS_TIMEOUT);
 69.1509 +     }
 69.1510 +     OrderAccess::fence() ;
 69.1511 +
 69.1512 +     assert (Self->_Stalled != 0, "invariant") ;
 69.1513 +     Self->_Stalled = 0 ;
 69.1514 +
 69.1515 +     assert (_owner != Self, "invariant") ;
 69.1516 +     ObjectWaiter::TStates v = node.TState ;
 69.1517 +     if (v == ObjectWaiter::TS_RUN) {
 69.1518 +         enter (Self) ;
 69.1519 +     } else {
 69.1520 +         guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ;
 69.1521 +         ReenterI (Self, &node) ;
 69.1522 +         node.wait_reenter_end(this);
 69.1523 +     }
 69.1524 +
 69.1525 +     // Self has reacquired the lock.
 69.1526 +     // Lifecycle - the node representing Self must not appear on any queues.
 69.1527 +     // Node is about to go out-of-scope, but even if it were immortal we wouldn't
 69.1528 +     // want residual elements associated with this thread left on any lists.
 69.1529 +     guarantee (node.TState == ObjectWaiter::TS_RUN, "invariant") ;
 69.1530 +     assert    (_owner == Self, "invariant") ;
 69.1531 +     assert    (_succ != Self , "invariant") ;
 69.1532 +   } // OSThreadWaitState()
 69.1533 +
 69.1534 +   jt->set_current_waiting_monitor(NULL);
 69.1535 +
 69.1536 +   guarantee (_recursions == 0, "invariant") ;
 69.1537 +   _recursions = save;     // restore the old recursion count
 69.1538 +   _waiters--;             // decrement the number of waiters
 69.1539 +
 69.1540 +   // Verify a few postconditions
 69.1541 +   assert (_owner == Self       , "invariant") ;
 69.1542 +   assert (_succ  != Self       , "invariant") ;
 69.1543 +   assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
 69.1544 +
 69.1545 +   if (SyncFlags & 32) {
 69.1546 +      OrderAccess::fence() ;
 69.1547 +   }
 69.1548 +
 69.1549 +   // check if the notification happened
 69.1550 +   if (!WasNotified) {
 69.1551 +     // no, it could be timeout or Thread.interrupt() or both
 69.1552 +     // check for interrupt event, otherwise it is timeout
 69.1553 +     if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
 69.1554 +       TEVENT (Wait - throw IEX from epilog) ;
 69.1555 +       THROW(vmSymbols::java_lang_InterruptedException());
 69.1556 +     }
 69.1557 +   }
 69.1558 +
 69.1559 +   // NOTE: A spurious wakeup will be treated as a timeout.
 69.1560 +   // Monitor notify has precedence over thread interrupt.
 69.1561 +}
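For reference, the loop shape suggested by the TODO-FIXME in wait() above would
look roughly like the following sketch; "timed_out" and "interrupted" stand for
the timeout and interrupt checks wait() already performs, and this is not the
current implementation:

   bool timed_out = false, interrupted = false ;
   while (node._notified == 0 && !interrupted && !timed_out) {
     if (millis <= 0) {
       Self->_ParkEvent->park() ;
     } else if (Self->_ParkEvent->park(millis) == OS_TIMEOUT) {
       timed_out = true ;
     }
     interrupted = interruptible && Thread::is_interrupted(Self, false) ;
   }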
 69.1562 +
 69.1563 +
 69.1564 +// Consider:
 69.1565 +// If the lock is cool (cxq == null && succ == null) and we're on an MP system
 69.1566 +// then instead of transferring a thread from the WaitSet to the EntryList
 69.1567 +// we might just dequeue a thread from the WaitSet and directly unpark() it.
 69.1568 +
 69.1569 +void ObjectMonitor::notify(TRAPS) {
 69.1570 +  CHECK_OWNER();
 69.1571 +  if (_WaitSet == NULL) {
 69.1572 +     TEVENT (Empty-Notify) ;
 69.1573 +     return ;
 69.1574 +  }
 69.1575 +  DTRACE_MONITOR_PROBE(notify, this, object(), THREAD);
 69.1576 +
 69.1577 +  int Policy = Knob_MoveNotifyee ;
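  // Knob_MoveNotifyee selects where the notified waiter is placed:
  //   0 - prepend to the EntryList      1 - append to the EntryList
  //   2 - prepend to the cxq            3 - append to the cxq
  //   anything else - leave it TS_RUN and unpark() it immediately.
  // (Policy 2 falls back to the EntryList when the EntryList is empty.)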
 69.1578 +
 69.1579 +  Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notify") ;
 69.1580 +  ObjectWaiter * iterator = DequeueWaiter() ;
 69.1581 +  if (iterator != NULL) {
 69.1582 +     TEVENT (Notify1 - Transfer) ;
 69.1583 +     guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
 69.1584 +     guarantee (iterator->_notified == 0, "invariant") ;
 69.1585 +     if (Policy != 4) {
 69.1586 +        iterator->TState = ObjectWaiter::TS_ENTER ;
 69.1587 +     }
 69.1588 +     iterator->_notified = 1 ;
 69.1589 +
 69.1590 +     ObjectWaiter * List = _EntryList ;
 69.1591 +     if (List != NULL) {
 69.1592 +        assert (List->_prev == NULL, "invariant") ;
 69.1593 +        assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
 69.1594 +        assert (List != iterator, "invariant") ;
 69.1595 +     }
 69.1596 +
 69.1597 +     if (Policy == 0) {       // prepend to EntryList
 69.1598 +         if (List == NULL) {
 69.1599 +             iterator->_next = iterator->_prev = NULL ;
 69.1600 +             _EntryList = iterator ;
 69.1601 +         } else {
 69.1602 +             List->_prev = iterator ;
 69.1603 +             iterator->_next = List ;
 69.1604 +             iterator->_prev = NULL ;
 69.1605 +             _EntryList = iterator ;
 69.1606 +        }
 69.1607 +     } else
 69.1608 +     if (Policy == 1) {      // append to EntryList
 69.1609 +         if (List == NULL) {
 69.1610 +             iterator->_next = iterator->_prev = NULL ;
 69.1611 +             _EntryList = iterator ;
 69.1612 +         } else {
 69.1613 +            // CONSIDER:  finding the tail currently requires a linear-time walk of
 69.1614 +            // the EntryList.  We can make tail access constant-time by converting to
 69.1615 +            // a CDLL instead of using our current DLL.
 69.1616 +            ObjectWaiter * Tail ;
 69.1617 +            for (Tail = List ; Tail->_next != NULL ; Tail = Tail->_next) ;
 69.1618 +            assert (Tail != NULL && Tail->_next == NULL, "invariant") ;
 69.1619 +            Tail->_next = iterator ;
 69.1620 +            iterator->_prev = Tail ;
 69.1621 +            iterator->_next = NULL ;
 69.1622 +        }
 69.1623 +     } else
 69.1624 +     if (Policy == 2) {      // prepend to cxq
 69.1625 +         // prepend to cxq
 69.1626 +         if (List == NULL) {
 69.1627 +             iterator->_next = iterator->_prev = NULL ;
 69.1628 +             _EntryList = iterator ;
 69.1629 +         } else {
 69.1630 +            iterator->TState = ObjectWaiter::TS_CXQ ;
 69.1631 +            for (;;) {
 69.1632 +                ObjectWaiter * Front = _cxq ;
 69.1633 +                iterator->_next = Front ;
 69.1634 +                if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) {
 69.1635 +                    break ;
 69.1636 +                }
 69.1637 +            }
 69.1638 +         }
 69.1639 +     } else
 69.1640 +     if (Policy == 3) {      // append to cxq
 69.1641 +        iterator->TState = ObjectWaiter::TS_CXQ ;
 69.1642 +        for (;;) {
 69.1643 +            ObjectWaiter * Tail ;
 69.1644 +            Tail = _cxq ;
 69.1645 +            if (Tail == NULL) {
 69.1646 +                iterator->_next = NULL ;
 69.1647 +                if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) {
 69.1648 +                   break ;
 69.1649 +                }
 69.1650 +            } else {
 69.1651 +                while (Tail->_next != NULL) Tail = Tail->_next ;
 69.1652 +                Tail->_next = iterator ;
 69.1653 +                iterator->_prev = Tail ;
 69.1654 +                iterator->_next = NULL ;
 69.1655 +                break ;
 69.1656 +            }
 69.1657 +        }
 69.1658 +     } else {
 69.1659 +        ParkEvent * ev = iterator->_event ;
 69.1660 +        iterator->TState = ObjectWaiter::TS_RUN ;
 69.1661 +        OrderAccess::fence() ;
 69.1662 +        ev->unpark() ;
 69.1663 +     }
 69.1664 +
 69.1665 +     if (Policy < 4) {
 69.1666 +       iterator->wait_reenter_begin(this);
 69.1667 +     }
 69.1668 +
 69.1669 +     // _WaitSetLock protects the wait queue, not the EntryList.  We could
 69.1670 +     // move the add-to-EntryList operation, above, outside the critical section
 69.1671 +     // protected by _WaitSetLock.  In practice that's not useful.  With the
 69.1672 +     // exception of  wait() timeouts and interrupts the monitor owner
 69.1673 +     // is the only thread that grabs _WaitSetLock.  There's almost no contention
 69.1674 +     // on _WaitSetLock so it's not profitable to reduce the length of the
 69.1675 +     // critical section.
 69.1676 +  }
 69.1677 +
 69.1678 +  Thread::SpinRelease (&_WaitSetLock) ;
 69.1679 +
 69.1680 +  if (iterator != NULL && ObjectMonitor::_sync_Notifications != NULL) {
 69.1681 +     ObjectMonitor::_sync_Notifications->inc() ;
 69.1682 +  }
 69.1683 +}
 69.1684 +
 69.1685 +
 69.1686 +void ObjectMonitor::notifyAll(TRAPS) {
 69.1687 +  CHECK_OWNER();
 69.1688 +  ObjectWaiter* iterator;
 69.1689 +  if (_WaitSet == NULL) {
 69.1690 +      TEVENT (Empty-NotifyAll) ;
 69.1691 +      return ;
 69.1692 +  }
 69.1693 +  DTRACE_MONITOR_PROBE(notifyAll, this, object(), THREAD);
 69.1694 +
 69.1695 +  int Policy = Knob_MoveNotifyee ;
 69.1696 +  int Tally = 0 ;
 69.1697 +  Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notifyall") ;
 69.1698 +
 69.1699 +  for (;;) {
 69.1700 +     iterator = DequeueWaiter () ;
 69.1701 +     if (iterator == NULL) break ;
 69.1702 +     TEVENT (NotifyAll - Transfer1) ;
 69.1703 +     ++Tally ;
 69.1704 +
 69.1705 +     // Disposition - what might we do with iterator ?
 69.1706 +     // a.  add it directly to the EntryList - either tail or head.
 69.1707 +     // b.  push it onto the front of the _cxq.
 69.1708 +     // For now we use (a).
 69.1709 +
 69.1710 +     guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
 69.1711 +     guarantee (iterator->_notified == 0, "invariant") ;
 69.1712 +     iterator->_notified = 1 ;
 69.1713 +     if (Policy != 4) {
 69.1714 +        iterator->TState = ObjectWaiter::TS_ENTER ;
 69.1715 +     }
 69.1716 +
 69.1717 +     ObjectWaiter * List = _EntryList ;
 69.1718 +     if (List != NULL) {
 69.1719 +        assert (List->_prev == NULL, "invariant") ;
 69.1720 +        assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
 69.1721 +        assert (List != iterator, "invariant") ;
 69.1722 +     }
 69.1723 +
 69.1724 +     if (Policy == 0) {       // prepend to EntryList
 69.1725 +         if (List == NULL) {
 69.1726 +             iterator->_next = iterator->_prev = NULL ;
 69.1727 +             _EntryList = iterator ;
 69.1728 +         } else {
 69.1729 +             List->_prev = iterator ;
 69.1730 +             iterator->_next = List ;
 69.1731 +             iterator->_prev = NULL ;
 69.1732 +             _EntryList = iterator ;
 69.1733 +        }
 69.1734 +     } else
 69.1735 +     if (Policy == 1) {      // append to EntryList
 69.1736 +         if (List == NULL) {
 69.1737 +             iterator->_next = iterator->_prev = NULL ;
 69.1738 +             _EntryList = iterator ;
 69.1739 +         } else {
 69.1740 +            // CONSIDER:  finding the tail currently requires a linear-time walk of
 69.1741 +            // the EntryList.  We can make tail access constant-time by converting to
 69.1742 +            // a CDLL instead of using our current DLL.
 69.1743 +            ObjectWaiter * Tail ;
 69.1744 +            for (Tail = List ; Tail->_next != NULL ; Tail = Tail->_next) ;
 69.1745 +            assert (Tail != NULL && Tail->_next == NULL, "invariant") ;
 69.1746 +            Tail->_next = iterator ;
 69.1747 +            iterator->_prev = Tail ;
 69.1748 +            iterator->_next = NULL ;
 69.1749 +        }
 69.1750 +     } else
 69.1751 +     if (Policy == 2) {      // prepend to cxq
 69.1752 +         // prepend to cxq
 69.1753 +         iterator->TState = ObjectWaiter::TS_CXQ ;
 69.1754 +         for (;;) {
 69.1755 +             ObjectWaiter * Front = _cxq ;
 69.1756 +             iterator->_next = Front ;
 69.1757 +             if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) {
 69.1758 +                 break ;
 69.1759 +             }
 69.1760 +         }
 69.1761 +     } else
 69.1762 +     if (Policy == 3) {      // append to cxq
 69.1763 +        iterator->TState = ObjectWaiter::TS_CXQ ;
 69.1764 +        for (;;) {
 69.1765 +            ObjectWaiter * Tail ;
 69.1766 +            Tail = _cxq ;
 69.1767 +            if (Tail == NULL) {
 69.1768 +                iterator->_next = NULL ;
 69.1769 +                if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) {
 69.1770 +                   break ;
 69.1771 +                }
 69.1772 +            } else {
 69.1773 +                while (Tail->_next != NULL) Tail = Tail->_next ;
 69.1774 +                Tail->_next = iterator ;
 69.1775 +                iterator->_prev = Tail ;
 69.1776 +                iterator->_next = NULL ;
 69.1777 +                break ;
 69.1778 +            }
 69.1779 +        }
 69.1780 +     } else {
 69.1781 +        ParkEvent * ev = iterator->_event ;
 69.1782 +        iterator->TState = ObjectWaiter::TS_RUN ;
 69.1783 +        OrderAccess::fence() ;
 69.1784 +        ev->unpark() ;
 69.1785 +     }
 69.1786 +
 69.1787 +     if (Policy < 4) {
 69.1788 +       iterator->wait_reenter_begin(this);
 69.1789 +     }
 69.1790 +
 69.1791 +     // _WaitSetLock protects the wait queue, not the EntryList.  We could
 69.1792 +     // move the add-to-EntryList operation, above, outside the critical section
 69.1793 +     // protected by _WaitSetLock.  In practice that's not useful.  With the
 69.1794 +     // exception of  wait() timeouts and interrupts the monitor owner
 69.1795 +     // is the only thread that grabs _WaitSetLock.  There's almost no contention
 69.1796 +     // on _WaitSetLock so it's not profitable to reduce the length of the
 69.1797 +     // critical section.
 69.1798 +  }
 69.1799 +
 69.1800 +  Thread::SpinRelease (&_WaitSetLock) ;
 69.1801 +
 69.1802 +  if (Tally != 0 && ObjectMonitor::_sync_Notifications != NULL) {
 69.1803 +     ObjectMonitor::_sync_Notifications->inc(Tally) ;
 69.1804 +  }
 69.1805 +}
 69.1806 +
 69.1807 +// -----------------------------------------------------------------------------
 69.1808 +// Adaptive Spinning Support
 69.1809 +//
 69.1810 +// Adaptive spin-then-block - rational spinning
 69.1811 +//
 69.1812 +// Note that we spin "globally" on _owner with a classic SMP-polite TATAS
 69.1813 +// algorithm.  On high order SMP systems it would be better to start with
 69.1814 +// a brief global spin and then revert to spinning locally.  In the spirit of MCS/CLH,
 69.1815 +// a contending thread could enqueue itself on the cxq and then spin locally
 69.1816 +// on a thread-specific variable such as its ParkEvent._Event flag.
 69.1817 +// That's left as an exercise for the reader.  Note that global spinning is
 69.1818 +// not problematic on Niagara, as the L2$ serves the interconnect and has both
 69.1819 +// low latency and massive bandwidth.
 69.1820 +//
 69.1821 +// Broadly, we can fix the spin frequency -- that is, the % of contended lock
 69.1822 +// acquisition attempts where we opt to spin --  at 100% and vary the spin count
 69.1823 +// (duration) or we can fix the count at approximately the duration of
 69.1824 +// a context switch and vary the frequency.   Of course we could also
 69.1825 +// vary both satisfying K == Frequency * Duration, where K is adaptive by monitor.
 69.1826 +// See http://j2se.east/~dice/PERSIST/040824-AdaptiveSpinning.html.
 69.1827 +//
 69.1828 +// This implementation varies the duration "D", where D varies with
 69.1829 +// the success rate of recent spin attempts. (D is capped at approximately
 69.1830 +// the length of a round-trip context switch).  The success rate for recent
 69.1831 +// spin attempts is a good predictor of the success rate of future spin
 69.1832 +// attempts.  The mechanism adapts automatically to varying critical
 69.1833 +// section length (lock modality), system load and degree of parallelism.
 69.1834 +// D is maintained per-monitor in _SpinDuration and is initialized
 69.1835 +// optimistically.  Spin frequency is fixed at 100%.
 69.1836 +//
 69.1837 +// Note that _SpinDuration is volatile, but we update it without locks
 69.1838 +// or atomics.  The code is designed so that _SpinDuration stays within
 69.1839 +// a reasonable range even in the presence of races.  The arithmetic
 69.1840 +// operations on _SpinDuration are closed over the domain of legal values,
 69.1841 +// so at worst a race will install an older but still legal value.
 69.1842 +// At the very worst this introduces some apparent non-determinism.
 69.1843 +// We might spin when we shouldn't or vice-versa, but since the spin
 69.1844 +// counts are relatively short, even in the worst case, the effect is harmless.
 69.1845 +//
 69.1846 +// Care must be taken that a low "D" value does not become an
 69.1847 +// absorbing state.  Transient spinning failures -- when spinning
 69.1848 +// is overall profitable -- should not cause the system to converge
 69.1849 +// on low "D" values.  We want spinning to be stable and predictable
 69.1850 +// and fairly responsive to change and at the same time we don't want
 69.1851 +// it to oscillate, become metastable, be "too" non-deterministic,
 69.1852 +// or converge on or enter undesirable stable absorbing states.
 69.1853 +//
 69.1854 +// We implement a feedback-based control system -- using past behavior
 69.1855 +// to predict future behavior.  We face two issues: (a) if the
 69.1856 +// input signal is random then the spin predictor won't provide optimal
 69.1857 +// results, and (b) if the signal frequency is too high then the control
 69.1858 +// system, which has some natural response lag, will "chase" the signal.
 69.1859 +// (b) can arise from multimodal lock hold times.  Transient preemption
 69.1860 +// can also result in apparent bimodal lock hold times.
 69.1861 +// Although sub-optimal, neither condition is particularly harmful, as
 69.1862 +// in the worst-case we'll spin when we shouldn't or vice-versa.
 69.1863 +// The maximum spin duration is rather short so the failure modes aren't bad.
 69.1864 +// To be conservative, I've tuned the gain in the system to bias toward
 69.1865 +// _not spinning.  Relatedly, the system can sometimes enter a mode where it
 69.1866 +// "rings" or oscillates between spinning and not spinning.  This happens
 69.1867 +// when spinning is just on the cusp of profitability, however, so the
 69.1868 +// situation is not dire.  The state is benign -- there's no need to add
 69.1869 +// hysteresis control to damp the transition rate between spinning and
 69.1870 +// not spinning.
 69.1871 +//
 69.1872 +
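Concretely, the feedback rule described above amounts to the following sketch of
the _SpinDuration updates performed in TrySpin_VaryDuration(), assuming the same
Knob_* tunables are in scope:

   static int AdaptSpinDuration (int D, bool spin_succeeded) {
     if (spin_succeeded) {
       if (D < Knob_SpinLimit) {
         if (D < Knob_Poverty) D = Knob_Poverty ;   // keep D off the absorbing floor
         D += Knob_Bonus ;                          // tend toward longer future spins
       }
     } else {
       D -= Knob_Penalty ;                          // spin failed with prejudice
       if (D < 0) D = 0 ;
     }
     return D ;
   }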
 69.1873 +intptr_t ObjectMonitor::SpinCallbackArgument = 0 ;
 69.1874 +int (*ObjectMonitor::SpinCallbackFunction)(intptr_t, int) = NULL ;
 69.1875 +
 69.1876 +// Spinning: Fixed frequency (100%), vary duration
 69.1877 +
 69.1878 +
 69.1879 +int ObjectMonitor::TrySpin_VaryDuration (Thread * Self) {
 69.1880 +
 69.1881 +    // Dumb, brutal spin.  Good for comparative measurements against adaptive spinning.
 69.1882 +    int ctr = Knob_FixedSpin ;
 69.1883 +    if (ctr != 0) {
 69.1884 +        while (--ctr >= 0) {
 69.1885 +            if (TryLock (Self) > 0) return 1 ;
 69.1886 +            SpinPause () ;
 69.1887 +        }
 69.1888 +        return 0 ;
 69.1889 +    }
 69.1890 +
 69.1891 +    for (ctr = Knob_PreSpin + 1; --ctr >= 0 ; ) {
 69.1892 +      if (TryLock(Self) > 0) {
 69.1893 +        // Increase _SpinDuration ...
 69.1894 +        // Note that we don't clamp SpinDuration precisely at SpinLimit.
 69.1895 +        // Raising _SpinDuration to the poverty line is key.
 69.1896 +        int x = _SpinDuration ;
 69.1897 +        if (x < Knob_SpinLimit) {
 69.1898 +           if (x < Knob_Poverty) x = Knob_Poverty ;
 69.1899 +           _SpinDuration = x + Knob_BonusB ;
 69.1900 +        }
 69.1901 +        return 1 ;
 69.1902 +      }
 69.1903 +      SpinPause () ;
 69.1904 +    }
 69.1905 +
 69.1906 +    // Admission control - verify preconditions for spinning
 69.1907 +    //
 69.1908 +    // We always spin a little bit, just to prevent _SpinDuration == 0 from
 69.1909 +    // becoming an absorbing state.  Put another way, we spin briefly to
 69.1910 +    // sample, just in case the system load, parallelism, contention, or lock
 69.1911 +    // modality changed.
 69.1912 +    //
 69.1913 +    // Consider the following alternative:
 69.1914 +    // Periodically set _SpinDuration = _SpinLimit and try a long/full
 69.1915 +    // spin attempt.  "Periodically" might mean after a tally of
 69.1916 +    // the # of failed spin attempts (or iterations) reaches some threshold.
 69.1917 +    // This takes us into the realm of 1-out-of-N spinning, where we
 69.1918 +    // hold the duration constant but vary the frequency.
 69.1919 +
 69.1920 +    ctr = _SpinDuration  ;
 69.1921 +    if (ctr < Knob_SpinBase) ctr = Knob_SpinBase ;
 69.1922 +    if (ctr <= 0) return 0 ;
 69.1923 +
 69.1924 +    if (Knob_SuccRestrict && _succ != NULL) return 0 ;
 69.1925 +    if (Knob_OState && NotRunnable (Self, (Thread *) _owner)) {
 69.1926 +       TEVENT (Spin abort - notrunnable [TOP]);
 69.1927 +       return 0 ;
 69.1928 +    }
 69.1929 +
 69.1930 +    int MaxSpin = Knob_MaxSpinners ;
 69.1931 +    if (MaxSpin >= 0) {
 69.1932 +       if (_Spinner > MaxSpin) {
 69.1933 +          TEVENT (Spin abort -- too many spinners) ;
 69.1934 +          return 0 ;
 69.1935 +       }
 69.1936 +       // Slightly racy, but benign ...
 69.1937 +       Adjust (&_Spinner, 1) ;
 69.1938 +    }
 69.1939 +
 69.1940 +    // We're good to spin ... spin ingress.
 69.1941 +    // CONSIDER: use Prefetch::write() to avoid RTS->RTO upgrades
 69.1942 +    // when preparing to LD...CAS _owner, etc and the CAS is likely
 69.1943 +    // to succeed.
 69.1944 +    int hits    = 0 ;
 69.1945 +    int msk     = 0 ;
 69.1946 +    int caspty  = Knob_CASPenalty ;
 69.1947 +    int oxpty   = Knob_OXPenalty ;
 69.1948 +    int sss     = Knob_SpinSetSucc ;
 69.1949 +    if (sss && _succ == NULL ) _succ = Self ;
 69.1950 +    Thread * prv = NULL ;
 69.1951 +
 69.1952 +    // There are three ways to exit the following loop:
 69.1953 +    // 1.  A successful spin where this thread has acquired the lock.
 69.1954 +    // 2.  Spin failure with prejudice
 69.1955 +    // 3.  Spin failure without prejudice
 69.1956 +
 69.1957 +    while (--ctr >= 0) {
 69.1958 +
 69.1959 +      // Periodic polling -- Check for pending GC
 69.1960 +      // Threads may spin while they're unsafe.
 69.1961 +      // We don't want spinning threads to delay the JVM from reaching
 69.1962 +      // a stop-the-world safepoint or to steal cycles from GC.
 69.1963 +      // If we detect a pending safepoint we abort in order that
 69.1964 +      // (a) this thread, if unsafe, doesn't delay the safepoint, and (b)
 69.1965 +      // this thread, if safe, doesn't steal cycles from GC.
 69.1966 +      // This is in keeping with the "no loitering in runtime" rule.
 69.1967 +      // We periodically check to see if there's a safepoint pending.
 69.1968 +      if ((ctr & 0xFF) == 0) {
 69.1969 +         if (SafepointSynchronize::do_call_back()) {
 69.1970 +            TEVENT (Spin: safepoint) ;
 69.1971 +            goto Abort ;           // abrupt spin egress
 69.1972 +         }
 69.1973 +         if (Knob_UsePause & 1) SpinPause () ;
 69.1974 +
 69.1975 +         int (*scb)(intptr_t,int) = SpinCallbackFunction ;
 69.1976 +         if (hits > 50 && scb != NULL) {
 69.1977 +            int abend = (*scb)(SpinCallbackArgument, 0) ;
 69.1978 +         }
 69.1979 +      }
 69.1980 +
 69.1981 +      if (Knob_UsePause & 2) SpinPause() ;
 69.1982 +
 69.1983 +      // Exponential back-off ...  Stay off the bus to reduce coherency traffic.
 69.1984 +      // This is useful on classic SMP systems, but is of less utility on
 69.1985 +      // N1-style CMT platforms.
 69.1986 +      //
 69.1987 +      // Trade-off: lock acquisition latency vs coherency bandwidth.
 69.1988 +      // Lock hold times are typically short.  A histogram
 69.1989 +      // of successful spin attempts shows that we usually acquire
 69.1990 +      // the lock early in the spin.  That suggests we want to
 69.1991 +      // sample _owner frequently in the early phase of the spin,
 69.1992 +      // but then back-off and sample less frequently as the spin
 69.1993 +      // progresses.  The back-off makes us a good citizen on big
 69.1994 +      // SMP systems.  Oversampling _owner can consume excessive
 69.1995 +      // coherency bandwidth.  Relatedly, if we oversample _owner we
 69.1996 +      // can inadvertently interfere with the ST m->owner=null
 69.1997 +      // executed by the lock owner.
 69.1998 +      if (ctr & msk) continue ;
 69.1999 +      ++hits ;
 69.2000 +      if ((hits & 0xF) == 0) {
 69.2001 +        // The 0xF, above, corresponds to the exponent.
 69.2002 +        // Consider: (msk+1)|msk
 69.2003 +        msk = ((msk << 2)|3) & BackOffMask ;
 69.2004 +      }
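      // With msk starting at 0 and growing 0x3 -> 0xF -> 0x3F -> 0xFF ... every
      // 16 hits (capped by BackOffMask), the (ctr & msk) test above means _owner
      // is probed only about once every msk+1 iterations as the spin progresses.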
 69.2005 +
 69.2006 +      // Probe _owner with TATAS
 69.2007 +      // If this thread observes the monitor transition or flicker
 69.2008 +      // from locked to unlocked to locked, then the odds that this
 69.2009 +      // thread will acquire the lock in this spin attempt go down
 69.2010 +      // considerably.  The same argument applies if the CAS fails
 69.2011 +      // or if we observe _owner change from one non-null value to
 69.2012 +      // another non-null value.   In such cases we might abort
 69.2013 +      // the spin without prejudice or apply a "penalty" to the
 69.2014 +      // spin count-down variable "ctr", reducing it by 100, say.
 69.2015 +
 69.2016 +      Thread * ox = (Thread *) _owner ;
 69.2017 +      if (ox == NULL) {
 69.2018 +         ox = (Thread *) Atomic::cmpxchg_ptr (Self, &_owner, NULL) ;
 69.2019 +         if (ox == NULL) {
 69.2020 +            // The CAS succeeded -- this thread acquired ownership
 69.2021 +            // Take care of some bookkeeping to exit spin state.
 69.2022 +            if (sss && _succ == Self) {
 69.2023 +               _succ = NULL ;
 69.2024 +            }
 69.2025 +            if (MaxSpin > 0) Adjust (&_Spinner, -1) ;
 69.2026 +
 69.2027 +            // Increase _SpinDuration :
 69.2028 +            // The spin was successful (profitable) so we tend toward
 69.2029 +            // longer spin attempts in the future.
 69.2030 +            // CONSIDER: factor "ctr" into the _SpinDuration adjustment.
 69.2031 +            // If we acquired the lock early in the spin cycle it
 69.2032 +            // makes sense to increase _SpinDuration proportionally.
 69.2033 +            // Note that we don't clamp SpinDuration precisely at SpinLimit.
 69.2034 +            int x = _SpinDuration ;
 69.2035 +            if (x < Knob_SpinLimit) {
 69.2036 +                if (x < Knob_Poverty) x = Knob_Poverty ;
 69.2037 +                _SpinDuration = x + Knob_Bonus ;
 69.2038 +            }
 69.2039 +            return 1 ;
 69.2040 +         }
 69.2041 +
 69.2042 +         // The CAS failed ... we can take any of the following actions:
 69.2043 +         // * penalize: ctr -= Knob_CASPenalty
 69.2044 +         // * exit spin with prejudice -- goto Abort;
 69.2045 +         // * exit spin without prejudice.
 69.2046 +         // * Since CAS is high-latency, retry again immediately.
 69.2047 +         prv = ox ;
 69.2048 +         TEVENT (Spin: cas failed) ;
 69.2049 +         if (caspty == -2) break ;
 69.2050 +         if (caspty == -1) goto Abort ;
 69.2051 +         ctr -= caspty ;
 69.2052 +         continue ;
 69.2053 +      }
 69.2054 +
 69.2055 +      // Did lock ownership change hands ?
 69.2056 +      if (ox != prv && prv != NULL ) {
 69.2057 +          TEVENT (spin: Owner changed)
 69.2058 +          if (oxpty == -2) break ;
 69.2059 +          if (oxpty == -1) goto Abort ;
 69.2060 +          ctr -= oxpty ;
 69.2061 +      }
 69.2062 +      prv = ox ;
 69.2063 +
 69.2064 +      // Abort the spin if the owner is not executing.
 69.2065 +      // The owner must be executing in order to drop the lock.
 69.2066 +      // Spinning while the owner is OFFPROC is idiocy.
 69.2067 +      // Consider: ctr -= RunnablePenalty ;
 69.2068 +      if (Knob_OState && NotRunnable (Self, ox)) {
 69.2069 +         TEVENT (Spin abort - notrunnable);
 69.2070 +         goto Abort ;
 69.2071 +      }
 69.2072 +      if (sss && _succ == NULL ) _succ = Self ;
 69.2073 +   }
 69.2074 +
 69.2075 +   // Spin failed with prejudice -- reduce _SpinDuration.
 69.2076 +   // TODO: Use an AIMD-like policy to adjust _SpinDuration.
 69.2077 +   // AIMD is globally stable.
 69.2078 +   TEVENT (Spin failure) ;
 69.2079 +   {
 69.2080 +     int x = _SpinDuration ;
 69.2081 +     if (x > 0) {
 69.2082 +        // Consider an AIMD scheme like: x -= (x >> 3) + 100
 69.2083 +        // This is globally stable and tends to damp the response.
 69.2084 +        x -= Knob_Penalty ;
 69.2085 +        if (x < 0) x = 0 ;
 69.2086 +        _SpinDuration = x ;
 69.2087 +     }
 69.2088 +   }
 69.2089 +
 69.2090 + Abort:
 69.2091 +   if (MaxSpin >= 0) Adjust (&_Spinner, -1) ;
 69.2092 +   if (sss && _succ == Self) {
 69.2093 +      _succ = NULL ;
 69.2094 +      // Invariant: after setting succ=null a contending thread
 69.2095 +      // must recheck-retry _owner before parking.  This usually happens
 69.2096 +      // in the normal usage of TrySpin(), but it's safest
 69.2097 +      // to make TrySpin() as foolproof as possible.
 69.2098 +      OrderAccess::fence() ;
 69.2099 +      if (TryLock(Self) > 0) return 1 ;
 69.2100 +   }
 69.2101 +   return 0 ;
 69.2102 +}
 69.2103 +
 69.2104 +// NotRunnable() -- informed spinning
 69.2105 +//
 69.2106 +// Don't bother spinning if the owner is not eligible to drop the lock.
 69.2107 +// Peek at the owner's schedctl.sc_state and Thread._thread_state and
 69.2108 +// spin only if the owner thread is _thread_in_Java or _thread_in_vm.
 69.2109 +// The thread must be runnable in order to drop the lock in timely fashion.
 69.2110 +// If the _owner is not runnable then spinning will not likely be
 69.2111 +// successful (profitable).
 69.2112 +//
 69.2113 +// Beware -- the thread referenced by _owner could have died
 69.2114 +// so a simple fetch from _owner->_thread_state might trap.
 69.2115 +// Instead, we use SafeFetchXX() to safely LD _owner->_thread_state.
 69.2116 +// Because of the lifecycle issues the schedctl and _thread_state values
 69.2117 +// observed by NotRunnable() might be garbage.  NotRunnable must
 69.2118 +// tolerate this and consider the observed _thread_state value
 69.2119 +// as advisory.
 69.2120 +//
 69.2121 +// Beware too, that _owner is sometimes a BasicLock address and sometimes
 69.2122 +// a thread pointer.  We differentiate the two cases with OwnerIsThread.
 69.2123 +// Alternately, we might tag the type (thread pointer vs basiclock pointer)
 69.2124 +// with the LSB of _owner.  Another option would be to probabilistically probe
 69.2125 +// the putative _owner->TypeTag value.
 69.2126 +//
 69.2127 +// Checking _thread_state isn't perfect.  Even if the thread is
 69.2128 +// in_java it might be blocked on a page-fault or have been preempted
 69.2129 +// and sitting on a ready/dispatch queue.  _thread_state in conjunction
 69.2130 +// with schedctl.sc_state gives us a good picture of what the
 69.2131 +// thread is doing, however.
 69.2132 +//
 69.2133 +// TODO: check schedctl.sc_state.
 69.2134 +// We'll need to use SafeFetch32() to read from the schedctl block.
 69.2135 +// See RFE #5004247 and http://sac.sfbay.sun.com/Archives/CaseLog/arc/PSARC/2005/351/
 69.2136 +//
 69.2137 +// The return value from NotRunnable() is *advisory* -- the
 69.2138 +// result is based on sampling and is not necessarily coherent.
 69.2139 +// The caller must tolerate false-negative and false-positive errors.
 69.2140 +// Spinning, in general, is probabilistic anyway.
 69.2141 +
 69.2142 +
 69.2143 +int ObjectMonitor::NotRunnable (Thread * Self, Thread * ox) {
 69.2144 +    // Check either OwnerIsThread or ox->TypeTag == 2BAD.
 69.2145 +    if (!OwnerIsThread) return 0 ;
 69.2146 +
 69.2147 +    if (ox == NULL) return 0 ;
 69.2148 +
 69.2149 +    // Avoid transitive spinning ...
 69.2150 +    // Say T1 spins or blocks trying to acquire L.  T1._Stalled is set to L.
 69.2151 +    // Immediately after T1 acquires L it's possible that T2, also
 69.2152 +    // spinning on L, will see L.Owner=T1 and T1._Stalled=L.
 69.2153 +    // This occurs transiently after T1 acquired L but before
 69.2154 +    // T1 managed to clear T1.Stalled.  T2 does not need to abort
 69.2155 +    // its spin in this circumstance.
 69.2156 +    intptr_t BlockedOn = SafeFetchN ((intptr_t *) &ox->_Stalled, intptr_t(1)) ;
 69.2157 +
 69.2158 +    if (BlockedOn == 1) return 1 ;
 69.2159 +    if (BlockedOn != 0) {
 69.2160 +      return BlockedOn != intptr_t(this) && _owner == ox ;
 69.2161 +    }
 69.2162 +
 69.2163 +    assert (sizeof(((JavaThread *)ox)->_thread_state) == sizeof(int), "invariant") ;
 69.2164 +    int jst = SafeFetch32 ((int *) &((JavaThread *) ox)->_thread_state, -1) ;
 69.2165 +    // consider also: jst != _thread_in_Java -- but that's overspecific.
 69.2166 +    return jst == _thread_blocked || jst == _thread_in_native ;
 69.2167 +}
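// Sketch of how an advisory predicate like NotRunnable() is typically
// consumed: its answer only biases the spin-vs-park decision and is never
// treated as ground truth, since the sampled state may be stale.  All of
// the names below (try_lock, owner_not_runnable, spin_pause, park) are
// placeholders for this sketch, not HotSpot entry points.
static int SpinThenPark (int budget,
                         bool (*try_lock) (),
                         bool (*owner_not_runnable) (),
                         void (*spin_pause) (),
                         void (*park) ()) {
  while (--budget >= 0) {
    if (try_lock ()) return 1 ;              // acquired while spinning
    if (owner_not_runnable ()) break ;       // owner likely off-CPU -- stop burning cycles
    spin_pause () ;
  }
  park () ;                                  // fall back to blocking
  return 0 ;
}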
 69.2168 +
 69.2169 +
 69.2170 +// -----------------------------------------------------------------------------
 69.2171 +// WaitSet management ...
 69.2172 +
 69.2173 +ObjectWaiter::ObjectWaiter(Thread* thread) {
 69.2174 +  _next     = NULL;
 69.2175 +  _prev     = NULL;
 69.2176 +  _notified = 0;
 69.2177 +  TState    = TS_RUN ;
 69.2178 +  _thread   = thread;
 69.2179 +  _event    = thread->_ParkEvent ;
 69.2180 +  _active   = false;
 69.2181 +  assert (_event != NULL, "invariant") ;
 69.2182 +}
 69.2183 +
 69.2184 +void ObjectWaiter::wait_reenter_begin(ObjectMonitor *mon) {
 69.2185 +  JavaThread *jt = (JavaThread *)this->_thread;
 69.2186 +  _active = JavaThreadBlockedOnMonitorEnterState::wait_reenter_begin(jt, mon);
 69.2187 +}
 69.2188 +
 69.2189 +void ObjectWaiter::wait_reenter_end(ObjectMonitor *mon) {
 69.2190 +  JavaThread *jt = (JavaThread *)this->_thread;
 69.2191 +  JavaThreadBlockedOnMonitorEnterState::wait_reenter_end(jt, _active);
 69.2192 +}
 69.2193 +
 69.2194 +inline void ObjectMonitor::AddWaiter(ObjectWaiter* node) {
 69.2195 +  assert(node != NULL, "should not dequeue NULL node");
 69.2196 +  assert(node->_prev == NULL, "node already in list");
 69.2197 +  assert(node->_next == NULL, "node already in list");
 69.2198 +  // put node at end of queue (circular doubly linked list)
 69.2199 +  if (_WaitSet == NULL) {
 69.2200 +    _WaitSet = node;
 69.2201 +    node->_prev = node;
 69.2202 +    node->_next = node;
 69.2203 +  } else {
 69.2204 +    ObjectWaiter* head = _WaitSet ;
 69.2205 +    ObjectWaiter* tail = head->_prev;
 69.2206 +    assert(tail->_next == head, "invariant check");
 69.2207 +    tail->_next = node;
 69.2208 +    head->_prev = node;
 69.2209 +    node->_next = head;
 69.2210 +    node->_prev = tail;
 69.2211 +  }
 69.2212 +}
 69.2213 +
 69.2214 +inline ObjectWaiter* ObjectMonitor::DequeueWaiter() {
 69.2215 +  // dequeue the very first waiter
 69.2216 +  ObjectWaiter* waiter = _WaitSet;
 69.2217 +  if (waiter) {
 69.2218 +    DequeueSpecificWaiter(waiter);
 69.2219 +  }
 69.2220 +  return waiter;
 69.2221 +}
 69.2222 +
 69.2223 +inline void ObjectMonitor::DequeueSpecificWaiter(ObjectWaiter* node) {
 69.2224 +  assert(node != NULL, "should not dequeue NULL node");
 69.2225 +  assert(node->_prev != NULL, "node already removed from list");
 69.2226 +  assert(node->_next != NULL, "node already removed from list");
 69.2227 +  // when the waiter has woken up because of interrupt,
 69.2228 +  // timeout or other spurious wake-up, dequeue the
 69.2229 +  // waiter from waiting list
 69.2230 +  ObjectWaiter* next = node->_next;
 69.2231 +  if (next == node) {
 69.2232 +    assert(node->_prev == node, "invariant check");
 69.2233 +    _WaitSet = NULL;
 69.2234 +  } else {
 69.2235 +    ObjectWaiter* prev = node->_prev;
 69.2236 +    assert(prev->_next == node, "invariant check");
 69.2237 +    assert(next->_prev == node, "invariant check");
 69.2238 +    next->_prev = prev;
 69.2239 +    prev->_next = next;
 69.2240 +    if (_WaitSet == node) {
 69.2241 +      _WaitSet = next;
 69.2242 +    }
 69.2243 +  }
 69.2244 +  node->_next = NULL;
 69.2245 +  node->_prev = NULL;
 69.2246 +}
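// Standalone illustration of the circular doubly-linked discipline used by
// AddWaiter()/DequeueSpecificWaiter() above: the head's _prev is the tail,
// and a singleton ring points at itself.  'Ring' and 'ring_append' are
// local names for this sketch only.
struct Ring { Ring * _next ; Ring * _prev ; } ;

static void ring_append (Ring ** head, Ring * n) {
  if (*head == NULL) {
    *head = n ;
    n->_next = n ;                           // singleton ring: points at itself
    n->_prev = n ;
  } else {
    Ring * h = *head ;
    Ring * t = h->_prev ;                    // tail is always head->_prev
    t->_next = n ; n->_prev = t ;
    n->_next = h ; h->_prev = n ;
  }
}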
 69.2247 +
 69.2248 +// -----------------------------------------------------------------------------
 69.2249 +// PerfData support
 69.2250 +PerfCounter * ObjectMonitor::_sync_ContendedLockAttempts       = NULL ;
 69.2251 +PerfCounter * ObjectMonitor::_sync_FutileWakeups               = NULL ;
 69.2252 +PerfCounter * ObjectMonitor::_sync_Parks                       = NULL ;
 69.2253 +PerfCounter * ObjectMonitor::_sync_EmptyNotifications          = NULL ;
 69.2254 +PerfCounter * ObjectMonitor::_sync_Notifications               = NULL ;
 69.2255 +PerfCounter * ObjectMonitor::_sync_PrivateA                    = NULL ;
 69.2256 +PerfCounter * ObjectMonitor::_sync_PrivateB                    = NULL ;
 69.2257 +PerfCounter * ObjectMonitor::_sync_SlowExit                    = NULL ;
 69.2258 +PerfCounter * ObjectMonitor::_sync_SlowEnter                   = NULL ;
 69.2259 +PerfCounter * ObjectMonitor::_sync_SlowNotify                  = NULL ;
 69.2260 +PerfCounter * ObjectMonitor::_sync_SlowNotifyAll               = NULL ;
 69.2261 +PerfCounter * ObjectMonitor::_sync_FailedSpins                 = NULL ;
 69.2262 +PerfCounter * ObjectMonitor::_sync_SuccessfulSpins             = NULL ;
 69.2263 +PerfCounter * ObjectMonitor::_sync_MonInCirculation            = NULL ;
 69.2264 +PerfCounter * ObjectMonitor::_sync_MonScavenged                = NULL ;
 69.2265 +PerfCounter * ObjectMonitor::_sync_Inflations                  = NULL ;
 69.2266 +PerfCounter * ObjectMonitor::_sync_Deflations                  = NULL ;
 69.2267 +PerfLongVariable * ObjectMonitor::_sync_MonExtant              = NULL ;
 69.2268 +
 69.2269 +// One-shot global initialization for the sync subsystem.
 69.2270 +// We could also defer initialization and initialize on-demand
 69.2271 +// the first time we call inflate().  Initialization would
 69.2272 +// be protected - like so many things - by the MonitorCache_lock.
 69.2273 +
 69.2274 +void ObjectMonitor::Initialize () {
 69.2275 +  static int InitializationCompleted = 0 ;
 69.2276 +  assert (InitializationCompleted == 0, "invariant") ;
 69.2277 +  InitializationCompleted = 1 ;
 69.2278 +  if (UsePerfData) {
 69.2279 +      EXCEPTION_MARK ;
 69.2280 +      #define NEWPERFCOUNTER(n)   {n = PerfDataManager::create_counter(SUN_RT, #n, PerfData::U_Events,CHECK); }
 69.2281 +      #define NEWPERFVARIABLE(n)  {n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events,CHECK); }
 69.2282 +      NEWPERFCOUNTER(_sync_Inflations) ;
 69.2283 +      NEWPERFCOUNTER(_sync_Deflations) ;
 69.2284 +      NEWPERFCOUNTER(_sync_ContendedLockAttempts) ;
 69.2285 +      NEWPERFCOUNTER(_sync_FutileWakeups) ;
 69.2286 +      NEWPERFCOUNTER(_sync_Parks) ;
 69.2287 +      NEWPERFCOUNTER(_sync_EmptyNotifications) ;
 69.2288 +      NEWPERFCOUNTER(_sync_Notifications) ;
 69.2289 +      NEWPERFCOUNTER(_sync_SlowEnter) ;
 69.2290 +      NEWPERFCOUNTER(_sync_SlowExit) ;
 69.2291 +      NEWPERFCOUNTER(_sync_SlowNotify) ;
 69.2292 +      NEWPERFCOUNTER(_sync_SlowNotifyAll) ;
 69.2293 +      NEWPERFCOUNTER(_sync_FailedSpins) ;
 69.2294 +      NEWPERFCOUNTER(_sync_SuccessfulSpins) ;
 69.2295 +      NEWPERFCOUNTER(_sync_PrivateA) ;
 69.2296 +      NEWPERFCOUNTER(_sync_PrivateB) ;
 69.2297 +      NEWPERFCOUNTER(_sync_MonInCirculation) ;
 69.2298 +      NEWPERFCOUNTER(_sync_MonScavenged) ;
 69.2299 +      NEWPERFVARIABLE(_sync_MonExtant) ;
 69.2300 +      #undef NEWPERFCOUNTER
 69.2301 +  }
 69.2302 +}
 69.2303 +
 69.2304 +
 69.2305 +// Compile-time asserts
 69.2306 +// When possible, it's better to catch errors deterministically at
 69.2307 +// compile-time than at runtime.  The down-side to using compile-time
 69.2308 +// asserts is that the error message -- often something about negative array
 69.2309 +// indices -- is opaque.
 69.2310 +
 69.2311 +#define CTASSERT(x) { int tag[1-(2*!(x))]; printf ("Tag @" INTPTR_FORMAT "\n", (intptr_t)tag); }
 69.2312 +
 69.2313 +void ObjectMonitor::ctAsserts() {
 69.2314 +  CTASSERT(offset_of (ObjectMonitor, _header) == 0);
 69.2315 +}
 69.2316 +
 69.2317 +
 69.2318 +static char * kvGet (char * kvList, const char * Key) {
 69.2319 +    if (kvList == NULL) return NULL ;
 69.2320 +    size_t n = strlen (Key) ;
 69.2321 +    char * Search ;
 69.2322 +    for (Search = kvList ; *Search ; Search += strlen(Search) + 1) {
 69.2323 +        if (strncmp (Search, Key, n) == 0) {
 69.2324 +            if (Search[n] == '=') return Search + n + 1 ;
 69.2325 +            if (Search[n] == 0)   return (char *) "1" ;
 69.2326 +        }
 69.2327 +    }
 69.2328 +    return NULL ;
 69.2329 +}
 69.2330 +
 69.2331 +static int kvGetInt (char * kvList, const char * Key, int Default) {
 69.2332 +    char * v = kvGet (kvList, Key) ;
 69.2333 +    int rslt = v ? ::strtol (v, NULL, 0) : Default ;
 69.2334 +    if (Knob_ReportSettings && v != NULL) {
 69.2335 +        ::printf ("  SyncKnob: %s %d(%d)\n", Key, rslt, Default) ;
 69.2336 +        ::fflush (stdout) ;
 69.2337 +    }
 69.2338 +    return rslt ;
 69.2339 +}
 69.2340 +
 69.2341 +void ObjectMonitor::DeferredInitialize () {
 69.2342 +  if (InitDone > 0) return ;
 69.2343 +  if (Atomic::cmpxchg (-1, &InitDone, 0) != 0) {
 69.2344 +      while (InitDone != 1) ;
 69.2345 +      return ;
 69.2346 +  }
 69.2347 +
 69.2348 +  // One-shot global initialization ...
 69.2349 +  // The initialization is idempotent, so we don't need locks.
 69.2350 +  // In the future consider doing this via os::init_2().
 69.2351 +  // SyncKnobs consist of <Key>=<Value> pairs in the style
 69.2352 +  // of environment variables.  Start by converting ':' to NUL.
 69.2353 +
 69.2354 +  if (SyncKnobs == NULL) SyncKnobs = "" ;
 69.2355 +
 69.2356 +  size_t sz = strlen (SyncKnobs) ;
 69.2357 +  char * knobs = (char *) malloc (sz + 2) ;
 69.2358 +  if (knobs == NULL) {
 69.2359 +     vm_exit_out_of_memory (sz + 2, "Parse SyncKnobs") ;
 69.2360 +     guarantee (0, "invariant") ;
 69.2361 +  }
 69.2362 +  strcpy (knobs, SyncKnobs) ;
 69.2363 +  knobs[sz+1] = 0 ;
 69.2364 +  for (char * p = knobs ; *p ; p++) {
 69.2365 +     if (*p == ':') *p = 0 ;
 69.2366 +  }
 69.2367 +
 69.2368 +  #define SETKNOB(x) { Knob_##x = kvGetInt (knobs, #x, Knob_##x); }
 69.2369 +  SETKNOB(ReportSettings) ;
 69.2370 +  SETKNOB(Verbose) ;
 69.2371 +  SETKNOB(FixedSpin) ;
 69.2372 +  SETKNOB(SpinLimit) ;
 69.2373 +  SETKNOB(SpinBase) ;
 69.2374 +  SETKNOB(SpinBackOff);
 69.2375 +  SETKNOB(CASPenalty) ;
 69.2376 +  SETKNOB(OXPenalty) ;
 69.2377 +  SETKNOB(LogSpins) ;
 69.2378 +  SETKNOB(SpinSetSucc) ;
 69.2379 +  SETKNOB(SuccEnabled) ;
 69.2380 +  SETKNOB(SuccRestrict) ;
 69.2381 +  SETKNOB(Penalty) ;
 69.2382 +  SETKNOB(Bonus) ;
 69.2383 +  SETKNOB(BonusB) ;
 69.2384 +  SETKNOB(Poverty) ;
 69.2385 +  SETKNOB(SpinAfterFutile) ;
 69.2386 +  SETKNOB(UsePause) ;
 69.2387 +  SETKNOB(SpinEarly) ;
 69.2388 +  SETKNOB(OState) ;
 69.2389 +  SETKNOB(MaxSpinners) ;
 69.2390 +  SETKNOB(PreSpin) ;
 69.2391 +  SETKNOB(ExitPolicy) ;
 69.2392 +  SETKNOB(QMode);
 69.2393 +  SETKNOB(ResetEvent) ;
 69.2394 +  SETKNOB(MoveNotifyee) ;
 69.2395 +  SETKNOB(FastHSSEC) ;
 69.2396 +  #undef SETKNOB
 69.2397 +
 69.2398 +  if (os::is_MP()) {
 69.2399 +     BackOffMask = (1 << Knob_SpinBackOff) - 1 ;
 69.2400 +     if (Knob_ReportSettings) ::printf ("BackOffMask=%X\n", BackOffMask) ;
 69.2401 +     // CONSIDER: BackOffMask = ROUNDUP_NEXT_POWER2 (ncpus-1)
 69.2402 +  } else {
 69.2403 +     Knob_SpinLimit = 0 ;
 69.2404 +     Knob_SpinBase  = 0 ;
 69.2405 +     Knob_PreSpin   = 0 ;
 69.2406 +     Knob_FixedSpin = -1 ;
 69.2407 +  }
 69.2408 +
 69.2409 +  if (Knob_LogSpins == 0) {
 69.2410 +     ObjectMonitor::_sync_FailedSpins = NULL ;
 69.2411 +  }
 69.2412 +
 69.2413 +  free (knobs) ;
 69.2414 +  OrderAccess::fence() ;
 69.2415 +  InitDone = 1 ;
 69.2416 +}
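// Example of the SyncKnobs string parsed above -- colon-separated
// <Key>=<Value> pairs, where a bare key with no '=' reads as "1"
// (see kvGet()).  Assuming the string is supplied through the SyncKnobs
// -XX flag, something like
//
//   -XX:SyncKnobs=SpinLimit=4096:Verbose:ReportSettings=1
//
// would make kvGetInt(knobs, "SpinLimit", Knob_SpinLimit) return 4096
// and kvGetInt(knobs, "Verbose", 0) return 1.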
 69.2417 +
 69.2418 +#ifndef PRODUCT
 69.2419 +void ObjectMonitor::verify() {
 69.2420 +}
 69.2421 +
 69.2422 +void ObjectMonitor::print() {
 69.2423 +}
 69.2424 +#endif
    70.1 --- a/src/share/vm/runtime/objectMonitor.hpp	Thu Nov 04 15:19:16 2010 -0700
    70.2 +++ b/src/share/vm/runtime/objectMonitor.hpp	Thu Nov 04 16:17:54 2010 -0700
    70.3 @@ -22,6 +22,32 @@
    70.4   *
    70.5   */
    70.6  
    70.7 +
    70.8 +// ObjectWaiter serves as a "proxy" or surrogate thread.
    70.9 +// TODO-FIXME: Eliminate ObjectWaiter and use the thread-specific
   70.10 +// ParkEvent instead.  Beware, however, that the JVMTI code
   70.11 +// knows about ObjectWaiters, so we'll have to reconcile that code.
   70.12 +// See next_waiter(), first_waiter(), etc.
   70.13 +
   70.14 +class ObjectWaiter : public StackObj {
   70.15 + public:
   70.16 +  enum TStates { TS_UNDEF, TS_READY, TS_RUN, TS_WAIT, TS_ENTER, TS_CXQ } ;
   70.17 +  enum Sorted  { PREPEND, APPEND, SORTED } ;
   70.18 +  ObjectWaiter * volatile _next;
   70.19 +  ObjectWaiter * volatile _prev;
   70.20 +  Thread*       _thread;
   70.21 +  ParkEvent *   _event;
   70.22 +  volatile int  _notified ;
   70.23 +  volatile TStates TState ;
   70.24 +  Sorted        _Sorted ;           // List placement disposition
   70.25 +  bool          _active ;           // Contention monitoring is enabled
   70.26 + public:
   70.27 +  ObjectWaiter(Thread* thread);
   70.28 +
   70.29 +  void wait_reenter_begin(ObjectMonitor *mon);
   70.30 +  void wait_reenter_end(ObjectMonitor *mon);
   70.31 +};
   70.32 +
   70.33  // WARNING:
   70.34  //   This is a very sensitive and fragile class. DO NOT make any
   70.35  // change unless you are fully aware of the underlying semantics.
   70.36 @@ -38,8 +64,6 @@
   70.37  // It is also used as RawMonitor by the JVMTI
   70.38  
   70.39  
   70.40 -class ObjectWaiter;
   70.41 -
   70.42  class ObjectMonitor {
   70.43   public:
   70.44    enum {
   70.45 @@ -74,13 +98,16 @@
   70.46  
   70.47  
   70.48   public:
   70.49 -  ObjectMonitor();
   70.50 -  ~ObjectMonitor();
   70.51 -
   70.52    markOop   header() const;
   70.53    void      set_header(markOop hdr);
   70.54  
   70.55 -  intptr_t  is_busy() const;
   70.56 +  intptr_t is_busy() const {
   70.57 +    // TODO-FIXME: merge _count and _waiters.
   70.58 +    // TODO-FIXME: assert _owner == null implies _recursions = 0
   70.59 +    // TODO-FIXME: assert _WaitSet != null implies _count > 0
   70.60 +    return _count|_waiters|intptr_t(_owner)|intptr_t(_cxq)|intptr_t(_EntryList ) ;
   70.61 +  }
   70.62 +
   70.63    intptr_t  is_entered(Thread* current) const;
   70.64  
   70.65    void*     owner() const;
   70.66 @@ -91,13 +118,58 @@
   70.67    intptr_t  count() const;
   70.68    void      set_count(intptr_t count);
   70.69    intptr_t  contentions() const ;
   70.70 +  intptr_t  recursions() const                                         { return _recursions; }
   70.71  
   70.72    // JVM/DI GetMonitorInfo() needs this
   70.73 -  Thread *  thread_of_waiter (ObjectWaiter *) ;
   70.74 -  ObjectWaiter * first_waiter () ;
   70.75 -  ObjectWaiter * next_waiter(ObjectWaiter* o);
   70.76 +  ObjectWaiter* first_waiter()                                         { return _WaitSet; }
   70.77 +  ObjectWaiter* next_waiter(ObjectWaiter* o)                           { return o->_next; }
   70.78 +  Thread* thread_of_waiter(ObjectWaiter* o)                            { return o->_thread; }
   70.79  
   70.80 -  intptr_t  recursions() const { return _recursions; }
   70.81 +  // initialize the monitor, excepting the semaphore; all other fields
   70.82 +  // are simple integers or pointers
   70.83 +  ObjectMonitor() {
   70.84 +    _header       = NULL;
   70.85 +    _count        = 0;
   70.86 +    _waiters      = 0;
   70.87 +    _recursions   = 0;
   70.88 +    _object       = NULL;
   70.89 +    _owner        = NULL;
   70.90 +    _WaitSet      = NULL;
   70.91 +    _WaitSetLock  = 0 ;
   70.92 +    _Responsible  = NULL ;
   70.93 +    _succ         = NULL ;
   70.94 +    _cxq          = NULL ;
   70.95 +    FreeNext      = NULL ;
   70.96 +    _EntryList    = NULL ;
   70.97 +    _SpinFreq     = 0 ;
   70.98 +    _SpinClock    = 0 ;
   70.99 +    OwnerIsThread = 0 ;
  70.100 +  }
  70.101 +
  70.102 +  ~ObjectMonitor() {
  70.103 +   // TODO: Add asserts ...
  70.104 +   // _cxq == 0 _succ == NULL _owner == NULL _waiters == 0
  70.105 +   // _count == 0 _EntryList  == NULL etc
  70.106 +  }
  70.107 +
  70.108 +private:
  70.109 +  void Recycle () {
  70.110 +    // TODO: add stronger asserts ...
  70.111 +    // _cxq == 0 _succ == NULL _owner == NULL _waiters == 0
  70.112 +    // _count == 0 EntryList  == NULL
  70.113 +    // _recursions == 0 _WaitSet == NULL
  70.114 +    // TODO: assert (is_busy()|_recursions) == 0
  70.115 +    _succ          = NULL ;
  70.116 +    _EntryList     = NULL ;
  70.117 +    _cxq           = NULL ;
  70.118 +    _WaitSet       = NULL ;
  70.119 +    _recursions    = 0 ;
  70.120 +    _SpinFreq      = 0 ;
  70.121 +    _SpinClock     = 0 ;
  70.122 +    OwnerIsThread  = 0 ;
  70.123 +  }
  70.124 +
  70.125 +public:
  70.126  
  70.127    void*     object() const;
  70.128    void*     object_addr();
  70.129 @@ -122,22 +194,9 @@
  70.130    intptr_t  complete_exit(TRAPS);
  70.131    void      reenter(intptr_t recursions, TRAPS);
  70.132  
  70.133 -  int       raw_enter(TRAPS);
  70.134 -  int       raw_exit(TRAPS);
  70.135 -  int       raw_wait(jlong millis, bool interruptable, TRAPS);
  70.136 -  int       raw_notify(TRAPS);
  70.137 -  int       raw_notifyAll(TRAPS);
  70.138 -
  70.139   private:
  70.140 -  // JVMTI support -- remove ASAP
  70.141 -  int       SimpleEnter (Thread * Self) ;
  70.142 -  int       SimpleExit  (Thread * Self) ;
  70.143 -  int       SimpleWait  (Thread * Self, jlong millis) ;
  70.144 -  int       SimpleNotify (Thread * Self, bool All) ;
  70.145 -
  70.146 - private:
  70.147 -  void      Recycle () ;
  70.148    void      AddWaiter (ObjectWaiter * waiter) ;
  70.149 +  static    void DeferredInitialize();
  70.150  
  70.151    ObjectWaiter * DequeueWaiter () ;
  70.152    void      DequeueSpecificWaiter (ObjectWaiter * waiter) ;
  70.153 @@ -172,13 +231,17 @@
  70.154    // The VM assumes write ordering wrt these fields, which can be
  70.155    // read from other threads.
  70.156  
  70.157 + protected:                         // protected for jvmtiRawMonitor
  70.158    void *  volatile _owner;          // pointer to owning thread OR BasicLock
  70.159    volatile intptr_t  _recursions;   // recursion count, 0 for first entry
  70.160 + private:
  70.161    int OwnerIsThread ;               // _owner is (Thread *) vs SP/BasicLock
  70.162    ObjectWaiter * volatile _cxq ;    // LL of recently-arrived threads blocked on entry.
  70.163                                      // The list is actually composed of WaitNodes, acting
  70.164                                      // as proxies for Threads.
  70.165 + protected:
  70.166    ObjectWaiter * volatile _EntryList ;     // Threads blocked on entry or reentry.
  70.167 + private:
  70.168    Thread * volatile _succ ;          // Heir presumptive thread - used for futile wakeup throttling
  70.169    Thread * volatile _Responsible ;
  70.170    int _PromptDrain ;                // rqst to drain cxq into EntryList ASAP
  70.171 @@ -196,8 +259,12 @@
  70.172    volatile intptr_t  _count;        // reference count to prevent reclaimation/deflation
  70.173                                      // at stop-the-world time.  See deflate_idle_monitors().
  70.174                                      // _count is approximately |_WaitSet| + |_EntryList|
  70.175 + protected:
  70.176    volatile intptr_t  _waiters;      // number of waiting threads
  70.177 + private:
  70.178 + protected:
  70.179    ObjectWaiter * volatile _WaitSet; // LL of threads wait()ing on the monitor
  70.180 + private:
  70.181    volatile int _WaitSetLock;        // protects Wait Queue - simple spinlock
  70.182  
  70.183   public:
  70.184 @@ -205,4 +272,37 @@
  70.185    ObjectMonitor * FreeNext ;        // Free list linkage
  70.186    intptr_t StatA, StatsB ;
  70.187  
  70.188 + public:
  70.189 +  static void Initialize () ;
  70.190 +  static PerfCounter * _sync_ContendedLockAttempts ;
  70.191 +  static PerfCounter * _sync_FutileWakeups ;
  70.192 +  static PerfCounter * _sync_Parks ;
  70.193 +  static PerfCounter * _sync_EmptyNotifications ;
  70.194 +  static PerfCounter * _sync_Notifications ;
  70.195 +  static PerfCounter * _sync_SlowEnter ;
  70.196 +  static PerfCounter * _sync_SlowExit ;
  70.197 +  static PerfCounter * _sync_SlowNotify ;
  70.198 +  static PerfCounter * _sync_SlowNotifyAll ;
  70.199 +  static PerfCounter * _sync_FailedSpins ;
  70.200 +  static PerfCounter * _sync_SuccessfulSpins ;
  70.201 +  static PerfCounter * _sync_PrivateA ;
  70.202 +  static PerfCounter * _sync_PrivateB ;
  70.203 +  static PerfCounter * _sync_MonInCirculation ;
  70.204 +  static PerfCounter * _sync_MonScavenged ;
  70.205 +  static PerfCounter * _sync_Inflations ;
  70.206 +  static PerfCounter * _sync_Deflations ;
  70.207 +  static PerfLongVariable * _sync_MonExtant ;
  70.208 +
  70.209 + public:
  70.210 +  static int Knob_Verbose;
  70.211 +  static int Knob_SpinLimit;
  70.212  };
  70.213 +
  70.214 +#undef TEVENT
  70.215 +#define TEVENT(nom) {if (SyncVerbose) FEVENT(nom); }
  70.216 +
  70.217 +#define FEVENT(nom) { static volatile int ctr = 0 ; int v = ++ctr ; if ((v & (v-1)) == 0) { ::printf (#nom " : %d \n", v); ::fflush(stdout); }}
  70.218 +
  70.219 +#undef  TEVENT
  70.220 +#define TEVENT(nom) {;}
  70.221 +
    71.1 --- a/src/share/vm/runtime/objectMonitor.inline.hpp	Thu Nov 04 15:19:16 2010 -0700
    71.2 +++ b/src/share/vm/runtime/objectMonitor.inline.hpp	Thu Nov 04 16:17:54 2010 -0700
    71.3 @@ -104,7 +104,3 @@
    71.4    _count = 0;
    71.5  }
    71.6  
    71.7 -
    71.8 -// here are the platform-dependent bodies:
    71.9 -
   71.10 -# include "incls/_objectMonitor_pd.inline.hpp.incl"
    72.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    72.2 +++ b/src/share/vm/runtime/park.cpp	Thu Nov 04 16:17:54 2010 -0700
    72.3 @@ -0,0 +1,237 @@
    72.4 +/*
    72.5 + * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
    72.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    72.7 + *
    72.8 + * This code is free software; you can redistribute it and/or modify it
    72.9 + * under the terms of the GNU General Public License version 2 only, as
   72.10 + * published by the Free Software Foundation.
   72.11 + *
   72.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   72.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   72.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   72.15 + * version 2 for more details (a copy is included in the LICENSE file that
   72.16 + * accompanied this code).
   72.17 + *
   72.18 + * You should have received a copy of the GNU General Public License version
   72.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   72.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   72.21 + *
   72.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   72.23 + * or visit www.oracle.com if you need additional information or have any
   72.24 + * questions.
   72.25 + *
   72.26 + */
   72.27 +
   72.28 +
   72.29 +# include "incls/_precompiled.incl"
   72.30 +# include "incls/_park.cpp.incl"
   72.31 +
   72.32 +
   72.33 +// Lifecycle management for TSM ParkEvents.
   72.34 +// ParkEvents are type-stable (TSM).
   72.35 +// In our particular implementation they happen to be immortal.
   72.36 +//
   72.37 +// We manage concurrency on the FreeList with a CAS-based
   72.38 +// detach-modify-reattach idiom that avoids the ABA problems
   72.39 +// that would otherwise be present in a simple CAS-based
   72.40 +// push-pop implementation.   (push-one and pop-all)
   72.41 +//
   72.42 +// Caveat: Allocate() and Release() may be called from threads
   72.43 +// other than the thread associated with the Event!
   72.44 +// If we need to call Allocate() when running as the thread in
   72.45 +// question then look for the PD calls to initialize native TLS.
   72.46 +// Native TLS (Win32/Linux/Solaris) can only be initialized or
   72.47 +// accessed by the associated thread.
   72.48 +// See also pd_initialize().
   72.49 +//
   72.50 +// Note that we could defer associating a ParkEvent with a thread
   72.51 +// until the 1st time the thread calls park().  unpark() calls to
   72.52 +// an unprovisioned thread would be ignored.  The first park() call
   72.53 +// for a thread would allocate and associate a ParkEvent and return
   72.54 +// immediately.
   72.55 +
   72.56 +volatile int ParkEvent::ListLock = 0 ;
   72.57 +ParkEvent * volatile ParkEvent::FreeList = NULL ;
   72.58 +
   72.59 +ParkEvent * ParkEvent::Allocate (Thread * t) {
   72.60 +  // In rare cases -- JVM_RawMonitor* operations -- we can find t == null.
   72.61 +  ParkEvent * ev ;
   72.62 +
   72.63 +  // Start by trying to recycle an existing but unassociated
   72.64 +  // ParkEvent from the global free list.
   72.65 +  for (;;) {
   72.66 +    ev = FreeList ;
   72.67 +    if (ev == NULL) break ;
   72.68 +    // 1: Detach - sequester or privatize the list
   72.69 +    // Tantamount to ev = Swap (&FreeList, NULL)
   72.70 +    if (Atomic::cmpxchg_ptr (NULL, &FreeList, ev) != ev) {
   72.71 +       continue ;
   72.72 +    }
   72.73 +
   72.74 +    // We've detached the list.  The list in-hand is now
   72.75 +    // local to this thread.   This thread can operate on the
   72.76 +    // list without risk of interference from other threads.
   72.77 +    // 2: Extract -- pop the 1st element from the list.
   72.78 +    ParkEvent * List = ev->FreeNext ;
   72.79 +    if (List == NULL) break ;
   72.80 +    for (;;) {
   72.81 +        // 3: Try to reattach the residual list
   72.82 +        guarantee (List != NULL, "invariant") ;
   72.83 +        ParkEvent * Arv =  (ParkEvent *) Atomic::cmpxchg_ptr (List, &FreeList, NULL) ;
   72.84 +        if (Arv == NULL) break ;
   72.85 +
   72.86 +        // New nodes arrived.  Try to detach the recent arrivals.
   72.87 +        if (Atomic::cmpxchg_ptr (NULL, &FreeList, Arv) != Arv) {
   72.88 +            continue ;
   72.89 +        }
   72.90 +        guarantee (Arv != NULL, "invariant") ;
   72.91 +        // 4: Merge Arv into List
   72.92 +        ParkEvent * Tail = List ;
   72.93 +        while (Tail->FreeNext != NULL) Tail = Tail->FreeNext ;
   72.94 +        Tail->FreeNext = Arv ;
   72.95 +    }
   72.96 +    break ;
   72.97 +  }
   72.98 +
   72.99 +  if (ev != NULL) {
  72.100 +    guarantee (ev->AssociatedWith == NULL, "invariant") ;
  72.101 +  } else {
  72.102 +    // Do this the hard way -- materialize a new ParkEvent.
  72.103 +    // In rare cases an allocating thread might detach a long list --
  72.104 +    // installing null into FreeList -- and then stall or be obstructed.
  72.105 +    // A 2nd thread calling Allocate() would see FreeList == null.
  72.106 +    // The list held privately by the 1st thread is unavailable to the 2nd thread.
  72.107 +    // In that case the 2nd thread would have to materialize a new ParkEvent,
  72.108 +    // even though free ParkEvents existed in the system.  In this case we end up
  72.109 +    // with more ParkEvents in circulation than we need, but the race is
  72.110 +    // rare and the outcome is benign.  Ideally, the # of extant ParkEvents
  72.111 +    // is equal to the maximum # of threads that existed at any one time.
  72.112 +    // Because of the race mentioned above, segments of the freelist
  72.113 +    // can be transiently inaccessible.  At worst we may end up with the
  72.114 +    // # of ParkEvents in circulation slightly above the ideal.
  72.115 +    // Note that if we didn't have the TSM/immortal constraint, then
  72.116 +    // when reattaching, above, we could trim the list.
  72.117 +    ev = new ParkEvent () ;
  72.118 +    guarantee ((intptr_t(ev) & 0xFF) == 0, "invariant") ;
  72.119 +  }
  72.120 +  ev->reset() ;                     // courtesy to caller
  72.121 +  ev->AssociatedWith = t ;          // Associate ev with t
  72.122 +  ev->FreeNext       = NULL ;
  72.123 +  return ev ;
  72.124 +}
  72.125 +
  72.126 +void ParkEvent::Release (ParkEvent * ev) {
  72.127 +  if (ev == NULL) return ;
  72.128 +  guarantee (ev->FreeNext == NULL      , "invariant") ;
  72.129 +  ev->AssociatedWith = NULL ;
  72.130 +  for (;;) {
  72.131 +    // Push ev onto FreeList
  72.132 +    // The mechanism is "half" lock-free.
  72.133 +    ParkEvent * List = FreeList ;
  72.134 +    ev->FreeNext = List ;
  72.135 +    if (Atomic::cmpxchg_ptr (ev, &FreeList, List) == List) break ;
  72.136 +  }
  72.137 +}
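// Why the detach-modify-reattach idiom above avoids ABA, in a nutshell:
// with a naive CAS pop, a thread reads head A and A->FreeNext B, is
// preempted, other threads pop A and B and later push A back; the stalled
// thread's CAS(FreeList, A, B) then succeeds even though B is no longer on
// the list.  Detaching the whole list first (swapping FreeList to NULL)
// means no other thread can observe or mutate the nodes this thread holds,
// so a stale next pointer can never be installed.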
  72.138 +
  72.139 +// Override operator new and delete so we can ensure that the
  72.140 +// least significant byte of ParkEvent addresses is 0.
  72.141 +// Beware that excessive address alignment is undesirable
  72.142 +// as it can result in D$ index usage imbalance as
  72.143 +// well as bank access imbalance on Niagara-like platforms,
  72.144 +// although Niagara's hash function should help.
  72.145 +
  72.146 +void * ParkEvent::operator new (size_t sz) {
  72.147 +  return (void *) ((intptr_t (CHeapObj::operator new (sz + 256)) + 256) & -256) ;
  72.148 +}
  72.149 +
  72.150 +void ParkEvent::operator delete (void * a) {
  72.151 +  // ParkEvents are type-stable and immortal ...
  72.152 +  ShouldNotReachHere();
  72.153 +}
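// Minimal sketch of the alignment arithmetic used in operator new above:
// over-allocate by 256 bytes and round up, so the low 8 bits of the
// returned address are always zero -- which is what Allocate()'s
// guarantee ((intptr_t(ev) & 0xFF) == 0) checks.  'malloc' stands in for
// the CHeapObj allocator in this sketch; the raw pointer is deliberately
// never freed, mirroring the immortal ParkEvents.
static void * alloc_aligned_256 (size_t sz) {
  intptr_t raw = (intptr_t) ::malloc (sz + 256) ;
  return (void *) ((raw + 256) & -256) ;     // (result & 0xFF) == 0
}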
  72.154 +
  72.155 +
  72.156 +// 6399321 As a temporary measure we copied & modified the ParkEvent::
  72.157 +// allocate() and release() code for use by Parkers.  The Parker:: forms
  72.158 +// will eventually be removed as we consolidate and shift over to ParkEvents
  72.159 +// for both builtin synchronization and JSR166 operations.
  72.160 +
  72.161 +volatile int Parker::ListLock = 0 ;
  72.162 +Parker * volatile Parker::FreeList = NULL ;
  72.163 +
  72.164 +Parker * Parker::Allocate (JavaThread * t) {
  72.165 +  guarantee (t != NULL, "invariant") ;
  72.166 +  Parker * p ;
  72.167 +
  72.168 +  // Start by trying to recycle an existing but unassociated
  72.169 +  // Parker from the global free list.
  72.170 +  for (;;) {
  72.171 +    p = FreeList ;
  72.172 +    if (p  == NULL) break ;
  72.173 +    // 1: Detach
  72.174 +    // Tantamount to p = Swap (&FreeList, NULL)
  72.175 +    if (Atomic::cmpxchg_ptr (NULL, &FreeList, p) != p) {
  72.176 +       continue ;
  72.177 +    }
  72.178 +
  72.179 +    // We've detached the list.  The list in-hand is now
  72.180 +    // local to this thread.   This thread can operate on the
  72.181 +    // list without risk of interference from other threads.
  72.182 +    // 2: Extract -- pop the 1st element from the list.
  72.183 +    Parker * List = p->FreeNext ;
  72.184 +    if (List == NULL) break ;
  72.185 +    for (;;) {
  72.186 +        // 3: Try to reattach the residual list
  72.187 +        guarantee (List != NULL, "invariant") ;
  72.188 +        Parker * Arv =  (Parker *) Atomic::cmpxchg_ptr (List, &FreeList, NULL) ;
  72.189 +        if (Arv == NULL) break ;
  72.190 +
  72.191 +        // New nodes arrived.  Try to detach the recent arrivals.
  72.192 +        if (Atomic::cmpxchg_ptr (NULL, &FreeList, Arv) != Arv) {
  72.193 +            continue ;
  72.194 +        }
  72.195 +        guarantee (Arv != NULL, "invariant") ;
  72.196 +        // 4: Merge Arv into List
  72.197 +        Parker * Tail = List ;
  72.198 +        while (Tail->FreeNext != NULL) Tail = Tail->FreeNext ;
  72.199 +        Tail->FreeNext = Arv ;
  72.200 +    }
  72.201 +    break ;
  72.202 +  }
  72.203 +
  72.204 +  if (p != NULL) {
  72.205 +    guarantee (p->AssociatedWith == NULL, "invariant") ;
  72.206 +  } else {
  72.207 +    // Do this the hard way -- materialize a new Parker.
  72.208 +    // In rare cases an allocating thread might detach
  72.209 +    // a long list -- installing null into FreeList -- and
  72.210 +    // then stall.  Another thread calling Allocate() would see
  72.211 +    // FreeList == null and then invoke the ctor.  In this case we
  72.212 +    // end up with more Parkers in circulation than we need, but
  72.213 +    // the race is rare and the outcome is benign.
  72.214 +    // Ideally, the # of extant Parkers is equal to the
  72.215 +    // maximum # of threads that existed at any one time.
  72.216 +    // Because of the race mentioned above, segments of the
  72.217 +    // freelist can be transiently inaccessible.  At worst
  72.218 +    // we may end up with the # of Parkers in circulation
  72.219 +    // slightly above the ideal.
  72.220 +    p = new Parker() ;
  72.221 +  }
  72.222 +  p->AssociatedWith = t ;          // Associate p with t
  72.223 +  p->FreeNext       = NULL ;
  72.224 +  return p ;
  72.225 +}
  72.226 +
  72.227 +
  72.228 +void Parker::Release (Parker * p) {
  72.229 +  if (p == NULL) return ;
  72.230 +  guarantee (p->AssociatedWith != NULL, "invariant") ;
  72.231 +  guarantee (p->FreeNext == NULL      , "invariant") ;
  72.232 +  p->AssociatedWith = NULL ;
  72.233 +  for (;;) {
  72.234 +    // Push p onto FreeList
  72.235 +    Parker * List = FreeList ;
  72.236 +    p->FreeNext = List ;
  72.237 +    if (Atomic::cmpxchg_ptr (p, &FreeList, List) == List) break ;
  72.238 +  }
  72.239 +}
  72.240 +
    73.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    73.2 +++ b/src/share/vm/runtime/park.hpp	Thu Nov 04 16:17:54 2010 -0700
    73.3 @@ -0,0 +1,169 @@
    73.4 +/*
    73.5 + * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
    73.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    73.7 + *
    73.8 + * This code is free software; you can redistribute it and/or modify it
    73.9 + * under the terms of the GNU General Public License version 2 only, as
   73.10 + * published by the Free Software Foundation.
   73.11 + *
   73.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   73.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   73.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   73.15 + * version 2 for more details (a copy is included in the LICENSE file that
   73.16 + * accompanied this code).
   73.17 + *
   73.18 + * You should have received a copy of the GNU General Public License version
   73.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   73.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   73.21 + *
   73.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   73.23 + * or visit www.oracle.com if you need additional information or have any
   73.24 + * questions.
   73.25 + *
   73.26 + */
   73.27 +/*
   73.28 + * Per-thread blocking support for JSR166. See the Java-level
   73.29 + * Documentation for rationale. Basically, park acts like wait, unpark
   73.30 + * like notify.
   73.31 + *
   73.32 + * 6271289 --
   73.33 + * To avoid errors where an os thread expires but the JavaThread still
   73.34 + * exists, Parkers are immortal (type-stable) and are recycled across
   73.35 + * new threads.  This parallels the ParkEvent implementation.
   73.36 + * Because park-unpark allow spurious wakeups it is harmless if an
   73.37 + * unpark call unparks a new thread using the old Parker reference.
   73.38 + *
   73.39 + * In the future we'll want to think about eliminating Parker and using
   73.40 + * ParkEvent instead.  There's considerable duplication between the two
   73.41 + * services.
   73.42 + *
   73.43 + */
   73.44 +
   73.45 +class Parker : public os::PlatformParker {
   73.46 +private:
   73.47 +  volatile int _counter ;
   73.48 +  Parker * FreeNext ;
   73.49 +  JavaThread * AssociatedWith ; // Current association
   73.50 +
   73.51 +public:
   73.52 +  Parker() : PlatformParker() {
   73.53 +    _counter       = 0 ;
   73.54 +    FreeNext       = NULL ;
   73.55 +    AssociatedWith = NULL ;
   73.56 +  }
   73.57 +protected:
   73.58 +  ~Parker() { ShouldNotReachHere(); }
   73.59 +public:
   73.60 +  // For simplicity of interface with Java, all forms of park (indefinite,
   73.61 +  // relative, and absolute) are multiplexed into one call.
   73.62 +  void park(bool isAbsolute, jlong time);
   73.63 +  void unpark();
   73.64 +
   73.65 +  // Lifecycle operators
   73.66 +  static Parker * Allocate (JavaThread * t) ;
   73.67 +  static void Release (Parker * e) ;
   73.68 +private:
   73.69 +  static Parker * volatile FreeList ;
   73.70 +  static volatile int ListLock ;
   73.71 +
   73.72 +};
   73.73 +
   73.74 +/////////////////////////////////////////////////////////////
   73.75 +//
   73.76 +// ParkEvents are type-stable and immortal.
   73.77 +//
   73.78 +// Lifecycle: Once a ParkEvent is associated with a thread that ParkEvent remains
   73.79 +// associated with the thread for the thread's entire lifetime - the relationship is
   73.80 + * stable. A thread will be associated with at most one ParkEvent.  When the thread
   73.81 +// expires, the ParkEvent moves to the EventFreeList.  New threads attempt to allocate from
   73.82 +// the EventFreeList before creating a new Event.  Type-stability frees us from
   73.83 +// worrying about stale Event or Thread references in the objectMonitor subsystem.
   73.84 +// (A reference to ParkEvent is always valid, even though the event may no longer be associated
   73.85 +// with the desired or expected thread.  A key aspect of this design is that the callers of
   73.86 +// park, unpark, etc must tolerate stale references and spurious wakeups).
   73.87 +//
   73.88 +// Only the "associated" thread can block (park) on the ParkEvent, although
   73.89 +// any other thread can unpark a reachable ParkEvent.  Park() is allowed to
   73.90 +// return spuriously.  In fact park-unpark is really just an optimization to
   73.91 +// avoid unbounded spinning and surrender the CPU to be a polite system citizen.
   73.92 +// A degenerate albeit "impolite" park-unpark implementation could simply return.
   73.93 +// See http://blogs.sun.com/dave for more details.
   73.94 +//
   73.95 +// Eventually I'd like to eliminate Events and ObjectWaiters, both of which serve as
   73.96 +// thread proxies, and simply make the THREAD structure type-stable and persistent.
   73.97 +// Currently, we unpark events associated with threads, but ideally we'd just
   73.98 +// unpark threads.
   73.99 +//
  73.100 +// The base-class, PlatformEvent, is platform-specific while the ParkEvent is
  73.101 +// platform-independent.  PlatformEvent provides park(), unpark(), etc., and
  73.102 +// is abstract -- that is, a PlatformEvent should never be instantiated except
  73.103 +// as part of a ParkEvent.
  73.104 +// Equivalently we could have defined a platform-independent base-class that
  73.105 +// exported Allocate(), Release(), etc.  The platform-specific class would extend
  73.106 +// that base-class, adding park(), unpark(), etc.
  73.107 +//
  73.108 +// A word of caution: The JVM uses 2 very similar constructs:
  73.109 +// 1. ParkEvents are used for Java-level "monitor" synchronization.
  73.110 +// 2. Parkers are used by JSR166-JUC park-unpark.
  73.111 +//
  73.112 +// We'll want to eventually merge these redundant facilities and use ParkEvent.
  73.113 +
  73.114 +
  73.115 +class ParkEvent : public os::PlatformEvent {
  73.116 +  private:
  73.117 +    ParkEvent * FreeNext ;
  73.118 +
  73.119 +    // Current association
  73.120 +    Thread * AssociatedWith ;
  73.121 +    intptr_t RawThreadIdentity ;        // LWPID etc
  73.122 +    volatile int Incarnation ;
  73.123 +
  73.124 +    // diagnostic : keep track of last thread to wake this thread.
  73.125 +    // this is useful for construction of dependency graphs.
  73.126 +    void * LastWaker ;
  73.127 +
  73.128 +  public:
  73.129 +    // MCS-CLH list linkage and Native Mutex/Monitor
  73.130 +    ParkEvent * volatile ListNext ;
  73.131 +    ParkEvent * volatile ListPrev ;
  73.132 +    volatile intptr_t OnList ;
  73.133 +    volatile int TState ;
  73.134 +    volatile int Notified ;             // for native monitor construct
  73.135 +    volatile int IsWaiting ;            // Enqueued on WaitSet
  73.136 +
  73.137 +
  73.138 +  private:
  73.139 +    static ParkEvent * volatile FreeList ;
  73.140 +    static volatile int ListLock ;
  73.141 +
  73.142 +    // It's prudent to mark the dtor as "private"
  73.143 +    // ensuring that it's not visible outside the package.
  73.144 +    // Unfortunately gcc warns about such usage, so
  73.145 +    // we revert to the less desirable "protected" visibility.
  73.146 +    // The other compilers accept private dtors.
  73.147 +
  73.148 +  protected:        // Ensure dtor is never invoked
  73.149 +    ~ParkEvent() { guarantee (0, "invariant") ; }
  73.150 +
  73.151 +    ParkEvent() : PlatformEvent() {
  73.152 +       AssociatedWith = NULL ;
  73.153 +       FreeNext       = NULL ;
  73.154 +       ListNext       = NULL ;
  73.155 +       ListPrev       = NULL ;
  73.156 +       OnList         = 0 ;
  73.157 +       TState         = 0 ;
  73.158 +       Notified       = 0 ;
  73.159 +       IsWaiting      = 0 ;
  73.160 +    }
  73.161 +
  73.162 +    // We use placement-new to force ParkEvent instances to be
  73.163 +    // aligned on 256-byte address boundaries.  This ensures that the least
  73.164 +    // significant byte of a ParkEvent address is always 0.
  73.165 +
  73.166 +    void * operator new (size_t sz) ;
  73.167 +    void operator delete (void * a) ;
  73.168 +
  73.169 +  public:
  73.170 +    static ParkEvent * Allocate (Thread * t) ;
  73.171 +    static void Release (ParkEvent * e) ;
  73.172 +} ;
    74.1 --- a/src/share/vm/runtime/relocator.cpp	Thu Nov 04 15:19:16 2010 -0700
    74.2 +++ b/src/share/vm/runtime/relocator.cpp	Thu Nov 04 16:17:54 2010 -0700
    74.3 @@ -435,6 +435,120 @@
    74.4    }
    74.5  }
    74.6  
    74.7 +// Create a new array, copying the src array but adding a hole at
    74.8 +// the specified location
    74.9 +static typeArrayOop insert_hole_at(
   74.10 +    size_t where, int hole_sz, typeArrayOop src) {
   74.11 +  Thread* THREAD = Thread::current();
   74.12 +  Handle src_hnd(THREAD, src);
   74.13 +  typeArrayOop dst =
   74.14 +      oopFactory::new_permanent_byteArray(src->length() + hole_sz, CHECK_NULL);
   74.15 +  src = (typeArrayOop)src_hnd();
   74.16 +
   74.17 +  address src_addr = (address)src->byte_at_addr(0);
   74.18 +  address dst_addr = (address)dst->byte_at_addr(0);
   74.19 +
   74.20 +  memcpy(dst_addr, src_addr, where);
   74.21 +  memcpy(dst_addr + where + hole_sz,
   74.22 +         src_addr + where, src->length() - where);
   74.23 +  return dst;
   74.24 +}
   74.25 +
   74.26 +// The width of the instruction at "bci" is changing by "delta".  Adjust the stack
   74.27 +// map frames.
   74.28 +void Relocator::adjust_stack_map_table(int bci, int delta) {
   74.29 +  if (method()->has_stackmap_table()) {
   74.30 +    typeArrayOop data = method()->stackmap_data();
   74.31 +    // The data in the array is a classfile representation of the stackmap
   74.32 +    // table attribute, less the initial u2 tag and u4 attribute_length fields.
   74.33 +    stack_map_table_attribute* attr = stack_map_table_attribute::at(
   74.34 +        (address)data->byte_at_addr(0) - (sizeof(u2) + sizeof(u4)));
   74.35 +
   74.36 +    int count = attr->number_of_entries();
   74.37 +    stack_map_frame* frame = attr->entries();
   74.38 +    int bci_iter = -1;
   74.39 +    bool offset_adjusted = false; // only need to adjust one offset
   74.40 +
   74.41 +    for (int i = 0; i < count; ++i) {
   74.42 +      int offset_delta = frame->offset_delta();
   74.43 +      bci_iter += offset_delta;
   74.44 +
   74.45 +      if (!offset_adjusted && bci_iter > bci) {
   74.46 +        int new_offset_delta = offset_delta + delta;
   74.47 +
   74.48 +        if (frame->is_valid_offset(new_offset_delta)) {
   74.49 +          frame->set_offset_delta(new_offset_delta);
   74.50 +        } else {
   74.51 +          assert(frame->is_same_frame() ||
   74.52 +                 frame->is_same_frame_1_stack_item_frame(),
   74.53 +                 "Frame must be one of the compressed forms");
   74.54 +          // The new delta exceeds the capacity of the 'same_frame' or
   74.55 +          // 'same_frame_1_stack_item_frame' frame types.  We need to
   74.56 +          // convert these frames to the extended versions, but the extended
   74.57 +          // version is bigger and requires more room.  So we allocate a
   74.58 +          // new array and copy the data, being sure to leave a u2-sized hole
   74.59 +          // right after the 'frame_type' for the new offset field.
   74.60 +          //
   74.61 +          // We can safely ignore the reverse situation as a small delta
   74.62 +          // can still be used in an extended version of the frame.
   74.63 +
   74.64 +          size_t frame_offset = (address)frame - (address)data->byte_at_addr(0);
   74.65 +
   74.66 +          data = insert_hole_at(frame_offset + 1, 2, data);
   74.67 +          if (data == NULL) {
   74.68 +            return; // out-of-memory?
   74.69 +          }
   74.70 +
   74.71 +          address frame_addr = (address)(data->byte_at_addr(0) + frame_offset);
   74.72 +          frame = stack_map_frame::at(frame_addr);
   74.73 +
   74.74 +
   74.75 +          // Now convert the frames in place
   74.76 +          if (frame->is_same_frame()) {
   74.77 +            same_frame_extended::create_at(frame_addr, new_offset_delta);
   74.78 +          } else {
   74.79 +            same_frame_1_stack_item_extended::create_at(
   74.80 +              frame_addr, new_offset_delta, NULL);
   74.81 +            // the verification_info_type should already be at the right spot
   74.82 +          }
   74.83 +        }
   74.84 +        offset_adjusted = true; // needs to be done only once, since subsequent
   74.85 +                                // values are offsets from the current
   74.86 +      }
   74.87 +
   74.88 +      // The stack map frame may contain verification types, if so we need to
   74.89 +      // check and update any Uninitialized type's bci (no matter where it is).
   74.90 +      int number_of_types = frame->number_of_types();
   74.91 +      verification_type_info* types = frame->types();
   74.92 +
   74.93 +      for (int i = 0; i < number_of_types; ++i) {
   74.94 +        if (types->is_uninitialized() && types->bci() > bci) {
   74.95 +          types->set_bci(types->bci() + delta);
   74.96 +        }
   74.97 +        types = types->next();
   74.98 +      }
   74.99 +
  74.100 +      // Full frame has stack values too
  74.101 +      full_frame* ff = frame->as_full_frame();
  74.102 +      if (ff != NULL) {
  74.103 +        address eol = (address)types;
  74.104 +        number_of_types = ff->stack_slots(eol);
  74.105 +        types = ff->stack(eol);
  74.106 +        for (int i = 0; i < number_of_types; ++i) {
  74.107 +          if (types->is_uninitialized() && types->bci() > bci) {
  74.108 +            types->set_bci(types->bci() + delta);
  74.109 +          }
  74.110 +          types = types->next();
  74.111 +        }
  74.112 +      }
  74.113 +
  74.114 +      frame = frame->next();
  74.115 +    }
  74.116 +
  74.117 +    method()->set_stackmap_data(data); // in case it has changed
  74.118 +  }
  74.119 +}
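// Worked example of the conversion above: a 'same_frame' encodes its
// offset_delta (0..63) directly in the tag byte.  If the original delta
// was 60 and the rewritten instruction grows by 10 bytes, the new delta
// of 70 no longer fits, so the frame must become 'same_frame_extended'
// (tag 251) carrying an explicit u2 offset_delta of 70 -- hence the
// 2-byte hole inserted by insert_hole_at() right after the frame_type.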
  74.120 +
  74.121  
  74.122  bool Relocator::expand_code_array(int delta) {
  74.123    int length = MAX2(code_length() + delta, code_length() * (100+code_slop_pct()) / 100);
  74.124 @@ -499,6 +613,9 @@
  74.125    // And local variable table...
  74.126    adjust_local_var_table(bci, delta);
  74.127  
  74.128 +  // Adjust stack maps
  74.129 +  adjust_stack_map_table(bci, delta);
  74.130 +
  74.131    // Relocate the pending change stack...
  74.132    for (int j = 0; j < _changes->length(); j++) {
  74.133      ChangeItem* ci = _changes->at(j);
  74.134 @@ -641,6 +758,7 @@
  74.135        memmove(addr_at(bci +1 + new_pad),
  74.136                addr_at(bci +1 + old_pad),
  74.137                len * 4);
  74.138 +      memset(addr_at(bci + 1), 0, new_pad); // pad must be 0
  74.139      }
  74.140    }
  74.141    return true;
    75.1 --- a/src/share/vm/runtime/relocator.hpp	Thu Nov 04 15:19:16 2010 -0700
    75.2 +++ b/src/share/vm/runtime/relocator.hpp	Thu Nov 04 16:17:54 2010 -0700
    75.3 @@ -105,6 +105,7 @@
    75.4    void adjust_exception_table(int bci, int delta);
    75.5    void adjust_line_no_table  (int bci, int delta);
    75.6    void adjust_local_var_table(int bci, int delta);
    75.7 +  void adjust_stack_map_table(int bci, int delta);
    75.8    int  get_orig_switch_pad   (int bci, bool is_lookup_switch);
    75.9    int  rc_instr_len          (int bci);
   75.10    bool expand_code_array     (int delta);
    76.1 --- a/src/share/vm/runtime/sharedRuntime.cpp	Thu Nov 04 15:19:16 2010 -0700
    76.2 +++ b/src/share/vm/runtime/sharedRuntime.cpp	Thu Nov 04 16:17:54 2010 -0700
    76.3 @@ -302,6 +302,9 @@
    76.4    return (f <= (double)0.0) ? (double)0.0 - f : f;
    76.5  }
    76.6  
    76.7 +#endif
    76.8 +
    76.9 +#if defined(__SOFTFP__) || defined(PPC)
   76.10  double SharedRuntime::dsqrt(double f) {
   76.11    return sqrt(f);
   76.12  }
    77.1 --- a/src/share/vm/runtime/sharedRuntime.hpp	Thu Nov 04 15:19:16 2010 -0700
    77.2 +++ b/src/share/vm/runtime/sharedRuntime.hpp	Thu Nov 04 16:17:54 2010 -0700
    77.3 @@ -116,6 +116,9 @@
    77.4  
    77.5  #if defined(__SOFTFP__) || defined(E500V2)
    77.6    static double dabs(double f);
    77.7 +#endif
    77.8 +
    77.9 +#if defined(__SOFTFP__) || defined(PPC)
   77.10    static double dsqrt(double f);
   77.11  #endif
   77.12  
    78.1 --- a/src/share/vm/runtime/synchronizer.cpp	Thu Nov 04 15:19:16 2010 -0700
    78.2 +++ b/src/share/vm/runtime/synchronizer.cpp	Thu Nov 04 16:17:54 2010 -0700
    78.3 @@ -32,15 +32,12 @@
    78.4    #define ATTR
    78.5  #endif
    78.6  
    78.7 -// Native markword accessors for synchronization and hashCode().
    78.8 -//
    78.9  // The "core" versions of monitor enter and exit reside in this file.
   78.10  // The interpreter and compilers contain specialized transliterated
   78.11  // variants of the enter-exit fast-path operations.  See i486.ad fast_lock(),
   78.12  // for instance.  If you make changes here, make sure to modify the
   78.13  // interpreter, and both C1 and C2 fast-path inline locking code emission.
   78.14  //
   78.15 -// TODO: merge the objectMonitor and synchronizer classes.
   78.16  //
   78.17  // -----------------------------------------------------------------------------
   78.18  
   78.19 @@ -53,16 +50,6 @@
   78.20    jlong, uintptr_t, char*, int, long);
   78.21  HS_DTRACE_PROBE_DECL4(hotspot, monitor__waited,
   78.22    jlong, uintptr_t, char*, int);
   78.23 -HS_DTRACE_PROBE_DECL4(hotspot, monitor__notify,
   78.24 -  jlong, uintptr_t, char*, int);
   78.25 -HS_DTRACE_PROBE_DECL4(hotspot, monitor__notifyAll,
   78.26 -  jlong, uintptr_t, char*, int);
   78.27 -HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__enter,
   78.28 -  jlong, uintptr_t, char*, int);
   78.29 -HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__entered,
   78.30 -  jlong, uintptr_t, char*, int);
   78.31 -HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__exit,
   78.32 -  jlong, uintptr_t, char*, int);
   78.33  
   78.34  #define DTRACE_MONITOR_PROBE_COMMON(klassOop, thread)                      \
   78.35    char* bytes = NULL;                                                      \
   78.36 @@ -99,61 +86,300 @@
   78.37  
   78.38  #endif // ndef DTRACE_ENABLED
   78.39  
   78.40 -// ObjectWaiter serves as a "proxy" or surrogate thread.
   78.41 -// TODO-FIXME: Eliminate ObjectWaiter and use the thread-specific
   78.42 -// ParkEvent instead.  Beware, however, that the JVMTI code
   78.43 -// knows about ObjectWaiters, so we'll have to reconcile that code.
   78.44 -// See next_waiter(), first_waiter(), etc.
   78.45 +// This exists only as a workaround of dtrace bug 6254741
   78.46 +int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
   78.47 +  DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
   78.48 +  return 0;
   78.49 +}
   78.50  
   78.51 -class ObjectWaiter : public StackObj {
   78.52 - public:
   78.53 -  enum TStates { TS_UNDEF, TS_READY, TS_RUN, TS_WAIT, TS_ENTER, TS_CXQ } ;
   78.54 -  enum Sorted  { PREPEND, APPEND, SORTED } ;
   78.55 -  ObjectWaiter * volatile _next;
   78.56 -  ObjectWaiter * volatile _prev;
   78.57 -  Thread*       _thread;
   78.58 -  ParkEvent *   _event;
   78.59 -  volatile int  _notified ;
   78.60 -  volatile TStates TState ;
   78.61 -  Sorted        _Sorted ;           // List placement disposition
   78.62 -  bool          _active ;           // Contention monitoring is enabled
   78.63 - public:
   78.64 -  ObjectWaiter(Thread* thread) {
   78.65 -    _next     = NULL;
   78.66 -    _prev     = NULL;
   78.67 -    _notified = 0;
   78.68 -    TState    = TS_RUN ;
   78.69 -    _thread   = thread;
   78.70 -    _event    = thread->_ParkEvent ;
   78.71 -    _active   = false;
   78.72 -    assert (_event != NULL, "invariant") ;
   78.73 +#define NINFLATIONLOCKS 256
   78.74 +static volatile intptr_t InflationLocks [NINFLATIONLOCKS] ;
   78.75 +
   78.76 +ObjectMonitor * ObjectSynchronizer::gBlockList = NULL ;
   78.77 +ObjectMonitor * volatile ObjectSynchronizer::gFreeList  = NULL ;
   78.78 +ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList  = NULL ;
   78.79 +int ObjectSynchronizer::gOmInUseCount = 0;
   78.80 +static volatile intptr_t ListLock = 0 ;      // protects global monitor free-list cache
   78.81 +static volatile int MonitorFreeCount  = 0 ;      // # on gFreeList
   78.82 +static volatile int MonitorPopulation = 0 ;      // # Extant -- in circulation
   78.83 +#define CHAINMARKER ((oop)-1)
   78.84 +
   78.85 +// -----------------------------------------------------------------------------
   78.86 +//  Fast Monitor Enter/Exit
   78.87 +// This is the fast monitor enter.  The interpreter and compiler use
   78.88 +// assembly copies of this code.  Make sure to update that code
   78.89 +// if the following function is changed.  The implementation is
   78.90 +// extremely sensitive to race conditions.  Be careful.
   78.91 +
   78.92 +void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock, bool attempt_rebias, TRAPS) {
   78.93 + if (UseBiasedLocking) {
   78.94 +    if (!SafepointSynchronize::is_at_safepoint()) {
   78.95 +      BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
   78.96 +      if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
   78.97 +        return;
   78.98 +      }
   78.99 +    } else {
  78.100 +      assert(!attempt_rebias, "can not rebias toward VM thread");
  78.101 +      BiasedLocking::revoke_at_safepoint(obj);
  78.102 +    }
  78.103 +    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  78.104 + }
  78.105 +
  78.106 + slow_enter (obj, lock, THREAD) ;
  78.107 +}
  78.108 +
  78.109 +void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
  78.110 +  assert(!object->mark()->has_bias_pattern(), "should not see bias pattern here");
  78.111 +  // if the displaced header is null, the previous enter was a recursive enter; no-op
  78.112 +  markOop dhw = lock->displaced_header();
  78.113 +  markOop mark ;
  78.114 +  if (dhw == NULL) {
  78.115 +     // Recursive stack-lock.
  78.116 +     // Diagnostics -- Could be: stack-locked, inflating, inflated.
  78.117 +     mark = object->mark() ;
  78.118 +     assert (!mark->is_neutral(), "invariant") ;
  78.119 +     if (mark->has_locker() && mark != markOopDesc::INFLATING()) {
  78.120 +        assert(THREAD->is_lock_owned((address)mark->locker()), "invariant") ;
  78.121 +     }
  78.122 +     if (mark->has_monitor()) {
  78.123 +        ObjectMonitor * m = mark->monitor() ;
  78.124 +        assert(((oop)(m->object()))->mark() == mark, "invariant") ;
  78.125 +        assert(m->is_entered(THREAD), "invariant") ;
  78.126 +     }
  78.127 +     return ;
  78.128    }
  78.129  
  78.130 -  void wait_reenter_begin(ObjectMonitor *mon) {
  78.131 -    JavaThread *jt = (JavaThread *)this->_thread;
  78.132 -    _active = JavaThreadBlockedOnMonitorEnterState::wait_reenter_begin(jt, mon);
  78.133 +  mark = object->mark() ;
  78.134 +
  78.135 +  // If the object is stack-locked by the current thread, try to
  78.136 +  // swing the displaced header from the box back to the mark.
  78.137 +  if (mark == (markOop) lock) {
  78.138 +     assert (dhw->is_neutral(), "invariant") ;
  78.139 +     if ((markOop) Atomic::cmpxchg_ptr (dhw, object->mark_addr(), mark) == mark) {
  78.140 +        TEVENT (fast_exit: release stacklock) ;
  78.141 +        return;
  78.142 +     }
  78.143    }
  78.144  
  78.145 -  void wait_reenter_end(ObjectMonitor *mon) {
  78.146 -    JavaThread *jt = (JavaThread *)this->_thread;
  78.147 -    JavaThreadBlockedOnMonitorEnterState::wait_reenter_end(jt, _active);
  78.148 +  ObjectSynchronizer::inflate(THREAD, object)->exit (THREAD) ;
  78.149 +}
  78.150 +
  78.151 +// -----------------------------------------------------------------------------
  78.152 +// Interpreter/Compiler Slow Case
   78.153 +// This routine handles the interpreter/compiler slow case.
   78.154 +// We don't need the fast path here, because it must already
   78.155 +// have failed in the interpreter/compiler code.
  78.156 +void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
  78.157 +  markOop mark = obj->mark();
  78.158 +  assert(!mark->has_bias_pattern(), "should not see bias pattern here");
  78.159 +
  78.160 +  if (mark->is_neutral()) {
  78.161 +    // Anticipate successful CAS -- the ST of the displaced mark must
  78.162 +    // be visible <= the ST performed by the CAS.
  78.163 +    lock->set_displaced_header(mark);
  78.164 +    if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) {
  78.165 +      TEVENT (slow_enter: release stacklock) ;
  78.166 +      return ;
  78.167 +    }
  78.168 +    // Fall through to inflate() ...
  78.169 +  } else
  78.170 +  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
  78.171 +    assert(lock != mark->locker(), "must not re-lock the same lock");
  78.172 +    assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
  78.173 +    lock->set_displaced_header(NULL);
  78.174 +    return;
  78.175    }
  78.176 -};
  78.177  
  78.178 -enum ManifestConstants {
  78.179 -    ClearResponsibleAtSTW   = 0,
  78.180 -    MaximumRecheckInterval  = 1000
  78.181 -} ;
  78.182 +#if 0
  78.183 +  // The following optimization isn't particularly useful.
  78.184 +  if (mark->has_monitor() && mark->monitor()->is_entered(THREAD)) {
  78.185 +    lock->set_displaced_header (NULL) ;
  78.186 +    return ;
  78.187 +  }
  78.188 +#endif
  78.189  
  78.190 +  // The object header will never be displaced to this lock,
  78.191 +  // so it does not matter what the value is, except that it
  78.192 +  // must be non-zero to avoid looking like a re-entrant lock,
  78.193 +  // and must not look locked either.
  78.194 +  lock->set_displaced_header(markOopDesc::unused_mark());
  78.195 +  ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
  78.196 +}
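
The stack-lock fast path above (store the observed neutral mark word into the on-stack BasicLock, then CAS the lock's address into the object header) can be sketched outside the VM. The following is a minimal, standalone illustration: MiniObject, BasicBox, try_stack_lock and try_stack_unlock are invented names, the mark encoding is simplified (0 means neutral), and it is not the real markOop/BasicLock machinery.

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    // Simplified stand-ins: 0 plays the role of a neutral (unlocked) mark word,
    // and BasicBox plays the role of the on-stack BasicLock.
    using MiniMark = std::intptr_t;
    struct BasicBox   { MiniMark displaced = 0; };
    struct MiniObject { std::atomic<MiniMark> mark{0}; };

    // Fast enter: save the observed neutral mark into the box, then CAS the
    // box address into the header. The store of the displaced mark is ordered
    // before the CAS, mirroring the comment in slow_enter() above.
    bool try_stack_lock(MiniObject* obj, BasicBox* box) {
      MiniMark mark = obj->mark.load(std::memory_order_acquire);
      if (mark != 0) return false;                  // not neutral: the VM would inflate instead
      box->displaced = mark;
      return obj->mark.compare_exchange_strong(mark, reinterpret_cast<MiniMark>(box));
    }

    // Fast exit: swing the displaced mark back from the box into the header.
    bool try_stack_unlock(MiniObject* obj, BasicBox* box) {
      MiniMark expected = reinterpret_cast<MiniMark>(box);
      return obj->mark.compare_exchange_strong(expected, box->displaced);
    }

    int main() {
      MiniObject o;
      BasicBox box;
      assert(try_stack_lock(&o, &box));
      assert(try_stack_unlock(&o, &box));
      return 0;
    }
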
  78.197  
  78.198 -#undef TEVENT
  78.199 -#define TEVENT(nom) {if (SyncVerbose) FEVENT(nom); }
   78.200 +// This routine handles the interpreter/compiler slow case.
   78.201 +// We don't need the fast path here, because it must already
   78.202 +// have failed in the interpreter/compiler code. Simply using the
   78.203 +// heavyweight monitor should be fine, unless proven otherwise.
  78.204 +void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
  78.205 +  fast_exit (object, lock, THREAD) ;
  78.206 +}
  78.207  
  78.208 -#define FEVENT(nom) { static volatile int ctr = 0 ; int v = ++ctr ; if ((v & (v-1)) == 0) { ::printf (#nom " : %d \n", v); ::fflush(stdout); }}
  78.209 +// -----------------------------------------------------------------------------
   78.210 +// Class Loader support to work around deadlocks on the class loader lock objects
   78.211 +// Also used by the GC
   78.212 +// complete_exit()/reenter() are used to wait on a nested lock,
   78.213 +// i.e. to give up an outer lock completely and then re-enter it later
   78.214 +// Used when holding nested locks - lock acquisition order: lock1, then lock2
   78.215 +//  1) complete_exit lock1 - saving the recursion count
   78.216 +//  2) wait on lock2
   78.217 +//  3) when notified on lock2, unlock lock2
   78.218 +//  4) reenter lock1 with the original recursion count
   78.219 +//  5) lock lock2
   78.220 +// NOTE: must use the heavyweight monitor to handle complete_exit()/reenter(); see the sketch after reenter() below.
  78.221 +intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
  78.222 +  TEVENT (complete_exit) ;
  78.223 +  if (UseBiasedLocking) {
  78.224 +    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
  78.225 +    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  78.226 +  }
  78.227  
  78.228 -#undef  TEVENT
  78.229 -#define TEVENT(nom) {;}
  78.230 +  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
  78.231  
  78.232 +  return monitor->complete_exit(THREAD);
  78.233 +}
  78.234 +
  78.235 +// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
  78.236 +void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
  78.237 +  TEVENT (reenter) ;
  78.238 +  if (UseBiasedLocking) {
  78.239 +    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
  78.240 +    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  78.241 +  }
  78.242 +
  78.243 +  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
  78.244 +
  78.245 +  monitor->reenter(recursion, THREAD);
  78.246 +}
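
As a standalone analogue of the complete_exit()/reenter() pairing above (not HotSpot code -- NestedLock and its method names are invented), the same "give up a recursively held lock entirely, then restore it to the saved depth" idea looks like this:

    #include <mutex>

    // Standalone analogue of complete_exit()/reenter(): a recursive lock that can
    // be released completely, saving its recursion count, and re-entered later.
    class NestedLock {
     public:
      void lock()   { _m.lock(); ++_depth; }
      void unlock() { --_depth; _m.unlock(); }

      // Analogue of complete_exit(): drop the lock entirely, return the saved count.
      int complete_exit() {
        int saved = _depth;                  // read while still holding the lock
        for (int i = 0; i < saved; ++i) unlock();
        return saved;
      }
      // Analogue of reenter(): reacquire the lock to its previous depth.
      void reenter(int saved) {
        for (int i = 0; i < saved; ++i) lock();
      }

     private:
      std::recursive_mutex _m;
      int _depth = 0;                        // only touched while _m is held
    };

A caller holding lock1 would do: int r = lock1.complete_exit(); wait on lock2; lock1.reenter(r); -- exactly steps 1-4 in the comment block above.
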
  78.247 +// -----------------------------------------------------------------------------
  78.248 +// JNI locks on java objects
  78.249 +// NOTE: must use heavy weight monitor to handle jni monitor enter
  78.250 +void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) { // possible entry from jni enter
  78.251 +  // the current locking is from JNI instead of Java code
  78.252 +  TEVENT (jni_enter) ;
  78.253 +  if (UseBiasedLocking) {
  78.254 +    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
  78.255 +    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  78.256 +  }
  78.257 +  THREAD->set_current_pending_monitor_is_from_java(false);
  78.258 +  ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
  78.259 +  THREAD->set_current_pending_monitor_is_from_java(true);
  78.260 +}
  78.261 +
  78.262 +// NOTE: must use heavy weight monitor to handle jni monitor enter
  78.263 +bool ObjectSynchronizer::jni_try_enter(Handle obj, Thread* THREAD) {
  78.264 +  if (UseBiasedLocking) {
  78.265 +    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
  78.266 +    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  78.267 +  }
  78.268 +
  78.269 +  ObjectMonitor* monitor = ObjectSynchronizer::inflate_helper(obj());
  78.270 +  return monitor->try_enter(THREAD);
  78.271 +}
  78.272 +
  78.273 +
  78.274 +// NOTE: must use heavy weight monitor to handle jni monitor exit
  78.275 +void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
  78.276 +  TEVENT (jni_exit) ;
  78.277 +  if (UseBiasedLocking) {
  78.278 +    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
  78.279 +  }
  78.280 +  assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  78.281 +
  78.282 +  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj);
  78.283 +  // If this thread has locked the object, exit the monitor.  Note:  can't use
  78.284 +  // monitor->check(CHECK); must exit even if an exception is pending.
  78.285 +  if (monitor->check(THREAD)) {
  78.286 +     monitor->exit(THREAD);
  78.287 +  }
  78.288 +}
  78.289 +
  78.290 +// -----------------------------------------------------------------------------
  78.291 +// Internal VM locks on java objects
  78.292 +// standard constructor, allows locking failures
  78.293 +ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) {
  78.294 +  _dolock = doLock;
  78.295 +  _thread = thread;
  78.296 +  debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);)
  78.297 +  _obj = obj;
  78.298 +
  78.299 +  if (_dolock) {
  78.300 +    TEVENT (ObjectLocker) ;
  78.301 +
  78.302 +    ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
  78.303 +  }
  78.304 +}
  78.305 +
  78.306 +ObjectLocker::~ObjectLocker() {
  78.307 +  if (_dolock) {
  78.308 +    ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
  78.309 +  }
  78.310 +}
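
ObjectLocker is a scope guard: the constructor fast_enter()s the object and the destructor fast_exit()s it. A hedged usage sketch follows; the enclosing function and its arguments are hypothetical, and only ObjectLocker itself comes from the code above.

    // Hypothetical VM-internal caller: the monitor is held for the lifetime of
    // 'ol' and released on every path out of the scope.
    void with_object_locked(Handle h_obj, Thread* thread) {
      ObjectLocker ol(h_obj, thread, true);   // doLock == true: fast_enter() in the constructor
      // ... operate on state guarded by h_obj's monitor ...
    }                                          // ~ObjectLocker() calls fast_exit()
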
  78.311 +
  78.312 +
  78.313 +// -----------------------------------------------------------------------------
  78.314 +//  Wait/Notify/NotifyAll
  78.315 +// NOTE: must use heavy weight monitor to handle wait()
  78.316 +void ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  78.317 +  if (UseBiasedLocking) {
  78.318 +    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
  78.319 +    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  78.320 +  }
  78.321 +  if (millis < 0) {
  78.322 +    TEVENT (wait - throw IAX) ;
  78.323 +    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  78.324 +  }
  78.325 +  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
  78.326 +  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
  78.327 +  monitor->wait(millis, true, THREAD);
  78.328 +
  78.329 +  /* This dummy call is in place to get around dtrace bug 6254741.  Once
  78.330 +     that's fixed we can uncomment the following line and remove the call */
  78.331 +  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
  78.332 +  dtrace_waited_probe(monitor, obj, THREAD);
  78.333 +}
  78.334 +
  78.335 +void ObjectSynchronizer::waitUninterruptibly (Handle obj, jlong millis, TRAPS) {
  78.336 +  if (UseBiasedLocking) {
  78.337 +    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
  78.338 +    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  78.339 +  }
  78.340 +  if (millis < 0) {
  78.341 +    TEVENT (wait - throw IAX) ;
  78.342 +    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  78.343 +  }
  78.344 +  ObjectSynchronizer::inflate(THREAD, obj()) -> wait(millis, false, THREAD) ;
  78.345 +}
  78.346 +
  78.347 +void ObjectSynchronizer::notify(Handle obj, TRAPS) {
  78.348 + if (UseBiasedLocking) {
  78.349 +    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
  78.350 +    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  78.351 +  }
  78.352 +
  78.353 +  markOop mark = obj->mark();
  78.354 +  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
  78.355 +    return;
  78.356 +  }
  78.357 +  ObjectSynchronizer::inflate(THREAD, obj())->notify(THREAD);
  78.358 +}
  78.359 +
   78.360 +// NOTE: see the comment in notify()
  78.361 +void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
  78.362 +  if (UseBiasedLocking) {
  78.363 +    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
  78.364 +    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  78.365 +  }
  78.366 +
  78.367 +  markOop mark = obj->mark();
  78.368 +  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
  78.369 +    return;
  78.370 +  }
  78.371 +  ObjectSynchronizer::inflate(THREAD, obj())->notifyAll(THREAD);
  78.372 +}
  78.373 +
  78.374 +// -----------------------------------------------------------------------------
  78.375 +// Hash Code handling
  78.376 +//
  78.377  // Performance concern:
  78.378  // OrderAccess::storestore() calls release() which STs 0 into the global volatile
  78.379  // OrderAccess::Dummy variable.  This store is unnecessary for correctness.
  78.380 @@ -188,44 +414,73 @@
  78.381  static int MonitorScavengeThreshold = 1000000 ;
  78.382  static volatile int ForceMonitorScavenge = 0 ; // Scavenge required and pending
  78.383  
  78.384 +static markOop ReadStableMark (oop obj) {
  78.385 +  markOop mark = obj->mark() ;
  78.386 +  if (!mark->is_being_inflated()) {
  78.387 +    return mark ;       // normal fast-path return
  78.388 +  }
  78.389  
  78.390 -// Tunables ...
  78.391 -// The knob* variables are effectively final.  Once set they should
  78.392 -// never be modified hence.  Consider using __read_mostly with GCC.
  78.393 +  int its = 0 ;
  78.394 +  for (;;) {
  78.395 +    markOop mark = obj->mark() ;
  78.396 +    if (!mark->is_being_inflated()) {
  78.397 +      return mark ;    // normal fast-path return
  78.398 +    }
  78.399  
  78.400 -static int Knob_LogSpins           = 0 ;       // enable jvmstat tally for spins
  78.401 -static int Knob_HandOff            = 0 ;
  78.402 -static int Knob_Verbose            = 0 ;
  78.403 -static int Knob_ReportSettings     = 0 ;
  78.404 +    // The object is being inflated by some other thread.
  78.405 +    // The caller of ReadStableMark() must wait for inflation to complete.
  78.406 +    // Avoid live-lock
  78.407 +    // TODO: consider calling SafepointSynchronize::do_call_back() while
  78.408 +    // spinning to see if there's a safepoint pending.  If so, immediately
  78.409 +    // yielding or blocking would be appropriate.  Avoid spinning while
  78.410 +    // there is a safepoint pending.
  78.411 +    // TODO: add inflation contention performance counters.
  78.412 +    // TODO: restrict the aggregate number of spinners.
  78.413  
  78.414 -static int Knob_SpinLimit          = 5000 ;    // derived by an external tool -
  78.415 -static int Knob_SpinBase           = 0 ;       // Floor AKA SpinMin
  78.416 -static int Knob_SpinBackOff        = 0 ;       // spin-loop backoff
  78.417 -static int Knob_CASPenalty         = -1 ;      // Penalty for failed CAS
  78.418 -static int Knob_OXPenalty          = -1 ;      // Penalty for observed _owner change
  78.419 -static int Knob_SpinSetSucc        = 1 ;       // spinners set the _succ field
  78.420 -static int Knob_SpinEarly          = 1 ;
  78.421 -static int Knob_SuccEnabled        = 1 ;       // futile wake throttling
  78.422 -static int Knob_SuccRestrict       = 0 ;       // Limit successors + spinners to at-most-one
  78.423 -static int Knob_MaxSpinners        = -1 ;      // Should be a function of # CPUs
  78.424 -static int Knob_Bonus              = 100 ;     // spin success bonus
  78.425 -static int Knob_BonusB             = 100 ;     // spin success bonus
  78.426 -static int Knob_Penalty            = 200 ;     // spin failure penalty
  78.427 -static int Knob_Poverty            = 1000 ;
  78.428 -static int Knob_SpinAfterFutile    = 1 ;       // Spin after returning from park()
  78.429 -static int Knob_FixedSpin          = 0 ;
  78.430 -static int Knob_OState             = 3 ;       // Spinner checks thread state of _owner
  78.431 -static int Knob_UsePause           = 1 ;
  78.432 -static int Knob_ExitPolicy         = 0 ;
  78.433 -static int Knob_PreSpin            = 10 ;      // 20-100 likely better
  78.434 -static int Knob_ResetEvent         = 0 ;
  78.435 -static int BackOffMask             = 0 ;
  78.436 -
  78.437 -static int Knob_FastHSSEC          = 0 ;
  78.438 -static int Knob_MoveNotifyee       = 2 ;       // notify() - disposition of notifyee
  78.439 -static int Knob_QMode              = 0 ;       // EntryList-cxq policy - queue discipline
  78.440 -static volatile int InitDone       = 0 ;
  78.441 -
  78.442 +    ++its ;
  78.443 +    if (its > 10000 || !os::is_MP()) {
  78.444 +       if (its & 1) {
  78.445 +         os::NakedYield() ;
  78.446 +         TEVENT (Inflate: INFLATING - yield) ;
  78.447 +       } else {
  78.448 +         // Note that the following code attenuates the livelock problem but is not
  78.449 +         // a complete remedy.  A more complete solution would require that the inflating
  78.450 +         // thread hold the associated inflation lock.  The following code simply restricts
  78.451 +         // the number of spinners to at most one.  We'll have N-2 threads blocked
  78.452 +         // on the inflationlock, 1 thread holding the inflation lock and using
  78.453 +         // a yield/park strategy, and 1 thread in the midst of inflation.
  78.454 +         // A more refined approach would be to change the encoding of INFLATING
  78.455 +         // to allow encapsulation of a native thread pointer.  Threads waiting for
  78.456 +         // inflation to complete would use CAS to push themselves onto a singly linked
  78.457 +         // list rooted at the markword.  Once enqueued, they'd loop, checking a per-thread flag
  78.458 +         // and calling park().  When inflation was complete the thread that accomplished inflation
  78.459 +         // would detach the list and set the markword to inflated with a single CAS and
  78.460 +         // then for each thread on the list, set the flag and unpark() the thread.
  78.461 +         // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
  78.462 +         // wakes at most one thread whereas we need to wake the entire list.
  78.463 +         int ix = (intptr_t(obj) >> 5) & (NINFLATIONLOCKS-1) ;
  78.464 +         int YieldThenBlock = 0 ;
  78.465 +         assert (ix >= 0 && ix < NINFLATIONLOCKS, "invariant") ;
  78.466 +         assert ((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant") ;
  78.467 +         Thread::muxAcquire (InflationLocks + ix, "InflationLock") ;
  78.468 +         while (obj->mark() == markOopDesc::INFLATING()) {
  78.469 +           // Beware: NakedYield() is advisory and has almost no effect on some platforms
  78.470 +           // so we periodically call Self->_ParkEvent->park(1).
  78.471 +           // We use a mixed spin/yield/block mechanism.
  78.472 +           if ((YieldThenBlock++) >= 16) {
  78.473 +              Thread::current()->_ParkEvent->park(1) ;
  78.474 +           } else {
  78.475 +              os::NakedYield() ;
  78.476 +           }
  78.477 +         }
  78.478 +         Thread::muxRelease (InflationLocks + ix ) ;
  78.479 +         TEVENT (Inflate: INFLATING - yield/park) ;
  78.480 +       }
  78.481 +    } else {
  78.482 +       SpinPause() ;       // SMP-polite spinning
  78.483 +    }
  78.484 +  }
  78.485 +}
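
The stripe selection above -- ix = (intptr_t(obj) >> 5) & (NINFLATIONLOCKS-1) -- relies on two facts: objects are aligned, so the low bits carry no information, and the table size is a power of two, so the mask is a cheap modulo. A standalone sketch of just that arithmetic (NSTRIPES and stripe_for are illustrative names):

    #include <cstdint>
    #include <cstdio>

    constexpr int NSTRIPES = 256;   // mirrors NINFLATIONLOCKS
    static_assert((NSTRIPES & (NSTRIPES - 1)) == 0, "mask below needs a power of two");

    // Drop the low 5 bits (alignment), then mask into the stripe table.
    int stripe_for(const void* obj) {
      return static_cast<int>((reinterpret_cast<std::intptr_t>(obj) >> 5) & (NSTRIPES - 1));
    }

    int main() {
      alignas(32) static char objects[4][32];   // four 32-byte-aligned dummy "objects"
      for (auto& o : objects) {
        std::printf("%p -> stripe %d\n", static_cast<void*>(o), stripe_for(o));
      }
      return 0;
    }
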
  78.486  
  78.487  // hashCode() generation :
  78.488  //
  78.489 @@ -290,416 +545,272 @@
  78.490    TEVENT (hashCode: GENERATE) ;
  78.491    return value;
  78.492  }
  78.493 +//
  78.494 +intptr_t ObjectSynchronizer::FastHashCode (Thread * Self, oop obj) {
  78.495 +  if (UseBiasedLocking) {
  78.496 +    // NOTE: many places throughout the JVM do not expect a safepoint
  78.497 +    // to be taken here, in particular most operations on perm gen
  78.498 +    // objects. However, we only ever bias Java instances and all of
  78.499 +    // the call sites of identity_hash that might revoke biases have
  78.500 +    // been checked to make sure they can handle a safepoint. The
  78.501 +    // added check of the bias pattern is to avoid useless calls to
  78.502 +    // thread-local storage.
  78.503 +    if (obj->mark()->has_bias_pattern()) {
  78.504 +      // Box and unbox the raw reference just in case we cause a STW safepoint.
  78.505 +      Handle hobj (Self, obj) ;
  78.506 +      // Relaxing assertion for bug 6320749.
  78.507 +      assert (Universe::verify_in_progress() ||
  78.508 +              !SafepointSynchronize::is_at_safepoint(),
  78.509 +             "biases should not be seen by VM thread here");
  78.510 +      BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
  78.511 +      obj = hobj() ;
  78.512 +      assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  78.513 +    }
  78.514 +  }
  78.515  
  78.516 -void BasicLock::print_on(outputStream* st) const {
  78.517 -  st->print("monitor");
  78.518 +  // hashCode() is a heap mutator ...
  78.519 +  // Relaxing assertion for bug 6320749.
  78.520 +  assert (Universe::verify_in_progress() ||
  78.521 +          !SafepointSynchronize::is_at_safepoint(), "invariant") ;
  78.522 +  assert (Universe::verify_in_progress() ||
  78.523 +          Self->is_Java_thread() , "invariant") ;
  78.524 +  assert (Universe::verify_in_progress() ||
  78.525 +         ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ;
  78.526 +
  78.527 +  ObjectMonitor* monitor = NULL;
  78.528 +  markOop temp, test;
  78.529 +  intptr_t hash;
  78.530 +  markOop mark = ReadStableMark (obj);
  78.531 +
  78.532 +  // object should remain ineligible for biased locking
  78.533 +  assert (!mark->has_bias_pattern(), "invariant") ;
  78.534 +
  78.535 +  if (mark->is_neutral()) {
  78.536 +    hash = mark->hash();              // this is a normal header
  78.537 +    if (hash) {                       // if it has hash, just return it
  78.538 +      return hash;
  78.539 +    }
  78.540 +    hash = get_next_hash(Self, obj);  // allocate a new hash code
  78.541 +    temp = mark->copy_set_hash(hash); // merge the hash code into header
  78.542 +    // use (machine word version) atomic operation to install the hash
  78.543 +    test = (markOop) Atomic::cmpxchg_ptr(temp, obj->mark_addr(), mark);
  78.544 +    if (test == mark) {
  78.545 +      return hash;
  78.546 +    }
   78.547 +    // If the atomic operation failed, we must inflate the header
   78.548 +    // into a heavyweight monitor. We could add more code here
   78.549 +    // for the fast path, but it is not worth the complexity.
  78.550 +  } else if (mark->has_monitor()) {
  78.551 +    monitor = mark->monitor();
  78.552 +    temp = monitor->header();
  78.553 +    assert (temp->is_neutral(), "invariant") ;
  78.554 +    hash = temp->hash();
  78.555 +    if (hash) {
  78.556 +      return hash;
  78.557 +    }
  78.558 +    // Skip to the following code to reduce code size
  78.559 +  } else if (Self->is_lock_owned((address)mark->locker())) {
  78.560 +    temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
  78.561 +    assert (temp->is_neutral(), "invariant") ;
  78.562 +    hash = temp->hash();              // by current thread, check if the displaced
  78.563 +    if (hash) {                       // header contains hash code
  78.564 +      return hash;
  78.565 +    }
   78.566 +    // WARNING:
   78.567 +    //   The displaced header is strictly immutable.
   78.568 +    // It can NOT be changed in ANY case. So we have
   78.569 +    // to inflate the header into a heavyweight monitor
   78.570 +    // even if the current thread owns the lock. The reason
   78.571 +    // is that the BasicLock (stack slot) will be asynchronously
   78.572 +    // read by other threads during the inflate() function.
   78.573 +    // Any change to the stack may not propagate to other threads
   78.574 +    // correctly.
  78.575 +  }
  78.576 +
  78.577 +  // Inflate the monitor to set hash code
  78.578 +  monitor = ObjectSynchronizer::inflate(Self, obj);
  78.579 +  // Load displaced header and check it has hash code
  78.580 +  mark = monitor->header();
  78.581 +  assert (mark->is_neutral(), "invariant") ;
  78.582 +  hash = mark->hash();
  78.583 +  if (hash == 0) {
  78.584 +    hash = get_next_hash(Self, obj);
  78.585 +    temp = mark->copy_set_hash(hash); // merge hash code into header
  78.586 +    assert (temp->is_neutral(), "invariant") ;
  78.587 +    test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark);
  78.588 +    if (test != mark) {
   78.589 +      // The only update to the header in the monitor (outside GC)
   78.590 +      // is installing the hash code. If new uses of the
   78.591 +      // displaced header are added, please update this code.
  78.592 +      hash = test->hash();
  78.593 +      assert (test->is_neutral(), "invariant") ;
  78.594 +      assert (hash != 0, "Trivial unexpected object/monitor header usage.");
  78.595 +    }
  78.596 +  }
  78.597 +  // We finally get the hash
  78.598 +  return hash;
  78.599  }
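
The hash installation pattern above (compute a candidate, merge it into a copy of the header, CAS it in, and keep whichever value wins the race) can be shown in isolation. Below is a standalone sketch with an invented encoding -- the low 32 bits of a 64-bit word hold the hash, which is not the real markOop layout:

    #include <atomic>
    #include <cstdint>

    struct MiniHeader {
      std::atomic<std::uint64_t> word{0};

      // Return the installed hash, installing 'candidate' if no hash is present yet.
      // Whichever thread wins the CAS determines the hash; losers adopt the winner's value.
      std::uint32_t hash_or_install(std::uint32_t candidate) {
        std::uint64_t cur = word.load(std::memory_order_acquire);
        for (;;) {
          std::uint32_t existing = static_cast<std::uint32_t>(cur & 0xFFFFFFFFu);
          if (existing != 0) return existing;              // hash already installed: reuse it
          std::uint64_t merged = (cur & ~0xFFFFFFFFull) | candidate;
          if (word.compare_exchange_weak(cur, merged)) {
            return candidate;                              // we installed the hash
          }
          // CAS failed: 'cur' now holds the fresh header value; loop and re-check.
        }
      }
    };
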
  78.600  
  78.601 -void BasicLock::move_to(oop obj, BasicLock* dest) {
  78.602 -  // Check to see if we need to inflate the lock. This is only needed
  78.603 -  // if an object is locked using "this" lightweight monitor. In that
  78.604 -  // case, the displaced_header() is unlocked, because the
  78.605 -  // displaced_header() contains the header for the originally unlocked
  78.606 -  // object. However the object could have already been inflated. But it
   78.607 -  // does not matter, the inflation will just be a no-op. For other cases,
  78.608 -  // the displaced header will be either 0x0 or 0x3, which are location
  78.609 -  // independent, therefore the BasicLock is free to move.
  78.610 -  //
  78.611 -  // During OSR we may need to relocate a BasicLock (which contains a
  78.612 -  // displaced word) from a location in an interpreter frame to a
  78.613 -  // new location in a compiled frame.  "this" refers to the source
  78.614 -  // basiclock in the interpreter frame.  "dest" refers to the destination
  78.615 -  // basiclock in the new compiled frame.  We *always* inflate in move_to().
  78.616 -  // The always-Inflate policy works properly, but in 1.5.0 it can sometimes
  78.617 -  // cause performance problems in code that makes heavy use of a small # of
  78.618 -  // uncontended locks.   (We'd inflate during OSR, and then sync performance
  78.619 -  // would subsequently plummet because the thread would be forced thru the slow-path).
  78.620 -  // This problem has been made largely moot on IA32 by inlining the inflated fast-path
  78.621 -  // operations in Fast_Lock and Fast_Unlock in i486.ad.
  78.622 -  //
  78.623 -  // Note that there is a way to safely swing the object's markword from
  78.624 -  // one stack location to another.  This avoids inflation.  Obviously,
  78.625 -  // we need to ensure that both locations refer to the current thread's stack.
   78.626 -  // There are some subtle concurrency issues, however, and since the benefit
   78.627 -  // is small (given the support for inflated fast-path locking in the fast_lock, etc)
  78.628 -  // we'll leave that optimization for another time.
  78.629 +// Deprecated -- use FastHashCode() instead.
  78.630  
  78.631 -  if (displaced_header()->is_neutral()) {
  78.632 -    ObjectSynchronizer::inflate_helper(obj);
  78.633 -    // WARNING: We can not put check here, because the inflation
  78.634 -    // will not update the displaced header. Once BasicLock is inflated,
  78.635 -    // no one should ever look at its content.
  78.636 -  } else {
  78.637 -    // Typically the displaced header will be 0 (recursive stack lock) or
  78.638 -    // unused_mark.  Naively we'd like to assert that the displaced mark
  78.639 -    // value is either 0, neutral, or 3.  But with the advent of the
  78.640 -    // store-before-CAS avoidance in fast_lock/compiler_lock_object
  78.641 -    // we can find any flavor mark in the displaced mark.
  78.642 -  }
  78.643 -// [RGV] The next line appears to do nothing!
  78.644 -  intptr_t dh = (intptr_t) displaced_header();
  78.645 -  dest->set_displaced_header(displaced_header());
  78.646 +intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
  78.647 +  return FastHashCode (Thread::current(), obj()) ;
  78.648  }
  78.649  
  78.650 -// -----------------------------------------------------------------------------
  78.651  
  78.652 -// standard constructor, allows locking failures
  78.653 -ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) {
  78.654 -  _dolock = doLock;
  78.655 -  _thread = thread;
  78.656 -  debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);)
  78.657 -  _obj = obj;
  78.658 +bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
  78.659 +                                                   Handle h_obj) {
  78.660 +  if (UseBiasedLocking) {
  78.661 +    BiasedLocking::revoke_and_rebias(h_obj, false, thread);
  78.662 +    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  78.663 +  }
  78.664  
  78.665 -  if (_dolock) {
  78.666 -    TEVENT (ObjectLocker) ;
  78.667 +  assert(thread == JavaThread::current(), "Can only be called on current thread");
  78.668 +  oop obj = h_obj();
  78.669  
  78.670 -    ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
  78.671 +  markOop mark = ReadStableMark (obj) ;
  78.672 +
  78.673 +  // Uncontended case, header points to stack
  78.674 +  if (mark->has_locker()) {
  78.675 +    return thread->is_lock_owned((address)mark->locker());
  78.676 +  }
  78.677 +  // Contended case, header points to ObjectMonitor (tagged pointer)
  78.678 +  if (mark->has_monitor()) {
  78.679 +    ObjectMonitor* monitor = mark->monitor();
  78.680 +    return monitor->is_entered(thread) != 0 ;
  78.681 +  }
  78.682 +  // Unlocked case, header in place
  78.683 +  assert(mark->is_neutral(), "sanity check");
  78.684 +  return false;
  78.685 +}
  78.686 +
   78.687 +// Be aware that this method can revoke the bias of the lock object.
   78.688 +// It queries the ownership of the lock specified by 'h_obj'.
   78.689 +// If the current thread owns the lock, it returns owner_self. If no
   78.690 +// thread owns the lock, it returns owner_none. Otherwise, it returns
   78.691 +// owner_other.
  78.692 +ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
  78.693 +(JavaThread *self, Handle h_obj) {
  78.694 +  // The caller must beware this method can revoke bias, and
  78.695 +  // revocation can result in a safepoint.
  78.696 +  assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ;
  78.697 +  assert (self->thread_state() != _thread_blocked , "invariant") ;
  78.698 +
  78.699 +  // Possible mark states: neutral, biased, stack-locked, inflated
  78.700 +
  78.701 +  if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) {
  78.702 +    // CASE: biased
  78.703 +    BiasedLocking::revoke_and_rebias(h_obj, false, self);
  78.704 +    assert(!h_obj->mark()->has_bias_pattern(),
  78.705 +           "biases should be revoked by now");
  78.706 +  }
  78.707 +
  78.708 +  assert(self == JavaThread::current(), "Can only be called on current thread");
  78.709 +  oop obj = h_obj();
  78.710 +  markOop mark = ReadStableMark (obj) ;
  78.711 +
  78.712 +  // CASE: stack-locked.  Mark points to a BasicLock on the owner's stack.
  78.713 +  if (mark->has_locker()) {
  78.714 +    return self->is_lock_owned((address)mark->locker()) ?
  78.715 +      owner_self : owner_other;
  78.716 +  }
  78.717 +
  78.718 +  // CASE: inflated. Mark (tagged pointer) points to an objectMonitor.
  78.719 +  // The Object:ObjectMonitor relationship is stable as long as we're
  78.720 +  // not at a safepoint.
  78.721 +  if (mark->has_monitor()) {
  78.722 +    void * owner = mark->monitor()->_owner ;
  78.723 +    if (owner == NULL) return owner_none ;
  78.724 +    return (owner == self ||
  78.725 +            self->is_lock_owned((address)owner)) ? owner_self : owner_other;
  78.726 +  }
  78.727 +
  78.728 +  // CASE: neutral
  78.729 +  assert(mark->is_neutral(), "sanity check");
  78.730 +  return owner_none ;           // it's unlocked
  78.731 +}
  78.732 +
  78.733 +// FIXME: jvmti should call this
  78.734 +JavaThread* ObjectSynchronizer::get_lock_owner(Handle h_obj, bool doLock) {
  78.735 +  if (UseBiasedLocking) {
  78.736 +    if (SafepointSynchronize::is_at_safepoint()) {
  78.737 +      BiasedLocking::revoke_at_safepoint(h_obj);
  78.738 +    } else {
  78.739 +      BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current());
  78.740 +    }
  78.741 +    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  78.742 +  }
  78.743 +
  78.744 +  oop obj = h_obj();
  78.745 +  address owner = NULL;
  78.746 +
  78.747 +  markOop mark = ReadStableMark (obj) ;
  78.748 +
  78.749 +  // Uncontended case, header points to stack
  78.750 +  if (mark->has_locker()) {
  78.751 +    owner = (address) mark->locker();
  78.752 +  }
  78.753 +
  78.754 +  // Contended case, header points to ObjectMonitor (tagged pointer)
  78.755 +  if (mark->has_monitor()) {
  78.756 +    ObjectMonitor* monitor = mark->monitor();
  78.757 +    assert(monitor != NULL, "monitor should be non-null");
  78.758 +    owner = (address) monitor->owner();
  78.759 +  }
  78.760 +
  78.761 +  if (owner != NULL) {
  78.762 +    return Threads::owning_thread_from_monitor_owner(owner, doLock);
  78.763 +  }
  78.764 +
  78.765 +  // Unlocked case, header in place
   78.766 +  // Cannot assert here, since this object may have been
   78.767 +  // locked by another thread by the time we reach this point.
  78.768 +  // assert(mark->is_neutral(), "sanity check");
  78.769 +
  78.770 +  return NULL;
  78.771 +}
  78.772 +// Visitors ...
  78.773 +
  78.774 +void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
  78.775 +  ObjectMonitor* block = gBlockList;
  78.776 +  ObjectMonitor* mid;
  78.777 +  while (block) {
  78.778 +    assert(block->object() == CHAINMARKER, "must be a block header");
  78.779 +    for (int i = _BLOCKSIZE - 1; i > 0; i--) {
  78.780 +      mid = block + i;
  78.781 +      oop object = (oop) mid->object();
  78.782 +      if (object != NULL) {
  78.783 +        closure->do_monitor(mid);
  78.784 +      }
  78.785 +    }
  78.786 +    block = (ObjectMonitor*) block->FreeNext;
  78.787    }
  78.788  }
  78.789  
  78.790 -ObjectLocker::~ObjectLocker() {
  78.791 -  if (_dolock) {
  78.792 -    ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
  78.793 -  }
  78.794 +// Get the next block in the block list.
  78.795 +static inline ObjectMonitor* next(ObjectMonitor* block) {
  78.796 +  assert(block->object() == CHAINMARKER, "must be a block header");
  78.797 +  block = block->FreeNext ;
  78.798 +  assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
  78.799 +  return block;
  78.800  }
  78.801  
  78.802 -// -----------------------------------------------------------------------------
  78.803  
  78.804 -
  78.805 -PerfCounter * ObjectSynchronizer::_sync_Inflations                  = NULL ;
  78.806 -PerfCounter * ObjectSynchronizer::_sync_Deflations                  = NULL ;
  78.807 -PerfCounter * ObjectSynchronizer::_sync_ContendedLockAttempts       = NULL ;
  78.808 -PerfCounter * ObjectSynchronizer::_sync_FutileWakeups               = NULL ;
  78.809 -PerfCounter * ObjectSynchronizer::_sync_Parks                       = NULL ;
  78.810 -PerfCounter * ObjectSynchronizer::_sync_EmptyNotifications          = NULL ;
  78.811 -PerfCounter * ObjectSynchronizer::_sync_Notifications               = NULL ;
  78.812 -PerfCounter * ObjectSynchronizer::_sync_PrivateA                    = NULL ;
  78.813 -PerfCounter * ObjectSynchronizer::_sync_PrivateB                    = NULL ;
  78.814 -PerfCounter * ObjectSynchronizer::_sync_SlowExit                    = NULL ;
  78.815 -PerfCounter * ObjectSynchronizer::_sync_SlowEnter                   = NULL ;
  78.816 -PerfCounter * ObjectSynchronizer::_sync_SlowNotify                  = NULL ;
  78.817 -PerfCounter * ObjectSynchronizer::_sync_SlowNotifyAll               = NULL ;
  78.818 -PerfCounter * ObjectSynchronizer::_sync_FailedSpins                 = NULL ;
  78.819 -PerfCounter * ObjectSynchronizer::_sync_SuccessfulSpins             = NULL ;
  78.820 -PerfCounter * ObjectSynchronizer::_sync_MonInCirculation            = NULL ;
  78.821 -PerfCounter * ObjectSynchronizer::_sync_MonScavenged                = NULL ;
  78.822 -PerfLongVariable * ObjectSynchronizer::_sync_MonExtant              = NULL ;
  78.823 -
  78.824 -// One-shot global initialization for the sync subsystem.
  78.825 -// We could also defer initialization and initialize on-demand
  78.826 -// the first time we call inflate().  Initialization would
  78.827 -// be protected - like so many things - by the MonitorCache_lock.
  78.828 -
  78.829 -void ObjectSynchronizer::Initialize () {
  78.830 -  static int InitializationCompleted = 0 ;
  78.831 -  assert (InitializationCompleted == 0, "invariant") ;
  78.832 -  InitializationCompleted = 1 ;
  78.833 -  if (UsePerfData) {
  78.834 -      EXCEPTION_MARK ;
  78.835 -      #define NEWPERFCOUNTER(n)   {n = PerfDataManager::create_counter(SUN_RT, #n, PerfData::U_Events,CHECK); }
  78.836 -      #define NEWPERFVARIABLE(n)  {n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events,CHECK); }
  78.837 -      NEWPERFCOUNTER(_sync_Inflations) ;
  78.838 -      NEWPERFCOUNTER(_sync_Deflations) ;
  78.839 -      NEWPERFCOUNTER(_sync_ContendedLockAttempts) ;
  78.840 -      NEWPERFCOUNTER(_sync_FutileWakeups) ;
  78.841 -      NEWPERFCOUNTER(_sync_Parks) ;
  78.842 -      NEWPERFCOUNTER(_sync_EmptyNotifications) ;
  78.843 -      NEWPERFCOUNTER(_sync_Notifications) ;
  78.844 -      NEWPERFCOUNTER(_sync_SlowEnter) ;
  78.845 -      NEWPERFCOUNTER(_sync_SlowExit) ;
  78.846 -      NEWPERFCOUNTER(_sync_SlowNotify) ;
  78.847 -      NEWPERFCOUNTER(_sync_SlowNotifyAll) ;
  78.848 -      NEWPERFCOUNTER(_sync_FailedSpins) ;
  78.849 -      NEWPERFCOUNTER(_sync_SuccessfulSpins) ;
  78.850 -      NEWPERFCOUNTER(_sync_PrivateA) ;
  78.851 -      NEWPERFCOUNTER(_sync_PrivateB) ;
  78.852 -      NEWPERFCOUNTER(_sync_MonInCirculation) ;
  78.853 -      NEWPERFCOUNTER(_sync_MonScavenged) ;
  78.854 -      NEWPERFVARIABLE(_sync_MonExtant) ;
  78.855 -      #undef NEWPERFCOUNTER
  78.856 -  }
  78.857 -}
  78.858 -
  78.859 -// Compile-time asserts
  78.860 -// When possible, it's better to catch errors deterministically at
  78.861 -// compile-time than at runtime.  The down-side to using compile-time
  78.862 -// asserts is that error message -- often something about negative array
  78.863 -// indices -- is opaque.
  78.864 -
  78.865 -#define CTASSERT(x) { int tag[1-(2*!(x))]; printf ("Tag @" INTPTR_FORMAT "\n", (intptr_t)tag); }
  78.866 -
  78.867 -void ObjectMonitor::ctAsserts() {
  78.868 -  CTASSERT(offset_of (ObjectMonitor, _header) == 0);
  78.869 -}
  78.870 -
  78.871 -static int Adjust (volatile int * adr, int dx) {
  78.872 -  int v ;
  78.873 -  for (v = *adr ; Atomic::cmpxchg (v + dx, adr, v) != v; v = *adr) ;
  78.874 -  return v ;
  78.875 -}
  78.876 -
  78.877 -// Ad-hoc mutual exclusion primitives: SpinLock and Mux
  78.878 -//
  78.879 -// We employ SpinLocks _only for low-contention, fixed-length
  78.880 -// short-duration critical sections where we're concerned
  78.881 -// about native mutex_t or HotSpot Mutex:: latency.
  78.882 -// The mux construct provides a spin-then-block mutual exclusion
  78.883 -// mechanism.
  78.884 -//
  78.885 -// Testing has shown that contention on the ListLock guarding gFreeList
  78.886 -// is common.  If we implement ListLock as a simple SpinLock it's common
  78.887 -// for the JVM to devolve to yielding with little progress.  This is true
  78.888 -// despite the fact that the critical sections protected by ListLock are
  78.889 -// extremely short.
  78.890 -//
  78.891 -// TODO-FIXME: ListLock should be of type SpinLock.
  78.892 -// We should make this a 1st-class type, integrated into the lock
  78.893 -// hierarchy as leaf-locks.  Critically, the SpinLock structure
  78.894 -// should have sufficient padding to avoid false-sharing and excessive
  78.895 -// cache-coherency traffic.
  78.896 -
  78.897 -
  78.898 -typedef volatile int SpinLockT ;
  78.899 -
  78.900 -void Thread::SpinAcquire (volatile int * adr, const char * LockName) {
  78.901 -  if (Atomic::cmpxchg (1, adr, 0) == 0) {
  78.902 -     return ;   // normal fast-path return
  78.903 -  }
  78.904 -
  78.905 -  // Slow-path : We've encountered contention -- Spin/Yield/Block strategy.
  78.906 -  TEVENT (SpinAcquire - ctx) ;
  78.907 -  int ctr = 0 ;
  78.908 -  int Yields = 0 ;
  78.909 -  for (;;) {
  78.910 -     while (*adr != 0) {
  78.911 -        ++ctr ;
  78.912 -        if ((ctr & 0xFFF) == 0 || !os::is_MP()) {
  78.913 -           if (Yields > 5) {
  78.914 -             // Consider using a simple NakedSleep() instead.
  78.915 -             // Then SpinAcquire could be called by non-JVM threads
  78.916 -             Thread::current()->_ParkEvent->park(1) ;
  78.917 -           } else {
  78.918 -             os::NakedYield() ;
  78.919 -             ++Yields ;
  78.920 -           }
  78.921 -        } else {
  78.922 -           SpinPause() ;
  78.923 -        }
  78.924 -     }
  78.925 -     if (Atomic::cmpxchg (1, adr, 0) == 0) return ;
  78.926 -  }
  78.927 -}
  78.928 -
  78.929 -void Thread::SpinRelease (volatile int * adr) {
  78.930 -  assert (*adr != 0, "invariant") ;
  78.931 -  OrderAccess::fence() ;      // guarantee at least release consistency.
  78.932 -  // Roach-motel semantics.
  78.933 -  // It's safe if subsequent LDs and STs float "up" into the critical section,
  78.934 -  // but prior LDs and STs within the critical section can't be allowed
  78.935 -  // to reorder or float past the ST that releases the lock.
  78.936 -  *adr = 0 ;
  78.937 -}
  78.938 -
  78.939 -// muxAcquire and muxRelease:
  78.940 -//
  78.941 -// *  muxAcquire and muxRelease support a single-word lock-word construct.
  78.942 -//    The LSB of the word is set IFF the lock is held.
  78.943 -//    The remainder of the word points to the head of a singly-linked list
  78.944 -//    of threads blocked on the lock.
  78.945 -//
  78.946 -// *  The current implementation of muxAcquire-muxRelease uses its own
  78.947 -//    dedicated Thread._MuxEvent instance.  If we're interested in
  78.948 -//    minimizing the peak number of extant ParkEvent instances then
  78.949 -//    we could eliminate _MuxEvent and "borrow" _ParkEvent as long
  78.950 -//    as certain invariants were satisfied.  Specifically, care would need
  78.951 -//    to be taken with regards to consuming unpark() "permits".
  78.952 -//    A safe rule of thumb is that a thread would never call muxAcquire()
  78.953 -//    if it's enqueued (cxq, EntryList, WaitList, etc) and will subsequently
  78.954 -//    park().  Otherwise the _ParkEvent park() operation in muxAcquire() could
  78.955 -//    consume an unpark() permit intended for monitorenter, for instance.
  78.956 -//    One way around this would be to widen the restricted-range semaphore
  78.957 -//    implemented in park().  Another alternative would be to provide
  78.958 -//    multiple instances of the PlatformEvent() for each thread.  One
  78.959 -//    instance would be dedicated to muxAcquire-muxRelease, for instance.
  78.960 -//
  78.961 -// *  Usage:
  78.962 -//    -- Only as leaf locks
  78.963 -//    -- for short-term locking only as muxAcquire does not perform
  78.964 -//       thread state transitions.
  78.965 -//
  78.966 -// Alternatives:
  78.967 -// *  We could implement muxAcquire and muxRelease with MCS or CLH locks
  78.968 -//    but with parking or spin-then-park instead of pure spinning.
  78.969 -// *  Use Taura-Oyama-Yonenzawa locks.
  78.970 -// *  It's possible to construct a 1-0 lock if we encode the lockword as
  78.971 -//    (List,LockByte).  Acquire will CAS the full lockword while Release
  78.972 -//    will STB 0 into the LockByte.  The 1-0 scheme admits stranding, so
  78.973 -//    acquiring threads use timers (ParkTimed) to detect and recover from
  78.974 -//    the stranding window.  Thread/Node structures must be aligned on 256-byte
  78.975 -//    boundaries by using placement-new.
  78.976 -// *  Augment MCS with advisory back-link fields maintained with CAS().
  78.977 -//    Pictorially:  LockWord -> T1 <-> T2 <-> T3 <-> ... <-> Tn <-> Owner.
  78.978 -//    The validity of the backlinks must be ratified before we trust the value.
  78.979 -//    If the backlinks are invalid the exiting thread must back-track through the
   78.980 -//    forward links, which are always trustworthy.
  78.981 -// *  Add a successor indication.  The LockWord is currently encoded as
  78.982 -//    (List, LOCKBIT:1).  We could also add a SUCCBIT or an explicit _succ variable
  78.983 -//    to provide the usual futile-wakeup optimization.
  78.984 -//    See RTStt for details.
  78.985 -// *  Consider schedctl.sc_nopreempt to cover the critical section.
  78.986 -//
  78.987 -
  78.988 -
  78.989 -typedef volatile intptr_t MutexT ;      // Mux Lock-word
  78.990 -enum MuxBits { LOCKBIT = 1 } ;
  78.991 -
  78.992 -void Thread::muxAcquire (volatile intptr_t * Lock, const char * LockName) {
  78.993 -  intptr_t w = Atomic::cmpxchg_ptr (LOCKBIT, Lock, 0) ;
  78.994 -  if (w == 0) return ;
  78.995 -  if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
  78.996 -     return ;
  78.997 -  }
  78.998 -
  78.999 -  TEVENT (muxAcquire - Contention) ;
 78.1000 -  ParkEvent * const Self = Thread::current()->_MuxEvent ;
 78.1001 -  assert ((intptr_t(Self) & LOCKBIT) == 0, "invariant") ;
 78.1002 -  for (;;) {
 78.1003 -     int its = (os::is_MP() ? 100 : 0) + 1 ;
 78.1004 -
 78.1005 -     // Optional spin phase: spin-then-park strategy
 78.1006 -     while (--its >= 0) {
 78.1007 -       w = *Lock ;
 78.1008 -       if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
 78.1009 -          return ;
 78.1010 -       }
 78.1011 -     }
 78.1012 -
 78.1013 -     Self->reset() ;
 78.1014 -     Self->OnList = intptr_t(Lock) ;
 78.1015 -     // The following fence() isn't _strictly necessary as the subsequent
 78.1016 -     // CAS() both serializes execution and ratifies the fetched *Lock value.
 78.1017 -     OrderAccess::fence();
 78.1018 -     for (;;) {
 78.1019 -        w = *Lock ;
 78.1020 -        if ((w & LOCKBIT) == 0) {
 78.1021 -            if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
 78.1022 -                Self->OnList = 0 ;   // hygiene - allows stronger asserts
 78.1023 -                return ;
 78.1024 -            }
 78.1025 -            continue ;      // Interference -- *Lock changed -- Just retry
 78.1026 -        }
 78.1027 -        assert (w & LOCKBIT, "invariant") ;
 78.1028 -        Self->ListNext = (ParkEvent *) (w & ~LOCKBIT );
 78.1029 -        if (Atomic::cmpxchg_ptr (intptr_t(Self)|LOCKBIT, Lock, w) == w) break ;
 78.1030 -     }
 78.1031 -
 78.1032 -     while (Self->OnList != 0) {
 78.1033 -        Self->park() ;
 78.1034 -     }
 78.1035 -  }
 78.1036 -}
 78.1037 -
 78.1038 -void Thread::muxAcquireW (volatile intptr_t * Lock, ParkEvent * ev) {
 78.1039 -  intptr_t w = Atomic::cmpxchg_ptr (LOCKBIT, Lock, 0) ;
 78.1040 -  if (w == 0) return ;
 78.1041 -  if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
 78.1042 -    return ;
 78.1043 -  }
 78.1044 -
 78.1045 -  TEVENT (muxAcquire - Contention) ;
 78.1046 -  ParkEvent * ReleaseAfter = NULL ;
 78.1047 -  if (ev == NULL) {
 78.1048 -    ev = ReleaseAfter = ParkEvent::Allocate (NULL) ;
 78.1049 -  }
 78.1050 -  assert ((intptr_t(ev) & LOCKBIT) == 0, "invariant") ;
 78.1051 -  for (;;) {
 78.1052 -    guarantee (ev->OnList == 0, "invariant") ;
 78.1053 -    int its = (os::is_MP() ? 100 : 0) + 1 ;
 78.1054 -
 78.1055 -    // Optional spin phase: spin-then-park strategy
 78.1056 -    while (--its >= 0) {
 78.1057 -      w = *Lock ;
 78.1058 -      if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
 78.1059 -        if (ReleaseAfter != NULL) {
 78.1060 -          ParkEvent::Release (ReleaseAfter) ;
 78.1061 -        }
 78.1062 -        return ;
 78.1063 +void ObjectSynchronizer::oops_do(OopClosure* f) {
 78.1064 +  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
 78.1065 +  for (ObjectMonitor* block = gBlockList; block != NULL; block = next(block)) {
 78.1066 +    assert(block->object() == CHAINMARKER, "must be a block header");
 78.1067 +    for (int i = 1; i < _BLOCKSIZE; i++) {
 78.1068 +      ObjectMonitor* mid = &block[i];
 78.1069 +      if (mid->object() != NULL) {
 78.1070 +        f->do_oop((oop*)mid->object_addr());
 78.1071        }
 78.1072      }
 78.1073 -
 78.1074 -    ev->reset() ;
 78.1075 -    ev->OnList = intptr_t(Lock) ;
 78.1076 -    // The following fence() isn't _strictly necessary as the subsequent
 78.1077 -    // CAS() both serializes execution and ratifies the fetched *Lock value.
 78.1078 -    OrderAccess::fence();
 78.1079 -    for (;;) {
 78.1080 -      w = *Lock ;
 78.1081 -      if ((w & LOCKBIT) == 0) {
 78.1082 -        if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
 78.1083 -          ev->OnList = 0 ;
 78.1084 -          // We call ::Release while holding the outer lock, thus
 78.1085 -          // artificially lengthening the critical section.
 78.1086 -          // Consider deferring the ::Release() until the subsequent unlock(),
 78.1087 -          // after we've dropped the outer lock.
 78.1088 -          if (ReleaseAfter != NULL) {
 78.1089 -            ParkEvent::Release (ReleaseAfter) ;
 78.1090 -          }
 78.1091 -          return ;
 78.1092 -        }
 78.1093 -        continue ;      // Interference -- *Lock changed -- Just retry
 78.1094 -      }
 78.1095 -      assert (w & LOCKBIT, "invariant") ;
 78.1096 -      ev->ListNext = (ParkEvent *) (w & ~LOCKBIT );
 78.1097 -      if (Atomic::cmpxchg_ptr (intptr_t(ev)|LOCKBIT, Lock, w) == w) break ;
 78.1098 -    }
 78.1099 -
 78.1100 -    while (ev->OnList != 0) {
 78.1101 -      ev->park() ;
 78.1102 -    }
 78.1103    }
 78.1104  }
 78.1105  
 78.1106 -// Release() must extract a successor from the list and then wake that thread.
 78.1107 -// It can "pop" the front of the list or use a detach-modify-reattach (DMR) scheme
 78.1108 -// similar to that used by ParkEvent::Allocate() and ::Release().  DMR-based
 78.1109 -// Release() would :
 78.1110 -// (A) CAS() or swap() null to *Lock, releasing the lock and detaching the list.
 78.1111 -// (B) Extract a successor from the private list "in-hand"
 78.1112 -// (C) attempt to CAS() the residual back into *Lock over null.
 78.1113 -//     If there were any newly arrived threads and the CAS() would fail.
 78.1114 -//     In that case Release() would detach the RATs, re-merge the list in-hand
 78.1115 -//     with the RATs and repeat as needed.  Alternately, Release() might
 78.1116 -//     detach and extract a successor, but then pass the residual list to the wakee.
 78.1117 -//     The wakee would be responsible for reattaching and remerging before it
 78.1118 -//     competed for the lock.
 78.1119 -//
 78.1120 -// Both "pop" and DMR are immune from ABA corruption -- there can be
 78.1121 -// multiple concurrent pushers, but only one popper or detacher.
 78.1122 -// This implementation pops from the head of the list.  This is unfair,
 78.1123 -// but tends to provide excellent throughput as hot threads remain hot.
 78.1124 -// (We wake recently run threads first).
 78.1125  
 78.1126 -void Thread::muxRelease (volatile intptr_t * Lock)  {
 78.1127 -  for (;;) {
 78.1128 -    const intptr_t w = Atomic::cmpxchg_ptr (0, Lock, LOCKBIT) ;
 78.1129 -    assert (w & LOCKBIT, "invariant") ;
 78.1130 -    if (w == LOCKBIT) return ;
 78.1131 -    ParkEvent * List = (ParkEvent *) (w & ~LOCKBIT) ;
 78.1132 -    assert (List != NULL, "invariant") ;
 78.1133 -    assert (List->OnList == intptr_t(Lock), "invariant") ;
 78.1134 -    ParkEvent * nxt = List->ListNext ;
 78.1135 -
 78.1136 -    // The following CAS() releases the lock and pops the head element.
 78.1137 -    if (Atomic::cmpxchg_ptr (intptr_t(nxt), Lock, w) != w) {
 78.1138 -      continue ;
 78.1139 -    }
 78.1140 -    List->OnList = 0 ;
 78.1141 -    OrderAccess::fence() ;
 78.1142 -    List->unpark () ;
 78.1143 -    return ;
 78.1144 -  }
 78.1145 -}
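
The lock-word encoding described in the comments above (the LSB is the lock bit, the remaining bits point to a singly linked list of waiting threads) can be sketched outside the VM. The following spin-only sketch uses invented names (mux_acquire, mux_release, WaitNode); the real code parks and unparks waiters instead of spinning and carries additional assertions and fencing:

    #include <atomic>
    #include <cstdint>
    #include <thread>

    struct WaitNode {
      std::atomic<int> on_list{0};
      WaitNode*        next = nullptr;
    };
    constexpr std::intptr_t LOCKBIT = 1;

    void mux_acquire(std::atomic<std::intptr_t>& lock, WaitNode& self) {
      for (;;) {
        std::intptr_t w = lock.load(std::memory_order_acquire);
        if ((w & LOCKBIT) == 0) {
          // Lock bit clear: try to set it, keeping any existing waiter list intact.
          if (lock.compare_exchange_weak(w, w | LOCKBIT)) return;
          continue;
        }
        // Locked: push ourselves onto the waiter list, then wait to be popped.
        self.on_list.store(1, std::memory_order_relaxed);
        self.next = reinterpret_cast<WaitNode*>(w & ~LOCKBIT);
        if (lock.compare_exchange_weak(w, reinterpret_cast<std::intptr_t>(&self) | LOCKBIT)) {
          while (self.on_list.load(std::memory_order_acquire)) std::this_thread::yield();
          // Woken by mux_release(): loop and compete for the lock again.
        }
      }
    }

    void mux_release(std::atomic<std::intptr_t>& lock) {
      for (;;) {
        std::intptr_t w = lock.load(std::memory_order_acquire);
        if (w == LOCKBIT) {
          // Held with no waiters: just clear the lock bit.
          if (lock.compare_exchange_weak(w, 0)) return;
          continue;
        }
        // Pop the head waiter and release the lock in a single CAS, then wake it.
        WaitNode* head = reinterpret_cast<WaitNode*>(w & ~LOCKBIT);
        if (lock.compare_exchange_weak(w, reinterpret_cast<std::intptr_t>(head->next))) {
          head->on_list.store(0, std::memory_order_release);
          return;
        }
      }
    }
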
 78.1146 -
 78.1147 +// -----------------------------------------------------------------------------
 78.1148  // ObjectMonitor Lifecycle
 78.1149  // -----------------------
 78.1150  // Inflation unlinks monitors from the global gFreeList and
 78.1151 @@ -718,41 +829,7 @@
 78.1152  // --   assigned to an object.  The object is inflated and the mark refers
 78.1153  //      to the objectmonitor.
 78.1154  //
 78.1155 -// TODO-FIXME:
 78.1156 -//
 78.1157 -// *  We currently protect the gFreeList with a simple lock.
 78.1158 -//    An alternate lock-free scheme would be to pop elements from the gFreeList
 78.1159 -//    with CAS.  This would be safe from ABA corruption as long we only
 78.1160 -//    recycled previously appearing elements onto the list in deflate_idle_monitors()
 78.1161 -//    at STW-time.  Completely new elements could always be pushed onto the gFreeList
 78.1162 -//    with CAS.  Elements that appeared previously on the list could only
 78.1163 -//    be installed at STW-time.
 78.1164 -//
 78.1165 -// *  For efficiency and to help reduce the store-before-CAS penalty
 78.1166 -//    the objectmonitors on gFreeList or local free lists should be ready to install
 78.1167 -//    with the exception of _header and _object.  _object can be set after inflation.
 78.1168 -//    In particular, keep all objectMonitors on a thread's private list in ready-to-install
 78.1169 -//    state with m.Owner set properly.
 78.1170 -//
 78.1171 -// *  We could all diffuse contention by using multiple global (FreeList, Lock)
 78.1172 -//    pairs -- threads could use trylock() and a cyclic-scan strategy to search for
 78.1173 -//    an unlocked free list.
 78.1174 -//
 78.1175 -// *  Add lifecycle tags and assert()s.
 78.1176 -//
 78.1177 -// *  Be more consistent about when we clear an objectmonitor's fields:
 78.1178 -//    A.  After extracting the objectmonitor from a free list.
 78.1179 -//    B.  After adding an objectmonitor to a free list.
 78.1180 -//
 78.1181  
 78.1182 -ObjectMonitor * ObjectSynchronizer::gBlockList = NULL ;
 78.1183 -ObjectMonitor * volatile ObjectSynchronizer::gFreeList  = NULL ;
 78.1184 -ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList  = NULL ;
 78.1185 -int ObjectSynchronizer::gOmInUseCount = 0;
 78.1186 -static volatile intptr_t ListLock = 0 ;      // protects global monitor free-list cache
 78.1187 -static volatile int MonitorFreeCount  = 0 ;      // # on gFreeList
 78.1188 -static volatile int MonitorPopulation = 0 ;      // # Extant -- in circulation
 78.1189 -#define CHAINMARKER ((oop)-1)
 78.1190  
 78.1191  // Constraining monitor pool growth via MonitorBound ...
 78.1192  //
 78.1193 @@ -768,41 +845,8 @@
 78.1194  // we'll incur more safepoints, which are harmful to performance.
 78.1195  // See also: GuaranteedSafepointInterval
 78.1196  //
 78.1197 -// As noted elsewhere, the correct long-term solution is to deflate at
 78.1198 -// monitorexit-time, in which case the number of inflated objects is bounded
 78.1199 -// by the number of threads.  That policy obviates the need for scavenging at
 78.1200 -// STW safepoint time.   As an aside, scavenging can be time-consuming when the
 78.1201 -// # of extant monitors is large.   Unfortunately there's a day-1 assumption baked
 78.1202 -// into much HotSpot code that the object::monitor relationship, once established
 78.1203 -// or observed, will remain stable except over potential safepoints.
 78.1204 -//
 78.1205 -// We can use either a blocking synchronous VM operation or an async VM operation.
 78.1206 -// -- If we use a blocking VM operation :
 78.1207 -//    Calls to ScavengeCheck() should be inserted only into 'safe' locations in paths
 78.1208 -//    that lead to ::inflate() or ::omAlloc().
 78.1209 -//    Even though the safepoint will not directly induce GC, a GC might
 78.1210 -//    piggyback on the safepoint operation, so the caller should hold no naked oops.
 78.1211 -//    Furthermore, monitor::object relationships are NOT necessarily stable over this call
 78.1212 -//    unless the caller has made provisions to "pin" the object to the monitor, say
 78.1213 -//    by incrementing the monitor's _count field.
 78.1214 -// -- If we use a non-blocking asynchronous VM operation :
 78.1215 -//    the constraints above don't apply.  The safepoint will fire in the future
 78.1216 -//    at a more convenient time.  On the other hand the latency between posting and
 78.1217 -//    running the safepoint introduces or admits "slop" or laxity during which the
 78.1218 -//    monitor population can climb further above the threshold.  The monitor population,
 78.1219 -//    however, tends to converge asymptotically over time to a count that's slightly
 78.1220 -//    above the target value specified by MonitorBound.   That is, we avoid unbounded
 78.1221 -//    growth, albeit with some imprecision.
 78.1222 -//
 78.1223  // The current implementation uses asynchronous VM operations.
 78.1224  //
 78.1225 -// Ideally we'd check if (MonitorPopulation > MonitorBound) in omAlloc()
 78.1226 -// immediately before trying to grow the global list via allocation.
 78.1227 -// If the predicate was true then we'd induce a synchronous safepoint, wait
 78.1228 -// for the safepoint to complete, and then again to allocate from the global
 78.1229 -// free list.  This approach is much simpler and precise, admitting no "slop".
 78.1230 -// Unfortunately we can't safely safepoint in the midst of omAlloc(), so
 78.1231 -// instead we use asynchronous safepoints.
 78.1232  
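As a hedged sketch of the policy the comments above describe (not code from this changeset): the bound check amounts to comparing the MonitorPopulation counter declared earlier in this file against the MonitorBound flag and, when exceeded, inducing a scavenge. The actual changeset keeps this logic inside omAlloc() and relies on the asynchronous safepoint posted by InduceScavenge() below.

    // Illustrative sketch only.  The real path cannot safepoint inside
    // omAlloc(), so InduceScavenge() posts an *asynchronous* safepoint.
    static void CheckMonitorBound (Thread * Self) {
      if (MonitorBound > 0 && MonitorPopulation > MonitorBound) {
        InduceScavenge (Self, "omAlloc") ;   // posts VM_ForceAsyncSafepoint
      }
    }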
 78.1233  static void InduceScavenge (Thread * Self, const char * Whence) {
 78.1234    // Induce STW safepoint to trim monitors
 78.1235 @@ -812,7 +856,7 @@
 78.1236    // TODO: assert thread state is reasonable
 78.1237  
 78.1238    if (ForceMonitorScavenge == 0 && Atomic::xchg (1, &ForceMonitorScavenge) == 0) {
 78.1239 -    if (Knob_Verbose) {
 78.1240 +    if (ObjectMonitor::Knob_Verbose) {
 78.1241        ::printf ("Monitor scavenge - Induced STW @%s (%d)\n", Whence, ForceMonitorScavenge) ;
 78.1242        ::fflush(stdout) ;
 78.1243      }
 78.1244 @@ -822,7 +866,7 @@
 78.1245      // The VMThread will delete the op when completed.
 78.1246      VMThread::execute (new VM_ForceAsyncSafepoint()) ;
 78.1247  
 78.1248 -    if (Knob_Verbose) {
 78.1249 +    if (ObjectMonitor::Knob_Verbose) {
 78.1250        ::printf ("Monitor scavenge - STW posted @%s (%d)\n", Whence, ForceMonitorScavenge) ;
 78.1251        ::fflush(stdout) ;
 78.1252      }
 78.1253 @@ -844,7 +888,6 @@
 78.1254     assert(freetally == Self->omFreeCount, "free count off");
 78.1255  }
 78.1256  */
 78.1257 -
 78.1258  ObjectMonitor * ATTR ObjectSynchronizer::omAlloc (Thread * Self) {
 78.1259      // A large MAXPRIVATE value reduces both list lock contention
 78.1260      // and list coherency traffic, but also tends to increase the
 78.1261 @@ -974,12 +1017,6 @@
 78.1262  // attempt failed.  This doesn't allow unbounded #s of monitors to
 78.1263  // accumulate on a thread's free list.
 78.1264  //
 78.1265 -// In the future the usage of omRelease() might change and monitors
 78.1266 -// could migrate between free lists.  In that case to avoid excessive
 78.1267 -// accumulation we could  limit omCount to (omProvision*2), otherwise return
 78.1268 -// the objectMonitor to the global list.  We should drain (return) in reasonable chunks.
 78.1269 -// That is, *not* one-at-a-time.
 78.1270 -
 78.1271  
 78.1272  void ObjectSynchronizer::omRelease (Thread * Self, ObjectMonitor * m, bool fromPerThreadAlloc) {
 78.1273      guarantee (m->object() == NULL, "invariant") ;
 78.1274 @@ -1082,15 +1119,6 @@
 78.1275      TEVENT (omFlush) ;
 78.1276  }
 78.1277  
 78.1278 -
 78.1279 -// Get the next block in the block list.
 78.1280 -static inline ObjectMonitor* next(ObjectMonitor* block) {
 78.1281 -  assert(block->object() == CHAINMARKER, "must be a block header");
 78.1282 -  block = block->FreeNext ;
 78.1283 -  assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
 78.1284 -  return block;
 78.1285 -}
 78.1286 -
 78.1287  // Fast path code shared by multiple functions
 78.1288  ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
 78.1289    markOop mark = obj->mark();
 78.1290 @@ -1102,79 +1130,10 @@
 78.1291    return ObjectSynchronizer::inflate(Thread::current(), obj);
 78.1292  }
 78.1293  
 78.1294 +
 78.1295  // Note that we could encounter some performance loss through false-sharing as
 78.1296  // multiple locks occupy the same $ line.  Padding might be appropriate.
 78.1297  
 78.1298 -#define NINFLATIONLOCKS 256
 78.1299 -static volatile intptr_t InflationLocks [NINFLATIONLOCKS] ;
 78.1300 -
 78.1301 -static markOop ReadStableMark (oop obj) {
 78.1302 -  markOop mark = obj->mark() ;
 78.1303 -  if (!mark->is_being_inflated()) {
 78.1304 -    return mark ;       // normal fast-path return
 78.1305 -  }
 78.1306 -
 78.1307 -  int its = 0 ;
 78.1308 -  for (;;) {
 78.1309 -    markOop mark = obj->mark() ;
 78.1310 -    if (!mark->is_being_inflated()) {
 78.1311 -      return mark ;    // normal fast-path return
 78.1312 -    }
 78.1313 -
 78.1314 -    // The object is being inflated by some other thread.
 78.1315 -    // The caller of ReadStableMark() must wait for inflation to complete.
 78.1316 -    // Avoid live-lock
 78.1317 -    // TODO: consider calling SafepointSynchronize::do_call_back() while
 78.1318 -    // spinning to see if there's a safepoint pending.  If so, immediately
 78.1319 -    // yielding or blocking would be appropriate.  Avoid spinning while
 78.1320 -    // there is a safepoint pending.
 78.1321 -    // TODO: add inflation contention performance counters.
 78.1322 -    // TODO: restrict the aggregate number of spinners.
 78.1323 -
 78.1324 -    ++its ;
 78.1325 -    if (its > 10000 || !os::is_MP()) {
 78.1326 -       if (its & 1) {
 78.1327 -         os::NakedYield() ;
 78.1328 -         TEVENT (Inflate: INFLATING - yield) ;
 78.1329 -       } else {
 78.1330 -         // Note that the following code attenuates the livelock problem but is not
 78.1331 -         // a complete remedy.  A more complete solution would require that the inflating
 78.1332 -         // thread hold the associated inflation lock.  The following code simply restricts
 78.1333 -         // the number of spinners to at most one.  We'll have N-2 threads blocked
 78.1334 -         // on the inflationlock, 1 thread holding the inflation lock and using
 78.1335 -         // a yield/park strategy, and 1 thread in the midst of inflation.
 78.1336 -         // A more refined approach would be to change the encoding of INFLATING
 78.1337 -         // to allow encapsulation of a native thread pointer.  Threads waiting for
 78.1338 -         // inflation to complete would use CAS to push themselves onto a singly linked
 78.1339 -         // list rooted at the markword.  Once enqueued, they'd loop, checking a per-thread flag
 78.1340 -         // and calling park().  When inflation was complete the thread that accomplished inflation
 78.1341 -         // would detach the list and set the markword to inflated with a single CAS and
 78.1342 -         // then for each thread on the list, set the flag and unpark() the thread.
 78.1343 -         // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
 78.1344 -         // wakes at most one thread whereas we need to wake the entire list.
 78.1345 -         int ix = (intptr_t(obj) >> 5) & (NINFLATIONLOCKS-1) ;
 78.1346 -         int YieldThenBlock = 0 ;
 78.1347 -         assert (ix >= 0 && ix < NINFLATIONLOCKS, "invariant") ;
 78.1348 -         assert ((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant") ;
 78.1349 -         Thread::muxAcquire (InflationLocks + ix, "InflationLock") ;
 78.1350 -         while (obj->mark() == markOopDesc::INFLATING()) {
 78.1351 -           // Beware: NakedYield() is advisory and has almost no effect on some platforms
 78.1352 -           // so we periodically call Self->_ParkEvent->park(1).
 78.1353 -           // We use a mixed spin/yield/block mechanism.
 78.1354 -           if ((YieldThenBlock++) >= 16) {
 78.1355 -              Thread::current()->_ParkEvent->park(1) ;
 78.1356 -           } else {
 78.1357 -              os::NakedYield() ;
 78.1358 -           }
 78.1359 -         }
 78.1360 -         Thread::muxRelease (InflationLocks + ix ) ;
 78.1361 -         TEVENT (Inflate: INFLATING - yield/park) ;
 78.1362 -       }
 78.1363 -    } else {
 78.1364 -       SpinPause() ;       // SMP-polite spinning
 78.1365 -    }
 78.1366 -  }
 78.1367 -}
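The refinement described in the comment above -- waiters enqueue themselves and park until the inflating thread wakes the whole list -- can be modeled very roughly with standard C++ primitives. The sketch below is a hedged stand-in that substitutes a mutex/condition-variable pair for the per-thread ParkEvent list and a markword CAS chain; it is not HotSpot code.

    #include <atomic>
    #include <condition_variable>
    #include <cstdint>
    #include <mutex>

    // Toy model of "wait for inflation to complete, then wake every waiter".
    struct InflationSite {
      std::atomic<std::intptr_t> mark{0};   // 0 plays the role of INFLATING here
      std::mutex                 lk;
      std::condition_variable    cv;

      std::intptr_t read_stable() {
        std::intptr_t m = mark.load(std::memory_order_acquire);
        if (m != 0) return m;                       // fast path: not being inflated
        std::unique_lock<std::mutex> g(lk);
        cv.wait(g, [&]{ return mark.load(std::memory_order_acquire) != 0; });
        return mark.load(std::memory_order_acquire);
      }

      void publish(std::intptr_t inflated_mark) {   // done by the inflating thread
        {
          std::lock_guard<std::mutex> g(lk);
          mark.store(inflated_mark, std::memory_order_release);
        }
        cv.notify_all();                            // wake the entire list, not just one waiter
      }
    };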
 78.1368  
 78.1369  ObjectMonitor * ATTR ObjectSynchronizer::inflate (Thread * Self, oop object) {
 78.1370    // Inflate mutates the heap ...
 78.1371 @@ -1242,7 +1201,7 @@
 78.1372            m->_Responsible  = NULL ;
 78.1373            m->OwnerIsThread = 0 ;
 78.1374            m->_recursions   = 0 ;
 78.1375 -          m->_SpinDuration = Knob_SpinLimit ;   // Consider: maintain by type/class
 78.1376 +          m->_SpinDuration = ObjectMonitor::Knob_SpinLimit ;   // Consider: maintain by type/class
 78.1377  
 78.1378            markOop cmp = (markOop) Atomic::cmpxchg_ptr (markOopDesc::INFLATING(), object->mark_addr(), mark) ;
 78.1379            if (cmp != mark) {
 78.1380 @@ -1302,7 +1261,7 @@
 78.1381  
 78.1382            // Hopefully the performance counters are allocated on distinct cache lines
 78.1383            // to avoid false sharing on MP systems ...
 78.1384 -          if (_sync_Inflations != NULL) _sync_Inflations->inc() ;
 78.1385 +          if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc() ;
 78.1386            TEVENT(Inflate: overwrite stacklock) ;
 78.1387            if (TraceMonitorInflation) {
 78.1388              if (object->is_instance()) {
 78.1389 @@ -1335,7 +1294,7 @@
 78.1390        m->OwnerIsThread = 1 ;
 78.1391        m->_recursions   = 0 ;
 78.1392        m->_Responsible  = NULL ;
 78.1393 -      m->_SpinDuration = Knob_SpinLimit ;       // consider: keep metastats by type/class
 78.1394 +      m->_SpinDuration = ObjectMonitor::Knob_SpinLimit ;       // consider: keep metastats by type/class
 78.1395  
 78.1396        if (Atomic::cmpxchg_ptr (markOopDesc::encode(m), object->mark_addr(), mark) != mark) {
 78.1397            m->set_object (NULL) ;
 78.1398 @@ -1352,7 +1311,7 @@
 78.1399  
 78.1400        // Hopefully the performance counters are allocated on distinct
 78.1401        // cache lines to avoid false sharing on MP systems ...
 78.1402 -      if (_sync_Inflations != NULL) _sync_Inflations->inc() ;
 78.1403 +      if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc() ;
 78.1404        TEVENT(Inflate: overwrite neutral) ;
 78.1405        if (TraceMonitorInflation) {
 78.1406          if (object->is_instance()) {
 78.1407 @@ -1366,547 +1325,9 @@
 78.1408    }
 78.1409  }
 78.1410  
 78.1411 +// Note that we could encounter some performance loss through false-sharing as
 78.1412 +// multiple locks occupy the same $ line.  Padding might be appropriate.
 78.1413  
 78.1414 -// This is the fast monitor enter. The interpreter and compiler use
 78.1415 -// some assembly copies of this code. Make sure to update that code
 78.1416 -// if the following function is changed. The implementation is
 78.1417 -// extremely sensitive to race conditions. Be careful.
 78.1418 -
 78.1419 -void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock, bool attempt_rebias, TRAPS) {
 78.1420 - if (UseBiasedLocking) {
 78.1421 -    if (!SafepointSynchronize::is_at_safepoint()) {
 78.1422 -      BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
 78.1423 -      if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
 78.1424 -        return;
 78.1425 -      }
 78.1426 -    } else {
 78.1427 -      assert(!attempt_rebias, "can not rebias toward VM thread");
 78.1428 -      BiasedLocking::revoke_at_safepoint(obj);
 78.1429 -    }
 78.1430 -    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 78.1431 - }
 78.1432 -
 78.1433 - slow_enter (obj, lock, THREAD) ;
 78.1434 -}
 78.1435 -
 78.1436 -void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
 78.1437 -  assert(!object->mark()->has_bias_pattern(), "should not see bias pattern here");
 78.1438 -  // if displaced header is null, the previous enter is recursive enter, no-op
 78.1439 -  markOop dhw = lock->displaced_header();
 78.1440 -  markOop mark ;
 78.1441 -  if (dhw == NULL) {
 78.1442 -     // Recursive stack-lock.
 78.1443 -     // Diagnostics -- Could be: stack-locked, inflating, inflated.
 78.1444 -     mark = object->mark() ;
 78.1445 -     assert (!mark->is_neutral(), "invariant") ;
 78.1446 -     if (mark->has_locker() && mark != markOopDesc::INFLATING()) {
 78.1447 -        assert(THREAD->is_lock_owned((address)mark->locker()), "invariant") ;
 78.1448 -     }
 78.1449 -     if (mark->has_monitor()) {
 78.1450 -        ObjectMonitor * m = mark->monitor() ;
 78.1451 -        assert(((oop)(m->object()))->mark() == mark, "invariant") ;
 78.1452 -        assert(m->is_entered(THREAD), "invariant") ;
 78.1453 -     }
 78.1454 -     return ;
 78.1455 -  }
 78.1456 -
 78.1457 -  mark = object->mark() ;
 78.1458 -
 78.1459 -  // If the object is stack-locked by the current thread, try to
 78.1460 -  // swing the displaced header from the box back to the mark.
 78.1461 -  if (mark == (markOop) lock) {
 78.1462 -     assert (dhw->is_neutral(), "invariant") ;
 78.1463 -     if ((markOop) Atomic::cmpxchg_ptr (dhw, object->mark_addr(), mark) == mark) {
 78.1464 -        TEVENT (fast_exit: release stacklock) ;
 78.1465 -        return;
 78.1466 -     }
 78.1467 -  }
 78.1468 -
 78.1469 -  ObjectSynchronizer::inflate(THREAD, object)->exit (THREAD) ;
 78.1470 -}
 78.1471 -
 78.1472 -// This routine is used to handle interpreter/compiler slow case
 78.1473 -// We don't need to use the fast path here, because it must have
 78.1474 -// failed in the interpreter/compiler code.
 78.1475 -void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
 78.1476 -  markOop mark = obj->mark();
 78.1477 -  assert(!mark->has_bias_pattern(), "should not see bias pattern here");
 78.1478 -
 78.1479 -  if (mark->is_neutral()) {
 78.1480 -    // Anticipate successful CAS -- the ST of the displaced mark must
 78.1481 -    // be visible <= the ST performed by the CAS.
 78.1482 -    lock->set_displaced_header(mark);
 78.1483 -    if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) {
 78.1484 -      TEVENT (slow_enter: release stacklock) ;
 78.1485 -      return ;
 78.1486 -    }
 78.1487 -    // Fall through to inflate() ...
 78.1488 -  } else
 78.1489 -  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
 78.1490 -    assert(lock != mark->locker(), "must not re-lock the same lock");
 78.1491 -    assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
 78.1492 -    lock->set_displaced_header(NULL);
 78.1493 -    return;
 78.1494 -  }
 78.1495 -
 78.1496 -#if 0
 78.1497 -  // The following optimization isn't particularly useful.
 78.1498 -  if (mark->has_monitor() && mark->monitor()->is_entered(THREAD)) {
 78.1499 -    lock->set_displaced_header (NULL) ;
 78.1500 -    return ;
 78.1501 -  }
 78.1502 -#endif
 78.1503 -
 78.1504 -  // The object header will never be displaced to this lock,
 78.1505 -  // so it does not matter what the value is, except that it
 78.1506 -  // must be non-zero to avoid looking like a re-entrant lock,
 78.1507 -  // and must not look locked either.
 78.1508 -  lock->set_displaced_header(markOopDesc::unused_mark());
 78.1509 -  ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
 78.1510 -}
 78.1511 -
 78.1512 -// This routine is used to handle interpreter/compiler slow case
 78.1513 -// We don't need to use the fast path here, because it must have
 78.1514 -// failed in the interpreter/compiler code. Simply using the heavy-
 78.1515 -// weight monitor should be ok, unless someone finds otherwise.
 78.1516 -void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
 78.1517 -  fast_exit (object, lock, THREAD) ;
 78.1518 -}
 78.1519 -
 78.1520 -// NOTE: must use heavy weight monitor to handle jni monitor enter
 78.1521 -void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) { // possible entry from jni enter
 78.1522 -  // the current locking is from JNI instead of Java code
 78.1523 -  TEVENT (jni_enter) ;
 78.1524 -  if (UseBiasedLocking) {
 78.1525 -    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 78.1526 -    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 78.1527 -  }
 78.1528 -  THREAD->set_current_pending_monitor_is_from_java(false);
 78.1529 -  ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
 78.1530 -  THREAD->set_current_pending_monitor_is_from_java(true);
 78.1531 -}
 78.1532 -
 78.1533 -// NOTE: must use heavy weight monitor to handle jni monitor enter
 78.1534 -bool ObjectSynchronizer::jni_try_enter(Handle obj, Thread* THREAD) {
 78.1535 -  if (UseBiasedLocking) {
 78.1536 -    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 78.1537 -    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 78.1538 -  }
 78.1539 -
 78.1540 -  ObjectMonitor* monitor = ObjectSynchronizer::inflate_helper(obj());
 78.1541 -  return monitor->try_enter(THREAD);
 78.1542 -}
 78.1543 -
 78.1544 -
 78.1545 -// NOTE: must use heavy weight monitor to handle jni monitor exit
 78.1546 -void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
 78.1547 -  TEVENT (jni_exit) ;
 78.1548 -  if (UseBiasedLocking) {
 78.1549 -    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 78.1550 -  }
 78.1551 -  assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 78.1552 -
 78.1553 -  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj);
 78.1554 -  // If this thread has locked the object, exit the monitor.  Note:  can't use
 78.1555 -  // monitor->check(CHECK); must exit even if an exception is pending.
 78.1556 -  if (monitor->check(THREAD)) {
 78.1557 -     monitor->exit(THREAD);
 78.1558 -  }
 78.1559 -}
 78.1560 -
 78.1561 -// complete_exit()/reenter() are used to wait on a nested lock
 78.1562 -// i.e. to give up an outer lock completely and then re-enter
 78.1563 -// Used when holding nested locks - lock acquisition order: lock1 then lock2
 78.1564 -//  1) complete_exit lock1 - saving recursion count
 78.1565 -//  2) wait on lock2
 78.1566 -//  3) when notified on lock2, unlock lock2
 78.1567 -//  4) reenter lock1 with original recursion count
 78.1568 -//  5) lock lock2
 78.1569 -// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
 78.1570 -intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
 78.1571 -  TEVENT (complete_exit) ;
 78.1572 -  if (UseBiasedLocking) {
 78.1573 -    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 78.1574 -    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 78.1575 -  }
 78.1576 -
 78.1577 -  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
 78.1578 -
 78.1579 -  return monitor->complete_exit(THREAD);
 78.1580 -}
 78.1581 -
 78.1582 -// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
 78.1583 -void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
 78.1584 -  TEVENT (reenter) ;
 78.1585 -  if (UseBiasedLocking) {
 78.1586 -    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 78.1587 -    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 78.1588 -  }
 78.1589 -
 78.1590 -  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
 78.1591 -
 78.1592 -  monitor->reenter(recursion, THREAD);
 78.1593 -}
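To make the numbered protocol in the comment above concrete, a hypothetical caller would pair the two calls roughly as below. The function name and the two handles are invented for the illustration, and the explicit unlock/relock of the inner lock (steps 3 and 5) is left to the surrounding code; this is a hedged sketch, not changeset code.

    // Hypothetical illustration of the complete_exit()/reenter() pairing.
    void wait_on_inner_lock (Handle lock1, Handle lock2, TRAPS) {
      // Caller currently holds lock1 (possibly recursively) and lock2.
      intptr_t saved = ObjectSynchronizer::complete_exit (lock1, THREAD) ;  // (1) give up lock1 entirely
      ObjectSynchronizer::wait (lock2, 0 /* no timeout */, THREAD) ;        // (2) wait on lock2
      // (3) surrounding code releases lock2 when notified ...
      ObjectSynchronizer::reenter (lock1, saved, THREAD) ;                  // (4) restore lock1's recursion count
      // (5) ... and then re-acquires lock2.
    }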
 78.1594 -
 78.1595 -// This exists only as a workaround of dtrace bug 6254741
 78.1596 -int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
 78.1597 -  DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
 78.1598 -  return 0;
 78.1599 -}
 78.1600 -
 78.1601 -// NOTE: must use heavy weight monitor to handle wait()
 78.1602 -void ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
 78.1603 -  if (UseBiasedLocking) {
 78.1604 -    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 78.1605 -    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 78.1606 -  }
 78.1607 -  if (millis < 0) {
 78.1608 -    TEVENT (wait - throw IAX) ;
 78.1609 -    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
 78.1610 -  }
 78.1611 -  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
 78.1612 -  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
 78.1613 -  monitor->wait(millis, true, THREAD);
 78.1614 -
 78.1615 -  /* This dummy call is in place to get around dtrace bug 6254741.  Once
 78.1616 -     that's fixed we can uncomment the following line and remove the call */
 78.1617 -  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
 78.1618 -  dtrace_waited_probe(monitor, obj, THREAD);
 78.1619 -}
 78.1620 -
 78.1621 -void ObjectSynchronizer::waitUninterruptibly (Handle obj, jlong millis, TRAPS) {
 78.1622 -  if (UseBiasedLocking) {
 78.1623 -    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 78.1624 -    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 78.1625 -  }
 78.1626 -  if (millis < 0) {
 78.1627 -    TEVENT (wait - throw IAX) ;
 78.1628 -    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
 78.1629 -  }
 78.1630 -  ObjectSynchronizer::inflate(THREAD, obj()) -> wait(millis, false, THREAD) ;
 78.1631 -}
 78.1632 -
 78.1633 -void ObjectSynchronizer::notify(Handle obj, TRAPS) {
 78.1634 - if (UseBiasedLocking) {
 78.1635 -    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 78.1636 -    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 78.1637 -  }
 78.1638 -
 78.1639 -  markOop mark = obj->mark();
 78.1640 -  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
 78.1641 -    return;
 78.1642 -  }
 78.1643 -  ObjectSynchronizer::inflate(THREAD, obj())->notify(THREAD);
 78.1644 -}
 78.1645 -
 78.1646 -// NOTE: see comment of notify()
 78.1647 -void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
 78.1648 -  if (UseBiasedLocking) {
 78.1649 -    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 78.1650 -    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 78.1651 -  }
 78.1652 -
 78.1653 -  markOop mark = obj->mark();
 78.1654 -  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
 78.1655 -    return;
 78.1656 -  }
 78.1657 -  ObjectSynchronizer::inflate(THREAD, obj())->notifyAll(THREAD);
 78.1658 -}
 78.1659 -
 78.1660 -intptr_t ObjectSynchronizer::FastHashCode (Thread * Self, oop obj) {
 78.1661 -  if (UseBiasedLocking) {
 78.1662 -    // NOTE: many places throughout the JVM do not expect a safepoint
 78.1663 -    // to be taken here, in particular most operations on perm gen
 78.1664 -    // objects. However, we only ever bias Java instances and all of
 78.1665 -    // the call sites of identity_hash that might revoke biases have
 78.1666 -    // been checked to make sure they can handle a safepoint. The
 78.1667 -    // added check of the bias pattern is to avoid useless calls to
 78.1668 -    // thread-local storage.
 78.1669 -    if (obj->mark()->has_bias_pattern()) {
 78.1670 -      // Box and unbox the raw reference just in case we cause a STW safepoint.
 78.1671 -      Handle hobj (Self, obj) ;
 78.1672 -      // Relaxing assertion for bug 6320749.
 78.1673 -      assert (Universe::verify_in_progress() ||
 78.1674 -              !SafepointSynchronize::is_at_safepoint(),
 78.1675 -             "biases should not be seen by VM thread here");
 78.1676 -      BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
 78.1677 -      obj = hobj() ;
 78.1678 -      assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 78.1679 -    }
 78.1680 -  }
 78.1681 -
 78.1682 -  // hashCode() is a heap mutator ...
 78.1683 -  // Relaxing assertion for bug 6320749.
 78.1684 -  assert (Universe::verify_in_progress() ||
 78.1685 -          !SafepointSynchronize::is_at_safepoint(), "invariant") ;
 78.1686 -  assert (Universe::verify_in_progress() ||
 78.1687 -          Self->is_Java_thread() , "invariant") ;
 78.1688 -  assert (Universe::verify_in_progress() ||
 78.1689 -         ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ;
 78.1690 -
 78.1691 -  ObjectMonitor* monitor = NULL;
 78.1692 -  markOop temp, test;
 78.1693 -  intptr_t hash;
 78.1694 -  markOop mark = ReadStableMark (obj);
 78.1695 -
 78.1696 -  // object should remain ineligible for biased locking
 78.1697 -  assert (!mark->has_bias_pattern(), "invariant") ;
 78.1698 -
 78.1699 -  if (mark->is_neutral()) {
 78.1700 -    hash = mark->hash();              // this is a normal header
 78.1701 -    if (hash) {                       // if it has hash, just return it
 78.1702 -      return hash;
 78.1703 -    }
 78.1704 -    hash = get_next_hash(Self, obj);  // allocate a new hash code
 78.1705 -    temp = mark->copy_set_hash(hash); // merge the hash code into header
 78.1706 -    // use (machine word version) atomic operation to install the hash
 78.1707 -    test = (markOop) Atomic::cmpxchg_ptr(temp, obj->mark_addr(), mark);
 78.1708 -    if (test == mark) {
 78.1709 -      return hash;
 78.1710 -    }
 78.1711 -    // If atomic operation failed, we must inflate the header
 78.1712 -    // into heavy weight monitor. We could add more code here
 78.1713 -    // for the fast path, but it is not worth the complexity.
 78.1714 -  } else if (mark->has_monitor()) {
 78.1715 -    monitor = mark->monitor();
 78.1716 -    temp = monitor->header();
 78.1717 -    assert (temp->is_neutral(), "invariant") ;
 78.1718 -    hash = temp->hash();
 78.1719 -    if (hash) {
 78.1720 -      return hash;
 78.1721 -    }
 78.1722 -    // Skip to the following code to reduce code size
 78.1723 -  } else if (Self->is_lock_owned((address)mark->locker())) {
 78.1724 -    temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
 78.1725 -    assert (temp->is_neutral(), "invariant") ;
 78.1726 -    hash = temp->hash();              // by current thread, check if the displaced
 78.1727 -    if (hash) {                       // header contains hash code
 78.1728 -      return hash;
 78.1729 -    }
 78.1730 -    // WARNING:
 78.1731 -    //   The displaced header is strictly immutable.
 78.1732 -    // It can NOT be changed in ANY cases. So we have
 78.1733 -    // to inflate the header into heavyweight monitor
 78.1735 -    // even if the current thread owns the lock. The reason
 78.1735 -    // is the BasicLock (stack slot) will be asynchronously
 78.1736 -    // read by other threads during the inflate() function.
 78.1737 -    // Any change to stack may not propagate to other threads
 78.1738 -    // correctly.
 78.1739 -  }
 78.1740 -
 78.1741 -  // Inflate the monitor to set hash code
 78.1742 -  monitor = ObjectSynchronizer::inflate(Self, obj);
 78.1743 -  // Load displaced header and check it has hash code
 78.1744 -  mark = monitor->header();
 78.1745 -  assert (mark->is_neutral(), "invariant") ;
 78.1746 -  hash = mark->hash();
 78.1747 -  if (hash == 0) {
 78.1748 -    hash = get_next_hash(Self, obj);
 78.1749 -    temp = mark->copy_set_hash(hash); // merge hash code into header
 78.1750 -    assert (temp->is_neutral(), "invariant") ;
 78.1751 -    test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark);
 78.1752 -    if (test != mark) {
 78.1753 -      // The only update to the header in the monitor (outside GC)
 78.1754 -      // is to install the hash code. If someone adds a new usage of
 78.1755 -      // the displaced header, please update this code.
 78.1756 -      hash = test->hash();
 78.1757 -      assert (test->is_neutral(), "invariant") ;
 78.1758 -      assert (hash != 0, "Trivial unexpected object/monitor header usage.");
 78.1759 -    }
 78.1760 -  }
 78.1761 -  // We finally get the hash
 78.1762 -  return hash;
 78.1763 -}
 78.1764 -
 78.1765 -// Deprecated -- use FastHashCode() instead.
 78.1766 -
 78.1767 -intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
 78.1768 -  return FastHashCode (Thread::current(), obj()) ;
 78.1769 -}
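Since FastHashCode() either returns a previously installed hash or installs a fresh one with a CAS (into the object header or the monitor's displaced header), the identity hash is sticky once assigned. A small hedged check, with the wrapper function invented purely for illustration:

    // Illustration only: the identity hash must be stable across calls,
    // regardless of whether the object is unlocked, stack-locked or inflated.
    static void check_identity_hash_is_sticky (oop obj) {
      Thread * self = Thread::current() ;
      intptr_t h1 = ObjectSynchronizer::FastHashCode (self, obj) ;
      intptr_t h2 = ObjectSynchronizer::FastHashCode (self, obj) ;
      assert (h1 != 0 && h1 == h2, "identity hash should be installed once and then remain stable") ;
    }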
 78.1770 -
 78.1771 -bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
 78.1772 -                                                   Handle h_obj) {
 78.1773 -  if (UseBiasedLocking) {
 78.1774 -    BiasedLocking::revoke_and_rebias(h_obj, false, thread);
 78.1775 -    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 78.1776 -  }
 78.1777 -
 78.1778 -  assert(thread == JavaThread::current(), "Can only be called on current thread");
 78.1779 -  oop obj = h_obj();
 78.1780 -
 78.1781 -  markOop mark = ReadStableMark (obj) ;
 78.1782 -
 78.1783 -  // Uncontended case, header points to stack
 78.1784 -  if (mark->has_locker()) {
 78.1785 -    return thread->is_lock_owned((address)mark->locker());
 78.1786 -  }
 78.1787 -  // Contended case, header points to ObjectMonitor (tagged pointer)
 78.1788 -  if (mark->has_monitor()) {
 78.1789 -    ObjectMonitor* monitor = mark->monitor();
 78.1790 -    return monitor->is_entered(thread) != 0 ;
 78.1791 -  }
 78.1792 -  // Unlocked case, header in place
 78.1793 -  assert(mark->is_neutral(), "sanity check");
 78.1794 -  return false;
 78.1795 -}
 78.1796 -
 78.1797 -// Be aware that this method could revoke the bias of the lock object.
 78.1798 -// This method queries the ownership of the lock handle specified by 'h_obj'.
 78.1799 -// If the current thread owns the lock, it returns owner_self. If no
 78.1800 -// thread owns the lock, it returns owner_none. Otherwise, it will return
 78.1801 -// owner_other.
 78.1802 -ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
 78.1803 -(JavaThread *self, Handle h_obj) {
 78.1804 -  // The caller must beware this method can revoke bias, and
 78.1805 -  // revocation can result in a safepoint.
 78.1806 -  assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ;
 78.1807 -  assert (self->thread_state() != _thread_blocked , "invariant") ;
 78.1808 -
 78.1809 -  // Possible mark states: neutral, biased, stack-locked, inflated
 78.1810 -
 78.1811 -  if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) {
 78.1812 -    // CASE: biased
 78.1813 -    BiasedLocking::revoke_and_rebias(h_obj, false, self);
 78.1814 -    assert(!h_obj->mark()->has_bias_pattern(),
 78.1815 -           "biases should be revoked by now");
 78.1816 -  }
 78.1817 -
 78.1818 -  assert(self == JavaThread::current(), "Can only be called on current thread");
 78.1819 -  oop obj = h_obj();
 78.1820 -  markOop mark = ReadStableMark (obj) ;
 78.1821 -
 78.1822 -  // CASE: stack-locked.  Mark points to a BasicLock on the owner's stack.
 78.1823 -  if (mark->has_locker()) {
 78.1824 -    return self->is_lock_owned((address)mark->locker()) ?
 78.1825 -      owner_self : owner_other;
 78.1826 -  }
 78.1827 -
 78.1828 -  // CASE: inflated. Mark (tagged pointer) points to an objectMonitor.
 78.1829 -  // The Object:ObjectMonitor relationship is stable as long as we're
 78.1830 -  // not at a safepoint.
 78.1831 -  if (mark->has_monitor()) {
 78.1832 -    void * owner = mark->monitor()->_owner ;
 78.1833 -    if (owner == NULL) return owner_none ;
 78.1834 -    return (owner == self ||
 78.1835 -            self->is_lock_owned((address)owner)) ? owner_self : owner_other;
 78.1836 -  }
 78.1837 -
 78.1838 -  // CASE: neutral
 78.1839 -  assert(mark->is_neutral(), "sanity check");
 78.1840 -  return owner_none ;           // it's unlocked
 78.1841 -}
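A hypothetical caller (the function name is invented for illustration; the real JVMTI consumers are not shown in this diff) would interpret the three LockOwnership values as below -- bearing in mind the caveat above that the call may revoke a bias and must not be made at a safepoint.

    static const char * describe_ownership (JavaThread * self, Handle h_obj) {
      switch (ObjectSynchronizer::query_lock_ownership (self, h_obj)) {
        case ObjectSynchronizer::owner_self  : return "locked by the calling thread" ;
        case ObjectSynchronizer::owner_other : return "locked by some other thread" ;
        case ObjectSynchronizer::owner_none  : return "unlocked" ;
      }
      return "unreachable" ;
    }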
 78.1842 -
 78.1843 -// FIXME: jvmti should call this
 78.1844 -JavaThread* ObjectSynchronizer::get_lock_owner(Handle h_obj, bool doLock) {
 78.1845 -  if (UseBiasedLocking) {
 78.1846 -    if (SafepointSynchronize::is_at_safepoint()) {
 78.1847 -      BiasedLocking::revoke_at_safepoint(h_obj);
 78.1848 -    } else {
 78.1849 -      BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current());
 78.1850 -    }
 78.1851 -    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 78.1852 -  }
 78.1853 -
 78.1854 -  oop obj = h_obj();
 78.1855 -  address owner = NULL;
 78.1856 -
 78.1857 -  markOop mark = ReadStableMark (obj) ;
 78.1858 -
 78.1859 -  // Uncontended case, header points to stack
 78.1860 -  if (mark->has_locker()) {
 78.1861 -    owner = (address) mark->locker();
 78.1862 -  }
 78.1863 -
 78.1864 -  // Contended case, header points to ObjectMonitor (tagged pointer)
 78.1865 -  if (mark->has_monitor()) {
 78.1866 -    ObjectMonitor* monitor = mark->monitor();
 78.1867 -    assert(monitor != NULL, "monitor should be non-null");
 78.1868 -    owner = (address) monitor->owner();
 78.1869 -  }
 78.1870 -
 78.1871 -  if (owner != NULL) {
 78.1872 -    return Threads::owning_thread_from_monitor_owner(owner, doLock);
 78.1873 -  }
 78.1874 -
 78.1875 -  // Unlocked case, header in place
 78.1876 -  // Cannot have assertion since this object may have been
 78.1877 -  // locked by another thread when reaching here.
 78.1878 -  // assert(mark->is_neutral(), "sanity check");
 78.1879 -
 78.1880 -  return NULL;
 78.1881 -}
 78.1882 -
 78.1883 -// Iterate through monitor cache and attempt to release thread's monitors
 78.1884 -// Gives up on a particular monitor if an exception occurs, but continues
 78.1885 -// the overall iteration, swallowing the exception.
 78.1886 -class ReleaseJavaMonitorsClosure: public MonitorClosure {
 78.1887 -private:
 78.1888 -  TRAPS;
 78.1889 -
 78.1890 -public:
 78.1891 -  ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
 78.1892 -  void do_monitor(ObjectMonitor* mid) {
 78.1893 -    if (mid->owner() == THREAD) {
 78.1894 -      (void)mid->complete_exit(CHECK);
 78.1895 -    }
 78.1896 -  }
 78.1897 -};
 78.1898 -
 78.1899 -// Release all inflated monitors owned by THREAD.  Lightweight monitors are
 78.1900 -// ignored.  This is meant to be called during JNI thread detach which assumes
 78.1901 -// all remaining monitors are heavyweight.  All exceptions are swallowed.
 78.1902 -// Scanning the extant monitor list can be time consuming.
 78.1903 -// A simple optimization is to add a per-thread flag that indicates a thread
 78.1904 -// called jni_monitorenter() during its lifetime.
 78.1905 -//
 78.1906 -// Instead of No_Safepoint_Verifier it might be cheaper to
 78.1907 -// use an idiom of the form:
 78.1908 -//   auto int tmp = SafepointSynchronize::_safepoint_counter ;
 78.1909 -//   <code that must not run at safepoint>
 78.1910 -//   guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ;
 78.1911 -// Since the tests are extremely cheap we could leave them enabled
 78.1912 -// for normal product builds.
 78.1913 -
 78.1914 -void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
 78.1915 -  assert(THREAD == JavaThread::current(), "must be current Java thread");
 78.1916 -  No_Safepoint_Verifier nsv ;
 78.1917 -  ReleaseJavaMonitorsClosure rjmc(THREAD);
 78.1918 -  Thread::muxAcquire(&ListLock, "release_monitors_owned_by_thread");
 78.1919 -  ObjectSynchronizer::monitors_iterate(&rjmc);
 78.1920 -  Thread::muxRelease(&ListLock);
 78.1921 -  THREAD->clear_pending_exception();
 78.1922 -}
 78.1923 -
 78.1924 -// Visitors ...
 78.1925 -
 78.1926 -void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
 78.1927 -  ObjectMonitor* block = gBlockList;
 78.1928 -  ObjectMonitor* mid;
 78.1929 -  while (block) {
 78.1930 -    assert(block->object() == CHAINMARKER, "must be a block header");
 78.1931 -    for (int i = _BLOCKSIZE - 1; i > 0; i--) {
 78.1932 -      mid = block + i;
 78.1933 -      oop object = (oop) mid->object();
 78.1934 -      if (object != NULL) {
 78.1935 -        closure->do_monitor(mid);
 78.1936 -      }
 78.1937 -    }
 78.1938 -    block = (ObjectMonitor*) block->FreeNext;
 78.1939 -  }
 78.1940 -}
 78.1941 -
 78.1942 -void ObjectSynchronizer::oops_do(OopClosure* f) {
 78.1943 -  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
 78.1944 -  for (ObjectMonitor* block = gBlockList; block != NULL; block = next(block)) {
 78.1945 -    assert(block->object() == CHAINMARKER, "must be a block header");
 78.1946 -    for (int i = 1; i < _BLOCKSIZE; i++) {
 78.1947 -      ObjectMonitor* mid = &block[i];
 78.1948 -      if (mid->object() != NULL) {
 78.1949 -        f->do_oop((oop*)mid->object_addr());
 78.1950 -      }
 78.1951 -    }
 78.1952 -  }
 78.1953 -}
 78.1954  
 78.1955  // Deflate_idle_monitors() is called at all safepoints, immediately
 78.1956  // after all mutators are stopped, but before any objects have moved.
 78.1957 @@ -1936,12 +1357,11 @@
 78.1958  // which in turn can mean large(r) numbers of objectmonitors in circulation.
 78.1959  // This is an unfortunate aspect of this design.
 78.1960  //
 78.1961 -// Another refinement would be to refrain from calling deflate_idle_monitors()
 78.1962 -// except at stop-the-world points associated with garbage collections.
 78.1963 -//
 78.1964 -// An even better solution would be to deflate on-the-fly, aggressively,
 78.1965 -// at monitorexit-time as is done in EVM's metalock or Relaxed Locks.
 78.1966  
 78.1967 +enum ManifestConstants {
 78.1968 +    ClearResponsibleAtSTW   = 0,
 78.1969 +    MaximumRecheckInterval  = 1000
 78.1970 +} ;
 78.1971  
 78.1972  // Deflate a single monitor if not in use
 78.1973  // Return true if deflated, false if in use
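The deflation predicate itself is not shown in this hunk. As a hedged sketch (not the changeset's actual deflate_monitor() body): a monitor is only a candidate when it is completely idle, which the is_busy() helper appearing later in this diff summarizes as _count, _waiters, _owner, _cxq and _EntryList all being zero/NULL.

    // Hedged sketch of the "not in use" test -- the real routine also restores
    // the saved header and moves the deflated monitor to the free list.
    static bool looks_deflatable (ObjectMonitor * mid) {
      return mid->object() != NULL        // it is actually bound to an object
          && mid->is_busy() == 0 ;        // no owner, no waiters, no pending entries
    }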
 78.1974 @@ -2088,7 +1508,7 @@
 78.1975  
 78.1976    // Consider: audit gFreeList to ensure that MonitorFreeCount and list agree.
 78.1977  
 78.1978 -  if (Knob_Verbose) {
 78.1979 +  if (ObjectMonitor::Knob_Verbose) {
 78.1980      ::printf ("Deflate: InCirc=%d InUse=%d Scavenged=%d ForceMonitorScavenge=%d : pop=%d free=%d\n",
 78.1981          nInCirculation, nInuse, nScavenged, ForceMonitorScavenge,
 78.1982          MonitorPopulation, MonitorFreeCount) ;
 78.1983 @@ -2107,8 +1527,8 @@
 78.1984    }
 78.1985    Thread::muxRelease (&ListLock) ;
 78.1986  
 78.1987 -  if (_sync_Deflations != NULL) _sync_Deflations->inc(nScavenged) ;
 78.1988 -  if (_sync_MonExtant  != NULL) _sync_MonExtant ->set_value(nInCirculation);
 78.1989 +  if (ObjectMonitor::_sync_Deflations != NULL) ObjectMonitor::_sync_Deflations->inc(nScavenged) ;
 78.1990 +  if (ObjectMonitor::_sync_MonExtant  != NULL) ObjectMonitor::_sync_MonExtant ->set_value(nInCirculation);
 78.1991  
 78.1992    // TODO: Add objectMonitor leak detection.
 78.1993    // Audit/inventory the objectMonitors -- make sure they're all accounted for.
 78.1994 @@ -2116,2810 +1536,49 @@
 78.1995    GVars.stwCycle ++ ;
 78.1996  }
 78.1997  
 78.1998 -// A macro is used below because there may already be a pending
 78.1999 -// exception which should not abort the execution of the routines
 78.2000 -// which use this (which is why we don't put this into check_slow and
 78.2001 -// call it with a CHECK argument).
 78.2002 +// Monitor cleanup on JavaThread::exit
 78.2003  
 78.2004 -#define CHECK_OWNER()                                                             \
 78.2005 -  do {                                                                            \
 78.2006 -    if (THREAD != _owner) {                                                       \
 78.2007 -      if (THREAD->is_lock_owned((address) _owner)) {                              \
 78.2008 -        _owner = THREAD ;  /* Convert from basiclock addr to Thread addr */       \
 78.2009 -        _recursions = 0;                                                          \
 78.2010 -        OwnerIsThread = 1 ;                                                       \
 78.2011 -      } else {                                                                    \
 78.2012 -        TEVENT (Throw IMSX) ;                                                     \
 78.2013 -        THROW(vmSymbols::java_lang_IllegalMonitorStateException());               \
 78.2014 -      }                                                                           \
 78.2015 -    }                                                                             \
 78.2016 -  } while (false)
 78.2017 +// Iterate through monitor cache and attempt to release thread's monitors
 78.2018 +// Gives up on a particular monitor if an exception occurs, but continues
 78.2019 +// the overall iteration, swallowing the exception.
 78.2020 +class ReleaseJavaMonitorsClosure: public MonitorClosure {
 78.2021 +private:
 78.2022 +  TRAPS;
 78.2023  
 78.2024 -// TODO-FIXME: eliminate ObjectWaiters.  Replace this visitor/enumerator
 78.2025 -// interface with a simple FirstWaitingThread(), NextWaitingThread() interface.
 78.2026 -
 78.2027 -ObjectWaiter* ObjectMonitor::first_waiter() {
 78.2028 -  return _WaitSet;
 78.2029 -}
 78.2030 -
 78.2031 -ObjectWaiter* ObjectMonitor::next_waiter(ObjectWaiter* o) {
 78.2032 -  return o->_next;
 78.2033 -}
 78.2034 -
 78.2035 -Thread* ObjectMonitor::thread_of_waiter(ObjectWaiter* o) {
 78.2036 -  return o->_thread;
 78.2037 -}
 78.2038 -
 78.2039 -// initialize the monitor, except the semaphore; all other fields
 78.2040 -// are simple integers or pointers
 78.2041 -ObjectMonitor::ObjectMonitor() {
 78.2042 -  _header       = NULL;
 78.2043 -  _count        = 0;
 78.2044 -  _waiters      = 0,
 78.2045 -  _recursions   = 0;
 78.2046 -  _object       = NULL;
 78.2047 -  _owner        = NULL;
 78.2048 -  _WaitSet      = NULL;
 78.2049 -  _WaitSetLock  = 0 ;
 78.2050 -  _Responsible  = NULL ;
 78.2051 -  _succ         = NULL ;
 78.2052 -  _cxq          = NULL ;
 78.2053 -  FreeNext      = NULL ;
 78.2054 -  _EntryList    = NULL ;
 78.2055 -  _SpinFreq     = 0 ;
 78.2056 -  _SpinClock    = 0 ;
 78.2057 -  OwnerIsThread = 0 ;
 78.2058 -}
 78.2059 -
 78.2060 -ObjectMonitor::~ObjectMonitor() {
 78.2061 -   // TODO: Add asserts ...
 78.2062 -   // _cxq == 0 _succ == NULL _owner == NULL _waiters == 0
 78.2063 -   // _count == 0 _EntryList  == NULL etc
 78.2064 -}
 78.2065 -
 78.2066 -intptr_t ObjectMonitor::is_busy() const {
 78.2067 -  // TODO-FIXME: merge _count and _waiters.
 78.2068 -  // TODO-FIXME: assert _owner == null implies _recursions = 0
 78.2069 -  // TODO-FIXME: assert _WaitSet != null implies _count > 0
 78.2070 -  return _count|_waiters|intptr_t(_owner)|intptr_t(_cxq)|intptr_t(_EntryList ) ;
 78.2071 -}
 78.2072 -
 78.2073 -void ObjectMonitor::Recycle () {
 78.2074 -  // TODO: add stronger asserts ...
 78.2075 -  // _cxq == 0 _succ == NULL _owner == NULL _waiters == 0
 78.2076 -  // _count == 0 EntryList  == NULL
 78.2077 -  // _recursions == 0 _WaitSet == NULL
 78.2078 -  // TODO: assert (is_busy()|_recursions) == 0
 78.2079 -  _succ          = NULL ;
 78.2080 -  _EntryList     = NULL ;
 78.2081 -  _cxq           = NULL ;
 78.2082 -  _WaitSet       = NULL ;
 78.2083 -  _recursions    = 0 ;
 78.2084 -  _SpinFreq      = 0 ;
 78.2085 -  _SpinClock     = 0 ;
 78.2086 -  OwnerIsThread  = 0 ;
 78.2087 -}
 78.2088 -
 78.2089 -// WaitSet management ...
 78.2090 -
 78.2091 -inline void ObjectMonitor::AddWaiter(ObjectWaiter* node) {
 78.2092 -  assert(node != NULL, "should not dequeue NULL node");
 78.2093 -  assert(node->_prev == NULL, "node already in list");
 78.2094 -  assert(node->_next == NULL, "node already in list");
 78.2095 -  // put node at end of queue (circular doubly linked list)
 78.2096 -  if (_WaitSet == NULL) {
 78.2097 -    _WaitSet = node;
 78.2098 -    node->_prev = node;
 78.2099 -    node->_next = node;
 78.2100 -  } else {
 78.2101 -    ObjectWaiter* head = _WaitSet ;
 78.2102 -    ObjectWaiter* tail = head->_prev;
 78.2103 -    assert(tail->_next == head, "invariant check");
 78.2104 -    tail->_next = node;
 78.2105 -    head->_prev = node;
 78.2106 -    node->_next = head;
 78.2107 -    node->_prev = tail;
 78.2108 -  }
 78.2109 -}
 78.2110 -
 78.2111 -inline ObjectWaiter* ObjectMonitor::DequeueWaiter() {
 78.2112 -  // dequeue the very first waiter
 78.2113 -  ObjectWaiter* waiter = _WaitSet;
 78.2114 -  if (waiter) {
 78.2115 -    DequeueSpecificWaiter(waiter);
 78.2116 -  }
 78.2117 -  return waiter;
 78.2118 -}
 78.2119 -
 78.2120 -inline void ObjectMonitor::DequeueSpecificWaiter(ObjectWaiter* node) {
 78.2121 -  assert(node != NULL, "should not dequeue NULL node");
 78.2122 -  assert(node->_prev != NULL, "node already removed from list");
 78.2123 -  assert(node->_next != NULL, "node already removed from list");
 78.2124 -  // when the waiter has woken up because of interrupt,
 78.2125 -  // timeout or other spurious wake-up, dequeue the
 78.2126 -  // waiter from waiting list
 78.2127 -  ObjectWaiter* next = node->_next;
 78.2128 -  if (next == node) {
 78.2129 -    assert(node->_prev == node, "invariant check");
 78.2130 -    _WaitSet = NULL;
 78.2131 -  } else {
 78.2132 -    ObjectWaiter* prev = node->_prev;
 78.2133 -    assert(prev->_next == node, "invariant check");
 78.2134 -    assert(next->_prev == node, "invariant check");
 78.2135 -    next->_prev = prev;
 78.2136 -    prev->_next = next;
 78.2137 -    if (_WaitSet == node) {
 78.2138 -      _WaitSet = next;
 78.2139 +public:
 78.2140 +  ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
 78.2141 +  void do_monitor(ObjectMonitor* mid) {
 78.2142 +    if (mid->owner() == THREAD) {
 78.2143 +      (void)mid->complete_exit(CHECK);
 78.2144      }
 78.2145    }
 78.2146 -  node->_next = NULL;
 78.2147 -  node->_prev = NULL;
 78.2148 +};
 78.2149 +
 78.2150 +// Release all inflated monitors owned by THREAD.  Lightweight monitors are
 78.2151 +// ignored.  This is meant to be called during JNI thread detach which assumes
 78.2152 +// all remaining monitors are heavyweight.  All exceptions are swallowed.
 78.2153 +// Scanning the extant monitor list can be time consuming.
 78.2154 +// A simple optimization is to add a per-thread flag that indicates a thread
 78.2155 +// called jni_monitorenter() during its lifetime.
 78.2156 +//
 78.2157 +// Instead of No_Safepoint_Verifier it might be cheaper to
 78.2158 +// use an idiom of the form:
 78.2159 +//   auto int tmp = SafepointSynchronize::_safepoint_counter ;
 78.2160 +//   <code that must not run at safepoint>
 78.2161 +//   guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ;
 78.2162 +// Since the tests are extremely cheap we could leave them enabled
 78.2163 +// for normal product builds.
 78.2164 +
 78.2165 +void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
 78.2166 +  assert(THREAD == JavaThread::current(), "must be current Java thread");
 78.2167 +  No_Safepoint_Verifier nsv ;
 78.2168 +  ReleaseJavaMonitorsClosure rjmc(THREAD);
 78.2169 +  Thread::muxAcquire(&ListLock, "release_monitors_owned_by_thread");
 78.2170 +  ObjectSynchronizer::monitors_iterate(&rjmc);
 78.2171 +  Thread::muxRelease(&ListLock);
 78.2172 +  THREAD->clear_pending_exception();
 78.2173  }
 78.2174  
 78.2175 -static char * kvGet (char * kvList, const char * Key) {
 78.2176 -    if (kvList == NULL) return NULL ;
 78.2177 -    size_t n = strlen (Key) ;
 78.2178 -    char * Search ;
 78.2179 -    for (Search = kvList ; *Search ; Search += strlen(Search) + 1) {
 78.2180 -        if (strncmp (Search, Key, n) == 0) {
 78.2181 -            if (Search[n] == '=') return Search + n + 1 ;
 78.2182 -            if (Search[n] == 0)   return (char *) "1" ;
 78.2183 -        }
 78.2184 -    }
 78.2185 -    return NULL ;
 78.2186 -}
 78.2187 -
 78.2188 -static int kvGetInt (char * kvList, const char * Key, int Default) {
 78.2189 -    char * v = kvGet (kvList, Key) ;
 78.2190 -    int rslt = v ? ::strtol (v, NULL, 0) : Default ;
 78.2191 -    if (Knob_ReportSettings && v != NULL) {
 78.2192 -        ::printf ("  SyncKnob: %s %d(%d)\n", Key, rslt, Default) ;
 78.2193 -        ::fflush (stdout) ;
 78.2194 -    }
 78.2195 -    return rslt ;
 78.2196 -}
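The kvList consumed by the two helpers above is a run of NUL-terminated "Key=Value" (or bare "Key") entries ended by an empty string. A small hedged usage example, assuming the helpers and the Knob_ReportSettings global they reference are visible in the same translation unit:

    #include <cstdio>

    int main () {
      // The explicit '\0' separators plus the literal's implicit trailing NUL end the list.
      char knobs[] = "Verbose=1\0SpinLimit=5000\0ReportSettings\0" ;
      std::printf ("Verbose        = %d\n", kvGetInt (knobs, "Verbose",   0)) ;    // -> 1
      std::printf ("SpinLimit      = %d\n", kvGetInt (knobs, "SpinLimit", 1000)) ; // -> 5000
      std::printf ("ReportSettings = %s\n", kvGet    (knobs, "ReportSettings")) ;  // bare key -> "1"
      std::printf ("Missing        = %d\n", kvGetInt (knobs, "Missing",   42)) ;   // default -> 42
      return 0 ;
    }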
 78.2197 -
 78.2198 -// By convention we unlink a contending thread from EntryList|cxq immediately
 78.2199 -// after the thread acquires the lock in ::enter().  Equally, we could defer
 78.2200 -// unlinking the thread until ::exit()-time.
 78.2201 -
 78.2202 -void ObjectMonitor::UnlinkAfterAcquire (Thread * Self, ObjectWaiter * SelfNode)
 78.2203 -{
 78.2204 -    assert (_owner == Self, "invariant") ;
 78.2205 -    assert (SelfNode->_thread == Self, "invariant") ;
 78.2206 -
 78.2207 -    if (SelfNode->TState == ObjectWaiter::TS_ENTER) {
 78.2208 -        // Normal case: remove Self from the DLL EntryList .
 78.2209 -        // This is a constant-time operation.
 78.2210 -        ObjectWaiter * nxt = SelfNode->_next ;
 78.2211 -        ObjectWaiter * prv = SelfNode->_prev ;
 78.2212 -        if (nxt != NULL) nxt->_prev = prv ;
 78.2213 -        if (prv != NULL) prv->_next = nxt ;
 78.2214 -        if (SelfNode == _EntryList ) _EntryList = nxt ;
 78.2215 -        assert (nxt == NULL || nxt->TState == ObjectWaiter::TS_ENTER, "invariant") ;
 78.2216 -        assert (prv == NULL || prv->TState == ObjectWaiter::TS_ENTER, "invariant") ;
 78.2217 -        TEVENT (Unlink from EntryList) ;
 78.2218 -    } else {
 78.2219 -        guarantee (SelfNode->TState == ObjectWaiter::TS_CXQ, "invariant") ;
 78.2220 -        // Inopportune interleaving -- Self is still on the cxq.
 78.2221 -        // This usually means the enqueue of self raced an exiting thread.
 78.2222 -        // Normally we'll find Self near the front of the cxq, so
 78.2223 -        // dequeueing is typically fast.  If need be we can accelerate
 78.2224 -        // this with some MCS/CHL-like bidirectional list hints and advisory
 78.2225 -        // back-links so dequeueing from the interior will normally operate
 78.2226 -        // in constant-time.
 78.2227 -        // Dequeue Self from either the head (with CAS) or from the interior
 78.2228 -        // with a linear-time scan and normal non-atomic memory operations.
 78.2229 -        // CONSIDER: if Self is on the cxq then simply drain cxq into EntryList
 78.2230 -        // and then unlink Self from EntryList.  We have to drain eventually,
 78.2231 -        // so it might as well be now.
 78.2232 -
 78.2233 -        ObjectWaiter * v = _cxq ;
 78.2234 -        assert (v != NULL, "invariant") ;
 78.2235 -        if (v != SelfNode || Atomic::cmpxchg_ptr (SelfNode->_next, &_cxq, v) != v) {
 78.2236 -            // The CAS above can fail from interference IFF a "RAT" arrived.
 78.2237 -            // In that case Self must be in the interior and can no longer be
 78.2238 -            // at the head of cxq.
 78.2239 -            if (v == SelfNode) {
 78.2240 -                assert (_cxq != v, "invariant") ;
 78.2241 -                v = _cxq ;          // CAS above failed - start scan at head of list
 78.2242 -            }
 78.2243 -            ObjectWaiter * p ;
 78.2244 -            ObjectWaiter * q = NULL ;
 78.2245 -            for (p = v ; p != NULL && p != SelfNode; p = p->_next) {
 78.2246 -                q = p ;
 78.2247 -                assert (p->TState == ObjectWaiter::TS_CXQ, "invariant") ;
 78.2248 -            }
 78.2249 -            assert (v != SelfNode,  "invariant") ;
 78.2250 -            assert (p == SelfNode,  "Node not found on cxq") ;
 78.2251 -            assert (p != _cxq,      "invariant") ;
 78.2252 -            assert (q != NULL,      "invariant") ;
 78.2253 -            assert (q->_next == p,  "invariant") ;
 78.2254 -            q->_next = p->_next ;
 78.2255 -        }
 78.2256 -        TEVENT (Unlink from cxq) ;
 78.2257 -    }
 78.2258 -
 78.2259 -    // Diagnostic hygiene ...
 78.2260 -    SelfNode->_prev  = (ObjectWaiter *) 0xBAD ;
 78.2261 -    SelfNode->_next  = (ObjectWaiter *) 0xBAD ;
 78.2262 -    SelfNode->TState = ObjectWaiter::TS_RUN ;
 78.2263 -}
 78.2264 -
 78.2265 -// Caveat: TryLock() is not necessarily serializing if it returns failure.
 78.2266 -// Callers must compensate as needed.
 78.2267 -
 78.2268 -int ObjectMonitor::TryLock (Thread * Self) {
 78.2269 -   for (;;) {
 78.2270 -      void * own = _owner ;
 78.2271 -      if (own != NULL) return 0 ;
 78.2272 -      if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
 78.2273 -         // Either guarantee _recursions == 0 or set _recursions = 0.
 78.2274 -         assert (_recursions == 0, "invariant") ;
 78.2275 -         assert (_owner == Self, "invariant") ;
 78.2276 -         // CONSIDER: set or assert that OwnerIsThread == 1
 78.2277 -         return 1 ;
 78.2278 -      }
 78.2279 -      // The lock had been free momentarily, but we lost the race to the lock.
 78.2280 -      // Interference -- the CAS failed.
 78.2281 -      // We can either return -1 or retry.
 78.2282 -      // Retry doesn't make as much sense because the lock was just acquired.
 78.2283 -      if (true) return -1 ;
 78.2284 -   }
 78.2285 -}
 78.2286 -
 78.2287 -// NotRunnable() -- informed spinning
 78.2288 -//
 78.2289 -// Don't bother spinning if the owner is not eligible to drop the lock.
 78.2290 -// Peek at the owner's schedctl.sc_state and Thread._thread_values and
 78.2291 -// spin only if the owner thread is _thread_in_Java or _thread_in_vm.
 78.2292 -// The thread must be runnable in order to drop the lock in timely fashion.
 78.2293 -// If the _owner is not runnable then spinning will not likely be
 78.2294 -// successful (profitable).
 78.2295 -//
 78.2296 -// Beware -- the thread referenced by _owner could have died
 78.2297 -// so a simple fetch from _owner->_thread_state might trap.
 78.2298 -// Instead, we use SafeFetchXX() to safely LD _owner->_thread_state.
 78.2299 -// Because of the lifecycle issues the schedctl and _thread_state values
 78.2300 -// observed by NotRunnable() might be garbage.  NotRunnable must
 78.2301 -// tolerate this and consider the observed _thread_state value
 78.2302 -// as advisory.
 78.2303 -//
 78.2304 -// Beware too, that _owner is sometimes a BasicLock address and sometimes
 78.2305 -// a thread pointer.  We differentiate the two cases with OwnerIsThread.
 78.2306 -// Alternately, we might tag the type (thread pointer vs basiclock pointer)
 78.2307 -// with the LSB of _owner.  Another option would be to probabilistically probe
 78.2308 -// the putative _owner->TypeTag value.
 78.2309 -//
 78.2310 -// Checking _thread_state isn't perfect.  Even if the thread is
 78.2311 -// in_java it might be blocked on a page-fault or have been preempted
 78.2312 -// and sitting on a ready/dispatch queue.  _thread state in conjunction
 78.2313 -// with schedctl.sc_state gives us a good picture of what the
 78.2314 -// thread is doing, however.
 78.2315 -//
 78.2316 -// TODO: check schedctl.sc_state.
 78.2317 -// We'll need to use SafeFetch32() to read from the schedctl block.
 78.2318 -// See RFE #5004247 and http://sac.sfbay.sun.com/Archives/CaseLog/arc/PSARC/2005/351/
 78.2319 -//
 78.2320 -// The return value from NotRunnable() is *advisory* -- the
 78.2321 -// result is based on sampling and is not necessarily coherent.
 78.2322 -// The caller must tolerate false-negative and false-positive errors.
 78.2323 -// Spinning, in general, is probabilistic anyway.
 78.2324 -
 78.2325 -
 78.2326 -int ObjectMonitor::NotRunnable (Thread * Self, Thread * ox) {
 78.2327 -    // Check either OwnerIsThread or ox->TypeTag == 2BAD.
 78.2328 -    if (!OwnerIsThread) return 0 ;
 78.2329 -
 78.2330 -    if (ox == NULL) return 0 ;
 78.2331 -
 78.2332 -    // Avoid transitive spinning ...
 78.2333 -    // Say T1 spins or blocks trying to acquire L.  T1._Stalled is set to L.
 78.2334 -    // Immediately after T1 acquires L it's possible that T2, also
 78.2335 -    // spinning on L, will see L.Owner=T1 and T1._Stalled=L.
 78.2336 -    // This occurs transiently after T1 acquired L but before
 78.2337 -    // T1 managed to clear T1.Stalled.  T2 does not need to abort
 78.2338 -    // its spin in this circumstance.
 78.2339 -    intptr_t BlockedOn = SafeFetchN ((intptr_t *) &ox->_Stalled, intptr_t(1)) ;
 78.2340 -
 78.2341 -    if (BlockedOn == 1) return 1 ;
 78.2342 -    if (BlockedOn != 0) {
 78.2343 -      return BlockedOn != intptr_t(this) && _owner == ox ;
 78.2344 -    }
 78.2345 -
 78.2346 -    assert (sizeof(((JavaThread *)ox)->_thread_state) == sizeof(int), "invariant") ;
 78.2347 -    int jst = SafeFetch32 ((int *) &((JavaThread *) ox)->_thread_state, -1) ;
 78.2348 -    // consider also: jst != _thread_in_Java -- but that's overspecific.
 78.2349 -    return jst == _thread_blocked || jst == _thread_in_native ;
 78.2350 -}
 78.2351 -
 78.2352 -
 78.2353 -// Adaptive spin-then-block - rational spinning
 78.2354 -//
 78.2355 -// Note that we spin "globally" on _owner with a classic SMP-polite TATAS
 78.2356 -// algorithm.  On high order SMP systems it would be better to start with
 78.2357 -// a brief global spin and then revert to spinning locally.  In the spirit of MCS/CLH,
 78.2358 -// a contending thread could enqueue itself on the cxq and then spin locally
 78.2359 -// on a thread-specific variable such as its ParkEvent._Event flag.
 78.2360 -// That's left as an exercise for the reader.  Note that global spinning is
 78.2361 -// not problematic on Niagara, as the L2$ serves the interconnect and has both
 78.2362 -// low latency and massive bandwidth.
 78.2363 -//
 78.2364 -// Broadly, we can fix the spin frequency -- that is, the % of contended lock
 78.2365 -// acquisition attempts where we opt to spin --  at 100% and vary the spin count
 78.2366 -// (duration) or we can fix the count at approximately the duration of
 78.2367 -// a context switch and vary the frequency.   Of course we could also
 78.2368 -// vary both satisfying K == Frequency * Duration, where K is adaptive by monitor.
 78.2369 -// See http://j2se.east/~dice/PERSIST/040824-AdaptiveSpinning.html.
 78.2370 -//
 78.2371 -// This implementation varies the duration "D", where D varies with
 78.2372 -// the success rate of recent spin attempts. (D is capped at approximately
 78.2373 -// the length of a round-trip context switch).  The success rate for recent
 78.2374 -// spin attempts is a good predictor of the success rate of future spin
 78.2375 -// attempts.  The mechanism adapts automatically to varying critical
 78.2376 -// section length (lock modality), system load and degree of parallelism.
 78.2377 -// D is maintained per-monitor in _SpinDuration and is initialized
 78.2378 -// optimistically.  Spin frequency is fixed at 100%.
 78.2379 -//
 78.2380 -// Note that _SpinDuration is volatile, but we update it without locks
 78.2381 -// or atomics.  The code is designed so that _SpinDuration stays within
 78.2382 -// a reasonable range even in the presence of races.  The arithmetic
 78.2383 -// operations on _SpinDuration are closed over the domain of legal values,
 78.2384 -// so at worst a race will install an older but still legal value.
 78.2385 -// At the very worst this introduces some apparent non-determinism.
 78.2386 -// We might spin when we shouldn't or vice-versa, but since the spin
 78.2387 -// counts are relatively short, even in the worst case, the effect is harmless.
 78.2388 -//
 78.2389 -// Care must be taken that a low "D" value does not become
 78.2390 -// an absorbing state.  Transient spinning failures -- when spinning
 78.2391 -// is overall profitable -- should not cause the system to converge
 78.2392 -// on low "D" values.  We want spinning to be stable and predictable
 78.2393 -// and fairly responsive to change and at the same time we don't want
 78.2394 -// it to oscillate, become metastable, be "too" non-deterministic,
 78.2395 -// or converge on or enter undesirable stable absorbing states.
 78.2396 -//
 78.2397 -// We implement a feedback-based control system -- using past behavior
 78.2398 -// to predict future behavior.  We face two issues: (a) if the
 78.2399 -// input signal is random then the spin predictor won't provide optimal
 78.2400 -// results, and (b) if the signal frequency is too high then the control
 78.2401 -// system, which has some natural response lag, will "chase" the signal.
 78.2402 -// (b) can arise from multimodal lock hold times.  Transient preemption
 78.2403 -// can also result in apparent bimodal lock hold times.
 78.2404 -// Although sub-optimal, neither condition is particularly harmful, as
 78.2405 -// in the worst-case we'll spin when we shouldn't or vice-versa.
 78.2406 -// The maximum spin duration is rather short so the failure modes aren't bad.
 78.2407 -// To be conservative, I've tuned the gain in the system to bias toward
 78.2408 -// _not spinning.  Relatedly, the system can sometimes enter a mode where it
 78.2409 -// "rings" or oscillates between spinning and not spinning.  This happens
 78.2410 -// when spinning is just on the cusp of profitability, however, so the
 78.2411 -// situation is not dire.  The state is benign -- there's no need to add
 78.2412 -// hysteresis control to damp the transition rate between spinning and
 78.2413 -// not spinning.
 78.2414 -//
 78.2415 -// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
 78.2416 -//
 78.2417 -// Spin-then-block strategies ...
 78.2418 -//
 78.2419 -// Thoughts on ways to improve spinning :
 78.2420 -//
 78.2421 -// *  Periodically call {psr_}getloadavg() while spinning, and
 78.2422 -//    permit unbounded spinning if the load average is <
 78.2423 -//    the number of processors.  Beware, however, that getloadavg()
 78.2424 -//    is exceptionally fast on solaris (about 1/10 the cost of a full
 78.2425 -//    spin cycle), but quite expensive on linux.  Beware also that
 78.2426 -//    multiple JVMs could "ring" or oscillate in a feedback loop.
 78.2427 -//    Sufficient damping would solve that problem.
 78.2428 -//
 78.2429 -// *  We currently use spin loops with iteration counters to approximate
 78.2430 -//    spinning for some interval.  Given the availability of high-precision
 78.2431 -//    time sources such as gethrtime(), %TICK, %STICK, RDTSC, etc., we should
 78.2432 -//    someday reimplement the spin loops to be duration-based instead of iteration-based.
 78.2433 -//
 78.2434 -// *  Don't spin if there are more than N = (CPUs/2) threads
 78.2435 -//        currently spinning on the monitor (or globally).
 78.2436 -//    That is, limit the number of concurrent spinners.
 78.2437 -//    We might also limit the # of spinners in the JVM, globally.
 78.2438 -//
 78.2439 -// *  If a spinning thread observes _owner change hands it should
 78.2440 -//    abort the spin (and park immediately) or at least debit
 78.2441 -//    the spin counter by a large "penalty".
 78.2442 -//
 78.2443 -// *  Classically, the spin count is either K*(CPUs-1) or is a
 78.2444 -//        simple constant that approximates the length of a context switch.
 78.2445 -//    We currently use a value -- computed by a special utility -- that
 78.2446 -//    approximates round-trip context switch times.
 78.2447 -//
 78.2448 -// *  Normally schedctl_start()/_stop() is used to advise the kernel
 78.2449 -//    to avoid preempting threads that are running in short, bounded
 78.2450 -//    critical sections.  We could use the schedctl hooks in an inverted
 78.2451 -//    sense -- spinners would set the nopreempt flag, but poll the preempt
 78.2452 -//    pending flag.  If a spinner observed a pending preemption it'd immediately
 78.2453 -//    abort the spin and park.   As such, the schedctl service acts as
 78.2454 -//    a preemption warning mechanism.
 78.2455 -//
 78.2456 -// *  In lieu of spinning, if the system is running below saturation
 78.2457 -//    (that is, loadavg() << #cpus), we can instead suppress futile
 78.2458 -//    wakeup throttling, or even wake more than one successor at exit-time.
 78.2459 -//    The net effect is largely equivalent to spinning.  In both cases,
 78.2460 -//    contending threads go ONPROC and opportunistically attempt to acquire
 78.2461 -//    the lock, decreasing lock handover latency at the expense of wasted
 78.2462 -//    cycles and context switching.
 78.2463 -//
 78.2464 -// *  We might want to spin less after we've parked as the thread will
 78.2465 -//    have less $ and TLB affinity with the processor.
 78.2466 -//    Likewise, we might spin less if we come ONPROC on a different
 78.2467 -//    processor or after a long period (>> rechose_interval).
 78.2468 -//
 78.2469 -// *  A table-driven state machine similar to Solaris' dispadmin scheduling
 78.2470 -//    tables might be a better design.  Instead of encoding information in
 78.2471 -//    _SpinDuration, _SpinFreq and _SpinClock we'd just use explicit,
 78.2472 -//    discrete states.   Success or failure during a spin would drive
 78.2473 -//    state transitions, and each state node would contain a spin count.
 78.2474 -//
 78.2475 -// *  If the processor is operating in a mode intended to conserve power
 78.2476 -//    (such as Intel's SpeedStep) or to reduce thermal output (thermal
 78.2477 -//    step-down mode) then the Java synchronization subsystem should
 78.2478 -//    forgo spinning.
 78.2479 -//
 78.2480 -// *  The minimum spin duration should be approximately the worst-case
 78.2481 -//    store propagation latency on the platform.  That is, the time
 78.2482 -//    it takes a store on CPU A to become visible on CPU B, where A and
 78.2483 -//    B are "distant".
 78.2484 -//
 78.2485 -// *  We might want to factor a thread's priority in the spin policy.
 78.2486 -//    Threads with a higher priority might spin for slightly longer.
 78.2487 -//    Similarly, if we use back-off in the TATAS loop, lower priority
 78.2488 -//    threads might back-off longer.  We don't currently use a
 78.2489 -//    thread's priority when placing it on the entry queue.  We may
 78.2490 -//    want to consider doing so in future releases.
 78.2491 -//
 78.2492 -// *  We might transiently drop a thread's scheduling priority while it spins.
 78.2493 -//    SCHED_BATCH on linux and FX scheduling class at priority=0 on Solaris
 78.2494 -//    would suffice.  We could even consider letting the thread spin indefinitely at
 78.2495 -//    a depressed or "idle" priority.  This brings up fairness issues, however --
 78.2496 -//    in a saturated system a thread with a reduced priority could languish
 78.2497 -//    for extended periods on the ready queue.
 78.2498 -//
 78.2499 -// *  While spinning try to use the otherwise wasted time to help the VM make
 78.2500 -//    progress:
 78.2501 -//
 78.2502 -//    -- YieldTo() the owner, if the owner is OFFPROC but ready
 78.2503 -//       Donate our remaining quantum directly to the ready thread.
 78.2504 -//       This helps "push" the lock owner through the critical section.
 78.2505 -//       It also tends to improve affinity/locality as the lock
 78.2506 -//       "migrates" less frequently between CPUs.
 78.2507 -//    -- Walk our own stack in anticipation of blocking.  Memoize the roots.
 78.2508 -//    -- Perform strand checking for other threads.  Unpark potential strandees.
 78.2509 -//    -- Help GC: trace or mark -- this would need to be a bounded unit of work.
 78.2510 -//       Unfortunately this will pollute our $ and TLBs.  Recall that we
 78.2511 -//       spin to avoid context switching -- context switching has an
 78.2512 -//       immediate cost in latency, a disruptive cost to other strands on a CMT
 78.2513 -//       processor, and an amortized cost because of the D$ and TLB cache
 78.2514 -//       reload transient when the thread comes back ONPROC and repopulates
 78.2515 -//       $s and TLBs.
 78.2516 -//    -- call getloadavg() to see if the system is saturated.  It'd probably
 78.2517 -//       make sense to call getloadavg() half way through the spin.
 78.2518 -//       If the system isn't at full capacity then we'd simply reset
 78.2519 -//       the spin counter and extend the spin attempt.
 78.2520 -//    -- Doug points out that we should use the same "helping" policy
 78.2521 -//       in thread.yield().
 78.2522 -//
 78.2523 -// *  Try MONITOR-MWAIT on systems that support those instructions.
 78.2524 -//
 78.2525 -// *  The spin statistics that drive spin decisions & frequency are
 78.2526 -//    maintained in the objectmonitor structure so if we deflate and reinflate
 78.2527 -//    we lose spin state.  In practice this is not usually a concern
 78.2528 -//    as the default spin state after inflation is aggressive (optimistic)
 78.2529 -//    and tends toward spinning.  So in the worst case for a lock where
 78.2530 -//    spinning is not profitable we may spin unnecessarily for a brief
 78.2531 -//    period.  But then again, if a lock is contended it'll tend not to deflate
 78.2532 -//    in the first place.
 78.2533 -
 78.2534 -
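In condensed form, the feedback described above is just a pair of bounded adjustments to _SpinDuration.  A minimal illustrative sketch, reusing the knob names that appear later in this file (the helper function itself is hypothetical; the real logic is inlined in TrySpin_VaryDuration below):

    // Hypothetical helper -- the production code inlines these updates.
    static void AdjustSpinDuration (ObjectMonitor * m, bool Success) {
      int x = m->_SpinDuration ;
      if (Success) {
        // Reward a profitable spin: drift up toward Knob_SpinLimit,
        // never starting below the Knob_Poverty floor.
        if (x < Knob_SpinLimit) {
          if (x < Knob_Poverty) x = Knob_Poverty ;
          m->_SpinDuration = x + Knob_Bonus ;
        }
      } else if (x > 0) {
        // Penalize a failed spin: drift down toward zero, but not below it.
        x -= Knob_Penalty ;
        m->_SpinDuration = x < 0 ? 0 : x ;
      }
    }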
 78.2535 -intptr_t ObjectMonitor::SpinCallbackArgument = 0 ;
 78.2536 -int (*ObjectMonitor::SpinCallbackFunction)(intptr_t, int) = NULL ;
 78.2537 -
 78.2538 -// Spinning: Fixed frequency (100%), vary duration
 78.2539 -
 78.2540 -int ObjectMonitor::TrySpin_VaryDuration (Thread * Self) {
 78.2541 -
 78.2542 -    // Dumb, brutal spin.  Good for comparative measurements against adaptive spinning.
 78.2543 -    int ctr = Knob_FixedSpin ;
 78.2544 -    if (ctr != 0) {
 78.2545 -        while (--ctr >= 0) {
 78.2546 -            if (TryLock (Self) > 0) return 1 ;
 78.2547 -            SpinPause () ;
 78.2548 -        }
 78.2549 -        return 0 ;
 78.2550 -    }
 78.2551 -
 78.2552 -    for (ctr = Knob_PreSpin + 1; --ctr >= 0 ; ) {
 78.2553 -      if (TryLock(Self) > 0) {
 78.2554 -        // Increase _SpinDuration ...
 78.2555 -        // Note that we don't clamp SpinDuration precisely at SpinLimit.
 78.2556 -        // Raising _SpinDuration to the poverty line is key.
 78.2557 -        int x = _SpinDuration ;
 78.2558 -        if (x < Knob_SpinLimit) {
 78.2559 -           if (x < Knob_Poverty) x = Knob_Poverty ;
 78.2560 -           _SpinDuration = x + Knob_BonusB ;
 78.2561 -        }
 78.2562 -        return 1 ;
 78.2563 -      }
 78.2564 -      SpinPause () ;
 78.2565 -    }
 78.2566 -
 78.2567 -    // Admission control - verify preconditions for spinning
 78.2568 -    //
 78.2569 -    // We always spin a little bit, just to prevent _SpinDuration == 0 from
 78.2570 -    // becoming an absorbing state.  Put another way, we spin briefly to
 78.2571 -    // sample, just in case the system load, parallelism, contention, or lock
 78.2572 -    // modality changed.
 78.2573 -    //
 78.2574 -    // Consider the following alternative:
 78.2575 -    // Periodically set _SpinDuration = _SpinLimit and try a long/full
 78.2576 -    // spin attempt.  "Periodically" might mean after a tally of
 78.2577 -    // the # of failed spin attempts (or iterations) reaches some threshold.
 78.2578 -    // This takes us into the realm of 1-out-of-N spinning, where we
 78.2579 -    // hold the duration constant but vary the frequency.
 78.2580 -
 78.2581 -    ctr = _SpinDuration  ;
 78.2582 -    if (ctr < Knob_SpinBase) ctr = Knob_SpinBase ;
 78.2583 -    if (ctr <= 0) return 0 ;
 78.2584 -
 78.2585 -    if (Knob_SuccRestrict && _succ != NULL) return 0 ;
 78.2586 -    if (Knob_OState && NotRunnable (Self, (Thread *) _owner)) {
 78.2587 -       TEVENT (Spin abort - notrunnable [TOP]);
 78.2588 -       return 0 ;
 78.2589 -    }
 78.2590 -
 78.2591 -    int MaxSpin = Knob_MaxSpinners ;
 78.2592 -    if (MaxSpin >= 0) {
 78.2593 -       if (_Spinner > MaxSpin) {
 78.2594 -          TEVENT (Spin abort -- too many spinners) ;
 78.2595 -          return 0 ;
 78.2596 -       }
 78.2597 -       // Slightly racy, but benign ...
 78.2598 -       Adjust (&_Spinner, 1) ;
 78.2599 -    }
 78.2600 -
 78.2601 -    // We're good to spin ... spin ingress.
 78.2602 -    // CONSIDER: use Prefetch::write() to avoid RTS->RTO upgrades
 78.2603 -    // when preparing to LD...CAS _owner, etc and the CAS is likely
 78.2604 -    // to succeed.
 78.2605 -    int hits    = 0 ;
 78.2606 -    int msk     = 0 ;
 78.2607 -    int caspty  = Knob_CASPenalty ;
 78.2608 -    int oxpty   = Knob_OXPenalty ;
 78.2609 -    int sss     = Knob_SpinSetSucc ;
 78.2610 -    if (sss && _succ == NULL ) _succ = Self ;
 78.2611 -    Thread * prv = NULL ;
 78.2612 -
 78.2613 -    // There are three ways to exit the following loop:
 78.2614 -    // 1.  A successful spin where this thread has acquired the lock.
 78.2615 -    // 2.  Spin failure with prejudice
 78.2616 -    // 3.  Spin failure without prejudice
 78.2617 -
 78.2618 -    while (--ctr >= 0) {
 78.2619 -
 78.2620 -      // Periodic polling -- Check for pending GC
 78.2621 -      // Threads may spin while they're unsafe.
 78.2622 -      // We don't want spinning threads to delay the JVM from reaching
 78.2623 -      // a stop-the-world safepoint or to steal cycles from GC.
 78.2624 -      // If we detect a pending safepoint we abort in order that
 78.2625 -      // (a) this thread, if unsafe, doesn't delay the safepoint, and (b)
 78.2626 -      // this thread, if safe, doesn't steal cycles from GC.
 78.2627 -      // This is in keeping with the "no loitering in runtime" rule.
 78.2628 -      // We periodically check to see if there's a safepoint pending.
 78.2629 -      if ((ctr & 0xFF) == 0) {
 78.2630 -         if (SafepointSynchronize::do_call_back()) {
 78.2631 -            TEVENT (Spin: safepoint) ;
 78.2632 -            goto Abort ;           // abrupt spin egress
 78.2633 -         }
 78.2634 -         if (Knob_UsePause & 1) SpinPause () ;
 78.2635 -
 78.2636 -         int (*scb)(intptr_t,int) = SpinCallbackFunction ;
 78.2637 -         if (hits > 50 && scb != NULL) {
 78.2638 -            int abend = (*scb)(SpinCallbackArgument, 0) ;
 78.2639 -         }
 78.2640 -      }
 78.2641 -
 78.2642 -      if (Knob_UsePause & 2) SpinPause() ;
 78.2643 -
 78.2644 -      // Exponential back-off ...  Stay off the bus to reduce coherency traffic.
 78.2645 -      // This is useful on classic SMP systems, but is of less utility on
 78.2646 -      // N1-style CMT platforms.
 78.2647 -      //
 78.2648 -      // Trade-off: lock acquisition latency vs coherency bandwidth.
 78.2649 -      // Lock hold times are typically short.  A histogram
 78.2650 -      // of successful spin attempts shows that we usually acquire
 78.2651 -      // the lock early in the spin.  That suggests we want to
 78.2652 -      // sample _owner frequently in the early phase of the spin,
 78.2653 -      // but then back-off and sample less frequently as the spin
 78.2654 -      // progresses.  The back-off makes us a good citizen on big
 78.2655 -      // SMP systems.  Oversampling _owner can consume excessive
 78.2656 -      // coherency bandwidth.  Relatedly, if we oversample _owner we
 78.2657 -      // can inadvertently interfere with the ST m->owner=null
 78.2658 -      // executed by the lock owner.
 78.2659 -      if (ctr & msk) continue ;
 78.2660 -      ++hits ;
 78.2661 -      if ((hits & 0xF) == 0) {
 78.2662 -        // The 0xF, above, corresponds to the exponent.
 78.2663 -        // Consider: (msk+1)|msk
 78.2664 -        msk = ((msk << 2)|3) & BackOffMask ;
 78.2665 -      }
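      // For illustration: if Knob_SpinBackOff were 8, BackOffMask would be
      // (1 << 8) - 1 = 0xFF and msk would step 0 -> 3 -> 0xF -> 0x3F -> 0xFF,
      // so _owner is probed on every iteration early in the spin but only
      // about once per 256 iterations near the end.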
 78.2666 -
 78.2667 -      // Probe _owner with TATAS
 78.2668 -      // If this thread observes the monitor transition or flicker
 78.2669 -      // from locked to unlocked to locked, then the odds that this
 78.2670 -      // thread will acquire the lock in this spin attempt go down
 78.2671 -      // considerably.  The same argument applies if the CAS fails
 78.2672 -      // or if we observe _owner change from one non-null value to
 78.2673 -      // another non-null value.   In such cases we might abort
 78.2674 -      // the spin without prejudice or apply a "penalty" to the
 78.2675 -      // spin count-down variable "ctr", reducing it by 100, say.
 78.2676 -
 78.2677 -      Thread * ox = (Thread *) _owner ;
 78.2678 -      if (ox == NULL) {
 78.2679 -         ox = (Thread *) Atomic::cmpxchg_ptr (Self, &_owner, NULL) ;
 78.2680 -         if (ox == NULL) {
 78.2681 -            // The CAS succeeded -- this thread acquired ownership
 78.2682 -            // Take care of some bookkeeping to exit spin state.
 78.2683 -            if (sss && _succ == Self) {
 78.2684 -               _succ = NULL ;
 78.2685 -            }
 78.2686 -            if (MaxSpin > 0) Adjust (&_Spinner, -1) ;
 78.2687 -
 78.2688 -            // Increase _SpinDuration :
 78.2689 -            // The spin was successful (profitable) so we tend toward
 78.2690 -            // longer spin attempts in the future.
 78.2691 -            // CONSIDER: factor "ctr" into the _SpinDuration adjustment.
 78.2692 -            // If we acquired the lock early in the spin cycle it
 78.2693 -            // makes sense to increase _SpinDuration proportionally.
 78.2694 -            // Note that we don't clamp SpinDuration precisely at SpinLimit.
 78.2695 -            int x = _SpinDuration ;
 78.2696 -            if (x < Knob_SpinLimit) {
 78.2697 -                if (x < Knob_Poverty) x = Knob_Poverty ;
 78.2698 -                _SpinDuration = x + Knob_Bonus ;
 78.2699 -            }
 78.2700 -            return 1 ;
 78.2701 -         }
 78.2702 -
 78.2703 -         // The CAS failed ... we can take any of the following actions:
 78.2704 -         // * penalize: ctr -= Knob_CASPenalty
 78.2705 -         // * exit spin with prejudice -- goto Abort;
 78.2706 -         // * exit spin without prejudice.
 78.2707 -         // * Since CAS is high-latency, retry again immediately.
 78.2708 -         prv = ox ;
 78.2709 -         TEVENT (Spin: cas failed) ;
 78.2710 -         if (caspty == -2) break ;
 78.2711 -         if (caspty == -1) goto Abort ;
 78.2712 -         ctr -= caspty ;
 78.2713 -         continue ;
 78.2714 -      }
 78.2715 -
 78.2716 -      // Did lock ownership change hands ?
 78.2717 -      if (ox != prv && prv != NULL ) {
 78.2718 -          TEVENT (spin: Owner changed)
 78.2719 -          if (oxpty == -2) break ;
 78.2720 -          if (oxpty == -1) goto Abort ;
 78.2721 -          ctr -= oxpty ;
 78.2722 -      }
 78.2723 -      prv = ox ;
 78.2724 -
 78.2725 -      // Abort the spin if the owner is not executing.
 78.2726 -      // The owner must be executing in order to drop the lock.
 78.2727 -      // Spinning while the owner is OFFPROC is idiocy.
 78.2728 -      // Consider: ctr -= RunnablePenalty ;
 78.2729 -      if (Knob_OState && NotRunnable (Self, ox)) {
 78.2730 -         TEVENT (Spin abort - notrunnable);
 78.2731 -         goto Abort ;
 78.2732 -      }
 78.2733 -      if (sss && _succ == NULL ) _succ = Self ;
 78.2734 -   }
 78.2735 -
 78.2736 -   // Spin failed with prejudice -- reduce _SpinDuration.
 78.2737 -   // TODO: Use an AIMD-like policy to adjust _SpinDuration.
 78.2738 -   // AIMD is globally stable.
 78.2739 -   TEVENT (Spin failure) ;
 78.2740 -   {
 78.2741 -     int x = _SpinDuration ;
 78.2742 -     if (x > 0) {
 78.2743 -        // Consider an AIMD scheme like: x -= (x >> 3) + 100
 78.2744 -        // This is globally stable and tends to damp the response.
 78.2745 -        x -= Knob_Penalty ;
 78.2746 -        if (x < 0) x = 0 ;
 78.2747 -        _SpinDuration = x ;
 78.2748 -     }
 78.2749 -   }
 78.2750 -
 78.2751 - Abort:
 78.2752 -   if (MaxSpin >= 0) Adjust (&_Spinner, -1) ;
 78.2753 -   if (sss && _succ == Self) {
 78.2754 -      _succ = NULL ;
 78.2755 -      // Invariant: after setting succ=null a contending thread
 78.2756 -      // must recheck-retry _owner before parking.  This usually happens
 78.2757 -      // in the normal usage of TrySpin(), but it's safest
 78.2758 -      // to make TrySpin() as foolproof as possible.
 78.2759 -      OrderAccess::fence() ;
 78.2760 -      if (TryLock(Self) > 0) return 1 ;
 78.2761 -   }
 78.2762 -   return 0 ;
 78.2763 -}
 78.2764 -
 78.2765 -#define TrySpin TrySpin_VaryDuration
 78.2766 -
 78.2767 -static void DeferredInitialize () {
 78.2768 -  if (InitDone > 0) return ;
 78.2769 -  if (Atomic::cmpxchg (-1, &InitDone, 0) != 0) {
 78.2770 -      while (InitDone != 1) ;
 78.2771 -      return ;
 78.2772 -  }
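  // The thread that wins the 0 -> -1 CAS above performs the one-time setup
  // below; losers busy-wait until the winner stores InitDone = 1 after the
  // OrderAccess::fence() at the end of this routine.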
 78.2773 -
 78.2774 -  // One-shot global initialization ...
 78.2775 -  // The initialization is idempotent, so we don't need locks.
 78.2776 -  // In the future consider doing this via os::init_2().
 78.2777 -  // SyncKnobs consist of <Key>=<Value> pairs in the style
 78.2778 -  // of environment variables.  Start by converting ':' to NUL.
 78.2779 -
 78.2780 -  if (SyncKnobs == NULL) SyncKnobs = "" ;
 78.2781 -
 78.2782 -  size_t sz = strlen (SyncKnobs) ;
 78.2783 -  char * knobs = (char *) malloc (sz + 2) ;
 78.2784 -  if (knobs == NULL) {
 78.2785 -     vm_exit_out_of_memory (sz + 2, "Parse SyncKnobs") ;
 78.2786 -     guarantee (0, "invariant") ;
 78.2787 -  }
 78.2788 -  strcpy (knobs, SyncKnobs) ;
 78.2789 -  knobs[sz+1] = 0 ;
 78.2790 -  for (char * p = knobs ; *p ; p++) {
 78.2791 -     if (*p == ':') *p = 0 ;
 78.2792 -  }
 78.2793 -
 78.2794 -  #define SETKNOB(x) { Knob_##x = kvGetInt (knobs, #x, Knob_##x); }
 78.2795 -  SETKNOB(ReportSettings) ;
 78.2796 -  SETKNOB(Verbose) ;
 78.2797 -  SETKNOB(FixedSpin) ;
 78.2798 -  SETKNOB(SpinLimit) ;
 78.2799 -  SETKNOB(SpinBase) ;
 78.2800 -  SETKNOB(SpinBackOff);
 78.2801 -  SETKNOB(CASPenalty) ;
 78.2802 -  SETKNOB(OXPenalty) ;
 78.2803 -  SETKNOB(LogSpins) ;
 78.2804 -  SETKNOB(SpinSetSucc) ;
 78.2805 -  SETKNOB(SuccEnabled) ;
 78.2806 -  SETKNOB(SuccRestrict) ;
 78.2807 -  SETKNOB(Penalty) ;
 78.2808 -  SETKNOB(Bonus) ;
 78.2809 -  SETKNOB(BonusB) ;
 78.2810 -  SETKNOB(Poverty) ;
 78.2811 -  SETKNOB(SpinAfterFutile) ;
 78.2812 -  SETKNOB(UsePause) ;
 78.2813 -  SETKNOB(SpinEarly) ;
 78.2814 -  SETKNOB(OState) ;
 78.2815 -  SETKNOB(MaxSpinners) ;
 78.2816 -  SETKNOB(PreSpin) ;
 78.2817 -  SETKNOB(ExitPolicy) ;
 78.2818 -  SETKNOB(QMode);
 78.2819 -  SETKNOB(ResetEvent) ;
 78.2820 -  SETKNOB(MoveNotifyee) ;
 78.2821 -  SETKNOB(FastHSSEC) ;
 78.2822 -  #undef SETKNOB
 78.2823 -
 78.2824 -  if (os::is_MP()) {
 78.2825 -     BackOffMask = (1 << Knob_SpinBackOff) - 1 ;
 78.2826 -     if (Knob_ReportSettings) ::printf ("BackOffMask=%X\n", BackOffMask) ;
 78.2827 -     // CONSIDER: BackOffMask = ROUNDUP_NEXT_POWER2 (ncpus-1)
 78.2828 -  } else {
 78.2829 -     Knob_SpinLimit = 0 ;
 78.2830 -     Knob_SpinBase  = 0 ;
 78.2831 -     Knob_PreSpin   = 0 ;
 78.2832 -     Knob_FixedSpin = -1 ;
 78.2833 -  }
 78.2834 -
 78.2835 -  if (Knob_LogSpins == 0) {
 78.2836 -     ObjectSynchronizer::_sync_FailedSpins = NULL ;
 78.2837 -  }
 78.2838 -
 78.2839 -  free (knobs) ;
 78.2840 -  OrderAccess::fence() ;
 78.2841 -  InitDone = 1 ;
 78.2842 -}
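A hypothetical example of the knob string this routine parses, using only names from the SETKNOB list above (this assumes SyncKnobs is surfaced as the usual -XX string option):

    -XX:SyncKnobs=SpinLimit=4096:PreSpin=10:ReportSettings=1

DeferredInitialize() replaces each ':' with NUL and then looks up the individual <Key>=<Value> pairs via kvGetInt().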
 78.2843 -
 78.2844 -// Theory of operations -- Monitors lists, thread residency, etc:
 78.2845 -//
 78.2846 -// * A thread acquires ownership of a monitor by successfully
 78.2847 -//   CAS()ing the _owner field from null to non-null.
 78.2848 -//
 78.2849 -// * Invariant: A thread appears on at most one monitor list --
 78.2850 -//   cxq, EntryList or WaitSet -- at any one time.
 78.2851 -//
 78.2852 -// * Contending threads "push" themselves onto the cxq with CAS
 78.2853 -//   and then spin/park.
 78.2854 -//
 78.2855 -// * After a contending thread eventually acquires the lock it must
 78.2856 -//   dequeue itself from either the EntryList or the cxq.
 78.2857 -//
 78.2858 -// * The exiting thread identifies and unparks an "heir presumptive"
 78.2859 -//   tentative successor thread on the EntryList.  Critically, the
 78.2860 -//   exiting thread doesn't unlink the successor thread from the EntryList.
 78.2861 -//   After having been unparked, the wakee will recontend for ownership of
 78.2862 -//   the monitor.   The successor (wakee) will either acquire the lock or
 78.2863 -//   re-park itself.
 78.2864 -//
 78.2865 -//   Succession is provided for by a policy of competitive handoff.
 78.2866 -//   The exiting thread does _not_ grant or pass ownership to the
 78.2867 -//   successor thread.  (This is also referred to as "handoff succession".)
 78.2868 -//   Instead the exiting thread releases ownership and possibly wakes
 78.2869 -//   a successor, so the successor can (re)compete for ownership of the lock.
 78.2870 -//   If the EntryList is empty but the cxq is populated the exiting
 78.2871 -//   thread will drain the cxq into the EntryList.  It does so by
 78.2872 -//   detaching the cxq (installing null with CAS) and folding
 78.2873 -//   the threads from the cxq into the EntryList.  The EntryList is
 78.2874 -//   doubly linked, while the cxq is singly linked because of the
 78.2875 -//   CAS-based "push" used to enqueue recently arrived threads (RATs).
 78.2876 -//
 78.2877 -// * Concurrency invariants:
 78.2878 -//
 78.2879 -//   -- only the monitor owner may access or mutate the EntryList.
 78.2880 -//      The mutex property of the monitor itself protects the EntryList
 78.2881 -//      from concurrent interference.
 78.2882 -//   -- Only the monitor owner may detach the cxq.
 78.2883 -//
 78.2884 -// * The monitor entry list operations avoid locks, but strictly speaking
 78.2885 -//   they're not lock-free.  Enter is lock-free, exit is not.
 78.2886 -//   See http://j2se.east/~dice/PERSIST/040825-LockFreeQueues.html
 78.2887 -//
 78.2888 -// * The cxq can have multiple concurrent "pushers" but only one concurrent
 78.2889 -//   detaching thread.  This mechanism is immune to ABA corruption.
 78.2890 -//   More precisely, the CAS-based "push" onto cxq is ABA-oblivious.
 78.2891 -//
 78.2892 -// * Taken together, the cxq and the EntryList constitute or form a
 78.2893 -//   single logical queue of threads stalled trying to acquire the lock.
 78.2894 -//   We use two distinct lists to improve the odds of a constant-time
 78.2895 -//   dequeue operation after acquisition (in the ::enter() epilog) and
 78.2896 -//   to reduce heat on the list ends.  (c.f. Michael Scott's "2Q" algorithm).
 78.2897 -//   A key desideratum is to minimize queue & monitor metadata manipulation
 78.2898 -//   that occurs while holding the monitor lock -- that is, we want to
 78.2899 -//   minimize monitor lock holds times.  Note that even a small amount of
 78.2900 -//   fixed spinning will greatly reduce the # of enqueue-dequeue operations
 78.2901 -//   on EntryList|cxq.  That is, spinning relieves contention on the "inner"
 78.2902 -//   locks and monitor metadata.
 78.2903 -//
 78.2904 -//   Cxq points to the set of Recently Arrived Threads attempting entry.
 78.2905 -//   Because we push threads onto _cxq with CAS, the RATs must take the form of
 78.2906 -//   a singly-linked LIFO.  We drain _cxq into EntryList  at unlock-time when
 78.2907 -//   the unlocking thread notices that EntryList is null but _cxq is != null.
 78.2908 -//
 78.2909 -//   The EntryList is ordered by the prevailing queue discipline and
 78.2910 -//   can be organized in any convenient fashion, such as a doubly-linked list or
 78.2911 -//   a circular doubly-linked list.  Critically, we want insert and delete operations
 78.2912 -//   to operate in constant-time.  If we need a priority queue then something akin
 78.2913 -//   to Solaris' sleepq would work nicely.  Viz.,
 78.2914 -//   http://agg.eng/ws/on10_nightly/source/usr/src/uts/common/os/sleepq.c.
 78.2915 -//   Queue discipline is enforced at ::exit() time, when the unlocking thread
 78.2916 -//   drains the cxq into the EntryList, and orders or reorders the threads on the
 78.2917 -//   EntryList accordingly.
 78.2918 -//
 78.2919 -//   Barring "lock barging", this mechanism provides fair cyclic ordering,
 78.2920 -//   somewhat similar to an elevator-scan.
 78.2921 -//
 78.2922 -// * The monitor synchronization subsystem avoids the use of native
 78.2923 -//   synchronization primitives except for the narrow platform-specific
 78.2924 -//   park-unpark abstraction.  See the comments in os_solaris.cpp regarding
 78.2925 -//   the semantics of park-unpark.  Put another way, this monitor implementation
 78.2926 -//   depends only on atomic operations and park-unpark.  The monitor subsystem
 78.2927 -//   manages all RUNNING->BLOCKED and BLOCKED->READY transitions while the
 78.2928 -//   underlying OS manages the READY<->RUN transitions.
 78.2929 -//
 78.2930 -// * Waiting threads reside on the WaitSet list -- wait() puts
 78.2931 -//   the caller onto the WaitSet.
 78.2932 -//
 78.2933 -// * notify() or notifyAll() simply transfers threads from the WaitSet to
 78.2934 -//   either the EntryList or cxq.  Subsequent exit() operations will
 78.2935 -//   unpark the notifyee.  Unparking a notifyee in notify() is inefficient --
 78.2936 -//   it's likely the notifyee would simply impale itself on the lock held
 78.2937 -//   by the notifier.
 78.2938 -//
 78.2939 -// * An interesting alternative is to encode cxq as (List,LockByte) where
 78.2940 -//   the LockByte is 0 iff the monitor is not owned.  _owner is simply an auxiliary
 78.2941 -//   variable, like _recursions, in the scheme.  The threads or Events that form
 78.2942 -//   the list would have to be aligned on 256-byte boundaries.  A thread would
 78.2943 -//   try to acquire the lock or enqueue itself with CAS, but exiting threads
 78.2944 -//   could use a 1-0 protocol and simply STB to set the LockByte to 0.
 78.2945 -//   Note that this is *not* word-tearing, but it does presume that full-word
 78.2946 -//   CAS operations are coherent when intermixed with STB operations.  That's true
 78.2947 -//   on most common processors.
 78.2948 -//
 78.2949 -// * See also http://blogs.sun.com/dave
 78.2950 -
 78.2951 -
 78.2952 -void ATTR ObjectMonitor::EnterI (TRAPS) {
 78.2953 -    Thread * Self = THREAD ;
 78.2954 -    assert (Self->is_Java_thread(), "invariant") ;
 78.2955 -    assert (((JavaThread *) Self)->thread_state() == _thread_blocked   , "invariant") ;
 78.2956 -
 78.2957 -    // Try the lock - TATAS
 78.2958 -    if (TryLock (Self) > 0) {
 78.2959 -        assert (_succ != Self              , "invariant") ;
 78.2960 -        assert (_owner == Self             , "invariant") ;
 78.2961 -        assert (_Responsible != Self       , "invariant") ;
 78.2962 -        return ;
 78.2963 -    }
 78.2964 -
 78.2965 -    DeferredInitialize () ;
 78.2966 -
 78.2967 -    // We try one round of spinning *before* enqueueing Self.
 78.2968 -    //
 78.2969 -    // If the _owner is ready but OFFPROC we could use a YieldTo()
 78.2970 -    // operation to donate the remainder of this thread's quantum
 78.2971 -    // to the owner.  This has subtle but beneficial affinity
 78.2972 -    // effects.
 78.2973 -
 78.2974 -    if (TrySpin (Self) > 0) {
 78.2975 -        assert (_owner == Self        , "invariant") ;
 78.2976 -        assert (_succ != Self         , "invariant") ;
 78.2977 -        assert (_Responsible != Self  , "invariant") ;
 78.2978 -        return ;
 78.2979 -    }
 78.2980 -
 78.2981 -    // The Spin failed -- Enqueue and park the thread ...
 78.2982 -    assert (_succ  != Self            , "invariant") ;
 78.2983 -    assert (_owner != Self            , "invariant") ;
 78.2984 -    assert (_Responsible != Self      , "invariant") ;
 78.2985 -
 78.2986 -    // Enqueue "Self" on ObjectMonitor's _cxq.
 78.2987 -    //
 78.2988 -    // Node acts as a proxy for Self.
 78.2989 -    // As an aside, if we were ever to rewrite the synchronization code mostly
 78.2990 -    // in Java, WaitNodes, ObjectMonitors, and Events would become 1st-class
 78.2991 -    // Java objects.  This would avoid awkward lifecycle and liveness issues,
 78.2992 -    // as well as eliminate a subset of ABA issues.
 78.2993 -    // TODO: eliminate ObjectWaiter and enqueue either Threads or Events.
 78.2994 -    //
 78.2995 -
 78.2996 -    ObjectWaiter node(Self) ;
 78.2997 -    Self->_ParkEvent->reset() ;
 78.2998 -    node._prev   = (ObjectWaiter *) 0xBAD ;
 78.2999 -    node.TState  = ObjectWaiter::TS_CXQ ;
 78.3000 -
 78.3001 -    // Push "Self" onto the front of the _cxq.
 78.3002 -    // Once on cxq/EntryList, Self stays on-queue until it acquires the lock.
 78.3003 -    // Note that spinning tends to reduce the rate at which threads
 78.3004 -    // enqueue and dequeue on EntryList|cxq.
 78.3005 -    ObjectWaiter * nxt ;
 78.3006 -    for (;;) {
 78.3007 -        node._next = nxt = _cxq ;
 78.3008 -        if (Atomic::cmpxchg_ptr (&node, &_cxq, nxt) == nxt) break ;
 78.3009 -
 78.3010 -        // Interference - the CAS failed because _cxq changed.  Just retry.
 78.3011 -        // As an optional optimization we retry the lock.
 78.3012 -        if (TryLock (Self) > 0) {
 78.3013 -            assert (_succ != Self         , "invariant") ;
 78.3014 -            assert (_owner == Self        , "invariant") ;
 78.3015 -            assert (_Responsible != Self  , "invariant") ;
 78.3016 -            return ;
 78.3017 -        }
 78.3018 -    }
 78.3019 -
 78.3020 -    // Check for cxq|EntryList edge transition to non-null.  This indicates
 78.3021 -    // the onset of contention.  While contention persists exiting threads
 78.3022 -    // will use a ST:MEMBAR:LD 1-1 exit protocol.  When contention abates exit
 78.3023 -    // operations revert to the faster 1-0 mode.  This enter operation may interleave
 78.3024 -    // (race) a concurrent 1-0 exit operation, resulting in stranding, so we
 78.3025 -    // arrange for one of the contending threads to use a timed park() operation
 78.3026 -    // to detect and recover from the race.  (Stranding is a form of progress failure
 78.3027 -    // where the monitor is unlocked but all the contending threads remain parked).
 78.3028 -    // That is, at least one of the contended threads will periodically poll _owner.
 78.3029 -    // One of the contending threads will become the designated "Responsible" thread.
 78.3030 -    // The Responsible thread uses a timed park instead of a normal indefinite park
 78.3031 -    // operation -- it periodically wakes and checks for and recovers from potential
 78.3032 -    // strandings admitted by 1-0 exit operations.   We need at most one Responsible
 78.3033 -    // thread per-monitor at any given moment.  Only threads on cxq|EntryList may
 78.3034 -    // be responsible for a monitor.
 78.3035 -    //
 78.3036 -    // Currently, one of the contended threads takes on the added role of "Responsible".
 78.3037 -    // A viable alternative would be to use a dedicated "stranding checker" thread
 78.3038 -    // that periodically iterated over all the threads (or active monitors) and unparked
 78.3039 -    // successors where there was risk of stranding.  This would help eliminate the
 78.3040 -    // timer scalability issues we see on some platforms as we'd only have one thread
 78.3041 -    // -- the checker -- parked on a timer.
 78.3042 -
 78.3043 -    if ((SyncFlags & 16) == 0 && nxt == NULL && _EntryList == NULL) {
 78.3044 -        // Try to assume the role of responsible thread for the monitor.
 78.3045 -        // CONSIDER:  ST vs CAS vs { if (Responsible==null) Responsible=Self }
 78.3046 -        Atomic::cmpxchg_ptr (Self, &_Responsible, NULL) ;
 78.3047 -    }
 78.3048 -
 78.3049 -    // The lock might have been released while this thread was occupied queueing
 78.3050 -    // itself onto _cxq.  To close the race and avoid "stranding" and
 78.3051 -    // progress-liveness failure we must resample-retry _owner before parking.
 78.3052 -    // Note the Dekker/Lamport duality: ST cxq; MEMBAR; LD Owner.
 78.3053 -    // In this case the ST-MEMBAR is accomplished with CAS().
 78.3054 -    //
 78.3055 -    // TODO: Defer all thread state transitions until park-time.
 78.3056 -    // Since state transitions are heavy and inefficient we'd like
 78.3057 -    // to defer the state transitions until absolutely necessary,
 78.3058 -    // and in doing so avoid some transitions ...
 78.3059 -
 78.3060 -    TEVENT (Inflated enter - Contention) ;
 78.3061 -    int nWakeups = 0 ;
 78.3062 -    int RecheckInterval = 1 ;
 78.3063 -
 78.3064 -    for (;;) {
 78.3065 -
 78.3066 -        if (TryLock (Self) > 0) break ;
 78.3067 -        assert (_owner != Self, "invariant") ;
 78.3068 -
 78.3069 -        if ((SyncFlags & 2) && _Responsible == NULL) {
 78.3070 -           Atomic::cmpxchg_ptr (Self, &_Responsible, NULL) ;
 78.3071 -        }
 78.3072 -
 78.3073 -        // park self
 78.3074 -        if (_Responsible == Self || (SyncFlags & 1)) {
 78.3075 -            TEVENT (Inflated enter - park TIMED) ;
 78.3076 -            Self->_ParkEvent->park ((jlong) RecheckInterval) ;
 78.3077 -            // Increase the RecheckInterval, but clamp the value.
 78.3078 -            RecheckInterval *= 8 ;
 78.3079 -            if (RecheckInterval > 1000) RecheckInterval = 1000 ;
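            // For illustration: successive timed parks by the Responsible
            // thread wait 1, 8, 64, 512, and then stay clamped at 1000.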
 78.3080 -        } else {
 78.3081 -            TEVENT (Inflated enter - park UNTIMED) ;
 78.3082 -            Self->_ParkEvent->park() ;
 78.3083 -        }
 78.3084 -
 78.3085 -        if (TryLock(Self) > 0) break ;
 78.3086 -
 78.3087 -        // The lock is still contested.
 78.3088 -        // Keep a tally of the # of futile wakeups.
 78.3089 -        // Note that the counter is not protected by a lock or updated by atomics.
 78.3090 -        // That is by design - we trade "lossy" counters which are exposed to
 78.3091 -        // races during updates for a lower probe effect.
 78.3092 -        TEVENT (Inflated enter - Futile wakeup) ;
 78.3093 -        if (ObjectSynchronizer::_sync_FutileWakeups != NULL) {
 78.3094 -           ObjectSynchronizer::_sync_FutileWakeups->inc() ;
 78.3095 -        }
 78.3096 -        ++ nWakeups ;
 78.3097 -
 78.3098 -        // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
 78.3099 -        // We can defer clearing _succ until after the spin completes.
 78.3100 -        // TrySpin() must tolerate being called with _succ == Self.
 78.3101 -        // Try yet another round of adaptive spinning.
 78.3102 -        if ((Knob_SpinAfterFutile & 1) && TrySpin (Self) > 0) break ;
 78.3103 -
 78.3104 -        // We can find that we were unpark()ed and redesignated _succ while
 78.3105 -        // we were spinning.  That's harmless.  If we iterate and call park(),
 78.3106 -        // park() will consume the event and return immediately and we'll
 78.3107 -        // just spin again.  This pattern can repeat, leaving _succ to simply
 78.3108 -        // spin on a CPU.  Enable Knob_ResetEvent to clear pending unparks().
 78.3109 -        // Alternately, we can sample fired() here, and if set, forgo spinning
 78.3110 -        // in the next iteration.
 78.3111 -
 78.3112 -        if ((Knob_ResetEvent & 1) && Self->_ParkEvent->fired()) {
 78.3113 -           Self->_ParkEvent->reset() ;
 78.3114 -           OrderAccess::fence() ;
 78.3115 -        }
 78.3116 -        if (_succ == Self) _succ = NULL ;
 78.3117 -
 78.3118 -        // Invariant: after clearing _succ a thread *must* retry _owner before parking.
 78.3119 -        OrderAccess::fence() ;
 78.3120 -    }
 78.3121 -
 78.3122 -    // Egress :
 78.3123 -    // Self has acquired the lock -- Unlink Self from the cxq or EntryList.
 78.3124 -    // Normally we'll find Self on the EntryList.
 78.3125 -    // From the perspective of the lock owner (this thread), the
 78.3126 -    // EntryList is stable and cxq is prepend-only.
 78.3127 -    // The head of cxq is volatile but the interior is stable.
 78.3128 -    // In addition, Self.TState is stable.
 78.3129 -
 78.3130 -    assert (_owner == Self      , "invariant") ;
 78.3131 -    assert (object() != NULL    , "invariant") ;
 78.3132 -    // I'd like to write:
 78.3133 -    //   guarantee (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
 78.3134 -    // but as we're at a safepoint that's not safe.
 78.3135 -
 78.3136 -    UnlinkAfterAcquire (Self, &node) ;
 78.3137 -    if (_succ == Self) _succ = NULL ;
 78.3138 -
 78.3139 -    assert (_succ != Self, "invariant") ;
 78.3140 -    if (_Responsible == Self) {
 78.3141 -        _Responsible = NULL ;
 78.3142 -        // Dekker pivot-point.
 78.3143 -        // Consider OrderAccess::storeload() here
 78.3144 -
 78.3145 -        // We may leave threads on cxq|EntryList without a designated
 78.3146 -        // "Responsible" thread.  This is benign.  When this thread subsequently
 78.3147 -        // exits the monitor it can "see" such preexisting "old" threads --
 78.3148 -        // threads that arrived on the cxq|EntryList before the fence, above --
 78.3149 -        // by LDing cxq|EntryList.  Newly arrived threads -- that is, threads
 78.3150 -        // that arrive on cxq after the ST:MEMBAR, above -- will set Responsible
 78.3151 -        // non-null and elect a new "Responsible" timer thread.
 78.3152 -        //
 78.3153 -        // This thread executes:
 78.3154 -        //    ST Responsible=null; MEMBAR    (in enter epilog - here)
 78.3155 -        //    LD cxq|EntryList               (in subsequent exit)
 78.3156 -        //
 78.3157 -        // Entering threads in the slow/contended path execute:
 78.3158 -        //    ST cxq=nonnull; MEMBAR; LD Responsible (in enter prolog)
 78.3159 -        //    The (ST cxq; MEMBAR) is accomplished with CAS().
 78.3160 -        //
 78.3161 -        // The MEMBAR, above, prevents the LD of cxq|EntryList in the subsequent
 78.3162 -        // exit operation from floating above the ST Responsible=null.
 78.3163 -        //
 78.3164 -        // In *practice* however, EnterI() is always followed by some atomic
 78.3165 -        // operation such as the decrement of _count in ::enter().  Those atomics
 78.3166 -        // obviate the need for the explicit MEMBAR, above.
 78.3167 -    }
 78.3168 -
 78.3169 -    // We've acquired ownership with CAS().
 78.3170 -    // CAS is serializing -- it has MEMBAR/FENCE-equivalent semantics.
 78.3171 -    // But since the CAS() this thread may have also stored into _succ,
 78.3172 -    // EntryList, cxq or Responsible.  These meta-data updates must be
 78.3173 -    // visible __before this thread subsequently drops the lock.
 78.3174 -    // Consider what could occur if we didn't enforce this constraint --
 78.3175 -    // STs to monitor meta-data and user-data could reorder with (become
 78.3176 -    // visible after) the ST in exit that drops ownership of the lock.
 78.3177 -    // Some other thread could then acquire the lock, but observe inconsistent
 78.3178 -    // or old monitor meta-data and heap data.  That violates the JMM.
 78.3179 -    // To that end, the 1-0 exit() operation must have at least STST|LDST
 78.3180 -    // "release" barrier semantics.  Specifically, there must be at least a
 78.3181 -    // STST|LDST barrier in exit() before the ST of null into _owner that drops
 78.3182 -    // the lock.   The barrier ensures that changes to monitor meta-data and data
 78.3183 -    // protected by the lock will be visible before we release the lock, and
 78.3184 -    // therefore before some other thread (CPU) has a chance to acquire the lock.
 78.3185 -    // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
 78.3186 -    //
 78.3187 -    // Critically, any prior STs to _succ or EntryList must be visible before
 78.3188 -    // the ST of null into _owner in the *subsequent* (following) corresponding
 78.3189 -    // monitorexit.  Recall too, that in 1-0 mode monitorexit does not necessarily
 78.3190 -    // execute a serializing instruction.
 78.3191 -
 78.3192 -    if (SyncFlags & 8) {
 78.3193 -       OrderAccess::fence() ;
 78.3194 -    }
 78.3195 -    return ;
 78.3196 -}
 78.3197 -
 78.3198 -// ExitSuspendEquivalent:
 78.3199 -// A faster alternate to handle_special_suspend_equivalent_condition()
 78.3200 -//
 78.3201 -// handle_special_suspend_equivalent_condition() unconditionally
 78.3202 -// acquires the SR_lock.  On some platforms uncontended MutexLocker()
 78.3203 -// operations have high latency.  Note that in ::enter() we call HSSEC
 78.3204 -// while holding the monitor, so we effectively lengthen the critical sections.
 78.3205 -//
 78.3206 -// There are a number of possible solutions:
 78.3207 -//
 78.3208 -// A.  To ameliorate the problem we might also defer state transitions
 78.3209 -//     to as late as possible -- just prior to parking.
 78.3210 -//     Given that, we'd call HSSEC after having returned from park(),
 78.3211 -//     but before attempting to acquire the monitor.  This is only a
 78.3212 -//     partial solution.  It avoids calling HSSEC while holding the
 78.3213 -//     monitor (good), but it still increases successor reacquisition latency --
 78.3214 -//     the interval between unparking a successor and the time the successor
 78.3215 -//     resumes and retries the lock.  See ReenterI(), which defers state transitions.
 78.3216 -//     If we use this technique we can also avoid EnterI()-exit() loop
 78.3217 -//     in ::enter() where we iteratively drop the lock and then attempt
 78.3218 -//     to reacquire it after suspending.
 78.3219 -//
 78.3220 -// B.  In the future we might fold all the suspend bits into a
 78.3221 -//     composite per-thread suspend flag and then update it with CAS().
 78.3222 -//     Alternately, a Dekker-like mechanism with multiple variables
 78.3223 -//     would suffice:
 78.3224 -//       ST Self->_suspend_equivalent = false
 78.3225 -//       MEMBAR
 78.3226 -//       LD Self->_suspend_flags
 78.3227 -//
 78.3228 -
 78.3229 -
 78.3230 -bool ObjectMonitor::ExitSuspendEquivalent (JavaThread * jSelf) {
 78.3231 -   int Mode = Knob_FastHSSEC ;
 78.3232 -   if (Mode && !jSelf->is_external_suspend()) {
 78.3233 -      assert (jSelf->is_suspend_equivalent(), "invariant") ;
 78.3234 -      jSelf->clear_suspend_equivalent() ;
 78.3235 -      if (2 == Mode) OrderAccess::storeload() ;
 78.3236 -      if (!jSelf->is_external_suspend()) return false ;
 78.3237 -      // We raced a suspension -- fall thru into the slow path
 78.3238 -      TEVENT (ExitSuspendEquivalent - raced) ;
 78.3239 -      jSelf->set_suspend_equivalent() ;
 78.3240 -   }
 78.3241 -   return jSelf->handle_special_suspend_equivalent_condition() ;
 78.3242 -}
 78.3243 -
 78.3244 -
 78.3245 -// ReenterI() is a specialized inline form of the latter half of the
 78.3246 -// contended slow-path from EnterI().  We use ReenterI() only for
 78.3247 -// monitor reentry in wait().
 78.3248 -//
 78.3249 -// In the future we should reconcile EnterI() and ReenterI(), adding
 78.3250 -// Knob_Reset and Knob_SpinAfterFutile support and restructuring the
 78.3251 -// loop accordingly.
 78.3252 -
 78.3253 -void ATTR ObjectMonitor::ReenterI (Thread * Self, ObjectWaiter * SelfNode) {
 78.3254 -    assert (Self != NULL                , "invariant") ;
 78.3255 -    assert (SelfNode != NULL            , "invariant") ;
 78.3256 -    assert (SelfNode->_thread == Self   , "invariant") ;
 78.3257 -    assert (_waiters > 0                , "invariant") ;
 78.3258 -    assert (((oop)(object()))->mark() == markOopDesc::encode(this) , "invariant") ;
 78.3259 -    assert (((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ;
 78.3260 -    JavaThread * jt = (JavaThread *) Self ;
 78.3261 -
 78.3262 -    int nWakeups = 0 ;
 78.3263 -    for (;;) {
 78.3264 -        ObjectWaiter::TStates v = SelfNode->TState ;
 78.3265 -        guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ;
 78.3266 -        assert    (_owner != Self, "invariant") ;
 78.3267 -
 78.3268 -        if (TryLock (Self) > 0) break ;
 78.3269 -        if (TrySpin (Self) > 0) break ;
 78.3270 -
 78.3271 -        TEVENT (Wait Reentry - parking) ;
 78.3272 -
 78.3273 -        // State transition wrappers around park() ...
 78.3274 -        // ReenterI() wisely defers state transitions until
 78.3275 -        // it's clear we must park the thread.
 78.3276 -        {
 78.3277 -           OSThreadContendState osts(Self->osthread());
 78.3278 -           ThreadBlockInVM tbivm(jt);
 78.3279 -
 78.3280 -           // cleared by handle_special_suspend_equivalent_condition()
 78.3281 -           // or java_suspend_self()
 78.3282 -           jt->set_suspend_equivalent();
 78.3283 -           if (SyncFlags & 1) {
 78.3284 -              Self->_ParkEvent->park ((jlong)1000) ;
 78.3285 -           } else {
 78.3286 -              Self->_ParkEvent->park () ;
 78.3287 -           }
 78.3288 -
 78.3289 -           // were we externally suspended while we were waiting?
 78.3290 -           for (;;) {
 78.3291 -              if (!ExitSuspendEquivalent (jt)) break ;
 78.3292 -              if (_succ == Self) { _succ = NULL; OrderAccess::fence(); }
 78.3293 -              jt->java_suspend_self();
 78.3294 -              jt->set_suspend_equivalent();
 78.3295 -           }
 78.3296 -        }
 78.3297 -
 78.3298 -        // Try again, but just so we distinguish between futile wakeups and
 78.3299 -        // successful wakeups.  The following test isn't algorithmically
 78.3300 -        // necessary, but it helps us maintain sensible statistics.
 78.3301 -        if (TryLock(Self) > 0) break ;
 78.3302 -
 78.3303 -        // The lock is still contested.
 78.3304 -        // Keep a tally of the # of futile wakeups.
 78.3305 -        // Note that the counter is not protected by a lock or updated by atomics.
 78.3306 -        // That is by design - we trade "lossy" counters which are exposed to
 78.3307 -        // races during updates for a lower probe effect.
 78.3308 -        TEVENT (Wait Reentry - futile wakeup) ;
 78.3309 -        ++ nWakeups ;
 78.3310 -
 78.3311 -        // Assuming this is not a spurious wakeup we'll normally
 78.3312 -        // find that _succ == Self.
 78.3313 -        if (_succ == Self) _succ = NULL ;
 78.3314 -
 78.3315 -        // Invariant: after clearing _succ a contending thread
 78.3316 -        // *must* retry  _owner before parking.
 78.3317 -        OrderAccess::fence() ;
 78.3318 -
 78.3319 -        if (ObjectSynchronizer::_sync_FutileWakeups != NULL) {
 78.3320 -          ObjectSynchronizer::_sync_FutileWakeups->inc() ;
 78.3321 -        }
 78.3322 -    }
 78.3323 -
 78.3324 -    // Self has acquired the lock -- Unlink Self from the cxq or EntryList.
 78.3325 -    // Normally we'll find Self on the EntryList.
 78.3326 -    // Unlinking from the EntryList is constant-time and atomic-free.
 78.3327 -    // From the perspective of the lock owner (this thread), the
 78.3328 -    // EntryList is stable and cxq is prepend-only.
 78.3329 -    // The head of cxq is volatile but the interior is stable.
 78.3330 -    // In addition, Self.TState is stable.
 78.3331 -
 78.3332 -    assert (_owner == Self, "invariant") ;
 78.3333 -    assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
 78.3334 -    UnlinkAfterAcquire (Self, SelfNode) ;
 78.3335 -    if (_succ == Self) _succ = NULL ;
 78.3336 -    assert (_succ != Self, "invariant") ;
 78.3337 -    SelfNode->TState = ObjectWaiter::TS_RUN ;
 78.3338 -    OrderAccess::fence() ;      // see comments at the end of EnterI()
 78.3339 -}
 78.3340 -
 78.3341 -bool ObjectMonitor::try_enter(Thread* THREAD) {
 78.3342 -  if (THREAD != _owner) {
 78.3343 -    if (THREAD->is_lock_owned ((address)_owner)) {
 78.3344 -       assert(_recursions == 0, "internal state error");
 78.3345 -       _owner = THREAD ;
 78.3346 -       _recursions = 1 ;
 78.3347 -       OwnerIsThread = 1 ;
 78.3348 -       return true;
 78.3349 -    }
 78.3350 -    if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
 78.3351 -      return false;
 78.3352 -    }
 78.3353 -    return true;
 78.3354 -  } else {
 78.3355 -    _recursions++;
 78.3356 -    return true;
 78.3357 -  }
 78.3358 -}
 78.3359 -
 78.3360 -void ATTR ObjectMonitor::enter(TRAPS) {
 78.3361 -  // The following code is ordered to check the most common cases first
 78.3362 -  // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
 78.3363 -  Thread * const Self = THREAD ;
 78.3364 -  void * cur ;
 78.3365 -
 78.3366 -  cur = Atomic::cmpxchg_ptr (Self, &_owner, NULL) ;
 78.3367 -  if (cur == NULL) {
 78.3368 -     // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
 78.3369 -     assert (_recursions == 0   , "invariant") ;
 78.3370 -     assert (_owner      == Self, "invariant") ;
 78.3371 -     // CONSIDER: set or assert OwnerIsThread == 1
 78.3372 -     return ;
 78.3373 -  }
 78.3374 -
 78.3375 -  if (cur == Self) {
 78.3376 -     // TODO-FIXME: check for integer overflow!  BUGID 6557169.
 78.3377 -     _recursions ++ ;
 78.3378 -     return ;
 78.3379 -  }
 78.3380 -
 78.3381 -  if (Self->is_lock_owned ((address)cur)) {
 78.3382 -    assert (_recursions == 0, "internal state error");
 78.3383 -    _recursions = 1 ;
 78.3384 -    // Commute owner from a thread-specific on-stack BasicLockObject address to
 78.3385 -    // a full-fledged "Thread *".
 78.3386 -    _owner = Self ;
 78.3387 -    OwnerIsThread = 1 ;
 78.3388 -    return ;
 78.3389 -  }
 78.3390 -
 78.3391 -  // We've encountered genuine contention.
 78.3392 -  assert (Self->_Stalled == 0, "invariant") ;
 78.3393 -  Self->_Stalled = intptr_t(this) ;
 78.3394 -
 78.3395 -  // Try one round of spinning *before* enqueueing Self
 78.3396 -  // and before going through the awkward and expensive state
 78.3397 -  // transitions.  The following spin is strictly optional ...
 78.3398 -  // Note that if we acquire the monitor from an initial spin
 78.3399 -  // we forgo posting JVMTI events and firing DTRACE probes.
 78.3400 -  if (Knob_SpinEarly && TrySpin (Self) > 0) {
 78.3401 -     assert (_owner == Self      , "invariant") ;
 78.3402 -     assert (_recursions == 0    , "invariant") ;
 78.3403 -     assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
 78.3404 -     Self->_Stalled = 0 ;
 78.3405 -     return ;
 78.3406 -  }
 78.3407 -
 78.3408 -  assert (_owner != Self          , "invariant") ;
 78.3409 -  assert (_succ  != Self          , "invariant") ;
 78.3410 -  assert (Self->is_Java_thread()  , "invariant") ;
 78.3411 -  JavaThread * jt = (JavaThread *) Self ;
 78.3412 -  assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ;
 78.3413 -  assert (jt->thread_state() != _thread_blocked   , "invariant") ;
 78.3414 -  assert (this->object() != NULL  , "invariant") ;
 78.3415 -  assert (_count >= 0, "invariant") ;
 78.3416 -
 78.3417 -  // Prevent deflation at STW-time.  See deflate_idle_monitors() and is_busy().
 78.3418 -  // Ensure the object-monitor relationship remains stable while there's contention.
 78.3419 -  Atomic::inc_ptr(&_count);
 78.3420 -
 78.3421 -  { // Change java thread status to indicate blocked on monitor enter.
 78.3422 -    JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
 78.3423 -
 78.3424 -    DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
 78.3425 -    if (JvmtiExport::should_post_monitor_contended_enter()) {
 78.3426 -      JvmtiExport::post_monitor_contended_enter(jt, this);
 78.3427 -    }
 78.3428 -
 78.3429 -    OSThreadContendState osts(Self->osthread());
 78.3430 -    ThreadBlockInVM tbivm(jt);
 78.3431 -
 78.3432 -    Self->set_current_pending_monitor(this);
 78.3433 -
 78.3434 -    // TODO-FIXME: change the following for(;;) loop to straight-line code.
 78.3435 -    for (;;) {
 78.3436 -      jt->set_suspend_equivalent();
 78.3437 -      // cleared by handle_special_suspend_equivalent_condition()
 78.3438 -      // or java_suspend_self()
 78.3439 -
 78.3440 -      EnterI (THREAD) ;
 78.3441 -
 78.3442 -      if (!ExitSuspendEquivalent(jt)) break ;
 78.3443 -
 78.3444 -      //
 78.3445 -      // We have acquired the contended monitor, but while we were
 78.3446 -      // waiting another thread suspended us. We don't want to enter
 78.3447 -      // the monitor while suspended because that would surprise the
 78.3448 -      // thread that suspended us.
 78.3449 -      //
 78.3450 -      _recursions = 0 ;
 78.3451 -      _succ = NULL ;
 78.3452 -      exit (Self) ;
 78.3453 -
 78.3454 -      jt->java_suspend_self();
 78.3455 -    }
 78.3456 -    Self->set_current_pending_monitor(NULL);
 78.3457 -  }
 78.3458 -
 78.3459 -  Atomic::dec_ptr(&_count);
 78.3460 -  assert (_count >= 0, "invariant") ;
 78.3461 -  Self->_Stalled = 0 ;
 78.3462 -
 78.3463 -  // Must either set _recursions = 0 or ASSERT _recursions == 0.
 78.3464 -  assert (_recursions == 0     , "invariant") ;
 78.3465 -  assert (_owner == Self       , "invariant") ;
 78.3466 -  assert (_succ  != Self       , "invariant") ;
 78.3467 -  assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
 78.3468 -
 78.3469 -  // The thread -- now the owner -- is back in vm mode.
 78.3470 -  // Report the glorious news via TI,DTrace and jvmstat.
 78.3471 -  // The probe effect is non-trivial.  All the reportage occurs
 78.3472 -  // while we hold the monitor, increasing the length of the critical
 78.3473 -  // section.  Amdahl's parallel speedup law comes vividly into play.
 78.3474 -  //
 78.3475 -  // Another option might be to aggregate the events (thread local or
 78.3476 -  // per-monitor aggregation) and defer reporting until a more opportune
 78.3477 -  // time -- such as next time some thread encounters contention but has
 78.3478 -  // yet to acquire the lock.  While that thread is spinning we could
 78.3479 -  // increment JVMStat counters, etc.
 78.3480 -
 78.3481 -  DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt);
 78.3482 -  if (JvmtiExport::should_post_monitor_contended_entered()) {
 78.3483 -    JvmtiExport::post_monitor_contended_entered(jt, this);
 78.3484 -  }
 78.3485 -  if (ObjectSynchronizer::_sync_ContendedLockAttempts != NULL) {
 78.3486 -     ObjectSynchronizer::_sync_ContendedLockAttempts->inc() ;
 78.3487 -  }
 78.3488 -}
 78.3489 -
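
enter() above is structured as: fast-path CAS, recursion check, optional early spin, then a loop that enqueues, parks, and re-checks the suspend-equivalent state. The condensed sketch below is illustrative only; std::this_thread::yield() stands in for the EnterI()/park machinery, and the JVMTI, DTrace, and suspend handling are omitted.

    #include <atomic>
    #include <thread>

    struct ToyEnter {
      std::atomic<void*> owner{nullptr};

      bool try_cas(void* self) {
        void* expected = nullptr;
        return owner.compare_exchange_strong(expected, self,
                                             std::memory_order_acquire,
                                             std::memory_order_relaxed);
      }

      void enter(void* self) {
        if (try_cas(self)) return;          // uncontended fast path
        for (int i = 0; i < 100; ++i) {     // bounded early spin (Knob_SpinEarly analogue)
          if (try_cas(self)) return;
          std::this_thread::yield();
        }
        while (!try_cas(self)) {            // slow path: a real monitor would
          std::this_thread::yield();        // enqueue on cxq and park here
        }
      }
    };
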
 78.3490 -void ObjectMonitor::ExitEpilog (Thread * Self, ObjectWaiter * Wakee) {
 78.3491 -   assert (_owner == Self, "invariant") ;
 78.3492 -
 78.3493 -   // Exit protocol:
 78.3494 -   // 1. ST _succ = wakee
 78.3495 -   // 2. membar #loadstore|#storestore;
 78.3496 -   // 3. ST _owner = NULL
 78.3497 -   // 4. unpark(wakee)
 78.3498 -
 78.3499 -   _succ = Knob_SuccEnabled ? Wakee->_thread : NULL ;
 78.3500 -   ParkEvent * Trigger = Wakee->_event ;
 78.3501 -
 78.3502 -   // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again.
 78.3503 -   // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
 78.3504 -   // out-of-scope (non-extant).
 78.3505 -   Wakee  = NULL ;
 78.3506 -
 78.3507 -   // Drop the lock
 78.3508 -   OrderAccess::release_store_ptr (&_owner, NULL) ;
 78.3509 -   OrderAccess::fence() ;                               // ST _owner vs LD in unpark()
 78.3510 -
 78.3511 -   // TODO-FIXME:
 78.3512 -   // If there's a safepoint pending the best policy would be to
 78.3513 -   // get _this thread to a safepoint and only wake the successor
 78.3514 -   // after the safepoint completed.  monitorexit uses a "leaf"
 78.3515 -   // state transition, however, so this thread can't become
 78.3516 -   // safe at this point in time.  (Its stack isn't walkable).
 78.3517 -   // The next best thing is to defer waking the successor by
 78.3518 -   // adding it to a list of threads to be unparked at the
 78.3519 -   // end of the forthcoming STW.
 78.3520 -   if (SafepointSynchronize::do_call_back()) {
 78.3521 -      TEVENT (unpark before SAFEPOINT) ;
 78.3522 -   }
 78.3523 -
 78.3524 -   // Possible optimizations ...
 78.3525 -   //
 78.3526 -   // * Consider: set Wakee->UnparkTime = timeNow()
 78.3527 -   //   When the thread wakes up it'll compute (timeNow() - Self->UnparkTime()).
 78.3528 -   //   By measuring recent ONPROC latency we can approximate the
 78.3529 -   //   system load.  In turn, we can feed that information back
 78.3530 -   //   into the spinning & succession policies.
 78.3531 -   //   (ONPROC latency correlates strongly with load).
 78.3532 -   //
 78.3533 -   // * Pull affinity:
 78.3534 -   //   If the wakee is cold then transiently setting its affinity
 78.3535 -   //   to the current CPU is a good idea.
 78.3536 -   //   See http://j2se.east/~dice/PERSIST/050624-PullAffinity.txt
 78.3537 -   DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
 78.3538 -   Trigger->unpark() ;
 78.3539 -
 78.3540 -   // Maintain stats and report events to JVMTI
 78.3541 -   if (ObjectSynchronizer::_sync_Parks != NULL) {
 78.3542 -      ObjectSynchronizer::_sync_Parks->inc() ;
 78.3543 -   }
 78.3544 -}
 78.3545 -
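
The ordering that ExitEpilog() depends on -- publish the successor, drop the lock with release semantics, then fence before the unpark and any later loads -- can be written with std::atomic as below. This is a sketch of the protocol only; the unpark step and HotSpot's ParkEvent are left out.

    #include <atomic>

    struct ToyExitEpilog {
      std::atomic<void*> owner{nullptr};
      std::atomic<void*> succ{nullptr};

      void exit_epilog(void* wakee) {
        succ.store(wakee, std::memory_order_relaxed);         // 1. ST _succ = wakee
        owner.store(nullptr, std::memory_order_release);      // 2.+3. release; drop the lock
        std::atomic_thread_fence(std::memory_order_seq_cst);  // ST _owner vs LD in unpark()
        // 4. unpark(wakee) would follow here.
      }
    };
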
 78.3546 -
 78.3547 -// exit()
 78.3548 -// ~~~~~~
 78.3549 -// Note that the collector can't reclaim the objectMonitor or deflate
 78.3550 -// the object out from underneath the thread calling ::exit() as the
 78.3551 -// thread calling ::exit() never transitions to a stable state.
 78.3552 -// This inhibits GC, which in turn inhibits asynchronous (and
 78.3553 -// inopportune) reclamation of "this".
 78.3554 -//
 78.3555 -// We'd like to assert that: (THREAD->thread_state() != _thread_blocked) ;
 78.3556 -// There's one exception to the claim above, however.  EnterI() can call
 78.3557 -// exit() to drop a lock if the acquirer has been externally suspended.
 78.3558 -// In that case exit() is called with _thread_state as _thread_blocked,
 78.3559 -// but the monitor's _count field is > 0, which inhibits reclamation.
 78.3560 -//
 78.3561 -// 1-0 exit
 78.3562 -// ~~~~~~~~
 78.3563 -// ::exit() uses a canonical 1-1 idiom with a MEMBAR although some of
 78.3564 -// the fast-path operators have been optimized so the common ::exit()
 78.3565 -// operation is 1-0.  See i486.ad fast_unlock(), for instance.
 78.3566 -// The code emitted by fast_unlock() elides the usual MEMBAR.  This
 78.3567 -// greatly improves latency -- MEMBAR and CAS having considerable local
 78.3568 -// latency on modern processors -- but at the cost of "stranding".  Absent the
 78.3569 -// MEMBAR, a thread in fast_unlock() can race a thread in the slow
 78.3570 -// ::enter() path, resulting in the entering thread being stranded
 78.3571 -// and a progress-liveness failure.   Stranding is extremely rare.
 78.3572 -// We use timers (timed park operations) & periodic polling to detect
 78.3573 -// and recover from stranding.  Potentially stranded threads periodically
 78.3574 -// wake up and poll the lock.  See the usage of the _Responsible variable.
 78.3575 -//
 78.3576 -// The CAS() in enter provides for safety and exclusion, while the CAS or
 78.3577 -// MEMBAR in exit provides for progress and avoids stranding.  1-0 locking
 78.3578 -// eliminates the CAS/MEMBAR from the exit path, but it admits stranding.
 78.3579 -// We detect and recover from stranding with timers.
 78.3580 -//
 78.3581 -// If a thread transiently strands it'll park until (a) another
 78.3582 -// thread acquires the lock and then drops the lock, at which time the
 78.3583 -// exiting thread will notice and unpark the stranded thread, or, (b)
 78.3584 -// the timer expires.  If the lock is high traffic then the stranding latency
 78.3585 -// will be low due to (a).  If the lock is low traffic then the odds of
 78.3586 -// stranding are lower, although the worst-case stranding latency
 78.3587 -// is longer.  Critically, we don't want to put excessive load in the
 78.3588 -// platform's timer subsystem.  We want to minimize both the timer injection
 78.3589 -// rate (timers created/sec) as well as the number of timers active at
 78.3590 -// any one time.  (more precisely, we want to minimize timer-seconds, which is
 78.3591 -// the integral of the # of active timers at any instant over time).
 78.3592 -// Both impinge on OS scalability.  Given that, at most one thread parked on
 78.3593 -// a monitor will use a timer.
 78.3594 -
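
The stranding hazard described above is the classic Dekker/Lamport pattern: each side stores to its own variable, then (in the safe 1-1 variant) fences, then loads the other's. The stand-alone sketch below, using std::atomic in place of HotSpot's primitives, is illustrative only; if the exiting side elides its fence (the 1-0 optimization) both loads can miss the other side's store, which is exactly the stranding that the timed parks and _Responsible polling recover from.

    #include <atomic>

    std::atomic<void*> owner{nullptr};
    std::atomic<void*> entry_queue{nullptr};   // stand-in for EntryList|cxq

    void exit_side() {
      owner.store(nullptr, std::memory_order_release);          // ST Owner = null
      std::atomic_thread_fence(std::memory_order_seq_cst);      // MEMBAR (elided in 1-0 mode)
      if (entry_queue.load(std::memory_order_relaxed) != nullptr) {
        // wake a successor
      }
    }

    void enter_side(void* self_node) {
      entry_queue.store(self_node, std::memory_order_release);  // ST EntryList|cxq = nonnull
      std::atomic_thread_fence(std::memory_order_seq_cst);      // MEMBAR
      if (owner.load(std::memory_order_relaxed) == nullptr) {
        // lock looks free: retry the CAS instead of parking
      }
    }
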
 78.3595 -void ATTR ObjectMonitor::exit(TRAPS) {
 78.3596 -   Thread * Self = THREAD ;
 78.3597 -   if (THREAD != _owner) {
 78.3598 -     if (THREAD->is_lock_owned((address) _owner)) {
 78.3599 -       // Transmute _owner from a BasicLock pointer to a Thread address.
 78.3600 -       // We don't need to hold _mutex for this transition.
 78.3601 -       // Non-null to Non-null is safe as long as all readers can
 78.3602 -       // tolerate either flavor.
 78.3603 -       assert (_recursions == 0, "invariant") ;
 78.3604 -       _owner = THREAD ;
 78.3605 -       _recursions = 0 ;
 78.3606 -       OwnerIsThread = 1 ;
 78.3607 -     } else {
 78.3608 -       // NOTE: we need to handle unbalanced monitor enter/exit
 78.3609 -       // in native code by throwing an exception.
 78.3610 -       // TODO: Throw an IllegalMonitorStateException ?
 78.3611 -       TEVENT (Exit - Throw IMSX) ;
 78.3612 -       assert(false, "Non-balanced monitor enter/exit!");
 78.3613 -       if (false) {
 78.3614 -          THROW(vmSymbols::java_lang_IllegalMonitorStateException());
 78.3615 -       }
 78.3616 -       return;
 78.3617 -     }
 78.3618 -   }
 78.3619 -
 78.3620 -   if (_recursions != 0) {
 78.3621 -     _recursions--;        // this is simple recursive enter
 78.3622 -     TEVENT (Inflated exit - recursive) ;
 78.3623 -     return ;
 78.3624 -   }
 78.3625 -
 78.3626 -   // Invariant: after setting Responsible=null a thread must execute
 78.3627 -   // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
 78.3628 -   if ((SyncFlags & 4) == 0) {
 78.3629 -      _Responsible = NULL ;
 78.3630 -   }
 78.3631 -
 78.3632 -   for (;;) {
 78.3633 -      assert (THREAD == _owner, "invariant") ;
 78.3634 -
 78.3635 -      // Fast-path monitor exit:
 78.3636 -      //
 78.3637 -      // Observe the Dekker/Lamport duality:
 78.3638 -      // A thread in ::exit() executes:
 78.3639 -      //   ST Owner=null; MEMBAR; LD EntryList|cxq.
 78.3640 -      // A thread in the contended ::enter() path executes the complementary:
 78.3641 -      //   ST EntryList|cxq = nonnull; MEMBAR; LD Owner.
 78.3642 -      //
 78.3643 -      // Note that there's a benign race in the exit path.  We can drop the
 78.3644 -      // lock, another thread can reacquire the lock immediately, and we can
 78.3645 -      // then wake a thread unnecessarily (yet another flavor of futile wakeup).
 78.3646 -      // This is benign, and we've structured the code so the windows are short
 78.3647 -      // and the frequency of such futile wakeups is low.
 78.3648 -      //
 78.3649 -      // We could eliminate the race by encoding both the "LOCKED" state and
 78.3650 -      // the queue head in a single word.  Exit would then use either CAS to
 78.3651 -      // clear the LOCKED bit/byte.  This precludes the desirable 1-0 optimization,
 78.3652 -      // however.
 78.3653 -      //
 78.3654 -      // Possible fast-path ::exit() optimization:
 78.3655 -      // The current fast-path exit implementation fetches both cxq and EntryList.
 78.3656 -      // See also i486.ad fast_unlock().  Testing has shown that two LDs
 78.3657 -      // aren't measurably slower than a single LD on any platform.
 78.3658 -      // Still, we could reduce the 2 LDs to one or zero by one of the following:
 78.3659 -      //
 78.3660 -      // - Use _count instead of cxq|EntryList
 78.3661 -      //   We intend to eliminate _count, however, when we switch
 78.3662 -      //   to on-the-fly deflation in ::exit() as is used in
 78.3663 -      //   Metalocks and RelaxedLocks.
 78.3664 -      //
 78.3665 -      // - Establish the invariant that cxq == null implies EntryList == null.
 78.3666 -      //   set cxq == EMPTY (1) to encode the state where cxq is empty
 78.3667 -      //   but EntryList != null.  EMPTY is a distinguished value.
 78.3668 -      //   The fast-path exit() would fetch cxq but not EntryList.
 78.3669 -      //
 78.3670 -      // - Encode succ as follows:
 78.3671 -      //   succ = t :  Thread t is the successor -- t is ready or is spinning.
 78.3672 -      //               Exiting thread does not need to wake a successor.
 78.3673 -      //   succ = 0 :  No successor required -> (EntryList|cxq) == null
 78.3674 -      //               Exiting thread does not need to wake a successor
 78.3675 -      //   succ = 1 :  Successor required    -> (EntryList|cxq) != null and
 78.3676 -      //               logically succ == null.
 78.3677 -      //               Exiting thread must wake a successor.
 78.3678 -      //
 78.3679 -      //   The 1-1 fast-exit path would appear as :
 78.3680 -      //     _owner = null ; membar ;
 78.3681 -      //     if (_succ == 1 && CAS (&_owner, null, Self) == null) goto SlowPath
 78.3682 -      //     goto FastPathDone ;
 78.3683 -      //
 78.3684 -      //   and the 1-0 fast-exit path would appear as:
 78.3685 -      //      if (_succ == 1) goto SlowPath
 78.3686 -      //      Owner = null ;
 78.3687 -      //      goto FastPathDone
 78.3688 -      //
 78.3689 -      // - Encode the LSB of _owner as 1 to indicate that exit()
 78.3690 -      //   must use the slow-path and make a successor ready.
 78.3691 -      //   (_owner & 1) == 0 IFF succ != null || (EntryList|cxq) == null
 78.3692 -      //   (_owner & 1) == 1 IFF succ == null && (EntryList|cxq) != null (obviously)
 78.3693 -      //   The 1-0 fast exit path would read:
 78.3694 -      //      if (_owner != Self) goto SlowPath
 78.3695 -      //      _owner = null
 78.3696 -      //      goto FastPathDone
 78.3697 -
 78.3698 -      if (Knob_ExitPolicy == 0) {
 78.3699 -         // release semantics: prior loads and stores from within the critical section
 78.3700 -         // must not float (reorder) past the following store that drops the lock.
 78.3701 -         // On SPARC that requires MEMBAR #loadstore|#storestore.
 78.3702 -         // But of course in TSO #loadstore|#storestore is not required.
 78.3703 -         // I'd like to write one of the following:
 78.3704 -         // A.  OrderAccess::release() ; _owner = NULL
 78.3705 -         // B.  OrderAccess::loadstore(); OrderAccess::storestore(); _owner = NULL;
 78.3706 -         // Unfortunately OrderAccess::release() and OrderAccess::loadstore() both
 78.3707 -         // store into a _dummy variable.  That store is not needed, but can result
 78.3708 -         // in massive wasteful coherency traffic on classic SMP systems.
 78.3709 -         // Instead, I use release_store(), which is implemented as just a simple
 78.3710 -         // ST on x64, x86 and SPARC.
 78.3711 -         OrderAccess::release_store_ptr (&_owner, NULL) ;   // drop the lock
 78.3712 -         OrderAccess::storeload() ;                         // See if we need to wake a successor
 78.3713 -         if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
 78.3714 -            TEVENT (Inflated exit - simple egress) ;
 78.3715 -            return ;
 78.3716 -         }
 78.3717 -         TEVENT (Inflated exit - complex egress) ;
 78.3718 -
 78.3719 -         // Normally the exiting thread is responsible for ensuring succession,
 78.3720 -         // but if other successors are ready or other entering threads are spinning
 78.3721 -         // then this thread can simply store NULL into _owner and exit without
 78.3722 -         // waking a successor.  The existence of spinners or ready successors
 78.3723 -         // guarantees proper succession (liveness).  Responsibility passes to the
 78.3724 -         // ready or running successors.  The exiting thread delegates the duty.
 78.3725 -         // More precisely, if a successor already exists this thread is absolved
 78.3726 -         // of the responsibility of waking (unparking) one.
 78.3727 -         //
 78.3728 -         // The _succ variable is critical to reducing futile wakeup frequency.
 78.3729 -         // _succ identifies the "heir presumptive" thread that has been made
 78.3730 -         // ready (unparked) but that has not yet run.  We need only one such
 78.3731 -         // successor thread to guarantee progress.
 78.3732 -         // See http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
 78.3733 -         // section 3.3 "Futile Wakeup Throttling" for details.
 78.3734 -         //
 78.3735 -         // Note that spinners in Enter() also set _succ non-null.
 78.3736 -         // In the current implementation spinners opportunistically set
 78.3737 -         // _succ so that exiting threads might avoid waking a successor.
 78.3738 -         // Another less appealing alternative would be for the exiting thread
 78.3739 -         // to drop the lock and then spin briefly to see if a spinner managed
 78.3740 -         // to acquire the lock.  If so, the exiting thread could exit
 78.3741 -         // immediately without waking a successor, otherwise the exiting
 78.3742 -         // thread would need to dequeue and wake a successor.
 78.3743 -         // (Note that we'd need to make the post-drop spin short, but no
 78.3744 -         // shorter than the worst-case round-trip cache-line migration time.
 78.3745 -         // The dropped lock needs to become visible to the spinner, and then
 78.3746 -         // the acquisition of the lock by the spinner must become visible to
 78.3747 -         // the exiting thread).
 78.3748 -         //
 78.3749 -
 78.3750 -         // It appears that an heir-presumptive (successor) must be made ready.
 78.3751 -         // Only the current lock owner can manipulate the EntryList or
 78.3752 -         // drain _cxq, so we need to reacquire the lock.  If we fail
 78.3753 -         // to reacquire the lock the responsibility for ensuring succession
 78.3754 -         // falls to the new owner.
 78.3755 -         //
 78.3756 -         if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
 78.3757 -            return ;
 78.3758 -         }
 78.3759 -         TEVENT (Exit - Reacquired) ;
 78.3760 -      } else {
 78.3761 -         if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
 78.3762 -            OrderAccess::release_store_ptr (&_owner, NULL) ;   // drop the lock
 78.3763 -            OrderAccess::storeload() ;
 78.3764 -            // Ratify the previously observed values.
 78.3765 -            if (_cxq == NULL || _succ != NULL) {
 78.3766 -                TEVENT (Inflated exit - simple egress) ;
 78.3767 -                return ;
 78.3768 -            }
 78.3769 -
 78.3770 -            // inopportune interleaving -- the exiting thread (this thread)
 78.3771 -            // in the fast-exit path raced an entering thread in the slow-enter
 78.3772 -            // path.
 78.3773 -            // We have two choices:
 78.3774 -            // A.  Try to reacquire the lock.
 78.3775 -            //     If the CAS() fails return immediately, otherwise
 78.3776 -            //     we either restart/rerun the exit operation, or simply
 78.3777 -            //     fall-through into the code below which wakes a successor.
 78.3778 -            // B.  If the elements forming the EntryList|cxq are TSM
 78.3779 -            //     we could simply unpark() the lead thread and return
 78.3780 -            //     without having set _succ.
 78.3781 -            if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
 78.3782 -               TEVENT (Inflated exit - reacquired succeeded) ;
 78.3783 -               return ;
 78.3784 -            }
 78.3785 -            TEVENT (Inflated exit - reacquired failed) ;
 78.3786 -         } else {
 78.3787 -            TEVENT (Inflated exit - complex egress) ;
 78.3788 -         }
 78.3789 -      }
 78.3790 -
 78.3791 -      guarantee (_owner == THREAD, "invariant") ;
 78.3792 -
 78.3793 -      // Select an appropriate successor ("heir presumptive") from the EntryList
 78.3794 -      // and make it ready.  Generally we just wake the head of EntryList .
 78.3795 -      // There's no algorithmic constraint that we use the head - it's just
 78.3796 -      // a policy decision.   Note that the thread at head of the EntryList
 78.3797 -      // remains at the head until it acquires the lock.  This means we'll
 78.3798 -      // repeatedly wake the same thread until it manages to grab the lock.
 78.3799 -      // This is generally a good policy - if we're seeing lots of futile wakeups
 78.3800 -      // at least we're waking/rewaking a thread that's likely to be hot or warm
 78.3801 -      // (have residual D$ and TLB affinity).
 78.3802 -      //
 78.3803 -      // "Wakeup locality" optimization:
 78.3804 -      // http://j2se.east/~dice/PERSIST/040825-WakeLocality.txt
 78.3805 -      // In the future we'll try to bias the selection mechanism
 78.3806 -      // to preferentially pick a thread that recently ran on
 78.3807 -      // a processor element that shares cache with the CPU on which
 78.3808 -      // the exiting thread is running.   We need access to Solaris'
 78.3809 -      // schedctl.sc_cpu to make that work.
 78.3810 -      //
 78.3811 -      ObjectWaiter * w = NULL ;
 78.3812 -      int QMode = Knob_QMode ;
 78.3813 -
 78.3814 -      if (QMode == 2 && _cxq != NULL) {
 78.3815 -          // QMode == 2 : cxq has precedence over EntryList.
 78.3816 -          // Try to directly wake a successor from the cxq.
 78.3817 -          // If successful, the successor will need to unlink itself from cxq.
 78.3818 -          w = _cxq ;
 78.3819 -          assert (w != NULL, "invariant") ;
 78.3820 -          assert (w->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
 78.3821 -          ExitEpilog (Self, w) ;
 78.3822 -          return ;
 78.3823 -      }
 78.3824 -
 78.3825 -      if (QMode == 3 && _cxq != NULL) {
 78.3826 -          // Aggressively drain cxq into EntryList at the first opportunity.
 78.3827 -          // This policy ensures that recently-run threads live at the head of EntryList.
 78.3828 -          // Drain _cxq into EntryList - bulk transfer.
 78.3829 -          // First, detach _cxq.
 78.3830 -          // The following loop is tantamount to: w = swap (&cxq, NULL)
 78.3831 -          w = _cxq ;
 78.3832 -          for (;;) {
 78.3833 -             assert (w != NULL, "Invariant") ;
 78.3834 -             ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
 78.3835 -             if (u == w) break ;
 78.3836 -             w = u ;
 78.3837 -          }
 78.3838 -          assert (w != NULL              , "invariant") ;
 78.3839 -
 78.3840 -          ObjectWaiter * q = NULL ;
 78.3841 -          ObjectWaiter * p ;
 78.3842 -          for (p = w ; p != NULL ; p = p->_next) {
 78.3843 -              guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
 78.3844 -              p->TState = ObjectWaiter::TS_ENTER ;
 78.3845 -              p->_prev = q ;
 78.3846 -              q = p ;
 78.3847 -          }
 78.3848 -
 78.3849 -          // Append the RATs to the EntryList
 78.3850 -          // TODO: organize EntryList as a CDLL so we can locate the tail in constant-time.
 78.3851 -          ObjectWaiter * Tail ;
 78.3852 -          for (Tail = _EntryList ; Tail != NULL && Tail->_next != NULL ; Tail = Tail->_next) ;
 78.3853 -          if (Tail == NULL) {
 78.3854 -              _EntryList = w ;
 78.3855 -          } else {
 78.3856 -              Tail->_next = w ;
 78.3857 -              w->_prev = Tail ;
 78.3858 -          }
 78.3859 -
 78.3860 -          // Fall thru into code that tries to wake a successor from EntryList
 78.3861 -      }
 78.3862 -
 78.3863 -      if (QMode == 4 && _cxq != NULL) {
 78.3864 -          // Aggressively drain cxq into EntryList at the first opportunity.
 78.3865 -          // This policy ensures that recently-run threads live at the head of EntryList.
 78.3866 -
 78.3867 -          // Drain _cxq into EntryList - bulk transfer.
 78.3868 -          // First, detach _cxq.
 78.3869 -          // The following loop is tantamount to: w = swap (&cxq, NULL)
 78.3870 -          w = _cxq ;
 78.3871 -          for (;;) {
 78.3872 -             assert (w != NULL, "Invariant") ;
 78.3873 -             ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
 78.3874 -             if (u == w) break ;
 78.3875 -             w = u ;
 78.3876 -          }
 78.3877 -          assert (w != NULL              , "invariant") ;
 78.3878 -
 78.3879 -          ObjectWaiter * q = NULL ;
 78.3880 -          ObjectWaiter * p ;
 78.3881 -          for (p = w ; p != NULL ; p = p->_next) {
 78.3882 -              guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
 78.3883 -              p->TState = ObjectWaiter::TS_ENTER ;
 78.3884 -              p->_prev = q ;
 78.3885 -              q = p ;
 78.3886 -          }
 78.3887 -
 78.3888 -          // Prepend the RATs to the EntryList
 78.3889 -          if (_EntryList != NULL) {
 78.3890 -              q->_next = _EntryList ;
 78.3891 -              _EntryList->_prev = q ;
 78.3892 -          }
 78.3893 -          _EntryList = w ;
 78.3894 -
 78.3895 -          // Fall thru into code that tries to wake a successor from EntryList
 78.3896 -      }
 78.3897 -
 78.3898 -      w = _EntryList  ;
 78.3899 -      if (w != NULL) {
 78.3900 -          // I'd like to write: guarantee (w->_thread != Self).
 78.3901 -          // But in practice an exiting thread may find itself on the EntryList.
 78.3902 -          // Let's say thread T1 calls O.wait().  Wait() enqueues T1 on O's waitset and
 78.3903 -          // then calls exit().  Exit releases the lock by setting O._owner to NULL.
 78.3904 -          // Let's say T1 then stalls.  T2 acquires O and calls O.notify().  The
 78.3905 -          // notify() operation moves T1 from O's waitset to O's EntryList. T2 then
 78.3906 -          // releases the lock "O".  T1 resumes immediately after the ST of null into
 78.3907 -          // _owner, above.  T1 notices that the EntryList is populated, so it
 78.3908 -          // reacquires the lock and then finds itself on the EntryList.
 78.3909 -          // Given all that, we have to tolerate the circumstance where "w" is
 78.3910 -          // associated with Self.
 78.3911 -          assert (w->TState == ObjectWaiter::TS_ENTER, "invariant") ;
 78.3912 -          ExitEpilog (Self, w) ;
 78.3913 -          return ;
 78.3914 -      }
 78.3915 -
 78.3916 -      // If we find that both _cxq and EntryList are null then just
 78.3917 -      // re-run the exit protocol from the top.
 78.3918 -      w = _cxq ;
 78.3919 -      if (w == NULL) continue ;
 78.3920 -
 78.3921 -      // Drain _cxq into EntryList - bulk transfer.
 78.3922 -      // First, detach _cxq.
 78.3923 -      // The following loop is tantamount to: w = swap (&cxq, NULL)
 78.3924 -      for (;;) {
 78.3925 -          assert (w != NULL, "Invariant") ;
 78.3926 -          ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
 78.3927 -          if (u == w) break ;
 78.3928 -          w = u ;
 78.3929 -      }
 78.3930 -      TEVENT (Inflated exit - drain cxq into EntryList) ;
 78.3931 -
 78.3932 -      assert (w != NULL              , "invariant") ;
 78.3933 -      assert (_EntryList  == NULL    , "invariant") ;
 78.3934 -
 78.3935 -      // Convert the LIFO SLL anchored by _cxq into a DLL.
 78.3936 -      // The list reorganization step operates in O(LENGTH(w)) time.
 78.3937 -      // It's critical that this step operate quickly as
 78.3938 -      // "Self" still holds the outer-lock, restricting parallelism
 78.3939 -      // and effectively lengthening the critical section.
 78.3940 -      // Invariant: s chases t chases u.
 78.3941 -      // TODO-FIXME: consider changing EntryList from a DLL to a CDLL so
 78.3942 -      // we have faster access to the tail.
 78.3943 -
 78.3944 -      if (QMode == 1) {
 78.3945 -         // QMode == 1 : drain cxq to EntryList, reversing order
 78.3946 -         // We also reverse the order of the list.
 78.3947 -         ObjectWaiter * s = NULL ;
 78.3948 -         ObjectWaiter * t = w ;
 78.3949 -         ObjectWaiter * u = NULL ;
 78.3950 -         while (t != NULL) {
 78.3951 -             guarantee (t->TState == ObjectWaiter::TS_CXQ, "invariant") ;
 78.3952 -             t->TState = ObjectWaiter::TS_ENTER ;
 78.3953 -             u = t->_next ;
 78.3954 -             t->_prev = u ;
 78.3955 -             t->_next = s ;
 78.3956 -             s = t;
 78.3957 -             t = u ;
 78.3958 -         }
 78.3959 -         _EntryList  = s ;
 78.3960 -         assert (s != NULL, "invariant") ;
 78.3961 -      } else {
 78.3962 -         // QMode == 0 or QMode == 2
 78.3963 -         _EntryList = w ;
 78.3964 -         ObjectWaiter * q = NULL ;
 78.3965 -         ObjectWaiter * p ;
 78.3966 -         for (p = w ; p != NULL ; p = p->_next) {
 78.3967 -             guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
 78.3968 -             p->TState = ObjectWaiter::TS_ENTER ;
 78.3969 -             p->_prev = q ;
 78.3970 -             q = p ;
 78.3971 -         }
 78.3972 -      }
 78.3973 -
 78.3974 -      // In 1-0 mode we need: ST EntryList; MEMBAR #storestore; ST _owner = NULL
 78.3975 -      // The MEMBAR is satisfied by the release_store() operation in ExitEpilog().
 78.3976 -
 78.3977 -      // See if we can abdicate to a spinner instead of waking a thread.
 78.3978 -      // A primary goal of the implementation is to reduce the
 78.3979 -      // context-switch rate.
 78.3980 -      if (_succ != NULL) continue;
 78.3981 -
 78.3982 -      w = _EntryList  ;
 78.3983 -      if (w != NULL) {
 78.3984 -          guarantee (w->TState == ObjectWaiter::TS_ENTER, "invariant") ;
 78.3985 -          ExitEpilog (Self, w) ;
 78.3986 -          return ;
 78.3987 -      }
 78.3988 -   }
 78.3989 -}
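
The drain step inside exit() -- detach the LIFO _cxq with a CAS retry loop (equivalent to w = swap(&_cxq, NULL)) and then thread _prev pointers to build the doubly linked EntryList -- has roughly the shape sketched below. Illustrative only; the names are invented and std::atomic replaces Atomic::cmpxchg_ptr.

    #include <atomic>

    struct Waiter {
      Waiter* next = nullptr;
      Waiter* prev = nullptr;
    };

    std::atomic<Waiter*> cxq{nullptr};

    Waiter* drain_cxq_to_dll() {
      Waiter* w = cxq.load(std::memory_order_acquire);
      while (w != nullptr &&
             !cxq.compare_exchange_weak(w, nullptr,             // w is refreshed on failure
                                        std::memory_order_acq_rel,
                                        std::memory_order_acquire)) {
        // retry until we detach whatever the current head is
      }
      Waiter* q = nullptr;                                      // convert SLL -> DLL
      for (Waiter* p = w; p != nullptr; p = p->next) {
        p->prev = q;
        q = p;
      }
      return w;                                                 // head of the would-be EntryList
    }
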
 78.3990 -// complete_exit exits a lock returning recursion count
 78.3991 -// complete_exit/reenter operate as a wait without waiting
 78.3992 -// complete_exit requires an inflated monitor
 78.3993 -// The _owner field is not always the Thread addr even with an
 78.3994 -// inflated monitor, e.g. the monitor can be inflated by a non-owning
 78.3995 -// thread due to contention.
 78.3996 -intptr_t ObjectMonitor::complete_exit(TRAPS) {
 78.3997 -   Thread * const Self = THREAD;
 78.3998 -   assert(Self->is_Java_thread(), "Must be Java thread!");
 78.3999 -   JavaThread *jt = (JavaThread *)THREAD;
 78.4000 -
 78.4001 -   DeferredInitialize();
 78.4002 -
 78.4003 -   if (THREAD != _owner) {
 78.4004 -    if (THREAD->is_lock_owned ((address)_owner)) {
 78.4005 -       assert(_recursions == 0, "internal state error");
 78.4006 -       _owner = THREAD ;   /* Convert from basiclock addr to Thread addr */
 78.4007 -       _recursions = 0 ;
 78.4008 -       OwnerIsThread = 1 ;
 78.4009 -    }
 78.4010 -   }
 78.4011 -
 78.4012 -   guarantee(Self == _owner, "complete_exit not owner");
 78.4013 -   intptr_t save = _recursions; // record the old recursion count
 78.4014 -   _recursions = 0;        // set the recursion level to be 0
 78.4015 -   exit (Self) ;           // exit the monitor
 78.4016 -   guarantee (_owner != Self, "invariant");
 78.4017 -   return save;
 78.4018 -}
 78.4019 -
 78.4020 -// reenter() enters a lock and sets recursion count
 78.4021 -// complete_exit/reenter operate as a wait without waiting
 78.4022 -void ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
 78.4023 -   Thread * const Self = THREAD;
 78.4024 -   assert(Self->is_Java_thread(), "Must be Java thread!");
 78.4025 -   JavaThread *jt = (JavaThread *)THREAD;
 78.4026 -
 78.4027 -   guarantee(_owner != Self, "reenter already owner");
 78.4028 -   enter (THREAD);       // enter the monitor
 78.4029 -   guarantee (_recursions == 0, "reenter recursion");
 78.4030 -   _recursions = recursions;
 78.4031 -   return;
 78.4032 -}
 78.4033 -
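
complete_exit()/reenter() are the "wait without waiting" pair: record the recursion depth, release the monitor entirely, and later reacquire it and restore the depth. A minimal sketch of that pairing, assuming an invented ToyReentrant owner/recursions layout rather than HotSpot's real fields:

    #include <atomic>

    struct ToyReentrant {
      std::atomic<void*> owner{nullptr};
      int recursions = 0;                    // depth beyond the first acquisition

      void enter(void* self) {               // spin until the CAS wins
        void* expected = nullptr;
        while (!owner.compare_exchange_weak(expected, self,
                                            std::memory_order_acquire,
                                            std::memory_order_relaxed)) {
          expected = nullptr;
        }
      }
      void exit(void*) { owner.store(nullptr, std::memory_order_release); }

      int complete_exit(void* self) {        // drop the monitor in one step
        int save = recursions;               // record the old recursion count
        recursions = 0;
        exit(self);
        return save;
      }
      void reenter(void* self, int save) {   // reacquire and restore the depth
        enter(self);
        recursions = save;
      }
    };
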
 78.4034 -// Note: a subset of changes to ObjectMonitor::wait()
 78.4035 -// will need to be replicated in complete_exit above
 78.4036 -void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
 78.4037 -   Thread * const Self = THREAD ;
 78.4038 -   assert(Self->is_Java_thread(), "Must be Java thread!");
 78.4039 -   JavaThread *jt = (JavaThread *)THREAD;
 78.4040 -
 78.4041 -   DeferredInitialize () ;
 78.4042 -
 78.4043 -   // Throw IMSX or IEX.
 78.4044 -   CHECK_OWNER();
 78.4045 -
 78.4046 -   // check for a pending interrupt
 78.4047 -   if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
 78.4048 -     // post monitor waited event.  Note that this is past-tense, we are done waiting.
 78.4049 -     if (JvmtiExport::should_post_monitor_waited()) {
 78.4050 -        // Note: 'false' parameter is passed here because the
 78.4051 -        // wait was not timed out due to thread interrupt.
 78.4052 -        JvmtiExport::post_monitor_waited(jt, this, false);
 78.4053 -     }
 78.4054 -     TEVENT (Wait - Throw IEX) ;
 78.4055 -     THROW(vmSymbols::java_lang_InterruptedException());
 78.4056 -     return ;
 78.4057 -   }
 78.4058 -   TEVENT (Wait) ;
 78.4059 -
 78.4060 -   assert (Self->_Stalled == 0, "invariant") ;
 78.4061 -   Self->_Stalled = intptr_t(this) ;
 78.4062 -   jt->set_current_waiting_monitor(this);
 78.4063 -
 78.4064 -   // create a node to be put into the queue
 78.4065 -   // Critically, after we reset() the event but prior to park(), we must check
 78.4066 -   // for a pending interrupt.
 78.4067 -   ObjectWaiter node(Self);
 78.4068 -   node.TState = ObjectWaiter::TS_WAIT ;
 78.4069 -   Self->_ParkEvent->reset() ;
 78.4070 -   OrderAccess::fence();          // ST into Event; membar ; LD interrupted-flag
 78.4071 -
 78.4072 -   // Enter the waiting queue, which is a circular doubly linked list in this case
 78.4073 -   // but it could be a priority queue or any data structure.
 78.4074 -   // _WaitSetLock protects the wait queue.  Normally the wait queue is accessed only
 78.4075 -   // by the owner of the monitor *except* in the case where park()
 78.4076 -   // returns because of a timeout or interrupt.  Contention is exceptionally rare
 78.4077 -   // so we use a simple spin-lock instead of a heavier-weight blocking lock.
 78.4078 -
 78.4079 -   Thread::SpinAcquire (&_WaitSetLock, "WaitSet - add") ;
 78.4080 -   AddWaiter (&node) ;
 78.4081 -   Thread::SpinRelease (&_WaitSetLock) ;
 78.4082 -
 78.4083 -   if ((SyncFlags & 4) == 0) {
 78.4084 -      _Responsible = NULL ;
 78.4085 -   }
 78.4086 -   intptr_t save = _recursions; // record the old recursion count
 78.4087 -   _waiters++;                  // increment the number of waiters
 78.4088 -   _recursions = 0;             // set the recursion level to be 0
 78.4089 -   exit (Self) ;                    // exit the monitor
 78.4090 -   guarantee (_owner != Self, "invariant") ;
 78.4091 -
 78.4092 -   // As soon as the ObjectMonitor's ownership is dropped in the exit()
 78.4093 -   // call above, another thread can enter() the ObjectMonitor, do the
 78.4094 -   // notify(), and exit() the ObjectMonitor. If the other thread's
 78.4095 -   // exit() call chooses this thread as the successor and the unpark()
 78.4096 -   // call happens to occur while this thread is posting a
 78.4097 -   // MONITOR_CONTENDED_EXIT event, then we run the risk of the event
 78.4098 -   // handler using RawMonitors and consuming the unpark().
 78.4099 -   //
 78.4100 -   // To avoid the problem, we re-post the event. This does no harm
 78.4101 -   // even if the original unpark() was not consumed because we are the
 78.4102 -   // chosen successor for this monitor.
 78.4103 -   if (node._notified != 0 && _succ == Self) {
 78.4104 -      node._event->unpark();
 78.4105 -   }
 78.4106 -
 78.4107 -   // The thread is on the WaitSet list - now park() it.
 78.4108 -   // On MP systems it's conceivable that a brief spin before we park
 78.4109 -   // could be profitable.
 78.4110 -   //
 78.4111 -   // TODO-FIXME: change the following logic to a loop of the form
 78.4112 -   //   while (!timeout && !interrupted && _notified == 0) park()
 78.4113 -
 78.4114 -   int ret = OS_OK ;
 78.4115 -   int WasNotified = 0 ;
 78.4116 -   { // State transition wrappers
 78.4117 -     OSThread* osthread = Self->osthread();
 78.4118 -     OSThreadWaitState osts(osthread, true);
 78.4119 -     {
 78.4120 -       ThreadBlockInVM tbivm(jt);
 78.4121 -       // Thread is in thread_blocked state and oop access is unsafe.
 78.4122 -       jt->set_suspend_equivalent();
 78.4123 -
 78.4124 -       if (interruptible && (Thread::is_interrupted(THREAD, false) || HAS_PENDING_EXCEPTION)) {
 78.4125 -           // Intentionally empty
 78.4126 -       } else
 78.4127 -       if (node._notified == 0) {
 78.4128 -         if (millis <= 0) {
 78.4129 -            Self->_ParkEvent->park () ;
 78.4130 -         } else {
 78.4131 -            ret = Self->_ParkEvent->park (millis) ;
 78.4132 -         }
 78.4133 -       }
 78.4134 -
 78.4135 -       // were we externally suspended while we were waiting?
 78.4136 -       if (ExitSuspendEquivalent (jt)) {
 78.4137 -          // TODO-FIXME: add -- if succ == Self then succ = null.
 78.4138 -          jt->java_suspend_self();
 78.4139 -       }
 78.4140 -
 78.4141 -     } // Exit thread safepoint: transition _thread_blocked -> _thread_in_vm
 78.4142 -
 78.4143 -
 78.4144 -     // Node may be on the WaitSet, the EntryList (or cxq), or in transition
 78.4145 -     // from the WaitSet to the EntryList.
 78.4146 -     // See if we need to remove Node from the WaitSet.
 78.4147 -     // We use double-checked locking to avoid grabbing _WaitSetLock
 78.4148 -     // if the thread is not on the wait queue.
 78.4149 -     //
 78.4150 -     // Note that we don't need a fence before the fetch of TState.
 78.4151 -     // In the worst case we'll fetch an old, stale value of TS_WAIT previously
 78.4152 -     // written by this thread. (perhaps the fetch might even be satisfied
 78.4153 -     // by a look-aside into the processor's own store buffer, although given
 78.4154 -     // the length of the code path between the prior ST and this load that's
 78.4155 -     // highly unlikely).  If the following LD fetches a stale TS_WAIT value
 78.4156 -     // then we'll acquire the lock and then re-fetch a fresh TState value.
 78.4157 -     // That is, we fail toward safety.
 78.4158 -
 78.4159 -     if (node.TState == ObjectWaiter::TS_WAIT) {
 78.4160 -         Thread::SpinAcquire (&_WaitSetLock, "WaitSet - unlink") ;
 78.4161 -         if (node.TState == ObjectWaiter::TS_WAIT) {
 78.4162 -            DequeueSpecificWaiter (&node) ;       // unlink from WaitSet
 78.4163 -            assert(node._notified == 0, "invariant");
 78.4164 -            node.TState = ObjectWaiter::TS_RUN ;
 78.4165 -         }
 78.4166 -         Thread::SpinRelease (&_WaitSetLock) ;
 78.4167 -     }
 78.4168 -
 78.4169 -     // The thread is now either off-list (TS_RUN),
 78.4170 -     // on the EntryList (TS_ENTER), or on the cxq (TS_CXQ).
 78.4171 -     // The Node's TState variable is stable from the perspective of this thread.
 78.4172 -     // No other threads will asynchronously modify TState.
 78.4173 -     guarantee (node.TState != ObjectWaiter::TS_WAIT, "invariant") ;
 78.4174 -     OrderAccess::loadload() ;
 78.4175 -     if (_succ == Self) _succ = NULL ;
 78.4176 -     WasNotified = node._notified ;
 78.4177 -
 78.4178 -     // Reentry phase -- reacquire the monitor.
 78.4179 -     // re-enter contended monitor after object.wait().
 78.4180 -     // retain OBJECT_WAIT state until re-enter successfully completes
 78.4181 -     // Thread state is thread_in_vm and oop access is again safe,
 78.4182 -     // although the raw address of the object may have changed.
 78.4183 -     // (Don't cache naked oops over safepoints, of course).
 78.4184 -
 78.4185 -     // post monitor waited event. Note that this is past-tense, we are done waiting.
 78.4186 -     if (JvmtiExport::should_post_monitor_waited()) {
 78.4187 -       JvmtiExport::post_monitor_waited(jt, this, ret == OS_TIMEOUT);
 78.4188 -     }
 78.4189 -     OrderAccess::fence() ;
 78.4190 -
 78.4191 -     assert (Self->_Stalled != 0, "invariant") ;
 78.4192 -     Self->_Stalled = 0 ;
 78.4193 -
 78.4194 -     assert (_owner != Self, "invariant") ;
 78.4195 -     ObjectWaiter::TStates v = node.TState ;
 78.4196 -     if (v == ObjectWaiter::TS_RUN) {
 78.4197 -         enter (Self) ;
 78.4198 -     } else {
 78.4199 -         guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ;
 78.4200 -         ReenterI (Self, &node) ;
 78.4201 -         node.wait_reenter_end(this);
 78.4202 -     }
 78.4203 -
 78.4204 -     // Self has reacquired the lock.
 78.4205 -     // Lifecycle - the node representing Self must not appear on any queues.
 78.4206 -     // Node is about to go out-of-scope, but even if it were immortal we wouldn't
 78.4207 -     // want residual elements associated with this thread left on any lists.
 78.4208 -     guarantee (node.TState == ObjectWaiter::TS_RUN, "invariant") ;
 78.4209 -     assert    (_owner == Self, "invariant") ;
 78.4210 -     assert    (_succ != Self , "invariant") ;
 78.4211 -   } // OSThreadWaitState()
 78.4212 -
 78.4213 -   jt->set_current_waiting_monitor(NULL);
 78.4214 -
 78.4215 -   guarantee (_recursions == 0, "invariant") ;
 78.4216 -   _recursions = save;     // restore the old recursion count
 78.4217 -   _waiters--;             // decrement the number of waiters
 78.4218 -
 78.4219 -   // Verify a few postconditions
 78.4220 -   assert (_owner == Self       , "invariant") ;
 78.4221 -   assert (_succ  != Self       , "invariant") ;
 78.4222 -   assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
 78.4223 -
 78.4224 -   if (SyncFlags & 32) {
 78.4225 -      OrderAccess::fence() ;
 78.4226 -   }
 78.4227 -
 78.4228 -   // check if the notification happened
 78.4229 -   if (!WasNotified) {
 78.4230 -     // no, it could be timeout or Thread.interrupt() or both
 78.4231 -     // check for interrupt event, otherwise it is timeout
 78.4232 -     if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
 78.4233 -       TEVENT (Wait - throw IEX from epilog) ;
 78.4234 -       THROW(vmSymbols::java_lang_InterruptedException());
 78.4235 -     }
 78.4236 -   }
 78.4237 -
 78.4238 -   // NOTE: A spurious wakeup will be treated as a timeout.
 78.4239 -   // Monitor notify has precedence over thread interrupt.
 78.4240 -}
 78.4241 -
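
The post-park cleanup in wait() uses double-checked locking on the node's TState: check once without _WaitSetLock, and only if it still reads TS_WAIT take the spin lock and re-check before unlinking. A stale TS_WAIT merely costs an extra lock acquisition, so the check fails toward safety. An illustrative stand-alone sketch (invented names, std::atomic_flag as the spin lock):

    #include <atomic>
    #include <thread>

    enum class TState { TS_WAIT, TS_ENTER, TS_RUN };

    struct ToyNode {
      std::atomic<TState> state{TState::TS_WAIT};
    };

    std::atomic_flag wait_set_lock = ATOMIC_FLAG_INIT;

    void spin_acquire(std::atomic_flag& f) {
      while (f.test_and_set(std::memory_order_acquire)) std::this_thread::yield();
    }
    void spin_release(std::atomic_flag& f) { f.clear(std::memory_order_release); }

    // Unlink 'node' from the wait set only if it is still waiting.
    void unlink_if_still_waiting(ToyNode& node) {
      if (node.state.load(std::memory_order_relaxed) == TState::TS_WAIT) {   // cheap pre-check
        spin_acquire(wait_set_lock);                                         // _WaitSetLock analogue
        if (node.state.load(std::memory_order_relaxed) == TState::TS_WAIT) { // re-check under the lock
          // DequeueSpecificWaiter(&node) would run here in the real code.
          node.state.store(TState::TS_RUN, std::memory_order_relaxed);
        }
        spin_release(wait_set_lock);
      }
    }
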
 78.4242 -
 78.4243 -// Consider:
 78.4244 -// If the lock is cool (cxq == null && succ == null) and we're on an MP system
 78.4245 -// then instead of transferring a thread from the WaitSet to the EntryList
 78.4246 -// we might just dequeue a thread from the WaitSet and directly unpark() it.
 78.4247 -
 78.4248 -void ObjectMonitor::notify(TRAPS) {
 78.4249 -  CHECK_OWNER();
 78.4250 -  if (_WaitSet == NULL) {
 78.4251 -     TEVENT (Empty-Notify) ;
 78.4252 -     return ;
 78.4253 -  }
 78.4254 -  DTRACE_MONITOR_PROBE(notify, this, object(), THREAD);
 78.4255 -
 78.4256 -  int Policy = Knob_MoveNotifyee ;
 78.4257 -
 78.4258 -  Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notify") ;
 78.4259 -  ObjectWaiter * iterator = DequeueWaiter() ;
 78.4260 -  if (iterator != NULL) {
 78.4261 -     TEVENT (Notify1 - Transfer) ;
 78.4262 -     guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
 78.4263 -     guarantee (iterator->_notified == 0, "invariant") ;
 78.4264 -     // Disposition - what might we do with iterator ?
 78.4265 -     // a.  add it directly to the EntryList - either tail or head.
 78.4266 -     // b.  push it onto the front of the _cxq.
 78.4267 -     // For now we use (a).
 78.4268 -     if (Policy != 4) {
 78.4269 -        iterator->TState = ObjectWaiter::TS_ENTER ;
 78.4270 -     }
 78.4271 -     iterator->_notified = 1 ;
 78.4272 -
 78.4273 -     ObjectWaiter * List = _EntryList ;
 78.4274 -     if (List != NULL) {
 78.4275 -        assert (List->_prev == NULL, "invariant") ;
 78.4276 -        assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
 78.4277 -        assert (List != iterator, "invariant") ;
 78.4278 -     }
 78.4279 -
 78.4280 -     if (Policy == 0) {       // prepend to EntryList
 78.4281 -         if (List == NULL) {
 78.4282 -             iterator->_next = iterator->_prev = NULL ;
 78.4283 -             _EntryList = iterator ;
 78.4284 -         } else {
 78.4285 -             List->_prev = iterator ;
 78.4286 -             iterator->_next = List ;
 78.4287 -             iterator->_prev = NULL ;
 78.4288 -             _EntryList = iterator ;
 78.4289 -        }
 78.4290 -     } else
 78.4291 -     if (Policy == 1) {      // append to EntryList
 78.4292 -         if (List == NULL) {
 78.4293 -             iterator->_next = iterator->_prev = NULL ;
 78.4294 -             _EntryList = iterator ;
 78.4295 -         } else {
 78.4296 -            // CONSIDER:  finding the tail currently requires a linear-time walk of
 78.4297 -            // the EntryList.  We can make tail access constant-time by converting to
 78.4298 -            // a CDLL instead of using our current DLL.
 78.4299 -            ObjectWaiter * Tail ;
 78.4300 -            for (Tail = List ; Tail->_next != NULL ; Tail = Tail->_next) ;
 78.4301 -            assert (Tail != NULL && Tail->_next == NULL, "invariant") ;
 78.4302 -            Tail->_next = iterator ;
 78.4303 -            iterator->_prev = Tail ;
 78.4304 -            iterator->_next = NULL ;
 78.4305 -        }
 78.4306 -     } else
 78.4307 -     if (Policy == 2) {      // prepend to cxq
 78.4308 -         // prepend to cxq
 78.4309 -         if (List == NULL) {
 78.4310 -             iterator->_next = iterator->_prev = NULL ;
 78.4311 -             _EntryList = iterator ;
 78.4312 -         } else {
 78.4313 -            iterator->TState = ObjectWaiter::TS_CXQ ;
 78.4314 -            for (;;) {
 78.4315 -                ObjectWaiter * Front = _cxq ;
 78.4316 -                iterator->_next = Front ;
 78.4317 -                if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) {
 78.4318 -                    break ;
 78.4319 -                }
 78.4320 -            }
 78.4321 -         }
 78.4322 -     } else
 78.4323 -     if (Policy == 3) {      // append to cxq
 78.4324 -        iterator->TState = ObjectWaiter::TS_CXQ ;
 78.4325 -        for (;;) {
 78.4326 -            ObjectWaiter * Tail ;
 78.4327 -            Tail = _cxq ;
 78.4328 -            if (Tail == NULL) {
 78.4329 -                iterator->_next = NULL ;
 78.4330 -                if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) {
 78.4331 -                   break ;
 78.4332 -                }
 78.4333 -            } else {
 78.4334 -                while (Tail->_next != NULL) Tail = Tail->_next ;
 78.4335 -                Tail->_next = iterator ;
 78.4336 -                iterator->_prev = Tail ;
 78.4337 -                iterator->_next = NULL ;
 78.4338 -                break ;
 78.4339 -            }
 78.4340 -        }
 78.4341 -     } else {
 78.4342 -        ParkEvent * ev = iterator->_event ;
 78.4343 -        iterator->TState = ObjectWaiter::TS_RUN ;
 78.4344 -        OrderAccess::fence() ;
 78.4345 -        ev->unpark() ;
 78.4346 -     }
 78.4347 -
 78.4348 -     if (Policy < 4) {
 78.4349 -       iterator->wait_reenter_begin(this);
 78.4350 -     }
 78.4351 -
 78.4352 -     // _WaitSetLock protects the wait queue, not the EntryList.  We could
 78.4353 -     // move the add-to-EntryList operation, above, outside the critical section
 78.4354 -     // protected by _WaitSetLock.  In practice that's not useful.  With the
 78.4355 -     // exception of  wait() timeouts and interrupts the monitor owner
 78.4356 -     // is the only thread that grabs _WaitSetLock.  There's almost no contention
 78.4357 -     // on _WaitSetLock so it's not profitable to reduce the length of the
 78.4358 -     // critical section.
 78.4359 -  }
 78.4360 -
 78.4361 -  Thread::SpinRelease (&_WaitSetLock) ;
 78.4362 -
 78.4363 -  if (iterator != NULL && ObjectSynchronizer::_sync_Notifications != NULL) {
 78.4364 -     ObjectSynchronizer::_sync_Notifications->inc() ;
 78.4365 -  }
 78.4366 -}
 78.4367 -
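
The Policy == 2/3 branches in notify() push the dequeued waiter onto _cxq with the usual lock-free Treiber-stack idiom: read the head, link the node in front of it, and CAS the head, retrying on failure. As a sketch only (invented names, std::atomic instead of Atomic::cmpxchg_ptr):

    #include <atomic>

    struct WaitNode {
      WaitNode* next = nullptr;
    };

    std::atomic<WaitNode*> cxq_head{nullptr};

    // Prepend 'n' to the lock-free LIFO list, retrying the CAS as needed.
    void push_front_cxq(WaitNode* n) {
      WaitNode* front = cxq_head.load(std::memory_order_relaxed);
      do {
        n->next = front;                      // iterator->_next = Front
      } while (!cxq_head.compare_exchange_weak(front, n,
                                               std::memory_order_release,
                                               std::memory_order_relaxed));
    }
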
 78.4368 -
 78.4369 -void ObjectMonitor::notifyAll(TRAPS) {
 78.4370 -  CHECK_OWNER();
 78.4371 -  ObjectWaiter* iterator;
 78.4372 -  if (_WaitSet == NULL) {
 78.4373 -      TEVENT (Empty-NotifyAll) ;
 78.4374 -      return ;
 78.4375 -  }
 78.4376 -  DTRACE_MONITOR_PROBE(notifyAll, this, object(), THREAD);
 78.4377 -
 78.4378 -  int Policy = Knob_MoveNotifyee ;
 78.4379 -  int Tally = 0 ;
 78.4380 -  Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notifyall") ;
 78.4381 -
 78.4382 -  for (;;) {
 78.4383 -     iterator = DequeueWaiter () ;
 78.4384 -     if (iterator == NULL) break ;
 78.4385 -     TEVENT (NotifyAll - Transfer1) ;
 78.4386 -     ++Tally ;
 78.4387 -
 78.4388 -     // Disposition - what might we do with iterator ?
 78.4389 -     // a.  add it directly to the EntryList - either tail or head.
 78.4390 -     // b.  push it onto the front of the _cxq.
 78.4391 -     // For now we use (a).
 78.4392 -     //
 78.4393 -     // TODO-FIXME: currently notifyAll() transfers the waiters one-at-a-time from the waitset
 78.4394 -     // to the EntryList.  This could be done more efficiently with a single bulk transfer,
 78.4395 -     // but in practice it's not time-critical.  Beware too, that in prepend-mode we invert the
 78.4396 -     // order of the waiters.  Let's say that the waitset is "ABCD" and the EntryList is "XYZ".
 78.4397 -     // After a notifyAll() in prepend mode the waitset will be empty and the EntryList will
 78.4398 -     // be "DCBAXYZ".
 78.4399 -
 78.4400 -     guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
 78.4401 -     guarantee (iterator->_notified == 0, "invariant") ;
 78.4402 -     iterator->_notified = 1 ;
 78.4403 -     if (Policy != 4) {
 78.4404 -        iterator->TState = ObjectWaiter::TS_ENTER ;
 78.4405 -     }
 78.4406 -
 78.4407 -     ObjectWaiter * List = _EntryList ;
 78.4408 -     if (List != NULL) {
 78.4409 -        assert (List->_prev == NULL, "invariant") ;
 78.4410 -        assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
 78.4411 -        assert (List != iterator, "invariant") ;
 78.4412 -     }
 78.4413 -
 78.4414 -     if (Policy == 0) {       // prepend to EntryList
 78.4415 -         if (List == NULL) {
 78.4416 -             iterator->_next = iterator->_prev = NULL ;
 78.4417 -             _EntryList = iterator ;
 78.4418 -         } else {
 78.4419 -             List->_prev = iterator ;
 78.4420 -             iterator->_next = List ;
 78.4421 -             iterator->_prev = NULL ;
 78.4422 -             _EntryList = iterator ;
 78.4423 -        }
 78.4424 -     } else
 78.4425 -     if (Policy == 1) {      // append to EntryList
 78.4426 -         if (List == NULL) {
 78.4427 -             iterator->_next = iterator->_prev = NULL ;
 78.4428 -             _EntryList = iterator ;
 78.4429 -         } else {
 78.4430 -            // CONSIDER:  finding the tail currently requires a linear-time walk of
 78.4431 -            // the EntryList.  We can make tail access constant-time by converting to
 78.4432 -            // a CDLL instead of using our current DLL.
 78.4433 -            ObjectWaiter * Tail ;
 78.4434 -            for (Tail = List ; Tail->_next != NULL ; Tail = Tail->_next) ;
 78.4435 -            assert (Tail != NULL && Tail->_next == NULL, "invariant") ;
 78.4436 -            Tail->_next = iterator ;
 78.4437 -            iterator->_prev = Tail ;
 78.4438 -            iterator->_next = NULL ;
 78.4439 -        }
 78.4440 -     } else
 78.4441 -     if (Policy == 2) {      // prepend to cxq
 78.4442 -         // prepend to cxq
 78.4443 -         iterator->TState = ObjectWaiter::TS_CXQ ;
 78.4444 -         for (;;) {
 78.4445 -             ObjectWaiter * Front = _cxq ;
 78.4446 -             iterator->_next = Front ;
 78.4447 -             if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) {
 78.4448 -                 break ;
 78.4449 -             }
 78.4450 -         }
 78.4451 -     } else
 78.4452 -     if (Policy == 3) {      // append to cxq
 78.4453 -        iterator->TState = ObjectWaiter::TS_CXQ ;
 78.4454 -        for (;;) {
 78.4455 -            ObjectWaiter * Tail ;
 78.4456 -            Tail = _cxq ;
 78.4457 -            if (Tail == NULL) {
 78.4458 -                iterator->_next = NULL ;
 78.4459 -                if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) {
 78.4460 -                   break ;
 78.4461 -                }
 78.4462 -            } else {
 78.4463 -                while (Tail->_next != NULL) Tail = Tail->_next ;
 78.4464 -                Tail->_next = iterator ;
 78.4465 -                iterator->_prev = Tail ;
 78.4466 -                iterator->_next = NULL ;
 78.4467 -                break ;
 78.4468 -            }
 78.4469 -        }
 78.4470 -     } else {
 78.4471 -        ParkEvent * ev = iterator->_event ;
 78.4472 -        iterator->TState = ObjectWaiter::TS_RUN ;
 78.4473 -        OrderAccess::fence() ;
 78.4474 -        ev->unpark() ;
 78.4475 -     }
 78.4476 -
 78.4477 -     if (Policy < 4) {
 78.4478 -       iterator->wait_reenter_begin(this);
 78.4479 -     }
 78.4480 -
 78.4481 -     // _WaitSetLock protects the wait queue, not the EntryList.  We could
 78.4482 -     // move the add-to-EntryList operation, above, outside the critical section
 78.4483 -     // protected by _WaitSetLock.  In practice that's not useful.  With the
 78.4484 -     // exception of  wait() timeouts and interrupts the monitor owner
 78.4485 -     // is the only thread that grabs _WaitSetLock.  There's almost no contention
 78.4486 -     // on _WaitSetLock so it's not profitable to reduce the length of the
 78.4487 -     // critical section.
 78.4488 -  }
 78.4489 -
 78.4490 -  Thread::SpinRelease (&_WaitSetLock) ;
 78.4491 -
 78.4492 -  if (Tally != 0 && ObjectSynchronizer::_sync_Notifications != NULL) {
 78.4493 -     ObjectSynchronizer::_sync_Notifications->inc(Tally) ;
 78.4494 -  }
 78.4495 -}
 78.4496 -
 78.4497 -// check_slow() is a misnomer.  It's called simply to throw an IMSX exception.
 78.4498 -// TODO-FIXME: remove check_slow() -- it's likely dead.
 78.4499 -
 78.4500 -void ObjectMonitor::check_slow(TRAPS) {
 78.4501 -  TEVENT (check_slow - throw IMSX) ;
 78.4502 -  assert(THREAD != _owner && !THREAD->is_lock_owned((address) _owner), "must not be owner");
 78.4503 -  THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), "current thread not owner");
 78.4504 -}
 78.4505 -
 78.4506 -
 78.4507 -// -------------------------------------------------------------------------
 78.4508 -// The raw monitor subsystem is entirely distinct from normal
 78.4509 -// java-synchronization or jni-synchronization.  raw monitors are not
 78.4510 -// associated with objects.  They can be implemented in any manner
 78.4511 -// that makes sense.  The original implementors decided to piggy-back
 78.4512 -// the raw-monitor implementation on the existing Java objectMonitor mechanism.
 78.4513 -// This flaw needs to be fixed.  We should reimplement raw monitors as sui generis.
 78.4514 -// Specifically, we should not implement raw monitors via java monitors.
 78.4515 -// Time permitting, we should disentangle and deconvolve the two implementations
 78.4516 -// and move the resulting raw monitor implementation over to the JVMTI directories.
 78.4517 -// Ideally, the raw monitor implementation would be built on top of
 78.4518 -// park-unpark and nothing else.
 78.4519 -//
 78.4520 -// raw monitors are used mainly by JVMTI
 78.4521 -// The raw monitor implementation borrows the ObjectMonitor structure,
 78.4522 -// but the operators are degenerate and extremely simple.
 78.4523 -//
 78.4524 -// Mixed use of a single objectMonitor instance -- as both a raw monitor
 78.4525 -// and a normal java monitor -- is not permissible.
 78.4526 -//
 78.4527 -// Note that we use the single RawMonitor_lock to protect queue operations for
 78.4528 -// _all_ raw monitors.  This is a scalability impediment, but since raw monitor usage
 78.4529 -// is deprecated and rare, this is not of concern.  The RawMonitor_lock can not
 78.4530 -// be held indefinitely.  The critical sections must be short and bounded.
 78.4531 -//
 78.4532 -// -------------------------------------------------------------------------
 78.4533 -
 78.4534 -int ObjectMonitor::SimpleEnter (Thread * Self) {
 78.4535 -  for (;;) {
 78.4536 -    if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
 78.4537 -       return OS_OK ;
 78.4538 -    }
 78.4539 -
 78.4540 -    ObjectWaiter Node (Self) ;
 78.4541 -    Self->_ParkEvent->reset() ;     // strictly optional
 78.4542 -    Node.TState = ObjectWaiter::TS_ENTER ;
 78.4543 -
 78.4544 -    RawMonitor_lock->lock_without_safepoint_check() ;
 78.4545 -    Node._next  = _EntryList ;
 78.4546 -    _EntryList  = &Node ;
 78.4547 -    OrderAccess::fence() ;
 78.4548 -    if (_owner == NULL && Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
 78.4549 -        _EntryList = Node._next ;
 78.4550 -        RawMonitor_lock->unlock() ;
 78.4551 -        return OS_OK ;
 78.4552 -    }
 78.4553 -    RawMonitor_lock->unlock() ;
 78.4554 -    while (Node.TState == ObjectWaiter::TS_ENTER) {
 78.4555 -       Self->_ParkEvent->park() ;
 78.4556 -    }
 78.4557 -  }
 78.4558 -}
 78.4559 -
 78.4560 -int ObjectMonitor::SimpleExit (Thread * Self) {
 78.4561 -  guarantee (_owner == Self, "invariant") ;
 78.4562 -  OrderAccess::release_store_ptr (&_owner, NULL) ;
 78.4563 -  OrderAccess::fence() ;
 78.4564 -  if (_EntryList == NULL) return OS_OK ;
 78.4565 -  ObjectWaiter * w ;
 78.4566 -
 78.4567 -  RawMonitor_lock->lock_without_safepoint_check() ;
 78.4568 -  w = _EntryList ;
 78.4569 -  if (w != NULL) {
 78.4570 -      _EntryList = w->_next ;
 78.4571 -  }
 78.4572 -  RawMonitor_lock->unlock() ;
 78.4573 -  if (w != NULL) {
 78.4574 -      guarantee (w ->TState == ObjectWaiter::TS_ENTER, "invariant") ;
 78.4575 -      ParkEvent * ev = w->_event ;
 78.4576 -      w->TState = ObjectWaiter::TS_RUN ;
 78.4577 -      OrderAccess::fence() ;
 78.4578 -      ev->unpark() ;
 78.4579 -  }
 78.4580 -  return OS_OK ;
 78.4581 -}
 78.4582 -
 78.4583 -int ObjectMonitor::SimpleWait (Thread * Self, jlong millis) {
 78.4584 -  guarantee (_owner == Self  , "invariant") ;
 78.4585 -  guarantee (_recursions == 0, "invariant") ;
 78.4586 -
 78.4587 -  ObjectWaiter Node (Self) ;
 78.4588 -  Node._notified = 0 ;
 78.4589 -  Node.TState    = ObjectWaiter::TS_WAIT ;
 78.4590 -
 78.4591 -  RawMonitor_lock->lock_without_safepoint_check() ;
 78.4592 -  Node._next     = _WaitSet ;
 78.4593 -  _WaitSet       = &Node ;
 78.4594 -  RawMonitor_lock->unlock() ;
 78.4595 -
 78.4596 -  SimpleExit (Self) ;
 78.4597 -  guarantee (_owner != Self, "invariant") ;
 78.4598 -
 78.4599 -  int ret = OS_OK ;
 78.4600 -  if (millis <= 0) {
 78.4601 -    Self->_ParkEvent->park();
 78.4602 -  } else {
 78.4603 -    ret = Self->_ParkEvent->park(millis);
 78.4604 -  }
 78.4605 -
 78.4606 -  // If thread still resides on the waitset then unlink it.
 78.4607 -  // Double-checked locking -- the usage is safe in this context
 78.4608 -  // as TState is volatile and the lock-unlock operators are
 78.4609 -  // serializing (barrier-equivalent).
 78.4610 -
 78.4611 -  if (Node.TState == ObjectWaiter::TS_WAIT) {
 78.4612 -    RawMonitor_lock->lock_without_safepoint_check() ;
 78.4613 -    if (Node.TState == ObjectWaiter::TS_WAIT) {
 78.4614 -      // Simple O(n) unlink, but performance isn't critical here.
 78.4615 -      ObjectWaiter * p ;
 78.4616 -      ObjectWaiter * q = NULL ;
 78.4617 -      for (p = _WaitSet ; p != &Node; p = p->_next) {
 78.4618 -         q = p ;
 78.4619 -      }
 78.4620 -      guarantee (p == &Node, "invariant") ;
 78.4621 -      if (q == NULL) {
 78.4622 -        guarantee (p == _WaitSet, "invariant") ;
 78.4623 -        _WaitSet = p->_next ;
 78.4624 -      } else {
 78.4625 -        guarantee (p == q->_next, "invariant") ;
 78.4626 -        q->_next = p->_next ;
 78.4627 -      }
 78.4628 -      Node.TState = ObjectWaiter::TS_RUN ;
 78.4629 -    }
 78.4630 -    RawMonitor_lock->unlock() ;
 78.4631 -  }
 78.4632 -
 78.4633 -  guarantee (Node.TState == ObjectWaiter::TS_RUN, "invariant") ;
 78.4634 -  SimpleEnter (Self) ;
 78.4635 -
 78.4636 -  guarantee (_owner == Self, "invariant") ;
 78.4637 -  guarantee (_recursions == 0, "invariant") ;
 78.4638 -  return ret ;
 78.4639 -}
 78.4640 -
 78.4641 -int ObjectMonitor::SimpleNotify (Thread * Self, bool All) {
 78.4642 -  guarantee (_owner == Self, "invariant") ;
 78.4643 -  if (_WaitSet == NULL) return OS_OK ;
 78.4644 -
 78.4645 -  // We have two options:
 78.4646 -  // A. Transfer the threads from the WaitSet to the EntryList
 78.4647 -  // B. Remove the thread from the WaitSet and unpark() it.
 78.4648 -  //
 78.4649 -  // We use (B), which is crude and results in lots of futile
 78.4650 -  // context switching.  In particular (B) induces lots of contention.
 78.4651 -
 78.4652 -  ParkEvent * ev = NULL ;       // consider using a small auto array ...
 78.4653 -  RawMonitor_lock->lock_without_safepoint_check() ;
 78.4654 -  for (;;) {
 78.4655 -      ObjectWaiter * w = _WaitSet ;
 78.4656 -      if (w == NULL) break ;
 78.4657 -      _WaitSet = w->_next ;
 78.4658 -      if (ev != NULL) { ev->unpark(); ev = NULL; }
 78.4659 -      ev = w->_event ;
 78.4660 -      OrderAccess::loadstore() ;
 78.4661 -      w->TState = ObjectWaiter::TS_RUN ;
 78.4662 -      OrderAccess::storeload();
 78.4663 -      if (!All) break ;
 78.4664 -  }
 78.4665 -  RawMonitor_lock->unlock() ;
 78.4666 -  if (ev != NULL) ev->unpark();
 78.4667 -  return OS_OK ;
 78.4668 -}
 78.4669 -
 78.4670 -// Any JavaThread will enter here with state _thread_blocked
 78.4671 -int ObjectMonitor::raw_enter(TRAPS) {
 78.4672 -  TEVENT (raw_enter) ;
 78.4673 -  void * Contended ;
 78.4674 -
 78.4675 -  // don't enter raw monitor if thread is being externally suspended, it will
 78.4676 -  // surprise the suspender if a "suspended" thread can still enter monitor
 78.4677 -  JavaThread * jt = (JavaThread *)THREAD;
 78.4678 -  if (THREAD->is_Java_thread()) {
 78.4679 -    jt->SR_lock()->lock_without_safepoint_check();
 78.4680 -    while (jt->is_external_suspend()) {
 78.4681 -      jt->SR_lock()->unlock();
 78.4682 -      jt->java_suspend_self();
 78.4683 -      jt->SR_lock()->lock_without_safepoint_check();
 78.4684 -    }
 78.4685 -    // guarded by SR_lock to avoid racing with new external suspend requests.
 78.4686 -    Contended = Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) ;
 78.4687 -    jt->SR_lock()->unlock();
 78.4688 -  } else {
 78.4689 -    Contended = Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) ;
 78.4690 -  }
 78.4691 -
 78.4692 -  if (Contended == THREAD) {
 78.4693 -     _recursions ++ ;
 78.4694 -     return OM_OK ;
 78.4695 -  }
 78.4696 -
 78.4697 -  if (Contended == NULL) {
 78.4698 -     guarantee (_owner == THREAD, "invariant") ;
 78.4699 -     guarantee (_recursions == 0, "invariant") ;
 78.4700 -     return OM_OK ;
 78.4701 -  }
 78.4702 -
 78.4703 -  THREAD->set_current_pending_monitor(this);
 78.4704 -
 78.4705 -  if (!THREAD->is_Java_thread()) {
 78.4706 -     // No other non-Java threads besides VM thread would acquire
 78.4707 -     // a raw monitor.
 78.4708 -     assert(THREAD->is_VM_thread(), "must be VM thread");
 78.4709 -     SimpleEnter (THREAD) ;
 78.4710 -   } else {
 78.4711 -     guarantee (jt->thread_state() == _thread_blocked, "invariant") ;
 78.4712 -     for (;;) {
 78.4713 -       jt->set_suspend_equivalent();
 78.4714 -       // cleared by handle_special_suspend_equivalent_condition() or
 78.4715 -       // java_suspend_self()
 78.4716 -       SimpleEnter (THREAD) ;
 78.4717 -
 78.4718 -       // were we externally suspended while we were waiting?
 78.4719 -       if (!jt->handle_special_suspend_equivalent_condition()) break ;
 78.4720 -
 78.4721 -       // This thread was externally suspended
 78.4722 -       //
 78.4723 -       // This logic isn't needed for JVMTI raw monitors,
 78.4724 -       // but doesn't hurt just in case the suspend rules change. This
 78.4725 -           // logic is needed for the ObjectMonitor.wait() reentry phase.
 78.4726 -           // We have reentered the contended monitor, but while we were
 78.4727 -           // waiting another thread suspended us. We don't want to reenter
 78.4728 -           // the monitor while suspended because that would surprise the
 78.4729 -           // thread that suspended us.
 78.4730 -           //
 78.4731 -           // Drop the lock -
 78.4732 -       SimpleExit (THREAD) ;
 78.4733 -
 78.4734 -           jt->java_suspend_self();
 78.4735 -         }
 78.4736 -
 78.4737 -     assert(_owner == THREAD, "Fatal error with monitor owner!");
 78.4738 -     assert(_recursions == 0, "Fatal error with monitor recursions!");
 78.4739 -  }
 78.4740 -
 78.4741 -  THREAD->set_current_pending_monitor(NULL);
 78.4742 -  guarantee (_recursions == 0, "invariant") ;
 78.4743 -  return OM_OK;
 78.4744 -}
 78.4745 -
 78.4746 -// Used mainly for JVMTI raw monitor implementation
 78.4747 -// Also used for ObjectMonitor::wait().
 78.4748 -int ObjectMonitor::raw_exit(TRAPS) {
 78.4749 -  TEVENT (raw_exit) ;
 78.4750 -  if (THREAD != _owner) {
 78.4751 -    return OM_ILLEGAL_MONITOR_STATE;
 78.4752 -  }
 78.4753 -  if (_recursions > 0) {
 78.4754 -    --_recursions ;
 78.4755 -    return OM_OK ;
 78.4756 -  }
 78.4757 -
 78.4758 -  void * List = _EntryList ;
 78.4759 -  SimpleExit (THREAD) ;
 78.4760 -
 78.4761 -  return OM_OK;
 78.4762 -}
 78.4763 -
 78.4764 -// Used for JVMTI raw monitor implementation.
 78.4765 -// All JavaThreads will enter here with state _thread_blocked
 78.4766 -
 78.4767 -int ObjectMonitor::raw_wait(jlong millis, bool interruptible, TRAPS) {
 78.4768 -  TEVENT (raw_wait) ;
 78.4769 -  if (THREAD != _owner) {
 78.4770 -    return OM_ILLEGAL_MONITOR_STATE;
 78.4771 -  }
 78.4772 -
 78.4773 -  // To avoid spurious wakeups we reset the parkevent -- This is strictly optional.
 78.4774 -  // The caller must be able to tolerate spurious returns from raw_wait().
 78.4775 -  THREAD->_ParkEvent->reset() ;
 78.4776 -  OrderAccess::fence() ;
 78.4777 -
 78.4778 -  // check interrupt event
 78.4779 -  if (interruptible && Thread::is_interrupted(THREAD, true)) {
 78.4780 -    return OM_INTERRUPTED;
 78.4781 -  }
 78.4782 -
 78.4783 -  intptr_t save = _recursions ;
 78.4784 -  _recursions = 0 ;
 78.4785 -  _waiters ++ ;
 78.4786 -  if (THREAD->is_Java_thread()) {
 78.4787 -    guarantee (((JavaThread *) THREAD)->thread_state() == _thread_blocked, "invariant") ;
 78.4788 -    ((JavaThread *)THREAD)->set_suspend_equivalent();
 78.4789 -  }
 78.4790 -  int rv = SimpleWait (THREAD, millis) ;
 78.4791 -  _recursions = save ;
 78.4792 -  _waiters -- ;
 78.4793 -
 78.4794 -  guarantee (THREAD == _owner, "invariant") ;
 78.4795 -  if (THREAD->is_Java_thread()) {
 78.4796 -     JavaThread * jSelf = (JavaThread *) THREAD ;
 78.4797 -     for (;;) {
 78.4798 -        if (!jSelf->handle_special_suspend_equivalent_condition()) break ;
 78.4799 -        SimpleExit (THREAD) ;
 78.4800 -        jSelf->java_suspend_self();
 78.4801 -        SimpleEnter (THREAD) ;
 78.4802 -        jSelf->set_suspend_equivalent() ;
 78.4803 -     }
 78.4804 -  }
 78.4805 -  guarantee (THREAD == _owner, "invariant") ;
 78.4806 -
 78.4807 -  if (interruptible && Thread::is_interrupted(THREAD, true)) {
 78.4808 -    return OM_INTERRUPTED;
 78.4809 -  }
 78.4810 -  return OM_OK ;
 78.4811 -}
 78.4812 -
 78.4813 -int ObjectMonitor::raw_notify(TRAPS) {
 78.4814 -  TEVENT (raw_notify) ;
 78.4815 -  if (THREAD != _owner) {
 78.4816 -    return OM_ILLEGAL_MONITOR_STATE;
 78.4817 -  }
 78.4818 -  SimpleNotify (THREAD, false) ;
 78.4819 -  return OM_OK;
 78.4820 -}
 78.4821 -
 78.4822 -int ObjectMonitor::raw_notifyAll(TRAPS) {
 78.4823 -  TEVENT (raw_notifyAll) ;
 78.4824 -  if (THREAD != _owner) {
 78.4825 -    return OM_ILLEGAL_MONITOR_STATE;
 78.4826 -  }
 78.4827 -  SimpleNotify (THREAD, true) ;
 78.4828 -  return OM_OK;
 78.4829 -}
 78.4830 -
 78.4831 -#ifndef PRODUCT
 78.4832 -void ObjectMonitor::verify() {
 78.4833 -}
 78.4834 -
 78.4835 -void ObjectMonitor::print() {
 78.4836 -}
 78.4837 -#endif
 78.4838 -
 78.4839  //------------------------------------------------------------------------------
 78.4840  // Non-product code
 78.4841  
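
The removed raw-monitor code above reduces to two ideas: a CAS on _owner for the
uncontended fast path, and a single global lock (RawMonitor_lock) guarding an entry
queue that parked threads re-check after every wakeup.  A minimal, portable sketch of
that shape follows; it is an illustration only, with std::atomic, std::mutex and
std::condition_variable standing in for Atomic::cmpxchg_ptr, RawMonitor_lock and
ParkEvent, and with an invented class name.

#include <atomic>
#include <condition_variable>
#include <mutex>
#include <thread>

class SimpleRawMonitor {
  std::atomic<std::thread::id> _owner{};   // default-constructed id means "unowned"
  std::mutex              _queue_lock;     // plays the role of RawMonitor_lock
  std::condition_variable _wakeup;         // plays the role of ParkEvent park/unpark

 public:
  void enter() {                           // non-recursive, like SimpleEnter
    const std::thread::id self = std::this_thread::get_id();
    std::thread::id none{};
    // Fast path: CAS the owner field from "unowned" to self.
    if (_owner.compare_exchange_strong(none, self)) return;

    // Slow path: block until the owner releases, then retry the CAS.
    // There is no direct handoff -- exit() merely clears _owner and wakes someone.
    std::unique_lock<std::mutex> ql(_queue_lock);
    for (;;) {
      none = std::thread::id{};
      if (_owner.compare_exchange_strong(none, self)) return;
      _wakeup.wait(ql);                    // spurious wakeups are tolerated
    }
  }

  void exit() {
    _owner.store(std::thread::id{});       // release the monitor
    std::lock_guard<std::mutex> ql(_queue_lock);
    _wakeup.notify_one();                  // wake one queued thread, if any
  }
};
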
    79.1 --- a/src/share/vm/runtime/synchronizer.hpp	Thu Nov 04 15:19:16 2010 -0700
    79.2 +++ b/src/share/vm/runtime/synchronizer.hpp	Thu Nov 04 16:17:54 2010 -0700
    79.3 @@ -22,53 +22,6 @@
    79.4   *
    79.5   */
    79.6  
    79.7 -class BasicLock VALUE_OBJ_CLASS_SPEC {
    79.8 -  friend class VMStructs;
    79.9 - private:
   79.10 -  volatile markOop _displaced_header;
   79.11 - public:
   79.12 -  markOop      displaced_header() const               { return _displaced_header; }
   79.13 -  void         set_displaced_header(markOop header)   { _displaced_header = header; }
   79.14 -
   79.15 -  void print_on(outputStream* st) const;
   79.16 -
   79.17 -  // move a basic lock (used during deoptimization)
   79.18 -  void move_to(oop obj, BasicLock* dest);
   79.19 -
   79.20 -  static int displaced_header_offset_in_bytes()       { return offset_of(BasicLock, _displaced_header); }
   79.21 -};
   79.22 -
   79.23 -// A BasicObjectLock associates a specific Java object with a BasicLock.
   79.24 -// It is currently embedded in an interpreter frame.
   79.25 -
   79.26 -// Because some machines have alignment restrictions on the control stack,
   79.27 -// the actual space allocated by the interpreter may include padding words
   79.28 -// after the end of the BasicObjectLock.  Also, in order to guarantee
   79.29 -// alignment of the embedded BasicLock objects on such machines, we
   79.30 -// put the embedded BasicLock at the beginning of the struct.
   79.31 -
   79.32 -class BasicObjectLock VALUE_OBJ_CLASS_SPEC {
   79.33 -  friend class VMStructs;
   79.34 - private:
   79.35 -  BasicLock _lock;                                    // the lock, must be double word aligned
   79.36 -  oop       _obj;                                     // object holds the lock;
   79.37 -
   79.38 - public:
   79.39 -  // Manipulation
   79.40 -  oop      obj() const                                { return _obj;  }
   79.41 -  void set_obj(oop obj)                               { _obj = obj; }
   79.42 -  BasicLock* lock()                                   { return &_lock; }
   79.43 -
   79.44 -  // Note: Use frame::interpreter_frame_monitor_size() for the size of BasicObjectLocks
   79.45 -  //       in interpreter activation frames since it includes machine-specific padding.
   79.46 -  static int size()                                   { return sizeof(BasicObjectLock)/wordSize; }
   79.47 -
   79.48 -  // GC support
   79.49 -  void oops_do(OopClosure* f) { f->do_oop(&_obj); }
   79.50 -
   79.51 -  static int obj_offset_in_bytes()                    { return offset_of(BasicObjectLock, _obj);  }
   79.52 -  static int lock_offset_in_bytes()                   { return offset_of(BasicObjectLock, _lock); }
   79.53 -};
   79.54  
   79.55  class ObjectMonitor;
   79.56  
   79.57 @@ -163,6 +116,8 @@
   79.58    static void verify() PRODUCT_RETURN;
   79.59    static int  verify_objmon_isinpool(ObjectMonitor *addr) PRODUCT_RETURN0;
   79.60  
   79.61 +  static void RegisterSpinCallback (int (*)(intptr_t, int), intptr_t) ;
   79.62 +
   79.63   private:
   79.64    enum { _BLOCKSIZE = 128 };
   79.65    static ObjectMonitor* gBlockList;
   79.66 @@ -170,30 +125,6 @@
   79.67    static ObjectMonitor * volatile gOmInUseList; // for moribund thread, so monitors they inflated still get scanned
   79.68    static int gOmInUseCount;
   79.69  
   79.70 - public:
   79.71 -  static void Initialize () ;
   79.72 -  static PerfCounter * _sync_ContendedLockAttempts ;
   79.73 -  static PerfCounter * _sync_FutileWakeups ;
   79.74 -  static PerfCounter * _sync_Parks ;
   79.75 -  static PerfCounter * _sync_EmptyNotifications ;
   79.76 -  static PerfCounter * _sync_Notifications ;
   79.77 -  static PerfCounter * _sync_SlowEnter ;
   79.78 -  static PerfCounter * _sync_SlowExit ;
   79.79 -  static PerfCounter * _sync_SlowNotify ;
   79.80 -  static PerfCounter * _sync_SlowNotifyAll ;
   79.81 -  static PerfCounter * _sync_FailedSpins ;
   79.82 -  static PerfCounter * _sync_SuccessfulSpins ;
   79.83 -  static PerfCounter * _sync_PrivateA ;
   79.84 -  static PerfCounter * _sync_PrivateB ;
   79.85 -  static PerfCounter * _sync_MonInCirculation ;
   79.86 -  static PerfCounter * _sync_MonScavenged ;
   79.87 -  static PerfCounter * _sync_Inflations ;
   79.88 -  static PerfCounter * _sync_Deflations ;
   79.89 -  static PerfLongVariable * _sync_MonExtant ;
   79.90 -
   79.91 - public:
   79.92 -  static void RegisterSpinCallback (int (*)(intptr_t, int), intptr_t) ;
   79.93 -
   79.94  };
   79.95  
   79.96  // ObjectLocker enforces balanced locking and can never throw an
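
The BasicObjectLock comment removed above explains the layout choice: the interpreter
may pad after the BasicObjectLock, so the embedded BasicLock is placed first to keep
it aligned.  A stand-alone sketch of that ordering, with hypothetical Mini* names and
offsetof() in place of offset_of():

#include <cstddef>
#include <cstdio>

struct MiniBasicLock  { volatile void* displaced_header; };

struct MiniObjectLock {
  MiniBasicLock lock;   // lock first: any padding added after the struct comes later,
  void*         obj;    // so the embedded lock itself stays naturally aligned
};

int main() {
  std::printf("lock offset = %zu, obj offset = %zu, size in words = %zu\n",
              offsetof(MiniObjectLock, lock),
              offsetof(MiniObjectLock, obj),
              sizeof(MiniObjectLock) / sizeof(void*));
  return 0;
}
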
    80.1 --- a/src/share/vm/runtime/thread.cpp	Thu Nov 04 15:19:16 2010 -0700
    80.2 +++ b/src/share/vm/runtime/thread.cpp	Thu Nov 04 16:17:54 2010 -0700
    80.3 @@ -2921,6 +2921,9 @@
    80.4    // So that JDK version can be used as a discriminator when parsing arguments
    80.5    JDK_Version_init();
    80.6  
    80.7 +  // Update/Initialize System properties after JDK version number is known
    80.8 +  Arguments::init_version_specific_system_properties();
    80.9 +
   80.10    // Parse arguments
   80.11    jint parse_result = Arguments::parse(args);
   80.12    if (parse_result != JNI_OK) return parse_result;
   80.13 @@ -2992,8 +2995,8 @@
   80.14    // crash Linux VM, see notes in os_linux.cpp.
   80.15    main_thread->create_stack_guard_pages();
   80.16  
   80.17 -  // Initialize Java-Leve synchronization subsystem
   80.18 -  ObjectSynchronizer::Initialize() ;
   80.19 +  // Initialize Java-Level synchronization subsystem
   80.20 +  ObjectMonitor::Initialize() ;
   80.21  
   80.22    // Initialize global modules
   80.23    jint status = init_globals();
   80.24 @@ -3962,215 +3965,272 @@
   80.25    }
   80.26  }
   80.27  
   80.28 -
   80.29 -// Lifecycle management for TSM ParkEvents.
   80.30 -// ParkEvents are type-stable (TSM).
   80.31 -// In our particular implementation they happen to be immortal.
   80.32 +// Internal SpinLock and Mutex
   80.33 +// Based on ParkEvent
   80.34 +
   80.35 +// Ad-hoc mutual exclusion primitives: SpinLock and Mux
   80.36  //
   80.37 -// We manage concurrency on the FreeList with a CAS-based
   80.38 -// detach-modify-reattach idiom that avoids the ABA problems
   80.39 -// that would otherwise be present in a simple CAS-based
   80.40 -// push-pop implementation.   (push-one and pop-all)
   80.41 +// We employ SpinLocks _only for low-contention, fixed-length
   80.42 +// short-duration critical sections where we're concerned
   80.43 +// about native mutex_t or HotSpot Mutex:: latency.
   80.44 +// The mux construct provides a spin-then-block mutual exclusion
   80.45 +// mechanism.
   80.46  //
   80.47 -// Caveat: Allocate() and Release() may be called from threads
   80.48 -// other than the thread associated with the Event!
   80.49 -// If we need to call Allocate() when running as the thread in
   80.50 -// question then look for the PD calls to initialize native TLS.
   80.51 -// Native TLS (Win32/Linux/Solaris) can only be initialized or
   80.52 -// accessed by the associated thread.
   80.53 -// See also pd_initialize().
   80.54 +// Testing has shown that contention on the ListLock guarding gFreeList
   80.55 +// is common.  If we implement ListLock as a simple SpinLock it's common
   80.56 +// for the JVM to devolve to yielding with little progress.  This is true
   80.57 +// despite the fact that the critical sections protected by ListLock are
   80.58 +// extremely short.
   80.59  //
   80.60 -// Note that we could defer associating a ParkEvent with a thread
   80.61 -// until the 1st time the thread calls park().  unpark() calls to
   80.62 -// an unprovisioned thread would be ignored.  The first park() call
   80.63 -// for a thread would allocate and associate a ParkEvent and return
   80.64 -// immediately.
   80.65 -
   80.66 -volatile int ParkEvent::ListLock = 0 ;
   80.67 -ParkEvent * volatile ParkEvent::FreeList = NULL ;
   80.68 -
   80.69 -ParkEvent * ParkEvent::Allocate (Thread * t) {
   80.70 -  // In rare cases -- JVM_RawMonitor* operations -- we can find t == null.
   80.71 -  ParkEvent * ev ;
   80.72 -
   80.73 -  // Start by trying to recycle an existing but unassociated
   80.74 -  // ParkEvent from the global free list.
   80.75 +// TODO-FIXME: ListLock should be of type SpinLock.
   80.76 +// We should make this a 1st-class type, integrated into the lock
   80.77 +// hierarchy as leaf-locks.  Critically, the SpinLock structure
   80.78 +// should have sufficient padding to avoid false-sharing and excessive
   80.79 +// cache-coherency traffic.
   80.80 +
   80.81 +
   80.82 +typedef volatile int SpinLockT ;
   80.83 +
   80.84 +void Thread::SpinAcquire (volatile int * adr, const char * LockName) {
   80.85 +  if (Atomic::cmpxchg (1, adr, 0) == 0) {
   80.86 +     return ;   // normal fast-path return
   80.87 +  }
   80.88 +
   80.89 +  // Slow-path : We've encountered contention -- Spin/Yield/Block strategy.
   80.90 +  TEVENT (SpinAcquire - ctx) ;
   80.91 +  int ctr = 0 ;
   80.92 +  int Yields = 0 ;
   80.93    for (;;) {
   80.94 -    ev = FreeList ;
   80.95 -    if (ev == NULL) break ;
   80.96 -    // 1: Detach - sequester or privatize the list
   80.97 -    // Tantamount to ev = Swap (&FreeList, NULL)
   80.98 -    if (Atomic::cmpxchg_ptr (NULL, &FreeList, ev) != ev) {
   80.99 -       continue ;
  80.100 -    }
  80.101 -
  80.102 -    // We've detached the list.  The list in-hand is now
  80.103 -    // local to this thread.   This thread can operate on the
  80.104 -    // list without risk of interference from other threads.
  80.105 -    // 2: Extract -- pop the 1st element from the list.
  80.106 -    ParkEvent * List = ev->FreeNext ;
  80.107 -    if (List == NULL) break ;
  80.108 -    for (;;) {
  80.109 -        // 3: Try to reattach the residual list
  80.110 -        guarantee (List != NULL, "invariant") ;
  80.111 -        ParkEvent * Arv =  (ParkEvent *) Atomic::cmpxchg_ptr (List, &FreeList, NULL) ;
  80.112 -        if (Arv == NULL) break ;
  80.113 -
  80.114 -        // New nodes arrived.  Try to detach the recent arrivals.
  80.115 -        if (Atomic::cmpxchg_ptr (NULL, &FreeList, Arv) != Arv) {
  80.116 -            continue ;
  80.117 +     while (*adr != 0) {
  80.118 +        ++ctr ;
  80.119 +        if ((ctr & 0xFFF) == 0 || !os::is_MP()) {
  80.120 +           if (Yields > 5) {
  80.121 +             // Consider using a simple NakedSleep() instead.
  80.122 +             // Then SpinAcquire could be called by non-JVM threads
  80.123 +             Thread::current()->_ParkEvent->park(1) ;
  80.124 +           } else {
  80.125 +             os::NakedYield() ;
  80.126 +             ++Yields ;
  80.127 +           }
  80.128 +        } else {
  80.129 +           SpinPause() ;
  80.130          }
  80.131 -        guarantee (Arv != NULL, "invariant") ;
  80.132 -        // 4: Merge Arv into List
  80.133 -        ParkEvent * Tail = List ;
  80.134 -        while (Tail->FreeNext != NULL) Tail = Tail->FreeNext ;
  80.135 -        Tail->FreeNext = Arv ;
  80.136 -    }
  80.137 -    break ;
  80.138 -  }
  80.139 -
  80.140 -  if (ev != NULL) {
  80.141 -    guarantee (ev->AssociatedWith == NULL, "invariant") ;
  80.142 -  } else {
  80.143 -    // Do this the hard way -- materialize a new ParkEvent.
  80.144 -    // In rare cases an allocating thread might detach a long list --
  80.145 -    // installing null into FreeList -- and then stall or be obstructed.
  80.146 -    // A 2nd thread calling Allocate() would see FreeList == null.
  80.147 -    // The list held privately by the 1st thread is unavailable to the 2nd thread.
  80.148 -    // In that case the 2nd thread would have to materialize a new ParkEvent,
  80.149 -    // even though free ParkEvents existed in the system.  In this case we end up
  80.150 -    // with more ParkEvents in circulation than we need, but the race is
  80.151 -    // rare and the outcome is benign.  Ideally, the # of extant ParkEvents
  80.152 -    // is equal to the maximum # of threads that existed at any one time.
  80.153 -    // Because of the race mentioned above, segments of the freelist
  80.154 -    // can be transiently inaccessible.  At worst we may end up with the
  80.155 -    // # of ParkEvents in circulation slightly above the ideal.
  80.156 -    // Note that if we didn't have the TSM/immortal constraint, then
  80.157 -    // when reattaching, above, we could trim the list.
  80.158 -    ev = new ParkEvent () ;
  80.159 -    guarantee ((intptr_t(ev) & 0xFF) == 0, "invariant") ;
  80.160 -  }
  80.161 -  ev->reset() ;                     // courtesy to caller
  80.162 -  ev->AssociatedWith = t ;          // Associate ev with t
  80.163 -  ev->FreeNext       = NULL ;
  80.164 -  return ev ;
  80.165 -}
  80.166 -
  80.167 -void ParkEvent::Release (ParkEvent * ev) {
  80.168 -  if (ev == NULL) return ;
  80.169 -  guarantee (ev->FreeNext == NULL      , "invariant") ;
  80.170 -  ev->AssociatedWith = NULL ;
  80.171 -  for (;;) {
  80.172 -    // Push ev onto FreeList
  80.173 -    // The mechanism is "half" lock-free.
  80.174 -    ParkEvent * List = FreeList ;
  80.175 -    ev->FreeNext = List ;
  80.176 -    if (Atomic::cmpxchg_ptr (ev, &FreeList, List) == List) break ;
  80.177 +     }
  80.178 +     if (Atomic::cmpxchg (1, adr, 0) == 0) return ;
  80.179    }
  80.180  }
  80.181  
  80.182 -// Override operator new and delete so we can ensure that the
  80.183 -// least significant byte of ParkEvent addresses is 0.
  80.184 -// Beware that excessive address alignment is undesirable
  80.185 -// as it can result in D$ index usage imbalance as
  80.186 -// well as bank access imbalance on Niagara-like platforms,
  80.187 -// although Niagara's hash function should help.
  80.188 -
  80.189 -void * ParkEvent::operator new (size_t sz) {
  80.190 -  return (void *) ((intptr_t (CHeapObj::operator new (sz + 256)) + 256) & -256) ;
  80.191 +void Thread::SpinRelease (volatile int * adr) {
  80.192 +  assert (*adr != 0, "invariant") ;
  80.193 +  OrderAccess::fence() ;      // guarantee at least release consistency.
  80.194 +  // Roach-motel semantics.
  80.195 +  // It's safe if subsequent LDs and STs float "up" into the critical section,
  80.196 +  // but prior LDs and STs within the critical section can't be allowed
  80.197 +  // to reorder or float past the ST that releases the lock.
  80.198 +  *adr = 0 ;
  80.199  }
  80.200  
  80.201 -void ParkEvent::operator delete (void * a) {
  80.202 -  // ParkEvents are type-stable and immortal ...
  80.203 -  ShouldNotReachHere();
  80.204 -}
  80.205 -
  80.206 -
  80.207 -// 6399321 As a temporary measure we copied & modified the ParkEvent::
  80.208 -// allocate() and release() code for use by Parkers.  The Parker:: forms
  80.210 -// will eventually be removed as we consolidate and shift over to ParkEvents
  80.210 -// for both builtin synchronization and JSR166 operations.
  80.211 -
  80.212 -volatile int Parker::ListLock = 0 ;
  80.213 -Parker * volatile Parker::FreeList = NULL ;
  80.214 -
  80.215 -Parker * Parker::Allocate (JavaThread * t) {
  80.216 -  guarantee (t != NULL, "invariant") ;
  80.217 -  Parker * p ;
  80.218 -
  80.219 -  // Start by trying to recycle an existing but unassociated
  80.220 -  // Parker from the global free list.
  80.221 +// muxAcquire and muxRelease:
  80.222 +//
  80.223 +// *  muxAcquire and muxRelease support a single-word lock-word construct.
  80.224 +//    The LSB of the word is set IFF the lock is held.
  80.225 +//    The remainder of the word points to the head of a singly-linked list
  80.226 +//    of threads blocked on the lock.
  80.227 +//
  80.228 +// *  The current implementation of muxAcquire-muxRelease uses its own
  80.229 +//    dedicated Thread._MuxEvent instance.  If we're interested in
  80.230 +//    minimizing the peak number of extant ParkEvent instances then
  80.231 +//    we could eliminate _MuxEvent and "borrow" _ParkEvent as long
  80.232 +//    as certain invariants were satisfied.  Specifically, care would need
  80.233 +//    to be taken with regards to consuming unpark() "permits".
  80.234 +//    A safe rule of thumb is that a thread would never call muxAcquire()
  80.235 +//    if it's enqueued (cxq, EntryList, WaitList, etc) and will subsequently
  80.236 +//    park().  Otherwise the _ParkEvent park() operation in muxAcquire() could
  80.237 +//    consume an unpark() permit intended for monitorenter, for instance.
  80.238 +//    One way around this would be to widen the restricted-range semaphore
  80.239 +//    implemented in park().  Another alternative would be to provide
  80.240 +//    multiple instances of the PlatformEvent() for each thread.  One
  80.241 +//    instance would be dedicated to muxAcquire-muxRelease, for instance.
  80.242 +//
  80.243 +// *  Usage:
  80.244 +//    -- Only as leaf locks
  80.245 +//    -- for short-term locking only as muxAcquire does not perform
  80.246 +//       thread state transitions.
  80.247 +//
  80.248 +// Alternatives:
  80.249 +// *  We could implement muxAcquire and muxRelease with MCS or CLH locks
  80.250 +//    but with parking or spin-then-park instead of pure spinning.
  80.251 +// *  Use Taura-Oyama-Yonenzawa locks.
  80.252 +// *  It's possible to construct a 1-0 lock if we encode the lockword as
  80.253 +//    (List,LockByte).  Acquire will CAS the full lockword while Release
  80.254 +//    will STB 0 into the LockByte.  The 1-0 scheme admits stranding, so
  80.255 +//    acquiring threads use timers (ParkTimed) to detect and recover from
  80.256 +//    the stranding window.  Thread/Node structures must be aligned on 256-byte
  80.257 +//    boundaries by using placement-new.
  80.258 +// *  Augment MCS with advisory back-link fields maintained with CAS().
  80.259 +//    Pictorially:  LockWord -> T1 <-> T2 <-> T3 <-> ... <-> Tn <-> Owner.
  80.260 +//    The validity of the backlinks must be ratified before we trust the value.
  80.261 +//    If the backlinks are invalid the exiting thread must back-track through the
  80.262 +//    forward links, which are always trustworthy.
  80.263 +// *  Add a successor indication.  The LockWord is currently encoded as
  80.264 +//    (List, LOCKBIT:1).  We could also add a SUCCBIT or an explicit _succ variable
  80.265 +//    to provide the usual futile-wakeup optimization.
  80.266 +//    See RTStt for details.
  80.267 +// *  Consider schedctl.sc_nopreempt to cover the critical section.
  80.268 +//
  80.269 +
  80.270 +
  80.271 +typedef volatile intptr_t MutexT ;      // Mux Lock-word
  80.272 +enum MuxBits { LOCKBIT = 1 } ;
  80.273 +
  80.274 +void Thread::muxAcquire (volatile intptr_t * Lock, const char * LockName) {
  80.275 +  intptr_t w = Atomic::cmpxchg_ptr (LOCKBIT, Lock, 0) ;
  80.276 +  if (w == 0) return ;
  80.277 +  if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
  80.278 +     return ;
  80.279 +  }
  80.280 +
  80.281 +  TEVENT (muxAcquire - Contention) ;
  80.282 +  ParkEvent * const Self = Thread::current()->_MuxEvent ;
  80.283 +  assert ((intptr_t(Self) & LOCKBIT) == 0, "invariant") ;
  80.284    for (;;) {
  80.285 -    p = FreeList ;
  80.286 -    if (p  == NULL) break ;
  80.287 -    // 1: Detach
  80.288 -    // Tantamount to p = Swap (&FreeList, NULL)
  80.289 -    if (Atomic::cmpxchg_ptr (NULL, &FreeList, p) != p) {
  80.290 -       continue ;
  80.291 -    }
  80.292 -
  80.293 -    // We've detached the list.  The list in-hand is now
  80.294 -    // local to this thread.   This thread can operate on the
  80.295 -    // list without risk of interference from other threads.
  80.296 -    // 2: Extract -- pop the 1st element from the list.
  80.297 -    Parker * List = p->FreeNext ;
  80.298 -    if (List == NULL) break ;
  80.299 -    for (;;) {
  80.300 -        // 3: Try to reattach the residual list
  80.301 -        guarantee (List != NULL, "invariant") ;
  80.302 -        Parker * Arv =  (Parker *) Atomic::cmpxchg_ptr (List, &FreeList, NULL) ;
  80.303 -        if (Arv == NULL) break ;
  80.304 -
  80.305 -        // New nodes arrived.  Try to detach the recent arrivals.
  80.306 -        if (Atomic::cmpxchg_ptr (NULL, &FreeList, Arv) != Arv) {
  80.307 -            continue ;
  80.308 +     int its = (os::is_MP() ? 100 : 0) + 1 ;
  80.309 +
  80.310 +     // Optional spin phase: spin-then-park strategy
  80.311 +     while (--its >= 0) {
  80.312 +       w = *Lock ;
  80.313 +       if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
  80.314 +          return ;
  80.315 +       }
  80.316 +     }
  80.317 +
  80.318 +     Self->reset() ;
  80.319 +     Self->OnList = intptr_t(Lock) ;
  80.320 +     // The following fence() isn't _strictly necessary as the subsequent
  80.321 +     // CAS() both serializes execution and ratifies the fetched *Lock value.
  80.322 +     OrderAccess::fence();
  80.323 +     for (;;) {
  80.324 +        w = *Lock ;
  80.325 +        if ((w & LOCKBIT) == 0) {
  80.326 +            if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
  80.327 +                Self->OnList = 0 ;   // hygiene - allows stronger asserts
  80.328 +                return ;
  80.329 +            }
  80.330 +            continue ;      // Interference -- *Lock changed -- Just retry
  80.331          }
  80.332 -        guarantee (Arv != NULL, "invariant") ;
  80.333 -        // 4: Merge Arv into List
  80.334 -        Parker * Tail = List ;
  80.335 -        while (Tail->FreeNext != NULL) Tail = Tail->FreeNext ;
  80.336 -        Tail->FreeNext = Arv ;
  80.337 -    }
  80.338 -    break ;
  80.339 -  }
  80.340 -
  80.341 -  if (p != NULL) {
  80.342 -    guarantee (p->AssociatedWith == NULL, "invariant") ;
  80.343 -  } else {
  80.344 -    // Do this the hard way -- materialize a new Parker..
  80.345 -    // In rare cases an allocating thread might detach
  80.346 -    // a long list -- installing null into FreeList --and
  80.347 -    // then stall.  Another thread calling Allocate() would see
  80.348 -    // FreeList == null and then invoke the ctor.  In this case we
  80.349 -    // end up with more Parkers in circulation than we need, but
  80.350 -    // the race is rare and the outcome is benign.
  80.351 -    // Ideally, the # of extant Parkers is equal to the
  80.352 -    // maximum # of threads that existed at any one time.
  80.353 -    // Because of the race mentioned above, segments of the
  80.354 -    // freelist can be transiently inaccessible.  At worst
  80.355 -    // we may end up with the # of Parkers in circulation
  80.356 -    // slightly above the ideal.
  80.357 -    p = new Parker() ;
  80.358 -  }
  80.359 -  p->AssociatedWith = t ;          // Associate p with t
  80.360 -  p->FreeNext       = NULL ;
  80.361 -  return p ;
  80.362 -}
  80.363 -
  80.364 -
  80.365 -void Parker::Release (Parker * p) {
  80.366 -  if (p == NULL) return ;
  80.367 -  guarantee (p->AssociatedWith != NULL, "invariant") ;
  80.368 -  guarantee (p->FreeNext == NULL      , "invariant") ;
  80.369 -  p->AssociatedWith = NULL ;
  80.370 -  for (;;) {
  80.371 -    // Push p onto FreeList
  80.372 -    Parker * List = FreeList ;
  80.373 -    p->FreeNext = List ;
  80.374 -    if (Atomic::cmpxchg_ptr (p, &FreeList, List) == List) break ;
  80.375 +        assert (w & LOCKBIT, "invariant") ;
  80.376 +        Self->ListNext = (ParkEvent *) (w & ~LOCKBIT );
  80.377 +        if (Atomic::cmpxchg_ptr (intptr_t(Self)|LOCKBIT, Lock, w) == w) break ;
  80.378 +     }
  80.379 +
  80.380 +     while (Self->OnList != 0) {
  80.381 +        Self->park() ;
  80.382 +     }
  80.383    }
  80.384  }
  80.385  
  80.386 +void Thread::muxAcquireW (volatile intptr_t * Lock, ParkEvent * ev) {
  80.387 +  intptr_t w = Atomic::cmpxchg_ptr (LOCKBIT, Lock, 0) ;
  80.388 +  if (w == 0) return ;
  80.389 +  if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
  80.390 +    return ;
  80.391 +  }
  80.392 +
  80.393 +  TEVENT (muxAcquire - Contention) ;
  80.394 +  ParkEvent * ReleaseAfter = NULL ;
  80.395 +  if (ev == NULL) {
  80.396 +    ev = ReleaseAfter = ParkEvent::Allocate (NULL) ;
  80.397 +  }
  80.398 +  assert ((intptr_t(ev) & LOCKBIT) == 0, "invariant") ;
  80.399 +  for (;;) {
  80.400 +    guarantee (ev->OnList == 0, "invariant") ;
  80.401 +    int its = (os::is_MP() ? 100 : 0) + 1 ;
  80.402 +
  80.403 +    // Optional spin phase: spin-then-park strategy
  80.404 +    while (--its >= 0) {
  80.405 +      w = *Lock ;
  80.406 +      if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
  80.407 +        if (ReleaseAfter != NULL) {
  80.408 +          ParkEvent::Release (ReleaseAfter) ;
  80.409 +        }
  80.410 +        return ;
  80.411 +      }
  80.412 +    }
  80.413 +
  80.414 +    ev->reset() ;
  80.415 +    ev->OnList = intptr_t(Lock) ;
  80.416 +    // The following fence() isn't _strictly necessary as the subsequent
  80.417 +    // CAS() both serializes execution and ratifies the fetched *Lock value.
  80.418 +    OrderAccess::fence();
  80.419 +    for (;;) {
  80.420 +      w = *Lock ;
  80.421 +      if ((w & LOCKBIT) == 0) {
  80.422 +        if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
  80.423 +          ev->OnList = 0 ;
  80.424 +          // We call ::Release while holding the outer lock, thus
  80.425 +          // artificially lengthening the critical section.
  80.426 +          // Consider deferring the ::Release() until the subsequent unlock(),
  80.427 +          // after we've dropped the outer lock.
  80.428 +          if (ReleaseAfter != NULL) {
  80.429 +            ParkEvent::Release (ReleaseAfter) ;
  80.430 +          }
  80.431 +          return ;
  80.432 +        }
  80.433 +        continue ;      // Interference -- *Lock changed -- Just retry
  80.434 +      }
  80.435 +      assert (w & LOCKBIT, "invariant") ;
  80.436 +      ev->ListNext = (ParkEvent *) (w & ~LOCKBIT );
  80.437 +      if (Atomic::cmpxchg_ptr (intptr_t(ev)|LOCKBIT, Lock, w) == w) break ;
  80.438 +    }
  80.439 +
  80.440 +    while (ev->OnList != 0) {
  80.441 +      ev->park() ;
  80.442 +    }
  80.443 +  }
  80.444 +}
  80.445 +
  80.446 +// Release() must extract a successor from the list and then wake that thread.
  80.447 +// It can "pop" the front of the list or use a detach-modify-reattach (DMR) scheme
  80.448 +// similar to that used by ParkEvent::Allocate() and ::Release().  DMR-based
  80.449 +// Release() would :
  80.450 +// (A) CAS() or swap() null to *Lock, releasing the lock and detaching the list.
  80.451 +// (B) Extract a successor from the private list "in-hand"
  80.452 +// (C) attempt to CAS() the residual back into *Lock over null.
  80.453 +//     If there were any newly arrived threads, the CAS() would fail.
  80.454 +//     In that case Release() would detach the RATs, re-merge the list in-hand
  80.455 +//     with the RATs and repeat as needed.  Alternately, Release() might
  80.456 +//     detach and extract a successor, but then pass the residual list to the wakee.
  80.457 +//     The wakee would be responsible for reattaching and remerging before it
  80.458 +//     competed for the lock.
  80.459 +//
  80.460 +// Both "pop" and DMR are immune from ABA corruption -- there can be
  80.461 +// multiple concurrent pushers, but only one popper or detacher.
  80.462 +// This implementation pops from the head of the list.  This is unfair,
  80.463 +// but tends to provide excellent throughput as hot threads remain hot.
  80.464 +// (We wake recently run threads first).
  80.465 +
  80.466 +void Thread::muxRelease (volatile intptr_t * Lock)  {
  80.467 +  for (;;) {
  80.468 +    const intptr_t w = Atomic::cmpxchg_ptr (0, Lock, LOCKBIT) ;
  80.469 +    assert (w & LOCKBIT, "invariant") ;
  80.470 +    if (w == LOCKBIT) return ;
  80.471 +    ParkEvent * List = (ParkEvent *) (w & ~LOCKBIT) ;
  80.472 +    assert (List != NULL, "invariant") ;
  80.473 +    assert (List->OnList == intptr_t(Lock), "invariant") ;
  80.474 +    ParkEvent * nxt = List->ListNext ;
  80.475 +
  80.476 +    // The following CAS() releases the lock and pops the head element.
  80.477 +    if (Atomic::cmpxchg_ptr (intptr_t(nxt), Lock, w) != w) {
  80.478 +      continue ;
  80.479 +    }
  80.480 +    List->OnList = 0 ;
  80.481 +    OrderAccess::fence() ;
  80.482 +    List->unpark () ;
  80.483 +    return ;
  80.484 +  }
  80.485 +}
  80.486 +
  80.487 +
  80.488  void Threads::verify() {
  80.489    ALL_JAVA_THREADS(p) {
  80.490      p->verify();
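
The muxAcquire/muxRelease comments above describe the lock word: the LSB is set iff
the lock is held, and the remaining bits point at the head of a singly-linked list of
blocked threads.  The sketch below restates only that encoding and the CAS transitions
using std::atomic; WaitNode and the mux_* names are invented stand-ins for ParkEvent
and the real routines, and the spin-then-park policy is omitted.

#include <atomic>
#include <cstdint>

struct WaitNode { WaitNode* next = nullptr; };   // stand-in for ParkEvent::ListNext
                                                 // (assumed aligned, so its LSB is 0)
static const intptr_t LOCKBIT = 1;

// Try to set the LSB while preserving whatever waiter list is already there.
bool mux_try_lock(std::atomic<intptr_t>& lock_word) {
  intptr_t w = lock_word.load(std::memory_order_relaxed);
  return (w & LOCKBIT) == 0 &&
         lock_word.compare_exchange_strong(w, w | LOCKBIT, std::memory_order_acquire);
}

// Push `self` onto the waiter list; the LSB stays set, so the lock remains held.
bool mux_push_waiter(std::atomic<intptr_t>& lock_word, WaitNode* self) {
  intptr_t w = lock_word.load(std::memory_order_relaxed);
  if ((w & LOCKBIT) == 0) return false;          // lock is free -- caller retries acquire
  self->next = reinterpret_cast<WaitNode*>(w & ~LOCKBIT);
  return lock_word.compare_exchange_strong(
      w, reinterpret_cast<intptr_t>(self) | LOCKBIT, std::memory_order_release);
}

// Release: pop the head waiter and clear the LSB in a single CAS.
// Returns the node whose thread the caller should unpark (NULL if none).
WaitNode* mux_unlock(std::atomic<intptr_t>& lock_word) {
  for (;;) {
    intptr_t w = lock_word.load(std::memory_order_relaxed);
    intptr_t waiters = w & ~LOCKBIT;
    if (waiters == 0) {                          // no waiters: just drop the bit
      if (lock_word.compare_exchange_strong(w, 0, std::memory_order_release))
        return nullptr;
      continue;
    }
    WaitNode* head = reinterpret_cast<WaitNode*>(waiters);
    if (lock_word.compare_exchange_strong(
            w, reinterpret_cast<intptr_t>(head->next), std::memory_order_release))
      return head;
  }
}
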
    81.1 --- a/src/share/vm/runtime/thread.hpp	Thu Nov 04 15:19:16 2010 -0700
    81.2 +++ b/src/share/vm/runtime/thread.hpp	Thu Nov 04 16:17:54 2010 -0700
    81.3 @@ -30,6 +30,7 @@
    81.4  class ThreadStatistics;
    81.5  class ConcurrentLocksDump;
    81.6  class ParkEvent ;
    81.7 +class Parker;
    81.8  
    81.9  class ciEnv;
   81.10  class CompileThread;
   81.11 @@ -544,7 +545,6 @@
   81.12    static void muxAcquire  (volatile intptr_t * Lock, const char * Name) ;
   81.13    static void muxAcquireW (volatile intptr_t * Lock, ParkEvent * ev) ;
   81.14    static void muxRelease  (volatile intptr_t * Lock) ;
   81.15 -
   81.16  };
   81.17  
   81.18  // Inline implementation of Thread::current()
   81.19 @@ -1769,100 +1769,3 @@
   81.20    }
   81.21  };
   81.22  
   81.23 -// ParkEvents are type-stable and immortal.
   81.24 -//
   81.25 -// Lifecycle: Once a ParkEvent is associated with a thread that ParkEvent remains
   81.26 -// associated with the thread for the thread's entire lifetime - the relationship is
   81.27 -// stable. A thread will be associated with at most one ParkEvent.  When the thread
   81.28 -// expires, the ParkEvent moves to the EventFreeList.  New threads attempt to allocate from
   81.29 -// the EventFreeList before creating a new Event.  Type-stability frees us from
   81.30 -// worrying about stale Event or Thread references in the objectMonitor subsystem.
   81.31 -// (A reference to ParkEvent is always valid, even though the event may no longer be associated
   81.32 -// with the desired or expected thread.  A key aspect of this design is that the callers of
   81.33 -// park, unpark, etc must tolerate stale references and spurious wakeups).
   81.34 -//
   81.35 -// Only the "associated" thread can block (park) on the ParkEvent, although
   81.36 -// any other thread can unpark a reachable parkevent.  Park() is allowed to
   81.37 -// return spuriously.  In fact park-unpark is really just an optimization to
   81.38 -// avoid unbounded spinning and surrender the CPU to be a polite system citizen.
   81.39 -// A degenerate albeit "impolite" park-unpark implementation could simply return.
   81.40 -// See http://blogs.sun.com/dave for more details.
   81.41 -//
   81.42 -// Eventually I'd like to eliminate Events and ObjectWaiters, both of which serve as
   81.43 -// thread proxies, and simply make the THREAD structure type-stable and persistent.
   81.44 -// Currently, we unpark events associated with threads, but ideally we'd just
   81.45 -// unpark threads.
   81.46 -//
   81.47 -// The base-class, PlatformEvent, is platform-specific while the ParkEvent is
   81.48 -// platform-independent.  PlatformEvent provides park(), unpark(), etc., and
   81.49 -// is abstract -- that is, a PlatformEvent should never be instantiated except
   81.50 -// as part of a ParkEvent.
   81.51 -// Equivalently we could have defined a platform-independent base-class that
   81.52 -// exported Allocate(), Release(), etc.  The platform-specific class would extend
   81.53 -// that base-class, adding park(), unpark(), etc.
   81.54 -//
   81.55 -// A word of caution: The JVM uses 2 very similar constructs:
   81.56 -// 1. ParkEvent are used for Java-level "monitor" synchronization.
   81.57 -// 2. Parkers are used by JSR166-JUC park-unpark.
   81.58 -//
   81.59 -// We'll want to eventually merge these redundant facilities and use ParkEvent.
   81.60 -
   81.61 -
   81.62 -class ParkEvent : public os::PlatformEvent {
   81.63 -  private:
   81.64 -    ParkEvent * FreeNext ;
   81.65 -
   81.66 -    // Current association
   81.67 -    Thread * AssociatedWith ;
   81.68 -    intptr_t RawThreadIdentity ;        // LWPID etc
   81.69 -    volatile int Incarnation ;
   81.70 -
   81.71 -    // diagnostic : keep track of last thread to wake this thread.
   81.72 -    // this is useful for construction of dependency graphs.
   81.73 -    void * LastWaker ;
   81.74 -
   81.75 -  public:
   81.76 -    // MCS-CLH list linkage and Native Mutex/Monitor
   81.77 -    ParkEvent * volatile ListNext ;
   81.78 -    ParkEvent * volatile ListPrev ;
   81.79 -    volatile intptr_t OnList ;
   81.80 -    volatile int TState ;
   81.81 -    volatile int Notified ;             // for native monitor construct
   81.82 -    volatile int IsWaiting ;            // Enqueued on WaitSet
   81.83 -
   81.84 -
   81.85 -  private:
   81.86 -    static ParkEvent * volatile FreeList ;
   81.87 -    static volatile int ListLock ;
   81.88 -
   81.89 -    // It's prudent to mark the dtor as "private"
   81.90 -    // ensuring that it's not visible outside the package.
   81.91 -    // Unfortunately gcc warns about such usage, so
   81.92 -    // we revert to the less desirable "protected" visibility.
   81.93 -    // The other compilers accept private dtors.
   81.94 -
   81.95 -  protected:        // Ensure dtor is never invoked
   81.96 -    ~ParkEvent() { guarantee (0, "invariant") ; }
   81.97 -
   81.98 -    ParkEvent() : PlatformEvent() {
   81.99 -       AssociatedWith = NULL ;
  81.100 -       FreeNext       = NULL ;
  81.101 -       ListNext       = NULL ;
  81.102 -       ListPrev       = NULL ;
  81.103 -       OnList         = 0 ;
  81.104 -       TState         = 0 ;
  81.105 -       Notified       = 0 ;
  81.106 -       IsWaiting      = 0 ;
  81.107 -    }
  81.108 -
  81.109 -    // We use placement-new to force ParkEvent instances to be
  81.110 -    // aligned on 256-byte address boundaries.  This ensures that the least
  81.111 -    // significant byte of a ParkEvent address is always 0.
  81.112 -
  81.113 -    void * operator new (size_t sz) ;
  81.114 -    void operator delete (void * a) ;
  81.115 -
  81.116 -  public:
  81.117 -    static ParkEvent * Allocate (Thread * t) ;
  81.118 -    static void Release (ParkEvent * e) ;
  81.119 -} ;
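
Both the ParkEvent comments removed here and the Allocate/Release code removed from
thread.cpp rely on a push-one / detach-all free list: any thread may CAS a single node
onto the head, while an allocator privatizes the entire list, keeps one node, and
re-attaches the residue.  Because a detacher only ever works on a list it owns
privately, the idiom avoids the ABA hazard of a CAS-based pop-one.  A self-contained
sketch under those assumptions (Node and g_free_list are invented names):

#include <atomic>

struct Node { Node* free_next = nullptr; };

std::atomic<Node*> g_free_list{nullptr};

// Any thread may push one node back onto the pool.
void push_one(Node* n) {
  Node* head = g_free_list.load(std::memory_order_relaxed);
  do {
    n->free_next = head;
  } while (!g_free_list.compare_exchange_weak(head, n,
                                              std::memory_order_release,
                                              std::memory_order_relaxed));
}

// An allocator detaches the whole list, keeps the head for itself, and
// re-attaches the residue, merging any nodes that arrived in the meantime.
Node* pop_one() {
  Node* list = g_free_list.exchange(nullptr, std::memory_order_acquire);
  if (list == nullptr) return nullptr;           // caller materializes a new node
  Node* taken   = list;
  Node* residue = list->free_next;
  while (residue != nullptr) {
    Node* expected = nullptr;
    if (g_free_list.compare_exchange_strong(expected, residue,
                                            std::memory_order_release,
                                            std::memory_order_relaxed)) {
      break;                                     // residue published, done
    }
    // New nodes arrived; detach them, splice them behind the residue, then retry.
    Node* arrivals = g_free_list.exchange(nullptr, std::memory_order_acquire);
    Node* tail = residue;
    while (tail->free_next != nullptr) tail = tail->free_next;
    tail->free_next = arrivals;
  }
  taken->free_next = nullptr;
  return taken;
}
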
    82.1 --- a/src/share/vm/utilities/debug.cpp	Thu Nov 04 15:19:16 2010 -0700
    82.2 +++ b/src/share/vm/utilities/debug.cpp	Thu Nov 04 16:17:54 2010 -0700
    82.3 @@ -51,14 +51,16 @@
    82.4  
    82.5  
    82.6  void warning(const char* format, ...) {
    82.7 -  // In case error happens before init or during shutdown
    82.8 -  if (tty == NULL) ostream_init();
    82.9 +  if (PrintWarnings) {
   82.10 +    // In case error happens before init or during shutdown
   82.11 +    if (tty == NULL) ostream_init();
   82.12  
   82.13 -  tty->print("%s warning: ", VM_Version::vm_name());
   82.14 -  va_list ap;
   82.15 -  va_start(ap, format);
   82.16 -  tty->vprint_cr(format, ap);
   82.17 -  va_end(ap);
   82.18 +    tty->print("%s warning: ", VM_Version::vm_name());
   82.19 +    va_list ap;
   82.20 +    va_start(ap, format);
   82.21 +    tty->vprint_cr(format, ap);
   82.22 +    va_end(ap);
   82.23 +  }
   82.24    if (BreakAtWarning) BREAKPOINT;
   82.25  }
   82.26  
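
The debug.cpp hunk above gates the printing in warning() on the new PrintWarnings flag
(BreakAtWarning is still honored either way).  A stand-alone sketch of just the guarded
printing, with a plain bool and vfprintf() standing in for PrintWarnings and the tty
stream:

#include <cstdarg>
#include <cstdio>

static bool g_print_warnings = true;     // stand-in for the PrintWarnings flag

void warning(const char* format, ...) {
  if (!g_print_warnings) return;         // suppressed entirely when the flag is off
  std::fputs("VM warning: ", stderr);
  va_list ap;
  va_start(ap, format);
  std::vfprintf(stderr, format, ap);
  va_end(ap);
  std::fputc('\n', stderr);
}
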
    83.1 --- a/src/share/vm/utilities/exceptions.cpp	Thu Nov 04 15:19:16 2010 -0700
    83.2 +++ b/src/share/vm/utilities/exceptions.cpp	Thu Nov 04 16:17:54 2010 -0700
    83.3 @@ -61,6 +61,18 @@
    83.4     ShouldNotReachHere();
    83.5    }
    83.6  
    83.7 +#ifdef ASSERT
    83.8 +  // Check for trying to throw stack overflow before initialization is complete
    83.9 +  // to prevent infinite recursion trying to initialize stack overflow without
   83.10 +  // adequate stack space.
   83.11 +  // This can happen with stress testing a large value of StackShadowPages
   83.12 +  if (h_exception()->klass() == SystemDictionary::StackOverflowError_klass()) {
   83.13 +    instanceKlass* ik = instanceKlass::cast(h_exception->klass());
   83.14 +    assert(ik->is_initialized(),
   83.15 +           "need to increase min_stack_allowed calculation");
   83.16 +  }
   83.17 +#endif // ASSERT
   83.18 +
   83.19    if (thread->is_VM_thread()
   83.20        || thread->is_Compiler_thread() ) {
   83.21      // We do not care what kind of exception we get for the vm-thread or a thread which
   83.22 @@ -91,7 +103,6 @@
   83.23      thread->set_pending_exception(Universe::vm_exception(), file, line);
   83.24      return true;
   83.25    }
   83.26 -
   83.27    return false;
   83.28  }
   83.29  
   83.30 @@ -193,6 +204,7 @@
   83.31      klassOop k = SystemDictionary::StackOverflowError_klass();
   83.32      oop e = instanceKlass::cast(k)->allocate_instance(CHECK);
   83.33      exception = Handle(THREAD, e);  // fill_in_stack trace does gc
   83.34 +    assert(instanceKlass::cast(k)->is_initialized(), "need to increase min_stack_allowed calculation");
   83.35      if (StackTraceInThrowable) {
   83.36        java_lang_Throwable::fill_in_stack_trace(exception);
   83.37      }
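
The new asserts in exceptions.cpp guard against throwing StackOverflowError before its
class has been initialized: running class initialization on an already-exhausted stack
would simply overflow again.  A generic sketch of the same "initialize the error type
early, assert before the first throw" pattern (not HotSpot code; the names are
invented):

#include <cassert>
#include <stdexcept>

class OverflowReporter {
  bool _initialized = false;
 public:
  // Called once, early, while there is still plenty of stack available.
  void initialize() { _initialized = true; }

  void report() {
    // Doing setup work here, on an already-exhausted stack, is exactly what
    // the assert is meant to catch during stress testing.
    assert(_initialized && "error type must be initialized before first use");
    throw std::runtime_error("stack overflow");
  }
};
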
