Merge

author     asaha
date       Tue, 17 Jun 2014 22:03:39 -0700
changeset  6794:7ef8ab2bf2b0
parent     6793:13be2835d7eb
parent     6730:b15553cde967
child      6795:d094914a65e6
child      6843:bba95ce6b634

.hgtags
make/hotspot_version
     1.1 --- a/.hgtags	Tue Jun 17 15:49:31 2014 -0700
     1.2 +++ b/.hgtags	Tue Jun 17 22:03:39 2014 -0700
     1.3 @@ -486,3 +486,4 @@
     1.4  8ea4732884ccd5586f0afe9478b80add90231455 jdk8u20-b17
     1.5  b685b4e870b159ea5731984199d275879d427038 hs25.20-b18
     1.6  11159d7ec80462a422e39c9b3a39ae932923622d jdk8u20-b18
     1.7 +3e1cec358ab95ef985f821219104141b9ffda83f hs25.20-b19
     2.1 --- a/make/hotspot_version	Tue Jun 17 15:49:31 2014 -0700
     2.2 +++ b/make/hotspot_version	Tue Jun 17 22:03:39 2014 -0700
     2.3 @@ -35,7 +35,7 @@
     2.4  
     2.5  HS_MAJOR_VER=25
     2.6  HS_MINOR_VER=20
     2.7 -HS_BUILD_NUMBER=18
     2.8 +HS_BUILD_NUMBER=19
     2.9  
    2.10  JDK_MAJOR_VER=1
    2.11  JDK_MINOR_VER=8
     3.1 --- a/src/cpu/ppc/vm/cppInterpreter_ppc.cpp	Tue Jun 17 15:49:31 2014 -0700
     3.2 +++ b/src/cpu/ppc/vm/cppInterpreter_ppc.cpp	Tue Jun 17 22:03:39 2014 -0700
     3.3 @@ -1,7 +1,7 @@
     3.4  
     3.5  /*
     3.6 - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
     3.7 - * Copyright 2012, 2013 SAP AG. All rights reserved.
     3.8 + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
     3.9 + * Copyright 2012, 2014 SAP AG. All rights reserved.
    3.10   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    3.11   *
    3.12   * This code is free software; you can redistribute it and/or modify it
    3.13 @@ -2948,17 +2948,60 @@
    3.14    istate->_last_Java_fp = last_Java_fp;
    3.15  }
    3.16  
    3.17 -int AbstractInterpreter::layout_activation(Method* method,
    3.18 -                                           int temps,        // Number of slots on java expression stack in use.
    3.19 -                                           int popframe_args,
    3.20 -                                           int monitors,     // Number of active monitors.
    3.21 -                                           int caller_actual_parameters,
    3.22 -                                           int callee_params,// Number of slots for callee parameters.
    3.23 -                                           int callee_locals,// Number of slots for locals.
    3.24 -                                           frame* caller,
    3.25 -                                           frame* interpreter_frame,
    3.26 -                                           bool is_top_frame,
    3.27 -                                           bool is_bottom_frame) {
    3.28 +// Computes monitor_size and top_frame_size in bytes.
    3.29 +static void frame_size_helper(int max_stack,
    3.30 +                              int monitors,
    3.31 +                              int& monitor_size,
    3.32 +                              int& top_frame_size) {
    3.33 +  monitor_size = frame::interpreter_frame_monitor_size_in_bytes() * monitors;
    3.34 +  top_frame_size = round_to(frame::interpreter_frame_cinterpreterstate_size_in_bytes()
    3.35 +                            + monitor_size
    3.36 +                            + max_stack * Interpreter::stackElementSize
    3.37 +                            + 2 * Interpreter::stackElementSize,
    3.38 +                            frame::alignment_in_bytes)
    3.39 +                   + frame::top_ijava_frame_abi_size;
    3.40 +}
    3.41 +
    3.42 +// Returns number of stackElementWords needed for the interpreter frame with the
    3.43 +// given sections.
    3.44 +int AbstractInterpreter::size_activation(int max_stack,
    3.45 +                                         int temps,
    3.46 +                                         int extra_args,
    3.47 +                                         int monitors,
    3.48 +                                         int callee_params,
    3.49 +                                         int callee_locals,
    3.50 +                                         bool is_top_frame) {
    3.51 +  int monitor_size = 0;
    3.52 +  int top_frame_size = 0;
    3.53 +  frame_size_helper(max_stack, monitors, monitor_size, top_frame_size);
    3.54 +
    3.55 +  int frame_size;
    3.56 +  if (is_top_frame) {
    3.57 +    frame_size = top_frame_size;
    3.58 +  } else {
    3.59 +    frame_size = round_to(frame::interpreter_frame_cinterpreterstate_size_in_bytes()
    3.60 +                          + monitor_size
    3.61 +                          + (temps - callee_params + callee_locals) * Interpreter::stackElementSize
    3.62 +                          + 2 * Interpreter::stackElementSize,
    3.63 +                          frame::alignment_in_bytes)
    3.64 +                 + frame::parent_ijava_frame_abi_size;
    3.65 +    assert(extra_args == 0, "non-zero for top_frame only");
    3.66 +  }
    3.67 +
    3.68 +  return frame_size / Interpreter::stackElementSize;
    3.69 +}
    3.70 +
    3.71 +void AbstractInterpreter::layout_activation(Method* method,
    3.72 +                                            int temps,        // Number of slots on java expression stack in use.
    3.73 +                                            int popframe_args,
    3.74 +                                            int monitors,     // Number of active monitors.
    3.75 +                                            int caller_actual_parameters,
    3.76 +                                            int callee_params,// Number of slots for callee parameters.
    3.77 +                                            int callee_locals,// Number of slots for locals.
    3.78 +                                            frame* caller,
    3.79 +                                            frame* interpreter_frame,
    3.80 +                                            bool is_top_frame,
    3.81 +                                            bool is_bottom_frame) {
    3.82  
    3.83    // NOTE this code must exactly mimic what
    3.84    // InterpreterGenerator::generate_compute_interpreter_state() does
    3.85 @@ -2968,86 +3011,64 @@
    3.86    // both the abi scratch area and a place to hold a result from a
    3.87    // callee on its way to the callers stack.
    3.88  
    3.89 -  int monitor_size = frame::interpreter_frame_monitor_size_in_bytes() * monitors;
    3.90 -  int frame_size;
    3.91 -  int top_frame_size = round_to(frame::interpreter_frame_cinterpreterstate_size_in_bytes()
    3.92 -                                + monitor_size
    3.93 -                                + (method->max_stack() *Interpreter::stackElementWords * BytesPerWord)
    3.94 -                                + 2*BytesPerWord,
    3.95 -                                frame::alignment_in_bytes)
    3.96 -                      + frame::top_ijava_frame_abi_size;
    3.97 -  if (is_top_frame) {
    3.98 -    frame_size = top_frame_size;
    3.99 +  int monitor_size = 0;
   3.100 +  int top_frame_size = 0;
   3.101 +  frame_size_helper(method->max_stack(), monitors, monitor_size, top_frame_size);
   3.102 +
   3.103 +  intptr_t sp = (intptr_t)interpreter_frame->sp();
   3.104 +  intptr_t fp = *(intptr_t *)sp;
   3.105 +  assert(fp == (intptr_t)caller->sp(), "fp must match");
   3.106 +  interpreterState cur_state =
   3.107 +    (interpreterState)(fp - frame::interpreter_frame_cinterpreterstate_size_in_bytes());
   3.108 +
   3.109 +  // Now fill in the interpreterState object.
   3.110 +
   3.111 +  intptr_t* locals;
   3.112 +  if (caller->is_interpreted_frame()) {
   3.113 +    // Locals must agree with the caller because it will be used to set the
   3.114 +    // caller's tos when we return.
   3.115 +    interpreterState prev  = caller->get_interpreterState();
   3.116 +    // Calculate start of "locals" for MH calls.  For MH calls, the
   3.117 +    // current method() (= MH target) and prev->callee() (=
   3.118 +    // MH.invoke*()) are different and especially have different
   3.119 +    // signatures. To pop the argumentsof the caller, we must use
   3.120 +    // the prev->callee()->size_of_arguments() because that's what
   3.121 +    // the caller actually pushed.  Currently, for synthetic MH
   3.122 +    // calls (deoptimized from inlined MH calls), detected by
   3.123 +    // is_method_handle_invoke(), we use the callee's arguments
   3.124 +    // because here, the caller's and callee's signature match.
   3.125 +    if (true /*!caller->is_at_mh_callsite()*/) {
   3.126 +      locals = prev->stack() + method->size_of_parameters();
   3.127 +    } else {
   3.128 +      // Normal MH call.
   3.129 +      locals = prev->stack() + prev->callee()->size_of_parameters();
   3.130 +    }
   3.131    } else {
   3.132 -    frame_size = round_to(frame::interpreter_frame_cinterpreterstate_size_in_bytes()
   3.133 -                          + monitor_size
   3.134 -                          + ((temps - callee_params + callee_locals) *
   3.135 -                             Interpreter::stackElementWords * BytesPerWord)
   3.136 -                          + 2*BytesPerWord,
   3.137 -                          frame::alignment_in_bytes)
   3.138 -                 + frame::parent_ijava_frame_abi_size;
   3.139 -    assert(popframe_args==0, "non-zero for top_frame only");
   3.140 +    bool is_deopted;
   3.141 +    locals = (intptr_t*) (fp + ((method->max_locals() - 1) * BytesPerWord) +
   3.142 +                          frame::parent_ijava_frame_abi_size);
   3.143    }
   3.144  
   3.145 -  // If we actually have a frame to layout we must now fill in all the pieces.
   3.146 -  if (interpreter_frame != NULL) {
   3.147 -
   3.148 -    intptr_t sp = (intptr_t)interpreter_frame->sp();
   3.149 -    intptr_t fp = *(intptr_t *)sp;
   3.150 -    assert(fp == (intptr_t)caller->sp(), "fp must match");
   3.151 -    interpreterState cur_state =
   3.152 -      (interpreterState)(fp - frame::interpreter_frame_cinterpreterstate_size_in_bytes());
   3.153 -
   3.154 -    // Now fill in the interpreterState object.
   3.155 -
   3.156 -    intptr_t* locals;
   3.157 -    if (caller->is_interpreted_frame()) {
   3.158 -      // Locals must agree with the caller because it will be used to set the
   3.159 -      // caller's tos when we return.
   3.160 -      interpreterState prev  = caller->get_interpreterState();
   3.161 -      // Calculate start of "locals" for MH calls.  For MH calls, the
   3.162 -      // current method() (= MH target) and prev->callee() (=
   3.163 -      // MH.invoke*()) are different and especially have different
   3.164 -      // signatures. To pop the argumentsof the caller, we must use
   3.165 -      // the prev->callee()->size_of_arguments() because that's what
   3.166 -      // the caller actually pushed.  Currently, for synthetic MH
   3.167 -      // calls (deoptimized from inlined MH calls), detected by
   3.168 -      // is_method_handle_invoke(), we use the callee's arguments
   3.169 -      // because here, the caller's and callee's signature match.
   3.170 -      if (true /*!caller->is_at_mh_callsite()*/) {
   3.171 -        locals = prev->stack() + method->size_of_parameters();
   3.172 -      } else {
   3.173 -        // Normal MH call.
   3.174 -        locals = prev->stack() + prev->callee()->size_of_parameters();
   3.175 -      }
   3.176 -    } else {
   3.177 -      bool is_deopted;
   3.178 -      locals = (intptr_t*) (fp + ((method->max_locals() - 1) * BytesPerWord) +
   3.179 -                            frame::parent_ijava_frame_abi_size);
   3.180 -    }
   3.181 -
   3.182 -    intptr_t* monitor_base = (intptr_t*) cur_state;
   3.183 -    intptr_t* stack_base   = (intptr_t*) ((intptr_t) monitor_base - monitor_size);
   3.184 -
   3.185 -    // Provide pop_frame capability on PPC64, add popframe_args.
   3.186 -    // +1 because stack is always prepushed.
   3.187 -    intptr_t* stack = (intptr_t*) ((intptr_t) stack_base - (temps + popframe_args + 1) * BytesPerWord);
   3.188 -
   3.189 -    BytecodeInterpreter::layout_interpreterState(cur_state,
   3.190 -                                                 caller,
   3.191 -                                                 interpreter_frame,
   3.192 -                                                 method,
   3.193 -                                                 locals,
   3.194 -                                                 stack,
   3.195 -                                                 stack_base,
   3.196 -                                                 monitor_base,
   3.197 -                                                 (intptr_t*)(((intptr_t)fp)-top_frame_size),
   3.198 -                                                 is_top_frame);
   3.199 -
   3.200 -    BytecodeInterpreter::pd_layout_interpreterState(cur_state, interpreter_return_address,
   3.201 -                                                    interpreter_frame->fp());
   3.202 -  }
   3.203 -  return frame_size/BytesPerWord;
   3.204 +  intptr_t* monitor_base = (intptr_t*) cur_state;
   3.205 +  intptr_t* stack_base   = (intptr_t*) ((intptr_t) monitor_base - monitor_size);
   3.206 +
   3.207 +  // Provide pop_frame capability on PPC64, add popframe_args.
   3.208 +  // +1 because stack is always prepushed.
   3.209 +  intptr_t* stack = (intptr_t*) ((intptr_t) stack_base - (temps + popframe_args + 1) * BytesPerWord);
   3.210 +
   3.211 +  BytecodeInterpreter::layout_interpreterState(cur_state,
   3.212 +                                               caller,
   3.213 +                                               interpreter_frame,
   3.214 +                                               method,
   3.215 +                                               locals,
   3.216 +                                               stack,
   3.217 +                                               stack_base,
   3.218 +                                               monitor_base,
   3.219 +                                               (intptr_t*)(((intptr_t)fp) - top_frame_size),
   3.220 +                                               is_top_frame);
   3.221 +
   3.222 +  BytecodeInterpreter::pd_layout_interpreterState(cur_state, interpreter_return_address,
   3.223 +                                                  interpreter_frame->fp());
   3.224  }
   3.225  
   3.226  #endif // CC_INTERP
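
In the hunk above, the PPC C++ interpreter's layout_activation() is split so that the pure size computation lives in size_activation() while both paths share frame_size_helper() through reference out-parameters. The following is only a self-contained sketch of that pattern; the constants and names (kSlotSize, kMonitorBytes, kFixedStateBytes) are hypothetical stand-ins, not HotSpot values.

```cpp
#include <cassert>
#include <cstdio>

static const int kSlotSize = 8;           // assumed stack element size in bytes
static const int kMonitorBytes = 16;      // assumed per-monitor footprint in bytes
static const int kFixedStateBytes = 64;   // assumed fixed interpreter-state area

// Computes monitor_size and frame_size in bytes via reference out-parameters,
// mirroring the shape of the frame_size_helper() introduced above.
static void frame_size_helper(int max_stack, int monitors,
                              int& monitor_size, int& frame_size) {
  monitor_size = kMonitorBytes * monitors;
  frame_size   = kFixedStateBytes + monitor_size + max_stack * kSlotSize;
}

// Pure size query in slots; touches no memory, usable before a frame exists.
static int size_activation(int max_stack, int monitors) {
  int monitor_size = 0, frame_size = 0;
  frame_size_helper(max_stack, monitors, monitor_size, frame_size);
  return frame_size / kSlotSize;
}

// Layout step; assumes the caller already reserved size_activation() slots,
// so it only places the pieces and returns nothing.
static void layout_activation(char* frame_base, int max_stack, int monitors) {
  int monitor_size = 0, frame_size = 0;
  frame_size_helper(max_stack, monitors, monitor_size, frame_size);
  char* monitor_base = frame_base + kFixedStateBytes;
  char* stack_base   = monitor_base + monitor_size;
  assert(stack_base + max_stack * kSlotSize == frame_base + frame_size);
  std::printf("frame %d bytes: monitors at +%d, stack at +%d\n",
              frame_size, kFixedStateBytes, kFixedStateBytes + monitor_size);
}

int main() {
  char buffer[1024];
  int slots = size_activation(/*max_stack=*/10, /*monitors=*/2);
  std::printf("need %d slots\n", slots);
  layout_activation(buffer, 10, 2);
  return 0;
}
```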
     4.1 --- a/src/cpu/ppc/vm/ppc.ad	Tue Jun 17 15:49:31 2014 -0700
     4.2 +++ b/src/cpu/ppc/vm/ppc.ad	Tue Jun 17 22:03:39 2014 -0700
     4.3 @@ -1,6 +1,6 @@
     4.4  //
     4.5 -// Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
     4.6 -// Copyright 2012, 2013 SAP AG. All rights reserved.
     4.7 +// Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
     4.8 +// Copyright 2012, 2014 SAP AG. All rights reserved.
     4.9  // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    4.10  //
    4.11  // This code is free software; you can redistribute it and/or modify it
    4.12 @@ -1363,8 +1363,8 @@
    4.13    Compile* C = ra_->C;
    4.14    MacroAssembler _masm(&cbuf);
    4.15  
    4.16 -  const long framesize = ((long)C->frame_slots()) << LogBytesPerInt;
    4.17 -  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");
    4.18 +  const long framesize = C->frame_size_in_bytes();
    4.19 +  assert(framesize % (2 * wordSize) == 0, "must preserve 2*wordSize alignment");
    4.20  
    4.21    const bool method_is_frameless      = false /* TODO: PPC port C->is_frameless_method()*/;
    4.22  
    4.23 @@ -1389,19 +1389,22 @@
    4.24    // careful, because some VM calls (such as call site linkage) can
    4.25    // use several kilobytes of stack. But the stack safety zone should
    4.26    // account for that. See bugs 4446381, 4468289, 4497237.
    4.27 -  if (C->need_stack_bang(framesize) && UseStackBanging) {
    4.28 +
    4.29 +  int bangsize = C->bang_size_in_bytes();
    4.30 +  assert(bangsize >= framesize || bangsize <= 0, "stack bang size incorrect");
    4.31 +  if (C->need_stack_bang(bangsize) && UseStackBanging) {
    4.32      // Unfortunately we cannot use the function provided in
    4.33      // assembler.cpp as we have to emulate the pipes. So I had to
    4.34      // insert the code of generate_stack_overflow_check(), see
    4.35      // assembler.cpp for some illuminative comments.
    4.36      const int page_size = os::vm_page_size();
    4.37 -    int bang_end = StackShadowPages*page_size;
    4.38 +    int bang_end = StackShadowPages * page_size;
    4.39  
    4.40      // This is how far the previous frame's stack banging extended.
    4.41      const int bang_end_safe = bang_end;
    4.42  
    4.43 -    if (framesize > page_size) {
    4.44 -      bang_end += framesize;
    4.45 +    if (bangsize > page_size) {
    4.46 +      bang_end += bangsize;
    4.47      }
    4.48  
    4.49      int bang_offset = bang_end_safe;
    4.50 @@ -1447,7 +1450,7 @@
    4.51  
    4.52    unsigned int bytes = (unsigned int)framesize;
    4.53    long offset = Assembler::align_addr(bytes, frame::alignment_in_bytes);
    4.54 -  ciMethod *currMethod = C -> method();
    4.55 +  ciMethod *currMethod = C->method();
    4.56  
    4.57    // Optimized version for most common case.
    4.58    if (UsePower6SchedulerPPC64 &&
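
The ppc.ad change above drives the stack bang by C->bang_size_in_bytes() rather than the raw frame size, asserting that the bang covers at least the frame. Below is a rough sketch of that page-touch loop under assumed values; touch_page() is a placeholder for the store the real prologue emits.

```cpp
#include <cassert>
#include <cstdio>

static void touch_page(int offset_below_sp) {
  std::printf("bang page at SP-%d\n", offset_below_sp);  // stands in for a store
}

static void bang_stack(int frame_size, int bang_size, int page_size, int shadow_pages) {
  // The bang must reach at least as far as the frame about to be pushed.
  assert(bang_size >= frame_size && "stack bang size incorrect");

  int bang_end = shadow_pages * page_size;  // previous frame already banged this far
  const int bang_end_safe = bang_end;
  if (bang_size > page_size) {
    bang_end += bang_size;                  // extend by the bang size, not the frame size
  }
  for (int offset = bang_end_safe; offset <= bang_end; offset += page_size) {
    touch_page(offset);
  }
}

int main() {
  bang_stack(/*frame_size=*/4096, /*bang_size=*/12288,
             /*page_size=*/4096, /*shadow_pages=*/6);
  return 0;
}
```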
     5.1 --- a/src/cpu/ppc/vm/templateInterpreter_ppc.cpp	Tue Jun 17 15:49:31 2014 -0700
     5.2 +++ b/src/cpu/ppc/vm/templateInterpreter_ppc.cpp	Tue Jun 17 22:03:39 2014 -0700
     5.3 @@ -1328,21 +1328,42 @@
     5.4  int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
     5.5    const int max_alignment_size = 2;
     5.6    const int abi_scratch = frame::abi_reg_args_size;
     5.7 -  return method->max_locals() + method->max_stack() + frame::interpreter_frame_monitor_size() + max_alignment_size + abi_scratch;
     5.8 +  return method->max_locals() + method->max_stack() +
     5.9 +         frame::interpreter_frame_monitor_size() + max_alignment_size + abi_scratch;
    5.10  }
    5.11  
    5.12 -// Fills a sceletal interpreter frame generated during deoptimizations
    5.13 -// and returns the frame size in slots.
    5.14 +// Returns number of stackElementWords needed for the interpreter frame with the
    5.15 +// given sections.
    5.16 +// This overestimates the stack by one slot in case of alignments.
    5.17 +int AbstractInterpreter::size_activation(int max_stack,
    5.18 +                                         int temps,
    5.19 +                                         int extra_args,
    5.20 +                                         int monitors,
    5.21 +                                         int callee_params,
    5.22 +                                         int callee_locals,
    5.23 +                                         bool is_top_frame) {
    5.24 +  // Note: This calculation must exactly parallel the frame setup
    5.25 +  // in AbstractInterpreterGenerator::generate_method_entry.
    5.26 +  assert(Interpreter::stackElementWords == 1, "sanity");
    5.27 +  const int max_alignment_space = StackAlignmentInBytes / Interpreter::stackElementSize;
    5.28 +  const int abi_scratch = is_top_frame ? (frame::abi_reg_args_size / Interpreter::stackElementSize) :
    5.29 +                                         (frame::abi_minframe_size / Interpreter::stackElementSize);
    5.30 +  const int size =
    5.31 +    max_stack                                                +
    5.32 +    (callee_locals - callee_params)                          +
    5.33 +    monitors * frame::interpreter_frame_monitor_size()       +
    5.34 +    max_alignment_space                                      +
    5.35 +    abi_scratch                                              +
    5.36 +    frame::ijava_state_size / Interpreter::stackElementSize;
    5.37 +
    5.38 +  // Fixed size of an interpreter frame, align to 16-byte.
    5.39 +  return (size & -2);
    5.40 +}
    5.41 +
    5.42 +// Fills a sceletal interpreter frame generated during deoptimizations.
    5.43  //
    5.44  // Parameters:
    5.45  //
    5.46 -// interpreter_frame == NULL:
    5.47 -//   Only calculate the size of an interpreter activation, no actual layout.
    5.48 -//   Note: This calculation must exactly parallel the frame setup
    5.49 -//   in TemplateInterpreter::generate_normal_entry. But it does not
    5.50 -//   account for the SP alignment, that might further enhance the
    5.51 -//   frame size, depending on FP.
    5.52 -//
    5.53  // interpreter_frame != NULL:
    5.54  //   set up the method, locals, and monitors.
    5.55  //   The frame interpreter_frame, if not NULL, is guaranteed to be the
    5.56 @@ -1359,59 +1380,41 @@
    5.57  //   the arguments off advance the esp by dummy popframe_extra_args slots.
    5.58  //   Popping off those will establish the stack layout as it was before the call.
    5.59  //
    5.60 -int AbstractInterpreter::layout_activation(Method* method,
    5.61 -                                           int tempcount,
    5.62 -                                           int popframe_extra_args,
    5.63 -                                           int moncount,
    5.64 -                                           int caller_actual_parameters,
    5.65 -                                           int callee_param_count,
    5.66 -                                           int callee_locals,
    5.67 -                                           frame* caller,
    5.68 -                                           frame* interpreter_frame,
    5.69 -                                           bool is_top_frame,
    5.70 -                                           bool is_bottom_frame) {
    5.71 +void AbstractInterpreter::layout_activation(Method* method,
    5.72 +                                            int tempcount,
    5.73 +                                            int popframe_extra_args,
    5.74 +                                            int moncount,
    5.75 +                                            int caller_actual_parameters,
    5.76 +                                            int callee_param_count,
    5.77 +                                            int callee_locals_count,
    5.78 +                                            frame* caller,
    5.79 +                                            frame* interpreter_frame,
    5.80 +                                            bool is_top_frame,
    5.81 +                                            bool is_bottom_frame) {
    5.82  
    5.83 -  const int max_alignment_space = 2;
    5.84    const int abi_scratch = is_top_frame ? (frame::abi_reg_args_size / Interpreter::stackElementSize) :
    5.85 -                                         (frame::abi_minframe_size / Interpreter::stackElementSize) ;
    5.86 -  const int conservative_framesize_in_slots =
    5.87 -    method->max_stack() + callee_locals - callee_param_count +
    5.88 -    (moncount * frame::interpreter_frame_monitor_size()) + max_alignment_space +
    5.89 -    abi_scratch + frame::ijava_state_size / Interpreter::stackElementSize;
    5.90 +                                         (frame::abi_minframe_size / Interpreter::stackElementSize);
    5.91  
    5.92 -  assert(!is_top_frame || conservative_framesize_in_slots * 8 > frame::abi_reg_args_size + frame::ijava_state_size, "frame too small");
    5.93 +  intptr_t* locals_base  = (caller->is_interpreted_frame()) ?
    5.94 +    caller->interpreter_frame_esp() + caller_actual_parameters :
    5.95 +    caller->sp() + method->max_locals() - 1 + (frame::abi_minframe_size / Interpreter::stackElementSize) ;
    5.96  
    5.97 -  if (interpreter_frame == NULL) {
    5.98 -    // Since we don't know the exact alignment, we return the conservative size.
    5.99 -    return (conservative_framesize_in_slots & -2);
   5.100 -  } else {
   5.101 -    // Now we know our caller, calc the exact frame layout and size.
   5.102 -    intptr_t* locals_base  = (caller->is_interpreted_frame()) ?
   5.103 -      caller->interpreter_frame_esp() + caller_actual_parameters :
   5.104 -      caller->sp() + method->max_locals() - 1 + (frame::abi_minframe_size / Interpreter::stackElementSize) ;
   5.105 +  intptr_t* monitor_base = caller->sp() - frame::ijava_state_size / Interpreter::stackElementSize ;
   5.106 +  intptr_t* monitor      = monitor_base - (moncount * frame::interpreter_frame_monitor_size());
   5.107 +  intptr_t* esp_base     = monitor - 1;
   5.108 +  intptr_t* esp          = esp_base - tempcount - popframe_extra_args;
   5.109 +  intptr_t* sp           = (intptr_t *) (((intptr_t) (esp_base - callee_locals_count + callee_param_count - method->max_stack()- abi_scratch)) & -StackAlignmentInBytes);
   5.110 +  intptr_t* sender_sp    = caller->sp() + (frame::abi_minframe_size - frame::abi_reg_args_size) / Interpreter::stackElementSize;
   5.111 +  intptr_t* top_frame_sp = is_top_frame ? sp : sp + (frame::abi_minframe_size - frame::abi_reg_args_size) / Interpreter::stackElementSize;
   5.112  
   5.113 -    intptr_t* monitor_base = caller->sp() - frame::ijava_state_size / Interpreter::stackElementSize ;
   5.114 -    intptr_t* monitor      = monitor_base - (moncount * frame::interpreter_frame_monitor_size());
   5.115 -    intptr_t* esp_base     = monitor - 1;
   5.116 -    intptr_t* esp          = esp_base - tempcount - popframe_extra_args;
   5.117 -    intptr_t* sp           = (intptr_t *) (((intptr_t) (esp_base- callee_locals + callee_param_count - method->max_stack()- abi_scratch)) & -StackAlignmentInBytes);
   5.118 -    intptr_t* sender_sp    = caller->sp() + (frame::abi_minframe_size - frame::abi_reg_args_size) / Interpreter::stackElementSize;
   5.119 -    intptr_t* top_frame_sp = is_top_frame ? sp : sp + (frame::abi_minframe_size - frame::abi_reg_args_size) / Interpreter::stackElementSize;
   5.120 -
   5.121 -    interpreter_frame->interpreter_frame_set_method(method);
   5.122 -    interpreter_frame->interpreter_frame_set_locals(locals_base);
   5.123 -    interpreter_frame->interpreter_frame_set_cpcache(method->constants()->cache());
   5.124 -    interpreter_frame->interpreter_frame_set_esp(esp);
   5.125 -    interpreter_frame->interpreter_frame_set_monitor_end((BasicObjectLock *)monitor);
   5.126 -    interpreter_frame->interpreter_frame_set_top_frame_sp(top_frame_sp);
   5.127 -    if (!is_bottom_frame) {
   5.128 -      interpreter_frame->interpreter_frame_set_sender_sp(sender_sp);
   5.129 -    }
   5.130 -
   5.131 -    int framesize_in_slots = caller->sp() - sp;
   5.132 -    assert(!is_top_frame ||framesize_in_slots >= (frame::abi_reg_args_size / Interpreter::stackElementSize) + frame::ijava_state_size / Interpreter::stackElementSize, "frame too small");
   5.133 -    assert(framesize_in_slots <= conservative_framesize_in_slots, "exact frame size must be smaller than the convervative size!");
   5.134 -    return framesize_in_slots;
   5.135 +  interpreter_frame->interpreter_frame_set_method(method);
   5.136 +  interpreter_frame->interpreter_frame_set_locals(locals_base);
   5.137 +  interpreter_frame->interpreter_frame_set_cpcache(method->constants()->cache());
   5.138 +  interpreter_frame->interpreter_frame_set_esp(esp);
   5.139 +  interpreter_frame->interpreter_frame_set_monitor_end((BasicObjectLock *)monitor);
   5.140 +  interpreter_frame->interpreter_frame_set_top_frame_sp(top_frame_sp);
   5.141 +  if (!is_bottom_frame) {
   5.142 +    interpreter_frame->interpreter_frame_set_sender_sp(sender_sp);
   5.143    }
   5.144  }
   5.145  
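
The size_activation() added above pads the slot count with StackAlignmentInBytes worth of slack and then rounds with `size & -2`. With 8-byte stack slots, clearing the low bit of a slot count lands on a 16-byte boundary, and the earlier slack keeps the result an overestimate (as the hunk's own comment notes). The sketch below only demonstrates that arithmetic with assumed values.

```cpp
#include <cstdio>

int main() {
  const int slot_bytes = 8;                     // assumed Interpreter::stackElementSize
  const int alignment_slack = 16 / slot_bytes;  // assumed StackAlignmentInBytes per slot
  for (int raw = 10; raw <= 13; ++raw) {
    int padded  = raw + alignment_slack;        // overestimate before rounding
    int aligned = padded & -2;                  // clear the low bit: even slot count
    std::printf("raw=%d padded=%d aligned=%d slots (%d bytes)\n",
                raw, padded, aligned, aligned * slot_bytes);
  }
  return 0;
}
```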
     6.1 --- a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Tue Jun 17 15:49:31 2014 -0700
     6.2 +++ b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Tue Jun 17 22:03:39 2014 -0700
     6.3 @@ -152,7 +152,7 @@
     6.4  }
     6.5  
     6.6  
     6.7 -int LIR_Assembler::initial_frame_size_in_bytes() {
     6.8 +int LIR_Assembler::initial_frame_size_in_bytes() const {
     6.9    return in_bytes(frame_map()->framesize_in_bytes());
    6.10  }
    6.11  
    6.12 @@ -182,7 +182,7 @@
    6.13    int number_of_locks = entry_state->locks_size();
    6.14  
    6.15    // Create a frame for the compiled activation.
    6.16 -  __ build_frame(initial_frame_size_in_bytes());
    6.17 +  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
    6.18  
    6.19    // OSR buffer is
    6.20    //
     7.1 --- a/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp	Tue Jun 17 15:49:31 2014 -0700
     7.2 +++ b/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp	Tue Jun 17 22:03:39 2014 -0700
     7.3 @@ -55,9 +55,9 @@
     7.4  }
     7.5  
     7.6  
     7.7 -void C1_MacroAssembler::build_frame(int frame_size_in_bytes) {
     7.8 -
     7.9 -  generate_stack_overflow_check(frame_size_in_bytes);
    7.10 +void C1_MacroAssembler::build_frame(int frame_size_in_bytes, int bang_size_in_bytes) {
    7.11 +  assert(bang_size_in_bytes >= frame_size_in_bytes, "stack bang size incorrect");
    7.12 +  generate_stack_overflow_check(bang_size_in_bytes);
    7.13    // Create the frame.
    7.14    save_frame_c1(frame_size_in_bytes);
    7.15  }
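
The SPARC C1 change gives build_frame() both the compiled frame size and a separate bang size, checking bang >= frame. Here is a hedged sketch of that contract, under the assumption stated in the sharedRuntime hunk further below that compiled code bangs enough to also cover the interpreter frames deoptimization may create; all names and sizes are illustrative.

```cpp
#include <algorithm>
#include <cassert>
#include <cstdio>

static void generate_stack_overflow_check(int bytes) { std::printf("bang %d bytes\n", bytes); }
static void save_frame(int bytes)                    { std::printf("push %d bytes\n", bytes); }

// Two sizes: the frame actually pushed, and the (possibly larger) region banged.
static void build_frame(int frame_size_in_bytes, int bang_size_in_bytes) {
  assert(bang_size_in_bytes >= frame_size_in_bytes && "stack bang size incorrect");
  generate_stack_overflow_check(bang_size_in_bytes);  // bang for the larger size
  save_frame(frame_size_in_bytes);                     // but only allocate the compiled frame
}

int main() {
  int compiled_frame_bytes    = 128;   // hypothetical compiled activation
  int deopt_interpreter_bytes = 512;   // hypothetical worst-case interpreter frames
  build_frame(compiled_frame_bytes,
              std::max(compiled_frame_bytes, deopt_interpreter_bytes));
  return 0;
}
```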
     8.1 --- a/src/cpu/sparc/vm/cppInterpreter_sparc.cpp	Tue Jun 17 15:49:31 2014 -0700
     8.2 +++ b/src/cpu/sparc/vm/cppInterpreter_sparc.cpp	Tue Jun 17 22:03:39 2014 -0700
     8.3 @@ -2101,7 +2101,7 @@
     8.4    int monitor_size    = method->is_synchronized() ?
     8.5                                  1*frame::interpreter_frame_monitor_size() : 0;
     8.6    return size_activation_helper(method->max_locals(), method->max_stack(),
     8.7 -                                 monitor_size) + call_stub_size;
     8.8 +                                monitor_size) + call_stub_size;
     8.9  }
    8.10  
    8.11  void BytecodeInterpreter::layout_interpreterState(interpreterState to_fill,
    8.12 @@ -2185,31 +2185,31 @@
    8.13    istate->_last_Java_pc = (intptr_t*) last_Java_pc;
    8.14  }
    8.15  
    8.16 +static int frame_size_helper(int max_stack,
    8.17 +                             int moncount,
    8.18 +                             int callee_param_size,
    8.19 +                             int callee_locals_size,
    8.20 +                             bool is_top_frame,
    8.21 +                             int& monitor_size,
    8.22 +                             int& full_frame_words) {
    8.23 +  int extra_locals_size = callee_locals_size - callee_param_size;
    8.24 +  monitor_size = (sizeof(BasicObjectLock) * moncount) / wordSize;
    8.25 +  full_frame_words = size_activation_helper(extra_locals_size, max_stack, monitor_size);
    8.26 +  int short_frame_words = size_activation_helper(extra_locals_size, max_stack, monitor_size);
    8.27 +  int frame_words = is_top_frame ? full_frame_words : short_frame_words;
    8.28  
    8.29 -int AbstractInterpreter::layout_activation(Method* method,
    8.30 -                                           int tempcount, // Number of slots on java expression stack in use
    8.31 -                                           int popframe_extra_args,
    8.32 -                                           int moncount,  // Number of active monitors
    8.33 -                                           int caller_actual_parameters,
    8.34 -                                           int callee_param_size,
    8.35 -                                           int callee_locals_size,
    8.36 -                                           frame* caller,
    8.37 -                                           frame* interpreter_frame,
    8.38 -                                           bool is_top_frame,
    8.39 -                                           bool is_bottom_frame) {
    8.40 +  return frame_words;
    8.41 +}
    8.42  
    8.43 -  assert(popframe_extra_args == 0, "NEED TO FIX");
    8.44 -  // NOTE this code must exactly mimic what InterpreterGenerator::generate_compute_interpreter_state()
    8.45 -  // does as far as allocating an interpreter frame.
    8.46 -  // If interpreter_frame!=NULL, set up the method, locals, and monitors.
    8.47 -  // The frame interpreter_frame, if not NULL, is guaranteed to be the right size,
    8.48 -  // as determined by a previous call to this method.
    8.49 -  // It is also guaranteed to be walkable even though it is in a skeletal state
    8.50 +int AbstractInterpreter::size_activation(int max_stack,
    8.51 +                                         int tempcount,
    8.52 +                                         int extra_args,
    8.53 +                                         int moncount,
    8.54 +                                         int callee_param_size,
    8.55 +                                         int callee_locals_size,
    8.56 +                                         bool is_top_frame) {
    8.57 +  assert(extra_args == 0, "NEED TO FIX");
    8.58    // NOTE: return size is in words not bytes
    8.59 -  // NOTE: tempcount is the current size of the java expression stack. For top most
    8.60 -  //       frames we will allocate a full sized expression stack and not the curback
    8.61 -  //       version that non-top frames have.
    8.62 -
    8.63    // Calculate the amount our frame will be adjust by the callee. For top frame
    8.64    // this is zero.
    8.65  
    8.66 @@ -2218,87 +2218,108 @@
    8.67    // to it. So it ignores last_frame_adjust value. Seems suspicious as far
    8.68    // as getting sender_sp correct.
    8.69  
    8.70 -  int extra_locals_size = callee_locals_size - callee_param_size;
    8.71 -  int monitor_size = (sizeof(BasicObjectLock) * moncount) / wordSize;
    8.72 -  int full_frame_words = size_activation_helper(extra_locals_size, method->max_stack(), monitor_size);
    8.73 -  int short_frame_words = size_activation_helper(extra_locals_size, method->max_stack(), monitor_size);
    8.74 -  int frame_words = is_top_frame ? full_frame_words : short_frame_words;
    8.75 +  int unused_monitor_size = 0;
    8.76 +  int unused_full_frame_words = 0;
    8.77 +  return frame_size_helper(max_stack, moncount, callee_param_size, callee_locals_size, is_top_frame,
    8.78 +                           unused_monitor_size, unused_full_frame_words);
    8.79 +}
    8.80 +void AbstractInterpreter::layout_activation(Method* method,
    8.81 +                                            int tempcount, // Number of slots on java expression stack in use
    8.82 +                                            int popframe_extra_args,
    8.83 +                                            int moncount,  // Number of active monitors
    8.84 +                                            int caller_actual_parameters,
    8.85 +                                            int callee_param_size,
    8.86 +                                            int callee_locals_size,
    8.87 +                                            frame* caller,
    8.88 +                                            frame* interpreter_frame,
    8.89 +                                            bool is_top_frame,
    8.90 +                                            bool is_bottom_frame) {
    8.91 +  assert(popframe_extra_args == 0, "NEED TO FIX");
    8.92 +  // NOTE this code must exactly mimic what InterpreterGenerator::generate_compute_interpreter_state()
    8.93 +  // does as far as allocating an interpreter frame.
    8.94 +  // Set up the method, locals, and monitors.
    8.95 +  // The frame interpreter_frame is guaranteed to be the right size,
    8.96 +  // as determined by a previous call to the size_activation() method.
    8.97 +  // It is also guaranteed to be walkable even though it is in a skeletal state
    8.98 +  // NOTE: tempcount is the current size of the java expression stack. For top most
    8.99 +  //       frames we will allocate a full sized expression stack and not the curback
   8.100 +  //       version that non-top frames have.
   8.101  
   8.102 +  int monitor_size = 0;
   8.103 +  int full_frame_words = 0;
   8.104 +  int frame_words = frame_size_helper(method->max_stack(), moncount, callee_param_size, callee_locals_size,
   8.105 +                                      is_top_frame, monitor_size, full_frame_words);
   8.106  
   8.107    /*
   8.108 -    if we actually have a frame to layout we must now fill in all the pieces. This means both
   8.109 +    We must now fill in all the pieces of the frame. This means both
   8.110      the interpreterState and the registers.
   8.111    */
   8.112 -  if (interpreter_frame != NULL) {
   8.113  
   8.114 -    // MUCHO HACK
   8.115 +  // MUCHO HACK
   8.116  
   8.117 -    intptr_t* frame_bottom = interpreter_frame->sp() - (full_frame_words - frame_words);
   8.118 -    // 'interpreter_frame->sp()' is unbiased while 'frame_bottom' must be a biased value in 64bit mode.
   8.119 -    assert(((intptr_t)frame_bottom & 0xf) == 0, "SP biased in layout_activation");
   8.120 -    frame_bottom = (intptr_t*)((intptr_t)frame_bottom - STACK_BIAS);
   8.121 +  intptr_t* frame_bottom = interpreter_frame->sp() - (full_frame_words - frame_words);
   8.122 +  // 'interpreter_frame->sp()' is unbiased while 'frame_bottom' must be a biased value in 64bit mode.
   8.123 +  assert(((intptr_t)frame_bottom & 0xf) == 0, "SP biased in layout_activation");
   8.124 +  frame_bottom = (intptr_t*)((intptr_t)frame_bottom - STACK_BIAS);
   8.125  
   8.126 -    /* Now fillin the interpreterState object */
   8.127 +  /* Now fillin the interpreterState object */
   8.128  
   8.129 -    interpreterState cur_state = (interpreterState) ((intptr_t)interpreter_frame->fp() -  sizeof(BytecodeInterpreter));
   8.130 +  interpreterState cur_state = (interpreterState) ((intptr_t)interpreter_frame->fp() -  sizeof(BytecodeInterpreter));
   8.131  
   8.132  
   8.133 -    intptr_t* locals;
   8.134 +  intptr_t* locals;
   8.135  
   8.136 -    // Calculate the postion of locals[0]. This is painful because of
   8.137 -    // stack alignment (same as ia64). The problem is that we can
   8.138 -    // not compute the location of locals from fp(). fp() will account
   8.139 -    // for the extra locals but it also accounts for aligning the stack
   8.140 -    // and we can't determine if the locals[0] was misaligned but max_locals
   8.141 -    // was enough to have the
   8.142 -    // calculate postion of locals. fp already accounts for extra locals.
   8.143 -    // +2 for the static long no_params() issue.
   8.144 +  // Calculate the postion of locals[0]. This is painful because of
   8.145 +  // stack alignment (same as ia64). The problem is that we can
   8.146 +  // not compute the location of locals from fp(). fp() will account
   8.147 +  // for the extra locals but it also accounts for aligning the stack
   8.148 +  // and we can't determine if the locals[0] was misaligned but max_locals
   8.149 +  // was enough to have the
   8.150 +  // calculate postion of locals. fp already accounts for extra locals.
   8.151 +  // +2 for the static long no_params() issue.
   8.152  
   8.153 -    if (caller->is_interpreted_frame()) {
   8.154 -      // locals must agree with the caller because it will be used to set the
   8.155 -      // caller's tos when we return.
   8.156 -      interpreterState prev  = caller->get_interpreterState();
   8.157 -      // stack() is prepushed.
   8.158 -      locals = prev->stack() + method->size_of_parameters();
   8.159 +  if (caller->is_interpreted_frame()) {
   8.160 +    // locals must agree with the caller because it will be used to set the
   8.161 +    // caller's tos when we return.
   8.162 +    interpreterState prev  = caller->get_interpreterState();
   8.163 +    // stack() is prepushed.
   8.164 +    locals = prev->stack() + method->size_of_parameters();
   8.165 +  } else {
   8.166 +    // Lay out locals block in the caller adjacent to the register window save area.
   8.167 +    //
   8.168 +    // Compiled frames do not allocate a varargs area which is why this if
   8.169 +    // statement is needed.
   8.170 +    //
   8.171 +    intptr_t* fp = interpreter_frame->fp();
   8.172 +    int local_words = method->max_locals() * Interpreter::stackElementWords;
   8.173 +
   8.174 +    if (caller->is_compiled_frame()) {
   8.175 +      locals = fp + frame::register_save_words + local_words - 1;
   8.176      } else {
   8.177 -      // Lay out locals block in the caller adjacent to the register window save area.
   8.178 -      //
   8.179 -      // Compiled frames do not allocate a varargs area which is why this if
   8.180 -      // statement is needed.
   8.181 -      //
   8.182 -      intptr_t* fp = interpreter_frame->fp();
   8.183 -      int local_words = method->max_locals() * Interpreter::stackElementWords;
   8.184 -
   8.185 -      if (caller->is_compiled_frame()) {
   8.186 -        locals = fp + frame::register_save_words + local_words - 1;
   8.187 -      } else {
   8.188 -        locals = fp + frame::memory_parameter_word_sp_offset + local_words - 1;
   8.189 -      }
   8.190 -
   8.191 +      locals = fp + frame::memory_parameter_word_sp_offset + local_words - 1;
   8.192      }
   8.193 -    // END MUCHO HACK
   8.194 -
   8.195 -    intptr_t* monitor_base = (intptr_t*) cur_state;
   8.196 -    intptr_t* stack_base =  monitor_base - monitor_size;
   8.197 -    /* +1 because stack is always prepushed */
   8.198 -    intptr_t* stack = stack_base - (tempcount + 1);
   8.199 -
   8.200 -
   8.201 -    BytecodeInterpreter::layout_interpreterState(cur_state,
   8.202 -                                          caller,
   8.203 -                                          interpreter_frame,
   8.204 -                                          method,
   8.205 -                                          locals,
   8.206 -                                          stack,
   8.207 -                                          stack_base,
   8.208 -                                          monitor_base,
   8.209 -                                          frame_bottom,
   8.210 -                                          is_top_frame);
   8.211 -
   8.212 -    BytecodeInterpreter::pd_layout_interpreterState(cur_state, interpreter_return_address, interpreter_frame->fp());
   8.213  
   8.214    }
   8.215 -  return frame_words;
   8.216 +  // END MUCHO HACK
   8.217 +
   8.218 +  intptr_t* monitor_base = (intptr_t*) cur_state;
   8.219 +  intptr_t* stack_base =  monitor_base - monitor_size;
   8.220 +  /* +1 because stack is always prepushed */
   8.221 +  intptr_t* stack = stack_base - (tempcount + 1);
   8.222 +
   8.223 +
   8.224 +  BytecodeInterpreter::layout_interpreterState(cur_state,
   8.225 +                                               caller,
   8.226 +                                               interpreter_frame,
   8.227 +                                               method,
   8.228 +                                               locals,
   8.229 +                                               stack,
   8.230 +                                               stack_base,
   8.231 +                                               monitor_base,
   8.232 +                                               frame_bottom,
   8.233 +                                               is_top_frame);
   8.234 +
   8.235 +  BytecodeInterpreter::pd_layout_interpreterState(cur_state, interpreter_return_address, interpreter_frame->fp());
   8.236  }
   8.237  
   8.238  #endif // CC_INTERP
     9.1 --- a/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Tue Jun 17 15:49:31 2014 -0700
     9.2 +++ b/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Tue Jun 17 22:03:39 2014 -0700
     9.3 @@ -3531,7 +3531,7 @@
     9.4    // was post-decremented.)  Skip this address by starting at i=1, and
     9.5    // touch a few more pages below.  N.B.  It is important to touch all
     9.6    // the way down to and including i=StackShadowPages.
     9.7 -  for (int i = 1; i <= StackShadowPages; i++) {
     9.8 +  for (int i = 1; i < StackShadowPages; i++) {
     9.9      set((-i*offset)+STACK_BIAS, Rscratch);
    9.10      st(G0, Rtsp, Rscratch);
    9.11    }
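
The macroAssembler_sparc.cpp hunk changes the shadow-page touch loop bound from `i <= StackShadowPages` to `i < StackShadowPages`, so one fewer page is written. The tiny check below only illustrates that off-by-one difference; the StackShadowPages value is a made-up stand-in.

```cpp
#include <cstdio>

int main() {
  const int shadow_pages = 6;  // hypothetical StackShadowPages
  int inclusive = 0, exclusive = 0;
  for (int i = 1; i <= shadow_pages; i++) inclusive++;   // old bound
  for (int i = 1; i <  shadow_pages; i++) exclusive++;   // new bound
  std::printf("old bound touches %d pages, new bound touches %d\n",
              inclusive, exclusive);
  return 0;
}
```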
    10.1 --- a/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Tue Jun 17 15:49:31 2014 -0700
    10.2 +++ b/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Tue Jun 17 22:03:39 2014 -0700
    10.3 @@ -3355,13 +3355,16 @@
    10.4    Register        O4array_size       = O4;
    10.5    Label           loop;
    10.6  
    10.7 -  // Before we make new frames, check to see if stack is available.
    10.8 -  // Do this after the caller's return address is on top of stack
    10.9 +#ifdef ASSERT
   10.10 +  // Compilers generate code that bang the stack by as much as the
   10.11 +  // interpreter would need. So this stack banging should never
   10.12 +  // trigger a fault. Verify that it does not on non product builds.
   10.13    if (UseStackBanging) {
   10.14      // Get total frame size for interpreted frames
   10.15      __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes(), O4);
   10.16      __ bang_stack_size(O4, O3, G3_scratch);
   10.17    }
   10.18 +#endif
   10.19  
   10.20    __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes(), O4array_size);
   10.21    __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes(), G3pcs);
   10.22 @@ -3409,9 +3412,11 @@
   10.23    ResourceMark rm;
   10.24    // setup code generation tools
   10.25    int pad = VerifyThread ? 512 : 0;// Extra slop space for more verify code
   10.26 +#ifdef ASSERT
   10.27    if (UseStackBanging) {
   10.28      pad += StackShadowPages*16 + 32;
   10.29    }
   10.30 +#endif
   10.31  #ifdef _LP64
   10.32    CodeBuffer buffer("deopt_blob", 2100+pad, 512);
   10.33  #else
   10.34 @@ -3632,9 +3637,11 @@
   10.35    ResourceMark rm;
   10.36    // setup code generation tools
   10.37    int pad = VerifyThread ? 512 : 0;
   10.38 +#ifdef ASSERT
   10.39    if (UseStackBanging) {
   10.40      pad += StackShadowPages*16 + 32;
   10.41    }
   10.42 +#endif
   10.43  #ifdef _LP64
   10.44    CodeBuffer buffer("uncommon_trap_blob", 2700+pad, 512);
   10.45  #else
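
The sharedRuntime_sparc.cpp hunks wrap the deopt-time banging (and the extra code-buffer padding reserved for it) in #ifdef ASSERT, on the stated grounds that compiled code already bangs as far as the interpreter frames need. A minimal sketch of that debug-only shape; the function and sizes are placeholders.

```cpp
#include <cstdio>

static void bang_stack_size(int bytes) {
  std::printf("debug-only verification bang of %d bytes\n", bytes);
}

static void push_interpreter_frames(int total_interpreter_frame_bytes) {
#ifdef ASSERT
  // Product builds compile this out; the compiled caller's prologue bang is
  // trusted to have covered the space the skeletal frames will occupy.
  bang_stack_size(total_interpreter_frame_bytes);
#endif
  std::printf("materialize skeletal interpreter frames (%d bytes)\n",
              total_interpreter_frame_bytes);
}

int main() {
  push_interpreter_frames(4096);
  return 0;
}
```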
    11.1 --- a/src/cpu/sparc/vm/sparc.ad	Tue Jun 17 15:49:31 2014 -0700
    11.2 +++ b/src/cpu/sparc/vm/sparc.ad	Tue Jun 17 22:03:39 2014 -0700
    11.3 @@ -1193,15 +1193,16 @@
    11.4      st->print_cr("Verify_Thread"); st->print("\t");
    11.5    }
    11.6  
    11.7 -  size_t framesize = C->frame_slots() << LogBytesPerInt;
    11.8 +  size_t framesize = C->frame_size_in_bytes();
    11.9 +  int bangsize = C->bang_size_in_bytes();
   11.10  
   11.11    // Calls to C2R adapters often do not accept exceptional returns.
   11.12    // We require that their callers must bang for them.  But be careful, because
   11.13    // some VM calls (such as call site linkage) can use several kilobytes of
   11.14    // stack.  But the stack safety zone should account for that.
   11.15    // See bugs 4446381, 4468289, 4497237.
   11.16 -  if (C->need_stack_bang(framesize)) {
   11.17 -    st->print_cr("! stack bang"); st->print("\t");
   11.18 +  if (C->need_stack_bang(bangsize)) {
   11.19 +    st->print_cr("! stack bang (%d bytes)", bangsize); st->print("\t");
   11.20    }
   11.21  
   11.22    if (Assembler::is_simm13(-framesize)) {
   11.23 @@ -1225,17 +1226,18 @@
   11.24  
   11.25    __ verify_thread();
   11.26  
   11.27 -  size_t framesize = C->frame_slots() << LogBytesPerInt;
   11.28 +  size_t framesize = C->frame_size_in_bytes();
   11.29    assert(framesize >= 16*wordSize, "must have room for reg. save area");
   11.30    assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");
   11.31 +  int bangsize = C->bang_size_in_bytes();
   11.32  
   11.33    // Calls to C2R adapters often do not accept exceptional returns.
   11.34    // We require that their callers must bang for them.  But be careful, because
   11.35    // some VM calls (such as call site linkage) can use several kilobytes of
   11.36    // stack.  But the stack safety zone should account for that.
   11.37    // See bugs 4446381, 4468289, 4497237.
   11.38 -  if (C->need_stack_bang(framesize)) {
   11.39 -    __ generate_stack_overflow_check(framesize);
   11.40 +  if (C->need_stack_bang(bangsize)) {
   11.41 +    __ generate_stack_overflow_check(bangsize);
   11.42    }
   11.43  
   11.44    if (Assembler::is_simm13(-framesize)) {
   11.45 @@ -2547,7 +2549,7 @@
   11.46    enc_class call_epilog %{
   11.47      if( VerifyStackAtCalls ) {
   11.48        MacroAssembler _masm(&cbuf);
   11.49 -      int framesize = ra_->C->frame_slots() << LogBytesPerInt;
   11.50 +      int framesize = ra_->C->frame_size_in_bytes();
   11.51        Register temp_reg = G3;
   11.52        __ add(SP, framesize, temp_reg);
   11.53        __ cmp(temp_reg, FP);
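
sparc.ad now reads the frame size through C->frame_size_in_bytes() instead of shifting frame_slots() by LogBytesPerInt. With 32-bit slots the two expressions describe the same quantity (modulo any alignment the accessor applies); the sketch below just spells out that conversion with assumed numbers.

```cpp
#include <cstdio>

int main() {
  const int LogBytesPerInt = 2;   // a frame slot is one 32-bit word
  int frame_slots = 36;           // hypothetical register-allocator slot count
  long framesize = (long)frame_slots << LogBytesPerInt;
  std::printf("%d slots -> %ld bytes\n", frame_slots, framesize);
  return 0;
}
```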
    12.1 --- a/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Tue Jun 17 15:49:31 2014 -0700
    12.2 +++ b/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Tue Jun 17 22:03:39 2014 -0700
    12.3 @@ -1564,37 +1564,23 @@
    12.4    int monitor_size    = method->is_synchronized() ?
    12.5                                  1*frame::interpreter_frame_monitor_size() : 0;
    12.6    return size_activation_helper(method->max_locals(), method->max_stack(),
    12.7 -                                 monitor_size) + call_stub_size;
    12.8 +                                monitor_size) + call_stub_size;
    12.9  }
   12.10  
   12.11 -int AbstractInterpreter::layout_activation(Method* method,
   12.12 -                                           int tempcount,
   12.13 -                                           int popframe_extra_args,
   12.14 -                                           int moncount,
   12.15 -                                           int caller_actual_parameters,
   12.16 -                                           int callee_param_count,
   12.17 -                                           int callee_local_count,
   12.18 -                                           frame* caller,
   12.19 -                                           frame* interpreter_frame,
   12.20 -                                           bool is_top_frame,
   12.21 -                                           bool is_bottom_frame) {
   12.22 +int AbstractInterpreter::size_activation(int max_stack,
   12.23 +                                         int temps,
   12.24 +                                         int extra_args,
   12.25 +                                         int monitors,
   12.26 +                                         int callee_params,
   12.27 +                                         int callee_locals,
   12.28 +                                         bool is_top_frame) {
   12.29    // Note: This calculation must exactly parallel the frame setup
   12.30    // in InterpreterGenerator::generate_fixed_frame.
   12.31 -  // If f!=NULL, set up the following variables:
   12.32 -  //   - Lmethod
   12.33 -  //   - Llocals
   12.34 -  //   - Lmonitors (to the indicated number of monitors)
   12.35 -  //   - Lesp (to the indicated number of temps)
   12.36 -  // The frame f (if not NULL) on entry is a description of the caller of the frame
   12.37 -  // we are about to layout. We are guaranteed that we will be able to fill in a
   12.38 -  // new interpreter frame as its callee (i.e. the stack space is allocated and
   12.39 -  // the amount was determined by an earlier call to this method with f == NULL).
   12.40 -  // On return f (if not NULL) while describe the interpreter frame we just layed out.
   12.41  
   12.42 -  int monitor_size           = moncount * frame::interpreter_frame_monitor_size();
   12.43 -  int rounded_vm_local_words = round_to(frame::interpreter_frame_vm_local_words,WordsPerLong);
   12.44 +  int monitor_size           = monitors * frame::interpreter_frame_monitor_size();
   12.45  
   12.46    assert(monitor_size == round_to(monitor_size, WordsPerLong), "must align");
   12.47 +
   12.48    //
   12.49    // Note: if you look closely this appears to be doing something much different
   12.50    // than generate_fixed_frame. What is happening is this. On sparc we have to do
   12.51 @@ -1619,146 +1605,171 @@
   12.52    // there is no sense in messing working code.
   12.53    //
   12.54  
   12.55 -  int rounded_cls = round_to((callee_local_count - callee_param_count), WordsPerLong);
   12.56 +  int rounded_cls = round_to((callee_locals - callee_params), WordsPerLong);
   12.57    assert(rounded_cls == round_to(rounded_cls, WordsPerLong), "must align");
   12.58  
   12.59 -  int raw_frame_size = size_activation_helper(rounded_cls, method->max_stack(),
   12.60 -                                              monitor_size);
   12.61 +  int raw_frame_size = size_activation_helper(rounded_cls, max_stack, monitor_size);
   12.62  
   12.63 -  if (interpreter_frame != NULL) {
   12.64 -    // The skeleton frame must already look like an interpreter frame
   12.65 -    // even if not fully filled out.
   12.66 -    assert(interpreter_frame->is_interpreted_frame(), "Must be interpreted frame");
   12.67 +  return raw_frame_size;
   12.68 +}
   12.69  
   12.70 -    intptr_t* fp = interpreter_frame->fp();
   12.71 +void AbstractInterpreter::layout_activation(Method* method,
   12.72 +                                            int tempcount,
   12.73 +                                            int popframe_extra_args,
   12.74 +                                            int moncount,
   12.75 +                                            int caller_actual_parameters,
   12.76 +                                            int callee_param_count,
   12.77 +                                            int callee_local_count,
   12.78 +                                            frame* caller,
   12.79 +                                            frame* interpreter_frame,
   12.80 +                                            bool is_top_frame,
   12.81 +                                            bool is_bottom_frame) {
   12.82 +  // Set up the following variables:
   12.83 +  //   - Lmethod
   12.84 +  //   - Llocals
   12.85 +  //   - Lmonitors (to the indicated number of monitors)
   12.86 +  //   - Lesp (to the indicated number of temps)
   12.87 +  // The frame caller on entry is a description of the caller of the
   12.88 +  // frame we are about to layout. We are guaranteed that we will be
   12.89 +  // able to fill in a new interpreter frame as its callee (i.e. the
   12.90 +  // stack space is allocated and the amount was determined by an
   12.91 +  // earlier call to the size_activation() method).  On return caller
   12.92 +  // while describe the interpreter frame we just layed out.
   12.93  
   12.94 -    JavaThread* thread = JavaThread::current();
   12.95 -    RegisterMap map(thread, false);
   12.96 -    // More verification that skeleton frame is properly walkable
   12.97 -    assert(fp == caller->sp(), "fp must match");
   12.98 +  // The skeleton frame must already look like an interpreter frame
   12.99 +  // even if not fully filled out.
  12.100 +  assert(interpreter_frame->is_interpreted_frame(), "Must be interpreted frame");
  12.101  
  12.102 -    intptr_t* montop     = fp - rounded_vm_local_words;
  12.103 +  int rounded_vm_local_words = round_to(frame::interpreter_frame_vm_local_words,WordsPerLong);
  12.104 +  int monitor_size           = moncount * frame::interpreter_frame_monitor_size();
  12.105 +  assert(monitor_size == round_to(monitor_size, WordsPerLong), "must align");
  12.106  
  12.107 -    // preallocate monitors (cf. __ add_monitor_to_stack)
  12.108 -    intptr_t* monitors = montop - monitor_size;
  12.109 +  intptr_t* fp = interpreter_frame->fp();
  12.110  
  12.111 -    // preallocate stack space
  12.112 -    intptr_t*  esp = monitors - 1 -
  12.113 -                     (tempcount * Interpreter::stackElementWords) -
  12.114 -                     popframe_extra_args;
  12.115 +  JavaThread* thread = JavaThread::current();
  12.116 +  RegisterMap map(thread, false);
  12.117 +  // More verification that skeleton frame is properly walkable
  12.118 +  assert(fp == caller->sp(), "fp must match");
  12.119  
  12.120 -    int local_words = method->max_locals() * Interpreter::stackElementWords;
  12.121 -    NEEDS_CLEANUP;
  12.122 -    intptr_t* locals;
  12.123 -    if (caller->is_interpreted_frame()) {
  12.124 -      // Can force the locals area to end up properly overlapping the top of the expression stack.
  12.125 -      intptr_t* Lesp_ptr = caller->interpreter_frame_tos_address() - 1;
  12.126 -      // Note that this computation means we replace size_of_parameters() values from the caller
  12.127 -      // interpreter frame's expression stack with our argument locals
  12.128 -      int parm_words  = caller_actual_parameters * Interpreter::stackElementWords;
  12.129 -      locals = Lesp_ptr + parm_words;
  12.130 -      int delta = local_words - parm_words;
  12.131 -      int computed_sp_adjustment = (delta > 0) ? round_to(delta, WordsPerLong) : 0;
  12.132 -      *interpreter_frame->register_addr(I5_savedSP)    = (intptr_t) (fp + computed_sp_adjustment) - STACK_BIAS;
  12.133 -      if (!is_bottom_frame) {
  12.134 -        // Llast_SP is set below for the current frame to SP (with the
  12.135 -        // extra space for the callee's locals). Here we adjust
  12.136 -        // Llast_SP for the caller's frame, removing the extra space
  12.137 -        // for the current method's locals.
  12.138 -        *caller->register_addr(Llast_SP) = *interpreter_frame->register_addr(I5_savedSP);
  12.139 -      } else {
  12.140 -        assert(*caller->register_addr(Llast_SP) >= *interpreter_frame->register_addr(I5_savedSP), "strange Llast_SP");
  12.141 -      }
  12.142 +  intptr_t* montop     = fp - rounded_vm_local_words;
  12.143 +
  12.144 +  // preallocate monitors (cf. __ add_monitor_to_stack)
  12.145 +  intptr_t* monitors = montop - monitor_size;
  12.146 +
  12.147 +  // preallocate stack space
  12.148 +  intptr_t*  esp = monitors - 1 -
  12.149 +    (tempcount * Interpreter::stackElementWords) -
  12.150 +    popframe_extra_args;
  12.151 +
  12.152 +  int local_words = method->max_locals() * Interpreter::stackElementWords;
  12.153 +  NEEDS_CLEANUP;
  12.154 +  intptr_t* locals;
  12.155 +  if (caller->is_interpreted_frame()) {
  12.156 +    // Can force the locals area to end up properly overlapping the top of the expression stack.
  12.157 +    intptr_t* Lesp_ptr = caller->interpreter_frame_tos_address() - 1;
  12.158 +    // Note that this computation means we replace size_of_parameters() values from the caller
  12.159 +    // interpreter frame's expression stack with our argument locals
  12.160 +    int parm_words  = caller_actual_parameters * Interpreter::stackElementWords;
  12.161 +    locals = Lesp_ptr + parm_words;
  12.162 +    int delta = local_words - parm_words;
  12.163 +    int computed_sp_adjustment = (delta > 0) ? round_to(delta, WordsPerLong) : 0;
  12.164 +    *interpreter_frame->register_addr(I5_savedSP)    = (intptr_t) (fp + computed_sp_adjustment) - STACK_BIAS;
  12.165 +    if (!is_bottom_frame) {
  12.166 +      // Llast_SP is set below for the current frame to SP (with the
  12.167 +      // extra space for the callee's locals). Here we adjust
  12.168 +      // Llast_SP for the caller's frame, removing the extra space
  12.169 +      // for the current method's locals.
  12.170 +      *caller->register_addr(Llast_SP) = *interpreter_frame->register_addr(I5_savedSP);
  12.171      } else {
  12.172 -      assert(caller->is_compiled_frame() || caller->is_entry_frame(), "only possible cases");
  12.173 -      // Don't have Lesp available; lay out locals block in the caller
  12.174 -      // adjacent to the register window save area.
  12.175 -      //
  12.176 -      // Compiled frames do not allocate a varargs area which is why this if
  12.177 -      // statement is needed.
  12.178 -      //
  12.179 -      if (caller->is_compiled_frame()) {
  12.180 -        locals = fp + frame::register_save_words + local_words - 1;
  12.181 -      } else {
  12.182 -        locals = fp + frame::memory_parameter_word_sp_offset + local_words - 1;
  12.183 -      }
  12.184 -      if (!caller->is_entry_frame()) {
  12.185 -        // Caller wants his own SP back
  12.186 -        int caller_frame_size = caller->cb()->frame_size();
  12.187 -        *interpreter_frame->register_addr(I5_savedSP) = (intptr_t)(caller->fp() - caller_frame_size) - STACK_BIAS;
  12.188 +      assert(*caller->register_addr(Llast_SP) >= *interpreter_frame->register_addr(I5_savedSP), "strange Llast_SP");
  12.189 +    }
  12.190 +  } else {
  12.191 +    assert(caller->is_compiled_frame() || caller->is_entry_frame(), "only possible cases");
  12.192 +    // Don't have Lesp available; lay out locals block in the caller
  12.193 +    // adjacent to the register window save area.
  12.194 +    //
  12.195 +    // Compiled frames do not allocate a varargs area which is why this if
  12.196 +    // statement is needed.
  12.197 +    //
  12.198 +    if (caller->is_compiled_frame()) {
  12.199 +      locals = fp + frame::register_save_words + local_words - 1;
  12.200 +    } else {
  12.201 +      locals = fp + frame::memory_parameter_word_sp_offset + local_words - 1;
  12.202 +    }
  12.203 +    if (!caller->is_entry_frame()) {
  12.204 +      // Caller wants his own SP back
  12.205 +      int caller_frame_size = caller->cb()->frame_size();
  12.206 +      *interpreter_frame->register_addr(I5_savedSP) = (intptr_t)(caller->fp() - caller_frame_size) - STACK_BIAS;
  12.207 +    }
  12.208 +  }
  12.209 +  if (TraceDeoptimization) {
  12.210 +    if (caller->is_entry_frame()) {
   12.211 +      // make sure I5_savedSP and the entry frame's notion of saved SP
   12.212 +      // agree.  This assertion duplicates a check in entry frame code
  12.213 +      // but catches the failure earlier.
  12.214 +      assert(*caller->register_addr(Lscratch) == *interpreter_frame->register_addr(I5_savedSP),
  12.215 +             "would change callers SP");
  12.216 +    }
  12.217 +    if (caller->is_entry_frame()) {
  12.218 +      tty->print("entry ");
  12.219 +    }
  12.220 +    if (caller->is_compiled_frame()) {
  12.221 +      tty->print("compiled ");
  12.222 +      if (caller->is_deoptimized_frame()) {
  12.223 +        tty->print("(deopt) ");
  12.224        }
  12.225      }
  12.226 -    if (TraceDeoptimization) {
  12.227 -      if (caller->is_entry_frame()) {
  12.228 -        // make sure I5_savedSP and the entry frames notion of saved SP
  12.229 -        // agree.  This assertion duplicate a check in entry frame code
  12.230 -        // but catches the failure earlier.
  12.231 -        assert(*caller->register_addr(Lscratch) == *interpreter_frame->register_addr(I5_savedSP),
  12.232 -               "would change callers SP");
  12.233 -      }
  12.234 -      if (caller->is_entry_frame()) {
  12.235 -        tty->print("entry ");
  12.236 -      }
  12.237 -      if (caller->is_compiled_frame()) {
  12.238 -        tty->print("compiled ");
  12.239 -        if (caller->is_deoptimized_frame()) {
  12.240 -          tty->print("(deopt) ");
  12.241 -        }
  12.242 -      }
  12.243 -      if (caller->is_interpreted_frame()) {
  12.244 -        tty->print("interpreted ");
  12.245 -      }
  12.246 -      tty->print_cr("caller fp=0x%x sp=0x%x", caller->fp(), caller->sp());
  12.247 -      tty->print_cr("save area = 0x%x, 0x%x", caller->sp(), caller->sp() + 16);
  12.248 -      tty->print_cr("save area = 0x%x, 0x%x", caller->fp(), caller->fp() + 16);
  12.249 -      tty->print_cr("interpreter fp=0x%x sp=0x%x", interpreter_frame->fp(), interpreter_frame->sp());
  12.250 -      tty->print_cr("save area = 0x%x, 0x%x", interpreter_frame->sp(), interpreter_frame->sp() + 16);
  12.251 -      tty->print_cr("save area = 0x%x, 0x%x", interpreter_frame->fp(), interpreter_frame->fp() + 16);
  12.252 -      tty->print_cr("Llocals = 0x%x", locals);
  12.253 -      tty->print_cr("Lesp = 0x%x", esp);
  12.254 -      tty->print_cr("Lmonitors = 0x%x", monitors);
  12.255 +    if (caller->is_interpreted_frame()) {
  12.256 +      tty->print("interpreted ");
  12.257      }
  12.258 +    tty->print_cr("caller fp=0x%x sp=0x%x", caller->fp(), caller->sp());
  12.259 +    tty->print_cr("save area = 0x%x, 0x%x", caller->sp(), caller->sp() + 16);
  12.260 +    tty->print_cr("save area = 0x%x, 0x%x", caller->fp(), caller->fp() + 16);
  12.261 +    tty->print_cr("interpreter fp=0x%x sp=0x%x", interpreter_frame->fp(), interpreter_frame->sp());
  12.262 +    tty->print_cr("save area = 0x%x, 0x%x", interpreter_frame->sp(), interpreter_frame->sp() + 16);
  12.263 +    tty->print_cr("save area = 0x%x, 0x%x", interpreter_frame->fp(), interpreter_frame->fp() + 16);
  12.264 +    tty->print_cr("Llocals = 0x%x", locals);
  12.265 +    tty->print_cr("Lesp = 0x%x", esp);
  12.266 +    tty->print_cr("Lmonitors = 0x%x", monitors);
  12.267 +  }
  12.268  
  12.269 -    if (method->max_locals() > 0) {
  12.270 -      assert(locals < caller->sp() || locals >= (caller->sp() + 16), "locals in save area");
  12.271 -      assert(locals < caller->fp() || locals > (caller->fp() + 16), "locals in save area");
  12.272 -      assert(locals < interpreter_frame->sp() || locals > (interpreter_frame->sp() + 16), "locals in save area");
  12.273 -      assert(locals < interpreter_frame->fp() || locals >= (interpreter_frame->fp() + 16), "locals in save area");
  12.274 -    }
  12.275 +  if (method->max_locals() > 0) {
  12.276 +    assert(locals < caller->sp() || locals >= (caller->sp() + 16), "locals in save area");
  12.277 +    assert(locals < caller->fp() || locals > (caller->fp() + 16), "locals in save area");
  12.278 +    assert(locals < interpreter_frame->sp() || locals > (interpreter_frame->sp() + 16), "locals in save area");
  12.279 +    assert(locals < interpreter_frame->fp() || locals >= (interpreter_frame->fp() + 16), "locals in save area");
  12.280 +  }
  12.281  #ifdef _LP64
  12.282 -    assert(*interpreter_frame->register_addr(I5_savedSP) & 1, "must be odd");
  12.283 +  assert(*interpreter_frame->register_addr(I5_savedSP) & 1, "must be odd");
  12.284  #endif
  12.285  
  12.286 -    *interpreter_frame->register_addr(Lmethod)     = (intptr_t) method;
  12.287 -    *interpreter_frame->register_addr(Llocals)     = (intptr_t) locals;
  12.288 -    *interpreter_frame->register_addr(Lmonitors)   = (intptr_t) monitors;
  12.289 -    *interpreter_frame->register_addr(Lesp)        = (intptr_t) esp;
  12.290 -    // Llast_SP will be same as SP as there is no adapter space
  12.291 -    *interpreter_frame->register_addr(Llast_SP)    = (intptr_t) interpreter_frame->sp() - STACK_BIAS;
  12.292 -    *interpreter_frame->register_addr(LcpoolCache) = (intptr_t) method->constants()->cache();
  12.293 +  *interpreter_frame->register_addr(Lmethod)     = (intptr_t) method;
  12.294 +  *interpreter_frame->register_addr(Llocals)     = (intptr_t) locals;
  12.295 +  *interpreter_frame->register_addr(Lmonitors)   = (intptr_t) monitors;
  12.296 +  *interpreter_frame->register_addr(Lesp)        = (intptr_t) esp;
  12.297 +  // Llast_SP will be same as SP as there is no adapter space
  12.298 +  *interpreter_frame->register_addr(Llast_SP)    = (intptr_t) interpreter_frame->sp() - STACK_BIAS;
  12.299 +  *interpreter_frame->register_addr(LcpoolCache) = (intptr_t) method->constants()->cache();
  12.300  #ifdef FAST_DISPATCH
  12.301 -    *interpreter_frame->register_addr(IdispatchTables) = (intptr_t) Interpreter::dispatch_table();
  12.302 +  *interpreter_frame->register_addr(IdispatchTables) = (intptr_t) Interpreter::dispatch_table();
  12.303  #endif
  12.304  
  12.305  
  12.306  #ifdef ASSERT
  12.307 -    BasicObjectLock* mp = (BasicObjectLock*)monitors;
  12.308 +  BasicObjectLock* mp = (BasicObjectLock*)monitors;
  12.309  
  12.310 -    assert(interpreter_frame->interpreter_frame_method() == method, "method matches");
  12.311 -    assert(interpreter_frame->interpreter_frame_local_at(9) == (intptr_t *)((intptr_t)locals - (9 * Interpreter::stackElementSize)), "locals match");
  12.312 -    assert(interpreter_frame->interpreter_frame_monitor_end()   == mp, "monitor_end matches");
  12.313 -    assert(((intptr_t *)interpreter_frame->interpreter_frame_monitor_begin()) == ((intptr_t *)mp)+monitor_size, "monitor_begin matches");
  12.314 -    assert(interpreter_frame->interpreter_frame_tos_address()-1 == esp, "esp matches");
  12.315 +  assert(interpreter_frame->interpreter_frame_method() == method, "method matches");
  12.316 +  assert(interpreter_frame->interpreter_frame_local_at(9) == (intptr_t *)((intptr_t)locals - (9 * Interpreter::stackElementSize)), "locals match");
  12.317 +  assert(interpreter_frame->interpreter_frame_monitor_end()   == mp, "monitor_end matches");
  12.318 +  assert(((intptr_t *)interpreter_frame->interpreter_frame_monitor_begin()) == ((intptr_t *)mp)+monitor_size, "monitor_begin matches");
  12.319 +  assert(interpreter_frame->interpreter_frame_tos_address()-1 == esp, "esp matches");
  12.320  
  12.321 -    // check bounds
  12.322 -    intptr_t* lo = interpreter_frame->sp() + (frame::memory_parameter_word_sp_offset - 1);
  12.323 -    intptr_t* hi = interpreter_frame->fp() - rounded_vm_local_words;
  12.324 -    assert(lo < monitors && montop <= hi, "monitors in bounds");
  12.325 -    assert(lo <= esp && esp < monitors, "esp in bounds");
  12.326 +  // check bounds
  12.327 +  intptr_t* lo = interpreter_frame->sp() + (frame::memory_parameter_word_sp_offset - 1);
  12.328 +  intptr_t* hi = interpreter_frame->fp() - rounded_vm_local_words;
  12.329 +  assert(lo < monitors && montop <= hi, "monitors in bounds");
  12.330 +  assert(lo <= esp && esp < monitors, "esp in bounds");
  12.331  #endif // ASSERT
  12.332 -  }
  12.333 -
  12.334 -  return raw_frame_size;
  12.335  }
  12.336  
  12.337  //----------------------------------------------------------------------------------------------------
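
The hunk above turns layout_activation() on this platform from a size-returning function into a void routine that only fills in a frame whose space was already reserved based on an earlier size_activation() call. As a rough standalone model of that two-phase contract (the types and constants below are illustrative stand-ins, not HotSpot's), the sizing step is a pure query and the layout step may assume a frame of exactly that size:

#include <cassert>
#include <vector>

struct SkeletalFrame { std::vector<long> slots; };   // stand-in for a walkable skeleton frame

// Pure size query: no side effects, mirrors the role of size_activation().
int size_activation_model(int temps, int monitors) {
  const int fixed_overhead_words = 6;                // assumed, not the real platform value
  return fixed_overhead_words + temps + 2 * monitors;
}

// Fills in an already-allocated frame, mirrors the role of layout_activation().
void layout_activation_model(SkeletalFrame& f, int temps, int monitors) {
  assert((int)f.slots.size() == size_activation_model(temps, monitors));
  // ... set method, locals, monitor area and expression stack pointers here ...
}
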
    13.1 --- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Tue Jun 17 15:49:31 2014 -0700
    13.2 +++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Tue Jun 17 22:03:39 2014 -0700
    13.3 @@ -288,7 +288,7 @@
    13.4  
    13.5    // build frame
    13.6    ciMethod* m = compilation()->method();
    13.7 -  __ build_frame(initial_frame_size_in_bytes());
    13.8 +  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
    13.9  
   13.10    // OSR buffer is
   13.11    //
   13.12 @@ -376,7 +376,7 @@
   13.13  }
   13.14  
   13.15  // This specifies the rsp decrement needed to build the frame
   13.16 -int LIR_Assembler::initial_frame_size_in_bytes() {
   13.17 +int LIR_Assembler::initial_frame_size_in_bytes() const {
   13.18    // if rounding, must let FrameMap know!
   13.19  
   13.20    // The frame_map records size in slots (32bit word)
    14.1 --- a/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp	Tue Jun 17 15:49:31 2014 -0700
    14.2 +++ b/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp	Tue Jun 17 22:03:39 2014 -0700
    14.3 @@ -349,13 +349,14 @@
    14.4  }
    14.5  
    14.6  
    14.7 -void C1_MacroAssembler::build_frame(int frame_size_in_bytes) {
    14.8 +void C1_MacroAssembler::build_frame(int frame_size_in_bytes, int bang_size_in_bytes) {
    14.9 +  assert(bang_size_in_bytes >= frame_size_in_bytes, "stack bang size incorrect");
   14.10    // Make sure there is enough stack space for this method's activation.
   14.11    // Note that we do this before doing an enter(). This matches the
   14.12    // ordering of C2's stack overflow check / rsp decrement and allows
   14.13    // the SharedRuntime stack overflow handling to be consistent
   14.14    // between the two compilers.
   14.15 -  generate_stack_overflow_check(frame_size_in_bytes);
   14.16 +  generate_stack_overflow_check(bang_size_in_bytes);
   14.17  
   14.18    push(rbp);
   14.19  #ifdef TIERED
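
build_frame() now receives the bang size separately from the frame size and asserts the former is at least the latter, since the probe must also cover interpreter frames a later deoptimization may create. A minimal sketch of that invariant and ordering (illustrative only, not VM code):

#include <cassert>

void build_frame_sketch(int frame_size_in_bytes, int bang_size_in_bytes) {
  // The bang must cover at least the compiled frame, and usually more.
  assert(bang_size_in_bytes >= frame_size_in_bytes && "stack bang size incorrect");
  // 1. bang bang_size_in_bytes of stack (before enter(), matching C2's ordering)
  // 2. push rbp and decrement rsp by frame_size_in_bytes
}
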
    15.1 --- a/src/cpu/x86/vm/cppInterpreter_x86.cpp	Tue Jun 17 15:49:31 2014 -0700
    15.2 +++ b/src/cpu/x86/vm/cppInterpreter_x86.cpp	Tue Jun 17 22:03:39 2014 -0700
    15.3 @@ -2336,29 +2336,42 @@
    15.4           "Stack top out of range");
    15.5  }
    15.6  
    15.7 -int AbstractInterpreter::layout_activation(Method* method,
    15.8 -                                           int tempcount,  //
    15.9 -                                           int popframe_extra_args,
   15.10 -                                           int moncount,
   15.11 -                                           int caller_actual_parameters,
   15.12 -                                           int callee_param_count,
   15.13 -                                           int callee_locals,
   15.14 -                                           frame* caller,
   15.15 -                                           frame* interpreter_frame,
   15.16 -                                           bool is_top_frame,
   15.17 -                                           bool is_bottom_frame) {
   15.18 -
   15.19 -  assert(popframe_extra_args == 0, "FIX ME");
   15.20 -  // NOTE this code must exactly mimic what InterpreterGenerator::generate_compute_interpreter_state()
   15.21 -  // does as far as allocating an interpreter frame.
   15.22 -  // If interpreter_frame!=NULL, set up the method, locals, and monitors.
   15.23 -  // The frame interpreter_frame, if not NULL, is guaranteed to be the right size,
   15.24 -  // as determined by a previous call to this method.
   15.25 -  // It is also guaranteed to be walkable even though it is in a skeletal state
   15.26 +
   15.27 +static int frame_size_helper(int max_stack,
   15.28 +                             int tempcount,
   15.29 +                             int moncount,
   15.30 +                             int callee_param_count,
   15.31 +                             int callee_locals,
   15.32 +                             bool is_top_frame,
   15.33 +                             int& monitor_size,
   15.34 +                             int& full_frame_size) {
   15.35 +  int extra_locals_size = (callee_locals - callee_param_count) * BytesPerWord;
   15.36 +  monitor_size = sizeof(BasicObjectLock) * moncount;
   15.37 +
   15.38 +  // First calculate the frame size without any java expression stack
   15.39 +  int short_frame_size = size_activation_helper(extra_locals_size,
   15.40 +                                                monitor_size);
   15.41 +
   15.42 +  // Now with full size expression stack
   15.43 +  full_frame_size = short_frame_size + max_stack * BytesPerWord;
   15.44 +
   15.45 +  // and now with only live portion of the expression stack
   15.46 +  short_frame_size = short_frame_size + tempcount * BytesPerWord;
   15.47 +
   15.48 +  // the size the activation is right now. Only top frame is full size
   15.49 +  int frame_size = (is_top_frame ? full_frame_size : short_frame_size);
   15.50 +  return frame_size;
   15.51 +}
   15.52 +
   15.53 +int AbstractInterpreter::size_activation(int max_stack,
   15.54 +                                         int tempcount,
   15.55 +                                         int extra_args,
   15.56 +                                         int moncount,
   15.57 +                                         int callee_param_count,
   15.58 +                                         int callee_locals,
   15.59 +                                         bool is_top_frame) {
   15.60 +  assert(extra_args == 0, "FIX ME");
   15.61    // NOTE: return size is in words not bytes
   15.62 -  // NOTE: tempcount is the current size of the java expression stack. For top most
   15.63 -  //       frames we will allocate a full sized expression stack and not the curback
   15.64 -  //       version that non-top frames have.
   15.65  
    15.66    // Calculate the amount our frame will be adjusted by the callee. For top frame
   15.67    // this is zero.
   15.68 @@ -2368,87 +2381,102 @@
   15.69    // to it. So it ignores last_frame_adjust value. Seems suspicious as far
   15.70    // as getting sender_sp correct.
   15.71  
   15.72 -  int extra_locals_size = (callee_locals - callee_param_count) * BytesPerWord;
   15.73 -  int monitor_size = sizeof(BasicObjectLock) * moncount;
   15.74 -
   15.75 -  // First calculate the frame size without any java expression stack
   15.76 -  int short_frame_size = size_activation_helper(extra_locals_size,
   15.77 -                                                monitor_size);
   15.78 -
   15.79 -  // Now with full size expression stack
   15.80 -  int full_frame_size = short_frame_size + method->max_stack() * BytesPerWord;
   15.81 -
   15.82 -  // and now with only live portion of the expression stack
   15.83 -  short_frame_size = short_frame_size + tempcount * BytesPerWord;
   15.84 -
   15.85 -  // the size the activation is right now. Only top frame is full size
   15.86 -  int frame_size = (is_top_frame ? full_frame_size : short_frame_size);
   15.87 -
   15.88 -  if (interpreter_frame != NULL) {
   15.89 +  int unused_monitor_size = 0;
   15.90 +  int unused_full_frame_size = 0;
   15.91 +  return frame_size_helper(max_stack, tempcount, moncount, callee_param_count, callee_locals,
   15.92 +                           is_top_frame, unused_monitor_size, unused_full_frame_size)/BytesPerWord;
   15.93 +}
   15.94 +
   15.95 +void AbstractInterpreter::layout_activation(Method* method,
   15.96 +                                            int tempcount,  //
   15.97 +                                            int popframe_extra_args,
   15.98 +                                            int moncount,
   15.99 +                                            int caller_actual_parameters,
  15.100 +                                            int callee_param_count,
  15.101 +                                            int callee_locals,
  15.102 +                                            frame* caller,
  15.103 +                                            frame* interpreter_frame,
  15.104 +                                            bool is_top_frame,
  15.105 +                                            bool is_bottom_frame) {
  15.106 +
  15.107 +  assert(popframe_extra_args == 0, "FIX ME");
  15.108 +  // NOTE this code must exactly mimic what InterpreterGenerator::generate_compute_interpreter_state()
  15.109 +  // does as far as allocating an interpreter frame.
  15.110 +  // Set up the method, locals, and monitors.
  15.111 +  // The frame interpreter_frame is guaranteed to be the right size,
  15.112 +  // as determined by a previous call to the size_activation() method.
  15.113 +  // It is also guaranteed to be walkable even though it is in a skeletal state
   15.114 +  // NOTE: tempcount is the current size of the java expression stack. For topmost
   15.115 +  //       frames we will allocate a full-sized expression stack and not the cut-back
  15.116 +  //       version that non-top frames have.
  15.117 +
  15.118 +  int monitor_size = 0;
  15.119 +  int full_frame_size = 0;
  15.120 +  int frame_size = frame_size_helper(method->max_stack(), tempcount, moncount, callee_param_count, callee_locals,
  15.121 +                                     is_top_frame, monitor_size, full_frame_size);
  15.122 +
  15.123  #ifdef ASSERT
  15.124 -    assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
  15.125 +  assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
  15.126  #endif
  15.127  
  15.128 -    // MUCHO HACK
  15.129 -
  15.130 -    intptr_t* frame_bottom = (intptr_t*) ((intptr_t)interpreter_frame->sp() - (full_frame_size - frame_size));
  15.131 -
  15.132 -    /* Now fillin the interpreterState object */
  15.133 -
  15.134 -    // The state object is the first thing on the frame and easily located
  15.135 -
  15.136 -    interpreterState cur_state = (interpreterState) ((intptr_t)interpreter_frame->fp() - sizeof(BytecodeInterpreter));
  15.137 -
  15.138 -
  15.139 -    // Find the locals pointer. This is rather simple on x86 because there is no
  15.140 -    // confusing rounding at the callee to account for. We can trivially locate
  15.141 -    // our locals based on the current fp().
  15.142 -    // Note: the + 2 is for handling the "static long no_params() method" issue.
  15.143 -    // (too bad I don't really remember that issue well...)
  15.144 -
  15.145 -    intptr_t* locals;
  15.146 -    // If the caller is interpreted we need to make sure that locals points to the first
  15.147 -    // argument that the caller passed and not in an area where the stack might have been extended.
  15.148 -    // because the stack to stack to converter needs a proper locals value in order to remove the
  15.149 -    // arguments from the caller and place the result in the proper location. Hmm maybe it'd be
  15.150 -    // simpler if we simply stored the result in the BytecodeInterpreter object and let the c++ code
  15.151 -    // adjust the stack?? HMMM QQQ
  15.152 -    //
  15.153 -    if (caller->is_interpreted_frame()) {
  15.154 -      // locals must agree with the caller because it will be used to set the
  15.155 -      // caller's tos when we return.
  15.156 -      interpreterState prev  = caller->get_interpreterState();
  15.157 -      // stack() is prepushed.
  15.158 -      locals = prev->stack() + method->size_of_parameters();
  15.159 -      // locals = caller->unextended_sp() + (method->size_of_parameters() - 1);
  15.160 -      if (locals != interpreter_frame->fp() + frame::sender_sp_offset + (method->max_locals() - 1) + 2) {
  15.161 -        // os::breakpoint();
  15.162 -      }
  15.163 -    } else {
  15.164 -      // this is where a c2i would have placed locals (except for the +2)
  15.165 -      locals = interpreter_frame->fp() + frame::sender_sp_offset + (method->max_locals() - 1) + 2;
  15.166 +  // MUCHO HACK
  15.167 +
  15.168 +  intptr_t* frame_bottom = (intptr_t*) ((intptr_t)interpreter_frame->sp() - (full_frame_size - frame_size));
  15.169 +
  15.170 +  /* Now fillin the interpreterState object */
  15.171 +
  15.172 +  // The state object is the first thing on the frame and easily located
  15.173 +
  15.174 +  interpreterState cur_state = (interpreterState) ((intptr_t)interpreter_frame->fp() - sizeof(BytecodeInterpreter));
  15.175 +
  15.176 +
  15.177 +  // Find the locals pointer. This is rather simple on x86 because there is no
  15.178 +  // confusing rounding at the callee to account for. We can trivially locate
  15.179 +  // our locals based on the current fp().
  15.180 +  // Note: the + 2 is for handling the "static long no_params() method" issue.
  15.181 +  // (too bad I don't really remember that issue well...)
  15.182 +
  15.183 +  intptr_t* locals;
  15.184 +  // If the caller is interpreted we need to make sure that locals points to the first
   15.185 +  // argument that the caller passed and not in an area where the stack might have been extended,
   15.186 +  // because the stack-to-stack converter needs a proper locals value in order to remove the
  15.187 +  // arguments from the caller and place the result in the proper location. Hmm maybe it'd be
  15.188 +  // simpler if we simply stored the result in the BytecodeInterpreter object and let the c++ code
  15.189 +  // adjust the stack?? HMMM QQQ
  15.190 +  //
  15.191 +  if (caller->is_interpreted_frame()) {
  15.192 +    // locals must agree with the caller because it will be used to set the
  15.193 +    // caller's tos when we return.
  15.194 +    interpreterState prev  = caller->get_interpreterState();
  15.195 +    // stack() is prepushed.
  15.196 +    locals = prev->stack() + method->size_of_parameters();
  15.197 +    // locals = caller->unextended_sp() + (method->size_of_parameters() - 1);
  15.198 +    if (locals != interpreter_frame->fp() + frame::sender_sp_offset + (method->max_locals() - 1) + 2) {
  15.199 +      // os::breakpoint();
  15.200      }
  15.201 -
  15.202 -    intptr_t* monitor_base = (intptr_t*) cur_state;
  15.203 -    intptr_t* stack_base = (intptr_t*) ((intptr_t) monitor_base - monitor_size);
  15.204 -    /* +1 because stack is always prepushed */
  15.205 -    intptr_t* stack = (intptr_t*) ((intptr_t) stack_base - (tempcount + 1) * BytesPerWord);
  15.206 -
  15.207 -
  15.208 -    BytecodeInterpreter::layout_interpreterState(cur_state,
  15.209 -                                          caller,
  15.210 -                                          interpreter_frame,
  15.211 -                                          method,
  15.212 -                                          locals,
  15.213 -                                          stack,
  15.214 -                                          stack_base,
  15.215 -                                          monitor_base,
  15.216 -                                          frame_bottom,
  15.217 -                                          is_top_frame);
  15.218 -
  15.219 -    // BytecodeInterpreter::pd_layout_interpreterState(cur_state, interpreter_return_address, interpreter_frame->fp());
  15.220 +  } else {
  15.221 +    // this is where a c2i would have placed locals (except for the +2)
  15.222 +    locals = interpreter_frame->fp() + frame::sender_sp_offset + (method->max_locals() - 1) + 2;
  15.223    }
  15.224 -  return frame_size/BytesPerWord;
  15.225 +
  15.226 +  intptr_t* monitor_base = (intptr_t*) cur_state;
  15.227 +  intptr_t* stack_base = (intptr_t*) ((intptr_t) monitor_base - monitor_size);
  15.228 +  /* +1 because stack is always prepushed */
  15.229 +  intptr_t* stack = (intptr_t*) ((intptr_t) stack_base - (tempcount + 1) * BytesPerWord);
  15.230 +
  15.231 +
  15.232 +  BytecodeInterpreter::layout_interpreterState(cur_state,
  15.233 +                                               caller,
  15.234 +                                               interpreter_frame,
  15.235 +                                               method,
  15.236 +                                               locals,
  15.237 +                                               stack,
  15.238 +                                               stack_base,
  15.239 +                                               monitor_base,
  15.240 +                                               frame_bottom,
  15.241 +                                               is_top_frame);
  15.242 +
  15.243 +  // BytecodeInterpreter::pd_layout_interpreterState(cur_state, interpreter_return_address, interpreter_frame->fp());
  15.244  }
  15.245  
  15.246  #endif // CC_INTERP (all)
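
The refactoring above factors the shared arithmetic into frame_size_helper(), which reports both the full-expression-stack size and the "live temps only" size; size_activation() then keeps only the value it needs. A standalone sketch of that arithmetic, using assumed constants in place of the real overhead and word size:

static const int kBytesPerWord       = 8;    // assumed
static const int kFixedOverheadBytes = 128;  // stand-in for size_activation_helper()'s fixed part

static int frame_size_sketch(int max_stack, int tempcount, int monitor_bytes,
                             int extra_locals_bytes, bool is_top_frame,
                             int& full_frame_size) {
  int short_frame_size = kFixedOverheadBytes + extra_locals_bytes + monitor_bytes;
  full_frame_size   = short_frame_size + max_stack * kBytesPerWord;  // full expression stack
  short_frame_size += tempcount * kBytesPerWord;                     // live portion only
  return is_top_frame ? full_frame_size : short_frame_size;          // only the top frame is full size
}
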
    16.1 --- a/src/cpu/x86/vm/macroAssembler_x86.cpp	Tue Jun 17 15:49:31 2014 -0700
    16.2 +++ b/src/cpu/x86/vm/macroAssembler_x86.cpp	Tue Jun 17 22:03:39 2014 -0700
    16.3 @@ -1052,7 +1052,7 @@
    16.4    // was post-decremented.)  Skip this address by starting at i=1, and
    16.5    // touch a few more pages below.  N.B.  It is important to touch all
    16.6    // the way down to and including i=StackShadowPages.
    16.7 -  for (int i = 1; i <= StackShadowPages; i++) {
    16.8 +  for (int i = 1; i < StackShadowPages; i++) {
    16.9     // this could be any sized move but this can be a debugging crumb
   16.10      // so the bigger the better.
   16.11      movptr(Address(tmp, (-i*os::vm_page_size())), size );
   16.12 @@ -6096,7 +6096,7 @@
   16.13  
   16.14  
   16.15  // C2 compiled method's prolog code.
   16.16 -void MacroAssembler::verified_entry(int framesize, bool stack_bang, bool fp_mode_24b) {
   16.17 +void MacroAssembler::verified_entry(int framesize, int stack_bang_size, bool fp_mode_24b) {
   16.18  
   16.19    // WARNING: Initial instruction MUST be 5 bytes or longer so that
   16.20    // NativeJump::patch_verified_entry will be able to patch out the entry
   16.21 @@ -6104,18 +6104,20 @@
   16.22    // the frame allocation can be either 3 or 6 bytes. So if we don't do
   16.23    // stack bang then we must use the 6 byte frame allocation even if
   16.24    // we have no frame. :-(
   16.25 +  assert(stack_bang_size >= framesize || stack_bang_size <= 0, "stack bang size incorrect");
   16.26  
   16.27    assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
   16.28    // Remove word for return addr
   16.29    framesize -= wordSize;
   16.30 +  stack_bang_size -= wordSize;
   16.31  
   16.32    // Calls to C2R adapters often do not accept exceptional returns.
   16.33    // We require that their callers must bang for them.  But be careful, because
   16.34    // some VM calls (such as call site linkage) can use several kilobytes of
   16.35    // stack.  But the stack safety zone should account for that.
   16.36    // See bugs 4446381, 4468289, 4497237.
   16.37 -  if (stack_bang) {
   16.38 -    generate_stack_overflow_check(framesize);
   16.39 +  if (stack_bang_size > 0) {
   16.40 +    generate_stack_overflow_check(stack_bang_size);
   16.41  
   16.42      // We always push rbp, so that on return to interpreter rbp, will be
   16.43      // restored correctly and we can correct the stack.
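
verified_entry() now takes a bang size in bytes instead of a bool, with a non-positive value meaning "no bang"; both sizes are trimmed by the return-address word that is already on the stack. A hedged sketch of the new contract (the word size is hard-coded here purely for illustration):

void verified_entry_sketch(int framesize, int stack_bang_size) {
  const int word_size = 8;            // assumed 64-bit word
  framesize       -= word_size;       // return address is already pushed
  stack_bang_size -= word_size;
  if (stack_bang_size > 0) {
    // a generate_stack_overflow_check(stack_bang_size) would be emitted here
  }
  // ... push rbp and allocate framesize bytes ...
}
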
    17.1 --- a/src/cpu/x86/vm/macroAssembler_x86.hpp	Tue Jun 17 15:49:31 2014 -0700
    17.2 +++ b/src/cpu/x86/vm/macroAssembler_x86.hpp	Tue Jun 17 22:03:39 2014 -0700
    17.3 @@ -1170,7 +1170,7 @@
    17.4    void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }
    17.5  
    17.6    // C2 compiled method's prolog code.
    17.7 -  void verified_entry(int framesize, bool stack_bang, bool fp_mode_24b);
    17.8 +  void verified_entry(int framesize, int stack_bang_size, bool fp_mode_24b);
    17.9  
   17.10    // clear memory of size 'cnt' qwords, starting at 'base'.
   17.11    void clear_mem(Register base, Register cnt, Register rtmp);
    18.1 --- a/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Tue Jun 17 15:49:31 2014 -0700
    18.2 +++ b/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Tue Jun 17 22:03:39 2014 -0700
    18.3 @@ -3014,11 +3014,15 @@
    18.4    // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
    18.5    __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
    18.6  
    18.7 -  // Stack bang to make sure there's enough room for these interpreter frames.
    18.8 +#ifdef ASSERT
    18.9 +  // Compilers generate code that bangs the stack by as much as the
   18.10 +  // interpreter would need. So this stack banging should never
   18.11 +  // trigger a fault. Verify that it does not on non-product builds.
   18.12    if (UseStackBanging) {
   18.13      __ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
   18.14      __ bang_stack_size(rbx, rcx);
   18.15    }
   18.16 +#endif
   18.17  
   18.18    // Load array of frame pcs into ECX
   18.19    __ movptr(rcx,Address(rdi,Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
   18.20 @@ -3240,12 +3244,15 @@
   18.21    // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
   18.22    __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
   18.23  
   18.24 -  // Stack bang to make sure there's enough room for these interpreter frames.
   18.25 +#ifdef ASSERT
   18.26 +  // Compilers generate code that bangs the stack by as much as the
   18.27 +  // interpreter would need. So this stack banging should never
   18.28 +  // trigger a fault. Verify that it does not on non-product builds.
   18.29    if (UseStackBanging) {
   18.30      __ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
   18.31      __ bang_stack_size(rbx, rcx);
   18.32    }
   18.33 -
   18.34 +#endif
   18.35  
   18.36    // Load array of frame pcs into ECX
   18.37    __ movl(rcx,Address(rdi,Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
    19.1 --- a/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Tue Jun 17 15:49:31 2014 -0700
    19.2 +++ b/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Tue Jun 17 22:03:39 2014 -0700
    19.3 @@ -3484,11 +3484,15 @@
    19.4    // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
    19.5    __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
    19.6  
    19.7 -  // Stack bang to make sure there's enough room for these interpreter frames.
    19.8 +#ifdef ASSERT
    19.9 +  // Compilers generate code that bangs the stack by as much as the
   19.10 +  // interpreter would need. So this stack banging should never
   19.11 +  // trigger a fault. Verify that it does not on non-product builds.
   19.12    if (UseStackBanging) {
   19.13      __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
   19.14      __ bang_stack_size(rbx, rcx);
   19.15    }
   19.16 +#endif
   19.17  
   19.18    // Load address of array of frame pcs into rcx
   19.19    __ movptr(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
   19.20 @@ -3682,11 +3686,15 @@
   19.21    // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
   19.22    __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
   19.23  
   19.24 -  // Stack bang to make sure there's enough room for these interpreter frames.
   19.25 +#ifdef ASSERT
   19.26 +  // Compilers generate code that bangs the stack by as much as the
   19.27 +  // interpreter would need. So this stack banging should never
   19.28 +  // trigger a fault. Verify that it does not on non-product builds.
   19.29    if (UseStackBanging) {
   19.30      __ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
   19.31      __ bang_stack_size(rbx, rcx);
   19.32    }
   19.33 +#endif
   19.34  
   19.35    // Load address of array of frame pcs into rcx (address*)
   19.36    __ movptr(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
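
In all four deoptimization/uncommon-trap blobs the stack bang for the unpacked interpreter frames is now compiled only into debug builds, as a check that compiled code already banged enough. A standalone illustration of that "verify only in debug builds" pattern (NDEBUG stands in for a product build here; values are illustrative):

#include <cassert>

void unpack_frames_sketch(int total_interpreter_frame_bytes, int bytes_banged_by_compiler) {
#ifndef NDEBUG
  // Product builds skip this: the compiled caller is required to have banged enough.
  assert(bytes_banged_by_compiler >= total_interpreter_frame_bytes);
#endif
  (void)total_interpreter_frame_bytes;
  (void)bytes_banged_by_compiler;
  // ... lay out the interpreter frames without banging again ...
}
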
    20.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    20.2 +++ b/src/cpu/x86/vm/templateInterpreter_x86.cpp	Tue Jun 17 22:03:39 2014 -0700
    20.3 @@ -0,0 +1,124 @@
    20.4 +/*
    20.5 + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
    20.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    20.7 + *
    20.8 + * This code is free software; you can redistribute it and/or modify it
    20.9 + * under the terms of the GNU General Public License version 2 only, as
   20.10 + * published by the Free Software Foundation.
   20.11 + *
   20.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   20.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   20.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   20.15 + * version 2 for more details (a copy is included in the LICENSE file that
   20.16 + * accompanied this code).
   20.17 + *
   20.18 + * You should have received a copy of the GNU General Public License version
   20.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   20.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   20.21 + *
   20.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   20.23 + * or visit www.oracle.com if you need additional information or have any
   20.24 + * questions.
   20.25 + *
   20.26 + */
   20.27 +
   20.28 +#include "precompiled.hpp"
   20.29 +#include "ci/ciMethod.hpp"
   20.30 +#include "interpreter/interpreter.hpp"
   20.31 +#include "runtime/frame.inline.hpp"
   20.32 +
   20.33 +#ifndef CC_INTERP
   20.34 +
   20.35 +// asm based interpreter deoptimization helpers
   20.36 +int AbstractInterpreter::size_activation(int max_stack,
   20.37 +                                         int temps,
   20.38 +                                         int extra_args,
   20.39 +                                         int monitors,
   20.40 +                                         int callee_params,
   20.41 +                                         int callee_locals,
   20.42 +                                         bool is_top_frame) {
   20.43 +  // Note: This calculation must exactly parallel the frame setup
   20.44 +  // in AbstractInterpreterGenerator::generate_method_entry.
   20.45 +
   20.46 +  // fixed size of an interpreter frame:
   20.47 +  int overhead = frame::sender_sp_offset -
   20.48 +                 frame::interpreter_frame_initial_sp_offset;
   20.49 +  // Our locals were accounted for by the caller (or last_frame_adjust
   20.51 +  // on the transition). Since the callee parameters already account
   20.51 +  // for the callee's params we only need to account for the extra
   20.52 +  // locals.
   20.53 +  int size = overhead +
   20.54 +         (callee_locals - callee_params)*Interpreter::stackElementWords +
   20.55 +         monitors * frame::interpreter_frame_monitor_size() +
   20.56 +         temps* Interpreter::stackElementWords + extra_args;
   20.57 +
   20.58 +  return size;
   20.59 +}
   20.60 +
   20.61 +void AbstractInterpreter::layout_activation(Method* method,
   20.62 +                                            int tempcount,
   20.63 +                                            int popframe_extra_args,
   20.64 +                                            int moncount,
   20.65 +                                            int caller_actual_parameters,
   20.66 +                                            int callee_param_count,
   20.67 +                                            int callee_locals,
   20.68 +                                            frame* caller,
   20.69 +                                            frame* interpreter_frame,
   20.70 +                                            bool is_top_frame,
   20.71 +                                            bool is_bottom_frame) {
   20.72 +  // The frame interpreter_frame is guaranteed to be the right size,
   20.73 +  // as determined by a previous call to the size_activation() method.
   20.74 +  // It is also guaranteed to be walkable even though it is in a
   20.75 +  // skeletal state
   20.76 +
   20.77 +  int max_locals = method->max_locals() * Interpreter::stackElementWords;
   20.78 +  int extra_locals = (method->max_locals() - method->size_of_parameters()) *
   20.79 +    Interpreter::stackElementWords;
   20.80 +
   20.81 +#ifdef ASSERT
   20.82 +  if (!EnableInvokeDynamic) {
   20.83 +    // @@@ FIXME: Should we correct interpreter_frame_sender_sp in the calling sequences?
   20.84 +    // Probably, since deoptimization doesn't work yet.
   20.85 +    assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
   20.86 +  }
   20.87 +  assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable(2)");
   20.88 +#endif
   20.89 +
   20.90 +  interpreter_frame->interpreter_frame_set_method(method);
   20.91 +  // NOTE the difference in using sender_sp and
   20.92 +  // interpreter_frame_sender_sp: interpreter_frame_sender_sp is
   20.93 +  // the original sp of the caller (the unextended_sp) and
   20.94 +  // sender_sp is fp+8/16 (32bit/64bit) XXX
   20.95 +  intptr_t* locals = interpreter_frame->sender_sp() + max_locals - 1;
   20.96 +
   20.97 +#ifdef ASSERT
   20.98 +  if (caller->is_interpreted_frame()) {
   20.99 +    assert(locals < caller->fp() + frame::interpreter_frame_initial_sp_offset, "bad placement");
  20.100 +  }
  20.101 +#endif
  20.102 +
  20.103 +  interpreter_frame->interpreter_frame_set_locals(locals);
  20.104 +  BasicObjectLock* montop = interpreter_frame->interpreter_frame_monitor_begin();
  20.105 +  BasicObjectLock* monbot = montop - moncount;
  20.106 +  interpreter_frame->interpreter_frame_set_monitor_end(monbot);
  20.107 +
  20.108 +  // Set last_sp
  20.109 +  intptr_t*  esp = (intptr_t*) monbot -
  20.110 +    tempcount*Interpreter::stackElementWords -
  20.111 +    popframe_extra_args;
  20.112 +  interpreter_frame->interpreter_frame_set_last_sp(esp);
  20.113 +
  20.114 +  // All frames but the initial (oldest) interpreter frame we fill in have
  20.115 +  // a value for sender_sp that allows walking the stack but isn't
  20.116 +  // truly correct. Correct the value here.
  20.117 +  if (extra_locals != 0 &&
  20.118 +      interpreter_frame->sender_sp() ==
  20.119 +      interpreter_frame->interpreter_frame_sender_sp()) {
  20.120 +    interpreter_frame->set_interpreter_frame_sender_sp(caller->sp() +
  20.121 +                                                       extra_locals);
  20.122 +  }
  20.123 +  *interpreter_frame->interpreter_frame_cache_addr() =
  20.124 +    method->constants()->cache();
  20.125 +}
  20.126 +
  20.127 +#endif // CC_INTERP
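
The new shared size_activation() above returns a word count built from a fixed overhead plus extra locals, monitors, live temps and popframe args. A worked example of that formula with assumed constants (the real overhead and monitor sizes are platform frame offsets, not these numbers):

static const int kOverheadWords = 6;   // assumed stand-in for sender_sp_offset - initial_sp_offset
static const int kMonitorWords  = 2;   // assumed stand-in for interpreter_frame_monitor_size()

int size_activation_sketch(int callee_locals, int callee_params,
                           int monitors, int temps, int extra_args) {
  return kOverheadWords
       + (callee_locals - callee_params)      // only the extra (non-parameter) locals
       + monitors * kMonitorWords
       + temps + extra_args;                  // live expression stack + popframe args
}

// e.g. 5 locals / 3 params, 1 monitor, 3 live temps, 0 extra args:
// 6 + 2 + 2 + 3 + 0 == 13 words
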
    21.1 --- a/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Tue Jun 17 15:49:31 2014 -0700
    21.2 +++ b/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Tue Jun 17 22:03:39 2014 -0700
    21.3 @@ -1686,91 +1686,6 @@
    21.4    return overhead_size + method_stack + stub_code;
    21.5  }
    21.6  
    21.7 -// asm based interpreter deoptimization helpers
    21.8 -
    21.9 -int AbstractInterpreter::layout_activation(Method* method,
   21.10 -                                           int tempcount,
   21.11 -                                           int popframe_extra_args,
   21.12 -                                           int moncount,
   21.13 -                                           int caller_actual_parameters,
   21.14 -                                           int callee_param_count,
   21.15 -                                           int callee_locals,
   21.16 -                                           frame* caller,
   21.17 -                                           frame* interpreter_frame,
   21.18 -                                           bool is_top_frame,
   21.19 -                                           bool is_bottom_frame) {
   21.20 -  // Note: This calculation must exactly parallel the frame setup
   21.21 -  // in AbstractInterpreterGenerator::generate_method_entry.
   21.22 -  // If interpreter_frame!=NULL, set up the method, locals, and monitors.
   21.23 -  // The frame interpreter_frame, if not NULL, is guaranteed to be the right size,
   21.24 -  // as determined by a previous call to this method.
   21.25 -  // It is also guaranteed to be walkable even though it is in a skeletal state
   21.26 -  // NOTE: return size is in words not bytes
   21.27 -
   21.28 -  // fixed size of an interpreter frame:
   21.29 -  int max_locals = method->max_locals() * Interpreter::stackElementWords;
   21.30 -  int extra_locals = (method->max_locals() - method->size_of_parameters()) *
   21.31 -                     Interpreter::stackElementWords;
   21.32 -
   21.33 -  int overhead = frame::sender_sp_offset - frame::interpreter_frame_initial_sp_offset;
   21.34 -
   21.35 -  // Our locals were accounted for by the caller (or last_frame_adjust on the transistion)
   21.36 -  // Since the callee parameters already account for the callee's params we only need to account for
   21.37 -  // the extra locals.
   21.38 -
   21.39 -
   21.40 -  int size = overhead +
   21.41 -         ((callee_locals - callee_param_count)*Interpreter::stackElementWords) +
   21.42 -         (moncount*frame::interpreter_frame_monitor_size()) +
   21.43 -         tempcount*Interpreter::stackElementWords + popframe_extra_args;
   21.44 -
   21.45 -  if (interpreter_frame != NULL) {
   21.46 -#ifdef ASSERT
   21.47 -    if (!EnableInvokeDynamic)
   21.48 -      // @@@ FIXME: Should we correct interpreter_frame_sender_sp in the calling sequences?
   21.49 -      // Probably, since deoptimization doesn't work yet.
   21.50 -      assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
   21.51 -    assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable(2)");
   21.52 -#endif
   21.53 -
   21.54 -    interpreter_frame->interpreter_frame_set_method(method);
   21.55 -    // NOTE the difference in using sender_sp and interpreter_frame_sender_sp
   21.56 -    // interpreter_frame_sender_sp is the original sp of the caller (the unextended_sp)
   21.57 -    // and sender_sp is fp+8
   21.58 -    intptr_t* locals = interpreter_frame->sender_sp() + max_locals - 1;
   21.59 -
   21.60 -#ifdef ASSERT
   21.61 -    if (caller->is_interpreted_frame()) {
   21.62 -      assert(locals < caller->fp() + frame::interpreter_frame_initial_sp_offset, "bad placement");
   21.63 -    }
   21.64 -#endif
   21.65 -
   21.66 -    interpreter_frame->interpreter_frame_set_locals(locals);
   21.67 -    BasicObjectLock* montop = interpreter_frame->interpreter_frame_monitor_begin();
   21.68 -    BasicObjectLock* monbot = montop - moncount;
   21.69 -    interpreter_frame->interpreter_frame_set_monitor_end(monbot);
   21.70 -
   21.71 -    // Set last_sp
   21.72 -    intptr_t*  rsp = (intptr_t*) monbot  -
   21.73 -                     tempcount*Interpreter::stackElementWords -
   21.74 -                     popframe_extra_args;
   21.75 -    interpreter_frame->interpreter_frame_set_last_sp(rsp);
   21.76 -
   21.77 -    // All frames but the initial (oldest) interpreter frame we fill in have a
   21.78 -    // value for sender_sp that allows walking the stack but isn't
   21.79 -    // truly correct. Correct the value here.
   21.80 -
   21.81 -    if (extra_locals != 0 &&
   21.82 -        interpreter_frame->sender_sp() == interpreter_frame->interpreter_frame_sender_sp() ) {
   21.83 -      interpreter_frame->set_interpreter_frame_sender_sp(caller->sp() + extra_locals);
   21.84 -    }
   21.85 -    *interpreter_frame->interpreter_frame_cache_addr() =
   21.86 -      method->constants()->cache();
   21.87 -  }
   21.88 -  return size;
   21.89 -}
   21.90 -
   21.91 -
   21.92  //------------------------------------------------------------------------------------------------------------------------
   21.93  // Exceptions
   21.94  
    22.1 --- a/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Tue Jun 17 15:49:31 2014 -0700
    22.2 +++ b/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Tue Jun 17 22:03:39 2014 -0700
    22.3 @@ -1695,87 +1695,6 @@
    22.4    return (overhead_size + method_stack + stub_code);
    22.5  }
    22.6  
    22.7 -int AbstractInterpreter::layout_activation(Method* method,
    22.8 -                                           int tempcount,
    22.9 -                                           int popframe_extra_args,
   22.10 -                                           int moncount,
   22.11 -                                           int caller_actual_parameters,
   22.12 -                                           int callee_param_count,
   22.13 -                                           int callee_locals,
   22.14 -                                           frame* caller,
   22.15 -                                           frame* interpreter_frame,
   22.16 -                                           bool is_top_frame,
   22.17 -                                           bool is_bottom_frame) {
   22.18 -  // Note: This calculation must exactly parallel the frame setup
   22.19 -  // in AbstractInterpreterGenerator::generate_method_entry.
   22.20 -  // If interpreter_frame!=NULL, set up the method, locals, and monitors.
   22.21 -  // The frame interpreter_frame, if not NULL, is guaranteed to be the
   22.22 -  // right size, as determined by a previous call to this method.
   22.23 -  // It is also guaranteed to be walkable even though it is in a skeletal state
   22.24 -
   22.25 -  // fixed size of an interpreter frame:
   22.26 -  int max_locals = method->max_locals() * Interpreter::stackElementWords;
   22.27 -  int extra_locals = (method->max_locals() - method->size_of_parameters()) *
   22.28 -                     Interpreter::stackElementWords;
   22.29 -
   22.30 -  int overhead = frame::sender_sp_offset -
   22.31 -                 frame::interpreter_frame_initial_sp_offset;
   22.32 -  // Our locals were accounted for by the caller (or last_frame_adjust
   22.33 -  // on the transistion) Since the callee parameters already account
   22.34 -  // for the callee's params we only need to account for the extra
   22.35 -  // locals.
   22.36 -  int size = overhead +
   22.37 -         (callee_locals - callee_param_count)*Interpreter::stackElementWords +
   22.38 -         moncount * frame::interpreter_frame_monitor_size() +
   22.39 -         tempcount* Interpreter::stackElementWords + popframe_extra_args;
   22.40 -  if (interpreter_frame != NULL) {
   22.41 -#ifdef ASSERT
   22.42 -    if (!EnableInvokeDynamic)
   22.43 -      // @@@ FIXME: Should we correct interpreter_frame_sender_sp in the calling sequences?
   22.44 -      // Probably, since deoptimization doesn't work yet.
   22.45 -      assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
   22.46 -    assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable(2)");
   22.47 -#endif
   22.48 -
   22.49 -    interpreter_frame->interpreter_frame_set_method(method);
   22.50 -    // NOTE the difference in using sender_sp and
   22.51 -    // interpreter_frame_sender_sp interpreter_frame_sender_sp is
   22.52 -    // the original sp of the caller (the unextended_sp) and
   22.53 -    // sender_sp is fp+16 XXX
   22.54 -    intptr_t* locals = interpreter_frame->sender_sp() + max_locals - 1;
   22.55 -
   22.56 -#ifdef ASSERT
   22.57 -    if (caller->is_interpreted_frame()) {
   22.58 -      assert(locals < caller->fp() + frame::interpreter_frame_initial_sp_offset, "bad placement");
   22.59 -    }
   22.60 -#endif
   22.61 -
   22.62 -    interpreter_frame->interpreter_frame_set_locals(locals);
   22.63 -    BasicObjectLock* montop = interpreter_frame->interpreter_frame_monitor_begin();
   22.64 -    BasicObjectLock* monbot = montop - moncount;
   22.65 -    interpreter_frame->interpreter_frame_set_monitor_end(monbot);
   22.66 -
   22.67 -    // Set last_sp
   22.68 -    intptr_t*  esp = (intptr_t*) monbot -
   22.69 -                     tempcount*Interpreter::stackElementWords -
   22.70 -                     popframe_extra_args;
   22.71 -    interpreter_frame->interpreter_frame_set_last_sp(esp);
   22.72 -
   22.73 -    // All frames but the initial (oldest) interpreter frame we fill in have
   22.74 -    // a value for sender_sp that allows walking the stack but isn't
   22.75 -    // truly correct. Correct the value here.
   22.76 -    if (extra_locals != 0 &&
   22.77 -        interpreter_frame->sender_sp() ==
   22.78 -        interpreter_frame->interpreter_frame_sender_sp()) {
   22.79 -      interpreter_frame->set_interpreter_frame_sender_sp(caller->sp() +
   22.80 -                                                         extra_locals);
   22.81 -    }
   22.82 -    *interpreter_frame->interpreter_frame_cache_addr() =
   22.83 -      method->constants()->cache();
   22.84 -  }
   22.85 -  return size;
   22.86 -}
   22.87 -
   22.88  //-----------------------------------------------------------------------------
   22.89  // Exceptions
   22.90  
    23.1 --- a/src/cpu/x86/vm/x86_32.ad	Tue Jun 17 15:49:31 2014 -0700
    23.2 +++ b/src/cpu/x86/vm/x86_32.ad	Tue Jun 17 22:03:39 2014 -0700
    23.3 @@ -512,14 +512,15 @@
    23.4  void MachPrologNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
    23.5    Compile* C = ra_->C;
    23.6  
    23.7 -  int framesize = C->frame_slots() << LogBytesPerInt;
    23.8 +  int framesize = C->frame_size_in_bytes();
    23.9 +  int bangsize = C->bang_size_in_bytes();
   23.10    assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
   23.11    // Remove wordSize for return addr which is already pushed.
   23.12    framesize -= wordSize;
   23.13  
   23.14 -  if (C->need_stack_bang(framesize)) {
   23.15 +  if (C->need_stack_bang(bangsize)) {
   23.16      framesize -= wordSize;
   23.17 -    st->print("# stack bang");
   23.18 +    st->print("# stack bang (%d bytes)", bangsize);
   23.19      st->print("\n\t");
   23.20      st->print("PUSH   EBP\t# Save EBP");
   23.21      if (framesize) {
   23.22 @@ -563,9 +564,10 @@
   23.23    Compile* C = ra_->C;
   23.24    MacroAssembler _masm(&cbuf);
   23.25  
   23.26 -  int framesize = C->frame_slots() << LogBytesPerInt;
   23.27 -
   23.28 -  __ verified_entry(framesize, C->need_stack_bang(framesize), C->in_24_bit_fp_mode());
   23.29 +  int framesize = C->frame_size_in_bytes();
   23.30 +  int bangsize = C->bang_size_in_bytes();
   23.31 +
   23.32 +  __ verified_entry(framesize, C->need_stack_bang(bangsize)?bangsize:0, C->in_24_bit_fp_mode());
   23.33  
   23.34    C->set_frame_complete(cbuf.insts_size());
   23.35  
   23.36 @@ -589,7 +591,7 @@
   23.37  #ifndef PRODUCT
   23.38  void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
   23.39    Compile *C = ra_->C;
   23.40 -  int framesize = C->frame_slots() << LogBytesPerInt;
   23.41 +  int framesize = C->frame_size_in_bytes();
   23.42    assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
   23.43    // Remove two words for return addr and rbp,
   23.44    framesize -= 2*wordSize;
   23.45 @@ -629,7 +631,7 @@
   23.46      masm.fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
   23.47    }
   23.48  
   23.49 -  int framesize = C->frame_slots() << LogBytesPerInt;
   23.50 +  int framesize = C->frame_size_in_bytes();
   23.51    assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
   23.52    // Remove two words for return addr and rbp,
   23.53    framesize -= 2*wordSize;
   23.54 @@ -663,7 +665,7 @@
   23.55    if (C->max_vector_size() > 16) size += 3; // vzeroupper
   23.56    if (do_polling() && C->is_method_compilation()) size += 6;
   23.57  
   23.58 -  int framesize = C->frame_slots() << LogBytesPerInt;
   23.59 +  int framesize = C->frame_size_in_bytes();
   23.60    assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
   23.61    // Remove two words for return addr and rbp,
   23.62    framesize -= 2*wordSize;
    24.1 --- a/src/cpu/x86/vm/x86_64.ad	Tue Jun 17 15:49:31 2014 -0700
    24.2 +++ b/src/cpu/x86/vm/x86_64.ad	Tue Jun 17 22:03:39 2014 -0700
    24.3 @@ -713,14 +713,15 @@
    24.4  void MachPrologNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
    24.5    Compile* C = ra_->C;
    24.6  
    24.7 -  int framesize = C->frame_slots() << LogBytesPerInt;
    24.8 +  int framesize = C->frame_size_in_bytes();
    24.9 +  int bangsize = C->bang_size_in_bytes();
   24.10    assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
   24.11    // Remove wordSize for return addr which is already pushed.
   24.12    framesize -= wordSize;
   24.13  
   24.14 -  if (C->need_stack_bang(framesize)) {
   24.15 +  if (C->need_stack_bang(bangsize)) {
   24.16      framesize -= wordSize;
   24.17 -    st->print("# stack bang");
   24.18 +    st->print("# stack bang (%d bytes)", bangsize);
   24.19      st->print("\n\t");
   24.20      st->print("pushq   rbp\t# Save rbp");
   24.21      if (framesize) {
   24.22 @@ -751,9 +752,10 @@
   24.23    Compile* C = ra_->C;
   24.24    MacroAssembler _masm(&cbuf);
   24.25  
   24.26 -  int framesize = C->frame_slots() << LogBytesPerInt;
   24.27 -
   24.28 -  __ verified_entry(framesize, C->need_stack_bang(framesize), false);
   24.29 +  int framesize = C->frame_size_in_bytes();
   24.30 +  int bangsize = C->bang_size_in_bytes();
   24.31 +
   24.32 +  __ verified_entry(framesize, C->need_stack_bang(bangsize)?bangsize:0, false);
   24.33  
   24.34    C->set_frame_complete(cbuf.insts_size());
   24.35  
   24.36 @@ -786,7 +788,7 @@
   24.37      st->cr(); st->print("\t");
   24.38    }
   24.39  
   24.40 -  int framesize = C->frame_slots() << LogBytesPerInt;
   24.41 +  int framesize = C->frame_size_in_bytes();
   24.42    assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
   24.43    // Remove word for return adr already pushed
   24.44    // and RBP
   24.45 @@ -822,7 +824,7 @@
   24.46      __ vzeroupper();
   24.47    }
   24.48  
   24.49 -  int framesize = C->frame_slots() << LogBytesPerInt;
   24.50 +  int framesize = C->frame_size_in_bytes();
   24.51    assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
   24.52    // Remove word for return adr already pushed
   24.53    // and RBP
    25.1 --- a/src/cpu/zero/vm/cppInterpreter_zero.cpp	Tue Jun 17 15:49:31 2014 -0700
    25.2 +++ b/src/cpu/zero/vm/cppInterpreter_zero.cpp	Tue Jun 17 22:03:39 2014 -0700
    25.3 @@ -916,17 +916,32 @@
    25.4    return (InterpreterFrame *) fp;
    25.5  }
    25.6  
    25.7 -int AbstractInterpreter::layout_activation(Method* method,
    25.8 -                                           int       tempcount,
    25.9 -                                           int       popframe_extra_args,
   25.10 -                                           int       moncount,
   25.11 -                                           int       caller_actual_parameters,
   25.12 -                                           int       callee_param_count,
   25.13 -                                           int       callee_locals,
   25.14 -                                           frame*    caller,
   25.15 -                                           frame*    interpreter_frame,
   25.16 -                                           bool      is_top_frame,
   25.17 -                                           bool      is_bottom_frame) {
   25.18 +int AbstractInterpreter::size_activation(int       max_stack,
   25.19 +                                         int       tempcount,
   25.20 +                                         int       extra_args,
   25.21 +                                         int       moncount,
   25.22 +                                         int       callee_param_count,
   25.23 +                                         int       callee_locals,
   25.24 +                                         bool      is_top_frame) {
   25.25 +  int header_words        = InterpreterFrame::header_words;
   25.26 +  int monitor_words       = moncount * frame::interpreter_frame_monitor_size();
   25.27 +  int stack_words         = is_top_frame ? max_stack : tempcount;
   25.28 +  int callee_extra_locals = callee_locals - callee_param_count;
   25.29 +
   25.30 +  return header_words + monitor_words + stack_words + callee_extra_locals;
   25.31 +}
   25.32 +
   25.33 +void AbstractInterpreter::layout_activation(Method* method,
   25.34 +                                            int       tempcount,
   25.35 +                                            int       popframe_extra_args,
   25.36 +                                            int       moncount,
   25.37 +                                            int       caller_actual_parameters,
   25.38 +                                            int       callee_param_count,
   25.39 +                                            int       callee_locals,
   25.40 +                                            frame*    caller,
   25.41 +                                            frame*    interpreter_frame,
   25.42 +                                            bool      is_top_frame,
   25.43 +                                            bool      is_bottom_frame) {
   25.44    assert(popframe_extra_args == 0, "what to do?");
   25.45    assert(!is_top_frame || (!callee_locals && !callee_param_count),
   25.46           "top frame should have no caller");
   25.47 @@ -935,39 +950,31 @@
   25.48    // does (the full InterpreterFrame::build, that is, not the
   25.49    // one that creates empty frames for the deoptimizer).
   25.50    //
   25.51 -  // If interpreter_frame is not NULL then it will be filled in.
   25.52 -  // It's size is determined by a previous call to this method,
   25.53 -  // so it should be correct.
    25.54 +  // interpreter_frame will be filled in.  Its size is determined by
    25.55 +  // a previous call to the size_activation() method.
   25.56    //
   25.57    // Note that tempcount is the current size of the expression
   25.58    // stack.  For top most frames we will allocate a full sized
   25.59    // expression stack and not the trimmed version that non-top
   25.60    // frames have.
   25.61  
   25.62 -  int header_words        = InterpreterFrame::header_words;
   25.63    int monitor_words       = moncount * frame::interpreter_frame_monitor_size();
   25.64 -  int stack_words         = is_top_frame ? method->max_stack() : tempcount;
   25.65 -  int callee_extra_locals = callee_locals - callee_param_count;
   25.66 +  intptr_t *locals        = interpreter_frame->fp() + method->max_locals();
   25.67 +  interpreterState istate = interpreter_frame->get_interpreterState();
   25.68 +  intptr_t *monitor_base  = (intptr_t*) istate;
   25.69 +  intptr_t *stack_base    = monitor_base - monitor_words;
   25.70 +  intptr_t *stack         = stack_base - tempcount - 1;
   25.71  
   25.72 -  if (interpreter_frame) {
   25.73 -    intptr_t *locals        = interpreter_frame->fp() + method->max_locals();
   25.74 -    interpreterState istate = interpreter_frame->get_interpreterState();
   25.75 -    intptr_t *monitor_base  = (intptr_t*) istate;
   25.76 -    intptr_t *stack_base    = monitor_base - monitor_words;
   25.77 -    intptr_t *stack         = stack_base - tempcount - 1;
   25.78 -
   25.79 -    BytecodeInterpreter::layout_interpreterState(istate,
   25.80 -                                                 caller,
   25.81 -                                                 NULL,
   25.82 -                                                 method,
   25.83 -                                                 locals,
   25.84 -                                                 stack,
   25.85 -                                                 stack_base,
   25.86 -                                                 monitor_base,
   25.87 -                                                 NULL,
   25.88 -                                                 is_top_frame);
   25.89 -  }
   25.90 -  return header_words + monitor_words + stack_words + callee_extra_locals;
   25.91 +  BytecodeInterpreter::layout_interpreterState(istate,
   25.92 +                                               caller,
   25.93 +                                               NULL,
   25.94 +                                               method,
   25.95 +                                               locals,
   25.96 +                                               stack,
   25.97 +                                               stack_base,
   25.98 +                                               monitor_base,
   25.99 +                                               NULL,
  25.100 +                                               is_top_frame);
  25.101  }
  25.102  
  25.103  void BytecodeInterpreter::layout_interpreterState(interpreterState istate,
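The change above splits the Zero interpreter's old layout_activation() into a sizing pass (size_activation) that the deoptimizer can call before any frame exists, and a fill-in pass (layout_activation) that assumes the frame has already been allocated with exactly that size. A minimal standalone sketch of that two-phase pattern, assuming hypothetical names (FrameLayout, size_words, fill_in) that are not taken from the HotSpot sources:

    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Hypothetical model of the two-phase activation layout: phase 1
    // computes the frame size only, phase 2 fills in a frame that the
    // caller has already allocated with that size.
    struct FrameLayout {
        int header_words;
        int monitor_words;
        int stack_words;
        int extra_local_words;

        int size_words() const {
            return header_words + monitor_words + stack_words + extra_local_words;
        }

        // Fill in an already-allocated buffer of size_words() slots.
        void fill_in(std::vector<intptr_t>& frame) const {
            frame.assign(static_cast<size_t>(size_words()), 0);
        }
    };

    int main() {
        FrameLayout layout{2 /*header*/, 4 /*monitors*/, 6 /*stack*/, 1 /*locals*/};
        std::vector<intptr_t> frame;
        // Size first (so stack space can be budgeted), then lay out.
        std::cout << "frame needs " << layout.size_words() << " words\n";
        layout.fill_in(frame);
        std::cout << "filled " << frame.size() << " slots\n";
        return 0;
    }

The point of the split is that sizing needs no frame pointer at all, so callers can compute worst-case stack requirements without building a frame.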
    26.1 --- a/src/share/vm/c1/c1_Compilation.cpp	Tue Jun 17 15:49:31 2014 -0700
    26.2 +++ b/src/share/vm/c1/c1_Compilation.cpp	Tue Jun 17 22:03:39 2014 -0700
    26.3 @@ -546,6 +546,7 @@
    26.4  , _code(buffer_blob)
    26.5  , _has_access_indexed(false)
    26.6  , _current_instruction(NULL)
    26.7 +, _interpreter_frame_size(0)
    26.8  #ifndef PRODUCT
    26.9  , _last_instruction_printed(NULL)
   26.10  #endif // PRODUCT
    27.1 --- a/src/share/vm/c1/c1_Compilation.hpp	Tue Jun 17 15:49:31 2014 -0700
    27.2 +++ b/src/share/vm/c1/c1_Compilation.hpp	Tue Jun 17 22:03:39 2014 -0700
    27.3 @@ -88,6 +88,7 @@
    27.4    CodeOffsets        _offsets;
    27.5    CodeBuffer         _code;
    27.6    bool               _has_access_indexed;
    27.7 +  int                _interpreter_frame_size; // Stack space needed in case of a deoptimization
    27.8  
    27.9    // compilation helpers
   27.10    void initialize();
   27.11 @@ -262,6 +263,18 @@
   27.12  
   27.13    // Dump inlining replay data to the stream.
   27.14    void dump_inline_data(outputStream* out) { /* do nothing now */ }
   27.15 +
   27.16 +  // How much stack space would the interpreter need in case of a
   27.17 +  // deoptimization (worst case)
   27.18 +  void update_interpreter_frame_size(int size) {
   27.19 +    if (_interpreter_frame_size < size) {
   27.20 +      _interpreter_frame_size = size;
   27.21 +    }
   27.22 +  }
   27.23 +
   27.24 +  int interpreter_frame_size() const {
   27.25 +    return _interpreter_frame_size;
   27.26 +  }
   27.27  };
   27.28  
   27.29  
    28.1 --- a/src/share/vm/c1/c1_IR.cpp	Tue Jun 17 15:49:31 2014 -0700
    28.2 +++ b/src/share/vm/c1/c1_IR.cpp	Tue Jun 17 22:03:39 2014 -0700
    28.3 @@ -226,8 +226,38 @@
    28.4    _oop_map->set_oop(name);
    28.5  }
    28.6  
    28.7 +// Mirror the stack size calculation in the deopt code
    28.8 +// How much stack space would we need at this point in the program in
    28.9 +// case of deoptimization?
   28.10 +int CodeEmitInfo::interpreter_frame_size() const {
   28.11 +  ValueStack* state = _stack;
   28.12 +  int size = 0;
   28.13 +  int callee_parameters = 0;
   28.14 +  int callee_locals = 0;
   28.15 +  int extra_args = state->scope()->method()->max_stack() - state->stack_size();
   28.16  
   28.17 +  while (state != NULL) {
   28.18 +    int locks = state->locks_size();
   28.19 +    int temps = state->stack_size();
   28.20 +    bool is_top_frame = (state == _stack);
   28.21 +    ciMethod* method = state->scope()->method();
   28.22  
   28.23 +    int frame_size = BytesPerWord * Interpreter::size_activation(method->max_stack(),
   28.24 +                                                                 temps + callee_parameters,
   28.25 +                                                                 extra_args,
   28.26 +                                                                 locks,
   28.27 +                                                                 callee_parameters,
   28.28 +                                                                 callee_locals,
   28.29 +                                                                 is_top_frame);
   28.30 +    size += frame_size;
   28.31 +
   28.32 +    callee_parameters = method->size_of_parameters();
   28.33 +    callee_locals = method->max_locals();
   28.34 +    extra_args = 0;
   28.35 +    state = state->caller_state();
   28.36 +  }
   28.37 +  return size + Deoptimization::last_frame_adjust(0, callee_locals) * BytesPerWord;
   28.38 +}
   28.39  
   28.40  // Implementation of IR
   28.41  
    29.1 --- a/src/share/vm/c1/c1_IR.hpp	Tue Jun 17 15:49:31 2014 -0700
    29.2 +++ b/src/share/vm/c1/c1_IR.hpp	Tue Jun 17 22:03:39 2014 -0700
    29.3 @@ -280,6 +280,8 @@
    29.4  
    29.5    bool     is_method_handle_invoke() const { return _is_method_handle_invoke;     }
    29.6    void set_is_method_handle_invoke(bool x) {        _is_method_handle_invoke = x; }
    29.7 +
    29.8 +  int interpreter_frame_size() const;
    29.9  };
   29.10  
   29.11  
    30.1 --- a/src/share/vm/c1/c1_LIRAssembler.cpp	Tue Jun 17 15:49:31 2014 -0700
    30.2 +++ b/src/share/vm/c1/c1_LIRAssembler.cpp	Tue Jun 17 22:03:39 2014 -0700
    30.3 @@ -185,6 +185,13 @@
    30.4    return _masm->pc();
    30.5  }
    30.6  
    30.7 +// To bang the stack of this compiled method we use the stack size
    30.8 +// that the interpreter would need in case of a deoptimization. This
    30.9 +// removes the need to bang the stack in the deoptimization blob which
   30.10 +// in turn simplifies stack overflow handling.
   30.11 +int LIR_Assembler::bang_size_in_bytes() const {
   30.12 +  return MAX2(initial_frame_size_in_bytes(), _compilation->interpreter_frame_size());
   30.13 +}
   30.14  
   30.15  void LIR_Assembler::emit_exception_entries(ExceptionInfoList* info_list) {
   30.16    for (int i = 0; i < info_list->length(); i++) {
   30.17 @@ -792,7 +799,7 @@
   30.18  
   30.19  
   30.20  void LIR_Assembler::build_frame() {
   30.21 -  _masm->build_frame(initial_frame_size_in_bytes());
   30.22 +  _masm->build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
   30.23  }
   30.24  
   30.25  
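The bang_size_in_bytes() added above makes the C1 method-entry bang cover the worst-case interpreter frame as well as the compiled frame, which is what allows the deoptimization blob to stop banging the stack itself. A small hedged sketch of that choice, using std::max and made-up byte counts rather than the real compiler queries:

    #include <algorithm>
    #include <iostream>

    // Hypothetical numbers standing in for the compiled frame size and the
    // accumulated worst-case interpreter frame size; the real values come
    // from the compiler, not from this sketch.
    int bang_size_in_bytes(int compiled_frame_bytes, int interpreter_frame_bytes) {
        // Bang for whichever frame is larger, so a later deoptimization
        // never needs an additional bang.
        return std::max(compiled_frame_bytes, interpreter_frame_bytes);
    }

    int main() {
        std::cout << bang_size_in_bytes(96, 240) << " bytes banged at method entry\n";
        return 0;
    }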
    31.1 --- a/src/share/vm/c1/c1_LIRAssembler.hpp	Tue Jun 17 15:49:31 2014 -0700
    31.2 +++ b/src/share/vm/c1/c1_LIRAssembler.hpp	Tue Jun 17 22:03:39 2014 -0700
    31.3 @@ -132,7 +132,8 @@
    31.4    int code_offset() const;
    31.5    address pc() const;
    31.6  
    31.7 -  int  initial_frame_size_in_bytes();
    31.8 +  int  initial_frame_size_in_bytes() const;
    31.9 +  int  bang_size_in_bytes() const;
   31.10  
   31.11    // test for constants which can be encoded directly in instructions
   31.12    static bool is_small_constant(LIR_Opr opr);
    32.1 --- a/src/share/vm/c1/c1_LinearScan.cpp	Tue Jun 17 15:49:31 2014 -0700
    32.2 +++ b/src/share/vm/c1/c1_LinearScan.cpp	Tue Jun 17 22:03:39 2014 -0700
    32.3 @@ -2441,6 +2441,9 @@
    32.4      CodeEmitInfo* info = visitor.info_at(i);
    32.5      OopMap* oop_map = first_oop_map;
    32.6  
    32.7 +    // compute worst case interpreter size in case of a deoptimization
    32.8 +    _compilation->update_interpreter_frame_size(info->interpreter_frame_size());
    32.9 +
   32.10      if (info->stack()->locks_size() != first_info->stack()->locks_size()) {
   32.11        // this info has a different number of locks then the precomputed oop map
   32.12        // (possible for lock and unlock instructions) -> compute oop map with
    33.1 --- a/src/share/vm/c1/c1_MacroAssembler.hpp	Tue Jun 17 15:49:31 2014 -0700
    33.2 +++ b/src/share/vm/c1/c1_MacroAssembler.hpp	Tue Jun 17 22:03:39 2014 -0700
    33.3 @@ -39,7 +39,7 @@
    33.4    void explicit_null_check(Register base);
    33.5  
    33.6    void inline_cache_check(Register receiver, Register iCache);
    33.7 -  void build_frame(int frame_size_in_bytes);
    33.8 +  void build_frame(int frame_size_in_bytes, int bang_size_in_bytes);
    33.9    void remove_frame(int frame_size_in_bytes);
   33.10  
   33.11    void unverified_entry(Register receiver, Register ic_klass);
    34.1 --- a/src/share/vm/ci/ciMethod.cpp	Tue Jun 17 15:49:31 2014 -0700
    34.2 +++ b/src/share/vm/ci/ciMethod.cpp	Tue Jun 17 22:03:39 2014 -0700
    34.3 @@ -80,6 +80,7 @@
    34.4    _code_size          = h_m()->code_size();
    34.5    _intrinsic_id       = h_m()->intrinsic_id();
    34.6    _handler_count      = h_m()->exception_table_length();
    34.7 +  _size_of_parameters = h_m()->size_of_parameters();
    34.8    _uses_monitors      = h_m()->access_flags().has_monitor_bytecodes();
    34.9    _balanced_monitors  = !_uses_monitors || h_m()->access_flags().is_monitor_matching();
   34.10    _is_c1_compilable   = !h_m()->is_not_c1_compilable();
    35.1 --- a/src/share/vm/ci/ciMethod.hpp	Tue Jun 17 15:49:31 2014 -0700
    35.2 +++ b/src/share/vm/ci/ciMethod.hpp	Tue Jun 17 22:03:39 2014 -0700
    35.3 @@ -71,6 +71,7 @@
    35.4    int _interpreter_invocation_count;
    35.5    int _interpreter_throwout_count;
    35.6    int _instructions_size;
    35.7 +  int _size_of_parameters;
    35.8  
    35.9    bool _uses_monitors;
   35.10    bool _balanced_monitors;
   35.11 @@ -166,6 +167,7 @@
   35.12    int exception_table_length() const             { check_is_loaded(); return _handler_count; }
   35.13    int interpreter_invocation_count() const       { check_is_loaded(); return _interpreter_invocation_count; }
   35.14    int interpreter_throwout_count() const         { check_is_loaded(); return _interpreter_throwout_count; }
   35.15 +  int size_of_parameters() const                 { check_is_loaded(); return _size_of_parameters; }
   35.16  
   35.17    // Code size for inlining decisions.
   35.18    int code_size_for_inlining();
   35.19 @@ -241,7 +243,6 @@
   35.20  
   35.21    ciField*      get_field_at_bci( int bci, bool &will_link);
   35.22    ciMethod*     get_method_at_bci(int bci, bool &will_link, ciSignature* *declared_signature);
   35.23 -
   35.24    // Given a certain calling environment, find the monomorphic target
   35.25    // for the call.  Return NULL if the call is not monomorphic in
   35.26    // its calling environment.
    36.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Tue Jun 17 15:49:31 2014 -0700
    36.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Tue Jun 17 22:03:39 2014 -0700
    36.3 @@ -310,8 +310,7 @@
    36.4                               _cmsGen->refs_discovery_is_mt(),     // mt discovery
    36.5                               (int) MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
    36.6                               _cmsGen->refs_discovery_is_atomic(), // discovery is not atomic
    36.7 -                             &_is_alive_closure,                  // closure for liveness info
    36.8 -                             false);                              // next field updates do not need write barrier
    36.9 +                             &_is_alive_closure);                 // closure for liveness info
   36.10      // Initialize the _ref_processor field of CMSGen
   36.11      _cmsGen->set_ref_processor(_ref_processor);
   36.12  
    37.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Jun 17 15:49:31 2014 -0700
    37.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Jun 17 22:03:39 2014 -0700
    37.3 @@ -2258,12 +2258,9 @@
    37.4                                  // degree of mt discovery
    37.5                             false,
    37.6                                  // Reference discovery is not atomic
    37.7 -                           &_is_alive_closure_cm,
    37.8 +                           &_is_alive_closure_cm);
    37.9                                  // is alive closure
   37.10                                  // (for efficiency/performance)
   37.11 -                           true);
   37.12 -                                // Setting next fields of discovered
   37.13 -                                // lists requires a barrier.
   37.14  
   37.15    // STW ref processor
   37.16    _ref_processor_stw =
   37.17 @@ -2278,12 +2275,9 @@
   37.18                                  // degree of mt discovery
   37.19                             true,
   37.20                                  // Reference discovery is atomic
   37.21 -                           &_is_alive_closure_stw,
   37.22 +                           &_is_alive_closure_stw);
   37.23                                  // is alive closure
   37.24                                  // (for efficiency/performance)
   37.25 -                           false);
   37.26 -                                // Setting next fields of discovered
   37.27 -                                // lists does not require a barrier.
   37.28  }
   37.29  
   37.30  size_t G1CollectedHeap::capacity() const {
    38.1 --- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Tue Jun 17 15:49:31 2014 -0700
    38.2 +++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Tue Jun 17 22:03:39 2014 -0700
    38.3 @@ -1638,8 +1638,7 @@
    38.4                               refs_discovery_is_mt(),     // mt discovery
    38.5                               (int) ParallelGCThreads,    // mt discovery degree
    38.6                               refs_discovery_is_atomic(), // atomic_discovery
    38.7 -                             NULL,                       // is_alive_non_header
    38.8 -                             false);                     // write barrier for next field updates
    38.9 +                             NULL);                      // is_alive_non_header
   38.10    }
   38.11  }
   38.12  
    39.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Tue Jun 17 15:49:31 2014 -0700
    39.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Tue Jun 17 22:03:39 2014 -0700
    39.3 @@ -853,8 +853,7 @@
    39.4                             true,          // mt discovery
    39.5                             (int) ParallelGCThreads, // mt discovery degree
    39.6                             true,          // atomic_discovery
    39.7 -                           &_is_alive_closure, // non-header is alive closure
    39.8 -                           false);        // write barrier for next field updates
    39.9 +                           &_is_alive_closure); // non-header is alive closure
   39.10    _counters = new CollectorCounters("PSParallelCompact", 1);
   39.11  
   39.12    // Initialize static fields in ParCompactionManager.
    40.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Tue Jun 17 15:49:31 2014 -0700
    40.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Tue Jun 17 22:03:39 2014 -0700
    40.3 @@ -861,8 +861,7 @@
    40.4                             true,                       // mt discovery
    40.5                             (int) ParallelGCThreads,    // mt discovery degree
    40.6                             true,                       // atomic_discovery
    40.7 -                           NULL,                       // header provides liveness info
    40.8 -                           false);                     // next field updates do not need write barrier
    40.9 +                           NULL);                      // header provides liveness info
   40.10  
   40.11    // Cache the cardtable
   40.12    BarrierSet* bs = Universe::heap()->barrier_set();
    41.1 --- a/src/share/vm/interpreter/abstractInterpreter.hpp	Tue Jun 17 15:49:31 2014 -0700
    41.2 +++ b/src/share/vm/interpreter/abstractInterpreter.hpp	Tue Jun 17 22:03:39 2014 -0700
    41.3 @@ -181,30 +181,16 @@
    41.4    // Deoptimization should reexecute this bytecode
    41.5    static bool    bytecode_should_reexecute(Bytecodes::Code code);
    41.6  
    41.7 -  // share implementation of size_activation and layout_activation:
    41.8 -  static int        size_activation(Method* method,
    41.9 +  // deoptimization support
   41.10 +  static int        size_activation(int max_stack,
   41.11                                      int temps,
   41.12 -                                    int popframe_args,
   41.13 +                                    int extra_args,
   41.14                                      int monitors,
   41.15 -                                    int caller_actual_parameters,
   41.16                                      int callee_params,
   41.17                                      int callee_locals,
   41.18 -                                    bool is_top_frame,
   41.19 -                                    bool is_bottom_frame) {
   41.20 -    return layout_activation(method,
   41.21 -                             temps,
   41.22 -                             popframe_args,
   41.23 -                             monitors,
   41.24 -                             caller_actual_parameters,
   41.25 -                             callee_params,
   41.26 -                             callee_locals,
   41.27 -                             (frame*)NULL,
   41.28 -                             (frame*)NULL,
   41.29 -                             is_top_frame,
   41.30 -                             is_bottom_frame);
   41.31 -  }
   41.32 +                                    bool is_top_frame);
   41.33  
   41.34 -  static int       layout_activation(Method* method,
   41.35 +  static void      layout_activation(Method* method,
   41.36                                       int temps,
   41.37                                       int popframe_args,
   41.38                                       int monitors,
    42.1 --- a/src/share/vm/memory/metaspace.cpp	Tue Jun 17 15:49:31 2014 -0700
    42.2 +++ b/src/share/vm/memory/metaspace.cpp	Tue Jun 17 22:03:39 2014 -0700
    42.3 @@ -1423,6 +1423,17 @@
    42.4    return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
    42.5  }
    42.6  
    42.7 +void MetaspaceGC::initialize() {
    42.8 +  // Set the high-water mark to MaxMetaspaceSize during VM initialization since
    42.9 +  // we can't do a GC during initialization.
   42.10 +  _capacity_until_GC = MaxMetaspaceSize;
   42.11 +}
   42.12 +
   42.13 +void MetaspaceGC::post_initialize() {
   42.14 +  // Reset the high-water mark once the VM initialization is done.
   42.15 +  _capacity_until_GC = MAX2(MetaspaceAux::committed_bytes(), MetaspaceSize);
   42.16 +}
   42.17 +
   42.18  bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
   42.19    // Check if the compressed class space is full.
   42.20    if (is_class && Metaspace::using_class_space()) {
   42.21 @@ -1443,21 +1454,13 @@
   42.22  
   42.23  size_t MetaspaceGC::allowed_expansion() {
   42.24    size_t committed_bytes = MetaspaceAux::committed_bytes();
   42.25 +  size_t capacity_until_gc = capacity_until_GC();
   42.26 +
   42.27 +  assert(capacity_until_gc >= committed_bytes,
   42.28 +        err_msg("capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
   42.29 +                capacity_until_gc, committed_bytes));
   42.30  
   42.31    size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
   42.32 -
   42.33 -  // Always grant expansion if we are initiating the JVM,
   42.34 -  // or if the GC_locker is preventing GCs.
   42.35 -  if (!is_init_completed() || GC_locker::is_active_and_needs_gc()) {
   42.36 -    return left_until_max / BytesPerWord;
   42.37 -  }
   42.38 -
   42.39 -  size_t capacity_until_gc = capacity_until_GC();
   42.40 -
   42.41 -  if (capacity_until_gc <= committed_bytes) {
   42.42 -    return 0;
   42.43 -  }
   42.44 -
   42.45    size_t left_until_GC = capacity_until_gc - committed_bytes;
   42.46    size_t left_to_commit = MIN2(left_until_GC, left_until_max);
   42.47  
   42.48 @@ -1469,7 +1472,15 @@
   42.49    uint current_shrink_factor = _shrink_factor;
   42.50    _shrink_factor = 0;
   42.51  
   42.52 -  const size_t used_after_gc = MetaspaceAux::capacity_bytes();
   42.53 +  // Using committed_bytes() for used_after_gc is an overestimation, since the
   42.54 +  // chunk free lists are included in committed_bytes() and the memory in an
   42.55 +  // un-fragmented chunk free list is available for future allocations.
    42.56 +  // However, if the chunk free lists become fragmented, then the memory may
   42.57 +  // not be available for future allocations and the memory is therefore "in use".
   42.58 +  // Including the chunk free lists in the definition of "in use" is therefore
   42.59 +  // necessary. Not including the chunk free lists can cause capacity_until_GC to
   42.60 +  // shrink below committed_bytes() and this has caused serious bugs in the past.
   42.61 +  const size_t used_after_gc = MetaspaceAux::committed_bytes();
   42.62    const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
   42.63  
   42.64    const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
   42.65 @@ -3093,6 +3104,8 @@
   42.66  }
   42.67  
   42.68  void Metaspace::global_initialize() {
   42.69 +  MetaspaceGC::initialize();
   42.70 +
   42.71    // Initialize the alignment for shared spaces.
   42.72    int max_alignment = os::vm_page_size();
   42.73    size_t cds_total = 0;
   42.74 @@ -3200,10 +3213,13 @@
   42.75      }
   42.76    }
   42.77  
   42.78 -  MetaspaceGC::initialize();
   42.79    _tracer = new MetaspaceTracer();
   42.80  }
   42.81  
   42.82 +void Metaspace::post_initialize() {
   42.83 +  MetaspaceGC::post_initialize();
   42.84 +}
   42.85 +
   42.86  Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype,
   42.87                                                 size_t chunk_word_size,
   42.88                                                 size_t chunk_bunch) {
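The new MetaspaceGC::initialize()/post_initialize() pair parks the high-water mark at the absolute maximum while the VM is bootstrapping (when no GC can be run) and clamps it back to the normal trigger once startup completes. A minimal sketch of that pattern, assuming made-up constants kMaxLimit and kInitialLimit in place of the real MaxMetaspaceSize and MetaspaceSize flags:

    #include <algorithm>
    #include <cstddef>
    #include <iostream>

    // Hypothetical stand-ins for MaxMetaspaceSize / MetaspaceSize and the
    // currently committed bytes; the real values are VM flags and counters.
    constexpr size_t kMaxLimit     = 256 * 1024 * 1024;
    constexpr size_t kInitialLimit = 16 * 1024 * 1024;

    struct CapacityLimit {
        size_t capacity_until_gc = 0;

        void initialize() {
            // During bootstrap a GC is impossible, so make the trigger unreachable.
            capacity_until_gc = kMaxLimit;
        }

        void post_initialize(size_t committed_bytes) {
            // After startup, fall back to the normal trigger, but never
            // below what is already committed.
            capacity_until_gc = std::max(committed_bytes, kInitialLimit);
        }
    };

    int main() {
        CapacityLimit limit;
        limit.initialize();
        std::cout << "during init: " << limit.capacity_until_gc << "\n";
        limit.post_initialize(24 * 1024 * 1024);
        std::cout << "after init:  " << limit.capacity_until_gc << "\n";
        return 0;
    }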
    43.1 --- a/src/share/vm/memory/metaspace.hpp	Tue Jun 17 15:49:31 2014 -0700
    43.2 +++ b/src/share/vm/memory/metaspace.hpp	Tue Jun 17 22:03:39 2014 -0700
    43.3 @@ -208,6 +208,7 @@
    43.4  
    43.5    static void ergo_initialize();
    43.6    static void global_initialize();
    43.7 +  static void post_initialize();
    43.8  
    43.9    static size_t first_chunk_word_size() { return _first_chunk_word_size; }
   43.10    static size_t first_class_chunk_word_size() { return _first_class_chunk_word_size; }
   43.11 @@ -398,7 +399,8 @@
   43.12  
   43.13   public:
   43.14  
   43.15 -  static void initialize() { _capacity_until_GC = MetaspaceSize; }
   43.16 +  static void initialize();
   43.17 +  static void post_initialize();
   43.18  
   43.19    static size_t capacity_until_GC();
   43.20    static size_t inc_capacity_until_GC(size_t v);
    44.1 --- a/src/share/vm/memory/referenceProcessor.cpp	Tue Jun 17 15:49:31 2014 -0700
    44.2 +++ b/src/share/vm/memory/referenceProcessor.cpp	Tue Jun 17 22:03:39 2014 -0700
    44.3 @@ -96,12 +96,10 @@
    44.4                                         bool      mt_discovery,
    44.5                                         uint      mt_discovery_degree,
    44.6                                         bool      atomic_discovery,
    44.7 -                                       BoolObjectClosure* is_alive_non_header,
    44.8 -                                       bool      discovered_list_needs_post_barrier)  :
    44.9 +                                       BoolObjectClosure* is_alive_non_header)  :
   44.10    _discovering_refs(false),
   44.11    _enqueuing_is_done(false),
   44.12    _is_alive_non_header(is_alive_non_header),
   44.13 -  _discovered_list_needs_post_barrier(discovered_list_needs_post_barrier),
   44.14    _processing_is_mt(mt_processing),
   44.15    _next_id(0)
   44.16  {
   44.17 @@ -340,10 +338,18 @@
   44.18    // (java.lang.ref.Reference.discovered), self-loop their "next" field
   44.19    // thus distinguishing them from active References, then
   44.20    // prepend them to the pending list.
   44.21 +  //
   44.22 +  // The Java threads will see the Reference objects linked together through
   44.23 +  // the discovered field. Instead of trying to do the write barrier updates
   44.24 +  // in all places in the reference processor where we manipulate the discovered
    44.25 +  // field, we make sure to do the barrier here, where we already iterate through
   44.26 +  // all linked Reference objects. Note that it is important to not dirty any
   44.27 +  // cards during reference processing since this will cause card table
   44.28 +  // verification to fail for G1.
   44.29 +  //
   44.30    // BKWRD COMPATIBILITY NOTE: For older JDKs (prior to the fix for 4956777),
   44.31    // the "next" field is used to chain the pending list, not the discovered
   44.32    // field.
   44.33 -
   44.34    if (TraceReferenceGC && PrintGCDetails) {
   44.35      gclog_or_tty->print_cr("ReferenceProcessor::enqueue_discovered_reflist list "
   44.36                             INTPTR_FORMAT, (address)refs_list.head());
   44.37 @@ -351,7 +357,7 @@
   44.38  
   44.39    oop obj = NULL;
   44.40    oop next_d = refs_list.head();
   44.41 -  if (pending_list_uses_discovered_field()) { // New behaviour
   44.42 +  if (pending_list_uses_discovered_field()) { // New behavior
   44.43      // Walk down the list, self-looping the next field
   44.44      // so that the References are not considered active.
   44.45      while (obj != next_d) {
   44.46 @@ -365,15 +371,15 @@
   44.47        assert(java_lang_ref_Reference::next(obj) == NULL,
   44.48               "Reference not active; should not be discovered");
   44.49        // Self-loop next, so as to make Ref not active.
   44.50 -      // Post-barrier not needed when looping to self.
   44.51        java_lang_ref_Reference::set_next_raw(obj, obj);
   44.52 -      if (next_d == obj) {  // obj is last
   44.53 -        // Swap refs_list into pendling_list_addr and
   44.54 +      if (next_d != obj) {
   44.55 +        oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), next_d);
   44.56 +      } else {
   44.57 +        // This is the last object.
   44.58 +        // Swap refs_list into pending_list_addr and
   44.59          // set obj's discovered to what we read from pending_list_addr.
   44.60          oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
   44.61 -        // Need post-barrier on pending_list_addr above;
   44.62 -        // see special post-barrier code at the end of
   44.63 -        // enqueue_discovered_reflists() further below.
   44.64 +        // Need post-barrier on pending_list_addr. See enqueue_discovered_ref_helper() above.
   44.65          java_lang_ref_Reference::set_discovered_raw(obj, old); // old may be NULL
   44.66          oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), old);
   44.67        }
   44.68 @@ -496,20 +502,15 @@
   44.69    // pre-barrier here because we know the Reference has already been found/marked,
   44.70    // that's how it ended up in the discovered list in the first place.
   44.71    oop_store_raw(_prev_next, new_next);
   44.72 -  if (_discovered_list_needs_post_barrier && _prev_next != _refs_list.adr_head()) {
   44.73 -    // Needs post-barrier and this is not the list head (which is not on the heap)
   44.74 -    oopDesc::bs()->write_ref_field(_prev_next, new_next);
   44.75 -  }
   44.76    NOT_PRODUCT(_removed++);
   44.77    _refs_list.dec_length(1);
   44.78  }
   44.79  
   44.80  // Make the Reference object active again.
   44.81  void DiscoveredListIterator::make_active() {
   44.82 -  // For G1 we don't want to use set_next - it
   44.83 -  // will dirty the card for the next field of
   44.84 -  // the reference object and will fail
   44.85 -  // CT verification.
   44.86 +  // The pre barrier for G1 is probably just needed for the old
   44.87 +  // reference processing behavior. Should we guard this with
   44.88 +  // ReferenceProcessor::pending_list_uses_discovered_field() ?
   44.89    if (UseG1GC) {
   44.90      HeapWord* next_addr = java_lang_ref_Reference::next_addr(_ref);
   44.91      if (UseCompressedOops) {
   44.92 @@ -517,10 +518,8 @@
   44.93      } else {
   44.94        oopDesc::bs()->write_ref_field_pre((oop*)next_addr, NULL);
   44.95      }
   44.96 -    java_lang_ref_Reference::set_next_raw(_ref, NULL);
   44.97 -  } else {
   44.98 -    java_lang_ref_Reference::set_next(_ref, NULL);
   44.99    }
  44.100 +  java_lang_ref_Reference::set_next_raw(_ref, NULL);
  44.101  }
  44.102  
  44.103  void DiscoveredListIterator::clear_referent() {
  44.104 @@ -546,7 +545,7 @@
  44.105                                     OopClosure*        keep_alive,
  44.106                                     VoidClosure*       complete_gc) {
  44.107    assert(policy != NULL, "Must have a non-NULL policy");
  44.108 -  DiscoveredListIterator iter(refs_list, keep_alive, is_alive, _discovered_list_needs_post_barrier);
  44.109 +  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  44.110    // Decide which softly reachable refs should be kept alive.
  44.111    while (iter.has_next()) {
  44.112      iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
  44.113 @@ -586,7 +585,7 @@
  44.114                               BoolObjectClosure* is_alive,
  44.115                               OopClosure*        keep_alive) {
  44.116    assert(discovery_is_atomic(), "Error");
  44.117 -  DiscoveredListIterator iter(refs_list, keep_alive, is_alive, _discovered_list_needs_post_barrier);
  44.118 +  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  44.119    while (iter.has_next()) {
  44.120      iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
  44.121      DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());)
  44.122 @@ -623,7 +622,7 @@
  44.123                                                    OopClosure*        keep_alive,
  44.124                                                    VoidClosure*       complete_gc) {
  44.125    assert(!discovery_is_atomic(), "Error");
  44.126 -  DiscoveredListIterator iter(refs_list, keep_alive, is_alive, _discovered_list_needs_post_barrier);
  44.127 +  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  44.128    while (iter.has_next()) {
  44.129      iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
  44.130      HeapWord* next_addr = java_lang_ref_Reference::next_addr(iter.obj());
  44.131 @@ -666,7 +665,7 @@
  44.132                                     OopClosure*        keep_alive,
  44.133                                     VoidClosure*       complete_gc) {
  44.134    ResourceMark rm;
  44.135 -  DiscoveredListIterator iter(refs_list, keep_alive, is_alive, _discovered_list_needs_post_barrier);
  44.136 +  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  44.137    while (iter.has_next()) {
  44.138      iter.update_discovered();
  44.139      iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
  44.140 @@ -782,13 +781,6 @@
  44.141    bool _clear_referent;
  44.142  };
  44.143  
  44.144 -void ReferenceProcessor::set_discovered(oop ref, oop value) {
  44.145 -  java_lang_ref_Reference::set_discovered_raw(ref, value);
  44.146 -  if (_discovered_list_needs_post_barrier) {
  44.147 -    oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(ref), value);
  44.148 -  }
  44.149 -}
  44.150 -
  44.151  // Balances reference queues.
  44.152  // Move entries from all queues[0, 1, ..., _max_num_q-1] to
  44.153  // queues[0, 1, ..., _num_q-1] because only the first _num_q
  44.154 @@ -846,9 +838,9 @@
  44.155          // Add the chain to the to list.
  44.156          if (ref_lists[to_idx].head() == NULL) {
  44.157            // to list is empty. Make a loop at the end.
  44.158 -          set_discovered(move_tail, move_tail);
  44.159 +          java_lang_ref_Reference::set_discovered_raw(move_tail, move_tail);
  44.160          } else {
  44.161 -          set_discovered(move_tail, ref_lists[to_idx].head());
  44.162 +          java_lang_ref_Reference::set_discovered_raw(move_tail, ref_lists[to_idx].head());
  44.163          }
  44.164          ref_lists[to_idx].set_head(move_head);
  44.165          ref_lists[to_idx].inc_length(refs_to_move);
  44.166 @@ -982,7 +974,7 @@
  44.167  
  44.168  void ReferenceProcessor::clean_up_discovered_reflist(DiscoveredList& refs_list) {
  44.169    assert(!discovery_is_atomic(), "Else why call this method?");
  44.170 -  DiscoveredListIterator iter(refs_list, NULL, NULL, _discovered_list_needs_post_barrier);
  44.171 +  DiscoveredListIterator iter(refs_list, NULL, NULL);
  44.172    while (iter.has_next()) {
  44.173      iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
  44.174      oop next = java_lang_ref_Reference::next(iter.obj());
  44.175 @@ -1071,16 +1063,6 @@
  44.176    // The last ref must have its discovered field pointing to itself.
  44.177    oop next_discovered = (current_head != NULL) ? current_head : obj;
  44.178  
  44.179 -  // Note: In the case of G1, this specific pre-barrier is strictly
  44.180 -  // not necessary because the only case we are interested in
  44.181 -  // here is when *discovered_addr is NULL (see the CAS further below),
  44.182 -  // so this will expand to nothing. As a result, we have manually
  44.183 -  // elided this out for G1, but left in the test for some future
  44.184 -  // collector that might have need for a pre-barrier here, e.g.:-
  44.185 -  // oopDesc::bs()->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered);
  44.186 -  assert(!_discovered_list_needs_post_barrier || UseG1GC,
  44.187 -         "Need to check non-G1 collector: "
  44.188 -         "may need a pre-write-barrier for CAS from NULL below");
  44.189    oop retest = oopDesc::atomic_compare_exchange_oop(next_discovered, discovered_addr,
  44.190                                                      NULL);
  44.191    if (retest == NULL) {
  44.192 @@ -1089,9 +1071,6 @@
  44.193      // is necessary.
  44.194      refs_list.set_head(obj);
  44.195      refs_list.inc_length(1);
  44.196 -    if (_discovered_list_needs_post_barrier) {
  44.197 -      oopDesc::bs()->write_ref_field((void*)discovered_addr, next_discovered);
  44.198 -    }
  44.199  
  44.200      if (TraceReferenceGC) {
  44.201        gclog_or_tty->print_cr("Discovered reference (mt) (" INTPTR_FORMAT ": %s)",
  44.202 @@ -1242,24 +1221,14 @@
  44.203    if (_discovery_is_mt) {
  44.204      add_to_discovered_list_mt(*list, obj, discovered_addr);
  44.205    } else {
  44.206 -    // If "_discovered_list_needs_post_barrier", we do write barriers when
  44.207 -    // updating the discovered reference list.  Otherwise, we do a raw store
  44.208 -    // here: the field will be visited later when processing the discovered
  44.209 -    // references.
  44.210 +    // We do a raw store here: the field will be visited later when processing
  44.211 +    // the discovered references.
  44.212      oop current_head = list->head();
  44.213      // The last ref must have its discovered field pointing to itself.
  44.214      oop next_discovered = (current_head != NULL) ? current_head : obj;
  44.215  
  44.216 -    // As in the case further above, since we are over-writing a NULL
  44.217 -    // pre-value, we can safely elide the pre-barrier here for the case of G1.
  44.218 -    // e.g.:- oopDesc::bs()->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered);
  44.219      assert(discovered == NULL, "control point invariant");
  44.220 -    assert(!_discovered_list_needs_post_barrier || UseG1GC,
  44.221 -           "For non-G1 collector, may need a pre-write-barrier for CAS from NULL below");
  44.222      oop_store_raw(discovered_addr, next_discovered);
  44.223 -    if (_discovered_list_needs_post_barrier) {
  44.224 -      oopDesc::bs()->write_ref_field((void*)discovered_addr, next_discovered);
  44.225 -    }
  44.226      list->set_head(obj);
  44.227      list->inc_length(1);
  44.228  
  44.229 @@ -1353,7 +1322,7 @@
  44.230                                                  OopClosure*        keep_alive,
  44.231                                                  VoidClosure*       complete_gc,
  44.232                                                  YieldClosure*      yield) {
  44.233 -  DiscoveredListIterator iter(refs_list, keep_alive, is_alive, _discovered_list_needs_post_barrier);
  44.234 +  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  44.235    while (iter.has_next()) {
  44.236      iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
  44.237      oop obj = iter.obj();
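The ReferenceProcessor change removes the per-store _discovered_list_needs_post_barrier plumbing: discovery keeps using raw stores, and the post barrier is applied once per link while enqueue_discovered_reflist() walks the chain anyway. A rough standalone sketch of that idea, with a hypothetical Ref node and write_barrier() function standing in for the real oop and barrier-set machinery:

    #include <iostream>

    // Hypothetical stand-ins: a Ref node with a 'discovered' link and a
    // write_barrier() that models dirtying a card for a reference store.
    struct Ref {
        Ref* discovered = nullptr;
    };

    void write_barrier(Ref* holder, Ref* value) {
        std::cout << "barrier: " << holder << " -> " << value << "\n";
    }

    // Discovery prepends with a raw store only (mirrors oop_store_raw);
    // the last element points at itself so the chain is self-terminating.
    void discover(Ref*& head, Ref* obj) {
        obj->discovered = (head != nullptr) ? head : obj;
        head = obj;
    }

    // Enqueue walks the chain once and applies the barrier for every link,
    // so discovery itself never needs per-store barriers.
    void enqueue(Ref* head) {
        for (Ref* obj = head; obj != nullptr; ) {
            Ref* next = obj->discovered;
            if (next != obj) {
                write_barrier(obj, next);
                obj = next;
            } else {
                obj = nullptr;   // self-loop marks the end of the chain
            }
        }
    }

    int main() {
        Ref a, b, c;
        Ref* head = nullptr;
        discover(head, &a);
        discover(head, &b);
        discover(head, &c);
        enqueue(head);
        return 0;
    }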
    45.1 --- a/src/share/vm/memory/referenceProcessor.hpp	Tue Jun 17 15:49:31 2014 -0700
    45.2 +++ b/src/share/vm/memory/referenceProcessor.hpp	Tue Jun 17 22:03:39 2014 -0700
    45.3 @@ -99,7 +99,6 @@
    45.4    oop                _referent;
    45.5    OopClosure*        _keep_alive;
    45.6    BoolObjectClosure* _is_alive;
    45.7 -  bool               _discovered_list_needs_post_barrier;
    45.8  
    45.9    DEBUG_ONLY(
   45.10    oop                _first_seen; // cyclic linked list check
   45.11 @@ -113,8 +112,7 @@
   45.12  public:
   45.13    inline DiscoveredListIterator(DiscoveredList&    refs_list,
   45.14                                  OopClosure*        keep_alive,
   45.15 -                                BoolObjectClosure* is_alive,
   45.16 -                                bool               discovered_list_needs_post_barrier = false):
   45.17 +                                BoolObjectClosure* is_alive):
   45.18      _refs_list(refs_list),
   45.19      _prev_next(refs_list.adr_head()),
   45.20      _prev(NULL),
   45.21 @@ -128,8 +126,7 @@
   45.22  #endif
   45.23      _next(NULL),
   45.24      _keep_alive(keep_alive),
   45.25 -    _is_alive(is_alive),
   45.26 -    _discovered_list_needs_post_barrier(discovered_list_needs_post_barrier)
   45.27 +    _is_alive(is_alive)
   45.28  { }
   45.29  
   45.30    // End Of List.
   45.31 @@ -230,14 +227,6 @@
   45.32                                          // other collectors in configuration
   45.33    bool        _discovery_is_mt;         // true if reference discovery is MT.
   45.34  
   45.35 -  // If true, setting "next" field of a discovered refs list requires
   45.36 -  // write post barrier.  (Must be true if used in a collector in which
   45.37 -  // elements of a discovered list may be moved during discovery: for
   45.38 -  // example, a collector like Garbage-First that moves objects during a
   45.39 -  // long-term concurrent marking phase that does weak reference
   45.40 -  // discovery.)
   45.41 -  bool        _discovered_list_needs_post_barrier;
   45.42 -
   45.43    bool        _enqueuing_is_done;       // true if all weak references enqueued
   45.44    bool        _processing_is_mt;        // true during phases when
   45.45                                          // reference processing is MT.
   45.46 @@ -382,11 +371,6 @@
   45.47    void enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor);
   45.48  
   45.49   protected:
   45.50 -  // Set the 'discovered' field of the given reference to
   45.51 -  // the given value - emitting post barriers depending upon
   45.52 -  // the value of _discovered_list_needs_post_barrier.
   45.53 -  void set_discovered(oop ref, oop value);
   45.54 -
   45.55    // "Preclean" the given discovered reference list
   45.56    // by removing references with strongly reachable referents.
   45.57    // Currently used in support of CMS only.
   45.58 @@ -427,8 +411,7 @@
   45.59                       bool mt_processing = false, uint mt_processing_degree = 1,
   45.60                       bool mt_discovery  = false, uint mt_discovery_degree  = 1,
   45.61                       bool atomic_discovery = true,
   45.62 -                     BoolObjectClosure* is_alive_non_header = NULL,
   45.63 -                     bool discovered_list_needs_post_barrier = false);
   45.64 +                     BoolObjectClosure* is_alive_non_header = NULL);
   45.65  
   45.66    // RefDiscoveryPolicy values
   45.67    enum DiscoveryPolicy {
    46.1 --- a/src/share/vm/oops/cpCache.cpp	Tue Jun 17 15:49:31 2014 -0700
    46.2 +++ b/src/share/vm/oops/cpCache.cpp	Tue Jun 17 22:03:39 2014 -0700
    46.3 @@ -406,7 +406,7 @@
    46.4  
    46.5  
    46.6  oop ConstantPoolCacheEntry::appendix_if_resolved(constantPoolHandle cpool) {
    46.7 -  if (is_f1_null() || !has_appendix())
    46.8 +  if (!has_appendix())
    46.9      return NULL;
   46.10    const int ref_index = f2_as_index() + _indy_resolved_references_appendix_offset;
   46.11    objArrayOop resolved_references = cpool->resolved_references();
   46.12 @@ -415,7 +415,7 @@
   46.13  
   46.14  
   46.15  oop ConstantPoolCacheEntry::method_type_if_resolved(constantPoolHandle cpool) {
   46.16 -  if (is_f1_null() || !has_method_type())
   46.17 +  if (!has_method_type())
   46.18      return NULL;
   46.19    const int ref_index = f2_as_index() + _indy_resolved_references_method_type_offset;
   46.20    objArrayOop resolved_references = cpool->resolved_references();
    47.1 --- a/src/share/vm/oops/cpCache.hpp	Tue Jun 17 15:49:31 2014 -0700
    47.2 +++ b/src/share/vm/oops/cpCache.hpp	Tue Jun 17 22:03:39 2014 -0700
    47.3 @@ -1,5 +1,5 @@
    47.4  /*
    47.5 - * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
    47.6 + * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
    47.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    47.8   *
    47.9   * This code is free software; you can redistribute it and/or modify it
   47.10 @@ -346,8 +346,8 @@
   47.11    bool is_final() const                          { return (_flags & (1 << is_final_shift))          != 0; }
   47.12    bool is_forced_virtual() const                 { return (_flags & (1 << is_forced_virtual_shift)) != 0; }
   47.13    bool is_vfinal() const                         { return (_flags & (1 << is_vfinal_shift))         != 0; }
   47.14 -  bool has_appendix() const                      { return (_flags & (1 << has_appendix_shift))      != 0; }
   47.15 -  bool has_method_type() const                   { return (_flags & (1 << has_method_type_shift))   != 0; }
   47.16 +  bool has_appendix() const                      { return (!is_f1_null()) && (_flags & (1 << has_appendix_shift))      != 0; }
   47.17 +  bool has_method_type() const                   { return (!is_f1_null()) && (_flags & (1 << has_method_type_shift))   != 0; }
   47.18    bool is_method_entry() const                   { return (_flags & (1 << is_field_entry_shift))    == 0; }
   47.19    bool is_field_entry() const                    { return (_flags & (1 << is_field_entry_shift))    != 0; }
   47.20    bool is_byte() const                           { return flag_state() == btos; }
    48.1 --- a/src/share/vm/oops/instanceKlass.cpp	Tue Jun 17 15:49:31 2014 -0700
    48.2 +++ b/src/share/vm/oops/instanceKlass.cpp	Tue Jun 17 22:03:39 2014 -0700
    48.3 @@ -2771,7 +2771,7 @@
    48.4    Method* m = n->method();
    48.5    // Search for match
    48.6    while(cur != NULL && cur != n) {
    48.7 -    if (TieredCompilation) {
    48.8 +    if (TieredCompilation && m == cur->method()) {
    48.9        // Find max level before n
   48.10        max_level = MAX2(max_level, cur->comp_level());
   48.11      }
   48.12 @@ -2793,7 +2793,9 @@
   48.13      cur = next;
   48.14      while (cur != NULL) {
   48.15        // Find max level after n
   48.16 -      max_level = MAX2(max_level, cur->comp_level());
   48.17 +      if (m == cur->method()) {
   48.18 +        max_level = MAX2(max_level, cur->comp_level());
   48.19 +      }
   48.20        cur = cur->osr_link();
   48.21      }
   48.22      m->set_highest_osr_comp_level(max_level);
    49.1 --- a/src/share/vm/opto/callnode.cpp	Tue Jun 17 15:49:31 2014 -0700
    49.2 +++ b/src/share/vm/opto/callnode.cpp	Tue Jun 17 22:03:39 2014 -0700
    49.3 @@ -607,6 +607,39 @@
    49.4    }
    49.5  }
    49.6  
    49.7 +// Mirror the stack size calculation in the deopt code
    49.8 +// How much stack space would we need at this point in the program in
    49.9 +// case of deoptimization?
   49.10 +int JVMState::interpreter_frame_size() const {
   49.11 +  const JVMState* jvms = this;
   49.12 +  int size = 0;
   49.13 +  int callee_parameters = 0;
   49.14 +  int callee_locals = 0;
   49.15 +  int extra_args = method()->max_stack() - stk_size();
   49.16 +
   49.17 +  while (jvms != NULL) {
   49.18 +    int locks = jvms->nof_monitors();
   49.19 +    int temps = jvms->stk_size();
   49.20 +    bool is_top_frame = (jvms == this);
   49.21 +    ciMethod* method = jvms->method();
   49.22 +
   49.23 +    int frame_size = BytesPerWord * Interpreter::size_activation(method->max_stack(),
   49.24 +                                                                 temps + callee_parameters,
   49.25 +                                                                 extra_args,
   49.26 +                                                                 locks,
   49.27 +                                                                 callee_parameters,
   49.28 +                                                                 callee_locals,
   49.29 +                                                                 is_top_frame);
   49.30 +    size += frame_size;
   49.31 +
   49.32 +    callee_parameters = method->size_of_parameters();
   49.33 +    callee_locals = method->max_locals();
   49.34 +    extra_args = 0;
   49.35 +    jvms = jvms->caller();
   49.36 +  }
   49.37 +  return size + Deoptimization::last_frame_adjust(0, callee_locals) * BytesPerWord;
   49.38 +}
   49.39 +
   49.40  //=============================================================================
   49.41  uint CallNode::cmp( const Node &n ) const
   49.42  { return _tf == ((CallNode&)n)._tf && _jvms == ((CallNode&)n)._jvms; }
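JVMState::interpreter_frame_size() above walks from the innermost JVM state out through jvms->caller(), adding one interpreter activation per inlining level, with each level's parameter and local counts feeding into the next frame's size. A hedged sketch of that accumulation over a simplified scope chain; the Scope struct, the word size, and the stand-in size_activation_words() formula are illustrative only (the real sizing is platform-specific and also adds Deoptimization::last_frame_adjust):

    #include <iostream>
    #include <vector>

    // Hypothetical, simplified view of one inlining level: just enough
    // fields to mirror the accumulation loop, nothing more.
    struct Scope {
        int max_stack;
        int stack_size;          // expression stack slots live at this point
        int monitors;
        int size_of_parameters;
        int max_locals;
    };

    constexpr int kBytesPerWord = 8;

    // Placeholder for Interpreter::size_activation(): any monotone function
    // of its inputs illustrates the walk; the real formula differs per CPU.
    int size_activation_words(const Scope& s, int temps, int extra_args,
                              int callee_params, int callee_locals) {
        return s.max_stack + temps + extra_args + s.monitors * 2
               + (callee_locals - callee_params);
    }

    // Innermost scope first, root caller last (same direction as jvms->caller()).
    int interpreter_frame_size_bytes(const std::vector<Scope>& scopes) {
        int size = 0;
        int callee_parameters = 0;
        int callee_locals = 0;
        int extra_args = scopes.front().max_stack - scopes.front().stack_size;

        for (const Scope& s : scopes) {
            size += kBytesPerWord * size_activation_words(
                        s, s.stack_size + callee_parameters, extra_args,
                        callee_parameters, callee_locals);
            // This level becomes the callee of the next (outer) level.
            callee_parameters = s.size_of_parameters;
            callee_locals     = s.max_locals;
            extra_args        = 0;
        }
        return size;
    }

    int main() {
        std::vector<Scope> scopes = {
            {8, 3, 0, 2, 4},   // innermost (top) frame
            {6, 2, 1, 3, 5},   // its caller
        };
        std::cout << interpreter_frame_size_bytes(scopes) << " bytes\n";
        return 0;
    }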
    50.1 --- a/src/share/vm/opto/callnode.hpp	Tue Jun 17 15:49:31 2014 -0700
    50.2 +++ b/src/share/vm/opto/callnode.hpp	Tue Jun 17 22:03:39 2014 -0700
    50.3 @@ -300,6 +300,7 @@
    50.4    JVMState* clone_shallow(Compile* C) const; // retains uncloned caller
    50.5    void      set_map_deep(SafePointNode *map);// reset map for all callers
    50.6    void      adapt_position(int delta);       // Adapt offsets in in-array after adding an edge.
    50.7 +  int       interpreter_frame_size() const;
    50.8  
    50.9  #ifndef PRODUCT
   50.10    void      format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const;
    51.1 --- a/src/share/vm/opto/compile.cpp	Tue Jun 17 15:49:31 2014 -0700
    51.2 +++ b/src/share/vm/opto/compile.cpp	Tue Jun 17 22:03:39 2014 -0700
    51.3 @@ -439,6 +439,14 @@
    51.4    return words;
    51.5  }
    51.6  
    51.7 +// To bang the stack of this compiled method we use the stack size
    51.8 +// that the interpreter would need in case of a deoptimization. This
    51.9 +// removes the need to bang the stack in the deoptimization blob which
   51.10 +// in turn simplifies stack overflow handling.
   51.11 +int Compile::bang_size_in_bytes() const {
   51.12 +  return MAX2(_interpreter_frame_size, frame_size_in_bytes());
   51.13 +}
   51.14 +
   51.15  // ============================================================================
   51.16  //------------------------------CompileWrapper---------------------------------
   51.17  class CompileWrapper : public StackObj {
   51.18 @@ -662,7 +670,8 @@
   51.19                    _inlining_incrementally(false),
   51.20                    _print_inlining_list(NULL),
   51.21                    _print_inlining_idx(0),
   51.22 -                  _preserve_jvm_state(0) {
   51.23 +                  _preserve_jvm_state(0),
   51.24 +                  _interpreter_frame_size(0) {
   51.25    C = this;
   51.26  
   51.27    CompileWrapper cw(this);
   51.28 @@ -969,7 +978,8 @@
   51.29      _print_inlining_list(NULL),
   51.30      _print_inlining_idx(0),
   51.31      _preserve_jvm_state(0),
   51.32 -    _allowed_reasons(0) {
   51.33 +    _allowed_reasons(0),
   51.34 +    _interpreter_frame_size(0) {
   51.35    C = this;
   51.36  
   51.37  #ifndef PRODUCT
   51.38 @@ -3078,8 +3088,12 @@
   51.39        Node* m = n->in(i);
   51.40        ++i;
   51.41        if (m != NULL && !frc._visited.test_set(m->_idx)) {
   51.42 -        if (m->is_SafePoint() && m->as_SafePoint()->jvms() != NULL)
   51.43 +        if (m->is_SafePoint() && m->as_SafePoint()->jvms() != NULL) {
   51.44 +          // compute worst case interpreter size in case of a deoptimization
   51.45 +          update_interpreter_frame_size(m->as_SafePoint()->jvms()->interpreter_frame_size());
   51.46 +
   51.47            sfpt.push(m);
   51.48 +        }
   51.49          cnt = m->req();
   51.50          nstack.push(n, i); // put on stack parent and next input's index
   51.51          n = m;
    52.1 --- a/src/share/vm/opto/compile.hpp	Tue Jun 17 15:49:31 2014 -0700
    52.2 +++ b/src/share/vm/opto/compile.hpp	Tue Jun 17 22:03:39 2014 -0700
    52.3 @@ -487,6 +487,7 @@
    52.4    RegMask               _FIRST_STACK_mask;      // All stack slots usable for spills (depends on frame layout)
    52.5    Arena*                _indexSet_arena;        // control IndexSet allocation within PhaseChaitin
    52.6    void*                 _indexSet_free_block_list; // free list of IndexSet bit blocks
    52.7 +  int                   _interpreter_frame_size;
    52.8  
    52.9    uint                  _node_bundling_limit;
   52.10    Bundle*               _node_bundling_base;    // Information for instruction bundling
   52.11 @@ -946,6 +947,7 @@
   52.12    PhaseRegAlloc*    regalloc()                  { return _regalloc; }
   52.13    int               frame_slots() const         { return _frame_slots; }
   52.14    int               frame_size_in_words() const; // frame_slots in units of the polymorphic 'words'
   52.15 +  int               frame_size_in_bytes() const { return _frame_slots << LogBytesPerInt; }
   52.16    RegMask&          FIRST_STACK_mask()          { return _FIRST_STACK_mask; }
   52.17    Arena*            indexSet_arena()            { return _indexSet_arena; }
   52.18    void*             indexSet_free_block_list()  { return _indexSet_free_block_list; }
   52.19 @@ -957,6 +959,13 @@
   52.20    bool          need_stack_bang(int frame_size_in_bytes) const;
   52.21    bool          need_register_stack_bang() const;
   52.22  
   52.23 +  void  update_interpreter_frame_size(int size) {
   52.24 +    if (_interpreter_frame_size < size) {
   52.25 +      _interpreter_frame_size = size;
   52.26 +    }
   52.27 +  }
   52.28 +  int           bang_size_in_bytes() const;
   52.29 +
   52.30    void          set_matcher(Matcher* m)                 { _matcher = m; }
   52.31  //void          set_regalloc(PhaseRegAlloc* ra)           { _regalloc = ra; }
   52.32    void          set_indexSet_arena(Arena* a)            { _indexSet_arena = a; }
    53.1 --- a/src/share/vm/opto/loopopts.cpp	Tue Jun 17 15:49:31 2014 -0700
    53.2 +++ b/src/share/vm/opto/loopopts.cpp	Tue Jun 17 22:03:39 2014 -0700
    53.3 @@ -1401,7 +1401,8 @@
    53.4          // loop.  Happens if people set a loop-exit flag; then test the flag
    53.5          // in the loop to break the loop, then test it again outside of the
    53.6          // loop to determine which way the loop exited.
    53.7 -        if( use->is_If() || use->is_CMove() ) {
    53.8 +        // Loop predicate If node connects to Bool node through Opaque1 node.
    53.9 +        if (use->is_If() || use->is_CMove() || C->is_predicate_opaq(use)) {
   53.10            // Since this code is highly unlikely, we lazily build the worklist
   53.11            // of such Nodes to go split.
   53.12            if( !split_if_set )
   53.13 @@ -2768,11 +2769,11 @@
   53.14        // Hit!  Refactor use to use the post-incremented tripcounter.
   53.15        // Compute a post-increment tripcounter.
   53.16        Node *opaq = new (C) Opaque2Node( C, cle->incr() );
   53.17 -      register_new_node( opaq, u_ctrl );
   53.18 +      register_new_node(opaq, exit);
   53.19        Node *neg_stride = _igvn.intcon(-cle->stride_con());
   53.20        set_ctrl(neg_stride, C->root());
   53.21        Node *post = new (C) AddINode( opaq, neg_stride);
   53.22 -      register_new_node( post, u_ctrl );
   53.23 +      register_new_node(post, exit);
   53.24        _igvn.rehash_node_delayed(use);
   53.25        for (uint j = 1; j < use->req(); j++) {
   53.26          if (use->in(j) == phi)
    54.1 --- a/src/share/vm/opto/output.cpp	Tue Jun 17 15:49:31 2014 -0700
    54.2 +++ b/src/share/vm/opto/output.cpp	Tue Jun 17 22:03:39 2014 -0700
    54.3 @@ -165,8 +165,13 @@
    54.4    // Determine if we need to generate a stack overflow check.
    54.5    // Do it if the method is not a stub function and
    54.6    // has java calls or has frame size > vm_page_size/8.
    54.7 +  // The debug VM checks that deoptimization doesn't trigger an
    54.8 +  // unexpected stack overflow (compiled method stack banging should
    54.9 +  // guarantee it doesn't happen) so we always need the stack bang in
   54.10 +  // a debug VM.
   54.11    return (UseStackBanging && stub_function() == NULL &&
   54.12 -          (has_java_calls() || frame_size_in_bytes > os::vm_page_size()>>3));
   54.13 +          (has_java_calls() || frame_size_in_bytes > os::vm_page_size()>>3
   54.14 +           DEBUG_ONLY(|| true)));
   54.15  }
   54.16  
   54.17  bool Compile::need_register_stack_bang() const {
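
The DEBUG_ONLY macro used in the new condition is HotSpot's standard build-dependent guard from utilities/macros.hpp: its argument is compiled only when ASSERT is defined (debug builds) and vanishes in product builds. A simplified sketch of that idiom (not the verbatim definition):

  #ifdef ASSERT
  #define DEBUG_ONLY(code) code   // debug VM: the bang condition gains "|| true"
  #else
  #define DEBUG_ONLY(code)        // product VM: the extra clause is compiled out
  #endif

So a debug VM always emits the stack overflow check, which is what allows the deoptimization and uncommon trap blobs to verify that the compiled method's bang covered the interpreter frames created during deoptimization.
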
    55.1 --- a/src/share/vm/runtime/deoptimization.cpp	Tue Jun 17 15:49:31 2014 -0700
    55.2 +++ b/src/share/vm/runtime/deoptimization.cpp	Tue Jun 17 22:03:39 2014 -0700
    55.3 @@ -422,15 +422,9 @@
    55.4      // frame[number_of_frames - 1 ] = on_stack_size(youngest)
    55.5      // frame[number_of_frames - 2 ] = on_stack_size(sender(youngest))
    55.6      // frame[number_of_frames - 3 ] = on_stack_size(sender(sender(youngest)))
    55.7 -    int caller_parms = callee_parameters;
    55.8 -    if ((index == array->frames() - 1) && caller_was_method_handle) {
    55.9 -      caller_parms = 0;
   55.10 -    }
   55.11 -    frame_sizes[number_of_frames - 1 - index] = BytesPerWord * array->element(index)->on_stack_size(caller_parms,
   55.12 -                                                                                                    callee_parameters,
   55.13 +    frame_sizes[number_of_frames - 1 - index] = BytesPerWord * array->element(index)->on_stack_size(callee_parameters,
   55.14                                                                                                      callee_locals,
   55.15                                                                                                      index == 0,
   55.16 -                                                                                                    index == array->frames() - 1,
   55.17                                                                                                      popframe_extra_args);
   55.18      // This pc doesn't have to be perfect just good enough to identify the frame
   55.19      // as interpreted so the skeleton frame will be walkable
    56.1 --- a/src/share/vm/runtime/sharedRuntime.cpp	Tue Jun 17 15:49:31 2014 -0700
    56.2 +++ b/src/share/vm/runtime/sharedRuntime.cpp	Tue Jun 17 22:03:39 2014 -0700
    56.3 @@ -785,10 +785,13 @@
    56.4          // going to be unwound. Dispatch to a shared runtime stub
    56.5          // which will cause the StackOverflowError to be fabricated
    56.6          // and processed.
    56.7 -        // For stack overflow in deoptimization blob, cleanup thread.
    56.8 -        if (thread->deopt_mark() != NULL) {
    56.9 -          Deoptimization::cleanup_deopt_info(thread, NULL);
   56.10 -        }
   56.11 +        // Stack overflow should never occur during deoptimization:
   56.12 +        // the compiled method bangs the stack by as much as the
   56.13 +        // interpreter would need in case of a deoptimization. The
   56.14 +        // deoptimization blob and uncommon trap blob bang the stack
   56.15 +        // in a debug VM to verify the correctness of the compiled
   56.16 +        // method stack banging.
   56.17 +        assert(thread->deopt_mark() == NULL, "no stack overflow from deopt blob/uncommon trap");
   56.18          Events::log_exception(thread, "StackOverflowError at " INTPTR_FORMAT, pc);
   56.19          return StubRoutines::throw_StackOverflowError_entry();
   56.20        }
    57.1 --- a/src/share/vm/runtime/thread.cpp	Tue Jun 17 15:49:31 2014 -0700
    57.2 +++ b/src/share/vm/runtime/thread.cpp	Tue Jun 17 22:03:39 2014 -0700
    57.3 @@ -3574,6 +3574,8 @@
    57.4    // debug stuff, that does not work until all basic classes have been initialized.
    57.5    set_init_completed();
    57.6  
    57.7 +  Metaspace::post_initialize();
    57.8 +
    57.9  #ifndef USDT2
   57.10    HS_DTRACE_PROBE(hotspot, vm__init__end);
   57.11  #else /* USDT2 */
    58.1 --- a/src/share/vm/runtime/vframeArray.cpp	Tue Jun 17 15:49:31 2014 -0700
    58.2 +++ b/src/share/vm/runtime/vframeArray.cpp	Tue Jun 17 22:03:39 2014 -0700
    58.3 @@ -419,24 +419,20 @@
    58.4  
    58.5  }
    58.6  
    58.7 -int vframeArrayElement::on_stack_size(int caller_actual_parameters,
    58.8 -                                      int callee_parameters,
    58.9 +int vframeArrayElement::on_stack_size(int callee_parameters,
   58.10                                        int callee_locals,
   58.11                                        bool is_top_frame,
   58.12 -                                      bool is_bottom_frame,
   58.13                                        int popframe_extra_stack_expression_els) const {
   58.14    assert(method()->max_locals() == locals()->size(), "just checking");
   58.15    int locks = monitors() == NULL ? 0 : monitors()->number_of_monitors();
   58.16    int temps = expressions()->size();
   58.17 -  return Interpreter::size_activation(method(),
   58.18 +  return Interpreter::size_activation(method()->max_stack(),
   58.19                                        temps + callee_parameters,
   58.20                                        popframe_extra_stack_expression_els,
   58.21                                        locks,
   58.22 -                                      caller_actual_parameters,
   58.23                                        callee_parameters,
   58.24                                        callee_locals,
   58.25 -                                      is_top_frame,
   58.26 -                                      is_bottom_frame);
   58.27 +                                      is_top_frame);
   58.28  }
   58.29  
   58.30  
    59.1 --- a/src/share/vm/runtime/vframeArray.hpp	Tue Jun 17 15:49:31 2014 -0700
    59.2 +++ b/src/share/vm/runtime/vframeArray.hpp	Tue Jun 17 22:03:39 2014 -0700
    59.3 @@ -85,10 +85,8 @@
    59.4  
    59.5    // Returns the on stack word size for this frame
    59.6    // callee_parameters is the number of callee locals residing inside this frame
    59.7 -  int on_stack_size(int caller_actual_parameters,
    59.8 -                    int callee_parameters,
    59.9 +  int on_stack_size(int callee_parameters,
   59.10                      int callee_locals,
   59.11 -                    bool is_bottom_frame,
   59.12                      bool is_top_frame,
   59.13                      int popframe_extra_stack_expression_els) const;
   59.14  
    60.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    60.2 +++ b/test/compiler/loopopts/TestLogSum.java	Tue Jun 17 22:03:39 2014 -0700
    60.3 @@ -0,0 +1,111 @@
    60.4 +/*
    60.5 + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
    60.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    60.7 + *
    60.8 + * This code is free software; you can redistribute it and/or modify it
    60.9 + * under the terms of the GNU General Public License version 2 only, as
   60.10 + * published by the Free Software Foundation.
   60.11 + *
   60.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   60.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   60.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   60.15 + * version 2 for more details (a copy is included in the LICENSE file that
   60.16 + * accompanied this code).
   60.17 + *
   60.18 + * You should have received a copy of the GNU General Public License version
   60.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   60.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   60.21 + *
   60.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   60.23 + * or visit www.oracle.com if you need additional information or have any
   60.24 + * questions.
   60.25 + */
   60.26 +
   60.27 +/*
   60.28 + * @test
   60.29 + * @bug 8046516
   60.30 + * @summary Segmentation fault in JVM (easily reproducible)
   60.31 + * @run main/othervm -XX:-TieredCompilation -Xbatch TestLogSum
   60.32 + * @author jackkamm@gmail.com
   60.33 + */
   60.34 +
   60.35 +import java.util.Arrays;
   60.36 +import java.util.HashMap;
   60.37 +import java.util.List;
   60.38 +import java.util.Map;
   60.39 +public class TestLogSum {
   60.40 +  public static void main(String[] args) {
   60.41 +    double sum;
   60.42 +
   60.43 +    for (int i = 0; i < 6; i++) {
   60.44 +        for (int n = 2; n < 30; n++) {
   60.45 +           for (int j = 1; j <= n; j++) {
   60.46 +              for (int k = 1; k <= j; k++) {
   60.47 +                // System.out.println(computeSum(k, j));
   60.48 +                sum = computeSum(k, j);
   60.49 +              }
   60.50 +           }
   60.51 +        }
   60.52 +      }
   60.53 +   }
   60.54 +
   60.55 +   private static Map<List<Integer>, Double> cache = new HashMap<List<Integer>, Double>();
   60.56 +   public static double computeSum(int x, int y) {
   60.57 +      List<Integer> key = Arrays.asList(new Integer[] {x, y});
   60.58 +
   60.59 +      if (!cache.containsKey(key)) {
   60.60 +
   60.61 +        // explicitly creating/updating a double[] array, instead of using the LogSumArray wrapper object, will prevent the error
   60.62 +        LogSumArray toReturn = new LogSumArray(x);
   60.63 +
   60.64 +        // changing loop indices will prevent the error
   60.65 +        // in particular, for(z=0; z<x-1; z++), and then using z+1 in place of z, will not produce the error
   60.66 +        for (int z = 1; z < x+1; z++) {
   60.67 +           double logSummand = Math.log(z + x + y);
   60.68 +           toReturn.addLogSummand(logSummand);
   60.69 +        }
   60.70 +
   60.71 +        // returning the value here without caching it will prevent the segfault
   60.72 +        cache.put(key, toReturn.retrieveLogSum());
   60.73 +      }
   60.74 +      return cache.get(key);
   60.75 +   }
   60.76 +
   60.77 +   /*
   60.78 +    * Given a bunch of logarithms log(X),log(Y),log(Z),...
   60.79 +    * This class is used to compute the log of the sum, log(X+Y+Z+...)
   60.80 +    */
   60.81 +   private static class LogSumArray {
   60.82 +      private double[] logSummandArray;
   60.83 +      private int currSize;
   60.84 +
   60.85 +      private double maxLogSummand;
   60.86 +
   60.87 +      public LogSumArray(int maxEntries) {
   60.88 +        this.logSummandArray = new double[maxEntries];
   60.89 +
   60.90 +        this.currSize = 0;
   60.91 +        this.maxLogSummand = Double.NEGATIVE_INFINITY;
   60.92 +      }
   60.93 +
   60.94 +      public void addLogSummand(double logSummand) {
   60.95 +        logSummandArray[currSize] = logSummand;
   60.96 +        currSize++;
   60.97 +        // removing this line will prevent the error
   60.98 +        maxLogSummand = Math.max(maxLogSummand, logSummand);
   60.99 +      }
  60.100 +
  60.101 +      public double retrieveLogSum() {
  60.102 +        if (maxLogSummand == Double.NEGATIVE_INFINITY) return Double.NEGATIVE_INFINITY;
  60.103 +
  60.104 +        assert currSize <= logSummandArray.length;
  60.105 +
  60.106 +        double factorSum = 0;
  60.107 +        for (int i = 0; i < currSize; i++) {
  60.108 +           factorSum += Math.exp(logSummandArray[i] - maxLogSummand);
  60.109 +        }
  60.110 +
  60.111 +        return Math.log(factorSum) + maxLogSummand;
  60.112 +      }
  60.113 +   }
  60.114 +}
    61.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    61.2 +++ b/test/compiler/uncommontrap/TestStackBangMonitorOwned.java	Tue Jun 17 22:03:39 2014 -0700
    61.3 @@ -0,0 +1,268 @@
    61.4 +/*
    61.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
    61.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    61.7 + *
    61.8 + * This code is free software; you can redistribute it and/or modify it
    61.9 + * under the terms of the GNU General Public License version 2 only, as
   61.10 + * published by the Free Software Foundation.
   61.11 + *
   61.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   61.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   61.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   61.15 + * version 2 for more details (a copy is included in the LICENSE file that
   61.16 + * accompanied this code).
   61.17 + *
   61.18 + * You should have received a copy of the GNU General Public License version
   61.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   61.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   61.21 + *
   61.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   61.23 + * or visit www.oracle.com if you need additional information or have any
   61.24 + * questions.
   61.25 + */
   61.26 +
   61.27 +/*
   61.28 + * @test
   61.29 + * @bug 8032410
   61.30 + * @summary Stack overflow at deoptimization doesn't release owned monitors
   61.31 + * @run main/othervm -XX:-BackgroundCompilation -XX:CompileCommand=dontinline,TestStackBangMonitorOwned::m1 -XX:CompileCommand=exclude,TestStackBangMonitorOwned::m2 -Xss256K -XX:-UseOnStackReplacement TestStackBangMonitorOwned
   61.32 + *
   61.33 + */
   61.34 +public class TestStackBangMonitorOwned {
   61.35 +
   61.36 +    static class UnloadedClass1 {
   61.37 +        volatile int field;
   61.38 +    }
   61.39 +
   61.40 +    static Object m1(boolean deopt) {
   61.41 +        long l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12,
   61.42 +        l13, l14, l15, l16, l17, l18, l19, l20, l21, l22, l23, l24,
   61.43 +        l25, l26, l27, l28, l29, l30, l31, l32, l33, l34, l35, l36,
   61.44 +        l37, l38, l39, l40, l41, l42, l43, l44, l45, l46, l47, l48,
   61.45 +        l49, l50, l51, l52, l53, l54, l55, l56, l57, l58, l59, l60,
   61.46 +        l61, l62, l63, l64, l65, l66, l67, l68, l69, l70, l71, l72,
   61.47 +        l73, l74, l75, l76, l77, l78, l79, l80, l81, l82, l83, l84,
   61.48 +        l85, l86, l87, l88, l89, l90, l91, l92, l93, l94, l95, l96,
   61.49 +        l97, l98, l99, l100, l101, l102, l103, l104, l105, l106, l107,
   61.50 +        l108, l109, l110, l111, l112, l113, l114, l115, l116, l117,
   61.51 +        l118, l119, l120, l121, l122, l123, l124, l125, l126, l127,
   61.52 +        l128, l129, l130, l131, l132, l133, l134, l135, l136, l137,
   61.53 +        l138, l139, l140, l141, l142, l143, l144, l145, l146, l147,
   61.54 +        l148, l149, l150, l151, l152, l153, l154, l155, l156, l157,
   61.55 +        l158, l159, l160, l161, l162, l163, l164, l165, l166, l167,
   61.56 +        l168, l169, l170, l171, l172, l173, l174, l175, l176, l177,
   61.57 +        l178, l179, l180, l181, l182, l183, l184, l185, l186, l187,
   61.58 +        l188, l189, l190, l191, l192, l193, l194, l195, l196, l197,
   61.59 +        l198, l199, l200, l201, l202, l203, l204, l205, l206, l207,
   61.60 +        l208, l209, l210, l211, l212, l213, l214, l215, l216, l217,
   61.61 +        l218, l219, l220, l221, l222, l223, l224, l225, l226, l227,
   61.62 +        l228, l229, l230, l231, l232, l233, l234, l235, l236, l237,
   61.63 +        l238, l239, l240, l241, l242, l243, l244, l245, l246, l247,
   61.64 +        l248, l249, l250, l251, l252, l253, l254, l255, l256, l257,
   61.65 +        l258, l259, l260, l261, l262, l263, l264, l265, l266, l267,
   61.66 +        l268, l269, l270, l271, l272, l273, l274, l275, l276, l277,
   61.67 +        l278, l279, l280, l281, l282, l283, l284, l285, l286, l287,
   61.68 +        l288, l289, l290, l291, l292, l293, l294, l295, l296, l297,
   61.69 +        l298, l299, l300, l301, l302, l303, l304, l305, l306, l307,
   61.70 +        l308, l309, l310, l311, l312, l313, l314, l315, l316, l317,
   61.71 +        l318, l319, l320, l321, l322, l323, l324, l325, l326, l327,
   61.72 +        l328, l329, l330, l331, l332, l333, l334, l335, l336, l337,
   61.73 +        l338, l339, l340, l341, l342, l343, l344, l345, l346, l347,
   61.74 +        l348, l349, l350, l351, l352, l353, l354, l355, l356, l357,
   61.75 +        l358, l359, l360, l361, l362, l363, l364, l365, l366, l367,
   61.76 +        l368, l369, l370, l371, l372, l373, l374, l375, l376, l377,
   61.77 +        l378, l379, l380, l381, l382, l383, l384, l385, l386, l387,
   61.78 +        l388, l389, l390, l391, l392, l393, l394, l395, l396, l397,
   61.79 +        l398, l399, l400, l401, l402, l403, l404, l405, l406, l407,
   61.80 +        l408, l409, l410, l411, l412, l413, l414, l415, l416, l417,
   61.81 +        l418, l419, l420, l421, l422, l423, l424, l425, l426, l427,
   61.82 +        l428, l429, l430, l431, l432, l433, l434, l435, l436, l437,
   61.83 +        l438, l439, l440, l441, l442, l443, l444, l445, l446, l447,
   61.84 +        l448, l449, l450, l451, l452, l453, l454, l455, l456, l457,
   61.85 +        l458, l459, l460, l461, l462, l463, l464, l465, l466, l467,
   61.86 +        l468, l469, l470, l471, l472, l473, l474, l475, l476, l477,
   61.87 +        l478, l479, l480, l481, l482, l483, l484, l485, l486, l487,
   61.88 +        l488, l489, l490, l491, l492, l493, l494, l495, l496, l497,
   61.89 +        l498, l499, l500, l501, l502, l503, l504, l505, l506, l507,
   61.90 +        l508, l509, l510, l511;
   61.91 +
   61.92 +        long ll0, ll1, ll2, ll3, ll4, ll5, ll6, ll7, ll8, ll9, ll10, ll11, ll12,
   61.93 +        ll13, ll14, ll15, ll16, ll17, ll18, ll19, ll20, ll21, ll22, ll23, ll24,
   61.94 +        ll25, ll26, ll27, ll28, ll29, ll30, ll31, ll32, ll33, ll34, ll35, ll36,
   61.95 +        ll37, ll38, ll39, ll40, ll41, ll42, ll43, ll44, ll45, ll46, ll47, ll48,
   61.96 +        ll49, ll50, ll51, ll52, ll53, ll54, ll55, ll56, ll57, ll58, ll59, ll60,
   61.97 +        ll61, ll62, ll63, ll64, ll65, ll66, ll67, ll68, ll69, ll70, ll71, ll72,
   61.98 +        ll73, ll74, ll75, ll76, ll77, ll78, ll79, ll80, ll81, ll82, ll83, ll84,
   61.99 +        ll85, ll86, ll87, ll88, ll89, ll90, ll91, ll92, ll93, ll94, ll95, ll96,
  61.100 +        ll97, ll98, ll99, ll100, ll101, ll102, ll103, ll104, ll105, ll106, ll107,
  61.101 +        ll108, ll109, ll110, ll111, ll112, ll113, ll114, ll115, ll116, ll117,
  61.102 +        ll118, ll119, ll120, ll121, ll122, ll123, ll124, ll125, ll126, ll127,
  61.103 +        ll128, ll129, ll130, ll131, ll132, ll133, ll134, ll135, ll136, ll137,
  61.104 +        ll138, ll139, ll140, ll141, ll142, ll143, ll144, ll145, ll146, ll147,
  61.105 +        ll148, ll149, ll150, ll151, ll152, ll153, ll154, ll155, ll156, ll157,
  61.106 +        ll158, ll159, ll160, ll161, ll162, ll163, ll164, ll165, ll166, ll167,
  61.107 +        ll168, ll169, ll170, ll171, ll172, ll173, ll174, ll175, ll176, ll177,
  61.108 +        ll178, ll179, ll180, ll181, ll182, ll183, ll184, ll185, ll186, ll187,
  61.109 +        ll188, ll189, ll190, ll191, ll192, ll193, ll194, ll195, ll196, ll197,
  61.110 +        ll198, ll199, ll200, ll201, ll202, ll203, ll204, ll205, ll206, ll207,
  61.111 +        ll208, ll209, ll210, ll211, ll212, ll213, ll214, ll215, ll216, ll217,
  61.112 +        ll218, ll219, ll220, ll221, ll222, ll223, ll224, ll225, ll226, ll227,
  61.113 +        ll228, ll229, ll230, ll231, ll232, ll233, ll234, ll235, ll236, ll237,
  61.114 +        ll238, ll239, ll240, ll241, ll242, ll243, ll244, ll245, ll246, ll247,
  61.115 +        ll248, ll249, ll250, ll251, ll252, ll253, ll254, ll255, ll256, ll257,
  61.116 +        ll258, ll259, ll260, ll261, ll262, ll263, ll264, ll265, ll266, ll267,
  61.117 +        ll268, ll269, ll270, ll271, ll272, ll273, ll274, ll275, ll276, ll277,
  61.118 +        ll278, ll279, ll280, ll281, ll282, ll283, ll284, ll285, ll286, ll287,
  61.119 +        ll288, ll289, ll290, ll291, ll292, ll293, ll294, ll295, ll296, ll297,
  61.120 +        ll298, ll299, ll300, ll301, ll302, ll303, ll304, ll305, ll306, ll307,
  61.121 +        ll308, ll309, ll310, ll311, ll312, ll313, ll314, ll315, ll316, ll317,
  61.122 +        ll318, ll319, ll320, ll321, ll322, ll323, ll324, ll325, ll326, ll327,
  61.123 +        ll328, ll329, ll330, ll331, ll332, ll333, ll334, ll335, ll336, ll337,
  61.124 +        ll338, ll339, ll340, ll341, ll342, ll343, ll344, ll345, ll346, ll347,
  61.125 +        ll348, ll349, ll350, ll351, ll352, ll353, ll354, ll355, ll356, ll357,
  61.126 +        ll358, ll359, ll360, ll361, ll362, ll363, ll364, ll365, ll366, ll367,
  61.127 +        ll368, ll369, ll370, ll371, ll372, ll373, ll374, ll375, ll376, ll377,
  61.128 +        ll378, ll379, ll380, ll381, ll382, ll383, ll384, ll385, ll386, ll387,
  61.129 +        ll388, ll389, ll390, ll391, ll392, ll393, ll394, ll395, ll396, ll397,
  61.130 +        ll398, ll399, ll400, ll401, ll402, ll403, ll404, ll405, ll406, ll407,
  61.131 +        ll408, ll409, ll410, ll411, ll412, ll413, ll414, ll415, ll416, ll417,
  61.132 +        ll418, ll419, ll420, ll421, ll422, ll423, ll424, ll425, ll426, ll427,
  61.133 +        ll428, ll429, ll430, ll431, ll432, ll433, ll434, ll435, ll436, ll437,
  61.134 +        ll438, ll439, ll440, ll441, ll442, ll443, ll444, ll445, ll446, ll447,
  61.135 +        ll448, ll449, ll450, ll451, ll452, ll453, ll454, ll455, ll456, ll457,
  61.136 +        ll458, ll459, ll460, ll461, ll462, ll463, ll464, ll465, ll466, ll467,
  61.137 +        ll468, ll469, ll470, ll471, ll472, ll473, ll474, ll475, ll476, ll477,
  61.138 +        ll478, ll479, ll480, ll481, ll482, ll483, ll484, ll485, ll486, ll487,
  61.139 +        ll488, ll489, ll490, ll491, ll492, ll493, ll494, ll495, ll496, ll497,
  61.140 +        ll498, ll499, ll500, ll501, ll502, ll503, ll504, ll505, ll506, ll507,
  61.141 +        ll508, ll509, ll510, ll511;
  61.142 +
  61.143 +        if (deopt) {
  61.144 +            method_entered = true;
  61.145 +            synchronized(monitor) {
  61.146 +                do_monitor_acquire = true;
  61.147 +                UnloadedClass1 res = new UnloadedClass1(); // forces deopt with c2
  61.148 +                res.field = 0; //forced deopt with c1
  61.149 +                return res;
  61.150 +            }
  61.151 +        }
  61.152 +        return null;
  61.153 +    }
  61.154 +
  61.155 +    static boolean m2(boolean deopt) {
  61.156 +        long l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12,
  61.157 +        l13, l14, l15, l16, l17, l18, l19, l20, l21, l22, l23, l24,
  61.158 +        l25, l26, l27, l28, l29, l30, l31, l32, l33, l34, l35, l36,
  61.159 +        l37, l38, l39, l40, l41, l42, l43, l44, l45, l46, l47, l48,
  61.160 +        l49, l50, l51, l52, l53, l54, l55, l56, l57, l58, l59, l60,
  61.161 +        l61, l62, l63, l64, l65, l66, l67, l68, l69, l70, l71, l72,
  61.162 +        l73, l74, l75, l76, l77, l78, l79, l80, l81, l82, l83, l84,
  61.163 +        l85, l86, l87, l88, l89, l90, l91, l92, l93, l94, l95, l96,
  61.164 +        l97, l98, l99, l100, l101, l102, l103, l104, l105, l106, l107,
  61.165 +        l108, l109, l110, l111, l112, l113, l114, l115, l116, l117,
  61.166 +        l118, l119, l120, l121, l122, l123, l124, l125, l126, l127,
  61.167 +        l128, l129, l130, l131, l132, l133, l134, l135, l136, l137,
  61.168 +        l138, l139, l140, l141, l142, l143, l144, l145, l146, l147,
  61.169 +        l148, l149, l150, l151, l152, l153, l154, l155, l156, l157,
  61.170 +        l158, l159, l160, l161, l162, l163, l164, l165, l166, l167,
  61.171 +        l168, l169, l170, l171, l172, l173, l174, l175, l176, l177,
  61.172 +        l178, l179, l180, l181, l182, l183, l184, l185, l186, l187,
  61.173 +        l188, l189, l190, l191, l192, l193, l194, l195, l196, l197,
  61.174 +        l198, l199, l200, l201, l202, l203, l204, l205, l206, l207,
  61.175 +        l208, l209, l210, l211, l212, l213, l214, l215, l216, l217,
  61.176 +        l218, l219, l220, l221, l222, l223, l224, l225, l226, l227,
  61.177 +        l228, l229, l230, l231, l232, l233, l234, l235, l236, l237,
  61.178 +        l238, l239, l240, l241, l242, l243, l244, l245, l246, l247,
  61.179 +        l248, l249, l250, l251, l252, l253, l254, l255, l256, l257,
  61.180 +        l258, l259, l260, l261, l262, l263, l264, l265, l266, l267,
  61.181 +        l268, l269, l270, l271, l272, l273, l274, l275, l276, l277,
  61.182 +        l278, l279, l280, l281, l282, l283, l284, l285, l286, l287,
  61.183 +        l288, l289, l290, l291, l292, l293, l294, l295, l296, l297,
  61.184 +        l298, l299, l300, l301, l302, l303, l304, l305, l306, l307,
  61.185 +        l308, l309, l310, l311, l312, l313, l314, l315, l316, l317,
  61.186 +        l318, l319, l320, l321, l322, l323, l324, l325, l326, l327,
  61.187 +        l328, l329, l330, l331, l332, l333, l334, l335, l336, l337,
  61.188 +        l338, l339, l340, l341, l342, l343, l344, l345, l346, l347,
  61.189 +        l348, l349, l350, l351, l352, l353, l354, l355, l356, l357,
  61.190 +        l358, l359, l360, l361, l362, l363, l364, l365, l366, l367,
  61.191 +        l368, l369, l370, l371, l372, l373, l374, l375, l376, l377,
  61.192 +        l378, l379, l380, l381, l382, l383, l384, l385, l386, l387,
  61.193 +        l388, l389, l390, l391, l392, l393, l394, l395, l396, l397,
  61.194 +        l398, l399, l400, l401, l402, l403, l404, l405, l406, l407,
  61.195 +        l408, l409, l410, l411, l412, l413, l414, l415, l416, l417,
  61.196 +        l418, l419, l420, l421, l422, l423, l424, l425, l426, l427,
  61.197 +        l428, l429, l430, l431, l432, l433, l434, l435, l436, l437,
  61.198 +        l438, l439, l440, l441, l442, l443, l444, l445, l446, l447,
  61.199 +        l448, l449, l450, l451, l452, l453, l454, l455, l456, l457,
  61.200 +        l458, l459, l460, l461, l462, l463, l464, l465, l466, l467,
  61.201 +        l468, l469, l470, l471, l472, l473, l474, l475, l476, l477,
  61.202 +        l478, l479, l480, l481, l482, l483, l484, l485, l486, l487,
  61.203 +        l488, l489, l490, l491, l492, l493, l494, l495, l496, l497,
  61.204 +        l498, l499, l500, l501, l502, l503, l504, l505, l506, l507,
  61.205 +        l508, l509, l510, l511;
  61.206 +
  61.207 +        boolean do_m3 = false;
  61.208 +        try {
  61.209 +            do_m3 = m2(deopt);
  61.210 +        } catch (StackOverflowError e) {
  61.211 +            return true;
  61.212 +        }
  61.213 +        if (do_m3) {
  61.214 +            try {
  61.215 +                m1(deopt);
  61.216 +            } catch (StackOverflowError e) {}
  61.217 +        }
  61.218 +        return false;
  61.219 +    }
  61.220 +
  61.221 +    // Used for synchronization between threads
  61.222 +    static volatile boolean thread_started = false;
  61.223 +    static volatile boolean do_monitor_acquire = false;
  61.224 +    static volatile boolean monitor_acquired = false;
  61.225 +    static volatile boolean method_entered = false;
  61.226 +
  61.227 +    static Object monitor = new Object();
  61.228 +
  61.229 +    static public void main(String[] args) {
  61.230 +        // get m1 compiled
  61.231 +        for (int i = 0; i < 20000; i++) {
  61.232 +            m1(false);
  61.233 +        }
  61.234 +
  61.235 +        Thread thread = new Thread() {
  61.236 +            public void run() {
  61.237 +                thread_started = true;
  61.238 +                while(!do_monitor_acquire);
  61.239 +                System.out.println("Ok to try to acquire the lock");
  61.240 +                synchronized(monitor) {
  61.241 +                    monitor_acquired = true;
  61.242 +                }
  61.243 +            }
  61.244 +        };
  61.245 +
  61.246 +        thread.setDaemon(true);
  61.247 +        thread.start();
  61.248 +
  61.249 +        while(!thread_started);
  61.250 +
  61.251 +        m2(true);
  61.252 +
  61.253 +        if (!method_entered) {
  61.254 +            System.out.println("TEST PASSED");
  61.255 +            return;
  61.256 +        }
  61.257 +
  61.258 +        for (int i = 0; i < 10; i++) {
  61.259 +            System.out.println("Is lock acquired?");
  61.260 +            if (monitor_acquired) {
  61.261 +                System.out.println("TEST PASSED");
  61.262 +                return;
  61.263 +            }
  61.264 +            try {
  61.265 +                Thread.sleep(10000);
  61.266 +            } catch(InterruptedException ie) {
  61.267 +            }
  61.268 +        }
  61.269 +        System.out.println("TEST FAILED");
  61.270 +    }
  61.271 +}
    62.1 --- a/test/compiler/whitebox/IsMethodCompilableTest.java	Tue Jun 17 15:49:31 2014 -0700
    62.2 +++ b/test/compiler/whitebox/IsMethodCompilableTest.java	Tue Jun 17 22:03:39 2014 -0700
    62.3 @@ -28,7 +28,7 @@
    62.4   * @build IsMethodCompilableTest
    62.5   * @run main ClassFileInstaller sun.hotspot.WhiteBox
    62.6   * @run main ClassFileInstaller com.oracle.java.testlibrary.Platform
    62.7 - * @run main/othervm/timeout=2400 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:PerMethodRecompilationCutoff=3 -XX:CompileCommand=compileonly,SimpleTestCase$Helper::* IsMethodCompilableTest
    62.8 + * @run main/othervm/timeout=2400 -Xbootclasspath/a:. -Xmixed -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:PerMethodRecompilationCutoff=3 -XX:CompileCommand=compileonly,SimpleTestCase$Helper::* IsMethodCompilableTest
    62.9   * @summary testing of WB::isMethodCompilable()
   62.10   * @author igor.ignatyev@oracle.com
   62.11   */
    63.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    63.2 +++ b/test/gc/metaspace/TestMetaspaceInitialization.java	Tue Jun 17 22:03:39 2014 -0700
    63.3 @@ -0,0 +1,48 @@
    63.4 +/*
    63.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
    63.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    63.7 + *
    63.8 + * This code is free software; you can redistribute it and/or modify it
    63.9 + * under the terms of the GNU General Public License version 2 only, as
   63.10 + * published by the Free Software Foundation.
   63.11 + *
   63.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   63.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   63.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   63.15 + * version 2 for more details (a copy is included in the LICENSE file that
   63.16 + * accompanied this code).
   63.17 + *
   63.18 + * You should have received a copy of the GNU General Public License version
   63.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   63.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   63.21 + *
   63.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   63.23 + * or visit www.oracle.com if you need additional information or have any
   63.24 + * questions.
   63.25 + */
   63.26 +
   63.27 +import java.util.ArrayList;
   63.28 +
   63.29 +/* @test TestMetaspaceInitialization
   63.30 + * @bug 8042933
   63.31 + * @summary Tests to initialize metaspace with a very low MetaspaceSize
   63.32 + * @library /testlibrary
   63.33 + * @run main/othervm -XX:MetaspaceSize=2m TestMetaspaceInitialization
   63.34 + */
   63.35 +public class TestMetaspaceInitialization {
   63.36 +    private class Internal {
   63.37 +        public int x;
   63.38 +        public Internal(int x) {
   63.39 +            this.x = x;
   63.40 +        }
   63.41 +    }
   63.42 +
   63.43 +    private void test() {
   63.44 +        ArrayList<Internal> l = new ArrayList<>();
   63.45 +        l.add(new Internal(17));
   63.46 +    }
   63.47 +
   63.48 +    public static void main(String[] args) {
   63.49 +        new TestMetaspaceInitialization().test();
   63.50 +    }
   63.51 +}
    64.1 --- a/test/runtime/Thread/TestThreadDumpMonitorContention.java	Tue Jun 17 15:49:31 2014 -0700
    64.2 +++ b/test/runtime/Thread/TestThreadDumpMonitorContention.java	Tue Jun 17 22:03:39 2014 -0700
    64.3 @@ -24,6 +24,7 @@
    64.4  /*
    64.5   * @test
    64.6   * @bug     8036823
    64.7 + * @bug     8046287
    64.8   * @summary Creates two threads contending for the same lock and checks
    64.9   *      whether jstack reports "locked" by more than one thread.
   64.10   *
   64.11 @@ -52,10 +53,13 @@
   64.12      // looking for header lines with these patterns:
   64.13      // "ContendingThread-1" #19 prio=5 os_prio=64 tid=0x000000000079c000 nid=0x23 runnable [0xffff80ffb8b87000]
   64.14      // "ContendingThread-2" #21 prio=5 os_prio=64 tid=0x0000000000780000 nid=0x2f waiting for monitor entry [0xfffffd7fc1111000]
   64.15 +    // "ContendingThread-2" #24 prio=5 os_prio=64 tid=0x0000000000ec8800 nid=0x31 waiting on condition [0xfffffd7bbfffe000]
   64.16      final static Pattern HEADER_PREFIX_PATTERN = Pattern.compile(
   64.17          "^\"ContendingThread-.*");
   64.18 -    final static Pattern HEADER_WAITING_PATTERN = Pattern.compile(
   64.19 +    final static Pattern HEADER_WAITING_PATTERN1 = Pattern.compile(
   64.20          "^\"ContendingThread-.* waiting for monitor entry .*");
   64.21 +    final static Pattern HEADER_WAITING_PATTERN2 = Pattern.compile(
   64.22 +        "^\"ContendingThread-.* waiting on condition .*");
   64.23      final static Pattern HEADER_RUNNABLE_PATTERN = Pattern.compile(
   64.24          "^\"ContendingThread-.* runnable .*");
   64.25  
   64.26 @@ -80,17 +84,34 @@
   64.27      final static Pattern WAITING_PATTERN = Pattern.compile(
   64.28          ".* waiting to lock \\<.*\\(a TestThreadDumpMonitorContention.*");
   64.29  
   64.30 +    final static Object barrier = new Object();
   64.31      volatile static boolean done = false;
   64.32  
   64.33 +    static int barrier_cnt = 0;
   64.34 +    static int blank_line_match_cnt = 0;
   64.35      static int error_cnt = 0;
   64.36 -    static String header_line = null;
   64.37      static boolean have_header_line = false;
   64.38      static boolean have_thread_state_line = false;
   64.39 -    static int match_cnt = 0;
   64.40 -    static String[] match_list = new String[2];
   64.41 +    static String header_line = null;
   64.42 +    static int header_prefix_match_cnt = 0;
   64.43 +    static int locked_line_match_cnt = 0;
   64.44 +    static String[] locked_match_list = new String[2];
   64.45      static int n_samples = 15;
   64.46 +    static int sum_both_running_cnt = 0;
   64.47 +    static int sum_both_waiting_cnt = 0;
   64.48 +    static int sum_contended_cnt = 0;
   64.49 +    static int sum_locked_hdr_runnable_cnt = 0;
   64.50 +    static int sum_locked_hdr_waiting1_cnt = 0;
   64.51 +    static int sum_locked_hdr_waiting2_cnt = 0;
   64.52 +    static int sum_locked_thr_state_blocked_cnt = 0;
   64.53 +    static int sum_locked_thr_state_runnable_cnt = 0;
   64.54 +    static int sum_one_waiting_cnt = 0;
   64.55 +    static int sum_uncontended_cnt = 0;
   64.56 +    static int sum_waiting_hdr_waiting1_cnt = 0;
   64.57 +    static int sum_waiting_thr_state_blocked_cnt = 0;
   64.58      static String thread_state_line = null;
   64.59      static boolean verbose = false;
   64.60 +    static int waiting_line_match_cnt = 0;
   64.61  
   64.62      public static void main(String[] args) throws Exception {
   64.63          if (args.length != 0) {
   64.64 @@ -110,6 +131,11 @@
   64.65  
   64.66          Runnable runnable = new Runnable() {
   64.67              public void run() {
   64.68 +                synchronized (barrier) {
   64.69 +                    // let the main thread know we're running
   64.70 +                    barrier_cnt++;
   64.71 +                    barrier.notify();
   64.72 +                }
   64.73                  while (!done) {
   64.74                      synchronized (this) { }
   64.75                  }
   64.76 @@ -118,8 +144,16 @@
   64.77          Thread[] thread_list = new Thread[2];
   64.78          thread_list[0] = new Thread(runnable, "ContendingThread-1");
   64.79          thread_list[1] = new Thread(runnable, "ContendingThread-2");
   64.80 -        thread_list[0].start();
   64.81 -        thread_list[1].start();
   64.82 +        synchronized (barrier) {
   64.83 +            thread_list[0].start();
   64.84 +            thread_list[1].start();
   64.85 +
   64.86 +            // Wait until the contending threads are running so that
   64.87 +            // we don't sample any thread init states.
   64.88 +            while (barrier_cnt < 2) {
   64.89 +                barrier.wait();
   64.90 +            }
   64.91 +        }
   64.92  
   64.93          doSamples();
   64.94  
   64.95 @@ -143,11 +177,12 @@
   64.96      // Example:
   64.97      // "ContendingThread-1" #21 prio=5 os_prio=64 tid=0x00000000007b9000 nid=0x2f runnable [0xfffffd7fc1111000]
   64.98      //    java.lang.Thread.State: RUNNABLE
   64.99 -    //         at TestThreadDumpMonitorContention$1.run(TestThreadDumpMonitorContention.java:67)
  64.100 +    //         at TestThreadDumpMonitorContention$1.run(TestThreadDumpMonitorContention.java:140)
  64.101      //         at java.lang.Thread.run(Thread.java:745)
  64.102      //
  64.103      static boolean checkBlankLine(String line) {
  64.104          if (line.length() == 0) {
  64.105 +            blank_line_match_cnt++;
  64.106              have_header_line = false;
  64.107              have_thread_state_line = false;
  64.108              return true;
  64.109 @@ -161,49 +196,73 @@
  64.110      // Example 1:
  64.111      // "ContendingThread-1" #21 prio=5 os_prio=64 tid=0x00000000007b9000 nid=0x2f runnable [0xfffffd7fc1111000]
  64.112      //    java.lang.Thread.State: RUNNABLE
  64.113 -    //         at TestThreadDumpMonitorContention$1.run(TestThreadDumpMonitorContention.java:67)
  64.114 +    //         at TestThreadDumpMonitorContention$1.run(TestThreadDumpMonitorContention.java:140)
  64.115      //         - locked <0xfffffd7e6a2912f8> (a TestThreadDumpMonitorContention$1)
  64.116      //         at java.lang.Thread.run(Thread.java:745)
  64.117      //
  64.118      // Example 2:
  64.119      // "ContendingThread-1" #21 prio=5 os_prio=64 tid=0x00000000007b9000 nid=0x2f waiting for monitor entry [0xfffffd7fc1111000]
  64.120      //    java.lang.Thread.State: BLOCKED (on object monitor)
  64.121 -    //         at TestThreadDumpMonitorContention$1.run(TestThreadDumpMonitorContention.java:67)
  64.122 +    //         at TestThreadDumpMonitorContention$1.run(TestThreadDumpMonitorContention.java:140)
  64.123      //         - locked <0xfffffd7e6a2912f8> (a TestThreadDumpMonitorContention$1)
  64.124      //         at java.lang.Thread.run(Thread.java:745)
  64.125      //
  64.126 +    // Example 3:
  64.127 +    // "ContendingThread-2" #24 prio=5 os_prio=64 tid=0x0000000000ec8800 nid=0x31 waiting on condition [0xfffffd7bbfffe000]
  64.128 +    //    java.lang.Thread.State: RUNNABLE
  64.129 +    //    JavaThread state: _thread_blocked
  64.130 +    // Thread: 0x0000000000ec8800  [0x31] State: _at_safepoint _has_called_back 0 _at_poll_safepoint 0
  64.131 +    //    JavaThread state: _thread_blocked
  64.132 +    //         at TestThreadDumpMonitorContention$1.run(TestThreadDumpMonitorContention.java:140)
  64.133 +    //         - locked <0xfffffd7e6d03eb28> (a TestThreadDumpMonitorContention$1)
  64.134 +    //         at java.lang.Thread.run(Thread.java:745)
  64.135 +    //
  64.136      static boolean checkLockedLine(String line) {
  64.137          Matcher matcher = LOCK_PATTERN.matcher(line);
  64.138          if (matcher.matches()) {
  64.139              if (verbose) {
  64.140                  System.out.println("locked_line='" + line + "'");
  64.141              }
  64.142 -            match_list[match_cnt] = new String(line);
  64.143 -            match_cnt++;
  64.144 +            locked_match_list[locked_line_match_cnt] = new String(line);
  64.145 +            locked_line_match_cnt++;
  64.146  
  64.147              matcher = HEADER_RUNNABLE_PATTERN.matcher(header_line);
  64.148 -            if (!matcher.matches()) {
  64.149 +            if (matcher.matches()) {
  64.150 +                sum_locked_hdr_runnable_cnt++;
  64.151 +            } else {
  64.152                  // It's strange, but a locked line can also
  64.153 -                // match the HEADER_WAITING_PATTERN.
  64.154 -                matcher = HEADER_WAITING_PATTERN.matcher(header_line);
  64.155 -                if (!matcher.matches()) {
  64.156 -                    System.err.println();
  64.157 -                    System.err.println("ERROR: header line does " +
  64.158 -                        "not match runnable or waiting patterns.");
  64.159 -                    System.err.println("ERROR: header_line='" +
  64.160 -                        header_line + "'");
  64.161 -                    System.err.println("ERROR: locked_line='" + line + "'");
  64.162 -                    error_cnt++;
  64.163 +                // match the HEADER_WAITING_PATTERN{1,2}.
  64.164 +                matcher = HEADER_WAITING_PATTERN1.matcher(header_line);
  64.165 +                if (matcher.matches()) {
  64.166 +                    sum_locked_hdr_waiting1_cnt++;
  64.167 +                } else {
  64.168 +                    matcher = HEADER_WAITING_PATTERN2.matcher(header_line);
  64.169 +                    if (matcher.matches()) {
  64.170 +                        sum_locked_hdr_waiting2_cnt++;
  64.171 +                    } else {
  64.172 +                        System.err.println();
  64.173 +                        System.err.println("ERROR: header line does " +
  64.174 +                            "not match runnable or waiting patterns.");
  64.175 +                        System.err.println("ERROR: header_line='" +
  64.176 +                            header_line + "'");
  64.177 +                        System.err.println("ERROR: locked_line='" + line +
  64.178 +                            "'");
  64.179 +                        error_cnt++;
  64.180 +                    }
  64.181                  }
  64.182              }
  64.183  
  64.184              matcher = THREAD_STATE_RUNNABLE_PATTERN.matcher(thread_state_line);
  64.185 -            if (!matcher.matches()) {
  64.186 +            if (matcher.matches()) {
  64.187 +                sum_locked_thr_state_runnable_cnt++;
  64.188 +            } else {
  64.189                  // It's strange, but a locked line can also
  64.190                  // match the THREAD_STATE_BLOCKED_PATTERN.
  64.191                  matcher = THREAD_STATE_BLOCKED_PATTERN.matcher(
  64.192                                thread_state_line);
  64.193 -                if (!matcher.matches()) {
  64.194 +                if (matcher.matches()) {
  64.195 +                    sum_locked_thr_state_blocked_cnt++;
  64.196 +                } else {
  64.197                      System.err.println();
  64.198                      System.err.println("ERROR: thread state line does not " +
  64.199                          "match runnable or waiting patterns.");
  64.200 @@ -229,19 +288,22 @@
  64.201      // Example:
  64.202      // "ContendingThread-2" #22 prio=5 os_prio=64 tid=0x00000000007b9800 nid=0x30 waiting for monitor entry [0xfffffd7fc1010000]
  64.203      //    java.lang.Thread.State: BLOCKED (on object monitor)
  64.204 -    //         at TestThreadDumpMonitorContention$1.run(TestThreadDumpMonitorContention.java:67)
  64.205 +    //         at TestThreadDumpMonitorContention$1.run(TestThreadDumpMonitorContention.java:140)
  64.206      //         - waiting to lock <0xfffffd7e6a2912f8> (a TestThreadDumpMonitorContention$1)
  64.207      //         at java.lang.Thread.run(Thread.java:745)
  64.208      //
  64.209      static boolean checkWaitingLine(String line) {
  64.210          Matcher matcher = WAITING_PATTERN.matcher(line);
  64.211          if (matcher.matches()) {
  64.212 +            waiting_line_match_cnt++;
  64.213              if (verbose) {
  64.214                  System.out.println("waiting_line='" + line + "'");
  64.215              }
  64.216  
  64.217 -            matcher = HEADER_WAITING_PATTERN.matcher(header_line);
  64.218 -            if (!matcher.matches()) {
  64.219 +            matcher = HEADER_WAITING_PATTERN1.matcher(header_line);
  64.220 +            if (matcher.matches()) {
  64.221 +                sum_waiting_hdr_waiting1_cnt++;
  64.222 +            } else {
  64.223                  System.err.println();
  64.224                  System.err.println("ERROR: header line does " +
  64.225                      "not match a waiting pattern.");
  64.226 @@ -251,7 +313,9 @@
  64.227              }
  64.228  
  64.229              matcher = THREAD_STATE_BLOCKED_PATTERN.matcher(thread_state_line);
  64.230 -            if (!matcher.matches()) {
  64.231 +            if (matcher.matches()) {
  64.232 +                sum_waiting_thr_state_blocked_cnt++;
  64.233 +            } else {
  64.234                  System.err.println();
  64.235                  System.err.println("ERROR: thread state line " +
  64.236                      "does not match a waiting pattern.");
  64.237 @@ -273,7 +337,10 @@
  64.238  
  64.239      static void doSamples() throws Exception {
  64.240          for (int count = 0; count < n_samples; count++) {
  64.241 -            match_cnt = 0;
  64.242 +            blank_line_match_cnt = 0;
  64.243 +            header_prefix_match_cnt = 0;
  64.244 +            locked_line_match_cnt = 0;
  64.245 +            waiting_line_match_cnt = 0;
  64.246              // verbose mode or an error has a lot of output so add more space
  64.247              if (verbose || error_cnt > 0) System.out.println();
  64.248              System.out.println("Sample #" + count);
  64.249 @@ -290,12 +357,12 @@
  64.250              //   a failure and we report it
  64.251              // - for a stack trace that matches LOCKED_PATTERN, we verify:
  64.252              //   - the header line matches HEADER_RUNNABLE_PATTERN
  64.253 -            //     or HEADER_WAITING_PATTERN
  64.254 +            //     or HEADER_WAITING_PATTERN{1,2}
  64.255              //   - the thread state line matches THREAD_STATE_BLOCKED_PATTERN
  64.256              //     or THREAD_STATE_RUNNABLE_PATTERN
  64.257              //   - we report any mismatches as failures
  64.258              // - for a stack trace that matches WAITING_PATTERN, we verify:
  64.259 -            //   - the header line matches HEADER_WAITING_PATTERN
  64.260 +            //   - the header line matches HEADER_WAITING_PATTERN1
  64.261              //   - the thread state line matches THREAD_STATE_BLOCKED_PATTERN
  64.262              //   - we report any mismatches as failures
  64.263              // - the stack traces that match HEADER_PREFIX_PATTERN may
  64.264 @@ -324,6 +391,7 @@
  64.265                  if (!have_header_line) {
  64.266                      matcher = HEADER_PREFIX_PATTERN.matcher(line);
  64.267                      if (matcher.matches()) {
  64.268 +                        header_prefix_match_cnt++;
  64.269                          if (verbose) {
  64.270                              System.out.println();
  64.271                              System.out.println("header='" + line + "'");
  64.272 @@ -366,19 +434,80 @@
  64.273              }
  64.274              process.waitFor();
  64.275  
  64.276 -           if (match_cnt == 2) {
  64.277 -               if (match_list[0].equals(match_list[1])) {
  64.278 -                   System.err.println();
  64.279 -                   System.err.println("ERROR: matching lock lines:");
  64.280 -                   System.err.println("ERROR: line[0]'" + match_list[0] + "'");
  64.281 -                   System.err.println("ERROR: line[1]'" + match_list[1] + "'");
  64.282 -                   error_cnt++;
  64.283 -               }
  64.284 -           }
  64.285 +            if (header_prefix_match_cnt != 2) {
  64.286 +                System.err.println();
  64.287 +                System.err.println("ERROR: should match exactly two headers.");
  64.288 +                System.err.println("ERROR: header_prefix_match_cnt=" +
  64.289 +                    header_prefix_match_cnt);
  64.290 +                error_cnt++;
  64.291 +            }
  64.292 +
  64.293 +            if (locked_line_match_cnt == 2) {
  64.294 +                if (locked_match_list[0].equals(locked_match_list[1])) {
  64.295 +                    System.err.println();
  64.296 +                    System.err.println("ERROR: matching lock lines:");
  64.297 +                    System.err.println("ERROR: line[0]'" +
  64.298 +                        locked_match_list[0] + "'");
  64.299 +                    System.err.println("ERROR: line[1]'" +
  64.300 +                        locked_match_list[1] + "'");
  64.301 +                    error_cnt++;
  64.302 +                }
  64.303 +            }
  64.304 +
  64.305 +            if (locked_line_match_cnt == 1) {
  64.306 +                // one thread has the lock
  64.307 +                if (waiting_line_match_cnt == 1) {
  64.308 +                    // and the other contended for it
  64.309 +                    sum_contended_cnt++;
  64.310 +                } else {
  64.311 +                    // and the other is just running
  64.312 +                    sum_uncontended_cnt++;
  64.313 +                }
  64.314 +            } else if (waiting_line_match_cnt == 1) {
  64.315 +                // one thread is waiting
  64.316 +                sum_one_waiting_cnt++;
  64.317 +            } else if (waiting_line_match_cnt == 2) {
  64.318 +                // both threads are waiting
  64.319 +                sum_both_waiting_cnt++;
  64.320 +            } else {
  64.321 +                // both threads are running
  64.322 +                sum_both_running_cnt++;
  64.323 +            }
  64.324  
  64.325              // slight delay between jstack launches
  64.326              Thread.sleep(500);
  64.327          }
  64.328 +
  64.329 +        if (error_cnt != 0) {
  64.330 +            // skip summary info since there were errors
  64.331 +            return;
  64.332 +        }
  64.333 +
  64.334 +        System.out.println("INFO: Summary for all samples:");
  64.335 +        System.out.println("INFO: both_running_cnt=" + sum_both_running_cnt);
  64.336 +        System.out.println("INFO: both_waiting_cnt=" + sum_both_waiting_cnt);
  64.337 +        System.out.println("INFO: contended_cnt=" + sum_contended_cnt);
  64.338 +        System.out.println("INFO: one_waiting_cnt=" + sum_one_waiting_cnt);
  64.339 +        System.out.println("INFO: uncontended_cnt=" + sum_uncontended_cnt);
  64.340 +        System.out.println("INFO: locked_hdr_runnable_cnt=" +
  64.341 +            sum_locked_hdr_runnable_cnt);
  64.342 +        System.out.println("INFO: locked_hdr_waiting1_cnt=" +
  64.343 +            sum_locked_hdr_waiting1_cnt);
  64.344 +        System.out.println("INFO: locked_hdr_waiting2_cnt=" +
  64.345 +            sum_locked_hdr_waiting2_cnt);
  64.346 +        System.out.println("INFO: locked_thr_state_blocked_cnt=" +
  64.347 +            sum_locked_thr_state_blocked_cnt);
  64.348 +        System.out.println("INFO: locked_thr_state_runnable_cnt=" +
  64.349 +            sum_locked_thr_state_runnable_cnt);
  64.350 +        System.out.println("INFO: waiting_hdr_waiting1_cnt=" +
  64.351 +            sum_waiting_hdr_waiting1_cnt);
  64.352 +        System.out.println("INFO: waiting_thr_state_blocked_cnt=" +
  64.353 +            sum_waiting_thr_state_blocked_cnt);
  64.354 +
  64.355 +        if (sum_contended_cnt == 0) {
  64.356 +            System.err.println("WARNING: the primary scenario for 8036823" +
  64.357 +                " has not been exercised by this test run.");
  64.358 +        }
  64.359      }
  64.360  
  64.361      // This helper relies on RuntimeMXBean.getName() returning a string
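(For context, a minimal standalone sketch of the per-sample classification that the new test logic above performs; the class and method names here are hypothetical and not part of the changeset. Given how many lines of one jstack sample showed a thread holding the monitor and how many showed a thread waiting on it, the sample is binned into one of five categories; the separate error checks for duplicate lock lines and header counts are not repeated here.)

// Hypothetical helper mirroring the classification added by the test:
// counts of "locked" and "waiting to lock" matches from a single jstack
// sample are mapped to one of five summary buckets.
public class SampleClassifier {

    enum Category { CONTENDED, UNCONTENDED, ONE_WAITING, BOTH_WAITING, BOTH_RUNNING }

    static Category classify(int lockedLineMatchCnt, int waitingLineMatchCnt) {
        if (lockedLineMatchCnt == 1) {
            // one thread holds the monitor ...
            return (waitingLineMatchCnt == 1)
                ? Category.CONTENDED      // ... and the other contended for it
                : Category.UNCONTENDED;   // ... and the other is just running
        } else if (waitingLineMatchCnt == 1) {
            return Category.ONE_WAITING;  // one thread is waiting, none hold the lock
        } else if (waitingLineMatchCnt == 2) {
            return Category.BOTH_WAITING; // both threads are waiting
        }
        return Category.BOTH_RUNNING;     // both threads are running
    }

    public static void main(String[] args) {
        System.out.println(classify(1, 1)); // prints CONTENDED
        System.out.println(classify(0, 0)); // prints BOTH_RUNNING
    }
}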
    65.1 --- a/test/serviceability/ParserTest.java	Tue Jun 17 15:49:31 2014 -0700
    65.2 +++ b/test/serviceability/ParserTest.java	Tue Jun 17 22:03:39 2014 -0700
    65.3 @@ -22,10 +22,10 @@
    65.4   */
    65.5  
    65.6  /*
    65.7 - * @test ParserTest
    65.8 + * @test
  65.9   * @summary Test that the diagnostic command argument parser works
   65.10   * @library /testlibrary /testlibrary/whitebox
   65.11 - * @build ParserTest
   65.12 + * @build ClassFileInstaller sun.hotspot.WhiteBox sun.hotspot.parser.*
   65.13   * @run main ClassFileInstaller sun.hotspot.WhiteBox
   65.14   * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI ParserTest
   65.15   */
    66.1 --- a/test/serviceability/attach/AttachWithStalePidFile.java	Tue Jun 17 15:49:31 2014 -0700
    66.2 +++ b/test/serviceability/attach/AttachWithStalePidFile.java	Tue Jun 17 22:03:39 2014 -0700
    66.3 @@ -27,7 +27,7 @@
    66.4   * @key regression
    66.5   * @summary Regression test for attach issue where stale pid files in /tmp lead to connection issues
    66.6   * @library /testlibrary
    66.7 - * @compile AttachWithStalePidFileTarget.java
    66.8 + * @build com.oracle.java.testlibrary.* AttachWithStalePidFileTarget
    66.9   * @run main AttachWithStalePidFile
   66.10   */
   66.11  
    67.1 --- a/test/serviceability/jvmti/GetObjectSizeOverflow.java	Tue Jun 17 15:49:31 2014 -0700
    67.2 +++ b/test/serviceability/jvmti/GetObjectSizeOverflow.java	Tue Jun 17 22:03:39 2014 -0700
    67.3 @@ -29,7 +29,7 @@
    67.4   * @test
    67.5   * @bug 8027230
    67.6   * @library /testlibrary
    67.7 - * @build GetObjectSizeOverflowAgent
    67.8 + * @build ClassFileInstaller com.oracle.java.testlibrary.* GetObjectSizeOverflowAgent
    67.9   * @run main ClassFileInstaller GetObjectSizeOverflowAgent
   67.10   * @run main GetObjectSizeOverflow
   67.11   */
    68.1 --- a/test/serviceability/jvmti/TestRedefineWithUnresolvedClass.java	Tue Jun 17 15:49:31 2014 -0700
    68.2 +++ b/test/serviceability/jvmti/TestRedefineWithUnresolvedClass.java	Tue Jun 17 22:03:39 2014 -0700
    68.3 @@ -26,7 +26,7 @@
    68.4   * @summary Redefine a class with an UnresolvedClass reference in the constant pool.
    68.5   * @bug 8035150
    68.6   * @library /testlibrary
    68.7 - * @build UnresolvedClassAgent com.oracle.java.testlibrary.ProcessTools com.oracle.java.testlibrary.OutputAnalyzer
    68.8 + * @build com.oracle.java.testlibrary.* UnresolvedClassAgent
    68.9   * @run main TestRedefineWithUnresolvedClass
   68.10   */
   68.11  
    69.1 --- a/test/serviceability/sa/jmap-hashcode/Test8028623.java	Tue Jun 17 15:49:31 2014 -0700
    69.2 +++ b/test/serviceability/sa/jmap-hashcode/Test8028623.java	Tue Jun 17 22:03:39 2014 -0700
    69.3 @@ -26,6 +26,7 @@
    69.4   * @bug 8028623
    69.5   * @summary Test hashing of extended characters in Serviceability Agent.
    69.6   * @library /testlibrary
    69.7 + * @build com.oracle.java.testlibrary.*
    69.8   * @compile -encoding utf8 Test8028623.java
    69.9   * @run main Test8028623
   69.10   */
    70.1 --- a/test/serviceability/sa/jmap-hprof/JMapHProfLargeHeapTest.java	Tue Jun 17 15:49:31 2014 -0700
    70.2 +++ b/test/serviceability/sa/jmap-hprof/JMapHProfLargeHeapTest.java	Tue Jun 17 22:03:39 2014 -0700
    70.3 @@ -44,7 +44,7 @@
    70.4   * @key regression
    70.5   * @summary Regression test for hprof export issue due to large heaps (>2G)
    70.6   * @library /testlibrary
    70.7 - * @compile JMapHProfLargeHeapProc.java
    70.8 + * @build com.oracle.java.testlibrary.* JMapHProfLargeHeapProc
    70.9   * @run main JMapHProfLargeHeapTest
   70.10   */
   70.11  
    71.1 --- a/test/testlibrary/ctw/test/ClassesDirTest.java	Tue Jun 17 15:49:31 2014 -0700
    71.2 +++ b/test/testlibrary/ctw/test/ClassesDirTest.java	Tue Jun 17 22:03:39 2014 -0700
    71.3 @@ -22,10 +22,10 @@
    71.4   */
    71.5  
    71.6  /*
    71.7 - * @test ClassesDirTest
    71.8 + * @test
    71.9   * @bug 8012447
   71.10   * @library /testlibrary /testlibrary/whitebox /testlibrary/ctw/src
   71.11 - * @build sun.hotspot.tools.ctw.CompileTheWorld sun.hotspot.WhiteBox ClassesDirTest Foo Bar
   71.12 + * @build ClassFileInstaller sun.hotspot.tools.ctw.CompileTheWorld sun.hotspot.WhiteBox Foo Bar
   71.13   * @run main ClassFileInstaller sun.hotspot.WhiteBox Foo Bar
   71.14   * @run main ClassesDirTest prepare
   71.15   * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Dsun.hotspot.tools.ctw.logfile=ctw.log sun.hotspot.tools.ctw.CompileTheWorld classes
    72.1 --- a/test/testlibrary/ctw/test/ClassesListTest.java	Tue Jun 17 15:49:31 2014 -0700
    72.2 +++ b/test/testlibrary/ctw/test/ClassesListTest.java	Tue Jun 17 22:03:39 2014 -0700
    72.3 @@ -22,10 +22,10 @@
    72.4   */
    72.5  
    72.6  /*
    72.7 - * @test ClassesListTest
    72.8 + * @test
    72.9   * @bug 8012447
   72.10   * @library /testlibrary /testlibrary/whitebox /testlibrary/ctw/src
   72.11 - * @build sun.hotspot.tools.ctw.CompileTheWorld sun.hotspot.WhiteBox ClassesListTest Foo Bar
   72.12 + * @build ClassFileInstaller sun.hotspot.tools.ctw.CompileTheWorld sun.hotspot.WhiteBox Foo Bar
   72.13   * @run main ClassFileInstaller sun.hotspot.WhiteBox Foo Bar
   72.14   * @run main ClassesListTest prepare
   72.15   * @run main/othervm/timeout=600 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Dsun.hotspot.tools.ctw.logfile=ctw.log sun.hotspot.tools.ctw.CompileTheWorld classes.lst
    73.1 --- a/test/testlibrary/ctw/test/JarDirTest.java	Tue Jun 17 15:49:31 2014 -0700
    73.2 +++ b/test/testlibrary/ctw/test/JarDirTest.java	Tue Jun 17 22:03:39 2014 -0700
    73.3 @@ -22,10 +22,10 @@
    73.4   */
    73.5  
    73.6  /*
    73.7 - * @test JarDirTest
    73.8 + * @test
    73.9   * @bug 8012447
   73.10   * @library /testlibrary /testlibrary/whitebox /testlibrary/ctw/src
   73.11 - * @build sun.hotspot.tools.ctw.CompileTheWorld sun.hotspot.WhiteBox JarDirTest Foo Bar
   73.12 + * @build ClassFileInstaller com.oracle.java.testlibrary.* sun.hotspot.tools.ctw.CompileTheWorld sun.hotspot.WhiteBox Foo Bar
   73.13   * @run main ClassFileInstaller sun.hotspot.WhiteBox Foo Bar
   73.14   * @run main JarDirTest prepare
   73.15   * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Dsun.hotspot.tools.ctw.logfile=ctw.log sun.hotspot.tools.ctw.CompileTheWorld jars/*
    74.1 --- a/test/testlibrary/ctw/test/JarsTest.java	Tue Jun 17 15:49:31 2014 -0700
    74.2 +++ b/test/testlibrary/ctw/test/JarsTest.java	Tue Jun 17 22:03:39 2014 -0700
    74.3 @@ -22,10 +22,10 @@
    74.4   */
    74.5  
    74.6  /*
    74.7 - * @test JarsTest
    74.8 + * @test
    74.9   * @bug 8012447
   74.10   * @library /testlibrary /testlibrary/whitebox /testlibrary/ctw/src
   74.11 - * @build sun.hotspot.tools.ctw.CompileTheWorld sun.hotspot.WhiteBox JarsTest Foo Bar
   74.12 + * @build ClassFileInstaller com.oracle.java.testlibrary.* sun.hotspot.tools.ctw.CompileTheWorld sun.hotspot.WhiteBox Foo Bar
   74.13   * @run main ClassFileInstaller sun.hotspot.WhiteBox Foo Bar
   74.14   * @run main JarsTest prepare
   74.15   * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Dsun.hotspot.tools.ctw.logfile=ctw.log sun.hotspot.tools.ctw.CompileTheWorld foo.jar bar.jar
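(The @build changes in the test files above all follow the same pattern: list ClassFileInstaller, the com.oracle.java.testlibrary.* classes, and the other required classes explicitly instead of relying on implicit compilation of the test class itself. A minimal sketch of a jtreg test header in that style is shown below; the test class name is hypothetical and not part of this changeset.)

/*
 * @test
 * @summary Example of the explicit @build style used in the changes above (hypothetical test)
 * @library /testlibrary /testlibrary/whitebox
 * @build ClassFileInstaller com.oracle.java.testlibrary.* sun.hotspot.WhiteBox
 * @run main ClassFileInstaller sun.hotspot.WhiteBox
 * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI ExampleTest
 */
public class ExampleTest {
    public static void main(String[] args) throws Exception {
        // test body would go here
    }
}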
