Merge

Tue, 17 Jun 2014 22:15:24 -0700

author
asaha
date
Tue, 17 Jun 2014 22:15:24 -0700
changeset 6843
bba95ce6b634
parent 6842
a2221bbf6812
parent 6794
7ef8ab2bf2b0
child 6844
27348de6239b

Merge

.hgtags file | annotate | diff | comparison | revisions
make/hotspot_version file | annotate | diff | comparison | revisions
     1.1 --- a/.hgtags	Tue Jun 17 16:12:09 2014 -0700
     1.2 +++ b/.hgtags	Tue Jun 17 22:15:24 2014 -0700
     1.3 @@ -486,6 +486,7 @@
     1.4  8ea4732884ccd5586f0afe9478b80add90231455 jdk8u20-b17
     1.5  b685b4e870b159ea5731984199d275879d427038 hs25.20-b18
     1.6  11159d7ec80462a422e39c9b3a39ae932923622d jdk8u20-b18
     1.7 +3e1cec358ab95ef985f821219104141b9ffda83f hs25.20-b19
     1.8  a4d44dfb7d30eea54bc172e4429a655454ae0bbf jdk8u25-b00
     1.9  9a2152fbd929b0d8b2f5c326a5526214ae71731a jdk8u25-b01
    1.10  d3d5604ea0dea3812e87ba76ac199d0a8be6f49f jdk8u25-b02
     2.1 --- a/src/cpu/ppc/vm/cppInterpreter_ppc.cpp	Tue Jun 17 16:12:09 2014 -0700
     2.2 +++ b/src/cpu/ppc/vm/cppInterpreter_ppc.cpp	Tue Jun 17 22:15:24 2014 -0700
     2.3 @@ -1,7 +1,7 @@
     2.4  
     2.5  /*
     2.6 - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
     2.7 - * Copyright 2012, 2013 SAP AG. All rights reserved.
     2.8 + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
     2.9 + * Copyright 2012, 2014 SAP AG. All rights reserved.
    2.10   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    2.11   *
    2.12   * This code is free software; you can redistribute it and/or modify it
    2.13 @@ -2948,17 +2948,60 @@
    2.14    istate->_last_Java_fp = last_Java_fp;
    2.15  }
    2.16  
    2.17 -int AbstractInterpreter::layout_activation(Method* method,
    2.18 -                                           int temps,        // Number of slots on java expression stack in use.
    2.19 -                                           int popframe_args,
    2.20 -                                           int monitors,     // Number of active monitors.
    2.21 -                                           int caller_actual_parameters,
    2.22 -                                           int callee_params,// Number of slots for callee parameters.
    2.23 -                                           int callee_locals,// Number of slots for locals.
    2.24 -                                           frame* caller,
    2.25 -                                           frame* interpreter_frame,
    2.26 -                                           bool is_top_frame,
    2.27 -                                           bool is_bottom_frame) {
    2.28 +// Computes monitor_size and top_frame_size in bytes.
    2.29 +static void frame_size_helper(int max_stack,
    2.30 +                              int monitors,
    2.31 +                              int& monitor_size,
    2.32 +                              int& top_frame_size) {
    2.33 +  monitor_size = frame::interpreter_frame_monitor_size_in_bytes() * monitors;
    2.34 +  top_frame_size = round_to(frame::interpreter_frame_cinterpreterstate_size_in_bytes()
    2.35 +                            + monitor_size
    2.36 +                            + max_stack * Interpreter::stackElementSize
    2.37 +                            + 2 * Interpreter::stackElementSize,
    2.38 +                            frame::alignment_in_bytes)
    2.39 +                   + frame::top_ijava_frame_abi_size;
    2.40 +}
    2.41 +
    2.42 +// Returns number of stackElementWords needed for the interpreter frame with the
    2.43 +// given sections.
    2.44 +int AbstractInterpreter::size_activation(int max_stack,
    2.45 +                                         int temps,
    2.46 +                                         int extra_args,
    2.47 +                                         int monitors,
    2.48 +                                         int callee_params,
    2.49 +                                         int callee_locals,
    2.50 +                                         bool is_top_frame) {
    2.51 +  int monitor_size = 0;
    2.52 +  int top_frame_size = 0;
    2.53 +  frame_size_helper(max_stack, monitors, monitor_size, top_frame_size);
    2.54 +
    2.55 +  int frame_size;
    2.56 +  if (is_top_frame) {
    2.57 +    frame_size = top_frame_size;
    2.58 +  } else {
    2.59 +    frame_size = round_to(frame::interpreter_frame_cinterpreterstate_size_in_bytes()
    2.60 +                          + monitor_size
    2.61 +                          + (temps - callee_params + callee_locals) * Interpreter::stackElementSize
    2.62 +                          + 2 * Interpreter::stackElementSize,
    2.63 +                          frame::alignment_in_bytes)
    2.64 +                 + frame::parent_ijava_frame_abi_size;
    2.65 +    assert(extra_args == 0, "non-zero for top_frame only");
    2.66 +  }
    2.67 +
    2.68 +  return frame_size / Interpreter::stackElementSize;
    2.69 +}
    2.70 +
    2.71 +void AbstractInterpreter::layout_activation(Method* method,
    2.72 +                                            int temps,        // Number of slots on java expression stack in use.
    2.73 +                                            int popframe_args,
    2.74 +                                            int monitors,     // Number of active monitors.
    2.75 +                                            int caller_actual_parameters,
    2.76 +                                            int callee_params,// Number of slots for callee parameters.
    2.77 +                                            int callee_locals,// Number of slots for locals.
    2.78 +                                            frame* caller,
    2.79 +                                            frame* interpreter_frame,
    2.80 +                                            bool is_top_frame,
    2.81 +                                            bool is_bottom_frame) {
    2.82  
    2.83    // NOTE this code must exactly mimic what
    2.84    // InterpreterGenerator::generate_compute_interpreter_state() does
    2.85 @@ -2968,86 +3011,64 @@
    2.86    // both the abi scratch area and a place to hold a result from a
    2.87    // callee on its way to the callers stack.
    2.88  
    2.89 -  int monitor_size = frame::interpreter_frame_monitor_size_in_bytes() * monitors;
    2.90 -  int frame_size;
    2.91 -  int top_frame_size = round_to(frame::interpreter_frame_cinterpreterstate_size_in_bytes()
    2.92 -                                + monitor_size
    2.93 -                                + (method->max_stack() *Interpreter::stackElementWords * BytesPerWord)
    2.94 -                                + 2*BytesPerWord,
    2.95 -                                frame::alignment_in_bytes)
    2.96 -                      + frame::top_ijava_frame_abi_size;
    2.97 -  if (is_top_frame) {
    2.98 -    frame_size = top_frame_size;
    2.99 +  int monitor_size = 0;
   2.100 +  int top_frame_size = 0;
   2.101 +  frame_size_helper(method->max_stack(), monitors, monitor_size, top_frame_size);
   2.102 +
   2.103 +  intptr_t sp = (intptr_t)interpreter_frame->sp();
   2.104 +  intptr_t fp = *(intptr_t *)sp;
   2.105 +  assert(fp == (intptr_t)caller->sp(), "fp must match");
   2.106 +  interpreterState cur_state =
   2.107 +    (interpreterState)(fp - frame::interpreter_frame_cinterpreterstate_size_in_bytes());
   2.108 +
   2.109 +  // Now fill in the interpreterState object.
   2.110 +
   2.111 +  intptr_t* locals;
   2.112 +  if (caller->is_interpreted_frame()) {
   2.113 +    // Locals must agree with the caller because it will be used to set the
   2.114 +    // caller's tos when we return.
   2.115 +    interpreterState prev  = caller->get_interpreterState();
   2.116 +    // Calculate start of "locals" for MH calls.  For MH calls, the
   2.117 +    // current method() (= MH target) and prev->callee() (=
   2.118 +    // MH.invoke*()) are different and especially have different
   2.119 +    // signatures. To pop the argumentsof the caller, we must use
   2.120 +    // the prev->callee()->size_of_arguments() because that's what
   2.121 +    // the caller actually pushed.  Currently, for synthetic MH
   2.122 +    // calls (deoptimized from inlined MH calls), detected by
   2.123 +    // is_method_handle_invoke(), we use the callee's arguments
   2.124 +    // because here, the caller's and callee's signature match.
   2.125 +    if (true /*!caller->is_at_mh_callsite()*/) {
   2.126 +      locals = prev->stack() + method->size_of_parameters();
   2.127 +    } else {
   2.128 +      // Normal MH call.
   2.129 +      locals = prev->stack() + prev->callee()->size_of_parameters();
   2.130 +    }
   2.131    } else {
   2.132 -    frame_size = round_to(frame::interpreter_frame_cinterpreterstate_size_in_bytes()
   2.133 -                          + monitor_size
   2.134 -                          + ((temps - callee_params + callee_locals) *
   2.135 -                             Interpreter::stackElementWords * BytesPerWord)
   2.136 -                          + 2*BytesPerWord,
   2.137 -                          frame::alignment_in_bytes)
   2.138 -                 + frame::parent_ijava_frame_abi_size;
   2.139 -    assert(popframe_args==0, "non-zero for top_frame only");
   2.140 +    bool is_deopted;
   2.141 +    locals = (intptr_t*) (fp + ((method->max_locals() - 1) * BytesPerWord) +
   2.142 +                          frame::parent_ijava_frame_abi_size);
   2.143    }
   2.144  
   2.145 -  // If we actually have a frame to layout we must now fill in all the pieces.
   2.146 -  if (interpreter_frame != NULL) {
   2.147 -
   2.148 -    intptr_t sp = (intptr_t)interpreter_frame->sp();
   2.149 -    intptr_t fp = *(intptr_t *)sp;
   2.150 -    assert(fp == (intptr_t)caller->sp(), "fp must match");
   2.151 -    interpreterState cur_state =
   2.152 -      (interpreterState)(fp - frame::interpreter_frame_cinterpreterstate_size_in_bytes());
   2.153 -
   2.154 -    // Now fill in the interpreterState object.
   2.155 -
   2.156 -    intptr_t* locals;
   2.157 -    if (caller->is_interpreted_frame()) {
   2.158 -      // Locals must agree with the caller because it will be used to set the
   2.159 -      // caller's tos when we return.
   2.160 -      interpreterState prev  = caller->get_interpreterState();
   2.161 -      // Calculate start of "locals" for MH calls.  For MH calls, the
   2.162 -      // current method() (= MH target) and prev->callee() (=
   2.163 -      // MH.invoke*()) are different and especially have different
   2.164 -      // signatures. To pop the argumentsof the caller, we must use
   2.165 -      // the prev->callee()->size_of_arguments() because that's what
   2.166 -      // the caller actually pushed.  Currently, for synthetic MH
   2.167 -      // calls (deoptimized from inlined MH calls), detected by
   2.168 -      // is_method_handle_invoke(), we use the callee's arguments
   2.169 -      // because here, the caller's and callee's signature match.
   2.170 -      if (true /*!caller->is_at_mh_callsite()*/) {
   2.171 -        locals = prev->stack() + method->size_of_parameters();
   2.172 -      } else {
   2.173 -        // Normal MH call.
   2.174 -        locals = prev->stack() + prev->callee()->size_of_parameters();
   2.175 -      }
   2.176 -    } else {
   2.177 -      bool is_deopted;
   2.178 -      locals = (intptr_t*) (fp + ((method->max_locals() - 1) * BytesPerWord) +
   2.179 -                            frame::parent_ijava_frame_abi_size);
   2.180 -    }
   2.181 -
   2.182 -    intptr_t* monitor_base = (intptr_t*) cur_state;
   2.183 -    intptr_t* stack_base   = (intptr_t*) ((intptr_t) monitor_base - monitor_size);
   2.184 -
   2.185 -    // Provide pop_frame capability on PPC64, add popframe_args.
   2.186 -    // +1 because stack is always prepushed.
   2.187 -    intptr_t* stack = (intptr_t*) ((intptr_t) stack_base - (temps + popframe_args + 1) * BytesPerWord);
   2.188 -
   2.189 -    BytecodeInterpreter::layout_interpreterState(cur_state,
   2.190 -                                                 caller,
   2.191 -                                                 interpreter_frame,
   2.192 -                                                 method,
   2.193 -                                                 locals,
   2.194 -                                                 stack,
   2.195 -                                                 stack_base,
   2.196 -                                                 monitor_base,
   2.197 -                                                 (intptr_t*)(((intptr_t)fp)-top_frame_size),
   2.198 -                                                 is_top_frame);
   2.199 -
   2.200 -    BytecodeInterpreter::pd_layout_interpreterState(cur_state, interpreter_return_address,
   2.201 -                                                    interpreter_frame->fp());
   2.202 -  }
   2.203 -  return frame_size/BytesPerWord;
   2.204 +  intptr_t* monitor_base = (intptr_t*) cur_state;
   2.205 +  intptr_t* stack_base   = (intptr_t*) ((intptr_t) monitor_base - monitor_size);
   2.206 +
   2.207 +  // Provide pop_frame capability on PPC64, add popframe_args.
   2.208 +  // +1 because stack is always prepushed.
   2.209 +  intptr_t* stack = (intptr_t*) ((intptr_t) stack_base - (temps + popframe_args + 1) * BytesPerWord);
   2.210 +
   2.211 +  BytecodeInterpreter::layout_interpreterState(cur_state,
   2.212 +                                               caller,
   2.213 +                                               interpreter_frame,
   2.214 +                                               method,
   2.215 +                                               locals,
   2.216 +                                               stack,
   2.217 +                                               stack_base,
   2.218 +                                               monitor_base,
   2.219 +                                               (intptr_t*)(((intptr_t)fp) - top_frame_size),
   2.220 +                                               is_top_frame);
   2.221 +
   2.222 +  BytecodeInterpreter::pd_layout_interpreterState(cur_state, interpreter_return_address,
   2.223 +                                                  interpreter_frame->fp());
   2.224  }
   2.225  
   2.226  #endif // CC_INTERP
     3.1 --- a/src/cpu/ppc/vm/ppc.ad	Tue Jun 17 16:12:09 2014 -0700
     3.2 +++ b/src/cpu/ppc/vm/ppc.ad	Tue Jun 17 22:15:24 2014 -0700
     3.3 @@ -1,6 +1,6 @@
     3.4  //
     3.5 -// Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
     3.6 -// Copyright 2012, 2013 SAP AG. All rights reserved.
     3.7 +// Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
     3.8 +// Copyright 2012, 2014 SAP AG. All rights reserved.
     3.9  // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    3.10  //
    3.11  // This code is free software; you can redistribute it and/or modify it
    3.12 @@ -1363,8 +1363,8 @@
    3.13    Compile* C = ra_->C;
    3.14    MacroAssembler _masm(&cbuf);
    3.15  
    3.16 -  const long framesize = ((long)C->frame_slots()) << LogBytesPerInt;
    3.17 -  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");
    3.18 +  const long framesize = C->frame_size_in_bytes();
    3.19 +  assert(framesize % (2 * wordSize) == 0, "must preserve 2*wordSize alignment");
    3.20  
    3.21    const bool method_is_frameless      = false /* TODO: PPC port C->is_frameless_method()*/;
    3.22  
    3.23 @@ -1389,19 +1389,22 @@
    3.24    // careful, because some VM calls (such as call site linkage) can
    3.25    // use several kilobytes of stack. But the stack safety zone should
    3.26    // account for that. See bugs 4446381, 4468289, 4497237.
    3.27 -  if (C->need_stack_bang(framesize) && UseStackBanging) {
    3.28 +
    3.29 +  int bangsize = C->bang_size_in_bytes();
    3.30 +  assert(bangsize >= framesize || bangsize <= 0, "stack bang size incorrect");
    3.31 +  if (C->need_stack_bang(bangsize) && UseStackBanging) {
    3.32      // Unfortunately we cannot use the function provided in
    3.33      // assembler.cpp as we have to emulate the pipes. So I had to
    3.34      // insert the code of generate_stack_overflow_check(), see
    3.35      // assembler.cpp for some illuminative comments.
    3.36      const int page_size = os::vm_page_size();
    3.37 -    int bang_end = StackShadowPages*page_size;
    3.38 +    int bang_end = StackShadowPages * page_size;
    3.39  
    3.40      // This is how far the previous frame's stack banging extended.
    3.41      const int bang_end_safe = bang_end;
    3.42  
    3.43 -    if (framesize > page_size) {
    3.44 -      bang_end += framesize;
    3.45 +    if (bangsize > page_size) {
    3.46 +      bang_end += bangsize;
    3.47      }
    3.48  
    3.49      int bang_offset = bang_end_safe;
    3.50 @@ -1447,7 +1450,7 @@
    3.51  
    3.52    unsigned int bytes = (unsigned int)framesize;
    3.53    long offset = Assembler::align_addr(bytes, frame::alignment_in_bytes);
    3.54 -  ciMethod *currMethod = C -> method();
    3.55 +  ciMethod *currMethod = C->method();
    3.56  
    3.57    // Optimized version for most common case.
    3.58    if (UsePower6SchedulerPPC64 &&
     4.1 --- a/src/cpu/ppc/vm/templateInterpreter_ppc.cpp	Tue Jun 17 16:12:09 2014 -0700
     4.2 +++ b/src/cpu/ppc/vm/templateInterpreter_ppc.cpp	Tue Jun 17 22:15:24 2014 -0700
     4.3 @@ -1328,21 +1328,42 @@
     4.4  int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
     4.5    const int max_alignment_size = 2;
     4.6    const int abi_scratch = frame::abi_reg_args_size;
     4.7 -  return method->max_locals() + method->max_stack() + frame::interpreter_frame_monitor_size() + max_alignment_size + abi_scratch;
     4.8 +  return method->max_locals() + method->max_stack() +
     4.9 +         frame::interpreter_frame_monitor_size() + max_alignment_size + abi_scratch;
    4.10  }
    4.11  
    4.12 -// Fills a sceletal interpreter frame generated during deoptimizations
    4.13 -// and returns the frame size in slots.
    4.14 +// Returns number of stackElementWords needed for the interpreter frame with the
    4.15 +// given sections.
    4.16 +// This overestimates the stack by one slot in case of alignments.
    4.17 +int AbstractInterpreter::size_activation(int max_stack,
    4.18 +                                         int temps,
    4.19 +                                         int extra_args,
    4.20 +                                         int monitors,
    4.21 +                                         int callee_params,
    4.22 +                                         int callee_locals,
    4.23 +                                         bool is_top_frame) {
    4.24 +  // Note: This calculation must exactly parallel the frame setup
    4.25 +  // in AbstractInterpreterGenerator::generate_method_entry.
    4.26 +  assert(Interpreter::stackElementWords == 1, "sanity");
    4.27 +  const int max_alignment_space = StackAlignmentInBytes / Interpreter::stackElementSize;
    4.28 +  const int abi_scratch = is_top_frame ? (frame::abi_reg_args_size / Interpreter::stackElementSize) :
    4.29 +                                         (frame::abi_minframe_size / Interpreter::stackElementSize);
    4.30 +  const int size =
    4.31 +    max_stack                                                +
    4.32 +    (callee_locals - callee_params)                          +
    4.33 +    monitors * frame::interpreter_frame_monitor_size()       +
    4.34 +    max_alignment_space                                      +
    4.35 +    abi_scratch                                              +
    4.36 +    frame::ijava_state_size / Interpreter::stackElementSize;
    4.37 +
    4.38 +  // Fixed size of an interpreter frame, align to 16-byte.
    4.39 +  return (size & -2);
    4.40 +}
    4.41 +
    4.42 +// Fills a sceletal interpreter frame generated during deoptimizations.
    4.43  //
    4.44  // Parameters:
    4.45  //
    4.46 -// interpreter_frame == NULL:
    4.47 -//   Only calculate the size of an interpreter activation, no actual layout.
    4.48 -//   Note: This calculation must exactly parallel the frame setup
    4.49 -//   in TemplateInterpreter::generate_normal_entry. But it does not
    4.50 -//   account for the SP alignment, that might further enhance the
    4.51 -//   frame size, depending on FP.
    4.52 -//
    4.53  // interpreter_frame != NULL:
    4.54  //   set up the method, locals, and monitors.
    4.55  //   The frame interpreter_frame, if not NULL, is guaranteed to be the
    4.56 @@ -1359,59 +1380,41 @@
    4.57  //   the arguments off advance the esp by dummy popframe_extra_args slots.
    4.58  //   Popping off those will establish the stack layout as it was before the call.
    4.59  //
    4.60 -int AbstractInterpreter::layout_activation(Method* method,
    4.61 -                                           int tempcount,
    4.62 -                                           int popframe_extra_args,
    4.63 -                                           int moncount,
    4.64 -                                           int caller_actual_parameters,
    4.65 -                                           int callee_param_count,
    4.66 -                                           int callee_locals,
    4.67 -                                           frame* caller,
    4.68 -                                           frame* interpreter_frame,
    4.69 -                                           bool is_top_frame,
    4.70 -                                           bool is_bottom_frame) {
    4.71 +void AbstractInterpreter::layout_activation(Method* method,
    4.72 +                                            int tempcount,
    4.73 +                                            int popframe_extra_args,
    4.74 +                                            int moncount,
    4.75 +                                            int caller_actual_parameters,
    4.76 +                                            int callee_param_count,
    4.77 +                                            int callee_locals_count,
    4.78 +                                            frame* caller,
    4.79 +                                            frame* interpreter_frame,
    4.80 +                                            bool is_top_frame,
    4.81 +                                            bool is_bottom_frame) {
    4.82  
    4.83 -  const int max_alignment_space = 2;
    4.84    const int abi_scratch = is_top_frame ? (frame::abi_reg_args_size / Interpreter::stackElementSize) :
    4.85 -                                         (frame::abi_minframe_size / Interpreter::stackElementSize) ;
    4.86 -  const int conservative_framesize_in_slots =
    4.87 -    method->max_stack() + callee_locals - callee_param_count +
    4.88 -    (moncount * frame::interpreter_frame_monitor_size()) + max_alignment_space +
    4.89 -    abi_scratch + frame::ijava_state_size / Interpreter::stackElementSize;
    4.90 +                                         (frame::abi_minframe_size / Interpreter::stackElementSize);
    4.91  
    4.92 -  assert(!is_top_frame || conservative_framesize_in_slots * 8 > frame::abi_reg_args_size + frame::ijava_state_size, "frame too small");
    4.93 +  intptr_t* locals_base  = (caller->is_interpreted_frame()) ?
    4.94 +    caller->interpreter_frame_esp() + caller_actual_parameters :
    4.95 +    caller->sp() + method->max_locals() - 1 + (frame::abi_minframe_size / Interpreter::stackElementSize) ;
    4.96  
    4.97 -  if (interpreter_frame == NULL) {
    4.98 -    // Since we don't know the exact alignment, we return the conservative size.
    4.99 -    return (conservative_framesize_in_slots & -2);
   4.100 -  } else {
   4.101 -    // Now we know our caller, calc the exact frame layout and size.
   4.102 -    intptr_t* locals_base  = (caller->is_interpreted_frame()) ?
   4.103 -      caller->interpreter_frame_esp() + caller_actual_parameters :
   4.104 -      caller->sp() + method->max_locals() - 1 + (frame::abi_minframe_size / Interpreter::stackElementSize) ;
   4.105 +  intptr_t* monitor_base = caller->sp() - frame::ijava_state_size / Interpreter::stackElementSize ;
   4.106 +  intptr_t* monitor      = monitor_base - (moncount * frame::interpreter_frame_monitor_size());
   4.107 +  intptr_t* esp_base     = monitor - 1;
   4.108 +  intptr_t* esp          = esp_base - tempcount - popframe_extra_args;
   4.109 +  intptr_t* sp           = (intptr_t *) (((intptr_t) (esp_base - callee_locals_count + callee_param_count - method->max_stack()- abi_scratch)) & -StackAlignmentInBytes);
   4.110 +  intptr_t* sender_sp    = caller->sp() + (frame::abi_minframe_size - frame::abi_reg_args_size) / Interpreter::stackElementSize;
   4.111 +  intptr_t* top_frame_sp = is_top_frame ? sp : sp + (frame::abi_minframe_size - frame::abi_reg_args_size) / Interpreter::stackElementSize;
   4.112  
   4.113 -    intptr_t* monitor_base = caller->sp() - frame::ijava_state_size / Interpreter::stackElementSize ;
   4.114 -    intptr_t* monitor      = monitor_base - (moncount * frame::interpreter_frame_monitor_size());
   4.115 -    intptr_t* esp_base     = monitor - 1;
   4.116 -    intptr_t* esp          = esp_base - tempcount - popframe_extra_args;
   4.117 -    intptr_t* sp           = (intptr_t *) (((intptr_t) (esp_base- callee_locals + callee_param_count - method->max_stack()- abi_scratch)) & -StackAlignmentInBytes);
   4.118 -    intptr_t* sender_sp    = caller->sp() + (frame::abi_minframe_size - frame::abi_reg_args_size) / Interpreter::stackElementSize;
   4.119 -    intptr_t* top_frame_sp = is_top_frame ? sp : sp + (frame::abi_minframe_size - frame::abi_reg_args_size) / Interpreter::stackElementSize;
   4.120 -
   4.121 -    interpreter_frame->interpreter_frame_set_method(method);
   4.122 -    interpreter_frame->interpreter_frame_set_locals(locals_base);
   4.123 -    interpreter_frame->interpreter_frame_set_cpcache(method->constants()->cache());
   4.124 -    interpreter_frame->interpreter_frame_set_esp(esp);
   4.125 -    interpreter_frame->interpreter_frame_set_monitor_end((BasicObjectLock *)monitor);
   4.126 -    interpreter_frame->interpreter_frame_set_top_frame_sp(top_frame_sp);
   4.127 -    if (!is_bottom_frame) {
   4.128 -      interpreter_frame->interpreter_frame_set_sender_sp(sender_sp);
   4.129 -    }
   4.130 -
   4.131 -    int framesize_in_slots = caller->sp() - sp;
   4.132 -    assert(!is_top_frame ||framesize_in_slots >= (frame::abi_reg_args_size / Interpreter::stackElementSize) + frame::ijava_state_size / Interpreter::stackElementSize, "frame too small");
   4.133 -    assert(framesize_in_slots <= conservative_framesize_in_slots, "exact frame size must be smaller than the convervative size!");
   4.134 -    return framesize_in_slots;
   4.135 +  interpreter_frame->interpreter_frame_set_method(method);
   4.136 +  interpreter_frame->interpreter_frame_set_locals(locals_base);
   4.137 +  interpreter_frame->interpreter_frame_set_cpcache(method->constants()->cache());
   4.138 +  interpreter_frame->interpreter_frame_set_esp(esp);
   4.139 +  interpreter_frame->interpreter_frame_set_monitor_end((BasicObjectLock *)monitor);
   4.140 +  interpreter_frame->interpreter_frame_set_top_frame_sp(top_frame_sp);
   4.141 +  if (!is_bottom_frame) {
   4.142 +    interpreter_frame->interpreter_frame_set_sender_sp(sender_sp);
   4.143    }
   4.144  }
   4.145  
     5.1 --- a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Tue Jun 17 16:12:09 2014 -0700
     5.2 +++ b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Tue Jun 17 22:15:24 2014 -0700
     5.3 @@ -152,7 +152,7 @@
     5.4  }
     5.5  
     5.6  
     5.7 -int LIR_Assembler::initial_frame_size_in_bytes() {
     5.8 +int LIR_Assembler::initial_frame_size_in_bytes() const {
     5.9    return in_bytes(frame_map()->framesize_in_bytes());
    5.10  }
    5.11  
    5.12 @@ -182,7 +182,7 @@
    5.13    int number_of_locks = entry_state->locks_size();
    5.14  
    5.15    // Create a frame for the compiled activation.
    5.16 -  __ build_frame(initial_frame_size_in_bytes());
    5.17 +  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
    5.18  
    5.19    // OSR buffer is
    5.20    //
     6.1 --- a/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp	Tue Jun 17 16:12:09 2014 -0700
     6.2 +++ b/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp	Tue Jun 17 22:15:24 2014 -0700
     6.3 @@ -55,9 +55,9 @@
     6.4  }
     6.5  
     6.6  
     6.7 -void C1_MacroAssembler::build_frame(int frame_size_in_bytes) {
     6.8 -
     6.9 -  generate_stack_overflow_check(frame_size_in_bytes);
    6.10 +void C1_MacroAssembler::build_frame(int frame_size_in_bytes, int bang_size_in_bytes) {
    6.11 +  assert(bang_size_in_bytes >= frame_size_in_bytes, "stack bang size incorrect");
    6.12 +  generate_stack_overflow_check(bang_size_in_bytes);
    6.13    // Create the frame.
    6.14    save_frame_c1(frame_size_in_bytes);
    6.15  }
     7.1 --- a/src/cpu/sparc/vm/cppInterpreter_sparc.cpp	Tue Jun 17 16:12:09 2014 -0700
     7.2 +++ b/src/cpu/sparc/vm/cppInterpreter_sparc.cpp	Tue Jun 17 22:15:24 2014 -0700
     7.3 @@ -2101,7 +2101,7 @@
     7.4    int monitor_size    = method->is_synchronized() ?
     7.5                                  1*frame::interpreter_frame_monitor_size() : 0;
     7.6    return size_activation_helper(method->max_locals(), method->max_stack(),
     7.7 -                                 monitor_size) + call_stub_size;
     7.8 +                                monitor_size) + call_stub_size;
     7.9  }
    7.10  
    7.11  void BytecodeInterpreter::layout_interpreterState(interpreterState to_fill,
    7.12 @@ -2185,31 +2185,31 @@
    7.13    istate->_last_Java_pc = (intptr_t*) last_Java_pc;
    7.14  }
    7.15  
    7.16 +static int frame_size_helper(int max_stack,
    7.17 +                             int moncount,
    7.18 +                             int callee_param_size,
    7.19 +                             int callee_locals_size,
    7.20 +                             bool is_top_frame,
    7.21 +                             int& monitor_size,
    7.22 +                             int& full_frame_words) {
    7.23 +  int extra_locals_size = callee_locals_size - callee_param_size;
    7.24 +  monitor_size = (sizeof(BasicObjectLock) * moncount) / wordSize;
    7.25 +  full_frame_words = size_activation_helper(extra_locals_size, max_stack, monitor_size);
    7.26 +  int short_frame_words = size_activation_helper(extra_locals_size, max_stack, monitor_size);
    7.27 +  int frame_words = is_top_frame ? full_frame_words : short_frame_words;
    7.28  
    7.29 -int AbstractInterpreter::layout_activation(Method* method,
    7.30 -                                           int tempcount, // Number of slots on java expression stack in use
    7.31 -                                           int popframe_extra_args,
    7.32 -                                           int moncount,  // Number of active monitors
    7.33 -                                           int caller_actual_parameters,
    7.34 -                                           int callee_param_size,
    7.35 -                                           int callee_locals_size,
    7.36 -                                           frame* caller,
    7.37 -                                           frame* interpreter_frame,
    7.38 -                                           bool is_top_frame,
    7.39 -                                           bool is_bottom_frame) {
    7.40 +  return frame_words;
    7.41 +}
    7.42  
    7.43 -  assert(popframe_extra_args == 0, "NEED TO FIX");
    7.44 -  // NOTE this code must exactly mimic what InterpreterGenerator::generate_compute_interpreter_state()
    7.45 -  // does as far as allocating an interpreter frame.
    7.46 -  // If interpreter_frame!=NULL, set up the method, locals, and monitors.
    7.47 -  // The frame interpreter_frame, if not NULL, is guaranteed to be the right size,
    7.48 -  // as determined by a previous call to this method.
    7.49 -  // It is also guaranteed to be walkable even though it is in a skeletal state
    7.50 +int AbstractInterpreter::size_activation(int max_stack,
    7.51 +                                         int tempcount,
    7.52 +                                         int extra_args,
    7.53 +                                         int moncount,
    7.54 +                                         int callee_param_size,
    7.55 +                                         int callee_locals_size,
    7.56 +                                         bool is_top_frame) {
    7.57 +  assert(extra_args == 0, "NEED TO FIX");
    7.58    // NOTE: return size is in words not bytes
    7.59 -  // NOTE: tempcount is the current size of the java expression stack. For top most
    7.60 -  //       frames we will allocate a full sized expression stack and not the curback
    7.61 -  //       version that non-top frames have.
    7.62 -
    7.63    // Calculate the amount our frame will be adjust by the callee. For top frame
    7.64    // this is zero.
    7.65  
    7.66 @@ -2218,87 +2218,108 @@
    7.67    // to it. So it ignores last_frame_adjust value. Seems suspicious as far
    7.68    // as getting sender_sp correct.
    7.69  
    7.70 -  int extra_locals_size = callee_locals_size - callee_param_size;
    7.71 -  int monitor_size = (sizeof(BasicObjectLock) * moncount) / wordSize;
    7.72 -  int full_frame_words = size_activation_helper(extra_locals_size, method->max_stack(), monitor_size);
    7.73 -  int short_frame_words = size_activation_helper(extra_locals_size, method->max_stack(), monitor_size);
    7.74 -  int frame_words = is_top_frame ? full_frame_words : short_frame_words;
    7.75 +  int unused_monitor_size = 0;
    7.76 +  int unused_full_frame_words = 0;
    7.77 +  return frame_size_helper(max_stack, moncount, callee_param_size, callee_locals_size, is_top_frame,
    7.78 +                           unused_monitor_size, unused_full_frame_words);
    7.79 +}
    7.80 +void AbstractInterpreter::layout_activation(Method* method,
    7.81 +                                            int tempcount, // Number of slots on java expression stack in use
    7.82 +                                            int popframe_extra_args,
    7.83 +                                            int moncount,  // Number of active monitors
    7.84 +                                            int caller_actual_parameters,
    7.85 +                                            int callee_param_size,
    7.86 +                                            int callee_locals_size,
    7.87 +                                            frame* caller,
    7.88 +                                            frame* interpreter_frame,
    7.89 +                                            bool is_top_frame,
    7.90 +                                            bool is_bottom_frame) {
    7.91 +  assert(popframe_extra_args == 0, "NEED TO FIX");
    7.92 +  // NOTE this code must exactly mimic what InterpreterGenerator::generate_compute_interpreter_state()
    7.93 +  // does as far as allocating an interpreter frame.
    7.94 +  // Set up the method, locals, and monitors.
    7.95 +  // The frame interpreter_frame is guaranteed to be the right size,
    7.96 +  // as determined by a previous call to the size_activation() method.
    7.97 +  // It is also guaranteed to be walkable even though it is in a skeletal state
    7.98 +  // NOTE: tempcount is the current size of the java expression stack. For top most
     7.99 +  //       frames we will allocate a full sized expression stack and not the cut-back
   7.100 +  //       version that non-top frames have.
   7.101  
   7.102 +  int monitor_size = 0;
   7.103 +  int full_frame_words = 0;
   7.104 +  int frame_words = frame_size_helper(method->max_stack(), moncount, callee_param_size, callee_locals_size,
   7.105 +                                      is_top_frame, monitor_size, full_frame_words);
   7.106  
   7.107    /*
   7.108 -    if we actually have a frame to layout we must now fill in all the pieces. This means both
   7.109 +    We must now fill in all the pieces of the frame. This means both
   7.110      the interpreterState and the registers.
   7.111    */
   7.112 -  if (interpreter_frame != NULL) {
   7.113  
   7.114 -    // MUCHO HACK
   7.115 +  // MUCHO HACK
   7.116  
   7.117 -    intptr_t* frame_bottom = interpreter_frame->sp() - (full_frame_words - frame_words);
   7.118 -    // 'interpreter_frame->sp()' is unbiased while 'frame_bottom' must be a biased value in 64bit mode.
   7.119 -    assert(((intptr_t)frame_bottom & 0xf) == 0, "SP biased in layout_activation");
   7.120 -    frame_bottom = (intptr_t*)((intptr_t)frame_bottom - STACK_BIAS);
   7.121 +  intptr_t* frame_bottom = interpreter_frame->sp() - (full_frame_words - frame_words);
   7.122 +  // 'interpreter_frame->sp()' is unbiased while 'frame_bottom' must be a biased value in 64bit mode.
   7.123 +  assert(((intptr_t)frame_bottom & 0xf) == 0, "SP biased in layout_activation");
   7.124 +  frame_bottom = (intptr_t*)((intptr_t)frame_bottom - STACK_BIAS);
   7.125  
   7.126 -    /* Now fillin the interpreterState object */
    7.127 +  /* Now fill in the interpreterState object */
   7.128  
   7.129 -    interpreterState cur_state = (interpreterState) ((intptr_t)interpreter_frame->fp() -  sizeof(BytecodeInterpreter));
   7.130 +  interpreterState cur_state = (interpreterState) ((intptr_t)interpreter_frame->fp() -  sizeof(BytecodeInterpreter));
   7.131  
   7.132  
   7.133 -    intptr_t* locals;
   7.134 +  intptr_t* locals;
   7.135  
   7.136 -    // Calculate the postion of locals[0]. This is painful because of
   7.137 -    // stack alignment (same as ia64). The problem is that we can
   7.138 -    // not compute the location of locals from fp(). fp() will account
   7.139 -    // for the extra locals but it also accounts for aligning the stack
   7.140 -    // and we can't determine if the locals[0] was misaligned but max_locals
   7.141 -    // was enough to have the
   7.142 -    // calculate postion of locals. fp already accounts for extra locals.
   7.143 -    // +2 for the static long no_params() issue.
    7.144 +  // Calculate the position of locals[0]. This is painful because of
   7.145 +  // stack alignment (same as ia64). The problem is that we can
   7.146 +  // not compute the location of locals from fp(). fp() will account
   7.147 +  // for the extra locals but it also accounts for aligning the stack
   7.148 +  // and we can't determine if the locals[0] was misaligned but max_locals
   7.149 +  // was enough to have the
    7.150 +  // calculate position of locals. fp already accounts for extra locals.
   7.151 +  // +2 for the static long no_params() issue.
   7.152  
   7.153 -    if (caller->is_interpreted_frame()) {
   7.154 -      // locals must agree with the caller because it will be used to set the
   7.155 -      // caller's tos when we return.
   7.156 -      interpreterState prev  = caller->get_interpreterState();
   7.157 -      // stack() is prepushed.
   7.158 -      locals = prev->stack() + method->size_of_parameters();
   7.159 +  if (caller->is_interpreted_frame()) {
   7.160 +    // locals must agree with the caller because it will be used to set the
   7.161 +    // caller's tos when we return.
   7.162 +    interpreterState prev  = caller->get_interpreterState();
   7.163 +    // stack() is prepushed.
   7.164 +    locals = prev->stack() + method->size_of_parameters();
   7.165 +  } else {
   7.166 +    // Lay out locals block in the caller adjacent to the register window save area.
   7.167 +    //
   7.168 +    // Compiled frames do not allocate a varargs area which is why this if
   7.169 +    // statement is needed.
   7.170 +    //
   7.171 +    intptr_t* fp = interpreter_frame->fp();
   7.172 +    int local_words = method->max_locals() * Interpreter::stackElementWords;
   7.173 +
   7.174 +    if (caller->is_compiled_frame()) {
   7.175 +      locals = fp + frame::register_save_words + local_words - 1;
   7.176      } else {
   7.177 -      // Lay out locals block in the caller adjacent to the register window save area.
   7.178 -      //
   7.179 -      // Compiled frames do not allocate a varargs area which is why this if
   7.180 -      // statement is needed.
   7.181 -      //
   7.182 -      intptr_t* fp = interpreter_frame->fp();
   7.183 -      int local_words = method->max_locals() * Interpreter::stackElementWords;
   7.184 -
   7.185 -      if (caller->is_compiled_frame()) {
   7.186 -        locals = fp + frame::register_save_words + local_words - 1;
   7.187 -      } else {
   7.188 -        locals = fp + frame::memory_parameter_word_sp_offset + local_words - 1;
   7.189 -      }
   7.190 -
   7.191 +      locals = fp + frame::memory_parameter_word_sp_offset + local_words - 1;
   7.192      }
   7.193 -    // END MUCHO HACK
   7.194 -
   7.195 -    intptr_t* monitor_base = (intptr_t*) cur_state;
   7.196 -    intptr_t* stack_base =  monitor_base - monitor_size;
   7.197 -    /* +1 because stack is always prepushed */
   7.198 -    intptr_t* stack = stack_base - (tempcount + 1);
   7.199 -
   7.200 -
   7.201 -    BytecodeInterpreter::layout_interpreterState(cur_state,
   7.202 -                                          caller,
   7.203 -                                          interpreter_frame,
   7.204 -                                          method,
   7.205 -                                          locals,
   7.206 -                                          stack,
   7.207 -                                          stack_base,
   7.208 -                                          monitor_base,
   7.209 -                                          frame_bottom,
   7.210 -                                          is_top_frame);
   7.211 -
   7.212 -    BytecodeInterpreter::pd_layout_interpreterState(cur_state, interpreter_return_address, interpreter_frame->fp());
   7.213  
   7.214    }
   7.215 -  return frame_words;
   7.216 +  // END MUCHO HACK
   7.217 +
   7.218 +  intptr_t* monitor_base = (intptr_t*) cur_state;
   7.219 +  intptr_t* stack_base =  monitor_base - monitor_size;
   7.220 +  /* +1 because stack is always prepushed */
   7.221 +  intptr_t* stack = stack_base - (tempcount + 1);
   7.222 +
   7.223 +
   7.224 +  BytecodeInterpreter::layout_interpreterState(cur_state,
   7.225 +                                               caller,
   7.226 +                                               interpreter_frame,
   7.227 +                                               method,
   7.228 +                                               locals,
   7.229 +                                               stack,
   7.230 +                                               stack_base,
   7.231 +                                               monitor_base,
   7.232 +                                               frame_bottom,
   7.233 +                                               is_top_frame);
   7.234 +
   7.235 +  BytecodeInterpreter::pd_layout_interpreterState(cur_state, interpreter_return_address, interpreter_frame->fp());
   7.236  }
   7.237  
   7.238  #endif // CC_INTERP
     8.1 --- a/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Tue Jun 17 16:12:09 2014 -0700
     8.2 +++ b/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Tue Jun 17 22:15:24 2014 -0700
     8.3 @@ -3531,7 +3531,7 @@
     8.4    // was post-decremented.)  Skip this address by starting at i=1, and
     8.5    // touch a few more pages below.  N.B.  It is important to touch all
     8.6    // the way down to and including i=StackShadowPages.
     8.7 -  for (int i = 1; i <= StackShadowPages; i++) {
     8.8 +  for (int i = 1; i < StackShadowPages; i++) {
     8.9      set((-i*offset)+STACK_BIAS, Rscratch);
    8.10      st(G0, Rtsp, Rscratch);
    8.11    }
     9.1 --- a/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Tue Jun 17 16:12:09 2014 -0700
     9.2 +++ b/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Tue Jun 17 22:15:24 2014 -0700
     9.3 @@ -3355,13 +3355,16 @@
     9.4    Register        O4array_size       = O4;
     9.5    Label           loop;
     9.6  
     9.7 -  // Before we make new frames, check to see if stack is available.
     9.8 -  // Do this after the caller's return address is on top of stack
     9.9 +#ifdef ASSERT
    9.10 +  // Compilers generate code that bang the stack by as much as the
    9.11 +  // interpreter would need. So this stack banging should never
    9.12 +  // trigger a fault. Verify that it does not on non product builds.
    9.13    if (UseStackBanging) {
    9.14      // Get total frame size for interpreted frames
    9.15      __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes(), O4);
    9.16      __ bang_stack_size(O4, O3, G3_scratch);
    9.17    }
    9.18 +#endif
    9.19  
    9.20    __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes(), O4array_size);
    9.21    __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes(), G3pcs);
    9.22 @@ -3409,9 +3412,11 @@
    9.23    ResourceMark rm;
    9.24    // setup code generation tools
    9.25    int pad = VerifyThread ? 512 : 0;// Extra slop space for more verify code
    9.26 +#ifdef ASSERT
    9.27    if (UseStackBanging) {
    9.28      pad += StackShadowPages*16 + 32;
    9.29    }
    9.30 +#endif
    9.31  #ifdef _LP64
    9.32    CodeBuffer buffer("deopt_blob", 2100+pad, 512);
    9.33  #else
    9.34 @@ -3632,9 +3637,11 @@
    9.35    ResourceMark rm;
    9.36    // setup code generation tools
    9.37    int pad = VerifyThread ? 512 : 0;
    9.38 +#ifdef ASSERT
    9.39    if (UseStackBanging) {
    9.40      pad += StackShadowPages*16 + 32;
    9.41    }
    9.42 +#endif
    9.43  #ifdef _LP64
    9.44    CodeBuffer buffer("uncommon_trap_blob", 2700+pad, 512);
    9.45  #else
    10.1 --- a/src/cpu/sparc/vm/sparc.ad	Tue Jun 17 16:12:09 2014 -0700
    10.2 +++ b/src/cpu/sparc/vm/sparc.ad	Tue Jun 17 22:15:24 2014 -0700
    10.3 @@ -1193,15 +1193,16 @@
    10.4      st->print_cr("Verify_Thread"); st->print("\t");
    10.5    }
    10.6  
    10.7 -  size_t framesize = C->frame_slots() << LogBytesPerInt;
    10.8 +  size_t framesize = C->frame_size_in_bytes();
    10.9 +  int bangsize = C->bang_size_in_bytes();
   10.10  
   10.11    // Calls to C2R adapters often do not accept exceptional returns.
   10.12    // We require that their callers must bang for them.  But be careful, because
   10.13    // some VM calls (such as call site linkage) can use several kilobytes of
   10.14    // stack.  But the stack safety zone should account for that.
   10.15    // See bugs 4446381, 4468289, 4497237.
   10.16 -  if (C->need_stack_bang(framesize)) {
   10.17 -    st->print_cr("! stack bang"); st->print("\t");
   10.18 +  if (C->need_stack_bang(bangsize)) {
   10.19 +    st->print_cr("! stack bang (%d bytes)", bangsize); st->print("\t");
   10.20    }
   10.21  
   10.22    if (Assembler::is_simm13(-framesize)) {
   10.23 @@ -1225,17 +1226,18 @@
   10.24  
   10.25    __ verify_thread();
   10.26  
   10.27 -  size_t framesize = C->frame_slots() << LogBytesPerInt;
   10.28 +  size_t framesize = C->frame_size_in_bytes();
   10.29    assert(framesize >= 16*wordSize, "must have room for reg. save area");
   10.30    assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");
   10.31 +  int bangsize = C->bang_size_in_bytes();
   10.32  
   10.33    // Calls to C2R adapters often do not accept exceptional returns.
   10.34    // We require that their callers must bang for them.  But be careful, because
   10.35    // some VM calls (such as call site linkage) can use several kilobytes of
   10.36    // stack.  But the stack safety zone should account for that.
   10.37    // See bugs 4446381, 4468289, 4497237.
   10.38 -  if (C->need_stack_bang(framesize)) {
   10.39 -    __ generate_stack_overflow_check(framesize);
   10.40 +  if (C->need_stack_bang(bangsize)) {
   10.41 +    __ generate_stack_overflow_check(bangsize);
   10.42    }
   10.43  
   10.44    if (Assembler::is_simm13(-framesize)) {
   10.45 @@ -2547,7 +2549,7 @@
   10.46    enc_class call_epilog %{
   10.47      if( VerifyStackAtCalls ) {
   10.48        MacroAssembler _masm(&cbuf);
   10.49 -      int framesize = ra_->C->frame_slots() << LogBytesPerInt;
   10.50 +      int framesize = ra_->C->frame_size_in_bytes();
   10.51        Register temp_reg = G3;
   10.52        __ add(SP, framesize, temp_reg);
   10.53        __ cmp(temp_reg, FP);
    11.1 --- a/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Tue Jun 17 16:12:09 2014 -0700
    11.2 +++ b/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Tue Jun 17 22:15:24 2014 -0700
    11.3 @@ -1564,37 +1564,23 @@
    11.4    int monitor_size    = method->is_synchronized() ?
    11.5                                  1*frame::interpreter_frame_monitor_size() : 0;
    11.6    return size_activation_helper(method->max_locals(), method->max_stack(),
    11.7 -                                 monitor_size) + call_stub_size;
    11.8 +                                monitor_size) + call_stub_size;
    11.9  }
   11.10  
   11.11 -int AbstractInterpreter::layout_activation(Method* method,
   11.12 -                                           int tempcount,
   11.13 -                                           int popframe_extra_args,
   11.14 -                                           int moncount,
   11.15 -                                           int caller_actual_parameters,
   11.16 -                                           int callee_param_count,
   11.17 -                                           int callee_local_count,
   11.18 -                                           frame* caller,
   11.19 -                                           frame* interpreter_frame,
   11.20 -                                           bool is_top_frame,
   11.21 -                                           bool is_bottom_frame) {
   11.22 +int AbstractInterpreter::size_activation(int max_stack,
   11.23 +                                         int temps,
   11.24 +                                         int extra_args,
   11.25 +                                         int monitors,
   11.26 +                                         int callee_params,
   11.27 +                                         int callee_locals,
   11.28 +                                         bool is_top_frame) {
   11.29    // Note: This calculation must exactly parallel the frame setup
   11.30    // in InterpreterGenerator::generate_fixed_frame.
   11.31 -  // If f!=NULL, set up the following variables:
   11.32 -  //   - Lmethod
   11.33 -  //   - Llocals
   11.34 -  //   - Lmonitors (to the indicated number of monitors)
   11.35 -  //   - Lesp (to the indicated number of temps)
   11.36 -  // The frame f (if not NULL) on entry is a description of the caller of the frame
   11.37 -  // we are about to layout. We are guaranteed that we will be able to fill in a
   11.38 -  // new interpreter frame as its callee (i.e. the stack space is allocated and
   11.39 -  // the amount was determined by an earlier call to this method with f == NULL).
   11.40 -  // On return f (if not NULL) while describe the interpreter frame we just layed out.
   11.41  
   11.42 -  int monitor_size           = moncount * frame::interpreter_frame_monitor_size();
   11.43 -  int rounded_vm_local_words = round_to(frame::interpreter_frame_vm_local_words,WordsPerLong);
   11.44 +  int monitor_size           = monitors * frame::interpreter_frame_monitor_size();
   11.45  
   11.46    assert(monitor_size == round_to(monitor_size, WordsPerLong), "must align");
   11.47 +
   11.48    //
   11.49    // Note: if you look closely this appears to be doing something much different
   11.50    // than generate_fixed_frame. What is happening is this. On sparc we have to do
   11.51 @@ -1619,146 +1605,171 @@
   11.52    // there is no sense in messing working code.
   11.53    //
   11.54  
   11.55 -  int rounded_cls = round_to((callee_local_count - callee_param_count), WordsPerLong);
   11.56 +  int rounded_cls = round_to((callee_locals - callee_params), WordsPerLong);
   11.57    assert(rounded_cls == round_to(rounded_cls, WordsPerLong), "must align");
   11.58  
   11.59 -  int raw_frame_size = size_activation_helper(rounded_cls, method->max_stack(),
   11.60 -                                              monitor_size);
   11.61 +  int raw_frame_size = size_activation_helper(rounded_cls, max_stack, monitor_size);
   11.62  
   11.63 -  if (interpreter_frame != NULL) {
   11.64 -    // The skeleton frame must already look like an interpreter frame
   11.65 -    // even if not fully filled out.
   11.66 -    assert(interpreter_frame->is_interpreted_frame(), "Must be interpreted frame");
   11.67 +  return raw_frame_size;
   11.68 +}
   11.69  
   11.70 -    intptr_t* fp = interpreter_frame->fp();
   11.71 +void AbstractInterpreter::layout_activation(Method* method,
   11.72 +                                            int tempcount,
   11.73 +                                            int popframe_extra_args,
   11.74 +                                            int moncount,
   11.75 +                                            int caller_actual_parameters,
   11.76 +                                            int callee_param_count,
   11.77 +                                            int callee_local_count,
   11.78 +                                            frame* caller,
   11.79 +                                            frame* interpreter_frame,
   11.80 +                                            bool is_top_frame,
   11.81 +                                            bool is_bottom_frame) {
   11.82 +  // Set up the following variables:
   11.83 +  //   - Lmethod
   11.84 +  //   - Llocals
   11.85 +  //   - Lmonitors (to the indicated number of monitors)
   11.86 +  //   - Lesp (to the indicated number of temps)
   11.87 +  // The frame caller on entry is a description of the caller of the
   11.88 +  // frame we are about to layout. We are guaranteed that we will be
   11.89 +  // able to fill in a new interpreter frame as its callee (i.e. the
   11.90 +  // stack space is allocated and the amount was determined by an
   11.91 +  // earlier call to the size_activation() method).  On return caller
    11.92 +  // will describe the interpreter frame we just laid out.
   11.93  
   11.94 -    JavaThread* thread = JavaThread::current();
   11.95 -    RegisterMap map(thread, false);
   11.96 -    // More verification that skeleton frame is properly walkable
   11.97 -    assert(fp == caller->sp(), "fp must match");
   11.98 +  // The skeleton frame must already look like an interpreter frame
   11.99 +  // even if not fully filled out.
  11.100 +  assert(interpreter_frame->is_interpreted_frame(), "Must be interpreted frame");
  11.101  
  11.102 -    intptr_t* montop     = fp - rounded_vm_local_words;
  11.103 +  int rounded_vm_local_words = round_to(frame::interpreter_frame_vm_local_words,WordsPerLong);
  11.104 +  int monitor_size           = moncount * frame::interpreter_frame_monitor_size();
  11.105 +  assert(monitor_size == round_to(monitor_size, WordsPerLong), "must align");
  11.106  
  11.107 -    // preallocate monitors (cf. __ add_monitor_to_stack)
  11.108 -    intptr_t* monitors = montop - monitor_size;
  11.109 +  intptr_t* fp = interpreter_frame->fp();
  11.110  
  11.111 -    // preallocate stack space
  11.112 -    intptr_t*  esp = monitors - 1 -
  11.113 -                     (tempcount * Interpreter::stackElementWords) -
  11.114 -                     popframe_extra_args;
  11.115 +  JavaThread* thread = JavaThread::current();
  11.116 +  RegisterMap map(thread, false);
  11.117 +  // More verification that skeleton frame is properly walkable
  11.118 +  assert(fp == caller->sp(), "fp must match");
  11.119  
  11.120 -    int local_words = method->max_locals() * Interpreter::stackElementWords;
  11.121 -    NEEDS_CLEANUP;
  11.122 -    intptr_t* locals;
  11.123 -    if (caller->is_interpreted_frame()) {
  11.124 -      // Can force the locals area to end up properly overlapping the top of the expression stack.
  11.125 -      intptr_t* Lesp_ptr = caller->interpreter_frame_tos_address() - 1;
  11.126 -      // Note that this computation means we replace size_of_parameters() values from the caller
  11.127 -      // interpreter frame's expression stack with our argument locals
  11.128 -      int parm_words  = caller_actual_parameters * Interpreter::stackElementWords;
  11.129 -      locals = Lesp_ptr + parm_words;
  11.130 -      int delta = local_words - parm_words;
  11.131 -      int computed_sp_adjustment = (delta > 0) ? round_to(delta, WordsPerLong) : 0;
  11.132 -      *interpreter_frame->register_addr(I5_savedSP)    = (intptr_t) (fp + computed_sp_adjustment) - STACK_BIAS;
  11.133 -      if (!is_bottom_frame) {
  11.134 -        // Llast_SP is set below for the current frame to SP (with the
  11.135 -        // extra space for the callee's locals). Here we adjust
  11.136 -        // Llast_SP for the caller's frame, removing the extra space
  11.137 -        // for the current method's locals.
  11.138 -        *caller->register_addr(Llast_SP) = *interpreter_frame->register_addr(I5_savedSP);
  11.139 -      } else {
  11.140 -        assert(*caller->register_addr(Llast_SP) >= *interpreter_frame->register_addr(I5_savedSP), "strange Llast_SP");
  11.141 -      }
  11.142 +  intptr_t* montop     = fp - rounded_vm_local_words;
  11.143 +
  11.144 +  // preallocate monitors (cf. __ add_monitor_to_stack)
  11.145 +  intptr_t* monitors = montop - monitor_size;
  11.146 +
  11.147 +  // preallocate stack space
  11.148 +  intptr_t*  esp = monitors - 1 -
  11.149 +    (tempcount * Interpreter::stackElementWords) -
  11.150 +    popframe_extra_args;
  11.151 +
  11.152 +  int local_words = method->max_locals() * Interpreter::stackElementWords;
  11.153 +  NEEDS_CLEANUP;
  11.154 +  intptr_t* locals;
  11.155 +  if (caller->is_interpreted_frame()) {
  11.156 +    // Can force the locals area to end up properly overlapping the top of the expression stack.
  11.157 +    intptr_t* Lesp_ptr = caller->interpreter_frame_tos_address() - 1;
  11.158 +    // Note that this computation means we replace size_of_parameters() values from the caller
  11.159 +    // interpreter frame's expression stack with our argument locals
  11.160 +    int parm_words  = caller_actual_parameters * Interpreter::stackElementWords;
  11.161 +    locals = Lesp_ptr + parm_words;
  11.162 +    int delta = local_words - parm_words;
  11.163 +    int computed_sp_adjustment = (delta > 0) ? round_to(delta, WordsPerLong) : 0;
  11.164 +    *interpreter_frame->register_addr(I5_savedSP)    = (intptr_t) (fp + computed_sp_adjustment) - STACK_BIAS;
  11.165 +    if (!is_bottom_frame) {
  11.166 +      // Llast_SP is set below for the current frame to SP (with the
  11.167 +      // extra space for the callee's locals). Here we adjust
  11.168 +      // Llast_SP for the caller's frame, removing the extra space
  11.169 +      // for the current method's locals.
  11.170 +      *caller->register_addr(Llast_SP) = *interpreter_frame->register_addr(I5_savedSP);
  11.171      } else {
  11.172 -      assert(caller->is_compiled_frame() || caller->is_entry_frame(), "only possible cases");
  11.173 -      // Don't have Lesp available; lay out locals block in the caller
  11.174 -      // adjacent to the register window save area.
  11.175 -      //
  11.176 -      // Compiled frames do not allocate a varargs area which is why this if
  11.177 -      // statement is needed.
  11.178 -      //
  11.179 -      if (caller->is_compiled_frame()) {
  11.180 -        locals = fp + frame::register_save_words + local_words - 1;
  11.181 -      } else {
  11.182 -        locals = fp + frame::memory_parameter_word_sp_offset + local_words - 1;
  11.183 -      }
  11.184 -      if (!caller->is_entry_frame()) {
  11.185 -        // Caller wants his own SP back
  11.186 -        int caller_frame_size = caller->cb()->frame_size();
  11.187 -        *interpreter_frame->register_addr(I5_savedSP) = (intptr_t)(caller->fp() - caller_frame_size) - STACK_BIAS;
  11.188 +      assert(*caller->register_addr(Llast_SP) >= *interpreter_frame->register_addr(I5_savedSP), "strange Llast_SP");
  11.189 +    }
  11.190 +  } else {
  11.191 +    assert(caller->is_compiled_frame() || caller->is_entry_frame(), "only possible cases");
  11.192 +    // Don't have Lesp available; lay out locals block in the caller
  11.193 +    // adjacent to the register window save area.
  11.194 +    //
  11.195 +    // Compiled frames do not allocate a varargs area which is why this if
  11.196 +    // statement is needed.
  11.197 +    //
  11.198 +    if (caller->is_compiled_frame()) {
  11.199 +      locals = fp + frame::register_save_words + local_words - 1;
  11.200 +    } else {
  11.201 +      locals = fp + frame::memory_parameter_word_sp_offset + local_words - 1;
  11.202 +    }
  11.203 +    if (!caller->is_entry_frame()) {
  11.204 +      // Caller wants his own SP back
  11.205 +      int caller_frame_size = caller->cb()->frame_size();
  11.206 +      *interpreter_frame->register_addr(I5_savedSP) = (intptr_t)(caller->fp() - caller_frame_size) - STACK_BIAS;
  11.207 +    }
  11.208 +  }
  11.209 +  if (TraceDeoptimization) {
  11.210 +    if (caller->is_entry_frame()) {
  11.211 +      // make sure I5_savedSP and the entry frames notion of saved SP
  11.212 +      // agree.  This assertion duplicate a check in entry frame code
  11.213 +      // but catches the failure earlier.
  11.214 +      assert(*caller->register_addr(Lscratch) == *interpreter_frame->register_addr(I5_savedSP),
  11.215 +             "would change callers SP");
  11.216 +    }
  11.217 +    if (caller->is_entry_frame()) {
  11.218 +      tty->print("entry ");
  11.219 +    }
  11.220 +    if (caller->is_compiled_frame()) {
  11.221 +      tty->print("compiled ");
  11.222 +      if (caller->is_deoptimized_frame()) {
  11.223 +        tty->print("(deopt) ");
  11.224        }
  11.225      }
  11.226 -    if (TraceDeoptimization) {
  11.227 -      if (caller->is_entry_frame()) {
  11.228 -        // make sure I5_savedSP and the entry frames notion of saved SP
  11.229 -        // agree.  This assertion duplicate a check in entry frame code
  11.230 -        // but catches the failure earlier.
  11.231 -        assert(*caller->register_addr(Lscratch) == *interpreter_frame->register_addr(I5_savedSP),
  11.232 -               "would change callers SP");
  11.233 -      }
  11.234 -      if (caller->is_entry_frame()) {
  11.235 -        tty->print("entry ");
  11.236 -      }
  11.237 -      if (caller->is_compiled_frame()) {
  11.238 -        tty->print("compiled ");
  11.239 -        if (caller->is_deoptimized_frame()) {
  11.240 -          tty->print("(deopt) ");
  11.241 -        }
  11.242 -      }
  11.243 -      if (caller->is_interpreted_frame()) {
  11.244 -        tty->print("interpreted ");
  11.245 -      }
  11.246 -      tty->print_cr("caller fp=0x%x sp=0x%x", caller->fp(), caller->sp());
  11.247 -      tty->print_cr("save area = 0x%x, 0x%x", caller->sp(), caller->sp() + 16);
  11.248 -      tty->print_cr("save area = 0x%x, 0x%x", caller->fp(), caller->fp() + 16);
  11.249 -      tty->print_cr("interpreter fp=0x%x sp=0x%x", interpreter_frame->fp(), interpreter_frame->sp());
  11.250 -      tty->print_cr("save area = 0x%x, 0x%x", interpreter_frame->sp(), interpreter_frame->sp() + 16);
  11.251 -      tty->print_cr("save area = 0x%x, 0x%x", interpreter_frame->fp(), interpreter_frame->fp() + 16);
  11.252 -      tty->print_cr("Llocals = 0x%x", locals);
  11.253 -      tty->print_cr("Lesp = 0x%x", esp);
  11.254 -      tty->print_cr("Lmonitors = 0x%x", monitors);
  11.255 +    if (caller->is_interpreted_frame()) {
  11.256 +      tty->print("interpreted ");
  11.257      }
  11.258 +    tty->print_cr("caller fp=0x%x sp=0x%x", caller->fp(), caller->sp());
  11.259 +    tty->print_cr("save area = 0x%x, 0x%x", caller->sp(), caller->sp() + 16);
  11.260 +    tty->print_cr("save area = 0x%x, 0x%x", caller->fp(), caller->fp() + 16);
  11.261 +    tty->print_cr("interpreter fp=0x%x sp=0x%x", interpreter_frame->fp(), interpreter_frame->sp());
  11.262 +    tty->print_cr("save area = 0x%x, 0x%x", interpreter_frame->sp(), interpreter_frame->sp() + 16);
  11.263 +    tty->print_cr("save area = 0x%x, 0x%x", interpreter_frame->fp(), interpreter_frame->fp() + 16);
  11.264 +    tty->print_cr("Llocals = 0x%x", locals);
  11.265 +    tty->print_cr("Lesp = 0x%x", esp);
  11.266 +    tty->print_cr("Lmonitors = 0x%x", monitors);
  11.267 +  }
  11.268  
  11.269 -    if (method->max_locals() > 0) {
  11.270 -      assert(locals < caller->sp() || locals >= (caller->sp() + 16), "locals in save area");
  11.271 -      assert(locals < caller->fp() || locals > (caller->fp() + 16), "locals in save area");
  11.272 -      assert(locals < interpreter_frame->sp() || locals > (interpreter_frame->sp() + 16), "locals in save area");
  11.273 -      assert(locals < interpreter_frame->fp() || locals >= (interpreter_frame->fp() + 16), "locals in save area");
  11.274 -    }
  11.275 +  if (method->max_locals() > 0) {
  11.276 +    assert(locals < caller->sp() || locals >= (caller->sp() + 16), "locals in save area");
  11.277 +    assert(locals < caller->fp() || locals > (caller->fp() + 16), "locals in save area");
  11.278 +    assert(locals < interpreter_frame->sp() || locals > (interpreter_frame->sp() + 16), "locals in save area");
  11.279 +    assert(locals < interpreter_frame->fp() || locals >= (interpreter_frame->fp() + 16), "locals in save area");
  11.280 +  }
  11.281  #ifdef _LP64
  11.282 -    assert(*interpreter_frame->register_addr(I5_savedSP) & 1, "must be odd");
  11.283 +  assert(*interpreter_frame->register_addr(I5_savedSP) & 1, "must be odd");
  11.284  #endif
  11.285  
  11.286 -    *interpreter_frame->register_addr(Lmethod)     = (intptr_t) method;
  11.287 -    *interpreter_frame->register_addr(Llocals)     = (intptr_t) locals;
  11.288 -    *interpreter_frame->register_addr(Lmonitors)   = (intptr_t) monitors;
  11.289 -    *interpreter_frame->register_addr(Lesp)        = (intptr_t) esp;
  11.290 -    // Llast_SP will be same as SP as there is no adapter space
  11.291 -    *interpreter_frame->register_addr(Llast_SP)    = (intptr_t) interpreter_frame->sp() - STACK_BIAS;
  11.292 -    *interpreter_frame->register_addr(LcpoolCache) = (intptr_t) method->constants()->cache();
  11.293 +  *interpreter_frame->register_addr(Lmethod)     = (intptr_t) method;
  11.294 +  *interpreter_frame->register_addr(Llocals)     = (intptr_t) locals;
  11.295 +  *interpreter_frame->register_addr(Lmonitors)   = (intptr_t) monitors;
  11.296 +  *interpreter_frame->register_addr(Lesp)        = (intptr_t) esp;
  11.297 +  // Llast_SP will be same as SP as there is no adapter space
  11.298 +  *interpreter_frame->register_addr(Llast_SP)    = (intptr_t) interpreter_frame->sp() - STACK_BIAS;
  11.299 +  *interpreter_frame->register_addr(LcpoolCache) = (intptr_t) method->constants()->cache();
  11.300  #ifdef FAST_DISPATCH
  11.301 -    *interpreter_frame->register_addr(IdispatchTables) = (intptr_t) Interpreter::dispatch_table();
  11.302 +  *interpreter_frame->register_addr(IdispatchTables) = (intptr_t) Interpreter::dispatch_table();
  11.303  #endif
  11.304  
  11.305  
  11.306  #ifdef ASSERT
  11.307 -    BasicObjectLock* mp = (BasicObjectLock*)monitors;
  11.308 +  BasicObjectLock* mp = (BasicObjectLock*)monitors;
  11.309  
  11.310 -    assert(interpreter_frame->interpreter_frame_method() == method, "method matches");
  11.311 -    assert(interpreter_frame->interpreter_frame_local_at(9) == (intptr_t *)((intptr_t)locals - (9 * Interpreter::stackElementSize)), "locals match");
  11.312 -    assert(interpreter_frame->interpreter_frame_monitor_end()   == mp, "monitor_end matches");
  11.313 -    assert(((intptr_t *)interpreter_frame->interpreter_frame_monitor_begin()) == ((intptr_t *)mp)+monitor_size, "monitor_begin matches");
  11.314 -    assert(interpreter_frame->interpreter_frame_tos_address()-1 == esp, "esp matches");
  11.315 +  assert(interpreter_frame->interpreter_frame_method() == method, "method matches");
  11.316 +  assert(interpreter_frame->interpreter_frame_local_at(9) == (intptr_t *)((intptr_t)locals - (9 * Interpreter::stackElementSize)), "locals match");
  11.317 +  assert(interpreter_frame->interpreter_frame_monitor_end()   == mp, "monitor_end matches");
  11.318 +  assert(((intptr_t *)interpreter_frame->interpreter_frame_monitor_begin()) == ((intptr_t *)mp)+monitor_size, "monitor_begin matches");
  11.319 +  assert(interpreter_frame->interpreter_frame_tos_address()-1 == esp, "esp matches");
  11.320  
  11.321 -    // check bounds
  11.322 -    intptr_t* lo = interpreter_frame->sp() + (frame::memory_parameter_word_sp_offset - 1);
  11.323 -    intptr_t* hi = interpreter_frame->fp() - rounded_vm_local_words;
  11.324 -    assert(lo < monitors && montop <= hi, "monitors in bounds");
  11.325 -    assert(lo <= esp && esp < monitors, "esp in bounds");
  11.326 +  // check bounds
  11.327 +  intptr_t* lo = interpreter_frame->sp() + (frame::memory_parameter_word_sp_offset - 1);
  11.328 +  intptr_t* hi = interpreter_frame->fp() - rounded_vm_local_words;
  11.329 +  assert(lo < monitors && montop <= hi, "monitors in bounds");
  11.330 +  assert(lo <= esp && esp < monitors, "esp in bounds");
  11.331  #endif // ASSERT
  11.332 -  }
  11.333 -
  11.334 -  return raw_frame_size;
  11.335  }
  11.336  
  11.337  //----------------------------------------------------------------------------------------------------
    12.1 --- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Tue Jun 17 16:12:09 2014 -0700
    12.2 +++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Tue Jun 17 22:15:24 2014 -0700
    12.3 @@ -288,7 +288,7 @@
    12.4  
    12.5    // build frame
    12.6    ciMethod* m = compilation()->method();
    12.7 -  __ build_frame(initial_frame_size_in_bytes());
    12.8 +  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
    12.9  
   12.10    // OSR buffer is
   12.11    //
   12.12 @@ -376,7 +376,7 @@
   12.13  }
   12.14  
   12.15  // This specifies the rsp decrement needed to build the frame
   12.16 -int LIR_Assembler::initial_frame_size_in_bytes() {
   12.17 +int LIR_Assembler::initial_frame_size_in_bytes() const {
   12.18    // if rounding, must let FrameMap know!
   12.19  
   12.20    // The frame_map records size in slots (32bit word)
    13.1 --- a/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp	Tue Jun 17 16:12:09 2014 -0700
    13.2 +++ b/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp	Tue Jun 17 22:15:24 2014 -0700
    13.3 @@ -349,13 +349,14 @@
    13.4  }
    13.5  
    13.6  
    13.7 -void C1_MacroAssembler::build_frame(int frame_size_in_bytes) {
    13.8 +void C1_MacroAssembler::build_frame(int frame_size_in_bytes, int bang_size_in_bytes) {
    13.9 +  assert(bang_size_in_bytes >= frame_size_in_bytes, "stack bang size incorrect");
   13.10    // Make sure there is enough stack space for this method's activation.
   13.11    // Note that we do this before doing an enter(). This matches the
   13.12    // ordering of C2's stack overflow check / rsp decrement and allows
   13.13    // the SharedRuntime stack overflow handling to be consistent
   13.14    // between the two compilers.
   13.15 -  generate_stack_overflow_check(frame_size_in_bytes);
   13.16 +  generate_stack_overflow_check(bang_size_in_bytes);
   13.17  
   13.18    push(rbp);
   13.19  #ifdef TIERED
    14.1 --- a/src/cpu/x86/vm/cppInterpreter_x86.cpp	Tue Jun 17 16:12:09 2014 -0700
    14.2 +++ b/src/cpu/x86/vm/cppInterpreter_x86.cpp	Tue Jun 17 22:15:24 2014 -0700
    14.3 @@ -2336,29 +2336,42 @@
    14.4           "Stack top out of range");
    14.5  }
    14.6  
    14.7 -int AbstractInterpreter::layout_activation(Method* method,
    14.8 -                                           int tempcount,  //
    14.9 -                                           int popframe_extra_args,
   14.10 -                                           int moncount,
   14.11 -                                           int caller_actual_parameters,
   14.12 -                                           int callee_param_count,
   14.13 -                                           int callee_locals,
   14.14 -                                           frame* caller,
   14.15 -                                           frame* interpreter_frame,
   14.16 -                                           bool is_top_frame,
   14.17 -                                           bool is_bottom_frame) {
   14.18 -
   14.19 -  assert(popframe_extra_args == 0, "FIX ME");
   14.20 -  // NOTE this code must exactly mimic what InterpreterGenerator::generate_compute_interpreter_state()
   14.21 -  // does as far as allocating an interpreter frame.
   14.22 -  // If interpreter_frame!=NULL, set up the method, locals, and monitors.
   14.23 -  // The frame interpreter_frame, if not NULL, is guaranteed to be the right size,
   14.24 -  // as determined by a previous call to this method.
   14.25 -  // It is also guaranteed to be walkable even though it is in a skeletal state
   14.26 +
   14.27 +static int frame_size_helper(int max_stack,
   14.28 +                             int tempcount,
   14.29 +                             int moncount,
   14.30 +                             int callee_param_count,
   14.31 +                             int callee_locals,
   14.32 +                             bool is_top_frame,
   14.33 +                             int& monitor_size,
   14.34 +                             int& full_frame_size) {
   14.35 +  int extra_locals_size = (callee_locals - callee_param_count) * BytesPerWord;
   14.36 +  monitor_size = sizeof(BasicObjectLock) * moncount;
   14.37 +
   14.38 +  // First calculate the frame size without any java expression stack
   14.39 +  int short_frame_size = size_activation_helper(extra_locals_size,
   14.40 +                                                monitor_size);
   14.41 +
   14.42 +  // Now with full size expression stack
   14.43 +  full_frame_size = short_frame_size + max_stack * BytesPerWord;
   14.44 +
   14.45 +  // and now with only live portion of the expression stack
   14.46 +  short_frame_size = short_frame_size + tempcount * BytesPerWord;
   14.47 +
   14.48 +  // the size the activation is right now. Only top frame is full size
   14.49 +  int frame_size = (is_top_frame ? full_frame_size : short_frame_size);
   14.50 +  return frame_size;
   14.51 +}
   14.52 +
   14.53 +int AbstractInterpreter::size_activation(int max_stack,
   14.54 +                                         int tempcount,
   14.55 +                                         int extra_args,
   14.56 +                                         int moncount,
   14.57 +                                         int callee_param_count,
   14.58 +                                         int callee_locals,
   14.59 +                                         bool is_top_frame) {
   14.60 +  assert(extra_args == 0, "FIX ME");
   14.61    // NOTE: return size is in words not bytes
   14.62 -  // NOTE: tempcount is the current size of the java expression stack. For top most
   14.63 -  //       frames we will allocate a full sized expression stack and not the curback
   14.64 -  //       version that non-top frames have.
   14.65  
   14.66    // Calculate the amount our frame will be adjust by the callee. For top frame
   14.67    // this is zero.
   14.68 @@ -2368,87 +2381,102 @@
   14.69    // to it. So it ignores last_frame_adjust value. Seems suspicious as far
   14.70    // as getting sender_sp correct.
   14.71  
   14.72 -  int extra_locals_size = (callee_locals - callee_param_count) * BytesPerWord;
   14.73 -  int monitor_size = sizeof(BasicObjectLock) * moncount;
   14.74 -
   14.75 -  // First calculate the frame size without any java expression stack
   14.76 -  int short_frame_size = size_activation_helper(extra_locals_size,
   14.77 -                                                monitor_size);
   14.78 -
   14.79 -  // Now with full size expression stack
   14.80 -  int full_frame_size = short_frame_size + method->max_stack() * BytesPerWord;
   14.81 -
   14.82 -  // and now with only live portion of the expression stack
   14.83 -  short_frame_size = short_frame_size + tempcount * BytesPerWord;
   14.84 -
   14.85 -  // the size the activation is right now. Only top frame is full size
   14.86 -  int frame_size = (is_top_frame ? full_frame_size : short_frame_size);
   14.87 -
   14.88 -  if (interpreter_frame != NULL) {
   14.89 +  int unused_monitor_size = 0;
   14.90 +  int unused_full_frame_size = 0;
   14.91 +  return frame_size_helper(max_stack, tempcount, moncount, callee_param_count, callee_locals,
   14.92 +                           is_top_frame, unused_monitor_size, unused_full_frame_size)/BytesPerWord;
   14.93 +}
   14.94 +
   14.95 +void AbstractInterpreter::layout_activation(Method* method,
   14.96 +                                            int tempcount,  //
   14.97 +                                            int popframe_extra_args,
   14.98 +                                            int moncount,
   14.99 +                                            int caller_actual_parameters,
  14.100 +                                            int callee_param_count,
  14.101 +                                            int callee_locals,
  14.102 +                                            frame* caller,
  14.103 +                                            frame* interpreter_frame,
  14.104 +                                            bool is_top_frame,
  14.105 +                                            bool is_bottom_frame) {
  14.106 +
  14.107 +  assert(popframe_extra_args == 0, "FIX ME");
  14.108 +  // NOTE this code must exactly mimic what InterpreterGenerator::generate_compute_interpreter_state()
  14.109 +  // does as far as allocating an interpreter frame.
  14.110 +  // Set up the method, locals, and monitors.
  14.111 +  // The frame interpreter_frame is guaranteed to be the right size,
  14.112 +  // as determined by a previous call to the size_activation() method.
  14.113 +  // It is also guaranteed to be walkable even though it is in a skeletal state
  14.114 +  // NOTE: tempcount is the current size of the java expression stack. For top most
  14.115 +  //       frames we will allocate a full sized expression stack and not the curback
  14.116 +  //       version that non-top frames have.
  14.117 +
  14.118 +  int monitor_size = 0;
  14.119 +  int full_frame_size = 0;
  14.120 +  int frame_size = frame_size_helper(method->max_stack(), tempcount, moncount, callee_param_count, callee_locals,
  14.121 +                                     is_top_frame, monitor_size, full_frame_size);
  14.122 +
  14.123  #ifdef ASSERT
  14.124 -    assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
  14.125 +  assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
  14.126  #endif
  14.127  
  14.128 -    // MUCHO HACK
  14.129 -
  14.130 -    intptr_t* frame_bottom = (intptr_t*) ((intptr_t)interpreter_frame->sp() - (full_frame_size - frame_size));
  14.131 -
  14.132 -    /* Now fillin the interpreterState object */
  14.133 -
  14.134 -    // The state object is the first thing on the frame and easily located
  14.135 -
  14.136 -    interpreterState cur_state = (interpreterState) ((intptr_t)interpreter_frame->fp() - sizeof(BytecodeInterpreter));
  14.137 -
  14.138 -
  14.139 -    // Find the locals pointer. This is rather simple on x86 because there is no
  14.140 -    // confusing rounding at the callee to account for. We can trivially locate
  14.141 -    // our locals based on the current fp().
  14.142 -    // Note: the + 2 is for handling the "static long no_params() method" issue.
  14.143 -    // (too bad I don't really remember that issue well...)
  14.144 -
  14.145 -    intptr_t* locals;
  14.146 -    // If the caller is interpreted we need to make sure that locals points to the first
  14.147 -    // argument that the caller passed and not in an area where the stack might have been extended.
  14.148 -    // because the stack to stack to converter needs a proper locals value in order to remove the
  14.149 -    // arguments from the caller and place the result in the proper location. Hmm maybe it'd be
  14.150 -    // simpler if we simply stored the result in the BytecodeInterpreter object and let the c++ code
  14.151 -    // adjust the stack?? HMMM QQQ
  14.152 -    //
  14.153 -    if (caller->is_interpreted_frame()) {
  14.154 -      // locals must agree with the caller because it will be used to set the
  14.155 -      // caller's tos when we return.
  14.156 -      interpreterState prev  = caller->get_interpreterState();
  14.157 -      // stack() is prepushed.
  14.158 -      locals = prev->stack() + method->size_of_parameters();
  14.159 -      // locals = caller->unextended_sp() + (method->size_of_parameters() - 1);
  14.160 -      if (locals != interpreter_frame->fp() + frame::sender_sp_offset + (method->max_locals() - 1) + 2) {
  14.161 -        // os::breakpoint();
  14.162 -      }
  14.163 -    } else {
  14.164 -      // this is where a c2i would have placed locals (except for the +2)
  14.165 -      locals = interpreter_frame->fp() + frame::sender_sp_offset + (method->max_locals() - 1) + 2;
  14.166 +  // MUCHO HACK
  14.167 +
  14.168 +  intptr_t* frame_bottom = (intptr_t*) ((intptr_t)interpreter_frame->sp() - (full_frame_size - frame_size));
  14.169 +
  14.170 +  /* Now fillin the interpreterState object */
  14.171 +
  14.172 +  // The state object is the first thing on the frame and easily located
  14.173 +
  14.174 +  interpreterState cur_state = (interpreterState) ((intptr_t)interpreter_frame->fp() - sizeof(BytecodeInterpreter));
  14.175 +
  14.176 +
  14.177 +  // Find the locals pointer. This is rather simple on x86 because there is no
  14.178 +  // confusing rounding at the callee to account for. We can trivially locate
  14.179 +  // our locals based on the current fp().
  14.180 +  // Note: the + 2 is for handling the "static long no_params() method" issue.
  14.181 +  // (too bad I don't really remember that issue well...)
  14.182 +
  14.183 +  intptr_t* locals;
  14.184 +  // If the caller is interpreted we need to make sure that locals points to the first
  14.185 +  // argument that the caller passed and not in an area where the stack might have been extended.
  14.186 +  // because the stack to stack to converter needs a proper locals value in order to remove the
  14.187 +  // arguments from the caller and place the result in the proper location. Hmm maybe it'd be
  14.188 +  // simpler if we simply stored the result in the BytecodeInterpreter object and let the c++ code
  14.189 +  // adjust the stack?? HMMM QQQ
  14.190 +  //
  14.191 +  if (caller->is_interpreted_frame()) {
  14.192 +    // locals must agree with the caller because it will be used to set the
  14.193 +    // caller's tos when we return.
  14.194 +    interpreterState prev  = caller->get_interpreterState();
  14.195 +    // stack() is prepushed.
  14.196 +    locals = prev->stack() + method->size_of_parameters();
  14.197 +    // locals = caller->unextended_sp() + (method->size_of_parameters() - 1);
  14.198 +    if (locals != interpreter_frame->fp() + frame::sender_sp_offset + (method->max_locals() - 1) + 2) {
  14.199 +      // os::breakpoint();
  14.200      }
  14.201 -
  14.202 -    intptr_t* monitor_base = (intptr_t*) cur_state;
  14.203 -    intptr_t* stack_base = (intptr_t*) ((intptr_t) monitor_base - monitor_size);
  14.204 -    /* +1 because stack is always prepushed */
  14.205 -    intptr_t* stack = (intptr_t*) ((intptr_t) stack_base - (tempcount + 1) * BytesPerWord);
  14.206 -
  14.207 -
  14.208 -    BytecodeInterpreter::layout_interpreterState(cur_state,
  14.209 -                                          caller,
  14.210 -                                          interpreter_frame,
  14.211 -                                          method,
  14.212 -                                          locals,
  14.213 -                                          stack,
  14.214 -                                          stack_base,
  14.215 -                                          monitor_base,
  14.216 -                                          frame_bottom,
  14.217 -                                          is_top_frame);
  14.218 -
  14.219 -    // BytecodeInterpreter::pd_layout_interpreterState(cur_state, interpreter_return_address, interpreter_frame->fp());
  14.220 +  } else {
  14.221 +    // this is where a c2i would have placed locals (except for the +2)
  14.222 +    locals = interpreter_frame->fp() + frame::sender_sp_offset + (method->max_locals() - 1) + 2;
  14.223    }
  14.224 -  return frame_size/BytesPerWord;
  14.225 +
  14.226 +  intptr_t* monitor_base = (intptr_t*) cur_state;
  14.227 +  intptr_t* stack_base = (intptr_t*) ((intptr_t) monitor_base - monitor_size);
  14.228 +  /* +1 because stack is always prepushed */
  14.229 +  intptr_t* stack = (intptr_t*) ((intptr_t) stack_base - (tempcount + 1) * BytesPerWord);
  14.230 +
  14.231 +
  14.232 +  BytecodeInterpreter::layout_interpreterState(cur_state,
  14.233 +                                               caller,
  14.234 +                                               interpreter_frame,
  14.235 +                                               method,
  14.236 +                                               locals,
  14.237 +                                               stack,
  14.238 +                                               stack_base,
  14.239 +                                               monitor_base,
  14.240 +                                               frame_bottom,
  14.241 +                                               is_top_frame);
  14.242 +
  14.243 +  // BytecodeInterpreter::pd_layout_interpreterState(cur_state, interpreter_return_address, interpreter_frame->fp());
  14.244  }
  14.245  
  14.246  #endif // CC_INTERP (all)
    15.1 --- a/src/cpu/x86/vm/macroAssembler_x86.cpp	Tue Jun 17 16:12:09 2014 -0700
    15.2 +++ b/src/cpu/x86/vm/macroAssembler_x86.cpp	Tue Jun 17 22:15:24 2014 -0700
    15.3 @@ -1052,7 +1052,7 @@
    15.4    // was post-decremented.)  Skip this address by starting at i=1, and
    15.5    // touch a few more pages below.  N.B.  It is important to touch all
    15.6    // the way down to and including i=StackShadowPages.
    15.7 -  for (int i = 1; i <= StackShadowPages; i++) {
    15.8 +  for (int i = 1; i < StackShadowPages; i++) {
    15.9      // this could be any sized move but this is can be a debugging crumb
   15.10      // so the bigger the better.
   15.11      movptr(Address(tmp, (-i*os::vm_page_size())), size );
   15.12 @@ -6096,7 +6096,7 @@
   15.13  
   15.14  
   15.15  // C2 compiled method's prolog code.
   15.16 -void MacroAssembler::verified_entry(int framesize, bool stack_bang, bool fp_mode_24b) {
   15.17 +void MacroAssembler::verified_entry(int framesize, int stack_bang_size, bool fp_mode_24b) {
   15.18  
   15.19    // WARNING: Initial instruction MUST be 5 bytes or longer so that
   15.20    // NativeJump::patch_verified_entry will be able to patch out the entry
   15.21 @@ -6104,18 +6104,20 @@
   15.22    // the frame allocation can be either 3 or 6 bytes. So if we don't do
   15.23    // stack bang then we must use the 6 byte frame allocation even if
   15.24    // we have no frame. :-(
   15.25 +  assert(stack_bang_size >= framesize || stack_bang_size <= 0, "stack bang size incorrect");
   15.26  
   15.27    assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
   15.28    // Remove word for return addr
   15.29    framesize -= wordSize;
   15.30 +  stack_bang_size -= wordSize;
   15.31  
   15.32    // Calls to C2R adapters often do not accept exceptional returns.
   15.33    // We require that their callers must bang for them.  But be careful, because
   15.34    // some VM calls (such as call site linkage) can use several kilobytes of
   15.35    // stack.  But the stack safety zone should account for that.
   15.36    // See bugs 4446381, 4468289, 4497237.
   15.37 -  if (stack_bang) {
   15.38 -    generate_stack_overflow_check(framesize);
   15.39 +  if (stack_bang_size > 0) {
   15.40 +    generate_stack_overflow_check(stack_bang_size);
   15.41  
   15.42      // We always push rbp, so that on return to interpreter rbp, will be
   15.43      // restored correctly and we can correct the stack.
    16.1 --- a/src/cpu/x86/vm/macroAssembler_x86.hpp	Tue Jun 17 16:12:09 2014 -0700
    16.2 +++ b/src/cpu/x86/vm/macroAssembler_x86.hpp	Tue Jun 17 22:15:24 2014 -0700
    16.3 @@ -1170,7 +1170,7 @@
    16.4    void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }
    16.5  
    16.6    // C2 compiled method's prolog code.
    16.7 -  void verified_entry(int framesize, bool stack_bang, bool fp_mode_24b);
    16.8 +  void verified_entry(int framesize, int stack_bang_size, bool fp_mode_24b);
    16.9  
   16.10    // clear memory of size 'cnt' qwords, starting at 'base'.
   16.11    void clear_mem(Register base, Register cnt, Register rtmp);
    17.1 --- a/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Tue Jun 17 16:12:09 2014 -0700
    17.2 +++ b/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Tue Jun 17 22:15:24 2014 -0700
    17.3 @@ -3014,11 +3014,15 @@
    17.4    // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
    17.5    __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
    17.6  
    17.7 -  // Stack bang to make sure there's enough room for these interpreter frames.
    17.8 +#ifdef ASSERT
    17.9 +  // Compilers generate code that bang the stack by as much as the
   17.10 +  // interpreter would need. So this stack banging should never
   17.11 +  // trigger a fault. Verify that it does not on non product builds.
   17.12    if (UseStackBanging) {
   17.13      __ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
   17.14      __ bang_stack_size(rbx, rcx);
   17.15    }
   17.16 +#endif
   17.17  
   17.18    // Load array of frame pcs into ECX
   17.19    __ movptr(rcx,Address(rdi,Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
   17.20 @@ -3240,12 +3244,15 @@
   17.21    // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
   17.22    __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
   17.23  
   17.24 -  // Stack bang to make sure there's enough room for these interpreter frames.
   17.25 +#ifdef ASSERT
   17.26 +  // Compilers generate code that bang the stack by as much as the
   17.27 +  // interpreter would need. So this stack banging should never
   17.28 +  // trigger a fault. Verify that it does not on non product builds.
   17.29    if (UseStackBanging) {
   17.30      __ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
   17.31      __ bang_stack_size(rbx, rcx);
   17.32    }
   17.33 -
   17.34 +#endif
   17.35  
   17.36    // Load array of frame pcs into ECX
   17.37    __ movl(rcx,Address(rdi,Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
    18.1 --- a/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Tue Jun 17 16:12:09 2014 -0700
    18.2 +++ b/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Tue Jun 17 22:15:24 2014 -0700
    18.3 @@ -3484,11 +3484,15 @@
    18.4    // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
    18.5    __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
    18.6  
    18.7 -  // Stack bang to make sure there's enough room for these interpreter frames.
    18.8 +#ifdef ASSERT
    18.9 +  // Compilers generate code that bang the stack by as much as the
   18.10 +  // interpreter would need. So this stack banging should never
   18.11 +  // trigger a fault. Verify that it does not on non product builds.
   18.12    if (UseStackBanging) {
   18.13      __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
   18.14      __ bang_stack_size(rbx, rcx);
   18.15    }
   18.16 +#endif
   18.17  
   18.18    // Load address of array of frame pcs into rcx
   18.19    __ movptr(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
   18.20 @@ -3682,11 +3686,15 @@
   18.21    // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
   18.22    __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
   18.23  
   18.24 -  // Stack bang to make sure there's enough room for these interpreter frames.
   18.25 +#ifdef ASSERT
   18.26 +  // Compilers generate code that bang the stack by as much as the
   18.27 +  // interpreter would need. So this stack banging should never
   18.28 +  // trigger a fault. Verify that it does not on non product builds.
   18.29    if (UseStackBanging) {
   18.30      __ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
   18.31      __ bang_stack_size(rbx, rcx);
   18.32    }
   18.33 +#endif
   18.34  
   18.35    // Load address of array of frame pcs into rcx (address*)
   18.36    __ movptr(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
    19.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    19.2 +++ b/src/cpu/x86/vm/templateInterpreter_x86.cpp	Tue Jun 17 22:15:24 2014 -0700
    19.3 @@ -0,0 +1,124 @@
    19.4 +/*
    19.5 + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
    19.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    19.7 + *
    19.8 + * This code is free software; you can redistribute it and/or modify it
    19.9 + * under the terms of the GNU General Public License version 2 only, as
   19.10 + * published by the Free Software Foundation.
   19.11 + *
   19.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   19.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   19.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   19.15 + * version 2 for more details (a copy is included in the LICENSE file that
   19.16 + * accompanied this code).
   19.17 + *
   19.18 + * You should have received a copy of the GNU General Public License version
   19.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   19.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   19.21 + *
   19.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   19.23 + * or visit www.oracle.com if you need additional information or have any
   19.24 + * questions.
   19.25 + *
   19.26 + */
   19.27 +
   19.28 +#include "precompiled.hpp"
   19.29 +#include "ci/ciMethod.hpp"
   19.30 +#include "interpreter/interpreter.hpp"
   19.31 +#include "runtime/frame.inline.hpp"
   19.32 +
   19.33 +#ifndef CC_INTERP
   19.34 +
   19.35 +// asm based interpreter deoptimization helpers
   19.36 +int AbstractInterpreter::size_activation(int max_stack,
   19.37 +                                         int temps,
   19.38 +                                         int extra_args,
   19.39 +                                         int monitors,
   19.40 +                                         int callee_params,
   19.41 +                                         int callee_locals,
   19.42 +                                         bool is_top_frame) {
   19.43 +  // Note: This calculation must exactly parallel the frame setup
   19.44 +  // in AbstractInterpreterGenerator::generate_method_entry.
   19.45 +
   19.46 +  // fixed size of an interpreter frame:
   19.47 +  int overhead = frame::sender_sp_offset -
   19.48 +                 frame::interpreter_frame_initial_sp_offset;
   19.49 +  // Our locals were accounted for by the caller (or last_frame_adjust
   19.50 +  // on the transistion) Since the callee parameters already account
   19.51 +  // for the callee's params we only need to account for the extra
   19.52 +  // locals.
   19.53 +  int size = overhead +
   19.54 +         (callee_locals - callee_params)*Interpreter::stackElementWords +
   19.55 +         monitors * frame::interpreter_frame_monitor_size() +
   19.56 +         temps* Interpreter::stackElementWords + extra_args;
   19.57 +
   19.58 +  return size;
   19.59 +}
   19.60 +
   19.61 +void AbstractInterpreter::layout_activation(Method* method,
   19.62 +                                            int tempcount,
   19.63 +                                            int popframe_extra_args,
   19.64 +                                            int moncount,
   19.65 +                                            int caller_actual_parameters,
   19.66 +                                            int callee_param_count,
   19.67 +                                            int callee_locals,
   19.68 +                                            frame* caller,
   19.69 +                                            frame* interpreter_frame,
   19.70 +                                            bool is_top_frame,
   19.71 +                                            bool is_bottom_frame) {
   19.72 +  // The frame interpreter_frame is guaranteed to be the right size,
   19.73 +  // as determined by a previous call to the size_activation() method.
   19.74 +  // It is also guaranteed to be walkable even though it is in a
   19.75 +  // skeletal state
   19.76 +
   19.77 +  int max_locals = method->max_locals() * Interpreter::stackElementWords;
   19.78 +  int extra_locals = (method->max_locals() - method->size_of_parameters()) *
   19.79 +    Interpreter::stackElementWords;
   19.80 +
   19.81 +#ifdef ASSERT
   19.82 +  if (!EnableInvokeDynamic) {
   19.83 +    // @@@ FIXME: Should we correct interpreter_frame_sender_sp in the calling sequences?
   19.84 +    // Probably, since deoptimization doesn't work yet.
   19.85 +    assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
   19.86 +  }
   19.87 +  assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable(2)");
   19.88 +#endif
   19.89 +
   19.90 +  interpreter_frame->interpreter_frame_set_method(method);
   19.91 +  // NOTE the difference in using sender_sp and
   19.92 +  // interpreter_frame_sender_sp interpreter_frame_sender_sp is
   19.93 +  // the original sp of the caller (the unextended_sp) and
   19.94 +  // sender_sp is fp+8/16 (32bit/64bit) XXX
   19.95 +  intptr_t* locals = interpreter_frame->sender_sp() + max_locals - 1;
   19.96 +
   19.97 +#ifdef ASSERT
   19.98 +  if (caller->is_interpreted_frame()) {
   19.99 +    assert(locals < caller->fp() + frame::interpreter_frame_initial_sp_offset, "bad placement");
  19.100 +  }
  19.101 +#endif
  19.102 +
  19.103 +  interpreter_frame->interpreter_frame_set_locals(locals);
  19.104 +  BasicObjectLock* montop = interpreter_frame->interpreter_frame_monitor_begin();
  19.105 +  BasicObjectLock* monbot = montop - moncount;
  19.106 +  interpreter_frame->interpreter_frame_set_monitor_end(monbot);
  19.107 +
  19.108 +  // Set last_sp
  19.109 +  intptr_t*  esp = (intptr_t*) monbot -
  19.110 +    tempcount*Interpreter::stackElementWords -
  19.111 +    popframe_extra_args;
  19.112 +  interpreter_frame->interpreter_frame_set_last_sp(esp);
  19.113 +
  19.114 +  // All frames but the initial (oldest) interpreter frame we fill in have
  19.115 +  // a value for sender_sp that allows walking the stack but isn't
  19.116 +  // truly correct. Correct the value here.
  19.117 +  if (extra_locals != 0 &&
  19.118 +      interpreter_frame->sender_sp() ==
  19.119 +      interpreter_frame->interpreter_frame_sender_sp()) {
  19.120 +    interpreter_frame->set_interpreter_frame_sender_sp(caller->sp() +
  19.121 +                                                       extra_locals);
  19.122 +  }
  19.123 +  *interpreter_frame->interpreter_frame_cache_addr() =
  19.124 +    method->constants()->cache();
  19.125 +}
  19.126 +
  19.127 +#endif // CC_INTERP
    20.1 --- a/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Tue Jun 17 16:12:09 2014 -0700
    20.2 +++ b/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Tue Jun 17 22:15:24 2014 -0700
    20.3 @@ -1686,91 +1686,6 @@
    20.4    return overhead_size + method_stack + stub_code;
    20.5  }
    20.6  
    20.7 -// asm based interpreter deoptimization helpers
    20.8 -
    20.9 -int AbstractInterpreter::layout_activation(Method* method,
   20.10 -                                           int tempcount,
   20.11 -                                           int popframe_extra_args,
   20.12 -                                           int moncount,
   20.13 -                                           int caller_actual_parameters,
   20.14 -                                           int callee_param_count,
   20.15 -                                           int callee_locals,
   20.16 -                                           frame* caller,
   20.17 -                                           frame* interpreter_frame,
   20.18 -                                           bool is_top_frame,
   20.19 -                                           bool is_bottom_frame) {
   20.20 -  // Note: This calculation must exactly parallel the frame setup
   20.21 -  // in AbstractInterpreterGenerator::generate_method_entry.
   20.22 -  // If interpreter_frame!=NULL, set up the method, locals, and monitors.
   20.23 -  // The frame interpreter_frame, if not NULL, is guaranteed to be the right size,
   20.24 -  // as determined by a previous call to this method.
   20.25 -  // It is also guaranteed to be walkable even though it is in a skeletal state
   20.26 -  // NOTE: return size is in words not bytes
   20.27 -
   20.28 -  // fixed size of an interpreter frame:
   20.29 -  int max_locals = method->max_locals() * Interpreter::stackElementWords;
   20.30 -  int extra_locals = (method->max_locals() - method->size_of_parameters()) *
   20.31 -                     Interpreter::stackElementWords;
   20.32 -
   20.33 -  int overhead = frame::sender_sp_offset - frame::interpreter_frame_initial_sp_offset;
   20.34 -
   20.35 -  // Our locals were accounted for by the caller (or last_frame_adjust on the transistion)
   20.36 -  // Since the callee parameters already account for the callee's params we only need to account for
   20.37 -  // the extra locals.
   20.38 -
   20.39 -
   20.40 -  int size = overhead +
   20.41 -         ((callee_locals - callee_param_count)*Interpreter::stackElementWords) +
   20.42 -         (moncount*frame::interpreter_frame_monitor_size()) +
   20.43 -         tempcount*Interpreter::stackElementWords + popframe_extra_args;
   20.44 -
   20.45 -  if (interpreter_frame != NULL) {
   20.46 -#ifdef ASSERT
   20.47 -    if (!EnableInvokeDynamic)
   20.48 -      // @@@ FIXME: Should we correct interpreter_frame_sender_sp in the calling sequences?
   20.49 -      // Probably, since deoptimization doesn't work yet.
   20.50 -      assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
   20.51 -    assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable(2)");
   20.52 -#endif
   20.53 -
   20.54 -    interpreter_frame->interpreter_frame_set_method(method);
   20.55 -    // NOTE the difference in using sender_sp and interpreter_frame_sender_sp
   20.56 -    // interpreter_frame_sender_sp is the original sp of the caller (the unextended_sp)
   20.57 -    // and sender_sp is fp+8
   20.58 -    intptr_t* locals = interpreter_frame->sender_sp() + max_locals - 1;
   20.59 -
   20.60 -#ifdef ASSERT
   20.61 -    if (caller->is_interpreted_frame()) {
   20.62 -      assert(locals < caller->fp() + frame::interpreter_frame_initial_sp_offset, "bad placement");
   20.63 -    }
   20.64 -#endif
   20.65 -
   20.66 -    interpreter_frame->interpreter_frame_set_locals(locals);
   20.67 -    BasicObjectLock* montop = interpreter_frame->interpreter_frame_monitor_begin();
   20.68 -    BasicObjectLock* monbot = montop - moncount;
   20.69 -    interpreter_frame->interpreter_frame_set_monitor_end(monbot);
   20.70 -
   20.71 -    // Set last_sp
   20.72 -    intptr_t*  rsp = (intptr_t*) monbot  -
   20.73 -                     tempcount*Interpreter::stackElementWords -
   20.74 -                     popframe_extra_args;
   20.75 -    interpreter_frame->interpreter_frame_set_last_sp(rsp);
   20.76 -
   20.77 -    // All frames but the initial (oldest) interpreter frame we fill in have a
   20.78 -    // value for sender_sp that allows walking the stack but isn't
   20.79 -    // truly correct. Correct the value here.
   20.80 -
   20.81 -    if (extra_locals != 0 &&
   20.82 -        interpreter_frame->sender_sp() == interpreter_frame->interpreter_frame_sender_sp() ) {
   20.83 -      interpreter_frame->set_interpreter_frame_sender_sp(caller->sp() + extra_locals);
   20.84 -    }
   20.85 -    *interpreter_frame->interpreter_frame_cache_addr() =
   20.86 -      method->constants()->cache();
   20.87 -  }
   20.88 -  return size;
   20.89 -}
   20.90 -
   20.91 -
   20.92  //------------------------------------------------------------------------------------------------------------------------
   20.93  // Exceptions
   20.94  
    21.1 --- a/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Tue Jun 17 16:12:09 2014 -0700
    21.2 +++ b/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Tue Jun 17 22:15:24 2014 -0700
    21.3 @@ -1695,87 +1695,6 @@
    21.4    return (overhead_size + method_stack + stub_code);
    21.5  }
    21.6  
    21.7 -int AbstractInterpreter::layout_activation(Method* method,
    21.8 -                                           int tempcount,
    21.9 -                                           int popframe_extra_args,
   21.10 -                                           int moncount,
   21.11 -                                           int caller_actual_parameters,
   21.12 -                                           int callee_param_count,
   21.13 -                                           int callee_locals,
   21.14 -                                           frame* caller,
   21.15 -                                           frame* interpreter_frame,
   21.16 -                                           bool is_top_frame,
   21.17 -                                           bool is_bottom_frame) {
   21.18 -  // Note: This calculation must exactly parallel the frame setup
   21.19 -  // in AbstractInterpreterGenerator::generate_method_entry.
   21.20 -  // If interpreter_frame!=NULL, set up the method, locals, and monitors.
   21.21 -  // The frame interpreter_frame, if not NULL, is guaranteed to be the
   21.22 -  // right size, as determined by a previous call to this method.
   21.23 -  // It is also guaranteed to be walkable even though it is in a skeletal state
   21.24 -
   21.25 -  // fixed size of an interpreter frame:
   21.26 -  int max_locals = method->max_locals() * Interpreter::stackElementWords;
   21.27 -  int extra_locals = (method->max_locals() - method->size_of_parameters()) *
   21.28 -                     Interpreter::stackElementWords;
   21.29 -
   21.30 -  int overhead = frame::sender_sp_offset -
   21.31 -                 frame::interpreter_frame_initial_sp_offset;
   21.32 -  // Our locals were accounted for by the caller (or last_frame_adjust
   21.33 -  // on the transistion) Since the callee parameters already account
   21.34 -  // for the callee's params we only need to account for the extra
   21.35 -  // locals.
   21.36 -  int size = overhead +
   21.37 -         (callee_locals - callee_param_count)*Interpreter::stackElementWords +
   21.38 -         moncount * frame::interpreter_frame_monitor_size() +
   21.39 -         tempcount* Interpreter::stackElementWords + popframe_extra_args;
   21.40 -  if (interpreter_frame != NULL) {
   21.41 -#ifdef ASSERT
   21.42 -    if (!EnableInvokeDynamic)
   21.43 -      // @@@ FIXME: Should we correct interpreter_frame_sender_sp in the calling sequences?
   21.44 -      // Probably, since deoptimization doesn't work yet.
   21.45 -      assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
   21.46 -    assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable(2)");
   21.47 -#endif
   21.48 -
   21.49 -    interpreter_frame->interpreter_frame_set_method(method);
   21.50 -    // NOTE the difference in using sender_sp and
   21.51 -    // interpreter_frame_sender_sp interpreter_frame_sender_sp is
   21.52 -    // the original sp of the caller (the unextended_sp) and
   21.53 -    // sender_sp is fp+16 XXX
   21.54 -    intptr_t* locals = interpreter_frame->sender_sp() + max_locals - 1;
   21.55 -
   21.56 -#ifdef ASSERT
   21.57 -    if (caller->is_interpreted_frame()) {
   21.58 -      assert(locals < caller->fp() + frame::interpreter_frame_initial_sp_offset, "bad placement");
   21.59 -    }
   21.60 -#endif
   21.61 -
   21.62 -    interpreter_frame->interpreter_frame_set_locals(locals);
   21.63 -    BasicObjectLock* montop = interpreter_frame->interpreter_frame_monitor_begin();
   21.64 -    BasicObjectLock* monbot = montop - moncount;
   21.65 -    interpreter_frame->interpreter_frame_set_monitor_end(monbot);
   21.66 -
   21.67 -    // Set last_sp
   21.68 -    intptr_t*  esp = (intptr_t*) monbot -
   21.69 -                     tempcount*Interpreter::stackElementWords -
   21.70 -                     popframe_extra_args;
   21.71 -    interpreter_frame->interpreter_frame_set_last_sp(esp);
   21.72 -
   21.73 -    // All frames but the initial (oldest) interpreter frame we fill in have
   21.74 -    // a value for sender_sp that allows walking the stack but isn't
   21.75 -    // truly correct. Correct the value here.
   21.76 -    if (extra_locals != 0 &&
   21.77 -        interpreter_frame->sender_sp() ==
   21.78 -        interpreter_frame->interpreter_frame_sender_sp()) {
   21.79 -      interpreter_frame->set_interpreter_frame_sender_sp(caller->sp() +
   21.80 -                                                         extra_locals);
   21.81 -    }
   21.82 -    *interpreter_frame->interpreter_frame_cache_addr() =
   21.83 -      method->constants()->cache();
   21.84 -  }
   21.85 -  return size;
   21.86 -}
   21.87 -
   21.88  //-----------------------------------------------------------------------------
   21.89  // Exceptions
   21.90  
    22.1 --- a/src/cpu/x86/vm/x86_32.ad	Tue Jun 17 16:12:09 2014 -0700
    22.2 +++ b/src/cpu/x86/vm/x86_32.ad	Tue Jun 17 22:15:24 2014 -0700
    22.3 @@ -512,14 +512,15 @@
    22.4  void MachPrologNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
    22.5    Compile* C = ra_->C;
    22.6  
    22.7 -  int framesize = C->frame_slots() << LogBytesPerInt;
    22.8 +  int framesize = C->frame_size_in_bytes();
    22.9 +  int bangsize = C->bang_size_in_bytes();
   22.10    assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
   22.11    // Remove wordSize for return addr which is already pushed.
   22.12    framesize -= wordSize;
   22.13  
   22.14 -  if (C->need_stack_bang(framesize)) {
   22.15 +  if (C->need_stack_bang(bangsize)) {
   22.16      framesize -= wordSize;
   22.17 -    st->print("# stack bang");
   22.18 +    st->print("# stack bang (%d bytes)", bangsize);
   22.19      st->print("\n\t");
   22.20      st->print("PUSH   EBP\t# Save EBP");
   22.21      if (framesize) {
   22.22 @@ -563,9 +564,10 @@
   22.23    Compile* C = ra_->C;
   22.24    MacroAssembler _masm(&cbuf);
   22.25  
   22.26 -  int framesize = C->frame_slots() << LogBytesPerInt;
   22.27 -
   22.28 -  __ verified_entry(framesize, C->need_stack_bang(framesize), C->in_24_bit_fp_mode());
   22.29 +  int framesize = C->frame_size_in_bytes();
   22.30 +  int bangsize = C->bang_size_in_bytes();
   22.31 +
   22.32 +  __ verified_entry(framesize, C->need_stack_bang(bangsize)?bangsize:0, C->in_24_bit_fp_mode());
   22.33  
   22.34    C->set_frame_complete(cbuf.insts_size());
   22.35  
   22.36 @@ -589,7 +591,7 @@
   22.37  #ifndef PRODUCT
   22.38  void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
   22.39    Compile *C = ra_->C;
   22.40 -  int framesize = C->frame_slots() << LogBytesPerInt;
   22.41 +  int framesize = C->frame_size_in_bytes();
   22.42    assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
   22.43    // Remove two words for return addr and rbp,
   22.44    framesize -= 2*wordSize;
   22.45 @@ -629,7 +631,7 @@
   22.46      masm.fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
   22.47    }
   22.48  
   22.49 -  int framesize = C->frame_slots() << LogBytesPerInt;
   22.50 +  int framesize = C->frame_size_in_bytes();
   22.51    assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
   22.52    // Remove two words for return addr and rbp,
   22.53    framesize -= 2*wordSize;
   22.54 @@ -663,7 +665,7 @@
   22.55    if (C->max_vector_size() > 16) size += 3; // vzeroupper
   22.56    if (do_polling() && C->is_method_compilation()) size += 6;
   22.57  
   22.58 -  int framesize = C->frame_slots() << LogBytesPerInt;
   22.59 +  int framesize = C->frame_size_in_bytes();
   22.60    assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
   22.61    // Remove two words for return addr and rbp,
   22.62    framesize -= 2*wordSize;
    23.1 --- a/src/cpu/x86/vm/x86_64.ad	Tue Jun 17 16:12:09 2014 -0700
    23.2 +++ b/src/cpu/x86/vm/x86_64.ad	Tue Jun 17 22:15:24 2014 -0700
    23.3 @@ -713,14 +713,15 @@
    23.4  void MachPrologNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
    23.5    Compile* C = ra_->C;
    23.6  
    23.7 -  int framesize = C->frame_slots() << LogBytesPerInt;
    23.8 +  int framesize = C->frame_size_in_bytes();
    23.9 +  int bangsize = C->bang_size_in_bytes();
   23.10    assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
   23.11    // Remove wordSize for return addr which is already pushed.
   23.12    framesize -= wordSize;
   23.13  
   23.14 -  if (C->need_stack_bang(framesize)) {
   23.15 +  if (C->need_stack_bang(bangsize)) {
   23.16      framesize -= wordSize;
   23.17 -    st->print("# stack bang");
   23.18 +    st->print("# stack bang (%d bytes)", bangsize);
   23.19      st->print("\n\t");
   23.20      st->print("pushq   rbp\t# Save rbp");
   23.21      if (framesize) {
   23.22 @@ -751,9 +752,10 @@
   23.23    Compile* C = ra_->C;
   23.24    MacroAssembler _masm(&cbuf);
   23.25  
   23.26 -  int framesize = C->frame_slots() << LogBytesPerInt;
   23.27 -
   23.28 -  __ verified_entry(framesize, C->need_stack_bang(framesize), false);
   23.29 +  int framesize = C->frame_size_in_bytes();
   23.30 +  int bangsize = C->bang_size_in_bytes();
   23.31 +
   23.32 +  __ verified_entry(framesize, C->need_stack_bang(bangsize)?bangsize:0, false);
   23.33  
   23.34    C->set_frame_complete(cbuf.insts_size());
   23.35  
   23.36 @@ -786,7 +788,7 @@
   23.37      st->cr(); st->print("\t");
   23.38    }
   23.39  
   23.40 -  int framesize = C->frame_slots() << LogBytesPerInt;
   23.41 +  int framesize = C->frame_size_in_bytes();
   23.42    assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
   23.43    // Remove word for return adr already pushed
   23.44    // and RBP
   23.45 @@ -822,7 +824,7 @@
   23.46      __ vzeroupper();
   23.47    }
   23.48  
   23.49 -  int framesize = C->frame_slots() << LogBytesPerInt;
   23.50 +  int framesize = C->frame_size_in_bytes();
   23.51    assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
   23.52    // Remove word for return adr already pushed
   23.53    // and RBP
    24.1 --- a/src/cpu/zero/vm/cppInterpreter_zero.cpp	Tue Jun 17 16:12:09 2014 -0700
    24.2 +++ b/src/cpu/zero/vm/cppInterpreter_zero.cpp	Tue Jun 17 22:15:24 2014 -0700
    24.3 @@ -916,17 +916,32 @@
    24.4    return (InterpreterFrame *) fp;
    24.5  }
    24.6  
    24.7 -int AbstractInterpreter::layout_activation(Method* method,
    24.8 -                                           int       tempcount,
    24.9 -                                           int       popframe_extra_args,
   24.10 -                                           int       moncount,
   24.11 -                                           int       caller_actual_parameters,
   24.12 -                                           int       callee_param_count,
   24.13 -                                           int       callee_locals,
   24.14 -                                           frame*    caller,
   24.15 -                                           frame*    interpreter_frame,
   24.16 -                                           bool      is_top_frame,
   24.17 -                                           bool      is_bottom_frame) {
   24.18 +int AbstractInterpreter::size_activation(int       max_stack,
   24.19 +                                         int       tempcount,
   24.20 +                                         int       extra_args,
   24.21 +                                         int       moncount,
   24.22 +                                         int       callee_param_count,
   24.23 +                                         int       callee_locals,
   24.24 +                                         bool      is_top_frame) {
   24.25 +  int header_words        = InterpreterFrame::header_words;
   24.26 +  int monitor_words       = moncount * frame::interpreter_frame_monitor_size();
   24.27 +  int stack_words         = is_top_frame ? max_stack : tempcount;
   24.28 +  int callee_extra_locals = callee_locals - callee_param_count;
   24.29 +
   24.30 +  return header_words + monitor_words + stack_words + callee_extra_locals;
   24.31 +}
   24.32 +
   24.33 +void AbstractInterpreter::layout_activation(Method* method,
   24.34 +                                            int       tempcount,
   24.35 +                                            int       popframe_extra_args,
   24.36 +                                            int       moncount,
   24.37 +                                            int       caller_actual_parameters,
   24.38 +                                            int       callee_param_count,
   24.39 +                                            int       callee_locals,
   24.40 +                                            frame*    caller,
   24.41 +                                            frame*    interpreter_frame,
   24.42 +                                            bool      is_top_frame,
   24.43 +                                            bool      is_bottom_frame) {
   24.44    assert(popframe_extra_args == 0, "what to do?");
   24.45    assert(!is_top_frame || (!callee_locals && !callee_param_count),
   24.46           "top frame should have no caller");
   24.47 @@ -935,39 +950,31 @@
   24.48    // does (the full InterpreterFrame::build, that is, not the
   24.49    // one that creates empty frames for the deoptimizer).
   24.50    //
   24.51 -  // If interpreter_frame is not NULL then it will be filled in.
   24.52 -  // It's size is determined by a previous call to this method,
   24.53 -  // so it should be correct.
   24.54 +  // interpreter_frame will be filled in.  It's size is determined by
   24.55 +  // a previous call to the size_activation() method,
   24.56    //
   24.57    // Note that tempcount is the current size of the expression
   24.58    // stack.  For top most frames we will allocate a full sized
   24.59    // expression stack and not the trimmed version that non-top
   24.60    // frames have.
   24.61  
   24.62 -  int header_words        = InterpreterFrame::header_words;
   24.63    int monitor_words       = moncount * frame::interpreter_frame_monitor_size();
   24.64 -  int stack_words         = is_top_frame ? method->max_stack() : tempcount;
   24.65 -  int callee_extra_locals = callee_locals - callee_param_count;
   24.66 +  intptr_t *locals        = interpreter_frame->fp() + method->max_locals();
   24.67 +  interpreterState istate = interpreter_frame->get_interpreterState();
   24.68 +  intptr_t *monitor_base  = (intptr_t*) istate;
   24.69 +  intptr_t *stack_base    = monitor_base - monitor_words;
   24.70 +  intptr_t *stack         = stack_base - tempcount - 1;
   24.71  
   24.72 -  if (interpreter_frame) {
   24.73 -    intptr_t *locals        = interpreter_frame->fp() + method->max_locals();
   24.74 -    interpreterState istate = interpreter_frame->get_interpreterState();
   24.75 -    intptr_t *monitor_base  = (intptr_t*) istate;
   24.76 -    intptr_t *stack_base    = monitor_base - monitor_words;
   24.77 -    intptr_t *stack         = stack_base - tempcount - 1;
   24.78 -
   24.79 -    BytecodeInterpreter::layout_interpreterState(istate,
   24.80 -                                                 caller,
   24.81 -                                                 NULL,
   24.82 -                                                 method,
   24.83 -                                                 locals,
   24.84 -                                                 stack,
   24.85 -                                                 stack_base,
   24.86 -                                                 monitor_base,
   24.87 -                                                 NULL,
   24.88 -                                                 is_top_frame);
   24.89 -  }
   24.90 -  return header_words + monitor_words + stack_words + callee_extra_locals;
   24.91 +  BytecodeInterpreter::layout_interpreterState(istate,
   24.92 +                                               caller,
   24.93 +                                               NULL,
   24.94 +                                               method,
   24.95 +                                               locals,
   24.96 +                                               stack,
   24.97 +                                               stack_base,
   24.98 +                                               monitor_base,
   24.99 +                                               NULL,
  24.100 +                                               is_top_frame);
  24.101  }
  24.102  
  24.103  void BytecodeInterpreter::layout_interpreterState(interpreterState istate,
    25.1 --- a/src/share/vm/c1/c1_Compilation.cpp	Tue Jun 17 16:12:09 2014 -0700
    25.2 +++ b/src/share/vm/c1/c1_Compilation.cpp	Tue Jun 17 22:15:24 2014 -0700
    25.3 @@ -546,6 +546,7 @@
    25.4  , _code(buffer_blob)
    25.5  , _has_access_indexed(false)
    25.6  , _current_instruction(NULL)
    25.7 +, _interpreter_frame_size(0)
    25.8  #ifndef PRODUCT
    25.9  , _last_instruction_printed(NULL)
   25.10  #endif // PRODUCT
    26.1 --- a/src/share/vm/c1/c1_Compilation.hpp	Tue Jun 17 16:12:09 2014 -0700
    26.2 +++ b/src/share/vm/c1/c1_Compilation.hpp	Tue Jun 17 22:15:24 2014 -0700
    26.3 @@ -88,6 +88,7 @@
    26.4    CodeOffsets        _offsets;
    26.5    CodeBuffer         _code;
    26.6    bool               _has_access_indexed;
    26.7 +  int                _interpreter_frame_size; // Stack space needed in case of a deoptimization
    26.8  
    26.9    // compilation helpers
   26.10    void initialize();
   26.11 @@ -262,6 +263,18 @@
   26.12  
   26.13    // Dump inlining replay data to the stream.
   26.14    void dump_inline_data(outputStream* out) { /* do nothing now */ }
   26.15 +
   26.16 +  // How much stack space would the interpreter need in case of a
   26.17 +  // deoptimization (worst case)
   26.18 +  void update_interpreter_frame_size(int size) {
   26.19 +    if (_interpreter_frame_size < size) {
   26.20 +      _interpreter_frame_size = size;
   26.21 +    }
   26.22 +  }
   26.23 +
   26.24 +  int interpreter_frame_size() const {
   26.25 +    return _interpreter_frame_size;
   26.26 +  }
   26.27  };
   26.28  
   26.29  
    27.1 --- a/src/share/vm/c1/c1_IR.cpp	Tue Jun 17 16:12:09 2014 -0700
    27.2 +++ b/src/share/vm/c1/c1_IR.cpp	Tue Jun 17 22:15:24 2014 -0700
    27.3 @@ -226,8 +226,38 @@
    27.4    _oop_map->set_oop(name);
    27.5  }
    27.6  
    27.7 +// Mirror the stack size calculation in the deopt code
    27.8 +// How much stack space would we need at this point in the program in
    27.9 +// case of deoptimization?
   27.10 +int CodeEmitInfo::interpreter_frame_size() const {
   27.11 +  ValueStack* state = _stack;
   27.12 +  int size = 0;
   27.13 +  int callee_parameters = 0;
   27.14 +  int callee_locals = 0;
   27.15 +  int extra_args = state->scope()->method()->max_stack() - state->stack_size();
   27.16  
   27.17 +  while (state != NULL) {
   27.18 +    int locks = state->locks_size();
   27.19 +    int temps = state->stack_size();
   27.20 +    bool is_top_frame = (state == _stack);
   27.21 +    ciMethod* method = state->scope()->method();
   27.22  
   27.23 +    int frame_size = BytesPerWord * Interpreter::size_activation(method->max_stack(),
   27.24 +                                                                 temps + callee_parameters,
   27.25 +                                                                 extra_args,
   27.26 +                                                                 locks,
   27.27 +                                                                 callee_parameters,
   27.28 +                                                                 callee_locals,
   27.29 +                                                                 is_top_frame);
   27.30 +    size += frame_size;
   27.31 +
   27.32 +    callee_parameters = method->size_of_parameters();
   27.33 +    callee_locals = method->max_locals();
   27.34 +    extra_args = 0;
   27.35 +    state = state->caller_state();
   27.36 +  }
   27.37 +  return size + Deoptimization::last_frame_adjust(0, callee_locals) * BytesPerWord;
   27.38 +}
   27.39  
   27.40  // Implementation of IR
   27.41  
    28.1 --- a/src/share/vm/c1/c1_IR.hpp	Tue Jun 17 16:12:09 2014 -0700
    28.2 +++ b/src/share/vm/c1/c1_IR.hpp	Tue Jun 17 22:15:24 2014 -0700
    28.3 @@ -280,6 +280,8 @@
    28.4  
    28.5    bool     is_method_handle_invoke() const { return _is_method_handle_invoke;     }
    28.6    void set_is_method_handle_invoke(bool x) {        _is_method_handle_invoke = x; }
    28.7 +
    28.8 +  int interpreter_frame_size() const;
    28.9  };
   28.10  
   28.11  
    29.1 --- a/src/share/vm/c1/c1_LIRAssembler.cpp	Tue Jun 17 16:12:09 2014 -0700
    29.2 +++ b/src/share/vm/c1/c1_LIRAssembler.cpp	Tue Jun 17 22:15:24 2014 -0700
    29.3 @@ -185,6 +185,13 @@
    29.4    return _masm->pc();
    29.5  }
    29.6  
    29.7 +// To bang the stack of this compiled method we use the stack size
    29.8 +// that the interpreter would need in case of a deoptimization. This
    29.9 +// removes the need to bang the stack in the deoptimization blob which
   29.10 +// in turn simplifies stack overflow handling.
   29.11 +int LIR_Assembler::bang_size_in_bytes() const {
   29.12 +  return MAX2(initial_frame_size_in_bytes(), _compilation->interpreter_frame_size());
   29.13 +}
   29.14  
   29.15  void LIR_Assembler::emit_exception_entries(ExceptionInfoList* info_list) {
   29.16    for (int i = 0; i < info_list->length(); i++) {
   29.17 @@ -792,7 +799,7 @@
   29.18  
   29.19  
   29.20  void LIR_Assembler::build_frame() {
   29.21 -  _masm->build_frame(initial_frame_size_in_bytes());
   29.22 +  _masm->build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
   29.23  }
   29.24  
   29.25  
    30.1 --- a/src/share/vm/c1/c1_LIRAssembler.hpp	Tue Jun 17 16:12:09 2014 -0700
    30.2 +++ b/src/share/vm/c1/c1_LIRAssembler.hpp	Tue Jun 17 22:15:24 2014 -0700
    30.3 @@ -132,7 +132,8 @@
    30.4    int code_offset() const;
    30.5    address pc() const;
    30.6  
    30.7 -  int  initial_frame_size_in_bytes();
    30.8 +  int  initial_frame_size_in_bytes() const;
    30.9 +  int  bang_size_in_bytes() const;
   30.10  
   30.11    // test for constants which can be encoded directly in instructions
   30.12    static bool is_small_constant(LIR_Opr opr);
    31.1 --- a/src/share/vm/c1/c1_LinearScan.cpp	Tue Jun 17 16:12:09 2014 -0700
    31.2 +++ b/src/share/vm/c1/c1_LinearScan.cpp	Tue Jun 17 22:15:24 2014 -0700
    31.3 @@ -2441,6 +2441,9 @@
    31.4      CodeEmitInfo* info = visitor.info_at(i);
    31.5      OopMap* oop_map = first_oop_map;
    31.6  
    31.7 +    // compute worst case interpreter size in case of a deoptimization
    31.8 +    _compilation->update_interpreter_frame_size(info->interpreter_frame_size());
    31.9 +
   31.10      if (info->stack()->locks_size() != first_info->stack()->locks_size()) {
   31.11        // this info has a different number of locks then the precomputed oop map
   31.12        // (possible for lock and unlock instructions) -> compute oop map with
    32.1 --- a/src/share/vm/c1/c1_MacroAssembler.hpp	Tue Jun 17 16:12:09 2014 -0700
    32.2 +++ b/src/share/vm/c1/c1_MacroAssembler.hpp	Tue Jun 17 22:15:24 2014 -0700
    32.3 @@ -39,7 +39,7 @@
    32.4    void explicit_null_check(Register base);
    32.5  
    32.6    void inline_cache_check(Register receiver, Register iCache);
    32.7 -  void build_frame(int frame_size_in_bytes);
    32.8 +  void build_frame(int frame_size_in_bytes, int bang_size_in_bytes);
    32.9    void remove_frame(int frame_size_in_bytes);
   32.10  
   32.11    void unverified_entry(Register receiver, Register ic_klass);
    33.1 --- a/src/share/vm/ci/ciMethod.cpp	Tue Jun 17 16:12:09 2014 -0700
    33.2 +++ b/src/share/vm/ci/ciMethod.cpp	Tue Jun 17 22:15:24 2014 -0700
    33.3 @@ -80,6 +80,7 @@
    33.4    _code_size          = h_m()->code_size();
    33.5    _intrinsic_id       = h_m()->intrinsic_id();
    33.6    _handler_count      = h_m()->exception_table_length();
    33.7 +  _size_of_parameters = h_m()->size_of_parameters();
    33.8    _uses_monitors      = h_m()->access_flags().has_monitor_bytecodes();
    33.9    _balanced_monitors  = !_uses_monitors || h_m()->access_flags().is_monitor_matching();
   33.10    _is_c1_compilable   = !h_m()->is_not_c1_compilable();
    34.1 --- a/src/share/vm/ci/ciMethod.hpp	Tue Jun 17 16:12:09 2014 -0700
    34.2 +++ b/src/share/vm/ci/ciMethod.hpp	Tue Jun 17 22:15:24 2014 -0700
    34.3 @@ -71,6 +71,7 @@
    34.4    int _interpreter_invocation_count;
    34.5    int _interpreter_throwout_count;
    34.6    int _instructions_size;
    34.7 +  int _size_of_parameters;
    34.8  
    34.9    bool _uses_monitors;
   34.10    bool _balanced_monitors;
   34.11 @@ -166,6 +167,7 @@
   34.12    int exception_table_length() const             { check_is_loaded(); return _handler_count; }
   34.13    int interpreter_invocation_count() const       { check_is_loaded(); return _interpreter_invocation_count; }
   34.14    int interpreter_throwout_count() const         { check_is_loaded(); return _interpreter_throwout_count; }
   34.15 +  int size_of_parameters() const                 { check_is_loaded(); return _size_of_parameters; }
   34.16  
   34.17    // Code size for inlining decisions.
   34.18    int code_size_for_inlining();
   34.19 @@ -241,7 +243,6 @@
   34.20  
   34.21    ciField*      get_field_at_bci( int bci, bool &will_link);
   34.22    ciMethod*     get_method_at_bci(int bci, bool &will_link, ciSignature* *declared_signature);
   34.23 -
   34.24    // Given a certain calling environment, find the monomorphic target
   34.25    // for the call.  Return NULL if the call is not monomorphic in
   34.26    // its calling environment.
    35.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Tue Jun 17 16:12:09 2014 -0700
    35.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Tue Jun 17 22:15:24 2014 -0700
    35.3 @@ -310,8 +310,7 @@
    35.4                               _cmsGen->refs_discovery_is_mt(),     // mt discovery
    35.5                               (int) MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
    35.6                               _cmsGen->refs_discovery_is_atomic(), // discovery is not atomic
    35.7 -                             &_is_alive_closure,                  // closure for liveness info
    35.8 -                             false);                              // next field updates do not need write barrier
    35.9 +                             &_is_alive_closure);                 // closure for liveness info
   35.10      // Initialize the _ref_processor field of CMSGen
   35.11      _cmsGen->set_ref_processor(_ref_processor);
   35.12  
    36.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Jun 17 16:12:09 2014 -0700
    36.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Jun 17 22:15:24 2014 -0700
    36.3 @@ -2258,12 +2258,9 @@
    36.4                                  // degree of mt discovery
    36.5                             false,
    36.6                                  // Reference discovery is not atomic
    36.7 -                           &_is_alive_closure_cm,
    36.8 +                           &_is_alive_closure_cm);
    36.9                                  // is alive closure
   36.10                                  // (for efficiency/performance)
   36.11 -                           true);
   36.12 -                                // Setting next fields of discovered
   36.13 -                                // lists requires a barrier.
   36.14  
   36.15    // STW ref processor
   36.16    _ref_processor_stw =
   36.17 @@ -2278,12 +2275,9 @@
   36.18                                  // degree of mt discovery
   36.19                             true,
   36.20                                  // Reference discovery is atomic
   36.21 -                           &_is_alive_closure_stw,
   36.22 +                           &_is_alive_closure_stw);
   36.23                                  // is alive closure
   36.24                                  // (for efficiency/performance)
   36.25 -                           false);
   36.26 -                                // Setting next fields of discovered
   36.27 -                                // lists does not require a barrier.
   36.28  }
   36.29  
   36.30  size_t G1CollectedHeap::capacity() const {
    37.1 --- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Tue Jun 17 16:12:09 2014 -0700
    37.2 +++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Tue Jun 17 22:15:24 2014 -0700
    37.3 @@ -1638,8 +1638,7 @@
    37.4                               refs_discovery_is_mt(),     // mt discovery
    37.5                               (int) ParallelGCThreads,    // mt discovery degree
    37.6                               refs_discovery_is_atomic(), // atomic_discovery
    37.7 -                             NULL,                       // is_alive_non_header
    37.8 -                             false);                     // write barrier for next field updates
    37.9 +                             NULL);                      // is_alive_non_header
   37.10    }
   37.11  }
   37.12  
    38.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Tue Jun 17 16:12:09 2014 -0700
    38.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Tue Jun 17 22:15:24 2014 -0700
    38.3 @@ -853,8 +853,7 @@
    38.4                             true,          // mt discovery
    38.5                             (int) ParallelGCThreads, // mt discovery degree
    38.6                             true,          // atomic_discovery
    38.7 -                           &_is_alive_closure, // non-header is alive closure
    38.8 -                           false);        // write barrier for next field updates
    38.9 +                           &_is_alive_closure); // non-header is alive closure
   38.10    _counters = new CollectorCounters("PSParallelCompact", 1);
   38.11  
   38.12    // Initialize static fields in ParCompactionManager.
    39.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Tue Jun 17 16:12:09 2014 -0700
    39.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Tue Jun 17 22:15:24 2014 -0700
    39.3 @@ -861,8 +861,7 @@
    39.4                             true,                       // mt discovery
    39.5                             (int) ParallelGCThreads,    // mt discovery degree
    39.6                             true,                       // atomic_discovery
    39.7 -                           NULL,                       // header provides liveness info
    39.8 -                           false);                     // next field updates do not need write barrier
    39.9 +                           NULL);                      // header provides liveness info
   39.10  
   39.11    // Cache the cardtable
   39.12    BarrierSet* bs = Universe::heap()->barrier_set();
    40.1 --- a/src/share/vm/interpreter/abstractInterpreter.hpp	Tue Jun 17 16:12:09 2014 -0700
    40.2 +++ b/src/share/vm/interpreter/abstractInterpreter.hpp	Tue Jun 17 22:15:24 2014 -0700
    40.3 @@ -181,30 +181,16 @@
    40.4    // Deoptimization should reexecute this bytecode
    40.5    static bool    bytecode_should_reexecute(Bytecodes::Code code);
    40.6  
    40.7 -  // share implementation of size_activation and layout_activation:
    40.8 -  static int        size_activation(Method* method,
    40.9 +  // deoptimization support
   40.10 +  static int        size_activation(int max_stack,
   40.11                                      int temps,
   40.12 -                                    int popframe_args,
   40.13 +                                    int extra_args,
   40.14                                      int monitors,
   40.15 -                                    int caller_actual_parameters,
   40.16                                      int callee_params,
   40.17                                      int callee_locals,
   40.18 -                                    bool is_top_frame,
   40.19 -                                    bool is_bottom_frame) {
   40.20 -    return layout_activation(method,
   40.21 -                             temps,
   40.22 -                             popframe_args,
   40.23 -                             monitors,
   40.24 -                             caller_actual_parameters,
   40.25 -                             callee_params,
   40.26 -                             callee_locals,
   40.27 -                             (frame*)NULL,
   40.28 -                             (frame*)NULL,
   40.29 -                             is_top_frame,
   40.30 -                             is_bottom_frame);
   40.31 -  }
   40.32 +                                    bool is_top_frame);
   40.33  
   40.34 -  static int       layout_activation(Method* method,
   40.35 +  static void      layout_activation(Method* method,
   40.36                                       int temps,
   40.37                                       int popframe_args,
   40.38                                       int monitors,
    41.1 --- a/src/share/vm/memory/metaspace.cpp	Tue Jun 17 16:12:09 2014 -0700
    41.2 +++ b/src/share/vm/memory/metaspace.cpp	Tue Jun 17 22:15:24 2014 -0700
    41.3 @@ -1423,6 +1423,17 @@
    41.4    return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
    41.5  }
    41.6  
    41.7 +void MetaspaceGC::initialize() {
    41.8 +  // Set the high-water mark to MaxMetaspaceSize during VM initialization since
    41.9 +  // we can't do a GC during initialization.
   41.10 +  _capacity_until_GC = MaxMetaspaceSize;
   41.11 +}
   41.12 +
   41.13 +void MetaspaceGC::post_initialize() {
   41.14 +  // Reset the high-water mark once the VM initialization is done.
   41.15 +  _capacity_until_GC = MAX2(MetaspaceAux::committed_bytes(), MetaspaceSize);
   41.16 +}
   41.17 +
   41.18  bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
   41.19    // Check if the compressed class space is full.
   41.20    if (is_class && Metaspace::using_class_space()) {
   41.21 @@ -1443,21 +1454,13 @@
   41.22  
   41.23  size_t MetaspaceGC::allowed_expansion() {
   41.24    size_t committed_bytes = MetaspaceAux::committed_bytes();
   41.25 +  size_t capacity_until_gc = capacity_until_GC();
   41.26 +
   41.27 +  assert(capacity_until_gc >= committed_bytes,
   41.28 +        err_msg("capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
   41.29 +                capacity_until_gc, committed_bytes));
   41.30  
   41.31    size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
   41.32 -
   41.33 -  // Always grant expansion if we are initiating the JVM,
   41.34 -  // or if the GC_locker is preventing GCs.
   41.35 -  if (!is_init_completed() || GC_locker::is_active_and_needs_gc()) {
   41.36 -    return left_until_max / BytesPerWord;
   41.37 -  }
   41.38 -
   41.39 -  size_t capacity_until_gc = capacity_until_GC();
   41.40 -
   41.41 -  if (capacity_until_gc <= committed_bytes) {
   41.42 -    return 0;
   41.43 -  }
   41.44 -
   41.45    size_t left_until_GC = capacity_until_gc - committed_bytes;
   41.46    size_t left_to_commit = MIN2(left_until_GC, left_until_max);
   41.47  
   41.48 @@ -1469,7 +1472,15 @@
   41.49    uint current_shrink_factor = _shrink_factor;
   41.50    _shrink_factor = 0;
   41.51  
   41.52 -  const size_t used_after_gc = MetaspaceAux::capacity_bytes();
   41.53 +  // Using committed_bytes() for used_after_gc is an overestimation, since the
   41.54 +  // chunk free lists are included in committed_bytes() and the memory in an
   41.55 +  // un-fragmented chunk free list is available for future allocations.
   41.57 +  // However, if the chunk free lists become fragmented, then the memory may
   41.57 +  // not be available for future allocations and the memory is therefore "in use".
   41.58 +  // Including the chunk free lists in the definition of "in use" is therefore
   41.59 +  // necessary. Not including the chunk free lists can cause capacity_until_GC to
   41.60 +  // shrink below committed_bytes() and this has caused serious bugs in the past.
   41.61 +  const size_t used_after_gc = MetaspaceAux::committed_bytes();
   41.62    const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
   41.63  
   41.64    const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
   41.65 @@ -3093,6 +3104,8 @@
   41.66  }
   41.67  
   41.68  void Metaspace::global_initialize() {
   41.69 +  MetaspaceGC::initialize();
   41.70 +
   41.71    // Initialize the alignment for shared spaces.
   41.72    int max_alignment = os::vm_page_size();
   41.73    size_t cds_total = 0;
   41.74 @@ -3200,10 +3213,13 @@
   41.75      }
   41.76    }
   41.77  
   41.78 -  MetaspaceGC::initialize();
   41.79    _tracer = new MetaspaceTracer();
   41.80  }
   41.81  
   41.82 +void Metaspace::post_initialize() {
   41.83 +  MetaspaceGC::post_initialize();
   41.84 +}
   41.85 +
   41.86  Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype,
   41.87                                                 size_t chunk_word_size,
   41.88                                                 size_t chunk_bunch) {
    42.1 --- a/src/share/vm/memory/metaspace.hpp	Tue Jun 17 16:12:09 2014 -0700
    42.2 +++ b/src/share/vm/memory/metaspace.hpp	Tue Jun 17 22:15:24 2014 -0700
    42.3 @@ -208,6 +208,7 @@
    42.4  
    42.5    static void ergo_initialize();
    42.6    static void global_initialize();
    42.7 +  static void post_initialize();
    42.8  
    42.9    static size_t first_chunk_word_size() { return _first_chunk_word_size; }
   42.10    static size_t first_class_chunk_word_size() { return _first_class_chunk_word_size; }
   42.11 @@ -398,7 +399,8 @@
   42.12  
   42.13   public:
   42.14  
   42.15 -  static void initialize() { _capacity_until_GC = MetaspaceSize; }
   42.16 +  static void initialize();
   42.17 +  static void post_initialize();
   42.18  
   42.19    static size_t capacity_until_GC();
   42.20    static size_t inc_capacity_until_GC(size_t v);
    43.1 --- a/src/share/vm/memory/referenceProcessor.cpp	Tue Jun 17 16:12:09 2014 -0700
    43.2 +++ b/src/share/vm/memory/referenceProcessor.cpp	Tue Jun 17 22:15:24 2014 -0700
    43.3 @@ -96,12 +96,10 @@
    43.4                                         bool      mt_discovery,
    43.5                                         uint      mt_discovery_degree,
    43.6                                         bool      atomic_discovery,
    43.7 -                                       BoolObjectClosure* is_alive_non_header,
    43.8 -                                       bool      discovered_list_needs_post_barrier)  :
    43.9 +                                       BoolObjectClosure* is_alive_non_header)  :
   43.10    _discovering_refs(false),
   43.11    _enqueuing_is_done(false),
   43.12    _is_alive_non_header(is_alive_non_header),
   43.13 -  _discovered_list_needs_post_barrier(discovered_list_needs_post_barrier),
   43.14    _processing_is_mt(mt_processing),
   43.15    _next_id(0)
   43.16  {
   43.17 @@ -340,10 +338,18 @@
   43.18    // (java.lang.ref.Reference.discovered), self-loop their "next" field
   43.19    // thus distinguishing them from active References, then
   43.20    // prepend them to the pending list.
   43.21 +  //
   43.22 +  // The Java threads will see the Reference objects linked together through
   43.23 +  // the discovered field. Instead of trying to do the write barrier updates
   43.24 +  // in all places in the reference processor where we manipulate the discovered
   43.25 +  // field we make sure to do the barrier here where we anyway iterate through
   43.26 +  // all linked Reference objects. Note that it is important to not dirty any
   43.27 +  // cards during reference processing since this will cause card table
   43.28 +  // verification to fail for G1.
   43.29 +  //
   43.30    // BKWRD COMPATIBILITY NOTE: For older JDKs (prior to the fix for 4956777),
   43.31    // the "next" field is used to chain the pending list, not the discovered
   43.32    // field.
   43.33 -
   43.34    if (TraceReferenceGC && PrintGCDetails) {
   43.35      gclog_or_tty->print_cr("ReferenceProcessor::enqueue_discovered_reflist list "
   43.36                             INTPTR_FORMAT, (address)refs_list.head());
   43.37 @@ -351,7 +357,7 @@
   43.38  
   43.39    oop obj = NULL;
   43.40    oop next_d = refs_list.head();
   43.41 -  if (pending_list_uses_discovered_field()) { // New behaviour
   43.42 +  if (pending_list_uses_discovered_field()) { // New behavior
   43.43      // Walk down the list, self-looping the next field
   43.44      // so that the References are not considered active.
   43.45      while (obj != next_d) {
   43.46 @@ -365,15 +371,15 @@
   43.47        assert(java_lang_ref_Reference::next(obj) == NULL,
   43.48               "Reference not active; should not be discovered");
   43.49        // Self-loop next, so as to make Ref not active.
   43.50 -      // Post-barrier not needed when looping to self.
   43.51        java_lang_ref_Reference::set_next_raw(obj, obj);
   43.52 -      if (next_d == obj) {  // obj is last
   43.53 -        // Swap refs_list into pendling_list_addr and
   43.54 +      if (next_d != obj) {
   43.55 +        oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), next_d);
   43.56 +      } else {
   43.57 +        // This is the last object.
   43.58 +        // Swap refs_list into pending_list_addr and
   43.59          // set obj's discovered to what we read from pending_list_addr.
   43.60          oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
   43.61 -        // Need post-barrier on pending_list_addr above;
   43.62 -        // see special post-barrier code at the end of
   43.63 -        // enqueue_discovered_reflists() further below.
   43.64 +        // Need post-barrier on pending_list_addr. See enqueue_discovered_ref_helper() above.
   43.65          java_lang_ref_Reference::set_discovered_raw(obj, old); // old may be NULL
   43.66          oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), old);
   43.67        }
   43.68 @@ -496,20 +502,15 @@
   43.69    // pre-barrier here because we know the Reference has already been found/marked,
   43.70    // that's how it ended up in the discovered list in the first place.
   43.71    oop_store_raw(_prev_next, new_next);
   43.72 -  if (_discovered_list_needs_post_barrier && _prev_next != _refs_list.adr_head()) {
   43.73 -    // Needs post-barrier and this is not the list head (which is not on the heap)
   43.74 -    oopDesc::bs()->write_ref_field(_prev_next, new_next);
   43.75 -  }
   43.76    NOT_PRODUCT(_removed++);
   43.77    _refs_list.dec_length(1);
   43.78  }
   43.79  
   43.80  // Make the Reference object active again.
   43.81  void DiscoveredListIterator::make_active() {
   43.82 -  // For G1 we don't want to use set_next - it
   43.83 -  // will dirty the card for the next field of
   43.84 -  // the reference object and will fail
   43.85 -  // CT verification.
   43.86 +  // The pre barrier for G1 is probably just needed for the old
   43.87 +  // reference processing behavior. Should we guard this with
   43.88 +  // ReferenceProcessor::pending_list_uses_discovered_field() ?
   43.89    if (UseG1GC) {
   43.90      HeapWord* next_addr = java_lang_ref_Reference::next_addr(_ref);
   43.91      if (UseCompressedOops) {
   43.92 @@ -517,10 +518,8 @@
   43.93      } else {
   43.94        oopDesc::bs()->write_ref_field_pre((oop*)next_addr, NULL);
   43.95      }
   43.96 -    java_lang_ref_Reference::set_next_raw(_ref, NULL);
   43.97 -  } else {
   43.98 -    java_lang_ref_Reference::set_next(_ref, NULL);
   43.99    }
  43.100 +  java_lang_ref_Reference::set_next_raw(_ref, NULL);
  43.101  }
  43.102  
  43.103  void DiscoveredListIterator::clear_referent() {
  43.104 @@ -546,7 +545,7 @@
  43.105                                     OopClosure*        keep_alive,
  43.106                                     VoidClosure*       complete_gc) {
  43.107    assert(policy != NULL, "Must have a non-NULL policy");
  43.108 -  DiscoveredListIterator iter(refs_list, keep_alive, is_alive, _discovered_list_needs_post_barrier);
  43.109 +  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  43.110    // Decide which softly reachable refs should be kept alive.
  43.111    while (iter.has_next()) {
  43.112      iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
  43.113 @@ -586,7 +585,7 @@
  43.114                               BoolObjectClosure* is_alive,
  43.115                               OopClosure*        keep_alive) {
  43.116    assert(discovery_is_atomic(), "Error");
  43.117 -  DiscoveredListIterator iter(refs_list, keep_alive, is_alive, _discovered_list_needs_post_barrier);
  43.118 +  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  43.119    while (iter.has_next()) {
  43.120      iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
  43.121      DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());)
  43.122 @@ -623,7 +622,7 @@
  43.123                                                    OopClosure*        keep_alive,
  43.124                                                    VoidClosure*       complete_gc) {
  43.125    assert(!discovery_is_atomic(), "Error");
  43.126 -  DiscoveredListIterator iter(refs_list, keep_alive, is_alive, _discovered_list_needs_post_barrier);
  43.127 +  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  43.128    while (iter.has_next()) {
  43.129      iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
  43.130      HeapWord* next_addr = java_lang_ref_Reference::next_addr(iter.obj());
  43.131 @@ -666,7 +665,7 @@
  43.132                                     OopClosure*        keep_alive,
  43.133                                     VoidClosure*       complete_gc) {
  43.134    ResourceMark rm;
  43.135 -  DiscoveredListIterator iter(refs_list, keep_alive, is_alive, _discovered_list_needs_post_barrier);
  43.136 +  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  43.137    while (iter.has_next()) {
  43.138      iter.update_discovered();
  43.139      iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
  43.140 @@ -782,13 +781,6 @@
  43.141    bool _clear_referent;
  43.142  };
  43.143  
  43.144 -void ReferenceProcessor::set_discovered(oop ref, oop value) {
  43.145 -  java_lang_ref_Reference::set_discovered_raw(ref, value);
  43.146 -  if (_discovered_list_needs_post_barrier) {
  43.147 -    oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(ref), value);
  43.148 -  }
  43.149 -}
  43.150 -
  43.151  // Balances reference queues.
  43.152  // Move entries from all queues[0, 1, ..., _max_num_q-1] to
  43.153  // queues[0, 1, ..., _num_q-1] because only the first _num_q
  43.154 @@ -846,9 +838,9 @@
  43.155          // Add the chain to the to list.
  43.156          if (ref_lists[to_idx].head() == NULL) {
  43.157            // to list is empty. Make a loop at the end.
  43.158 -          set_discovered(move_tail, move_tail);
  43.159 +          java_lang_ref_Reference::set_discovered_raw(move_tail, move_tail);
  43.160          } else {
  43.161 -          set_discovered(move_tail, ref_lists[to_idx].head());
  43.162 +          java_lang_ref_Reference::set_discovered_raw(move_tail, ref_lists[to_idx].head());
  43.163          }
  43.164          ref_lists[to_idx].set_head(move_head);
  43.165          ref_lists[to_idx].inc_length(refs_to_move);
  43.166 @@ -982,7 +974,7 @@
  43.167  
  43.168  void ReferenceProcessor::clean_up_discovered_reflist(DiscoveredList& refs_list) {
  43.169    assert(!discovery_is_atomic(), "Else why call this method?");
  43.170 -  DiscoveredListIterator iter(refs_list, NULL, NULL, _discovered_list_needs_post_barrier);
  43.171 +  DiscoveredListIterator iter(refs_list, NULL, NULL);
  43.172    while (iter.has_next()) {
  43.173      iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
  43.174      oop next = java_lang_ref_Reference::next(iter.obj());
  43.175 @@ -1071,16 +1063,6 @@
  43.176    // The last ref must have its discovered field pointing to itself.
  43.177    oop next_discovered = (current_head != NULL) ? current_head : obj;
  43.178  
  43.179 -  // Note: In the case of G1, this specific pre-barrier is strictly
  43.180 -  // not necessary because the only case we are interested in
  43.181 -  // here is when *discovered_addr is NULL (see the CAS further below),
  43.182 -  // so this will expand to nothing. As a result, we have manually
  43.183 -  // elided this out for G1, but left in the test for some future
  43.184 -  // collector that might have need for a pre-barrier here, e.g.:-
  43.185 -  // oopDesc::bs()->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered);
  43.186 -  assert(!_discovered_list_needs_post_barrier || UseG1GC,
  43.187 -         "Need to check non-G1 collector: "
  43.188 -         "may need a pre-write-barrier for CAS from NULL below");
  43.189    oop retest = oopDesc::atomic_compare_exchange_oop(next_discovered, discovered_addr,
  43.190                                                      NULL);
  43.191    if (retest == NULL) {
  43.192 @@ -1089,9 +1071,6 @@
  43.193      // is necessary.
  43.194      refs_list.set_head(obj);
  43.195      refs_list.inc_length(1);
  43.196 -    if (_discovered_list_needs_post_barrier) {
  43.197 -      oopDesc::bs()->write_ref_field((void*)discovered_addr, next_discovered);
  43.198 -    }
  43.199  
  43.200      if (TraceReferenceGC) {
  43.201        gclog_or_tty->print_cr("Discovered reference (mt) (" INTPTR_FORMAT ": %s)",
  43.202 @@ -1242,24 +1221,14 @@
  43.203    if (_discovery_is_mt) {
  43.204      add_to_discovered_list_mt(*list, obj, discovered_addr);
  43.205    } else {
  43.206 -    // If "_discovered_list_needs_post_barrier", we do write barriers when
  43.207 -    // updating the discovered reference list.  Otherwise, we do a raw store
  43.208 -    // here: the field will be visited later when processing the discovered
  43.209 -    // references.
  43.210 +    // We do a raw store here: the field will be visited later when processing
  43.211 +    // the discovered references.
  43.212      oop current_head = list->head();
  43.213      // The last ref must have its discovered field pointing to itself.
  43.214      oop next_discovered = (current_head != NULL) ? current_head : obj;
  43.215  
  43.216 -    // As in the case further above, since we are over-writing a NULL
  43.217 -    // pre-value, we can safely elide the pre-barrier here for the case of G1.
  43.218 -    // e.g.:- oopDesc::bs()->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered);
  43.219      assert(discovered == NULL, "control point invariant");
  43.220 -    assert(!_discovered_list_needs_post_barrier || UseG1GC,
  43.221 -           "For non-G1 collector, may need a pre-write-barrier for CAS from NULL below");
  43.222      oop_store_raw(discovered_addr, next_discovered);
  43.223 -    if (_discovered_list_needs_post_barrier) {
  43.224 -      oopDesc::bs()->write_ref_field((void*)discovered_addr, next_discovered);
  43.225 -    }
  43.226      list->set_head(obj);
  43.227      list->inc_length(1);
  43.228  
  43.229 @@ -1353,7 +1322,7 @@
  43.230                                                  OopClosure*        keep_alive,
  43.231                                                  VoidClosure*       complete_gc,
  43.232                                                  YieldClosure*      yield) {
  43.233 -  DiscoveredListIterator iter(refs_list, keep_alive, is_alive, _discovered_list_needs_post_barrier);
  43.234 +  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  43.235    while (iter.has_next()) {
  43.236      iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
  43.237      oop obj = iter.obj();
    44.1 --- a/src/share/vm/memory/referenceProcessor.hpp	Tue Jun 17 16:12:09 2014 -0700
    44.2 +++ b/src/share/vm/memory/referenceProcessor.hpp	Tue Jun 17 22:15:24 2014 -0700
    44.3 @@ -99,7 +99,6 @@
    44.4    oop                _referent;
    44.5    OopClosure*        _keep_alive;
    44.6    BoolObjectClosure* _is_alive;
    44.7 -  bool               _discovered_list_needs_post_barrier;
    44.8  
    44.9    DEBUG_ONLY(
   44.10    oop                _first_seen; // cyclic linked list check
   44.11 @@ -113,8 +112,7 @@
   44.12  public:
   44.13    inline DiscoveredListIterator(DiscoveredList&    refs_list,
   44.14                                  OopClosure*        keep_alive,
   44.15 -                                BoolObjectClosure* is_alive,
   44.16 -                                bool               discovered_list_needs_post_barrier = false):
   44.17 +                                BoolObjectClosure* is_alive):
   44.18      _refs_list(refs_list),
   44.19      _prev_next(refs_list.adr_head()),
   44.20      _prev(NULL),
   44.21 @@ -128,8 +126,7 @@
   44.22  #endif
   44.23      _next(NULL),
   44.24      _keep_alive(keep_alive),
   44.25 -    _is_alive(is_alive),
   44.26 -    _discovered_list_needs_post_barrier(discovered_list_needs_post_barrier)
   44.27 +    _is_alive(is_alive)
   44.28  { }
   44.29  
   44.30    // End Of List.
   44.31 @@ -230,14 +227,6 @@
   44.32                                          // other collectors in configuration
   44.33    bool        _discovery_is_mt;         // true if reference discovery is MT.
   44.34  
   44.35 -  // If true, setting "next" field of a discovered refs list requires
   44.36 -  // write post barrier.  (Must be true if used in a collector in which
   44.37 -  // elements of a discovered list may be moved during discovery: for
   44.38 -  // example, a collector like Garbage-First that moves objects during a
   44.39 -  // long-term concurrent marking phase that does weak reference
   44.40 -  // discovery.)
   44.41 -  bool        _discovered_list_needs_post_barrier;
   44.42 -
   44.43    bool        _enqueuing_is_done;       // true if all weak references enqueued
   44.44    bool        _processing_is_mt;        // true during phases when
   44.45                                          // reference processing is MT.
   44.46 @@ -382,11 +371,6 @@
   44.47    void enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor);
   44.48  
   44.49   protected:
   44.50 -  // Set the 'discovered' field of the given reference to
   44.51 -  // the given value - emitting post barriers depending upon
   44.52 -  // the value of _discovered_list_needs_post_barrier.
   44.53 -  void set_discovered(oop ref, oop value);
   44.54 -
   44.55    // "Preclean" the given discovered reference list
   44.56    // by removing references with strongly reachable referents.
   44.57    // Currently used in support of CMS only.
   44.58 @@ -427,8 +411,7 @@
   44.59                       bool mt_processing = false, uint mt_processing_degree = 1,
   44.60                       bool mt_discovery  = false, uint mt_discovery_degree  = 1,
   44.61                       bool atomic_discovery = true,
   44.62 -                     BoolObjectClosure* is_alive_non_header = NULL,
   44.63 -                     bool discovered_list_needs_post_barrier = false);
   44.64 +                     BoolObjectClosure* is_alive_non_header = NULL);
   44.65  
   44.66    // RefDiscoveryPolicy values
   44.67    enum DiscoveryPolicy {
    45.1 --- a/src/share/vm/oops/cpCache.cpp	Tue Jun 17 16:12:09 2014 -0700
    45.2 +++ b/src/share/vm/oops/cpCache.cpp	Tue Jun 17 22:15:24 2014 -0700
    45.3 @@ -406,7 +406,7 @@
    45.4  
    45.5  
    45.6  oop ConstantPoolCacheEntry::appendix_if_resolved(constantPoolHandle cpool) {
    45.7 -  if (is_f1_null() || !has_appendix())
    45.8 +  if (!has_appendix())
    45.9      return NULL;
   45.10    const int ref_index = f2_as_index() + _indy_resolved_references_appendix_offset;
   45.11    objArrayOop resolved_references = cpool->resolved_references();
   45.12 @@ -415,7 +415,7 @@
   45.13  
   45.14  
   45.15  oop ConstantPoolCacheEntry::method_type_if_resolved(constantPoolHandle cpool) {
   45.16 -  if (is_f1_null() || !has_method_type())
   45.17 +  if (!has_method_type())
   45.18      return NULL;
   45.19    const int ref_index = f2_as_index() + _indy_resolved_references_method_type_offset;
   45.20    objArrayOop resolved_references = cpool->resolved_references();
    46.1 --- a/src/share/vm/oops/cpCache.hpp	Tue Jun 17 16:12:09 2014 -0700
    46.2 +++ b/src/share/vm/oops/cpCache.hpp	Tue Jun 17 22:15:24 2014 -0700
    46.3 @@ -1,5 +1,5 @@
    46.4  /*
    46.5 - * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
    46.6 + * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
    46.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    46.8   *
    46.9   * This code is free software; you can redistribute it and/or modify it
   46.10 @@ -346,8 +346,8 @@
   46.11    bool is_final() const                          { return (_flags & (1 << is_final_shift))          != 0; }
   46.12    bool is_forced_virtual() const                 { return (_flags & (1 << is_forced_virtual_shift)) != 0; }
   46.13    bool is_vfinal() const                         { return (_flags & (1 << is_vfinal_shift))         != 0; }
   46.14 -  bool has_appendix() const                      { return (_flags & (1 << has_appendix_shift))      != 0; }
   46.15 -  bool has_method_type() const                   { return (_flags & (1 << has_method_type_shift))   != 0; }
   46.16 +  bool has_appendix() const                      { return (!is_f1_null()) && (_flags & (1 << has_appendix_shift))      != 0; }
   46.17 +  bool has_method_type() const                   { return (!is_f1_null()) && (_flags & (1 << has_method_type_shift))   != 0; }
   46.18    bool is_method_entry() const                   { return (_flags & (1 << is_field_entry_shift))    == 0; }
   46.19    bool is_field_entry() const                    { return (_flags & (1 << is_field_entry_shift))    != 0; }
   46.20    bool is_byte() const                           { return flag_state() == btos; }
    47.1 --- a/src/share/vm/oops/instanceKlass.cpp	Tue Jun 17 16:12:09 2014 -0700
    47.2 +++ b/src/share/vm/oops/instanceKlass.cpp	Tue Jun 17 22:15:24 2014 -0700
    47.3 @@ -2771,7 +2771,7 @@
    47.4    Method* m = n->method();
    47.5    // Search for match
    47.6    while(cur != NULL && cur != n) {
    47.7 -    if (TieredCompilation) {
    47.8 +    if (TieredCompilation && m == cur->method()) {
    47.9        // Find max level before n
   47.10        max_level = MAX2(max_level, cur->comp_level());
   47.11      }
   47.12 @@ -2793,7 +2793,9 @@
   47.13      cur = next;
   47.14      while (cur != NULL) {
   47.15        // Find max level after n
   47.16 -      max_level = MAX2(max_level, cur->comp_level());
   47.17 +      if (m == cur->method()) {
   47.18 +        max_level = MAX2(max_level, cur->comp_level());
   47.19 +      }
   47.20        cur = cur->osr_link();
   47.21      }
   47.22      m->set_highest_osr_comp_level(max_level);
    48.1 --- a/src/share/vm/opto/callnode.cpp	Tue Jun 17 16:12:09 2014 -0700
    48.2 +++ b/src/share/vm/opto/callnode.cpp	Tue Jun 17 22:15:24 2014 -0700
    48.3 @@ -607,6 +607,39 @@
    48.4    }
    48.5  }
    48.6  
    48.7 +// Mirror the stack size calculation in the deopt code
    48.8 +// How much stack space would we need at this point in the program in
    48.9 +// case of deoptimization?
   48.10 +int JVMState::interpreter_frame_size() const {
   48.11 +  const JVMState* jvms = this;
   48.12 +  int size = 0;
   48.13 +  int callee_parameters = 0;
   48.14 +  int callee_locals = 0;
   48.15 +  int extra_args = method()->max_stack() - stk_size();
   48.16 +
   48.17 +  while (jvms != NULL) {
   48.18 +    int locks = jvms->nof_monitors();
   48.19 +    int temps = jvms->stk_size();
   48.20 +    bool is_top_frame = (jvms == this);
   48.21 +    ciMethod* method = jvms->method();
   48.22 +
   48.23 +    int frame_size = BytesPerWord * Interpreter::size_activation(method->max_stack(),
   48.24 +                                                                 temps + callee_parameters,
   48.25 +                                                                 extra_args,
   48.26 +                                                                 locks,
   48.27 +                                                                 callee_parameters,
   48.28 +                                                                 callee_locals,
   48.29 +                                                                 is_top_frame);
   48.30 +    size += frame_size;
   48.31 +
   48.32 +    callee_parameters = method->size_of_parameters();
   48.33 +    callee_locals = method->max_locals();
   48.34 +    extra_args = 0;
   48.35 +    jvms = jvms->caller();
   48.36 +  }
   48.37 +  return size + Deoptimization::last_frame_adjust(0, callee_locals) * BytesPerWord;
   48.38 +}
   48.39 +
   48.40  //=============================================================================
   48.41  uint CallNode::cmp( const Node &n ) const
   48.42  { return _tf == ((CallNode&)n)._tf && _jvms == ((CallNode&)n)._jvms; }
    49.1 --- a/src/share/vm/opto/callnode.hpp	Tue Jun 17 16:12:09 2014 -0700
    49.2 +++ b/src/share/vm/opto/callnode.hpp	Tue Jun 17 22:15:24 2014 -0700
    49.3 @@ -300,6 +300,7 @@
    49.4    JVMState* clone_shallow(Compile* C) const; // retains uncloned caller
    49.5    void      set_map_deep(SafePointNode *map);// reset map for all callers
    49.6    void      adapt_position(int delta);       // Adapt offsets in in-array after adding an edge.
    49.7 +  int       interpreter_frame_size() const;
    49.8  
    49.9  #ifndef PRODUCT
   49.10    void      format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const;
    50.1 --- a/src/share/vm/opto/compile.cpp	Tue Jun 17 16:12:09 2014 -0700
    50.2 +++ b/src/share/vm/opto/compile.cpp	Tue Jun 17 22:15:24 2014 -0700
    50.3 @@ -439,6 +439,14 @@
    50.4    return words;
    50.5  }
    50.6  
    50.7 +// To bang the stack of this compiled method we use the stack size
    50.8 +// that the interpreter would need in case of a deoptimization. This
    50.9 +// removes the need to bang the stack in the deoptimization blob which
   50.10 +// in turn simplifies stack overflow handling.
   50.11 +int Compile::bang_size_in_bytes() const {
   50.12 +  return MAX2(_interpreter_frame_size, frame_size_in_bytes());
   50.13 +}
   50.14 +
   50.15  // ============================================================================
   50.16  //------------------------------CompileWrapper---------------------------------
   50.17  class CompileWrapper : public StackObj {
   50.18 @@ -662,7 +670,8 @@
   50.19                    _inlining_incrementally(false),
   50.20                    _print_inlining_list(NULL),
   50.21                    _print_inlining_idx(0),
   50.22 -                  _preserve_jvm_state(0) {
   50.23 +                  _preserve_jvm_state(0),
   50.24 +                  _interpreter_frame_size(0) {
   50.25    C = this;
   50.26  
   50.27    CompileWrapper cw(this);
   50.28 @@ -969,7 +978,8 @@
   50.29      _print_inlining_list(NULL),
   50.30      _print_inlining_idx(0),
   50.31      _preserve_jvm_state(0),
   50.32 -    _allowed_reasons(0) {
   50.33 +    _allowed_reasons(0),
   50.34 +    _interpreter_frame_size(0) {
   50.35    C = this;
   50.36  
   50.37  #ifndef PRODUCT
   50.38 @@ -3078,8 +3088,12 @@
   50.39        Node* m = n->in(i);
   50.40        ++i;
   50.41        if (m != NULL && !frc._visited.test_set(m->_idx)) {
   50.42 -        if (m->is_SafePoint() && m->as_SafePoint()->jvms() != NULL)
   50.43 +        if (m->is_SafePoint() && m->as_SafePoint()->jvms() != NULL) {
   50.44 +          // compute worst case interpreter size in case of a deoptimization
   50.45 +          update_interpreter_frame_size(m->as_SafePoint()->jvms()->interpreter_frame_size());
   50.46 +
   50.47            sfpt.push(m);
   50.48 +        }
   50.49          cnt = m->req();
   50.50          nstack.push(n, i); // put on stack parent and next input's index
   50.51          n = m;
    51.1 --- a/src/share/vm/opto/compile.hpp	Tue Jun 17 16:12:09 2014 -0700
    51.2 +++ b/src/share/vm/opto/compile.hpp	Tue Jun 17 22:15:24 2014 -0700
    51.3 @@ -487,6 +487,7 @@
    51.4    RegMask               _FIRST_STACK_mask;      // All stack slots usable for spills (depends on frame layout)
    51.5    Arena*                _indexSet_arena;        // control IndexSet allocation within PhaseChaitin
    51.6    void*                 _indexSet_free_block_list; // free list of IndexSet bit blocks
    51.7 +  int                   _interpreter_frame_size;
    51.8  
    51.9    uint                  _node_bundling_limit;
   51.10    Bundle*               _node_bundling_base;    // Information for instruction bundling
   51.11 @@ -946,6 +947,7 @@
   51.12    PhaseRegAlloc*    regalloc()                  { return _regalloc; }
   51.13    int               frame_slots() const         { return _frame_slots; }
   51.14    int               frame_size_in_words() const; // frame_slots in units of the polymorphic 'words'
   51.15 +  int               frame_size_in_bytes() const { return _frame_slots << LogBytesPerInt; }
   51.16    RegMask&          FIRST_STACK_mask()          { return _FIRST_STACK_mask; }
   51.17    Arena*            indexSet_arena()            { return _indexSet_arena; }
   51.18    void*             indexSet_free_block_list()  { return _indexSet_free_block_list; }
   51.19 @@ -957,6 +959,13 @@
   51.20    bool          need_stack_bang(int frame_size_in_bytes) const;
   51.21    bool          need_register_stack_bang() const;
   51.22  
   51.23 +  void  update_interpreter_frame_size(int size) {
   51.24 +    if (_interpreter_frame_size < size) {
   51.25 +      _interpreter_frame_size = size;
   51.26 +    }
   51.27 +  }
   51.28 +  int           bang_size_in_bytes() const;
   51.29 +
   51.30    void          set_matcher(Matcher* m)                 { _matcher = m; }
   51.31  //void          set_regalloc(PhaseRegAlloc* ra)           { _regalloc = ra; }
   51.32    void          set_indexSet_arena(Arena* a)            { _indexSet_arena = a; }
    52.1 --- a/src/share/vm/opto/loopopts.cpp	Tue Jun 17 16:12:09 2014 -0700
    52.2 +++ b/src/share/vm/opto/loopopts.cpp	Tue Jun 17 22:15:24 2014 -0700
    52.3 @@ -1401,7 +1401,8 @@
    52.4          // loop.  Happens if people set a loop-exit flag; then test the flag
    52.5          // in the loop to break the loop, then test is again outside of the
    52.6          // loop to determine which way the loop exited.
    52.7 -        if( use->is_If() || use->is_CMove() ) {
    52.8 +        // Loop predicate If node connects to Bool node through Opaque1 node.
    52.9 +        if (use->is_If() || use->is_CMove() || C->is_predicate_opaq(use)) {
   52.10            // Since this code is highly unlikely, we lazily build the worklist
   52.11            // of such Nodes to go split.
   52.12            if( !split_if_set )
   52.13 @@ -2768,11 +2769,11 @@
   52.14        // Hit!  Refactor use to use the post-incremented tripcounter.
   52.15        // Compute a post-increment tripcounter.
   52.16        Node *opaq = new (C) Opaque2Node( C, cle->incr() );
   52.17 -      register_new_node( opaq, u_ctrl );
   52.18 +      register_new_node(opaq, exit);
   52.19        Node *neg_stride = _igvn.intcon(-cle->stride_con());
   52.20        set_ctrl(neg_stride, C->root());
   52.21        Node *post = new (C) AddINode( opaq, neg_stride);
   52.22 -      register_new_node( post, u_ctrl );
   52.23 +      register_new_node(post, exit);
   52.24        _igvn.rehash_node_delayed(use);
   52.25        for (uint j = 1; j < use->req(); j++) {
   52.26          if (use->in(j) == phi)
    53.1 --- a/src/share/vm/opto/output.cpp	Tue Jun 17 16:12:09 2014 -0700
    53.2 +++ b/src/share/vm/opto/output.cpp	Tue Jun 17 22:15:24 2014 -0700
    53.3 @@ -165,8 +165,13 @@
    53.4    // Determine if we need to generate a stack overflow check.
    53.5    // Do it if the method is not a stub function and
    53.6    // has java calls or has frame size > vm_page_size/8.
    53.7 +  // The debug VM checks that deoptimization doesn't trigger an
    53.8 +  // unexpected stack overflow (compiled method stack banging should
    53.9 +  // guarantee it doesn't happen) so we always need the stack bang in
   53.10 +  // a debug VM.
   53.11    return (UseStackBanging && stub_function() == NULL &&
   53.12 -          (has_java_calls() || frame_size_in_bytes > os::vm_page_size()>>3));
   53.13 +          (has_java_calls() || frame_size_in_bytes > os::vm_page_size()>>3
   53.14 +           DEBUG_ONLY(|| true)));
   53.15  }
   53.16  
   53.17  bool Compile::need_register_stack_bang() const {
    54.1 --- a/src/share/vm/runtime/deoptimization.cpp	Tue Jun 17 16:12:09 2014 -0700
    54.2 +++ b/src/share/vm/runtime/deoptimization.cpp	Tue Jun 17 22:15:24 2014 -0700
    54.3 @@ -422,15 +422,9 @@
    54.4      // frame[number_of_frames - 1 ] = on_stack_size(youngest)
    54.5      // frame[number_of_frames - 2 ] = on_stack_size(sender(youngest))
    54.6      // frame[number_of_frames - 3 ] = on_stack_size(sender(sender(youngest)))
    54.7 -    int caller_parms = callee_parameters;
    54.8 -    if ((index == array->frames() - 1) && caller_was_method_handle) {
    54.9 -      caller_parms = 0;
   54.10 -    }
   54.11 -    frame_sizes[number_of_frames - 1 - index] = BytesPerWord * array->element(index)->on_stack_size(caller_parms,
   54.12 -                                                                                                    callee_parameters,
   54.13 +    frame_sizes[number_of_frames - 1 - index] = BytesPerWord * array->element(index)->on_stack_size(callee_parameters,
   54.14                                                                                                      callee_locals,
   54.15                                                                                                      index == 0,
   54.16 -                                                                                                    index == array->frames() - 1,
   54.17                                                                                                      popframe_extra_args);
   54.18      // This pc doesn't have to be perfect just good enough to identify the frame
   54.19      // as interpreted so the skeleton frame will be walkable
    55.1 --- a/src/share/vm/runtime/sharedRuntime.cpp	Tue Jun 17 16:12:09 2014 -0700
    55.2 +++ b/src/share/vm/runtime/sharedRuntime.cpp	Tue Jun 17 22:15:24 2014 -0700
    55.3 @@ -785,10 +785,13 @@
    55.4          // going to be unwound. Dispatch to a shared runtime stub
    55.5          // which will cause the StackOverflowError to be fabricated
    55.6          // and processed.
    55.7 -        // For stack overflow in deoptimization blob, cleanup thread.
    55.8 -        if (thread->deopt_mark() != NULL) {
    55.9 -          Deoptimization::cleanup_deopt_info(thread, NULL);
   55.10 -        }
   55.11 +        // Stack overflow should never occur during deoptimization:
   55.12 +        // the compiled method bangs the stack by as much as the
   55.13 +        // interpreter would need in case of a deoptimization. The
   55.14 +        // deoptimization blob and uncommon trap blob bang the stack
   55.15 +        // in a debug VM to verify the correctness of the compiled
   55.16 +        // method stack banging.
   55.17 +        assert(thread->deopt_mark() == NULL, "no stack overflow from deopt blob/uncommon trap");
   55.18          Events::log_exception(thread, "StackOverflowError at " INTPTR_FORMAT, pc);
   55.19          return StubRoutines::throw_StackOverflowError_entry();
   55.20        }
    56.1 --- a/src/share/vm/runtime/thread.cpp	Tue Jun 17 16:12:09 2014 -0700
    56.2 +++ b/src/share/vm/runtime/thread.cpp	Tue Jun 17 22:15:24 2014 -0700
    56.3 @@ -3574,6 +3574,8 @@
    56.4    // debug stuff, that does not work until all basic classes have been initialized.
    56.5    set_init_completed();
    56.6  
    56.7 +  Metaspace::post_initialize();
    56.8 +
    56.9  #ifndef USDT2
   56.10    HS_DTRACE_PROBE(hotspot, vm__init__end);
   56.11  #else /* USDT2 */
    57.1 --- a/src/share/vm/runtime/vframeArray.cpp	Tue Jun 17 16:12:09 2014 -0700
    57.2 +++ b/src/share/vm/runtime/vframeArray.cpp	Tue Jun 17 22:15:24 2014 -0700
    57.3 @@ -419,24 +419,20 @@
    57.4  
    57.5  }
    57.6  
    57.7 -int vframeArrayElement::on_stack_size(int caller_actual_parameters,
    57.8 -                                      int callee_parameters,
    57.9 +int vframeArrayElement::on_stack_size(int callee_parameters,
   57.10                                        int callee_locals,
   57.11                                        bool is_top_frame,
   57.12 -                                      bool is_bottom_frame,
   57.13                                        int popframe_extra_stack_expression_els) const {
   57.14    assert(method()->max_locals() == locals()->size(), "just checking");
   57.15    int locks = monitors() == NULL ? 0 : monitors()->number_of_monitors();
   57.16    int temps = expressions()->size();
   57.17 -  return Interpreter::size_activation(method(),
   57.18 +  return Interpreter::size_activation(method()->max_stack(),
   57.19                                        temps + callee_parameters,
   57.20                                        popframe_extra_stack_expression_els,
   57.21                                        locks,
   57.22 -                                      caller_actual_parameters,
   57.23                                        callee_parameters,
   57.24                                        callee_locals,
   57.25 -                                      is_top_frame,
   57.26 -                                      is_bottom_frame);
   57.27 +                                      is_top_frame);
   57.28  }
   57.29  
   57.30  
    58.1 --- a/src/share/vm/runtime/vframeArray.hpp	Tue Jun 17 16:12:09 2014 -0700
    58.2 +++ b/src/share/vm/runtime/vframeArray.hpp	Tue Jun 17 22:15:24 2014 -0700
    58.3 @@ -85,10 +85,8 @@
    58.4  
    58.5    // Returns the on stack word size for this frame
    58.6    // callee_parameters is the number of callee locals residing inside this frame
    58.7 -  int on_stack_size(int caller_actual_parameters,
    58.8 -                    int callee_parameters,
    58.9 +  int on_stack_size(int callee_parameters,
   58.10                      int callee_locals,
   58.11 -                    bool is_bottom_frame,
   58.12                      bool is_top_frame,
   58.13                      int popframe_extra_stack_expression_els) const;
   58.14  
    59.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    59.2 +++ b/test/compiler/loopopts/TestLogSum.java	Tue Jun 17 22:15:24 2014 -0700
    59.3 @@ -0,0 +1,111 @@
    59.4 +/*
    59.5 + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
    59.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    59.7 + *
    59.8 + * This code is free software; you can redistribute it and/or modify it
    59.9 + * under the terms of the GNU General Public License version 2 only, as
   59.10 + * published by the Free Software Foundation.
   59.11 + *
   59.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   59.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   59.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   59.15 + * version 2 for more details (a copy is included in the LICENSE file that
   59.16 + * accompanied this code).
   59.17 + *
   59.18 + * You should have received a copy of the GNU General Public License version
   59.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   59.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   59.21 + *
   59.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   59.23 + * or visit www.oracle.com if you need additional information or have any
   59.24 + * questions.
   59.25 + */
   59.26 +
   59.27 +/*
   59.28 + * @test
   59.29 + * @bug 8046516
   59.30 + * @summary Segmentation fault in JVM (easily reproducible)
   59.31 + * @run main/othervm -XX:-TieredCompilation -Xbatch TestLogSum
   59.32 + * @author jackkamm@gmail.com
   59.33 + */
   59.34 +
   59.35 +import java.util.Arrays;
   59.36 +import java.util.HashMap;
   59.37 +import java.util.List;
   59.38 +import java.util.Map;
   59.39 +public class TestLogSum {
   59.40 +  public static void main(String[] args) {
   59.41 +    double sum;
   59.42 +
   59.43 +    for (int i = 0; i < 6; i++) {
   59.44 +        for (int n = 2; n < 30; n++) {
   59.45 +           for (int j = 1; j <= n; j++) {
   59.46 +              for (int k = 1; k <= j; k++) {
   59.47 +                // System.out.println(computeSum(k, j));
   59.48 +                sum = computeSum(k, j);
   59.49 +              }
   59.50 +           }
   59.51 +        }
   59.52 +      }
   59.53 +   }
   59.54 +
   59.55 +   private static Map<List<Integer>, Double> cache = new HashMap<List<Integer>, Double>();
   59.56 +   public static double computeSum(int x, int y) {
   59.57 +      List<Integer> key = Arrays.asList(new Integer[] {x, y});
   59.58 +
   59.59 +      if (!cache.containsKey(key)) {
   59.60 +
   59.61 +        // explicitly creating/updating a double[] array, instead of using the LogSumArray wrapper object, will prevent the error
   59.62 +        LogSumArray toReturn = new LogSumArray(x);
   59.63 +
   59.64 +        // changing loop indices will prevent the error
   59.65 +        // in particular, for(z=0; z<x-1; z++), and then using z+1 in place of z, will not produce error
   59.66 +        for (int z = 1; z < x+1; z++) {
   59.67 +           double logSummand = Math.log(z + x + y);
   59.68 +           toReturn.addLogSummand(logSummand);
   59.69 +        }
   59.70 +
   59.71 +        // returning the value here without cacheing it will prevent the segfault
   59.72 +        cache.put(key, toReturn.retrieveLogSum());
   59.73 +      }
   59.74 +      return cache.get(key);
   59.75 +   }
   59.76 +
   59.77 +   /*
   59.78 +    * Given a bunch of logarithms log(X),log(Y),log(Z),...
   59.79 +    * This class is used to compute the log of the sum, log(X+Y+Z+...)
   59.80 +    */
   59.81 +   private static class LogSumArray {
   59.82 +      private double[] logSummandArray;
   59.83 +      private int currSize;
   59.84 +
   59.85 +      private double maxLogSummand;
   59.86 +
   59.87 +      public LogSumArray(int maxEntries) {
   59.88 +        this.logSummandArray = new double[maxEntries];
   59.89 +
   59.90 +        this.currSize = 0;
   59.91 +        this.maxLogSummand = Double.NEGATIVE_INFINITY;
   59.92 +      }
   59.93 +
   59.94 +      public void addLogSummand(double logSummand) {
   59.95 +        logSummandArray[currSize] = logSummand;
   59.96 +        currSize++;
   59.97 +        // removing this line will prevent the error
   59.98 +        maxLogSummand = Math.max(maxLogSummand, logSummand);
   59.99 +      }
  59.100 +
  59.101 +      public double retrieveLogSum() {
  59.102 +        if (maxLogSummand == Double.NEGATIVE_INFINITY) return Double.NEGATIVE_INFINITY;
  59.103 +
  59.104 +        assert currSize <= logSummandArray.length;
  59.105 +
  59.106 +        double factorSum = 0;
  59.107 +        for (int i = 0; i < currSize; i++) {
  59.108 +           factorSum += Math.exp(logSummandArray[i] - maxLogSummand);
  59.109 +        }
  59.110 +
  59.111 +        return Math.log(factorSum) + maxLogSummand;
  59.112 +      }
  59.113 +   }
  59.114 +}
    60.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    60.2 +++ b/test/compiler/uncommontrap/TestStackBangMonitorOwned.java	Tue Jun 17 22:15:24 2014 -0700
    60.3 @@ -0,0 +1,268 @@
    60.4 +/*
    60.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
    60.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    60.7 + *
    60.8 + * This code is free software; you can redistribute it and/or modify it
    60.9 + * under the terms of the GNU General Public License version 2 only, as
   60.10 + * published by the Free Software Foundation.
   60.11 + *
   60.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   60.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   60.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   60.15 + * version 2 for more details (a copy is included in the LICENSE file that
   60.16 + * accompanied this code).
   60.17 + *
   60.18 + * You should have received a copy of the GNU General Public License version
   60.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   60.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   60.21 + *
   60.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   60.23 + * or visit www.oracle.com if you need additional information or have any
   60.24 + * questions.
   60.25 + */
   60.26 +
   60.27 +/*
   60.28 + * @test
   60.29 + * @bug 8032410
   60.30 + * @summary Stack overflow at deoptimization doesn't release owned monitors
   60.31 + * @run main/othervm -XX:-BackgroundCompilation -XX:CompileCommand=dontinline,TestStackBangMonitorOwned::m1 -XX:CompileCommand=exclude,TestStackBangMonitorOwned::m2 -Xss256K -XX:-UseOnStackReplacement TestStackBangMonitorOwned
   60.32 + *
   60.33 + */
   60.34 +public class TestStackBangMonitorOwned {
   60.35 +
   60.36 +    static class UnloadedClass1 {
   60.37 +        volatile int field;
   60.38 +    }
   60.39 +
   60.40 +    static Object m1(boolean deopt) {
   60.41 +        long l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12,
   60.42 +        l13, l14, l15, l16, l17, l18, l19, l20, l21, l22, l23, l24,
   60.43 +        l25, l26, l27, l28, l29, l30, l31, l32, l33, l34, l35, l36,
   60.44 +        l37, l38, l39, l40, l41, l42, l43, l44, l45, l46, l47, l48,
   60.45 +        l49, l50, l51, l52, l53, l54, l55, l56, l57, l58, l59, l60,
   60.46 +        l61, l62, l63, l64, l65, l66, l67, l68, l69, l70, l71, l72,
   60.47 +        l73, l74, l75, l76, l77, l78, l79, l80, l81, l82, l83, l84,
   60.48 +        l85, l86, l87, l88, l89, l90, l91, l92, l93, l94, l95, l96,
   60.49 +        l97, l98, l99, l100, l101, l102, l103, l104, l105, l106, l107,
   60.50 +        l108, l109, l110, l111, l112, l113, l114, l115, l116, l117,
   60.51 +        l118, l119, l120, l121, l122, l123, l124, l125, l126, l127,
   60.52 +        l128, l129, l130, l131, l132, l133, l134, l135, l136, l137,
   60.53 +        l138, l139, l140, l141, l142, l143, l144, l145, l146, l147,
   60.54 +        l148, l149, l150, l151, l152, l153, l154, l155, l156, l157,
   60.55 +        l158, l159, l160, l161, l162, l163, l164, l165, l166, l167,
   60.56 +        l168, l169, l170, l171, l172, l173, l174, l175, l176, l177,
   60.57 +        l178, l179, l180, l181, l182, l183, l184, l185, l186, l187,
   60.58 +        l188, l189, l190, l191, l192, l193, l194, l195, l196, l197,
   60.59 +        l198, l199, l200, l201, l202, l203, l204, l205, l206, l207,
   60.60 +        l208, l209, l210, l211, l212, l213, l214, l215, l216, l217,
   60.61 +        l218, l219, l220, l221, l222, l223, l224, l225, l226, l227,
   60.62 +        l228, l229, l230, l231, l232, l233, l234, l235, l236, l237,
   60.63 +        l238, l239, l240, l241, l242, l243, l244, l245, l246, l247,
   60.64 +        l248, l249, l250, l251, l252, l253, l254, l255, l256, l257,
   60.65 +        l258, l259, l260, l261, l262, l263, l264, l265, l266, l267,
   60.66 +        l268, l269, l270, l271, l272, l273, l274, l275, l276, l277,
   60.67 +        l278, l279, l280, l281, l282, l283, l284, l285, l286, l287,
   60.68 +        l288, l289, l290, l291, l292, l293, l294, l295, l296, l297,
   60.69 +        l298, l299, l300, l301, l302, l303, l304, l305, l306, l307,
   60.70 +        l308, l309, l310, l311, l312, l313, l314, l315, l316, l317,
   60.71 +        l318, l319, l320, l321, l322, l323, l324, l325, l326, l327,
   60.72 +        l328, l329, l330, l331, l332, l333, l334, l335, l336, l337,
   60.73 +        l338, l339, l340, l341, l342, l343, l344, l345, l346, l347,
   60.74 +        l348, l349, l350, l351, l352, l353, l354, l355, l356, l357,
   60.75 +        l358, l359, l360, l361, l362, l363, l364, l365, l366, l367,
   60.76 +        l368, l369, l370, l371, l372, l373, l374, l375, l376, l377,
   60.77 +        l378, l379, l380, l381, l382, l383, l384, l385, l386, l387,
   60.78 +        l388, l389, l390, l391, l392, l393, l394, l395, l396, l397,
   60.79 +        l398, l399, l400, l401, l402, l403, l404, l405, l406, l407,
   60.80 +        l408, l409, l410, l411, l412, l413, l414, l415, l416, l417,
   60.81 +        l418, l419, l420, l421, l422, l423, l424, l425, l426, l427,
   60.82 +        l428, l429, l430, l431, l432, l433, l434, l435, l436, l437,
   60.83 +        l438, l439, l440, l441, l442, l443, l444, l445, l446, l447,
   60.84 +        l448, l449, l450, l451, l452, l453, l454, l455, l456, l457,
   60.85 +        l458, l459, l460, l461, l462, l463, l464, l465, l466, l467,
   60.86 +        l468, l469, l470, l471, l472, l473, l474, l475, l476, l477,
   60.87 +        l478, l479, l480, l481, l482, l483, l484, l485, l486, l487,
   60.88 +        l488, l489, l490, l491, l492, l493, l494, l495, l496, l497,
   60.89 +        l498, l499, l500, l501, l502, l503, l504, l505, l506, l507,
   60.90 +        l508, l509, l510, l511;
   60.91 +
   60.92 +        long ll0, ll1, ll2, ll3, ll4, ll5, ll6, ll7, ll8, ll9, ll10, ll11, ll12,
   60.93 +        ll13, ll14, ll15, ll16, ll17, ll18, ll19, ll20, ll21, ll22, ll23, ll24,
   60.94 +        ll25, ll26, ll27, ll28, ll29, ll30, ll31, ll32, ll33, ll34, ll35, ll36,
   60.95 +        ll37, ll38, ll39, ll40, ll41, ll42, ll43, ll44, ll45, ll46, ll47, ll48,
   60.96 +        ll49, ll50, ll51, ll52, ll53, ll54, ll55, ll56, ll57, ll58, ll59, ll60,
   60.97 +        ll61, ll62, ll63, ll64, ll65, ll66, ll67, ll68, ll69, ll70, ll71, ll72,
   60.98 +        ll73, ll74, ll75, ll76, ll77, ll78, ll79, ll80, ll81, ll82, ll83, ll84,
   60.99 +        ll85, ll86, ll87, ll88, ll89, ll90, ll91, ll92, ll93, ll94, ll95, ll96,
  60.100 +        ll97, ll98, ll99, ll100, ll101, ll102, ll103, ll104, ll105, ll106, ll107,
  60.101 +        ll108, ll109, ll110, ll111, ll112, ll113, ll114, ll115, ll116, ll117,
  60.102 +        ll118, ll119, ll120, ll121, ll122, ll123, ll124, ll125, ll126, ll127,
  60.103 +        ll128, ll129, ll130, ll131, ll132, ll133, ll134, ll135, ll136, ll137,
  60.104 +        ll138, ll139, ll140, ll141, ll142, ll143, ll144, ll145, ll146, ll147,
  60.105 +        ll148, ll149, ll150, ll151, ll152, ll153, ll154, ll155, ll156, ll157,
  60.106 +        ll158, ll159, ll160, ll161, ll162, ll163, ll164, ll165, ll166, ll167,
  60.107 +        ll168, ll169, ll170, ll171, ll172, ll173, ll174, ll175, ll176, ll177,
  60.108 +        ll178, ll179, ll180, ll181, ll182, ll183, ll184, ll185, ll186, ll187,
  60.109 +        ll188, ll189, ll190, ll191, ll192, ll193, ll194, ll195, ll196, ll197,
  60.110 +        ll198, ll199, ll200, ll201, ll202, ll203, ll204, ll205, ll206, ll207,
  60.111 +        ll208, ll209, ll210, ll211, ll212, ll213, ll214, ll215, ll216, ll217,
  60.112 +        ll218, ll219, ll220, ll221, ll222, ll223, ll224, ll225, ll226, ll227,
  60.113 +        ll228, ll229, ll230, ll231, ll232, ll233, ll234, ll235, ll236, ll237,
  60.114 +        ll238, ll239, ll240, ll241, ll242, ll243, ll244, ll245, ll246, ll247,
  60.115 +        ll248, ll249, ll250, ll251, ll252, ll253, ll254, ll255, ll256, ll257,
  60.116 +        ll258, ll259, ll260, ll261, ll262, ll263, ll264, ll265, ll266, ll267,
  60.117 +        ll268, ll269, ll270, ll271, ll272, ll273, ll274, ll275, ll276, ll277,
  60.118 +        ll278, ll279, ll280, ll281, ll282, ll283, ll284, ll285, ll286, ll287,
  60.119 +        ll288, ll289, ll290, ll291, ll292, ll293, ll294, ll295, ll296, ll297,
  60.120 +        ll298, ll299, ll300, ll301, ll302, ll303, ll304, ll305, ll306, ll307,
  60.121 +        ll308, ll309, ll310, ll311, ll312, ll313, ll314, ll315, ll316, ll317,
  60.122 +        ll318, ll319, ll320, ll321, ll322, ll323, ll324, ll325, ll326, ll327,
  60.123 +        ll328, ll329, ll330, ll331, ll332, ll333, ll334, ll335, ll336, ll337,
  60.124 +        ll338, ll339, ll340, ll341, ll342, ll343, ll344, ll345, ll346, ll347,
  60.125 +        ll348, ll349, ll350, ll351, ll352, ll353, ll354, ll355, ll356, ll357,
  60.126 +        ll358, ll359, ll360, ll361, ll362, ll363, ll364, ll365, ll366, ll367,
  60.127 +        ll368, ll369, ll370, ll371, ll372, ll373, ll374, ll375, ll376, ll377,
  60.128 +        ll378, ll379, ll380, ll381, ll382, ll383, ll384, ll385, ll386, ll387,
  60.129 +        ll388, ll389, ll390, ll391, ll392, ll393, ll394, ll395, ll396, ll397,
  60.130 +        ll398, ll399, ll400, ll401, ll402, ll403, ll404, ll405, ll406, ll407,
  60.131 +        ll408, ll409, ll410, ll411, ll412, ll413, ll414, ll415, ll416, ll417,
  60.132 +        ll418, ll419, ll420, ll421, ll422, ll423, ll424, ll425, ll426, ll427,
  60.133 +        ll428, ll429, ll430, ll431, ll432, ll433, ll434, ll435, ll436, ll437,
  60.134 +        ll438, ll439, ll440, ll441, ll442, ll443, ll444, ll445, ll446, ll447,
  60.135 +        ll448, ll449, ll450, ll451, ll452, ll453, ll454, ll455, ll456, ll457,
  60.136 +        ll458, ll459, ll460, ll461, ll462, ll463, ll464, ll465, ll466, ll467,
  60.137 +        ll468, ll469, ll470, ll471, ll472, ll473, ll474, ll475, ll476, ll477,
  60.138 +        ll478, ll479, ll480, ll481, ll482, ll483, ll484, ll485, ll486, ll487,
  60.139 +        ll488, ll489, ll490, ll491, ll492, ll493, ll494, ll495, ll496, ll497,
  60.140 +        ll498, ll499, ll500, ll501, ll502, ll503, ll504, ll505, ll506, ll507,
  60.141 +        ll508, ll509, ll510, ll511;
  60.142 +
  60.143 +        if (deopt) {
  60.144 +            method_entered = true;
  60.145 +            synchronized(monitor) {
  60.146 +                do_monitor_acquire = true;
  60.147 +                UnloadedClass1 res = new UnloadedClass1(); // forces deopt with c2
  60.148 +                res.field = 0; //forced deopt with c1
  60.149 +                return res;
  60.150 +            }
  60.151 +        }
  60.152 +        return null;
  60.153 +    }
  60.154 +
  60.155 +    static boolean m2(boolean deopt) {
  60.156 +        long l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12,
  60.157 +        l13, l14, l15, l16, l17, l18, l19, l20, l21, l22, l23, l24,
  60.158 +        l25, l26, l27, l28, l29, l30, l31, l32, l33, l34, l35, l36,
  60.159 +        l37, l38, l39, l40, l41, l42, l43, l44, l45, l46, l47, l48,
  60.160 +        l49, l50, l51, l52, l53, l54, l55, l56, l57, l58, l59, l60,
  60.161 +        l61, l62, l63, l64, l65, l66, l67, l68, l69, l70, l71, l72,
  60.162 +        l73, l74, l75, l76, l77, l78, l79, l80, l81, l82, l83, l84,
  60.163 +        l85, l86, l87, l88, l89, l90, l91, l92, l93, l94, l95, l96,
  60.164 +        l97, l98, l99, l100, l101, l102, l103, l104, l105, l106, l107,
  60.165 +        l108, l109, l110, l111, l112, l113, l114, l115, l116, l117,
  60.166 +        l118, l119, l120, l121, l122, l123, l124, l125, l126, l127,
  60.167 +        l128, l129, l130, l131, l132, l133, l134, l135, l136, l137,
  60.168 +        l138, l139, l140, l141, l142, l143, l144, l145, l146, l147,
  60.169 +        l148, l149, l150, l151, l152, l153, l154, l155, l156, l157,
  60.170 +        l158, l159, l160, l161, l162, l163, l164, l165, l166, l167,
  60.171 +        l168, l169, l170, l171, l172, l173, l174, l175, l176, l177,
  60.172 +        l178, l179, l180, l181, l182, l183, l184, l185, l186, l187,
  60.173 +        l188, l189, l190, l191, l192, l193, l194, l195, l196, l197,
  60.174 +        l198, l199, l200, l201, l202, l203, l204, l205, l206, l207,
  60.175 +        l208, l209, l210, l211, l212, l213, l214, l215, l216, l217,
  60.176 +        l218, l219, l220, l221, l222, l223, l224, l225, l226, l227,
  60.177 +        l228, l229, l230, l231, l232, l233, l234, l235, l236, l237,
  60.178 +        l238, l239, l240, l241, l242, l243, l244, l245, l246, l247,
  60.179 +        l248, l249, l250, l251, l252, l253, l254, l255, l256, l257,
  60.180 +        l258, l259, l260, l261, l262, l263, l264, l265, l266, l267,
  60.181 +        l268, l269, l270, l271, l272, l273, l274, l275, l276, l277,
  60.182 +        l278, l279, l280, l281, l282, l283, l284, l285, l286, l287,
  60.183 +        l288, l289, l290, l291, l292, l293, l294, l295, l296, l297,
  60.184 +        l298, l299, l300, l301, l302, l303, l304, l305, l306, l307,
  60.185 +        l308, l309, l310, l311, l312, l313, l314, l315, l316, l317,
  60.186 +        l318, l319, l320, l321, l322, l323, l324, l325, l326, l327,
  60.187 +        l328, l329, l330, l331, l332, l333, l334, l335, l336, l337,
  60.188 +        l338, l339, l340, l341, l342, l343, l344, l345, l346, l347,
  60.189 +        l348, l349, l350, l351, l352, l353, l354, l355, l356, l357,
  60.190 +        l358, l359, l360, l361, l362, l363, l364, l365, l366, l367,
  60.191 +        l368, l369, l370, l371, l372, l373, l374, l375, l376, l377,
  60.192 +        l378, l379, l380, l381, l382, l383, l384, l385, l386, l387,
  60.193 +        l388, l389, l390, l391, l392, l393, l394, l395, l396, l397,
  60.194 +        l398, l399, l400, l401, l402, l403, l404, l405, l406, l407,
  60.195 +        l408, l409, l410, l411, l412, l413, l414, l415, l416, l417,
  60.196 +        l418, l419, l420, l421, l422, l423, l424, l425, l426, l427,
  60.197 +        l428, l429, l430, l431, l432, l433, l434, l435, l436, l437,
  60.198 +        l438, l439, l440, l441, l442, l443, l444, l445, l446, l447,
  60.199 +        l448, l449, l450, l451, l452, l453, l454, l455, l456, l457,
  60.200 +        l458, l459, l460, l461, l462, l463, l464, l465, l466, l467,
  60.201 +        l468, l469, l470, l471, l472, l473, l474, l475, l476, l477,
  60.202 +        l478, l479, l480, l481, l482, l483, l484, l485, l486, l487,
  60.203 +        l488, l489, l490, l491, l492, l493, l494, l495, l496, l497,
  60.204 +        l498, l499, l500, l501, l502, l503, l504, l505, l506, l507,
  60.205 +        l508, l509, l510, l511;
  60.206 +
  60.207 +        boolean do_m3 = false;
  60.208 +        try {
  60.209 +            do_m3 = m2(deopt);
  60.210 +        } catch (StackOverflowError e) {
  60.211 +            return true;
  60.212 +        }
  60.213 +        if (do_m3) {
  60.214 +            try {
  60.215 +                m1(deopt);
  60.216 +            } catch (StackOverflowError e) {}
  60.217 +        }
  60.218 +        return false;
  60.219 +    }
  60.220 +
    60.221 +    // Used for synchronization between threads
  60.222 +    static volatile boolean thread_started = false;
  60.223 +    static volatile boolean do_monitor_acquire = false;
  60.224 +    static volatile boolean monitor_acquired = false;
  60.225 +    static volatile boolean method_entered = false;
  60.226 +
  60.227 +    static Object monitor = new Object();
  60.228 +
  60.229 +    static public void main(String[] args) {
  60.230 +        // get m1 compiled
  60.231 +        for (int i = 0; i < 20000; i++) {
  60.232 +            m1(false);
  60.233 +        }
  60.234 +
  60.235 +        Thread thread = new Thread() {
  60.236 +            public void run() {
  60.237 +                thread_started = true;
  60.238 +                while(!do_monitor_acquire);
  60.239 +                System.out.println("Ok to try to acquire the lock");
  60.240 +                synchronized(monitor) {
  60.241 +                    monitor_acquired = true;
  60.242 +                }
  60.243 +            }
  60.244 +        };
  60.245 +
  60.246 +        thread.setDaemon(true);
  60.247 +        thread.start();
  60.248 +
  60.249 +        while(!thread_started);
  60.250 +
  60.251 +        m2(true);
  60.252 +
  60.253 +        if (!method_entered) {
  60.254 +            System.out.println("TEST PASSED");
  60.255 +            return;
  60.256 +        }
  60.257 +
  60.258 +        for (int i = 0; i < 10; i++) {
  60.259 +            System.out.println("Is lock acquired?");
  60.260 +            if (monitor_acquired) {
  60.261 +                System.out.println("TEST PASSED");
  60.262 +                return;
  60.263 +            }
  60.264 +            try {
  60.265 +                Thread.sleep(10000);
  60.266 +            } catch(InterruptedException ie) {
  60.267 +            }
  60.268 +        }
  60.269 +        System.out.println("TEST FAILED");
  60.270 +    }
  60.271 +}
    61.1 --- a/test/compiler/whitebox/IsMethodCompilableTest.java	Tue Jun 17 16:12:09 2014 -0700
    61.2 +++ b/test/compiler/whitebox/IsMethodCompilableTest.java	Tue Jun 17 22:15:24 2014 -0700
    61.3 @@ -28,7 +28,7 @@
    61.4   * @build IsMethodCompilableTest
    61.5   * @run main ClassFileInstaller sun.hotspot.WhiteBox
    61.6   * @run main ClassFileInstaller com.oracle.java.testlibrary.Platform
    61.7 - * @run main/othervm/timeout=2400 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:PerMethodRecompilationCutoff=3 -XX:CompileCommand=compileonly,SimpleTestCase$Helper::* IsMethodCompilableTest
    61.8 + * @run main/othervm/timeout=2400 -Xbootclasspath/a:. -Xmixed -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:PerMethodRecompilationCutoff=3 -XX:CompileCommand=compileonly,SimpleTestCase$Helper::* IsMethodCompilableTest
    61.9   * @summary testing of WB::isMethodCompilable()
   61.10   * @author igor.ignatyev@oracle.com
   61.11   */
    62.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    62.2 +++ b/test/gc/metaspace/TestMetaspaceInitialization.java	Tue Jun 17 22:15:24 2014 -0700
    62.3 @@ -0,0 +1,48 @@
    62.4 +/*
    62.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
    62.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    62.7 + *
    62.8 + * This code is free software; you can redistribute it and/or modify it
    62.9 + * under the terms of the GNU General Public License version 2 only, as
   62.10 + * published by the Free Software Foundation.
   62.11 + *
   62.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   62.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   62.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   62.15 + * version 2 for more details (a copy is included in the LICENSE file that
   62.16 + * accompanied this code).
   62.17 + *
   62.18 + * You should have received a copy of the GNU General Public License version
   62.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   62.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   62.21 + *
   62.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   62.23 + * or visit www.oracle.com if you need additional information or have any
   62.24 + * questions.
   62.25 + */
   62.26 +
   62.27 +import java.util.ArrayList;
   62.28 +
   62.29 +/* @test TestMetaspaceInitialization
   62.30 + * @bug 8042933
   62.31 + * @summary Tests to initialize metaspace with a very low MetaspaceSize
   62.32 + * @library /testlibrary
   62.33 + * @run main/othervm -XX:MetaspaceSize=2m TestMetaspaceInitialization
   62.34 + */
   62.35 +public class TestMetaspaceInitialization {
   62.36 +    private class Internal {
   62.37 +        public int x;
   62.38 +        public Internal(int x) {
   62.39 +            this.x = x;
   62.40 +        }
   62.41 +    }
   62.42 +
   62.43 +    private void test() {
   62.44 +        ArrayList<Internal> l = new ArrayList<>();
   62.45 +        l.add(new Internal(17));
   62.46 +    }
   62.47 +
   62.48 +    public static void main(String[] args) {
   62.49 +        new TestMetaspaceInitialization().test();
   62.50 +    }
   62.51 +}
    63.1 --- a/test/runtime/Thread/TestThreadDumpMonitorContention.java	Tue Jun 17 16:12:09 2014 -0700
    63.2 +++ b/test/runtime/Thread/TestThreadDumpMonitorContention.java	Tue Jun 17 22:15:24 2014 -0700
    63.3 @@ -24,6 +24,7 @@
    63.4  /*
    63.5   * @test
    63.6   * @bug     8036823
    63.7 + * @bug     8046287
    63.8   * @summary Creates two threads contending for the same lock and checks
    63.9   *      whether jstack reports "locked" by more than one thread.
   63.10   *
   63.11 @@ -52,10 +53,13 @@
   63.12      // looking for header lines with these patterns:
   63.13      // "ContendingThread-1" #19 prio=5 os_prio=64 tid=0x000000000079c000 nid=0x23 runnable [0xffff80ffb8b87000]
   63.14      // "ContendingThread-2" #21 prio=5 os_prio=64 tid=0x0000000000780000 nid=0x2f waiting for monitor entry [0xfffffd7fc1111000]
   63.15 +    // "ContendingThread-2" #24 prio=5 os_prio=64 tid=0x0000000000ec8800 nid=0x31 waiting on condition [0xfffffd7bbfffe000]
   63.16      final static Pattern HEADER_PREFIX_PATTERN = Pattern.compile(
   63.17          "^\"ContendingThread-.*");
   63.18 -    final static Pattern HEADER_WAITING_PATTERN = Pattern.compile(
   63.19 +    final static Pattern HEADER_WAITING_PATTERN1 = Pattern.compile(
   63.20          "^\"ContendingThread-.* waiting for monitor entry .*");
   63.21 +    final static Pattern HEADER_WAITING_PATTERN2 = Pattern.compile(
   63.22 +        "^\"ContendingThread-.* waiting on condition .*");
   63.23      final static Pattern HEADER_RUNNABLE_PATTERN = Pattern.compile(
   63.24          "^\"ContendingThread-.* runnable .*");
   63.25  
   63.26 @@ -80,17 +84,34 @@
   63.27      final static Pattern WAITING_PATTERN = Pattern.compile(
   63.28          ".* waiting to lock \\<.*\\(a TestThreadDumpMonitorContention.*");
   63.29  
   63.30 +    final static Object barrier = new Object();
   63.31      volatile static boolean done = false;
   63.32  
   63.33 +    static int barrier_cnt = 0;
   63.34 +    static int blank_line_match_cnt = 0;
   63.35      static int error_cnt = 0;
   63.36 -    static String header_line = null;
   63.37      static boolean have_header_line = false;
   63.38      static boolean have_thread_state_line = false;
   63.39 -    static int match_cnt = 0;
   63.40 -    static String[] match_list = new String[2];
   63.41 +    static String header_line = null;
   63.42 +    static int header_prefix_match_cnt = 0;
   63.43 +    static int locked_line_match_cnt = 0;
   63.44 +    static String[] locked_match_list = new String[2];
   63.45      static int n_samples = 15;
   63.46 +    static int sum_both_running_cnt = 0;
   63.47 +    static int sum_both_waiting_cnt = 0;
   63.48 +    static int sum_contended_cnt = 0;
   63.49 +    static int sum_locked_hdr_runnable_cnt = 0;
   63.50 +    static int sum_locked_hdr_waiting1_cnt = 0;
   63.51 +    static int sum_locked_hdr_waiting2_cnt = 0;
   63.52 +    static int sum_locked_thr_state_blocked_cnt = 0;
   63.53 +    static int sum_locked_thr_state_runnable_cnt = 0;
   63.54 +    static int sum_one_waiting_cnt = 0;
   63.55 +    static int sum_uncontended_cnt = 0;
   63.56 +    static int sum_waiting_hdr_waiting1_cnt = 0;
   63.57 +    static int sum_waiting_thr_state_blocked_cnt = 0;
   63.58      static String thread_state_line = null;
   63.59      static boolean verbose = false;
   63.60 +    static int waiting_line_match_cnt = 0;
   63.61  
   63.62      public static void main(String[] args) throws Exception {
   63.63          if (args.length != 0) {
   63.64 @@ -110,6 +131,11 @@
   63.65  
   63.66          Runnable runnable = new Runnable() {
   63.67              public void run() {
   63.68 +                synchronized (barrier) {
   63.69 +                    // let the main thread know we're running
   63.70 +                    barrier_cnt++;
   63.71 +                    barrier.notify();
   63.72 +                }
   63.73                  while (!done) {
   63.74                      synchronized (this) { }
   63.75                  }
   63.76 @@ -118,8 +144,16 @@
   63.77          Thread[] thread_list = new Thread[2];
   63.78          thread_list[0] = new Thread(runnable, "ContendingThread-1");
   63.79          thread_list[1] = new Thread(runnable, "ContendingThread-2");
   63.80 -        thread_list[0].start();
   63.81 -        thread_list[1].start();
   63.82 +        synchronized (barrier) {
   63.83 +            thread_list[0].start();
   63.84 +            thread_list[1].start();
   63.85 +
   63.86 +            // Wait until the contending threads are running so that
   63.87 +            // we don't sample any thread init states.
   63.88 +            while (barrier_cnt < 2) {
   63.89 +                barrier.wait();
   63.90 +            }
   63.91 +        }
   63.92  
   63.93          doSamples();
   63.94  
   63.95 @@ -143,11 +177,12 @@
   63.96      // Example:
   63.97      // "ContendingThread-1" #21 prio=5 os_prio=64 tid=0x00000000007b9000 nid=0x2f runnable [0xfffffd7fc1111000]
   63.98      //    java.lang.Thread.State: RUNNABLE
   63.99 -    //         at TestThreadDumpMonitorContention$1.run(TestThreadDumpMonitorContention.java:67)
  63.100 +    //         at TestThreadDumpMonitorContention$1.run(TestThreadDumpMonitorContention.java:140)
  63.101      //         at java.lang.Thread.run(Thread.java:745)
  63.102      //
  63.103      static boolean checkBlankLine(String line) {
  63.104          if (line.length() == 0) {
  63.105 +            blank_line_match_cnt++;
  63.106              have_header_line = false;
  63.107              have_thread_state_line = false;
  63.108              return true;
  63.109 @@ -161,49 +196,73 @@
  63.110      // Example 1:
  63.111      // "ContendingThread-1" #21 prio=5 os_prio=64 tid=0x00000000007b9000 nid=0x2f runnable [0xfffffd7fc1111000]
  63.112      //    java.lang.Thread.State: RUNNABLE
  63.113 -    //         at TestThreadDumpMonitorContention$1.run(TestThreadDumpMonitorContention.java:67)
  63.114 +    //         at TestThreadDumpMonitorContention$1.run(TestThreadDumpMonitorContention.java:140)
  63.115      //         - locked <0xfffffd7e6a2912f8> (a TestThreadDumpMonitorContention$1)
  63.116      //         at java.lang.Thread.run(Thread.java:745)
  63.117      //
  63.118      // Example 2:
  63.119      // "ContendingThread-1" #21 prio=5 os_prio=64 tid=0x00000000007b9000 nid=0x2f waiting for monitor entry [0xfffffd7fc1111000]
  63.120      //    java.lang.Thread.State: BLOCKED (on object monitor)
  63.121 -    //         at TestThreadDumpMonitorContention$1.run(TestThreadDumpMonitorContention.java:67)
  63.122 +    //         at TestThreadDumpMonitorContention$1.run(TestThreadDumpMonitorContention.java:140)
  63.123      //         - locked <0xfffffd7e6a2912f8> (a TestThreadDumpMonitorContention$1)
  63.124      //         at java.lang.Thread.run(Thread.java:745)
  63.125      //
  63.126 +    // Example 3:
  63.127 +    // "ContendingThread-2" #24 prio=5 os_prio=64 tid=0x0000000000ec8800 nid=0x31 waiting on condition [0xfffffd7bbfffe000]
  63.128 +    //    java.lang.Thread.State: RUNNABLE
  63.129 +    //    JavaThread state: _thread_blocked
  63.130 +    // Thread: 0x0000000000ec8800  [0x31] State: _at_safepoint _has_called_back 0 _at_poll_safepoint 0
  63.131 +    //    JavaThread state: _thread_blocked
  63.132 +    //         at TestThreadDumpMonitorContention$1.run(TestThreadDumpMonitorContention.java:140)
  63.133 +    //         - locked <0xfffffd7e6d03eb28> (a TestThreadDumpMonitorContention$1)
  63.134 +    //         at java.lang.Thread.run(Thread.java:745)
  63.135 +    //
  63.136      static boolean checkLockedLine(String line) {
  63.137          Matcher matcher = LOCK_PATTERN.matcher(line);
  63.138          if (matcher.matches()) {
  63.139              if (verbose) {
  63.140                  System.out.println("locked_line='" + line + "'");
  63.141              }
  63.142 -            match_list[match_cnt] = new String(line);
  63.143 -            match_cnt++;
  63.144 +            locked_match_list[locked_line_match_cnt] = new String(line);
  63.145 +            locked_line_match_cnt++;
  63.146  
  63.147              matcher = HEADER_RUNNABLE_PATTERN.matcher(header_line);
  63.148 -            if (!matcher.matches()) {
  63.149 +            if (matcher.matches()) {
  63.150 +                sum_locked_hdr_runnable_cnt++;
  63.151 +            } else {
  63.152                  // It's strange, but a locked line can also
  63.153 -                // match the HEADER_WAITING_PATTERN.
  63.154 -                matcher = HEADER_WAITING_PATTERN.matcher(header_line);
  63.155 -                if (!matcher.matches()) {
  63.156 -                    System.err.println();
  63.157 -                    System.err.println("ERROR: header line does " +
  63.158 -                        "not match runnable or waiting patterns.");
  63.159 -                    System.err.println("ERROR: header_line='" +
  63.160 -                        header_line + "'");
  63.161 -                    System.err.println("ERROR: locked_line='" + line + "'");
  63.162 -                    error_cnt++;
  63.163 +                // match the HEADER_WAITING_PATTERN{1,2}.
  63.164 +                matcher = HEADER_WAITING_PATTERN1.matcher(header_line);
  63.165 +                if (matcher.matches()) {
  63.166 +                    sum_locked_hdr_waiting1_cnt++;
  63.167 +                } else {
  63.168 +                    matcher = HEADER_WAITING_PATTERN2.matcher(header_line);
  63.169 +                    if (matcher.matches()) {
  63.170 +                        sum_locked_hdr_waiting2_cnt++;
  63.171 +                    } else {
  63.172 +                        System.err.println();
  63.173 +                        System.err.println("ERROR: header line does " +
  63.174 +                            "not match runnable or waiting patterns.");
  63.175 +                        System.err.println("ERROR: header_line='" +
  63.176 +                            header_line + "'");
  63.177 +                        System.err.println("ERROR: locked_line='" + line +
  63.178 +                            "'");
  63.179 +                        error_cnt++;
  63.180 +                    }
  63.181                  }
  63.182              }
  63.183  
  63.184              matcher = THREAD_STATE_RUNNABLE_PATTERN.matcher(thread_state_line);
  63.185 -            if (!matcher.matches()) {
  63.186 +            if (matcher.matches()) {
  63.187 +                sum_locked_thr_state_runnable_cnt++;
  63.188 +            } else {
  63.189                  // It's strange, but a locked line can also
  63.190                  // match the THREAD_STATE_BLOCKED_PATTERN.
  63.191                  matcher = THREAD_STATE_BLOCKED_PATTERN.matcher(
  63.192                                thread_state_line);
  63.193 -                if (!matcher.matches()) {
  63.194 +                if (matcher.matches()) {
  63.195 +                    sum_locked_thr_state_blocked_cnt++;
  63.196 +                } else {
  63.197                      System.err.println();
  63.198                      System.err.println("ERROR: thread state line does not " +
  63.199                          "match runnable or waiting patterns.");
  63.200 @@ -229,19 +288,22 @@
  63.201      // Example:
  63.202      // "ContendingThread-2" #22 prio=5 os_prio=64 tid=0x00000000007b9800 nid=0x30 waiting for monitor entry [0xfffffd7fc1010000]
  63.203      //    java.lang.Thread.State: BLOCKED (on object monitor)
  63.204 -    //         at TestThreadDumpMonitorContention$1.run(TestThreadDumpMonitorContention.java:67)
  63.205 +    //         at TestThreadDumpMonitorContention$1.run(TestThreadDumpMonitorContention.java:140)
  63.206      //         - waiting to lock <0xfffffd7e6a2912f8> (a TestThreadDumpMonitorContention$1)
  63.207      //         at java.lang.Thread.run(Thread.java:745)
  63.208      //
  63.209      static boolean checkWaitingLine(String line) {
  63.210          Matcher matcher = WAITING_PATTERN.matcher(line);
  63.211          if (matcher.matches()) {
  63.212 +            waiting_line_match_cnt++;
  63.213              if (verbose) {
  63.214                  System.out.println("waiting_line='" + line + "'");
  63.215              }
  63.216  
  63.217 -            matcher = HEADER_WAITING_PATTERN.matcher(header_line);
  63.218 -            if (!matcher.matches()) {
  63.219 +            matcher = HEADER_WAITING_PATTERN1.matcher(header_line);
  63.220 +            if (matcher.matches()) {
  63.221 +                sum_waiting_hdr_waiting1_cnt++;
  63.222 +            } else {
  63.223                  System.err.println();
  63.224                  System.err.println("ERROR: header line does " +
  63.225                      "not match a waiting pattern.");
  63.226 @@ -251,7 +313,9 @@
  63.227              }
  63.228  
  63.229              matcher = THREAD_STATE_BLOCKED_PATTERN.matcher(thread_state_line);
  63.230 -            if (!matcher.matches()) {
  63.231 +            if (matcher.matches()) {
  63.232 +                sum_waiting_thr_state_blocked_cnt++;
  63.233 +            } else {
  63.234                  System.err.println();
  63.235                  System.err.println("ERROR: thread state line " +
  63.236                      "does not match a waiting pattern.");
  63.237 @@ -273,7 +337,10 @@
  63.238  
  63.239      static void doSamples() throws Exception {
  63.240          for (int count = 0; count < n_samples; count++) {
  63.241 -            match_cnt = 0;
  63.242 +            blank_line_match_cnt = 0;
  63.243 +            header_prefix_match_cnt = 0;
  63.244 +            locked_line_match_cnt = 0;
  63.245 +            waiting_line_match_cnt = 0;
  63.246              // verbose mode or an error has a lot of output so add more space
  63.247              if (verbose || error_cnt > 0) System.out.println();
  63.248              System.out.println("Sample #" + count);
  63.249 @@ -290,12 +357,12 @@
  63.250              //   a failure and we report it
  63.251              // - for a stack trace that matches LOCKED_PATTERN, we verify:
  63.252              //   - the header line matches HEADER_RUNNABLE_PATTERN
  63.253 -            //     or HEADER_WAITING_PATTERN
  63.254 +            //     or HEADER_WAITING_PATTERN{1,2}
  63.255              //   - the thread state line matches THREAD_STATE_BLOCKED_PATTERN
  63.256              //     or THREAD_STATE_RUNNABLE_PATTERN
  63.257              //   - we report any mismatches as failures
  63.258              // - for a stack trace that matches WAITING_PATTERN, we verify:
  63.259 -            //   - the header line matches HEADER_WAITING_PATTERN
  63.260 +            //   - the header line matches HEADER_WAITING_PATTERN1
  63.261              //   - the thread state line matches THREAD_STATE_BLOCKED_PATTERN
  63.262              //   - we report any mismatches as failures
  63.263              // - the stack traces that match HEADER_PREFIX_PATTERN may
  63.264 @@ -324,6 +391,7 @@
  63.265                  if (!have_header_line) {
  63.266                      matcher = HEADER_PREFIX_PATTERN.matcher(line);
  63.267                      if (matcher.matches()) {
  63.268 +                        header_prefix_match_cnt++;
  63.269                          if (verbose) {
  63.270                              System.out.println();
  63.271                              System.out.println("header='" + line + "'");
  63.272 @@ -366,19 +434,80 @@
  63.273              }
  63.274              process.waitFor();
  63.275  
  63.276 -           if (match_cnt == 2) {
  63.277 -               if (match_list[0].equals(match_list[1])) {
  63.278 -                   System.err.println();
  63.279 -                   System.err.println("ERROR: matching lock lines:");
  63.280 -                   System.err.println("ERROR: line[0]'" + match_list[0] + "'");
  63.281 -                   System.err.println("ERROR: line[1]'" + match_list[1] + "'");
  63.282 -                   error_cnt++;
  63.283 -               }
  63.284 -           }
  63.285 +            if (header_prefix_match_cnt != 2) {
  63.286 +                System.err.println();
  63.287 +                System.err.println("ERROR: should match exactly two headers.");
  63.288 +                System.err.println("ERROR: header_prefix_match_cnt=" +
  63.289 +                    header_prefix_match_cnt);
  63.290 +                error_cnt++;
  63.291 +            }
  63.292 +
  63.293 +            if (locked_line_match_cnt == 2) {
  63.294 +                if (locked_match_list[0].equals(locked_match_list[1])) {
  63.295 +                    System.err.println();
  63.296 +                    System.err.println("ERROR: matching lock lines:");
  63.297 +                    System.err.println("ERROR: line[0]'" +
  63.298 +                        locked_match_list[0] + "'");
  63.299 +                    System.err.println("ERROR: line[1]'" +
  63.300 +                        locked_match_list[1] + "'");
  63.301 +                    error_cnt++;
  63.302 +                }
  63.303 +            }
  63.304 +
  63.305 +            if (locked_line_match_cnt == 1) {
  63.306 +                // one thread has the lock
  63.307 +                if (waiting_line_match_cnt == 1) {
  63.308 +                    // and the other contended for it
  63.309 +                    sum_contended_cnt++;
  63.310 +                } else {
  63.311 +                    // and the other is just running
  63.312 +                    sum_uncontended_cnt++;
  63.313 +                }
  63.314 +            } else if (waiting_line_match_cnt == 1) {
  63.315 +                // one thread is waiting
  63.316 +                sum_one_waiting_cnt++;
  63.317 +            } else if (waiting_line_match_cnt == 2) {
  63.318 +                // both threads are waiting
  63.319 +                sum_both_waiting_cnt++;
  63.320 +            } else {
  63.321 +                // both threads are running
  63.322 +                sum_both_running_cnt++;
  63.323 +            }
  63.324  
  63.325              // slight delay between jstack launches
  63.326              Thread.sleep(500);
  63.327          }
  63.328 +
  63.329 +        if (error_cnt != 0) {
  63.330 +            // skip summary info since there were errors
  63.331 +            return;
  63.332 +        }
  63.333 +
  63.334 +        System.out.println("INFO: Summary for all samples:");
  63.335 +        System.out.println("INFO: both_running_cnt=" + sum_both_running_cnt);
  63.336 +        System.out.println("INFO: both_waiting_cnt=" + sum_both_waiting_cnt);
  63.337 +        System.out.println("INFO: contended_cnt=" + sum_contended_cnt);
  63.338 +        System.out.println("INFO: one_waiting_cnt=" + sum_one_waiting_cnt);
  63.339 +        System.out.println("INFO: uncontended_cnt=" + sum_uncontended_cnt);
  63.340 +        System.out.println("INFO: locked_hdr_runnable_cnt=" +
  63.341 +            sum_locked_hdr_runnable_cnt);
  63.342 +        System.out.println("INFO: locked_hdr_waiting1_cnt=" +
  63.343 +            sum_locked_hdr_waiting1_cnt);
  63.344 +        System.out.println("INFO: locked_hdr_waiting2_cnt=" +
  63.345 +            sum_locked_hdr_waiting2_cnt);
  63.346 +        System.out.println("INFO: locked_thr_state_blocked_cnt=" +
  63.347 +            sum_locked_thr_state_blocked_cnt);
  63.348 +        System.out.println("INFO: locked_thr_state_runnable_cnt=" +
  63.349 +            sum_locked_thr_state_runnable_cnt);
  63.350 +        System.out.println("INFO: waiting_hdr_waiting1_cnt=" +
  63.351 +            sum_waiting_hdr_waiting1_cnt);
  63.352 +        System.out.println("INFO: waiting_thr_state_blocked_cnt=" +
  63.353 +            sum_waiting_thr_state_blocked_cnt);
  63.354 +
  63.355 +        if (sum_contended_cnt == 0) {
  63.356 +            System.err.println("WARNING: the primary scenario for 8036823" +
  63.357 +                " has not been exercised by this test run.");
  63.358 +        }
  63.359      }
  63.360  
  63.361      // This helper relies on RuntimeMXBean.getName() returning a string
    64.1 --- a/test/serviceability/ParserTest.java	Tue Jun 17 16:12:09 2014 -0700
    64.2 +++ b/test/serviceability/ParserTest.java	Tue Jun 17 22:15:24 2014 -0700
    64.3 @@ -22,10 +22,10 @@
    64.4   */
    64.5  
    64.6  /*
    64.7 - * @test ParserTest
    64.8 + * @test
    64.9   * @summary Test that the diagnostic command arguemnt parser works
   64.10   * @library /testlibrary /testlibrary/whitebox
   64.11 - * @build ParserTest
   64.12 + * @build ClassFileInstaller sun.hotspot.WhiteBox sun.hotspot.parser.*
   64.13   * @run main ClassFileInstaller sun.hotspot.WhiteBox
   64.14   * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI ParserTest
   64.15   */
    65.1 --- a/test/serviceability/attach/AttachWithStalePidFile.java	Tue Jun 17 16:12:09 2014 -0700
    65.2 +++ b/test/serviceability/attach/AttachWithStalePidFile.java	Tue Jun 17 22:15:24 2014 -0700
    65.3 @@ -27,7 +27,7 @@
    65.4   * @key regression
    65.5   * @summary Regression test for attach issue where stale pid files in /tmp lead to connection issues
    65.6   * @library /testlibrary
    65.7 - * @compile AttachWithStalePidFileTarget.java
    65.8 + * @build com.oracle.java.testlibrary.* AttachWithStalePidFileTarget
    65.9   * @run main AttachWithStalePidFile
   65.10   */
   65.11  
    66.1 --- a/test/serviceability/jvmti/GetObjectSizeOverflow.java	Tue Jun 17 16:12:09 2014 -0700
    66.2 +++ b/test/serviceability/jvmti/GetObjectSizeOverflow.java	Tue Jun 17 22:15:24 2014 -0700
    66.3 @@ -29,7 +29,7 @@
    66.4   * @test
    66.5   * @bug 8027230
    66.6   * @library /testlibrary
    66.7 - * @build GetObjectSizeOverflowAgent
    66.8 + * @build ClassFileInstaller com.oracle.java.testlibrary.* GetObjectSizeOverflowAgent
    66.9   * @run main ClassFileInstaller GetObjectSizeOverflowAgent
   66.10   * @run main GetObjectSizeOverflow
   66.11   */
    67.1 --- a/test/serviceability/jvmti/TestRedefineWithUnresolvedClass.java	Tue Jun 17 16:12:09 2014 -0700
    67.2 +++ b/test/serviceability/jvmti/TestRedefineWithUnresolvedClass.java	Tue Jun 17 22:15:24 2014 -0700
    67.3 @@ -26,7 +26,7 @@
    67.4   * @summary Redefine a class with an UnresolvedClass reference in the constant pool.
    67.5   * @bug 8035150
    67.6   * @library /testlibrary
    67.7 - * @build UnresolvedClassAgent com.oracle.java.testlibrary.ProcessTools com.oracle.java.testlibrary.OutputAnalyzer
    67.8 + * @build com.oracle.java.testlibrary.* UnresolvedClassAgent
    67.9   * @run main TestRedefineWithUnresolvedClass
   67.10   */
   67.11  
    68.1 --- a/test/serviceability/sa/jmap-hashcode/Test8028623.java	Tue Jun 17 16:12:09 2014 -0700
    68.2 +++ b/test/serviceability/sa/jmap-hashcode/Test8028623.java	Tue Jun 17 22:15:24 2014 -0700
    68.3 @@ -26,6 +26,7 @@
    68.4   * @bug 8028623
    68.5   * @summary Test hashing of extended characters in Serviceability Agent.
    68.6   * @library /testlibrary
    68.7 + * @build com.oracle.java.testlibrary.*
    68.8   * @compile -encoding utf8 Test8028623.java
    68.9   * @run main Test8028623
   68.10   */
    69.1 --- a/test/serviceability/sa/jmap-hprof/JMapHProfLargeHeapTest.java	Tue Jun 17 16:12:09 2014 -0700
    69.2 +++ b/test/serviceability/sa/jmap-hprof/JMapHProfLargeHeapTest.java	Tue Jun 17 22:15:24 2014 -0700
    69.3 @@ -44,7 +44,7 @@
    69.4   * @key regression
    69.5   * @summary Regression test for hprof export issue due to large heaps (>2G)
    69.6   * @library /testlibrary
    69.7 - * @compile JMapHProfLargeHeapProc.java
    69.8 + * @build com.oracle.java.testlibrary.* JMapHProfLargeHeapProc
    69.9   * @run main JMapHProfLargeHeapTest
   69.10   */
   69.11  
    70.1 --- a/test/testlibrary/ctw/test/ClassesDirTest.java	Tue Jun 17 16:12:09 2014 -0700
    70.2 +++ b/test/testlibrary/ctw/test/ClassesDirTest.java	Tue Jun 17 22:15:24 2014 -0700
    70.3 @@ -22,10 +22,10 @@
    70.4   */
    70.5  
    70.6  /*
    70.7 - * @test ClassesDirTest
    70.8 + * @test
    70.9   * @bug 8012447
   70.10   * @library /testlibrary /testlibrary/whitebox /testlibrary/ctw/src
   70.11 - * @build sun.hotspot.tools.ctw.CompileTheWorld sun.hotspot.WhiteBox ClassesDirTest Foo Bar
   70.12 + * @build ClassFileInstaller sun.hotspot.tools.ctw.CompileTheWorld sun.hotspot.WhiteBox Foo Bar
   70.13   * @run main ClassFileInstaller sun.hotspot.WhiteBox Foo Bar
   70.14   * @run main ClassesDirTest prepare
   70.15   * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Dsun.hotspot.tools.ctw.logfile=ctw.log sun.hotspot.tools.ctw.CompileTheWorld classes
    71.1 --- a/test/testlibrary/ctw/test/ClassesListTest.java	Tue Jun 17 16:12:09 2014 -0700
    71.2 +++ b/test/testlibrary/ctw/test/ClassesListTest.java	Tue Jun 17 22:15:24 2014 -0700
    71.3 @@ -22,10 +22,10 @@
    71.4   */
    71.5  
    71.6  /*
    71.7 - * @test ClassesListTest
    71.8 + * @test
    71.9   * @bug 8012447
   71.10   * @library /testlibrary /testlibrary/whitebox /testlibrary/ctw/src
   71.11 - * @build sun.hotspot.tools.ctw.CompileTheWorld sun.hotspot.WhiteBox ClassesListTest Foo Bar
   71.12 + * @build ClassFileInstaller sun.hotspot.tools.ctw.CompileTheWorld sun.hotspot.WhiteBox Foo Bar
   71.13   * @run main ClassFileInstaller sun.hotspot.WhiteBox Foo Bar
   71.14   * @run main ClassesListTest prepare
   71.15   * @run main/othervm/timeout=600 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Dsun.hotspot.tools.ctw.logfile=ctw.log sun.hotspot.tools.ctw.CompileTheWorld classes.lst
    72.1 --- a/test/testlibrary/ctw/test/JarDirTest.java	Tue Jun 17 16:12:09 2014 -0700
    72.2 +++ b/test/testlibrary/ctw/test/JarDirTest.java	Tue Jun 17 22:15:24 2014 -0700
    72.3 @@ -22,10 +22,10 @@
    72.4   */
    72.5  
    72.6  /*
    72.7 - * @test JarDirTest
    72.8 + * @test
    72.9   * @bug 8012447
   72.10   * @library /testlibrary /testlibrary/whitebox /testlibrary/ctw/src
   72.11 - * @build sun.hotspot.tools.ctw.CompileTheWorld sun.hotspot.WhiteBox JarDirTest Foo Bar
   72.12 + * @build ClassFileInstaller com.oracle.java.testlibrary.* sun.hotspot.tools.ctw.CompileTheWorld sun.hotspot.WhiteBox Foo Bar
   72.13   * @run main ClassFileInstaller sun.hotspot.WhiteBox Foo Bar
   72.14   * @run main JarDirTest prepare
   72.15   * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Dsun.hotspot.tools.ctw.logfile=ctw.log sun.hotspot.tools.ctw.CompileTheWorld jars/*
    73.1 --- a/test/testlibrary/ctw/test/JarsTest.java	Tue Jun 17 16:12:09 2014 -0700
    73.2 +++ b/test/testlibrary/ctw/test/JarsTest.java	Tue Jun 17 22:15:24 2014 -0700
    73.3 @@ -22,10 +22,10 @@
    73.4   */
    73.5  
    73.6  /*
    73.7 - * @test JarsTest
    73.8 + * @test
    73.9   * @bug 8012447
   73.10   * @library /testlibrary /testlibrary/whitebox /testlibrary/ctw/src
   73.11 - * @build sun.hotspot.tools.ctw.CompileTheWorld sun.hotspot.WhiteBox JarsTest Foo Bar
   73.12 + * @build ClassFileInstaller com.oracle.java.testlibrary.* sun.hotspot.tools.ctw.CompileTheWorld sun.hotspot.WhiteBox Foo Bar
   73.13   * @run main ClassFileInstaller sun.hotspot.WhiteBox Foo Bar
   73.14   * @run main JarsTest prepare
   73.15   * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Dsun.hotspot.tools.ctw.logfile=ctw.log sun.hotspot.tools.ctw.CompileTheWorld foo.jar bar.jar

mercurial