src/share/vm/runtime/vframeArray.cpp

author:      roland
date:        Tue, 25 Nov 2014 17:33:59 +0100
changeset:   7419:d3f3f7677537
parent:      6723:0bf37f737702
child:       7535:7ae4e26cb1e0
permissions: -rw-r--r--

6898462: The escape analysis with G1 cause crash assertion src/share/vm/runtime/vframeArray.cpp:94
Summary: OOM during reallocation of scalar replaced objects in deoptimization causes crashes
Reviewed-by: kvn, jrose
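
This changeset adds a realloc_failures flag, passed into vframeArray::allocate/fill_in and vframeArrayElement::fill_in below, so that slots whose scalar-replaced objects could not be reallocated (because of OOM) are filled with NULL instead of tripping the assertions. A minimal standalone sketch of that pattern; MockObject, MockMonitorInfo, MockBasicObjectLock, and fill_in_monitors are hypothetical stand-ins, not the real HotSpot classes:

#include <cassert>
#include <cstdio>
#include <cstddef>
#include <vector>

struct MockObject {};                        // stands in for an oop

struct MockMonitorInfo {                     // stands in for MonitorInfo
  MockObject* owner;
  bool        owner_is_scalar_replaced;
};

struct MockBasicObjectLock {                 // stands in for BasicObjectLock
  MockObject* obj;
};

// Mirrors the monitor-migration loop in vframeArrayElement::fill_in():
// when realloc_failures is set, a scalar-replaced owner is tolerated and
// its destination slot is filled with NULL.
static void fill_in_monitors(const std::vector<MockMonitorInfo>& list,
                             std::vector<MockBasicObjectLock>& dest,
                             bool realloc_failures) {
  dest.resize(list.size());
  for (size_t i = 0; i < list.size(); i++) {
    const MockMonitorInfo& monitor = list[i];
    assert(!monitor.owner_is_scalar_replaced || realloc_failures);
    if (monitor.owner_is_scalar_replaced) {
      dest[i].obj = NULL;                    // reallocation failed: no owner to migrate
    } else {
      dest[i].obj = monitor.owner;           // normal case: migrate the owner
    }
  }
}

int main() {
  MockObject o;
  std::vector<MockMonitorInfo> monitors;
  MockMonitorInfo locked    = { &o,   false };  // ordinary locked object
  MockMonitorInfo scalarrep = { NULL, true  };  // scalar replaced, reallocation failed
  monitors.push_back(locked);
  monitors.push_back(scalarrep);

  std::vector<MockBasicObjectLock> chunk;
  fill_in_monitors(monitors, chunk, /* realloc_failures */ true);
  printf("slot 0: %p, slot 1: %p\n", (void*)chunk[0].obj, (void*)chunk[1].obj);
  return 0;
}

In the file below, the same flag also relaxes the corresponding asserts for scalar-replaced T_OBJECT locals and expression-stack entries in vframeArrayElement::fill_in.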

duke@435 1 /*
drchase@6680 2 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "classfile/vmSymbols.hpp"
twisti@3969 27 #include "interpreter/bytecode.hpp"
stefank@2314 28 #include "interpreter/interpreter.hpp"
stefank@2314 29 #include "memory/allocation.inline.hpp"
stefank@2314 30 #include "memory/resourceArea.hpp"
stefank@2314 31 #include "memory/universe.inline.hpp"
coleenp@4037 32 #include "oops/methodData.hpp"
stefank@2314 33 #include "oops/oop.inline.hpp"
stefank@2314 34 #include "prims/jvmtiThreadState.hpp"
stefank@2314 35 #include "runtime/handles.inline.hpp"
stefank@2314 36 #include "runtime/monitorChunk.hpp"
stefank@2314 37 #include "runtime/sharedRuntime.hpp"
stefank@2314 38 #include "runtime/vframe.hpp"
stefank@2314 39 #include "runtime/vframeArray.hpp"
stefank@2314 40 #include "runtime/vframe_hp.hpp"
stefank@2314 41 #include "utilities/events.hpp"
stefank@2314 42 #ifdef COMPILER2
stefank@2314 43 #include "opto/runtime.hpp"
stefank@2314 44 #endif
duke@435 45
drchase@6680 46 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
duke@435 47
duke@435 48 int vframeArrayElement:: bci(void) const { return (_bci == SynchronizationEntryBCI ? 0 : _bci); }
duke@435 49
duke@435 50 void vframeArrayElement::free_monitors(JavaThread* jt) {
duke@435 51 if (_monitors != NULL) {
duke@435 52 MonitorChunk* chunk = _monitors;
duke@435 53 _monitors = NULL;
duke@435 54 jt->remove_monitor_chunk(chunk);
duke@435 55 delete chunk;
duke@435 56 }
duke@435 57 }
duke@435 58
roland@7419 59 void vframeArrayElement::fill_in(compiledVFrame* vf, bool realloc_failures) {
duke@435 60
duke@435 61 // Copy the information from the compiled vframe to the
duke@435 62 // interpreter frame we will be creating to replace vf
duke@435 63
duke@435 64 _method = vf->method();
duke@435 65 _bci = vf->raw_bci();
cfang@1335 66 _reexecute = vf->should_reexecute();
roland@7419 67 #ifdef ASSERT
roland@7419 68 _removed_monitors = false;
roland@7419 69 #endif
duke@435 70
duke@435 71 int index;
duke@435 72
duke@435 73 // Get the monitors off-stack
duke@435 74
duke@435 75 GrowableArray<MonitorInfo*>* list = vf->monitors();
duke@435 76 if (list->is_empty()) {
duke@435 77 _monitors = NULL;
duke@435 78 } else {
duke@435 79
duke@435 80 // Allocate monitor chunk
duke@435 81 _monitors = new MonitorChunk(list->length());
duke@435 82 vf->thread()->add_monitor_chunk(_monitors);
duke@435 83
duke@435 84 // Migrate the BasicLocks from the stack to the monitor chunk
duke@435 85 for (index = 0; index < list->length(); index++) {
duke@435 86 MonitorInfo* monitor = list->at(index);
roland@7419 87 assert(!monitor->owner_is_scalar_replaced() || realloc_failures, "object should be reallocated already");
duke@435 88 BasicObjectLock* dest = _monitors->at(index);
roland@7419 89 if (monitor->owner_is_scalar_replaced()) {
roland@7419 90 dest->set_obj(NULL);
roland@7419 91 } else {
roland@7419 92 assert(monitor->owner() == NULL || (!monitor->owner()->is_unlocked() && !monitor->owner()->has_bias_pattern()), "object must be null or locked, and unbiased");
roland@7419 93 dest->set_obj(monitor->owner());
roland@7419 94 monitor->lock()->move_to(monitor->owner(), dest->lock());
roland@7419 95 }
duke@435 96 }
duke@435 97 }
duke@435 98
duke@435 99 // Convert the vframe locals and expressions to off stack
duke@435 100 // values. Because we will not gc, all oops can be converted to
duke@435 101 // intptr_t (i.e. a stack slot) and we are fine. This is
duke@435 102 // good since we are inside a HandleMark and the oops in our
duke@435 103 // collection would go away between packing them here and
duke@435 104 // unpacking them in unpack_on_stack.
duke@435 105
duke@435 106 // First the locals go off-stack
duke@435 107
duke@435 108 // FIXME: this seems silly; it creates a StackValueCollection
duke@435 109 // in order to get the size to then copy them and
duke@435 110 // convert the types to intptr_t size slots. Seems like it
duke@435 111 // could do it in place... Still uses less memory than the
duke@435 112 // old way though
duke@435 113
duke@435 114 StackValueCollection *locs = vf->locals();
duke@435 115 _locals = new StackValueCollection(locs->size());
duke@435 116 for(index = 0; index < locs->size(); index++) {
duke@435 117 StackValue* value = locs->at(index);
duke@435 118 switch(value->type()) {
duke@435 119 case T_OBJECT:
roland@7419 120 assert(!value->obj_is_scalar_replaced() || realloc_failures, "object should be reallocated already");
duke@435 121 // preserve object type
hseigel@5784 122 _locals->add( new StackValue(cast_from_oop<intptr_t>((value->get_obj()())), T_OBJECT ));
duke@435 123 break;
duke@435 124 case T_CONFLICT:
duke@435 125 // A dead local. Will be initialized to null/zero.
duke@435 126 _locals->add( new StackValue());
duke@435 127 break;
duke@435 128 case T_INT:
duke@435 129 _locals->add( new StackValue(value->get_int()));
duke@435 130 break;
duke@435 131 default:
duke@435 132 ShouldNotReachHere();
duke@435 133 }
duke@435 134 }
duke@435 135
duke@435 136 // Now the expressions off-stack
duke@435 137 // Same silliness as above
duke@435 138
duke@435 139 StackValueCollection *exprs = vf->expressions();
duke@435 140 _expressions = new StackValueCollection(exprs->size());
duke@435 141 for(index = 0; index < exprs->size(); index++) {
duke@435 142 StackValue* value = exprs->at(index);
duke@435 143 switch(value->type()) {
duke@435 144 case T_OBJECT:
roland@7419 145 assert(!value->obj_is_scalar_replaced() || realloc_failures, "object should be reallocated already");
duke@435 146 // preserve object type
hseigel@5784 147 _expressions->add( new StackValue(cast_from_oop<intptr_t>((value->get_obj()())), T_OBJECT ));
duke@435 148 break;
duke@435 149 case T_CONFLICT:
duke@435 150 // A dead stack element. Will be initialized to null/zero.
duke@435 151 // This can occur when the compiler emits a state in which stack
duke@435 152 // elements are known to be dead (because of an imminent exception).
duke@435 153 _expressions->add( new StackValue());
duke@435 154 break;
duke@435 155 case T_INT:
duke@435 156 _expressions->add( new StackValue(value->get_int()));
duke@435 157 break;
duke@435 158 default:
duke@435 159 ShouldNotReachHere();
duke@435 160 }
duke@435 161 }
duke@435 162 }
duke@435 163
duke@435 164 int unpack_counter = 0;
duke@435 165
never@2901 166 void vframeArrayElement::unpack_on_stack(int caller_actual_parameters,
never@2901 167 int callee_parameters,
duke@435 168 int callee_locals,
duke@435 169 frame* caller,
duke@435 170 bool is_top_frame,
roland@4727 171 bool is_bottom_frame,
duke@435 172 int exec_mode) {
duke@435 173 JavaThread* thread = (JavaThread*) Thread::current();
duke@435 174
duke@435 175 // Look at bci and decide on bcp and continuation pc
duke@435 176 address bcp;
duke@435 177 // C++ interpreter doesn't need a pc since it will figure out what to do when it
duke@435 178 // begins execution
duke@435 179 address pc;
cfang@1335 180 bool use_next_mdp = false; // true if we should use the mdp associated with the next bci
cfang@1335 181 // rather than the one associated with bcp
duke@435 182 if (raw_bci() == SynchronizationEntryBCI) {
duke@435 183 // We are deoptimizing while hanging in prologue code for synchronized method
duke@435 184 bcp = method()->bcp_from(0); // first byte code
duke@435 185 pc = Interpreter::deopt_entry(vtos, 0); // step = 0 since we don't skip current bytecode
cfang@1335 186 } else if (should_reexecute()) { //reexecute this bytecode
cfang@1335 187 assert(is_top_frame, "reexecute allowed only for the top frame");
cfang@1335 188 bcp = method()->bcp_from(bci());
cfang@1335 189 pc = Interpreter::deopt_reexecute_entry(method(), bcp);
duke@435 190 } else {
duke@435 191 bcp = method()->bcp_from(bci());
cfang@1335 192 pc = Interpreter::deopt_continue_after_entry(method(), bcp, callee_parameters, is_top_frame);
cfang@1335 193 use_next_mdp = true;
duke@435 194 }
duke@435 195 assert(Bytecodes::is_defined(*bcp), "must be a valid bytecode");
duke@435 196
duke@435 197 // Monitorenter and pending exceptions:
duke@435 198 //
duke@435 199 // For Compiler2, there should be no pending exception when deoptimizing at monitorenter
duke@435 200 // because there is no safepoint at the null pointer check (it is either handled explicitly
duke@435 201 // or prior to the monitorenter) and asynchronous exceptions are not made "pending" by the
duke@435 202 // runtime interface for the slow case (see JRT_ENTRY_FOR_MONITORENTER). If an asynchronous
duke@435 203 // exception was processed, the bytecode pointer would have to be extended one bytecode beyond
duke@435 204 // the monitorenter to place it in the proper exception range.
duke@435 205 //
duke@435 206 // For Compiler1, deoptimization can occur while throwing a NullPointerException at monitorenter,
duke@435 207 // in which case bcp should point to the monitorenter since it is within the exception's range.
duke@435 208
duke@435 209 assert(*bcp != Bytecodes::_monitorenter || is_top_frame, "a _monitorenter must be a top frame");
iveresov@2169 210 assert(thread->deopt_nmethod() != NULL, "nmethod should be known");
iveresov@2169 211 guarantee(!(thread->deopt_nmethod()->is_compiled_by_c2() &&
iveresov@2169 212 *bcp == Bytecodes::_monitorenter &&
iveresov@2169 213 exec_mode == Deoptimization::Unpack_exception),
iveresov@2169 214 "shouldn't get exception during monitorenter");
duke@435 215
duke@435 216 int popframe_preserved_args_size_in_bytes = 0;
duke@435 217 int popframe_preserved_args_size_in_words = 0;
duke@435 218 if (is_top_frame) {
kvn@1690 219 JvmtiThreadState *state = thread->jvmti_thread_state();
duke@435 220 if (JvmtiExport::can_pop_frame() &&
duke@435 221 (thread->has_pending_popframe() || thread->popframe_forcing_deopt_reexecution())) {
duke@435 222 if (thread->has_pending_popframe()) {
duke@435 223 // Pop top frame after deoptimization
duke@435 224 #ifndef CC_INTERP
duke@435 225 pc = Interpreter::remove_activation_preserving_args_entry();
duke@435 226 #else
duke@435 227 // Do an uncommon trap type entry. c++ interpreter will know
duke@435 228 // to pop frame and preserve the args
duke@435 229 pc = Interpreter::deopt_entry(vtos, 0);
duke@435 230 use_next_mdp = false;
duke@435 231 #endif
duke@435 232 } else {
duke@435 233 // Reexecute invoke in top frame
duke@435 234 pc = Interpreter::deopt_entry(vtos, 0);
duke@435 235 use_next_mdp = false;
duke@435 236 popframe_preserved_args_size_in_bytes = in_bytes(thread->popframe_preserved_args_size());
duke@435 237 // Note: the PopFrame-related extension of the expression stack size is done in
duke@435 238 // Deoptimization::fetch_unroll_info_helper
duke@435 239 popframe_preserved_args_size_in_words = in_words(thread->popframe_preserved_args_size_in_words());
duke@435 240 }
duke@435 241 } else if (JvmtiExport::can_force_early_return() && state != NULL && state->is_earlyret_pending()) {
duke@435 242 // Force early return from top frame after deoptimization
duke@435 243 #ifndef CC_INTERP
duke@435 244 pc = Interpreter::remove_activation_early_entry(state->earlyret_tos());
duke@435 245 #endif
duke@435 246 } else {
duke@435 247 // Possibly override the previous pc computation of the top (youngest) frame
duke@435 248 switch (exec_mode) {
duke@435 249 case Deoptimization::Unpack_deopt:
duke@435 250 // use what we've got
duke@435 251 break;
duke@435 252 case Deoptimization::Unpack_exception:
duke@435 253 // exception is pending
twisti@1730 254 pc = SharedRuntime::raw_exception_handler_for_return_address(thread, pc);
duke@435 255 // [phh] We're going to end up in some handler or other, so it doesn't
duke@435 256 // matter what mdp we point to. See exception_handler_for_exception()
duke@435 257 // in interpreterRuntime.cpp.
duke@435 258 break;
duke@435 259 case Deoptimization::Unpack_uncommon_trap:
duke@435 260 case Deoptimization::Unpack_reexecute:
duke@435 261 // redo last byte code
duke@435 262 pc = Interpreter::deopt_entry(vtos, 0);
duke@435 263 use_next_mdp = false;
duke@435 264 break;
duke@435 265 default:
duke@435 266 ShouldNotReachHere();
duke@435 267 }
duke@435 268 }
duke@435 269 }
duke@435 270
duke@435 271 // Setup the interpreter frame
duke@435 272
duke@435 273 assert(method() != NULL, "method must exist");
duke@435 274 int temps = expressions()->size();
duke@435 275
duke@435 276 int locks = monitors() == NULL ? 0 : monitors()->number_of_monitors();
duke@435 277
duke@435 278 Interpreter::layout_activation(method(),
duke@435 279 temps + callee_parameters,
duke@435 280 popframe_preserved_args_size_in_words,
duke@435 281 locks,
never@2901 282 caller_actual_parameters,
duke@435 283 callee_parameters,
duke@435 284 callee_locals,
duke@435 285 caller,
duke@435 286 iframe(),
roland@4727 287 is_top_frame,
roland@4727 288 is_bottom_frame);
duke@435 289
duke@435 290 // Update the pc in the frame object and overwrite the temporary pc
duke@435 291 // we placed in the skeletal frame now that we finally know the
duke@435 292 // exact interpreter address we should use.
duke@435 293
duke@435 294 _frame.patch_pc(thread, pc);
duke@435 295
roland@7419 296 assert (!method()->is_synchronized() || locks > 0 || _removed_monitors, "synchronized methods must have monitors");
duke@435 297
duke@435 298 BasicObjectLock* top = iframe()->interpreter_frame_monitor_begin();
duke@435 299 for (int index = 0; index < locks; index++) {
duke@435 300 top = iframe()->previous_monitor_in_interpreter_frame(top);
duke@435 301 BasicObjectLock* src = _monitors->at(index);
duke@435 302 top->set_obj(src->obj());
duke@435 303 src->lock()->move_to(src->obj(), top->lock());
duke@435 304 }
duke@435 305 if (ProfileInterpreter) {
duke@435 306 iframe()->interpreter_frame_set_mdx(0); // clear out the mdp.
duke@435 307 }
duke@435 308 iframe()->interpreter_frame_set_bcx((intptr_t)bcp); // cannot use bcp because frame is not initialized yet
duke@435 309 if (ProfileInterpreter) {
coleenp@4037 310 MethodData* mdo = method()->method_data();
duke@435 311 if (mdo != NULL) {
duke@435 312 int bci = iframe()->interpreter_frame_bci();
duke@435 313 if (use_next_mdp) ++bci;
duke@435 314 address mdp = mdo->bci_to_dp(bci);
duke@435 315 iframe()->interpreter_frame_set_mdp(mdp);
duke@435 316 }
duke@435 317 }
duke@435 318
duke@435 319 // Unpack expression stack
duke@435 320 // If this is an intermediate frame (i.e. not top frame) then this
duke@435 321 // only unpacks the part of the expression stack not used by callee
duke@435 322 // as parameters. The callee parameters are unpacked as part of the
duke@435 323 // callee locals.
duke@435 324 int i;
duke@435 325 for(i = 0; i < expressions()->size(); i++) {
duke@435 326 StackValue *value = expressions()->at(i);
duke@435 327 intptr_t* addr = iframe()->interpreter_frame_expression_stack_at(i);
duke@435 328 switch(value->type()) {
duke@435 329 case T_INT:
duke@435 330 *addr = value->get_int();
duke@435 331 break;
duke@435 332 case T_OBJECT:
duke@435 333 *addr = value->get_int(T_OBJECT);
duke@435 334 break;
duke@435 335 case T_CONFLICT:
duke@435 336 // A dead stack slot. Initialize to null in case it is an oop.
duke@435 337 *addr = NULL_WORD;
duke@435 338 break;
duke@435 339 default:
duke@435 340 ShouldNotReachHere();
duke@435 341 }
duke@435 342 }
duke@435 343
duke@435 344
duke@435 345 // Unpack the locals
duke@435 346 for(i = 0; i < locals()->size(); i++) {
duke@435 347 StackValue *value = locals()->at(i);
duke@435 348 intptr_t* addr = iframe()->interpreter_frame_local_at(i);
duke@435 349 switch(value->type()) {
duke@435 350 case T_INT:
duke@435 351 *addr = value->get_int();
duke@435 352 break;
duke@435 353 case T_OBJECT:
duke@435 354 *addr = value->get_int(T_OBJECT);
duke@435 355 break;
duke@435 356 case T_CONFLICT:
duke@435 357 // A dead location. If it is an oop then we need a NULL to prevent GC from following it
duke@435 358 *addr = NULL_WORD;
duke@435 359 break;
duke@435 360 default:
duke@435 361 ShouldNotReachHere();
duke@435 362 }
duke@435 363 }
duke@435 364
duke@435 365 if (is_top_frame && JvmtiExport::can_pop_frame() && thread->popframe_forcing_deopt_reexecution()) {
duke@435 366 // An interpreted frame was popped but it returns to a deoptimized
duke@435 367 // frame. The incoming arguments to the interpreted activation
duke@435 368 // were preserved in thread-local storage by the
duke@435 369 // remove_activation_preserving_args_entry in the interpreter; now
duke@435 370 // we put them back into the just-unpacked interpreter frame.
duke@435 371 // Note that this assumes that the locals arena grows toward lower
duke@435 372 // addresses.
duke@435 373 if (popframe_preserved_args_size_in_words != 0) {
duke@435 374 void* saved_args = thread->popframe_preserved_args();
duke@435 375 assert(saved_args != NULL, "must have been saved by interpreter");
duke@435 376 #ifdef ASSERT
duke@435 377 assert(popframe_preserved_args_size_in_words <=
twisti@1861 378 iframe()->interpreter_frame_expression_stack_size()*Interpreter::stackElementWords,
duke@435 379 "expression stack size should have been extended");
duke@435 380 #endif // ASSERT
duke@435 381 int top_element = iframe()->interpreter_frame_expression_stack_size()-1;
duke@435 382 intptr_t* base;
duke@435 383 if (frame::interpreter_frame_expression_stack_direction() < 0) {
duke@435 384 base = iframe()->interpreter_frame_expression_stack_at(top_element);
duke@435 385 } else {
duke@435 386 base = iframe()->interpreter_frame_expression_stack();
duke@435 387 }
kvn@1958 388 Copy::conjoint_jbytes(saved_args,
kvn@1958 389 base,
kvn@1958 390 popframe_preserved_args_size_in_bytes);
duke@435 391 thread->popframe_free_preserved_args();
duke@435 392 }
duke@435 393 }
duke@435 394
duke@435 395 #ifndef PRODUCT
duke@435 396 if (TraceDeoptimization && Verbose) {
duke@435 397 ttyLocker ttyl;
duke@435 398 tty->print_cr("[%d Interpreted Frame]", ++unpack_counter);
duke@435 399 iframe()->print_on(tty);
duke@435 400 RegisterMap map(thread);
duke@435 401 vframe* f = vframe::new_vframe(iframe(), &map, thread);
duke@435 402 f->print();
duke@435 403
duke@435 404 tty->print_cr("locals size %d", locals()->size());
duke@435 405 tty->print_cr("expression size %d", expressions()->size());
duke@435 406
duke@435 407 method()->print_value();
duke@435 408 tty->cr();
duke@435 409 // method()->print_codes();
duke@435 410 } else if (TraceDeoptimization) {
duke@435 411 tty->print(" ");
duke@435 412 method()->print_value();
never@2462 413 Bytecodes::Code code = Bytecodes::java_code_at(method(), bcp);
duke@435 414 int bci = method()->bci_from(bcp);
duke@435 415 tty->print(" - %s", Bytecodes::name(code));
duke@435 416 tty->print(" @ bci %d ", bci);
duke@435 417 tty->print_cr("sp = " PTR_FORMAT, iframe()->sp());
duke@435 418 }
duke@435 419 #endif // PRODUCT
duke@435 420
duke@435 421 // The expression stack and locals are in the resource area; don't leave
duke@435 422 // a dangling pointer in the vframeArray we leave around for debug
duke@435 423 // purposes
duke@435 424
duke@435 425 _locals = _expressions = NULL;
duke@435 426
duke@435 427 }
duke@435 428
roland@6723 429 int vframeArrayElement::on_stack_size(int callee_parameters,
duke@435 430 int callee_locals,
duke@435 431 bool is_top_frame,
duke@435 432 int popframe_extra_stack_expression_els) const {
duke@435 433 assert(method()->max_locals() == locals()->size(), "just checking");
duke@435 434 int locks = monitors() == NULL ? 0 : monitors()->number_of_monitors();
duke@435 435 int temps = expressions()->size();
roland@6723 436 return Interpreter::size_activation(method()->max_stack(),
duke@435 437 temps + callee_parameters,
duke@435 438 popframe_extra_stack_expression_els,
duke@435 439 locks,
duke@435 440 callee_parameters,
duke@435 441 callee_locals,
roland@6723 442 is_top_frame);
duke@435 443 }
duke@435 444
duke@435 445
duke@435 446
duke@435 447 vframeArray* vframeArray::allocate(JavaThread* thread, int frame_size, GrowableArray<compiledVFrame*>* chunk,
roland@7419 448 RegisterMap *reg_map, frame sender, frame caller, frame self,
roland@7419 449 bool realloc_failures) {
duke@435 450
duke@435 451 // Allocate the vframeArray
duke@435 452 vframeArray * result = (vframeArray*) AllocateHeap(sizeof(vframeArray) + // fixed part
duke@435 453 sizeof(vframeArrayElement) * (chunk->length() - 1), // variable part
zgu@3900 454 mtCompiler);
duke@435 455 result->_frames = chunk->length();
duke@435 456 result->_owner_thread = thread;
duke@435 457 result->_sender = sender;
duke@435 458 result->_caller = caller;
duke@435 459 result->_original = self;
duke@435 460 result->set_unroll_block(NULL); // initialize it
roland@7419 461 result->fill_in(thread, frame_size, chunk, reg_map, realloc_failures);
duke@435 462 return result;
duke@435 463 }
duke@435 464
duke@435 465 void vframeArray::fill_in(JavaThread* thread,
duke@435 466 int frame_size,
duke@435 467 GrowableArray<compiledVFrame*>* chunk,
roland@7419 468 const RegisterMap *reg_map,
roland@7419 469 bool realloc_failures) {
duke@435 470 // Set owner first, it is used when adding monitor chunks
duke@435 471
duke@435 472 _frame_size = frame_size;
duke@435 473 for(int i = 0; i < chunk->length(); i++) {
roland@7419 474 element(i)->fill_in(chunk->at(i), realloc_failures);
duke@435 475 }
duke@435 476
duke@435 477 // Copy registers for callee-saved registers
duke@435 478 if (reg_map != NULL) {
duke@435 479 for(int i = 0; i < RegisterMap::reg_count; i++) {
duke@435 480 #ifdef AMD64
duke@435 481 // The register map has one entry for every int (32-bit value), so
duke@435 482 // 64-bit physical registers have two entries in the map, one for
duke@435 483 // each half. Ignore the high halves of 64-bit registers, just like
duke@435 484 // frame::oopmapreg_to_location does.
duke@435 485 //
duke@435 486 // [phh] FIXME: this is a temporary hack! This code *should* work
duke@435 487 // correctly w/o this hack, possibly by changing RegisterMap::pd_location
duke@435 488 // in frame_amd64.cpp and the values of the phantom high half registers
duke@435 489 // in amd64.ad.
duke@435 490 // if (VMReg::Name(i) < SharedInfo::stack0 && is_even(i)) {
duke@435 491 intptr_t* src = (intptr_t*) reg_map->location(VMRegImpl::as_VMReg(i));
duke@435 492 _callee_registers[i] = src != NULL ? *src : NULL_WORD;
duke@435 493 // } else {
duke@435 494 // jint* src = (jint*) reg_map->location(VMReg::Name(i));
duke@435 495 // _callee_registers[i] = src != NULL ? *src : NULL_WORD;
duke@435 496 // }
duke@435 497 #else
duke@435 498 jint* src = (jint*) reg_map->location(VMRegImpl::as_VMReg(i));
duke@435 499 _callee_registers[i] = src != NULL ? *src : NULL_WORD;
duke@435 500 #endif
duke@435 501 if (src == NULL) {
duke@435 502 set_location_valid(i, false);
duke@435 503 } else {
duke@435 504 set_location_valid(i, true);
duke@435 505 jint* dst = (jint*) register_location(i);
duke@435 506 *dst = *src;
duke@435 507 }
duke@435 508 }
duke@435 509 }
duke@435 510 }
duke@435 511
never@2901 512 void vframeArray::unpack_to_stack(frame &unpack_frame, int exec_mode, int caller_actual_parameters) {
duke@435 513 // stack picture
duke@435 514 // unpack_frame
duke@435 515 // [new interpreter frames ] (frames are skeletal but walkable)
duke@435 516 // caller_frame
duke@435 517 //
duke@435 518 // This routine fills in the missing data for the skeletal interpreter frames
duke@435 519 // in the above picture.
duke@435 520
duke@435 521 // Find the skeletal interpreter frames to unpack into
twisti@3969 522 JavaThread* THREAD = JavaThread::current();
twisti@3969 523 RegisterMap map(THREAD, false);
duke@435 524 // Get the youngest frame we will unpack (last to be unpacked)
duke@435 525 frame me = unpack_frame.sender(&map);
duke@435 526 int index;
duke@435 527 for (index = 0; index < frames(); index++ ) {
duke@435 528 *element(index)->iframe() = me;
duke@435 529 // Get the caller frame (possibly skeletal)
duke@435 530 me = me.sender(&map);
duke@435 531 }
duke@435 532
twisti@3969 533 // Do the unpacking of interpreter frames; the frame at index 0 represents the top activation, so it has no callee
twisti@3969 534 // Unpack the frames from the oldest (frames() -1) to the youngest (0)
roland@4727 535 frame* caller_frame = &me;
duke@435 536 for (index = frames() - 1; index >= 0 ; index--) {
twisti@3969 537 vframeArrayElement* elem = element(index); // caller
twisti@3969 538 int callee_parameters, callee_locals;
twisti@3969 539 if (index == 0) {
twisti@3969 540 callee_parameters = callee_locals = 0;
twisti@3969 541 } else {
twisti@3969 542 methodHandle caller = elem->method();
twisti@3969 543 methodHandle callee = element(index - 1)->method();
twisti@3969 544 Bytecode_invoke inv(caller, elem->bci());
twisti@3969 545 // invokedynamic instructions don't have a class but obviously don't have a MemberName appendix.
twisti@3969 546 // NOTE: Use machinery here that avoids resolving of any kind.
twisti@3969 547 const bool has_member_arg =
twisti@3969 548 !inv.is_invokedynamic() && MethodHandles::has_member_arg(inv.klass(), inv.name());
twisti@3969 549 callee_parameters = callee->size_of_parameters() + (has_member_arg ? 1 : 0);
twisti@3969 550 callee_locals = callee->max_locals();
twisti@3969 551 }
twisti@3969 552 elem->unpack_on_stack(caller_actual_parameters,
twisti@3969 553 callee_parameters,
twisti@3969 554 callee_locals,
roland@4727 555 caller_frame,
twisti@3969 556 index == 0,
roland@4727 557 index == frames() - 1,
twisti@3969 558 exec_mode);
duke@435 559 if (index == frames() - 1) {
twisti@3969 560 Deoptimization::unwind_callee_save_values(elem->iframe(), this);
duke@435 561 }
roland@4727 562 caller_frame = elem->iframe();
never@2901 563 caller_actual_parameters = callee_parameters;
duke@435 564 }
duke@435 565 deallocate_monitor_chunks();
duke@435 566 }
duke@435 567
duke@435 568 void vframeArray::deallocate_monitor_chunks() {
duke@435 569 JavaThread* jt = JavaThread::current();
duke@435 570 for (int index = 0; index < frames(); index++ ) {
duke@435 571 element(index)->free_monitors(jt);
duke@435 572 }
duke@435 573 }
duke@435 574
duke@435 575 #ifndef PRODUCT
duke@435 576
duke@435 577 bool vframeArray::structural_compare(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk) {
duke@435 578 if (owner_thread() != thread) return false;
duke@435 579 int index = 0;
duke@435 580 #if 0 // FIXME can't do this comparison
duke@435 581
duke@435 582 // Compare only within vframe array.
duke@435 583 for (deoptimizedVFrame* vf = deoptimizedVFrame::cast(vframe_at(first_index())); vf; vf = vf->deoptimized_sender_or_null()) {
duke@435 584 if (index >= chunk->length() || !vf->structural_compare(chunk->at(index))) return false;
duke@435 585 index++;
duke@435 586 }
duke@435 587 if (index != chunk->length()) return false;
duke@435 588 #endif
duke@435 589
duke@435 590 return true;
duke@435 591 }
duke@435 592
duke@435 593 #endif
duke@435 594
duke@435 595 address vframeArray::register_location(int i) const {
duke@435 596 assert(0 <= i && i < RegisterMap::reg_count, "index out of bounds");
duke@435 597 return (address) & _callee_registers[i];
duke@435 598 }
duke@435 599
duke@435 600
duke@435 601 #ifndef PRODUCT
duke@435 602
duke@435 603 // Printing
duke@435 604
duke@435 605 // Note: we cannot have print_on as const, as we allocate inside the method
duke@435 606 void vframeArray::print_on_2(outputStream* st) {
duke@435 607 st->print_cr(" - sp: " INTPTR_FORMAT, sp());
duke@435 608 st->print(" - thread: ");
duke@435 609 Thread::current()->print();
duke@435 610 st->print_cr(" - frame size: %d", frame_size());
duke@435 611 for (int index = 0; index < frames() ; index++ ) {
duke@435 612 element(index)->print(st);
duke@435 613 }
duke@435 614 }
duke@435 615
duke@435 616 void vframeArrayElement::print(outputStream* st) {
kvn@1690 617 st->print_cr(" - interpreter_frame -> sp: " INTPTR_FORMAT, iframe()->sp());
duke@435 618 }
duke@435 619
duke@435 620 void vframeArray::print_value_on(outputStream* st) const {
duke@435 621 st->print_cr("vframeArray [%d] ", frames());
duke@435 622 }
duke@435 623
duke@435 624
duke@435 625 #endif
