@@ -1,7 +1,7 @@
 /*
- * Copyright 2000-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2000-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.
@@ -416,31 +416,27 @@
   // subtract two words to account for return address and link
   return (frame_map()->framesize() - (2*VMRegImpl::slots_per_word)) * VMRegImpl::stack_slot_size;
 }
 
 
-void LIR_Assembler::emit_exception_handler() {
+int LIR_Assembler::emit_exception_handler() {
   // if the last instruction is a call (typically to do a throw which
   // is coming at the end after block reordering) the return address
   // must still point into the code area in order to avoid assertion
   // failures when searching for the corresponding bci => add a nop
   // (was bug 5/14/1999 - gri)
-
   __ nop();
 
   // generate code for exception handler
   address handler_base = __ start_a_stub(exception_handler_size);
   if (handler_base == NULL) {
     // not enough space left for the handler
     bailout("exception handler overflow");
-    return;
+    return -1;
   }
-#ifdef ASSERT
+
   int offset = code_offset();
-#endif // ASSERT
-
-  compilation()->offsets()->set_value(CodeOffsets::Exceptions, code_offset());
 
   // if the method does not have an exception handler, then there is
   // no reason to search for one
   if (compilation()->has_exception_handlers() || compilation()->env()->jvmti_can_post_exceptions()) {
     // the exception oop and pc are in rax, and rdx
@@ -472,11 +468,11 @@
   }
 
   // unwind activation and forward exception to caller
   // rax,: exception
   __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
-
   assert(code_offset() - offset <= exception_handler_size, "overflow");
-
   __ end_a_stub();
+
+  return offset;
 }
 
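Taken together, the two hunks above change emit_exception_handler() to return the start offset of the generated stub (and -1 after a bailout) instead of recording CodeOffsets::Exceptions itself; since the offset is now live in product builds, the #ifdef ASSERT guard around it is gone. The call site is outside these hunks, so the following caller-side sketch is illustrative only (the caller name and the CHECK_BAILOUT usage are assumptions, not quoted from this patch):

    // Hypothetical caller, e.g. in Compilation's emit-code epilog:
    int exception_offset = assembler->emit_exception_handler();
    CHECK_BAILOUT();  // a bailout() inside the emitter aborts the compile here
    offsets()->set_value(CodeOffsets::Exceptions, exception_offset);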
@@ -483,36 +479,30 @@
-void LIR_Assembler::emit_deopt_handler() {
+
+int LIR_Assembler::emit_deopt_handler() {
   // if the last instruction is a call (typically to do a throw which
   // is coming at the end after block reordering) the return address
   // must still point into the code area in order to avoid assertion
   // failures when searching for the corresponding bci => add a nop
   // (was bug 5/14/1999 - gri)
-
   __ nop();
 
   // generate code for exception handler
   address handler_base = __ start_a_stub(deopt_handler_size);
   if (handler_base == NULL) {
     // not enough space left for the handler
     bailout("deopt handler overflow");
-    return;
+    return -1;
   }
-#ifdef ASSERT
+
   int offset = code_offset();
-#endif // ASSERT
-
-  compilation()->offsets()->set_value(CodeOffsets::Deopt, code_offset());
-
   InternalAddress here(__ pc());
   __ pushptr(here.addr());
-
   __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
-
   assert(code_offset() - offset <= deopt_handler_size, "overflow");
-
   __ end_a_stub();
 
+  return offset;
 }
 
 
 // This is the fast version of java.lang.String.compare; it has not
 // OSR-entry and therefore, we generate a slow version for OSR's
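emit_deopt_handler() gets the same treatment: the void-to-int signature change, the -1 return on bailout, and removal of the emitter-side CodeOffsets::Deopt bookkeeping (its "generate code for exception handler" comment is a pre-existing copy-paste slip that this patch leaves untouched). The unchanged handler body is a small trampoline: it pushes its own pc as the return address and tail-jumps into the shared deopt blob, so the frame seen by the deopt machinery keeps a pc that points into this nmethod's code area, matching the reasoning of the nop comment at the top of the function. Annotated copy of those lines (comments are editorial, not part of the patch):

    InternalAddress here(__ pc());   // pc of the deopt handler itself
    __ pushptr(here.addr());         // push it as the frame's return address
    __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
                                     // tail-jump to the shared unpack entry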