/*
 * Copyright 2000-2010 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
355 // done |
355 // done |
356 __ bind(*slow_case->continuation()); |
356 __ bind(*slow_case->continuation()); |
357 } |
357 } |
358 |
358 |
359 |
359 |
360 int LIR_Assembler::emit_exception_handler() { |
360 void LIR_Assembler::emit_exception_handler() { |
361 // if the last instruction is a call (typically to do a throw which |
361 // if the last instruction is a call (typically to do a throw which |
362 // is coming at the end after block reordering) the return address |
362 // is coming at the end after block reordering) the return address |
363 // must still point into the code area in order to avoid assertion |
363 // must still point into the code area in order to avoid assertion |
364 // failures when searching for the corresponding bci => add a nop |
364 // failures when searching for the corresponding bci => add a nop |
365 // (was bug 5/14/1999 - gri) |
365 // (was bug 5/14/1999 - gri) |
371 address handler_base = __ start_a_stub(exception_handler_size); |
371 address handler_base = __ start_a_stub(exception_handler_size); |
372 |
372 |
373 if (handler_base == NULL) { |
373 if (handler_base == NULL) { |
374 // not enough space left for the handler |
374 // not enough space left for the handler |
375 bailout("exception handler overflow"); |
375 bailout("exception handler overflow"); |
376 return -1; |
376 return; |
377 } |
377 } |
378 |
378 #ifdef ASSERT |
379 int offset = code_offset(); |
379 int offset = code_offset(); |
|
380 #endif // ASSERT |
|
381 compilation()->offsets()->set_value(CodeOffsets::Exceptions, code_offset()); |
|
382 |
380 |
383 |
381 if (compilation()->has_exception_handlers() || compilation()->env()->jvmti_can_post_exceptions()) { |
384 if (compilation()->has_exception_handlers() || compilation()->env()->jvmti_can_post_exceptions()) { |
382 __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type); |
385 __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type); |
383 __ delayed()->nop(); |
386 __ delayed()->nop(); |
384 } |
387 } |
385 |
388 |
386 __ call(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type); |
389 __ call(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type); |
387 __ delayed()->nop(); |
390 __ delayed()->nop(); |
388 debug_only(__ stop("should have gone to the caller");) |
391 debug_only(__ stop("should have gone to the caller");) |
389 assert(code_offset() - offset <= exception_handler_size, "overflow"); |
392 assert(code_offset() - offset <= exception_handler_size, "overflow"); |
|
393 |
390 __ end_a_stub(); |
394 __ end_a_stub(); |
391 |
395 } |
392 return offset; |
396 |
393 } |
397 void LIR_Assembler::emit_deopt_handler() { |
394 |
|
395 |
|
396 int LIR_Assembler::emit_deopt_handler() { |
|
397 // if the last instruction is a call (typically to do a throw which |
398 // if the last instruction is a call (typically to do a throw which |
398 // is coming at the end after block reordering) the return address |
399 // is coming at the end after block reordering) the return address |
399 // must still point into the code area in order to avoid assertion |
400 // must still point into the code area in order to avoid assertion |
400 // failures when searching for the corresponding bci => add a nop |
401 // failures when searching for the corresponding bci => add a nop |
401 // (was bug 5/14/1999 - gri) |
402 // (was bug 5/14/1999 - gri) |
405 ciMethod* method = compilation()->method(); |
406 ciMethod* method = compilation()->method(); |
406 address handler_base = __ start_a_stub(deopt_handler_size); |
407 address handler_base = __ start_a_stub(deopt_handler_size); |
407 if (handler_base == NULL) { |
408 if (handler_base == NULL) { |
408 // not enough space left for the handler |
409 // not enough space left for the handler |
409 bailout("deopt handler overflow"); |
410 bailout("deopt handler overflow"); |
410 return -1; |
411 return; |
411 } |
412 } |
412 |
413 #ifdef ASSERT |
413 int offset = code_offset(); |
414 int offset = code_offset(); |
|
415 #endif // ASSERT |
|
416 compilation()->offsets()->set_value(CodeOffsets::Deopt, code_offset()); |
|
417 |
414 AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack()); |
418 AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack()); |
|
419 |
415 __ JUMP(deopt_blob, G3_scratch, 0); // sethi;jmp |
420 __ JUMP(deopt_blob, G3_scratch, 0); // sethi;jmp |
416 __ delayed()->nop(); |
421 __ delayed()->nop(); |
|
422 |
417 assert(code_offset() - offset <= deopt_handler_size, "overflow"); |
423 assert(code_offset() - offset <= deopt_handler_size, "overflow"); |
|
424 |
418 debug_only(__ stop("should have gone to the caller");) |
425 debug_only(__ stop("should have gone to the caller");) |
|
426 |
419 __ end_a_stub(); |
427 __ end_a_stub(); |
420 |
|
421 return offset; |
|
422 } |
428 } |
423 |
429 |
424 |
430 |
425 void LIR_Assembler::jobject2reg(jobject o, Register reg) { |
431 void LIR_Assembler::jobject2reg(jobject o, Register reg) { |
426 if (o == NULL) { |
432 if (o == NULL) { |