Sun, 15 Sep 2013 15:28:58 +0200
8024468: PPC64 (part 201): cppInterpreter: implement bytecode profiling
Summary: Implement profiling for c2 jit compilation. Also enable new cppInterpreter features.
Reviewed-by: kvn
1.1 --- a/src/cpu/zero/vm/cppInterpreter_zero.cpp Thu Sep 19 17:31:42 2013 +0200 1.2 +++ b/src/cpu/zero/vm/cppInterpreter_zero.cpp Sun Sep 15 15:28:58 2013 +0200 1.3 @@ -220,7 +220,7 @@ 1.4 } 1.5 InvocationCounter *counter = mcs->invocation_counter(); 1.6 counter->increment(); 1.7 - if (counter->reached_InvocationLimit()) { 1.8 + if (counter->reached_InvocationLimit(mcs->backedge_counter())) { 1.9 CALL_VM_NOCHECK( 1.10 InterpreterRuntime::frequency_counter_overflow(thread, NULL)); 1.11 if (HAS_PENDING_EXCEPTION)
2.1 --- a/src/share/vm/interpreter/bytecodeInterpreter.cpp Thu Sep 19 17:31:42 2013 +0200 2.2 +++ b/src/share/vm/interpreter/bytecodeInterpreter.cpp Sun Sep 15 15:28:58 2013 +0200 2.3 @@ -28,6 +28,7 @@ 2.4 #include "interpreter/bytecodeHistogram.hpp" 2.5 #include "interpreter/bytecodeInterpreter.hpp" 2.6 #include "interpreter/bytecodeInterpreter.inline.hpp" 2.7 +#include "interpreter/bytecodeInterpreterProfiling.hpp" 2.8 #include "interpreter/interpreter.hpp" 2.9 #include "interpreter/interpreterRuntime.hpp" 2.10 #include "memory/resourceArea.hpp" 2.11 @@ -142,19 +143,20 @@ 2.12 * is no entry point to do the transition to vm so we just 2.13 * do it by hand here. 2.14 */ 2.15 -#define VM_JAVA_ERROR_NO_JUMP(name, msg) \ 2.16 +#define VM_JAVA_ERROR_NO_JUMP(name, msg, note_a_trap) \ 2.17 DECACHE_STATE(); \ 2.18 SET_LAST_JAVA_FRAME(); \ 2.19 { \ 2.20 + InterpreterRuntime::note_a_trap(THREAD, istate->method(), BCI()); \ 2.21 ThreadInVMfromJava trans(THREAD); \ 2.22 Exceptions::_throw_msg(THREAD, __FILE__, __LINE__, name, msg); \ 2.23 } \ 2.24 RESET_LAST_JAVA_FRAME(); \ 2.25 CACHE_STATE(); 2.26 2.27 -// Normal throw of a java error 2.28 -#define VM_JAVA_ERROR(name, msg) \ 2.29 - VM_JAVA_ERROR_NO_JUMP(name, msg) \ 2.30 +// Normal throw of a java error. 2.31 +#define VM_JAVA_ERROR(name, msg, note_a_trap) \ 2.32 + VM_JAVA_ERROR_NO_JUMP(name, msg, note_a_trap) \ 2.33 goto handle_exception; 2.34 2.35 #ifdef PRODUCT 2.36 @@ -340,9 +342,25 @@ 2.37 if (UseLoopCounter) { \ 2.38 bool do_OSR = UseOnStackReplacement; \ 2.39 mcs->backedge_counter()->increment(); \ 2.40 - if (do_OSR) do_OSR = mcs->backedge_counter()->reached_InvocationLimit(); \ 2.41 + if (ProfileInterpreter) { \ 2.42 + BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception); \ 2.43 + /* Check for overflow against MDO count. 
*/ \ 2.44 + do_OSR = do_OSR \ 2.45 + && (mdo_last_branch_taken_count >= (uint)InvocationCounter::InterpreterBackwardBranchLimit)\ 2.46 + /* When ProfileInterpreter is on, the backedge_count comes */ \ 2.47 + /* from the methodDataOop, which value does not get reset on */ \ 2.48 + /* the call to frequency_counter_overflow(). To avoid */ \ 2.49 + /* excessive calls to the overflow routine while the method is */ \ 2.50 + /* being compiled, add a second test to make sure the overflow */ \ 2.51 + /* function is called only once every overflow_frequency. */ \ 2.52 + && (!(mdo_last_branch_taken_count & 1023)); \ 2.53 + } else { \ 2.54 + /* check for overflow of backedge counter */ \ 2.55 + do_OSR = do_OSR \ 2.56 + && mcs->invocation_counter()->reached_InvocationLimit(mcs->backedge_counter()); \ 2.57 + } \ 2.58 if (do_OSR) { \ 2.59 - nmethod* osr_nmethod; \ 2.60 + nmethod* osr_nmethod; \ 2.61 OSR_REQUEST(osr_nmethod, branch_pc); \ 2.62 if (osr_nmethod != NULL && osr_nmethod->osr_entry_bci() != InvalidOSREntryBci) { \ 2.63 intptr_t* buf; \ 2.64 @@ -355,7 +373,6 @@ 2.65 } \ 2.66 } \ 2.67 } /* UseCompiler ... */ \ 2.68 - mcs->invocation_counter()->increment(); \ 2.69 SAFEPOINT; \ 2.70 } 2.71 2.72 @@ -388,17 +405,21 @@ 2.73 #undef CACHE_FRAME 2.74 #define CACHE_FRAME() 2.75 2.76 +// BCI() returns the current bytecode-index. 2.77 +#undef BCI 2.78 +#define BCI() ((int)(intptr_t)(pc - (intptr_t)istate->method()->code_base())) 2.79 + 2.80 /* 2.81 * CHECK_NULL - Macro for throwing a NullPointerException if the object 2.82 * passed is a null ref. 
2.83 * On some architectures/platforms it should be possible to do this implicitly 2.84 */ 2.85 #undef CHECK_NULL 2.86 -#define CHECK_NULL(obj_) \ 2.87 - if ((obj_) == NULL) { \ 2.88 - VM_JAVA_ERROR(vmSymbols::java_lang_NullPointerException(), ""); \ 2.89 - } \ 2.90 - VERIFY_OOP(obj_) 2.91 +#define CHECK_NULL(obj_) \ 2.92 + if ((obj_) == NULL) { \ 2.93 + VM_JAVA_ERROR(vmSymbols::java_lang_NullPointerException(), "", note_nullCheck_trap); \ 2.94 + } \ 2.95 + VERIFY_OOP(obj_) 2.96 2.97 #define VMdoubleConstZero() 0.0 2.98 #define VMdoubleConstOne() 1.0 2.99 @@ -635,9 +656,16 @@ 2.100 topOfStack < istate->stack_base(), 2.101 "Stack top out of range"); 2.102 2.103 +#ifdef CC_INTERP_PROFILE 2.104 + // MethodData's last branch taken count. 2.105 + uint mdo_last_branch_taken_count = 0; 2.106 +#else 2.107 + const uint mdo_last_branch_taken_count = 0; 2.108 +#endif 2.109 + 2.110 switch (istate->msg()) { 2.111 case initialize: { 2.112 - if (initialized++) ShouldNotReachHere(); // Only one initialize call 2.113 + if (initialized++) ShouldNotReachHere(); // Only one initialize call. 2.114 _compiling = (UseCompiler || CountCompiledCalls); 2.115 #ifdef VM_JVMTI 2.116 _jvmti_interp_events = JvmtiExport::can_post_interpreter_events(); 2.117 @@ -656,15 +684,12 @@ 2.118 METHOD->increment_interpreter_invocation_count(THREAD); 2.119 } 2.120 mcs->invocation_counter()->increment(); 2.121 - if (mcs->invocation_counter()->reached_InvocationLimit()) { 2.122 - CALL_VM((void)InterpreterRuntime::frequency_counter_overflow(THREAD, NULL), handle_exception); 2.123 - 2.124 - // We no longer retry on a counter overflow 2.125 - 2.126 - // istate->set_msg(retry_method); 2.127 - // THREAD->clr_do_not_unlock(); 2.128 - // return; 2.129 + if (mcs->invocation_counter()->reached_InvocationLimit(mcs->backedge_counter())) { 2.130 + CALL_VM((void)InterpreterRuntime::frequency_counter_overflow(THREAD, NULL), handle_exception); 2.131 + // We no longer retry on a counter overflow. 
2.132 } 2.133 + // Get or create profile data. Check for pending (async) exceptions. 2.134 + BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception); 2.135 SAFEPOINT; 2.136 } 2.137 2.138 @@ -686,8 +711,7 @@ 2.139 } 2.140 #endif // HACK 2.141 2.142 - 2.143 - // lock method if synchronized 2.144 + // Lock method if synchronized. 2.145 if (METHOD->is_synchronized()) { 2.146 // oop rcvr = locals[0].j.r; 2.147 oop rcvr; 2.148 @@ -697,7 +721,7 @@ 2.149 rcvr = LOCALS_OBJECT(0); 2.150 VERIFY_OOP(rcvr); 2.151 } 2.152 - // The initial monitor is ours for the taking 2.153 + // The initial monitor is ours for the taking. 2.154 // Monitor not filled in frame manager any longer as this caused race condition with biased locking. 2.155 BasicObjectLock* mon = &istate->monitor_base()[-1]; 2.156 mon->set_obj(rcvr); 2.157 @@ -803,6 +827,12 @@ 2.158 // clear the message so we don't confuse ourselves later 2.159 assert(THREAD->pop_frame_in_process(), "wrong frame pop state"); 2.160 istate->set_msg(no_request); 2.161 + if (_compiling) { 2.162 + // Set MDX back to the ProfileData of the invoke bytecode that will be 2.163 + // restarted. 2.164 + SET_MDX(NULL); 2.165 + BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception); 2.166 + } 2.167 THREAD->clr_pop_frame_in_process(); 2.168 goto run; 2.169 } 2.170 @@ -836,6 +866,11 @@ 2.171 if (THREAD->has_pending_exception()) goto handle_exception; 2.172 // Update the pc by the saved amount of the invoke bytecode size 2.173 UPDATE_PC(istate->bcp_advance()); 2.174 + 2.175 + if (_compiling) { 2.176 + // Get or create profile data. Check for pending (async) exceptions. 2.177 + BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception); 2.178 + } 2.179 goto run; 2.180 } 2.181 2.182 @@ -843,6 +878,11 @@ 2.183 // Returned from an opcode that will reexecute. Deopt was 2.184 // a result of a PopFrame request. 2.185 // 2.186 + 2.187 + if (_compiling) { 2.188 + // Get or create profile data. Check for pending (async) exceptions. 
2.189 + BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception); 2.190 + } 2.191 goto run; 2.192 } 2.193 2.194 @@ -865,6 +905,11 @@ 2.195 } 2.196 UPDATE_PC(Bytecodes::length_at(METHOD, pc)); 2.197 if (THREAD->has_pending_exception()) goto handle_exception; 2.198 + 2.199 + if (_compiling) { 2.200 + // Get or create profile data. Check for pending (async) exceptions. 2.201 + BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception); 2.202 + } 2.203 goto run; 2.204 } 2.205 case got_monitors: { 2.206 @@ -1115,6 +1160,11 @@ 2.207 uint16_t reg = Bytes::get_Java_u2(pc + 2); 2.208 2.209 opcode = pc[1]; 2.210 + 2.211 + // Wide and its sub-bytecode are counted as separate instructions. If we 2.212 + // don't account for this here, the bytecode trace skips the next bytecode. 2.213 + DO_UPDATE_INSTRUCTION_COUNT(opcode); 2.214 + 2.215 switch(opcode) { 2.216 case Bytecodes::_aload: 2.217 VERIFY_OOP(LOCALS_OBJECT(reg)); 2.218 @@ -1158,10 +1208,13 @@ 2.219 UPDATE_PC_AND_CONTINUE(6); 2.220 } 2.221 case Bytecodes::_ret: 2.222 + // Profile ret. 2.223 + BI_PROFILE_UPDATE_RET(/*bci=*/((int)(intptr_t)(LOCALS_ADDR(reg)))); 2.224 + // Now, update the pc. 
2.225 pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(reg)); 2.226 UPDATE_PC_AND_CONTINUE(0); 2.227 default: 2.228 - VM_JAVA_ERROR(vmSymbols::java_lang_InternalError(), "undefined opcode"); 2.229 + VM_JAVA_ERROR(vmSymbols::java_lang_InternalError(), "undefined opcode", note_no_trap); 2.230 } 2.231 } 2.232 2.233 @@ -1242,7 +1295,7 @@ 2.234 CASE(_i##opcname): \ 2.235 if (test && (STACK_INT(-1) == 0)) { \ 2.236 VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \ 2.237 - "/ by zero"); \ 2.238 + "/ by zero", note_div0Check_trap); \ 2.239 } \ 2.240 SET_STACK_INT(VMint##opname(STACK_INT(-2), \ 2.241 STACK_INT(-1)), \ 2.242 @@ -1254,7 +1307,7 @@ 2.243 jlong l1 = STACK_LONG(-1); \ 2.244 if (VMlongEqz(l1)) { \ 2.245 VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \ 2.246 - "/ by long zero"); \ 2.247 + "/ by long zero", note_div0Check_trap); \ 2.248 } \ 2.249 } \ 2.250 /* First long at (-1,-2) next long at (-3,-4) */ \ 2.251 @@ -1467,17 +1520,23 @@ 2.252 2.253 #define COMPARISON_OP(name, comparison) \ 2.254 CASE(_if_icmp##name): { \ 2.255 - int skip = (STACK_INT(-2) comparison STACK_INT(-1)) \ 2.256 + const bool cmp = (STACK_INT(-2) comparison STACK_INT(-1)); \ 2.257 + int skip = cmp \ 2.258 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \ 2.259 address branch_pc = pc; \ 2.260 + /* Profile branch. */ \ 2.261 + BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \ 2.262 UPDATE_PC_AND_TOS(skip, -2); \ 2.263 DO_BACKEDGE_CHECKS(skip, branch_pc); \ 2.264 CONTINUE; \ 2.265 } \ 2.266 CASE(_if##name): { \ 2.267 - int skip = (STACK_INT(-1) comparison 0) \ 2.268 + const bool cmp = (STACK_INT(-1) comparison 0); \ 2.269 + int skip = cmp \ 2.270 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \ 2.271 address branch_pc = pc; \ 2.272 + /* Profile branch. 
*/ \ 2.273 + BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \ 2.274 UPDATE_PC_AND_TOS(skip, -1); \ 2.275 DO_BACKEDGE_CHECKS(skip, branch_pc); \ 2.276 CONTINUE; \ 2.277 @@ -1486,9 +1545,12 @@ 2.278 #define COMPARISON_OP2(name, comparison) \ 2.279 COMPARISON_OP(name, comparison) \ 2.280 CASE(_if_acmp##name): { \ 2.281 - int skip = (STACK_OBJECT(-2) comparison STACK_OBJECT(-1)) \ 2.282 + const bool cmp = (STACK_OBJECT(-2) comparison STACK_OBJECT(-1)); \ 2.283 + int skip = cmp \ 2.284 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \ 2.285 address branch_pc = pc; \ 2.286 + /* Profile branch. */ \ 2.287 + BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \ 2.288 UPDATE_PC_AND_TOS(skip, -2); \ 2.289 DO_BACKEDGE_CHECKS(skip, branch_pc); \ 2.290 CONTINUE; \ 2.291 @@ -1496,9 +1558,12 @@ 2.292 2.293 #define NULL_COMPARISON_NOT_OP(name) \ 2.294 CASE(_if##name): { \ 2.295 - int skip = (!(STACK_OBJECT(-1) == NULL)) \ 2.296 + const bool cmp = (!(STACK_OBJECT(-1) == NULL)); \ 2.297 + int skip = cmp \ 2.298 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \ 2.299 address branch_pc = pc; \ 2.300 + /* Profile branch. */ \ 2.301 + BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \ 2.302 UPDATE_PC_AND_TOS(skip, -1); \ 2.303 DO_BACKEDGE_CHECKS(skip, branch_pc); \ 2.304 CONTINUE; \ 2.305 @@ -1506,9 +1571,12 @@ 2.306 2.307 #define NULL_COMPARISON_OP(name) \ 2.308 CASE(_if##name): { \ 2.309 - int skip = ((STACK_OBJECT(-1) == NULL)) \ 2.310 + const bool cmp = ((STACK_OBJECT(-1) == NULL)); \ 2.311 + int skip = cmp \ 2.312 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \ 2.313 address branch_pc = pc; \ 2.314 + /* Profile branch. */ \ 2.315 + BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \ 2.316 UPDATE_PC_AND_TOS(skip, -1); \ 2.317 DO_BACKEDGE_CHECKS(skip, branch_pc); \ 2.318 CONTINUE; \ 2.319 @@ -1531,30 +1599,42 @@ 2.320 int32_t high = Bytes::get_Java_u4((address)&lpc[2]); 2.321 int32_t skip; 2.322 key -= low; 2.323 - skip = ((uint32_t) key > (uint32_t)(high - low)) 2.324 - ? 
Bytes::get_Java_u4((address)&lpc[0]) 2.325 - : Bytes::get_Java_u4((address)&lpc[key + 3]); 2.326 - // Does this really need a full backedge check (osr?) 2.327 + if (((uint32_t) key > (uint32_t)(high - low))) { 2.328 + key = -1; 2.329 + skip = Bytes::get_Java_u4((address)&lpc[0]); 2.330 + } else { 2.331 + skip = Bytes::get_Java_u4((address)&lpc[key + 3]); 2.332 + } 2.333 + // Profile switch. 2.334 + BI_PROFILE_UPDATE_SWITCH(/*switch_index=*/key); 2.335 + // Does this really need a full backedge check (osr)? 2.336 address branch_pc = pc; 2.337 UPDATE_PC_AND_TOS(skip, -1); 2.338 DO_BACKEDGE_CHECKS(skip, branch_pc); 2.339 CONTINUE; 2.340 } 2.341 2.342 - /* Goto pc whose table entry matches specified key */ 2.343 + /* Goto pc whose table entry matches specified key. */ 2.344 2.345 CASE(_lookupswitch): { 2.346 jint* lpc = (jint*)VMalignWordUp(pc+1); 2.347 int32_t key = STACK_INT(-1); 2.348 int32_t skip = Bytes::get_Java_u4((address) lpc); /* default amount */ 2.349 + // Remember index. 2.350 + int index = -1; 2.351 + int newindex = 0; 2.352 int32_t npairs = Bytes::get_Java_u4((address) &lpc[1]); 2.353 while (--npairs >= 0) { 2.354 - lpc += 2; 2.355 - if (key == (int32_t)Bytes::get_Java_u4((address)lpc)) { 2.356 - skip = Bytes::get_Java_u4((address)&lpc[1]); 2.357 - break; 2.358 - } 2.359 + lpc += 2; 2.360 + if (key == (int32_t)Bytes::get_Java_u4((address)lpc)) { 2.361 + skip = Bytes::get_Java_u4((address)&lpc[1]); 2.362 + index = newindex; 2.363 + break; 2.364 + } 2.365 + newindex += 1; 2.366 } 2.367 + // Profile switch. 
2.368 + BI_PROFILE_UPDATE_SWITCH(/*switch_index=*/index); 2.369 address branch_pc = pc; 2.370 UPDATE_PC_AND_TOS(skip, -1); 2.371 DO_BACKEDGE_CHECKS(skip, branch_pc); 2.372 @@ -1639,7 +1719,7 @@ 2.373 if ((uint32_t)index >= (uint32_t)arrObj->length()) { \ 2.374 sprintf(message, "%d", index); \ 2.375 VM_JAVA_ERROR(vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), \ 2.376 - message); \ 2.377 + message, note_rangeCheck_trap); \ 2.378 } 2.379 2.380 /* 32-bit loads. These handle conversion from < 32-bit types */ 2.381 @@ -1713,15 +1793,22 @@ 2.382 // arrObj, index are set 2.383 if (rhsObject != NULL) { 2.384 /* Check assignability of rhsObject into arrObj */ 2.385 - Klass* rhsKlassOop = rhsObject->klass(); // EBX (subclass) 2.386 - Klass* elemKlassOop = ObjArrayKlass::cast(arrObj->klass())->element_klass(); // superklass EAX 2.387 + Klass* rhsKlass = rhsObject->klass(); // EBX (subclass) 2.388 + Klass* elemKlass = ObjArrayKlass::cast(arrObj->klass())->element_klass(); // superklass EAX 2.389 // 2.390 // Check for compatibilty. This check must not GC!! 2.391 // Seems way more expensive now that we must dispatch 2.392 // 2.393 - if (rhsKlassOop != elemKlassOop && !rhsKlassOop->is_subtype_of(elemKlassOop)) { // ebx->is... 2.394 - VM_JAVA_ERROR(vmSymbols::java_lang_ArrayStoreException(), ""); 2.395 + if (rhsKlass != elemKlass && !rhsKlass->is_subtype_of(elemKlass)) { // ebx->is... 2.396 + // Decrement counter if subtype check failed. 2.397 + BI_PROFILE_SUBTYPECHECK_FAILED(rhsKlass); 2.398 + VM_JAVA_ERROR(vmSymbols::java_lang_ArrayStoreException(), "", note_arrayCheck_trap); 2.399 } 2.400 + // Profile checkcast with null_seen and receiver. 2.401 + BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/false, rhsKlass); 2.402 + } else { 2.403 + // Profile checkcast with null_seen and receiver. 
2.404 + BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/true, NULL); 2.405 } 2.406 ((objArrayOopDesc *) arrObj)->obj_at_put(index, rhsObject); 2.407 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3); 2.408 @@ -2119,10 +2206,14 @@ 2.409 if (UseTLAB) { 2.410 result = (oop) THREAD->tlab().allocate(obj_size); 2.411 } 2.412 + // Disable non-TLAB-based fast-path, because profiling requires that all 2.413 + // allocations go through InterpreterRuntime::_new() if THREAD->tlab().allocate 2.414 + // returns NULL. 2.415 +#ifndef CC_INTERP_PROFILE 2.416 if (result == NULL) { 2.417 need_zero = true; 2.418 // Try allocate in shared eden 2.419 - retry: 2.420 + retry: 2.421 HeapWord* compare_to = *Universe::heap()->top_addr(); 2.422 HeapWord* new_top = compare_to + obj_size; 2.423 if (new_top <= *Universe::heap()->end_addr()) { 2.424 @@ -2132,6 +2223,7 @@ 2.425 result = (oop) compare_to; 2.426 } 2.427 } 2.428 +#endif 2.429 if (result != NULL) { 2.430 // Initialize object (if nonzero size and need) and then the header 2.431 if (need_zero ) { 2.432 @@ -2187,61 +2279,63 @@ 2.433 if (STACK_OBJECT(-1) != NULL) { 2.434 VERIFY_OOP(STACK_OBJECT(-1)); 2.435 u2 index = Bytes::get_Java_u2(pc+1); 2.436 - if (ProfileInterpreter) { 2.437 - // needs Profile_checkcast QQQ 2.438 - ShouldNotReachHere(); 2.439 - } 2.440 // Constant pool may have actual klass or unresolved klass. If it is 2.441 - // unresolved we must resolve it 2.442 + // unresolved we must resolve it. 2.443 if (METHOD->constants()->tag_at(index).is_unresolved_klass()) { 2.444 CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception); 2.445 } 2.446 Klass* klassOf = (Klass*) METHOD->constants()->slot_at(index).get_klass(); 2.447 - Klass* objKlassOop = STACK_OBJECT(-1)->klass(); //ebx 2.448 + Klass* objKlass = STACK_OBJECT(-1)->klass(); // ebx 2.449 // 2.450 // Check for compatibilty. This check must not GC!! 2.451 - // Seems way more expensive now that we must dispatch 2.452 + // Seems way more expensive now that we must dispatch. 
2.453 // 2.454 - if (objKlassOop != klassOf && 2.455 - !objKlassOop->is_subtype_of(klassOf)) { 2.456 + if (objKlass != klassOf && !objKlass->is_subtype_of(klassOf)) { 2.457 + // Decrement counter at checkcast. 2.458 + BI_PROFILE_SUBTYPECHECK_FAILED(objKlass); 2.459 ResourceMark rm(THREAD); 2.460 - const char* objName = objKlassOop->external_name(); 2.461 + const char* objName = objKlass->external_name(); 2.462 const char* klassName = klassOf->external_name(); 2.463 char* message = SharedRuntime::generate_class_cast_message( 2.464 objName, klassName); 2.465 - VM_JAVA_ERROR(vmSymbols::java_lang_ClassCastException(), message); 2.466 + VM_JAVA_ERROR(vmSymbols::java_lang_ClassCastException(), message, note_classCheck_trap); 2.467 } 2.468 + // Profile checkcast with null_seen and receiver. 2.469 + BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/false, objKlass); 2.470 } else { 2.471 - if (UncommonNullCast) { 2.472 -// istate->method()->set_null_cast_seen(); 2.473 -// [RGV] Not sure what to do here! 2.474 - 2.475 - } 2.476 + // Profile checkcast with null_seen and receiver. 2.477 + BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/true, NULL); 2.478 } 2.479 UPDATE_PC_AND_CONTINUE(3); 2.480 2.481 CASE(_instanceof): 2.482 if (STACK_OBJECT(-1) == NULL) { 2.483 SET_STACK_INT(0, -1); 2.484 + // Profile instanceof with null_seen and receiver. 2.485 + BI_PROFILE_UPDATE_INSTANCEOF(/*null_seen=*/true, NULL); 2.486 } else { 2.487 VERIFY_OOP(STACK_OBJECT(-1)); 2.488 u2 index = Bytes::get_Java_u2(pc+1); 2.489 // Constant pool may have actual klass or unresolved klass. If it is 2.490 - // unresolved we must resolve it 2.491 + // unresolved we must resolve it. 
2.492 if (METHOD->constants()->tag_at(index).is_unresolved_klass()) { 2.493 CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception); 2.494 } 2.495 Klass* klassOf = (Klass*) METHOD->constants()->slot_at(index).get_klass(); 2.496 - Klass* objKlassOop = STACK_OBJECT(-1)->klass(); 2.497 + Klass* objKlass = STACK_OBJECT(-1)->klass(); 2.498 // 2.499 // Check for compatibilty. This check must not GC!! 2.500 - // Seems way more expensive now that we must dispatch 2.501 + // Seems way more expensive now that we must dispatch. 2.502 // 2.503 - if ( objKlassOop == klassOf || objKlassOop->is_subtype_of(klassOf)) { 2.504 + if ( objKlass == klassOf || objKlass->is_subtype_of(klassOf)) { 2.505 SET_STACK_INT(1, -1); 2.506 } else { 2.507 SET_STACK_INT(0, -1); 2.508 + // Decrement counter at checkcast. 2.509 + BI_PROFILE_SUBTYPECHECK_FAILED(objKlass); 2.510 } 2.511 + // Profile instanceof with null_seen and receiver. 2.512 + BI_PROFILE_UPDATE_INSTANCEOF(/*null_seen=*/false, objKlass); 2.513 } 2.514 UPDATE_PC_AND_CONTINUE(3); 2.515 2.516 @@ -2384,6 +2478,9 @@ 2.517 istate->set_callee_entry_point(method->from_interpreted_entry()); 2.518 istate->set_bcp_advance(5); 2.519 2.520 + // Invokedynamic has got a call counter, just like an invokestatic -> increment! 2.521 + BI_PROFILE_UPDATE_CALL(); 2.522 + 2.523 UPDATE_PC_AND_RETURN(0); // I'll be back... 2.524 } 2.525 2.526 @@ -2416,6 +2513,9 @@ 2.527 istate->set_callee_entry_point(method->from_interpreted_entry()); 2.528 istate->set_bcp_advance(3); 2.529 2.530 + // Invokehandle has got a call counter, just like a final call -> increment! 2.531 + BI_PROFILE_UPDATE_FINALCALL(); 2.532 + 2.533 UPDATE_PC_AND_RETURN(0); // I'll be back... 2.534 } 2.535 2.536 @@ -2443,14 +2543,18 @@ 2.537 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); 2.538 if (cache->is_vfinal()) { 2.539 callee = cache->f2_as_vfinal_method(); 2.540 + // Profile 'special case of invokeinterface' final call. 
2.541 + BI_PROFILE_UPDATE_FINALCALL(); 2.542 } else { 2.543 - // get receiver 2.544 + // Get receiver. 2.545 int parms = cache->parameter_size(); 2.546 - // Same comments as invokevirtual apply here 2.547 - VERIFY_OOP(STACK_OBJECT(-parms)); 2.548 - InstanceKlass* rcvrKlass = (InstanceKlass*) 2.549 - STACK_OBJECT(-parms)->klass(); 2.550 + // Same comments as invokevirtual apply here. 2.551 + oop rcvr = STACK_OBJECT(-parms); 2.552 + VERIFY_OOP(rcvr); 2.553 + InstanceKlass* rcvrKlass = (InstanceKlass*)rcvr->klass(); 2.554 callee = (Method*) rcvrKlass->start_of_vtable()[ cache->f2_as_index()]; 2.555 + // Profile 'special case of invokeinterface' virtual call. 2.556 + BI_PROFILE_UPDATE_VIRTUALCALL(rcvr->klass()); 2.557 } 2.558 istate->set_callee(callee); 2.559 istate->set_callee_entry_point(callee->from_interpreted_entry()); 2.560 @@ -2481,15 +2585,18 @@ 2.561 // interface. The link resolver checks this but only for the first 2.562 // time this interface is called. 2.563 if (i == int2->itable_length()) { 2.564 - VM_JAVA_ERROR(vmSymbols::java_lang_IncompatibleClassChangeError(), ""); 2.565 + VM_JAVA_ERROR(vmSymbols::java_lang_IncompatibleClassChangeError(), "", note_no_trap); 2.566 } 2.567 int mindex = cache->f2_as_index(); 2.568 itableMethodEntry* im = ki->first_method_entry(rcvr->klass()); 2.569 callee = im[mindex].method(); 2.570 if (callee == NULL) { 2.571 - VM_JAVA_ERROR(vmSymbols::java_lang_AbstractMethodError(), ""); 2.572 + VM_JAVA_ERROR(vmSymbols::java_lang_AbstractMethodError(), "", note_no_trap); 2.573 } 2.574 2.575 + // Profile virtual call. 
2.576 + BI_PROFILE_UPDATE_VIRTUALCALL(rcvr->klass()); 2.577 + 2.578 istate->set_callee(callee); 2.579 istate->set_callee_entry_point(callee->from_interpreted_entry()); 2.580 #ifdef VM_JVMTI 2.581 @@ -2521,8 +2628,11 @@ 2.582 Method* callee; 2.583 if ((Bytecodes::Code)opcode == Bytecodes::_invokevirtual) { 2.584 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); 2.585 - if (cache->is_vfinal()) callee = cache->f2_as_vfinal_method(); 2.586 - else { 2.587 + if (cache->is_vfinal()) { 2.588 + callee = cache->f2_as_vfinal_method(); 2.589 + // Profile final call. 2.590 + BI_PROFILE_UPDATE_FINALCALL(); 2.591 + } else { 2.592 // get receiver 2.593 int parms = cache->parameter_size(); 2.594 // this works but needs a resourcemark and seems to create a vtable on every call: 2.595 @@ -2531,8 +2641,9 @@ 2.596 // this fails with an assert 2.597 // InstanceKlass* rcvrKlass = InstanceKlass::cast(STACK_OBJECT(-parms)->klass()); 2.598 // but this works 2.599 - VERIFY_OOP(STACK_OBJECT(-parms)); 2.600 - InstanceKlass* rcvrKlass = (InstanceKlass*) STACK_OBJECT(-parms)->klass(); 2.601 + oop rcvr = STACK_OBJECT(-parms); 2.602 + VERIFY_OOP(rcvr); 2.603 + InstanceKlass* rcvrKlass = (InstanceKlass*)rcvr->klass(); 2.604 /* 2.605 Executing this code in java.lang.String: 2.606 public String(char value[]) { 2.607 @@ -2550,12 +2661,17 @@ 2.608 2.609 */ 2.610 callee = (Method*) rcvrKlass->start_of_vtable()[ cache->f2_as_index()]; 2.611 + // Profile virtual call. 2.612 + BI_PROFILE_UPDATE_VIRTUALCALL(rcvr->klass()); 2.613 } 2.614 } else { 2.615 if ((Bytecodes::Code)opcode == Bytecodes::_invokespecial) { 2.616 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); 2.617 } 2.618 callee = cache->f1_as_method(); 2.619 + 2.620 + // Profile call. 2.621 + BI_PROFILE_UPDATE_CALL(); 2.622 } 2.623 2.624 istate->set_callee(callee); 2.625 @@ -2607,6 +2723,8 @@ 2.626 CASE(_goto): 2.627 { 2.628 int16_t offset = (int16_t)Bytes::get_Java_u2(pc + 1); 2.629 + // Profile jump. 
2.630 + BI_PROFILE_UPDATE_JUMP(); 2.631 address branch_pc = pc; 2.632 UPDATE_PC(offset); 2.633 DO_BACKEDGE_CHECKS(offset, branch_pc); 2.634 @@ -2623,6 +2741,8 @@ 2.635 CASE(_goto_w): 2.636 { 2.637 int32_t offset = Bytes::get_Java_u4(pc + 1); 2.638 + // Profile jump. 2.639 + BI_PROFILE_UPDATE_JUMP(); 2.640 address branch_pc = pc; 2.641 UPDATE_PC(offset); 2.642 DO_BACKEDGE_CHECKS(offset, branch_pc); 2.643 @@ -2632,6 +2752,9 @@ 2.644 /* return from a jsr or jsr_w */ 2.645 2.646 CASE(_ret): { 2.647 + // Profile ret. 2.648 + BI_PROFILE_UPDATE_RET(/*bci=*/((int)(intptr_t)(LOCALS_ADDR(pc[1])))); 2.649 + // Now, update the pc. 2.650 pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(pc[1])); 2.651 UPDATE_PC_AND_CONTINUE(0); 2.652 } 2.653 @@ -2713,6 +2836,9 @@ 2.654 } 2.655 // for AbortVMOnException flag 2.656 NOT_PRODUCT(Exceptions::debug_check_abort(except_oop)); 2.657 + 2.658 + // Update profiling data. 2.659 + BI_PROFILE_ALIGN_TO_CURRENT_BCI(); 2.660 goto run; 2.661 } 2.662 if (TraceExceptions) { 2.663 @@ -2920,7 +3046,7 @@ 2.664 oop rcvr = base->obj(); 2.665 if (rcvr == NULL) { 2.666 if (!suppress_error) { 2.667 - VM_JAVA_ERROR_NO_JUMP(vmSymbols::java_lang_NullPointerException(), ""); 2.668 + VM_JAVA_ERROR_NO_JUMP(vmSymbols::java_lang_NullPointerException(), "", note_nullCheck_trap); 2.669 illegal_state_oop = THREAD->pending_exception(); 2.670 THREAD->clear_pending_exception(); 2.671 } 2.672 @@ -3008,9 +3134,9 @@ 2.673 // A pending exception that was pending prior to a possible popping frame 2.674 // overrides the popping frame. 2.675 // 2.676 - assert(!suppress_error || suppress_error && illegal_state_oop() == NULL, "Error was not suppressed"); 2.677 + assert(!suppress_error || (suppress_error && illegal_state_oop() == NULL), "Error was not suppressed"); 2.678 if (illegal_state_oop() != NULL || original_exception() != NULL) { 2.679 - // inform the frame manager we have no result 2.680 + // Inform the frame manager we have no result. 
2.681 istate->set_msg(throwing_exception); 2.682 if (illegal_state_oop() != NULL) 2.683 THREAD->set_pending_exception(illegal_state_oop(), NULL, 0);
3.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 3.2 +++ b/src/share/vm/interpreter/bytecodeInterpreterProfiling.hpp Sun Sep 15 15:28:58 2013 +0200 3.3 @@ -0,0 +1,308 @@ 3.4 +/* 3.5 + * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved. 3.6 + * Copyright 2012, 2013 SAP AG. All rights reserved. 3.7 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 3.8 + * 3.9 + * This code is free software; you can redistribute it and/or modify it 3.10 + * under the terms of the GNU General Public License version 2 only, as 3.11 + * published by the Free Software Foundation. 3.12 + * 3.13 + * This code is distributed in the hope that it will be useful, but WITHOUT 3.14 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 3.15 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 3.16 + * version 2 for more details (a copy is included in the LICENSE file that 3.17 + * accompanied this code). 3.18 + * 3.19 + * You should have received a copy of the GNU General Public License version 3.20 + * 2 along with this work; if not, write to the Free Software Foundation, 3.21 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 3.22 + * 3.23 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 3.24 + * or visit www.oracle.com if you need additional information or have any 3.25 + * questions. 3.26 + * 3.27 + */ 3.28 + 3.29 +// This file defines a set of macros which are used by the c++-interpreter 3.30 +// for updating a method's methodData object. 3.31 + 3.32 + 3.33 +#ifndef SHARE_VM_INTERPRETER_BYTECODEINTERPRETERPROFILING_HPP 3.34 +#define SHARE_VM_INTERPRETER_BYTECODEINTERPRETERPROFILING_HPP 3.35 + 3.36 + 3.37 +// Global settings ///////////////////////////////////////////////////////////// 3.38 + 3.39 + 3.40 +// Enables profiling support. 
3.41 +#if defined(COMPILER2) 3.42 +#define CC_INTERP_PROFILE 3.43 +#endif 3.44 + 3.45 +// Enables assertions for profiling code (also works in product-builds). 3.46 +// #define CC_INTERP_PROFILE_WITH_ASSERTIONS 3.47 + 3.48 + 3.49 +#ifdef CC_INTERP 3.50 + 3.51 +// Empty dummy implementations if profiling code is switched off. ////////////// 3.52 + 3.53 +#ifndef CC_INTERP_PROFILE 3.54 + 3.55 +#define SET_MDX(mdx) 3.56 + 3.57 +#define BI_PROFILE_GET_OR_CREATE_METHOD_DATA(exception_handler) \ 3.58 + if (ProfileInterpreter) { \ 3.59 + ShouldNotReachHere(); \ 3.60 + } 3.61 + 3.62 +#define BI_PROFILE_ALIGN_TO_CURRENT_BCI() 3.63 + 3.64 +#define BI_PROFILE_UPDATE_JUMP() 3.65 +#define BI_PROFILE_UPDATE_BRANCH(is_taken) 3.66 +#define BI_PROFILE_UPDATE_RET(bci) 3.67 +#define BI_PROFILE_SUBTYPECHECK_FAILED(receiver) 3.68 +#define BI_PROFILE_UPDATE_CHECKCAST(null_seen, receiver) 3.69 +#define BI_PROFILE_UPDATE_INSTANCEOF(null_seen, receiver) 3.70 +#define BI_PROFILE_UPDATE_CALL() 3.71 +#define BI_PROFILE_UPDATE_FINALCALL() 3.72 +#define BI_PROFILE_UPDATE_VIRTUALCALL(receiver) 3.73 +#define BI_PROFILE_UPDATE_SWITCH(switch_index) 3.74 + 3.75 + 3.76 +#else 3.77 + 3.78 + 3.79 +// Non-dummy implementations /////////////////////////////////////////////////// 3.80 + 3.81 +// Accessors for the current method data pointer 'mdx'. 3.82 +#define MDX() (istate->mdx()) 3.83 +#define SET_MDX(mdx) \ 3.84 + if (TraceProfileInterpreter) { \ 3.85 + /* Let it look like TraceBytecodes' format. */ \ 3.86 + tty->print_cr("[%d] %4d " \ 3.87 + "mdx " PTR_FORMAT "(%d)" \ 3.88 + " " \ 3.89 + " \t-> " PTR_FORMAT "(%d)", \ 3.90 + (int) THREAD->osthread()->thread_id(), \ 3.91 + BCI(), \ 3.92 + MDX(), \ 3.93 + (MDX() == NULL \ 3.94 + ? 
0 \ 3.95 + : istate->method()->method_data()->dp_to_di((address)MDX())), \ 3.96 + mdx, \ 3.97 + istate->method()->method_data()->dp_to_di((address)mdx) \ 3.98 + ); \ 3.99 + }; \ 3.100 + istate->set_mdx(mdx); 3.101 + 3.102 + 3.103 +// Dumps the profiling method data for the current method. 3.104 +#ifdef PRODUCT 3.105 +#define BI_PROFILE_PRINT_METHOD_DATA() 3.106 +#else // PRODUCT 3.107 +#define BI_PROFILE_PRINT_METHOD_DATA() \ 3.108 + { \ 3.109 + ttyLocker ttyl; \ 3.110 + MethodData *md = istate->method()->method_data(); \ 3.111 + tty->cr(); \ 3.112 + tty->print("method data at mdx " PTR_FORMAT "(0) for", \ 3.113 + md->data_layout_at(md->bci_to_di(0))); \ 3.114 + istate->method()->print_short_name(tty); \ 3.115 + tty->cr(); \ 3.116 + if (md != NULL) { \ 3.117 + md->print_data_on(tty); \ 3.118 + address mdx = (address) MDX(); \ 3.119 + if (mdx != NULL) { \ 3.120 + tty->print_cr("current mdx " PTR_FORMAT "(%d)", \ 3.121 + mdx, \ 3.122 + istate->method()->method_data()->dp_to_di(mdx)); \ 3.123 + } \ 3.124 + } else { \ 3.125 + tty->print_cr("no method data"); \ 3.126 + } \ 3.127 + } 3.128 +#endif // PRODUCT 3.129 + 3.130 + 3.131 +// Gets or creates the profiling method data and initializes mdx. 3.132 +#define BI_PROFILE_GET_OR_CREATE_METHOD_DATA(exception_handler) \ 3.133 + if (ProfileInterpreter && MDX() == NULL) { \ 3.134 + /* Mdx is not yet initialized for this activation. */ \ 3.135 + MethodData *md = istate->method()->method_data(); \ 3.136 + if (md == NULL) { \ 3.137 + MethodCounters* mcs; \ 3.138 + GET_METHOD_COUNTERS(mcs); \ 3.139 + /* The profiling method data doesn't exist for this method, */ \ 3.140 + /* create it if the counters have overflowed. */ \ 3.141 + if (mcs->invocation_counter() \ 3.142 + ->reached_ProfileLimit(mcs->backedge_counter())) { \ 3.143 + /* Must use CALL_VM, because an async exception may be pending. 
*/ \ 3.144 + CALL_VM((InterpreterRuntime::profile_method(THREAD)), \ 3.145 + exception_handler); \ 3.146 + md = istate->method()->method_data(); \ 3.147 + if (md != NULL) { \ 3.148 + if (TraceProfileInterpreter) { \ 3.149 + BI_PROFILE_PRINT_METHOD_DATA(); \ 3.150 + } \ 3.151 + Method *m = istate->method(); \ 3.152 + int bci = m->bci_from(pc); \ 3.153 + jint di = md->bci_to_di(bci); \ 3.154 + SET_MDX(md->data_layout_at(di)); \ 3.155 + } \ 3.156 + } \ 3.157 + } else { \ 3.158 + /* The profiling method data exists, align the method data pointer */ \ 3.159 + /* mdx to the current bytecode index. */ \ 3.160 + if (TraceProfileInterpreter) { \ 3.161 + BI_PROFILE_PRINT_METHOD_DATA(); \ 3.162 + } \ 3.163 + SET_MDX(md->data_layout_at(md->bci_to_di(BCI()))); \ 3.164 + } \ 3.165 + } 3.166 + 3.167 + 3.168 +// Asserts that the current method data pointer mdx corresponds 3.169 +// to the current bytecode. 3.170 +#if defined(CC_INTERP_PROFILE_WITH_ASSERTIONS) 3.171 +#define BI_PROFILE_CHECK_MDX() \ 3.172 + { \ 3.173 + MethodData *md = istate->method()->method_data(); \ 3.174 + address mdx = (address) MDX(); \ 3.175 + address mdx2 = (address) md->data_layout_at(md->bci_to_di(BCI())); \ 3.176 + guarantee(md != NULL, "1"); \ 3.177 + guarantee(mdx != NULL, "2"); \ 3.178 + guarantee(mdx2 != NULL, "3"); \ 3.179 + if (mdx != mdx2) { \ 3.180 + BI_PROFILE_PRINT_METHOD_DATA(); \ 3.181 + fatal3("invalid mdx at bci %d:" \ 3.182 + " was " PTR_FORMAT \ 3.183 + " but expected " PTR_FORMAT, \ 3.184 + BCI(), \ 3.185 + mdx, \ 3.186 + mdx2); \ 3.187 + } \ 3.188 + } 3.189 +#else 3.190 +#define BI_PROFILE_CHECK_MDX() 3.191 +#endif 3.192 + 3.193 + 3.194 +// Aligns the method data pointer mdx to the current bytecode index. 
3.195 +#define BI_PROFILE_ALIGN_TO_CURRENT_BCI() \ 3.196 + if (ProfileInterpreter && MDX() != NULL) { \ 3.197 + MethodData *md = istate->method()->method_data(); \ 3.198 + SET_MDX(md->data_layout_at(md->bci_to_di(BCI()))); \ 3.199 + } 3.200 + 3.201 + 3.202 +// Updates profiling data for a jump. 3.203 +#define BI_PROFILE_UPDATE_JUMP() \ 3.204 + if (ProfileInterpreter && MDX() != NULL) { \ 3.205 + BI_PROFILE_CHECK_MDX(); \ 3.206 + JumpData::increment_taken_count_no_overflow(MDX()); \ 3.207 + /* Remember last branch taken count. */ \ 3.208 + mdo_last_branch_taken_count = JumpData::taken_count(MDX()); \ 3.209 + SET_MDX(JumpData::advance_taken(MDX())); \ 3.210 + } 3.211 + 3.212 + 3.213 +// Updates profiling data for a taken/not taken branch. 3.214 +#define BI_PROFILE_UPDATE_BRANCH(is_taken) \ 3.215 + if (ProfileInterpreter && MDX() != NULL) { \ 3.216 + BI_PROFILE_CHECK_MDX(); \ 3.217 + if (is_taken) { \ 3.218 + BranchData::increment_taken_count_no_overflow(MDX()); \ 3.219 + /* Remember last branch taken count. */ \ 3.220 + mdo_last_branch_taken_count = BranchData::taken_count(MDX()); \ 3.221 + SET_MDX(BranchData::advance_taken(MDX())); \ 3.222 + } else { \ 3.223 + BranchData::increment_not_taken_count_no_overflow(MDX()); \ 3.224 + SET_MDX(BranchData::advance_not_taken(MDX())); \ 3.225 + } \ 3.226 + } 3.227 + 3.228 + 3.229 +// Updates profiling data for a ret with given bci. 3.230 +#define BI_PROFILE_UPDATE_RET(bci) \ 3.231 + if (ProfileInterpreter && MDX() != NULL) { \ 3.232 + BI_PROFILE_CHECK_MDX(); \ 3.233 + MethodData *md = istate->method()->method_data(); \ 3.234 +/* FIXME: there is more to do here than increment and advance(mdx)! */ \ 3.235 + CounterData::increment_count_no_overflow(MDX()); \ 3.236 + SET_MDX(RetData::advance(md, bci)); \ 3.237 + } 3.238 + 3.239 +// Decrement counter at checkcast if the subtype check fails (as template 3.240 +// interpreter does!). 
3.241 +#define BI_PROFILE_SUBTYPECHECK_FAILED(receiver) \ 3.242 + if (ProfileInterpreter && MDX() != NULL) { \ 3.243 + BI_PROFILE_CHECK_MDX(); \ 3.244 + ReceiverTypeData::increment_receiver_count_no_overflow(MDX(), receiver); \ 3.245 + ReceiverTypeData::decrement_count(MDX()); \ 3.246 + } 3.247 + 3.248 +// Updates profiling data for a checkcast (was a null seen? which receiver?). 3.249 +#define BI_PROFILE_UPDATE_CHECKCAST(null_seen, receiver) \ 3.250 + if (ProfileInterpreter && MDX() != NULL) { \ 3.251 + BI_PROFILE_CHECK_MDX(); \ 3.252 + if (null_seen) { \ 3.253 + ReceiverTypeData::set_null_seen(MDX()); \ 3.254 + } else { \ 3.255 + /* Template interpreter doesn't increment count. */ \ 3.256 + /* ReceiverTypeData::increment_count_no_overflow(MDX()); */ \ 3.257 + ReceiverTypeData::increment_receiver_count_no_overflow(MDX(), receiver); \ 3.258 + } \ 3.259 + SET_MDX(ReceiverTypeData::advance(MDX())); \ 3.260 + } 3.261 + 3.262 + 3.263 +// Updates profiling data for an instanceof (was a null seen? which receiver?). 3.264 +#define BI_PROFILE_UPDATE_INSTANCEOF(null_seen, receiver) \ 3.265 + BI_PROFILE_UPDATE_CHECKCAST(null_seen, receiver) 3.266 + 3.267 + 3.268 +// Updates profiling data for a call. 3.269 +#define BI_PROFILE_UPDATE_CALL() \ 3.270 + if (ProfileInterpreter && MDX() != NULL) { \ 3.271 + BI_PROFILE_CHECK_MDX(); \ 3.272 + CounterData::increment_count_no_overflow(MDX()); \ 3.273 + SET_MDX(CounterData::advance(MDX())); \ 3.274 + } 3.275 + 3.276 + 3.277 +// Updates profiling data for a final call. 3.278 +#define BI_PROFILE_UPDATE_FINALCALL() \ 3.279 + if (ProfileInterpreter && MDX() != NULL) { \ 3.280 + BI_PROFILE_CHECK_MDX(); \ 3.281 + VirtualCallData::increment_count_no_overflow(MDX()); \ 3.282 + SET_MDX(VirtualCallData::advance(MDX())); \ 3.283 + } 3.284 + 3.285 + 3.286 +// Updates profiling data for a virtual call with given receiver Klass. 
3.287 +#define BI_PROFILE_UPDATE_VIRTUALCALL(receiver) \ 3.288 + if (ProfileInterpreter && MDX() != NULL) { \ 3.289 + BI_PROFILE_CHECK_MDX(); \ 3.290 + VirtualCallData::increment_receiver_count_no_overflow(MDX(), receiver); \ 3.291 + SET_MDX(VirtualCallData::advance(MDX())); \ 3.292 + } 3.293 + 3.294 + 3.295 +// Updates profiling data for a switch (tableswitch or lookupswitch) with 3.296 +// given taken index (-1 means default case was taken). 3.297 +#define BI_PROFILE_UPDATE_SWITCH(switch_index) \ 3.298 + if (ProfileInterpreter && MDX() != NULL) { \ 3.299 + BI_PROFILE_CHECK_MDX(); \ 3.300 + MultiBranchData::increment_count_no_overflow(MDX(), switch_index); \ 3.301 + SET_MDX(MultiBranchData::advance(MDX(), switch_index)); \ 3.302 + } 3.303 + 3.304 + 3.305 +// The end ///////////////////////////////////////////////////////////////////// 3.306 + 3.307 +#endif // CC_INTERP_PROFILE 3.308 + 3.309 +#endif // CC_INTERP 3.310 + 3.311 +#endif // SHARE_VM_INTERPRETER_BYTECODECINTERPRETERPROFILING_HPP
4.1 --- a/src/share/vm/interpreter/interpreterRuntime.cpp Thu Sep 19 17:31:42 2013 +0200 4.2 +++ b/src/share/vm/interpreter/interpreterRuntime.cpp Sun Sep 15 15:28:58 2013 +0200 4.3 @@ -241,18 +241,15 @@ 4.4 //------------------------------------------------------------------------------------------------------------------------ 4.5 // Exceptions 4.6 4.7 -// Assume the compiler is (or will be) interested in this event. 4.8 -// If necessary, create an MDO to hold the information, and record it. 4.9 -void InterpreterRuntime::note_trap(JavaThread* thread, int reason, TRAPS) { 4.10 - assert(ProfileTraps, "call me only if profiling"); 4.11 - methodHandle trap_method(thread, method(thread)); 4.12 - 4.13 +void InterpreterRuntime::note_trap_inner(JavaThread* thread, int reason, 4.14 + methodHandle trap_method, int trap_bci, TRAPS) { 4.15 if (trap_method.not_null()) { 4.16 MethodData* trap_mdo = trap_method->method_data(); 4.17 if (trap_mdo == NULL) { 4.18 Method::build_interpreter_method_data(trap_method, THREAD); 4.19 if (HAS_PENDING_EXCEPTION) { 4.20 - assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here"); 4.21 + assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), 4.22 + "we expect only an OOM error here"); 4.23 CLEAR_PENDING_EXCEPTION; 4.24 } 4.25 trap_mdo = trap_method->method_data(); 4.26 @@ -261,12 +258,42 @@ 4.27 if (trap_mdo != NULL) { 4.28 // Update per-method count of trap events. The interpreter 4.29 // is updating the MDO to simulate the effect of compiler traps. 4.30 - int trap_bci = trap_method->bci_from(bcp(thread)); 4.31 Deoptimization::update_method_data_from_interpreter(trap_mdo, trap_bci, reason); 4.32 } 4.33 } 4.34 } 4.35 4.36 +// Assume the compiler is (or will be) interested in this event. 4.37 +// If necessary, create an MDO to hold the information, and record it. 
4.38 +void InterpreterRuntime::note_trap(JavaThread* thread, int reason, TRAPS) { 4.39 + assert(ProfileTraps, "call me only if profiling"); 4.40 + methodHandle trap_method(thread, method(thread)); 4.41 + int trap_bci = trap_method->bci_from(bcp(thread)); 4.42 + note_trap_inner(thread, reason, trap_method, trap_bci, THREAD); 4.43 +} 4.44 + 4.45 +#ifdef CC_INTERP 4.46 +// As legacy note_trap, but we have more arguments. 4.47 +IRT_ENTRY(void, InterpreterRuntime::note_trap(JavaThread* thread, int reason, Method *method, int trap_bci)) 4.48 + methodHandle trap_method(method); 4.49 + note_trap_inner(thread, reason, trap_method, trap_bci, THREAD); 4.50 +IRT_END 4.51 + 4.52 +// Class Deoptimization is not visible in BytecodeInterpreter, so we need a wrapper 4.53 +// for each exception. 4.54 +void InterpreterRuntime::note_nullCheck_trap(JavaThread* thread, Method *method, int trap_bci) 4.55 + { if (ProfileTraps) note_trap(thread, Deoptimization::Reason_null_check, method, trap_bci); } 4.56 +void InterpreterRuntime::note_div0Check_trap(JavaThread* thread, Method *method, int trap_bci) 4.57 + { if (ProfileTraps) note_trap(thread, Deoptimization::Reason_div0_check, method, trap_bci); } 4.58 +void InterpreterRuntime::note_rangeCheck_trap(JavaThread* thread, Method *method, int trap_bci) 4.59 + { if (ProfileTraps) note_trap(thread, Deoptimization::Reason_range_check, method, trap_bci); } 4.60 +void InterpreterRuntime::note_classCheck_trap(JavaThread* thread, Method *method, int trap_bci) 4.61 + { if (ProfileTraps) note_trap(thread, Deoptimization::Reason_class_check, method, trap_bci); } 4.62 +void InterpreterRuntime::note_arrayCheck_trap(JavaThread* thread, Method *method, int trap_bci) 4.63 + { if (ProfileTraps) note_trap(thread, Deoptimization::Reason_array_check, method, trap_bci); } 4.64 +#endif // CC_INTERP 4.65 + 4.66 + 4.67 static Handle get_preinitialized_exception(Klass* k, TRAPS) { 4.68 // get klass 4.69 InstanceKlass* klass = InstanceKlass::cast(k);
5.1 --- a/src/share/vm/interpreter/interpreterRuntime.hpp Thu Sep 19 17:31:42 2013 +0200 5.2 +++ b/src/share/vm/interpreter/interpreterRuntime.hpp Sun Sep 15 15:28:58 2013 +0200 5.3 @@ -66,9 +66,15 @@ 5.4 5.5 static ConstantPoolCacheEntry* cache_entry_at(JavaThread *thread, int i) { return method(thread)->constants()->cache()->entry_at(i); } 5.6 static ConstantPoolCacheEntry* cache_entry(JavaThread *thread) { return cache_entry_at(thread, Bytes::get_native_u2(bcp(thread) + 1)); } 5.7 + static void note_trap_inner(JavaThread* thread, int reason, 5.8 + methodHandle trap_method, int trap_bci, TRAPS); 5.9 static void note_trap(JavaThread *thread, int reason, TRAPS); 5.10 +#ifdef CC_INTERP 5.11 + // Profile traps in C++ interpreter. 5.12 + static void note_trap(JavaThread* thread, int reason, Method *method, int trap_bci); 5.13 +#endif // CC_INTERP 5.14 5.15 - // Inner work method for Interpreter's frequency counter overflow 5.16 + // Inner work method for Interpreter's frequency counter overflow. 5.17 static nmethod* frequency_counter_overflow_inner(JavaThread* thread, address branch_bcp); 5.18 5.19 public: 5.20 @@ -100,6 +106,17 @@ 5.21 #endif 5.22 static void throw_pending_exception(JavaThread* thread); 5.23 5.24 +#ifdef CC_INTERP 5.25 + // Profile traps in C++ interpreter. 5.26 + static void note_nullCheck_trap (JavaThread* thread, Method *method, int trap_bci); 5.27 + static void note_div0Check_trap (JavaThread* thread, Method *method, int trap_bci); 5.28 + static void note_rangeCheck_trap(JavaThread* thread, Method *method, int trap_bci); 5.29 + static void note_classCheck_trap(JavaThread* thread, Method *method, int trap_bci); 5.30 + static void note_arrayCheck_trap(JavaThread* thread, Method *method, int trap_bci); 5.31 + // A dummy for macros that should not profile traps. 
5.32 + static void note_no_trap(JavaThread* thread, Method *method, int trap_bci) {} 5.33 +#endif // CC_INTERP 5.34 + 5.35 // Statics & fields 5.36 static void resolve_get_put(JavaThread* thread, Bytecodes::Code bytecode); 5.37
6.1 --- a/src/share/vm/interpreter/invocationCounter.hpp Thu Sep 19 17:31:42 2013 +0200 6.2 +++ b/src/share/vm/interpreter/invocationCounter.hpp Sun Sep 15 15:28:58 2013 +0200 6.3 @@ -99,16 +99,24 @@ 6.4 int get_BackwardBranchLimit() const { return InterpreterBackwardBranchLimit >> number_of_noncount_bits; } 6.5 int get_ProfileLimit() const { return InterpreterProfileLimit >> number_of_noncount_bits; } 6.6 6.7 +#ifdef CC_INTERP 6.8 // Test counter using scaled limits like the asm interpreter would do rather than doing 6.9 // the shifts to normalize the counter. 6.10 - 6.11 - bool reached_InvocationLimit() const { return _counter >= (unsigned int) InterpreterInvocationLimit; } 6.12 - bool reached_BackwardBranchLimit() const { return _counter >= (unsigned int) InterpreterBackwardBranchLimit; } 6.13 - 6.14 - // Do this just like asm interpreter does for max speed 6.15 - bool reached_ProfileLimit(InvocationCounter *back_edge_count) const { 6.16 - return (_counter && count_mask) + back_edge_count->_counter >= (unsigned int) InterpreterProfileLimit; 6.17 + // Checks sum of invocation_counter and backedge_counter as the template interpreter does. 6.18 + bool reached_InvocationLimit(InvocationCounter *back_edge_count) const { 6.19 + return (_counter & count_mask) + (back_edge_count->_counter & count_mask) >= 6.20 + (unsigned int) InterpreterInvocationLimit; 6.21 } 6.22 + bool reached_BackwardBranchLimit(InvocationCounter *back_edge_count) const { 6.23 + return (_counter & count_mask) + (back_edge_count->_counter & count_mask) >= 6.24 + (unsigned int) InterpreterBackwardBranchLimit; 6.25 + } 6.26 + // Do this just like asm interpreter does for max speed. 6.27 + bool reached_ProfileLimit(InvocationCounter *back_edge_count) const { 6.28 + return (_counter & count_mask) + (back_edge_count->_counter & count_mask) >= 6.29 + (unsigned int) InterpreterProfileLimit; 6.30 + } 6.31 +#endif // CC_INTERP 6.32 6.33 void increment() { _counter += count_increment; } 6.34
7.1 --- a/src/share/vm/oops/methodData.cpp Thu Sep 19 17:31:42 2013 +0200 7.2 +++ b/src/share/vm/oops/methodData.cpp Sun Sep 15 15:28:58 2013 +0200 7.3 @@ -244,6 +244,11 @@ 7.4 return mdp; 7.5 } 7.6 7.7 +#ifdef CC_INTERP 7.8 +DataLayout* RetData::advance(MethodData *md, int bci) { 7.9 + return (DataLayout*) md->bci_to_dp(bci); 7.10 +} 7.11 +#endif // CC_INTERP 7.12 7.13 #ifndef PRODUCT 7.14 void RetData::print_data_on(outputStream* st) {
8.1 --- a/src/share/vm/oops/methodData.hpp Thu Sep 19 17:31:42 2013 +0200 8.2 +++ b/src/share/vm/oops/methodData.hpp Sun Sep 15 15:28:58 2013 +0200 8.3 @@ -225,6 +225,11 @@ 8.4 static ByteSize cell_offset(int index) { 8.5 return byte_offset_of(DataLayout, _cells) + in_ByteSize(index * cell_size); 8.6 } 8.7 +#ifdef CC_INTERP 8.8 + static int cell_offset_in_bytes(int index) { 8.9 + return (int)offset_of(DataLayout, _cells[index]); 8.10 + } 8.11 +#endif // CC_INTERP 8.12 // Return a value which, when or-ed as a byte into _flags, sets the flag. 8.13 static int flag_number_to_byte_constant(int flag_number) { 8.14 assert(0 <= flag_number && flag_number < flag_limit, "oob"); 8.15 @@ -356,6 +361,41 @@ 8.16 _data = data; 8.17 } 8.18 8.19 +#ifdef CC_INTERP 8.20 + // Static low level accessors for DataLayout with ProfileData's semantics. 8.21 + 8.22 + static int cell_offset_in_bytes(int index) { 8.23 + return DataLayout::cell_offset_in_bytes(index); 8.24 + } 8.25 + 8.26 + static void increment_uint_at_no_overflow(DataLayout* layout, int index, 8.27 + int inc = DataLayout::counter_increment) { 8.28 + uint count = ((uint)layout->cell_at(index)) + inc; 8.29 + if (count == 0) return; 8.30 + layout->set_cell_at(index, (intptr_t) count); 8.31 + } 8.32 + 8.33 + static int int_at(DataLayout* layout, int index) { 8.34 + return (int)layout->cell_at(index); 8.35 + } 8.36 + 8.37 + static int uint_at(DataLayout* layout, int index) { 8.38 + return (uint)layout->cell_at(index); 8.39 + } 8.40 + 8.41 + static oop oop_at(DataLayout* layout, int index) { 8.42 + return (oop)layout->cell_at(index); 8.43 + } 8.44 + 8.45 + static void set_intptr_at(DataLayout* layout, int index, intptr_t value) { 8.46 + layout->set_cell_at(index, (intptr_t) value); 8.47 + } 8.48 + 8.49 + static void set_flag_at(DataLayout* layout, int flag_number) { 8.50 + layout->set_flag_at(flag_number); 8.51 + } 8.52 +#endif // CC_INTERP 8.53 + 8.54 public: 8.55 // Constructor for invalid ProfileData. 
8.56 ProfileData(); 8.57 @@ -495,6 +535,20 @@ 8.58 return cell_offset(bit_cell_count); 8.59 } 8.60 8.61 +#ifdef CC_INTERP 8.62 + static int bit_data_size_in_bytes() { 8.63 + return cell_offset_in_bytes(bit_cell_count); 8.64 + } 8.65 + 8.66 + static void set_null_seen(DataLayout* layout) { 8.67 + set_flag_at(layout, null_seen_flag); 8.68 + } 8.69 + 8.70 + static DataLayout* advance(DataLayout* layout) { 8.71 + return (DataLayout*) (((address)layout) + (ssize_t)BitData::bit_data_size_in_bytes()); 8.72 + } 8.73 +#endif // CC_INTERP 8.74 + 8.75 #ifndef PRODUCT 8.76 void print_data_on(outputStream* st); 8.77 #endif 8.78 @@ -539,6 +593,25 @@ 8.79 set_uint_at(count_off, count); 8.80 } 8.81 8.82 +#ifdef CC_INTERP 8.83 + static int counter_data_size_in_bytes() { 8.84 + return cell_offset_in_bytes(counter_cell_count); 8.85 + } 8.86 + 8.87 + static void increment_count_no_overflow(DataLayout* layout) { 8.88 + increment_uint_at_no_overflow(layout, count_off); 8.89 + } 8.90 + 8.91 + // Support counter decrementation at checkcast / subtype check failed. 
8.92 + static void decrement_count(DataLayout* layout) { 8.93 + increment_uint_at_no_overflow(layout, count_off, -1); 8.94 + } 8.95 + 8.96 + static DataLayout* advance(DataLayout* layout) { 8.97 + return (DataLayout*) (((address)layout) + (ssize_t)CounterData::counter_data_size_in_bytes()); 8.98 + } 8.99 +#endif // CC_INTERP 8.100 + 8.101 #ifndef PRODUCT 8.102 void print_data_on(outputStream* st); 8.103 #endif 8.104 @@ -609,6 +682,20 @@ 8.105 return cell_offset(displacement_off_set); 8.106 } 8.107 8.108 +#ifdef CC_INTERP 8.109 + static void increment_taken_count_no_overflow(DataLayout* layout) { 8.110 + increment_uint_at_no_overflow(layout, taken_off_set); 8.111 + } 8.112 + 8.113 + static DataLayout* advance_taken(DataLayout* layout) { 8.114 + return (DataLayout*) (((address)layout) + (ssize_t)int_at(layout, displacement_off_set)); 8.115 + } 8.116 + 8.117 + static uint taken_count(DataLayout* layout) { 8.118 + return (uint) uint_at(layout, taken_off_set); 8.119 + } 8.120 +#endif // CC_INTERP 8.121 + 8.122 // Specific initialization. 8.123 void post_initialize(BytecodeStream* stream, MethodData* mdo); 8.124 8.125 @@ -718,6 +805,43 @@ 8.126 // GC support 8.127 virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure); 8.128 8.129 +#ifdef CC_INTERP 8.130 + static int receiver_type_data_size_in_bytes() { 8.131 + return cell_offset_in_bytes(static_cell_count()); 8.132 + } 8.133 + 8.134 + static Klass *receiver_unchecked(DataLayout* layout, uint row) { 8.135 + oop recv = oop_at(layout, receiver_cell_index(row)); 8.136 + return (Klass *)recv; 8.137 + } 8.138 + 8.139 + static void increment_receiver_count_no_overflow(DataLayout* layout, Klass *rcvr) { 8.140 + const int num_rows = row_limit(); 8.141 + // Receiver already exists? 
8.142 + for (int row = 0; row < num_rows; row++) { 8.143 + if (receiver_unchecked(layout, row) == rcvr) { 8.144 + increment_uint_at_no_overflow(layout, receiver_count_cell_index(row)); 8.145 + return; 8.146 + } 8.147 + } 8.148 + // New receiver, find a free slot. 8.149 + for (int row = 0; row < num_rows; row++) { 8.150 + if (receiver_unchecked(layout, row) == NULL) { 8.151 + set_intptr_at(layout, receiver_cell_index(row), (intptr_t)rcvr); 8.152 + increment_uint_at_no_overflow(layout, receiver_count_cell_index(row)); 8.153 + return; 8.154 + } 8.155 + } 8.156 + // Receiver did not match any saved receiver and there is no empty row for it. 8.157 + // Increment total counter to indicate polymorphic case. 8.158 + increment_count_no_overflow(layout); 8.159 + } 8.160 + 8.161 + static DataLayout* advance(DataLayout* layout) { 8.162 + return (DataLayout*) (((address)layout) + (ssize_t)ReceiverTypeData::receiver_type_data_size_in_bytes()); 8.163 + } 8.164 +#endif // CC_INTERP 8.165 + 8.166 #ifndef PRODUCT 8.167 void print_receiver_data_on(outputStream* st); 8.168 void print_data_on(outputStream* st); 8.169 @@ -751,6 +875,16 @@ 8.170 return cell_offset(static_cell_count()); 8.171 } 8.172 8.173 +#ifdef CC_INTERP 8.174 + static int virtual_call_data_size_in_bytes() { 8.175 + return cell_offset_in_bytes(static_cell_count()); 8.176 + } 8.177 + 8.178 + static DataLayout* advance(DataLayout* layout) { 8.179 + return (DataLayout*) (((address)layout) + (ssize_t)VirtualCallData::virtual_call_data_size_in_bytes()); 8.180 + } 8.181 +#endif // CC_INTERP 8.182 + 8.183 #ifndef PRODUCT 8.184 void print_data_on(outputStream* st); 8.185 #endif 8.186 @@ -847,6 +981,10 @@ 8.187 return cell_offset(bci_displacement_cell_index(row)); 8.188 } 8.189 8.190 +#ifdef CC_INTERP 8.191 + static DataLayout* advance(MethodData *md, int bci); 8.192 +#endif // CC_INTERP 8.193 + 8.194 // Specific initialization. 
8.195 void post_initialize(BytecodeStream* stream, MethodData* mdo); 8.196 8.197 @@ -911,6 +1049,20 @@ 8.198 return cell_offset(branch_cell_count); 8.199 } 8.200 8.201 +#ifdef CC_INTERP 8.202 + static int branch_data_size_in_bytes() { 8.203 + return cell_offset_in_bytes(branch_cell_count); 8.204 + } 8.205 + 8.206 + static void increment_not_taken_count_no_overflow(DataLayout* layout) { 8.207 + increment_uint_at_no_overflow(layout, not_taken_off_set); 8.208 + } 8.209 + 8.210 + static DataLayout* advance_not_taken(DataLayout* layout) { 8.211 + return (DataLayout*) (((address)layout) + (ssize_t)BranchData::branch_data_size_in_bytes()); 8.212 + } 8.213 +#endif // CC_INTERP 8.214 + 8.215 // Specific initialization. 8.216 void post_initialize(BytecodeStream* stream, MethodData* mdo); 8.217 8.218 @@ -950,6 +1102,20 @@ 8.219 set_int_at(aindex, value); 8.220 } 8.221 8.222 +#ifdef CC_INTERP 8.223 + // Static low level accessors for DataLayout with ArrayData's semantics. 8.224 + 8.225 + static void increment_array_uint_at_no_overflow(DataLayout* layout, int index) { 8.226 + int aindex = index + array_start_off_set; 8.227 + increment_uint_at_no_overflow(layout, aindex); 8.228 + } 8.229 + 8.230 + static int array_int_at(DataLayout* layout, int index) { 8.231 + int aindex = index + array_start_off_set; 8.232 + return int_at(layout, aindex); 8.233 + } 8.234 +#endif // CC_INTERP 8.235 + 8.236 // Code generation support for subclasses. 
8.237 static ByteSize array_element_offset(int index) { 8.238 return cell_offset(array_start_off_set + index); 8.239 @@ -1068,6 +1234,28 @@ 8.240 return in_ByteSize(relative_displacement_off_set) * cell_size; 8.241 } 8.242 8.243 +#ifdef CC_INTERP 8.244 + static void increment_count_no_overflow(DataLayout* layout, int index) { 8.245 + if (index == -1) { 8.246 + increment_array_uint_at_no_overflow(layout, default_count_off_set); 8.247 + } else { 8.248 + increment_array_uint_at_no_overflow(layout, case_array_start + 8.249 + index * per_case_cell_count + 8.250 + relative_count_off_set); 8.251 + } 8.252 + } 8.253 + 8.254 + static DataLayout* advance(DataLayout* layout, int index) { 8.255 + if (index == -1) { 8.256 + return (DataLayout*) (((address)layout) + (ssize_t)array_int_at(layout, default_disaplacement_off_set)); 8.257 + } else { 8.258 + return (DataLayout*) (((address)layout) + (ssize_t)array_int_at(layout, case_array_start + 8.259 + index * per_case_cell_count + 8.260 + relative_displacement_off_set)); 8.261 + } 8.262 + } 8.263 +#endif // CC_INTERP 8.264 + 8.265 // Specific initialization. 8.266 void post_initialize(BytecodeStream* stream, MethodData* mdo); 8.267 8.268 @@ -1146,8 +1334,11 @@ 8.269 // adjusted in the event of a change in control flow. 8.270 // 8.271 8.272 +CC_INTERP_ONLY(class BytecodeInterpreter;) 8.273 + 8.274 class MethodData : public Metadata { 8.275 friend class VMStructs; 8.276 + CC_INTERP_ONLY(friend class BytecodeInterpreter;) 8.277 private: 8.278 friend class ProfileData; 8.279
9.1 --- a/src/share/vm/prims/jvmtiManageCapabilities.cpp Thu Sep 19 17:31:42 2013 +0200 9.2 +++ b/src/share/vm/prims/jvmtiManageCapabilities.cpp Sun Sep 15 15:28:58 2013 +0200 9.3 @@ -117,10 +117,10 @@ 9.4 jvmtiCapabilities jc; 9.5 9.6 memset(&jc, 0, sizeof(jc)); 9.7 -#ifndef CC_INTERP 9.8 +#ifndef ZERO 9.9 jc.can_pop_frame = 1; 9.10 jc.can_force_early_return = 1; 9.11 -#endif // !CC_INTERP 9.12 +#endif // !ZERO 9.13 jc.can_get_source_debug_extension = 1; 9.14 jc.can_access_local_variables = 1; 9.15 jc.can_maintain_original_method_order = 1;
10.1 --- a/src/share/vm/runtime/arguments.cpp Thu Sep 19 17:31:42 2013 +0200 10.2 +++ b/src/share/vm/runtime/arguments.cpp Sun Sep 15 15:28:58 2013 +0200 10.3 @@ -3592,8 +3592,8 @@ 10.4 UseBiasedLocking = false; 10.5 } 10.6 10.7 -#ifdef CC_INTERP 10.8 - // Clear flags not supported by the C++ interpreter 10.9 +#ifdef ZERO 10.10 + // Clear flags not supported on zero. 10.11 FLAG_SET_DEFAULT(ProfileInterpreter, false); 10.12 FLAG_SET_DEFAULT(UseBiasedLocking, false); 10.13 LP64_ONLY(FLAG_SET_DEFAULT(UseCompressedOops, false));
11.1 --- a/src/share/vm/runtime/globals.hpp Thu Sep 19 17:31:42 2013 +0200 11.2 +++ b/src/share/vm/runtime/globals.hpp Sun Sep 15 15:28:58 2013 +0200 11.3 @@ -2727,6 +2727,11 @@ 11.4 product_pd(bool, ProfileInterpreter, \ 11.5 "Profile at the bytecode level during interpretation") \ 11.6 \ 11.7 + develop(bool, TraceProfileInterpreter, false, \ 11.8 + "Trace profiling at the bytecode level during interpretation. " \ 11.9 + "This outputs the profiling information collected to improve " \ 11.10 + "jit compilation.") \ 11.11 + \ 11.12 develop_pd(bool, ProfileTraps, \ 11.13 "Profile deoptimization traps at the bytecode level") \ 11.14 \