--- a/src/share/vm/interpreter/rewriter.hpp	Wed Nov 13 07:31:26 2013 -0800
+++ b/src/share/vm/interpreter/rewriter.hpp	Wed Nov 13 16:42:24 2013 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -46,55 +46,102 @@
   intArray _method_handle_invokers;
   int _resolved_reference_limit;
 
+  // For mapping invokedynamic bytecodes, which are discovered during method
+  // scanning. The invokedynamic entries are added at the end of the cpCache.
+  // If there are any invokespecial/InterfaceMethodref special case bytecodes,
+  // these entries are added before invokedynamic entries so that the
+  // invokespecial bytecode 16 bit index doesn't overflow.
+  intStack _invokedynamic_cp_cache_map;
+
+  // For patching.
+  GrowableArray<address>* _patch_invokedynamic_bcps;
+  GrowableArray<int>* _patch_invokedynamic_refs;
+
   void init_maps(int length) {
     _cp_map.initialize(length, -1);
     // Choose an initial value large enough that we don't get frequent
     // calls to grow().
-    _cp_cache_map.initialize(length / 2);
+    _cp_cache_map.initialize(length/2);
     // Also cache resolved objects, in another different cache.
     _reference_map.initialize(length, -1);
-    _resolved_references_map.initialize(length / 2);
-    _invokedynamic_references_map.initialize(length / 2);
+    _resolved_references_map.initialize(length/2);
+    _invokedynamic_references_map.initialize(length/2);
     _resolved_reference_limit = -1;
-    DEBUG_ONLY(_cp_cache_index_limit = -1);
+    _first_iteration_cp_cache_limit = -1;
+
+    // invokedynamic specific fields
+    _invokedynamic_cp_cache_map.initialize(length/4);
+    _patch_invokedynamic_bcps = new GrowableArray<address>(length/4);
+    _patch_invokedynamic_refs = new GrowableArray<int>(length/4);
   }
 
-  int _cp_cache_index_limit;
+  int _first_iteration_cp_cache_limit;
   void record_map_limits() {
-#ifdef ASSERT
-    // Record initial size of the two arrays generated for the CP cache:
-    _cp_cache_index_limit = _cp_cache_map.length();
-#endif //ASSERT
+    // Record initial size of the two arrays generated for the CP cache
+    // relative to walking the constant pool.
+    _first_iteration_cp_cache_limit = _cp_cache_map.length();
     _resolved_reference_limit = _resolved_references_map.length();
   }
 
+  int cp_cache_delta() {
+    // How many cp cache entries were added since recording map limits after
+    // cp cache initialization?
+    assert(_first_iteration_cp_cache_limit != -1, "only valid after first iteration");
+    return _cp_cache_map.length() - _first_iteration_cp_cache_limit;
+  }
+
   int cp_entry_to_cp_cache(int i) { assert(has_cp_cache(i), "oob"); return _cp_map[i]; }
   bool has_cp_cache(int i) { return (uint)i < (uint)_cp_map.length() && _cp_map[i] >= 0; }
 
+  int add_map_entry(int cp_index, intArray* cp_map, intStack* cp_cache_map) {
+    assert(cp_map->at(cp_index) == -1, "not twice on same cp_index");
+    int cache_index = cp_cache_map->append(cp_index);
+    cp_map->at_put(cp_index, cache_index);
+    return cache_index;
+  }
+
   int add_cp_cache_entry(int cp_index) {
     assert(_pool->tag_at(cp_index).value() != JVM_CONSTANT_InvokeDynamic, "use indy version");
-    assert(_cp_map[cp_index] == -1, "not twice on same cp_index");
-    assert(_cp_cache_index_limit == -1, "do not add cache entries after first iteration");
-    int cache_index = _cp_cache_map.append(cp_index);
-    _cp_map.at_put(cp_index, cache_index);
+    assert(_first_iteration_cp_cache_limit == -1, "do not add cache entries after first iteration");
+    int cache_index = add_map_entry(cp_index, &_cp_map, &_cp_cache_map);
     assert(cp_entry_to_cp_cache(cp_index) == cache_index, "");
     assert(cp_cache_entry_pool_index(cache_index) == cp_index, "");
     return cache_index;
   }
 
-  // add a new CP cache entry beyond the normal cache (for invokedynamic only)
   int add_invokedynamic_cp_cache_entry(int cp_index) {
     assert(_pool->tag_at(cp_index).value() == JVM_CONSTANT_InvokeDynamic, "use non-indy version");
-    assert(_cp_map[cp_index] == -1, "do not map from cp_index");
-    assert(_cp_cache_index_limit >= 0, "add indy cache entries after first iteration");
+    assert(_first_iteration_cp_cache_limit >= 0, "add indy cache entries after first iteration");
+    // add to the invokedynamic index map.
+    int cache_index = _invokedynamic_cp_cache_map.append(cp_index);
+    // do not update _cp_map, since the mapping is one-to-many
+    assert(invokedynamic_cp_cache_entry_pool_index(cache_index) == cp_index, "");
+    // this index starts at one but in the bytecode it's appended to the end.
+    return cache_index + _first_iteration_cp_cache_limit;
+  }
+
+  int invokedynamic_cp_cache_entry_pool_index(int cache_index) {
+    int cp_index = _invokedynamic_cp_cache_map[cache_index];
+    return cp_index;
+  }
+
+  // add a new CP cache entry beyond the normal cache for the special case of
+  // invokespecial with InterfaceMethodref as cpool operand.
+  int add_invokespecial_cp_cache_entry(int cp_index) {
+    assert(_first_iteration_cp_cache_limit >= 0, "add these special cache entries after first iteration");
+    // Don't add InterfaceMethodref if it already exists at the end.
+    for (int i = _first_iteration_cp_cache_limit; i < _cp_cache_map.length(); i++) {
+      if (cp_cache_entry_pool_index(i) == cp_index) {
+        return i;
+      }
+    }
     int cache_index = _cp_cache_map.append(cp_index);
-    assert(cache_index >= _cp_cache_index_limit, "");
+    assert(cache_index >= _first_iteration_cp_cache_limit, "");
     // do not update _cp_map, since the mapping is one-to-many
     assert(cp_cache_entry_pool_index(cache_index) == cp_index, "");
     return cache_index;
   }
 
-  // fix duplicated code later
   int cp_entry_to_resolved_references(int cp_index) const {
     assert(has_entry_in_resolved_references(cp_index), "oob");
     return _reference_map[cp_index];
@@ -105,10 +152,7 @@
 
   // add a new entry to the resolved_references map
   int add_resolved_references_entry(int cp_index) {
-    assert(_reference_map[cp_index] == -1, "not twice on same cp_index");
-    assert(_resolved_reference_limit == -1, "do not add CP refs after first iteration");
-    int ref_index = _resolved_references_map.append(cp_index);
-    _reference_map.at_put(cp_index, ref_index);
+    int ref_index = add_map_entry(cp_index, &_reference_map, &_resolved_references_map);
     assert(cp_entry_to_resolved_references(cp_index) == ref_index, "");
     return ref_index;
   }
@@ -137,7 +181,7 @@
   // Access the contents of _cp_cache_map to determine CP cache layout.
   int cp_cache_entry_pool_index(int cache_index) {
     int cp_index = _cp_cache_map[cache_index];
-    return cp_index; 
+    return cp_index;
   }
 
   // All the work goes in here:
@@ -145,14 +189,18 @@
 
   void compute_index_maps();
   void make_constant_pool_cache(TRAPS);
-  void scan_method(Method* m, bool reverse = false);
+  void scan_method(Method* m, bool reverse, TRAPS);
   void rewrite_Object_init(methodHandle m, TRAPS);
-  void rewrite_member_reference(address bcp, int offset, bool reverse = false);
-  void maybe_rewrite_invokehandle(address opc, int cp_index, int cache_index, bool reverse = false);
-  void rewrite_invokedynamic(address bcp, int offset, bool reverse = false);
-  void maybe_rewrite_ldc(address bcp, int offset, bool is_wide, bool reverse = false);
+  void rewrite_member_reference(address bcp, int offset, bool reverse);
+  void maybe_rewrite_invokehandle(address opc, int cp_index, int cache_index, bool reverse);
+  void rewrite_invokedynamic(address bcp, int offset, bool reverse);
+  void maybe_rewrite_ldc(address bcp, int offset, bool is_wide, bool reverse);
+  void rewrite_invokespecial(address bcp, int offset, bool reverse, TRAPS);
+
+  void patch_invokedynamic_bytecodes();
+
   // Revert bytecodes in case of an exception.
-  void restore_bytecodes();
+  void restore_bytecodes(TRAPS);
 
   static methodHandle rewrite_jsrs(methodHandle m, TRAPS);
  public: