Wed, 03 Jul 2013 11:50:29 -0700
Merge
duke@435 | 1 | /* |
coleenp@4037 | 2 | * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
trims@1907 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | * questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
stefank@2314 | 25 | #include "precompiled.hpp" |
stefank@2314 | 26 | #include "interpreter/oopMapCache.hpp" |
stefank@2314 | 27 | #include "memory/allocation.inline.hpp" |
stefank@2314 | 28 | #include "memory/resourceArea.hpp" |
stefank@2314 | 29 | #include "oops/oop.inline.hpp" |
stefank@2314 | 30 | #include "prims/jvmtiRedefineClassesTrace.hpp" |
stefank@2314 | 31 | #include "runtime/handles.inline.hpp" |
stefank@2314 | 32 | #include "runtime/signature.hpp" |
duke@435 | 33 | |
// A cache entry holding the interpreter oop map for one (method, bci) pair.
// Entries live in the OopMapCache hash table; masks larger than the inline
// limit are allocated on the C heap (unlike plain InterpreterOopMap, whose
// out-of-line masks are resource-allocated).
class OopMapCacheEntry: private InterpreterOopMap {
  friend class InterpreterOopMap;
  friend class OopMapForCacheEntry;
  friend class OopMapCache;
  friend class VerifyClosure;

 protected:
  // Initialization: computes and stores the oop map for (method, bci)
  void fill(methodHandle method, int bci);
  // fills the bit mask for native calls (parameters only)
  void fill_for_native(methodHandle method);
  // encodes the given cell type arrays into this entry's bit mask
  void set_mask(CellTypeState* vars, CellTypeState* stack, int stack_top);

  // Deallocate bit masks and initialize fields
  void flush();

 private:
  void allocate_bit_mask();   // allocates the bit mask on C heap if necessary
  void deallocate_bit_mask(); // deallocates the bit mask from C heap if necessary
  bool verify_mask(CellTypeState *vars, CellTypeState *stack, int max_locals, int stack_top);

 public:
  OopMapCacheEntry() : InterpreterOopMap() {
#ifdef ASSERT
    // cache entries use C-heap masks, never resource-area masks
    _resource_allocate_bit_mask = false;
#endif
  }
};
duke@435 | 62 | |
duke@435 | 63 | |
duke@435 | 64 | // Implementation of OopMapForCacheEntry |
duke@435 | 65 | // (subclass of GenerateOopMap, initializes an OopMapCacheEntry for a given method and bci) |
duke@435 | 66 | |
// Drives the GenerateOopMap abstract interpreter over a method and captures
// the result for a single bci into an OopMapCacheEntry (via the
// fill_stackmap_for_opcodes callback).
class OopMapForCacheEntry: public GenerateOopMap {
  OopMapCacheEntry *_entry;     // entry being filled; not owned
  int               _bci;       // the one bci we are interested in
  int               _stack_top; // expression stack depth seen at _bci; -1 until computed

  virtual bool report_results() const     { return false; }
  virtual bool possible_gc_point          (BytecodeStream *bcs);
  virtual void fill_stackmap_prolog       (int nof_gc_points);
  virtual void fill_stackmap_epilog       ();
  virtual void fill_stackmap_for_opcodes  (BytecodeStream *bcs,
                                           CellTypeState* vars,
                                           CellTypeState* stack,
                                           int stack_top);
  virtual void fill_init_vars             (GrowableArray<intptr_t> *init_vars);

 public:
  OopMapForCacheEntry(methodHandle method, int bci, OopMapCacheEntry *entry);

  // Computes stack map for (method,bci) and initialize entry
  void compute_map(TRAPS);
  // Number of entries in the computed map; only valid after compute_map()
  int  size();
};
duke@435 | 89 | |
duke@435 | 90 | |
duke@435 | 91 | OopMapForCacheEntry::OopMapForCacheEntry(methodHandle method, int bci, OopMapCacheEntry* entry) : GenerateOopMap(method) { |
duke@435 | 92 | _bci = bci; |
duke@435 | 93 | _entry = entry; |
duke@435 | 94 | _stack_top = -1; |
duke@435 | 95 | } |
duke@435 | 96 | |
duke@435 | 97 | |
// Runs the abstract interpreter over the method and extracts the oop map
// at _bci into _entry.  May throw (TRAPS) from GenerateOopMap.
void OopMapForCacheEntry::compute_map(TRAPS) {
  assert(!method()->is_native(), "cannot compute oop map for native methods");
  // First check if it is a method where the stackmap is always empty
  if (method()->code_size() == 0 || method()->max_locals() + method()->max_stack() == 0) {
    _entry->set_mask_size(0);
  } else {
    ResourceMark rm;
    // Interpret the whole method; our fill_stackmap_for_opcodes override
    // ignores every bci except _bci, then we pull the basic-block result.
    GenerateOopMap::compute_map(CATCH);
    result_for_basicblock(_bci);
  }
}
duke@435 | 109 | |
duke@435 | 110 | |
duke@435 | 111 | bool OopMapForCacheEntry::possible_gc_point(BytecodeStream *bcs) { |
duke@435 | 112 | return false; // We are not reporting any result. We call result_for_basicblock directly |
duke@435 | 113 | } |
duke@435 | 114 | |
duke@435 | 115 | |
// GenerateOopMap callback; intentionally a no-op — results are captured
// per-bci in fill_stackmap_for_opcodes instead.
void OopMapForCacheEntry::fill_stackmap_prolog(int nof_gc_points) {
  // Do nothing
}
duke@435 | 119 | |
duke@435 | 120 | |
// GenerateOopMap callback; intentionally a no-op — nothing to finalize here.
void OopMapForCacheEntry::fill_stackmap_epilog() {
  // Do nothing
}
duke@435 | 124 | |
duke@435 | 125 | |
// GenerateOopMap callback; intentionally a no-op — initialized-variable
// info is not needed for a single-bci oop map.
void OopMapForCacheEntry::fill_init_vars(GrowableArray<intptr_t> *init_vars) {
  // Do nothing
}
duke@435 | 129 | |
duke@435 | 130 | |
duke@435 | 131 | void OopMapForCacheEntry::fill_stackmap_for_opcodes(BytecodeStream *bcs, |
duke@435 | 132 | CellTypeState* vars, |
duke@435 | 133 | CellTypeState* stack, |
duke@435 | 134 | int stack_top) { |
duke@435 | 135 | // Only interested in one specific bci |
duke@435 | 136 | if (bcs->bci() == _bci) { |
duke@435 | 137 | _entry->set_mask(vars, stack, stack_top); |
duke@435 | 138 | _stack_top = stack_top; |
duke@435 | 139 | } |
duke@435 | 140 | } |
duke@435 | 141 | |
duke@435 | 142 | |
duke@435 | 143 | int OopMapForCacheEntry::size() { |
duke@435 | 144 | assert(_stack_top != -1, "compute_map must be called first"); |
duke@435 | 145 | return ((method()->is_static()) ? 0 : 1) + method()->max_locals() + _stack_top; |
duke@435 | 146 | } |
duke@435 | 147 | |
duke@435 | 148 | |
duke@435 | 149 | // Implementation of InterpreterOopMap and OopMapCacheEntry |
duke@435 | 150 | |
duke@435 | 151 | class VerifyClosure : public OffsetClosure { |
duke@435 | 152 | private: |
duke@435 | 153 | OopMapCacheEntry* _entry; |
duke@435 | 154 | bool _failed; |
duke@435 | 155 | |
duke@435 | 156 | public: |
duke@435 | 157 | VerifyClosure(OopMapCacheEntry* entry) { _entry = entry; _failed = false; } |
duke@435 | 158 | void offset_do(int offset) { if (!_entry->is_oop(offset)) _failed = true; } |
duke@435 | 159 | bool failed() const { return _failed; } |
duke@435 | 160 | }; |
duke@435 | 161 | |
InterpreterOopMap::InterpreterOopMap() {
  initialize();
#ifdef ASSERT
  // Plain InterpreterOopMaps resource-allocate their out-of-line masks
  // (cf. OopMapCacheEntry, which sets this false and uses the C heap).
  _resource_allocate_bit_mask = true;
#endif
}
duke@435 | 168 | |
InterpreterOopMap::~InterpreterOopMap() {
  // The expectation is that the bit mask was allocated
  // last in this resource area.  That would make the free of the
  // bit_mask effective (see how FREE_RESOURCE_ARRAY does a free).
  // If it was not allocated last, there is not a correctness problem
  // but the space for the bit_mask is not freed.
  assert(_resource_allocate_bit_mask, "Trying to free C heap space");
  if (mask_size() > small_mask_limit) {
    // only large masks are out-of-line; small ones live inline in _bit_mask
    FREE_RESOURCE_ARRAY(uintptr_t, _bit_mask[0], mask_word_size());
  }
}
duke@435 | 180 | |
duke@435 | 181 | bool InterpreterOopMap::is_empty() { |
duke@435 | 182 | bool result = _method == NULL; |
duke@435 | 183 | assert(_method != NULL || (_bci == 0 && |
duke@435 | 184 | (_mask_size == 0 || _mask_size == USHRT_MAX) && |
duke@435 | 185 | _bit_mask[0] == 0), "Should be completely empty"); |
duke@435 | 186 | return result; |
duke@435 | 187 | } |
duke@435 | 188 | |
duke@435 | 189 | void InterpreterOopMap::initialize() { |
duke@435 | 190 | _method = NULL; |
duke@435 | 191 | _mask_size = USHRT_MAX; // This value should cause a failure quickly |
duke@435 | 192 | _bci = 0; |
duke@435 | 193 | _expression_stack_size = 0; |
duke@435 | 194 | for (int i = 0; i < N; i++) _bit_mask[i] = 0; |
duke@435 | 195 | } |
duke@435 | 196 | |
// Invokes oop_closure->offset_do(i) for every entry i whose oop bit is set.
void InterpreterOopMap::iterate_oop(OffsetClosure* oop_closure) {
  int n = number_of_entries();
  int word_index = 0;
  uintptr_t value = 0;
  uintptr_t mask = 0;
  // iterate over entries
  for (int i = 0; i < n; i++, mask <<= bits_per_entry) {
    // get current word: the initial mask of 0 (and the shift eventually
    // wrapping the mask to 0 past the top of a word) forces a reload here
    if (mask == 0) {
      value = bit_mask()[word_index++];
      mask = 1;
    }
    // test for oop
    if ((value & (mask << oop_bit_number)) != 0) oop_closure->offset_do(i);
  }
}
duke@435 | 213 | |
duke@435 | 214 | |
#ifdef ENABLE_ZAP_DEAD_LOCALS

// Classifies every entry and dispatches it to exactly one closure:
// dead entries to dead_closure, live oops to oop_closure, and all
// remaining (live non-oop) entries to value_closure.
void InterpreterOopMap::iterate_all(OffsetClosure* oop_closure, OffsetClosure* value_closure, OffsetClosure* dead_closure) {
  int n = number_of_entries();
  int word_index = 0;
  uintptr_t value = 0;
  uintptr_t mask = 0;
  // iterate over entries
  for (int i = 0; i < n; i++, mask <<= bits_per_entry) {
    // get current word; reload whenever the shifted mask wraps to 0
    if (mask == 0) {
      value = bit_mask()[word_index++];
      mask = 1;
    }
    // test for dead values  & oops, and for live values
    if ((value & (mask << dead_bit_number)) != 0)      dead_closure->offset_do(i); // call this for all dead values or oops
    else if ((value & (mask << oop_bit_number)) != 0)   oop_closure->offset_do(i); // call this for all live oops
    else                                              value_closure->offset_do(i); // call this for all live values
  }
}

#endif
duke@435 | 237 | |
duke@435 | 238 | |
// Debug printout: "oop map for <method> @ <bci> = [n] { ... }" listing the
// indices that hold oops (and, when zapping is enabled, dead slots with a
// trailing '+').
void InterpreterOopMap::print() {
  int n = number_of_entries();
  tty->print("oop map for ");
  method()->print_value();
  tty->print(" @ %d = [%d] { ", bci(), n);
  for (int i = 0; i < n; i++) {
#ifdef ENABLE_ZAP_DEAD_LOCALS
    if (is_dead(i)) tty->print("%d+ ", i);
    else
#endif
    if (is_oop(i)) tty->print("%d ", i);
  }
  tty->print_cr("}");
}
duke@435 | 253 | |
duke@435 | 254 | class MaskFillerForNative: public NativeSignatureIterator { |
duke@435 | 255 | private: |
duke@435 | 256 | uintptr_t * _mask; // the bit mask to be filled |
duke@435 | 257 | int _size; // the mask size in bits |
duke@435 | 258 | |
duke@435 | 259 | void set_one(int i) { |
duke@435 | 260 | i *= InterpreterOopMap::bits_per_entry; |
duke@435 | 261 | assert(0 <= i && i < _size, "offset out of bounds"); |
duke@435 | 262 | _mask[i / BitsPerWord] |= (((uintptr_t) 1 << InterpreterOopMap::oop_bit_number) << (i % BitsPerWord)); |
duke@435 | 263 | } |
duke@435 | 264 | |
duke@435 | 265 | public: |
duke@435 | 266 | void pass_int() { /* ignore */ } |
duke@435 | 267 | void pass_long() { /* ignore */ } |
duke@435 | 268 | void pass_float() { /* ignore */ } |
duke@435 | 269 | void pass_double() { /* ignore */ } |
duke@435 | 270 | void pass_object() { set_one(offset()); } |
duke@435 | 271 | |
duke@435 | 272 | MaskFillerForNative(methodHandle method, uintptr_t* mask, int size) : NativeSignatureIterator(method) { |
duke@435 | 273 | _mask = mask; |
duke@435 | 274 | _size = size; |
duke@435 | 275 | // initialize with 0 |
duke@435 | 276 | int i = (size + BitsPerWord - 1) / BitsPerWord; |
duke@435 | 277 | while (i-- > 0) _mask[i] = 0; |
duke@435 | 278 | } |
duke@435 | 279 | |
duke@435 | 280 | void generate() { |
duke@435 | 281 | NativeSignatureIterator::iterate(); |
duke@435 | 282 | } |
duke@435 | 283 | }; |
duke@435 | 284 | |
// Debug check that the bits stored by set_mask() agree with the
// CellTypeState arrays they were generated from.  Returns false only when
// iterate_oop reports an offset the entry does not mark as an oop;
// per-slot mismatches fail via assert.
bool OopMapCacheEntry::verify_mask(CellTypeState* vars, CellTypeState* stack, int max_locals, int stack_top) {
  // Check mask includes map
  VerifyClosure blk(this);
  iterate_oop(&blk);
  if (blk.failed()) return false;

  // Check if map is generated correctly
  // (Use ?: operator to make sure all 'true' & 'false' are represented exactly the same so we can use == afterwards)
  if (TraceOopMapGeneration && Verbose) tty->print("Locals (%d): ", max_locals);

  // locals: oop bit must match vars[i].is_reference()
  for(int i = 0; i < max_locals; i++) {
    bool v1 = is_oop(i) ? true : false;
    bool v2 = vars[i].is_reference() ? true : false;
    assert(v1 == v2, "locals oop mask generation error");
    if (TraceOopMapGeneration && Verbose) tty->print("%d", v1 ? 1 : 0);
#ifdef ENABLE_ZAP_DEAD_LOCALS
    bool v3 = is_dead(i) ? true : false;
    bool v4 = !vars[i].is_live() ? true : false;
    assert(v3 == v4, "locals live mask generation error");
    assert(!(v1 && v3), "dead value marked as oop");
#endif
  }

  // expression stack: same checks against stack[j], offset by max_locals
  if (TraceOopMapGeneration && Verbose) { tty->cr(); tty->print("Stack (%d): ", stack_top); }
  for(int j = 0; j < stack_top; j++) {
    bool v1 = is_oop(max_locals + j) ? true : false;
    bool v2 = stack[j].is_reference() ? true : false;
    assert(v1 == v2, "stack oop mask generation error");
    if (TraceOopMapGeneration && Verbose) tty->print("%d", v1 ? 1 : 0);
#ifdef ENABLE_ZAP_DEAD_LOCALS
    bool v3 = is_dead(max_locals + j) ? true : false;
    bool v4 = !stack[j].is_live() ? true : false;
    assert(v3 == v4, "stack live mask generation error");
    assert(!(v1 && v3), "dead value marked as oop");
#endif
  }
  if (TraceOopMapGeneration && Verbose) tty->cr();
  return true;
}
duke@435 | 324 | |
// Allocate the bit mask on the C heap when it is too large to be stored
// inline in _bit_mask; small masks need no allocation at all.
void OopMapCacheEntry::allocate_bit_mask() {
  if (mask_size() > small_mask_limit) {
    assert(_bit_mask[0] == 0, "bit mask should be new or just flushed");
    // _bit_mask[0] doubles as the pointer to the out-of-line mask
    _bit_mask[0] = (intptr_t)
      NEW_C_HEAP_ARRAY(uintptr_t, mask_word_size(), mtClass);
  }
}
duke@435 | 332 | |
// Free the C-heap bit mask, if one was allocated by allocate_bit_mask().
void OopMapCacheEntry::deallocate_bit_mask() {
  if (mask_size() > small_mask_limit && _bit_mask[0] != 0) {
    // cache entries must never hold resource-area masks (cf. resource_copy)
    assert(!Thread::current()->resource_area()->contains((void*)_bit_mask[0]),
      "This bit mask should not be in the resource area");
    FREE_C_HEAP_ARRAY(uintptr_t, _bit_mask[0], mtClass);
    debug_only(_bit_mask[0] = 0;)
  }
}
duke@435 | 341 | |
duke@435 | 342 | |
// Build the oop map of a native method: one entry per parameter, with oop
// bits derived purely from the signature (no bytecodes to interpret).
void OopMapCacheEntry::fill_for_native(methodHandle mh) {
  assert(mh->is_native(), "method must be native method");
  set_mask_size(mh->size_of_parameters() * bits_per_entry);
  allocate_bit_mask();
  // fill mask for parameters
  MaskFillerForNative mf(mh, bit_mask(), mask_size());
  mf.generate();
}
duke@435 | 351 | |
duke@435 | 352 | |
// (Re)compute this entry's oop map for (method, bci), releasing any
// previously held bit mask first.
void OopMapCacheEntry::fill(methodHandle method, int bci) {
  HandleMark hm;
  // Flush entry to deallocate an existing entry
  flush();
  set_method(method());
  set_bci(bci);
  if (method->is_native()) {
    // Native method activations have oops only among the parameters and one
    // extra oop following the parameters (the mirror for static native methods).
    fill_for_native(method);
  } else {
    // Non-native: run the abstract interpreter; exceptions are fatal here.
    EXCEPTION_MARK;
    OopMapForCacheEntry gen(method, bci, this);
    gen.compute_map(CATCH);
  }
}
duke@435 | 369 | |
duke@435 | 370 | |
// Encode the locals (vars) followed by the expression stack (stack) into
// this entry's packed bit mask, bits_per_entry bits per slot.
void OopMapCacheEntry::set_mask(CellTypeState *vars, CellTypeState *stack, int stack_top) {
  // compute bit mask size
  int max_locals = method()->max_locals();
  int n_entries = max_locals + stack_top;
  set_mask_size(n_entries * bits_per_entry);
  allocate_bit_mask();
  set_expression_stack_size(stack_top);

  // compute bits
  int word_index = 0;
  uintptr_t value = 0;
  uintptr_t mask = 1;

  CellTypeState* cell = vars;
  for (int entry_index = 0; entry_index < n_entries; entry_index++, mask <<= bits_per_entry, cell++) {
    // store last word; the shifted mask wraps to 0 when a word is full
    if (mask == 0) {
      bit_mask()[word_index++] = value;
      value = 0;
      mask = 1;
    }

    // switch to stack when done with locals
    if (entry_index == max_locals) {
      cell = stack;
    }

    // set oop bit
    if ( cell->is_reference()) {
      value |= (mask << oop_bit_number );
    }

#ifdef ENABLE_ZAP_DEAD_LOCALS
    // set dead bit
    if (!cell->is_live()) {
      value |= (mask << dead_bit_number);
      assert(!cell->is_reference(), "dead value marked as oop");
    }
#endif
  }

  // make sure last word is stored
  bit_mask()[word_index] = value;

  // verify bit mask
  assert(verify_mask(vars, stack, max_locals, stack_top), "mask could not be verified");


}
duke@435 | 420 | |
// Release any C-heap bit mask and reset all fields to the empty state.
void OopMapCacheEntry::flush() {
  deallocate_bit_mask();
  initialize();
}
duke@435 | 425 | |
duke@435 | 426 | |
duke@435 | 427 | // Implementation of OopMapCache |
duke@435 | 428 | |
#ifndef PRODUCT

// Running total of bytes used by all OopMapCache instances; updated in the
// OopMapCache constructor/destructor.  Non-product builds only.
static long _total_memory_usage = 0;

long OopMapCache::memory_usage() {
  return _total_memory_usage;
}

#endif
duke@435 | 438 | |
// Copy a cached entry (C-heap mask) into this resource-allocated oop map.
// Small masks are copied inline; large ones get a resource-area buffer.
void InterpreterOopMap::resource_copy(OopMapCacheEntry* from) {
  assert(_resource_allocate_bit_mask,
    "Should not resource allocate the _bit_mask");

  set_method(from->method());
  set_bci(from->bci());
  set_mask_size(from->mask_size());
  set_expression_stack_size(from->expression_stack_size());

  // Is the bit mask contained in the entry?
  if (from->mask_size() <= small_mask_limit) {
    memcpy((void *)_bit_mask, (void *)from->_bit_mask,
      mask_word_size() * BytesPerWord);
  } else {
    // The expectation is that this InterpreterOopMap is a recently created
    // and empty. It is used to get a copy of a cached entry.
    // If the bit mask has a value, it should be in the
    // resource area.
    assert(_bit_mask[0] == 0 ||
      Thread::current()->resource_area()->contains((void*)_bit_mask[0]),
      "The bit mask should have been allocated from a resource area");
    // Allocate the bit_mask from a Resource area for performance.  Allocating
    // from the C heap as is done for OopMapCache has a significant
    // performance impact.
    _bit_mask[0] = (uintptr_t) NEW_RESOURCE_ARRAY(uintptr_t, mask_word_size());
    assert(_bit_mask[0] != 0, "bit mask was not allocated");
    memcpy((void*) _bit_mask[0], (void*) from->_bit_mask[0],
      mask_word_size() * BytesPerWord);
  }
}
duke@435 | 469 | |
duke@435 | 470 | inline unsigned int OopMapCache::hash_value_for(methodHandle method, int bci) { |
duke@435 | 471 | // We use method->code_size() rather than method->identity_hash() below since |
duke@435 | 472 | // the mark may not be present if a pointer to the method is already reversed. |
duke@435 | 473 | return ((unsigned int) bci) |
duke@435 | 474 | ^ ((unsigned int) method->max_locals() << 2) |
duke@435 | 475 | ^ ((unsigned int) method->code_size() << 4) |
duke@435 | 476 | ^ ((unsigned int) method->size_of_parameters() << 6); |
duke@435 | 477 | } |
duke@435 | 478 | |
duke@435 | 479 | |
// Allocate the entry table on the C heap and put every slot into the
// empty state.
OopMapCache::OopMapCache() :
  _mut(Mutex::leaf, "An OopMapCache lock", true)
{
  _array  = NEW_C_HEAP_ARRAY(OopMapCacheEntry, _size, mtClass);
  // Cannot call flush for initialization, since flush
  // will check if memory should be deallocated
  for(int i = 0; i < _size; i++) _array[i].initialize();
  NOT_PRODUCT(_total_memory_usage += sizeof(OopMapCache) + (sizeof(OopMapCacheEntry) * _size);)
}
duke@435 | 489 | |
duke@435 | 490 | |
OopMapCache::~OopMapCache() {
  assert(_array != NULL, "sanity check");
  // Deallocate oop maps that are allocated out-of-line
  flush();
  // Deallocate array
  NOT_PRODUCT(_total_memory_usage -= sizeof(OopMapCache) + (sizeof(OopMapCacheEntry) * _size);)
  FREE_C_HEAP_ARRAY(OopMapCacheEntry, _array, mtClass);
}
duke@435 | 499 | |
duke@435 | 500 | OopMapCacheEntry* OopMapCache::entry_at(int i) const { |
duke@435 | 501 | return &_array[i % _size]; |
duke@435 | 502 | } |
duke@435 | 503 | |
duke@435 | 504 | void OopMapCache::flush() { |
duke@435 | 505 | for (int i = 0; i < _size; i++) _array[i].flush(); |
duke@435 | 506 | } |
duke@435 | 507 | |
// Evict entries whose method has been made obsolete by class redefinition,
// so the cache does not keep old Method*s alive.
void OopMapCache::flush_obsolete_entries() {
  for (int i = 0; i < _size; i++)
    if (!_array[i].is_empty() && _array[i].method()->is_old()) {
      // Cache entry is occupied by an old redefined method and we don't
      // want to pin it down so flush the entry.
      RC_TRACE(0x08000000, ("flush: %s(%s): cached entry @%d",
        _array[i].method()->name()->as_C_string(),
        _array[i].method()->signature()->as_C_string(), i));

      _array[i].flush();
    }
}
duke@435 | 520 | |
// Look up (or compute and cache) the oop map for (method, bci) and copy it
// into entry_for.  The returned copy is resource-allocated, so the caller
// can use it without holding the cache lock.
void OopMapCache::lookup(methodHandle method,
                         int bci,
                         InterpreterOopMap* entry_for) {
  MutexLocker x(&_mut);

  OopMapCacheEntry* entry = NULL;
  int probe = hash_value_for(method, bci);

  // Search hashtable for match
  int i;
  for(i = 0; i < _probe_depth; i++) {
    entry = entry_at(probe + i);
    if (entry->match(method, bci)) {
      entry_for->resource_copy(entry);
      assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
      return;
    }
  }

  if (TraceOopMapGeneration) {
    static int count = 0;
    ResourceMark rm;
    tty->print("%d - Computing oopmap at bci %d for ", ++count, bci);
    method->print_value(); tty->cr();
  }

  // Entry is not in hashtable.
  // Compute entry and return it

  if (method->should_not_be_cached()) {
    // It is either not safe or not a good idea to cache this Method*
    // at this time. We give the caller of lookup() a copy of the
    // interesting info via parameter entry_for, but we don't add it to
    // the cache. See the gory details in Method*.cpp.
    compute_one_oop_map(method, bci, entry_for);
    return;
  }

  // First search for an empty slot
  for(i = 0; i < _probe_depth; i++) {
    entry  = entry_at(probe + i);
    if (entry->is_empty()) {
      entry->fill(method, bci);
      entry_for->resource_copy(entry);
      assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
      return;
    }
  }

  if (TraceOopMapGeneration) {
    ResourceMark rm;
    tty->print_cr("*** collision in oopmap cache - flushing item ***");
  }

  // No empty slot (uncommon case). Use (some approximation of a) LRU algorithm
  //entry_at(probe + _probe_depth - 1)->flush();
  //for(i = _probe_depth - 1; i > 0; i--) {
  //  // Coping entry[i] = entry[i-1];
  //  OopMapCacheEntry *to   = entry_at(probe + i);
  //  OopMapCacheEntry *from = entry_at(probe + i - 1);
  //  to->copy(from);
  // }

  assert(method->is_method(), "gaga");

  // Overwrite the first slot in the probe sequence.
  entry = entry_at(probe + 0);
  entry->fill(method, bci);

  // Copy the  newly cached entry to input parameter
  entry_for->resource_copy(entry);

  if (TraceOopMapGeneration) {
    ResourceMark rm;
    tty->print("Done with ");
    method->print_value(); tty->cr();
  }
  assert(!entry_for->is_empty(), "A non-empty oop map should be returned");

  return;
}
duke@435 | 601 | |
duke@435 | 602 | void OopMapCache::compute_one_oop_map(methodHandle method, int bci, InterpreterOopMap* entry) { |
duke@435 | 603 | // Due to the invariants above it's tricky to allocate a temporary OopMapCacheEntry on the stack |
zgu@3900 | 604 | OopMapCacheEntry* tmp = NEW_C_HEAP_ARRAY(OopMapCacheEntry, 1, mtClass); |
duke@435 | 605 | tmp->initialize(); |
duke@435 | 606 | tmp->fill(method, bci); |
duke@435 | 607 | entry->resource_copy(tmp); |
zgu@3900 | 608 | FREE_C_HEAP_ARRAY(OopMapCacheEntry, tmp, mtInternal); |
duke@435 | 609 | } |