--- a/src/share/vm/code/nmethod.hpp	Tue Jul 01 09:03:55 2014 +0200
+++ b/src/share/vm/code/nmethod.hpp	Mon Jul 07 10:12:40 2014 +0200
@@ -116,6 +116,11 @@
   friend class NMethodSweeper;
   friend class CodeCache;  // scavengable oops
  private:
+
+  // GC support to help figure out if an nmethod has been
+  // cleaned/unloaded by the current GC.
+  static unsigned char _global_unloading_clock;
+
   // Shared fields for all nmethod's
   Method*   _method;
   int       _entry_bci;  // != InvocationEntryBci if this nmethod is an on-stack replacement method
@@ -123,7 +128,13 @@
 
   // To support simple linked-list chaining of nmethods:
   nmethod*  _osr_link;  // from InstanceKlass::osr_nmethods_head
-  nmethod*  _scavenge_root_link;  // from CodeCache::scavenge_root_nmethods
+
+  union {
+    // Used by G1 to chain nmethods.
+    nmethod* _unloading_next;
+    // Used by non-G1 GCs to chain nmethods.
+    nmethod* _scavenge_root_link;  // from CodeCache::scavenge_root_nmethods
+  };
 
   static nmethod* volatile _oops_do_mark_nmethods;
   nmethod*        volatile _oops_do_mark_link;
@@ -185,6 +196,8 @@
   // Protected by Patching_lock
   volatile unsigned char _state;  // {alive, not_entrant, zombie, unloaded}
 
+  volatile unsigned char _unloading_clock;  // Incremented after GC unloaded/cleaned the nmethod
+
 #ifdef ASSERT
   bool _oops_are_stale;  // indicates that it's no longer safe to access oops section
 #endif
@@ -442,6 +455,15 @@
   bool unload_reported()      { return _unload_reported; }
   void set_unload_reported()  { _unload_reported = true; }
 
+  void set_unloading_next(nmethod* next)  { _unloading_next = next; }
+  nmethod* unloading_next()               { return _unloading_next; }
+
+  static unsigned char global_unloading_clock()  { return _global_unloading_clock; }
+  static void increase_unloading_clock();
+
+  void set_unloading_clock(unsigned char unloading_clock);
+  unsigned char unloading_clock();
+
   bool is_marked_for_deoptimization() const  { return _marked_for_deoptimization; }
   void mark_for_deoptimization()             { _marked_for_deoptimization = true; }
 
@@ -557,6 +579,10 @@
     return (addr >= code_begin() && addr < verified_entry_point());
   }
 
+  // Verify calls to dead methods have been cleaned.
+  void verify_clean_inline_caches();
+  // Verify and count cached icholder relocations.
+  int verify_icholder_relocations();
   // Check that all metadata is still alive
   void verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive);
 
@@ -582,6 +608,10 @@
 
   // GC support
   void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
+  // The parallel versions are used by G1.
+  bool do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred);
+  void do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred);
+  // Unload a nmethod if the *root object is dead.
   bool can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred);
 
   void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,