src/share/vm/memory/sharedHeap.hpp

Mon, 07 Jul 2014 10:12:40 +0200

author
stefank
date
Mon, 07 Jul 2014 10:12:40 +0200
changeset 6992
2c6ef90f030a
parent 6978
30c99d8e0f02
child 7535
7ae4e26cb1e0
child 7659
38d6febe66af
permissions
-rw-r--r--

8049421: G1 Class Unloading after completing a concurrent mark cycle
Reviewed-by: tschatzl, ehelin, brutisso, coleenp, roland, iveresov
Contributed-by: stefan.karlsson@oracle.com, mikael.gerdin@oracle.com

duke@435 1 /*
mikael@6198 2 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #ifndef SHARE_VM_MEMORY_SHAREDHEAP_HPP
stefank@2314 26 #define SHARE_VM_MEMORY_SHAREDHEAP_HPP
stefank@2314 27
stefank@2314 28 #include "gc_interface/collectedHeap.hpp"
stefank@2314 29 #include "memory/generation.hpp"
stefank@2314 30
duke@435 31 // A "SharedHeap" is an implementation of a java heap for HotSpot. This
duke@435 32 // is an abstract class: there may be many different kinds of heaps. This
duke@435 33 // class defines the functions that a heap must implement, and contains
duke@435 34 // infrastructure common to all heaps.
duke@435 35
duke@435 36 class Generation;
duke@435 37 class BarrierSet;
duke@435 38 class GenRemSet;
duke@435 39 class Space;
duke@435 40 class SpaceClosure;
duke@435 41 class OopClosure;
duke@435 42 class OopsInGenClosure;
duke@435 43 class ObjectClosure;
duke@435 44 class SubTasksDone;
duke@435 45 class WorkGang;
jmasa@2188 46 class FlexibleWorkGang;
duke@435 47 class CollectorPolicy;
coleenp@4037 48 class KlassClosure;
duke@435 49
jmasa@3294 50 // Note on use of FlexibleWorkGang's for GC.
jmasa@3294 51 // There are three places where task completion is determined.
jmasa@3294 52 // In
jmasa@3294 53 // 1) ParallelTaskTerminator::offer_termination() where _n_threads
jmasa@3294 54 // must be set to the correct value so that count of workers that
jmasa@3294 55 // have offered termination will exactly match the number
jmasa@3294 56 // working on the task. Tasks such as those derived from GCTask
jmasa@3294 57 // use ParallelTaskTerminator's. Tasks that want load balancing
jmasa@3294 58 // by work stealing use this method to gauge completion.
jmasa@3294 59 // 2) SubTasksDone has a variable _n_threads that is used in
jmasa@3294 60 // all_tasks_completed() to determine completion. all_tasks_completed()
jmasa@3294 61 // counts the number of tasks that have been done and then resets
jmasa@3294 62 // the SubTasksDone so that it can be used again. When the number of
jmasa@3294 63 // tasks is set to the number of GC workers, then _n_threads must
jmasa@3294 64 // be set to the number of active GC workers. G1CollectedHeap,
jmasa@3294 65 // HRInto_G1RemSet, GenCollectedHeap and SharedHeap have SubTasksDone.
jmasa@3294 66 // This seems too many.
jmasa@3294 67 // 3) SequentialSubTasksDone has an _n_threads that is used in
jmasa@3294 68 // a way similar to SubTasksDone and has the same dependency on the
jmasa@3294 69 // number of active GC workers. CompactibleFreeListSpace and Space
jmasa@3294 70 // have SequentialSubTasksDone's.
jmasa@3294 71 // Example of using SubTasksDone and SequentialSubTasksDone
stefank@6992 72 // G1CollectedHeap::g1_process_roots() delegates
stefank@6992 73 // to SharedHeap::process_roots() and uses
jmasa@3294 74 // SubTasksDone* _process_strong_tasks to claim tasks.
stefank@6992 75 // process_roots() calls
coleenp@4037 76 // rem_set()->younger_refs_iterate()
jmasa@3294 77 // to scan the card table and which eventually calls down into
jmasa@3294 78 // CardTableModRefBS::par_non_clean_card_iterate_work(). This method
jmasa@3294 79 // uses SequentialSubTasksDone* _pst to claim tasks.
jmasa@3294 80 // Both SubTasksDone and SequentialSubTasksDone call their method
jmasa@3294 81 // all_tasks_completed() to count the number of GC workers that have
jmasa@3294 82 // finished their work. That logic is "when all the workers are
jmasa@3294 83 // finished the tasks are finished".
jmasa@3294 84 //
jmasa@3294 85 // The pattern that appears in the code is to set _n_threads
jmasa@3294 86 // to a value > 1 before a task that you would like executed in parallel
jmasa@3294 87 // and then to set it to 0 after that task has completed. A value of
jmasa@3294 88 // 0 is a "special" value in set_n_threads() which translates to
jmasa@3294 89 // setting _n_threads to 1.
jmasa@3294 90 //
jmasa@3294 91 // Some code uses _n_termination to decide if work should be done in
jmasa@3294 92 // parallel. The notorious possibly_parallel_oops_do() in threads.cpp
jmasa@3294 93 // is an example of such code. Look for variable "is_par" for other
jmasa@3294 94 // examples.
jmasa@3294 95 //
jmasa@3294 96 // The active_workers is not reset to 0 after a parallel phase. Its
jmasa@3294 97 // value may be used in later phases and in one instance at least
jmasa@3294 98 // (the parallel remark) it has to be used (the parallel remark depends
jmasa@3294 99 // on the partitioning done in the previous parallel scavenge).
jmasa@3294 100
duke@435 101 class SharedHeap : public CollectedHeap {
duke@435 102 friend class VMStructs;
duke@435 103
ysr@777 104 friend class VM_GC_Operation;
ysr@777 105 friend class VM_CGC_Operation;
ysr@777 106
duke@435 107 private:
duke@435 108 // For claiming strong_roots tasks.
duke@435 109 SubTasksDone* _process_strong_tasks;
duke@435 110
duke@435 111 protected:
duke@435 112 // There should be only a single instance of "SharedHeap" in a program.
duke@435 113 // This is enforced with the protected constructor below, which will also
duke@435 114 // set the static pointer "_sh" to that instance.
duke@435 115 static SharedHeap* _sh;
duke@435 116
duke@435 117 // The Gen Remembered Set, which allows us to scan generations in a
duke@435 118 // fully generational heap (see rem_set() below).
duke@435 119 GenRemSet* _rem_set;
duke@435 120
duke@435 121 // A gc policy, controls global gc resource issues
duke@435 122 CollectorPolicy *_collector_policy;
duke@435 123
duke@435 124 // See the discussion below, in the specification of the reader function
duke@435 125 // for this variable.
duke@435 126 int _strong_roots_parity;
duke@435 127
duke@435 128 // If we're doing parallel GC, use this gang of threads.
jmasa@2188 129 FlexibleWorkGang* _workers;
duke@435 130
duke@435 131 // Full initialization is done in a concrete subtype's "initialize"
duke@435 132 // function.
duke@435 133 SharedHeap(CollectorPolicy* policy_);
duke@435 134
ysr@777 135 // Returns true if the calling thread holds the heap lock,
ysr@777 136 // or the calling thread is a par gc thread and the heap_lock is held
ysr@777 137 // by the vm thread doing a gc operation.
ysr@777 138 bool heap_lock_held_for_gc();
ysr@777 139 // True if the heap_lock is held by a non-gc thread invoking a gc
ysr@777 140 // operation.
ysr@777 141 bool _thread_holds_heap_lock_for_gc;
ysr@777 142
duke@435 143 public:
duke@435 144 static SharedHeap* heap() { return _sh; }
duke@435 145
duke@435 146 void set_barrier_set(BarrierSet* bs);
jmasa@3294 147 SubTasksDone* process_strong_tasks() { return _process_strong_tasks; }
duke@435 148
duke@435 149 // Does operations required after initialization has been done.
duke@435 150 virtual void post_initialize();
duke@435 151
duke@435 152 // Initialization of ("weak") reference processing support
duke@435 153 virtual void ref_processing_init();
duke@435 154
duke@435 155 // This function returns the "GenRemSet" object that allows us to scan
coleenp@4037 156 // generations in a fully generational heap.
duke@435 157 GenRemSet* rem_set() { return _rem_set; }
duke@435 158
duke@435 159 // Iteration functions. Note: no "virtual" keyword is needed before the
duke@435 159b // pure-virtual specifier below; virtual-ness is inherited from the
duke@435 159c // declaration in CollectedHeap (otherwise "= 0" would be ill-formed).
coleenp@4037 160 void oop_iterate(ExtendedOopClosure* cl) = 0;
duke@435 161
duke@435 162 // Iterate over all spaces in use in the heap, in an undefined order.
duke@435 163 virtual void space_iterate(SpaceClosure* cl) = 0;
duke@435 164
duke@435 165 // A SharedHeap will contain some number of spaces. This finds the
duke@435 166 // space whose reserved area contains the given address, or else returns
duke@435 167 // NULL.
duke@435 168 virtual Space* space_containing(const void* addr) const = 0;
duke@435 169
duke@435 170 bool no_gc_in_progress() { return !is_gc_active(); }
duke@435 171
duke@435 172 // Some collectors will perform "process_strong_roots" in parallel.
duke@435 173 // Such a call will involve claiming some fine-grained tasks, such as
duke@435 174 // scanning of threads. To make this process simpler, we provide the
duke@435 175 // "strong_roots_parity()" method. Collectors that start parallel tasks
duke@435 176 // whose threads invoke "process_strong_roots" must
duke@435 177 // call "change_strong_roots_parity" in sequential code starting such a
duke@435 178 // task. (This also means that a parallel thread may only call
duke@435 179 // process_strong_roots once.)
duke@435 180 //
stefank@6992 181 // For calls to process_roots by sequential code, the parity is
duke@435 182 // updated automatically.
duke@435 183 //
duke@435 184 // The idea is that objects representing fine-grained tasks, such as
duke@435 185 // threads, will contain a "parity" field. A task is claimed in the
stefank@6992 186 // current "process_roots" call only if its parity field is the
duke@435 187 // same as the "strong_roots_parity"; task claiming is accomplished by
duke@435 188 // updating the parity field to the strong_roots_parity with a CAS.
duke@435 189 //
duke@435 190 // If the client meets this spec, then strong_roots_parity() will have
duke@435 191 // the following properties:
duke@435 192 // a) to return a different value than was returned before the last
duke@435 193 // call to change_strong_roots_parity, and
duke@435 194 // b) to never return a distinguished value (zero) with which such
duke@435 195 // task-claiming variables may be initialized, to indicate "never
duke@435 196 // claimed".
jrose@1424 197 public:
duke@435 198 int strong_roots_parity() { return _strong_roots_parity; }
duke@435 199
stefank@6992 200 // Call these in sequential code around process_roots.
jrose@1424 201 // strong_roots_prologue calls change_strong_roots_parity, if
jrose@1424 202 // parallel tasks are enabled.
jrose@1424 203 class StrongRootsScope : public MarkingCodeBlobClosure::MarkScope {
stefank@6992 204 // Used to implement the Thread work barrier.
stefank@6992 205 static Monitor* _lock;
stefank@6992 206
stefank@6992 207 SharedHeap* _sh;
stefank@6992 208 volatile jint _n_workers_done_with_threads;
stefank@6992 209
stefank@6992 210 public:
stefank@6992 211 StrongRootsScope(SharedHeap* heap, bool activate = true);
jrose@1424 212 ~StrongRootsScope();
stefank@6992 213
stefank@6992 214 // Mark that this thread is done with the Threads work.
stefank@6992 215 void mark_worker_done_with_threads(uint n_workers);
stefank@6992 216 // Wait until all n_workers are done with the Threads work.
stefank@6992 217 void wait_until_all_workers_done_with_threads(uint n_workers);
jrose@1424 218 };
jrose@1424 219 friend class StrongRootsScope;
jrose@1424 220
stefank@6992 221 // The currently active StrongRootsScope
stefank@6992 222 StrongRootsScope* _strong_roots_scope;
stefank@6992 223
stefank@6992 224 StrongRootsScope* active_strong_roots_scope() const;
stefank@6992 225
stefank@6992 226 private:
stefank@6992 227 void register_strong_roots_scope(StrongRootsScope* scope);
stefank@6992 228 void unregister_strong_roots_scope(StrongRootsScope* scope);
stefank@6992 229 void change_strong_roots_parity();
stefank@6992 230
stefank@6992 231 public:
duke@435 232 enum ScanningOption {
stefank@6992 233 SO_None = 0x0,
stefank@6992 234 SO_AllCodeCache = 0x8,
mgerdin@6968 235 SO_ScavengeCodeCache = 0x10
duke@435 236 };
duke@435 237
jmasa@2188 238 FlexibleWorkGang* workers() const { return _workers; }
duke@435 239
duke@435 240 // Invoke the "do_oop" method of the closure "roots" on all root locations.
coleenp@4037 241 // The "so" argument determines which roots the closure is applied to:
duke@435 242 // "SO_None" does none;
mgerdin@6968 243 // "SO_AllCodeCache" applies the closure to all elements of the CodeCache.
mgerdin@6968 244 // "SO_ScavengeCodeCache" applies the closure to elements on the scavenge root list in the CodeCache.
stefank@6992 245 void process_roots(bool activate_scope,
stefank@6992 246 ScanningOption so,
stefank@6992 247 OopClosure* strong_roots,
stefank@6992 248 OopClosure* weak_roots,
stefank@6992 249 CLDClosure* strong_cld_closure,
stefank@6992 250 CLDClosure* weak_cld_closure,
stefank@6992 251 CodeBlobClosure* code_roots);
stefank@6992 252 void process_all_roots(bool activate_scope,
stefank@6992 253 ScanningOption so,
stefank@6992 254 OopClosure* roots,
stefank@6992 255 CLDClosure* cld_closure,
stefank@6992 256 CodeBlobClosure* code_roots);
jrose@1424 257 void process_strong_roots(bool activate_scope,
duke@435 258 ScanningOption so,
duke@435 259 OopClosure* roots,
stefank@6992 260 CLDClosure* cld_closure,
stefank@6992 261 CodeBlobClosure* code_roots);
stefank@6992 262
duke@435 263
stefank@6972 264 // Apply "root_closure" to the JNI weak roots.
stefank@6971 265 void process_weak_roots(OopClosure* root_closure);
duke@435 266
duke@435 267 // The functions below are helper functions that a subclass of
duke@435 268 // "SharedHeap" can use in the implementation of its virtual
duke@435 269 // functions.
duke@435 270
ysr@777 271 public:
duke@435 272
duke@435 273 // Do anything common to GC's.
duke@435 274 virtual void gc_prologue(bool full) = 0;
duke@435 275 virtual void gc_epilogue(bool full) = 0;
duke@435 276
jmasa@3294 277 // Sets the number of parallel threads that will be doing tasks
stefank@6992 278 // (such as process roots) subsequently.
jmasa@3357 279 virtual void set_par_threads(uint t);
jmasa@3294 280
jmasa@3294 281 int n_termination();
jmasa@3294 282 void set_n_termination(int t);
jmasa@3294 283
duke@435 284 //
duke@435 285 // New methods from CollectedHeap
duke@435 286 //
duke@435 287
duke@435 288 // Some utilities.
ysr@777 289 void print_size_transition(outputStream* out,
ysr@777 290 size_t bytes_before,
duke@435 291 size_t bytes_after,
duke@435 292 size_t capacity);
duke@435 293 };
stefank@2314 294
// Combine two ScanningOption flags with bitwise-or while keeping the
// result typed as ScanningOption (a plain "so0 | so1" would yield int,
// which does not implicitly convert back to the enum type in C++).
stefank@6971 295 inline SharedHeap::ScanningOption operator|(SharedHeap::ScanningOption so0, SharedHeap::ScanningOption so1) {
stefank@6971 296 return static_cast<SharedHeap::ScanningOption>(static_cast<int>(so0) | static_cast<int>(so1));
stefank@6971 297 }
stefank@6971 298
stefank@2314 299 #endif // SHARE_VM_MEMORY_SHAREDHEAP_HPP

mercurial