src/share/vm/memory/sharedHeap.hpp

Thu, 26 Sep 2013 12:18:21 +0200

author
tschatzl
date
Thu, 26 Sep 2013 12:18:21 +0200
changeset 5775
461159cd7a91
parent 5369
71180a6e5080
child 6198
55fb97c4c58d
permissions
-rw-r--r--

Merge

duke@435 1 /*
coleenp@4037 2 * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #ifndef SHARE_VM_MEMORY_SHAREDHEAP_HPP
stefank@2314 26 #define SHARE_VM_MEMORY_SHAREDHEAP_HPP
stefank@2314 27
stefank@2314 28 #include "gc_interface/collectedHeap.hpp"
stefank@2314 29 #include "memory/generation.hpp"
stefank@2314 30
duke@435 31 // A "SharedHeap" is an implementation of a java heap for HotSpot. This
duke@435 32 // is an abstract class: there may be many different kinds of heaps. This
duke@435 33 // class defines the functions that a heap must implement, and contains
duke@435 34 // infrastructure common to all heaps.
duke@435 35
duke@435 36 class Generation;
duke@435 37 class BarrierSet;
duke@435 38 class GenRemSet;
duke@435 39 class Space;
duke@435 40 class SpaceClosure;
duke@435 41 class OopClosure;
duke@435 42 class OopsInGenClosure;
duke@435 43 class ObjectClosure;
duke@435 44 class SubTasksDone;
duke@435 45 class WorkGang;
jmasa@2188 46 class FlexibleWorkGang;
duke@435 47 class CollectorPolicy;
coleenp@4037 48 class KlassClosure;
duke@435 49
jmasa@3294 50 // Note on use of FlexibleWorkGang's for GC.
jmasa@3294 51 // There are three places where task completion is determined.
jmasa@3294 52 // In
jmasa@3294 53 // 1) ParallelTaskTerminator::offer_termination() where _n_threads
jmasa@3294 54 // must be set to the correct value so that count of workers that
jmasa@3294 55 // have offered termination will exactly match the number
jmasa@3294 56 // working on the task. Tasks such as those derived from GCTask
jmasa@3294 57 // use ParallelTaskTerminator's. Tasks that want load balancing
jmasa@3294 58 // by work stealing use this method to gauge completion.
jmasa@3294 59 // 2) SubTasksDone has a variable _n_threads that is used in
jmasa@3294 60 // all_tasks_completed() to determine completion. all_tasks_completed()
jmasa@3294 61 // counts the number of tasks that have been done and then resets
jmasa@3294 62 // the SubTasksDone so that it can be used again. When the number of
jmasa@3294 63 // tasks is set to the number of GC workers, then _n_threads must
jmasa@3294 64 // be set to the number of active GC workers. G1CollectedHeap,
jmasa@3294 65 // HRInto_G1RemSet, GenCollectedHeap and SharedHeap have SubTasksDone.
jmasa@3294 66 // This seems too many.
jmasa@3294 67 // 3) SequentialSubTasksDone has an _n_threads that is used in
jmasa@3294 68 // a way similar to SubTasksDone and has the same dependency on the
jmasa@3294 69 // number of active GC workers. CompactibleFreeListSpace and Space
jmasa@3294 70 // have SequentialSubTasksDone's.
jmasa@3294 71 // Example of using SubTasksDone and SequentialSubTasksDone
jmasa@3294 72 // G1CollectedHeap::g1_process_strong_roots() calls
jmasa@3294 73 // process_strong_roots(false, // no scoping; this is parallel code
coleenp@4037 74 // is_scavenging, so,
jmasa@3294 75 // &buf_scan_non_heap_roots,
coleenp@4037 76 // &eager_scan_code_roots);
jmasa@3294 77 // which delegates to SharedHeap::process_strong_roots() and uses
jmasa@3294 78 // SubTasksDone* _process_strong_tasks to claim tasks.
jmasa@3294 79 // process_strong_roots() calls
coleenp@4037 80 // rem_set()->younger_refs_iterate()
jmasa@3294 81 // to scan the card table and which eventually calls down into
jmasa@3294 82 // CardTableModRefBS::par_non_clean_card_iterate_work(). This method
jmasa@3294 83 // uses SequentialSubTasksDone* _pst to claim tasks.
jmasa@3294 84 // Both SubTasksDone and SequentialSubTasksDone call their method
jmasa@3294 85 // all_tasks_completed() to count the number of GC workers that have
jmasa@3294 86 // finished their work. That logic is "when all the workers are
jmasa@3294 87 // finished the tasks are finished".
jmasa@3294 88 //
jmasa@3294 89 // The pattern that appears in the code is to set _n_threads
jmasa@3294 90 // to a value > 1 before a task that you would like executed in parallel
jmasa@3294 91 // and then to set it to 0 after that task has completed. A value of
jmasa@3294 92 // 0 is a "special" value in set_n_threads() which translates to
jmasa@3294 93 // setting _n_threads to 1.
jmasa@3294 94 //
jmasa@3294 95 // Some code uses _n_termination to decide if work should be done in
jmasa@3294 96 // parallel. The notorious possibly_parallel_oops_do() in threads.cpp
jmasa@3294 97 // is an example of such code. Look for variable "is_par" for other
jmasa@3294 98 // examples.
jmasa@3294 99 //
jmasa@3294 100 // The active_workers is not reset to 0 after a parallel phase. Its
jmasa@3294 101 // value may be used in later phases and in one instance at least
jmasa@3294 102 // (the parallel remark) it has to be used (the parallel remark depends
jmasa@3294 103 // on the partitioning done in the previous parallel scavenge).
jmasa@3294 104
duke@435 105 class SharedHeap : public CollectedHeap {
duke@435 106 friend class VMStructs;
duke@435 107
ysr@777 108 friend class VM_GC_Operation;
ysr@777 109 friend class VM_CGC_Operation;
ysr@777 110
duke@435 111 private:
duke@435 112 // For claiming strong_roots tasks.
duke@435 113 SubTasksDone* _process_strong_tasks;
duke@435 114
duke@435 115 protected:
duke@435 116 // There should be only a single instance of "SharedHeap" in a program.
duke@435 117 // This is enforced with the protected constructor below, which will also
duke@435 118 // set the static pointer "_sh" to that instance.
duke@435 119 static SharedHeap* _sh;
duke@435 120
duke@435 121 // and the Gen Remembered Set, at least one good enough to scan the perm
duke@435 122 // gen.
duke@435 123 GenRemSet* _rem_set;
duke@435 124
duke@435 125 // A gc policy, controls global gc resource issues
duke@435 126 CollectorPolicy *_collector_policy;
duke@435 127
duke@435 128 // See the discussion below, in the specification of the reader function
duke@435 129 // for this variable.
duke@435 130 int _strong_roots_parity;
duke@435 131
duke@435 132 // If we're doing parallel GC, use this gang of threads.
jmasa@2188 133 FlexibleWorkGang* _workers;
duke@435 134
duke@435 135 // Full initialization is done in a concrete subtype's "initialize"
duke@435 136 // function.
duke@435 137 SharedHeap(CollectorPolicy* policy_);
duke@435 138
ysr@777 139 // Returns true if the calling thread holds the heap lock,
ysr@777 140 // or the calling thread is a par gc thread and the heap_lock is held
ysr@777 141 // by the vm thread doing a gc operation.
ysr@777 142 bool heap_lock_held_for_gc();
ysr@777 143 // True if the heap_lock is held by the a non-gc thread invoking a gc
ysr@777 144 // operation.
ysr@777 145 bool _thread_holds_heap_lock_for_gc;
ysr@777 146
duke@435 147 public:
duke@435 148 static SharedHeap* heap() { return _sh; }
duke@435 149
duke@435 150 void set_barrier_set(BarrierSet* bs);
jmasa@3294 151 SubTasksDone* process_strong_tasks() { return _process_strong_tasks; }
duke@435 152
duke@435 153 // Does operations required after initialization has been done.
duke@435 154 virtual void post_initialize();
duke@435 155
duke@435 156 // Initialization of ("weak") reference processing support
duke@435 157 virtual void ref_processing_init();
duke@435 158
duke@435 159 // This function returns the "GenRemSet" object that allows us to scan
coleenp@4037 160 // generations in a fully generational heap.
duke@435 161 GenRemSet* rem_set() { return _rem_set; }
duke@435 162
duke@435 163 // Iteration functions.
coleenp@4037 164 void oop_iterate(ExtendedOopClosure* cl) = 0;
duke@435 165
duke@435 166 // Same as above, restricted to a memory region.
coleenp@4037 167 virtual void oop_iterate(MemRegion mr, ExtendedOopClosure* cl) = 0;
duke@435 168
duke@435 169 // Iterate over all spaces in use in the heap, in an undefined order.
duke@435 170 virtual void space_iterate(SpaceClosure* cl) = 0;
duke@435 171
duke@435 172 // A SharedHeap will contain some number of spaces. This finds the
duke@435 173 // space whose reserved area contains the given address, or else returns
duke@435 174 // NULL.
duke@435 175 virtual Space* space_containing(const void* addr) const = 0;
duke@435 176
duke@435 177 bool no_gc_in_progress() { return !is_gc_active(); }
duke@435 178
duke@435 179 // Some collectors will perform "process_strong_roots" in parallel.
duke@435 180 // Such a call will involve claiming some fine-grained tasks, such as
duke@435 181 // scanning of threads. To make this process simpler, we provide the
duke@435 182 // "strong_roots_parity()" method. Collectors that start parallel tasks
duke@435 183 // whose threads invoke "process_strong_roots" must
duke@435 184 // call "change_strong_roots_parity" in sequential code starting such a
duke@435 185 // task. (This also means that a parallel thread may only call
duke@435 186 // process_strong_roots once.)
duke@435 187 //
duke@435 188 // For calls to process_strong_roots by sequential code, the parity is
duke@435 189 // updated automatically.
duke@435 190 //
duke@435 191 // The idea is that objects representing fine-grained tasks, such as
duke@435 192 // threads, will contain a "parity" field. A task will is claimed in the
duke@435 193 // current "process_strong_roots" call only if its parity field is the
duke@435 194 // same as the "strong_roots_parity"; task claiming is accomplished by
duke@435 195 // updating the parity field to the strong_roots_parity with a CAS.
duke@435 196 //
duke@435 197 // If the client meats this spec, then strong_roots_parity() will have
duke@435 198 // the following properties:
duke@435 199 // a) to return a different value than was returned before the last
duke@435 200 // call to change_strong_roots_parity, and
duke@435 201 // c) to never return a distinguished value (zero) with which such
duke@435 202 // task-claiming variables may be initialized, to indicate "never
duke@435 203 // claimed".
jrose@1424 204 private:
duke@435 205 void change_strong_roots_parity();
jrose@1424 206 public:
duke@435 207 int strong_roots_parity() { return _strong_roots_parity; }
duke@435 208
jrose@1424 209 // Call these in sequential code around process_strong_roots.
jrose@1424 210 // strong_roots_prologue calls change_strong_roots_parity, if
jrose@1424 211 // parallel tasks are enabled.
jrose@1424 212 class StrongRootsScope : public MarkingCodeBlobClosure::MarkScope {
jrose@1424 213 public:
jrose@1424 214 StrongRootsScope(SharedHeap* outer, bool activate = true);
jrose@1424 215 ~StrongRootsScope();
jrose@1424 216 };
jrose@1424 217 friend class StrongRootsScope;
jrose@1424 218
duke@435 219 enum ScanningOption {
duke@435 220 SO_None = 0x0,
duke@435 221 SO_AllClasses = 0x1,
duke@435 222 SO_SystemClasses = 0x2,
ysr@2825 223 SO_Strings = 0x4,
ysr@2825 224 SO_CodeCache = 0x8
duke@435 225 };
duke@435 226
jmasa@2188 227 FlexibleWorkGang* workers() const { return _workers; }
duke@435 228
duke@435 229 // Invoke the "do_oop" method the closure "roots" on all root locations.
coleenp@4037 230 // The "so" argument determines which roots the closure is applied to:
duke@435 231 // "SO_None" does none;
duke@435 232 // "SO_AllClasses" applies the closure to all entries in the SystemDictionary;
duke@435 233 // "SO_SystemClasses" to all the "system" classes and loaders;
duke@435 234 // "SO_Strings" applies the closure to all entries in StringTable;
duke@435 235 // "SO_CodeCache" applies the closure to all elements of the CodeCache.
jrose@1424 236 void process_strong_roots(bool activate_scope,
coleenp@4037 237 bool is_scavenging,
duke@435 238 ScanningOption so,
duke@435 239 OopClosure* roots,
jrose@1424 240 CodeBlobClosure* code_roots,
coleenp@4037 241 KlassClosure* klass_closure);
duke@435 242
duke@435 243 // Apply "blk" to all the weak roots of the system. These include
duke@435 244 // JNI weak roots, the code cache, system dictionary, symbol table,
duke@435 245 // string table.
duke@435 246 void process_weak_roots(OopClosure* root_closure,
stefank@5011 247 CodeBlobClosure* code_roots);
duke@435 248
duke@435 249 // The functions below are helper functions that a subclass of
duke@435 250 // "SharedHeap" can use in the implementation of its virtual
duke@435 251 // functions.
duke@435 252
ysr@777 253 public:
duke@435 254
duke@435 255 // Do anything common to GC's.
duke@435 256 virtual void gc_prologue(bool full) = 0;
duke@435 257 virtual void gc_epilogue(bool full) = 0;
duke@435 258
jmasa@3294 259 // Sets the number of parallel threads that will be doing tasks
jmasa@3294 260 // (such as process strong roots) subsequently.
jmasa@3357 261 virtual void set_par_threads(uint t);
jmasa@3294 262
jmasa@3294 263 int n_termination();
jmasa@3294 264 void set_n_termination(int t);
jmasa@3294 265
duke@435 266 //
duke@435 267 // New methods from CollectedHeap
duke@435 268 //
duke@435 269
duke@435 270 // Some utilities.
ysr@777 271 void print_size_transition(outputStream* out,
ysr@777 272 size_t bytes_before,
duke@435 273 size_t bytes_after,
duke@435 274 size_t capacity);
duke@435 275 };
stefank@2314 276
stefank@2314 277 #endif // SHARE_VM_MEMORY_SHAREDHEAP_HPP

mercurial