Wed, 27 Apr 2016 01:25:04 +0800
Initial load
http://hg.openjdk.java.net/jdk8u/jdk8u/hotspot/
changeset: 6782:28b50d07f6f8
tag: jdk8u25-b17
aoqi@0 | 1 | /* |
aoqi@0 | 2 | * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. |
aoqi@0 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
aoqi@0 | 4 | * |
aoqi@0 | 5 | * This code is free software; you can redistribute it and/or modify it |
aoqi@0 | 6 | * under the terms of the GNU General Public License version 2 only, as |
aoqi@0 | 7 | * published by the Free Software Foundation. |
aoqi@0 | 8 | * |
aoqi@0 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
aoqi@0 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
aoqi@0 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
aoqi@0 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
aoqi@0 | 13 | * accompanied this code). |
aoqi@0 | 14 | * |
aoqi@0 | 15 | * You should have received a copy of the GNU General Public License version |
aoqi@0 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
aoqi@0 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
aoqi@0 | 18 | * |
aoqi@0 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
aoqi@0 | 20 | * or visit www.oracle.com if you need additional information or have any |
aoqi@0 | 21 | * questions. |
aoqi@0 | 22 | * |
aoqi@0 | 23 | */ |
aoqi@0 | 24 | |
aoqi@0 | 25 | #ifndef SHARE_VM_MEMORY_SHAREDHEAP_HPP |
aoqi@0 | 26 | #define SHARE_VM_MEMORY_SHAREDHEAP_HPP |
aoqi@0 | 27 | |
aoqi@0 | 28 | #include "gc_interface/collectedHeap.hpp" |
aoqi@0 | 29 | #include "memory/generation.hpp" |
aoqi@0 | 30 | |
aoqi@0 | 31 | // A "SharedHeap" is an implementation of a java heap for HotSpot. This |
aoqi@0 | 32 | // is an abstract class: there may be many different kinds of heaps. This |
aoqi@0 | 33 | // class defines the functions that a heap must implement, and contains |
aoqi@0 | 34 | // infrastructure common to all heaps. |
aoqi@0 | 35 | |
aoqi@0 | 36 | class Generation; |
aoqi@0 | 37 | class BarrierSet; |
aoqi@0 | 38 | class GenRemSet; |
aoqi@0 | 39 | class Space; |
aoqi@0 | 40 | class SpaceClosure; |
aoqi@0 | 41 | class OopClosure; |
aoqi@0 | 42 | class OopsInGenClosure; |
aoqi@0 | 43 | class ObjectClosure; |
aoqi@0 | 44 | class SubTasksDone; |
aoqi@0 | 45 | class WorkGang; |
aoqi@0 | 46 | class FlexibleWorkGang; |
aoqi@0 | 47 | class CollectorPolicy; |
aoqi@0 | 48 | class KlassClosure; |
aoqi@0 | 49 | |
aoqi@0 | 50 | // Note on use of FlexibleWorkGang's for GC. |
aoqi@0 | 51 | // There are three places where task completion is determined. |
aoqi@0 | 52 | // In |
aoqi@0 | 53 | // 1) ParallelTaskTerminator::offer_termination() where _n_threads |
aoqi@0 | 54 | // must be set to the correct value so that count of workers that |
aoqi@0 | 55 | // have offered termination will exactly match the number |
aoqi@0 | 56 | // working on the task. Tasks such as those derived from GCTask |
aoqi@0 | 57 | // use ParallelTaskTerminator's. Tasks that want load balancing |
aoqi@0 | 58 | // by work stealing use this method to gauge completion. |
aoqi@0 | 59 | // 2) SubTasksDone has a variable _n_threads that is used in |
aoqi@0 | 60 | // all_tasks_completed() to determine completion. all_tasks_completed() |
aoqi@0 | 61 | // counts the number of tasks that have been done and then resets |
aoqi@0 | 62 | // the SubTasksDone so that it can be used again. When the number of |
aoqi@0 | 63 | // tasks is set to the number of GC workers, then _n_threads must |
aoqi@0 | 64 | // be set to the number of active GC workers. G1CollectedHeap, |
aoqi@0 | 65 | // HRInto_G1RemSet, GenCollectedHeap and SharedHeap have SubTasksDone. |
aoqi@0 | 66 | // This seems too many. |
aoqi@0 | 67 | // 3) SequentialSubTasksDone has an _n_threads that is used in |
aoqi@0 | 68 | // a way similar to SubTasksDone and has the same dependency on the |
aoqi@0 | 69 | // number of active GC workers. CompactibleFreeListSpace and Space |
aoqi@0 | 70 | // have SequentialSubTasksDone's. |
aoqi@0 | 71 | // Example of using SubTasksDone and SequentialSubTasksDone |
aoqi@0 | 72 | // G1CollectedHeap::g1_process_strong_roots() calls |
aoqi@0 | 73 | // process_strong_roots(false, // no scoping; this is parallel code |
aoqi@0 | 74 | // is_scavenging, so, |
aoqi@0 | 75 | // &buf_scan_non_heap_roots, |
aoqi@0 | 76 | // &eager_scan_code_roots); |
aoqi@0 | 77 | // which delegates to SharedHeap::process_strong_roots() and uses |
aoqi@0 | 78 | // SubTasksDone* _process_strong_tasks to claim tasks. |
aoqi@0 | 79 | // process_strong_roots() calls |
aoqi@0 | 80 | // rem_set()->younger_refs_iterate() |
aoqi@0 | 81 | // to scan the card table and which eventually calls down into |
aoqi@0 | 82 | // CardTableModRefBS::par_non_clean_card_iterate_work(). This method |
aoqi@0 | 83 | // uses SequentialSubTasksDone* _pst to claim tasks. |
aoqi@0 | 84 | // Both SubTasksDone and SequentialSubTasksDone call their method |
aoqi@0 | 85 | // all_tasks_completed() to count the number of GC workers that have |
aoqi@0 | 86 | // finished their work. That logic is "when all the workers are |
aoqi@0 | 87 | // finished the tasks are finished". |
aoqi@0 | 88 | // |
aoqi@0 | 89 | // The pattern that appears in the code is to set _n_threads |
aoqi@0 | 90 | // to a value > 1 before a task that you would like executed in parallel |
aoqi@0 | 91 | // and then to set it to 0 after that task has completed. A value of |
aoqi@0 | 92 | // 0 is a "special" value in set_n_threads() which translates to |
aoqi@0 | 93 | // setting _n_threads to 1. |
aoqi@0 | 94 | // |
aoqi@0 | 95 | // Some code uses _n_termination to decide if work should be done in |
aoqi@0 | 96 | // parallel. The notorious possibly_parallel_oops_do() in threads.cpp |
aoqi@0 | 97 | // is an example of such code. Look for variable "is_par" for other |
aoqi@0 | 98 | // examples. |
aoqi@0 | 99 | // |
aoqi@0 | 100 | // The active_workers is not reset to 0 after a parallel phase. Its |
aoqi@0 | 101 | // value may be used in later phases and in one instance at least |
aoqi@0 | 102 | // (the parallel remark) it has to be used (the parallel remark depends |
aoqi@0 | 103 | // on the partitioning done in the previous parallel scavenge). |
aoqi@0 | 104 | |
aoqi@0 | 105 | class SharedHeap : public CollectedHeap { |
aoqi@0 | 106 | friend class VMStructs; |
aoqi@0 | 107 | |
aoqi@0 | 108 | friend class VM_GC_Operation; |
aoqi@0 | 109 | friend class VM_CGC_Operation; |
aoqi@0 | 110 | |
aoqi@0 | 111 | private: |
aoqi@0 | 112 | // For claiming strong_roots tasks. |
aoqi@0 | 113 | SubTasksDone* _process_strong_tasks; |
aoqi@0 | 114 | |
aoqi@0 | 115 | protected: |
aoqi@0 | 116 | // There should be only a single instance of "SharedHeap" in a program. |
aoqi@0 | 117 | // This is enforced with the protected constructor below, which will also |
aoqi@0 | 118 | // set the static pointer "_sh" to that instance. |
aoqi@0 | 119 | static SharedHeap* _sh; |
aoqi@0 | 120 | |
aoqi@0 | 121 | // and the Gen Remembered Set, at least one good enough to scan the perm |
aoqi@0 | 122 | // gen. |
aoqi@0 | 123 | GenRemSet* _rem_set; |
aoqi@0 | 124 | |
aoqi@0 | 125 | // A gc policy, controls global gc resource issues |
aoqi@0 | 126 | CollectorPolicy *_collector_policy; |
aoqi@0 | 127 | |
aoqi@0 | 128 | // See the discussion below, in the specification of the reader function |
aoqi@0 | 129 | // for this variable. |
aoqi@0 | 130 | int _strong_roots_parity; |
aoqi@0 | 131 | |
aoqi@0 | 132 | // If we're doing parallel GC, use this gang of threads. |
aoqi@0 | 133 | FlexibleWorkGang* _workers; |
aoqi@0 | 134 | |
aoqi@0 | 135 | // Full initialization is done in a concrete subtype's "initialize" |
aoqi@0 | 136 | // function. |
aoqi@0 | 137 | SharedHeap(CollectorPolicy* policy_); |
aoqi@0 | 138 | |
aoqi@0 | 139 | // Returns true if the calling thread holds the heap lock, |
aoqi@0 | 140 | // or the calling thread is a par gc thread and the heap_lock is held |
aoqi@0 | 141 | // by the vm thread doing a gc operation. |
aoqi@0 | 142 | bool heap_lock_held_for_gc(); |
aoqi@0 | 143 | // True if the heap_lock is held by a non-gc thread invoking a gc |
aoqi@0 | 144 | // operation. |
aoqi@0 | 145 | bool _thread_holds_heap_lock_for_gc; |
aoqi@0 | 146 | |
aoqi@0 | 147 | public: |
aoqi@0 | 148 | static SharedHeap* heap() { return _sh; } |
aoqi@0 | 149 | |
aoqi@0 | 150 | void set_barrier_set(BarrierSet* bs); |
aoqi@0 | 151 | SubTasksDone* process_strong_tasks() { return _process_strong_tasks; } |
aoqi@0 | 152 | |
aoqi@0 | 153 | // Does operations required after initialization has been done. |
aoqi@0 | 154 | virtual void post_initialize(); |
aoqi@0 | 155 | |
aoqi@0 | 156 | // Initialization of ("weak") reference processing support |
aoqi@0 | 157 | virtual void ref_processing_init(); |
aoqi@0 | 158 | |
aoqi@0 | 159 | // This function returns the "GenRemSet" object that allows us to scan |
aoqi@0 | 160 | // generations in a fully generational heap. |
aoqi@0 | 161 | GenRemSet* rem_set() { return _rem_set; } |
aoqi@0 | 162 | |
aoqi@0 | 163 | // Iteration functions. |
aoqi@0 | 164 | void oop_iterate(ExtendedOopClosure* cl) = 0; |
aoqi@0 | 165 | |
aoqi@0 | 166 | // Same as above, restricted to a memory region. |
aoqi@0 | 167 | virtual void oop_iterate(MemRegion mr, ExtendedOopClosure* cl) = 0; |
aoqi@0 | 168 | |
aoqi@0 | 169 | // Iterate over all spaces in use in the heap, in an undefined order. |
aoqi@0 | 170 | virtual void space_iterate(SpaceClosure* cl) = 0; |
aoqi@0 | 171 | |
aoqi@0 | 172 | // A SharedHeap will contain some number of spaces. This finds the |
aoqi@0 | 173 | // space whose reserved area contains the given address, or else returns |
aoqi@0 | 174 | // NULL. |
aoqi@0 | 175 | virtual Space* space_containing(const void* addr) const = 0; |
aoqi@0 | 176 | |
aoqi@0 | 177 | bool no_gc_in_progress() { return !is_gc_active(); } |
aoqi@0 | 178 | |
aoqi@0 | 179 | // Some collectors will perform "process_strong_roots" in parallel. |
aoqi@0 | 180 | // Such a call will involve claiming some fine-grained tasks, such as |
aoqi@0 | 181 | // scanning of threads. To make this process simpler, we provide the |
aoqi@0 | 182 | // "strong_roots_parity()" method. Collectors that start parallel tasks |
aoqi@0 | 183 | // whose threads invoke "process_strong_roots" must |
aoqi@0 | 184 | // call "change_strong_roots_parity" in sequential code starting such a |
aoqi@0 | 185 | // task. (This also means that a parallel thread may only call |
aoqi@0 | 186 | // process_strong_roots once.) |
aoqi@0 | 187 | // |
aoqi@0 | 188 | // For calls to process_strong_roots by sequential code, the parity is |
aoqi@0 | 189 | // updated automatically. |
aoqi@0 | 190 | // |
aoqi@0 | 191 | // The idea is that objects representing fine-grained tasks, such as |
aoqi@0 | 192 | // threads, will contain a "parity" field. A task is claimed in the |
aoqi@0 | 193 | // current "process_strong_roots" call only if its parity field is the |
aoqi@0 | 194 | // same as the "strong_roots_parity"; task claiming is accomplished by |
aoqi@0 | 195 | // updating the parity field to the strong_roots_parity with a CAS. |
aoqi@0 | 196 | // |
aoqi@0 | 197 | // If the client meets this spec, then strong_roots_parity() will have |
aoqi@0 | 198 | // the following properties: |
aoqi@0 | 199 | // a) to return a different value than was returned before the last |
aoqi@0 | 200 | // call to change_strong_roots_parity, and |
aoqi@0 | 201 | // b) to never return a distinguished value (zero) with which such |
aoqi@0 | 202 | // task-claiming variables may be initialized, to indicate "never |
aoqi@0 | 203 | // claimed". |
aoqi@0 | 204 | private: |
aoqi@0 | 205 | void change_strong_roots_parity(); |
aoqi@0 | 206 | public: |
aoqi@0 | 207 | int strong_roots_parity() { return _strong_roots_parity; } |
aoqi@0 | 208 | |
aoqi@0 | 209 | // Call these in sequential code around process_strong_roots. |
aoqi@0 | 210 | // strong_roots_prologue calls change_strong_roots_parity, if |
aoqi@0 | 211 | // parallel tasks are enabled. |
aoqi@0 | 212 | class StrongRootsScope : public MarkingCodeBlobClosure::MarkScope { |
aoqi@0 | 213 | public: |
aoqi@0 | 214 | StrongRootsScope(SharedHeap* outer, bool activate = true); |
aoqi@0 | 215 | ~StrongRootsScope(); |
aoqi@0 | 216 | }; |
aoqi@0 | 217 | friend class StrongRootsScope; |
aoqi@0 | 218 | |
aoqi@0 | 219 | enum ScanningOption { |
aoqi@0 | 220 | SO_None = 0x0, |
aoqi@0 | 221 | SO_AllClasses = 0x1, |
aoqi@0 | 222 | SO_SystemClasses = 0x2, |
aoqi@0 | 223 | SO_Strings = 0x4, |
aoqi@0 | 224 | SO_CodeCache = 0x8 |
aoqi@0 | 225 | }; |
aoqi@0 | 226 | |
aoqi@0 | 227 | FlexibleWorkGang* workers() const { return _workers; } |
aoqi@0 | 228 | |
aoqi@0 | 229 | // Invoke the "do_oop" method of the closure "roots" on all root locations. |
aoqi@0 | 230 | // The "so" argument determines which roots the closure is applied to: |
aoqi@0 | 231 | // "SO_None" does none; |
aoqi@0 | 232 | // "SO_AllClasses" applies the closure to all entries in the SystemDictionary; |
aoqi@0 | 233 | // "SO_SystemClasses" to all the "system" classes and loaders; |
aoqi@0 | 234 | // "SO_Strings" applies the closure to all entries in StringTable; |
aoqi@0 | 235 | // "SO_CodeCache" applies the closure to all elements of the CodeCache. |
aoqi@0 | 236 | void process_strong_roots(bool activate_scope, |
aoqi@0 | 237 | bool is_scavenging, |
aoqi@0 | 238 | ScanningOption so, |
aoqi@0 | 239 | OopClosure* roots, |
aoqi@0 | 240 | CodeBlobClosure* code_roots, |
aoqi@0 | 241 | KlassClosure* klass_closure); |
aoqi@0 | 242 | |
aoqi@0 | 243 | // Apply "blk" to all the weak roots of the system. These include |
aoqi@0 | 244 | // JNI weak roots, the code cache, system dictionary, symbol table, |
aoqi@0 | 245 | // string table. |
aoqi@0 | 246 | void process_weak_roots(OopClosure* root_closure, |
aoqi@0 | 247 | CodeBlobClosure* code_roots); |
aoqi@0 | 248 | |
aoqi@0 | 249 | // The functions below are helper functions that a subclass of |
aoqi@0 | 250 | // "SharedHeap" can use in the implementation of its virtual |
aoqi@0 | 251 | // functions. |
aoqi@0 | 252 | |
aoqi@0 | 253 | public: |
aoqi@0 | 254 | |
aoqi@0 | 255 | // Do anything common to GC's. |
aoqi@0 | 256 | virtual void gc_prologue(bool full) = 0; |
aoqi@0 | 257 | virtual void gc_epilogue(bool full) = 0; |
aoqi@0 | 258 | |
aoqi@0 | 259 | // Sets the number of parallel threads that will be doing tasks |
aoqi@0 | 260 | // (such as process strong roots) subsequently. |
aoqi@0 | 261 | virtual void set_par_threads(uint t); |
aoqi@0 | 262 | |
aoqi@0 | 263 | int n_termination(); |
aoqi@0 | 264 | void set_n_termination(int t); |
aoqi@0 | 265 | |
aoqi@0 | 266 | // |
aoqi@0 | 267 | // New methods from CollectedHeap |
aoqi@0 | 268 | // |
aoqi@0 | 269 | |
aoqi@0 | 270 | // Some utilities. |
aoqi@0 | 271 | void print_size_transition(outputStream* out, |
aoqi@0 | 272 | size_t bytes_before, |
aoqi@0 | 273 | size_t bytes_after, |
aoqi@0 | 274 | size_t capacity); |
aoqi@0 | 275 | }; |
aoqi@0 | 276 | |
aoqi@0 | 277 | #endif // SHARE_VM_MEMORY_SHAREDHEAP_HPP |