src/share/vm/memory/sharedHeap.cpp

Mon, 21 Nov 2011 07:47:34 +0100

author
brutisso
date
Mon, 21 Nov 2011 07:47:34 +0100
changeset 3290
d06a2d7fcd5b
parent 3115
c2bf0120ee5d
child 3294
bca17e38de00
permissions
-rw-r--r--

7110718: -XX:MarkSweepAlwaysCompactCount=0 crashes the JVM
Summary: Interpret MarkSweepAlwaysCompactCount < 1 as never do full compaction
Reviewed-by: ysr, tonyp, jmasa, johnc

duke@435 1 /*
trims@2708 2 * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "classfile/symbolTable.hpp"
stefank@2314 27 #include "classfile/systemDictionary.hpp"
stefank@2314 28 #include "code/codeCache.hpp"
stefank@2314 29 #include "gc_interface/collectedHeap.inline.hpp"
stefank@2314 30 #include "memory/sharedHeap.hpp"
stefank@2314 31 #include "oops/oop.inline.hpp"
stefank@2314 32 #include "runtime/fprofiler.hpp"
stefank@2314 33 #include "runtime/java.hpp"
stefank@2314 34 #include "services/management.hpp"
stefank@2314 35 #include "utilities/copy.hpp"
stefank@2314 36 #include "utilities/workgroup.hpp"
duke@435 37
// The single SharedHeap instance; set exactly once, in the constructor.
SharedHeap* SharedHeap::_sh;

// The set of potentially parallel tasks in strong root scanning.
// Each enumerator names one root group that a worker thread can claim
// (via SubTasksDone) so the group is scanned exactly once per pass.
enum SH_process_strong_roots_tasks {
  SH_PS_Universe_oops_do,
  SH_PS_JNIHandles_oops_do,
  SH_PS_ObjectSynchronizer_oops_do,
  SH_PS_FlatProfiler_oops_do,
  SH_PS_Management_oops_do,
  SH_PS_SystemDictionary_oops_do,
  SH_PS_jvmti_oops_do,
  SH_PS_StringTable_oops_do,
  SH_PS_CodeCache_oops_do,
  // Leave this one last.
  SH_PS_NumElements
};
duke@435 54
duke@435 55 SharedHeap::SharedHeap(CollectorPolicy* policy_) :
duke@435 56 CollectedHeap(),
duke@435 57 _collector_policy(policy_),
duke@435 58 _perm_gen(NULL), _rem_set(NULL),
duke@435 59 _strong_roots_parity(0),
duke@435 60 _process_strong_tasks(new SubTasksDone(SH_PS_NumElements)),
jmasa@2188 61 _n_par_threads(0),
jmasa@2188 62 _workers(NULL)
duke@435 63 {
duke@435 64 if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
duke@435 65 vm_exit_during_initialization("Failed necessary allocation.");
duke@435 66 }
duke@435 67 _sh = this; // ch is static, should be set only once.
duke@435 68 if ((UseParNewGC ||
ysr@777 69 (UseConcMarkSweepGC && CMSParallelRemarkEnabled) ||
ysr@777 70 UseG1GC) &&
duke@435 71 ParallelGCThreads > 0) {
jmasa@2188 72 _workers = new FlexibleWorkGang("Parallel GC Threads", ParallelGCThreads,
ysr@777 73 /* are_GC_task_threads */true,
ysr@777 74 /* are_ConcurrentGC_threads */false);
duke@435 75 if (_workers == NULL) {
duke@435 76 vm_exit_during_initialization("Failed necessary allocation.");
jmasa@2188 77 } else {
jmasa@2188 78 _workers->initialize_workers();
duke@435 79 }
duke@435 80 }
duke@435 81 }
duke@435 82
ysr@777 83 bool SharedHeap::heap_lock_held_for_gc() {
ysr@777 84 Thread* t = Thread::current();
ysr@777 85 return Heap_lock->owned_by_self()
ysr@777 86 || ( (t->is_GC_task_thread() || t->is_VM_thread())
ysr@777 87 && _thread_holds_heap_lock_for_gc);
ysr@777 88 }
duke@435 89
// Record the number of parallel GC worker threads for the current
// phase and propagate it to the strong-root task-claiming machinery.
// t == 0 means "serial"; a nonzero t is only legal with a parallel GC.
void SharedHeap::set_par_threads(int t) {
  assert(t == 0 || !UseSerialGC, "Cannot have parallel threads");
  _n_par_threads = t;
  _process_strong_tasks->set_n_threads(t);
}
duke@435 95
// Debug-aid closure: asserts that every non-NULL referent it visits
// lives in the permanent generation. Applied (non-product only) to the
// string table when JavaObjectsInPerm is set.
class AssertIsPermClosure: public OopClosure {
public:
  virtual void do_oop(oop* p) {
    assert((*p) == NULL || (*p)->is_perm(), "Referent should be perm.");
  }
  // Narrow oops are not expected from the roots this is applied to.
  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static AssertIsPermClosure assert_is_perm_closure;
duke@435 104
#ifdef ASSERT
// Debug-only closure: asserts that no referent it visits is subject to
// a partial (scavenging) collection. Used below to check that code
// cache contents are not moved by a scavenge.
class AssertNonScavengableClosure: public OopClosure {
public:
  virtual void do_oop(oop* p) {
    assert(!Universe::heap()->is_in_partial_collection(*p),
      "Referent should not be scavengable.");  }
  // Narrow oops are not expected from the roots this is applied to.
  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static AssertNonScavengableClosure assert_is_non_scavengable_closure;
#endif
jmasa@2909 115
duke@435 116 void SharedHeap::change_strong_roots_parity() {
duke@435 117 // Also set the new collection parity.
duke@435 118 assert(_strong_roots_parity >= 0 && _strong_roots_parity <= 2,
duke@435 119 "Not in range.");
duke@435 120 _strong_roots_parity++;
duke@435 121 if (_strong_roots_parity == 3) _strong_roots_parity = 1;
duke@435 122 assert(_strong_roots_parity >= 1 && _strong_roots_parity <= 2,
duke@435 123 "Not in range.");
duke@435 124 }
duke@435 125
// Entering an active StrongRootsScope bumps the strong-roots parity so
// that worker threads can re-claim thread stacks for a new pass.
SharedHeap::StrongRootsScope::StrongRootsScope(SharedHeap* outer, bool activate)
  : MarkScope(activate)
{
  if (_active) {
    outer->change_strong_roots_parity();
  }
}

SharedHeap::StrongRootsScope::~StrongRootsScope() {
  // nothing particular
}
jrose@1424 137
// Apply "roots" to all strong roots in the system. May be called by
// multiple worker threads concurrently: each root group is guarded by
// a claim in _process_strong_tasks so it is scanned exactly once.
//   activate_scope      - open a StrongRootsScope (bump parity) here
//   collecting_perm_gen - true for a full collection including perm
//   so                  - which optional root sets to include
//   roots               - closure applied to each strong root oop
//   code_roots          - closure for code-cache blobs (may be NULL
//                         when the code cache is not scanned)
//   perm_blk            - closure for younger-refs iteration when the
//                         perm gen itself is not being collected
void SharedHeap::process_strong_roots(bool activate_scope,
                                      bool collecting_perm_gen,
                                      ScanningOption so,
                                      OopClosure* roots,
                                      CodeBlobClosure* code_roots,
                                      OopsInGenClosure* perm_blk) {
  StrongRootsScope srs(this, activate_scope);
  // General strong roots.
  assert(_strong_roots_parity != 0, "must have called prologue code");
  if (!_process_strong_tasks->is_task_claimed(SH_PS_Universe_oops_do)) {
    Universe::oops_do(roots);
    // Consider perm-gen discovered lists to be strong.
    perm_gen()->ref_processor()->weak_oops_do(roots);
  }
  // Global (strong) JNI handles
  if (!_process_strong_tasks->is_task_claimed(SH_PS_JNIHandles_oops_do))
    JNIHandles::oops_do(roots);
  // All threads execute this; the individual threads are task groups.
  // (Thread stacks are claimed per-thread via the strong-roots parity,
  // not via _process_strong_tasks.)
  if (ParallelGCThreads > 0) {
    Threads::possibly_parallel_oops_do(roots, code_roots);
  } else {
    Threads::oops_do(roots, code_roots);
  }
  if (!_process_strong_tasks-> is_task_claimed(SH_PS_ObjectSynchronizer_oops_do))
    ObjectSynchronizer::oops_do(roots);
  if (!_process_strong_tasks->is_task_claimed(SH_PS_FlatProfiler_oops_do))
    FlatProfiler::oops_do(roots);
  if (!_process_strong_tasks->is_task_claimed(SH_PS_Management_oops_do))
    Management::oops_do(roots);
  if (!_process_strong_tasks->is_task_claimed(SH_PS_jvmti_oops_do))
    JvmtiExport::oops_do(roots);

  if (!_process_strong_tasks->is_task_claimed(SH_PS_SystemDictionary_oops_do)) {
    // SO_AllClasses: scan every loaded class; SO_SystemClasses: only
    // the always-strong subset.
    if (so & SO_AllClasses) {
      SystemDictionary::oops_do(roots);
    } else if (so & SO_SystemClasses) {
      SystemDictionary::always_strong_oops_do(roots);
    }
  }

  if (!_process_strong_tasks->is_task_claimed(SH_PS_StringTable_oops_do)) {
    // Interned strings are strong roots when explicitly requested, or
    // whenever they live outside a perm gen that is not being collected.
    if (so & SO_Strings || (!collecting_perm_gen && !JavaObjectsInPerm)) {
      StringTable::oops_do(roots);
    }
    if (JavaObjectsInPerm) {
      // Verify the string table contents are in the perm gen
      NOT_PRODUCT(StringTable::oops_do(&assert_is_perm_closure));
    }
  }

  if (!_process_strong_tasks->is_task_claimed(SH_PS_CodeCache_oops_do)) {
    if (so & SO_CodeCache) {
      // (Currently, CMSCollector uses this to do intermediate-strength collections.)
      assert(collecting_perm_gen, "scanning all of code cache");
      assert(code_roots != NULL, "must supply closure for code cache");
      if (code_roots != NULL) {
        CodeCache::blobs_do(code_roots);
      }
    } else if (so & (SO_SystemClasses|SO_AllClasses)) {
      if (!collecting_perm_gen) {
        // If we are collecting from class statics, but we are not going to
        // visit all of the CodeCache, collect from the non-perm roots if any.
        // This makes the code cache function temporarily as a source of strong
        // roots for oops, until the next major collection.
        //
        // If collecting_perm_gen is true, we require that this phase will call
        // CodeCache::do_unloading. This will kill off nmethods with expired
        // weak references, such as stale invokedynamic targets.
        CodeCache::scavenge_root_nmethods_do(code_roots);
      }
    }
    // Verify that the code cache contents are not subject to
    // movement by a scavenging collection.
    DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, /*do_marking=*/ false));
    DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
  }

  if (!collecting_perm_gen) {
    // All threads perform this; coordination is handled internally.

    rem_set()->younger_refs_iterate(perm_gen(), perm_blk);
  }
  // Mark the current pass complete so the claim slots can be reused.
  _process_strong_tasks->all_tasks_completed();
}
duke@435 222
// A BoolObjectClosure that answers "alive" for every object; used in
// process_weak_roots to treat all weak JNI handle referents as live.
class AlwaysTrueClosure: public BoolObjectClosure {
public:
  void do_object(oop p) { ShouldNotReachHere(); }  // never iterated
  bool do_object_b(oop p) { return true; }
};
static AlwaysTrueClosure always_true;
duke@435 229
duke@435 230 class SkipAdjustingSharedStrings: public OopClosure {
duke@435 231 OopClosure* _clo;
duke@435 232 public:
duke@435 233 SkipAdjustingSharedStrings(OopClosure* clo) : _clo(clo) {}
duke@435 234
coleenp@548 235 virtual void do_oop(oop* p) {
duke@435 236 oop o = (*p);
duke@435 237 if (!o->is_shared_readwrite()) {
duke@435 238 _clo->do_oop(p);
duke@435 239 }
duke@435 240 }
coleenp@548 241 virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
duke@435 242 };
duke@435 243
duke@435 244 // Unmarked shared Strings in the StringTable (which got there due to
duke@435 245 // being in the constant pools of as-yet unloaded shared classes) were
duke@435 246 // not marked and therefore did not have their mark words preserved.
duke@435 247 // These entries are also deliberately not purged from the string
duke@435 248 // table during unloading of unmarked strings. If an identity hash
duke@435 249 // code was computed for any of these objects, it will not have been
duke@435 250 // cleared to zero during the forwarding process or by the
duke@435 251 // RecursiveAdjustSharedObjectClosure, and will be confused by the
duke@435 252 // adjusting process as a forwarding pointer. We need to skip
duke@435 253 // forwarding StringTable entries which contain unmarked shared
duke@435 254 // Strings. Actually, since shared strings won't be moving, we can
duke@435 255 // just skip adjusting any shared entries in the string table.
duke@435 256
duke@435 257 void SharedHeap::process_weak_roots(OopClosure* root_closure,
jrose@1424 258 CodeBlobClosure* code_roots,
duke@435 259 OopClosure* non_root_closure) {
duke@435 260 // Global (weak) JNI handles
duke@435 261 JNIHandles::weak_oops_do(&always_true, root_closure);
duke@435 262
jrose@1424 263 CodeCache::blobs_do(code_roots);
duke@435 264 if (UseSharedSpaces && !DumpSharedSpaces) {
duke@435 265 SkipAdjustingSharedStrings skip_closure(root_closure);
duke@435 266 StringTable::oops_do(&skip_closure);
duke@435 267 } else {
duke@435 268 StringTable::oops_do(root_closure);
duke@435 269 }
duke@435 270 }
duke@435 271
// Install the barrier set for this heap and publish it to oopDesc so
// oop code can reach it without going through the heap.
void SharedHeap::set_barrier_set(BarrierSet* bs) {
  _barrier_set = bs;
  // Cached barrier set for fast access in oops
  oopDesc::set_bs(bs);
}
duke@435 277
// Called after heap initialization; sets up reference processing.
void SharedHeap::post_initialize() {
  ref_processing_init();
}

// Initialize the reference processor of the permanent generation;
// subclasses extend this for their own generations.
void SharedHeap::ref_processing_init() {
  perm_gen()->ref_processor_init();
}
duke@435 285
duke@435 286 // Some utilities.
ysr@777 287 void SharedHeap::print_size_transition(outputStream* out,
ysr@777 288 size_t bytes_before,
duke@435 289 size_t bytes_after,
duke@435 290 size_t capacity) {
ysr@777 291 out->print(" %d%s->%d%s(%d%s)",
duke@435 292 byte_size_in_proper_unit(bytes_before),
duke@435 293 proper_unit_for_byte_size(bytes_before),
duke@435 294 byte_size_in_proper_unit(bytes_after),
duke@435 295 proper_unit_for_byte_size(bytes_after),
duke@435 296 byte_size_in_proper_unit(capacity),
duke@435 297 proper_unit_for_byte_size(capacity));
duke@435 298 }

mercurial