Mon, 07 Jul 2014 10:12:40 +0200
8049421: G1 Class Unloading after completing a concurrent mark cycle
Reviewed-by: tschatzl, ehelin, brutisso, coleenp, roland, iveresov
Contributed-by: stefan.karlsson@oracle.com, mikael.gerdin@oracle.com
1 /*
2 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #ifndef SHARE_VM_MEMORY_ITERATOR_HPP
26 #define SHARE_VM_MEMORY_ITERATOR_HPP
28 #include "memory/allocation.hpp"
29 #include "memory/memRegion.hpp"
30 #include "runtime/prefetch.hpp"
31 #include "utilities/top.hpp"
33 // The following classes are C++ `closures` for iterating over objects, roots and spaces
35 class CodeBlob;
36 class nmethod;
37 class ReferenceProcessor;
38 class DataLayout;
39 class KlassClosure;
40 class ClassLoaderData;
// Closure is the root of the closure hierarchy; it provides abortability.
class Closure : public StackObj {
 protected:
  bool _abort;   // Set when a subtype requests that iteration stop early.
  // Called by subtypes to request that the current iteration cease.
  void set_abort() { _abort = true; }
 public:
  Closure() : _abort(false) {}
  // A subtype can use this mechanism to indicate to some iterator mapping
  // functions that the iteration should cease.
  bool abort() { return _abort; }
  void clear_abort() { _abort = false; }
};
// OopClosure is used for iterating through references to Java objects.
class OopClosure : public Closure {
 public:
  // Apply the closure to the (uncompressed) oop stored at the given location.
  virtual void do_oop(oop* o) = 0;
  // Always-dispatched variant; simply forwards to the virtual do_oop.
  virtual void do_oop_v(oop* o) { do_oop(o); }
  // Same as above, for compressed (narrow) oop locations.
  virtual void do_oop(narrowOop* o) = 0;
  virtual void do_oop_v(narrowOop* o) { do_oop(o); }
};
// ExtendedOopClosure adds extra code to be run during oop iterations.
// This is needed by the GC and is extracted to a separate type to not
// pollute the OopClosure interface.
class ExtendedOopClosure : public OopClosure {
 public:
  // Optional reference processor consulted during iteration; may be NULL.
  ReferenceProcessor* _ref_processor;
  ExtendedOopClosure(ReferenceProcessor* rp) : _ref_processor(rp) { }
  ExtendedOopClosure() : OopClosure(), _ref_processor(NULL) { }

  // If the do_metadata functions return "true",
  // we invoke the following when running oop_iterate():
  //
  // 1) do_klass on the header klass pointer.
  // 2) do_klass on the klass pointer in the mirrors.
  // 3) do_class_loader_data on the class loader data in class loaders.
  //
  // The virtual (without suffix) and the non-virtual (with _nv suffix) need
  // to be updated together, or else the devirtualization will break.
  //
  // Providing default implementations of the _nv functions unfortunately
  // removes the compile-time safeness, but reduces the clutter for the
  // ExtendedOopClosures that don't need to walk the metadata.
  // Currently, only CMS and G1 need these.

  virtual bool do_metadata() { return do_metadata_nv(); }
  bool do_metadata_v()  { return do_metadata(); }
  bool do_metadata_nv() { return false; }

  virtual void do_klass(Klass* k)   { do_klass_nv(k); }
  void do_klass_v(Klass* k)         { do_klass(k); }
  // Subclasses whose do_metadata returns true must override this (see above).
  void do_klass_nv(Klass* k)        { ShouldNotReachHere(); }

  // Subclasses whose do_metadata returns true must override this (see above).
  virtual void do_class_loader_data(ClassLoaderData* cld) { ShouldNotReachHere(); }

  // Controls how prefetching is done for invocations of this closure.
  Prefetch::style prefetch_style() { // Note that this is non-virtual.
    return Prefetch::do_none;
  }

  // True iff this closure may be safely applied more than once to an oop
  // location without an intervening "major reset" (like the end of a GC).
  virtual bool idempotent() { return false; }
  virtual bool apply_to_weak_ref_discovered_field() { return false; }
};
// Wrapper closure only used to implement oop_iterate_no_header().
class NoHeaderExtendedOopClosure : public ExtendedOopClosure {
  OopClosure* _wrapped_closure;   // The closure actually applied to each oop.
 public:
  NoHeaderExtendedOopClosure(OopClosure* cl) : _wrapped_closure(cl) {}
  // Warning: this calls the virtual version do_oop in the wrapped closure.
  void do_oop_nv(oop* p)       { _wrapped_closure->do_oop(p); }
  void do_oop_nv(narrowOop* p) { _wrapped_closure->do_oop(p); }

  // The virtual entry points assert: callers are expected to reach this
  // closure only through the devirtualized (_nv) versions above.
  void do_oop(oop* p)       { assert(false, "Only the _nv versions should be used");
                              _wrapped_closure->do_oop(p); }
  void do_oop(narrowOop* p) { assert(false, "Only the _nv versions should be used");
                              _wrapped_closure->do_oop(p);}
};
// Abstract closure applied to each Klass during klass iteration.
class KlassClosure : public Closure {
 public:
  virtual void do_klass(Klass* k) = 0;
};
// Abstract closure applied to each ClassLoaderData during CLD iteration.
class CLDClosure : public Closure {
 public:
  virtual void do_cld(ClassLoaderData* cld) = 0;
};
// Adapter: presents a KlassClosure interface backed by an OopClosure
// (do_klass is defined out-of-line).
class KlassToOopClosure : public KlassClosure {
  friend class MetadataAwareOopClosure;
  friend class MetadataAwareOopsInGenClosure;

  OopClosure* _oop_closure;   // May be NULL until initialize() is called.

  // Used when _oop_closure couldn't be set in an initialization list.
  void initialize(OopClosure* oop_closure) {
    assert(_oop_closure == NULL, "Should only be called once");
    _oop_closure = oop_closure;
  }

 public:
  KlassToOopClosure(OopClosure* oop_closure = NULL) : _oop_closure(oop_closure) {}

  virtual void do_klass(Klass* k);
};
// Adapter: presents a CLDClosure interface backed by an OopClosure
// (do_cld is defined out-of-line).
class CLDToOopClosure : public CLDClosure {
  OopClosure*       _oop_closure;
  KlassToOopClosure _klass_closure;   // Built over the same oop closure.
  // NOTE(review): presumably gates whether do_cld claims the CLD before
  // walking it — confirm against the out-of-line definition.
  bool              _must_claim_cld;
 public:
  CLDToOopClosure(OopClosure* oop_closure, bool must_claim_cld = true) :
    _oop_closure(oop_closure),
    _klass_closure(oop_closure),
    _must_claim_cld(must_claim_cld) {}

  void do_cld(ClassLoaderData* cld);
};
// Applies both a KlassClosure and an OopClosure while visiting a
// ClassLoaderData (do_cld is defined out-of-line).
class CLDToKlassAndOopClosure : public CLDClosure {
  friend class SharedHeap;
  friend class G1CollectedHeap;
 protected:
  OopClosure*   _oop_closure;
  KlassClosure* _klass_closure;
  // NOTE(review): presumably gates whether do_cld claims the CLD before
  // walking it — confirm against the out-of-line definition.
  bool          _must_claim_cld;
 public:
  CLDToKlassAndOopClosure(KlassClosure* klass_closure,
                          OopClosure* oop_closure,
                          bool must_claim_cld) :
                              _oop_closure(oop_closure),
                              _klass_closure(klass_closure),
                              _must_claim_cld(must_claim_cld) {}
  void do_cld(ClassLoaderData* cld);
};
// The base class for all concurrent marking closures
// that participate in class unloading.
// It's used to proxy through the metadata to the oops defined in them.
class MetadataAwareOopClosure: public ExtendedOopClosure {
  KlassToOopClosure _klass_closure;   // Routes klass-embedded oops back to this closure.

 public:
  MetadataAwareOopClosure() : ExtendedOopClosure() {
    _klass_closure.initialize(this);
  }
  MetadataAwareOopClosure(ReferenceProcessor* rp) : ExtendedOopClosure(rp) {
    _klass_closure.initialize(this);
  }

  // Always walk metadata. The virtual and _nv versions must stay in sync;
  // see the devirtualization comment on ExtendedOopClosure.
  virtual bool do_metadata()    { return do_metadata_nv(); }
  inline  bool do_metadata_nv() { return true; }

  virtual void do_klass(Klass* k);
  void do_klass_nv(Klass* k);

  virtual void do_class_loader_data(ClassLoaderData* cld);
};
// ObjectClosure is used for iterating through an object space

class ObjectClosure : public Closure {
 public:
  // Called for each object.
  virtual void do_object(oop obj) = 0;
};
// Predicate closure over objects: do_object_b answers a yes/no question
// about each object.
class BoolObjectClosure : public Closure {
 public:
  virtual bool do_object_b(oop obj) = 0;
};
// Applies an oop closure to all ref fields in objects iterated over in an
// object iteration.
class ObjectToOopClosure: public ObjectClosure {
  ExtendedOopClosure* _cl;   // Applied to each reference field of the object.
 public:
  // Defined out-of-line; iterates the object's oop fields with _cl.
  void do_object(oop obj);
  ObjectToOopClosure(ExtendedOopClosure* cl) : _cl(cl) {}
};
// A version of ObjectClosure that is expected to be robust
// in the face of possibly uninitialized objects.
class ObjectClosureCareful : public ObjectClosure {
 public:
  // Variant that is additionally restricted to the given MemRegion.
  virtual size_t do_object_careful_m(oop p, MemRegion mr) = 0;
  virtual size_t do_object_careful(oop p) = 0;
};
// The following are used in CompactibleFreeListSpace and
// ConcurrentMarkSweepGeneration.

// Blk closure (abstract class)
class BlkClosure : public StackObj {
 public:
  // Process the block starting at addr; returns a size_t (presumably the
  // block size in HeapWords — confirm against implementers).
  virtual size_t do_blk(HeapWord* addr) = 0;
};
// A version of BlkClosure that is expected to be robust
// in the face of possibly uninitialized objects.
class BlkClosureCareful : public BlkClosure {
 public:
  // Deliberately fails at runtime: callers must use do_blk_careful instead.
  size_t do_blk(HeapWord* addr) {
    guarantee(false, "call do_blk_careful instead");
    return 0;
  }
  virtual size_t do_blk_careful(HeapWord* addr) = 0;
};
259 // SpaceClosure is used for iterating over spaces
261 class Space;
262 class CompactibleSpace;
// SpaceClosure is used for iterating over spaces.
class SpaceClosure : public StackObj {
 public:
  // Called for each space
  virtual void do_space(Space* s) = 0;
};
// Same as SpaceClosure, but for CompactibleSpaces.
class CompactibleSpaceClosure : public StackObj {
 public:
  // Called for each compactible space
  virtual void do_space(CompactibleSpace* s) = 0;
};
// CodeBlobClosure is used for iterating through code blobs
// in the code cache or on thread stacks

class CodeBlobClosure : public Closure {
 public:
  // Called for each code blob.
  virtual void do_code_blob(CodeBlob* cb) = 0;
};
// Applies an oop closure to all ref fields in code blobs
// iterated over in an object iteration.
class CodeBlobToOopClosure : public CodeBlobClosure {
  OopClosure* _cl;            // Applied to each oop embedded in the blob.
  // NOTE(review): presumably makes do_nmethod also update relocation info
  // when oops move — confirm against the out-of-line definition.
  bool _fix_relocations;
 protected:
  // Defined out-of-line; handles the nmethod subcase of do_code_blob.
  void do_nmethod(nmethod* nm);
 public:
  CodeBlobToOopClosure(OopClosure* cl, bool fix_relocations) : _cl(cl), _fix_relocations(fix_relocations) {}
  virtual void do_code_blob(CodeBlob* cb);

  // Named constant for the fix_relocations constructor argument.
  const static bool FixRelocations = true;
};
// A CodeBlobToOopClosure that visits each blob at most once per marking
// cycle (see do_code_blob).
class MarkingCodeBlobClosure : public CodeBlobToOopClosure {
 public:
  MarkingCodeBlobClosure(OopClosure* cl, bool fix_relocations) : CodeBlobToOopClosure(cl, fix_relocations) {}
  // Called for each code blob, but at most once per unique blob.
  virtual void do_code_blob(CodeBlob* cb);

  // RAII scope bracketing a marking pass over nmethods; the constructor
  // and destructor are defined out-of-line.
  class MarkScope : public StackObj {
   protected:
    bool _active;
   public:
    MarkScope(bool activate = true);
    // = { if (active) nmethod::oops_do_marking_prologue(); }
    ~MarkScope();
    // = { if (active) nmethod::oops_do_marking_epilogue(); }
  };
};
318 // MonitorClosure is used for iterating over monitors in the monitors cache
320 class ObjectMonitor;
// MonitorClosure is used for iterating over monitors in the monitors cache.
class MonitorClosure : public StackObj {
 public:
  // called for each monitor in cache
  virtual void do_monitor(ObjectMonitor* m) = 0;
};
// A closure that is applied without any arguments.
class VoidClosure : public StackObj {
 public:
  // I would have liked to declare this a pure virtual, but that breaks
  // in mysterious ways, for unknown reasons.
  virtual void do_void();
};
// YieldClosure is intended for use by iteration loops
// to incrementalize their work, allowing interleaving
// of an interruptible task so as to allow other
// threads to run (which may not otherwise be able to access
// exclusive resources, for instance). Additionally, the
// closure also allows for aborting an ongoing iteration
// by means of checking the return value from the polling
// call.
class YieldClosure : public StackObj {
 public:
  // Polled by iteration loops; returning true requests that the loop
  // stop (or yield) at this point.
  virtual bool should_return() = 0;
};
// Abstract closure for serializing data (read or write).

class SerializeClosure : public Closure {
 public:
  // Return bool indicating whether closure implements read or write.
  virtual bool reading() const = 0;

  // Read/write the void pointer pointed to by p.
  virtual void do_ptr(void** p) = 0;

  // Read/write the region specified.
  virtual void do_region(u_char* start, size_t size) = 0;

  // Check/write the tag. If reading, then compare the tag against
  // the passed in value and fail if they don't match. This allows
  // for verification that sections of the serialized data are of the
  // correct length.
  virtual void do_tag(int tag) = 0;
};
370 class SymbolClosure : public StackObj {
371 public:
372 virtual void do_symbol(Symbol**) = 0;
374 // Clear LSB in symbol address; it can be set by CPSlot.
375 static Symbol* load_symbol(Symbol** p) {
376 return (Symbol*)(intptr_t(*p) & ~1);
377 }
379 // Store symbol, adjusting new pointer if the original pointer was adjusted
380 // (symbol references in constant pool slots have their LSB set to 1).
381 static void store_symbol(Symbol** p, Symbol* sym) {
382 *p = (Symbol*)(intptr_t(sym) | (intptr_t(*p) & 1));
383 }
384 };
// Helper defines for ExtendedOopClosure.

// Verify that the devirtualized (_nv-suffixed) and virtual do_metadata agree,
// then act as the condition of the following statement/block.
#define if_do_metadata_checked(closure, nv_suffix)                    \
  /* Make sure the non-virtual and the virtual versions match. */     \
  assert(closure->do_metadata##nv_suffix() == closure->do_metadata(), \
         "Inconsistency in do_metadata");                             \
  if (closure->do_metadata##nv_suffix())

// Assert that the closure does not request metadata walking at this call site.
#define assert_should_ignore_metadata(closure, nv_suffix)                                  \
  assert(!closure->do_metadata##nv_suffix(), "Code to handle metadata is not implemented")
398 #endif // SHARE_VM_MEMORY_ITERATOR_HPP