src/share/vm/memory/iterator.hpp

changeset 0
f90c822e73f8
child 6876
710a3c8b516e
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/share/vm/memory/iterator.hpp	Wed Apr 27 01:25:04 2016 +0800
     1.3 @@ -0,0 +1,348 @@
     1.4 +/*
     1.5 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
     1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.7 + *
     1.8 + * This code is free software; you can redistribute it and/or modify it
     1.9 + * under the terms of the GNU General Public License version 2 only, as
    1.10 + * published by the Free Software Foundation.
    1.11 + *
    1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    1.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    1.15 + * version 2 for more details (a copy is included in the LICENSE file that
    1.16 + * accompanied this code).
    1.17 + *
    1.18 + * You should have received a copy of the GNU General Public License version
    1.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    1.21 + *
    1.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    1.23 + * or visit www.oracle.com if you need additional information or have any
    1.24 + * questions.
    1.25 + *
    1.26 + */
    1.27 +
    1.28 +#ifndef SHARE_VM_MEMORY_ITERATOR_HPP
    1.29 +#define SHARE_VM_MEMORY_ITERATOR_HPP
    1.30 +
    1.31 +#include "memory/allocation.hpp"
    1.32 +#include "memory/memRegion.hpp"
    1.33 +#include "runtime/prefetch.hpp"
    1.34 +#include "utilities/top.hpp"
    1.35 +
    1.36 +// The following classes are C++ `closures` for iterating over objects, roots and spaces
    1.37 +
    1.38 +class CodeBlob;
    1.39 +class nmethod;
    1.40 +class ReferenceProcessor;
    1.41 +class DataLayout;
    1.42 +class KlassClosure;
    1.43 +class ClassLoaderData;
    1.44 +
    1.45 +// Closure provides abortability.
    1.46 +
    1.47 +class Closure : public StackObj {
    1.48 + protected:
    1.49 +  bool _abort;
    1.50 +  void set_abort() { _abort = true; }
    1.51 + public:
    1.52 +  Closure() : _abort(false) {}
    1.53 +  // A subtype can use this mechanism to indicate to some iterator mapping
    1.54 +  // functions that the iteration should cease.
    1.55 +  bool abort() { return _abort; }
    1.56 +  void clear_abort() { _abort = false; }
    1.57 +};
    1.58 +
// OopClosure is used for iterating through references to Java objects.
// Subclasses must handle both uncompressed (oop*) and compressed
// (narrowOop*) reference locations.

class OopClosure : public Closure {
 public:
  virtual void do_oop(oop* o) = 0;
  // Always-virtual entry point; default forwards to do_oop.
  virtual void do_oop_v(oop* o) { do_oop(o); }
  virtual void do_oop(narrowOop* o) = 0;
  virtual void do_oop_v(narrowOop* o) { do_oop(o); }
};
    1.68 +
// ExtendedOopClosure adds extra code to be run during oop iterations.
// This is needed by the GC and is extracted to a separate type to not
// pollute the OopClosure interface.
class ExtendedOopClosure : public OopClosure {
 public:
  // Associated reference processor, if any; NULL when default-constructed.
  ReferenceProcessor* _ref_processor;
  ExtendedOopClosure(ReferenceProcessor* rp) : _ref_processor(rp) { }
  ExtendedOopClosure() : OopClosure(), _ref_processor(NULL) { }

  // If the do_metadata functions return "true",
  // we invoke the following when running oop_iterate():
  //
  // 1) do_klass on the header klass pointer.
  // 2) do_klass on the klass pointer in the mirrors.
  // 3) do_class_loader_data on the class loader data in class loaders.
  //
  // The virtual (without suffix) and the non-virtual (with _nv suffix) need
  // to be updated together, or else the devirtualization will break.
  //
  // Providing default implementations of the _nv functions unfortunately
  // removes the compile-time safeness, but reduces the clutter for the
  // ExtendedOopClosures that don't need to walk the metadata. Currently,
  // only CMS needs these.

  virtual bool do_metadata() { return do_metadata_nv(); }
  bool do_metadata_v()       { return do_metadata(); }
  bool do_metadata_nv()      { return false; }

  virtual void do_klass(Klass* k)   { do_klass_nv(k); }
  void do_klass_v(Klass* k)         { do_klass(k); }
  // Hard failure by default: a closure answering true to do_metadata()
  // must override the metadata-walking functions.
  void do_klass_nv(Klass* k)        { ShouldNotReachHere(); }

  virtual void do_class_loader_data(ClassLoaderData* cld) { ShouldNotReachHere(); }

  // Controls how prefetching is done for invocations of this closure.
  Prefetch::style prefetch_style() { // Note that this is non-virtual.
    return Prefetch::do_none;
  }

  // True iff this closure may be safely applied more than once to an oop
  // location without an intervening "major reset" (like the end of a GC).
  virtual bool idempotent() { return false; }
  virtual bool apply_to_weak_ref_discovered_field() { return false; }
};
   1.113 +
   1.114 +// Wrapper closure only used to implement oop_iterate_no_header().
   1.115 +class NoHeaderExtendedOopClosure : public ExtendedOopClosure {
   1.116 +  OopClosure* _wrapped_closure;
   1.117 + public:
   1.118 +  NoHeaderExtendedOopClosure(OopClosure* cl) : _wrapped_closure(cl) {}
   1.119 +  // Warning: this calls the virtual version do_oop in the the wrapped closure.
   1.120 +  void do_oop_nv(oop* p)       { _wrapped_closure->do_oop(p); }
   1.121 +  void do_oop_nv(narrowOop* p) { _wrapped_closure->do_oop(p); }
   1.122 +
   1.123 +  void do_oop(oop* p)          { assert(false, "Only the _nv versions should be used");
   1.124 +                                 _wrapped_closure->do_oop(p); }
   1.125 +  void do_oop(narrowOop* p)    { assert(false, "Only the _nv versions should be used");
   1.126 +                                 _wrapped_closure->do_oop(p);}
   1.127 +};
   1.128 +
// Closure applied to Klass* values during class iteration.
class KlassClosure : public Closure {
 public:
  // Called for each Klass in the iteration.
  virtual void do_klass(Klass* k) = 0;
};
   1.133 +
// Adapter that presents an OopClosure as a KlassClosure.  do_klass is
// defined out of line; presumably it applies _oop_closure to the oops
// of the Klass -- confirm in the corresponding .cpp file.
class KlassToOopClosure : public KlassClosure {
  OopClosure* _oop_closure;
 public:
  KlassToOopClosure(OopClosure* oop_closure) : _oop_closure(oop_closure) {}
  virtual void do_klass(Klass* k);
};
   1.140 +
// Applies an OopClosure (directly, and via a KlassToOopClosure built over
// the same closure) to a ClassLoaderData.  The walk itself is implemented
// out of line in do_cld().
class CLDToOopClosure {
  OopClosure* _oop_closure;
  KlassToOopClosure _klass_closure;  // wraps the same oop closure
  // NOTE(review): presumably controls whether do_cld "claims" the CLD so
  // each CLD is processed at most once per iteration -- confirm in .cpp.
  bool _must_claim_cld;

 public:
  CLDToOopClosure(OopClosure* oop_closure, bool must_claim_cld = true) :
      _oop_closure(oop_closure),
      _klass_closure(oop_closure),
      _must_claim_cld(must_claim_cld) {}

  void do_cld(ClassLoaderData* cld);
};
   1.154 +
// ObjectClosure is used for iterating through an object space

class ObjectClosure : public Closure {
 public:
  // Called for each object in the iteration.
  virtual void do_object(oop obj) = 0;
};
   1.162 +
   1.163 +
// Predicate closure over objects; the meaning of the boolean result is
// defined by the iterator that applies it.
class BoolObjectClosure : public Closure {
 public:
  virtual bool do_object_b(oop obj) = 0;
};
   1.168 +
// Applies an oop closure to all ref fields in objects iterated over in an
// object iteration.
class ObjectToOopClosure: public ObjectClosure {
  ExtendedOopClosure* _cl;  // applied to each object's reference fields
public:
  // Defined out of line.
  void do_object(oop obj);
  ObjectToOopClosure(ExtendedOopClosure* cl) : _cl(cl) {}
};
   1.177 +
   1.178 +// A version of ObjectClosure with "memory" (see _previous_address below)
   1.179 +class UpwardsObjectClosure: public BoolObjectClosure {
   1.180 +  HeapWord* _previous_address;
   1.181 + public:
   1.182 +  UpwardsObjectClosure() : _previous_address(NULL) { }
   1.183 +  void set_previous(HeapWord* addr) { _previous_address = addr; }
   1.184 +  HeapWord* previous()              { return _previous_address; }
   1.185 +  // A return value of "true" can be used by the caller to decide
   1.186 +  // if this object's end should *NOT* be recorded in
   1.187 +  // _previous_address above.
   1.188 +  virtual bool do_object_bm(oop obj, MemRegion mr) = 0;
   1.189 +};
   1.190 +
// A version of ObjectClosure that is expected to be robust
// in the face of possibly uninitialized objects.
class ObjectClosureCareful : public ObjectClosure {
 public:
  // Both variants return a size_t; the _m variant additionally takes the
  // MemRegion of interest.  Exact return-value semantics are defined by
  // concrete subclasses -- see their call sites.
  virtual size_t do_object_careful_m(oop p, MemRegion mr) = 0;
  virtual size_t do_object_careful(oop p) = 0;
};
   1.198 +
   1.199 +// The following are used in CompactibleFreeListSpace and
   1.200 +// ConcurrentMarkSweepGeneration.
   1.201 +
// Blk closure (abstract class)
class BlkClosure : public StackObj {
 public:
  // Applied to a block starting at addr; returns a size_t whose meaning
  // is defined by the iterator -- NOTE(review): presumably the size of
  // the block processed; confirm at call sites.
  virtual size_t do_blk(HeapWord* addr) = 0;
};
   1.207 +
// A version of BlkClosure that is expected to be robust
// in the face of possibly uninitialized objects.
class BlkClosureCareful : public BlkClosure {
 public:
  // Deliberately disabled: users of this closure must go through the
  // careful variant below.
  size_t do_blk(HeapWord* addr) {
    guarantee(false, "call do_blk_careful instead");
    return 0;
  }
  virtual size_t do_blk_careful(HeapWord* addr) = 0;
};
   1.218 +
   1.219 +// SpaceClosure is used for iterating over spaces
   1.220 +
   1.221 +class Space;
   1.222 +class CompactibleSpace;
   1.223 +
class SpaceClosure : public StackObj {
 public:
  // Called for each space in the iteration.
  virtual void do_space(Space* s) = 0;
};
   1.229 +
class CompactibleSpaceClosure : public StackObj {
 public:
  // Called for each compactible space in the iteration.
  virtual void do_space(CompactibleSpace* s) = 0;
};
   1.235 +
   1.236 +
   1.237 +// CodeBlobClosure is used for iterating through code blobs
   1.238 +// in the code cache or on thread stacks
   1.239 +
class CodeBlobClosure : public Closure {
 public:
  // Called for each code blob in the iteration.
  virtual void do_code_blob(CodeBlob* cb) = 0;
};
   1.245 +
   1.246 +
// CodeBlobClosure variant that deduplicates: each unique nmethod is
// delivered to the subclass at most once per marking cycle.
class MarkingCodeBlobClosure : public CodeBlobClosure {
 public:
  // Called for each code blob, but at most once per unique blob.
  virtual void do_newly_marked_nmethod(nmethod* nm) = 0;

  // Defined out of line; filters out blobs already marked this cycle
  // (see the sketched implementation below).
  virtual void do_code_blob(CodeBlob* cb);
    // = { if (!nmethod(cb)->test_set_oops_do_mark())  do_newly_marked_nmethod(cb); }

  // RAII scope bracketing a marking iteration with the nmethod
  // oops-do-marking prologue/epilogue (when activated).
  class MarkScope : public StackObj {
  protected:
    bool _active;
  public:
    MarkScope(bool activate = true);
      // = { if (active) nmethod::oops_do_marking_prologue(); }
    ~MarkScope();
      // = { if (active) nmethod::oops_do_marking_epilogue(); }
  };
};
   1.265 +
   1.266 +
// Applies an oop closure to all ref fields in code blobs
// iterated over in an object iteration.
class CodeBlobToOopClosure: public MarkingCodeBlobClosure {
  OopClosure* _cl;   // applied to the oops of each nmethod
  bool _do_marking;  // if true, defer to the deduplicating base class
public:
  // Defined out of line; applies _cl to the nmethod's oops (see sketch).
  virtual void do_newly_marked_nmethod(nmethod* cb);
    // = { cb->oops_do(_cl); }
  // Defined out of line; with marking enabled it delegates to the base
  // class (at most once per blob), otherwise applies _cl unconditionally.
  virtual void do_code_blob(CodeBlob* cb);
    // = { if (_do_marking)  super::do_code_blob(cb); else cb->oops_do(_cl); }
  CodeBlobToOopClosure(OopClosure* cl, bool do_marking)
    : _cl(cl), _do_marking(do_marking) {}
};
   1.280 +
   1.281 +
   1.282 +
   1.283 +// MonitorClosure is used for iterating over monitors in the monitors cache
   1.284 +
   1.285 +class ObjectMonitor;
   1.286 +
class MonitorClosure : public StackObj {
 public:
  // Called for each monitor in the monitors cache.
  virtual void do_monitor(ObjectMonitor* m) = 0;
};
   1.292 +
// A closure that is applied without any arguments.
class VoidClosure : public StackObj {
 public:
  // I would have liked to declare this a pure virtual, but that breaks
  // in mysterious ways, for unknown reasons.
  // (Non-pure: the out-of-line definition is expected to fail if called
  // without being overridden.)
  virtual void do_void();
};
   1.300 +
   1.301 +
// YieldClosure is intended for use by iteration loops
// to incrementalize their work, allowing interleaving
// of an interruptible task so as to allow other
// threads to run (which may not otherwise be able to access
// exclusive resources, for instance). Additionally, the
// closure also allows for aborting an ongoing iteration
// by means of checking the return value from the polling
// call.
class YieldClosure : public StackObj {
  public:
   // Polled periodically by the iterating loop; returning true asks the
   // loop to yield/return control to its caller.
   virtual bool should_return() = 0;
};
   1.314 +
// Abstract closure for serializing data (read or write).

class SerializeClosure : public Closure {
public:
  // Return bool indicating whether closure implements read or write.
  virtual bool reading() const = 0;

  // Read/write the void pointer pointed to by p.
  virtual void do_ptr(void** p) = 0;

  // Read/write the region specified.
  virtual void do_region(u_char* start, size_t size) = 0;

  // Check/write the tag.  If reading, then compare the tag against
  // the passed in value and fail if they don't match.  This allows
  // for verification that sections of the serialized data are of the
  // correct length.
  virtual void do_tag(int tag) = 0;
};
   1.334 +
   1.335 +class SymbolClosure : public StackObj {
   1.336 + public:
   1.337 +  virtual void do_symbol(Symbol**) = 0;
   1.338 +
   1.339 +  // Clear LSB in symbol address; it can be set by CPSlot.
   1.340 +  static Symbol* load_symbol(Symbol** p) {
   1.341 +    return (Symbol*)(intptr_t(*p) & ~1);
   1.342 +  }
   1.343 +
   1.344 +  // Store symbol, adjusting new pointer if the original pointer was adjusted
   1.345 +  // (symbol references in constant pool slots have their LSB set to 1).
   1.346 +  static void store_symbol(Symbol** p, Symbol* sym) {
   1.347 +    *p = (Symbol*)(intptr_t(sym) | (intptr_t(*p) & 1));
   1.348 +  }
   1.349 +};
   1.350 +
   1.351 +#endif // SHARE_VM_MEMORY_ITERATOR_HPP

mercurial