Fri, 21 Feb 2014 10:01:20 +0100
8035393: Use CLDClosure instead of CLDToOopClosure in frame::oops_interpreted_do
Reviewed-by: tschatzl, coleenp
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_SPACE_HPP
#define SHARE_VM_MEMORY_SPACE_HPP

#include "memory/allocation.hpp"
#include "memory/blockOffsetTable.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/iterator.hpp"
#include "memory/memRegion.hpp"
#include "memory/watermark.hpp"
#include "oops/markOop.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/macros.hpp"
#include "utilities/workgroup.hpp"

// A space is an abstraction for the "storage units" backing
// up the generation abstraction. It includes specific
// implementations for keeping track of free and used space,
// for iterating over objects and free blocks, etc.

// Here's the Space hierarchy:
//
// - Space               -- an abstract base class describing a heap area
//   - CompactibleSpace  -- a space supporting compaction
//     - CompactibleFreeListSpace -- (used for CMS generation)
//     - ContiguousSpace -- a compactible space in which all free space
//                          is contiguous
//       - EdenSpace     -- contiguous space used as nursery
//         - ConcEdenSpace -- contiguous space with a 'soft end safe'
//                            allocation
//       - OffsetTableContigSpace -- contiguous space with a block offset array
//                          that allows "fast" block_start calls
//         - TenuredSpace -- (used for TenuredGeneration)

// Forward decls.
class Space;
class BlockOffsetArray;
class BlockOffsetArrayContigSpace;
class Generation;
class CompactibleSpace;
class BlockOffsetTable;
class GenRemSet;
class CardTableRS;
class DirtyCardToOopClosure;

// An oop closure that is circumscribed by a filtering memory region.
class SpaceMemRegionOopsIterClosure: public ExtendedOopClosure {
 private:
  ExtendedOopClosure* _cl;
  MemRegion _mr;
 protected:
  template <class T> void do_oop_work(T* p) {
    if (_mr.contains(p)) {
      _cl->do_oop(p);
    }
  }
 public:
  SpaceMemRegionOopsIterClosure(ExtendedOopClosure* cl, MemRegion mr):
    _cl(cl), _mr(mr) {}
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  virtual bool do_metadata() {
    // _cl is of type ExtendedOopClosure instead of OopClosure, so that we can check this.
    assert(!_cl->do_metadata(), "I've checked all call paths, this shouldn't happen.");
    return false;
  }
  virtual void do_klass(Klass* k)                         { ShouldNotReachHere(); }
  virtual void do_class_loader_data(ClassLoaderData* cld) { ShouldNotReachHere(); }
};
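
// Illustrative use (a sketch, not part of this interface; "sp", "mr" and
// "cl" are assumed to be supplied by the caller): wrap an existing
// ExtendedOopClosure so that only fields whose addresses lie in "mr"
// are processed.
//
//   void filtered_oop_iterate(Space* sp, MemRegion mr, ExtendedOopClosure* cl) {
//     SpaceMemRegionOopsIterClosure filtered(cl, mr);
//     sp->oop_iterate(&filtered);  // fields outside mr are silently skipped
//   }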

// A Space describes a heap area. Class Space is an abstract
// base class.
//
// Space supports allocation, size computation, and GC support.
//
// Invariant: bottom() and end() are on page_size boundaries and
// bottom() <= top() <= end()
// top() is inclusive and end() is exclusive.

class Space: public CHeapObj<mtGC> {
  friend class VMStructs;
 protected:
  HeapWord* _bottom;
  HeapWord* _end;

  // Used in support of save_marks()
  HeapWord* _saved_mark_word;

  MemRegionClosure* _preconsumptionDirtyCardClosure;

  // A sequential tasks done structure. This supports
  // parallel GC, where we have threads dynamically
  // claiming sub-tasks from a larger parallel task.
  SequentialSubTasksDone _par_seq_tasks;

  Space():
    _bottom(NULL), _end(NULL), _preconsumptionDirtyCardClosure(NULL) { }

 public:
  // Accessors
  HeapWord* bottom() const                 { return _bottom; }
  HeapWord* end() const                    { return _end;    }
  virtual void set_bottom(HeapWord* value) { _bottom = value; }
  virtual void set_end(HeapWord* value)    { _end = value; }

  virtual HeapWord* saved_mark_word() const { return _saved_mark_word; }

  void set_saved_mark_word(HeapWord* p) { _saved_mark_word = p; }

  MemRegionClosure* preconsumptionDirtyCardClosure() const {
    return _preconsumptionDirtyCardClosure;
  }
  void setPreconsumptionDirtyCardClosure(MemRegionClosure* cl) {
    _preconsumptionDirtyCardClosure = cl;
  }

  // Returns a subregion of the space containing all the objects in
  // the space.
  virtual MemRegion used_region() const { return MemRegion(bottom(), end()); }

  // Returns a region that is guaranteed to contain (at least) all objects
  // allocated at the time of the last call to "save_marks". If the space
  // initializes its DirtyCardToOopClosure's specifying the "contig" option
  // (that is, if the space is contiguous), then this region must contain only
  // such objects: the memregion will be from the bottom of the region to the
  // saved mark. Otherwise, the "obj_allocated_since_save_marks" method of
  // the space must distinguish between objects in the region allocated before
  // and after the call to save marks.
  virtual MemRegion used_region_at_save_marks() const {
    return MemRegion(bottom(), saved_mark_word());
  }

  // Initialization.
  // "initialize" should be called once on a space, before it is used for
  // any purpose. The "mr" argument gives the bounds of the space, and
  // the "clear_space" argument should be true unless the memory in "mr" is
  // known to be zeroed.
  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);

  // The "clear" method must be called on a region that may have
  // had allocation performed in it, but is now to be considered empty.
  virtual void clear(bool mangle_space);

  // For detecting GC bugs. Should only be called at GC boundaries, since
  // some unused space may be used as scratch space during GCs.
  // Default implementation does nothing. We also call this when expanding
  // a space to satisfy an allocation request. See bug #4668531
  virtual void mangle_unused_area() {}
  virtual void mangle_unused_area_complete() {}
  virtual void mangle_region(MemRegion mr) {}

  // Testers
  bool is_empty() const  { return used() == 0; }
  bool not_empty() const { return used() > 0; }

  // Returns true iff the given space contains the
  // given address as part of an allocated object. For
  // certain kinds of spaces, this might be a potentially
  // expensive operation. To prevent performance problems
  // on account of its inadvertent use in product JVMs,
  // we restrict its use to assertion checks only.
  virtual bool is_in(const void* p) const = 0;

  // Returns true iff the given reserved memory of the space contains the
  // given address.
  bool is_in_reserved(const void* p) const { return _bottom <= p && p < _end; }

  // Returns true iff the given block is not allocated.
  virtual bool is_free_block(const HeapWord* p) const = 0;

  // Test whether p is double-aligned
  static bool is_aligned(void* p) {
    return ((intptr_t)p & (sizeof(double)-1)) == 0;
  }
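  // For example, with sizeof(double) == 8 the test above reduces to
  // ((intptr_t)p & 7) == 0, so an address ending in 0x8 is aligned
  // while one ending in 0x4 is not.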

  // Size computations. Sizes are in bytes.
  size_t capacity() const     { return byte_size(bottom(), end()); }
  virtual size_t used() const = 0;
  virtual size_t free() const = 0;

  // Iterate over all the ref-containing fields of all objects in the
  // space, calling "cl.do_oop" on each. Fields in objects allocated by
  // applications of the closure are not included in the iteration.
  virtual void oop_iterate(ExtendedOopClosure* cl);

  // Same as above, restricted to the intersection of a memory region and
  // the space. Fields in objects allocated by applications of the closure
  // are not included in the iteration.
  virtual void oop_iterate(MemRegion mr, ExtendedOopClosure* cl) = 0;

  // Iterate over all objects in the space, calling "cl.do_object" on
  // each. Objects allocated by applications of the closure are not
  // included in the iteration.
  virtual void object_iterate(ObjectClosure* blk) = 0;
  // Similar to object_iterate() except only iterates over
  // objects whose internal references point to objects in the space.
  virtual void safe_object_iterate(ObjectClosure* blk) = 0;

  // Iterate over all objects that intersect with mr, calling "cl->do_object"
  // on each. There is an exception to this: if this closure has already
  // been invoked on an object, it may skip such objects in some cases. This is
  // most likely to happen in an "upwards" (ascending address) iteration of
  // MemRegions.
  virtual void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);

  // Iterate over as many initialized objects in the space as possible,
  // calling "cl.do_object_careful" on each. Return NULL if all objects
  // in the space (at the start of the iteration) were iterated over.
  // Return an address indicating the extent of the iteration in the
  // event that the iteration had to return because of finding an
  // uninitialized object in the space, or if the closure "cl"
  // signalled early termination.
  virtual HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
  virtual HeapWord* object_iterate_careful_m(MemRegion mr,
                                             ObjectClosureCareful* cl);

  // Create and return a new dirty card to oop closure. Can be
  // overridden to return the appropriate type of closure
  // depending on the type of space in which the closure will
  // operate. ResourceArea allocated.
  virtual DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
                                             CardTableModRefBS::PrecisionStyle precision,
                                             HeapWord* boundary = NULL);

  // If "p" is in the space, returns the address of the start of the
  // "block" that contains "p". We say "block" instead of "object" since
  // some heaps may not pack objects densely; a chunk may either be an
  // object or a non-object. If "p" is not in the space, return NULL.
  virtual HeapWord* block_start_const(const void* p) const = 0;

  // The non-const version may have benevolent side effects on the data
  // structure supporting these calls, possibly speeding up future calls.
  // The default implementation, however, is simply to call the const
  // version.
  inline virtual HeapWord* block_start(const void* p);

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object and the object is alive.
  virtual bool obj_is_alive(const HeapWord* addr) const;

  // Allocation (return NULL if full). Assumes the caller has established
  // mutually exclusive access to the space.
  virtual HeapWord* allocate(size_t word_size) = 0;

  // Allocation (return NULL if full). Enforces mutual exclusion internally.
  virtual HeapWord* par_allocate(size_t word_size) = 0;

  // Returns true if this object has been allocated since a
  // generation's "save_marks" call.
  virtual bool obj_allocated_since_save_marks(const oop obj) const = 0;

  // Mark-sweep-compact support: all spaces can update pointers to objects
  // moving as a part of compaction.
  virtual void adjust_pointers();

  // PrintHeapAtGC support
  virtual void print() const;
  virtual void print_on(outputStream* st) const;
  virtual void print_short() const;
  virtual void print_short_on(outputStream* st) const;

  // Accessor for parallel sequential tasks.
  SequentialSubTasksDone* par_seq_tasks() { return &_par_seq_tasks; }

  // If "this" is a ContiguousSpace, return it, else return NULL.
  virtual ContiguousSpace* toContiguousSpace() {
    return NULL;
  }

  // Debugging
  virtual void verify() const = 0;
};
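
// Illustrative allocation pattern (a sketch only; "sp" stands for any
// concrete Space subclass and the recovery policy is the caller's):
//
//   HeapWord* mem = sp->par_allocate(word_size);  // thread-safe variant
//   if (mem == NULL) {
//     // Space is full: the caller must expand the space, trigger a
//     // collection, or report allocation failure.
//   }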

// A MemRegionClosure (ResourceObj) whose "do_MemRegion" function applies an
// OopClosure to (the addresses of) all the ref-containing fields that could
// be modified by virtue of the given MemRegion being dirty. (Note that
// because of the imprecise nature of the write barrier, this may iterate
// over oops beyond the region.)
// This base type for dirty card to oop closures handles memory regions
// in non-contiguous spaces with no boundaries, and should be sub-classed
// to support other space types. See ContiguousSpaceDCTOC for a sub-class
// that works with ContiguousSpaces.

class DirtyCardToOopClosure: public MemRegionClosureRO {
 protected:
  ExtendedOopClosure* _cl;
  Space* _sp;
  CardTableModRefBS::PrecisionStyle _precision;
  HeapWord* _boundary;          // If non-NULL, process only non-NULL oops
                                // pointing below boundary.
  HeapWord* _min_done;          // ObjHeadPreciseArray precision requires
                                // a downwards traversal; this is the
                                // lowest location already done (or,
                                // alternatively, the lowest address that
                                // shouldn't be done again. NULL means infinity.)
  NOT_PRODUCT(HeapWord* _last_bottom;)
  NOT_PRODUCT(HeapWord* _last_explicit_min_done;)

  // Get the actual top of the area on which the closure will
  // operate, given where the top is assumed to be (the end of the
  // memory region passed to do_MemRegion) and where the object
  // at the top is assumed to start. For example, an object may
  // start at the top but actually extend past the assumed top,
  // in which case the top becomes the end of the object.
  virtual HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);

  // Walk the given memory region from bottom to (actual) top
  // looking for objects and applying the oop closure (_cl) to
  // them. The base implementation of this treats the area as
  // blocks, where a block may or may not be an object. Sub-
  // classes should override this to provide more accurate
  // or possibly more efficient walking.
  virtual void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top);

 public:
  DirtyCardToOopClosure(Space* sp, ExtendedOopClosure* cl,
                        CardTableModRefBS::PrecisionStyle precision,
                        HeapWord* boundary) :
    _sp(sp), _cl(cl), _precision(precision), _boundary(boundary),
    _min_done(NULL) {
    NOT_PRODUCT(_last_bottom = NULL);
    NOT_PRODUCT(_last_explicit_min_done = NULL);
  }

  void do_MemRegion(MemRegion mr);

  void set_min_done(HeapWord* min_done) {
    _min_done = min_done;
    NOT_PRODUCT(_last_explicit_min_done = _min_done);
  }
#ifndef PRODUCT
  void set_last_bottom(HeapWord* last_bottom) {
    _last_bottom = last_bottom;
  }
#endif
};
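
// Illustrative use (a sketch; "dirty_mr" and "cl" are assumed to come
// from the card-table scanning code, and the precision style is chosen
// by the caller):
//
//   DirtyCardToOopClosure* dcto_cl =
//     sp->new_dcto_cl(cl, CardTableModRefBS::Precise, NULL);
//   dcto_cl->do_MemRegion(dirty_mr);  // apply cl to fields on dirty cards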

// A structure to represent a point at which objects are being copied
// during compaction.
class CompactPoint : public StackObj {
 public:
  Generation* gen;
  CompactibleSpace* space;
  HeapWord* threshold;
  CompactPoint(Generation* _gen, CompactibleSpace* _space,
               HeapWord* _threshold) :
    gen(_gen), space(_space), threshold(_threshold) {}
};

// A space that supports compaction operations. This is usually, but not
// necessarily, a space that is normally contiguous. But, for example, a
// free-list-based space whose normal collection is a mark-sweep without
// compaction could still support compaction in full GCs.

class CompactibleSpace: public Space {
  friend class VMStructs;
  friend class CompactibleFreeListSpace;
 private:
  HeapWord* _compaction_top;
  CompactibleSpace* _next_compaction_space;

 public:
  CompactibleSpace() :
    _compaction_top(NULL), _next_compaction_space(NULL) {}

  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
  virtual void clear(bool mangle_space);

  // Used temporarily during a compaction phase to hold the value
  // top should have when compaction is complete.
  HeapWord* compaction_top() const { return _compaction_top; }

  void set_compaction_top(HeapWord* value) {
    assert(value == NULL || (value >= bottom() && value <= end()),
           "should point inside space");
    _compaction_top = value;
  }

  // Perform operations on the space needed after a compaction
  // has been performed.
  virtual void reset_after_compaction() {}

  // Returns the next space (in the current generation) to be compacted in
  // the global compaction order. It is also used to select the next
  // space into which to compact.

  virtual CompactibleSpace* next_compaction_space() const {
    return _next_compaction_space;
  }

  void set_next_compaction_space(CompactibleSpace* csp) {
    _next_compaction_space = csp;
  }

  // MarkSweep support phase2

  // Start the process of compaction of the current space: compute
  // post-compaction addresses, and insert forwarding pointers. The fields
  // "cp->gen" and "cp->compaction_space" are the generation and space into
  // which we are currently compacting. This call updates "cp" as necessary,
  // and leaves the "compaction_top" of the final value of
  // "cp->compaction_space" up-to-date. Offset tables may be updated in
  // this phase as if the final copy had occurred; if so, "cp->threshold"
  // indicates when the next such action should be taken.
  virtual void prepare_for_compaction(CompactPoint* cp);
  // MarkSweep support phase3
  virtual void adjust_pointers();
  // MarkSweep support phase4
  virtual void compact();

  // The maximum percentage of objects that can be dead in the compacted
  // live part of a compacted space ("deadwood" support.)
  virtual size_t allowed_dead_ratio() const { return 0; }

  // Some contiguous spaces may maintain some data structures that should
  // be updated whenever an allocation crosses a boundary. This function
  // returns the first such boundary.
  // (The default implementation returns the end of the space, so the
  // boundary is never crossed.)
  virtual HeapWord* initialize_threshold() { return end(); }

  // "q" is an object of the given "size" that should be forwarded;
  // "cp" names the generation ("gen") containing "this" (which must
  // also equal "cp->space"). "compact_top" is where in "this" the
  // next object should be forwarded to. If there is room in "this" for
  // the object, insert an appropriate forwarding pointer in "q".
  // If not, go to the next compaction space (there must
  // be one, since compaction must succeed -- we go to the first space of
  // the previous generation if necessary, updating "cp"), reset compact_top
  // and then forward. In either case, returns the new value of "compact_top".
  // If the forwarding crosses "cp->threshold", invokes the "cross_threshold"
  // function of the then-current compaction space, and updates
  // "cp->threshold" accordingly.
  virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
                            HeapWord* compact_top);
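
  // Illustrative phase ordering (a sketch of how a mark-sweep caller is
  // expected to drive these hooks; the real drivers live in the
  // mark-sweep implementations, and "first_gen"/"first_space" are
  // placeholders):
  //
  //   CompactPoint cp(first_gen, first_space,
  //                   first_space->initialize_threshold());
  //   space->prepare_for_compaction(&cp);  // phase 2: forwarding pointers
  //   space->adjust_pointers();            // phase 3: fix references
  //   space->compact();                    // phase 4: move the objects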

  // Return a size with adjustments as required of the space.
  virtual size_t adjust_object_size_v(size_t size) const { return size; }

 protected:
  // Used during compaction.
  HeapWord* _first_dead;
  HeapWord* _end_of_live;

  // Minimum size of a free block.
  virtual size_t minimum_free_block_size() const = 0;

  // This function is invoked when an allocation of an object covering
  // "start" to "end" crosses the threshold; returns the next
  // threshold. (The default implementation does nothing.)
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* the_end) {
    return end();
  }

  // Requires "allowed_deadspace_words > 0", that "q" is the start of a
  // free block of the given "word_len", and that "q", were it an object,
  // would not move if forwarded. If the size allows, fill the free
  // block with an object, to prevent excessive compaction. Returns "true"
  // iff the free region was made deadspace, and modifies
  // "allowed_deadspace_words" to reflect the number of available deadspace
  // words remaining after this operation.
  bool insert_deadspace(size_t& allowed_deadspace_words, HeapWord* q,
                        size_t word_len);
};

class GenSpaceMangler;

// A space in which the free area is contiguous. It therefore supports
// faster allocation and compaction.
class ContiguousSpace: public CompactibleSpace {
  friend class OneContigSpaceCardGeneration;
  friend class VMStructs;
 protected:
  HeapWord* _top;
  HeapWord* _concurrent_iteration_safe_limit;
  // A helper for mangling the unused area of the space in debug builds.
  GenSpaceMangler* _mangler;

  GenSpaceMangler* mangler() { return _mangler; }

  // Allocation helpers (return NULL if full).
  inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value);
  inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value);

 public:
  ContiguousSpace();
  ~ContiguousSpace();

  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
  virtual void clear(bool mangle_space);

  // Accessors
  HeapWord* top() const         { return _top; }
  void set_top(HeapWord* value) { _top = value; }

  virtual void set_saved_mark() { _saved_mark_word = top();    }
  void reset_saved_mark()       { _saved_mark_word = bottom(); }

  WaterMark bottom_mark()        { return WaterMark(this, bottom()); }
  WaterMark top_mark()           { return WaterMark(this, top()); }
  WaterMark saved_mark()         { return WaterMark(this, saved_mark_word()); }
  bool saved_mark_at_top() const { return saved_mark_word() == top(); }

  // In debug mode mangle (write it with a particular bit
  // pattern) the unused part of a space.

  // Used to save an address in a space for later use during mangling.
  void set_top_for_allocations(HeapWord* v) PRODUCT_RETURN;
  // Used to save the space's current top for later use during mangling.
  void set_top_for_allocations() PRODUCT_RETURN;

  // Mangle regions in the space from the current top up to the
  // previously mangled part of the space.
  void mangle_unused_area() PRODUCT_RETURN;
  // Mangle [top, end)
  void mangle_unused_area_complete() PRODUCT_RETURN;
  // Mangle the given MemRegion.
  void mangle_region(MemRegion mr) PRODUCT_RETURN;

  // Do some sparse checking on the area that should have been mangled.
  void check_mangled_unused_area(HeapWord* limit) PRODUCT_RETURN;
  // Check the complete area that should have been mangled.
  // This code may be NULL depending on the macro DEBUG_MANGLING.
  void check_mangled_unused_area_complete() PRODUCT_RETURN;

  // Size computations: sizes in bytes.
  size_t capacity() const { return byte_size(bottom(), end()); }
  size_t used() const     { return byte_size(bottom(), top()); }
  size_t free() const     { return byte_size(top(),    end()); }

  // Override from space.
  bool is_in(const void* p) const;

  virtual bool is_free_block(const HeapWord* p) const;

  // In a contiguous space we have a more obvious bound on what parts
  // contain objects.
  MemRegion used_region() const { return MemRegion(bottom(), top()); }

  MemRegion used_region_at_save_marks() const {
    return MemRegion(bottom(), saved_mark_word());
  }

  // Allocation (return NULL if full)
  virtual HeapWord* allocate(size_t word_size);
  virtual HeapWord* par_allocate(size_t word_size);

  virtual bool obj_allocated_since_save_marks(const oop obj) const {
    return (HeapWord*)obj >= saved_mark_word();
  }

  // Iteration
  void oop_iterate(ExtendedOopClosure* cl);
  void oop_iterate(MemRegion mr, ExtendedOopClosure* cl);
  void object_iterate(ObjectClosure* blk);
  // For contiguous spaces this method will iterate safely over objects
  // in the space (i.e., between bottom and top) when at a safepoint.
  void safe_object_iterate(ObjectClosure* blk);
  void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
  // Iterates over objects up to the safe limit.
  HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
  HeapWord* concurrent_iteration_safe_limit() {
    assert(_concurrent_iteration_safe_limit <= top(),
           "_concurrent_iteration_safe_limit update missed");
    return _concurrent_iteration_safe_limit;
  }
  // Changes the safe limit; all objects from bottom() to the new
  // limit should be properly initialized.
  void set_concurrent_iteration_safe_limit(HeapWord* new_limit) {
    assert(new_limit <= top(), "uninitialized objects in the safe range");
    _concurrent_iteration_safe_limit = new_limit;
  }

#if INCLUDE_ALL_GCS
  // In support of parallel oop_iterate.
#define ContigSpace_PAR_OOP_ITERATE_DECL(OopClosureType, nv_suffix)  \
    void par_oop_iterate(MemRegion mr, OopClosureType* blk);

  ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DECL)
#undef ContigSpace_PAR_OOP_ITERATE_DECL
#endif // INCLUDE_ALL_GCS

  // Compaction support
  virtual void reset_after_compaction() {
    assert(compaction_top() >= bottom() && compaction_top() <= end(), "should point inside space");
    set_top(compaction_top());
    // set new iteration safe limit
    set_concurrent_iteration_safe_limit(compaction_top());
  }
  virtual size_t minimum_free_block_size() const { return 0; }

  // Override.
  DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
                                     CardTableModRefBS::PrecisionStyle precision,
                                     HeapWord* boundary = NULL);

  // Apply "blk->do_oop" to the addresses of all reference fields in objects
  // starting with the _saved_mark_word, which was noted during a generation's
  // save_marks and is required to denote the head of an object.
  // Fields in objects allocated by applications of the closure
  // *are* included in the iteration.
  // Updates _saved_mark_word to point to just after the last object
  // iterated over.
#define ContigSpace_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
  void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);

  ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DECL)
#undef ContigSpace_OOP_SINCE_SAVE_MARKS_DECL
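
  // Illustrative save-marks round trip (a sketch; "young" stands for any
  // ContiguousSpace, "cl" is assumed to be an OopsInGenClosure*, and "_v"
  // is the virtual variant generated by the macro above):
  //
  //   young->set_saved_mark();                    // remember current top
  //   /* ... allocations happen in the space ... */
  //   young->oop_since_save_marks_iterate_v(cl);  // scan only new objects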

  // Same as object_iterate, but starting from "mark", which is required
  // to denote the start of an object. Objects allocated by
  // applications of the closure *are* included in the iteration.
  virtual void object_iterate_from(WaterMark mark, ObjectClosure* blk);

  // Very inefficient implementation.
  virtual HeapWord* block_start_const(const void* p) const;
  size_t block_size(const HeapWord* p) const;
  // If a block is in the allocated area, it is an object.
  bool block_is_obj(const HeapWord* p) const { return p < top(); }

  // Addresses for inlined allocation
  HeapWord** top_addr() { return &_top; }
  HeapWord** end_addr() { return &_end; }

  // Overrides for more efficient compaction support.
  void prepare_for_compaction(CompactPoint* cp);

  // PrintHeapAtGC support.
  virtual void print_on(outputStream* st) const;

  // Checked dynamic downcasts.
  virtual ContiguousSpace* toContiguousSpace() {
    return this;
  }

  // Debugging
  virtual void verify() const;

  // Used to increase collection frequency. "factor" of 0 means entire
  // space.
  void allocate_temporary_filler(int factor);

};

// A dirty card to oop closure that does filtering.
// It knows how to filter out objects that are outside of the _boundary.
class Filtering_DCTOC : public DirtyCardToOopClosure {
 protected:
  // Override.
  void walk_mem_region(MemRegion mr,
                       HeapWord* bottom, HeapWord* top);

  // Walk the given memory region, from bottom to top, applying
  // the given oop closure to (possibly) all objects found. The
  // given oop closure may or may not be the same as the oop
  // closure with which this closure was created, as it may
  // be a filtering closure which makes use of the _boundary.
  // We offer two signatures, so the FilteringClosure static type is
  // apparent.
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       ExtendedOopClosure* cl) = 0;
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       FilteringClosure* cl) = 0;

 public:
  Filtering_DCTOC(Space* sp, ExtendedOopClosure* cl,
                  CardTableModRefBS::PrecisionStyle precision,
                  HeapWord* boundary) :
    DirtyCardToOopClosure(sp, cl, precision, boundary) {}
};

// A dirty card to oop closure for contiguous spaces
// (ContiguousSpace and sub-classes).
// It is a Filtering_DCTOC, as defined above, and it knows:
//
// 1. That the actual top of any area in a memory region
//    contained by the space is bounded by the end of the contiguous
//    region of the space.
// 2. That the space is really made up of objects and not just
//    blocks.

class ContiguousSpaceDCTOC : public Filtering_DCTOC {
 protected:
  // Overrides.
  HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);

  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       ExtendedOopClosure* cl);
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       FilteringClosure* cl);

 public:
  ContiguousSpaceDCTOC(ContiguousSpace* sp, ExtendedOopClosure* cl,
                       CardTableModRefBS::PrecisionStyle precision,
                       HeapWord* boundary) :
    Filtering_DCTOC(sp, cl, precision, boundary)
  {}
};

// Class EdenSpace describes eden-space in new generation.

class DefNewGeneration;

class EdenSpace : public ContiguousSpace {
  friend class VMStructs;
 private:
  DefNewGeneration* _gen;

  // _soft_end is used as a soft limit on allocation. As soft limits are
  // reached, the slow-path allocation code can invoke other actions and then
  // adjust _soft_end up to a new soft limit or to end().
  HeapWord* _soft_end;

 public:
  EdenSpace(DefNewGeneration* gen) :
    _gen(gen), _soft_end(NULL) {}

  // Get/set just the 'soft' limit.
  HeapWord* soft_end()               { return _soft_end; }
  HeapWord** soft_end_addr()         { return &_soft_end; }
  void set_soft_end(HeapWord* value) { _soft_end = value; }

  // Override.
  void clear(bool mangle_space);

  // Set both the 'hard' and 'soft' limits (_end and _soft_end).
  void set_end(HeapWord* value) {
    set_soft_end(value);
    ContiguousSpace::set_end(value);
  }

  // Allocation (return NULL if full)
  HeapWord* allocate(size_t word_size);
  HeapWord* par_allocate(size_t word_size);
};
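
// Illustrative soft-limit slow path (a sketch only; the real policy lives
// in the DefNewGeneration allocation code, and the retry loop here is a
// placeholder for it):
//
//   HeapWord* mem = eden->par_allocate(word_size);
//   while (mem == NULL && eden->soft_end() != eden->end()) {
//     eden->set_soft_end(eden->end());  // e.g. give up the soft limit
//     mem = eden->par_allocate(word_size);
//   }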

// Class ConcEdenSpace extends EdenSpace for the sake of safe
// allocation while soft-end is being modified concurrently

class ConcEdenSpace : public EdenSpace {
 public:
  ConcEdenSpace(DefNewGeneration* gen) : EdenSpace(gen) { }

  // Allocation (return NULL if full)
  HeapWord* par_allocate(size_t word_size);
};

// A ContiguousSpace that supports an efficient "block_start" operation via
// a BlockOffsetArray (whose BlockOffsetSharedArray may be shared with
// other spaces.) This is the abstract base class for old generation
// (tenured) spaces.

class OffsetTableContigSpace: public ContiguousSpace {
  friend class VMStructs;
 protected:
  BlockOffsetArrayContigSpace _offsets;
  Mutex _par_alloc_lock;

 public:
  // Constructor
  OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
                         MemRegion mr);

  void set_bottom(HeapWord* value);
  void set_end(HeapWord* value);

  void clear(bool mangle_space);

  inline HeapWord* block_start_const(const void* p) const;

  // Add offset table update.
  virtual inline HeapWord* allocate(size_t word_size);
  inline HeapWord* par_allocate(size_t word_size);

  // MarkSweep support phase3
  virtual HeapWord* initialize_threshold();
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);

  virtual void print_on(outputStream* st) const;

  // Debugging
  void verify() const;
};
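
// Illustrative block_start use (a sketch; "ot_sp" stands for an
// OffsetTableContigSpace and "addr" for any address within it):
//
//   HeapWord* start = ot_sp->block_start_const(addr);  // fast, via _offsets
//   assert(ot_sp->block_is_obj(start), "contiguous spaces contain only objects");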

// Class TenuredSpace is used by TenuredGeneration

class TenuredSpace: public OffsetTableContigSpace {
  friend class VMStructs;
 protected:
  // Mark sweep support
  size_t allowed_dead_ratio() const;
 public:
  // Constructor
  TenuredSpace(BlockOffsetSharedArray* sharedOffsetArray,
               MemRegion mr) :
    OffsetTableContigSpace(sharedOffsetArray, mr) {}
};
#endif // SHARE_VM_MEMORY_SPACE_HPP