Mon, 26 Jan 2009 12:47:21 -0800
6786503: Overflow list performance can be improved
Summary: Avoid overflow list walk in CMS & ParNew when it is unnecessary. Fix a couple of correctness issues, including a C-heap leak, in ParNew at the intersection of promotion failure, work queue overflow and object array chunking. Add stress testing option and related assertion checking.
Reviewed-by: jmasa
/*
 * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_cardTableRS.cpp.incl"

CardTableRS::CardTableRS(MemRegion whole_heap,
                         int max_covered_regions) :
  GenRemSet(),
  _cur_youngergen_card_val(youngergenP1_card),
  _regions_to_iterate(max_covered_regions - 1)
{
#ifndef SERIALGC
  if (UseG1GC) {
    if (G1RSBarrierUseQueue) {
      _ct_bs = new G1SATBCardTableLoggingModRefBS(whole_heap,
                                                  max_covered_regions);
    } else {
      _ct_bs = new G1SATBCardTableModRefBS(whole_heap, max_covered_regions);
    }
  } else {
    _ct_bs = new CardTableModRefBSForCTRS(whole_heap, max_covered_regions);
  }
#else
  _ct_bs = new CardTableModRefBSForCTRS(whole_heap, max_covered_regions);
#endif
  set_bs(_ct_bs);
  _last_cur_val_in_gen = new jbyte[GenCollectedHeap::max_gens + 1];
  if (_last_cur_val_in_gen == NULL) {
    vm_exit_during_initialization("Could not create last_cur_val_in_gen array.");
  }
  for (int i = 0; i < GenCollectedHeap::max_gens + 1; i++) {
    _last_cur_val_in_gen[i] = clean_card_val();
  }
  _ct_bs->set_CTRS(this);
}

void CardTableRS::resize_covered_region(MemRegion new_region) {
  _ct_bs->resize_covered_region(new_region);
}

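// Pick a "younger gen, parallel" (youngergenP_n) card value that is not
// currently recorded in _last_cur_val_in_gen, so that the value used by the
// next parallel younger-refs iteration cannot be confused with one left
// over from an earlier iteration.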
jbyte CardTableRS::find_unused_youngergenP_card_value() {
  for (jbyte v = youngergenP1_card;
       v < cur_youngergen_and_prev_nonclean_card;
       v++) {
    bool seen = false;
    for (int g = 0; g < _regions_to_iterate; g++) {
      if (_last_cur_val_in_gen[g] == v) {
        seen = true;
        break;
      }
    }
    if (!seen) return v;
  }
  ShouldNotReachHere();
  return 0;
}

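// Select the card value that the upcoming younger-refs iteration will write:
// a fresh youngergenP_n value in the parallel case, youngergen_card in the
// sequential case.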
void CardTableRS::prepare_for_younger_refs_iterate(bool parallel) {
  // Parallel or sequential, we must always set the prev to equal the
  // last one written.
  if (parallel) {
    // Find a parallel value to be used next.
    jbyte next_val = find_unused_youngergenP_card_value();
    set_cur_youngergen_card_val(next_val);
  } else {
    // In a sequential traversal we will always write youngergen, so that
    // the inline barrier is correct.
    set_cur_youngergen_card_val(youngergen_card);
  }
}

void CardTableRS::younger_refs_iterate(Generation* g,
                                       OopsInGenClosure* blk) {
  _last_cur_val_in_gen[g->level()+1] = cur_youngergen_card_val();
  g->younger_refs_iterate(blk);
}

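// Wraps a dirty-card closure: as each non-clean card is visited it is first
// reset (serially, or with the CAS protocol in clear_card below), and maximal
// runs of cards that still need scanning are then handed to the wrapped
// closure en masse.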
class ClearNoncleanCardWrapper: public MemRegionClosure {
  MemRegionClosure* _dirty_card_closure;
  CardTableRS* _ct;
  bool _is_par;
private:
  // Clears the given card, returning true if the corresponding card should be
  // processed.
  bool clear_card(jbyte* entry) {
    if (_is_par) {
      while (true) {
        // In the parallel case, we may have to do this several times.
        jbyte entry_val = *entry;
        assert(entry_val != CardTableRS::clean_card_val(),
               "We shouldn't be looking at clean cards, and this should "
               "be the only place they get cleaned.");
        if (CardTableRS::card_is_dirty_wrt_gen_iter(entry_val)
            || _ct->is_prev_youngergen_card_val(entry_val)) {
          jbyte res =
            Atomic::cmpxchg(CardTableRS::clean_card_val(), entry, entry_val);
          if (res == entry_val) {
            break;
          } else {
            assert(res == CardTableRS::cur_youngergen_and_prev_nonclean_card,
                   "The CAS above should only fail if another thread did "
                   "a GC write barrier.");
          }
        } else if (entry_val ==
                   CardTableRS::cur_youngergen_and_prev_nonclean_card) {
          // Parallelism shouldn't matter in this case. Only the thread
          // assigned to scan the card should change this value.
          *entry = _ct->cur_youngergen_card_val();
          break;
        } else {
          assert(entry_val == _ct->cur_youngergen_card_val(),
                 "Should be the only possibility.");
          // In this case, the card was clean before, and became
          // cur_youngergen only because of processing of a promoted object.
          // We don't have to look at the card.
          return false;
        }
      }
      return true;
    } else {
      jbyte entry_val = *entry;
      assert(entry_val != CardTableRS::clean_card_val(),
             "We shouldn't be looking at clean cards, and this should "
             "be the only place they get cleaned.");
      assert(entry_val != CardTableRS::cur_youngergen_and_prev_nonclean_card,
             "This should not be possible in the sequential case.");
      *entry = CardTableRS::clean_card_val();
      return true;
    }
  }

public:
  ClearNoncleanCardWrapper(MemRegionClosure* dirty_card_closure,
                           CardTableRS* ct) :
    _dirty_card_closure(dirty_card_closure), _ct(ct) {
    _is_par = (SharedHeap::heap()->n_par_threads() > 0);
  }
  void do_MemRegion(MemRegion mr) {
    // We start at the high end of "mr", walking backwards
    // while accumulating a contiguous dirty range of cards in
    // [start_of_non_clean, end_of_non_clean) which we then
    // process en masse.
    HeapWord* end_of_non_clean = mr.end();
    HeapWord* start_of_non_clean = end_of_non_clean;
    jbyte* entry = _ct->byte_for(mr.last());
    const jbyte* first_entry = _ct->byte_for(mr.start());
    while (entry >= first_entry) {
      HeapWord* cur = _ct->addr_for(entry);
      if (!clear_card(entry)) {
        // We hit a clean card; process any non-empty
        // dirty range accumulated so far.
        if (start_of_non_clean < end_of_non_clean) {
          MemRegion mr2(start_of_non_clean, end_of_non_clean);
          _dirty_card_closure->do_MemRegion(mr2);
        }
        // Reset the dirty window while continuing to
        // look for the next dirty window to process.
        end_of_non_clean = cur;
        start_of_non_clean = end_of_non_clean;
      }
      // Open the left end of the window one card to the left.
      start_of_non_clean = cur;
      // Note that "entry" leads "start_of_non_clean" in
      // its leftward excursion after this point
      // in the loop and, when we hit the left end of "mr",
      // will point off of the left end of the card-table
      // for "mr".
      entry--;
    }
    // If the first card of "mr" was dirty, we will have
    // been left with a dirty window, co-initial with "mr",
    // which we now process.
    if (start_of_non_clean < end_of_non_clean) {
      MemRegion mr2(start_of_non_clean, end_of_non_clean);
      _dirty_card_closure->do_MemRegion(mr2);
    }
  }
};

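// Card value transitions effected by the parallel GC write barrier
// (write_ref_field_gc_par) below: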
// clean (by dirty->clean before)        ==> cur_younger_gen
// dirty                                 ==> cur_youngergen_and_prev_nonclean_card
// precleaned                            ==> cur_youngergen_and_prev_nonclean_card
// prev-younger-gen                      ==> cur_youngergen_and_prev_nonclean_card
// cur-younger-gen                       ==> cur_younger_gen
// cur_youngergen_and_prev_nonclean_card ==> no change.
void CardTableRS::write_ref_field_gc_par(void* field, oop new_val) {
  jbyte* entry = ct_bs()->byte_for(field);
  do {
    jbyte entry_val = *entry;
    // We put this first because it's probably the most common case.
    if (entry_val == clean_card_val()) {
      // No threat of contention with cleaning threads.
      *entry = cur_youngergen_card_val();
      return;
    } else if (card_is_dirty_wrt_gen_iter(entry_val)
               || is_prev_youngergen_card_val(entry_val)) {
      // Mark it as both cur and prev youngergen; card cleaning thread will
      // eventually remove the previous stuff.
      jbyte new_val = cur_youngergen_and_prev_nonclean_card;
      jbyte res = Atomic::cmpxchg(new_val, entry, entry_val);
      // Did the CAS succeed?
      if (res == entry_val) return;
      // Otherwise, retry, to see the new value.
      continue;
    } else {
      assert(entry_val == cur_youngergen_and_prev_nonclean_card
             || entry_val == cur_youngergen_card_val(),
             "should be the only possibilities.");
      return;
    }
  } while (true);
}

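// Apply "cl" to the younger-gen pointers in "sp": build a dirty-card-to-oop
// closure for "cl" and drive the non-clean card iteration over the part of
// "sp" used at the last save-marks, clearing cards via
// ClearNoncleanCardWrapper as they are visited.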
void CardTableRS::younger_refs_in_space_iterate(Space* sp,
                                                OopsInGenClosure* cl) {
  DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, _ct_bs->precision(),
                                                   cl->gen_boundary());
  ClearNoncleanCardWrapper clear_cl(dcto_cl, this);

  _ct_bs->non_clean_card_iterate(sp, sp->used_region_at_save_marks(),
                                 dcto_cl, &clear_cl, false);
}

void CardTableRS::clear_into_younger(Generation* gen, bool clear_perm) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  // Generations younger than gen have been evacuated. We can clear
  // card table entries for gen (we know that it has no pointers
  // to younger gens) and for those below. The card tables for
  // the youngest gen need never be cleared, and those for perm gen
  // will be cleared based on the parameter clear_perm.
  // There's a bit of subtlety in the clear() and invalidate()
  // methods that we exploit here and in invalidate_or_clear()
  // below to avoid missing cards at the fringes. If clear() or
  // invalidate() are changed in the future, this code should
  // be revisited. 20040107.ysr
  Generation* g = gen;
  for (Generation* prev_gen = gch->prev_gen(g);
       prev_gen != NULL;
       g = prev_gen, prev_gen = gch->prev_gen(g)) {
    MemRegion to_be_cleared_mr = g->prev_used_region();
    clear(to_be_cleared_mr);
  }
  // Clear perm gen cards if asked to do so.
  if (clear_perm) {
    MemRegion to_be_cleared_mr = gch->perm_gen()->prev_used_region();
    clear(to_be_cleared_mr);
  }
}

void CardTableRS::invalidate_or_clear(Generation* gen, bool younger,
                                      bool perm) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  // For each generation gen (and younger and/or perm)
  // invalidate the cards for the currently occupied part
  // of that generation and clear the cards for the
  // unoccupied part of the generation (if any, making use
  // of that generation's prev_used_region to determine that
  // region). No need to do anything for the youngest
  // generation. Also see note#20040107.ysr above.
  Generation* g = gen;
  for (Generation* prev_gen = gch->prev_gen(g); prev_gen != NULL;
       g = prev_gen, prev_gen = gch->prev_gen(g)) {
    MemRegion used_mr = g->used_region();
    MemRegion to_be_cleared_mr = g->prev_used_region().minus(used_mr);
    if (!to_be_cleared_mr.is_empty()) {
      clear(to_be_cleared_mr);
    }
    invalidate(used_mr);
    if (!younger) break;
  }
  // Clear perm gen cards if asked to do so.
  if (perm) {
    g = gch->perm_gen();
    MemRegion used_mr = g->used_region();
    MemRegion to_be_cleared_mr = g->prev_used_region().minus(used_mr);
    if (!to_be_cleared_mr.is_empty()) {
      clear(to_be_cleared_mr);
    }
    invalidate(used_mr);
  }
}

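// Verification closure: for reference fields located in [_begin, _end) on a
// clean card, check that a field at or above the generation boundary does
// not point below that boundary (i.e. into a younger generation).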
class VerifyCleanCardClosure: public OopClosure {
private:
  HeapWord* _boundary;
  HeapWord* _begin;
  HeapWord* _end;
protected:
  template <class T> void do_oop_work(T* p) {
    HeapWord* jp = (HeapWord*)p;
    if (jp >= _begin && jp < _end) {
      oop obj = oopDesc::load_decode_heap_oop(p);
      guarantee(obj == NULL ||
                (HeapWord*)p < _boundary ||
                (HeapWord*)obj >= _boundary,
                "pointer on clean card crosses boundary");
    }
  }
public:
  VerifyCleanCardClosure(HeapWord* b, HeapWord* begin, HeapWord* end) :
    _boundary(b), _begin(begin), _end(end) {}
  virtual void do_oop(oop* p)       { VerifyCleanCardClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { VerifyCleanCardClosure::do_oop_work(p); }
};

class VerifyCTSpaceClosure: public SpaceClosure {
private:
  CardTableRS* _ct;
  HeapWord* _boundary;
public:
  VerifyCTSpaceClosure(CardTableRS* ct, HeapWord* boundary) :
    _ct(ct), _boundary(boundary) {}
  virtual void do_space(Space* s) { _ct->verify_space(s, _boundary); }
};

class VerifyCTGenClosure: public GenCollectedHeap::GenClosure {
  CardTableRS* _ct;
public:
  VerifyCTGenClosure(CardTableRS* ct) : _ct(ct) {}
  void do_generation(Generation* gen) {
    // Skip the youngest generation.
    if (gen->level() == 0) return;
    // Normally, we're interested in pointers to younger generations.
    VerifyCTSpaceClosure blk(_ct, gen->reserved().start());
    gen->space_iterate(&blk, true);
  }
};

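// Walk the card table for the used part of "s". For each maximal run of
// clean cards, iterate over the objects covered by those cards and verify,
// via VerifyCleanCardClosure, that none of them holds a younger-gen pointer.
// Non-clean cards are skipped; the comment in the else-branch below explains
// why stale non-clean values may legitimately be present.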
void CardTableRS::verify_space(Space* s, HeapWord* gen_boundary) {
  // We don't need to do young-gen spaces.
  if (s->end() <= gen_boundary) return;
  MemRegion used = s->used_region();

  jbyte* cur_entry = byte_for(used.start());
  jbyte* limit = byte_after(used.last());
  while (cur_entry < limit) {
    if (*cur_entry == CardTableModRefBS::clean_card) {
      jbyte* first_dirty = cur_entry+1;
      while (first_dirty < limit &&
             *first_dirty == CardTableModRefBS::clean_card) {
        first_dirty++;
      }
      // If the first object is a regular object, and it has a
      // young-to-old field, that would mark the previous card.
      HeapWord* boundary = addr_for(cur_entry);
      HeapWord* end = (first_dirty >= limit) ? used.end() : addr_for(first_dirty);
      HeapWord* boundary_block = s->block_start(boundary);
      HeapWord* begin = boundary;             // Until proven otherwise.
      HeapWord* start_block = boundary_block; // Until proven otherwise.
      if (boundary_block < boundary) {
        if (s->block_is_obj(boundary_block) && s->obj_is_alive(boundary_block)) {
          oop boundary_obj = oop(boundary_block);
          if (!boundary_obj->is_objArray() &&
              !boundary_obj->is_typeArray()) {
            guarantee(cur_entry > byte_for(used.start()),
                      "else boundary would be boundary_block");
            if (*byte_for(boundary_block) != CardTableModRefBS::clean_card) {
              begin = boundary_block + s->block_size(boundary_block);
              start_block = begin;
            }
          }
        }
      }
      // Now traverse objects until end.
      HeapWord* cur = start_block;
      VerifyCleanCardClosure verify_blk(gen_boundary, begin, end);
      while (cur < end) {
        if (s->block_is_obj(cur) && s->obj_is_alive(cur)) {
          oop(cur)->oop_iterate(&verify_blk);
        }
        cur += s->block_size(cur);
      }
      cur_entry = first_dirty;
    } else {
      // We'd normally expect that cur_youngergen_and_prev_nonclean_card
      // is a transient value, that cannot be in the card table
      // except during GC, and thus assert that:
      //   guarantee(*cur_entry != cur_youngergen_and_prev_nonclean_card,
      //             "Illegal CT value");
      // That, however, need not hold, as will become clear in the
      // following...

      // We'd normally expect that if we are in the parallel case,
      // we can't have left a prev value (which would be different
      // from the current value) in the card table, and so we'd like to
      // assert that:
      //   guarantee(cur_youngergen_card_val() == youngergen_card
      //             || !is_prev_youngergen_card_val(*cur_entry),
      //             "Illegal CT value");
      // That, however, may not hold occasionally, because of
      // CMS or MSC in the old gen. To wit, consider the
      // following two simple illustrative scenarios:
      // (a) CMS: Consider the case where a large object L
      //     spanning several cards is allocated in the old
      //     gen, and has a young gen reference stored in it, dirtying
      //     some interior cards. A young collection scans the card,
      //     finds a young ref and installs a youngergenP_n value.
      //     L then goes dead. Now a CMS collection starts,
      //     finds L dead and sweeps it up. Assume that L is
      //     abutting _unallocated_blk, so _unallocated_blk is
      //     adjusted down to (below) L. Assume further that
      //     no young collection intervenes during this CMS cycle.
      //     The next young gen cycle will not get to look at this
      //     youngergenP_n card since it lies in the unoccupied
      //     part of the space.
      //     Some young collections later the blocks on this
      //     card can be re-allocated either due to direct allocation
      //     or due to absorbing promotions. At this time, the
      //     before-gc verification will fail the above assert.
      // (b) MSC: In this case, an object L with a young reference
      //     is on a card that (therefore) holds a youngergen_n value.
      //     Suppose also that L lies towards the end of the used
      //     space before GC. An MSC collection
      //     occurs that compacts to such an extent that this
      //     card is no longer in the occupied part of the space.
      //     Since current code in MSC does not always clear cards
      //     in the unused part of old gen, this stale youngergen_n
      //     value is left behind and can later be covered by
      //     an object when promotion or direct allocation
      //     re-allocates that part of the heap.
      //
      // Fortunately, the presence of such stale card values is
      // "only" a minor annoyance in that subsequent young collections
      // might needlessly scan such cards, but would still never corrupt
      // the heap as a result. However, it's likely not to be a significant
      // performance inhibitor in practice. (For instance,
      // some recent measurements with unoccupied cards eagerly cleared
      // out to maintain this invariant showed next to no
      // change in young collection times; of course one can construct
      // degenerate examples where the cost can be significant.)
      // Note, in particular, that if the "stale" card is modified
      // after re-allocation, it would be dirty, not "stale". Thus,
      // we can never have a younger ref in such a card and it is
      // safe not to scan that card in any collection. [As we see
      // below, we do some unnecessary scanning
      // in some cases in the current parallel scanning algorithm.]
      //
      // The main point below is that the parallel card scanning code
      // deals correctly with these stale card values. There are two main
      // cases to consider where we have a stale "younger gen" value and a
      // "derivative" case to consider, where we have a stale
      // "cur_younger_gen_and_prev_non_clean" value, as will become
      // apparent in the case analysis below.
      // o Case 1. If the stale value corresponds to a younger_gen_n
      //   value other than the cur_younger_gen value then the code
      //   treats this as being tantamount to a prev_younger_gen
      //   card. This means that the card may be unnecessarily scanned.
      //   There are two sub-cases to consider:
      //   o Case 1a. Let us say that the card is in the occupied part
      //     of the generation at the time the collection begins. In
      //     that case the card will be either cleared when it is scanned
      //     for young pointers, or will be set to cur_younger_gen as a
      //     result of promotion. (We have elided the normal case where
      //     the scanning thread and the promoting thread interleave,
      //     possibly resulting in a transient
      //     cur_younger_gen_and_prev_non_clean value before settling
      //     to cur_younger_gen.) [End Case 1a.]
      //   o Case 1b. Consider now the case when the card is in the unoccupied
      //     part of the space which becomes occupied because of promotions
      //     into it during the current young GC. In this case the card
      //     will never be scanned for young references. The current
      //     code will set the card value to either
      //     cur_younger_gen_and_prev_non_clean or leave
      //     it with its stale value -- because the promotions didn't
      //     result in any younger refs on that card. Of these two
      //     cases, the latter will be covered in Case 1a during
      //     a subsequent scan. To deal with the former case, we need
      //     to further consider how we deal with a stale value of
      //     cur_younger_gen_and_prev_non_clean in our case analysis
      //     below. This we do in Case 3 below. [End Case 1b]
      //   [End Case 1]
      // o Case 2. If the stale value corresponds to cur_younger_gen being
      //   a value not necessarily written by a current promotion, the
      //   card will not be scanned by the younger refs scanning code.
      //   (This is OK since as we argued above such cards cannot contain
      //   any younger refs.) The result is that this value will be
      //   treated as a prev_younger_gen value in a subsequent collection,
      //   which is addressed in Case 1 above. [End Case 2]
      // o Case 3. We here consider the "derivative" case from Case 1b above
      //   because of which we may find a stale
      //   cur_younger_gen_and_prev_non_clean card value in the table.
      //   Once again, as in Case 1, we consider two subcases, depending
      //   on whether the card lies in the occupied or unoccupied part
      //   of the space at the start of the young collection.
      //   o Case 3a. Let us say the card is in the occupied part of
      //     the old gen at the start of the young collection. In that
      //     case, the card will be scanned by the younger refs scanning
      //     code which will set it to cur_younger_gen. In a subsequent
      //     scan, the card will be considered again and get its final
      //     correct value. [End Case 3a]
      //   o Case 3b. Now consider the case where the card is in the
      //     unoccupied part of the old gen, and is occupied as a result
      //     of promotions during this young GC. In that case,
      //     the card will not be scanned for younger refs. The presence
      //     of newly promoted objects on the card will then result in
      //     its keeping the value cur_younger_gen_and_prev_non_clean,
      //     which we have dealt with in Case 3 here. [End Case 3b]
      //   [End Case 3]
      //
      // (Please refer to the code in the helper class
      // ClearNoncleanCardWrapper and in CardTableModRefBS for details.)
      //
      // The informal arguments above can be tightened into a formal
      // correctness proof and it behooves us to write up such a proof,
      // or to use model checking to prove that there are no lingering
      // concerns.
      //
      // Clearly because of Case 3b one cannot bound the time for
      // which a card will retain what we have called a "stale" value.
      // However, one can obtain a loose upper bound on the redundant
      // work as a result of such stale values. Note first that any
      // time a stale card lies in the occupied part of the space at
      // the start of the collection, it is scanned by younger refs
      // code and we can define a rank function on card values that
      // declines when this is so. Note also that when a card does not
      // lie in the occupied part of the space at the beginning of a
      // young collection, its rank can either decline or stay unchanged.
      // In this case, no extra work is done in terms of redundant
      // younger refs scanning of that card.
      // Then, the case analysis above reveals that, in the worst case,
      // any such stale card will be scanned unnecessarily at most twice.
      //
      // It is nonetheless advisable to try and get rid of some of this
      // redundant work in a subsequent (low priority) re-design of
      // the card-scanning code, if only to simplify the underlying
      // state machine analysis/proof. ysr 1/28/2002. XXX
      cur_entry++;
    }
  }
}

void CardTableRS::verify() {
  // At present, we only know how to verify the card table RS for
  // generational heaps.
  VerifyCTGenClosure blk(this);
  CollectedHeap* ch = Universe::heap();
  // We will do the perm-gen portion of the card table, too.
  Generation* pg = SharedHeap::heap()->perm_gen();
  HeapWord* pg_boundary = pg->reserved().start();

  if (ch->kind() == CollectedHeap::GenCollectedHeap) {
    GenCollectedHeap::heap()->generation_iterate(&blk, false);
    _ct_bs->verify();

    // If the old gen collections also collect perm, then we are only
    // interested in perm-to-young pointers, not perm-to-old pointers.
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    CollectorPolicy* cp = gch->collector_policy();
    if (cp->is_mark_sweep_policy() || cp->is_concurrent_mark_sweep_policy()) {
      pg_boundary = gch->get_gen(1)->reserved().start();
    }
  }
  VerifyCTSpaceClosure perm_space_blk(this, pg_boundary);
  SharedHeap::heap()->perm_gen()->space_iterate(&perm_space_blk, true);
}

void CardTableRS::verify_aligned_region_empty(MemRegion mr) {
  if (!mr.is_empty()) {
    jbyte* cur_entry = byte_for(mr.start());
    jbyte* limit = byte_after(mr.last());
    // The region mr may not start on a card boundary so
    // the first card may reflect a write to the space
    // just prior to mr.
    if (!is_aligned(mr.start())) {
      cur_entry++;
    }
    for (; cur_entry < limit; cur_entry++) {
      guarantee(*cur_entry == CardTableModRefBS::clean_card,
                "Unexpected dirty card found");
    }
  }
}