Fri, 11 Feb 2011 14:15:16 +0100
7018257: jmm_DumpThreads allocates into permgen
Summary: Don't allocate in permgen
Reviewed-by: ysr, sla
/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/generation.hpp"
#include "memory/space.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#ifndef SERIALGC
#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#endif

CardTableRS::CardTableRS(MemRegion whole_heap,
                         int max_covered_regions) :
  GenRemSet(),
  _cur_youngergen_card_val(youngergenP1_card),
  _regions_to_iterate(max_covered_regions - 1)
{
#ifndef SERIALGC
  if (UseG1GC) {
    _ct_bs = new G1SATBCardTableLoggingModRefBS(whole_heap,
                                                max_covered_regions);
  } else {
    _ct_bs = new CardTableModRefBSForCTRS(whole_heap, max_covered_regions);
  }
#else
  _ct_bs = new CardTableModRefBSForCTRS(whole_heap, max_covered_regions);
#endif
  set_bs(_ct_bs);
  _last_cur_val_in_gen = new jbyte[GenCollectedHeap::max_gens + 1];
  if (_last_cur_val_in_gen == NULL) {
    vm_exit_during_initialization("Could not allocate last_cur_val_in_gen array.");
  }
  for (int i = 0; i < GenCollectedHeap::max_gens + 1; i++) {
    _last_cur_val_in_gen[i] = clean_card_val();
  }
  _ct_bs->set_CTRS(this);
}

void CardTableRS::resize_covered_region(MemRegion new_region) {
  _ct_bs->resize_covered_region(new_region);
}

jbyte CardTableRS::find_unused_youngergenP_card_value() {
  for (jbyte v = youngergenP1_card;
       v < cur_youngergen_and_prev_nonclean_card;
       v++) {
    bool seen = false;
    for (int g = 0; g < _regions_to_iterate; g++) {
      if (_last_cur_val_in_gen[g] == v) {
        seen = true;
        break;
      }
    }
    if (!seen) return v;
  }
  ShouldNotReachHere();
  return 0;
}
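
// A minimal standalone sketch of the search above, assuming plain
// byte-sized card values. The names example_find_unused_value, last_vals,
// n_regions, first and limit are hypothetical stand-ins for
// find_unused_youngergenP_card_value, _last_cur_val_in_gen,
// _regions_to_iterate, youngergenP1_card and
// cur_youngergen_and_prev_nonclean_card.
static signed char example_find_unused_value(const signed char* last_vals,
                                             int n_regions,
                                             signed char first,
                                             signed char limit) {
  for (signed char v = first; v < limit; v++) {
    bool seen = false;
    for (int g = 0; g < n_regions; g++) {
      if (last_vals[g] == v) {   // value v is still recorded for region g
        seen = true;
        break;
      }
    }
    if (!seen) return v;         // first value no region is still using
  }
  return -1;                     // corresponds to ShouldNotReachHere() above
}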

void CardTableRS::prepare_for_younger_refs_iterate(bool parallel) {
  // Parallel or sequential, we must always set the prev to equal the
  // last one written.
  if (parallel) {
    // Find a parallel value to be used next.
    jbyte next_val = find_unused_youngergenP_card_value();
    set_cur_youngergen_card_val(next_val);

  } else {
    // In a sequential traversal we will always write youngergen, so that
    // the inline barrier is correct.
    set_cur_youngergen_card_val(youngergen_card);
  }
}

void CardTableRS::younger_refs_iterate(Generation* g,
                                       OopsInGenClosure* blk) {
  _last_cur_val_in_gen[g->level()+1] = cur_youngergen_card_val();
  g->younger_refs_iterate(blk);
}

class ClearNoncleanCardWrapper: public MemRegionClosure {
  MemRegionClosure* _dirty_card_closure;
  CardTableRS* _ct;
  bool _is_par;
 private:
  // Clears the given card, returning true if the corresponding card should be
  // processed.
  bool clear_card(jbyte* entry) {
    if (_is_par) {
      while (true) {
        // In the parallel case, we may have to do this several times.
        jbyte entry_val = *entry;
        assert(entry_val != CardTableRS::clean_card_val(),
               "We shouldn't be looking at clean cards, and this should "
               "be the only place they get cleaned.");
        if (CardTableRS::card_is_dirty_wrt_gen_iter(entry_val)
            || _ct->is_prev_youngergen_card_val(entry_val)) {
          jbyte res =
            Atomic::cmpxchg(CardTableRS::clean_card_val(), entry, entry_val);
          if (res == entry_val) {
            break;
          } else {
            assert(res == CardTableRS::cur_youngergen_and_prev_nonclean_card,
                   "The CAS above should only fail if another thread did "
                   "a GC write barrier.");
          }
        } else if (entry_val ==
                   CardTableRS::cur_youngergen_and_prev_nonclean_card) {
          // Parallelism shouldn't matter in this case.  Only the thread
          // assigned to scan the card should change this value.
          *entry = _ct->cur_youngergen_card_val();
          break;
        } else {
          assert(entry_val == _ct->cur_youngergen_card_val(),
                 "Should be the only possibility.");
          // In this case, the card was clean before, and became
          // cur_youngergen only because of processing of a promoted object.
          // We don't have to look at the card.
          return false;
        }
      }
      return true;
    } else {
      jbyte entry_val = *entry;
      assert(entry_val != CardTableRS::clean_card_val(),
             "We shouldn't be looking at clean cards, and this should "
             "be the only place they get cleaned.");
      assert(entry_val != CardTableRS::cur_youngergen_and_prev_nonclean_card,
             "This should not be possible in the sequential case.");
      *entry = CardTableRS::clean_card_val();
      return true;
    }
  }

 public:
  ClearNoncleanCardWrapper(MemRegionClosure* dirty_card_closure,
                           CardTableRS* ct) :
    _dirty_card_closure(dirty_card_closure), _ct(ct) {
    _is_par = (SharedHeap::heap()->n_par_threads() > 0);
  }
  void do_MemRegion(MemRegion mr) {
    // We start at the high end of "mr", walking backwards
    // while accumulating a contiguous dirty range of cards in
    // [start_of_non_clean, end_of_non_clean) which we then
    // process en masse.
    HeapWord* end_of_non_clean = mr.end();
    HeapWord* start_of_non_clean = end_of_non_clean;
    jbyte* entry = _ct->byte_for(mr.last());
    const jbyte* first_entry = _ct->byte_for(mr.start());
    while (entry >= first_entry) {
      HeapWord* cur = _ct->addr_for(entry);
      if (!clear_card(entry)) {
        // We hit a clean card; process any non-empty
        // dirty range accumulated so far.
        if (start_of_non_clean < end_of_non_clean) {
          MemRegion mr2(start_of_non_clean, end_of_non_clean);
          _dirty_card_closure->do_MemRegion(mr2);
        }
        // Reset the dirty window while continuing to
        // look for the next dirty window to process.
        end_of_non_clean = cur;
        start_of_non_clean = end_of_non_clean;
      }
      // Open the left end of the window one card to the left.
      start_of_non_clean = cur;
      // Note that "entry" leads "start_of_non_clean" in
      // its leftward excursion after this point
      // in the loop and, when we hit the left end of "mr",
      // will point off of the left end of the card-table
      // for "mr".
      entry--;
    }
    // If the first card of "mr" was dirty, we will have
    // been left with a dirty window, co-initial with "mr",
    // which we now process.
    if (start_of_non_clean < end_of_non_clean) {
      MemRegion mr2(start_of_non_clean, end_of_non_clean);
      _dirty_card_closure->do_MemRegion(mr2);
    }
  }
};
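
// A minimal standalone sketch of do_MemRegion's backward "dirty window"
// accumulation above, reduced to an array of card indices. Here a zero
// byte stands in for a card that clear_card() says to skip, and the
// hypothetical callback process(begin, end) stands in for
// _dirty_card_closure->do_MemRegion on the half-open window
// [start_of_non_clean, end_of_non_clean).
static void example_walk_cards_backward(const signed char* cards, int n,
                                        void (*process)(int begin, int end)) {
  int end_of_non_clean = n;                 // exclusive right end of window
  int start_of_non_clean = end_of_non_clean;
  for (int i = n - 1; i >= 0; i--) {
    if (cards[i] == 0) {
      // Skippable card: flush any non-empty window accumulated so far...
      if (start_of_non_clean < end_of_non_clean) {
        process(start_of_non_clean, end_of_non_clean);
      }
      // ...and restart the window just below this card.
      end_of_non_clean = i;
      start_of_non_clean = end_of_non_clean;
    }
    start_of_non_clean = i;                 // grow the window one card leftward
  }
  if (start_of_non_clean < end_of_non_clean) {
    process(start_of_non_clean, end_of_non_clean);  // window co-initial with mr
  }
}
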
// clean (by dirty->clean before) ==> cur_younger_gen
// dirty                          ==> cur_youngergen_and_prev_nonclean_card
// precleaned                     ==> cur_youngergen_and_prev_nonclean_card
// prev-younger-gen               ==> cur_youngergen_and_prev_nonclean_card
// cur-younger-gen                ==> cur_younger_gen
// cur_youngergen_and_prev_nonclean_card ==> no change.
void CardTableRS::write_ref_field_gc_par(void* field, oop new_val) {
  jbyte* entry = ct_bs()->byte_for(field);
  do {
    jbyte entry_val = *entry;
    // We put this first because it's probably the most common case.
    if (entry_val == clean_card_val()) {
      // No threat of contention with cleaning threads.
      *entry = cur_youngergen_card_val();
      return;
    } else if (card_is_dirty_wrt_gen_iter(entry_val)
               || is_prev_youngergen_card_val(entry_val)) {
      // Mark it as both cur and prev youngergen; card cleaning thread will
      // eventually remove the previous stuff.
      jbyte new_val = cur_youngergen_and_prev_nonclean_card;
      jbyte res = Atomic::cmpxchg(new_val, entry, entry_val);
      // Did the CAS succeed?
      if (res == entry_val) return;
      // Otherwise, retry, to see the new value.
      continue;
    } else {
      assert(entry_val == cur_youngergen_and_prev_nonclean_card
             || entry_val == cur_youngergen_card_val(),
             "should be the only possibilities.");
      return;
    }
  } while (true);
}
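
// A minimal standalone sketch of the retry loop above, assuming byte-sized
// card values. The hypothetical parameter cas models Atomic::cmpxchg: it
// atomically replaces *dest with new_v if *dest == cmp_v, and returns the
// value it found in *dest. clean_v, cur_v and cur_and_prev_v stand in for
// clean_card_val(), cur_youngergen_card_val() and
// cur_youngergen_and_prev_nonclean_card.
static void example_par_gc_write_barrier(volatile signed char* entry,
                                         signed char clean_v,
                                         signed char cur_v,
                                         signed char cur_and_prev_v,
                                         signed char (*cas)(signed char new_v,
                                                            volatile signed char* dest,
                                                            signed char cmp_v)) {
  while (true) {
    signed char entry_val = *entry;
    if (entry_val == clean_v) {
      *entry = cur_v;            // clean: no contention with cleaning threads
      return;
    } else if (entry_val != cur_v && entry_val != cur_and_prev_v) {
      // Dirty or prev-younger-gen: merge in both "cur" and "prev nonclean".
      if (cas(cur_and_prev_v, entry, entry_val) == entry_val) {
        return;                  // CAS succeeded
      }
      // CAS failed: loop again to see the value another thread installed.
    } else {
      return;                    // card already records what we need
    }
  }
}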

void CardTableRS::younger_refs_in_space_iterate(Space* sp,
                                                OopsInGenClosure* cl) {
  DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, _ct_bs->precision(),
                                                   cl->gen_boundary());
  ClearNoncleanCardWrapper clear_cl(dcto_cl, this);

  _ct_bs->non_clean_card_iterate(sp, sp->used_region_at_save_marks(),
                                 dcto_cl, &clear_cl, false);
}

void CardTableRS::clear_into_younger(Generation* gen, bool clear_perm) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  // Generations younger than gen have been evacuated.  We can clear
  // card table entries for gen (we know that it has no pointers
  // to younger gens) and for those below.  The card tables for
  // the youngest gen need never be cleared, and those for perm gen
  // will be cleared based on the parameter clear_perm.
  // There's a bit of subtlety in the clear() and invalidate()
  // methods that we exploit here and in invalidate_or_clear()
  // below to avoid missing cards at the fringes.  If clear() or
  // invalidate() are changed in the future, this code should
  // be revisited.  20040107.ysr
  Generation* g = gen;
  for (Generation* prev_gen = gch->prev_gen(g);
       prev_gen != NULL;
       g = prev_gen, prev_gen = gch->prev_gen(g)) {
    MemRegion to_be_cleared_mr = g->prev_used_region();
    clear(to_be_cleared_mr);
  }
  // Clear perm gen cards if asked to do so.
  if (clear_perm) {
    MemRegion to_be_cleared_mr = gch->perm_gen()->prev_used_region();
    clear(to_be_cleared_mr);
  }
}
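
// A minimal standalone sketch of the generation walk above. The
// hypothetical struct ExampleGen chains each generation to the next
// younger one (prev_gen in GenCollectedHeap terms), and clear_region
// stands in for clear(g->prev_used_region()).
struct ExampleGen {
  ExampleGen* younger;       // NULL for the youngest generation
  int prev_used_begin;       // stand-in for prev_used_region()
  int prev_used_end;
};

static void example_clear_into_younger(ExampleGen* gen,
                                       void (*clear_region)(int begin, int end)) {
  // Clear cards for gen and every generation below it, but never for the
  // youngest generation (whose cards need never be cleared).
  for (ExampleGen* g = gen; g->younger != NULL; g = g->younger) {
    clear_region(g->prev_used_begin, g->prev_used_end);
  }
}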

void CardTableRS::invalidate_or_clear(Generation* gen, bool younger,
                                      bool perm) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  // For each generation gen (and younger and/or perm)
  // invalidate the cards for the currently occupied part
  // of that generation and clear the cards for the
  // unoccupied part of the generation (if any, making use
  // of that generation's prev_used_region to determine that
  // region).  No need to do anything for the youngest
  // generation.  Also see note#20040107.ysr above.
  Generation* g = gen;
  for (Generation* prev_gen = gch->prev_gen(g); prev_gen != NULL;
       g = prev_gen, prev_gen = gch->prev_gen(g)) {
    MemRegion used_mr = g->used_region();
    MemRegion to_be_cleared_mr = g->prev_used_region().minus(used_mr);
    if (!to_be_cleared_mr.is_empty()) {
      clear(to_be_cleared_mr);
    }
    invalidate(used_mr);
    if (!younger) break;
  }
  // Clear perm gen cards if asked to do so.
  if (perm) {
    g = gch->perm_gen();
    MemRegion used_mr = g->used_region();
    MemRegion to_be_cleared_mr = g->prev_used_region().minus(used_mr);
    if (!to_be_cleared_mr.is_empty()) {
      clear(to_be_cleared_mr);
    }
    invalidate(used_mr);
  }
}

class VerifyCleanCardClosure: public OopClosure {
 private:
  HeapWord* _boundary;
  HeapWord* _begin;
  HeapWord* _end;
 protected:
  template <class T> void do_oop_work(T* p) {
    HeapWord* jp = (HeapWord*)p;
    if (jp >= _begin && jp < _end) {
      oop obj = oopDesc::load_decode_heap_oop(p);
      guarantee(obj == NULL ||
                (HeapWord*)p < _boundary ||
                (HeapWord*)obj >= _boundary,
                "pointer on clean card crosses boundary");
    }
  }
 public:
  VerifyCleanCardClosure(HeapWord* b, HeapWord* begin, HeapWord* end) :
    _boundary(b), _begin(begin), _end(end) {}
  virtual void do_oop(oop* p)       { VerifyCleanCardClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { VerifyCleanCardClosure::do_oop_work(p); }
};
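
// A minimal standalone sketch of the invariant checked in do_oop_work
// above: a pointer slot on a clean card is acceptable only if the referent
// is NULL, the slot itself lies below the generation boundary, or the
// referent lies at or above the boundary. The name
// example_clean_card_pointer_ok is a hypothetical stand-in.
static bool example_clean_card_pointer_ok(const char* slot,
                                          const char* referent,
                                          const char* boundary) {
  return referent == NULL || slot < boundary || referent >= boundary;
}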

class VerifyCTSpaceClosure: public SpaceClosure {
 private:
  CardTableRS* _ct;
  HeapWord* _boundary;
 public:
  VerifyCTSpaceClosure(CardTableRS* ct, HeapWord* boundary) :
    _ct(ct), _boundary(boundary) {}
  virtual void do_space(Space* s) { _ct->verify_space(s, _boundary); }
};

class VerifyCTGenClosure: public GenCollectedHeap::GenClosure {
  CardTableRS* _ct;
 public:
  VerifyCTGenClosure(CardTableRS* ct) : _ct(ct) {}
  void do_generation(Generation* gen) {
    // Skip the youngest generation.
    if (gen->level() == 0) return;
    // Normally, we're interested in pointers to younger generations.
    VerifyCTSpaceClosure blk(_ct, gen->reserved().start());
    gen->space_iterate(&blk, true);
  }
};

void CardTableRS::verify_space(Space* s, HeapWord* gen_boundary) {
  // We don't need to do young-gen spaces.
  if (s->end() <= gen_boundary) return;
  MemRegion used = s->used_region();

  jbyte* cur_entry = byte_for(used.start());
  jbyte* limit = byte_after(used.last());
  while (cur_entry < limit) {
    if (*cur_entry == CardTableModRefBS::clean_card) {
      jbyte* first_dirty = cur_entry+1;
      while (first_dirty < limit &&
             *first_dirty == CardTableModRefBS::clean_card) {
        first_dirty++;
      }
      // If the first object is a regular object, and it has a
      // young-to-old field, that would mark the previous card.
      HeapWord* boundary = addr_for(cur_entry);
      HeapWord* end = (first_dirty >= limit) ? used.end() : addr_for(first_dirty);
      HeapWord* boundary_block = s->block_start(boundary);
      HeapWord* begin = boundary;             // Until proven otherwise.
      HeapWord* start_block = boundary_block; // Until proven otherwise.
      if (boundary_block < boundary) {
        if (s->block_is_obj(boundary_block) && s->obj_is_alive(boundary_block)) {
          oop boundary_obj = oop(boundary_block);
          if (!boundary_obj->is_objArray() &&
              !boundary_obj->is_typeArray()) {
            guarantee(cur_entry > byte_for(used.start()),
                      "else boundary would be boundary_block");
            if (*byte_for(boundary_block) != CardTableModRefBS::clean_card) {
              begin = boundary_block + s->block_size(boundary_block);
              start_block = begin;
            }
          }
        }
      }
      // Now traverse objects until end.
      HeapWord* cur = start_block;
      VerifyCleanCardClosure verify_blk(gen_boundary, begin, end);
      while (cur < end) {
        if (s->block_is_obj(cur) && s->obj_is_alive(cur)) {
          oop(cur)->oop_iterate(&verify_blk);
        }
        cur += s->block_size(cur);
      }
      cur_entry = first_dirty;
    } else {
      // We'd normally expect that cur_youngergen_and_prev_nonclean_card
      // is a transient value, that cannot be in the card table
      // except during GC, and thus assert that:
      //   guarantee(*cur_entry != cur_youngergen_and_prev_nonclean_card,
      //             "Illegal CT value");
      // That, however, need not hold, as will become clear in the
      // following...

      // We'd normally expect that if we are in the parallel case,
      // we can't have left a prev value (which would be different
      // from the current value) in the card table, and so we'd like to
      // assert that:
      //   guarantee(cur_youngergen_card_val() == youngergen_card
      //             || !is_prev_youngergen_card_val(*cur_entry),
      //             "Illegal CT value");
      // That, however, may not hold occasionally, because of
      // CMS or MSC in the old gen. To wit, consider the
      // following two simple illustrative scenarios:
      // (a) CMS: Consider the case where a large object L
      //     spanning several cards is allocated in the old
      //     gen, and has a young gen reference stored in it, dirtying
      //     some interior cards. A young collection scans the card,
      //     finds a young ref and installs a youngergenP_n value.
      //     L then goes dead. Now a CMS collection starts,
      //     finds L dead and sweeps it up. Assume that L is
      //     abutting _unallocated_blk, so _unallocated_blk is
      //     adjusted down to (below) L. Assume further that
      //     no young collection intervenes during this CMS cycle.
      //     The next young gen cycle will not get to look at this
      //     youngergenP_n card since it lies in the unoccupied
      //     part of the space.
      //     Some young collections later the blocks on this
      //     card can be re-allocated either due to direct allocation
      //     or due to absorbing promotions. At this time, the
      //     before-gc verification will fail the above assert.
      // (b) MSC: In this case, an object L with a young reference
      //     is on a card that (therefore) holds a youngergen_n value.
      //     Suppose also that L lies towards the end of the used
      //     space before GC. An MSC collection
      //     occurs that compacts to such an extent that this
      //     card is no longer in the occupied part of the space.
      //     Since current code in MSC does not always clear cards
      //     in the unused part of old gen, this stale youngergen_n
      //     value is left behind and can later be covered by
      //     an object when promotion or direct allocation
      //     re-allocates that part of the heap.
      //
      // Fortunately, the presence of such stale card values is
      // "only" a minor annoyance in that subsequent young collections
      // might needlessly scan such cards, but would still never corrupt
      // the heap as a result. Moreover, it's likely not to be a significant
      // performance inhibitor in practice: for instance,
      // some recent measurements with unoccupied cards eagerly cleared
      // out to maintain this invariant, showed next to no
      // change in young collection times; of course one can construct
      // degenerate examples where the cost can be significant.
      // Note, in particular, that if the "stale" card is modified
      // after re-allocation, it would be dirty, not "stale". Thus,
      // we can never have a younger ref in such a card and it is
      // safe not to scan that card in any collection. [As we see
      // below, we do some unnecessary scanning
      // in some cases in the current parallel scanning algorithm.]
      //
      // The main point below is that the parallel card scanning code
      // deals correctly with these stale card values. There are two main
      // cases to consider where we have a stale "younger gen" value and a
      // "derivative" case to consider, where we have a stale
      // "cur_younger_gen_and_prev_non_clean" value, as will become
      // apparent in the case analysis below.
      // o Case 1. If the stale value corresponds to a younger_gen_n
      //   value other than the cur_younger_gen value then the code
      //   treats this as being tantamount to a prev_younger_gen
      //   card. This means that the card may be unnecessarily scanned.
      //   There are two sub-cases to consider:
      //   o Case 1a. Let us say that the card is in the occupied part
      //     of the generation at the time the collection begins. In
      //     that case the card will be either cleared when it is scanned
      //     for young pointers, or will be set to cur_younger_gen as a
      //     result of promotion. (We have elided the normal case where
      //     the scanning thread and the promoting thread interleave,
      //     possibly resulting in a transient
      //     cur_younger_gen_and_prev_non_clean value before settling
      //     to cur_younger_gen.) [End Case 1a.]
      //   o Case 1b. Consider now the case when the card is in the unoccupied
      //     part of the space which becomes occupied because of promotions
      //     into it during the current young GC. In this case the card
      //     will never be scanned for young references. The current
      //     code will set the card value to either
      //     cur_younger_gen_and_prev_non_clean or leave
      //     it with its stale value -- because the promotions didn't
      //     result in any younger refs on that card. Of these two
      //     cases, the latter will be covered in Case 1a during
      //     a subsequent scan. To deal with the former case, we need
      //     to further consider how we deal with a stale value of
      //     cur_younger_gen_and_prev_non_clean in our case analysis
      //     below. This we do in Case 3 below. [End Case 1b]
      //   [End Case 1]
      // o Case 2. If the stale value corresponds to cur_younger_gen being
      //   a value not necessarily written by a current promotion, the
      //   card will not be scanned by the younger refs scanning code.
      //   (This is OK since as we argued above such cards cannot contain
      //   any younger refs.) The result is that this value will be
      //   treated as a prev_younger_gen value in a subsequent collection,
      //   which is addressed in Case 1 above. [End Case 2]
      // o Case 3. We here consider the "derivative" case from Case 1b above,
      //   because of which we may find a stale
      //   cur_younger_gen_and_prev_non_clean card value in the table.
      //   Once again, as in Case 1, we consider two subcases, depending
      //   on whether the card lies in the occupied or unoccupied part
      //   of the space at the start of the young collection.
      //   o Case 3a. Let us say the card is in the occupied part of
      //     the old gen at the start of the young collection. In that
      //     case, the card will be scanned by the younger refs scanning
      //     code which will set it to cur_younger_gen. In a subsequent
      //     scan, the card will be considered again and get its final
      //     correct value. [End Case 3a]
      //   o Case 3b. Now consider the case where the card is in the
      //     unoccupied part of the old gen, and is occupied as a result
      //     of promotions during this young GC. In that case,
      //     the card will not be scanned for younger refs. The presence
      //     of newly promoted objects on the card will then result in
      //     its keeping the value cur_younger_gen_and_prev_non_clean,
      //     which we have dealt with in Case 3 here. [End Case 3b]
      //   [End Case 3]
      //
      // (Please refer to the code in the helper class
      // ClearNoncleanCardWrapper and in CardTableModRefBS for details.)
      //
      // The informal arguments above can be tightened into a formal
      // correctness proof and it behooves us to write up such a proof,
      // or to use model checking to prove that there are no lingering
      // concerns.
      //
      // Clearly because of Case 3b one cannot bound the time for
      // which a card will retain what we have called a "stale" value.
      // However, one can obtain a loose upper bound on the redundant
      // work as a result of such stale values. Note first that any
      // time a stale card lies in the occupied part of the space at
      // the start of the collection, it is scanned by younger refs
      // code and we can define a rank function on card values that
      // declines when this is so. Note also that when a card does not
      // lie in the occupied part of the space at the beginning of a
      // young collection, its rank can either decline or stay unchanged.
      // In this case, no extra work is done in terms of redundant
      // younger refs scanning of that card.
      // Then, the case analysis above reveals that, in the worst case,
      // any such stale card will be scanned unnecessarily at most twice.
      //
      // It is nonetheless advisable to try and get rid of some of this
      // redundant work in a subsequent (low priority) re-design of
      // the card-scanning code, if only to simplify the underlying
      // state machine analysis/proof. ysr 1/28/2002. XXX
      cur_entry++;
    }
  }
}

void CardTableRS::verify() {
  // At present, we only know how to verify the card table RS for
  // generational heaps.
  VerifyCTGenClosure blk(this);
  CollectedHeap* ch = Universe::heap();
  // We will do the perm-gen portion of the card table, too.
  Generation* pg = SharedHeap::heap()->perm_gen();
  HeapWord* pg_boundary = pg->reserved().start();

  if (ch->kind() == CollectedHeap::GenCollectedHeap) {
    GenCollectedHeap::heap()->generation_iterate(&blk, false);
    _ct_bs->verify();

    // If the old gen collections also collect perm, then we are only
    // interested in perm-to-young pointers, not perm-to-old pointers.
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    CollectorPolicy* cp = gch->collector_policy();
    if (cp->is_mark_sweep_policy() || cp->is_concurrent_mark_sweep_policy()) {
      pg_boundary = gch->get_gen(1)->reserved().start();
    }
  }
  VerifyCTSpaceClosure perm_space_blk(this, pg_boundary);
  SharedHeap::heap()->perm_gen()->space_iterate(&perm_space_blk, true);
}

void CardTableRS::verify_aligned_region_empty(MemRegion mr) {
  if (!mr.is_empty()) {
    jbyte* cur_entry = byte_for(mr.start());
    jbyte* limit = byte_after(mr.last());
    // The region mr may not start on a card boundary so
    // the first card may reflect a write to the space
    // just prior to mr.
    if (!is_aligned(mr.start())) {
      cur_entry++;
    }
    for (; cur_entry < limit; cur_entry++) {
      guarantee(*cur_entry == CardTableModRefBS::clean_card,
                "Unexpected dirty card found");
    }
  }
}
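
// A minimal standalone sketch of the check above over a plain array of
// card entries, where a zero byte stands in for
// CardTableModRefBS::clean_card. first_card_shared says whether the region
// starts mid-card, in which case a write to the space just before the
// region may legitimately have dirtied the first card.
static bool example_aligned_region_is_clean(const signed char* cards,
                                            int first, int limit,
                                            bool first_card_shared) {
  if (first_card_shared) {
    first++;                     // skip the card shared with the space before
  }
  for (int i = first; i < limit; i++) {
    if (cards[i] != 0) {
      return false;              // would trip the guarantee above
    }
  }
  return true;
}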