Sat, 01 Sep 2012 13:25:18 -0400
6964458: Reimplement class meta-data storage to use native memory
Summary: Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes
Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland
Contributed-by: jmasa <jon.masamitsu@oracle.com>, stefank <stefan.karlsson@oracle.com>, mgerdin <mikael.gerdin@oracle.com>, never <tom.rodriguez@oracle.com>
/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
#include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psTasks.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.psgc.inline.hpp"

// Checks an individual oop for missing precise marks. Mark
// may be either dirty or newgen.
class CheckForUnmarkedOops : public OopClosure {
 private:
  PSYoungGen*         _young_gen;
  CardTableExtension* _card_table;
  HeapWord*           _unmarked_addr;
  jbyte*              _unmarked_card;

 protected:
  template <class T> void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    if (_young_gen->is_in_reserved(obj) &&
        !_card_table->addr_is_marked_imprecise(p)) {
      // Don't overwrite the first missing card mark
      if (_unmarked_addr == NULL) {
        _unmarked_addr = (HeapWord*)p;
        _unmarked_card = _card_table->byte_for(p);
      }
    }
  }

 public:
  CheckForUnmarkedOops(PSYoungGen* young_gen, CardTableExtension* card_table) :
    _young_gen(young_gen), _card_table(card_table), _unmarked_addr(NULL) { }

  virtual void do_oop(oop* p)       { CheckForUnmarkedOops::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { CheckForUnmarkedOops::do_oop_work(p); }

  bool has_unmarked_oop() {
    return _unmarked_addr != NULL;
  }
};
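
// Terminology used by the verification closures in this file: an "imprecise"
// mark means the card spanning the object's header is marked (a
// beginning-of-object mark), while a "precise" mark means the card spanning
// the updated field itself is marked.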

// Checks all objects for the existence of some type of mark,
// precise or imprecise, dirty or newgen.
class CheckForUnmarkedObjects : public ObjectClosure {
 private:
  PSYoungGen*         _young_gen;
  CardTableExtension* _card_table;

 public:
  CheckForUnmarkedObjects() {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

    _young_gen = heap->young_gen();
    _card_table = (CardTableExtension*)heap->barrier_set();
    // No point in asserting barrier set type here. Need to make CardTableExtension
    // a unique barrier set type.
  }

  // Card marks are not precise. The current system can leave us with
  // a mishmash of precise marks and beginning-of-object marks. This means
  // we test for missing precise marks first. If any are found, we don't
  // fail unless the object head is also unmarked.
  virtual void do_object(oop obj) {
    CheckForUnmarkedOops object_check(_young_gen, _card_table);
    obj->oop_iterate_no_header(&object_check);
    if (object_check.has_unmarked_oop()) {
      assert(_card_table->addr_is_marked_imprecise(obj), "Found unmarked young_gen object");
    }
  }
};

// Checks for precise marking of oops as newgen.
class CheckForPreciseMarks : public OopClosure {
 private:
  PSYoungGen*         _young_gen;
  CardTableExtension* _card_table;

 protected:
  template <class T> void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    if (_young_gen->is_in_reserved(obj)) {
      assert(_card_table->addr_is_marked_precise(p), "Found unmarked precise oop");
      _card_table->set_card_newgen(p);
    }
  }

 public:
  CheckForPreciseMarks(PSYoungGen* young_gen, CardTableExtension* card_table) :
    _young_gen(young_gen), _card_table(card_table) { }

  virtual void do_oop(oop* p)       { CheckForPreciseMarks::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { CheckForPreciseMarks::do_oop_work(p); }
};

// We get passed the space_top value to prevent us from traversing into
// the old_gen promotion labs, which cannot be safely parsed.
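//
// In outline, the loop below walks runs of non-clean cards: for each run it
// finds the first object overlapping the run, extends the run past any
// object that spans the first clean card and is dirty further on, clears the
// cards of the run, and then pushes the contents of every object in it.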
void CardTableExtension::scavenge_contents(ObjectStartArray* start_array,
                                           MutableSpace* sp,
                                           HeapWord* space_top,
                                           PSPromotionManager* pm)
{
  assert(start_array != NULL && sp != NULL && pm != NULL, "Sanity");
  assert(start_array->covered_region().contains(sp->used_region()),
         "ObjectStartArray does not cover space");

  if (sp->not_empty()) {
    oop* sp_top = (oop*)space_top;
    oop* prev_top = NULL;
    jbyte* current_card = byte_for(sp->bottom());
    jbyte* end_card = byte_for(sp_top - 1);    // sp_top is exclusive
    // scan card marking array
    while (current_card <= end_card) {
      jbyte value = *current_card;
      // skip clean cards
      if (card_is_clean(value)) {
        current_card++;
      } else {
        // we found a non-clean card
        jbyte* first_nonclean_card = current_card++;
        oop* bottom = (oop*)addr_for(first_nonclean_card);
        // find object starting on card
        oop* bottom_obj = (oop*)start_array->object_start((HeapWord*)bottom);
        assert(bottom_obj <= bottom, "just checking");
        // make sure we don't scan oops we already looked at
        if (bottom < prev_top) bottom = prev_top;
        // figure out when to stop scanning
        jbyte* first_clean_card;
        oop* top;
        bool restart_scanning;
        do {
          restart_scanning = false;
          // find a clean card
          while (current_card <= end_card) {
            value = *current_card;
            if (card_is_clean(value)) break;
            current_card++;
          }
          // check if we reached the end, if so we are done
          if (current_card >= end_card) {
            first_clean_card = end_card + 1;
            current_card++;
            top = sp_top;
          } else {
            // we have a clean card, find object starting on that card
            first_clean_card = current_card++;
            top = (oop*)addr_for(first_clean_card);
            oop* top_obj = (oop*)start_array->object_start((HeapWord*)top);
            assert(top_obj <= top, "just checking");
            if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
              // an arrayOop is starting on the clean card - since we do exact store
              // checks for objArrays we are done
            } else {
              // otherwise, it is possible that the object starting on the clean card
              // spans the entire card, and that the store happened on a later card.
              // figure out where the object ends
              top = top_obj + oop(top_obj)->size();
              jbyte* top_card = CardTableModRefBS::byte_for(top - 1);   // top is exclusive
              if (top_card > first_clean_card) {
                // object ends on a different card
                current_card = top_card + 1;
                if (card_is_clean(*top_card)) {
                  // the ending card is clean, we are done
                  first_clean_card = top_card;
                } else {
                  // the ending card is not clean, continue scanning at start of do-while
                  restart_scanning = true;
                }
              } else {
                // object ends on the clean card, we are done.
                assert(first_clean_card == top_card, "just checking");
              }
            }
          }
        } while (restart_scanning);
        // we know which cards to scan, now clear them
        while (first_nonclean_card < first_clean_card) {
          *first_nonclean_card++ = clean_card;
        }
        // scan oops in objects
        do {
          oop(bottom_obj)->push_contents(pm);
          bottom_obj += oop(bottom_obj)->size();
          assert(bottom_obj <= sp_top, "just checking");
        } while (bottom_obj < top);
        pm->drain_stacks_cond_depth();
        // remember top oop* scanned
        prev_top = top;
      }
    }
  }
}

void CardTableExtension::scavenge_contents_parallel(ObjectStartArray* start_array,
                                                    MutableSpace* sp,
                                                    HeapWord* space_top,
                                                    PSPromotionManager* pm,
                                                    uint stripe_number,
                                                    uint stripe_total) {
  int ssize = 128; // Naked constant! Work unit = 64k.
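  // With HotSpot's default 512-byte cards (card_shift == 9), a stripe of
  // ssize == 128 cards covers 128 * 512 = 64k of heap, which is the work
  // unit noted above.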
  int dirty_card_count = 0;

  oop* sp_top = (oop*)space_top;
  oop* sp_last = sp->bottom() == space_top ? sp_top : sp_top - 1;
  jbyte* start_card = byte_for(sp->bottom());
  jbyte* end_card = byte_for(sp_last) + 1;
  oop* last_scanned = NULL; // Prevent scanning objects more than once
  // The width of the stripe ssize*stripe_total must be
  // consistent with the number of stripes so that the complete slice
  // is covered.
  size_t slice_width = ssize * stripe_total;
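  // For example, with stripe_total == 4 and ssize == 128, stripe 1 scans the
  // card ranges [128, 256), [640, 768), ... : consecutive slices of
  // slice_width == 512 cards tile the card table without gaps or overlap.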
  for (jbyte* slice = start_card; slice < end_card; slice += slice_width) {
    jbyte* worker_start_card = slice + stripe_number * ssize;
    if (worker_start_card >= end_card)
      return; // We're done.

    jbyte* worker_end_card = worker_start_card + ssize;
    if (worker_end_card > end_card)
      worker_end_card = end_card;

    // We do not want to scan objects more than once. In order to accomplish
    // this, we assert that any object with an object head inside our 'slice'
    // belongs to us. We may need to extend the range of scanned cards if the
    // last object continues into the next 'slice'.
    //
    // Note! ending cards are exclusive!
    HeapWord* slice_start = addr_for(worker_start_card);
    HeapWord* slice_end = MIN2((HeapWord*) sp_top, addr_for(worker_end_card));

    // If there are no objects starting within the chunk, skip it.
    if (!start_array->object_starts_in_range(slice_start, slice_end)) {
      continue;
    }
    // Update our beginning addr
    HeapWord* first_object = start_array->object_start(slice_start);
    debug_only(oop* first_object_within_slice = (oop*) first_object;)
    if (first_object < slice_start) {
      last_scanned = (oop*)(first_object + oop(first_object)->size());
      debug_only(first_object_within_slice = last_scanned;)
      worker_start_card = byte_for(last_scanned);
    }

    // Update the ending addr
    if (slice_end < (HeapWord*)sp_top) {
      // The subtraction is important! An object may start precisely at slice_end.
      HeapWord* last_object = start_array->object_start(slice_end - 1);
      slice_end = last_object + oop(last_object)->size();
      // worker_end_card is exclusive, so bump it one past the end of last_object's
      // covered span.
      worker_end_card = byte_for(slice_end) + 1;

      if (worker_end_card > end_card)
        worker_end_card = end_card;
    }

    assert(slice_end <= (HeapWord*)sp_top, "Last object in slice crosses space boundary");
    assert(is_valid_card_address(worker_start_card), "Invalid worker start card");
    assert(is_valid_card_address(worker_end_card), "Invalid worker end card");
    // Note that worker_start_card >= worker_end_card is legal, and happens when
    // an object spans an entire slice.
    assert(worker_start_card <= end_card, "worker start card beyond end card");
    assert(worker_end_card <= end_card, "worker end card beyond end card");

    jbyte* current_card = worker_start_card;
    while (current_card < worker_end_card) {
      // Find an unclean card.
      while (current_card < worker_end_card && card_is_clean(*current_card)) {
        current_card++;
      }
      jbyte* first_unclean_card = current_card;

      // Find the end of a run of contiguous unclean cards
      while (current_card < worker_end_card && !card_is_clean(*current_card)) {
        while (current_card < worker_end_card && !card_is_clean(*current_card)) {
          current_card++;
        }

        if (current_card < worker_end_card) {
          // Some objects may be large enough to span several cards. If such
          // an object has more than one dirty card, separated by a clean card,
          // we will attempt to scan it twice. The test against "last_scanned"
          // prevents the redundant object scan, but it does not prevent newly
          // marked cards from being cleaned.
          HeapWord* last_object_in_dirty_region = start_array->object_start(addr_for(current_card)-1);
          size_t size_of_last_object = oop(last_object_in_dirty_region)->size();
          HeapWord* end_of_last_object = last_object_in_dirty_region + size_of_last_object;
          jbyte* ending_card_of_last_object = byte_for(end_of_last_object);
          assert(ending_card_of_last_object <= worker_end_card, "ending_card_of_last_object is greater than worker_end_card");
          if (ending_card_of_last_object > current_card) {
            // This means the object spans the next complete card.
            // We need to bump the current_card to ending_card_of_last_object
            current_card = ending_card_of_last_object;
          }
        }
      }
      jbyte* following_clean_card = current_card;

      if (first_unclean_card < worker_end_card) {
        oop* p = (oop*) start_array->object_start(addr_for(first_unclean_card));
        assert((HeapWord*)p <= addr_for(first_unclean_card), "checking");
        // "p" should always be >= "last_scanned" because newly GC dirtied
        // cards are no longer scanned again (see comment at end
        // of loop on the increment of "current_card"). Test that
        // hypothesis before removing this code.
        // If this code is removed, deal with the first time through
        // the loop when the last_scanned is the object starting in
        // the previous slice.
        assert((p >= last_scanned) ||
               (last_scanned == first_object_within_slice),
               "Should no longer be possible");
        if (p < last_scanned) {
          // Avoid scanning more than once; this can happen because
          // newgen cards set by GC may be a different set than the
          // originally dirty set
          p = last_scanned;
        }
        oop* to = (oop*)addr_for(following_clean_card);

        // Test slice_end first!
        if ((HeapWord*)to > slice_end) {
          to = (oop*)slice_end;
        } else if (to > sp_top) {
          to = sp_top;
        }

        // we know which cards to scan, now clear them
        if (first_unclean_card <= worker_start_card+1)
          first_unclean_card = worker_start_card+1;
        if (following_clean_card >= worker_end_card-1)
          following_clean_card = worker_end_card-1;

        while (first_unclean_card < following_clean_card) {
          *first_unclean_card++ = clean_card;
        }

        const int interval = PrefetchScanIntervalInBytes;
        // scan all objects in the range
        if (interval != 0) {
          while (p < to) {
            Prefetch::write(p, interval);
            oop m = oop(p);
            assert(m->is_oop_or_null(), "check for header");
            m->push_contents(pm);
            p += m->size();
          }
          pm->drain_stacks_cond_depth();
        } else {
          while (p < to) {
            oop m = oop(p);
            assert(m->is_oop_or_null(), "check for header");
            m->push_contents(pm);
            p += m->size();
          }
          pm->drain_stacks_cond_depth();
        }
        last_scanned = p;
      }
      // "current_card" is still the "following_clean_card", or
      // current_card is >= the worker_end_card and the
      // loop will not execute again.
      assert((current_card == following_clean_card) ||
             (current_card >= worker_end_card),
             "current_card should only be incremented if it still equals "
             "following_clean_card");
      // Increment current_card so that it is not processed again.
      // It may now be dirty because an old-to-young pointer was
      // found on it and updated. If it is now dirty, it cannot
      // be safely cleaned in the next iteration.
      current_card++;
    }
  }
}

// This should be called before a scavenge.
void CardTableExtension::verify_all_young_refs_imprecise() {
  CheckForUnmarkedObjects check;

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSOldGen* old_gen = heap->old_gen();

  old_gen->object_iterate(&check);
}

// This should be called immediately after a scavenge, before mutators resume.
void CardTableExtension::verify_all_young_refs_precise() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSOldGen* old_gen = heap->old_gen();

  CheckForPreciseMarks check(heap->young_gen(), (CardTableExtension*)heap->barrier_set());

  old_gen->oop_iterate_no_header(&check);

  verify_all_young_refs_precise_helper(old_gen->object_space()->used_region());
}
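
// The precise verification is a two-pass protocol: CheckForPreciseMarks
// asserts that every old-to-young field sits on a precisely marked card and
// then re-marks that card via set_card_newgen(); the helper below then
// requires every card in the region to be clean_card or verify_card,
// converting verify_card back to youngergen_card before mutators resume.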

void CardTableExtension::verify_all_young_refs_precise_helper(MemRegion mr) {
  CardTableExtension* card_table = (CardTableExtension*)Universe::heap()->barrier_set();
  // FIX ME ASSERT HERE

  jbyte* bot = card_table->byte_for(mr.start());
  jbyte* top = card_table->byte_for(mr.end());
  while (bot <= top) {
    assert(*bot == clean_card || *bot == verify_card, "Found unwanted or unknown card mark");
    if (*bot == verify_card)
      *bot = youngergen_card;
    bot++;
  }
}

bool CardTableExtension::addr_is_marked_imprecise(void *addr) {
  jbyte* p = byte_for(addr);
  jbyte val = *p;

  if (card_is_dirty(val))
    return true;

  if (card_is_newgen(val))
    return true;

  if (card_is_clean(val))
    return false;

  assert(false, "Found unhandled card mark type");

  return false;
}

// Also includes verify_card
bool CardTableExtension::addr_is_marked_precise(void *addr) {
  jbyte* p = byte_for(addr);
  jbyte val = *p;

  if (card_is_newgen(val))
    return true;

  if (card_is_verify(val))
    return true;

  if (card_is_clean(val))
    return false;

  if (card_is_dirty(val))
    return false;

  assert(false, "Found unhandled card mark type");

  return false;
}
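
// Summary of the two predicates above, by card value:
//
//                dirty    newgen   verify   clean
//   imprecise    true     true     assert   false
//   precise      false    true     true     false
//
// ("assert": a verify_card reaching addr_is_marked_imprecise() falls through
// to the unhandled-mark assert and returns false in product builds.)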

// Assumes that only the base or the end changes. This allows identification
// of the region that is being resized.
// CardTableModRefBS::resize_covered_region() is used for the normal case,
// where the covered regions are growing or shrinking at the high end.
// The method resize_covered_region_by_end() is analogous to
// CardTableModRefBS::resize_covered_region() but
// for regions that grow or shrink at the low end.
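//
// Dispatch below: if an existing covered region shares the new region's
// start, the region is changing at its high end and is resized by
// resize_covered_region_by_start(); if one shares the new region's end, it
// is changing at its low end and is resized by resize_covered_region_by_end();
// otherwise the region is new and is added via resize_covered_region_by_start().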
void CardTableExtension::resize_covered_region(MemRegion new_region) {

  for (int i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == new_region.start()) {
      // Found a covered region with the same start as the
      // new region. The region is growing or shrinking
      // from the start of the region.
      resize_covered_region_by_start(new_region);
      return;
    }
    if (_covered[i].start() > new_region.start()) {
      break;
    }
  }

  int changed_region = -1;
  for (int j = 0; j < _cur_covered_regions; j++) {
    if (_covered[j].end() == new_region.end()) {
      changed_region = j;
      // This is a case where the covered region is growing or shrinking
      // at the start of the region.
      assert(changed_region != -1, "Don't expect to add a covered region");
      assert(_covered[changed_region].byte_size() != new_region.byte_size(),
             "The sizes should be different here");
      resize_covered_region_by_end(changed_region, new_region);
      return;
    }
  }
  // This should only be a new covered region (where no existing
  // covered region matches at the start or the end).
  assert(_cur_covered_regions < _max_covered_regions,
         "An existing region should have been found");
  resize_covered_region_by_start(new_region);
}

void CardTableExtension::resize_covered_region_by_start(MemRegion new_region) {
  CardTableModRefBS::resize_covered_region(new_region);
  debug_only(verify_guard();)
}

void CardTableExtension::resize_covered_region_by_end(int changed_region,
                                                      MemRegion new_region) {
  assert(SafepointSynchronize::is_at_safepoint(),
         "Only expect an expansion at the low end at a GC");
  debug_only(verify_guard();)
#ifdef ASSERT
  for (int k = 0; k < _cur_covered_regions; k++) {
    if (_covered[k].end() == new_region.end()) {
      assert(changed_region == k, "Changed region is incorrect");
      break;
    }
  }
#endif

  // Commit new or uncommit old pages, if necessary.
  if (resize_commit_uncommit(changed_region, new_region)) {
    // Set the new start of the committed region
    resize_update_committed_table(changed_region, new_region);
  }

  // Update card table entries
  resize_update_card_table_entries(changed_region, new_region);

  // Update the covered region
  resize_update_covered_table(changed_region, new_region);

  if (TraceCardTableModRefBS) {
    int ind = changed_region;
    gclog_or_tty->print_cr("CardTableModRefBS::resize_covered_region: ");
    gclog_or_tty->print_cr("  "
                           "  _covered[%d].start(): " INTPTR_FORMAT
                           "  _covered[%d].last(): " INTPTR_FORMAT,
                           ind, _covered[ind].start(),
                           ind, _covered[ind].last());
    gclog_or_tty->print_cr("  "
                           "  _committed[%d].start(): " INTPTR_FORMAT
                           "  _committed[%d].last(): " INTPTR_FORMAT,
                           ind, _committed[ind].start(),
                           ind, _committed[ind].last());
    gclog_or_tty->print_cr("  "
                           "  byte_for(start): " INTPTR_FORMAT
                           "  byte_for(last): " INTPTR_FORMAT,
                           byte_for(_covered[ind].start()),
                           byte_for(_covered[ind].last()));
    gclog_or_tty->print_cr("  "
                           "  addr_for(start): " INTPTR_FORMAT
                           "  addr_for(last): " INTPTR_FORMAT,
                           addr_for((jbyte*) _committed[ind].start()),
                           addr_for((jbyte*) _committed[ind].last()));
  }
  debug_only(verify_guard();)
}

bool CardTableExtension::resize_commit_uncommit(int changed_region,
                                                MemRegion new_region) {
  bool result = false;
  // Commit new or uncommit old pages, if necessary.
  MemRegion cur_committed = _committed[changed_region];
  assert(_covered[changed_region].end() == new_region.end(),
         "The ends of the regions are expected to match");
  // Extend the start of this _committed region to
  // cover the start of any previous _committed region.
  // This forms overlapping regions, but never interior regions.
  HeapWord* min_prev_start = lowest_prev_committed_start(changed_region);
  if (min_prev_start < cur_committed.start()) {
    // Only really need to set start of "cur_committed" to
    // the new start (min_prev_start) but assertion checking code
    // below uses cur_committed.end() so make it correct.
    MemRegion new_committed =
      MemRegion(min_prev_start, cur_committed.end());
    cur_committed = new_committed;
  }
#ifdef ASSERT
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(cur_committed.start() ==
         (HeapWord*) align_size_up((uintptr_t) cur_committed.start(),
                                   os::vm_page_size()),
         "Starts should have proper alignment");
#endif

  jbyte* new_start = byte_for(new_region.start());
  // Round down because this is for the start address
  HeapWord* new_start_aligned =
    (HeapWord*)align_size_down((uintptr_t)new_start, os::vm_page_size());
  // The guard page is always committed and should not be committed over.
  // This method is used in cases where the generation is growing toward
  // lower addresses but the guard region is still at the end of the
  // card table. That still makes sense when looking for writes
  // off the end of the card table.
  if (new_start_aligned < cur_committed.start()) {
    // Expand the committed region
    //
    // Case A
    //                                          |+ guard +|
    //                          |+ cur committed +++++++++|
    //                  |+ new committed +++++++++++++++++|
    //
    // Case B
    //                                          |+ guard +|
    //                              |+ cur committed +|
    //                  |+ new committed +++++++|
    //
    // These are not expected because the calculation of the
    // cur committed region and the new committed region
    // share the same end for the covered region.
    // Case C
    //                                          |+ guard +|
    //                              |+ cur committed +|
    //                  |+ new committed +++++++++++++++++|
    // Case D
    //                                          |+ guard +|
    //                              |+ cur committed +++++++++++|
    //                  |+ new committed +++++++|

    HeapWord* new_end_for_commit =
      MIN2(cur_committed.end(), _guard_region.start());
    if (new_start_aligned < new_end_for_commit) {
      MemRegion new_committed =
        MemRegion(new_start_aligned, new_end_for_commit);
      if (!os::commit_memory((char*)new_committed.start(),
                             new_committed.byte_size())) {
        vm_exit_out_of_memory(new_committed.byte_size(),
                              "card table expansion");
      }
    }
    result = true;
  } else if (new_start_aligned > cur_committed.start()) {
    // Shrink the committed region
#if 0 // uncommitting space is currently unsafe because of the interactions
      // of growing and shrinking regions. One region A can uncommit space
      // that it owns but which is being used by another region B (maybe).
      // Region B has not committed the space because it was already
      // committed by region A.
    MemRegion uncommit_region = committed_unique_to_self(changed_region,
      MemRegion(cur_committed.start(), new_start_aligned));
    if (!uncommit_region.is_empty()) {
      if (!os::uncommit_memory((char*)uncommit_region.start(),
                               uncommit_region.byte_size())) {
        // If the uncommit fails, ignore it. Let the
        // committed table resizing go even though the committed
        // table will overstate the committed space.
      }
    }
#else
    assert(!result, "Should be false with current workaround");
#endif
  }
  assert(_committed[changed_region].end() == cur_committed.end(),
         "end should not change");
  return result;
}

void CardTableExtension::resize_update_committed_table(int changed_region,
                                                       MemRegion new_region) {

  jbyte* new_start = byte_for(new_region.start());
  // Set the new start of the committed region
  HeapWord* new_start_aligned =
    (HeapWord*)align_size_down((uintptr_t)new_start,
                               os::vm_page_size());
  MemRegion new_committed = MemRegion(new_start_aligned,
                                      _committed[changed_region].end());
  _committed[changed_region] = new_committed;
  _committed[changed_region].set_start(new_start_aligned);
}

void CardTableExtension::resize_update_card_table_entries(int changed_region,
                                                           MemRegion new_region) {
  debug_only(verify_guard();)
  MemRegion original_covered = _covered[changed_region];
  // Initialize the card entries. Only consider the
  // region covered by the card table (_whole_heap)
  jbyte* entry;
  if (new_region.start() < _whole_heap.start()) {
    entry = byte_for(_whole_heap.start());
  } else {
    entry = byte_for(new_region.start());
  }
  jbyte* end = byte_for(original_covered.start());
  // If _whole_heap starts at the original covered region's start,
  // this loop will not execute.
  while (entry < end) { *entry++ = clean_card; }
}

void CardTableExtension::resize_update_covered_table(int changed_region,
                                                     MemRegion new_region) {
  // Update the covered region
  _covered[changed_region].set_start(new_region.start());
  _covered[changed_region].set_word_size(new_region.word_size());

  // reorder regions. There should only be at most 1 out
  // of order.
  for (int i = _cur_covered_regions - 1; i > 0; i--) {
    if (_covered[i].start() < _covered[i-1].start()) {
      MemRegion covered_mr = _covered[i-1];
      _covered[i-1] = _covered[i];
      _covered[i] = covered_mr;
      MemRegion committed_mr = _committed[i-1];
      _committed[i-1] = _committed[i];
      _committed[i] = committed_mr;
      break;
    }
  }
#ifdef ASSERT
  for (int m = 0; m < _cur_covered_regions - 1; m++) {
    assert(_covered[m].start() <= _covered[m+1].start(),
           "Covered regions out of order");
    assert(_committed[m].start() <= _committed[m+1].start(),
           "Committed regions out of order");
  }
#endif
}

// Returns the start of any committed region that is lower than
// the target committed region (index ind) and that intersects the
// target region. If none, return start of target region.
//
//      -------------
//      |           |
//      -------------
//            ------------
//            |  target  |
//            ------------
//                     -------------
//                     |           |
//                     -------------
//      ^ returns this
//
//      -------------
//      |           |
//      -------------
//                   ------------
//                   |  target  |
//                   ------------
//                            -------------
//                            |           |
//                            -------------
//                   ^ returns this
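//
// The result is used by resize_commit_uncommit() above to extend
// cur_committed downward, so that space shared with an overlapping
// committed region is not committed twice.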

HeapWord* CardTableExtension::lowest_prev_committed_start(int ind) const {
  assert(_cur_covered_regions >= 0, "Expecting at least one region");
  HeapWord* min_start = _committed[ind].start();
  for (int j = 0; j < ind; j++) {
    HeapWord* this_start = _committed[j].start();
    if ((this_start < min_start) &&
        !(_committed[j].intersection(_committed[ind])).is_empty()) {
      min_start = this_start;
    }
  }
  return min_start;
}