/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
#include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psTasks.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.psgc.inline.hpp"
#include "runtime/prefetch.inline.hpp"

// Checks an individual oop for missing precise marks. Mark
// may be either dirty or newgen.
class CheckForUnmarkedOops : public OopClosure {
 private:
  PSYoungGen*         _young_gen;
  CardTableExtension* _card_table;
  HeapWord*           _unmarked_addr;
  jbyte*              _unmarked_card;

 protected:
  template <class T> void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    if (_young_gen->is_in_reserved(obj) &&
        !_card_table->addr_is_marked_imprecise(p)) {
      // Don't overwrite the first missing card mark
      if (_unmarked_addr == NULL) {
        _unmarked_addr = (HeapWord*)p;
        _unmarked_card = _card_table->byte_for(p);
      }
    }
  }

 public:
  CheckForUnmarkedOops(PSYoungGen* young_gen, CardTableExtension* card_table) :
    _young_gen(young_gen), _card_table(card_table), _unmarked_addr(NULL) { }

  virtual void do_oop(oop* p)       { CheckForUnmarkedOops::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { CheckForUnmarkedOops::do_oop_work(p); }

  bool has_unmarked_oop() {
    return _unmarked_addr != NULL;
  }
};

// Checks all objects for the existence of some type of mark,
// precise or imprecise, dirty or newgen.
class CheckForUnmarkedObjects : public ObjectClosure {
 private:
  PSYoungGen*         _young_gen;
  CardTableExtension* _card_table;

 public:
  CheckForUnmarkedObjects() {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

    _young_gen = heap->young_gen();
    _card_table = (CardTableExtension*)heap->barrier_set();
    // No point in asserting barrier set type here. Need to make CardTableExtension
    // a unique barrier set type.
  }

  // Card marks are not precise. The current system can leave us with
  // a mishmash of precise marks and beginning-of-object marks. This means
  // we test for missing precise marks first. If any are found, we don't
  // fail unless the object head is also unmarked.
  virtual void do_object(oop obj) {
    CheckForUnmarkedOops object_check(_young_gen, _card_table);
    obj->oop_iterate_no_header(&object_check);
    if (object_check.has_unmarked_oop()) {
      assert(_card_table->addr_is_marked_imprecise(obj), "Found unmarked young_gen object");
    }
  }
};

// Checks for precise marking of oops as newgen.
class CheckForPreciseMarks : public OopClosure {
 private:
  PSYoungGen*         _young_gen;
  CardTableExtension* _card_table;

 protected:
  template <class T> void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    if (_young_gen->is_in_reserved(obj)) {
      assert(_card_table->addr_is_marked_precise(p), "Found unmarked precise oop");
      _card_table->set_card_newgen(p);
    }
  }

 public:
  CheckForPreciseMarks( PSYoungGen* young_gen, CardTableExtension* card_table ) :
    _young_gen(young_gen), _card_table(card_table) { }

  virtual void do_oop(oop* p)       { CheckForPreciseMarks::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { CheckForPreciseMarks::do_oop_work(p); }
};

// We get passed the space_top value to prevent us from traversing into
// the old_gen promotion labs, which cannot be safely parsed.

// Do not call this method if the space is empty.
// It is a waste to start tasks and get here only to
// do no work.  If this method needs to be called
// when the space is empty, fix the calculation of
// end_card to allow sp_top == sp->bottom().
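//
// Each of stripe_total workers is expected to call this with its own
// distinct stripe_number; the stripes tile every slice of the space, so
// across all workers each card between bottom() and space_top is
// examined exactly once.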
void CardTableExtension::scavenge_contents_parallel(ObjectStartArray* start_array,
                                                    MutableSpace* sp,
                                                    HeapWord* space_top,
                                                    PSPromotionManager* pm,
                                                    uint stripe_number,
                                                    uint stripe_total) {
  int ssize = 128; // Naked constant!  Work unit = 64k.
  int dirty_card_count = 0;

  // It is a waste to get here if empty.
  assert(sp->bottom() < sp->top(), "Should not be called if empty");
  oop* sp_top = (oop*)space_top;
  jbyte* start_card = byte_for(sp->bottom());
  jbyte* end_card   = byte_for(sp_top - 1) + 1;
  oop* last_scanned = NULL; // Prevent scanning objects more than once
  // The width of the stripe ssize*stripe_total must be
  // consistent with the number of stripes so that the complete slice
  // is covered.
  size_t slice_width = ssize * stripe_total;
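  // Example of the stripe arithmetic (worker counts are illustrative):
  // 128 cards of 512 bytes each give the 64k work unit noted above; with
  // stripe_total = 8, slice_width is 1024 cards (512k of heap), and worker
  // N scans cards [slice + N*128, slice + (N+1)*128) of every slice.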
  for (jbyte* slice = start_card; slice < end_card; slice += slice_width) {
    jbyte* worker_start_card = slice + stripe_number * ssize;
    if (worker_start_card >= end_card)
      return; // We're done.

    jbyte* worker_end_card = worker_start_card + ssize;
    if (worker_end_card > end_card)
      worker_end_card = end_card;

    // We do not want to scan objects more than once. In order to accomplish
    // this, we assert that any object with an object head inside our 'slice'
    // belongs to us. We may need to extend the range of scanned cards if the
    // last object continues into the next 'slice'.
    //
    // Note! ending cards are exclusive!
    HeapWord* slice_start = addr_for(worker_start_card);
    HeapWord* slice_end = MIN2((HeapWord*) sp_top, addr_for(worker_end_card));

#ifdef ASSERT
    if (GCWorkerDelayMillis > 0) {
      // Delay 1 worker so that it proceeds after all the work
      // has been completed.
      if (stripe_number < 2) {
        os::sleep(Thread::current(), GCWorkerDelayMillis, false);
      }
    }
#endif

    // If there are no objects starting within the chunk, skip it.
    if (!start_array->object_starts_in_range(slice_start, slice_end)) {
      continue;
    }
    // Update our beginning addr
    HeapWord* first_object = start_array->object_start(slice_start);
    debug_only(oop* first_object_within_slice = (oop*) first_object;)
    if (first_object < slice_start) {
      last_scanned = (oop*)(first_object + oop(first_object)->size());
      debug_only(first_object_within_slice = last_scanned;)
      worker_start_card = byte_for(last_scanned);
    }

    // Update the ending addr
    if (slice_end < (HeapWord*)sp_top) {
      // The subtraction is important! An object may start precisely at slice_end.
      HeapWord* last_object = start_array->object_start(slice_end - 1);
      slice_end = last_object + oop(last_object)->size();
      // worker_end_card is exclusive, so bump it one past the end of last_object's
      // covered span.
      worker_end_card = byte_for(slice_end) + 1;

      if (worker_end_card > end_card)
        worker_end_card = end_card;
    }

    assert(slice_end <= (HeapWord*)sp_top, "Last object in slice crosses space boundary");
    assert(is_valid_card_address(worker_start_card), "Invalid worker start card");
    assert(is_valid_card_address(worker_end_card), "Invalid worker end card");
    // Note that worker_start_card >= worker_end_card is legal, and happens when
    // an object spans an entire slice.
    assert(worker_start_card <= end_card, "worker start card beyond end card");
    assert(worker_end_card <= end_card, "worker end card beyond end card");

    jbyte* current_card = worker_start_card;
    while (current_card < worker_end_card) {
      // Find an unclean card.
      while (current_card < worker_end_card && card_is_clean(*current_card)) {
        current_card++;
      }
      jbyte* first_unclean_card = current_card;

      // Find the end of a run of contiguous unclean cards
      while (current_card < worker_end_card && !card_is_clean(*current_card)) {
        while (current_card < worker_end_card && !card_is_clean(*current_card)) {
          current_card++;
        }

        if (current_card < worker_end_card) {
          // Some objects may be large enough to span several cards. If such
          // an object has more than one dirty card, separated by a clean card,
          // we will attempt to scan it twice. The test against "last_scanned"
          // prevents the redundant object scan, but it does not prevent newly
          // marked cards from being cleaned.
          HeapWord* last_object_in_dirty_region = start_array->object_start(addr_for(current_card)-1);
          size_t size_of_last_object = oop(last_object_in_dirty_region)->size();
          HeapWord* end_of_last_object = last_object_in_dirty_region + size_of_last_object;
          jbyte* ending_card_of_last_object = byte_for(end_of_last_object);
          assert(ending_card_of_last_object <= worker_end_card, "ending_card_of_last_object is greater than worker_end_card");
          if (ending_card_of_last_object > current_card) {
            // This means the object spans the next complete card.
            // We need to bump the current_card to ending_card_of_last_object
            current_card = ending_card_of_last_object;
          }
        }
      }
      jbyte* following_clean_card = current_card;

      if (first_unclean_card < worker_end_card) {
        oop* p = (oop*) start_array->object_start(addr_for(first_unclean_card));
        assert((HeapWord*)p <= addr_for(first_unclean_card), "checking");
        // "p" should always be >= "last_scanned" because newly GC dirtied
        // cards are no longer scanned again (see comment at end
        // of loop on the increment of "current_card").  Test that
        // hypothesis before removing this code.
        // If this code is removed, deal with the first time through
        // the loop when the last_scanned is the object starting in
        // the previous slice.
        assert((p >= last_scanned) ||
               (last_scanned == first_object_within_slice),
               "Should no longer be possible");
        if (p < last_scanned) {
          // Avoid scanning more than once; this can happen because
          // newgen cards set by GC may be a different set than the
          // originally dirty set
          p = last_scanned;
        }
        oop* to = (oop*)addr_for(following_clean_card);

        // Test slice_end first!
        if ((HeapWord*)to > slice_end) {
          to = (oop*)slice_end;
        } else if (to > sp_top) {
          to = sp_top;
        }

        // we know which cards to scan, now clear them
        if (first_unclean_card <= worker_start_card+1)
          first_unclean_card = worker_start_card+1;
        if (following_clean_card >= worker_end_card-1)
          following_clean_card = worker_end_card-1;
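        // The clamps above keep the stripe's first card (worker_start_card)
        // and last card (worker_end_card-1) from being cleared, presumably
        // because objects spanning a stripe boundary can leave marks there
        // that the neighboring stripe's worker still needs to see.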
        while (first_unclean_card < following_clean_card) {
          *first_unclean_card++ = clean_card;
        }

        const int interval = PrefetchScanIntervalInBytes;
        // scan all objects in the range
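        // push_contents(pm) pushes the object's reference fields onto the
        // promotion manager's work stacks; drain_stacks_cond_depth() then
        // processes them. The two loops below differ only in the
        // Prefetch::write() hint used when PrefetchScanIntervalInBytes is
        // nonzero.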
        if (interval != 0) {
          while (p < to) {
            Prefetch::write(p, interval);
            oop m = oop(p);
            assert(m->is_oop_or_null(), "check for header");
            m->push_contents(pm);
            p += m->size();
          }
          pm->drain_stacks_cond_depth();
        } else {
          while (p < to) {
            oop m = oop(p);
            assert(m->is_oop_or_null(), "check for header");
            m->push_contents(pm);
            p += m->size();
          }
          pm->drain_stacks_cond_depth();
        }
        last_scanned = p;
      }
307 // "current_card" is still the "following_clean_card" or
308 // the current_card is >= the worker_end_card so the
309 // loop will not execute again.
310 assert((current_card == following_clean_card) ||
311 (current_card >= worker_end_card),
312 "current_card should only be incremented if it still equals "
313 "following_clean_card");
314 // Increment current_card so that it is not processed again.
315 // It may now be dirty because a old-to-young pointer was
316 // found on it an updated. If it is now dirty, it cannot be
317 // be safely cleaned in the next iteration.
318 current_card++;
319 }
320 }
321 }
// This should be called before a scavenge.
void CardTableExtension::verify_all_young_refs_imprecise() {
  CheckForUnmarkedObjects check;

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSOldGen* old_gen = heap->old_gen();

  old_gen->object_iterate(&check);
}

// This should be called immediately after a scavenge, before mutators resume.
void CardTableExtension::verify_all_young_refs_precise() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSOldGen* old_gen = heap->old_gen();

  CheckForPreciseMarks check(heap->young_gen(), (CardTableExtension*)heap->barrier_set());

  old_gen->oop_iterate_no_header(&check);

  verify_all_young_refs_precise_helper(old_gen->object_space()->used_region());
}

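// CheckForPreciseMarks (via set_card_newgen) tags the card of each verified
// young-gen reference with verify_card, so when this helper runs every card
// in the region should be either clean or verify_card; the helper asserts
// that and restores verify_card entries to youngergen_card.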
void CardTableExtension::verify_all_young_refs_precise_helper(MemRegion mr) {
  CardTableExtension* card_table = (CardTableExtension*)Universe::heap()->barrier_set();
  // FIX ME ASSERT HERE

  jbyte* bot = card_table->byte_for(mr.start());
  jbyte* top = card_table->byte_for(mr.end());
  while(bot <= top) {
    assert(*bot == clean_card || *bot == verify_card, "Found unwanted or unknown card mark");
    if (*bot == verify_card)
      *bot = youngergen_card;
    bot++;
  }
}

bool CardTableExtension::addr_is_marked_imprecise(void *addr) {
  jbyte* p = byte_for(addr);
  jbyte val = *p;

  if (card_is_dirty(val))
    return true;

  if (card_is_newgen(val))
    return true;

  if (card_is_clean(val))
    return false;

  assert(false, "Found unhandled card mark type");

  return false;
}

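// For reference: clean_card and dirty_card are the base CardTableModRefBS
// values, while youngergen_card ("newgen") and verify_card are the extra
// states defined by this extension (see cardTableExtension.hpp).
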
// Also includes verify_card
bool CardTableExtension::addr_is_marked_precise(void *addr) {
  jbyte* p = byte_for(addr);
  jbyte val = *p;

  if (card_is_newgen(val))
    return true;

  if (card_is_verify(val))
    return true;

  if (card_is_clean(val))
    return false;

  if (card_is_dirty(val))
    return false;

  assert(false, "Found unhandled card mark type");

  return false;
}

// Assumes that only the base or the end changes. This allows identification
// of the region that is being resized.
// CardTableModRefBS::resize_covered_region() is used for the normal case
// where the covered regions are growing or shrinking at the high end.
// The method resize_covered_region_by_end() is analogous to
// CardTableModRefBS::resize_covered_region() but
// for regions that grow or shrink at the low end.
void CardTableExtension::resize_covered_region(MemRegion new_region) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == new_region.start()) {
      // Found a covered region with the same start as the
      // new region. The region is growing or shrinking
      // from the start of the region.
      resize_covered_region_by_start(new_region);
      return;
    }
    if (_covered[i].start() > new_region.start()) {
      break;
    }
  }

  int changed_region = -1;
  for (int j = 0; j < _cur_covered_regions; j++) {
    if (_covered[j].end() == new_region.end()) {
      changed_region = j;
      // This is a case where the covered region is growing or shrinking
      // at the start of the region.
      assert(changed_region != -1, "Don't expect to add a covered region");
      assert(_covered[changed_region].byte_size() != new_region.byte_size(),
             "The sizes should be different here");
      resize_covered_region_by_end(changed_region, new_region);
      return;
    }
  }
  // This should only be a new covered region (where no existing
  // covered region matches at the start or the end).
  assert(_cur_covered_regions < _max_covered_regions,
         "An existing region should have been found");
  resize_covered_region_by_start(new_region);
}

void CardTableExtension::resize_covered_region_by_start(MemRegion new_region) {
  CardTableModRefBS::resize_covered_region(new_region);
  debug_only(verify_guard();)
}

void CardTableExtension::resize_covered_region_by_end(int changed_region,
                                                      MemRegion new_region) {
  assert(SafepointSynchronize::is_at_safepoint(),
         "Only expect an expansion at the low end at a GC");
  debug_only(verify_guard();)
#ifdef ASSERT
  for (int k = 0; k < _cur_covered_regions; k++) {
    if (_covered[k].end() == new_region.end()) {
      assert(changed_region == k, "Changed region is incorrect");
      break;
    }
  }
#endif

  // Commit new or uncommit old pages, if necessary.
  if (resize_commit_uncommit(changed_region, new_region)) {
    // Set the new start of the committed region
    resize_update_committed_table(changed_region, new_region);
  }

  // Update card table entries
  resize_update_card_table_entries(changed_region, new_region);

  // Update the covered region
  resize_update_covered_table(changed_region, new_region);

  if (TraceCardTableModRefBS) {
    int ind = changed_region;
    gclog_or_tty->print_cr("CardTableModRefBS::resize_covered_region: ");
    gclog_or_tty->print_cr("  "
                  "  _covered[%d].start(): " INTPTR_FORMAT
                  "  _covered[%d].last(): " INTPTR_FORMAT,
                  ind, p2i(_covered[ind].start()),
                  ind, p2i(_covered[ind].last()));
    gclog_or_tty->print_cr("  "
                  "  _committed[%d].start(): " INTPTR_FORMAT
                  "  _committed[%d].last(): " INTPTR_FORMAT,
                  ind, p2i(_committed[ind].start()),
                  ind, p2i(_committed[ind].last()));
    gclog_or_tty->print_cr("  "
                  "  byte_for(start): " INTPTR_FORMAT
                  "  byte_for(last): " INTPTR_FORMAT,
                  p2i(byte_for(_covered[ind].start())),
                  p2i(byte_for(_covered[ind].last())));
    gclog_or_tty->print_cr("  "
                  "  addr_for(start): " INTPTR_FORMAT
                  "  addr_for(last): " INTPTR_FORMAT,
                  p2i(addr_for((jbyte*) _committed[ind].start())),
                  p2i(addr_for((jbyte*) _committed[ind].last())));
  }
  debug_only(verify_guard();)
}

bool CardTableExtension::resize_commit_uncommit(int changed_region,
                                                MemRegion new_region) {
  bool result = false;
  // Commit new or uncommit old pages, if necessary.
  MemRegion cur_committed = _committed[changed_region];
  assert(_covered[changed_region].end() == new_region.end(),
         "The ends of the regions are expected to match");
  // Extend the start of this _committed region to
  // cover the start of any previous _committed region.
  // This forms overlapping regions, but never interior regions.
  HeapWord* min_prev_start = lowest_prev_committed_start(changed_region);
  if (min_prev_start < cur_committed.start()) {
    // Only really need to set start of "cur_committed" to
    // the new start (min_prev_start), but the assertion checking code
    // below uses cur_committed.end() so make it correct.
    MemRegion new_committed =
      MemRegion(min_prev_start, cur_committed.end());
    cur_committed = new_committed;
  }
#ifdef ASSERT
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(cur_committed.start() ==
         (HeapWord*) align_size_up((uintptr_t) cur_committed.start(),
                                   os::vm_page_size()),
         "Starts should have proper alignment");
#endif

  jbyte* new_start = byte_for(new_region.start());
  // Round down because this is for the start address
  HeapWord* new_start_aligned =
    (HeapWord*)align_size_down((uintptr_t)new_start, os::vm_page_size());
  // The guard page is always committed and should not be committed over.
  // This method is used in cases where the generation is growing toward
  // lower addresses but the guard region is still at the end of the
  // card table. That still makes sense when looking for writes
  // off the end of the card table.
  if (new_start_aligned < cur_committed.start()) {
    // Expand the committed region
    //
    // Case A
    //                                          |+ guard +|
    //                          |+ cur committed +++++++++|
    //                  |+ new committed +++++++++++++++++|
    //
    // Case B
    //                                          |+ guard +|
    //                        |+ cur committed +|
    //                  |+ new committed +++++++|
    //
    // These are not expected because the calculation of the
    // cur committed region and the new committed region
    // share the same end for the covered region.
    // Case C
    //                                          |+ guard +|
    //                        |+ cur committed +|
    //                  |+ new committed +++++++++++++++++|
    // Case D
    //                                          |+ guard +|
    //                        |+ cur committed +++++++++++|
    //                  |+ new committed +++++++|

    HeapWord* new_end_for_commit =
      MIN2(cur_committed.end(), _guard_region.start());
    if(new_start_aligned < new_end_for_commit) {
      MemRegion new_committed =
        MemRegion(new_start_aligned, new_end_for_commit);
      os::commit_memory_or_exit((char*)new_committed.start(),
                                new_committed.byte_size(), !ExecMem,
                                "card table expansion");
    }
    result = true;
  } else if (new_start_aligned > cur_committed.start()) {
    // Shrink the committed region
#if 0 // uncommitting space is currently unsafe because of the interactions
      // of growing and shrinking regions. One region A can uncommit space
      // that it owns but which is being used by another region B (maybe).
      // Region B has not committed the space because it was already
      // committed by region A.
    MemRegion uncommit_region = committed_unique_to_self(changed_region,
      MemRegion(cur_committed.start(), new_start_aligned));
    if (!uncommit_region.is_empty()) {
      if (!os::uncommit_memory((char*)uncommit_region.start(),
                               uncommit_region.byte_size())) {
        // If the uncommit fails, ignore it. Let the
        // committed table resizing go even though the committed
        // table will overstate the committed space.
      }
    }
#else
    assert(!result, "Should be false with current workaround");
#endif
  }
  assert(_committed[changed_region].end() == cur_committed.end(),
         "end should not change");
  return result;
}

void CardTableExtension::resize_update_committed_table(int changed_region,
                                                       MemRegion new_region) {
  jbyte* new_start = byte_for(new_region.start());
  // Set the new start of the committed region
  HeapWord* new_start_aligned =
    (HeapWord*)align_size_down((uintptr_t)new_start,
                               os::vm_page_size());
  MemRegion new_committed = MemRegion(new_start_aligned,
                                      _committed[changed_region].end());
  _committed[changed_region] = new_committed;
  _committed[changed_region].set_start(new_start_aligned);
}

void CardTableExtension::resize_update_card_table_entries(int changed_region,
                                                           MemRegion new_region) {
  debug_only(verify_guard();)
  MemRegion original_covered = _covered[changed_region];
  // Initialize the card entries. Only consider the
  // region covered by the card table (_whole_heap)
  jbyte* entry;
  if (new_region.start() < _whole_heap.start()) {
    entry = byte_for(_whole_heap.start());
  } else {
    entry = byte_for(new_region.start());
  }
  jbyte* end = byte_for(original_covered.start());
  // If _whole_heap starts at the original covered region's start,
  // this loop will not execute.
  while (entry < end) { *entry++ = clean_card; }
}

void CardTableExtension::resize_update_covered_table(int changed_region,
                                                     MemRegion new_region) {
  // Update the covered region
  _covered[changed_region].set_start(new_region.start());
  _covered[changed_region].set_word_size(new_region.word_size());

  // reorder regions.  There should only be at most 1 out
  // of order.
  for (int i = _cur_covered_regions-1 ; i > 0; i--) {
    if (_covered[i].start() < _covered[i-1].start()) {
      MemRegion covered_mr = _covered[i-1];
      _covered[i-1] = _covered[i];
      _covered[i] = covered_mr;
      MemRegion committed_mr = _committed[i-1];
      _committed[i-1] = _committed[i];
      _committed[i] = committed_mr;
      break;
    }
  }
#ifdef ASSERT
  for (int m = 0; m < _cur_covered_regions-1; m++) {
    assert(_covered[m].start() <= _covered[m+1].start(),
           "Covered regions out of order");
    assert(_committed[m].start() <= _committed[m+1].start(),
           "Committed regions out of order");
  }
#endif
}

// Returns the start of any committed region that is lower than
// the target committed region (index ind) and that intersects the
// target region.  If none, return start of target region.
//
//      -------------
//      |           |
//      -------------
//            ------------
//            |  target  |
//            ------------
//                    -------------
//                    |           |
//                    -------------
//      ^ returns this
//
//      -------------
//      |           |
//      -------------
//                     ------------
//                     |  target  |
//                     ------------
//                                  -------------
//                                  |           |
//                                  -------------
//                     ^ returns this

HeapWord* CardTableExtension::lowest_prev_committed_start(int ind) const {
  assert(_cur_covered_regions >= 0, "Expecting at least one region");
  HeapWord* min_start = _committed[ind].start();
  for (int j = 0; j < ind; j++) {
    HeapWord* this_start = _committed[j].start();
    if ((this_start < min_start) &&
        !(_committed[j].intersection(_committed[ind])).is_empty()) {
      min_start = this_start;
    }
  }
  return min_start;
}