Mon, 20 Sep 2010 14:38:38 -0700
6984287: Regularize how GC parallel workers are specified.
Summary: Associate number of GC workers with the workgang as opposed to the task.
Reviewed-by: johnc, ysr
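The shape of the change, as a minimal sketch (the names and signatures below are illustrative only, not the exact HotSpot API): before, each gang task carried the number of workers that would run it; after, the worker count is a property of the workgang itself, and tasks and call sites query the gang.

// Illustrative sketch only -- the real types live in workgroup.hpp.
class AbstractGangTask {
public:
  virtual void work(int worker_id) = 0;  // no worker-count parameter on the task
  virtual ~AbstractGangTask() {}
};

class WorkGang {
  int _active_workers;  // the gang, not the task, knows how many workers run
public:
  explicit WorkGang(int n) : _active_workers(n) {}
  int active_workers() const { return _active_workers; }
  void run_task(AbstractGangTask* task) {
    // Dispatch to the gang's current worker count (shown serially for brevity).
    for (int i = 0; i < _active_workers; i++) {
      task->work(i);
    }
  }
};

In the file below the effect is indirect: scavenge_contents_parallel() is handed a stripe_number, and the striping width still reads ParallelGCThreads directly.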
/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_cardTableExtension.cpp.incl"

// Checks an individual oop for missing precise marks. Mark
// may be either dirty or newgen.
class CheckForUnmarkedOops : public OopClosure {
 private:
  PSYoungGen*         _young_gen;
  CardTableExtension* _card_table;
  HeapWord*           _unmarked_addr;
  jbyte*              _unmarked_card;

 protected:
  template <class T> void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    if (_young_gen->is_in_reserved(obj) &&
        !_card_table->addr_is_marked_imprecise(p)) {
      // Don't overwrite the first missing card mark
      if (_unmarked_addr == NULL) {
        _unmarked_addr = (HeapWord*)p;
        _unmarked_card = _card_table->byte_for(p);
      }
    }
  }

 public:
  CheckForUnmarkedOops(PSYoungGen* young_gen, CardTableExtension* card_table) :
    _young_gen(young_gen), _card_table(card_table), _unmarked_addr(NULL) { }

  virtual void do_oop(oop* p)       { CheckForUnmarkedOops::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { CheckForUnmarkedOops::do_oop_work(p); }

  bool has_unmarked_oop() {
    return _unmarked_addr != NULL;
  }
};

// Checks all objects for the existence of some type of mark,
// precise or imprecise, dirty or newgen.
class CheckForUnmarkedObjects : public ObjectClosure {
 private:
  PSYoungGen*         _young_gen;
  CardTableExtension* _card_table;

 public:
  CheckForUnmarkedObjects() {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

    _young_gen = heap->young_gen();
    _card_table = (CardTableExtension*)heap->barrier_set();
    // No point in asserting barrier set type here. Need to make CardTableExtension
    // a unique barrier set type.
  }

  // Card marks are not precise. The current system can leave us with
  // a mishmash of precise marks and beginning-of-object marks. This means
  // we test for missing precise marks first. If any are found, we don't
  // fail unless the object head is also unmarked.
  virtual void do_object(oop obj) {
    CheckForUnmarkedOops object_check(_young_gen, _card_table);
    obj->oop_iterate(&object_check);
    if (object_check.has_unmarked_oop()) {
      assert(_card_table->addr_is_marked_imprecise(obj), "Found unmarked young_gen object");
    }
  }
};

// Checks for precise marking of oops as newgen.
class CheckForPreciseMarks : public OopClosure {
 private:
  PSYoungGen*         _young_gen;
  CardTableExtension* _card_table;

 protected:
  template <class T> void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    if (_young_gen->is_in_reserved(obj)) {
      assert(_card_table->addr_is_marked_precise(p), "Found unmarked precise oop");
      _card_table->set_card_newgen(p);
    }
  }

 public:
  CheckForPreciseMarks(PSYoungGen* young_gen, CardTableExtension* card_table) :
    _young_gen(young_gen), _card_table(card_table) { }

  virtual void do_oop(oop* p)       { CheckForPreciseMarks::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { CheckForPreciseMarks::do_oop_work(p); }
};

// We get passed the space_top value to prevent us from traversing into
// the old_gen promotion labs, which cannot be safely parsed.
void CardTableExtension::scavenge_contents(ObjectStartArray* start_array,
                                           MutableSpace* sp,
                                           HeapWord* space_top,
                                           PSPromotionManager* pm)
{
  assert(start_array != NULL && sp != NULL && pm != NULL, "Sanity");
  assert(start_array->covered_region().contains(sp->used_region()),
         "ObjectStartArray does not cover space");

  if (sp->not_empty()) {
    oop* sp_top = (oop*)space_top;
    oop* prev_top = NULL;
    jbyte* current_card = byte_for(sp->bottom());
    jbyte* end_card = byte_for(sp_top - 1);    // sp_top is exclusive
    // scan card marking array
    while (current_card <= end_card) {
      jbyte value = *current_card;
      // skip clean cards
      if (card_is_clean(value)) {
        current_card++;
      } else {
        // we found a non-clean card
        jbyte* first_nonclean_card = current_card++;
        oop* bottom = (oop*)addr_for(first_nonclean_card);
        // find object starting on card
        oop* bottom_obj = (oop*)start_array->object_start((HeapWord*)bottom);
        assert(bottom_obj <= bottom, "just checking");
        // make sure we don't scan oops we already looked at
        if (bottom < prev_top) bottom = prev_top;
        // figure out when to stop scanning
        jbyte* first_clean_card;
        oop* top;
        bool restart_scanning;
        do {
          restart_scanning = false;
          // find a clean card
          while (current_card <= end_card) {
            value = *current_card;
            if (card_is_clean(value)) break;
            current_card++;
          }
          // check if we reached the end, if so we are done
          if (current_card >= end_card) {
            first_clean_card = end_card + 1;
            current_card++;
            top = sp_top;
          } else {
            // we have a clean card, find object starting on that card
            first_clean_card = current_card++;
            top = (oop*)addr_for(first_clean_card);
            oop* top_obj = (oop*)start_array->object_start((HeapWord*)top);
            assert(top_obj <= top, "just checking");
            if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
              // an arrayOop is starting on the clean card - since we do exact
              // store checks for objArrays we are done
            } else {
              // otherwise, it is possible that the object starting on the clean
              // card spans the entire card, and that the store happened on a
              // later card. figure out where the object ends
              top = top_obj + oop(top_obj)->size();
              jbyte* top_card = CardTableModRefBS::byte_for(top - 1);   // top is exclusive
              if (top_card > first_clean_card) {
                // object ends on a different card
                current_card = top_card + 1;
                if (card_is_clean(*top_card)) {
                  // the ending card is clean, we are done
                  first_clean_card = top_card;
                } else {
                  // the ending card is not clean, continue scanning at start of do-while
                  restart_scanning = true;
                }
              } else {
                // object ends on the clean card, we are done.
                assert(first_clean_card == top_card, "just checking");
              }
            }
          }
        } while (restart_scanning);
        // we know which cards to scan, now clear them
        while (first_nonclean_card < first_clean_card) {
          *first_nonclean_card++ = clean_card;
        }
        // scan oops in objects
        do {
          oop(bottom_obj)->push_contents(pm);
          bottom_obj += oop(bottom_obj)->size();
          assert(bottom_obj <= sp_top, "just checking");
        } while (bottom_obj < top);
        pm->drain_stacks_cond_depth();
        // remember top oop* scanned
        prev_top = top;
      }
    }
  }
}

void CardTableExtension::scavenge_contents_parallel(ObjectStartArray* start_array,
                                                    MutableSpace* sp,
                                                    HeapWord* space_top,
                                                    PSPromotionManager* pm,
                                                    uint stripe_number) {
  int ssize = 128; // Naked constant! Work unit = 64k.
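  // (With HotSpot's 512-byte cards, 128 card entries cover 128 * 512 = 64k
  // of heap, which is where the 64k above comes from.)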
  int dirty_card_count = 0;

  oop* sp_top = (oop*)space_top;
  jbyte* start_card = byte_for(sp->bottom());
  jbyte* end_card = byte_for(sp_top - 1) + 1;
  oop* last_scanned = NULL; // Prevent scanning objects more than once
  for (jbyte* slice = start_card; slice < end_card; slice += ssize*ParallelGCThreads) {
    jbyte* worker_start_card = slice + stripe_number * ssize;
    if (worker_start_card >= end_card)
      return; // We're done.

    jbyte* worker_end_card = worker_start_card + ssize;
    if (worker_end_card > end_card)
      worker_end_card = end_card;

    // We do not want to scan objects more than once. In order to accomplish
    // this, we assert that any object with an object head inside our 'slice'
    // belongs to us. We may need to extend the range of scanned cards if the
    // last object continues into the next 'slice'.
    //
    // Note! ending cards are exclusive!
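    //
    // For example (illustrative numbers only): with ParallelGCThreads == 4 and
    // ssize == 128, each pass of this loop advances 'slice' by 512 cards, and
    // the worker with stripe_number == 1 initially claims cards
    // [slice + 128, slice + 256) of every such slice.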
    HeapWord* slice_start = addr_for(worker_start_card);
    HeapWord* slice_end = MIN2((HeapWord*)sp_top, addr_for(worker_end_card));

    // If there are no objects starting within the chunk, skip it.
    if (!start_array->object_starts_in_range(slice_start, slice_end)) {
      continue;
    }
    // Update our beginning addr
    HeapWord* first_object = start_array->object_start(slice_start);
    debug_only(oop* first_object_within_slice = (oop*) first_object;)
    if (first_object < slice_start) {
      last_scanned = (oop*)(first_object + oop(first_object)->size());
      debug_only(first_object_within_slice = last_scanned;)
      worker_start_card = byte_for(last_scanned);
    }

    // Update the ending addr
    if (slice_end < (HeapWord*)sp_top) {
      // The subtraction is important! An object may start precisely at slice_end.
      HeapWord* last_object = start_array->object_start(slice_end - 1);
      slice_end = last_object + oop(last_object)->size();
      // worker_end_card is exclusive, so bump it one past the end of last_object's
      // covered span.
      worker_end_card = byte_for(slice_end) + 1;

      if (worker_end_card > end_card)
        worker_end_card = end_card;
    }

    assert(slice_end <= (HeapWord*)sp_top, "Last object in slice crosses space boundary");
    assert(is_valid_card_address(worker_start_card), "Invalid worker start card");
    assert(is_valid_card_address(worker_end_card), "Invalid worker end card");
    // Note that worker_start_card >= worker_end_card is legal, and happens when
    // an object spans an entire slice.
    assert(worker_start_card <= end_card, "worker start card beyond end card");
    assert(worker_end_card <= end_card, "worker end card beyond end card");

    jbyte* current_card = worker_start_card;
    while (current_card < worker_end_card) {
      // Find an unclean card.
      while (current_card < worker_end_card && card_is_clean(*current_card)) {
        current_card++;
      }
      jbyte* first_unclean_card = current_card;

      // Find the end of a run of contiguous unclean cards
      while (current_card < worker_end_card && !card_is_clean(*current_card)) {
        while (current_card < worker_end_card && !card_is_clean(*current_card)) {
          current_card++;
        }

        if (current_card < worker_end_card) {
          // Some objects may be large enough to span several cards. If such
          // an object has more than one dirty card, separated by a clean card,
          // we will attempt to scan it twice. The test against "last_scanned"
          // prevents the redundant object scan, but it does not prevent newly
          // marked cards from being cleaned.
          HeapWord* last_object_in_dirty_region = start_array->object_start(addr_for(current_card) - 1);
          size_t size_of_last_object = oop(last_object_in_dirty_region)->size();
          HeapWord* end_of_last_object = last_object_in_dirty_region + size_of_last_object;
          jbyte* ending_card_of_last_object = byte_for(end_of_last_object);
          assert(ending_card_of_last_object <= worker_end_card, "ending_card_of_last_object is greater than worker_end_card");
          if (ending_card_of_last_object > current_card) {
            // This means the object spans the next complete card.
            // We need to bump the current_card to ending_card_of_last_object
            current_card = ending_card_of_last_object;
          }
        }
      }
      jbyte* following_clean_card = current_card;

      if (first_unclean_card < worker_end_card) {
        oop* p = (oop*)start_array->object_start(addr_for(first_unclean_card));
        assert((HeapWord*)p <= addr_for(first_unclean_card), "checking");
        // "p" should always be >= "last_scanned" because newly GC dirtied
        // cards are no longer scanned again (see comment at end
        // of loop on the increment of "current_card"). Test that
        // hypothesis before removing this code.
        // If this code is removed, deal with the first time through
        // the loop when the last_scanned is the object starting in
        // the previous slice.
        assert((p >= last_scanned) ||
               (last_scanned == first_object_within_slice),
               "Should no longer be possible");
        if (p < last_scanned) {
          // Avoid scanning more than once; this can happen because
          // newgen cards set by GC may be a different set than the
          // originally dirty set
          p = last_scanned;
        }
        oop* to = (oop*)addr_for(following_clean_card);

        // Test slice_end first!
        if ((HeapWord*)to > slice_end) {
          to = (oop*)slice_end;
        } else if (to > sp_top) {
          to = sp_top;
        }

        // we know which cards to scan, now clear them
        if (first_unclean_card <= worker_start_card + 1)
          first_unclean_card = worker_start_card + 1;
        if (following_clean_card >= worker_end_card - 1)
          following_clean_card = worker_end_card - 1;

        while (first_unclean_card < following_clean_card) {
          *first_unclean_card++ = clean_card;
        }

        const int interval = PrefetchScanIntervalInBytes;
        // scan all objects in the range
        if (interval != 0) {
          while (p < to) {
            Prefetch::write(p, interval);
            oop m = oop(p);
            assert(m->is_oop_or_null(), "check for header");
            m->push_contents(pm);
            p += m->size();
          }
          pm->drain_stacks_cond_depth();
        } else {
          while (p < to) {
            oop m = oop(p);
            assert(m->is_oop_or_null(), "check for header");
            m->push_contents(pm);
            p += m->size();
          }
          pm->drain_stacks_cond_depth();
        }
        last_scanned = p;
      }
      // "current_card" is still the "following_clean_card" or
      // current_card is >= the worker_end_card, so the
      // loop will not execute again.
      assert((current_card == following_clean_card) ||
             (current_card >= worker_end_card),
             "current_card should only be incremented if it still equals "
             "following_clean_card");
      // Increment current_card so that it is not processed again.
      // It may now be dirty because an old-to-young pointer was
      // found on it and updated. If it is now dirty, it cannot
      // be safely cleaned in the next iteration.
      current_card++;
    }
  }
}

// This should be called before a scavenge.
void CardTableExtension::verify_all_young_refs_imprecise() {
  CheckForUnmarkedObjects check;

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  old_gen->object_iterate(&check);
  perm_gen->object_iterate(&check);
}

// This should be called immediately after a scavenge, before mutators resume.
void CardTableExtension::verify_all_young_refs_precise() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  CheckForPreciseMarks check(heap->young_gen(), (CardTableExtension*)heap->barrier_set());

  old_gen->oop_iterate(&check);
  perm_gen->oop_iterate(&check);

  verify_all_young_refs_precise_helper(old_gen->object_space()->used_region());
  verify_all_young_refs_precise_helper(perm_gen->object_space()->used_region());
}

void CardTableExtension::verify_all_young_refs_precise_helper(MemRegion mr) {
  CardTableExtension* card_table = (CardTableExtension*)Universe::heap()->barrier_set();
  // FIX ME ASSERT HERE

  jbyte* bot = card_table->byte_for(mr.start());
  jbyte* top = card_table->byte_for(mr.end());
  while (bot <= top) {
    assert(*bot == clean_card || *bot == verify_card, "Found unwanted or unknown card mark");
    if (*bot == verify_card)
      *bot = youngergen_card;
    bot++;
  }
}

bool CardTableExtension::addr_is_marked_imprecise(void *addr) {
  jbyte* p = byte_for(addr);
  jbyte val = *p;

  if (card_is_dirty(val))
    return true;

  if (card_is_newgen(val))
    return true;

  if (card_is_clean(val))
    return false;

  assert(false, "Found unhandled card mark type");

  return false;
}

// Also includes verify_card
bool CardTableExtension::addr_is_marked_precise(void *addr) {
  jbyte* p = byte_for(addr);
  jbyte val = *p;

  if (card_is_newgen(val))
    return true;

  if (card_is_verify(val))
    return true;

  if (card_is_clean(val))
    return false;

  if (card_is_dirty(val))
    return false;

  assert(false, "Found unhandled card mark type");

  return false;
}

// Assumes that only the base or the end changes. This allows identification
// of the region that is being resized.
// CardTableModRefBS::resize_covered_region() is used for the normal case
// where the covered regions are growing or shrinking at the high end.
// The method resize_covered_region_by_end() is analogous to
// CardTableModRefBS::resize_covered_region() but
// for regions that grow or shrink at the low end.
void CardTableExtension::resize_covered_region(MemRegion new_region) {

  for (int i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == new_region.start()) {
      // Found a covered region with the same start as the
      // new region. The region is growing or shrinking
      // from the start of the region.
      resize_covered_region_by_start(new_region);
      return;
    }
    if (_covered[i].start() > new_region.start()) {
      break;
    }
  }

  int changed_region = -1;
  for (int j = 0; j < _cur_covered_regions; j++) {
    if (_covered[j].end() == new_region.end()) {
      changed_region = j;
      // This is a case where the covered region is growing or shrinking
      // at the start of the region.
      assert(changed_region != -1, "Don't expect to add a covered region");
      assert(_covered[changed_region].byte_size() != new_region.byte_size(),
             "The sizes should be different here");
      resize_covered_region_by_end(changed_region, new_region);
      return;
    }
  }
  // This should only be a new covered region (where no existing
  // covered region matches at the start or the end).
  assert(_cur_covered_regions < _max_covered_regions,
         "An existing region should have been found");
  resize_covered_region_by_start(new_region);
}

void CardTableExtension::resize_covered_region_by_start(MemRegion new_region) {
  CardTableModRefBS::resize_covered_region(new_region);
  debug_only(verify_guard();)
}

void CardTableExtension::resize_covered_region_by_end(int changed_region,
                                                      MemRegion new_region) {
  assert(SafepointSynchronize::is_at_safepoint(),
         "Only expect an expansion at the low end at a GC");
  debug_only(verify_guard();)
#ifdef ASSERT
  for (int k = 0; k < _cur_covered_regions; k++) {
    if (_covered[k].end() == new_region.end()) {
      assert(changed_region == k, "Changed region is incorrect");
      break;
    }
  }
#endif

  // Commit new or uncommit old pages, if necessary.
  if (resize_commit_uncommit(changed_region, new_region)) {
    // Set the new start of the committed region
    resize_update_committed_table(changed_region, new_region);
  }

  // Update card table entries
  resize_update_card_table_entries(changed_region, new_region);

  // Update the covered region
  resize_update_covered_table(changed_region, new_region);

  if (TraceCardTableModRefBS) {
    int ind = changed_region;
    gclog_or_tty->print_cr("CardTableModRefBS::resize_covered_region: ");
    gclog_or_tty->print_cr("  "
                  "  _covered[%d].start(): " INTPTR_FORMAT
                  "  _covered[%d].last(): " INTPTR_FORMAT,
                  ind, _covered[ind].start(),
                  ind, _covered[ind].last());
    gclog_or_tty->print_cr("  "
                  "  _committed[%d].start(): " INTPTR_FORMAT
                  "  _committed[%d].last(): " INTPTR_FORMAT,
                  ind, _committed[ind].start(),
                  ind, _committed[ind].last());
    gclog_or_tty->print_cr("  "
                  "  byte_for(start): " INTPTR_FORMAT
                  "  byte_for(last): " INTPTR_FORMAT,
                  byte_for(_covered[ind].start()),
                  byte_for(_covered[ind].last()));
    gclog_or_tty->print_cr("  "
                  "  addr_for(start): " INTPTR_FORMAT
                  "  addr_for(last): " INTPTR_FORMAT,
                  addr_for((jbyte*) _committed[ind].start()),
                  addr_for((jbyte*) _committed[ind].last()));
  }
  debug_only(verify_guard();)
}

bool CardTableExtension::resize_commit_uncommit(int changed_region,
                                                MemRegion new_region) {
  bool result = false;
  // Commit new or uncommit old pages, if necessary.
  MemRegion cur_committed = _committed[changed_region];
  assert(_covered[changed_region].end() == new_region.end(),
         "The ends of the regions are expected to match");
  // Extend the start of this _committed region to
  // cover the start of any previous _committed region.
  // This forms overlapping regions, but never interior regions.
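  //
  //   e.g.  |+ prev committed ++++++|
  //                    |+ cur committed ++++++++|
  //         ^ cur_committed.start() is pulled back to here, so the two
  //           regions overlap but neither lies strictly inside the other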
  HeapWord* min_prev_start = lowest_prev_committed_start(changed_region);
  if (min_prev_start < cur_committed.start()) {
    // Only really need to set start of "cur_committed" to
    // the new start (min_prev_start) but assertion checking code
    // below uses cur_committed.end() so make it correct.
    MemRegion new_committed =
      MemRegion(min_prev_start, cur_committed.end());
    cur_committed = new_committed;
  }
#ifdef ASSERT
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(cur_committed.start() ==
         (HeapWord*) align_size_up((uintptr_t) cur_committed.start(),
                                   os::vm_page_size()),
         "Starts should have proper alignment");
#endif

  jbyte* new_start = byte_for(new_region.start());
  // Round down because this is for the start address
  HeapWord* new_start_aligned =
    (HeapWord*)align_size_down((uintptr_t)new_start, os::vm_page_size());
  // The guard page is always committed and should not be committed over.
  // This method is used in cases where the generation is growing toward
  // lower addresses but the guard region is still at the end of the
  // card table. That still makes sense when looking for writes
  // off the end of the card table.
  if (new_start_aligned < cur_committed.start()) {
    // Expand the committed region
    //
    // Case A
    //                                    |+ guard +|
    //                    |+ cur committed +++++++++|
    //            |+ new committed +++++++++++++++++|
    //
    // Case B
    //                                    |+ guard +|
    //                  |+ cur committed +|
    //          |+ new committed +++++++++|
    //
    // These are not expected because the calculation of the
    // cur committed region and the new committed region
    // share the same end for the covered region.
    // Case C
    //                                    |+ guard +|
    //                  |+ cur committed +|
    //          |+ new committed +++++++++++++++++++|
    // Case D
    //                                    |+ guard +|
    //                |+ cur committed +++++++++++++|
    //          |+ new committed +++++++|

    HeapWord* new_end_for_commit =
      MIN2(cur_committed.end(), _guard_region.start());
    if (new_start_aligned < new_end_for_commit) {
      MemRegion new_committed =
        MemRegion(new_start_aligned, new_end_for_commit);
      if (!os::commit_memory((char*)new_committed.start(),
                             new_committed.byte_size())) {
        vm_exit_out_of_memory(new_committed.byte_size(),
                              "card table expansion");
      }
    }
    result = true;
  } else if (new_start_aligned > cur_committed.start()) {
    // Shrink the committed region
#if 0 // uncommitting space is currently unsafe because of the interactions
      // of growing and shrinking regions. One region A can uncommit space
      // that it owns but which is being used by another region B (maybe).
      // Region B has not committed the space because it was already
      // committed by region A.
    MemRegion uncommit_region = committed_unique_to_self(changed_region,
      MemRegion(cur_committed.start(), new_start_aligned));
    if (!uncommit_region.is_empty()) {
      if (!os::uncommit_memory((char*)uncommit_region.start(),
                               uncommit_region.byte_size())) {
        // If the uncommit fails, ignore it. Let the
        // committed table resizing go even though the committed
        // table will overstate the committed space.
      }
    }
#else
    assert(!result, "Should be false with current workaround");
#endif
  }
  assert(_committed[changed_region].end() == cur_committed.end(),
         "end should not change");
  return result;
}

void CardTableExtension::resize_update_committed_table(int changed_region,
                                                       MemRegion new_region) {

  jbyte* new_start = byte_for(new_region.start());
  // Set the new start of the committed region
  HeapWord* new_start_aligned =
    (HeapWord*)align_size_down((uintptr_t)new_start,
                               os::vm_page_size());
  MemRegion new_committed = MemRegion(new_start_aligned,
                                      _committed[changed_region].end());
  _committed[changed_region] = new_committed;
  _committed[changed_region].set_start(new_start_aligned);
}

void CardTableExtension::resize_update_card_table_entries(int changed_region,
                                                          MemRegion new_region) {
  debug_only(verify_guard();)
  MemRegion original_covered = _covered[changed_region];
  // Initialize the card entries. Only consider the
  // region covered by the card table (_whole_heap)
  jbyte* entry;
  if (new_region.start() < _whole_heap.start()) {
    entry = byte_for(_whole_heap.start());
  } else {
    entry = byte_for(new_region.start());
  }
  jbyte* end = byte_for(original_covered.start());
  // If _whole_heap starts at the original covered region's start,
  // this loop will not execute.
  while (entry < end) { *entry++ = clean_card; }
}

void CardTableExtension::resize_update_covered_table(int changed_region,
                                                     MemRegion new_region) {
  // Update the covered region
  _covered[changed_region].set_start(new_region.start());
  _covered[changed_region].set_word_size(new_region.word_size());

  // Reorder regions. There should be at most 1 out
  // of order.
  for (int i = _cur_covered_regions - 1; i > 0; i--) {
    if (_covered[i].start() < _covered[i-1].start()) {
      MemRegion covered_mr = _covered[i-1];
      _covered[i-1] = _covered[i];
      _covered[i] = covered_mr;
      MemRegion committed_mr = _committed[i-1];
      _committed[i-1] = _committed[i];
      _committed[i] = committed_mr;
      break;
    }
  }
#ifdef ASSERT
  for (int m = 0; m < _cur_covered_regions - 1; m++) {
    assert(_covered[m].start() <= _covered[m+1].start(),
           "Covered regions out of order");
    assert(_committed[m].start() <= _committed[m+1].start(),
           "Committed regions out of order");
  }
#endif
}

// Returns the start of any committed region that is lower than
// the target committed region (index ind) and that intersects the
// target region. If none, return start of target region.
//
//      -------------
//      |           |
//      -------------
//              ------------
//              |  target  |
//              ------------
//                               -------------
//                               |           |
//                               -------------
//      ^ returns this
//
//      -------------
//      |           |
//      -------------
//                      ------------
//                      |  target  |
//                      ------------
//                        -------------
//                        |           |
//                        -------------
//                      ^ returns this

HeapWord* CardTableExtension::lowest_prev_committed_start(int ind) const {
  assert(_cur_covered_regions >= 0, "Expecting at least one region");
  HeapWord* min_start = _committed[ind].start();
  for (int j = 0; j < ind; j++) {
    HeapWord* this_start = _committed[j].start();
    if ((this_start < min_start) &&
        !(_committed[j].intersection(_committed[ind])).is_empty()) {
      min_start = this_start;
    }
  }
  return min_start;
}