Thu, 16 Jun 2011 15:51:57 -0400
6994322: Remove the is_tlab and is_noref / is_large_noref parameters from the CollectedHeap
Summary: Remove two unused parameters from the mem_allocate() method and update its uses accordingly.
Reviewed-by: stefank, johnc
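
For illustration, the shape of the change to CollectedHeap (a sketch only;
exact parameter names and order in the original declaration may differ):

    // Before: callers had to pass allocation flags that no implementation used.
    virtual HeapWord* mem_allocate(size_t size,
                                   bool is_noref,
                                   bool is_tlab,
                                   bool* gc_overhead_limit_was_exceeded) = 0;

    // After: the unused is_noref and is_tlab parameters are removed.
    virtual HeapWord* mem_allocate(size_t size,
                                   bool* gc_overhead_limit_was_exceeded) = 0;
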
/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

25 #include "precompiled.hpp"
26 #include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
27 #include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
28 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
29 #include "gc_implementation/parallelScavenge/psTasks.hpp"
30 #include "gc_implementation/parallelScavenge/psYoungGen.hpp"
31 #include "oops/oop.inline.hpp"
32 #include "oops/oop.psgc.inline.hpp"
// Checks an individual oop for missing precise marks. Mark
// may be either dirty or newgen.
class CheckForUnmarkedOops : public OopClosure {
 private:
  PSYoungGen*         _young_gen;
  CardTableExtension* _card_table;
  HeapWord*           _unmarked_addr;
  jbyte*              _unmarked_card;

 protected:
  template <class T> void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    if (_young_gen->is_in_reserved(obj) &&
        !_card_table->addr_is_marked_imprecise(p)) {
      // Don't overwrite the first missing card mark
      if (_unmarked_addr == NULL) {
        _unmarked_addr = (HeapWord*)p;
        _unmarked_card = _card_table->byte_for(p);
      }
    }
  }

 public:
  CheckForUnmarkedOops(PSYoungGen* young_gen, CardTableExtension* card_table) :
    _young_gen(young_gen), _card_table(card_table), _unmarked_addr(NULL) { }

  virtual void do_oop(oop* p)       { CheckForUnmarkedOops::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { CheckForUnmarkedOops::do_oop_work(p); }

  bool has_unmarked_oop() {
    return _unmarked_addr != NULL;
  }
};

// Checks all objects for the existence of some type of mark,
// precise or imprecise, dirty or newgen.
class CheckForUnmarkedObjects : public ObjectClosure {
 private:
  PSYoungGen*         _young_gen;
  CardTableExtension* _card_table;

 public:
  CheckForUnmarkedObjects() {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

    _young_gen = heap->young_gen();
    _card_table = (CardTableExtension*)heap->barrier_set();
    // No point in asserting barrier set type here. Need to make CardTableExtension
    // a unique barrier set type.
  }

  // Card marks are not precise. The current system can leave us with
  // a mishmash of precise marks and beginning-of-object marks. This means
  // we test for missing precise marks first. If any are found, we don't
  // fail unless the object head is also unmarked.
  virtual void do_object(oop obj) {
    CheckForUnmarkedOops object_check(_young_gen, _card_table);
    obj->oop_iterate(&object_check);
    if (object_check.has_unmarked_oop()) {
      assert(_card_table->addr_is_marked_imprecise(obj), "Found unmarked young_gen object");
    }
  }
};

// Checks for precise marking of oops as newgen.
class CheckForPreciseMarks : public OopClosure {
 private:
  PSYoungGen*         _young_gen;
  CardTableExtension* _card_table;

 protected:
  template <class T> void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    if (_young_gen->is_in_reserved(obj)) {
      assert(_card_table->addr_is_marked_precise(p), "Found unmarked precise oop");
      _card_table->set_card_newgen(p);
    }
  }

 public:
  CheckForPreciseMarks(PSYoungGen* young_gen, CardTableExtension* card_table) :
    _young_gen(young_gen), _card_table(card_table) { }

  virtual void do_oop(oop* p)       { CheckForPreciseMarks::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { CheckForPreciseMarks::do_oop_work(p); }
};

// We get passed the space_top value to prevent us from traversing into
// the old_gen promotion labs, which cannot be safely parsed.
void CardTableExtension::scavenge_contents(ObjectStartArray* start_array,
                                           MutableSpace* sp,
                                           HeapWord* space_top,
                                           PSPromotionManager* pm)
{
  assert(start_array != NULL && sp != NULL && pm != NULL, "Sanity");
  assert(start_array->covered_region().contains(sp->used_region()),
         "ObjectStartArray does not cover space");

  if (sp->not_empty()) {
    oop* sp_top = (oop*)space_top;
    oop* prev_top = NULL;
    jbyte* current_card = byte_for(sp->bottom());
    jbyte* end_card = byte_for(sp_top - 1);    // sp_top is exclusive
    // scan card marking array
    while (current_card <= end_card) {
      jbyte value = *current_card;
      // skip clean cards
      if (card_is_clean(value)) {
        current_card++;
      } else {
        // we found a non-clean card
        jbyte* first_nonclean_card = current_card++;
        oop* bottom = (oop*)addr_for(first_nonclean_card);
        // find object starting on card
        oop* bottom_obj = (oop*)start_array->object_start((HeapWord*)bottom);
        assert(bottom_obj <= bottom, "just checking");
        // make sure we don't scan oops we already looked at
        if (bottom < prev_top) bottom = prev_top;
        // figure out when to stop scanning
        jbyte* first_clean_card;
        oop* top;
        bool restart_scanning;
        do {
          restart_scanning = false;
          // find a clean card
          while (current_card <= end_card) {
            value = *current_card;
            if (card_is_clean(value)) break;
            current_card++;
          }
          // check if we reached the end, if so we are done
          if (current_card >= end_card) {
            first_clean_card = end_card + 1;
            current_card++;
            top = sp_top;
          } else {
            // we have a clean card, find object starting on that card
            first_clean_card = current_card++;
            top = (oop*)addr_for(first_clean_card);
            oop* top_obj = (oop*)start_array->object_start((HeapWord*)top);
            assert(top_obj <= top, "just checking");
            if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
              // an arrayOop is starting on the clean card - since we do exact store
              // checks for objArrays we are done
            } else {
              // otherwise, it is possible that the object starting on the clean card
              // spans the entire card, and that the store happened on a later card.
              // figure out where the object ends
              top = top_obj + oop(top_obj)->size();
              jbyte* top_card = CardTableModRefBS::byte_for(top - 1);   // top is exclusive
              if (top_card > first_clean_card) {
                // object ends on a different card
                current_card = top_card + 1;
                if (card_is_clean(*top_card)) {
                  // the ending card is clean, we are done
                  first_clean_card = top_card;
                } else {
                  // the ending card is not clean, continue scanning at start of do-while
                  restart_scanning = true;
                }
              } else {
                // object ends on the clean card, we are done.
                assert(first_clean_card == top_card, "just checking");
              }
            }
          }
        } while (restart_scanning);
        // we know which cards to scan, now clear them
        while (first_nonclean_card < first_clean_card) {
          *first_nonclean_card++ = clean_card;
        }
        // scan oops in objects
        do {
          oop(bottom_obj)->push_contents(pm);
          bottom_obj += oop(bottom_obj)->size();
          assert(bottom_obj <= sp_top, "just checking");
        } while (bottom_obj < top);
        pm->drain_stacks_cond_depth();
        // remember top oop* scanned
        prev_top = top;
      }
    }
  }
}

void CardTableExtension::scavenge_contents_parallel(ObjectStartArray* start_array,
                                                    MutableSpace* sp,
                                                    HeapWord* space_top,
                                                    PSPromotionManager* pm,
                                                    uint stripe_number) {
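  // Each stripe is ssize cards wide; with 512-byte cards (card_shift == 9 in
  // CardTableModRefBS) that works out to the 64k work unit noted below.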
  int ssize = 128; // Naked constant! Work unit = 64k.
  int dirty_card_count = 0;

  oop* sp_top = (oop*)space_top;
  jbyte* start_card = byte_for(sp->bottom());
  jbyte* end_card = byte_for(sp_top - 1) + 1;
  oop* last_scanned = NULL; // Prevent scanning objects more than once
  for (jbyte* slice = start_card; slice < end_card; slice += ssize*ParallelGCThreads) {
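    // For example, with ssize == 128 and ParallelGCThreads == 4, a slice is
    // 512 cards: stripe 0 scans cards [0, 128) of each slice, stripe 1 scans
    // [128, 256), and so on.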
    jbyte* worker_start_card = slice + stripe_number * ssize;
    if (worker_start_card >= end_card)
      return; // We're done.

    jbyte* worker_end_card = worker_start_card + ssize;
    if (worker_end_card > end_card)
      worker_end_card = end_card;

    // We do not want to scan objects more than once. In order to accomplish
    // this, we assert that any object with an object head inside our 'slice'
    // belongs to us. We may need to extend the range of scanned cards if the
    // last object continues into the next 'slice'.
    //
    // Note! ending cards are exclusive!
    HeapWord* slice_start = addr_for(worker_start_card);
    HeapWord* slice_end = MIN2((HeapWord*) sp_top, addr_for(worker_end_card));

    // If there are no objects starting within the chunk, skip it.
    if (!start_array->object_starts_in_range(slice_start, slice_end)) {
      continue;
    }
    // Update our beginning addr
    HeapWord* first_object = start_array->object_start(slice_start);
    debug_only(oop* first_object_within_slice = (oop*) first_object;)
    if (first_object < slice_start) {
      last_scanned = (oop*)(first_object + oop(first_object)->size());
      debug_only(first_object_within_slice = last_scanned;)
      worker_start_card = byte_for(last_scanned);
    }

    // Update the ending addr
    if (slice_end < (HeapWord*)sp_top) {
      // The subtraction is important! An object may start precisely at slice_end.
      HeapWord* last_object = start_array->object_start(slice_end - 1);
      slice_end = last_object + oop(last_object)->size();
      // worker_end_card is exclusive, so bump it one past the end of last_object's
      // covered span.
      worker_end_card = byte_for(slice_end) + 1;

      if (worker_end_card > end_card)
        worker_end_card = end_card;
    }

    assert(slice_end <= (HeapWord*)sp_top, "Last object in slice crosses space boundary");
    assert(is_valid_card_address(worker_start_card), "Invalid worker start card");
    assert(is_valid_card_address(worker_end_card), "Invalid worker end card");
    // Note that worker_start_card >= worker_end_card is legal, and happens when
    // an object spans an entire slice.
    assert(worker_start_card <= end_card, "worker start card beyond end card");
    assert(worker_end_card <= end_card, "worker end card beyond end card");
    jbyte* current_card = worker_start_card;
    while (current_card < worker_end_card) {
      // Find an unclean card.
      while (current_card < worker_end_card && card_is_clean(*current_card)) {
        current_card++;
      }
      jbyte* first_unclean_card = current_card;

      // Find the end of a run of contiguous unclean cards
      while (current_card < worker_end_card && !card_is_clean(*current_card)) {
        while (current_card < worker_end_card && !card_is_clean(*current_card)) {
          current_card++;
        }

        if (current_card < worker_end_card) {
          // Some objects may be large enough to span several cards. If such
          // an object has more than one dirty card, separated by a clean card,
          // we will attempt to scan it twice. The test against "last_scanned"
          // prevents the redundant object scan, but it does not prevent newly
          // marked cards from being cleaned.
          HeapWord* last_object_in_dirty_region = start_array->object_start(addr_for(current_card)-1);
          size_t size_of_last_object = oop(last_object_in_dirty_region)->size();
          HeapWord* end_of_last_object = last_object_in_dirty_region + size_of_last_object;
          jbyte* ending_card_of_last_object = byte_for(end_of_last_object);
          assert(ending_card_of_last_object <= worker_end_card, "ending_card_of_last_object is greater than worker_end_card");
          if (ending_card_of_last_object > current_card) {
            // This means the object spans the next complete card.
            // We need to bump the current_card to ending_card_of_last_object
            current_card = ending_card_of_last_object;
          }
        }
      }
      jbyte* following_clean_card = current_card;

      if (first_unclean_card < worker_end_card) {
        oop* p = (oop*) start_array->object_start(addr_for(first_unclean_card));
        assert((HeapWord*)p <= addr_for(first_unclean_card), "checking");
        // "p" should always be >= "last_scanned" because newly GC dirtied
        // cards are no longer scanned again (see comment at end
        // of loop on the increment of "current_card").  Test that
        // hypothesis before removing this code.
        // If this code is removed, deal with the first time through
        // the loop when the last_scanned is the object starting in
        // the previous slice.
        assert((p >= last_scanned) ||
               (last_scanned == first_object_within_slice),
               "Should no longer be possible");
        if (p < last_scanned) {
          // Avoid scanning more than once; this can happen because
          // newgen cards set by GC may be a different set than the
          // originally dirty set
          p = last_scanned;
        }
        oop* to = (oop*)addr_for(following_clean_card);

        // Test slice_end first!
        if ((HeapWord*)to > slice_end) {
          to = (oop*)slice_end;
        } else if (to > sp_top) {
          to = sp_top;
        }

        // we know which cards to scan, now clear them
        if (first_unclean_card <= worker_start_card+1)
          first_unclean_card = worker_start_card+1;
        if (following_clean_card >= worker_end_card-1)
          following_clean_card = worker_end_card-1;

        while (first_unclean_card < following_clean_card) {
          *first_unclean_card++ = clean_card;
        }
        const int interval = PrefetchScanIntervalInBytes;
        // scan all objects in the range
        if (interval != 0) {
          while (p < to) {
            Prefetch::write(p, interval);
            oop m = oop(p);
            assert(m->is_oop_or_null(), "check for header");
            m->push_contents(pm);
            p += m->size();
          }
          pm->drain_stacks_cond_depth();
        } else {
          while (p < to) {
            oop m = oop(p);
            assert(m->is_oop_or_null(), "check for header");
            m->push_contents(pm);
            p += m->size();
          }
          pm->drain_stacks_cond_depth();
        }
        last_scanned = p;
      }
      // "current_card" is still the "following_clean_card" or
      // the current_card is >= the worker_end_card so the
      // loop will not execute again.
      assert((current_card == following_clean_card) ||
             (current_card >= worker_end_card),
             "current_card should only be incremented if it still equals "
             "following_clean_card");
      // Increment current_card so that it is not processed again.
      // It may now be dirty because an old-to-young pointer was
      // found on it and updated. If it is now dirty, it cannot
      // be safely cleaned in the next iteration.
      current_card++;
    }
  }
}

// This should be called before a scavenge.
void CardTableExtension::verify_all_young_refs_imprecise() {
  CheckForUnmarkedObjects check;

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  old_gen->object_iterate(&check);
  perm_gen->object_iterate(&check);
}

// This should be called immediately after a scavenge, before mutators resume.
void CardTableExtension::verify_all_young_refs_precise() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  CheckForPreciseMarks check(heap->young_gen(), (CardTableExtension*)heap->barrier_set());

  old_gen->oop_iterate(&check);
  perm_gen->oop_iterate(&check);

  verify_all_young_refs_precise_helper(old_gen->object_space()->used_region());
  verify_all_young_refs_precise_helper(perm_gen->object_space()->used_region());
}

void CardTableExtension::verify_all_young_refs_precise_helper(MemRegion mr) {
  CardTableExtension* card_table = (CardTableExtension*)Universe::heap()->barrier_set();
  // FIX ME ASSERT HERE

  jbyte* bot = card_table->byte_for(mr.start());
  jbyte* top = card_table->byte_for(mr.end());
  while (bot <= top) {
    assert(*bot == clean_card || *bot == verify_card, "Found unwanted or unknown card mark");
    if (*bot == verify_card)
      *bot = youngergen_card;
    bot++;
  }
}

bool CardTableExtension::addr_is_marked_imprecise(void *addr) {
  jbyte* p = byte_for(addr);
  jbyte val = *p;

  if (card_is_dirty(val))
    return true;

  if (card_is_newgen(val))
    return true;

  if (card_is_clean(val))
    return false;

  assert(false, "Found unhandled card mark type");

  return false;
}

// Also includes verify_card
bool CardTableExtension::addr_is_marked_precise(void *addr) {
  jbyte* p = byte_for(addr);
  jbyte val = *p;

  if (card_is_newgen(val))
    return true;

  if (card_is_verify(val))
    return true;

  if (card_is_clean(val))
    return false;

  if (card_is_dirty(val))
    return false;

  assert(false, "Found unhandled card mark type");

  return false;
}

// Assumes that only the base or the end changes. This allows identification
// of the region that is being resized.
// CardTableModRefBS::resize_covered_region() is used for the normal case
// where the covered regions are growing or shrinking at the high end.
// The method resize_covered_region_by_end() is analogous to
// CardTableModRefBS::resize_covered_region() but
// for regions that grow or shrink at the low end.
void CardTableExtension::resize_covered_region(MemRegion new_region) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == new_region.start()) {
      // Found a covered region with the same start as the
      // new region. The region is growing or shrinking
      // from the start of the region.
      resize_covered_region_by_start(new_region);
      return;
    }
    if (_covered[i].start() > new_region.start()) {
      break;
    }
  }

  int changed_region = -1;
  for (int j = 0; j < _cur_covered_regions; j++) {
    if (_covered[j].end() == new_region.end()) {
      changed_region = j;
      // This is a case where the covered region is growing or shrinking
      // at the start of the region.
      assert(changed_region != -1, "Don't expect to add a covered region");
      assert(_covered[changed_region].byte_size() != new_region.byte_size(),
             "The sizes should be different here");
      resize_covered_region_by_end(changed_region, new_region);
      return;
    }
  }
  // This should only be a new covered region (where no existing
  // covered region matches at the start or the end).
  assert(_cur_covered_regions < _max_covered_regions,
         "An existing region should have been found");
  resize_covered_region_by_start(new_region);
}

void CardTableExtension::resize_covered_region_by_start(MemRegion new_region) {
  CardTableModRefBS::resize_covered_region(new_region);
  debug_only(verify_guard();)
}

void CardTableExtension::resize_covered_region_by_end(int changed_region,
                                                      MemRegion new_region) {
  assert(SafepointSynchronize::is_at_safepoint(),
         "Only expect an expansion at the low end at a GC");
  debug_only(verify_guard();)
#ifdef ASSERT
  for (int k = 0; k < _cur_covered_regions; k++) {
    if (_covered[k].end() == new_region.end()) {
      assert(changed_region == k, "Changed region is incorrect");
      break;
    }
  }
#endif

  // Commit new or uncommit old pages, if necessary.
  if (resize_commit_uncommit(changed_region, new_region)) {
    // Set the new start of the committed region
    resize_update_committed_table(changed_region, new_region);
  }

  // Update card table entries
  resize_update_card_table_entries(changed_region, new_region);

  // Update the covered region
  resize_update_covered_table(changed_region, new_region);

  if (TraceCardTableModRefBS) {
    int ind = changed_region;
    gclog_or_tty->print_cr("CardTableModRefBS::resize_covered_region: ");
    gclog_or_tty->print_cr("  "
                  "  _covered[%d].start(): " INTPTR_FORMAT
                  "  _covered[%d].last(): " INTPTR_FORMAT,
                  ind, _covered[ind].start(),
                  ind, _covered[ind].last());
    gclog_or_tty->print_cr("  "
                  "  _committed[%d].start(): " INTPTR_FORMAT
                  "  _committed[%d].last(): " INTPTR_FORMAT,
                  ind, _committed[ind].start(),
                  ind, _committed[ind].last());
    gclog_or_tty->print_cr("  "
                  "  byte_for(start): " INTPTR_FORMAT
                  "  byte_for(last): " INTPTR_FORMAT,
                  byte_for(_covered[ind].start()),
                  byte_for(_covered[ind].last()));
    gclog_or_tty->print_cr("  "
                  "  addr_for(start): " INTPTR_FORMAT
                  "  addr_for(last): " INTPTR_FORMAT,
                  addr_for((jbyte*) _committed[ind].start()),
                  addr_for((jbyte*) _committed[ind].last()));
  }
  debug_only(verify_guard();)
}

bool CardTableExtension::resize_commit_uncommit(int changed_region,
                                                MemRegion new_region) {
  bool result = false;
  // Commit new or uncommit old pages, if necessary.
  MemRegion cur_committed = _committed[changed_region];
  assert(_covered[changed_region].end() == new_region.end(),
         "The ends of the regions are expected to match");
  // Extend the start of this _committed region to
  // cover the start of any previous _committed region.
  // This forms overlapping regions, but never interior regions.
  HeapWord* min_prev_start = lowest_prev_committed_start(changed_region);
  if (min_prev_start < cur_committed.start()) {
    // Only really need to set start of "cur_committed" to
    // the new start (min_prev_start) but assertion checking code
    // below uses cur_committed.end() so make it correct.
    MemRegion new_committed =
      MemRegion(min_prev_start, cur_committed.end());
    cur_committed = new_committed;
  }
#ifdef ASSERT
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(cur_committed.start() ==
         (HeapWord*) align_size_up((uintptr_t) cur_committed.start(),
                                   os::vm_page_size()),
         "Starts should have proper alignment");
#endif

  jbyte* new_start = byte_for(new_region.start());
  // Round down because this is for the start address
  HeapWord* new_start_aligned =
    (HeapWord*)align_size_down((uintptr_t)new_start, os::vm_page_size());
  // The guard page is always committed and should not be committed over.
  // This method is used in cases where the generation is growing toward
  // lower addresses but the guard region is still at the end of the
  // card table. That still makes sense when looking for writes
  // off the end of the card table.
  if (new_start_aligned < cur_committed.start()) {
    // Expand the committed region
    //
    // Case A
    //                                          |+ guard +|
    //                          |+ cur committed +++++++++|
    //                  |+ new committed +++++++++++++++++|
    //
    // Case B
    //                                          |+ guard +|
    //                              |+ cur committed +|
    //                  |+ new committed +++++++|
    //
    // These are not expected because the calculation of the
    // cur committed region and the new committed region
    // share the same end for the covered region.
    //
    // Case C
    //                                          |+ guard +|
    //                              |+ cur committed +|
    //                  |+ new committed +++++++++++++++++|
    // Case D
    //                                          |+ guard +|
    //                              |+ cur committed +++++++++++|
    //                  |+ new committed +++++++|

    HeapWord* new_end_for_commit =
      MIN2(cur_committed.end(), _guard_region.start());
    if (new_start_aligned < new_end_for_commit) {
      MemRegion new_committed =
        MemRegion(new_start_aligned, new_end_for_commit);
      if (!os::commit_memory((char*)new_committed.start(),
                             new_committed.byte_size())) {
        vm_exit_out_of_memory(new_committed.byte_size(),
                              "card table expansion");
      }
    }
    result = true;
  } else if (new_start_aligned > cur_committed.start()) {
    // Shrink the committed region
#if 0 // uncommitting space is currently unsafe because of the interactions
      // of growing and shrinking regions. One region A can uncommit space
      // that it owns but which is being used by another region B (maybe).
      // Region B has not committed the space because it was already
      // committed by region A.
    MemRegion uncommit_region = committed_unique_to_self(changed_region,
      MemRegion(cur_committed.start(), new_start_aligned));
    if (!uncommit_region.is_empty()) {
      if (!os::uncommit_memory((char*)uncommit_region.start(),
                               uncommit_region.byte_size())) {
        // If the uncommit fails, ignore it. Let the
        // committed table resizing go even though the committed
        // table will overstate the committed space.
      }
    }
#else
    assert(!result, "Should be false with current workaround");
#endif
  }
  assert(_committed[changed_region].end() == cur_committed.end(),
         "end should not change");
  return result;
}

void CardTableExtension::resize_update_committed_table(int changed_region,
                                                       MemRegion new_region) {
  jbyte* new_start = byte_for(new_region.start());
  // Set the new start of the committed region
  HeapWord* new_start_aligned =
    (HeapWord*)align_size_down((uintptr_t)new_start,
                               os::vm_page_size());
  MemRegion new_committed = MemRegion(new_start_aligned,
                                      _committed[changed_region].end());
  _committed[changed_region] = new_committed;
  _committed[changed_region].set_start(new_start_aligned);
}

void CardTableExtension::resize_update_card_table_entries(int changed_region,
                                                          MemRegion new_region) {
  debug_only(verify_guard();)
  MemRegion original_covered = _covered[changed_region];
  // Initialize the card entries. Only consider the
  // region covered by the card table (_whole_heap)
  jbyte* entry;
  if (new_region.start() < _whole_heap.start()) {
    entry = byte_for(_whole_heap.start());
  } else {
    entry = byte_for(new_region.start());
  }
  jbyte* end = byte_for(original_covered.start());
  // If _whole_heap starts at the original covered region's start,
  // this loop will not execute.
  while (entry < end) { *entry++ = clean_card; }
}

void CardTableExtension::resize_update_covered_table(int changed_region,
                                                     MemRegion new_region) {
  // Update the covered region
  _covered[changed_region].set_start(new_region.start());
  _covered[changed_region].set_word_size(new_region.word_size());

  // Reorder regions. There should be at most one out of order.
  for (int i = _cur_covered_regions-1; i > 0; i--) {
    if (_covered[i].start() < _covered[i-1].start()) {
      MemRegion covered_mr = _covered[i-1];
      _covered[i-1] = _covered[i];
      _covered[i] = covered_mr;
      MemRegion committed_mr = _committed[i-1];
      _committed[i-1] = _committed[i];
      _committed[i] = committed_mr;
      break;
    }
  }
#ifdef ASSERT
  for (int m = 0; m < _cur_covered_regions-1; m++) {
    assert(_covered[m].start() <= _covered[m+1].start(),
           "Covered regions out of order");
    assert(_committed[m].start() <= _committed[m+1].start(),
           "Committed regions out of order");
  }
#endif
}

// Returns the start of any committed region that is lower than
// the target committed region (index ind) and that intersects the
// target region. If none, return start of target region.
//
//      -------------
//      |           |
//      -------------
//              ------------
//              | target   |
//              ------------
//                               -------------
//                               |           |
//                               -------------
//      ^ returns this
//
//      -------------
//      |           |
//      -------------
//                      ------------
//                      | target   |
//                      ------------
//        -------------
//        |           |
//        -------------
//        ^ returns this
HeapWord* CardTableExtension::lowest_prev_committed_start(int ind) const {
  assert(_cur_covered_regions >= 0, "Expecting at least one region");
  HeapWord* min_start = _committed[ind].start();
  for (int j = 0; j < ind; j++) {
    HeapWord* this_start = _committed[j].start();
    if ((this_start < min_start) &&
        !(_committed[j].intersection(_committed[ind])).is_empty()) {
      min_start = this_start;
    }
  }
  return min_start;
}