Thu, 12 Oct 2017 21:27:07 +0800
merge
aoqi@0 | 1 | /* |
aoqi@0 | 2 | * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved. |
aoqi@0 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
aoqi@0 | 4 | * |
aoqi@0 | 5 | * This code is free software; you can redistribute it and/or modify it |
aoqi@0 | 6 | * under the terms of the GNU General Public License version 2 only, as |
aoqi@0 | 7 | * published by the Free Software Foundation. |
aoqi@0 | 8 | * |
aoqi@0 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
aoqi@0 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
aoqi@0 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
aoqi@0 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
aoqi@0 | 13 | * accompanied this code). |
aoqi@0 | 14 | * |
aoqi@0 | 15 | * You should have received a copy of the GNU General Public License version |
aoqi@0 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
aoqi@0 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
aoqi@0 | 18 | * |
aoqi@0 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
aoqi@0 | 20 | * or visit www.oracle.com if you need additional information or have any |
aoqi@0 | 21 | * questions. |
aoqi@0 | 22 | * |
aoqi@0 | 23 | */ |
aoqi@0 | 24 | |
aoqi@0 | 25 | #include "precompiled.hpp" |
aoqi@0 | 26 | #include "gc_implementation/parallelScavenge/cardTableExtension.hpp" |
aoqi@0 | 27 | #include "gc_implementation/parallelScavenge/gcTaskManager.hpp" |
aoqi@0 | 28 | #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp" |
aoqi@0 | 29 | #include "gc_implementation/parallelScavenge/psTasks.hpp" |
aoqi@0 | 30 | #include "gc_implementation/parallelScavenge/psYoungGen.hpp" |
aoqi@0 | 31 | #include "oops/oop.inline.hpp" |
aoqi@0 | 32 | #include "oops/oop.psgc.inline.hpp" |
goetz@6912 | 33 | #include "runtime/prefetch.inline.hpp" |
aoqi@0 | 34 | |
aoqi@0 | 35 | // Checks an individual oop for missing precise marks. Mark |
aoqi@0 | 36 | // may be either dirty or newgen. |
aoqi@0 | 37 | class CheckForUnmarkedOops : public OopClosure { |
aoqi@0 | 38 | private: |
aoqi@0 | 39 | PSYoungGen* _young_gen; |
aoqi@0 | 40 | CardTableExtension* _card_table; |
aoqi@0 | 41 | HeapWord* _unmarked_addr; |
aoqi@0 | 42 | jbyte* _unmarked_card; |
aoqi@0 | 43 | |
aoqi@0 | 44 | protected: |
aoqi@0 | 45 | template <class T> void do_oop_work(T* p) { |
aoqi@0 | 46 | oop obj = oopDesc::load_decode_heap_oop(p); |
aoqi@0 | 47 | if (_young_gen->is_in_reserved(obj) && |
aoqi@0 | 48 | !_card_table->addr_is_marked_imprecise(p)) { |
aoqi@0 | 49 | // Don't overwrite the first missing card mark |
aoqi@0 | 50 | if (_unmarked_addr == NULL) { |
aoqi@0 | 51 | _unmarked_addr = (HeapWord*)p; |
aoqi@0 | 52 | _unmarked_card = _card_table->byte_for(p); |
aoqi@0 | 53 | } |
aoqi@0 | 54 | } |
aoqi@0 | 55 | } |
aoqi@0 | 56 | |
aoqi@0 | 57 | public: |
aoqi@0 | 58 | CheckForUnmarkedOops(PSYoungGen* young_gen, CardTableExtension* card_table) : |
aoqi@0 | 59 | _young_gen(young_gen), _card_table(card_table), _unmarked_addr(NULL) { } |
aoqi@0 | 60 | |
aoqi@0 | 61 | virtual void do_oop(oop* p) { CheckForUnmarkedOops::do_oop_work(p); } |
aoqi@0 | 62 | virtual void do_oop(narrowOop* p) { CheckForUnmarkedOops::do_oop_work(p); } |
aoqi@0 | 63 | |
aoqi@0 | 64 | bool has_unmarked_oop() { |
aoqi@0 | 65 | return _unmarked_addr != NULL; |
aoqi@0 | 66 | } |
aoqi@0 | 67 | }; |
aoqi@0 | 68 | |
aoqi@0 | 69 | // Checks all objects for the existance of some type of mark, |
aoqi@0 | 70 | // precise or imprecise, dirty or newgen. |
aoqi@0 | 71 | class CheckForUnmarkedObjects : public ObjectClosure { |
aoqi@0 | 72 | private: |
aoqi@0 | 73 | PSYoungGen* _young_gen; |
aoqi@0 | 74 | CardTableExtension* _card_table; |
aoqi@0 | 75 | |
aoqi@0 | 76 | public: |
aoqi@0 | 77 | CheckForUnmarkedObjects() { |
aoqi@0 | 78 | ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); |
aoqi@0 | 79 | assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); |
aoqi@0 | 80 | |
aoqi@0 | 81 | _young_gen = heap->young_gen(); |
aoqi@0 | 82 | _card_table = (CardTableExtension*)heap->barrier_set(); |
aoqi@0 | 83 | // No point in asserting barrier set type here. Need to make CardTableExtension |
aoqi@0 | 84 | // a unique barrier set type. |
aoqi@0 | 85 | } |
aoqi@0 | 86 | |
aoqi@0 | 87 | // Card marks are not precise. The current system can leave us with |
aoqi@0 | 88 | // a mismash of precise marks and beginning of object marks. This means |
aoqi@0 | 89 | // we test for missing precise marks first. If any are found, we don't |
aoqi@0 | 90 | // fail unless the object head is also unmarked. |
aoqi@0 | 91 | virtual void do_object(oop obj) { |
aoqi@0 | 92 | CheckForUnmarkedOops object_check(_young_gen, _card_table); |
aoqi@0 | 93 | obj->oop_iterate_no_header(&object_check); |
aoqi@0 | 94 | if (object_check.has_unmarked_oop()) { |
aoqi@0 | 95 | assert(_card_table->addr_is_marked_imprecise(obj), "Found unmarked young_gen object"); |
aoqi@0 | 96 | } |
aoqi@0 | 97 | } |
aoqi@0 | 98 | }; |
aoqi@0 | 99 | |
aoqi@0 | 100 | // Checks for precise marking of oops as newgen. |
aoqi@0 | 101 | class CheckForPreciseMarks : public OopClosure { |
aoqi@0 | 102 | private: |
aoqi@0 | 103 | PSYoungGen* _young_gen; |
aoqi@0 | 104 | CardTableExtension* _card_table; |
aoqi@0 | 105 | |
aoqi@0 | 106 | protected: |
aoqi@0 | 107 | template <class T> void do_oop_work(T* p) { |
aoqi@0 | 108 | oop obj = oopDesc::load_decode_heap_oop_not_null(p); |
aoqi@0 | 109 | if (_young_gen->is_in_reserved(obj)) { |
aoqi@0 | 110 | assert(_card_table->addr_is_marked_precise(p), "Found unmarked precise oop"); |
aoqi@0 | 111 | _card_table->set_card_newgen(p); |
aoqi@0 | 112 | } |
aoqi@0 | 113 | } |
aoqi@0 | 114 | |
aoqi@0 | 115 | public: |
aoqi@0 | 116 | CheckForPreciseMarks( PSYoungGen* young_gen, CardTableExtension* card_table ) : |
aoqi@0 | 117 | _young_gen(young_gen), _card_table(card_table) { } |
aoqi@0 | 118 | |
aoqi@0 | 119 | virtual void do_oop(oop* p) { CheckForPreciseMarks::do_oop_work(p); } |
aoqi@0 | 120 | virtual void do_oop(narrowOop* p) { CheckForPreciseMarks::do_oop_work(p); } |
aoqi@0 | 121 | }; |
aoqi@0 | 122 | |
// We get passed the space_top value to prevent us from traversing into
// the old_gen promotion labs, which cannot be safely parsed.

// Do not call this method if the space is empty.
// It is a waste to start tasks and get here only to
// do no work. If this method needs to be called
// when the space is empty, fix the calculation of
// end_card to allow sp_top == sp->bottom().

// Scans the dirty cards of one worker's stripes of the space
// [sp->bottom(), space_top) and pushes the young-gen references found
// there onto the promotion manager's stacks. Worker `stripe_number` of
// `stripe_total` processes every stripe_total-th run of ssize cards, so
// collectively the workers cover the whole space without overlap.
void CardTableExtension::scavenge_contents_parallel(ObjectStartArray* start_array,
                                                    MutableSpace* sp,
                                                    HeapWord* space_top,
                                                    PSPromotionManager* pm,
                                                    uint stripe_number,
                                                    uint stripe_total) {
  int ssize = 128; // Naked constant! Work unit = 64k.
  int dirty_card_count = 0;  // NOTE(review): never updated or read -- dead local.

  // It is a waste to get here if empty.
  assert(sp->bottom() < sp->top(), "Should not be called if empty");
  oop* sp_top = (oop*)space_top;
  jbyte* start_card = byte_for(sp->bottom());
  jbyte* end_card = byte_for(sp_top - 1) + 1;   // exclusive upper bound
  oop* last_scanned = NULL; // Prevent scanning objects more than once
  // The width of the stripe ssize*stripe_total must be
  // consistent with the number of stripes so that the complete slice
  // is covered.
  size_t slice_width = ssize * stripe_total;
  for (jbyte* slice = start_card; slice < end_card; slice += slice_width) {
    jbyte* worker_start_card = slice + stripe_number * ssize;
    if (worker_start_card >= end_card)
      return; // We're done.

    jbyte* worker_end_card = worker_start_card + ssize;
    if (worker_end_card > end_card)
      worker_end_card = end_card;

    // We do not want to scan objects more than once. In order to accomplish
    // this, we assert that any object with an object head inside our 'slice'
    // belongs to us. We may need to extend the range of scanned cards if the
    // last object continues into the next 'slice'.
    //
    // Note! ending cards are exclusive!
    HeapWord* slice_start = addr_for(worker_start_card);
    HeapWord* slice_end = MIN2((HeapWord*) sp_top, addr_for(worker_end_card));

#ifdef ASSERT
    if (GCWorkerDelayMillis > 0) {
      // Delay 1 worker so that it proceeds after all the work
      // has been completed.
      if (stripe_number < 2) {
        os::sleep(Thread::current(), GCWorkerDelayMillis, false);
      }
    }
#endif

    // If there are no objects starting within the chunk, skip it.
    if (!start_array->object_starts_in_range(slice_start, slice_end)) {
      continue;
    }
    // Update our beginning addr: an object whose head lies before this
    // slice belongs to the previous stripe, so skip past it.
    HeapWord* first_object = start_array->object_start(slice_start);
    debug_only(oop* first_object_within_slice = (oop*) first_object;)
    if (first_object < slice_start) {
      last_scanned = (oop*)(first_object + oop(first_object)->size());
      debug_only(first_object_within_slice = last_scanned;)
      worker_start_card = byte_for(last_scanned);
    }

    // Update the ending addr: an object whose head lies in this slice is
    // ours even if it extends into the next slice.
    if (slice_end < (HeapWord*)sp_top) {
      // The subtraction is important! An object may start precisely at slice_end.
      HeapWord* last_object = start_array->object_start(slice_end - 1);
      slice_end = last_object + oop(last_object)->size();
      // worker_end_card is exclusive, so bump it one past the end of last_object's
      // covered span.
      worker_end_card = byte_for(slice_end) + 1;

      if (worker_end_card > end_card)
        worker_end_card = end_card;
    }

    assert(slice_end <= (HeapWord*)sp_top, "Last object in slice crosses space boundary");
    assert(is_valid_card_address(worker_start_card), "Invalid worker start card");
    assert(is_valid_card_address(worker_end_card), "Invalid worker end card");
    // Note that worker_start_card >= worker_end_card is legal, and happens when
    // an object spans an entire slice.
    assert(worker_start_card <= end_card, "worker start card beyond end card");
    assert(worker_end_card <= end_card, "worker end card beyond end card");

    jbyte* current_card = worker_start_card;
    while (current_card < worker_end_card) {
      // Find an unclean card.
      while (current_card < worker_end_card && card_is_clean(*current_card)) {
        current_card++;
      }
      jbyte* first_unclean_card = current_card;

      // Find the end of a run of contiguous unclean cards
      while (current_card < worker_end_card && !card_is_clean(*current_card)) {
        while (current_card < worker_end_card && !card_is_clean(*current_card)) {
          current_card++;
        }

        if (current_card < worker_end_card) {
          // Some objects may be large enough to span several cards. If such
          // an object has more than one dirty card, separated by a clean card,
          // we will attempt to scan it twice. The test against "last_scanned"
          // prevents the redundant object scan, but it does not prevent newly
          // marked cards from being cleaned.
          HeapWord* last_object_in_dirty_region = start_array->object_start(addr_for(current_card)-1);
          size_t size_of_last_object = oop(last_object_in_dirty_region)->size();
          HeapWord* end_of_last_object = last_object_in_dirty_region + size_of_last_object;
          jbyte* ending_card_of_last_object = byte_for(end_of_last_object);
          assert(ending_card_of_last_object <= worker_end_card, "ending_card_of_last_object is greater than worker_end_card");
          if (ending_card_of_last_object > current_card) {
            // This means the object spans the next complete card.
            // We need to bump the current_card to ending_card_of_last_object
            current_card = ending_card_of_last_object;
          }
        }
      }
      jbyte* following_clean_card = current_card;

      if (first_unclean_card < worker_end_card) {
        oop* p = (oop*) start_array->object_start(addr_for(first_unclean_card));
        assert((HeapWord*)p <= addr_for(first_unclean_card), "checking");
        // "p" should always be >= "last_scanned" because newly GC dirtied
        // cards are no longer scanned again (see comment at end
        // of loop on the increment of "current_card"). Test that
        // hypothesis before removing this code.
        // If this code is removed, deal with the first time through
        // the loop when the last_scanned is the object starting in
        // the previous slice.
        assert((p >= last_scanned) ||
               (last_scanned == first_object_within_slice),
               "Should no longer be possible");
        if (p < last_scanned) {
          // Avoid scanning more than once; this can happen because
          // newgen cards set by GC may be a different set than the
          // originally dirty set
          p = last_scanned;
        }
        oop* to = (oop*)addr_for(following_clean_card);

        // Test slice_end first!
        if ((HeapWord*)to > slice_end) {
          to = (oop*)slice_end;
        } else if (to > sp_top) {
          to = sp_top;
        }

        // we know which cards to scan, now clear them
        // (keep the boundary cards: neighbours may still need them)
        if (first_unclean_card <= worker_start_card+1)
          first_unclean_card = worker_start_card+1;
        if (following_clean_card >= worker_end_card-1)
          following_clean_card = worker_end_card-1;

        while (first_unclean_card < following_clean_card) {
          *first_unclean_card++ = clean_card;
        }

        const int interval = PrefetchScanIntervalInBytes;
        // scan all objects in the range
        if (interval != 0) {
          // Prefetching variant: identical scan with a write prefetch
          // `interval` bytes ahead of the cursor.
          while (p < to) {
            Prefetch::write(p, interval);
            oop m = oop(p);
            assert(m->is_oop_or_null(), "check for header");
            m->push_contents(pm);
            p += m->size();
          }
          pm->drain_stacks_cond_depth();
        } else {
          while (p < to) {
            oop m = oop(p);
            assert(m->is_oop_or_null(), "check for header");
            m->push_contents(pm);
            p += m->size();
          }
          pm->drain_stacks_cond_depth();
        }
        last_scanned = p;
      }
      // "current_card" is still the "following_clean_card" or
      // the current_card is >= the worker_end_card so the
      // loop will not execute again.
      assert((current_card == following_clean_card) ||
             (current_card >= worker_end_card),
             "current_card should only be incremented if it still equals "
             "following_clean_card");
      // Increment current_card so that it is not processed again.
      // It may now be dirty because an old-to-young pointer was
      // found on it and updated. If it is now dirty, it cannot
      // be safely cleaned in the next iteration.
      current_card++;
    }
  }
}
aoqi@0 | 322 | |
aoqi@0 | 323 | // This should be called before a scavenge. |
aoqi@0 | 324 | void CardTableExtension::verify_all_young_refs_imprecise() { |
aoqi@0 | 325 | CheckForUnmarkedObjects check; |
aoqi@0 | 326 | |
aoqi@0 | 327 | ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); |
aoqi@0 | 328 | assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); |
aoqi@0 | 329 | |
aoqi@0 | 330 | PSOldGen* old_gen = heap->old_gen(); |
aoqi@0 | 331 | |
aoqi@0 | 332 | old_gen->object_iterate(&check); |
aoqi@0 | 333 | } |
aoqi@0 | 334 | |
aoqi@0 | 335 | // This should be called immediately after a scavenge, before mutators resume. |
aoqi@0 | 336 | void CardTableExtension::verify_all_young_refs_precise() { |
aoqi@0 | 337 | ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); |
aoqi@0 | 338 | assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); |
aoqi@0 | 339 | |
aoqi@0 | 340 | PSOldGen* old_gen = heap->old_gen(); |
aoqi@0 | 341 | |
aoqi@0 | 342 | CheckForPreciseMarks check(heap->young_gen(), (CardTableExtension*)heap->barrier_set()); |
aoqi@0 | 343 | |
aoqi@0 | 344 | old_gen->oop_iterate_no_header(&check); |
aoqi@0 | 345 | |
aoqi@0 | 346 | verify_all_young_refs_precise_helper(old_gen->object_space()->used_region()); |
aoqi@0 | 347 | } |
aoqi@0 | 348 | |
aoqi@0 | 349 | void CardTableExtension::verify_all_young_refs_precise_helper(MemRegion mr) { |
aoqi@0 | 350 | CardTableExtension* card_table = (CardTableExtension*)Universe::heap()->barrier_set(); |
aoqi@0 | 351 | // FIX ME ASSERT HERE |
aoqi@0 | 352 | |
aoqi@0 | 353 | jbyte* bot = card_table->byte_for(mr.start()); |
aoqi@0 | 354 | jbyte* top = card_table->byte_for(mr.end()); |
aoqi@0 | 355 | while(bot <= top) { |
aoqi@0 | 356 | assert(*bot == clean_card || *bot == verify_card, "Found unwanted or unknown card mark"); |
aoqi@0 | 357 | if (*bot == verify_card) |
aoqi@0 | 358 | *bot = youngergen_card; |
aoqi@0 | 359 | bot++; |
aoqi@0 | 360 | } |
aoqi@0 | 361 | } |
aoqi@0 | 362 | |
aoqi@0 | 363 | bool CardTableExtension::addr_is_marked_imprecise(void *addr) { |
aoqi@0 | 364 | jbyte* p = byte_for(addr); |
aoqi@0 | 365 | jbyte val = *p; |
aoqi@0 | 366 | |
aoqi@0 | 367 | if (card_is_dirty(val)) |
aoqi@0 | 368 | return true; |
aoqi@0 | 369 | |
aoqi@0 | 370 | if (card_is_newgen(val)) |
aoqi@0 | 371 | return true; |
aoqi@0 | 372 | |
aoqi@0 | 373 | if (card_is_clean(val)) |
aoqi@0 | 374 | return false; |
aoqi@0 | 375 | |
aoqi@0 | 376 | assert(false, "Found unhandled card mark type"); |
aoqi@0 | 377 | |
aoqi@0 | 378 | return false; |
aoqi@0 | 379 | } |
aoqi@0 | 380 | |
aoqi@0 | 381 | // Also includes verify_card |
aoqi@0 | 382 | bool CardTableExtension::addr_is_marked_precise(void *addr) { |
aoqi@0 | 383 | jbyte* p = byte_for(addr); |
aoqi@0 | 384 | jbyte val = *p; |
aoqi@0 | 385 | |
aoqi@0 | 386 | if (card_is_newgen(val)) |
aoqi@0 | 387 | return true; |
aoqi@0 | 388 | |
aoqi@0 | 389 | if (card_is_verify(val)) |
aoqi@0 | 390 | return true; |
aoqi@0 | 391 | |
aoqi@0 | 392 | if (card_is_clean(val)) |
aoqi@0 | 393 | return false; |
aoqi@0 | 394 | |
aoqi@0 | 395 | if (card_is_dirty(val)) |
aoqi@0 | 396 | return false; |
aoqi@0 | 397 | |
aoqi@0 | 398 | assert(false, "Found unhandled card mark type"); |
aoqi@0 | 399 | |
aoqi@0 | 400 | return false; |
aoqi@0 | 401 | } |
aoqi@0 | 402 | |
aoqi@0 | 403 | // Assumes that only the base or the end changes. This allows indentification |
aoqi@0 | 404 | // of the region that is being resized. The |
aoqi@0 | 405 | // CardTableModRefBS::resize_covered_region() is used for the normal case |
aoqi@0 | 406 | // where the covered regions are growing or shrinking at the high end. |
aoqi@0 | 407 | // The method resize_covered_region_by_end() is analogous to |
aoqi@0 | 408 | // CardTableModRefBS::resize_covered_region() but |
aoqi@0 | 409 | // for regions that grow or shrink at the low end. |
aoqi@0 | 410 | void CardTableExtension::resize_covered_region(MemRegion new_region) { |
aoqi@0 | 411 | |
aoqi@0 | 412 | for (int i = 0; i < _cur_covered_regions; i++) { |
aoqi@0 | 413 | if (_covered[i].start() == new_region.start()) { |
aoqi@0 | 414 | // Found a covered region with the same start as the |
aoqi@0 | 415 | // new region. The region is growing or shrinking |
aoqi@0 | 416 | // from the start of the region. |
aoqi@0 | 417 | resize_covered_region_by_start(new_region); |
aoqi@0 | 418 | return; |
aoqi@0 | 419 | } |
aoqi@0 | 420 | if (_covered[i].start() > new_region.start()) { |
aoqi@0 | 421 | break; |
aoqi@0 | 422 | } |
aoqi@0 | 423 | } |
aoqi@0 | 424 | |
aoqi@0 | 425 | int changed_region = -1; |
aoqi@0 | 426 | for (int j = 0; j < _cur_covered_regions; j++) { |
aoqi@0 | 427 | if (_covered[j].end() == new_region.end()) { |
aoqi@0 | 428 | changed_region = j; |
aoqi@0 | 429 | // This is a case where the covered region is growing or shrinking |
aoqi@0 | 430 | // at the start of the region. |
aoqi@0 | 431 | assert(changed_region != -1, "Don't expect to add a covered region"); |
aoqi@0 | 432 | assert(_covered[changed_region].byte_size() != new_region.byte_size(), |
aoqi@0 | 433 | "The sizes should be different here"); |
aoqi@0 | 434 | resize_covered_region_by_end(changed_region, new_region); |
aoqi@0 | 435 | return; |
aoqi@0 | 436 | } |
aoqi@0 | 437 | } |
aoqi@0 | 438 | // This should only be a new covered region (where no existing |
aoqi@0 | 439 | // covered region matches at the start or the end). |
aoqi@0 | 440 | assert(_cur_covered_regions < _max_covered_regions, |
aoqi@0 | 441 | "An existing region should have been found"); |
aoqi@0 | 442 | resize_covered_region_by_start(new_region); |
aoqi@0 | 443 | } |
aoqi@0 | 444 | |
// Resize a covered region that grows/shrinks at the high end (or is brand
// new): delegate to the base-class implementation, then check the guard
// page in debug builds.
void CardTableExtension::resize_covered_region_by_start(MemRegion new_region) {
  CardTableModRefBS::resize_covered_region(new_region);
  debug_only(verify_guard();)
}
aoqi@0 | 449 | |
// Resize covered region `changed_region` when it grows or shrinks at the
// low end (the end address is unchanged). Commits/uncommits card-table
// pages as needed, then updates the committed table, the card entries,
// and the covered table in that order.
void CardTableExtension::resize_covered_region_by_end(int changed_region,
                                                      MemRegion new_region) {
  assert(SafepointSynchronize::is_at_safepoint(),
    "Only expect an expansion at the low end at a GC");
  debug_only(verify_guard();)
#ifdef ASSERT
  // Sanity: changed_region is the (unique) region whose end matches.
  for (int k = 0; k < _cur_covered_regions; k++) {
    if (_covered[k].end() == new_region.end()) {
      assert(changed_region == k, "Changed region is incorrect");
      break;
    }
  }
#endif

  // Commit new or uncommit old pages, if necessary.
  if (resize_commit_uncommit(changed_region, new_region)) {
    // Set the new start of the committed region
    resize_update_committed_table(changed_region, new_region);
  }

  // Update card table entries
  resize_update_card_table_entries(changed_region, new_region);

  // Update the covered region
  resize_update_covered_table(changed_region, new_region);

  if (TraceCardTableModRefBS) {
    int ind = changed_region;
    gclog_or_tty->print_cr("CardTableModRefBS::resize_covered_region: ");
    gclog_or_tty->print_cr(" "
      " _covered[%d].start(): " INTPTR_FORMAT
      " _covered[%d].last(): " INTPTR_FORMAT,
      ind, p2i(_covered[ind].start()),
      ind, p2i(_covered[ind].last()));
    gclog_or_tty->print_cr(" "
      " _committed[%d].start(): " INTPTR_FORMAT
      " _committed[%d].last(): " INTPTR_FORMAT,
      ind, p2i(_committed[ind].start()),
      ind, p2i(_committed[ind].last()));
    gclog_or_tty->print_cr(" "
      " byte_for(start): " INTPTR_FORMAT
      " byte_for(last): " INTPTR_FORMAT,
      p2i(byte_for(_covered[ind].start())),
      p2i(byte_for(_covered[ind].last())));
    gclog_or_tty->print_cr(" "
      " addr_for(start): " INTPTR_FORMAT
      " addr_for(last): " INTPTR_FORMAT,
      p2i(addr_for((jbyte*) _committed[ind].start())),
      p2i(addr_for((jbyte*) _committed[ind].last())));
  }
  debug_only(verify_guard();)
}
aoqi@0 | 502 | |
// Commit (or, in principle, uncommit) the card-table pages needed when a
// covered region's low end moves. Returns true iff the committed table
// needs to be updated (i.e. pages were committed at a lower start).
bool CardTableExtension::resize_commit_uncommit(int changed_region,
                                                MemRegion new_region) {
  bool result = false;
  // Commit new or uncommit old pages, if necessary.
  MemRegion cur_committed = _committed[changed_region];
  assert(_covered[changed_region].end() == new_region.end(),
    "The ends of the regions are expected to match");
  // Extend the start of this _committed region to
  // to cover the start of any previous _committed region.
  // This forms overlapping regions, but never interior regions.
  HeapWord* min_prev_start = lowest_prev_committed_start(changed_region);
  if (min_prev_start < cur_committed.start()) {
    // Only really need to set start of "cur_committed" to
    // the new start (min_prev_start) but assertion checking code
    // below use cur_committed.end() so make it correct.
    MemRegion new_committed =
      MemRegion(min_prev_start, cur_committed.end());
    cur_committed = new_committed;
  }
#ifdef ASSERT
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(cur_committed.start() ==
    (HeapWord*) align_size_up((uintptr_t) cur_committed.start(),
                              os::vm_page_size()),
    "Starts should have proper alignment");
#endif

  jbyte* new_start = byte_for(new_region.start());
  // Round down because this is for the start address
  HeapWord* new_start_aligned =
    (HeapWord*)align_size_down((uintptr_t)new_start, os::vm_page_size());
  // The guard page is always committed and should not be committed over.
  // This method is used in cases where the generation is growing toward
  // lower addresses but the guard region is still at the end of the
  // card table. That still makes sense when looking for writes
  // off the end of the card table.
  if (new_start_aligned < cur_committed.start()) {
    // Expand the committed region
    //
    // Case A
    //                                          |+ guard +|
    //                          |+ cur committed +++++++++|
    //                  |+ new committed +++++++++++++++++|
    //
    // Case B
    //                                          |+ guard +|
    //                        |+ cur committed +|
    //                  |+ new committed +++++++|
    //
    // These are not expected because the calculation of the
    // cur committed region and the new committed region
    // share the same end for the covered region.
    // Case C
    //                                          |+ guard +|
    //                        |+ cur committed +|
    //                  |+ new committed +++++++++++++++++|
    // Case D
    //                                          |+ guard +|
    //                        |+ cur committed +++++++++++|
    //                  |+ new committed +++++++|

    HeapWord* new_end_for_commit =
      MIN2(cur_committed.end(), _guard_region.start());
    if(new_start_aligned < new_end_for_commit) {
      MemRegion new_committed =
        MemRegion(new_start_aligned, new_end_for_commit);
      os::commit_memory_or_exit((char*)new_committed.start(),
                                new_committed.byte_size(), !ExecMem,
                                "card table expansion");
    }
    result = true;
  } else if (new_start_aligned > cur_committed.start()) {
    // Shrink the committed region
#if 0 // uncommitting space is currently unsafe because of the interactions
      // of growing and shrinking regions.  One region A can uncommit space
      // that it owns but which is being used by another region B (maybe).
      // Region B has not committed the space because it was already
      // committed by region A.
    MemRegion uncommit_region = committed_unique_to_self(changed_region,
      MemRegion(cur_committed.start(), new_start_aligned));
    if (!uncommit_region.is_empty()) {
      if (!os::uncommit_memory((char*)uncommit_region.start(),
                               uncommit_region.byte_size())) {
        // If the uncommit fails, ignore it.  Let the
        // committed table resizing go even though the committed
        // table will over state the committed space.
      }
    }
#else
    assert(!result, "Should be false with current workaround");
#endif
  }
  assert(_committed[changed_region].end() == cur_committed.end(),
    "end should not change");
  return result;
}
aoqi@0 | 599 | |
aoqi@0 | 600 | void CardTableExtension::resize_update_committed_table(int changed_region, |
aoqi@0 | 601 | MemRegion new_region) { |
aoqi@0 | 602 | |
aoqi@0 | 603 | jbyte* new_start = byte_for(new_region.start()); |
aoqi@0 | 604 | // Set the new start of the committed region |
aoqi@0 | 605 | HeapWord* new_start_aligned = |
aoqi@0 | 606 | (HeapWord*)align_size_down((uintptr_t)new_start, |
aoqi@0 | 607 | os::vm_page_size()); |
aoqi@0 | 608 | MemRegion new_committed = MemRegion(new_start_aligned, |
aoqi@0 | 609 | _committed[changed_region].end()); |
aoqi@0 | 610 | _committed[changed_region] = new_committed; |
aoqi@0 | 611 | _committed[changed_region].set_start(new_start_aligned); |
aoqi@0 | 612 | } |
aoqi@0 | 613 | |
aoqi@0 | 614 | void CardTableExtension::resize_update_card_table_entries(int changed_region, |
aoqi@0 | 615 | MemRegion new_region) { |
aoqi@0 | 616 | debug_only(verify_guard();) |
aoqi@0 | 617 | MemRegion original_covered = _covered[changed_region]; |
aoqi@0 | 618 | // Initialize the card entries. Only consider the |
aoqi@0 | 619 | // region covered by the card table (_whole_heap) |
aoqi@0 | 620 | jbyte* entry; |
aoqi@0 | 621 | if (new_region.start() < _whole_heap.start()) { |
aoqi@0 | 622 | entry = byte_for(_whole_heap.start()); |
aoqi@0 | 623 | } else { |
aoqi@0 | 624 | entry = byte_for(new_region.start()); |
aoqi@0 | 625 | } |
aoqi@0 | 626 | jbyte* end = byte_for(original_covered.start()); |
aoqi@0 | 627 | // If _whole_heap starts at the original covered regions start, |
aoqi@0 | 628 | // this loop will not execute. |
aoqi@0 | 629 | while (entry < end) { *entry++ = clean_card; } |
aoqi@0 | 630 | } |
aoqi@0 | 631 | |
aoqi@0 | 632 | void CardTableExtension::resize_update_covered_table(int changed_region, |
aoqi@0 | 633 | MemRegion new_region) { |
aoqi@0 | 634 | // Update the covered region |
aoqi@0 | 635 | _covered[changed_region].set_start(new_region.start()); |
aoqi@0 | 636 | _covered[changed_region].set_word_size(new_region.word_size()); |
aoqi@0 | 637 | |
aoqi@0 | 638 | // reorder regions. There should only be at most 1 out |
aoqi@0 | 639 | // of order. |
aoqi@0 | 640 | for (int i = _cur_covered_regions-1 ; i > 0; i--) { |
aoqi@0 | 641 | if (_covered[i].start() < _covered[i-1].start()) { |
aoqi@0 | 642 | MemRegion covered_mr = _covered[i-1]; |
aoqi@0 | 643 | _covered[i-1] = _covered[i]; |
aoqi@0 | 644 | _covered[i] = covered_mr; |
aoqi@0 | 645 | MemRegion committed_mr = _committed[i-1]; |
aoqi@0 | 646 | _committed[i-1] = _committed[i]; |
aoqi@0 | 647 | _committed[i] = committed_mr; |
aoqi@0 | 648 | break; |
aoqi@0 | 649 | } |
aoqi@0 | 650 | } |
aoqi@0 | 651 | #ifdef ASSERT |
aoqi@0 | 652 | for (int m = 0; m < _cur_covered_regions-1; m++) { |
aoqi@0 | 653 | assert(_covered[m].start() <= _covered[m+1].start(), |
aoqi@0 | 654 | "Covered regions out of order"); |
aoqi@0 | 655 | assert(_committed[m].start() <= _committed[m+1].start(), |
aoqi@0 | 656 | "Committed regions out of order"); |
aoqi@0 | 657 | } |
aoqi@0 | 658 | #endif |
aoqi@0 | 659 | } |
aoqi@0 | 660 | |
aoqi@0 | 661 | // Returns the start of any committed region that is lower than |
aoqi@0 | 662 | // the target committed region (index ind) and that intersects the |
aoqi@0 | 663 | // target region. If none, return start of target region. |
aoqi@0 | 664 | // |
aoqi@0 | 665 | // ------------- |
aoqi@0 | 666 | // | | |
aoqi@0 | 667 | // ------------- |
aoqi@0 | 668 | // ------------ |
aoqi@0 | 669 | // | target | |
aoqi@0 | 670 | // ------------ |
aoqi@0 | 671 | // ------------- |
aoqi@0 | 672 | // | | |
aoqi@0 | 673 | // ------------- |
aoqi@0 | 674 | // ^ returns this |
aoqi@0 | 675 | // |
aoqi@0 | 676 | // ------------- |
aoqi@0 | 677 | // | | |
aoqi@0 | 678 | // ------------- |
aoqi@0 | 679 | // ------------ |
aoqi@0 | 680 | // | target | |
aoqi@0 | 681 | // ------------ |
aoqi@0 | 682 | // ------------- |
aoqi@0 | 683 | // | | |
aoqi@0 | 684 | // ------------- |
aoqi@0 | 685 | // ^ returns this |
aoqi@0 | 686 | |
aoqi@0 | 687 | HeapWord* CardTableExtension::lowest_prev_committed_start(int ind) const { |
aoqi@0 | 688 | assert(_cur_covered_regions >= 0, "Expecting at least on region"); |
aoqi@0 | 689 | HeapWord* min_start = _committed[ind].start(); |
aoqi@0 | 690 | for (int j = 0; j < ind; j++) { |
aoqi@0 | 691 | HeapWord* this_start = _committed[j].start(); |
aoqi@0 | 692 | if ((this_start < min_start) && |
aoqi@0 | 693 | !(_committed[j].intersection(_committed[ind])).is_empty()) { |
aoqi@0 | 694 | min_start = this_start; |
aoqi@0 | 695 | } |
aoqi@0 | 696 | } |
aoqi@0 | 697 | return min_start; |
aoqi@0 | 698 | } |