Thu, 14 Mar 2013 09:37:38 +0100
6733980: par compact - TraceGen1Time always shows 0.0000 seconds
Summary: Use the correct collector to retrieve accumulated gen1 trace time
Reviewed-by: johnc, jmasa
duke@435 | 1 | /* |
coleenp@4037 | 2 | * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
trims@1907 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | * questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
stefank@2314 | 25 | #include "precompiled.hpp" |
stefank@2314 | 26 | #include "gc_implementation/parallelScavenge/cardTableExtension.hpp" |
stefank@2314 | 27 | #include "gc_implementation/parallelScavenge/gcTaskManager.hpp" |
stefank@2314 | 28 | #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp" |
stefank@2314 | 29 | #include "gc_implementation/parallelScavenge/psTasks.hpp" |
stefank@2314 | 30 | #include "gc_implementation/parallelScavenge/psYoungGen.hpp" |
stefank@2314 | 31 | #include "oops/oop.inline.hpp" |
stefank@2314 | 32 | #include "oops/oop.psgc.inline.hpp" |
duke@435 | 33 | |
duke@435 | 34 | // Checks an individual oop for missing precise marks. Mark |
duke@435 | 35 | // may be either dirty or newgen. |
duke@435 | 36 | class CheckForUnmarkedOops : public OopClosure { |
coleenp@548 | 37 | private: |
coleenp@548 | 38 | PSYoungGen* _young_gen; |
duke@435 | 39 | CardTableExtension* _card_table; |
coleenp@548 | 40 | HeapWord* _unmarked_addr; |
coleenp@548 | 41 | jbyte* _unmarked_card; |
duke@435 | 42 | |
coleenp@548 | 43 | protected: |
coleenp@548 | 44 | template <class T> void do_oop_work(T* p) { |
stefank@3712 | 45 | oop obj = oopDesc::load_decode_heap_oop(p); |
coleenp@548 | 46 | if (_young_gen->is_in_reserved(obj) && |
duke@435 | 47 | !_card_table->addr_is_marked_imprecise(p)) { |
duke@435 | 48 | // Don't overwrite the first missing card mark |
duke@435 | 49 | if (_unmarked_addr == NULL) { |
duke@435 | 50 | _unmarked_addr = (HeapWord*)p; |
duke@435 | 51 | _unmarked_card = _card_table->byte_for(p); |
duke@435 | 52 | } |
duke@435 | 53 | } |
duke@435 | 54 | } |
duke@435 | 55 | |
coleenp@548 | 56 | public: |
coleenp@548 | 58 | CheckForUnmarkedOops(PSYoungGen* young_gen, CardTableExtension* card_table) : |
coleenp@548 | 58 | _young_gen(young_gen), _card_table(card_table), _unmarked_addr(NULL), _unmarked_card(NULL) { } |
coleenp@548 | 59 | |
coleenp@548 | 60 | virtual void do_oop(oop* p) { CheckForUnmarkedOops::do_oop_work(p); } |
coleenp@548 | 61 | virtual void do_oop(narrowOop* p) { CheckForUnmarkedOops::do_oop_work(p); } |
coleenp@548 | 62 | |
duke@435 | 63 | bool has_unmarked_oop() { |
duke@435 | 64 | return _unmarked_addr != NULL; |
duke@435 | 65 | } |
duke@435 | 66 | }; |
duke@435 | 67 | |
duke@435 | 68 | // Checks all objects for the existence of some type of mark, |
duke@435 | 69 | // precise or imprecise, dirty or newgen. |
duke@435 | 70 | class CheckForUnmarkedObjects : public ObjectClosure { |
coleenp@548 | 71 | private: |
coleenp@548 | 72 | PSYoungGen* _young_gen; |
duke@435 | 73 | CardTableExtension* _card_table; |
duke@435 | 74 | |
duke@435 | 75 | public: |
duke@435 | 76 | CheckForUnmarkedObjects() { |
duke@435 | 77 | ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); |
duke@435 | 78 | assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); |
duke@435 | 79 | |
duke@435 | 80 | _young_gen = heap->young_gen(); |
duke@435 | 81 | _card_table = (CardTableExtension*)heap->barrier_set(); |
duke@435 | 82 | // No point in asserting barrier set type here. Need to make CardTableExtension |
duke@435 | 83 | // a unique barrier set type. |
duke@435 | 84 | } |
duke@435 | 85 | |
duke@435 | 86 | // Card marks are not precise. The current system can leave us with |
twisti@1040 | 87 | // a mishmash of precise marks and beginning of object marks. This means |
duke@435 | 88 | // we test for missing precise marks first. If any are found, we don't |
duke@435 | 89 | // fail unless the object head is also unmarked. |
duke@435 | 90 | virtual void do_object(oop obj) { |
coleenp@548 | 91 | CheckForUnmarkedOops object_check(_young_gen, _card_table); |
coleenp@4037 | 92 | obj->oop_iterate_no_header(&object_check); |
duke@435 | 93 | if (object_check.has_unmarked_oop()) { |
duke@435 | 94 | assert(_card_table->addr_is_marked_imprecise(obj), "Found unmarked young_gen object"); |
duke@435 | 95 | } |
duke@435 | 96 | } |
duke@435 | 97 | }; |
duke@435 | 98 | |
duke@435 | 99 | // Checks for precise marking of oops as newgen. |
duke@435 | 100 | class CheckForPreciseMarks : public OopClosure { |
coleenp@548 | 101 | private: |
coleenp@548 | 102 | PSYoungGen* _young_gen; |
duke@435 | 103 | CardTableExtension* _card_table; |
duke@435 | 104 | |
coleenp@548 | 105 | protected: |
coleenp@548 | 106 | template <class T> void do_oop_work(T* p) { |
coleenp@548 | 107 | oop obj = oopDesc::load_decode_heap_oop_not_null(p); |
coleenp@548 | 108 | if (_young_gen->is_in_reserved(obj)) { |
coleenp@548 | 109 | assert(_card_table->addr_is_marked_precise(p), "Found unmarked precise oop"); |
coleenp@548 | 110 | _card_table->set_card_newgen(p); |
coleenp@548 | 111 | } |
coleenp@548 | 112 | } |
coleenp@548 | 113 | |
duke@435 | 114 | public: |
duke@435 | 115 | CheckForPreciseMarks( PSYoungGen* young_gen, CardTableExtension* card_table ) : |
duke@435 | 116 | _young_gen(young_gen), _card_table(card_table) { } |
duke@435 | 117 | |
coleenp@548 | 118 | virtual void do_oop(oop* p) { CheckForPreciseMarks::do_oop_work(p); } |
coleenp@548 | 119 | virtual void do_oop(narrowOop* p) { CheckForPreciseMarks::do_oop_work(p); } |
duke@435 | 120 | }; |
duke@435 | 121 | |
duke@435 | 122 | // We get passed the space_top value to prevent us from traversing into |
duke@435 | 123 | // the old_gen promotion labs, which cannot be safely parsed. |
duke@435 | 124 | |
jmasa@4128 | 125 | // Do not call this method if the space is empty. |
jmasa@4128 | 126 | // It is a waste to start tasks and get here only to |
jmasa@4128 | 127 | // do no work. If this method needs to be called |
jmasa@4128 | 128 | // when the space is empty, fix the calculation of |
jmasa@4128 | 129 | // end_card to allow sp_top == sp->bottom(). |
duke@435 | 130 | |
duke@435 | 131 | void CardTableExtension::scavenge_contents_parallel(ObjectStartArray* start_array, |
duke@435 | 132 | MutableSpace* sp, |
duke@435 | 133 | HeapWord* space_top, |
duke@435 | 134 | PSPromotionManager* pm, |
jmasa@3294 | 135 | uint stripe_number, |
jmasa@3294 | 136 | uint stripe_total) { |
duke@435 | 137 | int ssize = 128; // Naked constant! Work unit = 64k. |
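  // (The "64k" follows from the card size: 128 cards * 512 bytes per
  //  card = 64KB of heap scanned per stripe.)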
duke@435 | 138 | int dirty_card_count = 0; |
duke@435 | 139 | |
jmasa@4128 | 140 | // It is a waste to get here if empty. |
jmasa@4128 | 141 | assert(sp->bottom() < sp->top(), "Should not be called if empty"); |
duke@435 | 142 | oop* sp_top = (oop*)space_top; |
duke@435 | 143 | jbyte* start_card = byte_for(sp->bottom()); |
jmasa@4128 | 144 | jbyte* end_card = byte_for(sp_top - 1) + 1; |
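  // end_card is exclusive: byte_for(sp_top - 1) is the last card in use,
  // and the +1 steps one past it. This is also why an empty space is
  // rejected above: with sp_top == sp->bottom(), sp_top - 1 would fall
  // below the space.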
duke@435 | 145 | oop* last_scanned = NULL; // Prevent scanning objects more than once |
jmasa@3294 | 146 | // The slice width, ssize*stripe_total, must be |
jmasa@3294 | 147 | // consistent with the number of stripes so that the complete slice |
jmasa@3294 | 148 | // is covered. |
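  // For example, with ssize == 128 and stripe_total == 4, each slice is
  // 512 cards wide and the worker with stripe_number == 1 scans cards
  // [slice + 128, slice + 256) of every slice.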
jmasa@3294 | 149 | size_t slice_width = ssize * stripe_total; |
jmasa@3294 | 150 | for (jbyte* slice = start_card; slice < end_card; slice += slice_width) { |
duke@435 | 151 | jbyte* worker_start_card = slice + stripe_number * ssize; |
duke@435 | 152 | if (worker_start_card >= end_card) |
duke@435 | 153 | return; // We're done. |
duke@435 | 154 | |
duke@435 | 155 | jbyte* worker_end_card = worker_start_card + ssize; |
duke@435 | 156 | if (worker_end_card > end_card) |
duke@435 | 157 | worker_end_card = end_card; |
duke@435 | 158 | |
duke@435 | 159 | // We do not want to scan objects more than once. In order to accomplish |
duke@435 | 160 | // this, we assert that any object with an object head inside our 'slice' |
duke@435 | 161 | // belongs to us. We may need to extend the range of scanned cards if the |
duke@435 | 162 | // last object continues into the next 'slice'. |
duke@435 | 163 | // |
duke@435 | 164 | // Note! ending cards are exclusive! |
duke@435 | 165 | HeapWord* slice_start = addr_for(worker_start_card); |
duke@435 | 166 | HeapWord* slice_end = MIN2((HeapWord*) sp_top, addr_for(worker_end_card)); |
duke@435 | 167 | |
jmasa@4128 | 168 | #ifdef ASSERT |
jmasa@4128 | 169 | if (GCWorkerDelayMillis > 0) { |
jmasa@4128 | 170 | // Delay 1 worker so that it proceeds after all the work |
jmasa@4128 | 171 | // has been completed. |
jmasa@4128 | 172 | if (stripe_number < 2) { |
jmasa@4128 | 173 | os::sleep(Thread::current(), GCWorkerDelayMillis, false); |
jmasa@4128 | 174 | } |
jmasa@4128 | 175 | } |
jmasa@4128 | 176 | #endif |
jmasa@4128 | 177 | |
duke@435 | 178 | // If there are no objects starting within the chunk, skip it. |
duke@435 | 179 | if (!start_array->object_starts_in_range(slice_start, slice_end)) { |
duke@435 | 180 | continue; |
duke@435 | 181 | } |
twisti@1040 | 182 | // Update our beginning addr |
duke@435 | 183 | HeapWord* first_object = start_array->object_start(slice_start); |
duke@435 | 184 | debug_only(oop* first_object_within_slice = (oop*) first_object;) |
duke@435 | 185 | if (first_object < slice_start) { |
duke@435 | 186 | last_scanned = (oop*)(first_object + oop(first_object)->size()); |
duke@435 | 187 | debug_only(first_object_within_slice = last_scanned;) |
duke@435 | 188 | worker_start_card = byte_for(last_scanned); |
duke@435 | 189 | } |
duke@435 | 190 | |
duke@435 | 191 | // Update the ending addr |
duke@435 | 192 | if (slice_end < (HeapWord*)sp_top) { |
duke@435 | 193 | // The subtraction is important! An object may start precisely at slice_end. |
duke@435 | 194 | HeapWord* last_object = start_array->object_start(slice_end - 1); |
duke@435 | 195 | slice_end = last_object + oop(last_object)->size(); |
duke@435 | 196 | // worker_end_card is exclusive, so bump it one past the end of last_object's |
duke@435 | 197 | // covered span. |
duke@435 | 198 | worker_end_card = byte_for(slice_end) + 1; |
duke@435 | 199 | |
duke@435 | 200 | if (worker_end_card > end_card) |
duke@435 | 201 | worker_end_card = end_card; |
duke@435 | 202 | } |
duke@435 | 203 | |
duke@435 | 204 | assert(slice_end <= (HeapWord*)sp_top, "Last object in slice crosses space boundary"); |
duke@435 | 205 | assert(is_valid_card_address(worker_start_card), "Invalid worker start card"); |
duke@435 | 206 | assert(is_valid_card_address(worker_end_card), "Invalid worker end card"); |
duke@435 | 207 | // Note that worker_start_card >= worker_end_card is legal, and happens when |
duke@435 | 208 | // an object spans an entire slice. |
duke@435 | 209 | assert(worker_start_card <= end_card, "worker start card beyond end card"); |
duke@435 | 210 | assert(worker_end_card <= end_card, "worker end card beyond end card"); |
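    // (That case arises when the object found at slice_start begins before
    // the slice and extends past it: last_scanned, and with it
    // worker_start_card, then lands at or beyond worker_end_card, and the
    // scan loop below simply does not run.)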
duke@435 | 211 | |
duke@435 | 212 | jbyte* current_card = worker_start_card; |
duke@435 | 213 | while (current_card < worker_end_card) { |
duke@435 | 214 | // Find an unclean card. |
duke@435 | 215 | while (current_card < worker_end_card && card_is_clean(*current_card)) { |
duke@435 | 216 | current_card++; |
duke@435 | 217 | } |
duke@435 | 218 | jbyte* first_unclean_card = current_card; |
duke@435 | 219 | |
duke@435 | 220 | // Find the end of a run of contiguous unclean cards |
duke@435 | 221 | while (current_card < worker_end_card && !card_is_clean(*current_card)) { |
duke@435 | 222 | while (current_card < worker_end_card && !card_is_clean(*current_card)) { |
duke@435 | 223 | current_card++; |
duke@435 | 224 | } |
duke@435 | 225 | |
duke@435 | 226 | if (current_card < worker_end_card) { |
duke@435 | 227 | // Some objects may be large enough to span several cards. If such |
duke@435 | 228 | // an object has more than one dirty card, separated by a clean card, |
duke@435 | 229 | // we will attempt to scan it twice. The test against "last_scanned" |
duke@435 | 230 | // prevents the redundant object scan, but it does not prevent newly |
duke@435 | 231 | // marked cards from being cleaned. |
duke@435 | 232 | HeapWord* last_object_in_dirty_region = start_array->object_start(addr_for(current_card)-1); |
duke@435 | 233 | size_t size_of_last_object = oop(last_object_in_dirty_region)->size(); |
duke@435 | 234 | HeapWord* end_of_last_object = last_object_in_dirty_region + size_of_last_object; |
duke@435 | 235 | jbyte* ending_card_of_last_object = byte_for(end_of_last_object); |
duke@435 | 236 | assert(ending_card_of_last_object <= worker_end_card, "ending_card_of_last_object is greater than worker_end_card"); |
duke@435 | 237 | if (ending_card_of_last_object > current_card) { |
duke@435 | 238 | // This means the object spans the next complete card. |
duke@435 | 239 | // We need to bump the current_card to ending_card_of_last_object |
duke@435 | 240 | current_card = ending_card_of_last_object; |
duke@435 | 241 | } |
duke@435 | 242 | } |
duke@435 | 243 | } |
duke@435 | 244 | jbyte* following_clean_card = current_card; |
duke@435 | 245 | |
duke@435 | 246 | if (first_unclean_card < worker_end_card) { |
duke@435 | 247 | oop* p = (oop*) start_array->object_start(addr_for(first_unclean_card)); |
duke@435 | 248 | assert((HeapWord*)p <= addr_for(first_unclean_card), "checking"); |
duke@435 | 249 | // "p" should always be >= "last_scanned" because newly GC dirtied |
duke@435 | 250 | // cards are no longer scanned again (see comment at end |
duke@435 | 251 | // of loop on the increment of "current_card"). Test that |
duke@435 | 252 | // hypothesis before removing this code. |
duke@435 | 253 | // If this code is removed, deal with the first time through |
duke@435 | 254 | // the loop when the last_scanned is the object starting in |
duke@435 | 255 | // the previous slice. |
duke@435 | 256 | assert((p >= last_scanned) || |
duke@435 | 257 | (last_scanned == first_object_within_slice), |
duke@435 | 258 | "Should no longer be possible"); |
duke@435 | 259 | if (p < last_scanned) { |
duke@435 | 260 | // Avoid scanning more than once; this can happen because |
duke@435 | 261 | // newgen cards set by GC may be a different set than the |
duke@435 | 262 | // originally dirty set |
duke@435 | 263 | p = last_scanned; |
duke@435 | 264 | } |
duke@435 | 265 | oop* to = (oop*)addr_for(following_clean_card); |
duke@435 | 266 | |
duke@435 | 267 | // Test slice_end first! |
duke@435 | 268 | if ((HeapWord*)to > slice_end) { |
duke@435 | 269 | to = (oop*)slice_end; |
duke@435 | 270 | } else if (to > sp_top) { |
duke@435 | 271 | to = sp_top; |
duke@435 | 272 | } |
duke@435 | 273 | |
duke@435 | 274 | // we know which cards to scan, now clear them |
duke@435 | 275 | if (first_unclean_card <= worker_start_card+1) |
duke@435 | 276 | first_unclean_card = worker_start_card+1; |
duke@435 | 277 | if (following_clean_card >= worker_end_card-1) |
duke@435 | 278 | following_clean_card = worker_end_card-1; |
duke@435 | 279 | |
duke@435 | 280 | while (first_unclean_card < following_clean_card) { |
duke@435 | 281 | *first_unclean_card++ = clean_card; |
duke@435 | 282 | } |
duke@435 | 283 | |
duke@435 | 284 | const int interval = PrefetchScanIntervalInBytes; |
duke@435 | 285 | // scan all objects in the range |
duke@435 | 286 | if (interval != 0) { |
tonyp@2061 | 287 | while (p < to) { |
tonyp@2061 | 288 | Prefetch::write(p, interval); |
tonyp@2061 | 289 | oop m = oop(p); |
tonyp@2061 | 290 | assert(m->is_oop_or_null(), "check for header"); |
tonyp@2061 | 291 | m->push_contents(pm); |
tonyp@2061 | 292 | p += m->size(); |
duke@435 | 293 | } |
tonyp@2061 | 294 | pm->drain_stacks_cond_depth(); |
duke@435 | 295 | } else { |
tonyp@2061 | 296 | while (p < to) { |
tonyp@2061 | 297 | oop m = oop(p); |
tonyp@2061 | 298 | assert(m->is_oop_or_null(), "check for header"); |
tonyp@2061 | 299 | m->push_contents(pm); |
tonyp@2061 | 300 | p += m->size(); |
duke@435 | 301 | } |
tonyp@2061 | 302 | pm->drain_stacks_cond_depth(); |
duke@435 | 303 | } |
duke@435 | 304 | last_scanned = p; |
duke@435 | 305 | } |
duke@435 | 306 | // "current_card" is still the "following_clean_card" or |
duke@435 | 307 | // the current_card is >= the worker_end_card so the |
duke@435 | 308 | // loop will not execute again. |
duke@435 | 309 | assert((current_card == following_clean_card) || |
duke@435 | 310 | (current_card >= worker_end_card), |
duke@435 | 311 | "current_card should only be incremented if it still equals " |
duke@435 | 312 | "following_clean_card"); |
duke@435 | 313 | // Increment current_card so that it is not processed again. |
duke@435 | 314 | // It may now be dirty because an old-to-young pointer was |
duke@435 | 315 | // found on it and updated. If it is now dirty, it cannot |
duke@435 | 316 | // be safely cleaned in the next iteration. |
duke@435 | 317 | current_card++; |
duke@435 | 318 | } |
duke@435 | 319 | } |
duke@435 | 320 | } |
duke@435 | 321 | |
duke@435 | 322 | // This should be called before a scavenge. |
duke@435 | 323 | void CardTableExtension::verify_all_young_refs_imprecise() { |
duke@435 | 324 | CheckForUnmarkedObjects check; |
duke@435 | 325 | |
duke@435 | 326 | ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); |
duke@435 | 327 | assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); |
duke@435 | 328 | |
duke@435 | 329 | PSOldGen* old_gen = heap->old_gen(); |
duke@435 | 330 | |
duke@435 | 331 | old_gen->object_iterate(&check); |
duke@435 | 332 | } |
duke@435 | 333 | |
duke@435 | 334 | // This should be called immediately after a scavenge, before mutators resume. |
duke@435 | 335 | void CardTableExtension::verify_all_young_refs_precise() { |
duke@435 | 336 | ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); |
duke@435 | 337 | assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); |
duke@435 | 338 | |
duke@435 | 339 | PSOldGen* old_gen = heap->old_gen(); |
duke@435 | 340 | |
duke@435 | 341 | CheckForPreciseMarks check(heap->young_gen(), (CardTableExtension*)heap->barrier_set()); |
duke@435 | 342 | |
coleenp@4037 | 343 | old_gen->oop_iterate_no_header(&check); |
duke@435 | 344 | |
duke@435 | 345 | verify_all_young_refs_precise_helper(old_gen->object_space()->used_region()); |
duke@435 | 346 | } |
duke@435 | 347 | |
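// CheckForPreciseMarks re-marks each card that holds an old-to-young
// pointer with verify_card; the helper below then asserts that no other
// non-clean marks remain and restores those cards to youngergen_card.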
duke@435 | 348 | void CardTableExtension::verify_all_young_refs_precise_helper(MemRegion mr) { |
duke@435 | 349 | CardTableExtension* card_table = (CardTableExtension*)Universe::heap()->barrier_set(); |
duke@435 | 350 | // FIX ME ASSERT HERE |
duke@435 | 351 | |
duke@435 | 352 | jbyte* bot = card_table->byte_for(mr.start()); |
duke@435 | 353 | jbyte* top = card_table->byte_for(mr.end()); |
duke@435 | 354 | while (bot <= top) { |
duke@435 | 355 | assert(*bot == clean_card || *bot == verify_card, "Found unwanted or unknown card mark"); |
duke@435 | 356 | if (*bot == verify_card) |
duke@435 | 357 | *bot = youngergen_card; |
duke@435 | 358 | bot++; |
duke@435 | 359 | } |
duke@435 | 360 | } |
duke@435 | 361 | |
duke@435 | 362 | bool CardTableExtension::addr_is_marked_imprecise(void *addr) { |
duke@435 | 363 | jbyte* p = byte_for(addr); |
duke@435 | 364 | jbyte val = *p; |
duke@435 | 365 | |
duke@435 | 366 | if (card_is_dirty(val)) |
duke@435 | 367 | return true; |
duke@435 | 368 | |
duke@435 | 369 | if (card_is_newgen(val)) |
duke@435 | 370 | return true; |
duke@435 | 371 | |
duke@435 | 372 | if (card_is_clean(val)) |
duke@435 | 373 | return false; |
duke@435 | 374 | |
duke@435 | 375 | assert(false, "Found unhandled card mark type"); |
duke@435 | 376 | |
duke@435 | 377 | return false; |
duke@435 | 378 | } |
duke@435 | 379 | |
duke@435 | 380 | // Also includes verify_card |
duke@435 | 381 | bool CardTableExtension::addr_is_marked_precise(void *addr) { |
duke@435 | 382 | jbyte* p = byte_for(addr); |
duke@435 | 383 | jbyte val = *p; |
duke@435 | 384 | |
duke@435 | 385 | if (card_is_newgen(val)) |
duke@435 | 386 | return true; |
duke@435 | 387 | |
duke@435 | 388 | if (card_is_verify(val)) |
duke@435 | 389 | return true; |
duke@435 | 390 | |
duke@435 | 391 | if (card_is_clean(val)) |
duke@435 | 392 | return false; |
duke@435 | 393 | |
duke@435 | 394 | if (card_is_dirty(val)) |
duke@435 | 395 | return false; |
duke@435 | 396 | |
duke@435 | 397 | assert(false, "Found unhandled card mark type"); |
duke@435 | 398 | |
duke@435 | 399 | return false; |
duke@435 | 400 | } |
duke@435 | 401 | |
duke@435 | 402 | // Assumes that only the base or the end changes. This allows identification |
duke@435 | 403 | // of the region that is being resized. The |
duke@435 | 404 | // CardTableModRefBS::resize_covered_region() is used for the normal case |
duke@435 | 405 | // where the covered regions are growing or shrinking at the high end. |
duke@435 | 406 | // The method resize_covered_region_by_end() is analogous to |
duke@435 | 407 | // CardTableModRefBS::resize_covered_region() but |
duke@435 | 408 | // for regions that grow or shrink at the low end. |
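// For example, when a generation grows toward lower addresses (the case
// described in resize_commit_uncommit() below), its covered region keeps
// the same end while the start moves down, so the resize is handled by
// resize_covered_region_by_end().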
duke@435 | 409 | void CardTableExtension::resize_covered_region(MemRegion new_region) { |
duke@435 | 410 | |
duke@435 | 411 | for (int i = 0; i < _cur_covered_regions; i++) { |
duke@435 | 412 | if (_covered[i].start() == new_region.start()) { |
duke@435 | 413 | // Found a covered region with the same start as the |
duke@435 | 414 | // new region. The region is growing or shrinking |
duke@435 | 415 | // at the high end, with the start fixed. |
duke@435 | 416 | resize_covered_region_by_start(new_region); |
duke@435 | 417 | return; |
duke@435 | 418 | } |
duke@435 | 419 | if (_covered[i].start() > new_region.start()) { |
duke@435 | 420 | break; |
duke@435 | 421 | } |
duke@435 | 422 | } |
duke@435 | 423 | |
duke@435 | 424 | int changed_region = -1; |
duke@435 | 425 | for (int j = 0; j < _cur_covered_regions; j++) { |
duke@435 | 426 | if (_covered[j].end() == new_region.end()) { |
duke@435 | 427 | changed_region = j; |
duke@435 | 428 | // This is a case where the covered region is growing or shrinking |
duke@435 | 429 | // at the start of the region. |
duke@435 | 430 | assert(changed_region != -1, "Don't expect to add a covered region"); |
duke@435 | 431 | assert(_covered[changed_region].byte_size() != new_region.byte_size(), |
duke@435 | 432 | "The sizes should be different here"); |
duke@435 | 433 | resize_covered_region_by_end(changed_region, new_region); |
duke@435 | 434 | return; |
duke@435 | 435 | } |
duke@435 | 436 | } |
duke@435 | 437 | // This should only be a new covered region (where no existing |
duke@435 | 438 | // covered region matches at the start or the end). |
duke@435 | 439 | assert(_cur_covered_regions < _max_covered_regions, |
duke@435 | 440 | "An existing region should have been found"); |
duke@435 | 441 | resize_covered_region_by_start(new_region); |
duke@435 | 442 | } |
duke@435 | 443 | |
duke@435 | 444 | void CardTableExtension::resize_covered_region_by_start(MemRegion new_region) { |
duke@435 | 445 | CardTableModRefBS::resize_covered_region(new_region); |
duke@435 | 446 | debug_only(verify_guard();) |
duke@435 | 447 | } |
duke@435 | 448 | |
duke@435 | 449 | void CardTableExtension::resize_covered_region_by_end(int changed_region, |
duke@435 | 450 | MemRegion new_region) { |
duke@435 | 451 | assert(SafepointSynchronize::is_at_safepoint(), |
duke@435 | 452 | "Only expect an expansion at the low end at a GC"); |
duke@435 | 453 | debug_only(verify_guard();) |
duke@435 | 454 | #ifdef ASSERT |
duke@435 | 455 | for (int k = 0; k < _cur_covered_regions; k++) { |
duke@435 | 456 | if (_covered[k].end() == new_region.end()) { |
duke@435 | 457 | assert(changed_region == k, "Changed region is incorrect"); |
duke@435 | 458 | break; |
duke@435 | 459 | } |
duke@435 | 460 | } |
duke@435 | 461 | #endif |
duke@435 | 462 | |
duke@435 | 463 | // Commit new or uncommit old pages, if necessary. |
jmasa@1967 | 464 | if (resize_commit_uncommit(changed_region, new_region)) { |
jmasa@1967 | 465 | // Set the new start of the committed region |
jmasa@1967 | 466 | resize_update_committed_table(changed_region, new_region); |
jmasa@1967 | 467 | } |
duke@435 | 468 | |
duke@435 | 469 | // Update card table entries |
duke@435 | 470 | resize_update_card_table_entries(changed_region, new_region); |
duke@435 | 471 | |
duke@435 | 472 | // Update the covered region |
duke@435 | 473 | resize_update_covered_table(changed_region, new_region); |
duke@435 | 474 | |
duke@435 | 475 | if (TraceCardTableModRefBS) { |
duke@435 | 476 | int ind = changed_region; |
duke@435 | 477 | gclog_or_tty->print_cr("CardTableModRefBS::resize_covered_region: "); |
duke@435 | 478 | gclog_or_tty->print_cr(" " |
duke@435 | 479 | " _covered[%d].start(): " INTPTR_FORMAT |
duke@435 | 480 | " _covered[%d].last(): " INTPTR_FORMAT, |
duke@435 | 481 | ind, _covered[ind].start(), |
duke@435 | 482 | ind, _covered[ind].last()); |
duke@435 | 483 | gclog_or_tty->print_cr(" " |
duke@435 | 484 | " _committed[%d].start(): " INTPTR_FORMAT |
duke@435 | 485 | " _committed[%d].last(): " INTPTR_FORMAT, |
duke@435 | 486 | ind, _committed[ind].start(), |
duke@435 | 487 | ind, _committed[ind].last()); |
duke@435 | 488 | gclog_or_tty->print_cr(" " |
duke@435 | 489 | " byte_for(start): " INTPTR_FORMAT |
duke@435 | 490 | " byte_for(last): " INTPTR_FORMAT, |
duke@435 | 491 | byte_for(_covered[ind].start()), |
duke@435 | 492 | byte_for(_covered[ind].last())); |
duke@435 | 493 | gclog_or_tty->print_cr(" " |
duke@435 | 494 | " addr_for(start): " INTPTR_FORMAT |
duke@435 | 495 | " addr_for(last): " INTPTR_FORMAT, |
duke@435 | 496 | addr_for((jbyte*) _committed[ind].start()), |
duke@435 | 497 | addr_for((jbyte*) _committed[ind].last())); |
duke@435 | 498 | } |
duke@435 | 499 | debug_only(verify_guard();) |
duke@435 | 500 | } |
duke@435 | 501 | |
jmasa@1967 | 502 | bool CardTableExtension::resize_commit_uncommit(int changed_region, |
duke@435 | 503 | MemRegion new_region) { |
jmasa@1967 | 504 | bool result = false; |
duke@435 | 505 | // Commit new or uncommit old pages, if necessary. |
duke@435 | 506 | MemRegion cur_committed = _committed[changed_region]; |
duke@435 | 507 | assert(_covered[changed_region].end() == new_region.end(), |
duke@435 | 508 | "The ends of the regions are expected to match"); |
duke@435 | 509 | // Extend the start of this _committed region |
duke@435 | 510 | // to cover the start of any previous _committed region. |
duke@435 | 511 | // This forms overlapping regions, but never interior regions. |
duke@435 | 512 | HeapWord* min_prev_start = lowest_prev_committed_start(changed_region); |
duke@435 | 513 | if (min_prev_start < cur_committed.start()) { |
duke@435 | 514 | // Only really need to set start of "cur_committed" to |
duke@435 | 515 | // the new start (min_prev_start) but assertion checking code |
duke@435 | 516 | // below uses cur_committed.end(), so make it correct. |
duke@435 | 517 | MemRegion new_committed = |
duke@435 | 518 | MemRegion(min_prev_start, cur_committed.end()); |
duke@435 | 519 | cur_committed = new_committed; |
duke@435 | 520 | } |
duke@435 | 521 | #ifdef ASSERT |
duke@435 | 522 | ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); |
duke@435 | 523 | assert(cur_committed.start() == |
duke@435 | 524 | (HeapWord*) align_size_up((uintptr_t) cur_committed.start(), |
duke@435 | 525 | os::vm_page_size()), |
duke@435 | 526 | "Starts should have proper alignment"); |
duke@435 | 527 | #endif |
duke@435 | 528 | |
duke@435 | 529 | jbyte* new_start = byte_for(new_region.start()); |
duke@435 | 530 | // Round down because this is for the start address |
duke@435 | 531 | HeapWord* new_start_aligned = |
duke@435 | 532 | (HeapWord*)align_size_down((uintptr_t)new_start, os::vm_page_size()); |
duke@435 | 533 | // The guard page is always committed and should not be committed over. |
duke@435 | 534 | // This method is used in cases where the generation is growing toward |
duke@435 | 535 | // lower addresses but the guard region is still at the end of the |
duke@435 | 536 | // card table. That still makes sense when looking for writes |
duke@435 | 537 | // off the end of the card table. |
duke@435 | 538 | if (new_start_aligned < cur_committed.start()) { |
duke@435 | 539 | // Expand the committed region |
duke@435 | 540 | // |
duke@435 | 541 | // Case A |
duke@435 | 542 | // |+ guard +| |
duke@435 | 543 | // |+ cur committed +++++++++| |
duke@435 | 544 | // |+ new committed +++++++++++++++++| |
duke@435 | 545 | // |
duke@435 | 546 | // Case B |
duke@435 | 547 | // |+ guard +| |
duke@435 | 548 | // |+ cur committed +| |
duke@435 | 549 | // |+ new committed +++++++| |
duke@435 | 550 | // |
duke@435 | 551 | // These are not expected because the cur committed region |
duke@435 | 552 | // and the new committed region are computed from the same |
duke@435 | 553 | // end of the covered region. |
duke@435 | 554 | // Case C |
duke@435 | 555 | // |+ guard +| |
duke@435 | 556 | // |+ cur committed +| |
duke@435 | 557 | // |+ new committed +++++++++++++++++| |
duke@435 | 558 | // Case D |
duke@435 | 559 | // |+ guard +| |
duke@435 | 560 | // |+ cur committed +++++++++++| |
duke@435 | 561 | // |+ new committed +++++++| |
duke@435 | 562 | |
duke@435 | 563 | HeapWord* new_end_for_commit = |
duke@435 | 564 | MIN2(cur_committed.end(), _guard_region.start()); |
jmasa@698 | 565 | if (new_start_aligned < new_end_for_commit) { |
jmasa@698 | 566 | MemRegion new_committed = |
jmasa@698 | 567 | MemRegion(new_start_aligned, new_end_for_commit); |
duke@435 | 568 | if (!os::commit_memory((char*)new_committed.start(), |
duke@435 | 569 | new_committed.byte_size())) { |
duke@435 | 570 | vm_exit_out_of_memory(new_committed.byte_size(), |
duke@435 | 571 | "card table expansion"); |
duke@435 | 572 | } |
duke@435 | 573 | } |
jmasa@1967 | 574 | result = true; |
duke@435 | 575 | } else if (new_start_aligned > cur_committed.start()) { |
duke@435 | 576 | // Shrink the committed region |
jmasa@1967 | 577 | #if 0 // uncommitting space is currently unsafe because of the interactions |
jmasa@1967 | 578 | // of growing and shrinking regions. One region A can uncommit space |
jmasa@1967 | 579 | // that it owns but which is being used by another region B (maybe). |
jmasa@1967 | 580 | // Region B has not committed the space because it was already |
jmasa@1967 | 581 | // committed by region A. |
duke@435 | 582 | MemRegion uncommit_region = committed_unique_to_self(changed_region, |
duke@435 | 583 | MemRegion(cur_committed.start(), new_start_aligned)); |
duke@435 | 584 | if (!uncommit_region.is_empty()) { |
duke@435 | 585 | if (!os::uncommit_memory((char*)uncommit_region.start(), |
duke@435 | 586 | uncommit_region.byte_size())) { |
jmasa@1967 | 587 | // If the uncommit fails, ignore it. Let the |
jmasa@1967 | 588 | // committed table resizing go even though the committed |
jmasa@1967 | 589 | // table will overstate the committed space. |
duke@435 | 590 | } |
duke@435 | 591 | } |
jmasa@1967 | 592 | #else |
jmasa@1967 | 593 | assert(!result, "Should be false with current workaround"); |
jmasa@1967 | 594 | #endif |
duke@435 | 595 | } |
duke@435 | 596 | assert(_committed[changed_region].end() == cur_committed.end(), |
duke@435 | 597 | "end should not change"); |
jmasa@1967 | 598 | return result; |
duke@435 | 599 | } |
duke@435 | 600 | |
duke@435 | 601 | void CardTableExtension::resize_update_committed_table(int changed_region, |
duke@435 | 602 | MemRegion new_region) { |
duke@435 | 603 | |
duke@435 | 604 | jbyte* new_start = byte_for(new_region.start()); |
duke@435 | 605 | // Set the new start of the committed region |
duke@435 | 606 | HeapWord* new_start_aligned = |
duke@435 | 607 | (HeapWord*)align_size_down((uintptr_t)new_start, |
duke@435 | 608 | os::vm_page_size()); |
duke@435 | 609 | MemRegion new_committed = MemRegion(new_start_aligned, |
duke@435 | 610 | _committed[changed_region].end()); |
duke@435 | 611 | _committed[changed_region] = new_committed; |
duke@435 | 612 | _committed[changed_region].set_start(new_start_aligned); |
duke@435 | 613 | } |
duke@435 | 614 | |
duke@435 | 615 | void CardTableExtension::resize_update_card_table_entries(int changed_region, |
duke@435 | 616 | MemRegion new_region) { |
duke@435 | 617 | debug_only(verify_guard();) |
duke@435 | 618 | MemRegion original_covered = _covered[changed_region]; |
duke@435 | 619 | // Initialize the card entries. Only consider the |
duke@435 | 620 | // region covered by the card table (_whole_heap) |
duke@435 | 621 | jbyte* entry; |
duke@435 | 622 | if (new_region.start() < _whole_heap.start()) { |
duke@435 | 623 | entry = byte_for(_whole_heap.start()); |
duke@435 | 624 | } else { |
duke@435 | 625 | entry = byte_for(new_region.start()); |
duke@435 | 626 | } |
duke@435 | 627 | jbyte* end = byte_for(original_covered.start()); |
duke@435 | 628 | // If _whole_heap starts at the original covered region's start, |
duke@435 | 629 | // this loop will not execute. |
duke@435 | 630 | while (entry < end) { *entry++ = clean_card; } |
duke@435 | 631 | } |
duke@435 | 632 | |
duke@435 | 633 | void CardTableExtension::resize_update_covered_table(int changed_region, |
duke@435 | 634 | MemRegion new_region) { |
duke@435 | 635 | // Update the covered region |
duke@435 | 636 | _covered[changed_region].set_start(new_region.start()); |
duke@435 | 637 | _covered[changed_region].set_word_size(new_region.word_size()); |
duke@435 | 638 | |
duke@435 | 639 | // Reorder regions. There should be at most one out |
duke@435 | 640 | // of order. |
duke@435 | 641 | for (int i = _cur_covered_regions-1 ; i > 0; i--) { |
duke@435 | 642 | if (_covered[i].start() < _covered[i-1].start()) { |
duke@435 | 643 | MemRegion covered_mr = _covered[i-1]; |
duke@435 | 644 | _covered[i-1] = _covered[i]; |
duke@435 | 645 | _covered[i] = covered_mr; |
duke@435 | 646 | MemRegion committed_mr = _committed[i-1]; |
duke@435 | 647 | _committed[i-1] = _committed[i]; |
duke@435 | 648 | _committed[i] = committed_mr; |
duke@435 | 649 | break; |
duke@435 | 650 | } |
duke@435 | 651 | } |
duke@435 | 652 | #ifdef ASSERT |
duke@435 | 653 | for (int m = 0; m < _cur_covered_regions-1; m++) { |
duke@435 | 654 | assert(_covered[m].start() <= _covered[m+1].start(), |
duke@435 | 655 | "Covered regions out of order"); |
duke@435 | 656 | assert(_committed[m].start() <= _committed[m+1].start(), |
duke@435 | 657 | "Committed regions out of order"); |
duke@435 | 658 | } |
duke@435 | 659 | #endif |
duke@435 | 660 | } |
duke@435 | 661 | |
duke@435 | 662 | // Returns the start of any committed region that is lower than |
duke@435 | 663 | // the target committed region (index ind) and that intersects the |
duke@435 | 664 | // target region. If none, return start of target region. |
duke@435 | 665 | // |
duke@435 | 666 | // ------------- |
duke@435 | 667 | // | | |
duke@435 | 668 | // ------------- |
duke@435 | 669 | // ------------ |
duke@435 | 670 | // | target | |
duke@435 | 671 | // ------------ |
duke@435 | 672 | // ------------- |
duke@435 | 673 | // | | |
duke@435 | 674 | // ------------- |
duke@435 | 675 | // ^ returns this |
duke@435 | 676 | // |
duke@435 | 677 | // ------------- |
duke@435 | 678 | // | | |
duke@435 | 679 | // ------------- |
duke@435 | 680 | // ------------ |
duke@435 | 681 | // | target | |
duke@435 | 682 | // ------------ |
duke@435 | 683 | // ------------- |
duke@435 | 684 | // | | |
duke@435 | 685 | // ------------- |
duke@435 | 686 | // ^ returns this |
duke@435 | 687 | |
duke@435 | 688 | HeapWord* CardTableExtension::lowest_prev_committed_start(int ind) const { |
duke@435 | 689 | assert(_cur_covered_regions >= 0, "Expecting at least one region"); |
duke@435 | 690 | HeapWord* min_start = _committed[ind].start(); |
duke@435 | 691 | for (int j = 0; j < ind; j++) { |
duke@435 | 692 | HeapWord* this_start = _committed[j].start(); |
duke@435 | 693 | if ((this_start < min_start) && |
duke@435 | 694 | !(_committed[j].intersection(_committed[ind])).is_empty()) { |
duke@435 | 695 | min_start = this_start; |
duke@435 | 696 | } |
duke@435 | 697 | } |
duke@435 | 698 | return min_start; |
duke@435 | 699 | } |