src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp

author       ysr
date         Thu, 14 Apr 2011 12:10:15 -0700
changeset    2788:c69b1043dfb1
parent       2314:f95d63e2154a
child        2819:c48ad6ab8bdf
permissions  -rw-r--r--

7036482: clear argument is redundant and unused in cardtable methods
Summary: Removed the unused clear argument from various cardtable methods, and removed the unused mod_oop_in_space_iterate method. Unrelated to the synopsis, added a pair of clarifying parentheses in the AllocationStats constructor.
Reviewed-by: brutisso, jcoomes
/*
 * Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/space.inline.hpp"
#include "memory/universe.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/virtualspace.hpp"

void CardTableModRefBS::par_non_clean_card_iterate_work(Space* sp, MemRegion mr,
                                                        DirtyCardToOopClosure* dcto_cl,
                                                        MemRegionClosure* cl,
                                                        int n_threads) {
  if (n_threads > 0) {
    assert((n_threads == 1 && ParallelGCThreads == 0) ||
           n_threads <= (int)ParallelGCThreads,
           "# worker threads != # requested!");
    // Make sure the LNC array is valid for the space.
    jbyte**   lowest_non_clean;
    uintptr_t lowest_non_clean_base_chunk_index;
    size_t    lowest_non_clean_chunk_size;
    get_LNC_array_for_space(sp, lowest_non_clean,
                            lowest_non_clean_base_chunk_index,
                            lowest_non_clean_chunk_size);

    int n_strides = n_threads * StridesPerThread;
    SequentialSubTasksDone* pst = sp->par_seq_tasks();
    pst->set_n_threads(n_threads);
    pst->set_n_tasks(n_strides);

    int stride = 0;
    while (!pst->is_task_claimed(/* reference */ stride)) {
      process_stride(sp, mr, stride, n_strides, dcto_cl, cl,
                     lowest_non_clean,
                     lowest_non_clean_base_chunk_index,
                     lowest_non_clean_chunk_size);
    }
    if (pst->all_tasks_completed()) {
      // Clear lowest_non_clean array for next time.
      intptr_t first_chunk_index = addr_to_chunk_index(mr.start());
      uintptr_t last_chunk_index = addr_to_chunk_index(mr.last());
      for (uintptr_t ch = first_chunk_index; ch <= last_chunk_index; ch++) {
        intptr_t ind = ch - lowest_non_clean_base_chunk_index;
        assert(0 <= ind && ind < (intptr_t)lowest_non_clean_chunk_size,
               "Bounds error");
        lowest_non_clean[ind] = NULL;
      }
    }
  }
}
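
// A minimal stand-alone sketch (hypothetical, not the HotSpot API) of the
// task hand-out that the claiming loop above depends on: each caller of
// is_task_claimed() receives a distinct stride index until all n_strides
// indices are exhausted. Modeled here with a C++11 atomic counter, and
// guarded out of compilation since it exists only as illustration.
#if 0
#include <atomic>

struct NaiveSubTasks {              // hypothetical stand-in for SequentialSubTasksDone
  std::atomic<int> _next;
  int _n_tasks;
  explicit NaiveSubTasks(int n) : _next(0), _n_tasks(n) {}
  // Mirrors the negated convention above: returns false and sets 'task'
  // while unclaimed tasks remain, true once every stride is handed out.
  bool is_task_claimed(int& task) {
    task = _next.fetch_add(1);      // atomic: no two threads get the same index
    return task >= _n_tasks;
  }
};
#endif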

void
CardTableModRefBS::
process_stride(Space* sp,
               MemRegion used,
               jint stride, int n_strides,
               DirtyCardToOopClosure* dcto_cl,
               MemRegionClosure* cl,
               jbyte** lowest_non_clean,
               uintptr_t lowest_non_clean_base_chunk_index,
               size_t lowest_non_clean_chunk_size) {
  // We don't have to go downwards here; it wouldn't help anyway,
  // because of parallelism.

  // Find the first card address of the first chunk in the stride that is
  // at least "bottom" of the used region.
  jbyte*    start_card  = byte_for(used.start());
  jbyte*    end_card    = byte_after(used.last());
  uintptr_t start_chunk = addr_to_chunk_index(used.start());
  uintptr_t start_chunk_stride_num = start_chunk % n_strides;
  jbyte* chunk_card_start;

  if ((uintptr_t)stride >= start_chunk_stride_num) {
    chunk_card_start = (jbyte*)(start_card +
                                (stride - start_chunk_stride_num) *
                                CardsPerStrideChunk);
  } else {
    // Go ahead to the next chunk group boundary, then to the requested stride.
    chunk_card_start = (jbyte*)(start_card +
                                (n_strides - start_chunk_stride_num + stride) *
                                CardsPerStrideChunk);
  }

  while (chunk_card_start < end_card) {
    // We don't have to go downwards here; it wouldn't help anyway,
    // because of parallelism.  (We take care with "min_done"; see below.)
    // Invariant: chunk_mr should be fully contained within the "used" region.
    jbyte*    chunk_card_end = chunk_card_start + CardsPerStrideChunk;
    MemRegion chunk_mr       = MemRegion(addr_for(chunk_card_start),
                                         chunk_card_end >= end_card ?
                                           used.end() : addr_for(chunk_card_end));
    assert(chunk_mr.word_size() > 0, "[chunk_card_start > used_end)");
    assert(used.contains(chunk_mr), "chunk_mr should be subset of used");

    // Process the chunk.
    process_chunk_boundaries(sp,
                             dcto_cl,
                             chunk_mr,
                             used,
                             lowest_non_clean,
                             lowest_non_clean_base_chunk_index,
                             lowest_non_clean_chunk_size);

    non_clean_card_iterate_work(chunk_mr, cl);

    // Find the next chunk of the stride.
    chunk_card_start += CardsPerStrideChunk * n_strides;
  }
}
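
// Illustrative sketch (hypothetical helper, not part of HotSpot): the
// first-chunk selection above is pure index arithmetic. Given the stride
// number of the chunk containing start_card, this returns the card offset
// (from start_card) of the first chunk belonging to 'stride'; compiled out
// since it only restates the if/else above in a single expression.
#if 0
static size_t first_chunk_card_offset(uintptr_t stride,
                                      uintptr_t start_chunk_stride_num,
                                      uintptr_t n_strides,
                                      size_t    cards_per_chunk) {
  uintptr_t chunks_ahead = (stride >= start_chunk_stride_num)
    ? stride - start_chunk_stride_num               // within the first chunk group
    : n_strides - start_chunk_stride_num + stride;  // wrap to the next group
  return chunks_ahead * cards_per_chunk;
}
#endif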

void
CardTableModRefBS::
process_chunk_boundaries(Space* sp,
                         DirtyCardToOopClosure* dcto_cl,
                         MemRegion chunk_mr,
                         MemRegion used,
                         jbyte** lowest_non_clean,
                         uintptr_t lowest_non_clean_base_chunk_index,
                         size_t    lowest_non_clean_chunk_size)
{
  // We must worry about the chunk boundaries.

  // First, set our max_to_do:
  HeapWord* max_to_do = NULL;
  uintptr_t cur_chunk_index = addr_to_chunk_index(chunk_mr.start());
  cur_chunk_index = cur_chunk_index - lowest_non_clean_base_chunk_index;

  if (chunk_mr.end() < used.end()) {
    // This is not the last chunk in the used region.  What is the last
    // object?
    HeapWord* last_block = sp->block_start(chunk_mr.end());
    assert(last_block <= chunk_mr.end(), "In case this property changes.");
    if (last_block == chunk_mr.end()
        || !sp->block_is_obj(last_block)) {
      max_to_do = chunk_mr.end();

    } else {
      // It is an object and starts before the end of the current chunk.
      // last_obj_card is the card corresponding to the start of the last
      // object in the chunk.  Note that the last object may not start in
      // the chunk.
      jbyte* last_obj_card = byte_for(last_block);
      if (!card_may_have_been_dirty(*last_obj_card)) {
        // The card containing the head is not dirty.  Any marks in
        // subsequent cards still in this chunk must have been made
        // precisely; we can cap processing at the end.
        max_to_do = chunk_mr.end();
      } else {
        // The last object must be considered dirty, and extends onto the
        // following chunk.  Look for a dirty card in that chunk that will
        // bound our processing.
        jbyte* limit_card = NULL;
        size_t last_block_size = sp->block_size(last_block);
        jbyte* last_card_of_last_obj =
          byte_for(last_block + last_block_size - 1);
        jbyte* first_card_of_next_chunk = byte_for(chunk_mr.end());
        // This search potentially goes a long distance looking
        // for the next card that will be scanned.  For example,
        // an object that is an array of primitives will not
        // have any cards covering regions interior to the array
        // that will need to be scanned.  The scan can be terminated
        // at the last card of the next chunk.  That would leave
        // limit_card as NULL and would result in "max_to_do"
        // being set with the LNC value or with the end
        // of the last block.
        jbyte* last_card_of_next_chunk = first_card_of_next_chunk +
          CardsPerStrideChunk;
        assert(byte_for(chunk_mr.end()) - byte_for(chunk_mr.start())
               == CardsPerStrideChunk, "last card of next chunk may be wrong");
        jbyte* last_card_to_check = (jbyte*) MIN2(last_card_of_last_obj,
                                                  last_card_of_next_chunk);
        for (jbyte* cur = first_card_of_next_chunk;
             cur <= last_card_to_check; cur++) {
          if (card_will_be_scanned(*cur)) {
            limit_card = cur; break;
          }
        }
        assert(0 <= cur_chunk_index+1 &&
               cur_chunk_index+1 < lowest_non_clean_chunk_size,
               "Bounds error.");
        // LNC for the next chunk
        jbyte* lnc_card = lowest_non_clean[cur_chunk_index+1];
        if (limit_card == NULL) {
          limit_card = lnc_card;
        }
        if (limit_card != NULL) {
          if (lnc_card != NULL) {
            limit_card = (jbyte*)MIN2((intptr_t)limit_card,
                                      (intptr_t)lnc_card);
          }
          max_to_do = addr_for(limit_card);
        } else {
          max_to_do = last_block + last_block_size;
        }
      }
    }
    assert(max_to_do != NULL, "OOPS!");
  } else {
    max_to_do = used.end();
  }
  // Now we can set the closure we're using so it doesn't go beyond
  // max_to_do.
  dcto_cl->set_min_done(max_to_do);
#ifndef PRODUCT
  dcto_cl->set_last_bottom(max_to_do);
#endif

  // Now we set *our* lowest_non_clean entry.
  // Find the object that spans our boundary, if one exists.
  // Nothing to do on the first chunk.
  if (chunk_mr.start() > used.start()) {
    // first_block is the block possibly spanning the chunk start
    HeapWord* first_block = sp->block_start(chunk_mr.start());
    // Does the block span the start of the chunk and is it
    // an object?
    if (first_block < chunk_mr.start() &&
        sp->block_is_obj(first_block)) {
      jbyte* first_dirty_card = NULL;
      jbyte* last_card_of_first_obj =
        byte_for(first_block + sp->block_size(first_block) - 1);
      jbyte* first_card_of_cur_chunk = byte_for(chunk_mr.start());
      jbyte* last_card_of_cur_chunk  = byte_for(chunk_mr.last());
      jbyte* last_card_to_check =
        (jbyte*) MIN2((intptr_t) last_card_of_cur_chunk,
                      (intptr_t) last_card_of_first_obj);
      for (jbyte* cur = first_card_of_cur_chunk;
           cur <= last_card_to_check; cur++) {
        if (card_will_be_scanned(*cur)) {
          first_dirty_card = cur; break;
        }
      }
      if (first_dirty_card != NULL) {
        assert(0 <= cur_chunk_index &&
               cur_chunk_index < lowest_non_clean_chunk_size,
               "Bounds error.");
        lowest_non_clean[cur_chunk_index] = first_dirty_card;
      }
    }
  }
}
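
// Illustrative sketch (hypothetical helper, not part of HotSpot): the two
// boundary searches above share one shape -- walk a bounded card range and
// return the first card that will be scanned, or NULL if none. Compiled out,
// since it only restates those loops in isolation; the predicate parameter
// stands in for card_will_be_scanned.
#if 0
static jbyte* find_first_scannable_card(jbyte* first, jbyte* last,
                                        bool (*will_be_scanned)(jbyte)) {
  for (jbyte* cur = first; cur <= last; cur++) {
    if (will_be_scanned(*cur)) {
      return cur;            // e.g. first_dirty_card / limit_card above
    }
  }
  return NULL;               // no scannable card in [first, last]
}
#endif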

void
CardTableModRefBS::
get_LNC_array_for_space(Space* sp,
                        jbyte**& lowest_non_clean,
                        uintptr_t& lowest_non_clean_base_chunk_index,
                        size_t& lowest_non_clean_chunk_size) {

  int i = find_covering_region_containing(sp->bottom());
  MemRegion covered = _covered[i];
  size_t n_chunks = chunks_to_cover(covered);

  // Only the first thread to obtain the lock will resize the
  // LNC array for the covered region.  Any later expansion can't affect
  // the used_at_save_marks region.
  // (I observed a bug in which the first thread to execute this would
  // resize, and then it would cause "expand_and_allocate" calls that would
  // increase the number of chunks in the covered region.  Then a second
  // thread would come and execute this, see that the size didn't match,
  // and free and allocate again.  So the first thread would be using a
  // freed "_lowest_non_clean" array.)

  // Do a dirty read here.  If we pass the conditional then take the rare
  // event lock and do the read again in case some other thread had already
  // succeeded and done the resize.
  int cur_collection = Universe::heap()->total_collections();
  if (_last_LNC_resizing_collection[i] != cur_collection) {
    MutexLocker x(ParGCRareEvent_lock);
    if (_last_LNC_resizing_collection[i] != cur_collection) {
      if (_lowest_non_clean[i] == NULL ||
          n_chunks != _lowest_non_clean_chunk_size[i]) {

        // Should we delete the old?
        if (_lowest_non_clean[i] != NULL) {
          assert(n_chunks != _lowest_non_clean_chunk_size[i],
                 "logical consequence");
          FREE_C_HEAP_ARRAY(CardPtr, _lowest_non_clean[i]);
          _lowest_non_clean[i] = NULL;
        }
        // Now allocate a new one if necessary.
        if (_lowest_non_clean[i] == NULL) {
          _lowest_non_clean[i]                  = NEW_C_HEAP_ARRAY(CardPtr, n_chunks);
          _lowest_non_clean_chunk_size[i]       = n_chunks;
          _lowest_non_clean_base_chunk_index[i] = addr_to_chunk_index(covered.start());
          for (int j = 0; j < (int)n_chunks; j++)
            _lowest_non_clean[i][j] = NULL;
        }
      }
      _last_LNC_resizing_collection[i] = cur_collection;
    }
  }
  // In any case, now do the initialization.
  lowest_non_clean                  = _lowest_non_clean[i];
  lowest_non_clean_base_chunk_index = _lowest_non_clean_base_chunk_index[i];
  lowest_non_clean_chunk_size      = _lowest_non_clean_chunk_size[i];
}
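
// Illustrative sketch (hypothetical names, modeled with std::mutex rather
// than HotSpot's MutexLocker): the resize guard above is a double-checked
// pattern keyed on the collection count instead of a "done" flag -- a dirty
// read filters the common case, and the check is repeated under the lock so
// that only one thread per collection performs the resize. Compiled out
// since it is illustration only.
#if 0
#include <mutex>

struct LNCResizeGuard {
  int        _last_resizing_collection;
  std::mutex _rare_event_lock;
  LNCResizeGuard() : _last_resizing_collection(-1) {}

  void resize_once(int cur_collection) {                  // pass total_collections()
    if (_last_resizing_collection != cur_collection) {    // dirty read, no lock
      std::lock_guard<std::mutex> x(_rare_event_lock);
      if (_last_resizing_collection != cur_collection) {  // re-check under lock
        // ... free and reallocate the LNC array here ...
        _last_resizing_collection = cur_collection;       // publish for this GC
      }
    }
  }
};
#endif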
