src/share/vm/memory/cardTableModRefBS.cpp

changeset 643:35ca13d63fe8
parent    548:ba764ed4b6f2
child     670:9c2ecc2ffb12
child     781:bb254e57d2f4
diff from 625:d1635bf93939 to 643:35ca13d63fe8
@@ -194,10 +194,12 @@
 void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
   // We don't change the start of a region, only the end.
   assert(_whole_heap.contains(new_region),
          "attempt to cover area not in reserved area");
   debug_only(verify_guard();)
+  // collided is true if the expansion would push into another committed region
+  debug_only(bool collided = false;)
   int const ind = find_covering_region_by_base(new_region.start());
   MemRegion const old_region = _covered[ind];
   assert(old_region.start() == new_region.start(), "just checking");
   if (new_region.word_size() != old_region.word_size()) {
     // Commit new or uncommit old pages, if necessary.
@@ -209,16 +211,40 @@
     if (max_prev_end > cur_committed.end()) {
       cur_committed.set_end(max_prev_end);
     }
     // Align the end up to a page size (starts are already aligned).
     jbyte* const new_end = byte_after(new_region.last());
-    HeapWord* const new_end_aligned =
+    HeapWord* new_end_aligned =
       (HeapWord*) align_size_up((uintptr_t)new_end, _page_size);
     assert(new_end_aligned >= (HeapWord*) new_end,
            "align up, but less");
+    int ri = 0;
+    for (ri = 0; ri < _cur_covered_regions; ri++) {
+      if (ri != ind) {
+        if (_committed[ri].contains(new_end_aligned)) {
+          assert((new_end_aligned >= _committed[ri].start()) &&
+                 (_committed[ri].start() > _committed[ind].start()),
+                 "New end of committed region is inconsistent");
+          new_end_aligned = _committed[ri].start();
+          assert(new_end_aligned > _committed[ind].start(),
+                 "New end of committed region is before start");
+          debug_only(collided = true;)
+          // Should only collide with 1 region
+          break;
+        }
+      }
+    }
+#ifdef ASSERT
+    for (++ri; ri < _cur_covered_regions; ri++) {
+      assert(!_committed[ri].contains(new_end_aligned),
+             "New end of committed region is in a second committed region");
+    }
+#endif
     // The guard page is always committed and should not be committed over.
-    HeapWord* const new_end_for_commit = MIN2(new_end_aligned, _guard_region.start());
+    HeapWord* const new_end_for_commit = MIN2(new_end_aligned,
+                                              _guard_region.start());
+
     if (new_end_for_commit > cur_committed.end()) {
       // Must commit new pages.
       MemRegion const new_committed =
         MemRegion(cur_committed.end(), new_end_for_commit);
 
@@ -237,13 +263,15 @@
         committed_unique_to_self(ind, MemRegion(new_end_aligned,
                                                 cur_committed.end()));
       if (!uncommit_region.is_empty()) {
         if (!os::uncommit_memory((char*)uncommit_region.start(),
                                  uncommit_region.byte_size())) {
-          // Do better than this for Merlin
-          vm_exit_out_of_memory(uncommit_region.byte_size(),
-            "card table contraction");
+          assert(false, "Card table contraction failed");
+          // The call failed so don't change the end of the
+          // committed region. This is better than taking the
+          // VM down.
+          new_end_aligned = _committed[ind].end();
         }
       }
     }
     // In any case, we can reset the end of the current committed entry.
     _committed[ind].set_end(new_end_aligned);
@@ -255,12 +283,29 @@
     } else {
       entry = byte_after(old_region.last());
     }
     assert(index_for(new_region.last()) < (int) _guard_index,
       "The guard card will be overwritten");
-    jbyte* const end = byte_after(new_region.last());
+    // This line commented out cleans the newly expanded region and
+    // not the aligned up expanded region.
+    // jbyte* const end = byte_after(new_region.last());
+    jbyte* const end = (jbyte*) new_end_for_commit;
+    assert((end >= byte_after(new_region.last())) || collided,
+      "Expect to be beyond new region unless impacting another region");
     // do nothing if we resized downward.
+#ifdef ASSERT
+    for (int ri = 0; ri < _cur_covered_regions; ri++) {
+      if (ri != ind) {
+        // The end of the new committed region should not
+        // be in any existing region unless it matches
+        // the start of the next region.
+        assert(!_committed[ri].contains(end) ||
+               (_committed[ri].start() == (HeapWord*) end),
+               "Overlapping committed regions");
+      }
+    }
+#endif
     if (entry < end) {
       memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
     }
   }
   // In any case, the covered size changes.
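
The heart of this change is the collision check: after the committed end of the resized region is aligned up to a page boundary, the other committed regions are scanned, and if the aligned end lands inside one of them it is pulled back to that region's start so the two committed ranges never overlap (the debug-only collided flag records this). Below is a minimal standalone sketch of that clamping logic; it is not HotSpot code, and Region, align_up and clamped_commit_end are simplified stand-ins for MemRegion, align_size_up and the loop over _committed[] added in the second hunk.

// Minimal sketch only -- not HotSpot code.  Illustrates clamping a
// page-aligned expansion end so it stays out of a neighbouring
// committed region, as the patched resize_covered_region() does.
#include <cstdint>
#include <cstdio>

struct Region {                      // simplified stand-in for MemRegion
  uintptr_t start;
  uintptr_t end;                     // exclusive
  bool contains(uintptr_t p) const { return p >= start && p < end; }
};

// Round value up to a multiple of alignment (a power of two); plays the
// role of align_size_up in the patch.
static uintptr_t align_up(uintptr_t value, uintptr_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}

// Page-align the requested end for region 'ind'; if the aligned end lands
// inside another committed region, clamp it to that region's start so the
// committed ranges stay disjoint.
static uintptr_t clamped_commit_end(const Region* committed, int n, int ind,
                                    uintptr_t new_end, uintptr_t page) {
  uintptr_t aligned = align_up(new_end, page);
  for (int ri = 0; ri < n; ri++) {
    if (ri != ind && committed[ri].contains(aligned)) {
      aligned = committed[ri].start;  // stop at the neighbour's start
      break;                          // should only collide with one region
    }
  }
  return aligned;
}

int main() {
  const uintptr_t page = 0x1000;
  Region committed[2] = {
    { 0x10000, 0x12000 },            // region being resized (ind == 0)
    { 0x12800, 0x14000 }             // neighbouring committed region
  };
  // Requested end 0x12700 aligns up to 0x13000, which falls inside the
  // neighbour, so the commit end is clamped back to 0x12800.
  uintptr_t end = clamped_commit_end(committed, 2, 0, 0x12700, page);
  printf("commit end = 0x%lx\n", (unsigned long) end);
  return 0;
}

Compiled with any C++ compiler, the example prints commit end = 0x12800: the page-aligned end 0x13000 would overlap the neighbouring committed region, so it is clamped to that region's start, which still covers the requested end 0x12700.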
