--- a/src/share/vm/gc_implementation/g1/heapRegion.inline.hpp	Mon Aug 04 15:04:45 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/heapRegion.inline.hpp	Wed Aug 06 09:55:16 2014 +0200
@@ -94,26 +94,37 @@
 inline bool
 HeapRegion::block_is_obj(const HeapWord* p) const {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  return !g1h->is_obj_dead(oop(p), this);
+  if (ClassUnloadingWithConcurrentMark) {
+    return !g1h->is_obj_dead(oop(p), this);
+  }
+  return p < top();
 }
 
 inline size_t
 HeapRegion::block_size(const HeapWord *addr) const {
+  if (addr == top()) {
+    return pointer_delta(end(), addr);
+  }
+
+  if (block_is_obj(addr)) {
+    return oop(addr)->size();
+  }
+
+  assert(ClassUnloadingWithConcurrentMark,
+         err_msg("All blocks should be objects if G1 Class Unloading isn't used. "
+                 "HR: ["PTR_FORMAT", "PTR_FORMAT", "PTR_FORMAT") "
+                 "addr: " PTR_FORMAT,
+                 p2i(bottom()), p2i(top()), p2i(end()), p2i(addr)));
+
   // Old regions' dead objects may have dead classes
   // We need to find the next live object in some other
   // manner than getting the oop size
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  if (g1h->is_obj_dead(oop(addr), this)) {
-    HeapWord* next = g1h->concurrent_mark()->prevMarkBitMap()->
-      getNextMarkedWordAddress(addr, prev_top_at_mark_start());
+  HeapWord* next = g1h->concurrent_mark()->prevMarkBitMap()->
+    getNextMarkedWordAddress(addr, prev_top_at_mark_start());
 
-    assert(next > addr, "must get the next live object");
-
-    return pointer_delta(next, addr);
-  } else if (addr == top()) {
-    return pointer_delta(end(), addr);
-  }
-  return oop(addr)->size();
+  assert(next > addr, "must get the next live object");
+  return pointer_delta(next, addr);
 }
 
 inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t word_size) {