Thu, 12 Jan 2012 00:06:47 -0800
6484965: G1: piggy-back liveness accounting phase on marking
Summary: Remove the separate counting phase of concurrent marking by tracking the number of marked bytes, and the cards spanned by marked objects, in data structures local to each marking task/worker thread; these are updated as individual objects are marked.
Reviewed-by: brutisso, tonyp
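
The change can be pictured with a minimal sketch (illustrative only, not code from this changeset; MarkTaskStats, note_marked() and kHeapWordSize are hypothetical names): each marking worker keeps thread-local liveness totals that are bumped as it marks objects, so no separate counting pass over the heap is needed.

#include <cstddef>

// Illustrative sketch, not part of the changeset. kHeapWordSize stands in
// for HotSpot's global HeapWordSize (bytes per heap word).
static const size_t kHeapWordSize = sizeof(void*);

class MarkTaskStats {
  size_t _marked_bytes;   // bytes of objects this task has marked so far
  size_t _marked_cards;   // cards spanned by those marked objects

 public:
  MarkTaskStats() : _marked_bytes(0), _marked_cards(0) { }

  // Called each time this task marks an object of word_size words that
  // spans the (inclusive) card index range [first_card, last_card].
  void note_marked(size_t word_size, size_t first_card, size_t last_card) {
    _marked_bytes += word_size * kHeapWordSize;
    _marked_cards += last_card - first_card + 1;
  }

  size_t marked_bytes() const { return _marked_bytes; }
  size_t marked_cards() const { return _marked_cards; }
};

When a marking round completes, totals like these would be folded into per-region counters; _next_marked_bytes, published by note_end_of_marking() below, is the per-region side of that accounting.
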
/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP

inline HeapWord* G1OffsetTableContigSpace::allocate(size_t size) {
  HeapWord* res = ContiguousSpace::allocate(size);
  if (res != NULL) {
    _offsets.alloc_block(res, size);
  }
  return res;
}

// Because we need to keep "_offsets" consistent with the allocations, we
// serialize them with a lock. Therefore, this is best used only for larger
// LAB allocations (illustrative usage follows the function).
inline HeapWord* G1OffsetTableContigSpace::par_allocate(size_t size) {
  MutexLocker x(&_par_alloc_lock);
  // Given that we take the lock, there is no need to use par_allocate()
  // here.
  HeapWord* res = ContiguousSpace::allocate(size);
  if (res != NULL) {
    _offsets.alloc_block(res, size);
  }
  return res;
}

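// Illustrative usage (hypothetical caller, not part of this file): GC
// worker threads racing to carve LABs out of the same space would go
// through par_allocate(), while a caller that is already serialized can
// use allocate() directly:
//
//   HeapWord* lab = space->par_allocate(lab_word_size);  // concurrent callers
//   HeapWord* obj = space->allocate(obj_word_size);      // serialized caller
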
inline HeapWord* G1OffsetTableContigSpace::block_start(const void* p) {
  return _offsets.block_start(p);
}

inline HeapWord*
G1OffsetTableContigSpace::block_start_const(const void* p) const {
  return _offsets.block_start_const(p);
}

inline void HeapRegion::note_start_of_marking() {
  init_top_at_conc_mark_count();
  _next_marked_bytes = 0;
  _next_top_at_mark_start = top();
}

inline void HeapRegion::note_end_of_marking() {
  _prev_top_at_mark_start = _next_top_at_mark_start;
  _prev_marked_bytes = _next_marked_bytes;
  _next_marked_bytes = 0;

  // A worked example of this bound follows the function.
  assert(_prev_marked_bytes <=
         (size_t) pointer_delta(prev_top_at_mark_start(), bottom()) *
         HeapWordSize, "invariant");
}

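// Worked example of the invariant above (illustrative numbers, assuming a
// 64-bit VM where HeapWordSize is 8): if prev_top_at_mark_start() lies
// 1024 words above bottom(), at most 1024 * 8 = 8192 bytes of objects fit
// below PTAMS, so _prev_marked_bytes can never legitimately exceed 8192.
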
inline void HeapRegion::note_start_of_copying(bool during_initial_mark) {
  if (during_initial_mark) {
    if (is_survivor()) {
      assert(false, "should not allocate survivors during IM");
    } else {
      // During initial-mark we'll explicitly mark any objects on old
      // regions that are pointed to by roots. Given that explicit
      // marks only make sense below NTAMS, it would be nice to check
      // that condition here. However, since we don't know where the
      // top of this region will end up, we simply set NTAMS to the end
      // of the region so that all marks fall below NTAMS. We'll set it
      // to the actual top when we retire this region.
      _next_top_at_mark_start = end();
    }
  } else {
    if (is_survivor()) {
      // This is how we always allocate survivors.
      assert(_next_top_at_mark_start == bottom(), "invariant");
    } else {
      // We could have re-used this old region as to-space over a
      // couple of GCs since the start of the concurrent marking
      // cycle. This means that [bottom, NTAMS) will contain objects
      // copied up to and including initial-mark, and [NTAMS, top)
      // will contain objects copied during the concurrent marking
      // cycle (see the layout sketch after this function).
      assert(top() >= _next_top_at_mark_start, "invariant");
    }
  }
}

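// Illustrative layout of such a re-used old region (sketch only,
// proportions arbitrary):
//
//   bottom()             NTAMS                   top()           end()
//      |                   |                       |               |
//      +-------------------+-----------------------+---------------+
//      | copied up to and  | copied during the     |    unused     |
//      | incl. initial-mark| concurrent mark cycle |               |
//      +-------------------+-----------------------+---------------+
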
inline void HeapRegion::note_end_of_copying(bool during_initial_mark) {
  if (during_initial_mark) {
    if (is_survivor()) {
      assert(false, "should not allocate survivors during IM");
    } else {
      // See the comment for note_start_of_copying() for the details
      // on this; a sketch of the full NTAMS lifecycle follows this
      // function.
      assert(_next_top_at_mark_start == end(), "pre-condition");
      _next_top_at_mark_start = top();
    }
  } else {
    if (is_survivor()) {
      // This is how we always allocate survivors.
      assert(_next_top_at_mark_start == bottom(), "invariant");
    } else {
      // See the comment for note_start_of_copying() for the details
      // on this.
      assert(top() >= _next_top_at_mark_start, "invariant");
    }
  }
}

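// Sketch of the NTAMS lifecycle for an old region allocated into during
// initial-mark (hypothetical timeline, derived from the two functions
// above):
//
//   note_start_of_copying(true)  // NTAMS = end(): every mark lands below it
//   ... objects are copied into the region and explicitly marked ...
//   note_end_of_copying(true)    // NTAMS = top(): the real top is now known
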
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP