 
 // This kind of "BarrierSet" allows a "CollectedHeap" to detect and
 // enumerate ref fields that have been modified (since the last
 // enumeration.)
 
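For readers coming to this file cold: the "detect and enumerate" scheme the comment above refers to is card marking. The covered heap is divided into fixed-size cards, the table keeps one byte per card, and the post-write barrier dirties the byte for whichever card contains the modified reference field, so collectors can enumerate dirty cards instead of rescanning the whole heap. Below is a minimal standalone sketch of that idea, assuming 512-byte cards (the assert further down only bounds card_size at 512); the names and card values are illustrative, not the HotSpot declarations.

#include <cstddef>
#include <cstdint>
#include <vector>

// Illustrative card-marking sketch; not HotSpot code.
namespace card_sketch {

const int    card_shift = 9;                       // assumed 512-byte cards
const size_t card_size  = size_t(1) << card_shift;
const int8_t clean_card = -1;                      // assumed card values
const int8_t dirty_card = 0;

struct CardTable {
  uintptr_t           heap_start;
  std::vector<int8_t> byte_map;                    // one byte per card of covered heap

  CardTable(uintptr_t start, size_t heap_bytes)
    : heap_start(start),
      byte_map((heap_bytes + card_size - 1) / card_size, clean_card) {}

  // Conceptual post-write barrier: called after a reference store at 'field',
  // it marks the card covering that address so a later scan can find it.
  void write_ref_field(uintptr_t field) {
    byte_map[(field - heap_start) >> card_shift] = dirty_card;
  }
};

} // namespace card_sketch

The real table additionally keeps a biased byte_map_base so that addr >> card_shift indexes the map directly, which is why both _byte_map and byte_map_base appear in the initializer list below.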
-size_t CardTableModRefBS::cards_required(size_t covered_words)
-{
-  // Add one for a guard card, used to detect errors.
-  const size_t words = align_size_up(covered_words, card_size_in_words);
-  return words / card_size_in_words + 1;
-}
-
 size_t CardTableModRefBS::compute_byte_map_size()
 {
   assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
          "uninitialized, check declaration order");
   assert(_page_size != 0, "uninitialized, check declaration order");
 
 CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap,
                                      int max_covered_regions):
   ModRefBarrierSet(max_covered_regions),
   _whole_heap(whole_heap),
-  _guard_index(cards_required(whole_heap.word_size()) - 1),
-  _last_valid_index(_guard_index - 1),
+  _guard_index(0),
+  _guard_region(),
+  _last_valid_index(0),
   _page_size(os::vm_page_size()),
-  _byte_map_size(compute_byte_map_size())
+  _byte_map_size(0),
+  _covered(NULL),
+  _committed(NULL),
+  _cur_covered_regions(0),
+  _byte_map(NULL),
+  byte_map_base(NULL),
+  // LNC functionality
+  _lowest_non_clean(NULL),
+  _lowest_non_clean_chunk_size(NULL),
+  _lowest_non_clean_base_chunk_index(NULL),
+  _last_LNC_resizing_collection(NULL)
 {
   _kind = BarrierSet::CardTableModRef;
 
+  assert((uintptr_t(_whole_heap.start()) & (card_size - 1)) == 0, "heap must start at card boundary");
+  assert((uintptr_t(_whole_heap.end()) & (card_size - 1)) == 0, "heap must end at card boundary");
+
+  assert(card_size <= 512, "card_size must be less than 512"); // why?
+
+  _covered = new MemRegion[_max_covered_regions];
+  if (_covered == NULL) {
+    vm_exit_during_initialization("Could not allocate card table covered region set.");
+  }
+}
+
+void CardTableModRefBS::initialize() {
+  _guard_index = cards_required(_whole_heap.word_size()) - 1;
+  _last_valid_index = _guard_index - 1;
+
+  _byte_map_size = compute_byte_map_size();
+
   HeapWord* low_bound = _whole_heap.start();
   HeapWord* high_bound = _whole_heap.end();
-  assert((uintptr_t(low_bound) & (card_size - 1)) == 0, "heap must start at card boundary");
-  assert((uintptr_t(high_bound) & (card_size - 1)) == 0, "heap must end at card boundary");
-
-  assert(card_size <= 512, "card_size must be less than 512"); // why?
-
-  _covered = new MemRegion[max_covered_regions];
-  _committed = new MemRegion[max_covered_regions];
-  if (_covered == NULL || _committed == NULL) {
-    vm_exit_during_initialization("couldn't alloc card table covered region set.");
-  }
 
   _cur_covered_regions = 0;
+  _committed = new MemRegion[_max_covered_regions];
+  if (_committed == NULL) {
+    vm_exit_during_initialization("Could not allocate card table committed region set.");
+  }
+
   const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
     MAX2(_page_size, (size_t) os::vm_allocation_granularity());
   ReservedSpace heap_rs(_byte_map_size, rs_align, false);
 
   MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);
   _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
   os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
                             !ExecMem, "card table last card");
   *guard_card = last_card;
 
   _lowest_non_clean =
-    NEW_C_HEAP_ARRAY(CardArr, max_covered_regions, mtGC);
+    NEW_C_HEAP_ARRAY(CardArr, _max_covered_regions, mtGC);
   _lowest_non_clean_chunk_size =
-    NEW_C_HEAP_ARRAY(size_t, max_covered_regions, mtGC);
+    NEW_C_HEAP_ARRAY(size_t, _max_covered_regions, mtGC);
   _lowest_non_clean_base_chunk_index =
-    NEW_C_HEAP_ARRAY(uintptr_t, max_covered_regions, mtGC);
+    NEW_C_HEAP_ARRAY(uintptr_t, _max_covered_regions, mtGC);
   _last_LNC_resizing_collection =
-    NEW_C_HEAP_ARRAY(int, max_covered_regions, mtGC);
+    NEW_C_HEAP_ARRAY(int, _max_covered_regions, mtGC);
   if (_lowest_non_clean == NULL
       || _lowest_non_clean_chunk_size == NULL
       || _lowest_non_clean_base_chunk_index == NULL
       || _last_LNC_resizing_collection == NULL)
     vm_exit_during_initialization("couldn't allocate an LNC array.");
-  for (int i = 0; i < max_covered_regions; i++) {
+  for (int i = 0; i < _max_covered_regions; i++) {
     _lowest_non_clean[i] = NULL;
     _lowest_non_clean_chunk_size[i] = 0;
     _last_LNC_resizing_collection[i] = -1;
   }
 
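The sizing and guard logic above is compact enough to restate: the byte map needs one byte per card of covered heap words, rounded up to a whole number of cards, plus one guard card that is committed separately and stamped with last_card so that writes past the last real card are caught. A standalone sketch of that arithmetic, assuming 8-byte HeapWords and a 512-byte card (the assert earlier only bounds card_size at 512); the helper names are illustrative:

#include <cstddef>
#include <cstdint>

// Assumed geometry for the sketch: 512-byte cards, 8-byte heap words.
const size_t card_size          = 512;
const size_t heap_word_size     = sizeof(uint64_t);
const size_t card_size_in_words = card_size / heap_word_size;   // 64 words per card

// Mirrors cards_required() above: round the covered size up to whole cards,
// then add one guard card used to detect errors.
size_t cards_required_sketch(size_t covered_words) {
  size_t aligned = (covered_words + card_size_in_words - 1)
                   / card_size_in_words * card_size_in_words;
  return aligned / card_size_in_words + 1;
}

// Example: a 64 MB heap covers 8M words, so the table needs
// 8M / 64 + 1 = 131073 cards, including the guard card.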
 #ifndef PRODUCT
 void CardTableModRefBS::verify_region(MemRegion mr,
                                       jbyte val, bool val_equals) {
   jbyte* start = byte_for(mr.start());
   jbyte* end = byte_for(mr.last());
   bool failures = false;
   for (jbyte* curr = start; curr <= end; ++curr) {
     jbyte curr_val = *curr;
     bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);
     if (failed) {
       if (!failures) {