// considerations into account.

// Forward declarations of the classes this barrier set collaborates with.
class Generation;
class OopsInGenClosure;
class DirtyCardToOopClosure;
class ClearNoncleanCardWrapper;

48 class CardTableModRefBS: public ModRefBarrierSet { |
49 class CardTableModRefBS: public ModRefBarrierSet { |
49 // Some classes get to look at some private stuff. |
50 // Some classes get to look at some private stuff. |
50 friend class BytecodeInterpreter; |
51 friend class BytecodeInterpreter; |
51 friend class VMStructs; |
52 friend class VMStructs; |
163 return byte_for(p) + 1; |
164 return byte_for(p) + 1; |
164 } |
165 } |
165 |
166 |
166 // Iterate over the portion of the card-table which covers the given |
167 // Iterate over the portion of the card-table which covers the given |
167 // region mr in the given space and apply cl to any dirty sub-regions |
168 // region mr in the given space and apply cl to any dirty sub-regions |
168 // of mr. cl and dcto_cl must either be the same closure or cl must |
169 // of mr. Dirty cards are _not_ cleared by the iterator method itself, |
169 // wrap dcto_cl. Both are required - neither may be NULL. Also, dcto_cl |
170 // but closures may arrange to do so on their own should they so wish. |
170 // may be modified. Note that this function will operate in a parallel |
171 void non_clean_card_iterate_serial(MemRegion mr, MemRegionClosure* cl); |
171 // mode if worker threads are available. |
172 |
172 void non_clean_card_iterate(Space* sp, MemRegion mr, |
173 // A variant of the above that will operate in a parallel mode if |
173 DirtyCardToOopClosure* dcto_cl, |
174 // worker threads are available, and clear the dirty cards as it |
174 MemRegionClosure* cl); |
175 // processes them. |
175 |
176 // ClearNoncleanCardWrapper cl must wrap the DirtyCardToOopClosure dcto_cl, |
176 // Utility function used to implement the other versions below. |
177 // which may itself be modified by the method. |
177 void non_clean_card_iterate_work(MemRegion mr, MemRegionClosure* cl); |
178 void non_clean_card_iterate_possibly_parallel(Space* sp, MemRegion mr, |
178 |
179 DirtyCardToOopClosure* dcto_cl, |
179 void par_non_clean_card_iterate_work(Space* sp, MemRegion mr, |
180 ClearNoncleanCardWrapper* cl); |
180 DirtyCardToOopClosure* dcto_cl, |
181 |
181 MemRegionClosure* cl, |
182 private: |
182 int n_threads); |
183 // Work method used to implement non_clean_card_iterate_possibly_parallel() |
183 |
184 // above in the parallel case. |
|
185 void non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr, |
|
186 DirtyCardToOopClosure* dcto_cl, |
|
187 ClearNoncleanCardWrapper* cl, |
|
188 int n_threads); |
|
189 |
|
190 protected: |
184 // Dirty the bytes corresponding to "mr" (not all of which must be |
191 // Dirty the bytes corresponding to "mr" (not all of which must be |
185 // covered.) |
192 // covered.) |
186 void dirty_MemRegion(MemRegion mr); |
193 void dirty_MemRegion(MemRegion mr); |
187 |
194 |
188 // Clear (to clean_card) the bytes entirely contained within "mr" (not |
195 // Clear (to clean_card) the bytes entirely contained within "mr" (not |
235 // to the cards in the stride (of n_strides) within the given space. |
242 // to the cards in the stride (of n_strides) within the given space. |
236 void process_stride(Space* sp, |
243 void process_stride(Space* sp, |
237 MemRegion used, |
244 MemRegion used, |
238 jint stride, int n_strides, |
245 jint stride, int n_strides, |
239 DirtyCardToOopClosure* dcto_cl, |
246 DirtyCardToOopClosure* dcto_cl, |
240 MemRegionClosure* cl, |
247 ClearNoncleanCardWrapper* cl, |
241 jbyte** lowest_non_clean, |
248 jbyte** lowest_non_clean, |
242 uintptr_t lowest_non_clean_base_chunk_index, |
249 uintptr_t lowest_non_clean_base_chunk_index, |
243 size_t lowest_non_clean_chunk_size); |
250 size_t lowest_non_clean_chunk_size); |

  // Makes sure that chunk boundaries are handled appropriately, by
407 // region. The regions are non-overlapping, and are visited in |
414 // region. The regions are non-overlapping, and are visited in |
408 // *decreasing* address order. (This order aids with imprecise card |
415 // *decreasing* address order. (This order aids with imprecise card |
409 // marking, where a dirty card may cause scanning, and summarization |
416 // marking, where a dirty card may cause scanning, and summarization |
410 // marking, of objects that extend onto subsequent cards.) |
417 // marking, of objects that extend onto subsequent cards.) |
411 void mod_card_iterate(MemRegionClosure* cl) { |
418 void mod_card_iterate(MemRegionClosure* cl) { |
412 non_clean_card_iterate_work(_whole_heap, cl); |
419 non_clean_card_iterate_serial(_whole_heap, cl); |
413 } |
420 } |
414 |
421 |
415 // Like the "mod_cards_iterate" above, except only invokes the closure |
422 // Like the "mod_cards_iterate" above, except only invokes the closure |
416 // for cards within the MemRegion "mr" (which is required to be |
423 // for cards within the MemRegion "mr" (which is required to be |
417 // card-aligned and sized.) |
424 // card-aligned and sized.) |
418 void mod_card_iterate(MemRegion mr, MemRegionClosure* cl) { |
425 void mod_card_iterate(MemRegion mr, MemRegionClosure* cl) { |
419 non_clean_card_iterate_work(mr, cl); |
426 non_clean_card_iterate_serial(mr, cl); |
420 } |
427 } |
421 |
428 |
422 static uintx ct_max_alignment_constraint(); |
429 static uintx ct_max_alignment_constraint(); |
423 |
430 |
424 // Apply closure "cl" to the dirty cards containing some part of |
431 // Apply closure "cl" to the dirty cards containing some part of |