/*
 * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_BLOCKOFFSETTABLE_HPP
#define SHARE_VM_MEMORY_BLOCKOFFSETTABLE_HPP

#include "memory/memRegion.hpp"
#include "runtime/virtualspace.hpp"
#include "utilities/globalDefinitions.hpp"

// The CollectedHeap type requires subtypes to implement a method
// "block_start". For some subtypes, notably generational
// systems using card-table-based write barriers, the efficiency of this
// operation may be important. Implementations of the "BlockOffsetArray"
// class may be useful in providing such efficient implementations.
//
// BlockOffsetTable (abstract)
//   - BlockOffsetArray (abstract)
//     - BlockOffsetArrayNonContigSpace
//     - BlockOffsetArrayContigSpace
//

class ContiguousSpace;

//////////////////////////////////////////////////////////////////////////
// The BlockOffsetTable "interface"
//////////////////////////////////////////////////////////////////////////
class BlockOffsetTable VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
 protected:
  // These members describe the region covered by the table.

  // The space this table is covering.
  HeapWord* _bottom;    // == reserved.start
  HeapWord* _end;       // End of currently allocated region.

 public:
  // Initialize the table to cover the given space.
  // The contents of the initial table are undefined.
  BlockOffsetTable(HeapWord* bottom, HeapWord* end):
    _bottom(bottom), _end(end) {
    assert(_bottom <= _end, "arguments out of order");
  }

  // Note that the committed size of the covered space may have changed,
  // so the table size might also wish to change.
  virtual void resize(size_t new_word_size) = 0;

  virtual void set_bottom(HeapWord* new_bottom) {
    assert(new_bottom <= _end, "new_bottom > _end");
    _bottom = new_bottom;
    resize(pointer_delta(_end, _bottom));
  }

  // Requires "addr" to be contained by a block, and returns the address of
  // the start of that block.
  virtual HeapWord* block_start_unsafe(const void* addr) const = 0;

  // Returns the address of the start of the block containing "addr", or
  // else "null" if it is covered by no block.
  HeapWord* block_start(const void* addr) const;
};
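
// A hedged usage sketch (illustration only; "bot" and "p" are hypothetical
// names): a client holding an interior pointer "p" into the covered region,
// e.g. the first word of a dirty card, can recover the enclosing block with
//   HeapWord* start = bot->block_start(p);
// and begin walking objects from "start". Only block_start() above is
// assumed.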

//////////////////////////////////////////////////////////////////////////
// One implementation of "BlockOffsetTable," the BlockOffsetArray,
// divides the covered region into "N"-word subregions (where
// "N" = 2^"LogN"). An array with an entry for each such subregion
// indicates how far back one must go to find the start of the
// chunk that includes the first word of the subregion.
//
// Each BlockOffsetArray is owned by a Space. However, the actual array
// may be shared by several BlockOffsetArrays; this is useful
// when a single resizable area (such as a generation) is divided up into
// several spaces in which contiguous allocation takes place. (Consider,
// for example, the garbage-first generation.)

// Here is the shared array type.
//////////////////////////////////////////////////////////////////////////
// BlockOffsetSharedArray
//////////////////////////////////////////////////////////////////////////
class BlockOffsetSharedArray: public CHeapObj<mtGC> {
  friend class BlockOffsetArray;
  friend class BlockOffsetArrayNonContigSpace;
  friend class BlockOffsetArrayContigSpace;
  friend class VMStructs;

 private:
  enum SomePrivateConstants {
    LogN = 9,
    LogN_words = LogN - LogHeapWordSize,
    N_bytes = 1 << LogN,
    N_words = 1 << LogN_words
  };
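
  // For concreteness (an illustrative note, not normative): with LogN = 9,
  // each card covers N_bytes = 512 bytes. On an LP64 VM, LogHeapWordSize
  // is 3, so LogN_words = 6 and each card covers N_words = 64 heap words;
  // on a 32-bit VM, LogHeapWordSize is 2, giving N_words = 128.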

  bool _init_to_zero;

  // The reserved region covered by the shared array.
  MemRegion _reserved;

  // End of the current committed region.
  HeapWord* _end;

  // Array of offsets, used to retrieve an object's start quickly,
  // given an address within it.
  VirtualSpace _vs;
  u_char* _offset_array;          // byte array keeping backwards offsets

 protected:
  // Bounds checking accessors:
  // For performance these have to devolve to array accesses in product builds.
  u_char offset_array(size_t index) const {
    assert(index < _vs.committed_size(), "index out of range");
    return _offset_array[index];
  }
  // An assertion-checking helper method for the set_offset_array() methods below.
  void check_reducing_assertion(bool reducing);

  void set_offset_array(size_t index, u_char offset, bool reducing = false) {
    check_reducing_assertion(reducing);
    assert(index < _vs.committed_size(), "index out of range");
    assert(!reducing || _offset_array[index] >= offset, "Not reducing");
    _offset_array[index] = offset;
  }

  void set_offset_array(size_t index, HeapWord* high, HeapWord* low, bool reducing = false) {
    check_reducing_assertion(reducing);
    assert(index < _vs.committed_size(), "index out of range");
    assert(high >= low, "addresses out of order");
    assert(pointer_delta(high, low) <= N_words, "offset too large");
    assert(!reducing || _offset_array[index] >= (u_char)pointer_delta(high, low),
           "Not reducing");
    _offset_array[index] = (u_char)pointer_delta(high, low);
  }

  void set_offset_array(HeapWord* left, HeapWord* right, u_char offset, bool reducing = false) {
    check_reducing_assertion(reducing);
    assert(index_for(right - 1) < _vs.committed_size(),
           "right address out of range");
    assert(left < right, "Heap addresses out of order");
    size_t num_cards = pointer_delta(right, left) >> LogN_words;

    // Below, we may use an explicit loop instead of memset()
    // because on certain platforms memset() can give concurrent
    // readers "out-of-thin-air," phantom zeros; see 6948537.
    if (UseMemSetInBOT) {
      memset(&_offset_array[index_for(left)], offset, num_cards);
    } else {
      size_t i = index_for(left);
      const size_t end = i + num_cards;
      for (; i < end; i++) {
        // Elided until CR 6977974 is fixed properly.
        // assert(!reducing || _offset_array[i] >= offset, "Not reducing");
        _offset_array[i] = offset;
      }
    }
  }

  void set_offset_array(size_t left, size_t right, u_char offset, bool reducing = false) {
    check_reducing_assertion(reducing);
    assert(right < _vs.committed_size(), "right address out of range");
    assert(left <= right, "indexes out of order");
    size_t num_cards = right - left + 1;

    // Below, we may use an explicit loop instead of memset
    // because on certain platforms memset() can give concurrent
    // readers "out-of-thin-air," phantom zeros; see 6948537.
    if (UseMemSetInBOT) {
      memset(&_offset_array[left], offset, num_cards);
    } else {
      size_t i = left;
      const size_t end = i + num_cards;
      for (; i < end; i++) {
        // Elided until CR 6977974 is fixed properly.
        // assert(!reducing || _offset_array[i] >= offset, "Not reducing");
        _offset_array[i] = offset;
      }
    }
  }

  void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {
    assert(index < _vs.committed_size(), "index out of range");
    assert(high >= low, "addresses out of order");
    assert(pointer_delta(high, low) <= N_words, "offset too large");
    assert(_offset_array[index] == pointer_delta(high, low),
           "Wrong offset");
  }

  bool is_card_boundary(HeapWord* p) const;

  // Return the number of slots needed for an offset array
  // that covers mem_region_words words.
  // We always add an extra slot because if an object
  // ends on a card boundary we put a 0 in the next
  // offset array slot, so we want that slot always
  // to be reserved.
  size_t compute_size(size_t mem_region_words) {
    size_t number_of_slots = (mem_region_words / N_words) + 1;
    return ReservedSpace::allocation_align_size_up(number_of_slots);
  }
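
  // Illustrative arithmetic (a sketch, not normative): covering a 1 MiB
  // region on an LP64 VM means mem_region_words = 131072 and N_words = 64,
  // so number_of_slots = 131072 / 64 + 1 = 2049 one-byte slots, which is
  // then rounded up to the platform's allocation granularity.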

 public:
  // Initialize the table to cover from "base" to (at least)
  // "base + init_word_size". In the future, the table may be expanded
  // (see "resize" below) up to the size of "_reserved" (which must be at
  // least "init_word_size".) The contents of the initial table are
  // undefined; it is the responsibility of the constituent
  // BlockOffsetTable(s) to initialize cards.
  BlockOffsetSharedArray(MemRegion reserved, size_t init_word_size);

  // Notes a change in the committed size of the region covered by the
  // table. The "new_word_size" may not be larger than the size of the
  // reserved region this table covers.
  void resize(size_t new_word_size);

  void set_bottom(HeapWord* new_bottom);

  // Whether entries should be initialized to zero. Used currently only for
  // error checking.
  void set_init_to_zero(bool val) { _init_to_zero = val; }
  bool init_to_zero() { return _init_to_zero; }

  // Updates all the BlockOffsetArrays sharing this shared array to
  // reflect the current "top"s of their spaces.
  void update_offset_arrays();   // Not yet implemented!

  // Return the appropriate index into "_offset_array" for "p".
  size_t index_for(const void* p) const;

  // Return the address indicating the start of the region corresponding to
  // "index" in "_offset_array".
  HeapWord* address_for_index(size_t index) const;

  // Return the address "p" incremented by the size of
  // a region. This method does not align the address
  // returned to the start of a region. It is a simple
  // primitive.
  HeapWord* inc_by_region_size(HeapWord* p) const { return p + N_words; }
};
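
// A hedged round-trip sketch (illustration only; "bosa" and "p" are
// hypothetical): for any "p" in the covered region,
//   size_t idx   = bosa->index_for(p);            // card holding p
//   HeapWord* lo = bosa->address_for_index(idx);  // first word of that card
// should satisfy lo <= p < bosa->inc_by_region_size(lo).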

//////////////////////////////////////////////////////////////////////////
// The BlockOffsetArray whose subtypes use the BlockOffsetSharedArray.
//////////////////////////////////////////////////////////////////////////
class BlockOffsetArray: public BlockOffsetTable {
  friend class VMStructs;
  friend class G1BlockOffsetArray; // temp. until we restructure and clean up
 protected:
  // The following enums are used by do_block_internal() below
  enum Action {
    Action_single,      // BOT records a single block (see single_block())
    Action_mark,        // BOT marks the start of a block (see mark_block())
    Action_check        // Check that BOT records block correctly
                        // (see verify_single_block()).
  };

  enum SomePrivateConstants {
    N_words = BlockOffsetSharedArray::N_words,
    LogN    = BlockOffsetSharedArray::LogN,
    // entries "e" of at least N_words mean "go back by Base^(e-N_words)."
    // All entries are less than "N_words + N_powers".
    LogBase = 4,
    Base = (1 << LogBase),
    N_powers = 14
  };

  static size_t power_to_cards_back(uint i) {
    return (size_t)1 << (LogBase * i);
  }
  static size_t power_to_words_back(uint i) {
    return power_to_cards_back(i) * N_words;
  }
  static size_t entry_to_cards_back(u_char entry) {
    assert(entry >= N_words, "Precondition");
    return power_to_cards_back(entry - N_words);
  }
  static size_t entry_to_words_back(u_char entry) {
    assert(entry >= N_words, "Precondition");
    return power_to_words_back(entry - N_words);
  }
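
  // A worked decoding example (illustrative, assuming the constants above
  // on an LP64 VM where N_words = 64): an entry below N_words is an exact
  // word offset, so entry 5 means "the block starts 5 words before this
  // card's first word". Larger entries are logarithmic back-skips:
  //   entry_to_cards_back(64) == 16^0 == 1 card back
  //   entry_to_cards_back(65) == 16^1 == 16 cards back
  //   entry_to_cards_back(66) == 16^2 == 256 cards back
  // Lookup repeats such skips until it reaches an entry below N_words.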

  // The shared array, which is shared with other BlockOffsetArrays
  // corresponding to different spaces within a generation or span of
  // memory.
  BlockOffsetSharedArray* _array;

  // The space that owns this subregion.
  Space* _sp;

  // If true, array entries are initialized to 0; otherwise, they are
  // initialized to point backwards to the beginning of the covered region.
  bool _init_to_zero;

  // An assertion-checking helper method for the set_remainder*() methods below.
  void check_reducing_assertion(bool reducing) { _array->check_reducing_assertion(reducing); }

  // Sets the entries corresponding to the cards starting at "start" and
  // ending at "end" to point back to the card before "start": the interval
  // [start, end) is right-open. The last parameter, reducing, indicates
  // whether the updates to individual entries always reduce the entry from
  // a higher to a lower value. (For example, this would hold true during a
  // phase in which only block splits were updating the BOT.)
  void set_remainder_to_point_to_start(HeapWord* start, HeapWord* end, bool reducing = false);
  // Same as above, except that the args here are a card _index_ interval
  // that is closed: [start_index, end_index]
  void set_remainder_to_point_to_start_incl(size_t start, size_t end, bool reducing = false);

  // A helper function for BOT adjustment/verification work
  void do_block_internal(HeapWord* blk_start, HeapWord* blk_end, Action action, bool reducing = false);

 public:
  // The space may not have its bottom and top set yet, which is why the
  // region is passed as a parameter. If "init_to_zero" is true, the
  // elements of the array are initialized to zero. Otherwise, they are
  // initialized to point backwards to the beginning.
  BlockOffsetArray(BlockOffsetSharedArray* array, MemRegion mr,
                   bool init_to_zero_);

  // Note: this ought to be part of the constructor, but that would require
  // "this" to be passed as a parameter to a member constructor for
  // the containing concrete subtype of Space.
  // This would be legal C++, but MS VC++ doesn't allow it.
  void set_space(Space* sp) { _sp = sp; }

  // Resets the covered region to the given "mr".
  void set_region(MemRegion mr) {
    _bottom = mr.start();
    _end = mr.end();
  }

  // Note that the committed size of the covered space may have changed,
  // so the table size might also wish to change.
  virtual void resize(size_t new_word_size) {
    HeapWord* new_end = _bottom + new_word_size;
    if (_end < new_end && !init_to_zero()) {
      // verify that the old and new boundaries are also card boundaries
      assert(_array->is_card_boundary(_end),
             "_end not a card boundary");
      assert(_array->is_card_boundary(new_end),
             "new _end would not be a card boundary");
      // set all the newly added cards
      _array->set_offset_array(_end, new_end, N_words);
    }
    _end = new_end;  // update _end
  }

  // Adjust the BOT to show that it has a single block in the
  // range [blk_start, blk_start + size). All necessary BOT
  // cards are adjusted, but _unallocated_block isn't.
  void single_block(HeapWord* blk_start, HeapWord* blk_end);
  void single_block(HeapWord* blk, size_t size) {
    single_block(blk, blk + size);
  }

  // When the alloc_block() call returns, the block offset table should
  // have enough information such that any subsequent block_start() call
  // with an argument equal to an address that is within the range
  // [blk_start, blk_end) would return the value blk_start, provided
  // there have been no calls in between that reset this information
  // (e.g. see BlockOffsetArrayNonContigSpace::single_block() call
  // for an appropriate range covering the said interval).
  // These methods expect to be called with [blk_start, blk_end)
  // representing a block of memory in the heap.
  virtual void alloc_block(HeapWord* blk_start, HeapWord* blk_end);
  void alloc_block(HeapWord* blk, size_t size) {
    alloc_block(blk, blk + size);
  }
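
  // A hedged usage sketch (illustration only; "obj", "sz" and the field
  // name "_bot" are hypothetical): a space that has just carved out an
  // object would typically record it with
  //   _bot.alloc_block(obj, obj + sz);
  // after which block_start(p) for any p in [obj, obj + sz) yields obj,
  // per the contract above.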

  // If true, initialize array slots with no allocated blocks to zero.
  // Otherwise, make them point back to the front.
  bool init_to_zero() { return _init_to_zero; }
  // Corresponding setter
  void set_init_to_zero(bool val) {
    _init_to_zero = val;
    assert(_array != NULL, "_array should be non-NULL");
    _array->set_init_to_zero(val);
  }

  // Debugging
  // Return the index of the last entry in the "active" region.
  virtual size_t last_active_index() const = 0;
  // Verify the block offset table
  void verify() const;
  void check_all_cards(size_t left_card, size_t right_card) const;
};

////////////////////////////////////////////////////////////////////////////
// A subtype of BlockOffsetArray that takes advantage of the fact
// that its underlying space is a NonContiguousSpace, so that some
// specialized interfaces can be made available for spaces that
// manipulate the table.
////////////////////////////////////////////////////////////////////////////
class BlockOffsetArrayNonContigSpace: public BlockOffsetArray {
  friend class VMStructs;
 private:
  // The portion [_unallocated_block, _sp.end()) of the space that
  // is a single block known not to contain any objects.
  // NOTE: See BlockOffsetArrayUseUnallocatedBlock flag.
  HeapWord* _unallocated_block;

 public:
  BlockOffsetArrayNonContigSpace(BlockOffsetSharedArray* array, MemRegion mr):
    BlockOffsetArray(array, mr, false),
    _unallocated_block(_bottom) { }

  // Accessor
  HeapWord* unallocated_block() const {
    assert(BlockOffsetArrayUseUnallocatedBlock,
           "_unallocated_block is not being maintained");
    return _unallocated_block;
  }

  void set_unallocated_block(HeapWord* block) {
    assert(BlockOffsetArrayUseUnallocatedBlock,
           "_unallocated_block is not being maintained");
    assert(block >= _bottom && block <= _end, "out of range");
    _unallocated_block = block;
  }

  // These methods expect to be called with [blk_start, blk_end)
  // representing a block of memory in the heap.
  void alloc_block(HeapWord* blk_start, HeapWord* blk_end);
  void alloc_block(HeapWord* blk, size_t size) {
    alloc_block(blk, blk + size);
  }

  // The following methods are useful and optimized for a
  // non-contiguous space.

  // Given a block [blk_start, blk_start + full_blk_size), and
  // a left_blk_size < full_blk_size, adjust the BOT to show two
  // blocks [blk_start, blk_start + left_blk_size) and
  // [blk_start + left_blk_size, blk_start + full_blk_size).
  // It is assumed (and verified in the non-product VM) that the
  // BOT was correct for the original block.
  void split_block(HeapWord* blk_start, size_t full_blk_size,
                   size_t left_blk_size);
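
  // An illustrative call (a sketch; "bot" and the sizes are hypothetical):
  // if the BOT currently records one 1000-word block at "blk", then
  //   bot->split_block(blk, 1000, 200);
  // leaves it recording two blocks, [blk, blk + 200) and
  // [blk + 200, blk + 1000), as when an allocator carves a smaller
  // chunk off the front of a larger free block.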

  // Adjust BOT to show that it has a block in the range
  // [blk_start, blk_start + size). Only the first card
  // of BOT is touched. It is assumed (and verified in the
  // non-product VM) that the remaining cards of the block
  // are correct.
  void mark_block(HeapWord* blk_start, HeapWord* blk_end, bool reducing = false);
  void mark_block(HeapWord* blk, size_t size, bool reducing = false) {
    mark_block(blk, blk + size, reducing);
  }

  // Adjust _unallocated_block to indicate that a particular
  // block has been newly allocated or freed. It is assumed (and
  // verified in the non-product VM) that the BOT is correct for
  // the given block.
  void allocated(HeapWord* blk_start, HeapWord* blk_end, bool reducing = false) {
    // Verify that the BOT shows [blk_start, blk_end) to be one block.
    verify_single_block(blk_start, blk_end);
    if (BlockOffsetArrayUseUnallocatedBlock) {
      _unallocated_block = MAX2(_unallocated_block, blk_end);
    }
  }

  void allocated(HeapWord* blk, size_t size, bool reducing = false) {
    allocated(blk, blk + size, reducing);
  }

  void freed(HeapWord* blk_start, HeapWord* blk_end);
  void freed(HeapWord* blk, size_t size);

  HeapWord* block_start_unsafe(const void* addr) const;

  // Requires "addr" to be the start of a card and returns the
  // start of the block that contains the given address.
  HeapWord* block_start_careful(const void* addr) const;

  // Verification & debugging: ensure that the offset table reflects
  // the fact that the block [blk_start, blk_end) or [blk, blk + size)
  // is a single block of storage. NOTE: can't const this because of
  // call to non-const do_block_internal() below.
  void verify_single_block(HeapWord* blk_start, HeapWord* blk_end)
    PRODUCT_RETURN;
  void verify_single_block(HeapWord* blk, size_t size) PRODUCT_RETURN;

  // Verify that the given block is before _unallocated_block
  void verify_not_unallocated(HeapWord* blk_start, HeapWord* blk_end)
    const PRODUCT_RETURN;
  void verify_not_unallocated(HeapWord* blk, size_t size)
    const PRODUCT_RETURN;

  // Debugging support
  virtual size_t last_active_index() const;
};

////////////////////////////////////////////////////////////////////////////
// A subtype of BlockOffsetArray that takes advantage of the fact
// that its underlying space is a ContiguousSpace, so that its "active"
// region can be more efficiently tracked (than for a non-contiguous space).
////////////////////////////////////////////////////////////////////////////
class BlockOffsetArrayContigSpace: public BlockOffsetArray {
  friend class VMStructs;
 private:
  // Allocation boundary at which the offset array must be updated.
  HeapWord* _next_offset_threshold;
  size_t    _next_offset_index;      // index corresponding to that boundary

  // Work function, called when the end of an allocated block crosses
  // the threshold.
  void alloc_block_work(HeapWord* blk_start, HeapWord* blk_end);

 public:
  BlockOffsetArrayContigSpace(BlockOffsetSharedArray* array, MemRegion mr):
    BlockOffsetArray(array, mr, true) {
    _next_offset_threshold = NULL;
    _next_offset_index = 0;
  }

  void set_contig_space(ContiguousSpace* sp) { set_space((Space*)sp); }

  // Initialize the threshold for an empty heap.
  HeapWord* initialize_threshold();
  // Zero out the entry for _bottom (offset will be zero)
  void zero_bottom_entry();

  // Return the next threshold, the point at which the table should be
  // updated.
  HeapWord* threshold() const { return _next_offset_threshold; }

  // In general, these methods expect to be called with
  // [blk_start, blk_end) representing a block of memory in the heap.
  // In this implementation, however, we are OK even if blk_start and/or
  // blk_end are NULL because NULL is represented as 0, and thus
  // never exceeds the "_next_offset_threshold".
  void alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
    if (blk_end > _next_offset_threshold) {
      alloc_block_work(blk_start, blk_end);
    }
  }
  void alloc_block(HeapWord* blk, size_t size) {
    alloc_block(blk, blk + size);
  }
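
  // A hedged note on the fast path (illustration only): because allocation
  // in a contiguous space is strictly bump-pointer, a block that ends at or
  // below _next_offset_threshold needs no table update at all; only an
  // allocation whose end crosses the threshold pays for alloc_block_work(),
  // which then advances the threshold past the newly covered card(s).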

  HeapWord* block_start_unsafe(const void* addr) const;

  // Debugging support
  virtual size_t last_active_index() const;
};

#endif // SHARE_VM_MEMORY_BLOCKOFFSETTABLE_HPP