Thu, 28 Jun 2012 17:03:16 -0400
6995781: Native Memory Tracking (Phase 1)
7151532: DCmd for hotspot native memory tracking
Summary: Implementation of native memory tracking phase 1, which tracks VM native memory usage, and related DCmd
Reviewed-by: acorn, coleenp, fparain
/*
 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_BLOCKOFFSETTABLE_HPP
#define SHARE_VM_MEMORY_BLOCKOFFSETTABLE_HPP

#include "memory/memRegion.hpp"
#include "runtime/virtualspace.hpp"
#include "utilities/globalDefinitions.hpp"

// The CollectedHeap type requires subtypes to implement a method
// "block_start".  For some subtypes, notably generational
// systems using card-table-based write barriers, the efficiency of this
// operation may be important.  Implementations of the "BlockOffsetArray"
// class may be useful in providing such efficient implementations.
//
// BlockOffsetTable (abstract)
//   - BlockOffsetArray (abstract)
//     - BlockOffsetArrayNonContigSpace
//     - BlockOffsetArrayContigSpace
//

class ContiguousSpace;
class SerializeOopClosure;

//////////////////////////////////////////////////////////////////////////
// The BlockOffsetTable "interface"
//////////////////////////////////////////////////////////////////////////
class BlockOffsetTable VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
protected:
  // These members describe the region covered by the table.

  // The space this table is covering.
  HeapWord* _bottom;    // == reserved.start
  HeapWord* _end;       // End of currently allocated region.

public:
  // Initialize the table to cover the given space.
  // The contents of the initial table are undefined.
  BlockOffsetTable(HeapWord* bottom, HeapWord* end):
    _bottom(bottom), _end(end) {
    assert(_bottom <= _end, "arguments out of order");
  }

  // Note that the committed size of the covered space may have changed,
  // so the table's size may also need to change.
  virtual void resize(size_t new_word_size) = 0;

  virtual void set_bottom(HeapWord* new_bottom) {
    assert(new_bottom <= _end, "new_bottom > _end");
    _bottom = new_bottom;
    resize(pointer_delta(_end, _bottom));
  }

  // Requires "addr" to be contained by a block, and returns the address of
  // the start of that block.
  virtual HeapWord* block_start_unsafe(const void* addr) const = 0;

  // Returns the address of the start of the block containing "addr", or
  // else "null" if it is covered by no block.
  HeapWord* block_start(const void* addr) const;
};
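
// An illustrative sketch of the intended use, not part of this interface
// (names are hypothetical): a card scanner that must find the object
// overlapping the start of a dirty card could call
//
//   HeapWord* start = bot->block_start(card_start);
//
// which returns null when no block covers "card_start".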

//////////////////////////////////////////////////////////////////////////
// One implementation of "BlockOffsetTable," the BlockOffsetArray,
// divides the covered region into "N"-word subregions (where
// "N" = 2^"LogN").  An array with an entry for each such subregion
// indicates how far back one must go to find the start of the
// chunk that includes the first word of the subregion.
//
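// For illustration only (the constant values are defined in
// BlockOffsetSharedArray::SomePrivateConstants below; word figures
// assume a 64-bit VM): with LogN = 9, each subregion ("card") covers
// 2^9 = 512 bytes, i.e. 64 heap words.  If the entry for a card holds
// the value 3, the block containing the card's first word starts
// 3 words before that card's boundary:
//
//   HeapWord* boundary  = address_for_index(index);       // card start
//   HeapWord* blk_start = boundary - offset_array(index); // back 3 words
//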
// Each BlockOffsetArray is owned by a Space.  However, the actual array
// may be shared by several BlockOffsetArrays; this is useful
// when a single resizable area (such as a generation) is divided up into
// several spaces in which contiguous allocation takes place.  (Consider,
// for example, the garbage-first generation.)

// Here is the shared array type.
//////////////////////////////////////////////////////////////////////////
// BlockOffsetSharedArray
//////////////////////////////////////////////////////////////////////////
class BlockOffsetSharedArray: public CHeapObj<mtGC> {
  friend class BlockOffsetArray;
  friend class BlockOffsetArrayNonContigSpace;
  friend class BlockOffsetArrayContigSpace;
  friend class VMStructs;

private:
  enum SomePrivateConstants {
    LogN = 9,
    LogN_words = LogN - LogHeapWordSize,
    N_bytes = 1 << LogN,
    N_words = 1 << LogN_words
  };
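
  // For example (illustrative arithmetic only): with LogN = 9 on a
  // 64-bit VM, where LogHeapWordSize = 3,
  //   N_bytes    = 1 << 9 = 512 bytes per card
  //   LogN_words = 9 - 3  = 6
  //   N_words    = 1 << 6 = 64 words per card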

  bool _init_to_zero;

  // The reserved region covered by the shared array.
  MemRegion _reserved;

  // End of the current committed region.
  HeapWord* _end;

  // Array for keeping offsets for retrieving object start fast given an
  // address.
  VirtualSpace _vs;
  u_char* _offset_array;          // byte array keeping backwards offsets

protected:
  // Bounds checking accessors:
  // For performance these have to devolve to array accesses in product builds.
  u_char offset_array(size_t index) const {
    assert(index < _vs.committed_size(), "index out of range");
    return _offset_array[index];
  }
  // An assertion-checking helper method for the set_offset_array() methods below.
  void check_reducing_assertion(bool reducing);

  void set_offset_array(size_t index, u_char offset, bool reducing = false) {
    check_reducing_assertion(reducing);
    assert(index < _vs.committed_size(), "index out of range");
    assert(!reducing || _offset_array[index] >= offset, "Not reducing");
    _offset_array[index] = offset;
  }

  void set_offset_array(size_t index, HeapWord* high, HeapWord* low, bool reducing = false) {
    check_reducing_assertion(reducing);
    assert(index < _vs.committed_size(), "index out of range");
    assert(high >= low, "addresses out of order");
    assert(pointer_delta(high, low) <= N_words, "offset too large");
    assert(!reducing || _offset_array[index] >= (u_char)pointer_delta(high, low),
           "Not reducing");
    _offset_array[index] = (u_char)pointer_delta(high, low);
  }

  void set_offset_array(HeapWord* left, HeapWord* right, u_char offset, bool reducing = false) {
    check_reducing_assertion(reducing);
    assert(index_for(right - 1) < _vs.committed_size(),
           "right address out of range");
    assert(left < right, "Heap addresses out of order");
    size_t num_cards = pointer_delta(right, left) >> LogN_words;

    // Below, we may use an explicit loop instead of memset()
    // because on certain platforms memset() can give concurrent
    // readers "out-of-thin-air," phantom zeros; see 6948537.
    if (UseMemSetInBOT) {
      memset(&_offset_array[index_for(left)], offset, num_cards);
    } else {
      size_t i = index_for(left);
      const size_t end = i + num_cards;
      for (; i < end; i++) {
        // Elided until CR 6977974 is fixed properly.
        // assert(!reducing || _offset_array[i] >= offset, "Not reducing");
        _offset_array[i] = offset;
      }
    }
  }

  void set_offset_array(size_t left, size_t right, u_char offset, bool reducing = false) {
    check_reducing_assertion(reducing);
    assert(right < _vs.committed_size(), "right address out of range");
    assert(left <= right, "indexes out of order");
    size_t num_cards = right - left + 1;

    // Below, we may use an explicit loop instead of memset
    // because on certain platforms memset() can give concurrent
    // readers "out-of-thin-air," phantom zeros; see 6948537.
    if (UseMemSetInBOT) {
      memset(&_offset_array[left], offset, num_cards);
    } else {
      size_t i = left;
      const size_t end = i + num_cards;
      for (; i < end; i++) {
        // Elided until CR 6977974 is fixed properly.
        // assert(!reducing || _offset_array[i] >= offset, "Not reducing");
        _offset_array[i] = offset;
      }
    }
  }

  void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {
    assert(index < _vs.committed_size(), "index out of range");
    assert(high >= low, "addresses out of order");
    assert(pointer_delta(high, low) <= N_words, "offset too large");
    assert(_offset_array[index] == pointer_delta(high, low),
           "Wrong offset");
  }

  bool is_card_boundary(HeapWord* p) const;

  // Return the number of slots needed for an offset array
  // that covers mem_region_words words.
  // We always add an extra slot because if an object
  // ends on a card boundary we put a 0 in the next
  // offset array slot, so we want that slot always
  // to be reserved.

  size_t compute_size(size_t mem_region_words) {
    size_t number_of_slots = (mem_region_words / N_words) + 1;
    return ReservedSpace::allocation_align_size_up(number_of_slots);
  }
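
  // For example (illustrative arithmetic only): covering 2^20 words with
  // N_words = 64 needs (2^20 / 64) + 1 = 16385 slots, which is then
  // rounded up to the reserved-space allocation granularity.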

public:
  // Initialize the table to cover from "base" to (at least)
  // "base + init_word_size".  In the future, the table may be expanded
  // (see "resize" below) up to the size of "_reserved" (which must be at
  // least "init_word_size".)  The contents of the initial table are
  // undefined; it is the responsibility of the constituent
  // BlockOffsetTable(s) to initialize cards.
  BlockOffsetSharedArray(MemRegion reserved, size_t init_word_size);

  // Notes a change in the committed size of the region covered by the
  // table.  The "new_word_size" may not be larger than the size of the
  // reserved region this table covers.
  void resize(size_t new_word_size);

  void set_bottom(HeapWord* new_bottom);

  // Whether entries should be initialized to zero.  Used currently only for
  // error checking.
  void set_init_to_zero(bool val) { _init_to_zero = val; }
  bool init_to_zero() { return _init_to_zero; }

  // Updates all the BlockOffsetArray's sharing this shared array to
  // reflect the current "top"'s of their spaces.
  void update_offset_arrays();   // Not yet implemented!

  // Return the appropriate index into "_offset_array" for "p".
  size_t index_for(const void* p) const;

  // Return the address indicating the start of the region corresponding to
  // "index" in "_offset_array".
  HeapWord* address_for_index(size_t index) const;
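
  // These two are inverses up to card granularity: for any in-range "p",
  //   address_for_index(index_for(p)) <= p, and
  //   p <  address_for_index(index_for(p)) + N_words.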

  // Return the address "p" incremented by the size of
  // a region.  This method does not align the address
  // returned to the start of a region.  It is a simple
  // primitive.
  HeapWord* inc_by_region_size(HeapWord* p) const { return p + N_words; }

  // Shared space support
  void serialize(SerializeOopClosure* soc, HeapWord* start, HeapWord* end);
};

//////////////////////////////////////////////////////////////////////////
// The BlockOffsetArray whose subtypes use the BlockOffsetSharedArray.
//////////////////////////////////////////////////////////////////////////
class BlockOffsetArray: public BlockOffsetTable {
  friend class VMStructs;
  friend class G1BlockOffsetArray; // temp. until we restructure and cleanup
protected:
  // The following enums are used by do_block_internal() below
  enum Action {
    Action_single,      // BOT records a single block (see single_block())
    Action_mark,        // BOT marks the start of a block (see mark_block())
    Action_check        // Check that BOT records block correctly
                        // (see verify_single_block()).
  };

  enum SomePrivateConstants {
    N_words = BlockOffsetSharedArray::N_words,
    LogN = BlockOffsetSharedArray::LogN,
    // entries "e" of at least N_words mean "go back by Base^(e-N_words)."
    // All entries are less than "N_words + N_powers".
    LogBase = 4,
    Base = (1 << LogBase),
    N_powers = 14
  };

  static size_t power_to_cards_back(uint i) {
    // Shift a size_t, not an int: LogBase * i can reach 52, which would
    // overflow a 32-bit int shift.
    return (size_t)1 << (LogBase * i);
  }
  static size_t power_to_words_back(uint i) {
    return power_to_cards_back(i) * N_words;
  }
  static size_t entry_to_cards_back(u_char entry) {
    assert(entry >= N_words, "Precondition");
    return power_to_cards_back(entry - N_words);
  }
  static size_t entry_to_words_back(u_char entry) {
    assert(entry >= N_words, "Precondition");
    return power_to_words_back(entry - N_words);
  }
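
  // For example (illustrative arithmetic only): with N_words = 64 and
  // LogBase = 4, an entry of 64 means "go back 16^0 = 1 card", 65 means
  // "go back 16^1 = 16 cards", and 66 means "go back 16^2 = 256 cards":
  //
  //   entry_to_cards_back(66) == power_to_cards_back(2) == 256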

  // The shared array, which is shared with other BlockOffsetArray's
  // corresponding to different spaces within a generation or span of
  // memory.
  BlockOffsetSharedArray* _array;

  // The space that owns this subregion.
  Space* _sp;

  // If true, array entries are initialized to 0; otherwise, they are
  // initialized to point backwards to the beginning of the covered region.
  bool _init_to_zero;

  // An assertion-checking helper method for the set_remainder*() methods below.
  void check_reducing_assertion(bool reducing) { _array->check_reducing_assertion(reducing); }

  // Sets the entries
  // corresponding to the cards starting at "start" and ending at "end"
  // to point back to the card before "start": the interval [start, end)
  // is right-open.  The last parameter, reducing, indicates whether the
  // updates to individual entries always reduce the entry from a higher
  // to a lower value.  (For example, this would hold true during a
  // period in which only block splits were updating the BOT.)
  void set_remainder_to_point_to_start(HeapWord* start, HeapWord* end, bool reducing = false);
  // Same as above, except that the args here are a card _index_ interval
  // that is closed: [start_index, end_index]
  void set_remainder_to_point_to_start_incl(size_t start, size_t end, bool reducing = false);

  // A helper function for BOT adjustment/verification work
  void do_block_internal(HeapWord* blk_start, HeapWord* blk_end, Action action, bool reducing = false);

public:
  // The space may not have its bottom and top set yet, which is why the
  // region is passed as a parameter.  If "init_to_zero" is true, the
  // elements of the array are initialized to zero.  Otherwise, they are
  // initialized to point backwards to the beginning.
  BlockOffsetArray(BlockOffsetSharedArray* array, MemRegion mr,
                   bool init_to_zero_);

  // Note: this ought to be part of the constructor, but that would require
  // "this" to be passed as a parameter to a member constructor for
  // the containing concrete subtype of Space.
  // This would be legal C++, but MS VC++ doesn't allow it.
  void set_space(Space* sp) { _sp = sp; }

  // Resets the covered region to the given "mr".
  void set_region(MemRegion mr) {
    _bottom = mr.start();
    _end = mr.end();
  }

  // Note that the committed size of the covered space may have changed,
  // so the table's size may also need to change.
  virtual void resize(size_t new_word_size) {
    HeapWord* new_end = _bottom + new_word_size;
    if (_end < new_end && !init_to_zero()) {
      // verify that the old and new boundaries are also card boundaries
      assert(_array->is_card_boundary(_end),
             "_end not a card boundary");
      assert(_array->is_card_boundary(new_end),
             "new _end would not be a card boundary");
      // set all the newly added cards
      _array->set_offset_array(_end, new_end, N_words);
    }
    _end = new_end;  // update _end
  }

  // Adjust the BOT to show that it has a single block in the
  // range [blk_start, blk_start + size).  All necessary BOT
  // cards are adjusted, but _unallocated_block isn't.
  void single_block(HeapWord* blk_start, HeapWord* blk_end);
  void single_block(HeapWord* blk, size_t size) {
    single_block(blk, blk + size);
  }

  // When the alloc_block() call returns, the block offset table should
  // have enough information such that any subsequent block_start() call
  // with an argument equal to an address that is within the range
  // [blk_start, blk_end) would return the value blk_start, provided
  // there have been no calls in between that reset this information
  // (e.g. see BlockOffsetArrayNonContigSpace::single_block() call
  // for an appropriate range covering the said interval).
  // These methods expect to be called with [blk_start, blk_end)
  // representing a block of memory in the heap.
  virtual void alloc_block(HeapWord* blk_start, HeapWord* blk_end);
  void alloc_block(HeapWord* blk, size_t size) {
    alloc_block(blk, blk + size);
  }
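
  // An illustrative sketch of the contract above (not part of the
  // interface; "bot" is a hypothetical BlockOffsetArray*):
  //
  //   bot->alloc_block(blk_start, blk_end);
  //   // For any addr with blk_start <= addr < blk_end, and with no
  //   // intervening calls that reset this information:
  //   assert(bot->block_start(addr) == blk_start, "per the contract");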

  // If true, initialize array slots with no allocated blocks to zero.
  // Otherwise, make them point back to the front.
  bool init_to_zero() { return _init_to_zero; }
  // Corresponding setter
  void set_init_to_zero(bool val) {
    _init_to_zero = val;
    assert(_array != NULL, "_array should be non-NULL");
    _array->set_init_to_zero(val);
  }

  // Debugging
  // Return the index of the last entry in the "active" region.
  virtual size_t last_active_index() const = 0;
  // Verify the block offset table
  void verify() const;
  void check_all_cards(size_t left_card, size_t right_card) const;
};

////////////////////////////////////////////////////////////////////////////
// A subtype of BlockOffsetArray that takes advantage of the fact
// that its underlying space is a NonContiguousSpace, so that some
// specialized interfaces can be made available for spaces that
// manipulate the table.
////////////////////////////////////////////////////////////////////////////
class BlockOffsetArrayNonContigSpace: public BlockOffsetArray {
  friend class VMStructs;
private:
  // The portion [_unallocated_block, _sp.end()) of the space that
  // is a single block known not to contain any objects.
  // NOTE: See BlockOffsetArrayUseUnallocatedBlock flag.
  HeapWord* _unallocated_block;

public:
  BlockOffsetArrayNonContigSpace(BlockOffsetSharedArray* array, MemRegion mr):
    BlockOffsetArray(array, mr, false),
    _unallocated_block(_bottom) { }

  // accessor
  HeapWord* unallocated_block() const {
    assert(BlockOffsetArrayUseUnallocatedBlock,
           "_unallocated_block is not being maintained");
    return _unallocated_block;
  }

  void set_unallocated_block(HeapWord* block) {
    assert(BlockOffsetArrayUseUnallocatedBlock,
           "_unallocated_block is not being maintained");
    assert(block >= _bottom && block <= _end, "out of range");
    _unallocated_block = block;
  }

  // These methods expect to be called with [blk_start, blk_end)
  // representing a block of memory in the heap.
  void alloc_block(HeapWord* blk_start, HeapWord* blk_end);
  void alloc_block(HeapWord* blk, size_t size) {
    alloc_block(blk, blk + size);
  }

  // The following methods are useful and optimized for a
  // non-contiguous space.

  // Given a block [blk_start, blk_start + full_blk_size), and
  // a left_blk_size < full_blk_size, adjust the BOT to show two
  // blocks [blk_start, blk_start + left_blk_size) and
  // [blk_start + left_blk_size, blk_start + full_blk_size).
  // It is assumed (and verified in the non-product VM) that the
  // BOT was correct for the original block.
  void split_block(HeapWord* blk_start, size_t full_blk_size,
                   size_t left_blk_size);
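
  // For example (illustrative values only): splitting a 1000-word block
  // 300 words in:
  //
  //   bot->split_block(blk_start, 1000, 300);
  //   // The BOT now records [blk_start, blk_start + 300) and
  //   // [blk_start + 300, blk_start + 1000) as two separate blocks.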

  // Adjust BOT to show that it has a block in the range
  // [blk_start, blk_start + size).  Only the first card
  // of BOT is touched.  It is assumed (and verified in the
  // non-product VM) that the remaining cards of the block
  // are correct.
  void mark_block(HeapWord* blk_start, HeapWord* blk_end, bool reducing = false);
  void mark_block(HeapWord* blk, size_t size, bool reducing = false) {
    mark_block(blk, blk + size, reducing);
  }

  // Adjust _unallocated_block to indicate that a particular
  // block has been newly allocated or freed.  It is assumed (and
  // verified in the non-product VM) that the BOT is correct for
  // the given block.
  void allocated(HeapWord* blk_start, HeapWord* blk_end, bool reducing = false) {
    // Verify that the BOT shows [blk, blk + blk_size) to be one block.
    verify_single_block(blk_start, blk_end);
    if (BlockOffsetArrayUseUnallocatedBlock) {
      _unallocated_block = MAX2(_unallocated_block, blk_end);
    }
  }

  void allocated(HeapWord* blk, size_t size, bool reducing = false) {
    allocated(blk, blk + size, reducing);
  }

  void freed(HeapWord* blk_start, HeapWord* blk_end);
  void freed(HeapWord* blk, size_t size);

  HeapWord* block_start_unsafe(const void* addr) const;

  // Requires "addr" to be the start of a card and returns the
  // start of the block that contains the given address.
  HeapWord* block_start_careful(const void* addr) const;

  // Verification & debugging: ensure that the offset table reflects
  // the fact that the block [blk_start, blk_end) or [blk, blk + size)
  // is a single block of storage.  NOTE: can't const this because of
  // call to non-const do_block_internal() below.
  void verify_single_block(HeapWord* blk_start, HeapWord* blk_end)
    PRODUCT_RETURN;
  void verify_single_block(HeapWord* blk, size_t size) PRODUCT_RETURN;

  // Verify that the given block is before _unallocated_block
  void verify_not_unallocated(HeapWord* blk_start, HeapWord* blk_end)
    const PRODUCT_RETURN;
  void verify_not_unallocated(HeapWord* blk, size_t size)
    const PRODUCT_RETURN;

  // Debugging support
  virtual size_t last_active_index() const;
};

////////////////////////////////////////////////////////////////////////////
// A subtype of BlockOffsetArray that takes advantage of the fact
// that its underlying space is a ContiguousSpace, so that its "active"
// region can be more efficiently tracked (than for a non-contiguous space).
////////////////////////////////////////////////////////////////////////////
class BlockOffsetArrayContigSpace: public BlockOffsetArray {
  friend class VMStructs;
private:
  // allocation boundary at which offset array must be updated
  HeapWord* _next_offset_threshold;
  size_t    _next_offset_index;   // index corresponding to that boundary

  // Work function when allocation start crosses threshold.
  void alloc_block_work(HeapWord* blk_start, HeapWord* blk_end);

public:
  BlockOffsetArrayContigSpace(BlockOffsetSharedArray* array, MemRegion mr):
    BlockOffsetArray(array, mr, true) {
    _next_offset_threshold = NULL;
    _next_offset_index = 0;
  }

  void set_contig_space(ContiguousSpace* sp) { set_space((Space*)sp); }

  // Initialize the threshold for an empty heap.
  HeapWord* initialize_threshold();
  // Zero out the entry for _bottom (offset will be zero)
  void zero_bottom_entry();

  // Return the next threshold, the point at which the table should be
  // updated.
  HeapWord* threshold() const { return _next_offset_threshold; }

  // In general, these methods expect to be called with
  // [blk_start, blk_end) representing a block of memory in the heap.
  // In this implementation, however, we are OK even if blk_start and/or
  // blk_end are NULL because NULL is represented as 0, and thus
  // never exceeds the "_next_offset_threshold".
  void alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
    if (blk_end > _next_offset_threshold) {
      alloc_block_work(blk_start, blk_end);
    }
  }
  void alloc_block(HeapWord* blk, size_t size) {
    alloc_block(blk, blk + size);
  }
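
  // For example (illustrative only): if the current threshold is T and a
  // block [T - 2, T + 10) is allocated, blk_end exceeds T, so
  // alloc_block_work() updates the table and advances the threshold past
  // blk_end.  A later block that ends at or below the new threshold needs
  // no table update.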

  HeapWord* block_start_unsafe(const void* addr) const;

  void serialize(SerializeOopClosure* soc);

  // Debugging support
  virtual size_t last_active_index() const;
};

#endif // SHARE_VM_MEMORY_BLOCKOFFSETTABLE_HPP