Wed, 11 Sep 2013 00:38:18 -0400
8024256: Minimal VM build is broken with PCH disabled
Reviewed-by: coleenp, twisti
1 /*
2 * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24 #ifndef SHARE_VM_MEMORY_METASPACE_HPP
25 #define SHARE_VM_MEMORY_METASPACE_HPP
27 #include "memory/allocation.hpp"
28 #include "memory/memRegion.hpp"
29 #include "runtime/virtualspace.hpp"
30 #include "utilities/exceptions.hpp"
32 // Metaspace
33 //
34 // Metaspaces are Arenas for the VM's metadata.
35 // They are allocated one per class loader object, and one for the null
36 // bootstrap class loader
// Eventually for the bootstrap loader we'll have a read-only section and a
// read-write section: written during DumpSharedSpaces, read under UseSharedSpaces
39 //
40 // block X ---+ +-------------------+
41 // | | Virtualspace |
42 // | | |
43 // | | |
44 // | |-------------------|
45 // | || Chunk |
46 // | || |
47 // | ||---------- |
48 // +------>||| block 0 | |
49 // ||---------- |
50 // ||| block 1 | |
51 // ||---------- |
52 // || |
53 // |-------------------|
54 // | |
55 // | |
56 // +-------------------+
57 //
59 class ClassLoaderData;
60 class Metablock;
61 class MetaWord;
62 class Mutex;
63 class outputStream;
64 class SpaceManager;
66 // Metaspaces each have a SpaceManager and allocations
67 // are done by the SpaceManager. Allocations are done
68 // out of the current Metachunk. When the current Metachunk
69 // is exhausted, the SpaceManager gets a new one from
70 // the current VirtualSpace. When the VirtualSpace is exhausted
71 // the SpaceManager gets a new one. The SpaceManager
72 // also manages freelists of available Chunks.
73 //
74 // Currently the space manager maintains the list of
75 // virtual spaces and the list of chunks in use. Its
76 // allocate() method returns a block for use as a
77 // quantum of metadata.
79 class VirtualSpaceList;
81 class Metaspace : public CHeapObj<mtClass> {
82 friend class VMStructs;
83 friend class SpaceManager;
84 friend class VM_CollectForMetadataAllocation;
85 friend class MetaspaceGC;
86 friend class MetaspaceAux;
88 public:
89 enum MetadataType {ClassType = 0,
90 NonClassType = ClassType + 1,
91 MetadataTypeCount = ClassType + 2
92 };
93 enum MetaspaceType {
94 StandardMetaspaceType,
95 BootMetaspaceType,
96 ROMetaspaceType,
97 ReadWriteMetaspaceType,
98 AnonymousMetaspaceType,
99 ReflectionMetaspaceType
100 };
102 private:
103 void initialize(Mutex* lock, MetaspaceType type);
105 // Align up the word size to the allocation word size
106 static size_t align_word_size_up(size_t);
108 // Aligned size of the metaspace.
109 static size_t _class_metaspace_size;
111 static size_t class_metaspace_size() {
112 return _class_metaspace_size;
113 }
114 static void set_class_metaspace_size(size_t metaspace_size) {
115 _class_metaspace_size = metaspace_size;
116 }
118 static size_t _first_chunk_word_size;
119 static size_t _first_class_chunk_word_size;
121 SpaceManager* _vsm;
122 SpaceManager* vsm() const { return _vsm; }
124 SpaceManager* _class_vsm;
125 SpaceManager* class_vsm() const { return _class_vsm; }
127 // Allocate space for metadata of type mdtype. This is space
128 // within a Metachunk and is used by
129 // allocate(ClassLoaderData*, size_t, bool, MetadataType, TRAPS)
130 // which returns a Metablock.
131 MetaWord* allocate(size_t word_size, MetadataType mdtype);
133 // Virtual Space lists for both classes and other metadata
134 static VirtualSpaceList* _space_list;
135 static VirtualSpaceList* _class_space_list;
137 static VirtualSpaceList* space_list() { return _space_list; }
138 static VirtualSpaceList* class_space_list() { return _class_space_list; }
139 static VirtualSpaceList* get_space_list(MetadataType mdtype) {
140 assert(mdtype != MetadataTypeCount, "MetadaTypeCount can't be used as mdtype");
141 return mdtype == ClassType ? class_space_list() : space_list();
142 }
144 // This is used by DumpSharedSpaces only, where only _vsm is used. So we will
145 // maintain a single list for now.
146 void record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size);
148 #ifdef _LP64
149 static void set_narrow_klass_base_and_shift(address metaspace_base, address cds_base);
151 // Returns true if can use CDS with metaspace allocated as specified address.
152 static bool can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base);
154 static void allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base);
156 static void initialize_class_space(ReservedSpace rs);
157 #endif
159 class AllocRecord : public CHeapObj<mtClass> {
160 public:
161 AllocRecord(address ptr, MetaspaceObj::Type type, int byte_size)
162 : _next(NULL), _ptr(ptr), _type(type), _byte_size(byte_size) {}
163 AllocRecord *_next;
164 address _ptr;
165 MetaspaceObj::Type _type;
166 int _byte_size;
167 };
169 AllocRecord * _alloc_record_head;
170 AllocRecord * _alloc_record_tail;
172 public:
174 Metaspace(Mutex* lock, MetaspaceType type);
175 ~Metaspace();
177 // Initialize globals for Metaspace
178 static void global_initialize();
180 static size_t first_chunk_word_size() { return _first_chunk_word_size; }
181 static size_t first_class_chunk_word_size() { return _first_class_chunk_word_size; }
183 char* bottom() const;
184 size_t used_words_slow(MetadataType mdtype) const;
185 size_t free_words(MetadataType mdtype) const;
186 size_t capacity_words_slow(MetadataType mdtype) const;
187 size_t waste_words(MetadataType mdtype) const;
189 size_t used_bytes_slow(MetadataType mdtype) const;
190 size_t capacity_bytes_slow(MetadataType mdtype) const;
192 static Metablock* allocate(ClassLoaderData* loader_data, size_t word_size,
193 bool read_only, MetaspaceObj::Type type, TRAPS);
194 void deallocate(MetaWord* ptr, size_t byte_size, bool is_class);
196 MetaWord* expand_and_allocate(size_t size,
197 MetadataType mdtype);
199 static bool contains(const void *ptr);
200 void dump(outputStream* const out) const;
202 // Free empty virtualspaces
203 static void purge();
205 void print_on(outputStream* st) const;
206 // Debugging support
207 void verify();
209 class AllocRecordClosure : public StackObj {
210 public:
211 virtual void doit(address ptr, MetaspaceObj::Type type, int byte_size) = 0;
212 };
214 void iterate(AllocRecordClosure *closure);
216 // Return TRUE only if UseCompressedKlassPointers is True and DumpSharedSpaces is False.
217 static bool using_class_space() {
218 return NOT_LP64(false) LP64_ONLY(UseCompressedKlassPointers && !DumpSharedSpaces);
219 }
221 };
223 class MetaspaceAux : AllStatic {
224 static size_t free_chunks_total(Metaspace::MetadataType mdtype);
226 public:
227 // Statistics for class space and data space in metaspace.
229 // These methods iterate over the classloader data graph
230 // for the given Metaspace type. These are slow.
231 static size_t used_bytes_slow(Metaspace::MetadataType mdtype);
232 static size_t free_in_bytes(Metaspace::MetadataType mdtype);
233 static size_t capacity_bytes_slow(Metaspace::MetadataType mdtype);
235 // Iterates over the virtual space list.
236 static size_t reserved_in_bytes(Metaspace::MetadataType mdtype);
238 // Running sum of space in all Metachunks that has been
239 // allocated to a Metaspace. This is used instead of
240 // iterating over all the classloaders. One for each
241 // type of Metadata
242 static size_t _allocated_capacity_words[Metaspace:: MetadataTypeCount];
243 // Running sum of space in all Metachunks that have
244 // are being used for metadata. One for each
245 // type of Metadata.
246 static size_t _allocated_used_words[Metaspace:: MetadataTypeCount];
248 public:
249 // Decrement and increment _allocated_capacity_words
250 static void dec_capacity(Metaspace::MetadataType type, size_t words);
251 static void inc_capacity(Metaspace::MetadataType type, size_t words);
253 // Decrement and increment _allocated_used_words
254 static void dec_used(Metaspace::MetadataType type, size_t words);
255 static void inc_used(Metaspace::MetadataType type, size_t words);
257 // Total of space allocated to metadata in all Metaspaces.
258 // This sums the space used in each Metachunk by
259 // iterating over the classloader data graph
260 static size_t used_bytes_slow() {
261 return used_bytes_slow(Metaspace::ClassType) +
262 used_bytes_slow(Metaspace::NonClassType);
263 }
265 // Used by MetaspaceCounters
266 static size_t free_chunks_total();
267 static size_t free_chunks_total_in_bytes();
268 static size_t free_chunks_total_in_bytes(Metaspace::MetadataType mdtype);
270 static size_t allocated_capacity_words(Metaspace::MetadataType mdtype) {
271 return _allocated_capacity_words[mdtype];
272 }
273 static size_t allocated_capacity_words() {
274 return _allocated_capacity_words[Metaspace::NonClassType] +
275 (Metaspace::using_class_space() ?
276 _allocated_capacity_words[Metaspace::ClassType] : 0);
277 }
278 static size_t allocated_capacity_bytes(Metaspace::MetadataType mdtype) {
279 return allocated_capacity_words(mdtype) * BytesPerWord;
280 }
281 static size_t allocated_capacity_bytes() {
282 return allocated_capacity_words() * BytesPerWord;
283 }
285 static size_t allocated_used_words(Metaspace::MetadataType mdtype) {
286 return _allocated_used_words[mdtype];
287 }
288 static size_t allocated_used_words() {
289 return _allocated_used_words[Metaspace::NonClassType] +
290 (Metaspace::using_class_space() ?
291 _allocated_used_words[Metaspace::ClassType] : 0);
292 }
293 static size_t allocated_used_bytes(Metaspace::MetadataType mdtype) {
294 return allocated_used_words(mdtype) * BytesPerWord;
295 }
296 static size_t allocated_used_bytes() {
297 return allocated_used_words() * BytesPerWord;
298 }
300 static size_t free_bytes();
301 static size_t free_bytes(Metaspace::MetadataType mdtype);
303 // Total capacity in all Metaspaces
304 static size_t capacity_bytes_slow() {
305 #ifdef PRODUCT
306 // Use allocated_capacity_bytes() in PRODUCT instead of this function.
307 guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
308 #endif
309 size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
310 size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
311 assert(allocated_capacity_bytes() == class_capacity + non_class_capacity,
312 err_msg("bad accounting: allocated_capacity_bytes() " SIZE_FORMAT
313 " class_capacity + non_class_capacity " SIZE_FORMAT
314 " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
315 allocated_capacity_bytes(), class_capacity + non_class_capacity,
316 class_capacity, non_class_capacity));
318 return class_capacity + non_class_capacity;
319 }
321 // Total space reserved in all Metaspaces
322 static size_t reserved_in_bytes() {
323 return reserved_in_bytes(Metaspace::ClassType) +
324 reserved_in_bytes(Metaspace::NonClassType);
325 }
327 static size_t min_chunk_size();
329 // Print change in used metadata.
330 static void print_metaspace_change(size_t prev_metadata_used);
331 static void print_on(outputStream * out);
332 static void print_on(outputStream * out, Metaspace::MetadataType mdtype);
334 static void print_class_waste(outputStream* out);
335 static void print_waste(outputStream* out);
336 static void dump(outputStream* out);
337 static void verify_free_chunks();
338 // Checks that the values returned by allocated_capacity_bytes() and
339 // capacity_bytes_slow() are the same.
340 static void verify_capacity();
341 static void verify_used();
342 static void verify_metrics();
343 };
// Metaspaces are deallocated when their class loaders are GC'ed.
// This class implements a policy for inducing GCs to recover
// Metaspaces.
349 class MetaspaceGC : AllStatic {
351 // The current high-water-mark for inducing a GC. When
352 // the capacity of all space in the virtual lists reaches this value,
353 // a GC is induced and the value is increased. This should be changed
354 // to the space actually used for allocations to avoid affects of
355 // fragmentation losses to partially used chunks. Size is in words.
356 static size_t _capacity_until_GC;
358 // After a GC is done any allocation that fails should try to expand
359 // the capacity of the Metaspaces. This flag is set during attempts
360 // to allocate in the VMGCOperation that does the GC.
361 static bool _expand_after_GC;
363 // For a CMS collection, signal that a concurrent collection should
364 // be started.
365 static bool _should_concurrent_collect;
367 static uint _shrink_factor;
369 static void set_capacity_until_GC(size_t v) { _capacity_until_GC = v; }
371 static size_t shrink_factor() { return _shrink_factor; }
372 void set_shrink_factor(uint v) { _shrink_factor = v; }
374 public:
376 static size_t capacity_until_GC() { return _capacity_until_GC; }
377 static void inc_capacity_until_GC(size_t v) { _capacity_until_GC += v; }
378 static void dec_capacity_until_GC(size_t v) {
379 _capacity_until_GC = _capacity_until_GC > v ? _capacity_until_GC - v : 0;
380 }
381 static bool expand_after_GC() { return _expand_after_GC; }
382 static void set_expand_after_GC(bool v) { _expand_after_GC = v; }
384 static bool should_concurrent_collect() { return _should_concurrent_collect; }
385 static void set_should_concurrent_collect(bool v) {
386 _should_concurrent_collect = v;
387 }
389 // The amount to increase the high-water-mark (_capacity_until_GC)
390 static size_t delta_capacity_until_GC(size_t word_size);
392 // It is expected that this will be called when the current capacity
393 // has been used and a GC should be considered.
394 static bool should_expand(VirtualSpaceList* vsl, size_t word_size);
396 // Calculate the new high-water mark at which to induce
397 // a GC.
398 static void compute_new_size();
399 };
401 #endif // SHARE_VM_MEMORY_METASPACE_HPP