Tue, 11 May 2010 14:35:43 -0700
6931180: Migration to recent versions of MS Platform SDK
6951582: Build problems on win64
Summary: Changes to enable building JDK7 with Microsoft Visual Studio 2010
Reviewed-by: ohair, art, ccheung, dcubed
/*
 * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_compactingPermGenGen.cpp.incl"

// An ObjectClosure helper: adjust all pointers in an object and clear
// its mark in order to prevent visiting any object twice. Unlike the
// recursive closure below, it is applied to every object in the shared
// read-write space. This helper is used when the RedefineClasses() API
// has been called.
class AdjustSharedObjectClosure : public ObjectClosure {
public:
  void do_object(oop obj) {
    if (obj->is_shared_readwrite()) {
      if (obj->mark()->is_marked()) {
        obj->init_mark();         // Don't revisit this object.
        obj->adjust_pointers();   // Adjust this object's references.
      }
    }
  }
};

// An OopClosure helper: Recursively adjust all pointers in an object
// and all objects referenced by it. Clear marks on objects in order
// to prevent visiting any object twice.
class RecursiveAdjustSharedObjectClosure : public OopClosure {
protected:
  template <class T> inline void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    if (obj->is_shared_readwrite()) {
      if (obj->mark()->is_marked()) {
        obj->init_mark();         // Don't revisit this object.
        obj->oop_iterate(this);   // Recurse - adjust objects referenced.
        obj->adjust_pointers();   // Adjust this object's references.

        // Special case: if a class has a read-only constant pool,
        // then the read-write objects referenced by the pool must
        // have their marks reset.

        if (obj->klass() == Universe::instanceKlassKlassObj()) {
          instanceKlass* ik = instanceKlass::cast((klassOop)obj);
          constantPoolOop cp = ik->constants();
          if (cp->is_shared_readonly()) {
            cp->oop_iterate(this);
          }
        }
      }
    }
  }
public:
  virtual void do_oop(oop* p)       { RecursiveAdjustSharedObjectClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { RecursiveAdjustSharedObjectClosure::do_oop_work(p); }
};

// We need to go through all placeholders in the system dictionary and
// try to resolve them into shared classes. Other threads might be in
// the process of loading a shared class and have strong roots on
// their stack to the class without having added the class to the
// dictionary yet. This means the class will be marked during phase 1
// but will not be unmarked during the application of the
// RecursiveAdjustSharedObjectClosure to the SystemDictionary. Note
// that we must not call find_shared_class with non-read-only symbols
// as doing so can cause hash codes to be computed, destroying
// forwarding pointers.
class TraversePlaceholdersClosure : public OopClosure {
protected:
  template <class T> inline void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    if (obj->klass() == Universe::symbolKlassObj() &&
        obj->is_shared_readonly()) {
      symbolHandle sym((symbolOop) obj);
      oop k = SystemDictionary::find_shared_class(sym);
      if (k != NULL) {
        RecursiveAdjustSharedObjectClosure clo;
        clo.do_oop(&k);
      }
    }
  }
public:
  virtual void do_oop(oop* p)       { TraversePlaceholdersClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { TraversePlaceholdersClosure::do_oop_work(p); }
};

void CompactingPermGenGen::initialize_performance_counters() {

  const char* gen_name = "perm";

  // Generation Counters - generation 2, 1 subspace
  _gen_counters = new GenerationCounters(gen_name, 2, 1, &_virtual_space);

  _space_counters = new CSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       _the_space, _gen_counters);
}

void CompactingPermGenGen::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

CompactingPermGenGen::CompactingPermGenGen(ReservedSpace rs,
                                           ReservedSpace shared_rs,
                                           size_t initial_byte_size,
                                           int level, GenRemSet* remset,
                                           ContiguousSpace* space,
                                           PermanentGenerationSpec* spec_) :
  OneContigSpaceCardGeneration(rs, initial_byte_size, MinPermHeapExpansion,
                               level, remset, space) {

  set_spec(spec_);
  if (!UseSharedSpaces && !DumpSharedSpaces) {
    spec()->disable_sharing();
  }

  // Break virtual space into address ranges for all spaces.
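  //
  // With sharing enabled, the shared region sits directly above the
  // unshared region, and its pieces are laid out in this address order
  // (each range's end is the next range's bottom):
  //
  //   unshared_bottom .. unshared_end == shared_bottom
  //   readonly_bottom .. readonly_end
  //   readwrite_bottom .. readwrite_end
  //   miscdata_bottom .. miscdata_end
  //   misccode_bottom .. misccode_end == shared_end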
  if (spec()->enable_shared_spaces()) {
    shared_end = (HeapWord*)(shared_rs.base() + shared_rs.size());
    misccode_end = shared_end;
    misccode_bottom = misccode_end - heap_word_size(spec()->misc_code_size());
    miscdata_end = misccode_bottom;
    miscdata_bottom = miscdata_end - heap_word_size(spec()->misc_data_size());
    readwrite_end = miscdata_bottom;
    readwrite_bottom =
      readwrite_end - heap_word_size(spec()->read_write_size());
    readonly_end = readwrite_bottom;
    readonly_bottom =
      readonly_end - heap_word_size(spec()->read_only_size());
    shared_bottom = readonly_bottom;
    unshared_end = shared_bottom;
    assert((char*)shared_bottom == shared_rs.base(), "shared space mismatch");
  } else {
    shared_end = (HeapWord*)(rs.base() + rs.size());
    misccode_end = shared_end;
    misccode_bottom = shared_end;
    miscdata_end = shared_end;
    miscdata_bottom = shared_end;
    readwrite_end = shared_end;
    readwrite_bottom = shared_end;
    readonly_end = shared_end;
    readonly_bottom = shared_end;
    shared_bottom = shared_end;
    unshared_end = shared_bottom;
  }
  unshared_bottom = (HeapWord*) rs.base();

  // Verify shared and unshared spaces adjacent.
  assert((char*)shared_bottom == rs.base()+rs.size(), "shared space mismatch");
  assert(unshared_end > unshared_bottom, "shared space mismatch");

  // Split reserved memory into pieces.
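  // The pieces come off the shared reservation in address order:
  // read-only, read-write, misc data, then misc code. Each
  // first_part/last_part pair below peels the next piece off the front
  // of what remains.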
  ReservedSpace ro_rs   = shared_rs.first_part(spec()->read_only_size(),
                                               UseSharedSpaces);
  ReservedSpace tmp_rs1 = shared_rs.last_part(spec()->read_only_size());
  ReservedSpace rw_rs   = tmp_rs1.first_part(spec()->read_write_size(),
                                             UseSharedSpaces);
  ReservedSpace tmp_rs2 = tmp_rs1.last_part(spec()->read_write_size());
  ReservedSpace md_rs   = tmp_rs2.first_part(spec()->misc_data_size(),
                                             UseSharedSpaces);
  ReservedSpace mc_rs   = tmp_rs2.last_part(spec()->misc_data_size());

  _shared_space_size = spec()->read_only_size()
                     + spec()->read_write_size()
                     + spec()->misc_data_size()
                     + spec()->misc_code_size();

  // Allocate the unshared (default) space.
  _the_space = new ContigPermSpace(_bts,
    MemRegion(unshared_bottom, heap_word_size(initial_byte_size)));
  if (_the_space == NULL)
    vm_exit_during_initialization("Could not allocate an unshared"
                                  " CompactingPermGen Space");

  // Allocate shared spaces
  if (spec()->enable_shared_spaces()) {

    // If mapping a shared file, the space is not committed, don't
    // mangle.
    NOT_PRODUCT(bool old_ZapUnusedHeapArea = ZapUnusedHeapArea;)
    NOT_PRODUCT(if (UseSharedSpaces) ZapUnusedHeapArea = false;)

    // Commit the memory behind the shared spaces if dumping (not
    // mapping).
    if (DumpSharedSpaces) {
      _ro_vs.initialize(ro_rs, spec()->read_only_size());
      _rw_vs.initialize(rw_rs, spec()->read_write_size());
      _md_vs.initialize(md_rs, spec()->misc_data_size());
      _mc_vs.initialize(mc_rs, spec()->misc_code_size());
    }

    // Allocate the shared spaces.
    _ro_bts = new BlockOffsetSharedArray(
                  MemRegion(readonly_bottom,
                            heap_word_size(spec()->read_only_size())),
                  heap_word_size(spec()->read_only_size()));
    _ro_space = new OffsetTableContigSpace(_ro_bts,
                  MemRegion(readonly_bottom, readonly_end));
    _rw_bts = new BlockOffsetSharedArray(
                  MemRegion(readwrite_bottom,
                            heap_word_size(spec()->read_write_size())),
                  heap_word_size(spec()->read_write_size()));
    _rw_space = new OffsetTableContigSpace(_rw_bts,
                  MemRegion(readwrite_bottom, readwrite_end));

    // Restore mangling flag.
    NOT_PRODUCT(ZapUnusedHeapArea = old_ZapUnusedHeapArea;)

    if (_ro_space == NULL || _rw_space == NULL)
      vm_exit_during_initialization("Could not allocate a shared space");

    // Cover both shared spaces entirely with cards.
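    // (Only the read-only and read-write spaces hold heap objects; the
    // misc data and misc code regions therefore need no card coverage.)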
    _rs->resize_covered_region(MemRegion(readonly_bottom, readwrite_end));

    if (UseSharedSpaces) {

      // Map in the regions in the shared file.
      FileMapInfo* mapinfo = FileMapInfo::current_info();
      size_t image_alignment = mapinfo->alignment();
      CollectedHeap* ch = Universe::heap();
      if ((!mapinfo->map_space(ro, ro_rs, _ro_space)) ||
          (!mapinfo->map_space(rw, rw_rs, _rw_space)) ||
          (!mapinfo->map_space(md, md_rs, NULL)) ||
          (!mapinfo->map_space(mc, mc_rs, NULL)) ||
          // check the alignment constraints
          (ch == NULL || ch->kind() != CollectedHeap::GenCollectedHeap ||
           image_alignment !=
           ((GenCollectedHeap*)ch)->gen_policy()->max_alignment())) {
        // Mapping failed (e.g. the base addresses didn't match) or the
        // alignment constraints were not met; skip sharing, but continue.
        shared_rs.release();
        spec()->disable_sharing();
        // If -Xshare:on is specified, print out the error message and exit VM,
        // otherwise, set UseSharedSpaces to false and continue.
        if (RequireSharedSpaces) {
          vm_exit_during_initialization("Unable to use shared archive.", NULL);
        } else {
          FLAG_SET_DEFAULT(UseSharedSpaces, false);
        }

        // Note: freeing the block offset array objects does not
        // currently free up the underlying storage.
        delete _ro_bts;
        _ro_bts = NULL;
        delete _ro_space;
        _ro_space = NULL;
        delete _rw_bts;
        _rw_bts = NULL;
        delete _rw_space;
        _rw_space = NULL;
        shared_end = (HeapWord*)(rs.base() + rs.size());
        _rs->resize_covered_region(MemRegion(shared_bottom, shared_bottom));
      }
    }

    // Reserved region includes shared spaces for oop.is_in_reserved().
    _reserved.set_end(shared_end);

  } else {
    _ro_space = NULL;
    _rw_space = NULL;
  }
}

// Do a complete scan of the shared read-write space to catch all
// objects which contain references to any younger generation. Forward
// the pointers. Avoid space_iterate, as actually visiting all the
// objects in the space will page in more objects than we need.
// Instead, use the system dictionary as strong roots into the read-write
// space.
//
// If a RedefineClasses() call has been made, then we have to iterate
// over the entire shared read-write space in order to find all the
// objects that need to be forwarded. For example, it is possible for
// an nmethod to be found and marked in GC phase-1 only for the nmethod
// to be freed by the time we reach GC phase-3. The underlying method
// is still marked, but we can't (easily) find it in GC phase-3 so we
// blow up in GC phase-4. With RedefineClasses() we want replaced code
// (EMCP or obsolete) to go away (i.e., be collectible) once it is no
// longer being executed by any thread so we keep minimal attachments
// to the replaced code. However, we can't guarantee when those EMCP
// or obsolete methods will be collected so they may still be out there
// even after we've severed our minimal attachments.

void CompactingPermGenGen::pre_adjust_pointers() {
  if (spec()->enable_shared_spaces()) {
    if (JvmtiExport::has_redefined_a_class()) {
      // RedefineClasses() requires a brute force approach
      AdjustSharedObjectClosure blk;
      rw_space()->object_iterate(&blk);
    } else {
      RecursiveAdjustSharedObjectClosure blk;
      Universe::oops_do(&blk);
      StringTable::oops_do(&blk);
      SystemDictionary::always_strong_classes_do(&blk);
      TraversePlaceholdersClosure tpc;
      SystemDictionary::placeholders_do(&tpc);
    }
  }
}

#ifdef ASSERT
class VerifyMarksClearedClosure : public ObjectClosure {
public:
  void do_object(oop obj) {
    assert(SharedSkipVerify || !obj->mark()->is_marked(),
           "Shared oop still marked?");
  }
};
#endif

void CompactingPermGenGen::post_compact() {
#ifdef ASSERT
  if (!SharedSkipVerify && spec()->enable_shared_spaces()) {
    VerifyMarksClearedClosure blk;
    rw_space()->object_iterate(&blk);
  }
#endif
}

// Do not use in time-critical operations due to the possibility of paging
// in otherwise untouched or previously unread portions of the perm gen,
// for instance, the shared spaces. NOTE: Because CompactingPermGenGen
// derives from OneContigSpaceCardGeneration which is supposed to have a
// single space, and does not override its object_iterate() method,
// object iteration via that interface does not look at the objects in
// the shared spaces when using CDS. This should be fixed; see CR 6897798.
void CompactingPermGenGen::space_iterate(SpaceClosure* blk, bool usedOnly) {
  OneContigSpaceCardGeneration::space_iterate(blk, usedOnly);
  if (spec()->enable_shared_spaces()) {
    // Making the rw_space walkable will page in the entire space, and
    // is to be avoided in the case of time-critical operations.
    // However, this is required for Verify and heap dump operations.
    blk->do_space(ro_space());
    blk->do_space(rw_space());
  }
}

void CompactingPermGenGen::print_on(outputStream* st) const {
  OneContigSpaceCardGeneration::print_on(st);
  if (spec()->enable_shared_spaces()) {
    st->print(" ro");
    ro_space()->print_on(st);
    st->print(" rw");
    rw_space()->print_on(st);
  } else {
    st->print_cr("No shared spaces configured.");
  }
}

// References from the perm gen to younger generation objects may
// occur in static fields in Java classes or in constant pool references
// to String objects.

void CompactingPermGenGen::younger_refs_iterate(OopsInGenClosure* blk) {
  OneContigSpaceCardGeneration::younger_refs_iterate(blk);
  if (spec()->enable_shared_spaces()) {
    blk->set_generation(this);
    // ro_space has no younger gen refs.
    _rs->younger_refs_in_space_iterate(rw_space(), blk);
    blk->reset_generation();
  }
}

// Shared spaces are addressed in pre_adjust_pointers.
void CompactingPermGenGen::adjust_pointers() {
  the_space()->adjust_pointers();
}

void CompactingPermGenGen::compact() {
  the_space()->compact();
}

size_t CompactingPermGenGen::contiguous_available() const {
  // Don't include shared spaces.
  return OneContigSpaceCardGeneration::contiguous_available()
         - _shared_space_size;
}

size_t CompactingPermGenGen::max_capacity() const {
  // Don't include shared spaces.
  assert(UseSharedSpaces || (_shared_space_size == 0),
         "If not used, the size of shared spaces should be 0");
  return OneContigSpaceCardGeneration::max_capacity()
         - _shared_space_size;
}

// There are no young generation references left, so clear this
// generation's main space's card table entries. Do NOT clear the card
// table entries for the read-only space (its entries are always clear)
// or the read-write space (its entries carry valuable information).

void CompactingPermGenGen::clear_remembered_set() {
  _rs->clear(MemRegion(the_space()->bottom(), the_space()->end()));
}

// Objects in this generation's main space may have moved, so invalidate
// that space's cards. Do NOT invalidate the card table entries for the
// read-only or read-write spaces, as those objects never move.

void CompactingPermGenGen::invalidate_remembered_set() {
  _rs->invalidate(used_region());
}

void CompactingPermGenGen::verify(bool allow_dirty) {
  the_space()->verify(allow_dirty);
  if (!SharedSkipVerify && spec()->enable_shared_spaces()) {
    ro_space()->verify(allow_dirty);
    rw_space()->verify(allow_dirty);
  }
}

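// Definitions of the static space-boundary fields declared in
// compactingPermGenGen.hpp.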
HeapWord* CompactingPermGenGen::unshared_bottom;
HeapWord* CompactingPermGenGen::unshared_end;
HeapWord* CompactingPermGenGen::shared_bottom;
HeapWord* CompactingPermGenGen::shared_end;
HeapWord* CompactingPermGenGen::readonly_bottom;
HeapWord* CompactingPermGenGen::readonly_end;
HeapWord* CompactingPermGenGen::readwrite_bottom;
HeapWord* CompactingPermGenGen::readwrite_end;
HeapWord* CompactingPermGenGen::miscdata_bottom;
HeapWord* CompactingPermGenGen::miscdata_end;
HeapWord* CompactingPermGenGen::misccode_bottom;
HeapWord* CompactingPermGenGen::misccode_end;

// JVM/TI RedefineClasses() support:
bool CompactingPermGenGen::remap_shared_readonly_as_readwrite() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  if (UseSharedSpaces) {
    // remap the shared readonly space to shared readwrite, private
    FileMapInfo* mapinfo = FileMapInfo::current_info();
    if (!mapinfo->remap_shared_readonly_as_readwrite()) {
      return false;
    }
  }
  return true;
}

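// Definition of the static _vtbl_list member (declared in
// compactingPermGenGen.hpp); it backs the class's shared-space C++
// vtable support.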
void** CompactingPermGenGen::_vtbl_list;