Mon, 30 Jun 2008 17:04:59 -0700
6618726: Introduce -XX:+UnlockExperimentalVMOptions flag
Summary: experimental() flags will protect features of an experimental nature that are not supported in the regular product build. Made UseG1GC an experimental flag.
Reviewed-by: jmasa, kamg, coleenp
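
With this change, enabling G1 requires explicitly unlocking experimental options on the command line. Illustrative usage (the application jar name is a placeholder):

    java -XX:+UnlockExperimentalVMOptions -XX:+UseG1GC -jar MyApp.jar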
/*
 * Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_virtualspace.cpp.incl"

// ReservedSpace
ReservedSpace::ReservedSpace(size_t size) {
  initialize(size, 0, false, NULL);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large, char* requested_address) {
  initialize(size, alignment, large, requested_address);
}

char *
ReservedSpace::align_reserved_region(char* addr, const size_t len,
                                     const size_t prefix_size,
                                     const size_t prefix_align,
                                     const size_t suffix_size,
                                     const size_t suffix_align)
{
  assert(addr != NULL, "sanity");
  const size_t required_size = prefix_size + suffix_size;
  assert(len >= required_size, "len too small");

  const size_t s = size_t(addr);
  const size_t beg_ofs = (s + prefix_size) & (suffix_align - 1);
  const size_t beg_delta = beg_ofs == 0 ? 0 : suffix_align - beg_ofs;

  if (len < beg_delta + required_size) {
    return NULL; // Cannot do proper alignment.
  }
  const size_t end_delta = len - (beg_delta + required_size);

  if (beg_delta != 0) {
    os::release_memory(addr, beg_delta);
  }

  if (end_delta != 0) {
    char* release_addr = (char*) (s + beg_delta + required_size);
    os::release_memory(release_addr, end_delta);
  }

  return (char*) (s + beg_delta);
}

char* ReservedSpace::reserve_and_align(const size_t reserve_size,
                                       const size_t prefix_size,
                                       const size_t prefix_align,
                                       const size_t suffix_size,
                                       const size_t suffix_align)
{
  assert(reserve_size > prefix_size + suffix_size, "should not be here");

  char* raw_addr = os::reserve_memory(reserve_size, NULL, prefix_align);
  if (raw_addr == NULL) return NULL;

  char* result = align_reserved_region(raw_addr, reserve_size, prefix_size,
                                       prefix_align, suffix_size,
                                       suffix_align);
  if (result == NULL && !os::release_memory(raw_addr, reserve_size)) {
    fatal("os::release_memory failed");
  }

#ifdef ASSERT
  if (result != NULL) {
    const size_t raw = size_t(raw_addr);
    const size_t res = size_t(result);
    assert(res >= raw, "alignment decreased start addr");
    assert(res + prefix_size + suffix_size <= raw + reserve_size,
           "alignment increased end addr");
    assert((res & (prefix_align - 1)) == 0, "bad alignment of prefix");
    assert(((res + prefix_size) & (suffix_align - 1)) == 0,
           "bad alignment of suffix");
  }
#endif

  return result;
}

ReservedSpace::ReservedSpace(const size_t prefix_size,
                             const size_t prefix_align,
                             const size_t suffix_size,
                             const size_t suffix_align)
{
  assert(prefix_size != 0, "sanity");
  assert(prefix_align != 0, "sanity");
  assert(suffix_size != 0, "sanity");
  assert(suffix_align != 0, "sanity");
  assert((prefix_size & (prefix_align - 1)) == 0,
         "prefix_size not divisible by prefix_align");
  assert((suffix_size & (suffix_align - 1)) == 0,
         "suffix_size not divisible by suffix_align");
  assert((suffix_align & (prefix_align - 1)) == 0,
         "suffix_align not divisible by prefix_align");

  // On systems where the entire region has to be reserved and committed up
  // front, the compound alignment normally done by this method is unnecessary.
  const bool try_reserve_special = UseLargePages &&
    prefix_align == os::large_page_size();
  if (!os::can_commit_large_page_memory() && try_reserve_special) {
    initialize(prefix_size + suffix_size, prefix_align, true);
    return;
  }

  _base = NULL;
  _size = 0;
  _alignment = 0;
  _special = false;

  // Optimistically try to reserve the exact size needed.
  const size_t size = prefix_size + suffix_size;
  char* addr = os::reserve_memory(size, NULL, prefix_align);
  if (addr == NULL) return;

  // Check whether the result has the needed alignment (unlikely unless
  // prefix_align == suffix_align).
  const size_t ofs = (size_t(addr) + prefix_size) & (suffix_align - 1);
  if (ofs != 0) {
    // Wrong alignment. Release, allocate more space and do manual alignment.
    //
    // On most operating systems, another allocation with a somewhat larger size
    // will return an address "close to" that of the previous allocation. The
    // result is often the same address (if the kernel hands out virtual
    // addresses from low to high), or an address that is offset by the increase
    // in size. Exploit that to minimize the amount of extra space requested.
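    //
    // Illustrative example (assumed numbers): if suffix_align is 4M and the
    // misalignment ofs is 1M, then extra below is MAX2(1M, 4M - 1M) = 3M, so
    // the retry reserves size + 3M, which is enough slack to place the
    // prefix/suffix split on a 4M boundary wherever the new mapping lands.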
    if (!os::release_memory(addr, size)) {
      fatal("os::release_memory failed");
    }

    const size_t extra = MAX2(ofs, suffix_align - ofs);
    addr = reserve_and_align(size + extra, prefix_size, prefix_align,
                             suffix_size, suffix_align);
    if (addr == NULL) {
      // Try an even larger region. If this fails, address space is exhausted.
      addr = reserve_and_align(size + suffix_align, prefix_size,
                               prefix_align, suffix_size, suffix_align);
    }
  }

  _base = addr;
  _size = size;
  _alignment = prefix_align;
}

void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
                               char* requested_address) {
  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  _base = NULL;
  _size = 0;
  _special = false;
  _alignment = 0;
  if (size == 0) {
    return;
  }

  // If OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  bool special = large && !os::can_commit_large_page_memory();
  char* base = NULL;

  if (special) {
    // It's not hard to implement reserve_memory_special() such that it can
    // allocate at fixed address, but there seems no use of this feature
    // for now, so it's not implemented.
    assert(requested_address == NULL, "not implemented");

    base = os::reserve_memory_special(size);

    if (base != NULL) {
      // Check alignment constraints
      if (alignment > 0) {
        assert((uintptr_t) base % alignment == 0,
               "Large pages returned a non-aligned address");
      }
      _special = true;
    } else {
      // failed; try to reserve regular memory below
    }
  }

  if (base == NULL) {
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid over mapping something
    // important. If available space is not detected, return NULL.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size, requested_address);
    } else {
      base = os::reserve_memory(size, NULL, alignment);
    }

    if (base == NULL) return;

    // Check alignment constraints
    if (alignment > 0 && ((size_t)base & (alignment - 1)) != 0) {
      // Base not aligned, retry
      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
      // Reserve size large enough to do manual alignment and
      // increase size to a multiple of the desired alignment
      size = align_size_up(size, alignment);
      size_t extra_size = size + alignment;
      do {
        char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
        if (extra_base == NULL) return;
        // Do manual alignment
        base = (char*) align_size_up((uintptr_t) extra_base, alignment);
        assert(base >= extra_base, "just checking");
        // Re-reserve the region at the aligned base address.
        os::release_memory(extra_base, extra_size);
        base = os::reserve_memory(size, base);
      } while (base == NULL);
    }
  }
  // Done
  _base = base;
  _size = size;
  _alignment = MAX2(alignment, (size_t) os::vm_page_size());

  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
         "area must be distinguishable from marks for mark-sweep");
  assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
         "area must be distinguishable from marks for mark-sweep");
}

ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
                             bool special) {
  assert((size % os::vm_allocation_granularity()) == 0,
         "size not allocation aligned");
  _base = base;
  _size = size;
  _alignment = alignment;
  _special = special;
}

ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
                                        bool split, bool realloc) {
  assert(partition_size <= size(), "partition failed");
  if (split) {
    os::split_reserved_memory(_base, _size, partition_size, realloc);
  }
  ReservedSpace result(base(), partition_size, alignment, special());
  return result;
}

ReservedSpace
ReservedSpace::last_part(size_t partition_size, size_t alignment) {
  assert(partition_size <= size(), "partition failed");
  ReservedSpace result(base() + partition_size, size() - partition_size,
                       alignment, special());
  return result;
}

size_t ReservedSpace::page_align_size_up(size_t size) {
  return align_size_up(size, os::vm_page_size());
}

size_t ReservedSpace::page_align_size_down(size_t size) {
  return align_size_down(size, os::vm_page_size());
}

size_t ReservedSpace::allocation_align_size_up(size_t size) {
  return align_size_up(size, os::vm_allocation_granularity());
}

size_t ReservedSpace::allocation_align_size_down(size_t size) {
  return align_size_down(size, os::vm_allocation_granularity());
}

void ReservedSpace::release() {
  if (is_reserved()) {
    if (special()) {
      os::release_memory_special(_base, _size);
    } else {
      os::release_memory(_base, _size);
    }
    _base = NULL;
    _size = 0;
    _special = false;
  }
}

// VirtualSpace

VirtualSpace::VirtualSpace() {
  _low_boundary = NULL;
  _high_boundary = NULL;
  _low = NULL;
  _high = NULL;
  _lower_high = NULL;
  _middle_high = NULL;
  _upper_high = NULL;
  _lower_high_boundary = NULL;
  _middle_high_boundary = NULL;
  _upper_high_boundary = NULL;
  _lower_alignment = 0;
  _middle_alignment = 0;
  _upper_alignment = 0;
}

bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
  if (!rs.is_reserved()) return false; // allocation failed.
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  _low_boundary = rs.base();
  _high_boundary = low_boundary() + rs.size();

  _low = low_boundary();
  _high = low();

  _special = rs.special();

  // When a VirtualSpace begins life at a large size, make all future expansion
  // and shrinking occur aligned to a granularity of large pages. This avoids
  // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system. Empirically, we see that with a 4MB
  // page size, the only spaces that get handled this way are codecache and
  // the heap itself, both of which provide a substantial performance
  // boost in many benchmarks when covered by large pages.
  //
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not aligned so already.
  _lower_alignment = os::vm_page_size();
  _middle_alignment = os::page_size_for_region(rs.size(), rs.size(), 1);
  _upper_alignment = os::vm_page_size();
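
  // Illustrative example (assumed sizes): for a 512M space on a platform with
  // 4M large pages (and large pages enabled), _middle_alignment becomes 4M
  // while the lower and upper alignments stay at the default page size, so
  // only the unaligned head and tail of the space use small pages.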

  // End of each region
  _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
  _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
  _upper_high_boundary = high_boundary();

  // High address of each region
  _lower_high = low_boundary();
  _middle_high = lower_high_boundary();
  _upper_high = middle_high_boundary();

  // commit to initial size
  if (committed_size > 0) {
    if (!expand_by(committed_size)) {
      return false;
    }
  }
  return true;
}

VirtualSpace::~VirtualSpace() {
  release();
}

void VirtualSpace::release() {
  (void)os::release_memory(low_boundary(), reserved_size());
  _low_boundary = NULL;
  _high_boundary = NULL;
  _low = NULL;
  _high = NULL;
  _lower_high = NULL;
  _middle_high = NULL;
  _upper_high = NULL;
  _lower_high_boundary = NULL;
  _middle_high_boundary = NULL;
  _upper_high_boundary = NULL;
  _lower_alignment = 0;
  _middle_alignment = 0;
  _upper_alignment = 0;
  _special = false;
}

size_t VirtualSpace::committed_size() const {
  return pointer_delta(high(), low(), sizeof(char));
}

size_t VirtualSpace::reserved_size() const {
  return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
}

size_t VirtualSpace::uncommitted_size() const {
  return reserved_size() - committed_size();
}

bool VirtualSpace::contains(const void* p) const {
  return low() <= (const char*) p && (const char*) p < high();
}

/*
   First we need to determine if a particular virtual space is using large
   pages. This is done in the initialize function and only virtual spaces
   that are larger than LargePageSizeInBytes use large pages. Once we
   have determined this, all expand_by and shrink_by calls must grow and
   shrink by large-page-sized chunks. If a particular request
   is within the current large page, the call to commit and uncommit memory
   can be ignored. In the case that the low and high boundaries of this
   space are not large page aligned, the pages leading up to the first large
   page address and the pages after the last large page address must be
   allocated with default pages.
*/
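/*
   Illustrative layout (not to scale), assuming the space is large enough for
   the middle region to use large-page alignment:

     low_boundary   lower_high_boundary        middle_high_boundary   high_boundary
          |----- lower -----|--------- middle ---------|----- upper -----|
            (small pages)      (large-page aligned)       (small pages)

   lower_high_boundary is low_boundary rounded up to middle_alignment, and
   middle_high_boundary is high_boundary rounded down to middle_alignment, so
   the middle region is the largest large-page-aligned subrange of the space.
*/
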
bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
  if (uncommitted_size() < bytes) return false;

  if (special()) {
    // don't commit memory if the entire space is pinned in memory
    _high += bytes;
    return true;
  }

  char* previous_high = high();
  char* unaligned_new_high = high() + bytes;
  assert(unaligned_new_high <= high_boundary(),
         "cannot expand by more than upper boundary");

  // Calculate where the new high for each of the regions should be. If
  // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
  // then the unaligned lower and upper new highs would be the
  // lower_high() and upper_high() respectively.
  char* unaligned_lower_new_high =
    MIN2(unaligned_new_high, lower_high_boundary());
  char* unaligned_middle_new_high =
    MIN2(unaligned_new_high, middle_high_boundary());
  char* unaligned_upper_new_high =
    MIN2(unaligned_new_high, upper_high_boundary());

  // Align the new highs based on the region's alignment. Lower and upper
  // alignment will always be the default page size. Middle alignment will be
  // LargePageSizeInBytes if the actual size of the virtual space is in
  // fact larger than LargePageSizeInBytes.
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());

  // Determine which regions need to grow in this expand_by call.
  // If you are growing in the lower region, high() must be in that
  // region so calculate the size based on high(). For the middle and
  // upper regions, determine the starting point of growth based on the
  // location of high(). By getting the MAX of the region's low address
  // (or the previous region's high address) and high(), we can tell if it
  // is an intra or inter region growth.
  size_t lower_needs = 0;
  if (aligned_lower_new_high > lower_high()) {
    lower_needs =
      pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high > middle_high()) {
    middle_needs =
      pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
  }
  size_t upper_needs = 0;
  if (aligned_upper_new_high > upper_high()) {
    upper_needs =
      pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
  }

  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");

  // Commit regions
  if (lower_needs > 0) {
    assert(low_boundary() <= lower_high() &&
           lower_high() + lower_needs <= lower_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(lower_high(), lower_needs)) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    } else {
      _lower_high += lower_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= middle_high() &&
           middle_high() + middle_needs <= middle_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(middle_high(), middle_needs, middle_alignment())) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    }
    _middle_high += middle_needs;
  }
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= upper_high() &&
           upper_high() + upper_needs <= upper_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(upper_high(), upper_needs)) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    } else {
      _upper_high += upper_needs;
    }
  }

  if (pre_touch || AlwaysPreTouch) {
    int vm_ps = os::vm_page_size();
    for (char* curr = previous_high;
         curr < unaligned_new_high;
         curr += vm_ps) {
      // Note the use of a write here; originally we tried just a read, but
      // since the value read was unused, the optimizer removed the read.
      // If we ever have a concurrent touchahead thread, we'll want to use
      // a read, to avoid the potential of overwriting data (if a mutator
      // thread beats the touchahead thread to a page). There are various
      // ways of making sure this read is not optimized away: for example,
      // generating the code for a read procedure at runtime.
      *curr = 0;
    }
  }

  _high += bytes;
  return true;
}

// A page is uncommitted if the contents of the entire page is deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary
// in which case that particular page can now be uncommitted.
void VirtualSpace::shrink_by(size_t size) {
  if (committed_size() < size)
    fatal("Cannot shrink virtual space to negative size");

  if (special()) {
    // don't uncommit if the entire space is pinned in memory
    _high -= size;
    return;
  }

  char* unaligned_new_high = high() - size;
  assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");

  // Calculate new unaligned address
  char* unaligned_upper_new_high =
    MAX2(unaligned_new_high, middle_high_boundary());
  char* unaligned_middle_new_high =
    MAX2(unaligned_new_high, lower_high_boundary());
  char* unaligned_lower_new_high =
    MAX2(unaligned_new_high, low_boundary());

  // Align address to region's alignment
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());

  // Determine which regions need to shrink
  size_t upper_needs = 0;
  if (aligned_upper_new_high < upper_high()) {
    upper_needs =
      pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high < middle_high()) {
    middle_needs =
      pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
  }
  size_t lower_needs = 0;
  if (aligned_lower_new_high < lower_high()) {
    lower_needs =
      pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
  }

  // Check contiguity.
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");

  // Uncommit
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= aligned_upper_new_high &&
           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _upper_high -= upper_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= aligned_middle_new_high &&
           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _middle_high -= middle_needs;
    }
  }
  if (lower_needs > 0) {
    assert(low_boundary() <= aligned_lower_new_high &&
           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _lower_high -= lower_needs;
    }
  }

  _high -= size;
}

#ifndef PRODUCT
void VirtualSpace::check_for_contiguity() {
  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(low() >= low_boundary(), "low");
  assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
  assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
  assert(high() <= upper_high(), "upper high");
}

void VirtualSpace::print() {
  tty->print("Virtual space:");
  if (special()) tty->print(" (pinned in memory)");
  tty->cr();
  tty->print_cr(" - committed: %ld", committed_size());
  tty->print_cr(" - reserved: %ld", reserved_size());
  tty->print_cr(" - [low, high]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", low(), high());
  tty->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", low_boundary(), high_boundary());
}

#endif