Sat, 19 Jul 2008 17:38:22 -0400
6716785: implicit null checks not triggering with CompressedOops
Summary: Allocate alignment-sized page(s) below the Java heap so that memory accesses at heap_base + 1 page raise a signal and cause an implicit null check
Reviewed-by: kvn, jmasa, phh, jcoomes

/*
 * Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_virtualspace.cpp.incl"


// ReservedSpace
ReservedSpace::ReservedSpace(size_t size) {
  initialize(size, 0, false, NULL, 0);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             char* requested_address,
                             const size_t noaccess_prefix) {
  initialize(size + noaccess_prefix, alignment, large, requested_address,
             noaccess_prefix);
}
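
// Trim an over-sized reservation [addr, addr + len) down to
// prefix_size + suffix_size bytes, positioned so that the suffix (the bytes
// following the prefix) begins on a suffix_align boundary.  The unused head
// and tail of the original region are released back to the OS.  Returns the
// adjusted start address, or NULL (without releasing anything) if the region
// is too small to be aligned as requested.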
char *
ReservedSpace::align_reserved_region(char* addr, const size_t len,
                                     const size_t prefix_size,
                                     const size_t prefix_align,
                                     const size_t suffix_size,
                                     const size_t suffix_align)
{
  assert(addr != NULL, "sanity");
  const size_t required_size = prefix_size + suffix_size;
  assert(len >= required_size, "len too small");

  const size_t s = size_t(addr);
  const size_t beg_ofs = (s + prefix_size) & (suffix_align - 1);
  const size_t beg_delta = beg_ofs == 0 ? 0 : suffix_align - beg_ofs;
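  // For example, with addr = 0x10000, prefix_size = 0x1000 and
  // suffix_align = 0x4000: beg_ofs = 0x11000 & 0x3fff = 0x1000, so
  // beg_delta = 0x4000 - 0x1000 = 0x3000 and the region start moves up to
  // 0x13000, putting the suffix at the aligned address 0x14000.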

  if (len < beg_delta + required_size) {
     return NULL; // Cannot do proper alignment.
  }
  const size_t end_delta = len - (beg_delta + required_size);

  if (beg_delta != 0) {
    os::release_memory(addr, beg_delta);
  }

  if (end_delta != 0) {
    char* release_addr = (char*) (s + beg_delta + required_size);
    os::release_memory(release_addr, end_delta);
  }

  return (char*) (s + beg_delta);
}
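
// Reserve reserve_size bytes and use align_reserved_region() to carve out a
// prefix_size + suffix_size subregion with the requested alignments,
// releasing the excess.  Returns NULL if either the reservation or the
// alignment fails; a reservation that cannot be aligned is released in full.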
char* ReservedSpace::reserve_and_align(const size_t reserve_size,
                                       const size_t prefix_size,
                                       const size_t prefix_align,
                                       const size_t suffix_size,
                                       const size_t suffix_align)
{
  assert(reserve_size > prefix_size + suffix_size, "should not be here");

  char* raw_addr = os::reserve_memory(reserve_size, NULL, prefix_align);
  if (raw_addr == NULL) return NULL;

  char* result = align_reserved_region(raw_addr, reserve_size, prefix_size,
                                       prefix_align, suffix_size,
                                       suffix_align);
  if (result == NULL && !os::release_memory(raw_addr, reserve_size)) {
    fatal("os::release_memory failed");
  }

#ifdef ASSERT
  if (result != NULL) {
    const size_t raw = size_t(raw_addr);
    const size_t res = size_t(result);
    assert(res >= raw, "alignment decreased start addr");
    assert(res + prefix_size + suffix_size <= raw + reserve_size,
           "alignment increased end addr");
    assert((res & (prefix_align - 1)) == 0, "bad alignment of prefix");
    assert(((res + prefix_size) & (suffix_align - 1)) == 0,
           "bad alignment of suffix");
  }
#endif

  return result;
}

ReservedSpace::ReservedSpace(const size_t prefix_size,
                             const size_t prefix_align,
                             const size_t suffix_size,
                             const size_t suffix_align,
                             const size_t noaccess_prefix)
{
  assert(prefix_size != 0, "sanity");
  assert(prefix_align != 0, "sanity");
  assert(suffix_size != 0, "sanity");
  assert(suffix_align != 0, "sanity");
  assert((prefix_size & (prefix_align - 1)) == 0,
         "prefix_size not divisible by prefix_align");
  assert((suffix_size & (suffix_align - 1)) == 0,
         "suffix_size not divisible by suffix_align");
  assert((suffix_align & (prefix_align - 1)) == 0,
         "suffix_align not divisible by prefix_align");

  // Add the noaccess_prefix to the prefix size.
  const size_t adjusted_prefix_size = prefix_size + noaccess_prefix;
  const size_t size = adjusted_prefix_size + suffix_size;

  // On systems where the entire region has to be reserved and committed up
  // front, the compound alignment normally done by this method is unnecessary.
  const bool try_reserve_special = UseLargePages &&
    prefix_align == os::large_page_size();
  if (!os::can_commit_large_page_memory() && try_reserve_special) {
    initialize(size, prefix_align, true, NULL, noaccess_prefix);
    return;
  }

  _base = NULL;
  _size = 0;
  _alignment = 0;
  _special = false;
  _noaccess_prefix = 0;

  // Assert that if noaccess_prefix is used, it is the same as prefix_align.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == prefix_align, "noaccess prefix wrong");

  // Optimistically try to reserve the exact size needed.
  char* addr = os::reserve_memory(size, NULL, prefix_align);
  if (addr == NULL) return;

  // Check whether the result has the needed alignment (unlikely unless
  // prefix_align == suffix_align).
  const size_t ofs = (size_t(addr) + adjusted_prefix_size) & (suffix_align - 1);
  if (ofs != 0) {
    // Wrong alignment.  Release, allocate more space and do manual alignment.
    //
    // On most operating systems, another allocation with a somewhat larger size
    // will return an address "close to" that of the previous allocation.  The
    // result is often the same address (if the kernel hands out virtual
    // addresses from low to high), or an address that is offset by the increase
    // in size.  Exploit that to minimize the amount of extra space requested.
    if (!os::release_memory(addr, size)) {
      fatal("os::release_memory failed");
    }
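
    // If the retry returns the same base address, suffix_align - ofs extra
    // bytes let the start shift up to the aligned position; if it returns a
    // base lower by the size increase (ofs bytes), the region is already
    // aligned.  Taking the larger of the two covers both common outcomes.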
    const size_t extra = MAX2(ofs, suffix_align - ofs);
    addr = reserve_and_align(size + extra, adjusted_prefix_size, prefix_align,
                             suffix_size, suffix_align);
    if (addr == NULL) {
      // Try an even larger region.  If this fails, address space is exhausted.
      addr = reserve_and_align(size + suffix_align, adjusted_prefix_size,
                               prefix_align, suffix_size, suffix_align);
    }
  }

  _base = addr;
  _size = size;
  _alignment = prefix_align;
  _noaccess_prefix = noaccess_prefix;
}
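
// Reserve size bytes of virtual address space at the given alignment,
// optionally backed by large pages and optionally at requested_address.
// Any noaccess_prefix is reserved as part of the region; it is carved off
// and protected later by protect_noaccess_prefix().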
void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
                               char* requested_address,
                               const size_t noaccess_prefix) {
  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  _base = NULL;
  _size = 0;
  _special = false;
  _alignment = 0;
  _noaccess_prefix = 0;
  if (size == 0) {
    return;
  }

  // If OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  bool special = large && !os::can_commit_large_page_memory();
  char* base = NULL;

  if (special) {
    // It's not hard to implement reserve_memory_special() such that it can
    // allocate at fixed address, but there seems no use of this feature
    // for now, so it's not implemented.
    assert(requested_address == NULL, "not implemented");

    base = os::reserve_memory_special(size);

    if (base != NULL) {
      // Check alignment constraints
      if (alignment > 0) {
        assert((uintptr_t) base % alignment == 0,
               "Large pages returned a non-aligned address");
      }
      _special = true;
    } else {
      // failed; try to reserve regular memory below
    }
  }

  if (base == NULL) {
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid mapping over something
    // important.  If available space is not detected, return NULL.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size,
                                           requested_address - noaccess_prefix);
    } else {
      base = os::reserve_memory(size, NULL, alignment);
    }

    if (base == NULL) return;

    // Check alignment constraints
    if (alignment > 0 && ((size_t)base & (alignment - 1)) != 0) {
      // Base not aligned, retry
      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
      // Reserve size large enough to do manual alignment and
      // increase size to a multiple of the desired alignment
      size = align_size_up(size, alignment);
      size_t extra_size = size + alignment;
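      // Reserving size + alignment bytes guarantees that the span contains
      // an alignment-aligned address with at least size bytes of room above it.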
      char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
      if (extra_base == NULL) return;
      // Do manual alignment
      base = (char*) align_size_up((uintptr_t) extra_base, alignment);
      assert(base >= extra_base, "just checking");
      // Release unused areas
      size_t unused_bottom_size = base - extra_base;
      size_t unused_top_size = extra_size - size - unused_bottom_size;
      assert(unused_bottom_size % os::vm_allocation_granularity() == 0,
             "size not allocation aligned");
      assert(unused_top_size % os::vm_allocation_granularity() == 0,
             "size not allocation aligned");
      if (unused_bottom_size > 0) {
        os::release_memory(extra_base, unused_bottom_size);
      }
      if (unused_top_size > 0) {
        os::release_memory(base + size, unused_top_size);
      }
    }
  }
  // Done
  _base = base;
  _size = size;
  _alignment = MAX2(alignment, (size_t) os::vm_page_size());
  _noaccess_prefix = noaccess_prefix;

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == _alignment, "noaccess prefix wrong");

  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
         "area must be distinguishable from marks for mark-sweep");
  assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
         "area must be distinguishable from marks for mark-sweep");
}


ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
                             bool special) {
  assert((size % os::vm_allocation_granularity()) == 0,
         "size not allocation aligned");
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = 0;
  _special = special;
}


ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
                                        bool split, bool realloc) {
  assert(partition_size <= size(), "partition failed");
  if (split) {
    os::split_reserved_memory(_base, _size, partition_size, realloc);
  }
  ReservedSpace result(base(), partition_size, alignment, special());
  return result;
}


ReservedSpace
ReservedSpace::last_part(size_t partition_size, size_t alignment) {
  assert(partition_size <= size(), "partition failed");
  ReservedSpace result(base() + partition_size, size() - partition_size,
                       alignment, special());
  return result;
}


size_t ReservedSpace::page_align_size_up(size_t size) {
  return align_size_up(size, os::vm_page_size());
}


size_t ReservedSpace::page_align_size_down(size_t size) {
  return align_size_down(size, os::vm_page_size());
}


size_t ReservedSpace::allocation_align_size_up(size_t size) {
  return align_size_up(size, os::vm_allocation_granularity());
}


size_t ReservedSpace::allocation_align_size_down(size_t size) {
  return align_size_down(size, os::vm_allocation_granularity());
}


void ReservedSpace::release() {
  if (is_reserved()) {
    char *real_base = _base - _noaccess_prefix;
    const size_t real_size = _size + _noaccess_prefix;
    if (special()) {
      os::release_memory_special(real_base, real_size);
    } else {
      os::release_memory(real_base, real_size);
    }
    _base = NULL;
    _size = 0;
    _noaccess_prefix = 0;
    _special = false;
  }
}
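
// Compressed oops support: page(s) below the java heap are reserved as a
// no-access prefix.  The narrow oop base points at this prefix, so decoding
// a null narrow oop (plus a small field offset) yields an address inside the
// protected page(s); the resulting fault is turned into an implicit null
// check (see 6716785).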
void ReservedSpace::protect_noaccess_prefix(const size_t size) {
  // If there is no noaccess prefix, return.
  if (_noaccess_prefix == 0) return;

  assert(_noaccess_prefix >= (size_t)os::vm_page_size(),
         "must be at least page size big");

  // Protect memory at the base of the allocated region.
  // If special, the page was committed (only matters on windows)
  if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE,
                          _special)) {
    fatal("cannot protect protection page");
  }

  _base += _noaccess_prefix;
  _size -= _noaccess_prefix;
  assert((size == _size) && ((uintptr_t)_base % _alignment == 0),
         "must be exactly of required size and alignment");
}

ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment,
                                     bool large, char* requested_address) :
  ReservedSpace(size, alignment, large,
                requested_address,
                UseCompressedOops ? lcm(os::vm_page_size(), alignment) : 0) {
  // Only reserved space for the java heap should have a noaccess_prefix
  // if using compressed oops.
  protect_noaccess_prefix(size);
}

ReservedHeapSpace::ReservedHeapSpace(const size_t prefix_size,
                                     const size_t prefix_align,
                                     const size_t suffix_size,
                                     const size_t suffix_align) :
  ReservedSpace(prefix_size, prefix_align, suffix_size, suffix_align,
                UseCompressedOops ? lcm(os::vm_page_size(), prefix_align) : 0) {
  protect_noaccess_prefix(prefix_size + suffix_size);
}

// VirtualSpace

VirtualSpace::VirtualSpace() {
  _low_boundary = NULL;
  _high_boundary = NULL;
  _low = NULL;
  _high = NULL;
  _lower_high = NULL;
  _middle_high = NULL;
  _upper_high = NULL;
  _lower_high_boundary = NULL;
  _middle_high_boundary = NULL;
  _upper_high_boundary = NULL;
  _lower_alignment = 0;
  _middle_alignment = 0;
  _upper_alignment = 0;
  _special = false;
}


bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
  if (!rs.is_reserved()) return false;  // allocation failed.
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  _low_boundary = rs.base();
  _high_boundary = low_boundary() + rs.size();

  _low = low_boundary();
  _high = low();

  _special = rs.special();

  // When a VirtualSpace begins life at a large size, make all future expansion
  // and shrinking occur aligned to a granularity of large pages.  This avoids
  // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically, we see that with a 4MB
  // page size, the only spaces that get handled this way are codecache and
  // the heap itself, both of which provide a substantial performance
  // boost in many benchmarks when covered by large pages.
  //
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not aligned so already.
  _lower_alignment = os::vm_page_size();
  _middle_alignment = os::page_size_for_region(rs.size(), rs.size(), 1);
  _upper_alignment = os::vm_page_size();

  // End of each region
  _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
  _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
  _upper_high_boundary = high_boundary();

  // High address of each region
  _lower_high = low_boundary();
  _middle_high = lower_high_boundary();
  _upper_high = middle_high_boundary();

  // commit to initial size
  if (committed_size > 0) {
    if (!expand_by(committed_size)) {
      return false;
    }
  }
  return true;
}


VirtualSpace::~VirtualSpace() {
  release();
}


void VirtualSpace::release() {
  // This does not release memory it never reserved.
  // Caller must release via rs.release();
  _low_boundary = NULL;
  _high_boundary = NULL;
  _low = NULL;
  _high = NULL;
  _lower_high = NULL;
  _middle_high = NULL;
  _upper_high = NULL;
  _lower_high_boundary = NULL;
  _middle_high_boundary = NULL;
  _upper_high_boundary = NULL;
  _lower_alignment = 0;
  _middle_alignment = 0;
  _upper_alignment = 0;
  _special = false;
}


size_t VirtualSpace::committed_size() const {
  return pointer_delta(high(), low(), sizeof(char));
}


size_t VirtualSpace::reserved_size() const {
  return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
}


size_t VirtualSpace::uncommitted_size() const {
  return reserved_size() - committed_size();
}


bool VirtualSpace::contains(const void* p) const {
  return low() <= (const char*) p && (const char*) p < high();
}

/*
   First we need to determine if a particular virtual space is using large
   pages.  This is done at the initialize function and only virtual spaces
   that are larger than LargePageSizeInBytes use large pages.  Once we
   have determined this, all expand_by and shrink_by calls must grow and
   shrink by large page size chunks.  If a particular request
   is within the current large page, the call to commit and uncommit memory
   can be ignored.  In the case that the low and high boundaries of this
   space are not large page aligned, the pages leading to the first large
   page address and the pages after the last large page address must be
   allocated with default pages.
*/
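//
// The resulting region layout (middle alignment is the large page size;
// lower and upper alignments are the default page size):
//
//   low_boundary                                             high_boundary
//   |-- lower --|--------------- middle ---------------|-- upper --|
//               ^                                      ^
//      lower_high_boundary                    middle_high_boundary
//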
bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
  if (uncommitted_size() < bytes) return false;

  if (special()) {
    // don't commit memory if the entire space is pinned in memory
    _high += bytes;
    return true;
  }

  char* previous_high = high();
  char* unaligned_new_high = high() + bytes;
  assert(unaligned_new_high <= high_boundary(),
         "cannot expand by more than upper boundary");

  // Calculate where the new high for each of the regions should be.  If
  // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
  // then the unaligned lower and upper new highs would be the
  // lower_high() and upper_high() respectively.
  char* unaligned_lower_new_high =
    MIN2(unaligned_new_high, lower_high_boundary());
  char* unaligned_middle_new_high =
    MIN2(unaligned_new_high, middle_high_boundary());
  char* unaligned_upper_new_high =
    MIN2(unaligned_new_high, upper_high_boundary());

  // Align the new highs based on the region's alignment.  Lower and upper
  // alignment will always be the default page size.  Middle alignment will be
  // LargePageSizeInBytes if the actual size of the virtual space is in
  // fact larger than LargePageSizeInBytes.
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());

  // Determine which regions need to grow in this expand_by call.
  // If you are growing in the lower region, high() must be in that
  // region so calculate the size based on high().  For the middle and
  // upper regions, determine the starting point of growth based on the
  // location of high().  By getting the MAX of the region's low address
  // (or the previous region's high address) and high(), we can tell if it
  // is an intra or inter region growth.
  size_t lower_needs = 0;
  if (aligned_lower_new_high > lower_high()) {
    lower_needs =
      pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high > middle_high()) {
    middle_needs =
      pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
  }
  size_t upper_needs = 0;
  if (aligned_upper_new_high > upper_high()) {
    upper_needs =
      pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
  }

  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");

  // Commit regions
  if (lower_needs > 0) {
    assert(low_boundary() <= lower_high() &&
           lower_high() + lower_needs <= lower_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(lower_high(), lower_needs)) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    } else {
      _lower_high += lower_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= middle_high() &&
           middle_high() + middle_needs <= middle_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(middle_high(), middle_needs, middle_alignment())) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    }
    _middle_high += middle_needs;
  }
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= upper_high() &&
           upper_high() + upper_needs <= upper_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(upper_high(), upper_needs)) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    } else {
      _upper_high += upper_needs;
    }
  }

  if (pre_touch || AlwaysPreTouch) {
    int vm_ps = os::vm_page_size();
    for (char* curr = previous_high;
         curr < unaligned_new_high;
         curr += vm_ps) {
      // Note the use of a write here; originally we tried just a read, but
      // since the value read was unused, the optimizer removed the read.
      // If we ever have a concurrent touchahead thread, we'll want to use
      // a read, to avoid the potential of overwriting data (if a mutator
      // thread beats the touchahead thread to a page).  There are various
      // ways of making sure this read is not optimized away: for example,
      // generating the code for a read procedure at runtime.
      *curr = 0;
    }
  }

  _high += bytes;
  return true;
}

// A page is uncommitted if the contents of the entire page are deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary
// in which case that particular page can now be uncommitted.
void VirtualSpace::shrink_by(size_t size) {
  if (committed_size() < size)
    fatal("Cannot shrink virtual space to negative size");

  if (special()) {
    // don't uncommit if the entire space is pinned in memory
    _high -= size;
    return;
  }

  char* unaligned_new_high = high() - size;
  assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");

  // Calculate new unaligned address
  char* unaligned_upper_new_high =
    MAX2(unaligned_new_high, middle_high_boundary());
  char* unaligned_middle_new_high =
    MAX2(unaligned_new_high, lower_high_boundary());
  char* unaligned_lower_new_high =
    MAX2(unaligned_new_high, low_boundary());

  // Align address to region's alignment
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());

  // Determine which regions need to shrink
  size_t upper_needs = 0;
  if (aligned_upper_new_high < upper_high()) {
    upper_needs =
      pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high < middle_high()) {
    middle_needs =
      pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
  }
  size_t lower_needs = 0;
  if (aligned_lower_new_high < lower_high()) {
    lower_needs =
      pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
  }

  // Check contiguity.
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");

  // Uncommit
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= aligned_upper_new_high &&
           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _upper_high -= upper_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= aligned_middle_new_high &&
           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _middle_high -= middle_needs;
    }
  }
  if (lower_needs > 0) {
    assert(low_boundary() <= aligned_lower_new_high &&
           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _lower_high -= lower_needs;
    }
  }

  _high -= size;
}

#ifndef PRODUCT
void VirtualSpace::check_for_contiguity() {
  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(low() >= low_boundary(), "low");
  assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
  assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
  assert(high() <= upper_high(), "upper high");
}

void VirtualSpace::print() {
  tty->print   ("Virtual space:");
  if (special()) tty->print(" (pinned in memory)");
  tty->cr();
  tty->print_cr(" - committed: " SIZE_FORMAT, committed_size());
  tty->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
  tty->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low(), high());
  tty->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", low_boundary(), high_boundary());
}

#endif
769 #endif