Tue, 15 Mar 2011 06:35:10 -0700
7024234: 2/3 jvmti tests fail assert(!_oops_are_stale) failed: oops are stale on Win-AMD64
Summary: Move initialization of the '_instance' field to avoid race with ServiceThread start.
Reviewed-by: dholmes, kamg, never, dsamersoff, ysr, coleenp, acorn
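
For context on the summary line: the pattern being fixed is a static `_instance` pointer that was published only after the thread using it had already been started. What follows is a minimal sketch of that ordering fix with illustrative names only (std::thread stands in for the VM's own thread-start machinery; this is not the actual patch):

#include <cassert>
#include <thread>

struct Service {
  static Service* _instance;
  static void initialize() {
    Service* t = new Service();
    // Publish _instance BEFORE the thread can run.  If this assignment
    // instead happened after the thread started, the new thread could
    // observe a stale (NULL) _instance -- the race class described above.
    _instance = t;
    std::thread worker([t]() { assert(Service::_instance == t); });
    worker.join();
  }
};
Service* Service::_instance = nullptr;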

/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/virtualspace.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif


// ReservedSpace
ReservedSpace::ReservedSpace(size_t size) {
  initialize(size, 0, false, NULL, 0, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             char* requested_address,
                             const size_t noaccess_prefix) {
  initialize(size + noaccess_prefix, alignment, large, requested_address,
             noaccess_prefix, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             bool executable) {
  initialize(size, alignment, large, NULL, 0, executable);
}
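
// Trim a raw reservation so that (start + prefix_size) lands on a
// suffix_align boundary: the unused head (beg_delta bytes) and tail
// (end_delta bytes) are released back to the OS, and the aligned start is
// returned.  Returns NULL if [addr, addr + len) is too small to hold
// prefix_size + suffix_size at the required alignment; on that path the
// caller still owns the original region and must release it itself.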
char *
ReservedSpace::align_reserved_region(char* addr, const size_t len,
                                     const size_t prefix_size,
                                     const size_t prefix_align,
                                     const size_t suffix_size,
                                     const size_t suffix_align)
{
  assert(addr != NULL, "sanity");
  const size_t required_size = prefix_size + suffix_size;
  assert(len >= required_size, "len too small");

  const size_t s = size_t(addr);
  const size_t beg_ofs = (s + prefix_size) & (suffix_align - 1);
  const size_t beg_delta = beg_ofs == 0 ? 0 : suffix_align - beg_ofs;
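
  // Worked example with illustrative values:  addr = 0x10000, prefix_size =
  // 0x1000, suffix_align = 0x4000.  Then beg_ofs = (0x10000 + 0x1000) &
  // 0x3fff = 0x1000, so beg_delta = 0x4000 - 0x1000 = 0x3000: shifting the
  // start up by 0x3000 puts the suffix at 0x14000, a suffix_align boundary.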

  if (len < beg_delta + required_size) {
    return NULL; // Cannot do proper alignment.
  }
  const size_t end_delta = len - (beg_delta + required_size);

  if (beg_delta != 0) {
    os::release_memory(addr, beg_delta);
  }

  if (end_delta != 0) {
    char* release_addr = (char*) (s + beg_delta + required_size);
    os::release_memory(release_addr, end_delta);
  }

  return (char*) (s + beg_delta);
}

char* ReservedSpace::reserve_and_align(const size_t reserve_size,
                                       const size_t prefix_size,
                                       const size_t prefix_align,
                                       const size_t suffix_size,
                                       const size_t suffix_align)
{
  assert(reserve_size > prefix_size + suffix_size, "should not be here");

  char* raw_addr = os::reserve_memory(reserve_size, NULL, prefix_align);
  if (raw_addr == NULL) return NULL;

  char* result = align_reserved_region(raw_addr, reserve_size, prefix_size,
                                       prefix_align, suffix_size,
                                       suffix_align);
  if (result == NULL && !os::release_memory(raw_addr, reserve_size)) {
    fatal("os::release_memory failed");
  }

#ifdef ASSERT
  if (result != NULL) {
    const size_t raw = size_t(raw_addr);
    const size_t res = size_t(result);
    assert(res >= raw, "alignment decreased start addr");
    assert(res + prefix_size + suffix_size <= raw + reserve_size,
           "alignment increased end addr");
    assert((res & (prefix_align - 1)) == 0, "bad alignment of prefix");
    assert(((res + prefix_size) & (suffix_align - 1)) == 0,
           "bad alignment of suffix");
  }
#endif

  return result;
}

// Helper method.
static bool failed_to_reserve_as_requested(char* base, char* requested_address,
                                           const size_t size, bool special)
{
  if (base == requested_address || requested_address == NULL)
    return false; // did not fail

  if (base != NULL) {
    // A different reserve address may be acceptable in other cases, but for
    // compressed oops the heap must be at the requested address.
    assert(UseCompressedOops, "currently requested address used only for compressed oops");
    if (PrintCompressedOopsMode) {
      tty->cr();
      tty->print_cr("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
    }
    // The OS ignored the requested address.  Release the memory so the
    // caller can try a different address.
    if (special) {
      if (!os::release_memory_special(base, size)) {
        fatal("os::release_memory_special failed");
      }
    } else {
      if (!os::release_memory(base, size)) {
        fatal("os::release_memory failed");
      }
    }
  }
  return true;
}

ReservedSpace::ReservedSpace(const size_t prefix_size,
                             const size_t prefix_align,
                             const size_t suffix_size,
                             const size_t suffix_align,
                             char* requested_address,
                             const size_t noaccess_prefix)
{
  assert(prefix_size != 0, "sanity");
  assert(prefix_align != 0, "sanity");
  assert(suffix_size != 0, "sanity");
  assert(suffix_align != 0, "sanity");
  assert((prefix_size & (prefix_align - 1)) == 0,
         "prefix_size not divisible by prefix_align");
  assert((suffix_size & (suffix_align - 1)) == 0,
         "suffix_size not divisible by suffix_align");
  assert((suffix_align & (prefix_align - 1)) == 0,
         "suffix_align not divisible by prefix_align");

  // Assert that if noaccess_prefix is used, it is the same as prefix_align.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == prefix_align, "noaccess prefix wrong");

  // Add noaccess_prefix to prefix_size.
  const size_t adjusted_prefix_size = prefix_size + noaccess_prefix;
  const size_t size = adjusted_prefix_size + suffix_size;

  // On systems where the entire region has to be reserved and committed up
  // front, the compound alignment normally done by this method is unnecessary.
  const bool try_reserve_special = UseLargePages &&
    prefix_align == os::large_page_size();
  if (!os::can_commit_large_page_memory() && try_reserve_special) {
    initialize(size, prefix_align, true, requested_address, noaccess_prefix,
               false);
    return;
  }

  _base = NULL;
  _size = 0;
  _alignment = 0;
  _special = false;
  _noaccess_prefix = 0;
  _executable = false;

  // Optimistically try to reserve the exact size needed.
  char* addr;
  if (requested_address != 0) {
    requested_address -= noaccess_prefix; // adjust address
    assert(requested_address != NULL, "huge noaccess prefix?");
    addr = os::attempt_reserve_memory_at(size, requested_address);
    if (failed_to_reserve_as_requested(addr, requested_address, size, false)) {
      // OS ignored requested address. Try different address.
      addr = NULL;
    }
  } else {
    addr = os::reserve_memory(size, NULL, prefix_align);
  }
  if (addr == NULL) return;

  // Check whether the result has the needed alignment (unlikely unless
  // prefix_align == suffix_align).
  const size_t ofs = (size_t(addr) + adjusted_prefix_size) & (suffix_align - 1);
  if (ofs != 0) {
    // Wrong alignment.  Release, allocate more space and do manual alignment.
    //
    // On most operating systems, another allocation with a somewhat larger size
    // will return an address "close to" that of the previous allocation.  The
    // result is often the same address (if the kernel hands out virtual
    // addresses from low to high), or an address that is offset by the increase
    // in size.  Exploit that to minimize the amount of extra space requested.
    if (!os::release_memory(addr, size)) {
      fatal("os::release_memory failed");
    }

    const size_t extra = MAX2(ofs, suffix_align - ofs);
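    // Why MAX2?  If the kernel hands out addresses low to high, the retry
    // tends to come back at the same address, and (suffix_align - ofs) extra
    // bytes give align_reserved_region() room to shift the start forward to
    // the next boundary.  If the kernel hands out addresses high to low, the
    // larger request tends to come back 'extra' bytes lower, and ofs extra
    // bytes make the new (start + adjusted_prefix_size) land on a boundary
    // directly.  Taking the max covers both behaviors.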
    addr = reserve_and_align(size + extra, adjusted_prefix_size, prefix_align,
                             suffix_size, suffix_align);
    if (addr == NULL) {
      // Try an even larger region.  If this fails, address space is exhausted.
      addr = reserve_and_align(size + suffix_align, adjusted_prefix_size,
                               prefix_align, suffix_size, suffix_align);
    }
  }

  _base = addr;
  _size = size;
  _alignment = prefix_align;
  _noaccess_prefix = noaccess_prefix;
}
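
// The main worker: reserve a region of 'size' bytes with the given alignment.
// If 'large' and the OS cannot commit large-page memory on demand, the whole
// region is reserved and pinned up front via os::reserve_memory_special().
// A non-NULL 'requested_address' (adjusted down by noaccess_prefix) is a hard
// requirement here: if the OS places the region elsewhere, the reservation is
// released and the space is left unreserved (_base stays NULL).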
void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
                               char* requested_address,
                               const size_t noaccess_prefix,
                               bool executable) {
  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  _base = NULL;
  _size = 0;
  _special = false;
  _executable = executable;
  _alignment = 0;
  _noaccess_prefix = 0;
  if (size == 0) {
    return;
  }

  // If the OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  bool special = large && !os::can_commit_large_page_memory();
  char* base = NULL;

  if (requested_address != 0) {
    requested_address -= noaccess_prefix; // adjust requested address
    assert(requested_address != NULL, "huge noaccess prefix?");
  }

  if (special) {

    base = os::reserve_memory_special(size, requested_address, executable);

    if (base != NULL) {
      if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
        // OS ignored requested address. Try different address.
        return;
      }
      // Check alignment constraints
      if (alignment > 0) {
        assert((uintptr_t) base % alignment == 0,
               "Large pages returned a non-aligned address");
      }
      _special = true;
    } else {
      // failed; try to reserve regular memory below
      if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                            !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
        if (PrintCompressedOopsMode) {
          tty->cr();
          tty->print_cr("Reserve regular memory without large pages.");
        }
      }
    }
  }

  if (base == NULL) {
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid mapping over something
    // important.  It returns NULL if the requested space is not available.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size, requested_address);
      if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // OS ignored requested address. Try different address.
        base = NULL;
      }
    } else {
      base = os::reserve_memory(size, NULL, alignment);
    }

    if (base == NULL) return;

    // Check alignment constraints
    if (alignment > 0 && ((size_t)base & (alignment - 1)) != 0) {
      // Base not aligned, retry
      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
      // Reserve a region large enough to allow manual alignment, and
      // increase size to a multiple of the desired alignment
      size = align_size_up(size, alignment);
      size_t extra_size = size + alignment;
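      // Note that between os::release_memory() below and the re-reserve at
      // 'base', another thread may map something into the freed range.  If
      // that happens, os::reserve_memory(size, base) returns NULL and the
      // loop simply starts over with a fresh over-sized reservation.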
      do {
        char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
        if (extra_base == NULL) return;
        // Do manual alignment
        base = (char*) align_size_up((uintptr_t) extra_base, alignment);
        assert(base >= extra_base, "just checking");
        // Re-reserve the region at the aligned base address.
        os::release_memory(extra_base, extra_size);
        base = os::reserve_memory(size, base);
      } while (base == NULL);
    }
  }
  // Done
  _base = base;
  _size = size;
  _alignment = MAX2(alignment, (size_t) os::vm_page_size());
  _noaccess_prefix = noaccess_prefix;

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == _alignment, "noaccess prefix wrong");

  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
         "area must be distinguishable from marks for mark-sweep");
  assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
         "area must be distinguishable from marks for mark-sweep");
}


ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
                             bool special, bool executable) {
  assert((size % os::vm_allocation_granularity()) == 0,
         "size not allocation aligned");
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = 0;
  _special = special;
  _executable = executable;
}


ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
                                        bool split, bool realloc) {
  assert(partition_size <= size(), "partition failed");
  if (split) {
    os::split_reserved_memory(base(), size(), partition_size, realloc);
  }
  ReservedSpace result(base(), partition_size, alignment, special(),
                       executable());
  return result;
}


ReservedSpace
ReservedSpace::last_part(size_t partition_size, size_t alignment) {
  assert(partition_size <= size(), "partition failed");
  ReservedSpace result(base() + partition_size, size() - partition_size,
                       alignment, special(), executable());
  return result;
}


size_t ReservedSpace::page_align_size_up(size_t size) {
  return align_size_up(size, os::vm_page_size());
}


size_t ReservedSpace::page_align_size_down(size_t size) {
  return align_size_down(size, os::vm_page_size());
}


size_t ReservedSpace::allocation_align_size_up(size_t size) {
  return align_size_up(size, os::vm_allocation_granularity());
}


size_t ReservedSpace::allocation_align_size_down(size_t size) {
  return align_size_down(size, os::vm_allocation_granularity());
}


void ReservedSpace::release() {
  if (is_reserved()) {
    char *real_base = _base - _noaccess_prefix;
    const size_t real_size = _size + _noaccess_prefix;
    if (special()) {
      os::release_memory_special(real_base, real_size);
    } else {
      os::release_memory(real_base, real_size);
    }
    _base = NULL;
    _size = 0;
    _noaccess_prefix = 0;
    _special = false;
    _executable = false;
  }
}

void ReservedSpace::protect_noaccess_prefix(const size_t size) {
  assert( (_noaccess_prefix != 0) == (UseCompressedOops && _base != NULL &&
                                      (size_t(_base + _size) > OopEncodingHeapMax) &&
                                      Universe::narrow_oop_use_implicit_null_checks()),
         "noaccess_prefix should be used only with non zero based compressed oops");

  // If there is no noaccess prefix, return.
  if (_noaccess_prefix == 0) return;

  assert(_noaccess_prefix >= (size_t)os::vm_page_size(),
         "must be at least page size big");
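
  // The prefix makes implicit null checks work with a non-zero-based
  // compressed oops heap: decoding a narrow null yields the heap base, so a
  // null dereference lands in this inaccessible first page and traps instead
  // of reading live data.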

  // Protect memory at the base of the allocated region.
  // If special, the page was committed (only matters on windows)
  if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE,
                          _special)) {
    fatal("cannot protect protection page");
  }
  if (PrintCompressedOopsMode) {
    tty->cr();
    tty->print_cr("Protected page at the reserved heap base: " PTR_FORMAT " / " INTX_FORMAT " bytes", _base, _noaccess_prefix);
  }

  _base += _noaccess_prefix;
  _size -= _noaccess_prefix;
  assert((size == _size) && ((uintptr_t)_base % _alignment == 0),
         "must be exactly of required size and alignment");
}

ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment,
                                     bool large, char* requested_address) :
  ReservedSpace(size, alignment, large,
                requested_address,
                (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
                 Universe::narrow_oop_use_implicit_null_checks()) ?
                  lcm(os::vm_page_size(), alignment) : 0) {
  // Only reserved space for the java heap should have a noaccess_prefix
  // if using compressed oops.
  protect_noaccess_prefix(size);
}

ReservedHeapSpace::ReservedHeapSpace(const size_t prefix_size,
                                     const size_t prefix_align,
                                     const size_t suffix_size,
                                     const size_t suffix_align,
                                     char* requested_address) :
  ReservedSpace(prefix_size, prefix_align, suffix_size, suffix_align,
                requested_address,
                (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
                 Universe::narrow_oop_use_implicit_null_checks()) ?
                  lcm(os::vm_page_size(), prefix_align) : 0) {
  protect_noaccess_prefix(prefix_size + suffix_size);
}

// Reserve space for code segment.  Same as Java heap only we mark this as
// executable.
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
                                     size_t rs_align,
                                     bool large) :
  ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
}

// VirtualSpace

VirtualSpace::VirtualSpace() {
  _low_boundary         = NULL;
  _high_boundary        = NULL;
  _low                  = NULL;
  _high                 = NULL;
  _lower_high           = NULL;
  _middle_high          = NULL;
  _upper_high           = NULL;
  _lower_high_boundary  = NULL;
  _middle_high_boundary = NULL;
  _upper_high_boundary  = NULL;
  _lower_alignment      = 0;
  _middle_alignment     = 0;
  _upper_alignment      = 0;
  _special              = false;
  _executable           = false;
}


bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
  if (!rs.is_reserved()) return false;  // allocation failed.
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  _low_boundary  = rs.base();
  _high_boundary = low_boundary() + rs.size();

  _low  = low_boundary();
  _high = low();

  _special = rs.special();
  _executable = rs.executable();

  // When a VirtualSpace begins life at a large size, make all future expansion
  // and shrinking occur aligned to a granularity of large pages.  This avoids
  // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically, we see that with a 4MB
  // page size, the only spaces that get handled this way are codecache and
  // the heap itself, both of which provide a substantial performance
  // boost in many benchmarks when covered by large pages.
  //
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not aligned so already.
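  //
  // The space is managed as three consecutive regions:
  //
  //   low_boundary()                                      high_boundary()
  //   |---- lower ----|---------- middle ----------|---- upper ----|
  //                   ^                            ^
  //         lower_high_boundary()        middle_high_boundary()
  //
  // The lower and upper regions expand and shrink with default page
  // granularity; the middle region, whose boundaries are rounded to
  // middle_alignment(), is the part that can be backed by large pages.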
  _lower_alignment  = os::vm_page_size();
  _middle_alignment = os::page_size_for_region(rs.size(), rs.size(), 1);
  _upper_alignment  = os::vm_page_size();

  // End of each region
  _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
  _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
  _upper_high_boundary = high_boundary();

  // High address of each region
  _lower_high = low_boundary();
  _middle_high = lower_high_boundary();
  _upper_high = middle_high_boundary();

  // commit to initial size
  if (committed_size > 0) {
    if (!expand_by(committed_size)) {
      return false;
    }
  }
  return true;
}


VirtualSpace::~VirtualSpace() {
  release();
}


void VirtualSpace::release() {
  // This does not release memory it never reserved.
  // Caller must release via rs.release();
  _low_boundary         = NULL;
  _high_boundary        = NULL;
  _low                  = NULL;
  _high                 = NULL;
  _lower_high           = NULL;
  _middle_high          = NULL;
  _upper_high           = NULL;
  _lower_high_boundary  = NULL;
  _middle_high_boundary = NULL;
  _upper_high_boundary  = NULL;
  _lower_alignment      = 0;
  _middle_alignment     = 0;
  _upper_alignment      = 0;
  _special              = false;
  _executable           = false;
}


size_t VirtualSpace::committed_size() const {
  return pointer_delta(high(), low(), sizeof(char));
}


size_t VirtualSpace::reserved_size() const {
  return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
}


size_t VirtualSpace::uncommitted_size() const {
  return reserved_size() - committed_size();
}


bool VirtualSpace::contains(const void* p) const {
  return low() <= (const char*) p && (const char*) p < high();
}

/*
   First we need to determine if a particular virtual space is using large
   pages.  This is done at the initialize function and only virtual spaces
   that are larger than LargePageSizeInBytes use large pages.  Once we
   have determined this, all expand_by and shrink_by calls must grow and
   shrink by large page size chunks.  If a particular request
   is within the current large page, the call to commit and uncommit memory
   can be ignored.  In the case that the low and high boundaries of this
   space are not large page aligned, the pages leading up to the first large
   page address and the pages after the last large page address must be
   allocated with default pages.
*/
bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
  if (uncommitted_size() < bytes) return false;

  if (special()) {
    // don't commit memory if the entire space is pinned in memory
    _high += bytes;
    return true;
  }

  char* previous_high = high();
  char* unaligned_new_high = high() + bytes;
  assert(unaligned_new_high <= high_boundary(),
         "cannot expand by more than upper boundary");

  // Calculate where the new high for each of the regions should be.  If
  // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
  // then the unaligned lower and upper new highs would be the
  // lower_high() and upper_high() respectively.
  char* unaligned_lower_new_high =
    MIN2(unaligned_new_high, lower_high_boundary());
  char* unaligned_middle_new_high =
    MIN2(unaligned_new_high, middle_high_boundary());
  char* unaligned_upper_new_high =
    MIN2(unaligned_new_high, upper_high_boundary());

  // Align the new highs based on each region's alignment.  Lower and upper
  // alignment will always be the default page size.  Middle alignment will be
  // LargePageSizeInBytes if the actual size of the virtual space is in
  // fact larger than LargePageSizeInBytes.
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());

  // Determine which regions need to grow in this expand_by call.
  // If you are growing in the lower region, high() must be in that
  // region so calculate the size based on high().  For the middle and
  // upper regions, determine the starting point of growth based on the
  // location of high().  By taking the MAX of the region's low address
  // (or the previous region's high address) and high(), we can tell if it
  // is an intra or inter region growth.
  size_t lower_needs = 0;
  if (aligned_lower_new_high > lower_high()) {
    lower_needs =
      pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high > middle_high()) {
    middle_needs =
      pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
  }
  size_t upper_needs = 0;
  if (aligned_upper_new_high > upper_high()) {
    upper_needs =
      pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
  }

  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");

  // Commit regions
  if (lower_needs > 0) {
    assert(low_boundary() <= lower_high() &&
           lower_high() + lower_needs <= lower_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    } else {
      _lower_high += lower_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= middle_high() &&
           middle_high() + middle_needs <= middle_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
                           _executable)) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    }
    _middle_high += middle_needs;
  }
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= upper_high() &&
           upper_high() + upper_needs <= upper_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    } else {
      _upper_high += upper_needs;
    }
  }

  if (pre_touch || AlwaysPreTouch) {
    int vm_ps = os::vm_page_size();
    for (char* curr = previous_high;
         curr < unaligned_new_high;
         curr += vm_ps) {
      // Note the use of a write here; originally we tried just a read, but
      // since the value read was unused, the optimizer removed the read.
      // If we ever have a concurrent touchahead thread, we'll want to use
      // a read, to avoid the potential of overwriting data (if a mutator
      // thread beats the touchahead thread to a page).  There are various
      // ways of making sure this read is not optimized away: for example,
      // generating the code for a read procedure at runtime.
      *curr = 0;
    }
  }

  _high += bytes;
  return true;
}

// A page is uncommitted if the contents of the entire page are deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary,
// in which case that particular page can now be uncommitted.
void VirtualSpace::shrink_by(size_t size) {
  if (committed_size() < size)
    fatal("Cannot shrink virtual space to negative size");

  if (special()) {
    // don't uncommit if the entire space is pinned in memory
    _high -= size;
    return;
  }

  char* unaligned_new_high = high() - size;
  assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");

  // Calculate new unaligned address
  char* unaligned_upper_new_high =
    MAX2(unaligned_new_high, middle_high_boundary());
  char* unaligned_middle_new_high =
    MAX2(unaligned_new_high, lower_high_boundary());
  char* unaligned_lower_new_high =
    MAX2(unaligned_new_high, low_boundary());

  // Align address to region's alignment
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());

  // Determine which regions need to shrink
  size_t upper_needs = 0;
  if (aligned_upper_new_high < upper_high()) {
    upper_needs =
      pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high < middle_high()) {
    middle_needs =
      pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
  }
  size_t lower_needs = 0;
  if (aligned_lower_new_high < lower_high()) {
    lower_needs =
      pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
  }

  // Check contiguity.
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");

  // Uncommit
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= aligned_upper_new_high &&
           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _upper_high -= upper_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= aligned_middle_new_high &&
           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _middle_high -= middle_needs;
    }
  }
  if (lower_needs > 0) {
    assert(low_boundary() <= aligned_lower_new_high &&
           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _lower_high -= lower_needs;
    }
  }

  _high -= size;
}

#ifndef PRODUCT
void VirtualSpace::check_for_contiguity() {
  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(low() >= low_boundary(), "low");
  assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
  assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
  assert(high() <= upper_high(), "upper high");
}

void VirtualSpace::print() {
  tty->print("Virtual space:");
  if (special()) tty->print(" (pinned in memory)");
  tty->cr();
  tty->print_cr(" - committed: " SIZE_FORMAT, committed_size());
  tty->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
  tty->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", low(), high());
  tty->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", low_boundary(), high_boundary());
}

#endif