Fri, 28 Mar 2014 10:13:37 -0700
8035828: Turn on @Stable support in VM
Reviewed-by: jrose, twisti
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memTracker.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_aix
# include "os_aix.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif

// ReservedSpace

// Dummy constructor
ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
    _alignment(0), _special(false), _executable(false) {
}

ReservedSpace::ReservedSpace(size_t size) {
  size_t page_size = os::page_size_for_region(size, size, 1);
  bool large_pages = page_size != (size_t)os::vm_page_size();
  // Don't force the alignment to be large page aligned,
  // since that will waste memory.
  size_t alignment = os::vm_allocation_granularity();
  initialize(size, alignment, large_pages, NULL, 0, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             char* requested_address,
                             const size_t noaccess_prefix) {
  initialize(size+noaccess_prefix, alignment, large, requested_address,
             noaccess_prefix, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             bool executable) {
  initialize(size, alignment, large, NULL, 0, executable);
}

// Helper method.
static bool failed_to_reserve_as_requested(char* base, char* requested_address,
                                           const size_t size, bool special)
{
  if (base == requested_address || requested_address == NULL)
    return false; // did not fail

  if (base != NULL) {
    // A different reserve address may be acceptable in other cases,
    // but for compressed oops the heap should be at the requested address.
    assert(UseCompressedOops, "currently requested address used only for compressed oops");
    if (PrintCompressedOopsMode) {
      tty->cr();
      tty->print_cr("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
    }
    // OS ignored the requested address. Try a different address.
    if (special) {
      if (!os::release_memory_special(base, size)) {
        fatal("os::release_memory_special failed");
      }
    } else {
      if (!os::release_memory(base, size)) {
        fatal("os::release_memory failed");
      }
    }
  }
  return true;
}

void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
                               char* requested_address,
                               const size_t noaccess_prefix,
                               bool executable) {
  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  alignment = MAX2(alignment, (size_t)os::vm_page_size());

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == alignment, "noaccess prefix wrong");

  _base = NULL;
  _size = 0;
  _special = false;
  _executable = executable;
  _alignment = 0;
  _noaccess_prefix = 0;
  if (size == 0) {
    return;
  }

  // If OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  bool special = large && !os::can_commit_large_page_memory();
  char* base = NULL;

  if (requested_address != 0) {
    requested_address -= noaccess_prefix; // adjust requested address
    assert(requested_address != NULL, "huge noaccess prefix?");
  }

  if (special) {

    base = os::reserve_memory_special(size, alignment, requested_address, executable);

    if (base != NULL) {
      if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
        // OS ignored requested address. Try different address.
        return;
      }
      // Check alignment constraints.
      assert((uintptr_t) base % alignment == 0,
             err_msg("Large pages returned a non-aligned address, base: "
                     PTR_FORMAT " alignment: " PTR_FORMAT,
                     base, (void*)(uintptr_t)alignment));
      _special = true;
    } else {
      // failed; try to reserve regular memory below
      if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                            !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
        if (PrintCompressedOopsMode) {
          tty->cr();
          tty->print_cr("Reserve regular memory without large pages.");
        }
      }
    }
  }

  if (base == NULL) {
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid over mapping something
    // important.  If available space is not detected, return NULL.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size, requested_address);
      if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // OS ignored requested address. Try different address.
        base = NULL;
      }
    } else {
      base = os::reserve_memory(size, NULL, alignment);
    }

    if (base == NULL) return;

    // Check alignment constraints
    if ((((size_t)base + noaccess_prefix) & (alignment - 1)) != 0) {
      // Base not aligned, retry
      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
      // Make sure that size is aligned
      size = align_size_up(size, alignment);
      base = os::reserve_memory_aligned(size, alignment);

      if (requested_address != 0 &&
          failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // As a result of the alignment constraints, the allocated base differs
        // from the requested address. Return back to the caller who can
        // take remedial action (like trying again without a requested address).
        assert(_base == NULL, "should be");
        return;
      }
    }
  }
  // Done
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = noaccess_prefix;

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == _alignment, "noaccess prefix wrong");

  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
         "area must be distinguishable from marks for mark-sweep");
  assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
         "area must be distinguishable from marks for mark-sweep");
}

ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
                             bool special, bool executable) {
  assert((size % os::vm_allocation_granularity()) == 0,
         "size not allocation aligned");
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = 0;
  _special = special;
  _executable = executable;
}

ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
                                        bool split, bool realloc) {
  assert(partition_size <= size(), "partition failed");
  if (split) {
    os::split_reserved_memory(base(), size(), partition_size, realloc);
  }
  ReservedSpace result(base(), partition_size, alignment, special(),
                       executable());
  return result;
}

ReservedSpace
ReservedSpace::last_part(size_t partition_size, size_t alignment) {
  assert(partition_size <= size(), "partition failed");
  ReservedSpace result(base() + partition_size, size() - partition_size,
                       alignment, special(), executable());
  return result;
}

size_t ReservedSpace::page_align_size_up(size_t size) {
  return align_size_up(size, os::vm_page_size());
}

size_t ReservedSpace::page_align_size_down(size_t size) {
  return align_size_down(size, os::vm_page_size());
}

size_t ReservedSpace::allocation_align_size_up(size_t size) {
  return align_size_up(size, os::vm_allocation_granularity());
}

size_t ReservedSpace::allocation_align_size_down(size_t size) {
  return align_size_down(size, os::vm_allocation_granularity());
}

void ReservedSpace::release() {
  if (is_reserved()) {
    char *real_base = _base - _noaccess_prefix;
    const size_t real_size = _size + _noaccess_prefix;
    if (special()) {
      os::release_memory_special(real_base, real_size);
    } else {
      os::release_memory(real_base, real_size);
    }
    _base = NULL;
    _size = 0;
    _noaccess_prefix = 0;
    _special = false;
    _executable = false;
  }
}

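// Note on the noaccess prefix (explanatory comment, not part of the original
// change): with heap-based compressed oops, decoding a narrow null oop yields
// the narrow-oop base, which is the bottom of this reservation.  Protecting
// the first alignment-sized chunk below lets such accesses trap immediately,
// which is what allows Universe::narrow_oop_use_implicit_null_checks() to
// remain enabled even with a non-zero narrow-oop base.
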
void ReservedSpace::protect_noaccess_prefix(const size_t size) {
  assert( (_noaccess_prefix != 0) == (UseCompressedOops && _base != NULL &&
                                      (Universe::narrow_oop_base() != NULL) &&
                                      Universe::narrow_oop_use_implicit_null_checks()),
         "noaccess_prefix should be used only with non zero based compressed oops");

  // If there is no noaccess prefix, return.
  if (_noaccess_prefix == 0) return;

  assert(_noaccess_prefix >= (size_t)os::vm_page_size(),
         "must be at least page size big");

  // Protect memory at the base of the allocated region.
  // If special, the page was committed (only matters on windows)
  if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE,
                          _special)) {
    fatal("cannot protect protection page");
  }
  if (PrintCompressedOopsMode) {
    tty->cr();
    tty->print_cr("Protected page at the reserved heap base: " PTR_FORMAT " / " INTX_FORMAT " bytes", _base, _noaccess_prefix);
  }

  _base += _noaccess_prefix;
  _size -= _noaccess_prefix;
  assert((size == _size) && ((uintptr_t)_base % _alignment == 0),
         "must be exactly of required size and alignment");
}

ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment,
                                     bool large, char* requested_address) :
  ReservedSpace(size, alignment, large,
                requested_address,
                (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
                 Universe::narrow_oop_use_implicit_null_checks()) ?
                  lcm(os::vm_page_size(), alignment) : 0) {
  if (base() > 0) {
    MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
  }

  // Only reserved space for the java heap should have a noaccess_prefix
  // if using compressed oops.
  protect_noaccess_prefix(size);
}

// Reserve space for code segment.  Same as the Java heap, only we mark this
// as executable.
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
                                     size_t rs_align,
                                     bool large) :
  ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
  MemTracker::record_virtual_memory_type((address)base(), mtCode);
}

// VirtualSpace

VirtualSpace::VirtualSpace() {
  _low_boundary = NULL;
  _high_boundary = NULL;
  _low = NULL;
  _high = NULL;
  _lower_high = NULL;
  _middle_high = NULL;
  _upper_high = NULL;
  _lower_high_boundary = NULL;
  _middle_high_boundary = NULL;
  _upper_high_boundary = NULL;
  _lower_alignment = 0;
  _middle_alignment = 0;
  _upper_alignment = 0;
  _special = false;
  _executable = false;
}

bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
  const size_t max_commit_granularity = os::page_size_for_region(rs.size(), rs.size(), 1);
  return initialize_with_granularity(rs, committed_size, max_commit_granularity);
}

bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
  if(!rs.is_reserved()) return false;  // allocation failed.
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  assert(max_commit_granularity > 0, "Granularity must be non-zero.");

  _low_boundary  = rs.base();
  _high_boundary = low_boundary() + rs.size();

  _low = low_boundary();
  _high = low();

  _special = rs.special();
  _executable = rs.executable();

  // When a VirtualSpace begins life at a large size, make all future expansion
  // and shrinking occur aligned to a granularity of large pages.  This avoids
  // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically, we see that with a 4MB
  // page size, the only spaces that get handled this way are codecache and
  // the heap itself, both of which provide a substantial performance
  // boost in many benchmarks when covered by large pages.
  //
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not already aligned.
  _lower_alignment  = os::vm_page_size();
  _middle_alignment = max_commit_granularity;
  _upper_alignment  = os::vm_page_size();
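
  // Illustrative example (not from the original source): with a 4K small page
  // and a 2M commit granularity, a reservation [base, end) is split into
  //   lower  region: [base, round_to(base, 2M))                -- small pages
  //   middle region: [round_to(base, 2M), round_down(end, 2M)) -- 2M chunks
  //   upper  region: [round_down(end, 2M), end)                -- small pages
  // so only the unaligned fringes are ever committed with small pages.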

  // End of each region
  _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
  _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
  _upper_high_boundary = high_boundary();

  // High address of each region
  _lower_high = low_boundary();
  _middle_high = lower_high_boundary();
  _upper_high = middle_high_boundary();

  // commit to initial size
  if (committed_size > 0) {
    if (!expand_by(committed_size)) {
      return false;
    }
  }
  return true;
}

VirtualSpace::~VirtualSpace() {
  release();
}

void VirtualSpace::release() {
  // This does not release memory it never reserved.
  // Caller must release via rs.release();
  _low_boundary = NULL;
  _high_boundary = NULL;
  _low = NULL;
  _high = NULL;
  _lower_high = NULL;
  _middle_high = NULL;
  _upper_high = NULL;
  _lower_high_boundary = NULL;
  _middle_high_boundary = NULL;
  _upper_high_boundary = NULL;
  _lower_alignment = 0;
  _middle_alignment = 0;
  _upper_alignment = 0;
  _special = false;
  _executable = false;
}

size_t VirtualSpace::committed_size() const {
  return pointer_delta(high(), low(), sizeof(char));
}

size_t VirtualSpace::reserved_size() const {
  return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
}

size_t VirtualSpace::uncommitted_size() const {
  return reserved_size() - committed_size();
}

size_t VirtualSpace::actual_committed_size() const {
  // Special VirtualSpaces commit all reserved space up front.
  if (special()) {
    return reserved_size();
  }

  size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
  size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
  size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));

#ifdef ASSERT
  size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
  size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
  size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));

  if (committed_high > 0) {
    assert(committed_low == lower, "Must be");
    assert(committed_middle == middle, "Must be");
  }

  if (committed_middle > 0) {
    assert(committed_low == lower, "Must be");
  }
  if (committed_middle < middle) {
    assert(committed_high == 0, "Must be");
  }

  if (committed_low < lower) {
    assert(committed_high == 0, "Must be");
    assert(committed_middle == 0, "Must be");
  }
#endif

  return committed_low + committed_middle + committed_high;
}

bool VirtualSpace::contains(const void* p) const {
  return low() <= (const char*) p && (const char*) p < high();
}

/*
   First we need to determine if a particular virtual space is using large
   pages.  This is done at the initialize function and only virtual spaces
   that are larger than LargePageSizeInBytes use large pages.  Once we
   have determined this, all expand_by and shrink_by calls must grow and
   shrink by large page size chunks.  If a particular request
   is within the current large page, the call to commit and uncommit memory
   can be ignored.  In the case that the low and high boundaries of this
   space are not large page aligned, the pages leading up to the first large
   page address and the pages after the last large page address must be
   allocated with default pages.
*/
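
// Worked example (illustrative, not from the original change): assume a 2M
// middle alignment and a reservation whose base sits 1M past a 2M boundary.
// A call to expand_by(2M) on a freshly initialized space commits the first
// 1M (up to lower_high_boundary()) with small pages in the lower region,
// then rounds the middle-region request up to the next 2M boundary and
// commits one full 2M chunk there.  committed_size() grows by exactly 2M,
// while actual_committed_size() reports 3M because of large-page rounding.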
bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
  if (uncommitted_size() < bytes) return false;

  if (special()) {
    // don't commit memory if the entire space is pinned in memory
    _high += bytes;
    return true;
  }

  char* previous_high = high();
  char* unaligned_new_high = high() + bytes;
  assert(unaligned_new_high <= high_boundary(),
         "cannot expand by more than upper boundary");

  // Calculate where the new high for each of the regions should be.  If
  // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
  // then the unaligned lower and upper new highs would be the
  // lower_high() and upper_high() respectively.
  char* unaligned_lower_new_high =
    MIN2(unaligned_new_high, lower_high_boundary());
  char* unaligned_middle_new_high =
    MIN2(unaligned_new_high, middle_high_boundary());
  char* unaligned_upper_new_high =
    MIN2(unaligned_new_high, upper_high_boundary());

  // Align the new highs based on the regions' alignment.  lower and upper
  // alignment will always be default page size.  middle alignment will be
  // LargePageSizeInBytes if the actual size of the virtual space is in
  // fact larger than LargePageSizeInBytes.
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());

  // Determine which regions need to grow in this expand_by call.
  // If you are growing in the lower region, high() must be in that
  // region so calculate the size based on high().  For the middle and
  // upper regions, determine the starting point of growth based on the
  // location of high().  By getting the MAX of the region's low address
  // (or the previous region's high address) and high(), we can tell if it
  // is an intra or inter region growth.
  size_t lower_needs = 0;
  if (aligned_lower_new_high > lower_high()) {
    lower_needs =
      pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high > middle_high()) {
    middle_needs =
      pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
  }
  size_t upper_needs = 0;
  if (aligned_upper_new_high > upper_high()) {
    upper_needs =
      pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
  }

  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");

  // Commit regions
  if (lower_needs > 0) {
    assert(low_boundary() <= lower_high() &&
           lower_high() + lower_needs <= lower_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", lower_needs=" SIZE_FORMAT ", %d) failed",
                         lower_high(), lower_needs, _executable);)
      return false;
    } else {
      _lower_high += lower_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= middle_high() &&
           middle_high() + middle_needs <= middle_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
                           _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", middle_needs=" SIZE_FORMAT ", " SIZE_FORMAT
                         ", %d) failed", middle_high(), middle_needs,
                         middle_alignment(), _executable);)
      return false;
    }
    _middle_high += middle_needs;
  }
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= upper_high() &&
           upper_high() + upper_needs <= upper_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", upper_needs=" SIZE_FORMAT ", %d) failed",
                         upper_high(), upper_needs, _executable);)
      return false;
    } else {
      _upper_high += upper_needs;
    }
  }

  if (pre_touch || AlwaysPreTouch) {
    int vm_ps = os::vm_page_size();
    for (char* curr = previous_high;
         curr < unaligned_new_high;
         curr += vm_ps) {
      // Note the use of a write here; originally we tried just a read, but
      // since the value read was unused, the optimizer removed the read.
      // If we ever have a concurrent touchahead thread, we'll want to use
      // a read, to avoid the potential of overwriting data (if a mutator
      // thread beats the touchahead thread to a page).  There are various
      // ways of making sure this read is not optimized away: for example,
      // generating the code for a read procedure at runtime.
      *curr = 0;
    }
  }

  _high += bytes;
  return true;
}

// A page is uncommitted if the contents of the entire page are deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary
// in which case that particular page can now be uncommitted.
void VirtualSpace::shrink_by(size_t size) {
  if (committed_size() < size)
    fatal("Cannot shrink virtual space to negative size");

  if (special()) {
    // don't uncommit if the entire space is pinned in memory
    _high -= size;
    return;
  }

  char* unaligned_new_high = high() - size;
  assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");

  // Calculate new unaligned address
  char* unaligned_upper_new_high =
    MAX2(unaligned_new_high, middle_high_boundary());
  char* unaligned_middle_new_high =
    MAX2(unaligned_new_high, lower_high_boundary());
  char* unaligned_lower_new_high =
    MAX2(unaligned_new_high, low_boundary());

  // Align address to region's alignment
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());

  // Determine which regions need to shrink
  size_t upper_needs = 0;
  if (aligned_upper_new_high < upper_high()) {
    upper_needs =
      pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high < middle_high()) {
    middle_needs =
      pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
  }
  size_t lower_needs = 0;
  if (aligned_lower_new_high < lower_high()) {
    lower_needs =
      pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
  }

  // Check contiguity.
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");

  // Uncommit
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= aligned_upper_new_high &&
           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _upper_high -= upper_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= aligned_middle_new_high &&
           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _middle_high -= middle_needs;
    }
  }
  if (lower_needs > 0) {
    assert(low_boundary() <= aligned_lower_new_high &&
           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _lower_high -= lower_needs;
    }
  }

  _high -= size;
}

#ifndef PRODUCT
void VirtualSpace::check_for_contiguity() {
  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(low() >= low_boundary(), "low");
  assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
  assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
  assert(high() <= upper_high(), "upper high");
}

void VirtualSpace::print_on(outputStream* out) {
  out->print("Virtual space:");
  if (special()) out->print(" (pinned in memory)");
  out->cr();
  out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
  out->print_cr(" - reserved: " SIZE_FORMAT, reserved_size());
  out->print_cr(" - [low, high]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", low(), high());
  out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", low_boundary(), high_boundary());
}

void VirtualSpace::print() {
  print_on(tty);
}

/////////////// Unit tests ///////////////

#ifndef PRODUCT

#define test_log(...) \
  do {\
    if (VerboseInternalVMTests) { \
      tty->print_cr(__VA_ARGS__); \
      tty->flush(); \
    }\
  } while (false)
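
// Note (explanatory, not part of the original change): TestReservedSpace_test()
// and TestVirtualSpace_test() below are meant to be driven by the VM's internal
// test runner (e.g. a non-product build started with -XX:+ExecuteInternalVMTests);
// they are not invoked during normal startup.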

class TestReservedSpace : AllStatic {
 public:
  static void small_page_write(void* addr, size_t size) {
    size_t page_size = os::vm_page_size();

    char* end = (char*)addr + size;
    for (char* p = (char*)addr; p < end; p += page_size) {
      *p = 1;
    }
  }

  static void release_memory_for_test(ReservedSpace rs) {
    if (rs.special()) {
      guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
    } else {
      guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
    }
  }

  static void test_reserved_space1(size_t size, size_t alignment) {
    test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);

    assert(is_size_aligned(size, alignment), "Incorrect input parameters");

    ReservedSpace rs(size,          // size
                     alignment,     // alignment
                     UseLargePages, // large
                     NULL,          // requested_address
                     0);            // noaccess_prefix

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
    assert(is_size_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }

  static void test_reserved_space2(size_t size) {
    test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);

    assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");

    ReservedSpace rs(size);

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }

  static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
    test_log("test_reserved_space3(%p, %p, %d)",
             (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);

    assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
    assert(is_size_aligned(size, alignment), "Must be at least aligned against alignment");

    bool large = maybe_large && UseLargePages && size >= os::large_page_size();

    ReservedSpace rs(size, alignment, large, false);

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }

  static void test_reserved_space1() {
    size_t size = 2 * 1024 * 1024;
    size_t ag = os::vm_allocation_granularity();

    test_reserved_space1(size,      ag);
    test_reserved_space1(size * 2,  ag);
    test_reserved_space1(size * 10, ag);
  }

  static void test_reserved_space2() {
    size_t size = 2 * 1024 * 1024;
    size_t ag = os::vm_allocation_granularity();

    test_reserved_space2(size * 1);
    test_reserved_space2(size * 2);
    test_reserved_space2(size * 10);
    test_reserved_space2(ag);
    test_reserved_space2(size - ag);
    test_reserved_space2(size);
    test_reserved_space2(size + ag);
    test_reserved_space2(size * 2);
    test_reserved_space2(size * 2 - ag);
    test_reserved_space2(size * 2 + ag);
    test_reserved_space2(size * 3);
    test_reserved_space2(size * 3 - ag);
    test_reserved_space2(size * 3 + ag);
    test_reserved_space2(size * 10);
    test_reserved_space2(size * 10 + size / 2);
  }

  static void test_reserved_space3() {
    size_t ag = os::vm_allocation_granularity();

    test_reserved_space3(ag,      ag    , false);
    test_reserved_space3(ag * 2,  ag    , false);
    test_reserved_space3(ag * 3,  ag    , false);
    test_reserved_space3(ag * 2,  ag * 2, false);
    test_reserved_space3(ag * 4,  ag * 2, false);
    test_reserved_space3(ag * 8,  ag * 2, false);
    test_reserved_space3(ag * 4,  ag * 4, false);
    test_reserved_space3(ag * 8,  ag * 4, false);
    test_reserved_space3(ag * 16, ag * 4, false);

    if (UseLargePages) {
      size_t lp = os::large_page_size();

      // Without large pages
      test_reserved_space3(lp,     ag * 4, false);
      test_reserved_space3(lp * 2, ag * 4, false);
      test_reserved_space3(lp * 4, ag * 4, false);
      test_reserved_space3(lp,     lp    , false);
      test_reserved_space3(lp * 2, lp    , false);
      test_reserved_space3(lp * 3, lp    , false);
      test_reserved_space3(lp * 2, lp * 2, false);
      test_reserved_space3(lp * 4, lp * 2, false);
      test_reserved_space3(lp * 8, lp * 2, false);

      // With large pages
      test_reserved_space3(lp,     ag * 4, true);
      test_reserved_space3(lp * 2, ag * 4, true);
      test_reserved_space3(lp * 4, ag * 4, true);
      test_reserved_space3(lp,     lp    , true);
      test_reserved_space3(lp * 2, lp    , true);
      test_reserved_space3(lp * 3, lp    , true);
      test_reserved_space3(lp * 2, lp * 2, true);
      test_reserved_space3(lp * 4, lp * 2, true);
      test_reserved_space3(lp * 8, lp * 2, true);
    }
  }

  static void test_reserved_space() {
    test_reserved_space1();
    test_reserved_space2();
    test_reserved_space3();
  }
};

void TestReservedSpace_test() {
  TestReservedSpace::test_reserved_space();
}

#define assert_equals(actual, expected)  \
  assert(actual == expected,             \
         err_msg("Got " SIZE_FORMAT " expected " \
                 SIZE_FORMAT, actual, expected));

#define assert_ge(value1, value2)                   \
  assert(value1 >= value2,                          \
         err_msg("'" #value1 "': " SIZE_FORMAT " '" \
                 #value2 "': " SIZE_FORMAT, value1, value2));

#define assert_lt(value1, value2)                   \
  assert(value1 < value2,                           \
         err_msg("'" #value1 "': " SIZE_FORMAT " '" \
                 #value2 "': " SIZE_FORMAT, value1, value2));

class TestVirtualSpace : AllStatic {
  enum TestLargePages {
    Default,
    Disable,
    Reserve,
    Commit
  };

  static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
    switch(mode) {
    default:
    case Default:
    case Reserve:
      return ReservedSpace(reserve_size_aligned);
    case Disable:
    case Commit:
      return ReservedSpace(reserve_size_aligned,
                           os::vm_allocation_granularity(),
                           /* large */ false, /* exec */ false);
    }
  }

  static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
    switch(mode) {
    default:
    case Default:
    case Reserve:
      return vs.initialize(rs, 0);
    case Disable:
      return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
    case Commit:
      return vs.initialize_with_granularity(rs, 0, os::page_size_for_region(rs.size(), rs.size(), 1));
    }
  }

 public:
  static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
                                                        TestLargePages mode = Default) {
    size_t granularity = os::vm_allocation_granularity();
    size_t reserve_size_aligned = align_size_up(reserve_size, granularity);

    ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);

    assert(reserved.is_reserved(), "Must be");

    VirtualSpace vs;
    bool initialized = initialize_virtual_space(vs, reserved, mode);
    assert(initialized, "Failed to initialize VirtualSpace");

    vs.expand_by(commit_size, false);

    if (vs.special()) {
      assert_equals(vs.actual_committed_size(), reserve_size_aligned);
    } else {
      assert_ge(vs.actual_committed_size(), commit_size);
      // Approximate the commit granularity.
      // Make sure that we don't commit using large pages
      // if large pages have been disabled for this VirtualSpace.
      size_t commit_granularity = (mode == Disable || !UseLargePages) ?
                                   os::vm_page_size() : os::large_page_size();
      assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
    }

    reserved.release();
  }

  static void test_virtual_space_actual_committed_space_one_large_page() {
    if (!UseLargePages) {
      return;
    }

    size_t large_page_size = os::large_page_size();

    ReservedSpace reserved(large_page_size, large_page_size, true, false);

    assert(reserved.is_reserved(), "Must be");

    VirtualSpace vs;
    bool initialized = vs.initialize(reserved, 0);
    assert(initialized, "Failed to initialize VirtualSpace");

    vs.expand_by(large_page_size, false);

    assert_equals(vs.actual_committed_size(), large_page_size);

    reserved.release();
  }

  static void test_virtual_space_actual_committed_space() {
    test_virtual_space_actual_committed_space(4 * K, 0);
    test_virtual_space_actual_committed_space(4 * K, 4 * K);
    test_virtual_space_actual_committed_space(8 * K, 0);
    test_virtual_space_actual_committed_space(8 * K, 4 * K);
    test_virtual_space_actual_committed_space(8 * K, 8 * K);
    test_virtual_space_actual_committed_space(12 * K, 0);
    test_virtual_space_actual_committed_space(12 * K, 4 * K);
    test_virtual_space_actual_committed_space(12 * K, 8 * K);
    test_virtual_space_actual_committed_space(12 * K, 12 * K);
    test_virtual_space_actual_committed_space(64 * K, 0);
    test_virtual_space_actual_committed_space(64 * K, 32 * K);
    test_virtual_space_actual_committed_space(64 * K, 64 * K);
    test_virtual_space_actual_committed_space(2 * M, 0);
    test_virtual_space_actual_committed_space(2 * M, 4 * K);
    test_virtual_space_actual_committed_space(2 * M, 64 * K);
    test_virtual_space_actual_committed_space(2 * M, 1 * M);
    test_virtual_space_actual_committed_space(2 * M, 2 * M);
    test_virtual_space_actual_committed_space(10 * M, 0);
    test_virtual_space_actual_committed_space(10 * M, 4 * K);
    test_virtual_space_actual_committed_space(10 * M, 8 * K);
    test_virtual_space_actual_committed_space(10 * M, 1 * M);
    test_virtual_space_actual_committed_space(10 * M, 2 * M);
    test_virtual_space_actual_committed_space(10 * M, 5 * M);
    test_virtual_space_actual_committed_space(10 * M, 10 * M);
  }

  static void test_virtual_space_disable_large_pages() {
    if (!UseLargePages) {
      return;
    }
    // These test cases verify that if we force VirtualSpace to disable large
    // pages, the memory is committed with small-page granularity even though
    // UseLargePages is enabled.
    test_virtual_space_actual_committed_space(10 * M, 0, Disable);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);

    test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);

    test_virtual_space_actual_committed_space(10 * M, 0, Commit);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
  }

  static void test_virtual_space() {
    test_virtual_space_actual_committed_space();
    test_virtual_space_actual_committed_space_one_large_page();
    test_virtual_space_disable_large_pages();
  }
};

void TestVirtualSpace_test() {
  TestVirtualSpace::test_virtual_space();
}

#endif // PRODUCT

#endif