Wed, 14 Oct 2020 17:44:48 +0800
Merge
1 /*
2 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2015, 2018, Loongson Technology. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
26 #include "precompiled.hpp"
27 #include "oops/markOop.hpp"
28 #include "oops/oop.inline.hpp"
29 #include "runtime/virtualspace.hpp"
30 #include "services/memTracker.hpp"
31 #ifdef TARGET_OS_FAMILY_linux
32 # include "os_linux.inline.hpp"
33 #endif
34 #ifdef TARGET_OS_FAMILY_solaris
35 # include "os_solaris.inline.hpp"
36 #endif
37 #ifdef TARGET_OS_FAMILY_windows
38 # include "os_windows.inline.hpp"
39 #endif
40 #ifdef TARGET_OS_FAMILY_aix
41 # include "os_aix.inline.hpp"
42 #endif
43 #ifdef TARGET_OS_FAMILY_bsd
44 # include "os_bsd.inline.hpp"
45 #endif
47 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
49 // ReservedSpace
51 // Dummy constructor
52 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
53 _alignment(0), _special(false), _executable(false) {
54 }
// Reserve `size` bytes.  `preferred_page_size` == 0 means "pick a page size
// for this region"; a non-zero value asks for that page size explicitly, in
// which case `size` is rounded up so the region tiles in whole pages.
ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) {
  bool has_preferred_page_size = preferred_page_size != 0;
  // Want to use large pages where possible and pad with small pages.
  size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
  bool large_pages = page_size != (size_t)os::vm_page_size();
  size_t alignment;
  if (large_pages && has_preferred_page_size) {
    alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
    // ReservedSpace initialization requires size to be aligned to the given
    // alignment. Align the size up.
    size = align_size_up(size, alignment);
  } else {
    // Don't force the alignment to be large page aligned,
    // since that will waste memory.
    alignment = os::vm_allocation_granularity();
  }
  // Not at a requested address, no noaccess prefix, not executable.
  initialize(size, alignment, large_pages, NULL, 0, false);
}
75 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
76 bool large,
77 char* requested_address,
78 const size_t noaccess_prefix) {
79 initialize(size+noaccess_prefix, alignment, large, requested_address,
80 noaccess_prefix, false);
81 }
83 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
84 bool large,
85 bool executable) {
86 initialize(size, alignment, large, NULL, 0, executable);
87 }
// Helper method.
// Returns true if a reservation that asked for `requested_address` came back
// somewhere else (releasing the stray mapping so the caller may retry), and
// false if the reservation landed where requested or no address was requested.
static bool failed_to_reserve_as_requested(char* base, char* requested_address,
                                           const size_t size, bool special)
{
  if (base == requested_address || requested_address == NULL)
    return false; // did not fail

  if (base != NULL) {
    // Different reserve address may be acceptable in other cases
    // but for compressed oops heap should be at requested address.
    assert(UseCompressedOops, "currently requested address used only for compressed oops");
    if (PrintCompressedOopsMode) {
      tty->cr();
      tty->print_cr("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
    }
    // OS ignored requested address. Try different address.
    // Release with the matching primitive: special reservations were made
    // with reserve_memory_special() and must be released the same way.
    if (special) {
      if (!os::release_memory_special(base, size)) {
        fatal("os::release_memory_special failed");
      }
    } else {
      if (!os::release_memory(base, size)) {
        fatal("os::release_memory failed");
      }
    }
  }
  return true;
}
// Core reservation routine.  Reserves `size` bytes aligned to `alignment`,
// optionally pinned large pages (`large`), optionally at `requested_address`
// (adjusted down by `noaccess_prefix`).  On failure the object is left empty
// (_base == NULL); callers check base()/is_reserved().
void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
                               char* requested_address,
                               const size_t noaccess_prefix,
                               bool executable) {
  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  alignment = MAX2(alignment, (size_t)os::vm_page_size());

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == alignment, "noaccess prefix wrong");

  // Reset to the empty state before attempting the reservation.
  _base = NULL;
  _size = 0;
  _special = false;
  _executable = executable;
  _alignment = 0;
  _noaccess_prefix = 0;
  if (size == 0) {
    return;
  }

  // If OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  bool special = large && !os::can_commit_large_page_memory();
  char* base = NULL;

#if defined MIPS && !defined ZERO
  // Loongson: the first executable reservation is steered to a fixed low
  // address when UseCodeCacheAllocOpt is on.
  // NOTE(review): assumes only the code cache reserves executable space
  // through this path -- confirm against callers.
  size_t opt_reg_addr = 5 * os::Linux::page_size();
  static int code_cache_init_flag = 1;
  if (UseCodeCacheAllocOpt && code_cache_init_flag && executable) {
    code_cache_init_flag = 0;
    requested_address = (char*) opt_reg_addr;
  }
#endif

  if (requested_address != 0) {
    requested_address -= noaccess_prefix; // adjust requested address
    assert(requested_address != NULL, "huge noaccess prefix?");
  }

  if (special) {

    base = os::reserve_memory_special(size, alignment, requested_address, executable);

    if (base != NULL) {
      if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
        // OS ignored requested address. Try different address.
        return;
      }
      // Check alignment constraints.
      assert((uintptr_t) base % alignment == 0,
             err_msg("Large pages returned a non-aligned address, base: "
                 PTR_FORMAT " alignment: " PTR_FORMAT,
                 base, (void*)(uintptr_t)alignment));
      _special = true;
    } else {
      // failed; try to reserve regular memory below
      if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                            !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
        if (PrintCompressedOopsMode) {
          tty->cr();
          tty->print_cr("Reserve regular memory without large pages.");
        }
      }
    }
  }

  if (base == NULL) {
    // Optimistically assume that the OSes returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid over mapping something
    // important.  If available space is not detected, return NULL.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size, requested_address);
      if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // OS ignored requested address. Try different address.
        base = NULL;
#if defined MIPS && !defined ZERO
        // Loongson: the preferred code-cache address was unavailable; fall
        // back to an anonymous reservation.
        if (UseCodeCacheAllocOpt && requested_address == (char*) opt_reg_addr) {
          requested_address = NULL;
          base = os::reserve_memory(size, NULL, alignment);
        }
#endif
      }
    } else {
      base = os::reserve_memory(size, NULL, alignment);
    }

    if (base == NULL) return;

    // Check alignment constraints
    if ((((size_t)base + noaccess_prefix) & (alignment - 1)) != 0) {
      // Base not aligned, retry
      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
      // Make sure that size is aligned
      size = align_size_up(size, alignment);
      base = os::reserve_memory_aligned(size, alignment);

      if (requested_address != 0 &&
          failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // As a result of the alignment constraints, the allocated base differs
        // from the requested address. Return back to the caller who can
        // take remedial action (like try again without a requested address).
        assert(_base == NULL, "should be");
        return;
      }
    }
  }
  // Done
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = noaccess_prefix;

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == _alignment, "noaccess prefix wrong");

  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
         "area must be distinguisable from marks for mark-sweep");
  assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
         "area must be distinguisable from marks for mark-sweep");
}
254 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
255 bool special, bool executable) {
256 assert((size % os::vm_allocation_granularity()) == 0,
257 "size not allocation aligned");
258 _base = base;
259 _size = size;
260 _alignment = alignment;
261 _noaccess_prefix = 0;
262 _special = special;
263 _executable = executable;
264 }
267 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
268 bool split, bool realloc) {
269 assert(partition_size <= size(), "partition failed");
270 if (split) {
271 os::split_reserved_memory(base(), size(), partition_size, realloc);
272 }
273 ReservedSpace result(base(), partition_size, alignment, special(),
274 executable());
275 return result;
276 }
279 ReservedSpace
280 ReservedSpace::last_part(size_t partition_size, size_t alignment) {
281 assert(partition_size <= size(), "partition failed");
282 ReservedSpace result(base() + partition_size, size() - partition_size,
283 alignment, special(), executable());
284 return result;
285 }
288 size_t ReservedSpace::page_align_size_up(size_t size) {
289 return align_size_up(size, os::vm_page_size());
290 }
293 size_t ReservedSpace::page_align_size_down(size_t size) {
294 return align_size_down(size, os::vm_page_size());
295 }
298 size_t ReservedSpace::allocation_align_size_up(size_t size) {
299 return align_size_up(size, os::vm_allocation_granularity());
300 }
303 size_t ReservedSpace::allocation_align_size_down(size_t size) {
304 return align_size_down(size, os::vm_allocation_granularity());
305 }
308 void ReservedSpace::release() {
309 if (is_reserved()) {
310 char *real_base = _base - _noaccess_prefix;
311 const size_t real_size = _size + _noaccess_prefix;
312 if (special()) {
313 os::release_memory_special(real_base, real_size);
314 } else{
315 os::release_memory(real_base, real_size);
316 }
317 _base = NULL;
318 _size = 0;
319 _noaccess_prefix = 0;
320 _special = false;
321 _executable = false;
322 }
323 }
// Make the noaccess prefix at the bottom of the reservation inaccessible
// (so implicit null checks on compressed oops fault there), then advance
// _base/_size past the prefix.  `size` is the caller's expected final size.
void ReservedSpace::protect_noaccess_prefix(const size_t size) {
  assert( (_noaccess_prefix != 0) == (UseCompressedOops && _base != NULL &&
                                      (Universe::narrow_oop_base() != NULL) &&
                                      Universe::narrow_oop_use_implicit_null_checks()),
         "noaccess_prefix should be used only with non zero based compressed oops");

  // If there is no noaccess prefix, return.
  if (_noaccess_prefix == 0) return;

  assert(_noaccess_prefix >= (size_t)os::vm_page_size(),
         "must be at least page size big");

  // Protect memory at the base of the allocated region.
  // If special, the page was committed (only matters on windows)
  if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE,
                          _special)) {
    fatal("cannot protect protection page");
  }
  if (PrintCompressedOopsMode) {
    tty->cr();
    tty->print_cr("Protected page at the reserved heap base: " PTR_FORMAT " / " INTX_FORMAT " bytes", _base, _noaccess_prefix);
  }

  // Shrink the usable region: callers see only the memory above the prefix.
  _base += _noaccess_prefix;
  _size -= _noaccess_prefix;
  assert((size == _size) && ((uintptr_t)_base % _alignment == 0),
         "must be exactly of required size and alignment");
}
// Reserve space for the Java heap.  A noaccess prefix (lcm of page size and
// alignment) is requested only when compressed oops run with a non-NULL base
// and implicit null checks.
ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment,
                                     bool large, char* requested_address) :
  ReservedSpace(size, alignment, large,
                requested_address,
                (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
                 Universe::narrow_oop_use_implicit_null_checks()) ?
                  lcm(os::vm_page_size(), alignment) : 0) {
  if (base() != NULL) {
    // Tag the reservation for Native Memory Tracking.
    MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
  }

  // Only reserved space for the java heap should have a noaccess_prefix
  // if using compressed oops.
  protect_noaccess_prefix(size);
}
370 // Reserve space for code segment. Same as Java heap only we mark this as
371 // executable.
372 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
373 size_t rs_align,
374 bool large) :
375 ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
376 MemTracker::record_virtual_memory_type((address)base(), mtCode);
377 }
379 // VirtualSpace
381 VirtualSpace::VirtualSpace() {
382 _low_boundary = NULL;
383 _high_boundary = NULL;
384 _low = NULL;
385 _high = NULL;
386 _lower_high = NULL;
387 _middle_high = NULL;
388 _upper_high = NULL;
389 _lower_high_boundary = NULL;
390 _middle_high_boundary = NULL;
391 _upper_high_boundary = NULL;
392 _lower_alignment = 0;
393 _middle_alignment = 0;
394 _upper_alignment = 0;
395 _special = false;
396 _executable = false;
397 }
400 bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
401 const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1);
402 return initialize_with_granularity(rs, committed_size, max_commit_granularity);
403 }
// Set up the three-region (lower/middle/upper) commit bookkeeping over `rs`
// and commit the first `committed_size` bytes.  Returns false if `rs` was
// never reserved or the initial commit fails.
bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
  if(!rs.is_reserved()) return false;  // allocation failed.
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  assert(max_commit_granularity > 0, "Granularity must be non-zero.");

  _low_boundary  = rs.base();
  _high_boundary = low_boundary() + rs.size();

  // Nothing committed yet: the committed window [low, high) is empty.
  _low = low_boundary();
  _high = low();

  _special = rs.special();
  _executable = rs.executable();

  // When a VirtualSpace begins life at a large size, make all future expansion
  // and shrinking occur aligned to a granularity of large pages.  This avoids
  // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically,  we see that with a 4MB
  // page size, the only spaces that get handled this way are codecache and
  // the heap itself, both of which provide a substantial performance
  // boost in many benchmarks when covered by large pages.
  //
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not aligned so already.
  _lower_alignment  = os::vm_page_size();
  _middle_alignment = max_commit_granularity;
  _upper_alignment  = os::vm_page_size();

  // End of each region
  _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
  _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
  _upper_high_boundary = high_boundary();

  // High address of each region
  _lower_high = low_boundary();
  _middle_high = lower_high_boundary();
  _upper_high = middle_high_boundary();

  // commit to initial size
  if (committed_size > 0) {
    if (!expand_by(committed_size)) {
      return false;
    }
  }
  return true;
}
453 VirtualSpace::~VirtualSpace() {
454 release();
455 }
458 void VirtualSpace::release() {
459 // This does not release memory it never reserved.
460 // Caller must release via rs.release();
461 _low_boundary = NULL;
462 _high_boundary = NULL;
463 _low = NULL;
464 _high = NULL;
465 _lower_high = NULL;
466 _middle_high = NULL;
467 _upper_high = NULL;
468 _lower_high_boundary = NULL;
469 _middle_high_boundary = NULL;
470 _upper_high_boundary = NULL;
471 _lower_alignment = 0;
472 _middle_alignment = 0;
473 _upper_alignment = 0;
474 _special = false;
475 _executable = false;
476 }
479 size_t VirtualSpace::committed_size() const {
480 return pointer_delta(high(), low(), sizeof(char));
481 }
484 size_t VirtualSpace::reserved_size() const {
485 return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
486 }
489 size_t VirtualSpace::uncommitted_size() const {
490 return reserved_size() - committed_size();
491 }
// Bytes actually committed, summed over the lower/middle/upper regions.
// Differs from committed_size() in that it reports per-region commit state
// rather than the logical [low, high) window.
size_t VirtualSpace::actual_committed_size() const {
  // Special VirtualSpaces commit all reserved space up front.
  if (special()) {
    return reserved_size();
  }

  size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
  size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
  size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));

#ifdef ASSERT
  // Sanity: regions fill bottom-up, so a partially filled region implies
  // every region above it is empty and every region below it is full.
  size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
  size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
  size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));

  if (committed_high > 0) {
    assert(committed_low == lower, "Must be");
    assert(committed_middle == middle, "Must be");
  }

  if (committed_middle > 0) {
    assert(committed_low == lower, "Must be");
  }
  if (committed_middle < middle) {
    assert(committed_high == 0, "Must be");
  }

  if (committed_low < lower) {
    assert(committed_high == 0, "Must be");
    assert(committed_middle == 0, "Must be");
  }
#endif

  return committed_low + committed_middle + committed_high;
}
530 bool VirtualSpace::contains(const void* p) const {
531 return low() <= (const char*) p && (const char*) p < high();
532 }
/*
   First we need to determine if a particular virtual space is using large
   pages.  This is done at the initialize function and only virtual spaces
   that are larger than LargePageSizeInBytes use large pages.  Once we
   have determined this, all expand_by and shrink_by calls must grow and
   shrink by large page size chunks.  If a particular request
   is within the current large page, the call to commit and uncommit memory
   can be ignored.  In the case that the low and high boundaries of this
   space are not large page aligned, the pages leading to the first large
   page address and the pages after the last large page address must be
   allocated with default pages.
*/
// Grow the committed window by `bytes`, committing each of the three regions
// (lower/middle/upper) up to its aligned new high.  Returns false if there is
// not enough uncommitted space or an OS commit fails; on such a failure the
// regions committed so far remain committed.
bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
  if (uncommitted_size() < bytes) return false;

  if (special()) {
    // don't commit memory if the entire space is pinned in memory
    _high += bytes;
    return true;
  }

  char* previous_high = high();
  char* unaligned_new_high = high() + bytes;
  assert(unaligned_new_high <= high_boundary(),
         "cannot expand by more than upper boundary");

  // Calculate where the new high for each of the regions should be.  If
  // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
  // then the unaligned lower and upper new highs would be the
  // lower_high() and upper_high() respectively.
  char* unaligned_lower_new_high =
    MIN2(unaligned_new_high, lower_high_boundary());
  char* unaligned_middle_new_high =
    MIN2(unaligned_new_high, middle_high_boundary());
  char* unaligned_upper_new_high =
    MIN2(unaligned_new_high, upper_high_boundary());

  // Align the new highs based on the regions alignment.  lower and upper
  // alignment will always be default page size.  middle alignment will be
  // LargePageSizeInBytes if the actual size of the virtual space is in
  // fact larger than LargePageSizeInBytes.
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());

  // Determine which regions need to grow in this expand_by call.
  // If you are growing in the lower region, high() must be in that
  // region so calculate the size based on high().  For the middle and
  // upper regions, determine the starting point of growth based on the
  // location of high().  By getting the MAX of the region's low address
  // (or the previous region's high address) and high(), we can tell if it
  // is an intra or inter region growth.
  size_t lower_needs = 0;
  if (aligned_lower_new_high > lower_high()) {
    lower_needs =
      pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high > middle_high()) {
    middle_needs =
      pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
  }
  size_t upper_needs = 0;
  if (aligned_upper_new_high > upper_high()) {
    upper_needs =
      pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
  }

  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");

  // Commit regions
  if (lower_needs > 0) {
    assert(low_boundary() <= lower_high() &&
           lower_high() + lower_needs <= lower_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", lower_needs=" SIZE_FORMAT ", %d) failed",
                         lower_high(), lower_needs, _executable);)
      return false;
    } else {
      _lower_high += lower_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= middle_high() &&
           middle_high() + middle_needs <= middle_high_boundary(),
           "must not expand beyond region");
    // Middle region commits carry their (possibly large page) alignment.
    if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
                           _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", middle_needs=" SIZE_FORMAT ", " SIZE_FORMAT
                         ", %d) failed", middle_high(), middle_needs,
                         middle_alignment(), _executable);)
      return false;
    }
    _middle_high += middle_needs;
  }
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= upper_high() &&
           upper_high() + upper_needs <= upper_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", upper_needs=" SIZE_FORMAT ", %d) failed",
                         upper_high(), upper_needs, _executable);)
      return false;
    } else {
      _upper_high += upper_needs;
    }
  }

  if (pre_touch || AlwaysPreTouch) {
    // Touch the newly committed range so pages are actually backed.
    os::pretouch_memory(previous_high, unaligned_new_high);
  }

  _high += bytes;
  return true;
}
// A page is uncommitted if the contents of the entire page is deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary
// in which case that particular page can now be uncommitted.
//
// Shrink the committed window by `size` bytes, uncommitting top-down through
// the upper/middle/lower regions.  Fatal if `size` exceeds the committed size.
void VirtualSpace::shrink_by(size_t size) {
  if (committed_size() < size)
    fatal("Cannot shrink virtual space to negative size");

  if (special()) {
    // don't uncommit if the entire space is pinned in memory
    _high -= size;
    return;
  }

  char* unaligned_new_high = high() - size;
  assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");

  // Calculate new unaligned address
  char* unaligned_upper_new_high =
    MAX2(unaligned_new_high, middle_high_boundary());
  char* unaligned_middle_new_high =
    MAX2(unaligned_new_high, lower_high_boundary());
  char* unaligned_lower_new_high =
    MAX2(unaligned_new_high, low_boundary());

  // Align address to region's alignment
  // (round_to keeps partially used pages committed; only whole pages at or
  // above the aligned new high are given back).
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());

  // Determine which regions need to shrink
  size_t upper_needs = 0;
  if (aligned_upper_new_high < upper_high()) {
    upper_needs =
      pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high < middle_high()) {
    middle_needs =
      pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
  }
  size_t lower_needs = 0;
  if (aligned_lower_new_high < lower_high()) {
    lower_needs =
      pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
  }

  // Check contiguity.
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");

  // Uncommit
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= aligned_upper_new_high &&
           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _upper_high -= upper_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= aligned_middle_new_high &&
           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _middle_high -= middle_needs;
    }
  }
  if (lower_needs > 0) {
    assert(low_boundary() <= aligned_lower_new_high &&
           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _lower_high -= lower_needs;
    }
  }

  _high -= size;
}
764 #ifndef PRODUCT
// Debug-only invariant check: the three per-region high pointers must lie
// within their regions and the regions must tile [low_boundary, high_boundary).
void VirtualSpace::check_for_contiguity() {
  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(low() >= low_boundary(), "low");
  assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
  assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
  assert(high() <= upper_high(), "upper high");
}
782 void VirtualSpace::print_on(outputStream* out) {
783 out->print ("Virtual space:");
784 if (special()) out->print(" (pinned in memory)");
785 out->cr();
786 out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
787 out->print_cr(" - reserved: " SIZE_FORMAT, reserved_size());
788 out->print_cr(" - [low, high]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", low(), high());
789 out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", low_boundary(), high_boundary());
790 }
792 void VirtualSpace::print() {
793 print_on(tty);
794 }
796 /////////////// Unit tests ///////////////
798 #ifndef PRODUCT
// Logging helper for the unit tests below: prints and flushes only when
// -XX:+VerboseInternalVMTests is set.
#define test_log(...) \
  do {\
    if (VerboseInternalVMTests) { \
      tty->print_cr(__VA_ARGS__); \
      tty->flush(); \
    }\
  } while (false)
// Smoke tests for ReservedSpace; driven by TestReservedSpace_test() below.
class TestReservedSpace : AllStatic {
 public:
  // Write one byte per small page in [addr, addr+size) so pinned ("special")
  // mappings are actually touched.
  static void small_page_write(void* addr, size_t size) {
    size_t page_size = os::vm_page_size();

    char* end = (char*)addr + size;
    for (char* p = (char*)addr; p < end; p += page_size) {
      *p = 1;
    }
  }

  // Hand the test reservation back to the OS, using the release primitive
  // that matches how it was reserved.
  static void release_memory_for_test(ReservedSpace rs) {
    if (rs.special()) {
      guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
    } else {
      guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
    }
  }

  // Reserve with an explicit alignment (5-arg ctor) and verify base/size
  // alignment invariants.
  static void test_reserved_space1(size_t size, size_t alignment) {
    test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);

    assert(is_size_aligned(size, alignment), "Incorrect input parameters");

    ReservedSpace rs(size,          // size
                     alignment,     // alignment
                     UseLargePages, // large
                     NULL,          // requested_address
                     0);            // noacces_prefix

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
    assert(is_size_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }

  // Reserve via the 1-arg ctor (default page-size selection).
  static void test_reserved_space2(size_t size) {
    test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);

    assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");

    ReservedSpace rs(size);

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }

  // Reserve via the 4-arg ctor, optionally with large pages when both the
  // flag and the size permit.
  static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
    test_log("test_reserved_space3(%p, %p, %d)",
        (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);

    assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
    assert(is_size_aligned(size, alignment), "Must be at least aligned against alignment");

    bool large = maybe_large && UseLargePages && size >= os::large_page_size();

    ReservedSpace rs(size, alignment, large, false);

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }


  static void test_reserved_space1() {
    size_t size = 2 * 1024 * 1024;
    size_t ag   = os::vm_allocation_granularity();

    test_reserved_space1(size,      ag);
    test_reserved_space1(size * 2,  ag);
    test_reserved_space1(size * 10, ag);
  }

  static void test_reserved_space2() {
    size_t size = 2 * 1024 * 1024;
    size_t ag = os::vm_allocation_granularity();

    test_reserved_space2(size * 1);
    test_reserved_space2(size * 2);
    test_reserved_space2(size * 10);
    test_reserved_space2(ag);
    test_reserved_space2(size - ag);
    test_reserved_space2(size);
    test_reserved_space2(size + ag);
    test_reserved_space2(size * 2);
    test_reserved_space2(size * 2 - ag);
    test_reserved_space2(size * 2 + ag);
    test_reserved_space2(size * 3);
    test_reserved_space2(size * 3 - ag);
    test_reserved_space2(size * 3 + ag);
    test_reserved_space2(size * 10);
    test_reserved_space2(size * 10 + size / 2);
  }

  static void test_reserved_space3() {
    size_t ag = os::vm_allocation_granularity();

    test_reserved_space3(ag,      ag    , false);
    test_reserved_space3(ag * 2,  ag    , false);
    test_reserved_space3(ag * 3,  ag    , false);
    test_reserved_space3(ag * 2,  ag * 2, false);
    test_reserved_space3(ag * 4,  ag * 2, false);
    test_reserved_space3(ag * 8,  ag * 2, false);
    test_reserved_space3(ag * 4,  ag * 4, false);
    test_reserved_space3(ag * 8,  ag * 4, false);
    test_reserved_space3(ag * 16, ag * 4, false);

    if (UseLargePages) {
      size_t lp = os::large_page_size();

      // Without large pages
      test_reserved_space3(lp,     ag * 4, false);
      test_reserved_space3(lp * 2, ag * 4, false);
      test_reserved_space3(lp * 4, ag * 4, false);
      test_reserved_space3(lp,     lp    , false);
      test_reserved_space3(lp * 2, lp    , false);
      test_reserved_space3(lp * 3, lp    , false);
      test_reserved_space3(lp * 2, lp * 2, false);
      test_reserved_space3(lp * 4, lp * 2, false);
      test_reserved_space3(lp * 8, lp * 2, false);

      // With large pages
      test_reserved_space3(lp, ag * 4    , true);
      test_reserved_space3(lp * 2, ag * 4, true);
      test_reserved_space3(lp * 4, ag * 4, true);
      test_reserved_space3(lp, lp        , true);
      test_reserved_space3(lp * 2, lp    , true);
      test_reserved_space3(lp * 3, lp    , true);
      test_reserved_space3(lp * 2, lp * 2, true);
      test_reserved_space3(lp * 4, lp * 2, true);
      test_reserved_space3(lp * 8, lp * 2, true);
    }
  }

  // Run all ReservedSpace tests.
  static void test_reserved_space() {
    test_reserved_space1();
    test_reserved_space2();
    test_reserved_space3();
  }
};
973 void TestReservedSpace_test() {
974 TestReservedSpace::test_reserved_space();
975 }
// Assert that two size_t expressions are equal; on failure the message
// reports both values.
// NOTE: the arguments are evaluated more than once -- keep them free of
// side effects. Parameters are parenthesized so that expressions with
// low-precedence operators (e.g. "a ? b : c") expand correctly.
#define assert_equals(actual, expected) \
  assert((actual) == (expected), \
         err_msg("Got " SIZE_FORMAT " expected " \
                 SIZE_FORMAT, (actual), (expected)));
// Assert that value1 >= value2; on failure the message reports both
// argument expressions (stringized) and their values.
// NOTE: the arguments are evaluated more than once -- keep them free of
// side effects. Parameters are parenthesized so that expressions with
// low-precedence operators expand correctly.
#define assert_ge(value1, value2) \
  assert((value1) >= (value2), \
         err_msg("'" #value1 "': " SIZE_FORMAT " '" \
                 #value2 "': " SIZE_FORMAT, (value1), (value2)));
// Assert that value1 < value2; on failure the message reports both
// argument expressions (stringized) and their values.
// NOTE: the arguments are evaluated more than once -- keep them free of
// side effects. Parameters are parenthesized so that expressions with
// low-precedence operators expand correctly.
#define assert_lt(value1, value2) \
  assert((value1) < (value2), \
         err_msg("'" #value1 "': " SIZE_FORMAT " '" \
                 #value2 "': " SIZE_FORMAT, (value1), (value2)));
993 class TestVirtualSpace : AllStatic {
994 enum TestLargePages {
995 Default,
996 Disable,
997 Reserve,
998 Commit
999 };
1001 static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
1002 switch(mode) {
1003 default:
1004 case Default:
1005 case Reserve:
1006 return ReservedSpace(reserve_size_aligned);
1007 case Disable:
1008 case Commit:
1009 return ReservedSpace(reserve_size_aligned,
1010 os::vm_allocation_granularity(),
1011 /* large */ false, /* exec */ false);
1012 }
1013 }
1015 static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
1016 switch(mode) {
1017 default:
1018 case Default:
1019 case Reserve:
1020 return vs.initialize(rs, 0);
1021 case Disable:
1022 return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
1023 case Commit:
1024 return vs.initialize_with_granularity(rs, 0, os::page_size_for_region_unaligned(rs.size(), 1));
1025 }
1026 }
1028 public:
1029 static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
1030 TestLargePages mode = Default) {
1031 size_t granularity = os::vm_allocation_granularity();
1032 size_t reserve_size_aligned = align_size_up(reserve_size, granularity);
1034 ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);
1036 assert(reserved.is_reserved(), "Must be");
1038 VirtualSpace vs;
1039 bool initialized = initialize_virtual_space(vs, reserved, mode);
1040 assert(initialized, "Failed to initialize VirtualSpace");
1042 vs.expand_by(commit_size, false);
1044 if (vs.special()) {
1045 assert_equals(vs.actual_committed_size(), reserve_size_aligned);
1046 } else {
1047 assert_ge(vs.actual_committed_size(), commit_size);
1048 // Approximate the commit granularity.
1049 // Make sure that we don't commit using large pages
1050 // if large pages has been disabled for this VirtualSpace.
1051 size_t commit_granularity = (mode == Disable || !UseLargePages) ?
1052 os::vm_page_size() : os::large_page_size();
1053 assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
1054 }
1056 reserved.release();
1057 }
1059 static void test_virtual_space_actual_committed_space_one_large_page() {
1060 if (!UseLargePages) {
1061 return;
1062 }
1064 size_t large_page_size = os::large_page_size();
1066 ReservedSpace reserved(large_page_size, large_page_size, true, false);
1068 assert(reserved.is_reserved(), "Must be");
1070 VirtualSpace vs;
1071 bool initialized = vs.initialize(reserved, 0);
1072 assert(initialized, "Failed to initialize VirtualSpace");
1074 vs.expand_by(large_page_size, false);
1076 assert_equals(vs.actual_committed_size(), large_page_size);
1078 reserved.release();
1079 }
1081 static void test_virtual_space_actual_committed_space() {
1082 test_virtual_space_actual_committed_space(4 * K, 0);
1083 test_virtual_space_actual_committed_space(4 * K, 4 * K);
1084 test_virtual_space_actual_committed_space(8 * K, 0);
1085 test_virtual_space_actual_committed_space(8 * K, 4 * K);
1086 test_virtual_space_actual_committed_space(8 * K, 8 * K);
1087 test_virtual_space_actual_committed_space(12 * K, 0);
1088 test_virtual_space_actual_committed_space(12 * K, 4 * K);
1089 test_virtual_space_actual_committed_space(12 * K, 8 * K);
1090 test_virtual_space_actual_committed_space(12 * K, 12 * K);
1091 test_virtual_space_actual_committed_space(64 * K, 0);
1092 test_virtual_space_actual_committed_space(64 * K, 32 * K);
1093 test_virtual_space_actual_committed_space(64 * K, 64 * K);
1094 test_virtual_space_actual_committed_space(2 * M, 0);
1095 test_virtual_space_actual_committed_space(2 * M, 4 * K);
1096 test_virtual_space_actual_committed_space(2 * M, 64 * K);
1097 test_virtual_space_actual_committed_space(2 * M, 1 * M);
1098 test_virtual_space_actual_committed_space(2 * M, 2 * M);
1099 test_virtual_space_actual_committed_space(10 * M, 0);
1100 test_virtual_space_actual_committed_space(10 * M, 4 * K);
1101 test_virtual_space_actual_committed_space(10 * M, 8 * K);
1102 test_virtual_space_actual_committed_space(10 * M, 1 * M);
1103 test_virtual_space_actual_committed_space(10 * M, 2 * M);
1104 test_virtual_space_actual_committed_space(10 * M, 5 * M);
1105 test_virtual_space_actual_committed_space(10 * M, 10 * M);
1106 }
1108 static void test_virtual_space_disable_large_pages() {
1109 if (!UseLargePages) {
1110 return;
1111 }
1112 // These test cases verify that if we force VirtualSpace to disable large pages
1113 test_virtual_space_actual_committed_space(10 * M, 0, Disable);
1114 test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
1115 test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
1116 test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
1117 test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
1118 test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
1119 test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);
1121 test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
1122 test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
1123 test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
1124 test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
1125 test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
1126 test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
1127 test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);
1129 test_virtual_space_actual_committed_space(10 * M, 0, Commit);
1130 test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
1131 test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
1132 test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
1133 test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
1134 test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
1135 test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
1136 }
1138 static void test_virtual_space() {
1139 test_virtual_space_actual_committed_space();
1140 test_virtual_space_actual_committed_space_one_large_page();
1141 test_virtual_space_disable_large_pages();
1142 }
1143 };
1145 void TestVirtualSpace_test() {
1146 TestVirtualSpace::test_virtual_space();
1147 }
1149 #endif // PRODUCT
1151 #endif