Thu, 24 May 2018 19:24:53 +0800
#7046 Disable compilation when the branch offset is beyond the short-branch range
Contributed-by: fujie, aoqi
1 /*
2 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "oops/markOop.hpp"
27 #include "oops/oop.inline.hpp"
28 #include "runtime/virtualspace.hpp"
29 #include "services/memTracker.hpp"
30 #ifdef TARGET_OS_FAMILY_linux
31 # include "os_linux.inline.hpp"
32 #endif
33 #ifdef TARGET_OS_FAMILY_solaris
34 # include "os_solaris.inline.hpp"
35 #endif
36 #ifdef TARGET_OS_FAMILY_windows
37 # include "os_windows.inline.hpp"
38 #endif
39 #ifdef TARGET_OS_FAMILY_aix
40 # include "os_aix.inline.hpp"
41 #endif
42 #ifdef TARGET_OS_FAMILY_bsd
43 # include "os_bsd.inline.hpp"
44 #endif
46 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
48 // ReservedSpace
// Dummy constructor: creates an empty, unreserved space. All fields are
// zeroed so that is_reserved() reports false until initialize() succeeds.
ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
    _alignment(0), _special(false), _executable(false) {
}
// Reserve 'size' bytes, preferring the given page size when non-zero;
// otherwise pick the largest page size usable for a region of this size.
ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) {
  bool has_preferred_page_size = preferred_page_size != 0;
  // Want to use large pages where possible and pad with small pages.
  size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
  bool large_pages = page_size != (size_t)os::vm_page_size();
  size_t alignment;
  if (large_pages && has_preferred_page_size) {
    alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
    // ReservedSpace initialization requires size to be aligned to the given
    // alignment. Align the size up.
    size = align_size_up(size, alignment);
  } else {
    // Don't force the alignment to be large page aligned,
    // since that will waste memory.
    alignment = os::vm_allocation_granularity();
  }
  initialize(size, alignment, large_pages, NULL, 0, false);
}
74 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
75 bool large,
76 char* requested_address,
77 const size_t noaccess_prefix) {
78 initialize(size+noaccess_prefix, alignment, large, requested_address,
79 noaccess_prefix, false);
80 }
// Reserve a space that may be marked executable (used for the code cache).
ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             bool executable) {
  initialize(size, alignment, large, NULL, 0, executable);
}
88 // Helper method.
89 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
90 const size_t size, bool special)
91 {
92 if (base == requested_address || requested_address == NULL)
93 return false; // did not fail
95 if (base != NULL) {
96 // Different reserve address may be acceptable in other cases
97 // but for compressed oops heap should be at requested address.
98 assert(UseCompressedOops, "currently requested address used only for compressed oops");
99 if (PrintCompressedOopsMode) {
100 tty->cr();
101 tty->print_cr("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
102 }
103 // OS ignored requested address. Try different address.
104 if (special) {
105 if (!os::release_memory_special(base, size)) {
106 fatal("os::release_memory_special failed");
107 }
108 } else {
109 if (!os::release_memory(base, size)) {
110 fatal("os::release_memory failed");
111 }
112 }
113 }
114 return true;
115 }
// Reserve (but do not commit) a region of virtual memory of the given size
// and alignment. On failure _base stays NULL and _size 0. 'large' requests
// large pages; 'requested_address' pins the reservation to a specific
// address (used by compressed oops); 'noaccess_prefix' asks for an extra
// protected region in front of the heap.
void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
                               char* requested_address,
                               const size_t noaccess_prefix,
                               bool executable) {
  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  alignment = MAX2(alignment, (size_t)os::vm_page_size());

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == alignment, "noaccess prefix wrong");

  // Reset members first so a failed reservation leaves a cleanly empty space.
  _base = NULL;
  _size = 0;
  _special = false;
  _executable = executable;
  _alignment = 0;
  _noaccess_prefix = 0;
  if (size == 0) {
    return;
  }

  // If OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  bool special = large && !os::can_commit_large_page_memory();
  char* base = NULL;

#ifdef MIPS64
  // NOTE(review): on MIPS the very first executable reservation (the code
  // cache) is redirected to a fixed low address, presumably so branches
  // inside the code cache stay within short-branch reach — confirm against
  // the MIPS port's UseCodeCacheAllocOpt documentation.
  static int code_cache_init_flag = 1;
  if (UseCodeCacheAllocOpt && code_cache_init_flag && executable) {
    code_cache_init_flag = 0;
    requested_address = (char*) (5 * os::Linux::page_size());
  }
#endif

  if (requested_address != 0) {
    requested_address -= noaccess_prefix; // adjust requested address
    assert(requested_address != NULL, "huge noaccess prefix?");
  }

  if (special) {
    // Large pages that cannot be committed on demand: reserve and pin the
    // whole region up front.
    base = os::reserve_memory_special(size, alignment, requested_address, executable);

    if (base != NULL) {
      if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
        // OS ignored requested address. Try different address.
        return;
      }
      // Check alignment constraints.
      assert((uintptr_t) base % alignment == 0,
             err_msg("Large pages returned a non-aligned address, base: "
                 PTR_FORMAT " alignment: " PTR_FORMAT,
                 base, (void*)(uintptr_t)alignment));
      _special = true;
    } else {
      // failed; try to reserve regular memory below
      if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                            !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
        if (PrintCompressedOopsMode) {
          tty->cr();
          tty->print_cr("Reserve regular memory without large pages.");
        }
      }
    }
  }

  if (base == NULL) {
    // Optimistically assume that the OSes returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid over mapping something
    // important.  If available space is not detected, return NULL.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size, requested_address);
      if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // OS ignored requested address. Try different address.
        base = NULL;
      }
    } else {
      base = os::reserve_memory(size, NULL, alignment);
    }

    if (base == NULL) return;

    // Check alignment constraints
    if ((((size_t)base + noaccess_prefix) & (alignment - 1)) != 0) {
      // Base not aligned, retry
      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
      // Make sure that size is aligned
      size = align_size_up(size, alignment);
      base = os::reserve_memory_aligned(size, alignment);

      if (requested_address != 0 &&
          failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // As a result of the alignment constraints, the allocated base differs
        // from the requested address. Return back to the caller who can
        // take remedial action (like try again without a requested address).
        assert(_base == NULL, "should be");
        return;
      }
    }
  }
  // Done
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = noaccess_prefix;

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == _alignment, "noaccess prefix wrong");

  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
         "area must be distinguisable from marks for mark-sweep");
  assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
         "area must be distinguisable from marks for mark-sweep");
}
246 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
247 bool special, bool executable) {
248 assert((size % os::vm_allocation_granularity()) == 0,
249 "size not allocation aligned");
250 _base = base;
251 _size = size;
252 _alignment = alignment;
253 _noaccess_prefix = 0;
254 _special = special;
255 _executable = executable;
256 }
259 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
260 bool split, bool realloc) {
261 assert(partition_size <= size(), "partition failed");
262 if (split) {
263 os::split_reserved_memory(base(), size(), partition_size, realloc);
264 }
265 ReservedSpace result(base(), partition_size, alignment, special(),
266 executable());
267 return result;
268 }
271 ReservedSpace
272 ReservedSpace::last_part(size_t partition_size, size_t alignment) {
273 assert(partition_size <= size(), "partition failed");
274 ReservedSpace result(base() + partition_size, size() - partition_size,
275 alignment, special(), executable());
276 return result;
277 }
280 size_t ReservedSpace::page_align_size_up(size_t size) {
281 return align_size_up(size, os::vm_page_size());
282 }
285 size_t ReservedSpace::page_align_size_down(size_t size) {
286 return align_size_down(size, os::vm_page_size());
287 }
290 size_t ReservedSpace::allocation_align_size_up(size_t size) {
291 return align_size_up(size, os::vm_allocation_granularity());
292 }
295 size_t ReservedSpace::allocation_align_size_down(size_t size) {
296 return align_size_down(size, os::vm_allocation_granularity());
297 }
300 void ReservedSpace::release() {
301 if (is_reserved()) {
302 char *real_base = _base - _noaccess_prefix;
303 const size_t real_size = _size + _noaccess_prefix;
304 if (special()) {
305 os::release_memory_special(real_base, real_size);
306 } else{
307 os::release_memory(real_base, real_size);
308 }
309 _base = NULL;
310 _size = 0;
311 _noaccess_prefix = 0;
312 _special = false;
313 _executable = false;
314 }
315 }
// Make the noaccess prefix page(s) at the start of the reservation
// inaccessible so that compressed-oops implicit null checks fault there,
// then shift _base/_size past the prefix. 'size' is the usable size the
// caller expects to remain afterwards.
void ReservedSpace::protect_noaccess_prefix(const size_t size) {
  assert( (_noaccess_prefix != 0) == (UseCompressedOops && _base != NULL &&
                                      (Universe::narrow_oop_base() != NULL) &&
                                      Universe::narrow_oop_use_implicit_null_checks()),
         "noaccess_prefix should be used only with non zero based compressed oops");

  // If there is no noaccess prefix, return.
  if (_noaccess_prefix == 0) return;

  assert(_noaccess_prefix >= (size_t)os::vm_page_size(),
         "must be at least page size big");

  // Protect memory at the base of the allocated region.
  // If special, the page was committed (only matters on windows)
  if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE,
                          _special)) {
    fatal("cannot protect protection page");
  }
  if (PrintCompressedOopsMode) {
    tty->cr();
    tty->print_cr("Protected page at the reserved heap base: " PTR_FORMAT " / " INTX_FORMAT " bytes", _base, _noaccess_prefix);
  }

  // Carve the protected prefix off the usable region.
  _base += _noaccess_prefix;
  _size -= _noaccess_prefix;
  assert((size == _size) && ((uintptr_t)_base % _alignment == 0),
         "must be exactly of required size and alignment");
}
// Reserve space for the Java heap. A noaccess prefix is requested only for
// non-zero-based compressed oops with implicit null checks, where the page
// in front of the heap must fault on access.
ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment,
                                     bool large, char* requested_address) :
  ReservedSpace(size, alignment, large,
                requested_address,
                (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
                 Universe::narrow_oop_use_implicit_null_checks()) ?
                  lcm(os::vm_page_size(), alignment) : 0) {
  if (base() > 0) {
    // Tag the region for native memory tracking.
    MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
  }

  // Only reserved space for the java heap should have a noaccess_prefix
  // if using compressed oops.
  protect_noaccess_prefix(size);
}
// Reserve space for code segment.  Same as Java heap only we mark this as
// executable.
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
                                     size_t rs_align,
                                     bool large) :
  ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
  // Tag the region for native memory tracking.
  MemTracker::record_virtual_memory_type((address)base(), mtCode);
}
371 // VirtualSpace
// Default constructor: an uninitialized VirtualSpace. All bookkeeping is
// zeroed; initialize()/initialize_with_granularity() must be called before
// the space can be used.
VirtualSpace::VirtualSpace() {
  // Reservation bounds.
  _low_boundary = NULL;
  _high_boundary = NULL;
  // Committed range [low, high).
  _low = NULL;
  _high = NULL;
  // Per-region commit high-water marks.
  _lower_high = NULL;
  _middle_high = NULL;
  _upper_high = NULL;
  // Per-region boundaries.
  _lower_high_boundary = NULL;
  _middle_high_boundary = NULL;
  _upper_high_boundary = NULL;
  // Per-region commit alignments.
  _lower_alignment = 0;
  _middle_alignment = 0;
  _upper_alignment = 0;
  _special = false;
  _executable = false;
}
392 bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
393 const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1);
394 return initialize_with_granularity(rs, committed_size, max_commit_granularity);
395 }
// Take over bookkeeping for the reserved region 'rs' and commit the first
// 'committed_size' bytes. 'max_commit_granularity' becomes the middle
// region's commit alignment; the lower and upper fringe regions always
// commit in default pages. Returns false if 'rs' is not reserved or the
// initial commit fails.
bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
  if(!rs.is_reserved()) return false;  // allocation failed.
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  assert(max_commit_granularity > 0, "Granularity must be non-zero.");

  _low_boundary  = rs.base();
  _high_boundary = low_boundary() + rs.size();

  _low = low_boundary();
  _high = low();

  _special = rs.special();
  _executable = rs.executable();

  // When a VirtualSpace begins life at a large size, make all future expansion
  // and shrinking occur aligned to a granularity of large pages.  This avoids
  // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically, we see that with a 4MB
  // page size, the only spaces that get handled this way are codecache and
  // the heap itself, both of which provide a substantial performance
  // boost in many benchmarks when covered by large pages.
  //
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not aligned so already.
  _lower_alignment  = os::vm_page_size();
  _middle_alignment = max_commit_granularity;
  _upper_alignment  = os::vm_page_size();

  // End of each region
  _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
  _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
  _upper_high_boundary = high_boundary();

  // High address of each region
  _lower_high = low_boundary();
  _middle_high = lower_high_boundary();
  _upper_high = middle_high_boundary();

  // commit to initial size
  if (committed_size > 0) {
    if (!expand_by(committed_size)) {
      return false;
    }
  }
  return true;
}
// Destructor: resets this VirtualSpace via release(). Note that release()
// only clears bookkeeping; the underlying reservation is owned by the
// ReservedSpace and must be released by its owner.
VirtualSpace::~VirtualSpace() {
  release();
}
// Reset all bookkeeping to the uninitialized state.
void VirtualSpace::release() {
  // This does not release memory it never reserved.
  // Caller must release via rs.release();
  // Reservation bounds.
  _low_boundary = NULL;
  _high_boundary = NULL;
  // Committed range.
  _low = NULL;
  _high = NULL;
  // Per-region high-water marks and boundaries.
  _lower_high = NULL;
  _middle_high = NULL;
  _upper_high = NULL;
  _lower_high_boundary = NULL;
  _middle_high_boundary = NULL;
  _upper_high_boundary = NULL;
  // Per-region alignments and flags.
  _lower_alignment = 0;
  _middle_alignment = 0;
  _upper_alignment = 0;
  _special = false;
  _executable = false;
}
471 size_t VirtualSpace::committed_size() const {
472 return pointer_delta(high(), low(), sizeof(char));
473 }
476 size_t VirtualSpace::reserved_size() const {
477 return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
478 }
481 size_t VirtualSpace::uncommitted_size() const {
482 return reserved_size() - committed_size();
483 }
// Number of bytes actually committed, computed from the three regions'
// high-water marks. For special (pinned) spaces everything was committed
// up front, so report the full reservation.
size_t VirtualSpace::actual_committed_size() const {
  // Special VirtualSpaces commit all reserved space up front.
  if (special()) {
    return reserved_size();
  }

  size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
  size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
  size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));

#ifdef ASSERT
  // A region can only hold committed bytes if every region below it is
  // fully committed; verify that invariant.
  size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
  size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
  size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));

  if (committed_high > 0) {
    assert(committed_low == lower, "Must be");
    assert(committed_middle == middle, "Must be");
  }

  if (committed_middle > 0) {
    assert(committed_low == lower, "Must be");
  }
  if (committed_middle < middle) {
    assert(committed_high == 0, "Must be");
  }

  if (committed_low < lower) {
    assert(committed_high == 0, "Must be");
    assert(committed_middle == 0, "Must be");
  }
#endif

  return committed_low + committed_middle + committed_high;
}
522 bool VirtualSpace::contains(const void* p) const {
523 return low() <= (const char*) p && (const char*) p < high();
524 }
526 /*
527 First we need to determine if a particular virtual space is using large
528 pages. This is done at the initialize function and only virtual spaces
529 that are larger than LargePageSizeInBytes use large pages. Once we
530 have determined this, all expand_by and shrink_by calls must grow and
531 shrink by large page size chunks. If a particular request
532 is within the current large page, the call to commit and uncommit memory
can be ignored. In the case that the low and high boundaries of this
space are not large page aligned, the pages leading to the first large
535 page address and the pages after the last large page address must be
536 allocated with default pages.
537 */
// Commit 'bytes' more memory so that high() advances by exactly 'bytes'.
// Commits are performed per region (lower/middle/upper), each rounded up
// to that region's own alignment; the middle region may commit in
// large-page-sized chunks. Returns false if the request exceeds the
// remaining reservation or any OS commit fails.
bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
  if (uncommitted_size() < bytes) return false;

  if (special()) {
    // don't commit memory if the entire space is pinned in memory
    _high += bytes;
    return true;
  }

  char* previous_high = high();
  char* unaligned_new_high = high() + bytes;
  assert(unaligned_new_high <= high_boundary(),
         "cannot expand by more than upper boundary");

  // Calculate where the new high for each of the regions should be.  If
  // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
  // then the unaligned lower and upper new highs would be the
  // lower_high() and upper_high() respectively.
  char* unaligned_lower_new_high =
    MIN2(unaligned_new_high, lower_high_boundary());
  char* unaligned_middle_new_high =
    MIN2(unaligned_new_high, middle_high_boundary());
  char* unaligned_upper_new_high =
    MIN2(unaligned_new_high, upper_high_boundary());

  // Align the new highs based on the regions alignment.  lower and upper
  // alignment will always be default page size.  middle alignment will be
  // LargePageSizeInBytes if the actual size of the virtual space is in
  // fact larger than LargePageSizeInBytes.
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());

  // Determine which regions need to grow in this expand_by call.
  // If you are growing in the lower region, high() must be in that
  // region so calculate the size based on high().  For the middle and
  // upper regions, determine the starting point of growth based on the
  // location of high().  By getting the MAX of the region's low address
  // (or the previous region's high address) and high(), we can tell if it
  // is an intra or inter region growth.
  size_t lower_needs = 0;
  if (aligned_lower_new_high > lower_high()) {
    lower_needs =
      pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high > middle_high()) {
    middle_needs =
      pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
  }
  size_t upper_needs = 0;
  if (aligned_upper_new_high > upper_high()) {
    upper_needs =
      pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
  }

  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");

  // Commit regions, lowest first; each success advances that region's
  // high-water mark.
  if (lower_needs > 0) {
    assert(low_boundary() <= lower_high() &&
           lower_high() + lower_needs <= lower_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", lower_needs=" SIZE_FORMAT ", %d) failed",
                         lower_high(), lower_needs, _executable);)
      return false;
    } else {
      _lower_high += lower_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= middle_high() &&
           middle_high() + middle_needs <= middle_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
                           _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", middle_needs=" SIZE_FORMAT ", " SIZE_FORMAT
                         ", %d) failed", middle_high(), middle_needs,
                         middle_alignment(), _executable);)
      return false;
    }
    _middle_high += middle_needs;
  }
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= upper_high() &&
           upper_high() + upper_needs <= upper_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", upper_needs=" SIZE_FORMAT ", %d) failed",
                         upper_high(), upper_needs, _executable);)
      return false;
    } else {
      _upper_high += upper_needs;
    }
  }

  if (pre_touch || AlwaysPreTouch) {
    // Touch the newly committed range so the OS backs it with real pages
    // now rather than on first use.
    os::pretouch_memory(previous_high, unaligned_new_high);
  }

  _high += bytes;
  return true;
}
// A page is uncommitted if the contents of the entire page is deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary
// in which case that particular page can now be uncommitted.
// Mirror of expand_by(): lowers high() by 'size' and uncommits whole
// region-aligned chunks that fall above the new high, highest region first.
void VirtualSpace::shrink_by(size_t size) {
  if (committed_size() < size)
    fatal("Cannot shrink virtual space to negative size");

  if (special()) {
    // don't uncommit if the entire space is pinned in memory
    _high -= size;
    return;
  }

  char* unaligned_new_high = high() - size;
  assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");

  // Calculate new unaligned address
  char* unaligned_upper_new_high =
    MAX2(unaligned_new_high, middle_high_boundary());
  char* unaligned_middle_new_high =
    MAX2(unaligned_new_high, lower_high_boundary());
  char* unaligned_lower_new_high =
    MAX2(unaligned_new_high, low_boundary());

  // Align address to region's alignment
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());

  // Determine which regions need to shrink
  size_t upper_needs = 0;
  if (aligned_upper_new_high < upper_high()) {
    upper_needs =
      pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high < middle_high()) {
    middle_needs =
      pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
  }
  size_t lower_needs = 0;
  if (aligned_lower_new_high < lower_high()) {
    lower_needs =
      pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
  }

  // Check contiguity.
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");

  // Uncommit, highest region first; each success lowers that region's
  // high-water mark.
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= aligned_upper_new_high &&
           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _upper_high -= upper_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= aligned_middle_new_high &&
           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _middle_high -= middle_needs;
    }
  }
  if (lower_needs > 0) {
    assert(low_boundary() <= aligned_lower_new_high &&
           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _lower_high -= lower_needs;
    }
  }

  _high -= size;
}
756 #ifndef PRODUCT
// Debug-only sanity check that the three commit regions and the committed
// range [low(), high()] are consistent with the space's boundaries.
void VirtualSpace::check_for_contiguity() {
  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(low() >= low_boundary(), "low");
  assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
  assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
  assert(high() <= upper_high(), "upper high");
}
// Print a human-readable summary of this space (sizes and address ranges)
// to the given stream.
void VirtualSpace::print_on(outputStream* out) {
  out->print ("Virtual space:");
  if (special()) out->print(" (pinned in memory)");
  out->cr();
  out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
  out->print_cr(" - reserved: " SIZE_FORMAT, reserved_size());
  out->print_cr(" - [low, high]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", low(), high());
  out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", low_boundary(), high_boundary());
}
// Print to the default tty stream.
void VirtualSpace::print() {
  print_on(tty);
}
788 /////////////// Unit tests ///////////////
790 #ifndef PRODUCT
// Log a printf-style message (and flush) when -XX:+VerboseInternalVMTests
// is set; otherwise a no-op.
#define test_log(...) \
  do {\
    if (VerboseInternalVMTests) { \
      tty->print_cr(__VA_ARGS__); \
      tty->flush(); \
    }\
  } while (false)
800 class TestReservedSpace : AllStatic {
801 public:
802 static void small_page_write(void* addr, size_t size) {
803 size_t page_size = os::vm_page_size();
805 char* end = (char*)addr + size;
806 for (char* p = (char*)addr; p < end; p += page_size) {
807 *p = 1;
808 }
809 }
811 static void release_memory_for_test(ReservedSpace rs) {
812 if (rs.special()) {
813 guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
814 } else {
815 guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
816 }
817 }
819 static void test_reserved_space1(size_t size, size_t alignment) {
820 test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);
822 assert(is_size_aligned(size, alignment), "Incorrect input parameters");
824 ReservedSpace rs(size, // size
825 alignment, // alignment
826 UseLargePages, // large
827 NULL, // requested_address
828 0); // noacces_prefix
830 test_log(" rs.special() == %d", rs.special());
832 assert(rs.base() != NULL, "Must be");
833 assert(rs.size() == size, "Must be");
835 assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
836 assert(is_size_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");
838 if (rs.special()) {
839 small_page_write(rs.base(), size);
840 }
842 release_memory_for_test(rs);
843 }
845 static void test_reserved_space2(size_t size) {
846 test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);
848 assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
850 ReservedSpace rs(size);
852 test_log(" rs.special() == %d", rs.special());
854 assert(rs.base() != NULL, "Must be");
855 assert(rs.size() == size, "Must be");
857 if (rs.special()) {
858 small_page_write(rs.base(), size);
859 }
861 release_memory_for_test(rs);
862 }
864 static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
865 test_log("test_reserved_space3(%p, %p, %d)",
866 (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);
868 assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
869 assert(is_size_aligned(size, alignment), "Must be at least aligned against alignment");
871 bool large = maybe_large && UseLargePages && size >= os::large_page_size();
873 ReservedSpace rs(size, alignment, large, false);
875 test_log(" rs.special() == %d", rs.special());
877 assert(rs.base() != NULL, "Must be");
878 assert(rs.size() == size, "Must be");
880 if (rs.special()) {
881 small_page_write(rs.base(), size);
882 }
884 release_memory_for_test(rs);
885 }
888 static void test_reserved_space1() {
889 size_t size = 2 * 1024 * 1024;
890 size_t ag = os::vm_allocation_granularity();
892 test_reserved_space1(size, ag);
893 test_reserved_space1(size * 2, ag);
894 test_reserved_space1(size * 10, ag);
895 }
897 static void test_reserved_space2() {
898 size_t size = 2 * 1024 * 1024;
899 size_t ag = os::vm_allocation_granularity();
901 test_reserved_space2(size * 1);
902 test_reserved_space2(size * 2);
903 test_reserved_space2(size * 10);
904 test_reserved_space2(ag);
905 test_reserved_space2(size - ag);
906 test_reserved_space2(size);
907 test_reserved_space2(size + ag);
908 test_reserved_space2(size * 2);
909 test_reserved_space2(size * 2 - ag);
910 test_reserved_space2(size * 2 + ag);
911 test_reserved_space2(size * 3);
912 test_reserved_space2(size * 3 - ag);
913 test_reserved_space2(size * 3 + ag);
914 test_reserved_space2(size * 10);
915 test_reserved_space2(size * 10 + size / 2);
916 }
918 static void test_reserved_space3() {
919 size_t ag = os::vm_allocation_granularity();
921 test_reserved_space3(ag, ag , false);
922 test_reserved_space3(ag * 2, ag , false);
923 test_reserved_space3(ag * 3, ag , false);
924 test_reserved_space3(ag * 2, ag * 2, false);
925 test_reserved_space3(ag * 4, ag * 2, false);
926 test_reserved_space3(ag * 8, ag * 2, false);
927 test_reserved_space3(ag * 4, ag * 4, false);
928 test_reserved_space3(ag * 8, ag * 4, false);
929 test_reserved_space3(ag * 16, ag * 4, false);
931 if (UseLargePages) {
932 size_t lp = os::large_page_size();
934 // Without large pages
935 test_reserved_space3(lp, ag * 4, false);
936 test_reserved_space3(lp * 2, ag * 4, false);
937 test_reserved_space3(lp * 4, ag * 4, false);
938 test_reserved_space3(lp, lp , false);
939 test_reserved_space3(lp * 2, lp , false);
940 test_reserved_space3(lp * 3, lp , false);
941 test_reserved_space3(lp * 2, lp * 2, false);
942 test_reserved_space3(lp * 4, lp * 2, false);
943 test_reserved_space3(lp * 8, lp * 2, false);
945 // With large pages
946 test_reserved_space3(lp, ag * 4 , true);
947 test_reserved_space3(lp * 2, ag * 4, true);
948 test_reserved_space3(lp * 4, ag * 4, true);
949 test_reserved_space3(lp, lp , true);
950 test_reserved_space3(lp * 2, lp , true);
951 test_reserved_space3(lp * 3, lp , true);
952 test_reserved_space3(lp * 2, lp * 2, true);
953 test_reserved_space3(lp * 4, lp * 2, true);
954 test_reserved_space3(lp * 8, lp * 2, true);
955 }
956 }
// Entry point: run all ReservedSpace test groups in order.
static void test_reserved_space() {
  test_reserved_space1();
  test_reserved_space2();
  test_reserved_space3();
}
963 };
// Test entry point exposed outside the TestReservedSpace class
// (non-PRODUCT builds only — see the trailing #endif // PRODUCT).
void TestReservedSpace_test() {
  TestReservedSpace::test_reserved_space();
}
// Assertion helpers for the VirtualSpace tests below. Operands are printed
// with SIZE_FORMAT, so they are expected to be size_t-sized values.

// Check actual == expected, printing both values on failure.
#define assert_equals(actual, expected)     \
  assert(actual == expected,                \
    err_msg("Got " SIZE_FORMAT " expected " \
      SIZE_FORMAT, actual, expected));

// Check value1 >= value2, printing both values on failure.
#define assert_ge(value1, value2)                  \
  assert(value1 >= value2,                         \
    err_msg("'" #value1 "': " SIZE_FORMAT " '"     \
      #value2 "': " SIZE_FORMAT, value1, value2));

// Check value1 < value2, printing both values on failure.
#define assert_lt(value1, value2)                  \
  assert(value1 < value2,                          \
    err_msg("'" #value1 "': " SIZE_FORMAT " '"     \
      #value2 "': " SIZE_FORMAT, value1, value2));
// Tests committing memory within a VirtualSpace, optionally forcing the page
// size used when reserving and committing (see TestLargePages).
class TestVirtualSpace : AllStatic {
  // Controls how the backing ReservedSpace is created and how the
  // VirtualSpace commit granularity is chosen.
  enum TestLargePages {
    Default,  // let the VM pick the page-size policy for reserve and commit
    Disable,  // force small pages for both reservation and commit
    Reserve,  // default initialization path (same reservation as Default)
    Commit    // small-page reservation; commit granularity chosen per region size
  };

  // Reserve 'reserve_size_aligned' bytes according to 'mode'.
  static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
    switch(mode) {
    default:
    case Default:
    case Reserve:
      // Let ReservedSpace pick the page size (may use large pages).
      return ReservedSpace(reserve_size_aligned);
    case Disable:
    case Commit:
      // Explicitly reserve with small pages only.
      return ReservedSpace(reserve_size_aligned,
                           os::vm_allocation_granularity(),
                           /* large */ false, /* exec */ false);
    }
  }

  // Initialize 'vs' on top of 'rs', choosing commit granularity per 'mode'.
  static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
    switch(mode) {
    default:
    case Default:
    case Reserve:
      return vs.initialize(rs, 0);
    case Disable:
      // Commit with small pages regardless of UseLargePages.
      return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
    case Commit:
      // Commit with the page size the OS would choose for this region size.
      return vs.initialize_with_granularity(rs, 0, os::page_size_for_region_unaligned(rs.size(), 1));
    }
  }

 public:
  // Reserve 'reserve_size' (rounded up to allocation granularity), commit
  // 'commit_size' bytes, and verify the amount actually committed is within
  // one commit granule of the request.
  static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
                                                        TestLargePages mode = Default) {
    size_t granularity = os::vm_allocation_granularity();
    size_t reserve_size_aligned = align_size_up(reserve_size, granularity);

    ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);

    assert(reserved.is_reserved(), "Must be");

    VirtualSpace vs;
    bool initialized = initialize_virtual_space(vs, reserved, mode);
    assert(initialized, "Failed to initialize VirtualSpace");

    vs.expand_by(commit_size, false);

    if (vs.special()) {
      // Pre-committed ("special") space: everything is committed up front,
      // so the whole reservation counts as committed.
      assert_equals(vs.actual_committed_size(), reserve_size_aligned);
    } else {
      assert_ge(vs.actual_committed_size(), commit_size);
      // Approximate the commit granularity.
      // Make sure that we don't commit using large pages
      // if large pages has been disabled for this VirtualSpace.
      size_t commit_granularity = (mode == Disable || !UseLargePages) ?
                                   os::vm_page_size() : os::large_page_size();
      assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
    }

    reserved.release();
  }

  // Commit exactly one large page and check actual_committed_size() matches.
  static void test_virtual_space_actual_committed_space_one_large_page() {
    if (!UseLargePages) {
      return;
    }

    size_t large_page_size = os::large_page_size();

    ReservedSpace reserved(large_page_size, large_page_size, true, false);

    assert(reserved.is_reserved(), "Must be");

    VirtualSpace vs;
    bool initialized = vs.initialize(reserved, 0);
    assert(initialized, "Failed to initialize VirtualSpace");

    vs.expand_by(large_page_size, false);

    assert_equals(vs.actual_committed_size(), large_page_size);

    reserved.release();
  }

  // Default-mode matrix of reserve/commit size combinations, from sub-page
  // sizes up to multi-megabyte regions.
  static void test_virtual_space_actual_committed_space() {
    test_virtual_space_actual_committed_space(4 * K, 0);
    test_virtual_space_actual_committed_space(4 * K, 4 * K);
    test_virtual_space_actual_committed_space(8 * K, 0);
    test_virtual_space_actual_committed_space(8 * K, 4 * K);
    test_virtual_space_actual_committed_space(8 * K, 8 * K);
    test_virtual_space_actual_committed_space(12 * K, 0);
    test_virtual_space_actual_committed_space(12 * K, 4 * K);
    test_virtual_space_actual_committed_space(12 * K, 8 * K);
    test_virtual_space_actual_committed_space(12 * K, 12 * K);
    test_virtual_space_actual_committed_space(64 * K, 0);
    test_virtual_space_actual_committed_space(64 * K, 32 * K);
    test_virtual_space_actual_committed_space(64 * K, 64 * K);
    test_virtual_space_actual_committed_space(2 * M, 0);
    test_virtual_space_actual_committed_space(2 * M, 4 * K);
    test_virtual_space_actual_committed_space(2 * M, 64 * K);
    test_virtual_space_actual_committed_space(2 * M, 1 * M);
    test_virtual_space_actual_committed_space(2 * M, 2 * M);
    test_virtual_space_actual_committed_space(10 * M, 0);
    test_virtual_space_actual_committed_space(10 * M, 4 * K);
    test_virtual_space_actual_committed_space(10 * M, 8 * K);
    test_virtual_space_actual_committed_space(10 * M, 1 * M);
    test_virtual_space_actual_committed_space(10 * M, 2 * M);
    test_virtual_space_actual_committed_space(10 * M, 5 * M);
    test_virtual_space_actual_committed_space(10 * M, 10 * M);
  }

  // The same 10M matrix, once for each explicit mode. Only meaningful when
  // large pages are enabled, since the modes only differ in that case.
  static void test_virtual_space_disable_large_pages() {
    if (!UseLargePages) {
      return;
    }
    // These test cases verify that if we force VirtualSpace to disable large pages
    test_virtual_space_actual_committed_space(10 * M, 0, Disable);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);

    test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);

    test_virtual_space_actual_committed_space(10 * M, 0, Commit);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
  }

  // Entry point: run all VirtualSpace test groups in order.
  static void test_virtual_space() {
    test_virtual_space_actual_committed_space();
    test_virtual_space_actual_committed_space_one_large_page();
    test_virtual_space_disable_large_pages();
  }
};
// Test entry point exposed outside the TestVirtualSpace class
// (non-PRODUCT builds only — see the trailing #endif // PRODUCT).
void TestVirtualSpace_test() {
  TestVirtualSpace::test_virtual_space();
}
1141 #endif // PRODUCT
1143 #endif