Tue, 07 Mar 2017 05:12:17 -0500
Compress the addresses of the code cache to within 32 bits for MIPS64.
1 /*
2 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "oops/markOop.hpp"
27 #include "oops/oop.inline.hpp"
28 #include "runtime/virtualspace.hpp"
29 #include "services/memTracker.hpp"
30 #ifdef TARGET_OS_FAMILY_linux
31 # include "os_linux.inline.hpp"
32 #endif
33 #ifdef TARGET_OS_FAMILY_solaris
34 # include "os_solaris.inline.hpp"
35 #endif
36 #ifdef TARGET_OS_FAMILY_windows
37 # include "os_windows.inline.hpp"
38 #endif
39 #ifdef TARGET_OS_FAMILY_aix
40 # include "os_aix.inline.hpp"
41 #endif
42 #ifdef TARGET_OS_FAMILY_bsd
43 # include "os_bsd.inline.hpp"
44 #endif
46 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
48 // ReservedSpace
// Dummy constructor: produces an unreserved, zero-sized space.
// is_reserved() reports false until initialize() succeeds.
ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
    _alignment(0), _special(false), _executable(false) {
}
// Reserve 'size' bytes at an OS-chosen, non-executable address.
// Picks a page size suited to the region but keeps the alignment at the
// allocation granularity rather than the (possibly large) page size.
ReservedSpace::ReservedSpace(size_t size) {
  size_t page_size = os::page_size_for_region(size, size, 1);
  bool large_pages = page_size != (size_t)os::vm_page_size();
  // Don't force the alignment to be large page aligned,
  // since that will waste memory.
  size_t alignment = os::vm_allocation_granularity();
  initialize(size, alignment, large_pages, NULL, 0, false);
}
// Reserve 'size' bytes plus 'noaccess_prefix' guard bytes (used below the
// heap base for compressed-oops implicit null checks), preferably at
// 'requested_address'.  Non-executable.
ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             char* requested_address,
                             const size_t noaccess_prefix) {
  initialize(size+noaccess_prefix, alignment, large, requested_address,
             noaccess_prefix, false);
}
// Reserve 'size' bytes at an OS-chosen address with the given alignment;
// 'executable' marks the mapping executable (used for the code cache).
ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             bool executable) {
  initialize(size, alignment, large, NULL, 0, executable);
}
78 // Helper method.
79 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
80 const size_t size, bool special)
81 {
82 if (base == requested_address || requested_address == NULL)
83 return false; // did not fail
85 if (base != NULL) {
86 // Different reserve address may be acceptable in other cases
87 // but for compressed oops heap should be at requested address.
88 assert(UseCompressedOops, "currently requested address used only for compressed oops");
89 if (PrintCompressedOopsMode) {
90 tty->cr();
91 tty->print_cr("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
92 }
93 // OS ignored requested address. Try different address.
94 if (special) {
95 if (!os::release_memory_special(base, size)) {
96 fatal("os::release_memory_special failed");
97 }
98 } else {
99 if (!os::release_memory(base, size)) {
100 fatal("os::release_memory failed");
101 }
102 }
103 }
104 return true;
105 }
// Reserve a contiguous range of virtual address space.
//
//   size              - bytes to reserve (allocation-granularity aligned)
//   alignment         - required base alignment (0 or a power of 2)
//   large             - try to back the region with large pages
//   requested_address - preferred base address, or NULL for "anywhere"
//   noaccess_prefix   - guard bytes below the heap base for compressed-oops
//                       implicit null checks (0 when unused)
//   executable        - whether the mapping must be executable
//
// On any failure all fields are left zeroed/NULL; callers must check
// is_reserved().
void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
                               char* requested_address,
                               const size_t noaccess_prefix,
                               bool executable) {
  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  // Alignment is never below the small page size.
  alignment = MAX2(alignment, (size_t)os::vm_page_size());

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == alignment, "noaccess prefix wrong");

  // Start from a clean state so a failed reservation leaves the object
  // observably unreserved.
  _base = NULL;
  _size = 0;
  _special = false;
  _executable = executable;
  _alignment = 0;
  _noaccess_prefix = 0;
  if (size == 0) {
    return;
  }

  // If OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  bool special = large && !os::can_commit_large_page_memory();
  char* base = NULL;

#ifdef MIPS64
  // MIPS64 port: steer the first executable reservation (the code cache)
  // to a fixed low address so code addresses fit within 32 bits.
  // NOTE(review): assumes the code cache is the first executable space
  // reserved, during single-threaded startup -- the static flag is
  // unsynchronized; confirm against VM init order.
  static int code_cache_init_flag = 1;
  if (code_cache_init_flag && executable) {
    code_cache_init_flag = 0;
    requested_address = (char*) (5 * os::Linux::page_size());
  }
#endif

  if (requested_address != 0) {
    requested_address -= noaccess_prefix; // adjust requested address
    assert(requested_address != NULL, "huge noaccess prefix?");
  }

  if (special) {
    // Large pages that cannot be committed on demand: reserve and pin now.
    base = os::reserve_memory_special(size, alignment, requested_address, executable);

    if (base != NULL) {
      if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
        // OS ignored requested address. Try different address.
        return;
      }
      // Check alignment constraints.
      assert((uintptr_t) base % alignment == 0,
             err_msg("Large pages returned a non-aligned address, base: "
                     PTR_FORMAT " alignment: " PTR_FORMAT,
                     base, (void*)(uintptr_t)alignment));
      _special = true;
    } else {
      // failed; try to reserve regular memory below
      if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                            !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
        if (PrintCompressedOopsMode) {
          tty->cr();
          tty->print_cr("Reserve regular memory without large pages.");
        }
      }
    }
  }

  if (base == NULL) {
    // Optimistically assume that the OSes returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid over mapping something
    // important.  If available space is not detected, return NULL.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size, requested_address);
      if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // OS ignored requested address. Try different address.
        base = NULL;
      }
    } else {
      base = os::reserve_memory(size, NULL, alignment);
    }

    if (base == NULL) return;

    // Check alignment constraints
    if ((((size_t)base + noaccess_prefix) & (alignment - 1)) != 0) {
      // Base not aligned, retry
      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
      // Make sure that size is aligned
      size = align_size_up(size, alignment);
      base = os::reserve_memory_aligned(size, alignment);

      if (requested_address != 0 &&
          failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // As a result of the alignment constraints, the allocated base differs
        // from the requested address. Return back to the caller who can
        // take remedial action (like try again without a requested address).
        assert(_base == NULL, "should be");
        return;
      }
    }
  }
  // Done
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = noaccess_prefix;

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == _alignment, "noaccess prefix wrong");

  // Both ends of the range must survive a round trip through mark encoding.
  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
         "area must be distinguisable from marks for mark-sweep");
  assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
         "area must be distinguisable from marks for mark-sweep");
}
// Wrap an already-reserved region [base, base + size) without reserving
// anything new; used by first_part()/last_part() when splitting a space.
ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
                             bool special, bool executable) {
  assert((size % os::vm_allocation_granularity()) == 0,
         "size not allocation aligned");
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = 0;
  _special = special;
  _executable = executable;
}
// Return the first 'partition_size' bytes of this space as a new
// ReservedSpace.  When 'split' is true the underlying OS mapping is split
// as well, so the two parts can later be released independently.
ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
                                        bool split, bool realloc) {
  assert(partition_size <= size(), "partition failed");
  if (split) {
    os::split_reserved_memory(base(), size(), partition_size, realloc);
  }
  ReservedSpace result(base(), partition_size, alignment, special(),
                       executable());
  return result;
}
// Return the remainder of this space after the first 'partition_size'
// bytes, as a new ReservedSpace starting at base() + partition_size.
ReservedSpace
ReservedSpace::last_part(size_t partition_size, size_t alignment) {
  assert(partition_size <= size(), "partition failed");
  ReservedSpace result(base() + partition_size, size() - partition_size,
                       alignment, special(), executable());
  return result;
}
270 size_t ReservedSpace::page_align_size_up(size_t size) {
271 return align_size_up(size, os::vm_page_size());
272 }
275 size_t ReservedSpace::page_align_size_down(size_t size) {
276 return align_size_down(size, os::vm_page_size());
277 }
280 size_t ReservedSpace::allocation_align_size_up(size_t size) {
281 return align_size_up(size, os::vm_allocation_granularity());
282 }
285 size_t ReservedSpace::allocation_align_size_down(size_t size) {
286 return align_size_down(size, os::vm_allocation_granularity());
287 }
// Return the entire reserved region to the OS, including any noaccess
// prefix that was split off below _base, then reset to the unreserved
// state.  No-op if nothing is reserved.
void ReservedSpace::release() {
  if (is_reserved()) {
    // Undo the prefix adjustment done in protect_noaccess_prefix().
    char *real_base = _base - _noaccess_prefix;
    const size_t real_size = _size + _noaccess_prefix;
    if (special()) {
      os::release_memory_special(real_base, real_size);
    } else{
      os::release_memory(real_base, real_size);
    }
    _base = NULL;
    _size = 0;
    _noaccess_prefix = 0;
    _special = false;
    _executable = false;
  }
}
// Make the noaccess prefix below the heap base inaccessible so that
// implicit null checks on compressed oops fault there, then advance
// _base/_size past the prefix.  'size' is the caller's original request,
// used to re-check the final size and alignment.
void ReservedSpace::protect_noaccess_prefix(const size_t size) {
  assert( (_noaccess_prefix != 0) == (UseCompressedOops && _base != NULL &&
                                      (Universe::narrow_oop_base() != NULL) &&
                                      Universe::narrow_oop_use_implicit_null_checks()),
          "noaccess_prefix should be used only with non zero based compressed oops");

  // If there is no noaccess prefix, return.
  if (_noaccess_prefix == 0) return;

  assert(_noaccess_prefix >= (size_t)os::vm_page_size(),
         "must be at least page size big");

  // Protect memory at the base of the allocated region.
  // If special, the page was committed (only matters on windows)
  if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE,
                          _special)) {
    fatal("cannot protect protection page");
  }
  if (PrintCompressedOopsMode) {
    tty->cr();
    tty->print_cr("Protected page at the reserved heap base: " PTR_FORMAT " / " INTX_FORMAT " bytes", _base, _noaccess_prefix);
  }

  // Shift the usable region past the protected prefix.
  _base += _noaccess_prefix;
  _size -= _noaccess_prefix;
  assert((size == _size) && ((uintptr_t)_base % _alignment == 0),
         "must be exactly of required size and alignment");
}
336 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment,
337 bool large, char* requested_address) :
338 ReservedSpace(size, alignment, large,
339 requested_address,
340 (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
341 Universe::narrow_oop_use_implicit_null_checks()) ?
342 lcm(os::vm_page_size(), alignment) : 0) {
343 if (base() > 0) {
344 MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
345 }
347 // Only reserved space for the java heap should have a noaccess_prefix
348 // if using compressed oops.
349 protect_noaccess_prefix(size);
350 }
352 // Reserve space for code segment. Same as Java heap only we mark this as
353 // executable.
354 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
355 size_t rs_align,
356 bool large) :
357 ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
358 MemTracker::record_virtual_memory_type((address)base(), mtCode);
359 }
361 // VirtualSpace
// Default constructor: all boundaries and high-water marks NULL, all
// alignments zero.  A VirtualSpace is unusable until initialize() runs.
VirtualSpace::VirtualSpace() {
  _low_boundary = NULL;
  _high_boundary = NULL;
  _low = NULL;
  _high = NULL;
  _lower_high = NULL;
  _middle_high = NULL;
  _upper_high = NULL;
  _lower_high_boundary = NULL;
  _middle_high_boundary = NULL;
  _upper_high_boundary = NULL;
  _lower_alignment = 0;
  _middle_alignment = 0;
  _upper_alignment = 0;
  _special = false;
  _executable = false;
}
// Initialize over 'rs' using the default commit granularity chosen for
// the region's size (large-page sized when appropriate).
bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
  const size_t max_commit_granularity = os::page_size_for_region(rs.size(), rs.size(), 1);
  return initialize_with_granularity(rs, committed_size, max_commit_granularity);
}
// Set up commit tracking over the reserved space 'rs'.  The space is
// modeled as three regions -- lower / middle / upper -- where the middle
// region grows and shrinks in 'max_commit_granularity' chunks and the
// unaligned edges use small pages.  Commits 'committed_size' bytes up
// front.  Returns false if rs is not reserved or the initial commit fails.
bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
  if(!rs.is_reserved()) return false;  // allocation failed.
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  assert(max_commit_granularity > 0, "Granularity must be non-zero.");

  _low_boundary = rs.base();
  _high_boundary = low_boundary() + rs.size();

  _low = low_boundary();
  _high = low();

  _special = rs.special();
  _executable = rs.executable();

  // When a VirtualSpace begins life at a large size, make all future expansion
  // and shrinking occur aligned to a granularity of large pages.  This avoids
  // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically, we see that with a 4MB
  // page size, the only spaces that get handled this way are codecache and
  // the heap itself, both of which provide a substantial performance
  // boost in many benchmarks when covered by large pages.
  //
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not aligned so already.
  _lower_alignment = os::vm_page_size();
  _middle_alignment = max_commit_granularity;
  _upper_alignment = os::vm_page_size();

  // End of each region
  _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
  _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
  _upper_high_boundary = high_boundary();

  // High address of each region
  _lower_high = low_boundary();
  _middle_high = lower_high_boundary();
  _upper_high = middle_high_boundary();

  // commit to initial size
  if (committed_size > 0) {
    if (!expand_by(committed_size)) {
      return false;
    }
  }
  return true;
}
// Destructor resets the tracking state via release(); the underlying
// reservation itself is released by the owning ReservedSpace.
VirtualSpace::~VirtualSpace() {
  release();
}
// Reset all boundaries, high-water marks and alignments to the
// uninitialized state.
void VirtualSpace::release() {
  // This does not release memory it never reserved.
  // Caller must release via rs.release();
  _low_boundary = NULL;
  _high_boundary = NULL;
  _low = NULL;
  _high = NULL;
  _lower_high = NULL;
  _middle_high = NULL;
  _upper_high = NULL;
  _lower_high_boundary = NULL;
  _middle_high_boundary = NULL;
  _upper_high_boundary = NULL;
  _lower_alignment = 0;
  _middle_alignment = 0;
  _upper_alignment = 0;
  _special = false;
  _executable = false;
}
// Bytes between low() and high(), i.e. the committed portion.
size_t VirtualSpace::committed_size() const {
  return pointer_delta(high(), low(), sizeof(char));
}
// Bytes between low_boundary() and high_boundary(), i.e. the whole
// reserved range regardless of what is committed.
size_t VirtualSpace::reserved_size() const {
  return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
}
// Reserved bytes not yet committed.
size_t VirtualSpace::uncommitted_size()  const {
  return reserved_size() - committed_size();
}
// Sum of the bytes committed in the lower, middle and upper regions,
// computed from the per-region high-water marks.
size_t VirtualSpace::actual_committed_size() const {
  // Special VirtualSpaces commit all reserved space up front.
  if (special()) {
    return reserved_size();
  }

  size_t committed_low = pointer_delta(_lower_high, _low_boundary, sizeof(char));
  size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary, sizeof(char));
  size_t committed_high = pointer_delta(_upper_high, _middle_high_boundary, sizeof(char));

#ifdef ASSERT
  // Sanity: regions fill bottom-up, so a non-empty higher region implies
  // the regions below it are completely full.
  size_t lower = pointer_delta(_lower_high_boundary, _low_boundary, sizeof(char));
  size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary, sizeof(char));
  size_t upper = pointer_delta(_upper_high_boundary, _middle_high_boundary, sizeof(char));

  if (committed_high > 0) {
    assert(committed_low == lower, "Must be");
    assert(committed_middle == middle, "Must be");
  }

  if (committed_middle > 0) {
    assert(committed_low == lower, "Must be");
  }
  if (committed_middle < middle) {
    assert(committed_high == 0, "Must be");
  }

  if (committed_low < lower) {
    assert(committed_high == 0, "Must be");
    assert(committed_middle == 0, "Must be");
  }
#endif

  return committed_low + committed_middle + committed_high;
}
512 bool VirtualSpace::contains(const void* p) const {
513 return low() <= (const char*) p && (const char*) p < high();
514 }
/*
   First we need to determine if a particular virtual space is using large
   pages.  This is done at the initialize function and only virtual spaces
   that are larger than LargePageSizeInBytes use large pages.  Once we
   have determined this, all expand_by and shrink_by calls must grow and
   shrink by large page size chunks.  If a particular request
   is within the current large page, the call to commit and uncommit memory
   can be ignored.  In the case that the low and high boundaries of this
   space are not large page aligned, the pages leading up to the first large
   page address and the pages after the last large page address must be
   allocated with default pages.
*/
// Commit an additional 'bytes' bytes at high(), distributing the growth
// over the lower/middle/upper regions with their respective alignments,
// and optionally pre-touching the new pages.  Returns false if the request
// exceeds the uncommitted space or any commit fails.
bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
  if (uncommitted_size() < bytes) return false;

  if (special()) {
    // don't commit memory if the entire space is pinned in memory
    _high += bytes;
    return true;
  }

  char* previous_high = high();
  char* unaligned_new_high = high() + bytes;
  assert(unaligned_new_high <= high_boundary(),
         "cannot expand by more than upper boundary");

  // Calculate where the new high for each of the regions should be.  If
  // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
  // then the unaligned lower and upper new highs would be the
  // lower_high() and upper_high() respectively.
  char* unaligned_lower_new_high =
    MIN2(unaligned_new_high, lower_high_boundary());
  char* unaligned_middle_new_high =
    MIN2(unaligned_new_high, middle_high_boundary());
  char* unaligned_upper_new_high =
    MIN2(unaligned_new_high, upper_high_boundary());

  // Align the new highs based on the regions alignment.  lower and upper
  // alignment will always be default page size.  middle alignment will be
  // LargePageSizeInBytes if the actual size of the virtual space is in
  // fact larger than LargePageSizeInBytes.
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());

  // Determine which regions need to grow in this expand_by call.
  // If you are growing in the lower region, high() must be in that
  // region so calculate the size based on high().  For the middle and
  // upper regions, determine the starting point of growth based on the
  // location of high().  By getting the MAX of the region's low address
  // (or the previous region's high address) and high(), we can tell if it
  // is an intra or inter region growth.
  size_t lower_needs = 0;
  if (aligned_lower_new_high > lower_high()) {
    lower_needs =
      pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high > middle_high()) {
    middle_needs =
      pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
  }
  size_t upper_needs = 0;
  if (aligned_upper_new_high > upper_high()) {
    upper_needs =
      pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
  }

  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");

  // Commit regions
  if (lower_needs > 0) {
    assert(low_boundary() <= lower_high() &&
           lower_high() + lower_needs <= lower_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", lower_needs=" SIZE_FORMAT ", %d) failed",
                         lower_high(), lower_needs, _executable);)
      return false;
    } else {
      _lower_high += lower_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= middle_high() &&
           middle_high() + middle_needs <= middle_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
                           _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", middle_needs=" SIZE_FORMAT ", " SIZE_FORMAT
                         ", %d) failed", middle_high(), middle_needs,
                         middle_alignment(), _executable);)
      return false;
    }
    _middle_high += middle_needs;
  }
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= upper_high() &&
           upper_high() + upper_needs <= upper_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", upper_needs=" SIZE_FORMAT ", %d) failed",
                         upper_high(), upper_needs, _executable);)
      return false;
    } else {
      _upper_high += upper_needs;
    }
  }

  if (pre_touch || AlwaysPreTouch) {
    // Touch each newly committed page so it is actually backed.
    int vm_ps = os::vm_page_size();
    for (char* curr = previous_high;
         curr < unaligned_new_high;
         curr += vm_ps) {
      // Note the use of a write here; originally we tried just a read, but
      // since the value read was unused, the optimizer removed the read.
      // If we ever have a concurrent touchahead thread, we'll want to use
      // a read, to avoid the potential of overwriting data (if a mutator
      // thread beats the touchahead thread to a page).  There are various
      // ways of making sure this read is not optimized away: for example,
      // generating the code for a read procedure at runtime.
      *curr = 0;
    }
  }

  _high += bytes;
  return true;
}
// A page is uncommitted if the contents of the entire page is deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary
// in which case that particular page can now be uncommitted.
//
// Uncommits 'size' bytes from the top of the space, region by region
// (upper, then middle, then lower), honoring each region's alignment.
void VirtualSpace::shrink_by(size_t size) {
  if (committed_size() < size)
    fatal("Cannot shrink virtual space to negative size");

  if (special()) {
    // don't uncommit if the entire space is pinned in memory
    _high -= size;
    return;
  }

  char* unaligned_new_high = high() - size;
  assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");

  // Calculate new unaligned address
  char* unaligned_upper_new_high =
    MAX2(unaligned_new_high, middle_high_boundary());
  char* unaligned_middle_new_high =
    MAX2(unaligned_new_high, lower_high_boundary());
  char* unaligned_lower_new_high =
    MAX2(unaligned_new_high, low_boundary());

  // Align address to region's alignment
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());

  // Determine which regions need to shrink
  size_t upper_needs = 0;
  if (aligned_upper_new_high < upper_high()) {
    upper_needs =
      pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high < middle_high()) {
    middle_needs =
      pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
  }
  size_t lower_needs = 0;
  if (aligned_lower_new_high < lower_high()) {
    lower_needs =
      pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
  }

  // Check contiguity.
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");

  // Uncommit
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= aligned_upper_new_high &&
           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _upper_high -= upper_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= aligned_middle_new_high &&
           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _middle_high -= middle_needs;
    }
  }
  if (lower_needs > 0) {
    assert(low_boundary() <= aligned_lower_new_high &&
           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _lower_high -= lower_needs;
    }
  }

  _high -= size;
}
758 #ifndef PRODUCT
// Debug-only sanity checks: the three regions' high-water marks and
// boundaries must be ordered low -> lower -> middle -> upper -> high.
void VirtualSpace::check_for_contiguity() {
  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(low() >= low_boundary(), "low");
  assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
  assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
  assert(high() <= upper_high(), "upper high");
}
// Print committed/reserved sizes and the low/high pointers to 'out'.
void VirtualSpace::print_on(outputStream* out) {
  out->print   ("Virtual space:");
  if (special()) out->print(" (pinned in memory)");
  out->cr();
  out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
  out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
  out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low(), high());
  out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low_boundary(), high_boundary());
}
// Convenience: print to the default tty stream.
void VirtualSpace::print() {
  print_on(tty);
}
790 /////////////// Unit tests ///////////////
792 #ifndef PRODUCT
// Log a printf-style message when -XX:+VerboseInternalVMTests is set.
#define test_log(...) \
  do {\
    if (VerboseInternalVMTests) { \
      tty->print_cr(__VA_ARGS__); \
      tty->flush(); \
    }\
  } while (false)
802 class TestReservedSpace : AllStatic {
803 public:
804 static void small_page_write(void* addr, size_t size) {
805 size_t page_size = os::vm_page_size();
807 char* end = (char*)addr + size;
808 for (char* p = (char*)addr; p < end; p += page_size) {
809 *p = 1;
810 }
811 }
813 static void release_memory_for_test(ReservedSpace rs) {
814 if (rs.special()) {
815 guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
816 } else {
817 guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
818 }
819 }
821 static void test_reserved_space1(size_t size, size_t alignment) {
822 test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);
824 assert(is_size_aligned(size, alignment), "Incorrect input parameters");
826 ReservedSpace rs(size, // size
827 alignment, // alignment
828 UseLargePages, // large
829 NULL, // requested_address
830 0); // noacces_prefix
832 test_log(" rs.special() == %d", rs.special());
834 assert(rs.base() != NULL, "Must be");
835 assert(rs.size() == size, "Must be");
837 assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
838 assert(is_size_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");
840 if (rs.special()) {
841 small_page_write(rs.base(), size);
842 }
844 release_memory_for_test(rs);
845 }
847 static void test_reserved_space2(size_t size) {
848 test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);
850 assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
852 ReservedSpace rs(size);
854 test_log(" rs.special() == %d", rs.special());
856 assert(rs.base() != NULL, "Must be");
857 assert(rs.size() == size, "Must be");
859 if (rs.special()) {
860 small_page_write(rs.base(), size);
861 }
863 release_memory_for_test(rs);
864 }
866 static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
867 test_log("test_reserved_space3(%p, %p, %d)",
868 (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);
870 assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
871 assert(is_size_aligned(size, alignment), "Must be at least aligned against alignment");
873 bool large = maybe_large && UseLargePages && size >= os::large_page_size();
875 ReservedSpace rs(size, alignment, large, false);
877 test_log(" rs.special() == %d", rs.special());
879 assert(rs.base() != NULL, "Must be");
880 assert(rs.size() == size, "Must be");
882 if (rs.special()) {
883 small_page_write(rs.base(), size);
884 }
886 release_memory_for_test(rs);
887 }
890 static void test_reserved_space1() {
891 size_t size = 2 * 1024 * 1024;
892 size_t ag = os::vm_allocation_granularity();
894 test_reserved_space1(size, ag);
895 test_reserved_space1(size * 2, ag);
896 test_reserved_space1(size * 10, ag);
897 }
899 static void test_reserved_space2() {
900 size_t size = 2 * 1024 * 1024;
901 size_t ag = os::vm_allocation_granularity();
903 test_reserved_space2(size * 1);
904 test_reserved_space2(size * 2);
905 test_reserved_space2(size * 10);
906 test_reserved_space2(ag);
907 test_reserved_space2(size - ag);
908 test_reserved_space2(size);
909 test_reserved_space2(size + ag);
910 test_reserved_space2(size * 2);
911 test_reserved_space2(size * 2 - ag);
912 test_reserved_space2(size * 2 + ag);
913 test_reserved_space2(size * 3);
914 test_reserved_space2(size * 3 - ag);
915 test_reserved_space2(size * 3 + ag);
916 test_reserved_space2(size * 10);
917 test_reserved_space2(size * 10 + size / 2);
918 }
920 static void test_reserved_space3() {
921 size_t ag = os::vm_allocation_granularity();
923 test_reserved_space3(ag, ag , false);
924 test_reserved_space3(ag * 2, ag , false);
925 test_reserved_space3(ag * 3, ag , false);
926 test_reserved_space3(ag * 2, ag * 2, false);
927 test_reserved_space3(ag * 4, ag * 2, false);
928 test_reserved_space3(ag * 8, ag * 2, false);
929 test_reserved_space3(ag * 4, ag * 4, false);
930 test_reserved_space3(ag * 8, ag * 4, false);
931 test_reserved_space3(ag * 16, ag * 4, false);
933 if (UseLargePages) {
934 size_t lp = os::large_page_size();
936 // Without large pages
937 test_reserved_space3(lp, ag * 4, false);
938 test_reserved_space3(lp * 2, ag * 4, false);
939 test_reserved_space3(lp * 4, ag * 4, false);
940 test_reserved_space3(lp, lp , false);
941 test_reserved_space3(lp * 2, lp , false);
942 test_reserved_space3(lp * 3, lp , false);
943 test_reserved_space3(lp * 2, lp * 2, false);
944 test_reserved_space3(lp * 4, lp * 2, false);
945 test_reserved_space3(lp * 8, lp * 2, false);
947 // With large pages
948 test_reserved_space3(lp, ag * 4 , true);
949 test_reserved_space3(lp * 2, ag * 4, true);
950 test_reserved_space3(lp * 4, ag * 4, true);
951 test_reserved_space3(lp, lp , true);
952 test_reserved_space3(lp * 2, lp , true);
953 test_reserved_space3(lp * 3, lp , true);
954 test_reserved_space3(lp * 2, lp * 2, true);
955 test_reserved_space3(lp * 4, lp * 2, true);
956 test_reserved_space3(lp * 8, lp * 2, true);
957 }
958 }
  // Entry point: run all ReservedSpace test drivers.
  static void test_reserved_space() {
    test_reserved_space1();
    test_reserved_space2();
    test_reserved_space3();
  }
965 };
// Exported entry point for the internal VM tests; forwards to the
// TestReservedSpace test suite above.
void TestReservedSpace_test() {
  TestReservedSpace::test_reserved_space();
}
// Assertion helpers for the VirtualSpace tests below.
// Each macro is wrapped in do { ... } while (0) so a use followed by ';'
// behaves as exactly one statement (safe inside unbraced if/else), and the
// arguments are parenthesized so compound expressions expand correctly.
// NOTE: arguments are evaluated twice (once in the condition, once in the
// failure message), so avoid side-effecting expressions.
#define assert_equals(actual, expected)                     \
  do {                                                      \
    assert((actual) == (expected),                          \
           err_msg("Got " SIZE_FORMAT " expected "          \
                   SIZE_FORMAT, (actual), (expected)));     \
  } while (0)

#define assert_ge(value1, value2)                           \
  do {                                                      \
    assert((value1) >= (value2),                            \
           err_msg("'" #value1 "': " SIZE_FORMAT " '"       \
                   #value2 "': " SIZE_FORMAT,               \
                   (value1), (value2)));                    \
  } while (0)

#define assert_lt(value1, value2)                           \
  do {                                                      \
    assert((value1) < (value2),                             \
           err_msg("'" #value1 "': " SIZE_FORMAT " '"       \
                   #value2 "': " SIZE_FORMAT,               \
                   (value1), (value2)));                    \
  } while (0)
// Tests for VirtualSpace: verify that actual_committed_size() accounting is
// correct for small pages, large pages, and the large-page override modes.
class TestVirtualSpace : AllStatic {
  // Selects how the backing ReservedSpace is created and which commit
  // granularity the VirtualSpace is initialized with.
  enum TestLargePages {
    Default,   // plain reservation, default initialize()
    Disable,   // small-page reservation, commit granularity forced to vm_page_size()
    Reserve,   // plain reservation, default initialize() (same path as Default)
    Commit     // small-page reservation, commit granularity from page_size_for_region()
  };

  // Reserve memory according to 'mode'. Default/Reserve use the plain
  // constructor; Disable/Commit explicitly reserve without large pages.
  static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
    switch(mode) {
    default:
    case Default:
    case Reserve:
      return ReservedSpace(reserve_size_aligned);
    case Disable:
    case Commit:
      return ReservedSpace(reserve_size_aligned,
                           os::vm_allocation_granularity(),
                           /* large */ false, /* exec */ false);
    }
  }

  // Initialize 'vs' over 'rs', choosing the commit granularity per 'mode'.
  static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
    switch(mode) {
    default:
    case Default:
    case Reserve:
      return vs.initialize(rs, 0);
    case Disable:
      return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
    case Commit:
      return vs.initialize_with_granularity(rs, 0, os::page_size_for_region(rs.size(), rs.size(), 1));
    }
  }

 public:
  // Reserve 'reserve_size' (rounded up to the allocation granularity),
  // commit 'commit_size' of it, and check that actual_committed_size()
  // reflects the commit within one commit-granularity unit.
  static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
                                                        TestLargePages mode = Default) {
    size_t granularity = os::vm_allocation_granularity();
    size_t reserve_size_aligned = align_size_up(reserve_size, granularity);

    ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);

    assert(reserved.is_reserved(), "Must be");

    VirtualSpace vs;
    bool initialized = initialize_virtual_space(vs, reserved, mode);
    assert(initialized, "Failed to initialize VirtualSpace");

    vs.expand_by(commit_size, false);

    if (vs.special()) {
      // Special (pre-committed) mappings always account the whole reservation.
      assert_equals(vs.actual_committed_size(), reserve_size_aligned);
    } else {
      assert_ge(vs.actual_committed_size(), commit_size);
      // Approximate the commit granularity.
      // Make sure that we don't commit using large pages
      // if large pages has been disabled for this VirtualSpace.
      size_t commit_granularity = (mode == Disable || !UseLargePages) ?
                                  os::vm_page_size() : os::large_page_size();
      assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
    }

    reserved.release();
  }

  // Commit exactly one large page and check it is accounted exactly.
  static void test_virtual_space_actual_committed_space_one_large_page() {
    if (!UseLargePages) {
      return;
    }

    size_t large_page_size = os::large_page_size();

    ReservedSpace reserved(large_page_size, large_page_size, true, false);

    assert(reserved.is_reserved(), "Must be");

    VirtualSpace vs;
    bool initialized = vs.initialize(reserved, 0);
    assert(initialized, "Failed to initialize VirtualSpace");

    vs.expand_by(large_page_size, false);

    assert_equals(vs.actual_committed_size(), large_page_size);

    reserved.release();
  }

  // Driver: default-mode reserve/commit combinations from 4K up to 10M.
  static void test_virtual_space_actual_committed_space() {
    test_virtual_space_actual_committed_space(4 * K, 0);
    test_virtual_space_actual_committed_space(4 * K, 4 * K);
    test_virtual_space_actual_committed_space(8 * K, 0);
    test_virtual_space_actual_committed_space(8 * K, 4 * K);
    test_virtual_space_actual_committed_space(8 * K, 8 * K);
    test_virtual_space_actual_committed_space(12 * K, 0);
    test_virtual_space_actual_committed_space(12 * K, 4 * K);
    test_virtual_space_actual_committed_space(12 * K, 8 * K);
    test_virtual_space_actual_committed_space(12 * K, 12 * K);
    test_virtual_space_actual_committed_space(64 * K, 0);
    test_virtual_space_actual_committed_space(64 * K, 32 * K);
    test_virtual_space_actual_committed_space(64 * K, 64 * K);
    test_virtual_space_actual_committed_space(2 * M, 0);
    test_virtual_space_actual_committed_space(2 * M, 4 * K);
    test_virtual_space_actual_committed_space(2 * M, 64 * K);
    test_virtual_space_actual_committed_space(2 * M, 1 * M);
    test_virtual_space_actual_committed_space(2 * M, 2 * M);
    test_virtual_space_actual_committed_space(10 * M, 0);
    test_virtual_space_actual_committed_space(10 * M, 4 * K);
    test_virtual_space_actual_committed_space(10 * M, 8 * K);
    test_virtual_space_actual_committed_space(10 * M, 1 * M);
    test_virtual_space_actual_committed_space(10 * M, 2 * M);
    test_virtual_space_actual_committed_space(10 * M, 5 * M);
    test_virtual_space_actual_committed_space(10 * M, 10 * M);
  }

  // Driver: run the 10M combinations in each explicit large-page mode.
  static void test_virtual_space_disable_large_pages() {
    if (!UseLargePages) {
      return;
    }
    // These test cases verify that if we force VirtualSpace to disable
    // large pages, the commit granularity falls back to small pages.
    test_virtual_space_actual_committed_space(10 * M, 0, Disable);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);

    test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);

    test_virtual_space_actual_committed_space(10 * M, 0, Commit);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
  }

  // Entry point: run all VirtualSpace test drivers.
  static void test_virtual_space() {
    test_virtual_space_actual_committed_space();
    test_virtual_space_actual_committed_space_one_large_page();
    test_virtual_space_disable_large_pages();
  }
};
// Exported entry point for the internal VM tests; forwards to the
// TestVirtualSpace test suite above.
void TestVirtualSpace_test() {
  TestVirtualSpace::test_virtual_space();
}
1143 #endif // PRODUCT
1145 #endif