335 // byte_size is the size of the associated virtual space. |
336 VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(0) { |
336 VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(0) { |
337 // align up to vm allocation granularity |
337 // align up to vm allocation granularity |
338 byte_size = align_size_up(byte_size, os::vm_allocation_granularity()); |
338 byte_size = align_size_up(byte_size, os::vm_allocation_granularity()); |
339 |
339 |
340 // This allocates memory with mmap. For DumpSharedspaces, allocate the |
340 // This allocates memory with mmap. For DumpSharedspaces, try to reserve |
341 // space at low memory so that other shared images don't conflict. |
341 // configurable address, generally at the top of the Java heap so other |
342 // This is the same address as memory needed for UseCompressedOops but |
342 // memory addresses don't conflict. |
343 // compressed oops don't work with CDS (offsets in metadata are wrong), so |
|
344 // borrow the same address. |
|
345 if (DumpSharedSpaces) { |
343 if (DumpSharedSpaces) { |
346 char* shared_base = (char*)HeapBaseMinAddress; |
344 char* shared_base = (char*)SharedBaseAddress; |
347 _rs = ReservedSpace(byte_size, 0, false, shared_base, 0); |
345 _rs = ReservedSpace(byte_size, 0, false, shared_base, 0); |
348 if (_rs.is_reserved()) { |
346 if (_rs.is_reserved()) { |
349 assert(_rs.base() == shared_base, "should match"); |
347 assert(shared_base == 0 || _rs.base() == shared_base, "should match"); |
350 } else { |
348 } else { |
351 // If we are dumping the heap, then allocate a wasted block of address |
349 // Get a mmap region anywhere if the SharedBaseAddress fails. |
352 // space in order to push the heap to a lower address. This extra |
|
353 // address range allows for other (or larger) libraries to be loaded |
|
354 // without them occupying the space required for the shared spaces. |
|
355 uintx reserved = 0; |
|
356 uintx block_size = 64*1024*1024; |
|
357 while (reserved < SharedDummyBlockSize) { |
|
358 char* dummy = os::reserve_memory(block_size); |
|
359 reserved += block_size; |
|
360 } |
|
361 _rs = ReservedSpace(byte_size); |
350 _rs = ReservedSpace(byte_size); |
362 } |
351 } |
363 MetaspaceShared::set_shared_rs(&_rs); |
352 MetaspaceShared::set_shared_rs(&_rs); |
364 } else { |
353 } else { |
365 _rs = ReservedSpace(byte_size); |
354 _rs = ReservedSpace(byte_size); |