src/share/vm/runtime/virtualspace.cpp

changeset 791:1ee8caae33af
parent    777:37f87013dfd8
parent    672:1fdb98a17101
child     798:032ddb9432ad
@@ -26,16 +26,19 @@
 #include "incls/_virtualspace.cpp.incl"
 
 
 // ReservedSpace
 ReservedSpace::ReservedSpace(size_t size) {
-  initialize(size, 0, false, NULL);
+  initialize(size, 0, false, NULL, 0);
 }
 
 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
-                             bool large, char* requested_address) {
-  initialize(size, alignment, large, requested_address);
+                             bool large,
+                             char* requested_address,
+                             const size_t noaccess_prefix) {
+  initialize(size+noaccess_prefix, alignment, large, requested_address,
+             noaccess_prefix);
 }
 
 char *
 ReservedSpace::align_reserved_region(char* addr, const size_t len,
                                      const size_t prefix_size,
@@ -103,11 +106,12 @@
 }
 
 ReservedSpace::ReservedSpace(const size_t prefix_size,
                              const size_t prefix_align,
                              const size_t suffix_size,
-                             const size_t suffix_align)
+                             const size_t suffix_align,
+                             const size_t noaccess_prefix)
 {
   assert(prefix_size != 0, "sanity");
   assert(prefix_align != 0, "sanity");
   assert(suffix_size != 0, "sanity");
   assert(suffix_align != 0, "sanity");
@@ -116,32 +120,40 @@
   assert((suffix_size & suffix_align - 1) == 0,
          "suffix_size not divisible by suffix_align");
   assert((suffix_align & prefix_align - 1) == 0,
          "suffix_align not divisible by prefix_align");
 
+  // Add in noaccess_prefix to prefix_size;
+  const size_t adjusted_prefix_size = prefix_size + noaccess_prefix;
+  const size_t size = adjusted_prefix_size + suffix_size;
+
   // On systems where the entire region has to be reserved and committed up
   // front, the compound alignment normally done by this method is unnecessary.
   const bool try_reserve_special = UseLargePages &&
     prefix_align == os::large_page_size();
   if (!os::can_commit_large_page_memory() && try_reserve_special) {
-    initialize(prefix_size + suffix_size, prefix_align, true);
+    initialize(size, prefix_align, true, NULL, noaccess_prefix);
     return;
   }
 
   _base = NULL;
   _size = 0;
   _alignment = 0;
   _special = false;
+  _noaccess_prefix = 0;
+
+  // Assert that if noaccess_prefix is used, it is the same as prefix_align.
+  assert(noaccess_prefix == 0 ||
+         noaccess_prefix == prefix_align, "noaccess prefix wrong");
 
   // Optimistically try to reserve the exact size needed.
-  const size_t size = prefix_size + suffix_size;
   char* addr = os::reserve_memory(size, NULL, prefix_align);
   if (addr == NULL) return;
 
   // Check whether the result has the needed alignment (unlikely unless
   // prefix_align == suffix_align).
-  const size_t ofs = size_t(addr) + prefix_size & suffix_align - 1;
+  const size_t ofs = size_t(addr) + adjusted_prefix_size & suffix_align - 1;
   if (ofs != 0) {
     // Wrong alignment.  Release, allocate more space and do manual alignment.
     //
     // On most operating systems, another allocation with a somewhat larger size
     // will return an address "close to" that of the previous allocation.  The
@@ -151,26 +163,28 @@
     if (!os::release_memory(addr, size)) {
       fatal("os::release_memory failed");
     }
 
     const size_t extra = MAX2(ofs, suffix_align - ofs);
-    addr = reserve_and_align(size + extra, prefix_size, prefix_align,
+    addr = reserve_and_align(size + extra, adjusted_prefix_size, prefix_align,
                              suffix_size, suffix_align);
     if (addr == NULL) {
       // Try an even larger region.  If this fails, address space is exhausted.
-      addr = reserve_and_align(size + suffix_align, prefix_size,
+      addr = reserve_and_align(size + suffix_align, adjusted_prefix_size,
                                prefix_align, suffix_size, suffix_align);
     }
   }
 
   _base = addr;
   _size = size;
   _alignment = prefix_align;
+  _noaccess_prefix = noaccess_prefix;
 }
 
 void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
-                               char* requested_address) {
+                               char* requested_address,
+                               const size_t noaccess_prefix) {
   const size_t granularity = os::vm_allocation_granularity();
   assert((size & granularity - 1) == 0,
          "size not aligned to os::vm_allocation_granularity()");
   assert((alignment & granularity - 1) == 0,
          "alignment not aligned to os::vm_allocation_granularity()");
@@ -179,10 +193,11 @@
 
   _base = NULL;
   _size = 0;
   _special = false;
   _alignment = 0;
+  _noaccess_prefix = 0;
   if (size == 0) {
     return;
   }
 
   // If OS doesn't support demand paging for large page memory, we need
@@ -218,11 +233,12 @@
     // If the memory was requested at a particular address, use
     // os::attempt_reserve_memory_at() to avoid over mapping something
     // important.  If available space is not detected, return NULL.
 
     if (requested_address != 0) {
-      base = os::attempt_reserve_memory_at(size, requested_address);
+      base = os::attempt_reserve_memory_at(size,
+                                           requested_address-noaccess_prefix);
     } else {
       base = os::reserve_memory(size, NULL, alignment);
     }
 
     if (base == NULL) return;
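
When the caller pins the heap to a particular address, the reservation is attempted `noaccess_prefix` bytes lower, so that after `protect_noaccess_prefix()` (further down) advances `_base` past the prefix, the usable region starts exactly where the caller asked. A tiny check of that offset relationship, with hypothetical numbers:

    // Hypothetical numbers; only the offset relationship matters.
    #include <cassert>
    #include <cstdint>

    int main() {
      const uintptr_t requested_address = 0x80000000u;  // where the usable heap should start
      const uintptr_t noaccess_prefix   = 0x1000;       // one protected page

      // What initialize() hands to os::attempt_reserve_memory_at():
      const uintptr_t reservation_base = requested_address - noaccess_prefix;

      // Once the prefix is protected and the base advanced past it:
      assert(reservation_base + noaccess_prefix == requested_address);
      return 0;
    }
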
@@ -249,10 +265,15 @@
   }
   // Done
   _base = base;
   _size = size;
   _alignment = MAX2(alignment, (size_t) os::vm_page_size());
+  _noaccess_prefix = noaccess_prefix;
+
+  // Assert that if noaccess_prefix is used, it is the same as alignment.
+  assert(noaccess_prefix == 0 ||
+         noaccess_prefix == _alignment, "noaccess prefix wrong");
 
   assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
          "area must be distinguisable from marks for mark-sweep");
   assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
          "area must be distinguisable from marks for mark-sweep");
@@ -264,10 +285,11 @@
   assert((size % os::vm_allocation_granularity()) == 0,
          "size not allocation aligned");
   _base = base;
   _size = size;
   _alignment = alignment;
+  _noaccess_prefix = 0;
   _special = special;
 }
 
 
 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
@@ -310,21 +332,62 @@
 }
 
 
 void ReservedSpace::release() {
   if (is_reserved()) {
+    char *real_base = _base - _noaccess_prefix;
+    const size_t real_size = _size + _noaccess_prefix;
     if (special()) {
-      os::release_memory_special(_base, _size);
+      os::release_memory_special(real_base, real_size);
     } else{
-      os::release_memory(_base, _size);
+      os::release_memory(real_base, real_size);
     }
     _base = NULL;
     _size = 0;
+    _noaccess_prefix = 0;
     _special = false;
   }
 }
 
+void ReservedSpace::protect_noaccess_prefix(const size_t size) {
+  // If there is noaccess prefix, return.
+  if (_noaccess_prefix == 0) return;
+
+  assert(_noaccess_prefix >= (size_t)os::vm_page_size(),
+         "must be at least page size big");
+
+  // Protect memory at the base of the allocated region.
+  // If special, the page was committed (only matters on windows)
+  if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE,
+                          _special)) {
+    fatal("cannot protect protection page");
+  }
+
+  _base += _noaccess_prefix;
+  _size -= _noaccess_prefix;
+  assert((size == _size) && ((uintptr_t)_base % _alignment == 0),
+         "must be exactly of required size and alignment");
+}
+
+ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment,
+                                     bool large, char* requested_address) :
+  ReservedSpace(size, alignment, large,
+                requested_address,
+                UseCompressedOops ? lcm(os::vm_page_size(), alignment) : 0) {
+  // Only reserved space for the java heap should have a noaccess_prefix
+  // if using compressed oops.
+  protect_noaccess_prefix(size);
+}
+
+ReservedHeapSpace::ReservedHeapSpace(const size_t prefix_size,
+                                     const size_t prefix_align,
+                                     const size_t suffix_size,
+                                     const size_t suffix_align) :
+  ReservedSpace(prefix_size, prefix_align, suffix_size, suffix_align,
+                UseCompressedOops ? lcm(os::vm_page_size(), prefix_align) : 0) {
+  protect_noaccess_prefix(prefix_size+suffix_size);
+}
 
 // VirtualSpace
 
 VirtualSpace::VirtualSpace() {
   _low_boundary = NULL;
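
The new `protect_noaccess_prefix()` makes the first `_noaccess_prefix` bytes of the reservation inaccessible and then advances `_base` past them, and the `ReservedHeapSpace` constructors size that prefix as `lcm(os::vm_page_size(), alignment)` so it is at least a page (protectable) and a multiple of the alignment (the shifted base stays aligned), but only when `UseCompressedOops` is on; presumably the point is that an access through a decoded null compressed oop, which lands at the bottom of the heap reservation, faults immediately. The sketch below shows the same guard-page idea with plain POSIX `mmap`/`mprotect`; it is an illustration of the concept under that reading, not the `os::protect_memory` layer the VM uses:

    // Conceptual guard-page sketch (POSIX), not HotSpot's os:: abstraction.
    #include <sys/mman.h>
    #include <unistd.h>
    #include <cstddef>

    int main() {
      const size_t prefix = static_cast<size_t>(sysconf(_SC_PAGESIZE));  // at least one page
      const size_t usable = 64u * 1024 * 1024;
      const size_t total  = usable + prefix;       // reserve prefix + usable space together

      void* raw = mmap(nullptr, total, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (raw == MAP_FAILED) return 1;
      char* raw_base = static_cast<char*>(raw);

      // Make the prefix inaccessible: any load or store into it now faults,
      // which is the property the protected page at the heap base provides.
      if (mprotect(raw_base, prefix, PROT_NONE) != 0) return 1;

      // Mirrors _base += _noaccess_prefix; _size -= _noaccess_prefix;
      char*  base = raw_base + prefix;
      size_t size = total - prefix;
      base[0] = 1;                     // fine: first usable byte
      (void)size;

      munmap(raw_base, total);         // release the original mapping, prefix included
      return 0;
    }
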
@@ -338,10 +401,11 @@
   _middle_high_boundary = NULL;
   _upper_high_boundary = NULL;
   _lower_alignment = 0;
   _middle_alignment = 0;
   _upper_alignment = 0;
+  _special = false;
 }
 
 
 bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
   if(!rs.is_reserved()) return false;  // allocation failed.
@@ -392,11 +456,12 @@
   release();
 }
 
 
 void VirtualSpace::release() {
-  (void)os::release_memory(low_boundary(), reserved_size());
+  // This does not release memory it never reserved.
+  // Caller must release via rs.release();
   _low_boundary = NULL;
   _high_boundary = NULL;
   _low = NULL;
   _high = NULL;
   _lower_high = NULL;
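
The final hunk drops the unconditional `os::release_memory()` from `VirtualSpace::release()`: as the new comment says, the owner that reserved the space must release it, and `ReservedSpace::release()` above is the only place that can rebuild the real mapping from `_noaccess_prefix`. A minimal arithmetic sketch of why; the names mirror fields in this file and the concrete numbers are made up:

    // Why unmapping is left to the owning ReservedSpace; numbers are hypothetical.
    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    int main() {
      const uintptr_t raw_base        = 0x40000000;         // what the OS actually mapped
      const size_t    noaccess_prefix = 0x1000;
      const size_t    reserved        = 64u * 1024 * 1024;  // total mapping, prefix included

      // After protect_noaccess_prefix(), _base/_size describe only the usable part.
      const uintptr_t base = raw_base + noaccess_prefix;
      const size_t    size = reserved - noaccess_prefix;

      // ReservedSpace::release() reconstructs the original mapping before freeing:
      const uintptr_t real_base = base - noaccess_prefix;
      const size_t    real_size = size + noaccess_prefix;
      assert(real_base == raw_base && real_size == reserved);

      // Freeing (base, size) instead, which is all a VirtualSpace can see through
      // low_boundary()/reserved_size(), would leave the protected prefix page
      // mapped (and fail outright on systems that require releasing exactly the
      // range that was reserved).
      return 0;
    }
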
