Tue, 11 May 2010 14:35:43 -0700
6931180: Migration to recent versions of MS Platform SDK
6951582: Build problems on win64
Summary: Changes to enable building JDK7 with Microsoft Visual Studio 2010
Reviewed-by: ohair, art, ccheung, dcubed
1 /*
2 * Copyright 1998-2010 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
25 # include "incls/_precompiled.incl"
26 # include "incls/_codeBlob.cpp.incl"
28 unsigned int align_code_offset(int offset) {
29 // align the size to CodeEntryAlignment
30 return
31 ((offset + (int)CodeHeap::header_size() + (CodeEntryAlignment-1)) & ~(CodeEntryAlignment-1))
32 - (int)CodeHeap::header_size();
33 }
36 // This must be consistent with the CodeBlob constructor's layout actions.
37 unsigned int CodeBlob::allocation_size(CodeBuffer* cb, int header_size) {
38 unsigned int size = header_size;
39 size += round_to(cb->total_relocation_size(), oopSize);
40 // align the size to CodeEntryAlignment
41 size = align_code_offset(size);
42 size += round_to(cb->total_code_size(), oopSize);
43 size += round_to(cb->total_oop_size(), oopSize);
44 return size;
45 }
// Creates a simple CodeBlob. Sets up the size of the different regions.
// With no CodeBuffer supplied, the blob has no code/data/oop contents yet:
// data and oops offsets both point at the end of the blob.
CodeBlob::CodeBlob(const char* name, int header_size, int size, int frame_complete, int locs_size) {
  assert(size == round_to(size, oopSize), "unaligned size");
  assert(locs_size == round_to(locs_size, oopSize), "unaligned size");
  assert(header_size == round_to(header_size, oopSize), "unaligned size");
  assert(!UseRelocIndex, "no space allocated for reloc index yet");

  // Note: If UseRelocIndex is enabled, there needs to be (at least) one
  // extra word for the relocation information, containing the reloc
  // index table length. Unfortunately, the reloc index table imple-
  // mentation is not easily understandable and thus it is not clear
  // what exactly the format is supposed to be. For now, we just turn
  // off the use of this table (gri 7/6/2000).

  _name                  = name;
  _size                  = size;
  _frame_complete_offset = frame_complete;
  _header_size           = header_size;
  _relocation_size       = locs_size;
  // Code begins CodeEntryAlignment-aligned, after header + relocations.
  _instructions_offset   = align_code_offset(header_size + locs_size);
  _data_offset           = size;   // no data section ...
  _oops_offset           = size;   // ... and no embedded oops
  _oops_length           = 0;
  _frame_size            = 0;
  set_oop_maps(NULL);
}
// Creates a CodeBlob from a CodeBuffer. Sets up the size of the different regions,
// and copy code and relocation info.
// Layout within the blob: header | relocations | (aligned) code | data | oops.
CodeBlob::CodeBlob(
  const char* name,
  CodeBuffer* cb,
  int         header_size,
  int         size,
  int         frame_complete,
  int         frame_size,
  OopMapSet*  oop_maps
) {
  assert(size == round_to(size, oopSize), "unaligned size");
  assert(header_size == round_to(header_size, oopSize), "unaligned size");

  _name                  = name;
  _size                  = size;
  _frame_complete_offset = frame_complete;
  _header_size           = header_size;
  _relocation_size       = round_to(cb->total_relocation_size(), oopSize);
  // Code begins CodeEntryAlignment-aligned, after header + relocations.
  _instructions_offset   = align_code_offset(header_size + _relocation_size);
  _data_offset           = _instructions_offset + round_to(cb->total_code_size(), oopSize);
  // Embedded oops live at the very end of the blob.
  _oops_offset           = _size - round_to(cb->total_oop_size(), oopSize);
  _oops_length           = 0; // temporary, until the copy_oops handshake
  assert(_oops_offset >= _data_offset, "codeBlob is too small");
  assert(_data_offset <= size, "codeBlob is too small");

  cb->copy_code_and_locs_to(this);
  set_oop_maps(oop_maps);
  _frame_size = frame_size;
#ifdef COMPILER1
  // probably wrong for tiered
  assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
#endif // COMPILER1
}
112 void CodeBlob::set_oop_maps(OopMapSet* p) {
113 // Danger Will Robinson! This method allocates a big
114 // chunk of memory, its your job to free it.
115 if (p != NULL) {
116 // We need to allocate a chunk big enough to hold the OopMapSet and all of its OopMaps
117 _oop_maps = (OopMapSet* )NEW_C_HEAP_ARRAY(unsigned char, p->heap_size());
118 p->copy_to((address)_oop_maps);
119 } else {
120 _oop_maps = NULL;
121 }
122 }
125 void CodeBlob::flush() {
126 if (_oop_maps) {
127 FREE_C_HEAP_ARRAY(unsigned char, _oop_maps);
128 _oop_maps = NULL;
129 }
130 _comments.free();
131 }
134 // Promote one word from an assembly-time handle to a live embedded oop.
135 inline void CodeBlob::initialize_immediate_oop(oop* dest, jobject handle) {
136 if (handle == NULL ||
137 // As a special case, IC oops are initialized to 1 or -1.
138 handle == (jobject) Universe::non_oop_word()) {
139 (*dest) = (oop)handle;
140 } else {
141 (*dest) = JNIHandles::resolve_non_null(handle);
142 }
143 }
// Copy the assembly-time oop handles into the blob's embedded-oop section,
// resolving each handle to a live oop, then patch the code so instructions
// refer to the resolved values. Called exactly once per blob.
void CodeBlob::copy_oops(GrowableArray<jobject>* array) {
  assert(_oops_length == 0, "do this handshake just once, please");
  int length = array->length();
  assert((address)(oops_begin() + length) <= data_end(), "oops big enough");
  oop* dest = oops_begin();
  for (int index = 0 ; index < length; index++) {
    initialize_immediate_oop(&dest[index], array->at(index));
  }
  _oops_length = length;

  // Now we can fix up all the oops in the code.
  // We need to do this in the code because
  // the assembler uses jobjects as placeholders.
  // The code and relocations have already been
  // initialized by the CodeBlob constructor,
  // so it is valid even at this early point to
  // iterate over relocations and patch the code.
  fix_oop_relocations(NULL, NULL, /*initialize_immediates=*/ true);
}
167 relocInfo::relocType CodeBlob::reloc_type_for_address(address pc) {
168 RelocIterator iter(this, pc, pc+1);
169 while (iter.next()) {
170 return (relocInfo::relocType) iter.type();
171 }
172 // No relocation info found for pc
173 ShouldNotReachHere();
174 return relocInfo::none; // dummy return value
175 }
178 bool CodeBlob::is_at_poll_return(address pc) {
179 RelocIterator iter(this, pc, pc+1);
180 while (iter.next()) {
181 if (iter.type() == relocInfo::poll_return_type)
182 return true;
183 }
184 return false;
185 }
188 bool CodeBlob::is_at_poll_or_poll_return(address pc) {
189 RelocIterator iter(this, pc, pc+1);
190 while (iter.next()) {
191 relocInfo::relocType t = iter.type();
192 if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
193 return true;
194 }
195 return false;
196 }
// Walk the oop relocations in [begin, end) (NULL bounds cover the whole
// blob) and refresh each oop-bearing instruction. When initialize_immediates
// is true, immediate oop words still holding assembly-time jobject handles
// are first resolved in place (the copy_oops handshake).
void CodeBlob::fix_oop_relocations(address begin, address end,
                                   bool initialize_immediates) {
  // re-patch all oop-bearing instructions, just in case some oops moved
  RelocIterator iter(this, begin, end);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (initialize_immediates && reloc->oop_is_immediate()) {
        // The stored word is still a placeholder handle; resolve it.
        oop* dest = reloc->oop_addr();
        initialize_immediate_oop(dest, (jobject) *dest);
      }
      // Refresh the oop-related bits of this instruction.
      reloc->fix_oop_relocation();
    }

    // There must not be any interfering patches or breakpoints.
    assert(!(iter.type() == relocInfo::breakpoint_type
             && iter.breakpoint_reloc()->active()),
           "no active breakpoint");
  }
}
// Class-unloading support. A plain CodeBlob has nothing to unload, so this
// must never be reached; subclasses that participate in unloading are
// expected to override it.
void CodeBlob::do_unloading(BoolObjectClosure* is_alive,
                            OopClosure* keep_alive,
                            bool unloading_occurred) {
  ShouldNotReachHere();
}
227 OopMap* CodeBlob::oop_map_for_return_address(address return_address) {
228 address pc = return_address ;
229 assert (oop_maps() != NULL, "nope");
230 return oop_maps()->find_map_at_offset ((intptr_t) pc - (intptr_t) instructions_begin());
231 }
234 //----------------------------------------------------------------------------------------------------
235 // Implementation of BufferBlob
// Creates an empty BufferBlob: a raw code buffer with no relocations and
// no safe frame position.
BufferBlob::BufferBlob(const char* name, int size)
: CodeBlob(name, sizeof(BufferBlob), size, CodeOffsets::frame_never_safe, /*locs_size:*/ 0)
{}
242 BufferBlob* BufferBlob::create(const char* name, int buffer_size) {
243 ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
245 BufferBlob* blob = NULL;
246 unsigned int size = sizeof(BufferBlob);
247 // align the size to CodeEntryAlignment
248 size = align_code_offset(size);
249 size += round_to(buffer_size, oopSize);
250 assert(name != NULL, "must provide a name");
251 {
252 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
253 blob = new (size) BufferBlob(name, size);
254 }
255 // Track memory usage statistic after releasing CodeCache_lock
256 MemoryService::track_code_cache_memory_usage();
258 return blob;
259 }
// Creates a BufferBlob initialized from a CodeBuffer; frame_size 0 and no
// oop maps, and never a safe frame position.
BufferBlob::BufferBlob(const char* name, int size, CodeBuffer* cb)
: CodeBlob(name, cb, sizeof(BufferBlob), size, CodeOffsets::frame_never_safe, 0, NULL)
{}
266 BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) {
267 ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
269 BufferBlob* blob = NULL;
270 unsigned int size = allocation_size(cb, sizeof(BufferBlob));
271 assert(name != NULL, "must provide a name");
272 {
273 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
274 blob = new (size) BufferBlob(name, size, cb);
275 }
276 // Track memory usage statistic after releasing CodeCache_lock
277 MemoryService::track_code_cache_memory_usage();
279 return blob;
280 }
// Placement allocator: carves the blob out of the CodeCache.
// NOTE: unlike the stub blobs' operator new below, this one does NOT
// fatal on exhaustion — it can return NULL, so callers of
// BufferBlob::create must tolerate a NULL result.
void* BufferBlob::operator new(size_t s, unsigned size) {
  void* p = CodeCache::allocate(size);
  return p;
}
289 void BufferBlob::free( BufferBlob *blob ) {
290 ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
291 {
292 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
293 CodeCache::free((CodeBlob*)blob);
294 }
295 // Track memory usage statistic after releasing CodeCache_lock
296 MemoryService::track_code_cache_memory_usage();
297 }
300 //----------------------------------------------------------------------------------------------------
301 // Implementation of AdapterBlob
303 AdapterBlob* AdapterBlob::create(CodeBuffer* cb) {
304 ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
306 AdapterBlob* blob = NULL;
307 unsigned int size = allocation_size(cb, sizeof(AdapterBlob));
308 {
309 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
310 blob = new (size) AdapterBlob(size, cb);
311 }
312 // Track memory usage statistic after releasing CodeCache_lock
313 MemoryService::track_code_cache_memory_usage();
315 return blob;
316 }
319 //----------------------------------------------------------------------------------------------------
320 // Implementation of MethodHandlesAdapterBlob
322 MethodHandlesAdapterBlob* MethodHandlesAdapterBlob::create(int buffer_size) {
323 ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
325 MethodHandlesAdapterBlob* blob = NULL;
326 unsigned int size = sizeof(MethodHandlesAdapterBlob);
327 // align the size to CodeEntryAlignment
328 size = align_code_offset(size);
329 size += round_to(buffer_size, oopSize);
330 {
331 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
332 blob = new (size) MethodHandlesAdapterBlob(size);
333 }
334 // Track memory usage statistic after releasing CodeCache_lock
335 MemoryService::track_code_cache_memory_usage();
337 return blob;
338 }
341 //----------------------------------------------------------------------------------------------------
342 // Implementation of RuntimeStub
// Constructs a RuntimeStub from 'cb'; the base CodeBlob ctor does the
// layout and copying, this just records the GC-arguments policy.
RuntimeStub::RuntimeStub(
  const char* name,
  CodeBuffer* cb,
  int         size,
  int         frame_complete,
  int         frame_size,
  OopMapSet*  oop_maps,
  bool        caller_must_gc_arguments
)
: CodeBlob(name, cb, sizeof(RuntimeStub), size, frame_complete, frame_size, oop_maps)
{
  _caller_must_gc_arguments = caller_must_gc_arguments;
}
// Allocates and initializes a RuntimeStub in the CodeCache, then announces
// it to the tooling interfaces (disassembly trace, VTune, Forte, JVMTI).
// The CodeCache_lock is held only around the allocation; operator new
// fatals on exhaustion, so the NULL check below is defensive.
RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name,
                                           CodeBuffer* cb,
                                           int frame_complete,
                                           int frame_size,
                                           OopMapSet* oop_maps,
                                           bool caller_must_gc_arguments)
{
  RuntimeStub* stub = NULL;
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    unsigned int size = allocation_size(cb, sizeof(RuntimeStub));
    stub = new (size) RuntimeStub(stub_name, cb, size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments);
  }

  // Do not hold the CodeCache lock during name formatting.
  if (stub != NULL) {
    char stub_id[256];
    jio_snprintf(stub_id, sizeof(stub_id), "RuntimeStub - %s", stub_name);
    if (PrintStubCode) {
      tty->print_cr("Decoding %s " INTPTR_FORMAT, stub_id, stub);
      Disassembler::decode(stub->instructions_begin(), stub->instructions_end());
    }
    VTune::register_stub(stub_id, stub->instructions_begin(), stub->instructions_end());
    Forte::register_stub(stub_id, stub->instructions_begin(), stub->instructions_end());

    if (JvmtiExport::should_post_dynamic_code_generated()) {
      JvmtiExport::post_dynamic_code_generated(stub_name, stub->instructions_begin(), stub->instructions_end());
    }
  }

  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return stub;
}
397 void* RuntimeStub::operator new(size_t s, unsigned size) {
398 void* p = CodeCache::allocate(size);
399 if (!p) fatal("Initial size of CodeCache is too small");
400 return p;
401 }
404 //----------------------------------------------------------------------------------------------------
405 // Implementation of DeoptimizationBlob
// Constructs the deoptimization blob, recording the entry-point offsets
// for the various unpack paths within the generated code.
DeoptimizationBlob::DeoptimizationBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         unpack_offset,
  int         unpack_with_exception_offset,
  int         unpack_with_reexecution_offset,
  int         frame_size
)
: SingletonBlob("DeoptimizationBlob", cb, sizeof(DeoptimizationBlob), size, frame_size, oop_maps)
{
  _unpack_offset           = unpack_offset;
  _unpack_with_exception   = unpack_with_exception_offset;
  _unpack_with_reexecution = unpack_with_reexecution_offset;
#ifdef COMPILER1
  // Not set here; filled in later via the C1-specific setter. -1 = unset.
  _unpack_with_exception_in_tls   = -1;
#endif
}
// Allocates the (singleton) DeoptimizationBlob and registers it with the
// tooling interfaces. Lock is held only around allocation; operator new
// fatals on exhaustion, so the NULL check is defensive.
DeoptimizationBlob* DeoptimizationBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int         unpack_offset,
  int         unpack_with_exception_offset,
  int         unpack_with_reexecution_offset,
  int         frame_size)
{
  DeoptimizationBlob* blob = NULL;
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    unsigned int size = allocation_size(cb, sizeof(DeoptimizationBlob));
    blob = new (size) DeoptimizationBlob(cb,
                                         size,
                                         oop_maps,
                                         unpack_offset,
                                         unpack_with_exception_offset,
                                         unpack_with_reexecution_offset,
                                         frame_size);
  }

  // Do not hold the CodeCache lock during name formatting.
  if (blob != NULL) {
    char blob_id[256];
    jio_snprintf(blob_id, sizeof(blob_id), "DeoptimizationBlob@" PTR_FORMAT, blob->instructions_begin());
    if (PrintStubCode) {
      tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob);
      Disassembler::decode(blob->instructions_begin(), blob->instructions_end());
    }
    VTune::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end());
    Forte::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end());

    if (JvmtiExport::should_post_dynamic_code_generated()) {
      JvmtiExport::post_dynamic_code_generated("DeoptimizationBlob",
                                               blob->instructions_begin(),
                                               blob->instructions_end());
    }
  }

  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}
474 void* DeoptimizationBlob::operator new(size_t s, unsigned size) {
475 void* p = CodeCache::allocate(size);
476 if (!p) fatal("Initial size of CodeCache is too small");
477 return p;
478 }
480 //----------------------------------------------------------------------------------------------------
481 // Implementation of UncommonTrapBlob
483 #ifdef COMPILER2
// Constructs the uncommon-trap blob; all work is done by SingletonBlob.
UncommonTrapBlob::UncommonTrapBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         frame_size
)
: SingletonBlob("UncommonTrapBlob", cb, sizeof(UncommonTrapBlob), size, frame_size, oop_maps)
{}
// Allocates the (singleton, C2-only) UncommonTrapBlob and registers it
// with the tooling interfaces. Lock is held only around allocation;
// operator new fatals on exhaustion, so the NULL check is defensive.
UncommonTrapBlob* UncommonTrapBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int         frame_size)
{
  UncommonTrapBlob* blob = NULL;
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    unsigned int size = allocation_size(cb, sizeof(UncommonTrapBlob));
    blob = new (size) UncommonTrapBlob(cb, size, oop_maps, frame_size);
  }

  // Do not hold the CodeCache lock during name formatting.
  if (blob != NULL) {
    char blob_id[256];
    jio_snprintf(blob_id, sizeof(blob_id), "UncommonTrapBlob@" PTR_FORMAT, blob->instructions_begin());
    if (PrintStubCode) {
      tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob);
      Disassembler::decode(blob->instructions_begin(), blob->instructions_end());
    }
    VTune::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end());
    Forte::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end());

    if (JvmtiExport::should_post_dynamic_code_generated()) {
      JvmtiExport::post_dynamic_code_generated("UncommonTrapBlob",
                                               blob->instructions_begin(),
                                               blob->instructions_end());
    }
  }

  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}
532 void* UncommonTrapBlob::operator new(size_t s, unsigned size) {
533 void* p = CodeCache::allocate(size);
534 if (!p) fatal("Initial size of CodeCache is too small");
535 return p;
536 }
537 #endif // COMPILER2
540 //----------------------------------------------------------------------------------------------------
541 // Implementation of ExceptionBlob
543 #ifdef COMPILER2
// Constructs the exception blob; all work is done by SingletonBlob.
ExceptionBlob::ExceptionBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         frame_size
)
: SingletonBlob("ExceptionBlob", cb, sizeof(ExceptionBlob), size, frame_size, oop_maps)
{}
// Allocates the (singleton, C2-only) ExceptionBlob and registers it with
// the tooling interfaces. Lock is held only around allocation; operator
// new fatals on exhaustion, so the NULL check is defensive.
ExceptionBlob* ExceptionBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int         frame_size)
{
  ExceptionBlob* blob = NULL;
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    unsigned int size = allocation_size(cb, sizeof(ExceptionBlob));
    blob = new (size) ExceptionBlob(cb, size, oop_maps, frame_size);
  }

  // We do not need to hold the CodeCache lock during name formatting
  if (blob != NULL) {
    char blob_id[256];
    jio_snprintf(blob_id, sizeof(blob_id), "ExceptionBlob@" PTR_FORMAT, blob->instructions_begin());
    if (PrintStubCode) {
      tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob);
      Disassembler::decode(blob->instructions_begin(), blob->instructions_end());
    }
    VTune::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end());
    Forte::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end());

    if (JvmtiExport::should_post_dynamic_code_generated()) {
      JvmtiExport::post_dynamic_code_generated("ExceptionBlob",
                                               blob->instructions_begin(),
                                               blob->instructions_end());
    }
  }

  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}
592 void* ExceptionBlob::operator new(size_t s, unsigned size) {
593 void* p = CodeCache::allocate(size);
594 if (!p) fatal("Initial size of CodeCache is too small");
595 return p;
596 }
597 #endif // COMPILER2
600 //----------------------------------------------------------------------------------------------------
601 // Implementation of SafepointBlob
// Constructs the safepoint blob; all work is done by SingletonBlob.
SafepointBlob::SafepointBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         frame_size
)
: SingletonBlob("SafepointBlob", cb, sizeof(SafepointBlob), size, frame_size, oop_maps)
{}
// Allocates the (singleton) SafepointBlob and registers it with the
// tooling interfaces. Lock is held only around allocation; operator new
// fatals on exhaustion, so the NULL check is defensive.
SafepointBlob* SafepointBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int         frame_size)
{
  SafepointBlob* blob = NULL;
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    unsigned int size = allocation_size(cb, sizeof(SafepointBlob));
    blob = new (size) SafepointBlob(cb, size, oop_maps, frame_size);
  }

  // We do not need to hold the CodeCache lock during name formatting.
  if (blob != NULL) {
    char blob_id[256];
    jio_snprintf(blob_id, sizeof(blob_id), "SafepointBlob@" PTR_FORMAT, blob->instructions_begin());
    if (PrintStubCode) {
      tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob);
      Disassembler::decode(blob->instructions_begin(), blob->instructions_end());
    }
    VTune::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end());
    Forte::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end());

    if (JvmtiExport::should_post_dynamic_code_generated()) {
      JvmtiExport::post_dynamic_code_generated("SafepointBlob",
                                               blob->instructions_begin(),
                                               blob->instructions_end());
    }
  }

  // Track memory usage statistic after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}
651 void* SafepointBlob::operator new(size_t s, unsigned size) {
652 void* p = CodeCache::allocate(size);
653 if (!p) fatal("Initial size of CodeCache is too small");
654 return p;
655 }
658 //----------------------------------------------------------------------------------------------------
659 // Verification and printing
// A plain CodeBlob is never verified directly; subclasses are expected
// to provide their own verify(). Reaching here indicates a missing override.
void CodeBlob::verify() {
  ShouldNotReachHere();
}
665 #ifndef PRODUCT
// Minimal debug dump: the blob's address and its frame size.
void CodeBlob::print() const {
  tty->print_cr("[CodeBlob (" INTPTR_FORMAT ")]", this);
  tty->print_cr("Framesize: %d", _frame_size);
}
// One-line value dump; subclasses print something more descriptive.
void CodeBlob::print_value_on(outputStream* st) const {
  st->print_cr("[CodeBlob]");
}
677 #endif
// Nothing to check for a raw buffer blob.
void BufferBlob::verify() {
  // unimplemented
}
683 #ifndef PRODUCT
// Debug dump: base-class info plus this blob's value line.
void BufferBlob::print() const {
  CodeBlob::print();
  print_value_on(tty);
}
// One-line value dump: address and the buffer's registered name.
void BufferBlob::print_value_on(outputStream* st) const {
  st->print_cr("BufferBlob (" INTPTR_FORMAT  ") used for %s", this, name());
}
696 #endif
// Nothing to check for a runtime stub.
void RuntimeStub::verify() {
  // unimplemented
}
702 #ifndef PRODUCT
704 void RuntimeStub::print() const {
705 CodeBlob::print();
706 tty->print("Runtime Stub (" INTPTR_FORMAT "): ", this);
707 tty->print_cr(name());
708 Disassembler::decode((CodeBlob*)this);
709 }
712 void RuntimeStub::print_value_on(outputStream* st) const {
713 st->print("RuntimeStub (" INTPTR_FORMAT "): ", this); st->print(name());
714 }
716 #endif
// Nothing to check for singleton blobs.
void SingletonBlob::verify() {
  // unimplemented
}
722 #ifndef PRODUCT
724 void SingletonBlob::print() const {
725 CodeBlob::print();
726 tty->print_cr(name());
727 Disassembler::decode((CodeBlob*)this);
728 }
731 void SingletonBlob::print_value_on(outputStream* st) const {
732 st->print_cr(name());
733 }
// One-line value dump; deopt frames are synthetic, hence the note.
void DeoptimizationBlob::print_value_on(outputStream* st) const {
  st->print_cr("Deoptimization (frame not available)");
}
739 #endif // PRODUCT