Tue, 24 Jul 2012 10:51:00 -0700
7023639: JSR 292 method handle invocation needs a fast path for compiled code
6984705: JSR 292 method handle creation should not go through JNI
Summary: remove assembly code for JDK 7 chained method handles
Reviewed-by: jrose, twisti, kvn, mhaupt
Contributed-by: John Rose <john.r.rose@oracle.com>, Christian Thalinger <christian.thalinger@oracle.com>, Michael Haupt <michael.haupt@oracle.com>
/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/codeBuffer.hpp"
#include "compiler/disassembler.hpp"
#include "utilities/copy.hpp"
#include "utilities/xmlstream.hpp"

// The structure of a CodeSection:
//
//    _start ->           +----------------+
//                        | machine code...|
//    _end ->             |----------------|
//                        |                |
//                        |    (empty)     |
//                        |                |
//                        |                |
//                        +----------------+
//    _limit ->           |                |
//
//    _locs_start ->      +----------------+
//                        |reloc records...|
//                        |----------------|
//    _locs_end ->        |                |
//                        |                |
//                        |    (empty)     |
//                        |                |
//                        |                |
//                        +----------------+
//    _locs_limit ->      |                |
// The _end (resp. _limit) pointer refers to the first
// unused (resp. unallocated) byte.
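
// Illustrative sketch, not part of the original sources: the diagram above
// amounts to the invariants _start <= _end <= _limit (and likewise for the
// _locs_* pointers).  A client emits code by advancing _end, e.g. (assuming
// the CodeSection accessors declared in codeBuffer.hpp):
//
//   CodeSection* cs = cb.insts();
//   address p = cs->end();        // first unused byte
//   *p = (u_char) 0x90;           // write one byte of machine code
//   cs->set_end(p + 1);           // _end moves toward _limit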
// The structure of the CodeBuffer while code is being accumulated:
//
//    _total_start ->    \
//    _insts._start ->    +----------------+
//                        |                |
//                        |     Code       |
//                        |                |
//    _stubs._start ->    |----------------|
//                        |                |
//                        |    Stubs       | (also handlers for deopt/exception)
//                        |                |
//    _consts._start ->   |----------------|
//                        |                |
//                        |   Constants    |
//                        |                |
//                        +----------------+
//    + _total_size ->    |                |
//
// When the code and relocations are copied to the code cache,
// the empty parts of each section are removed, and everything
// is copied into contiguous locations.
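
// Illustrative lifecycle sketch, not part of the original sources (the
// driver code is an assumption, but BufferBlob::create(const char*,
// CodeBuffer*) and copy_code_to() are the entry points used below):
//
//   CodeBuffer cb("my_stub", 4*K, 512);        // sections carved from one blob
//   ...emit into cb.insts(), cb.stubs(), cb.consts()...
//   BufferBlob* blob = BufferBlob::create("my_stub", &cb);
//   // create() calls back into cb.copy_code_to(blob), which squeezes out
//   // the empty space between sections shown in the diagram above.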
typedef CodeBuffer::csize_t csize_t;  // file-local definition

// External buffer, in a predefined CodeBlob.
// Important: The code_start must be taken exactly, and not realigned.
CodeBuffer::CodeBuffer(CodeBlob* blob) {
  initialize_misc("static buffer");
  initialize(blob->content_begin(), blob->content_size());
  verify_section_allocation();
}

void CodeBuffer::initialize(csize_t code_size, csize_t locs_size) {
  // Compute maximal alignment.
  int align = _insts.alignment();
  // Always allow for empty slop around each section.
  int slop = (int) CodeSection::end_slop();

  assert(blob() == NULL, "only once");
  set_blob(BufferBlob::create(_name, code_size + (align+slop) * (SECT_LIMIT+1)));
  if (blob() == NULL) {
    // The assembler constructor will throw a fatal on an empty CodeBuffer.
    return;  // caller must test this
  }

  // Set up various pointers into the blob.
  initialize(_total_start, _total_size);

  assert((uintptr_t)insts_begin() % CodeEntryAlignment == 0, "instruction start not code entry aligned");

  pd_initialize();

  if (locs_size != 0) {
    _insts.initialize_locs(locs_size / sizeof(relocInfo));
  }

  verify_section_allocation();
}

CodeBuffer::~CodeBuffer() {
  verify_section_allocation();

  // If we allocate our code buffer from the CodeCache
  // via a BufferBlob, and it's not permanent, then
  // free the BufferBlob.
  // The rest of the memory will be freed when the ResourceObj
  // is released.
  for (CodeBuffer* cb = this; cb != NULL; cb = cb->before_expand()) {
    // Previous incarnations of this buffer are held live, so that internal
    // addresses constructed before expansions will not be confused.
    cb->free_blob();
  }

  // free any overflow storage
  delete _overflow_arena;

#ifdef ASSERT
  // Save allocation type to execute assert in ~ResourceObj()
  // which is called after this destructor.
  assert(_default_oop_recorder.allocated_on_stack(), "should be embedded object");
  ResourceObj::allocation_type at = _default_oop_recorder.get_allocation_type();
  Copy::fill_to_bytes(this, sizeof(*this), badResourceValue);
  ResourceObj::set_allocation_type((address)(&_default_oop_recorder), at);
#endif
}

void CodeBuffer::initialize_oop_recorder(OopRecorder* r) {
  assert(_oop_recorder == &_default_oop_recorder && _default_oop_recorder.is_unused(), "do this once");
  DEBUG_ONLY(_default_oop_recorder.oop_size());  // force unused OR to be frozen
  _oop_recorder = r;
}

void CodeBuffer::initialize_section_size(CodeSection* cs, csize_t size) {
  assert(cs != &_insts, "insts is the memory provider, not the consumer");
  csize_t slop = CodeSection::end_slop();  // margin between sections
  int align = cs->alignment();
  assert(is_power_of_2(align), "sanity");
  address start  = _insts._start;
  address limit  = _insts._limit;
  address middle = limit - size;
  middle -= (intptr_t)middle & (align-1);  // align the division point downward
  guarantee(middle - slop > start, "need enough space to divide up");
  _insts._limit = middle - slop;  // subtract desired space, plus slop
  cs->initialize(middle, limit - middle);
  assert(cs->start() == middle, "sanity");
  assert(cs->limit() == limit,  "sanity");
  // give it some relocations to start with, if the main section has them
  if (_insts.has_locs())  cs->initialize_locs(1);
}
void CodeBuffer::freeze_section(CodeSection* cs) {
  CodeSection* next_cs = (cs == consts())? NULL: code_section(cs->index()+1);
  csize_t frozen_size = cs->size();
  if (next_cs != NULL) {
    frozen_size = next_cs->align_at_start(frozen_size);
  }
  address old_limit = cs->limit();
  address new_limit = cs->start() + frozen_size;
  relocInfo* old_locs_limit = cs->locs_limit();
  relocInfo* new_locs_limit = cs->locs_end();
  // Patch the limits.
  cs->_limit = new_limit;
  cs->_locs_limit = new_locs_limit;
  cs->_frozen = true;
  if (next_cs != NULL && !next_cs->is_allocated() && !next_cs->is_frozen()) {
    // Give remaining buffer space to the following section.
    next_cs->initialize(new_limit, old_limit - new_limit);
    next_cs->initialize_shared_locs(new_locs_limit,
                                    old_locs_limit - new_locs_limit);
  }
}
void CodeBuffer::set_blob(BufferBlob* blob) {
  _blob = blob;
  if (blob != NULL) {
    address start = blob->content_begin();
    address end   = blob->content_end();
    // Round up the starting address.
    int align = _insts.alignment();
    start += (-(intptr_t)start) & (align-1);
    _total_start = start;
    _total_size  = end - start;
  } else {
#ifdef ASSERT
    // Clean out dangling pointers.
    _total_start    = badAddress;
    _consts._start  = _consts._end  = badAddress;
    _insts._start   = _insts._end   = badAddress;
    _stubs._start   = _stubs._end   = badAddress;
#endif //ASSERT
  }
}

void CodeBuffer::free_blob() {
  if (_blob != NULL) {
    BufferBlob::free(_blob);
    set_blob(NULL);
  }
}

const char* CodeBuffer::code_section_name(int n) {
#ifdef PRODUCT
  return NULL;
#else //PRODUCT
  switch (n) {
  case SECT_CONSTS: return "consts";
  case SECT_INSTS:  return "insts";
  case SECT_STUBS:  return "stubs";
  default:          return NULL;
  }
#endif //PRODUCT
}

int CodeBuffer::section_index_of(address addr) const {
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    const CodeSection* cs = code_section(n);
    if (cs->allocates(addr))  return n;
  }
  return SECT_NONE;
}

int CodeBuffer::locator(address addr) const {
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    const CodeSection* cs = code_section(n);
    if (cs->allocates(addr)) {
      return locator(addr - cs->start(), n);
    }
  }
  return -1;
}

address CodeBuffer::locator_address(int locator) const {
  if (locator < 0)  return NULL;
  address start = code_section(locator_sect(locator))->start();
  return start + locator_pos(locator);
}
address CodeBuffer::decode_begin() {
  address begin = _insts.start();
  if (_decode_begin != NULL && _decode_begin > begin)
    begin = _decode_begin;
  return begin;
}

GrowableArray<int>* CodeBuffer::create_patch_overflow() {
  if (_overflow_arena == NULL) {
    _overflow_arena = new (mtCode) Arena();
  }
  return new (_overflow_arena) GrowableArray<int>(_overflow_arena, 8, 0, 0);
}

// Helper function for managing labels and their target addresses.
// Returns a sensible address, and if it is not the label's final
// address, notes the dependency (at 'branch_pc') on the label.
address CodeSection::target(Label& L, address branch_pc) {
  if (L.is_bound()) {
    int loc = L.loc();
    if (index() == CodeBuffer::locator_sect(loc)) {
      return start() + CodeBuffer::locator_pos(loc);
    } else {
      return outer()->locator_address(loc);
    }
  } else {
    assert(allocates2(branch_pc), "sanity");
    address base = start();
    int patch_loc = CodeBuffer::locator(branch_pc - base, index());
    L.add_patch_at(outer(), patch_loc);

    // Need to return a pc, doesn't matter what it is since it will be
    // replaced during resolution later.
    // Don't return NULL or badAddress, since branches shouldn't overflow.
    // Don't return base either because that could overflow displacements
    // for shorter branches.  It will get checked when bound.
    return branch_pc;
  }
}
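
// Illustrative usage sketch, not part of the original sources ('masm' and
// the x86-style jcc/bind calls are assumptions about a typical client):
//
//   Label L;
//   masm->jcc(Assembler::notZero, L);  // L unbound: target() records a
//                                      // patch locator at this branch_pc
//   ...
//   masm->bind(L);                     // binding walks the recorded patch
//                                      // locators and fixes each branch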
void CodeSection::relocate(address at, RelocationHolder const& spec, int format) {
  Relocation* reloc = spec.reloc();
  relocInfo::relocType rtype = (relocInfo::relocType) reloc->type();
  if (rtype == relocInfo::none)  return;

  // The assertion below has been adjusted, to also work for
  // relocation for fixup.  Sometimes we want to put relocation
  // information for the next instruction, since it will be patched
  // with a call.
  assert(start() <= at && at <= end()+1,
         "cannot relocate data outside code boundaries");

  if (!has_locs()) {
    // no space for relocation information provided => code cannot be
    // relocated.  Make sure that relocate is only called with rtypes
    // that can be ignored for this kind of code.
    assert(rtype == relocInfo::none ||
           rtype == relocInfo::runtime_call_type ||
           rtype == relocInfo::internal_word_type ||
           rtype == relocInfo::section_word_type ||
           rtype == relocInfo::external_word_type,
           "code needs relocation information");
    // leave behind an indication that we attempted a relocation
    DEBUG_ONLY(_locs_start = _locs_limit = (relocInfo*)badAddress);
    return;
  }

  // Advance the point, noting the offset we'll have to record.
  csize_t offset = at - locs_point();
  set_locs_point(at);

  // Test for a couple of overflow conditions; maybe expand the buffer.
  relocInfo* end = locs_end();
  relocInfo* req = end + relocInfo::length_limit;
  // Check for (potential) overflow
  if (req >= locs_limit() || offset >= relocInfo::offset_limit()) {
    req += (uint)offset / (uint)relocInfo::offset_limit();
    if (req >= locs_limit()) {
      // Allocate or reallocate.
      expand_locs(locs_count() + (req - end));
      // reload pointer
      end = locs_end();
    }
  }

  // If the offset is giant, emit filler relocs, of type 'none', but
  // each carrying the largest possible offset, to advance the locs_point.
  while (offset >= relocInfo::offset_limit()) {
    assert(end < locs_limit(), "adjust previous paragraph of code");
    *end++ = filler_relocInfo();
    offset -= filler_relocInfo().addr_offset();
  }

  // If it's a simple reloc with no data, we'll just write (rtype | offset).
  (*end) = relocInfo(rtype, offset, format);

  // If it has data, insert the prefix, as (data_prefix_tag | data1), data2.
  end->initialize(this, reloc);
}
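
// Illustrative sketch, not part of the original sources: a typical client
// records a relocation at the address it is about to emit to, e.g. before
// emitting a call into the runtime (the emission step itself is elided):
//
//   cs->relocate(cs->end(), runtime_call_Relocation::spec());
//   ...emit the call instruction at cs->end()...
//
// The offset stored in each relocInfo is relative to the previous
// locs_point, which is why set_locs_point(at) above advances it.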
void CodeSection::initialize_locs(int locs_capacity) {
  assert(_locs_start == NULL, "only one locs init step, please");
  // Apply a priori lower limits to relocation size:
  csize_t min_locs = MAX2(size() / 16, (csize_t)4);
  if (locs_capacity < min_locs)  locs_capacity = min_locs;
  relocInfo* locs_start = NEW_RESOURCE_ARRAY(relocInfo, locs_capacity);
  _locs_start = locs_start;
  _locs_end   = locs_start;
  _locs_limit = locs_start + locs_capacity;
  _locs_own   = true;
}

void CodeSection::initialize_shared_locs(relocInfo* buf, int length) {
  assert(_locs_start == NULL, "do this before locs are allocated");
  // Internal invariant: locs buf must be fully aligned.
  // See copy_relocations_to() below.
  while ((uintptr_t)buf % HeapWordSize != 0 && length > 0) {
    ++buf; --length;
  }
  if (length > 0) {
    _locs_start = buf;
    _locs_end   = buf;
    _locs_limit = buf + length;
    _locs_own   = false;
  }
}

void CodeSection::initialize_locs_from(const CodeSection* source_cs) {
  int lcount = source_cs->locs_count();
  if (lcount != 0) {
    initialize_shared_locs(source_cs->locs_start(), lcount);
    _locs_end = _locs_limit = _locs_start + lcount;
    assert(is_allocated(), "must have copied code already");
    set_locs_point(start() + source_cs->locs_point_off());
  }
  assert(this->locs_count() == source_cs->locs_count(), "sanity");
}

void CodeSection::expand_locs(int new_capacity) {
  if (_locs_start == NULL) {
    initialize_locs(new_capacity);
    return;
  } else {
    int old_count    = locs_count();
    int old_capacity = locs_capacity();
    if (new_capacity < old_capacity * 2)
      new_capacity = old_capacity * 2;
    relocInfo* locs_start;
    if (_locs_own) {
      locs_start = REALLOC_RESOURCE_ARRAY(relocInfo, _locs_start, old_capacity, new_capacity);
    } else {
      locs_start = NEW_RESOURCE_ARRAY(relocInfo, new_capacity);
      Copy::conjoint_jbytes(_locs_start, locs_start, old_capacity * sizeof(relocInfo));
      _locs_own = true;
    }
    _locs_start = locs_start;
    _locs_end   = locs_start + old_count;
    _locs_limit = locs_start + new_capacity;
  }
}
/// Support for emitting the code to its final location.
/// The pattern is the same for all functions.
/// We iterate over all the sections, padding each to alignment.

csize_t CodeBuffer::total_content_size() const {
  csize_t size_so_far = 0;
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    const CodeSection* cs = code_section(n);
    if (cs->is_empty())  continue;  // skip trivial section
    size_so_far = cs->align_at_start(size_so_far);
    size_so_far += cs->size();
  }
  return size_so_far;
}

void CodeBuffer::compute_final_layout(CodeBuffer* dest) const {
  address buf = dest->_total_start;
  csize_t buf_offset = 0;
  assert(dest->_total_size >= total_content_size(), "must be big enough");

  {
    // not sure why this is here, but why not...
    int alignSize = MAX2((intx) sizeof(jdouble), CodeEntryAlignment);
    assert( (dest->_total_start - _insts.start()) % alignSize == 0, "copy must preserve alignment");
  }

  const CodeSection* prev_cs      = NULL;
  CodeSection*       prev_dest_cs = NULL;

  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    // figure compact layout of each section
    const CodeSection* cs = code_section(n);
    csize_t csize = cs->size();

    CodeSection* dest_cs = dest->code_section(n);
    if (!cs->is_empty()) {
      // Compute initial padding; assign it to the previous non-empty guy.
      // Cf. figure_expanded_capacities.
      csize_t padding = cs->align_at_start(buf_offset) - buf_offset;
      if (padding != 0) {
        buf_offset += padding;
        assert(prev_dest_cs != NULL, "sanity");
        prev_dest_cs->_limit += padding;
      }
#ifdef ASSERT
      if (prev_cs != NULL && prev_cs->is_frozen() && n < (SECT_LIMIT - 1)) {
        // Make sure the ends still match up.
        // This is important because a branch in a frozen section
        // might target code in a following section, via a Label,
        // and without a relocation record.  See Label::patch_instructions.
        address dest_start = buf+buf_offset;
        csize_t start2start = cs->start() - prev_cs->start();
        csize_t dest_start2start = dest_start - prev_dest_cs->start();
        assert(start2start == dest_start2start, "cannot stretch frozen sect");
      }
#endif //ASSERT
      prev_dest_cs = dest_cs;
      prev_cs      = cs;
    }

    debug_only(dest_cs->_start = NULL);  // defeat double-initialization assert
    dest_cs->initialize(buf+buf_offset, csize);
    dest_cs->set_end(buf+buf_offset+csize);
    assert(dest_cs->is_allocated(), "must always be allocated");
    assert(cs->is_empty() == dest_cs->is_empty(), "sanity");

    buf_offset += csize;
  }

  // Done calculating sections; did it come out to the right end?
  assert(buf_offset == total_content_size(), "sanity");
  dest->verify_section_allocation();
}

csize_t CodeBuffer::total_offset_of(CodeSection* cs) const {
  csize_t size_so_far = 0;
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    const CodeSection* cur_cs = code_section(n);
    if (!cur_cs->is_empty()) {
      size_so_far = cur_cs->align_at_start(size_so_far);
    }
    if (cur_cs->index() == cs->index()) {
      return size_so_far;
    }
    size_so_far += cur_cs->size();
  }
  ShouldNotReachHere();
  return -1;
}
csize_t CodeBuffer::total_relocation_size() const {
  csize_t lsize = copy_relocations_to(NULL);  // dry run only
  csize_t csize = total_content_size();
  csize_t total = RelocIterator::locs_and_index_size(csize, lsize);
  return (csize_t) align_size_up(total, HeapWordSize);
}

csize_t CodeBuffer::copy_relocations_to(CodeBlob* dest) const {
  address buf = NULL;
  csize_t buf_offset = 0;
  csize_t buf_limit = 0;
  if (dest != NULL) {
    buf = (address)dest->relocation_begin();
    buf_limit = (address)dest->relocation_end() - buf;
    assert((uintptr_t)buf % HeapWordSize == 0, "buf must be fully aligned");
    assert(buf_limit % HeapWordSize == 0, "buf must be evenly sized");
  }
  // if dest == NULL, this is just the sizing pass

  csize_t code_end_so_far = 0;
  csize_t code_point_so_far = 0;
  for (int n = (int) SECT_FIRST; n < (int)SECT_LIMIT; n++) {
    // pull relocs out of each section
    const CodeSection* cs = code_section(n);
    assert(!(cs->is_empty() && cs->locs_count() > 0), "sanity");
    if (cs->is_empty())  continue;  // skip trivial section
    relocInfo* lstart = cs->locs_start();
    relocInfo* lend   = cs->locs_end();
    csize_t    lsize  = (csize_t)( (address)lend - (address)lstart );
    csize_t    csize  = cs->size();
    code_end_so_far = cs->align_at_start(code_end_so_far);

    if (lsize > 0) {
      // Figure out how to advance the combined relocation point
      // first to the beginning of this section.
      // We'll insert one or more filler relocs to span that gap.
      // (Don't bother to improve this by editing the first reloc's offset.)
      csize_t new_code_point = code_end_so_far;
      for (csize_t jump;
           code_point_so_far < new_code_point;
           code_point_so_far += jump) {
        jump = new_code_point - code_point_so_far;
        relocInfo filler = filler_relocInfo();
        if (jump >= filler.addr_offset()) {
          jump = filler.addr_offset();
        } else {  // else shrink the filler to fit
          filler = relocInfo(relocInfo::none, jump);
        }
        if (buf != NULL) {
          assert(buf_offset + (csize_t)sizeof(filler) <= buf_limit, "filler in bounds");
          *(relocInfo*)(buf+buf_offset) = filler;
        }
        buf_offset += sizeof(filler);
      }

      // Update code point and end to skip past this section:
      csize_t last_code_point = code_end_so_far + cs->locs_point_off();
      assert(code_point_so_far <= last_code_point, "sanity");
      code_point_so_far = last_code_point;  // advance past this guy's relocs
    }
    code_end_so_far += csize;  // advance past this guy's instructions too

    // Done with filler; emit the real relocations:
    if (buf != NULL && lsize != 0) {
      assert(buf_offset + lsize <= buf_limit, "target in bounds");
      assert((uintptr_t)lstart % HeapWordSize == 0, "sane start");
      if (buf_offset % HeapWordSize == 0) {
        // Use wordwise copies if possible:
        Copy::disjoint_words((HeapWord*)lstart,
                             (HeapWord*)(buf+buf_offset),
                             (lsize + HeapWordSize-1) / HeapWordSize);
      } else {
        Copy::conjoint_jbytes(lstart, buf+buf_offset, lsize);
      }
    }
    buf_offset += lsize;
  }

  // Align end of relocation info in target.
  while (buf_offset % HeapWordSize != 0) {
    if (buf != NULL) {
      relocInfo padding = relocInfo(relocInfo::none, 0);
      assert(buf_offset + (csize_t)sizeof(padding) <= buf_limit, "padding in bounds");
      *(relocInfo*)(buf+buf_offset) = padding;
    }
    buf_offset += sizeof(relocInfo);
  }

  assert(code_end_so_far == total_content_size(), "sanity");

  // Account for index:
  if (buf != NULL) {
    RelocIterator::create_index(dest->relocation_begin(),
                                buf_offset / sizeof(relocInfo),
                                dest->relocation_end());
  }

  return buf_offset;
}
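
// Illustrative note, not part of the original sources: the sizing pass and
// the copying pass share this one function, so a caller can measure first
// and copy second:
//
//   csize_t lsize = cb.copy_relocations_to(NULL);  // dry run: count bytes
//   ...allocate a CodeBlob with at least that much relocation space...
//   cb.copy_relocations_to(blob);                  // real pass: write them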
void CodeBuffer::copy_code_to(CodeBlob* dest_blob) {
#ifndef PRODUCT
  if (PrintNMethods && (WizardMode || Verbose)) {
    tty->print("done with CodeBuffer:");
    ((CodeBuffer*)this)->print();
  }
#endif //PRODUCT

  CodeBuffer dest(dest_blob);
  assert(dest_blob->content_size() >= total_content_size(), "good sizing");
  this->compute_final_layout(&dest);
  relocate_code_to(&dest);

  // transfer comments from buffer to blob
  dest_blob->set_comments(_comments);

  // Done moving code bytes; were they the right size?
  assert(round_to(dest.total_content_size(), oopSize) == dest_blob->content_size(), "sanity");

  // Flush generated code
  ICache::invalidate_range(dest_blob->code_begin(), dest_blob->code_size());
}
// Move all my code into another code buffer.  Consult applicable
// relocs to repair embedded addresses.  The layout in the destination
// CodeBuffer is different from the source CodeBuffer: the destination
// CodeBuffer gets the final layout (consts, insts, stubs in order of
// ascending address).
void CodeBuffer::relocate_code_to(CodeBuffer* dest) const {
  address dest_end = dest->_total_start + dest->_total_size;
  address dest_filled = NULL;
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    // pull code out of each section
    const CodeSection* cs = code_section(n);
    if (cs->is_empty())  continue;  // skip trivial section
    CodeSection* dest_cs = dest->code_section(n);
    assert(cs->size() == dest_cs->size(), "sanity");
    csize_t usize = dest_cs->size();
    csize_t wsize = align_size_up(usize, HeapWordSize);
    assert(dest_cs->start() + wsize <= dest_end, "no overflow");
    // Copy the code as aligned machine words.
    // This may also include an uninitialized partial word at the end.
    Copy::disjoint_words((HeapWord*)cs->start(),
                         (HeapWord*)dest_cs->start(),
                         wsize / HeapWordSize);

    if (dest->blob() == NULL) {
      // Destination is a final resting place, not just another buffer.
      // Normalize uninitialized bytes in the final padding.
      Copy::fill_to_bytes(dest_cs->end(), dest_cs->remaining(),
                          Assembler::code_fill_byte());
    }
    // Keep track of the highest filled address
    dest_filled = MAX2(dest_filled, dest_cs->end() + dest_cs->remaining());

    assert(cs->locs_start() != (relocInfo*)badAddress,
           "this section carries no reloc storage, but reloc was attempted");

    // Make the new code copy use the old copy's relocations:
    dest_cs->initialize_locs_from(cs);

    { // Repair the pc relative information in the code after the move
      RelocIterator iter(dest_cs);
      while (iter.next()) {
        iter.reloc()->fix_relocation_after_move(this, dest);
      }
    }
  }

  if (dest->blob() == NULL) {
    // Destination is a final resting place, not just another buffer.
    // Normalize uninitialized bytes in the final padding.
    Copy::fill_to_bytes(dest_filled, dest_end - dest_filled,
                        Assembler::code_fill_byte());
  }
}
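
// Illustrative note, not part of the original sources: the per-reloc repair
// above is what keeps pc-relative instructions valid.  For example, a
// pc-relative call whose displacement was computed against the old section
// start is re-biased by (new address - old address) when the RelocIterator
// visits its record; embedded data addresses are handled the same way via
// internal_word/section_word relocs.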
csize_t CodeBuffer::figure_expanded_capacities(CodeSection* which_cs,
                                               csize_t amount,
                                               csize_t* new_capacity) {
  csize_t new_total_cap = 0;

  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    const CodeSection* sect = code_section(n);

    if (!sect->is_empty()) {
      // Compute initial padding; assign it to the previous section,
      // even if it's empty (e.g. consts section can be empty).
      // Cf. compute_final_layout
      csize_t padding = sect->align_at_start(new_total_cap) - new_total_cap;
      if (padding != 0) {
        new_total_cap += padding;
        assert(n - 1 >= SECT_FIRST, "sanity");
        new_capacity[n - 1] += padding;
      }
    }

    csize_t exp = sect->size();  // 100% increase
    if ((uint)exp < 4*K)  exp = 4*K;  // minimum initial increase
    if (sect == which_cs) {
      if (exp < amount)  exp = amount;
      if (StressCodeBuffers)  exp = amount;  // expand only slightly
    } else if (n == SECT_INSTS) {
      // scale down inst increases to a more modest 25%
      exp = 4*K + ((exp - 4*K) >> 2);
      if (StressCodeBuffers)  exp = amount / 2;  // expand only slightly
    } else if (sect->is_empty()) {
      // do not grow an empty secondary section
      exp = 0;
    }
    // Allow for inter-section slop:
    exp += CodeSection::end_slop();
    csize_t new_cap = sect->size() + exp;
    if (new_cap < sect->capacity()) {
      // No need to expand after all.
      new_cap = sect->capacity();
    }
    new_capacity[n] = new_cap;
    new_total_cap += new_cap;
  }

  return new_total_cap;
}
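
// Worked example, not part of the original sources: suppose the stubs
// section requests amount = 1*K while insts currently holds 32*K.  For
// insts (not the requester), exp starts at 32*K and is scaled down to
// 4*K + ((32*K - 4*K) >> 2) = 11*K, plus end_slop(); so insts is granted
// roughly 32*K + 11*K of capacity.  Stubs, as the requester, gets its
// current size plus max(4*K, amount) of growth.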
void CodeBuffer::expand(CodeSection* which_cs, csize_t amount) {
#ifndef PRODUCT
  if (PrintNMethods && (WizardMode || Verbose)) {
    tty->print("expanding CodeBuffer:");
    this->print();
  }

  if (StressCodeBuffers && blob() != NULL) {
    static int expand_count = 0;
    if (expand_count >= 0)  expand_count += 1;
    if (expand_count > 100 && is_power_of_2(expand_count)) {
      tty->print_cr("StressCodeBuffers: have expanded %d times", expand_count);
      // simulate an occasional allocation failure:
      free_blob();
    }
  }
#endif //PRODUCT

  // Resizing must be allowed
  {
    if (blob() == NULL)  return;  // caller must check for blob == NULL
    for (int n = 0; n < (int)SECT_LIMIT; n++) {
      guarantee(!code_section(n)->is_frozen(), "resizing not allowed when frozen");
    }
  }

  // Figure new capacity for each section.
  csize_t new_capacity[SECT_LIMIT];
  csize_t new_total_cap
    = figure_expanded_capacities(which_cs, amount, new_capacity);

  // Create a new (temporary) code buffer to hold all the new data
  CodeBuffer cb(name(), new_total_cap, 0);
  if (cb.blob() == NULL) {
    // Failed to allocate in code cache.
    free_blob();
    return;
  }

  // Create an old code buffer to remember which addresses used to go where.
  // This will be useful when we do final assembly into the code cache,
  // because we will need to know how to warp any internal address that
  // has been created at any time in this CodeBuffer's past.
  CodeBuffer* bxp = new CodeBuffer(_total_start, _total_size);
  bxp->take_over_code_from(this);  // remember the old undersized blob
  DEBUG_ONLY(this->_blob = NULL);  // silence a later assert
  bxp->_before_expand = this->_before_expand;
  this->_before_expand = bxp;

  // Give each section its required (expanded) capacity.
  for (int n = (int)SECT_LIMIT-1; n >= SECT_FIRST; n--) {
    CodeSection* cb_sect   = cb.code_section(n);
    CodeSection* this_sect = code_section(n);
    if (new_capacity[n] == 0)  continue;  // already nulled out
    if (n != SECT_INSTS) {
      cb.initialize_section_size(cb_sect, new_capacity[n]);
    }
    assert(cb_sect->capacity() >= new_capacity[n], "big enough");
    address cb_start = cb_sect->start();
    cb_sect->set_end(cb_start + this_sect->size());
    if (this_sect->mark() == NULL) {
      cb_sect->clear_mark();
    } else {
      cb_sect->set_mark(cb_start + this_sect->mark_off());
    }
  }

  // Move all the code and relocations to the new blob:
  relocate_code_to(&cb);

  // Copy the temporary code buffer into the current code buffer.
  // Basically, do {*this = cb}, except for some control information.
  this->take_over_code_from(&cb);
  cb.set_blob(NULL);

  // Zap the old code buffer contents, to avoid mistakenly using them.
  debug_only(Copy::fill_to_bytes(bxp->_total_start, bxp->_total_size,
                                 badCodeHeapFreeVal));

  _decode_begin = NULL;  // sanity

  // Make certain that the new sections are all snugly inside the new blob.
  verify_section_allocation();

#ifndef PRODUCT
  if (PrintNMethods && (WizardMode || Verbose)) {
    tty->print("expanded CodeBuffer:");
    this->print();
  }
#endif //PRODUCT
}
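
// Illustrative note, not part of the original sources: after expand(),
// 'this' owns the new, larger blob, while the old storage lives on in the
// _before_expand chain.  That chain is why the destructor above frees each
// incarnation in turn, and it lets addresses minted against the old buffer
// be translated when the code is finally assembled into the code cache.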
void CodeBuffer::take_over_code_from(CodeBuffer* cb) {
  // Must already have disposed of the old blob somehow.
  assert(blob() == NULL, "must be empty");
  // Take the new blob away from cb.
  set_blob(cb->blob());
  // Take over all the section pointers.
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    CodeSection* cb_sect   = cb->code_section(n);
    CodeSection* this_sect = code_section(n);
    this_sect->take_over_code_from(cb_sect);
  }
  _overflow_arena = cb->_overflow_arena;
  // Make sure the old cb won't try to use it or free it.
  DEBUG_ONLY(cb->_blob = (BufferBlob*)badAddress);
}
void CodeBuffer::verify_section_allocation() {
  address tstart = _total_start;
  if (tstart == badAddress)  return;  // smashed by set_blob(NULL)
  address tend   = tstart + _total_size;
  if (_blob != NULL) {
    guarantee(tstart >= _blob->content_begin(), "sanity");
    guarantee(tend   <= _blob->content_end(),   "sanity");
  }
  // Verify disjointness.
  for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
    CodeSection* sect = code_section(n);
    if (!sect->is_allocated() || sect->is_empty())  continue;
    guarantee((intptr_t)sect->start() % sect->alignment() == 0
           || sect->is_empty() || _blob == NULL,
           "start is aligned");
    for (int m = (int) SECT_FIRST; m < (int) SECT_LIMIT; m++) {
      CodeSection* other = code_section(m);
      if (!other->is_allocated() || other == sect)  continue;
      guarantee(!other->contains(sect->start()    ), "sanity");
      // limit is an exclusive address and can be the start of another
      // section.
      guarantee(!other->contains(sect->limit() - 1), "sanity");
    }
    guarantee(sect->end() <= tend, "sanity");
    guarantee(sect->end() <= sect->limit(), "sanity");
  }
}

void CodeBuffer::log_section_sizes(const char* name) {
  if (xtty != NULL) {
    // log info about buffer usage
    xtty->print_cr("<blob name='%s' size='%d'>", name, _total_size);
    for (int n = (int) CodeBuffer::SECT_FIRST; n < (int) CodeBuffer::SECT_LIMIT; n++) {
      CodeSection* sect = code_section(n);
      if (!sect->is_allocated() || sect->is_empty())  continue;
      xtty->print_cr("<sect index='%d' size='" SIZE_FORMAT "' free='" SIZE_FORMAT "'/>",
                     n, sect->limit() - sect->start(), sect->limit() - sect->end());
    }
    xtty->print_cr("</blob>");
  }
}
#ifndef PRODUCT

void CodeSection::dump() {
  address ptr = start();
  for (csize_t step; ptr < end(); ptr += step) {
    step = end() - ptr;
    if (step > jintSize * 4)  step = jintSize * 4;
    tty->print(PTR_FORMAT ": ", ptr);
    while (step > 0) {
      tty->print(" " PTR32_FORMAT, *(jint*)ptr);
      ptr  += jintSize;
      step -= jintSize;  // consume the bytes just printed
    }
    tty->cr();
  }
}
void CodeSection::decode() {
  Disassembler::decode(start(), end());
}

void CodeBuffer::block_comment(intptr_t offset, const char * comment) {
  _comments.add_comment(offset, comment);
}

class CodeComment: public CHeapObj<mtCode> {
 private:
  friend class CodeComments;
  intptr_t     _offset;
  const char * _comment;
  CodeComment* _next;

  ~CodeComment() {
    assert(_next == NULL, "wrong interface for freeing list");
    os::free((void*)_comment, mtCode);
  }

 public:
  CodeComment(intptr_t offset, const char * comment) {
    _offset  = offset;
    _comment = os::strdup(comment, mtCode);
    _next    = NULL;
  }

  intptr_t     offset()  const { return _offset;  }
  const char * comment() const { return _comment; }
  CodeComment* next()          { return _next;    }

  void set_next(CodeComment* next) { _next = next; }

  CodeComment* find(intptr_t offset) {
    CodeComment* a = this;
    while (a != NULL && a->_offset != offset) {
      a = a->_next;
    }
    return a;
  }
};
void CodeComments::add_comment(intptr_t offset, const char * comment) {
  CodeComment* c = new CodeComment(offset, comment);
  CodeComment* insert = NULL;
  if (_comments != NULL) {
    // Find the last existing comment with the same offset, if any.
    CodeComment* cur = _comments->find(offset);
    insert = cur;
    while (cur != NULL && cur->offset() == offset) {
      insert = cur;
      cur = cur->next();
    }
  }
  if (insert != NULL) {
    // insert after comments with same offset
    c->set_next(insert->next());
    insert->set_next(c);
  } else {
    c->set_next(_comments);
    _comments = c;
  }
}
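
// Illustrative usage sketch, not part of the original sources (the
// assembler's block_comment entry point is the usual client, but the exact
// call is an assumption):
//
//   masm->block_comment("slow path");  // records (current offset, text)
//
// Comments sharing an offset are kept in insertion order, so several notes
// can annotate one instruction and print in the order they were added.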
void CodeComments::assign(CodeComments& other) {
  assert(_comments == NULL, "don't overwrite old value");
  _comments = other._comments;
}

void CodeComments::print_block_comment(outputStream* stream, intptr_t offset) {
  if (_comments != NULL) {
    CodeComment* c = _comments->find(offset);
    while (c && c->offset() == offset) {
      stream->bol();
      stream->print("  ;; ");
      stream->print_cr(c->comment());
      c = c->next();
    }
  }
}

void CodeComments::free() {
  CodeComment* n = _comments;
  while (n) {
    // unlink the node from the list saving a pointer to the next
    CodeComment* p = n->_next;
    n->_next = NULL;
    delete n;
    n = p;
  }
  _comments = NULL;
}

void CodeBuffer::decode() {
  Disassembler::decode(decode_begin(), insts_end());
  _decode_begin = insts_end();
}

void CodeBuffer::skip_decode() {
  _decode_begin = insts_end();
}

void CodeBuffer::decode_all() {
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    // dump contents of each section
    CodeSection* cs = code_section(n);
    tty->print_cr("! %s:", code_section_name(n));
    if (cs != consts())
      cs->decode();
    else
      cs->dump();
  }
}

void CodeSection::print(const char* name) {
  csize_t locs_size = locs_end() - locs_start();
  tty->print_cr(" %7s.code = " PTR_FORMAT " : " PTR_FORMAT " : " PTR_FORMAT " (%d of %d)%s",
                name, start(), end(), limit(), size(), capacity(),
                is_frozen()? " [frozen]": "");
  tty->print_cr(" %7s.locs = " PTR_FORMAT " : " PTR_FORMAT " : " PTR_FORMAT " (%d of %d) point=%d",
                name, locs_start(), locs_end(), locs_limit(), locs_size, locs_capacity(), locs_point_off());
  if (PrintRelocations) {
    RelocIterator iter(this);
    iter.print();
  }
}

void CodeBuffer::print() {
  if (this == NULL) {
    tty->print_cr("NULL CodeBuffer pointer");
    return;
  }

  tty->print_cr("CodeBuffer:");
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    // print each section
    CodeSection* cs = code_section(n);
    cs->print(code_section_name(n));
  }
}

#endif // PRODUCT