Tue, 11 May 2010 14:35:43 -0700
6931180: Migration to recent versions of MS Platform SDK
6951582: Build problems on win64
Summary: Changes to enable building JDK7 with Microsoft Visual Studio 2010
Reviewed-by: ohair, art, ccheung, dcubed
/*
 * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */
# include "incls/_precompiled.incl"
# include "incls/_codeBuffer.cpp.incl"
// The structure of a CodeSection:
//
//    _start ->           +----------------+
//                        | machine code...|
//    _end ->             |----------------|
//                        |                |
//                        |    (empty)     |
//                        |                |
//                        |                |
//                        +----------------+
//    _limit ->           |                |
//
//    _locs_start ->      +----------------+
//                        |reloc records...|
//                        |----------------|
//    _locs_end ->        |                |
//                        |                |
//                        |    (empty)     |
//                        |                |
//                        |                |
//                        +----------------+
//    _locs_limit ->      |                |
//
// The _end (resp. _limit) pointer refers to the first
// unused (resp. unallocated) byte.
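//
// An illustrative sketch of how an emit advances _end (the helper shown
// here is hypothetical; real emits go through the Assembler):
//
//   CodeSection* cs = ...;
//   if (cs->end() + jintSize <= cs->limit()) {  // room for one more word?
//     *(jint*)cs->end() = instruction_bits;     // write at the first unused byte
//     cs->set_end(cs->end() + jintSize);        // _end advances; _limit never moves
//   }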
// The structure of the CodeBuffer while code is being accumulated:
//
//    _total_start ->    \
//    _insts._start ->    +----------------+
//                        |                |
//                        |     Code       |
//                        |                |
//    _stubs._start ->    |----------------|
//                        |                |
//                        |    Stubs       | (also handlers for deopt/exception)
//                        |                |
//    _consts._start ->   |----------------|
//                        |                |
//                        |   Constants    |
//                        |                |
//                        +----------------+
//    + _total_size ->    |                |
//
// When the code and relocations are copied to the code cache,
// the empty parts of each section are removed, and everything
// is copied into contiguous locations.
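//
// A minimal lifecycle sketch (the assembler type and emission details are
// assumptions; only the CodeBuffer calls are taken from this file):
//
//   CodeBuffer cb("my_stub", code_size, locs_size);  // allocates a BufferBlob
//   MacroAssembler masm(&cb);                        // emits into cb's sections
//   ... emit code, stubs, and constants ...
//   cb.copy_code_to(final_blob);   // compact the sections and fix relocations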
typedef CodeBuffer::csize_t csize_t;  // file-local definition

// External buffer, in a predefined CodeBlob or other buffer area.
// Important: The code_start must be taken exactly, and not realigned.
CodeBuffer::CodeBuffer(address code_start, csize_t code_size) {
  assert(code_start != NULL, "sanity");
  initialize_misc("static buffer");
  initialize(code_start, code_size);
  assert(verify_section_allocation(), "initial use of buffer OK");
}
void CodeBuffer::initialize(csize_t code_size, csize_t locs_size) {
  // Compute maximal alignment.
  int align = _insts.alignment();
  // Always allow for empty slop around each section.
  int slop = (int) CodeSection::end_slop();

  assert(blob() == NULL, "only once");
  set_blob(BufferBlob::create(_name, code_size + (align+slop) * (SECT_LIMIT+1)));
  if (blob() == NULL) {
    // The assembler constructor will throw a fatal on an empty CodeBuffer.
    return;  // caller must test this
  }

  // Set up various pointers into the blob.
  initialize(_total_start, _total_size);

  assert((uintptr_t)code_begin() % CodeEntryAlignment == 0, "instruction start not code entry aligned");

  pd_initialize();

  if (locs_size != 0) {
    _insts.initialize_locs(locs_size / sizeof(relocInfo));
  }

  assert(verify_section_allocation(), "initial use of blob is OK");
}
CodeBuffer::~CodeBuffer() {
  // If we allocate our code buffer from the CodeCache
  // via a BufferBlob, and it's not permanent, then
  // free the BufferBlob.
  // The rest of the memory will be freed when the ResourceObj
  // is released.
  assert(verify_section_allocation(), "final storage configuration still OK");
  for (CodeBuffer* cb = this; cb != NULL; cb = cb->before_expand()) {
    // Previous incarnations of this buffer are held live, so that internal
    // addresses constructed before expansions will not be confused.
    cb->free_blob();
  }

  // free any overflow storage
  delete _overflow_arena;

#ifdef ASSERT
  Copy::fill_to_bytes(this, sizeof(*this), badResourceValue);
#endif
}
void CodeBuffer::initialize_oop_recorder(OopRecorder* r) {
  assert(_oop_recorder == &_default_oop_recorder && _default_oop_recorder.is_unused(), "do this once");
  DEBUG_ONLY(_default_oop_recorder.oop_size());  // force unused OR to be frozen
  _oop_recorder = r;
}
void CodeBuffer::initialize_section_size(CodeSection* cs, csize_t size) {
  assert(cs != &_insts, "insts is the memory provider, not the consumer");
#ifdef ASSERT
  for (int n = (int)SECT_INSTS+1; n < (int)SECT_LIMIT; n++) {
    CodeSection* prevCS = code_section(n);
    if (prevCS == cs)  break;
    assert(!prevCS->is_allocated(), "section allocation must be in reverse order");
  }
#endif
  csize_t slop = CodeSection::end_slop();  // margin between sections
  int align = cs->alignment();
  assert(is_power_of_2(align), "sanity");
  address start  = _insts._start;
  address limit  = _insts._limit;
  address middle = limit - size;
  middle -= (intptr_t)middle & (align-1);  // align the division point downward
  guarantee(middle - slop > start, "need enough space to divide up");
  _insts._limit = middle - slop;  // subtract desired space, plus slop
  cs->initialize(middle, limit - middle);
  assert(cs->start() == middle, "sanity");
  assert(cs->limit() == limit,  "sanity");
  // give it some relocations to start with, if the main section has them
  if (_insts.has_locs())  cs->initialize_locs(1);
}
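
// Worked example of the carving arithmetic above, with illustrative numbers:
// if _insts spans [0x1000, 0x2000), size = 0x100, and align = 16, then
//   middle = 0x2000 - 0x100 = 0x1f00   (already 16-byte aligned)
//   _insts._limit = 0x1f00 - slop      (insts donates size plus slop)
// and cs occupies [0x1f00, 0x2000).  Because sections are carved from the
// high end of _insts, they must be allocated in reverse index order, which
// is what the ASSERT loop above checks.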
void CodeBuffer::freeze_section(CodeSection* cs) {
  CodeSection* next_cs = (cs == consts())? NULL: code_section(cs->index()+1);
  csize_t frozen_size = cs->size();
  if (next_cs != NULL) {
    frozen_size = next_cs->align_at_start(frozen_size);
  }
  address old_limit = cs->limit();
  address new_limit = cs->start() + frozen_size;
  relocInfo* old_locs_limit = cs->locs_limit();
  relocInfo* new_locs_limit = cs->locs_end();
  // Patch the limits.
  cs->_limit = new_limit;
  cs->_locs_limit = new_locs_limit;
  cs->_frozen = true;
  if (next_cs != NULL && !next_cs->is_allocated() && !next_cs->is_frozen()) {
    // Give remaining buffer space to the following section.
    next_cs->initialize(new_limit, old_limit - new_limit);
    next_cs->initialize_shared_locs(new_locs_limit,
                                    old_locs_limit - new_locs_limit);
  }
}
void CodeBuffer::set_blob(BufferBlob* blob) {
  _blob = blob;
  if (blob != NULL) {
    address start = blob->instructions_begin();
    address end   = blob->instructions_end();
    // Round up the starting address.
    int align = _insts.alignment();
    start += (-(intptr_t)start) & (align-1);
    _total_start = start;
    _total_size  = end - start;
  } else {
#ifdef ASSERT
    // Clean out dangling pointers.
    _total_start   = badAddress;
    _insts._start  = _insts._end  = badAddress;
    _stubs._start  = _stubs._end  = badAddress;
    _consts._start = _consts._end = badAddress;
#endif //ASSERT
  }
}

void CodeBuffer::free_blob() {
  if (_blob != NULL) {
    BufferBlob::free(_blob);
    set_blob(NULL);
  }
}
const char* CodeBuffer::code_section_name(int n) {
#ifdef PRODUCT
  return NULL;
#else //PRODUCT
  switch (n) {
  case SECT_INSTS:   return "insts";
  case SECT_STUBS:   return "stubs";
  case SECT_CONSTS:  return "consts";
  default:           return NULL;
  }
#endif //PRODUCT
}
int CodeBuffer::section_index_of(address addr) const {
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    const CodeSection* cs = code_section(n);
    if (cs->allocates(addr))  return n;
  }
  return SECT_NONE;
}

int CodeBuffer::locator(address addr) const {
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    const CodeSection* cs = code_section(n);
    if (cs->allocates(addr)) {
      return locator(addr - cs->start(), n);
    }
  }
  return -1;
}

address CodeBuffer::locator_address(int locator) const {
  if (locator < 0)  return NULL;
  address start = code_section(locator_sect(locator))->start();
  return start + locator_pos(locator);
}
address CodeBuffer::decode_begin() {
  address begin = _insts.start();
  if (_decode_begin != NULL && _decode_begin > begin)
    begin = _decode_begin;
  return begin;
}

GrowableArray<int>* CodeBuffer::create_patch_overflow() {
  if (_overflow_arena == NULL) {
    _overflow_arena = new Arena();
  }
  return new (_overflow_arena) GrowableArray<int>(_overflow_arena, 8, 0, 0);
}
// Helper function for managing labels and their target addresses.
// Returns a sensible address, and if it is not the label's final
// address, notes the dependency (at 'branch_pc') on the label.
address CodeSection::target(Label& L, address branch_pc) {
  if (L.is_bound()) {
    int loc = L.loc();
    if (index() == CodeBuffer::locator_sect(loc)) {
      return start() + CodeBuffer::locator_pos(loc);
    } else {
      return outer()->locator_address(loc);
    }
  } else {
    assert(allocates2(branch_pc), "sanity");
    address base = start();
    int patch_loc = CodeBuffer::locator(branch_pc - base, index());
    L.add_patch_at(outer(), patch_loc);

    // Need to return a pc; it doesn't matter what it is, since it will be
    // replaced during resolution later.
    // Don't return NULL or badAddress, since branches shouldn't overflow.
    // Don't return base either, because that could overflow displacements
    // for shorter branches.  It will get checked when bound.
    return branch_pc;
  }
}
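
// Typical use from a branch emitter (a sketch; the emit and bind steps are
// schematic, not this file's API):
//
//   Label L;
//   address target = cs->target(L, branch_pc);  // L unbound: returns branch_pc
//                                               // and records a patch location
//   ... later, binding L replays Label::patch_instructions, which revisits
//   each recorded patch location and rewrites the branch to the real target.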
void CodeSection::relocate(address at, RelocationHolder const& spec, int format) {
  Relocation* reloc = spec.reloc();
  relocInfo::relocType rtype = (relocInfo::relocType) reloc->type();
  if (rtype == relocInfo::none)  return;

  // The assertion below has been adjusted, to also work for
  // relocation for fixup.  Sometimes we want to put relocation
  // information for the next instruction, since it will be patched
  // with a call.
  assert(start() <= at && at <= end()+1,
         "cannot relocate data outside code boundaries");

  if (!has_locs()) {
    // No space for relocation information provided => code cannot be
    // relocated.  Make sure that relocate is only called with rtypes
    // that can be ignored for this kind of code.
    assert(rtype == relocInfo::none              ||
           rtype == relocInfo::runtime_call_type ||
           rtype == relocInfo::internal_word_type||
           rtype == relocInfo::section_word_type ||
           rtype == relocInfo::external_word_type,
           "code needs relocation information");
    // Leave behind an indication that we attempted a relocation.
    DEBUG_ONLY(_locs_start = _locs_limit = (relocInfo*)badAddress);
    return;
  }

  // Advance the point, noting the offset we'll have to record.
  csize_t offset = at - locs_point();
  set_locs_point(at);

  // Test for a couple of overflow conditions; maybe expand the buffer.
  relocInfo* end = locs_end();
  relocInfo* req = end + relocInfo::length_limit;
  // Check for (potential) overflow.
  if (req >= locs_limit() || offset >= relocInfo::offset_limit()) {
    req += (uint)offset / (uint)relocInfo::offset_limit();
    if (req >= locs_limit()) {
      // Allocate or reallocate.
      expand_locs(locs_count() + (req - end));
      // Reload the pointer.
      end = locs_end();
    }
  }

  // If the offset is giant, emit filler relocs, of type 'none', but
  // each carrying the largest possible offset, to advance the locs_point.
  while (offset >= relocInfo::offset_limit()) {
    assert(end < locs_limit(), "adjust previous paragraph of code");
    *end++ = filler_relocInfo();
    offset -= filler_relocInfo().addr_offset();
  }

  // If it's a simple reloc with no data, we'll just write (rtype | offset).
  (*end) = relocInfo(rtype, offset, format);

  // If it has data, insert the prefix, as (data_prefix_tag | data1), data2.
  end->initialize(this, reloc);
}
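
// Filler arithmetic sketch with illustrative numbers: suppose
// relocInfo::offset_limit() were 64 and a filler record advanced the point
// by 64 bytes.  For a reloc 150 bytes past locs_point(), the loop above
// would emit two fillers (advancing 128 bytes) and the real record would
// then carry the remaining offset of 22.  (The true limits depend on
// relocInfo's bit layout; 64 is illustrative only.)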
void CodeSection::initialize_locs(int locs_capacity) {
  assert(_locs_start == NULL, "only one locs init step, please");
  // Apply a priori lower limits to relocation size:
  csize_t min_locs = MAX2(size() / 16, (csize_t)4);
  if (locs_capacity < min_locs)  locs_capacity = min_locs;
  relocInfo* locs_start = NEW_RESOURCE_ARRAY(relocInfo, locs_capacity);
  _locs_start = locs_start;
  _locs_end   = locs_start;
  _locs_limit = locs_start + locs_capacity;
  _locs_own   = true;
}

void CodeSection::initialize_shared_locs(relocInfo* buf, int length) {
  assert(_locs_start == NULL, "do this before locs are allocated");
  // Internal invariant:  locs buf must be fully aligned.
  // See copy_relocations_to() below.
  while ((uintptr_t)buf % HeapWordSize != 0 && length > 0) {
    ++buf; --length;
  }
  if (length > 0) {
    _locs_start = buf;
    _locs_end   = buf;
    _locs_limit = buf + length;
    _locs_own   = false;
  }
}
void CodeSection::initialize_locs_from(const CodeSection* source_cs) {
  int lcount = source_cs->locs_count();
  if (lcount != 0) {
    initialize_shared_locs(source_cs->locs_start(), lcount);
    _locs_end = _locs_limit = _locs_start + lcount;
    assert(is_allocated(), "must have copied code already");
    set_locs_point(start() + source_cs->locs_point_off());
  }
  assert(this->locs_count() == source_cs->locs_count(), "sanity");
}

void CodeSection::expand_locs(int new_capacity) {
  if (_locs_start == NULL) {
    initialize_locs(new_capacity);
    return;
  } else {
    int old_count    = locs_count();
    int old_capacity = locs_capacity();
    if (new_capacity < old_capacity * 2)
      new_capacity = old_capacity * 2;
    relocInfo* locs_start;
    if (_locs_own) {
      locs_start = REALLOC_RESOURCE_ARRAY(relocInfo, _locs_start, old_capacity, new_capacity);
    } else {
      locs_start = NEW_RESOURCE_ARRAY(relocInfo, new_capacity);
      Copy::conjoint_bytes(_locs_start, locs_start, old_capacity * sizeof(relocInfo));
      _locs_own = true;
    }
    _locs_start = locs_start;
    _locs_end   = locs_start + old_count;
    _locs_limit = locs_start + new_capacity;
  }
}
/// Support for emitting the code to its final location.
/// The pattern is the same for all functions.
/// We iterate over all the sections, padding each to alignment.

csize_t CodeBuffer::total_code_size() const {
  csize_t code_size_so_far = 0;
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    const CodeSection* cs = code_section(n);
    if (cs->is_empty())  continue;  // skip trivial section
    code_size_so_far = cs->align_at_start(code_size_so_far);
    code_size_so_far += cs->size();
  }
  return code_size_so_far;
}
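
// Example with illustrative sizes: insts = 0x1234 bytes, stubs = 0x10 bytes
// at 16-byte alignment, consts = 0x18 bytes at 8-byte alignment:
//   0x0000 + 0x1234                 = 0x1234  (insts, no leading padding)
//   align up to 16: 0x1240; + 0x10  = 0x1250  (stubs)
//   0x1250 is already 8-aligned; + 0x18 = 0x1268  (consts) -- the total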
void CodeBuffer::compute_final_layout(CodeBuffer* dest) const {
  address buf = dest->_total_start;
  csize_t buf_offset = 0;
  assert(dest->_total_size >= total_code_size(), "must be big enough");

  {
    // not sure why this is here, but why not...
    int alignSize = MAX2((intx) sizeof(jdouble), CodeEntryAlignment);
    assert( (dest->_total_start - _insts.start()) % alignSize == 0, "copy must preserve alignment");
  }

  const CodeSection* prev_cs      = NULL;
  CodeSection*       prev_dest_cs = NULL;
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    // figure compact layout of each section
    const CodeSection* cs = code_section(n);
    address cstart = cs->start();
    address cend   = cs->end();
    csize_t csize  = cend - cstart;

    CodeSection* dest_cs = dest->code_section(n);
    if (!cs->is_empty()) {
      // Compute initial padding; assign it to the previous non-empty guy.
      // Cf. figure_expanded_capacities.
      csize_t padding = cs->align_at_start(buf_offset) - buf_offset;
      if (padding != 0) {
        buf_offset += padding;
        assert(prev_dest_cs != NULL, "sanity");
        prev_dest_cs->_limit += padding;
      }
#ifdef ASSERT
      if (prev_cs != NULL && prev_cs->is_frozen() && n < SECT_CONSTS) {
        // Make sure the ends still match up.
        // This is important because a branch in a frozen section
        // might target code in a following section, via a Label,
        // and without a relocation record.  See Label::patch_instructions.
        address dest_start = buf+buf_offset;
        csize_t start2start = cs->start() - prev_cs->start();
        csize_t dest_start2start = dest_start - prev_dest_cs->start();
        assert(start2start == dest_start2start, "cannot stretch frozen sect");
      }
#endif //ASSERT
      prev_dest_cs = dest_cs;
      prev_cs      = cs;
    }

    debug_only(dest_cs->_start = NULL);  // defeat double-initialization assert
    dest_cs->initialize(buf+buf_offset, csize);
    dest_cs->set_end(buf+buf_offset+csize);
    assert(dest_cs->is_allocated(), "must always be allocated");
    assert(cs->is_empty() == dest_cs->is_empty(), "sanity");

    buf_offset += csize;
  }

  // Done calculating sections; did it come out to the right end?
  assert(buf_offset == total_code_size(), "sanity");
  assert(dest->verify_section_allocation(), "final configuration works");
}
csize_t CodeBuffer::total_offset_of(address addr) const {
  csize_t code_size_so_far = 0;
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    const CodeSection* cs = code_section(n);
    if (!cs->is_empty()) {
      code_size_so_far = cs->align_at_start(code_size_so_far);
    }
    if (cs->contains2(addr)) {
      return code_size_so_far + (addr - cs->start());
    }
    code_size_so_far += cs->size();
  }
#ifndef PRODUCT
  tty->print_cr("Dangling address " PTR_FORMAT " in:", addr);
  ((CodeBuffer*)this)->print();
#endif
  ShouldNotReachHere();
  return -1;
}
csize_t CodeBuffer::total_relocation_size() const {
  csize_t lsize = copy_relocations_to(NULL);  // dry run only
  csize_t csize = total_code_size();
  csize_t total = RelocIterator::locs_and_index_size(csize, lsize);
  return (csize_t) align_size_up(total, HeapWordSize);
}
csize_t CodeBuffer::copy_relocations_to(CodeBlob* dest) const {
  address buf = NULL;
  csize_t buf_offset = 0;
  csize_t buf_limit = 0;
  if (dest != NULL) {
    buf = (address)dest->relocation_begin();
    buf_limit = (address)dest->relocation_end() - buf;
    assert((uintptr_t)buf % HeapWordSize == 0, "buf must be fully aligned");
    assert(buf_limit % HeapWordSize == 0, "buf must be evenly sized");
  }
  // if dest == NULL, this is just the sizing pass

  csize_t code_end_so_far = 0;
  csize_t code_point_so_far = 0;
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    // pull relocs out of each section
    const CodeSection* cs = code_section(n);
    assert(!(cs->is_empty() && cs->locs_count() > 0), "sanity");
    if (cs->is_empty())  continue;  // skip trivial section
    relocInfo* lstart = cs->locs_start();
    relocInfo* lend   = cs->locs_end();
    csize_t    lsize  = (csize_t)( (address)lend - (address)lstart );
    csize_t    csize  = cs->size();
    code_end_so_far = cs->align_at_start(code_end_so_far);

    if (lsize > 0) {
      // Figure out how to advance the combined relocation point
      // first to the beginning of this section.
      // We'll insert one or more filler relocs to span that gap.
      // (Don't bother to improve this by editing the first reloc's offset.)
      csize_t new_code_point = code_end_so_far;
      for (csize_t jump;
           code_point_so_far < new_code_point;
           code_point_so_far += jump) {
        jump = new_code_point - code_point_so_far;
        relocInfo filler = filler_relocInfo();
        if (jump >= filler.addr_offset()) {
          jump = filler.addr_offset();
        } else {  // else shrink the filler to fit
          filler = relocInfo(relocInfo::none, jump);
        }
        if (buf != NULL) {
          assert(buf_offset + (csize_t)sizeof(filler) <= buf_limit, "filler in bounds");
          *(relocInfo*)(buf+buf_offset) = filler;
        }
        buf_offset += sizeof(filler);
      }

      // Update code point and end to skip past this section:
      csize_t last_code_point = code_end_so_far + cs->locs_point_off();
      assert(code_point_so_far <= last_code_point, "sanity");
      code_point_so_far = last_code_point;  // advance past this guy's relocs
    }
    code_end_so_far += csize;  // advance past this guy's instructions too

    // Done with filler; emit the real relocations:
    if (buf != NULL && lsize != 0) {
      assert(buf_offset + lsize <= buf_limit, "target in bounds");
      assert((uintptr_t)lstart % HeapWordSize == 0, "sane start");
      if (buf_offset % HeapWordSize == 0) {
        // Use wordwise copies if possible:
        Copy::disjoint_words((HeapWord*)lstart,
                             (HeapWord*)(buf+buf_offset),
                             (lsize + HeapWordSize-1) / HeapWordSize);
      } else {
        Copy::conjoint_bytes(lstart, buf+buf_offset, lsize);
      }
    }
    buf_offset += lsize;
  }

  // Align end of relocation info in target.
  while (buf_offset % HeapWordSize != 0) {
    if (buf != NULL) {
      relocInfo padding = relocInfo(relocInfo::none, 0);
      assert(buf_offset + (csize_t)sizeof(padding) <= buf_limit, "padding in bounds");
      *(relocInfo*)(buf+buf_offset) = padding;
    }
    buf_offset += sizeof(relocInfo);
  }

  assert(code_end_so_far == total_code_size(), "sanity");

  // Account for index:
  if (buf != NULL) {
    RelocIterator::create_index(dest->relocation_begin(),
                                buf_offset / sizeof(relocInfo),
                                dest->relocation_end());
  }

  return buf_offset;
}
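
// Gap-spanning sketch with illustrative numbers: if a section's code begins
// 300 bytes past the current relocation point and one filler record could
// advance the point by at most 255 bytes, the loop above emits a full
// filler (jump = 255) followed by a shrunken one (jump = 45) before the
// section's own records are block-copied.  (255 is illustrative, not the
// real filler limit.)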
void CodeBuffer::copy_code_to(CodeBlob* dest_blob) {
#ifndef PRODUCT
  if (PrintNMethods && (WizardMode || Verbose)) {
    tty->print("done with CodeBuffer:");
    ((CodeBuffer*)this)->print();
  }
#endif //PRODUCT

  CodeBuffer dest(dest_blob->instructions_begin(),
                  dest_blob->instructions_size());
  assert(dest_blob->instructions_size() >= total_code_size(), "good sizing");
  this->compute_final_layout(&dest);
  relocate_code_to(&dest);

  // transfer comments from buffer to blob
  dest_blob->set_comments(_comments);

  // Done moving code bytes; were they the right size?
  assert(round_to(dest.total_code_size(), oopSize) == dest_blob->instructions_size(), "sanity");

  // Flush generated code
  ICache::invalidate_range(dest_blob->instructions_begin(),
                           dest_blob->instructions_size());
}
// Move all my code into another code buffer.
// Consult applicable relocs to repair embedded addresses.
void CodeBuffer::relocate_code_to(CodeBuffer* dest) const {
  DEBUG_ONLY(address dest_end = dest->_total_start + dest->_total_size);
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    // pull code out of each section
    const CodeSection* cs = code_section(n);
    if (cs->is_empty())  continue;  // skip trivial section
    CodeSection* dest_cs = dest->code_section(n);
    assert(cs->size() == dest_cs->size(), "sanity");
    csize_t usize = dest_cs->size();
    csize_t wsize = align_size_up(usize, HeapWordSize);
    assert(dest_cs->start() + wsize <= dest_end, "no overflow");
    // Copy the code as aligned machine words.
    // This may also include an uninitialized partial word at the end.
    Copy::disjoint_words((HeapWord*)cs->start(),
                         (HeapWord*)dest_cs->start(),
                         wsize / HeapWordSize);

    if (dest->blob() == NULL) {
      // Destination is a final resting place, not just another buffer.
      // Normalize uninitialized bytes in the final padding.
      Copy::fill_to_bytes(dest_cs->end(), dest_cs->remaining(),
                          Assembler::code_fill_byte());
    }

    assert(cs->locs_start() != (relocInfo*)badAddress,
           "this section carries no reloc storage, but reloc was attempted");

    // Make the new code copy use the old copy's relocations:
    dest_cs->initialize_locs_from(cs);

    { // Repair the pc relative information in the code after the move
      RelocIterator iter(dest_cs);
      while (iter.next()) {
        iter.reloc()->fix_relocation_after_move(this, dest);
      }
    }
  }
}
csize_t CodeBuffer::figure_expanded_capacities(CodeSection* which_cs,
                                               csize_t amount,
                                               csize_t* new_capacity) {
  csize_t new_total_cap = 0;

  int prev_n = -1;
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    const CodeSection* sect = code_section(n);

    if (!sect->is_empty()) {
      // Compute initial padding; assign it to the previous non-empty guy.
      // Cf. compute_final_layout.
      csize_t padding = sect->align_at_start(new_total_cap) - new_total_cap;
      if (padding != 0) {
        new_total_cap += padding;
        assert(prev_n >= 0, "sanity");
        new_capacity[prev_n] += padding;
      }
      prev_n = n;
    }

    csize_t exp = sect->size();  // 100% increase
    if ((uint)exp < 4*K)  exp = 4*K;  // minimum initial increase
    if (sect == which_cs) {
      if (exp < amount)  exp = amount;
      if (StressCodeBuffers)  exp = amount;  // expand only slightly
    } else if (n == SECT_INSTS) {
      // scale down inst increases to a more modest 25%
      exp = 4*K + ((exp - 4*K) >> 2);
      if (StressCodeBuffers)  exp = amount / 2;  // expand only slightly
    } else if (sect->is_empty()) {
      // do not grow an empty secondary section
      exp = 0;
    }
    // Allow for inter-section slop:
    exp += CodeSection::end_slop();
    csize_t new_cap = sect->size() + exp;
    if (new_cap < sect->capacity()) {
      // No need to expand after all.
      new_cap = sect->capacity();
    }
    new_capacity[n] = new_cap;
    new_total_cap += new_cap;
  }

  return new_total_cap;
}
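
// Growth-policy sketch with illustrative sizes: a 16K insts section that is
// not the requesting section expands by 4K + (16K - 4K)/4 = 7K (the "modest
// 25%" case above), while an 8K section passed as which_cs with amount = 2K
// expands by max(8K, 2K) = 8K (the 100% case).  Each result then gains
// end_slop(), and a section keeps its current capacity if that is already
// larger than the computed target.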
void CodeBuffer::expand(CodeSection* which_cs, csize_t amount) {
#ifndef PRODUCT
  if (PrintNMethods && (WizardMode || Verbose)) {
    tty->print("expanding CodeBuffer:");
    this->print();
  }

  if (StressCodeBuffers && blob() != NULL) {
    static int expand_count = 0;
    if (expand_count >= 0)  expand_count += 1;
    if (expand_count > 100 && is_power_of_2(expand_count)) {
      tty->print_cr("StressCodeBuffers: have expanded %d times", expand_count);
      // simulate an occasional allocation failure:
      free_blob();
    }
  }
#endif //PRODUCT

  // Resizing must be allowed
  {
    if (blob() == NULL)  return;  // caller must check for blob == NULL
    for (int n = 0; n < (int)SECT_LIMIT; n++) {
      guarantee(!code_section(n)->is_frozen(), "resizing not allowed when frozen");
    }
  }

  // Figure new capacity for each section.
  csize_t new_capacity[SECT_LIMIT];
  csize_t new_total_cap
    = figure_expanded_capacities(which_cs, amount, new_capacity);

  // Create a new (temporary) code buffer to hold all the new data
  CodeBuffer cb(name(), new_total_cap, 0);
  if (cb.blob() == NULL) {
    // Failed to allocate in code cache.
    free_blob();
    return;
  }

  // Create an old code buffer to remember which addresses used to go where.
  // This will be useful when we do final assembly into the code cache,
  // because we will need to know how to warp any internal address that
  // has been created at any time in this CodeBuffer's past.
  CodeBuffer* bxp = new CodeBuffer(_total_start, _total_size);
  bxp->take_over_code_from(this);  // remember the old undersized blob
  DEBUG_ONLY(this->_blob = NULL);  // silence a later assert
  bxp->_before_expand = this->_before_expand;
  this->_before_expand = bxp;

  // Give each section its required (expanded) capacity.
  for (int n = (int)SECT_LIMIT-1; n >= SECT_INSTS; n--) {
    CodeSection* cb_sect   = cb.code_section(n);
    CodeSection* this_sect = code_section(n);
    if (new_capacity[n] == 0)  continue;  // already nulled out
    if (n > SECT_INSTS) {
      cb.initialize_section_size(cb_sect, new_capacity[n]);
    }
    assert(cb_sect->capacity() >= new_capacity[n], "big enough");
    address cb_start = cb_sect->start();
    cb_sect->set_end(cb_start + this_sect->size());
    if (this_sect->mark() == NULL) {
      cb_sect->clear_mark();
    } else {
      cb_sect->set_mark(cb_start + this_sect->mark_off());
    }
  }

  // Move all the code and relocations to the new blob:
  relocate_code_to(&cb);

  // Copy the temporary code buffer into the current code buffer.
  // Basically, do {*this = cb}, except for some control information.
  this->take_over_code_from(&cb);
  cb.set_blob(NULL);

  // Zap the old code buffer contents, to avoid mistakenly using them.
  debug_only(Copy::fill_to_bytes(bxp->_total_start, bxp->_total_size,
                                 badCodeHeapFreeVal));

  _decode_begin = NULL;  // sanity

  // Make certain that the new sections are all snugly inside the new blob.
  assert(verify_section_allocation(), "expanded allocation is ship-shape");

#ifndef PRODUCT
  if (PrintNMethods && (WizardMode || Verbose)) {
    tty->print("expanded CodeBuffer:");
    this->print();
  }
#endif //PRODUCT
}
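
// After an expansion the incarnations form a chain through _before_expand:
//
//   this (owns the new, larger blob)
//     -> bxp (wraps the old undersized blob, zapped above)
//          -> earlier incarnations, oldest last
//
// The relocation fix-up code consults this chain during final assembly to
// warp internal addresses created before the move (see the comment above),
// and ~CodeBuffer walks it to free every blob.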
void CodeBuffer::take_over_code_from(CodeBuffer* cb) {
  // Must already have disposed of the old blob somehow.
  assert(blob() == NULL, "must be empty");
  // Take the new blob away from cb.
  set_blob(cb->blob());
  // Take over all the section pointers.
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    CodeSection* cb_sect   = cb->code_section(n);
    CodeSection* this_sect = code_section(n);
    this_sect->take_over_code_from(cb_sect);
  }
  _overflow_arena = cb->_overflow_arena;
  // Make sure the old cb won't try to use it or free it.
  DEBUG_ONLY(cb->_blob = (BufferBlob*)badAddress);
}
#ifdef ASSERT
bool CodeBuffer::verify_section_allocation() {
  address tstart = _total_start;
  if (tstart == badAddress)  return true;  // smashed by set_blob(NULL)
  address tend   = tstart + _total_size;
  if (_blob != NULL) {
    assert(tstart >= _blob->instructions_begin(), "sanity");
    assert(tend   <= _blob->instructions_end(),   "sanity");
  }
  address tcheck = tstart;  // advancing pointer to verify disjointness
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    CodeSection* sect = code_section(n);
    if (!sect->is_allocated())  continue;
    assert(sect->start() >= tcheck, "sanity");
    tcheck = sect->start();
    assert((intptr_t)tcheck % sect->alignment() == 0
           || sect->is_empty() || _blob == NULL,
           "start is aligned");
    assert(sect->end() >= tcheck, "sanity");
    assert(sect->end() <= tend,   "sanity");
  }
  return true;
}
#endif //ASSERT
#ifndef PRODUCT

void CodeSection::dump() {
  address ptr = start();
  for (csize_t step; ptr < end(); ptr += step) {
    step = end() - ptr;
    if (step > jintSize * 4)  step = jintSize * 4;  // at most four words per line
    tty->print(PTR_FORMAT ": ", ptr);
    for (csize_t i = 0; i < step; i += jintSize) {
      tty->print(" " PTR32_FORMAT, *(jint*)(ptr + i));
    }
    tty->cr();
  }
}

void CodeSection::decode() {
  Disassembler::decode(start(), end());
}
void CodeBuffer::block_comment(intptr_t offset, const char * comment) {
  _comments.add_comment(offset, comment);
}
class CodeComment: public CHeapObj {
 private:
  friend class CodeComments;
  intptr_t     _offset;
  const char * _comment;
  CodeComment* _next;

  ~CodeComment() {
    assert(_next == NULL, "wrong interface for freeing list");
    os::free((void*)_comment);
  }

 public:
  CodeComment(intptr_t offset, const char * comment) {
    _offset  = offset;
    _comment = os::strdup(comment);
    _next    = NULL;
  }

  intptr_t     offset()  const { return _offset;  }
  const char * comment() const { return _comment; }
  CodeComment* next()          { return _next;    }

  void set_next(CodeComment* next) { _next = next; }

  CodeComment* find(intptr_t offset) {
    CodeComment* a = this;
    while (a != NULL && a->_offset != offset) {
      a = a->_next;
    }
    return a;
  }
};
void CodeComments::add_comment(intptr_t offset, const char * comment) {
  CodeComment* c = new CodeComment(offset, comment);
  CodeComment* insert = NULL;
  if (_comments != NULL) {
    // Find the last existing comment at the same offset, so that comments
    // sharing an offset stay in insertion order.
    CodeComment* found = _comments->find(offset);
    while (found != NULL && found->offset() == offset) {
      insert = found;
      found = found->next();
    }
  }
  if (insert != NULL) {
    // insert after comments with same offset
    c->set_next(insert->next());
    insert->set_next(c);
  } else {
    c->set_next(_comments);
    _comments = c;
  }
}
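
// Ordering sketch: adding "A" then "B" at offset 8, then "C" at offset 0,
// yields  [0:"C"] -> [8:"A"] -> [8:"B"].  Comments that share an offset
// keep insertion order; a comment at a previously unseen offset is simply
// prepended, so the overall list is not sorted by offset.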
void CodeComments::assign(CodeComments& other) {
  assert(_comments == NULL, "don't overwrite old value");
  _comments = other._comments;
}

void CodeComments::print_block_comment(outputStream* stream, intptr_t offset) {
  if (_comments != NULL) {
    CodeComment* c = _comments->find(offset);
    while (c != NULL && c->offset() == offset) {
      stream->bol();
      stream->print("  ;; ");
      stream->print_cr("%s", c->comment());  // via "%s", so '%' in a comment is safe
      c = c->next();
    }
  }
}
void CodeComments::free() {
  CodeComment* n = _comments;
  while (n != NULL) {
    // unlink the node from the list, saving a pointer to the next
    CodeComment* p = n->_next;
    n->_next = NULL;
    delete n;
    n = p;
  }
  _comments = NULL;
}
void CodeBuffer::decode() {
  Disassembler::decode(decode_begin(), code_end());
  _decode_begin = code_end();
}

void CodeBuffer::skip_decode() {
  _decode_begin = code_end();
}

void CodeBuffer::decode_all() {
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    // dump contents of each section
    CodeSection* cs = code_section(n);
    tty->print_cr("! %s:", code_section_name(n));
    if (cs != consts())
      cs->decode();
    else
      cs->dump();
  }
}
void CodeSection::print(const char* name) {
  csize_t locs_size = locs_end() - locs_start();
  tty->print_cr(" %7s.code = " PTR_FORMAT " : " PTR_FORMAT " : " PTR_FORMAT " (%d of %d)%s",
                name, start(), end(), limit(), size(), capacity(),
                is_frozen()? " [frozen]": "");
  tty->print_cr(" %7s.locs = " PTR_FORMAT " : " PTR_FORMAT " : " PTR_FORMAT " (%d of %d) point=%d",
                name, locs_start(), locs_end(), locs_limit(), locs_size, locs_capacity(), locs_point_off());
  if (PrintRelocations) {
    RelocIterator iter(this);
    iter.print();
  }
}
void CodeBuffer::print() {
  if (this == NULL) {
    tty->print_cr("NULL CodeBuffer pointer");
    return;
  }

  tty->print_cr("CodeBuffer:");
  for (int n = 0; n < (int)SECT_LIMIT; n++) {
    // print each section
    CodeSection* cs = code_section(n);
    cs->print(code_section_name(n));
  }
}

#endif // PRODUCT