/*
 * Copyright 2000-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

class BytecodeStream;

// The MethodData object collects counts and other profile information
// during zeroth-tier (interpretive) and first-tier execution.
// The profile is used later by compilation heuristics.  Some heuristics
// enable use of aggressive (or "heroic") optimizations.  An aggressive
// optimization often has a down-side, a corner case that it handles
// poorly, but which is thought to be rare.  The profile provides
// evidence of this rarity for a given method or even BCI.  It allows
// the compiler to back out of the optimization at places where it
// has historically been a poor choice.  Other heuristics try to use
// specific information gathered about types observed at a given site.
//
// All data in the profile is approximate.  It is expected to be accurate
// on the whole, but the system expects occasional inaccuracies, due to
// counter overflow, multiprocessor races during data collection, space
// limitations, missing MDO blocks, etc.  Bad or missing data will degrade
// optimization quality but will not affect correctness.  Also, each MDO
// is marked with its birth-date ("creation_mileage") which can be used
// to assess the quality ("maturity") of its data.
//
// Short (<32-bit) counters are designed to overflow to a known "saturated"
// state.  Also, certain recorded per-BCI events are given one-bit counters
// which overflow to a saturated state which applies to all counters at
// that BCI.  In other words, there is a small lattice which approximates
// the ideal of an infinite-precision counter for each event at each BCI,
// and the lattice quickly "bottoms out" in a state where all counters
// are taken to be indefinitely large.
//
// The reader will find many data races in profile gathering code, starting
// with invocation counter incrementation.  None of these races harm correct
// execution of the compiled code.
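//
// As an illustration, the saturating update used for these counters follows
// the pattern in JumpData::inc_taken() and BranchData::inc_not_taken() below:
// increment the counter, and if the value wraps to zero, pin it back at the
// maximum so that the counter sticks at its largest representable value.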

// DataLayout
//
// Overlay for generic profiling data.
class DataLayout VALUE_OBJ_CLASS_SPEC {
private:
  // Every data layout begins with a header.  This header
  // contains a tag, which is used to indicate the size/layout
  // of the data, 4 bits of flags, which can be used in any way,
  // 4 bits of trap history (none/one reason/many reasons),
  // and a bci, which is used to tie this piece of data to a
  // specific bci in the bytecodes.
  union {
    intptr_t _bits;
    struct {
      u1 _tag;
      u1 _flags;
      u2 _bci;
    } _struct;
  } _header;

  // The data layout has an arbitrary number of cells, each sized
  // to accommodate a pointer or an integer.
  intptr_t _cells[1];

  // Some types of data layouts need a length field.
  static bool needs_array_len(u1 tag);

public:
  enum {
    counter_increment = 1
  };

  enum {
    cell_size = sizeof(intptr_t)
  };

  // Tag values
  enum {
    no_tag,
    bit_data_tag,
    counter_data_tag,
    jump_data_tag,
    receiver_type_data_tag,
    virtual_call_data_tag,
    ret_data_tag,
    branch_data_tag,
    multi_branch_data_tag,
    arg_info_data_tag
  };

  enum {
    // The _struct._flags word is formatted as [trap_state:4 | flags:4].
    // The trap state breaks down further as [recompile:1 | reason:3].
    // This further breakdown is defined in deoptimization.cpp.
    // See Deoptimization::trap_state_reason for an assert that
    // trap_bits is big enough to hold reasons < Reason_RECORDED_LIMIT.
    //
    // The trap_state is collected only if ProfileTraps is true.
    trap_bits = 1+3,  // 3: enough to distinguish [0..Reason_RECORDED_LIMIT].
    trap_shift = BitsPerByte - trap_bits,
    trap_mask = right_n_bits(trap_bits),
    trap_mask_in_place = (trap_mask << trap_shift),
    flag_limit = trap_shift,
    flag_mask = right_n_bits(flag_limit),
    first_flag = 0
  };

  // Size computation
  static int header_size_in_bytes() {
    return cell_size;
  }
  static int header_size_in_cells() {
    return 1;
  }

  static int compute_size_in_bytes(int cell_count) {
    return header_size_in_bytes() + cell_count * cell_size;
  }

  // Initialization
  void initialize(u1 tag, u2 bci, int cell_count);

  // Accessors
  u1 tag() {
    return _header._struct._tag;
  }

  // Return a few bits of trap state.  Range is [0..trap_mask].
  // The state tells if traps with zero, one, or many reasons have occurred.
  // It also tells whether zero or many recompilations have occurred.
  // The associated trap histogram in the MDO itself tells whether
  // traps are common or not.  If a BCI shows that a trap X has
  // occurred, and the MDO shows N occurrences of X, we make the
  // simplifying assumption that all N occurrences can be blamed
  // on that BCI.
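  // (Note: with trap_bits == 1+3 == 4 and BitsPerByte == 8, trap_shift is 4,
  // so the trap state occupies the high nibble of the _flags byte and the
  // general-purpose flags occupy the low nibble.)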
  int trap_state() {
    return ((_header._struct._flags >> trap_shift) & trap_mask);
  }

  void set_trap_state(int new_state) {
    assert(ProfileTraps, "used only under +ProfileTraps");
    uint old_flags = (_header._struct._flags & flag_mask);
    _header._struct._flags = (new_state << trap_shift) | old_flags;
  }

  u1 flags() {
    return _header._struct._flags;
  }

  u2 bci() {
    return _header._struct._bci;
  }

  void set_header(intptr_t value) {
    _header._bits = value;
  }
  void release_set_header(intptr_t value) {
    OrderAccess::release_store_ptr(&_header._bits, value);
  }
  intptr_t header() {
    return _header._bits;
  }
  void set_cell_at(int index, intptr_t value) {
    _cells[index] = value;
  }
  void release_set_cell_at(int index, intptr_t value) {
    OrderAccess::release_store_ptr(&_cells[index], value);
  }
  intptr_t cell_at(int index) {
    return _cells[index];
  }
  intptr_t* adr_cell_at(int index) {
    return &_cells[index];
  }
  oop* adr_oop_at(int index) {
    return (oop*)&(_cells[index]);
  }

  void set_flag_at(int flag_number) {
    assert(flag_number < flag_limit, "oob");
    _header._struct._flags |= (0x1 << flag_number);
  }
  bool flag_at(int flag_number) {
    assert(flag_number < flag_limit, "oob");
    return (_header._struct._flags & (0x1 << flag_number)) != 0;
  }

  // Low-level support for code generation.
  static ByteSize header_offset() {
    return byte_offset_of(DataLayout, _header);
  }
  static ByteSize tag_offset() {
    return byte_offset_of(DataLayout, _header._struct._tag);
  }
  static ByteSize flags_offset() {
    return byte_offset_of(DataLayout, _header._struct._flags);
  }
  static ByteSize bci_offset() {
    return byte_offset_of(DataLayout, _header._struct._bci);
  }
  static ByteSize cell_offset(int index) {
    return byte_offset_of(DataLayout, _cells[index]);
  }
  // Return a value which, when or-ed as a byte into _flags, sets the flag.
  static int flag_number_to_byte_constant(int flag_number) {
    assert(0 <= flag_number && flag_number < flag_limit, "oob");
    DataLayout temp; temp.set_header(0);
    temp.set_flag_at(flag_number);
    return temp._header._struct._flags;
  }
  // Return a value which, when or-ed as a word into _header, sets the flag.
  static intptr_t flag_mask_to_header_mask(int byte_constant) {
    DataLayout temp; temp.set_header(0);
    temp._header._struct._flags = byte_constant;
    return temp._header._bits;
  }
};


// ProfileData class hierarchy
class ProfileData;
class BitData;
class CounterData;
class ReceiverTypeData;
class VirtualCallData;
class RetData;
class JumpData;
class BranchData;
class ArrayData;
class MultiBranchData;
class ArgInfoData;


// ProfileData
//
// A ProfileData object is created to refer to a section of profiling
// data in a structured way.
class ProfileData : public ResourceObj {
private:
#ifndef PRODUCT
  enum {
    tab_width_one = 16,
    tab_width_two = 36
  };
#endif // !PRODUCT

  // This is a pointer to a section of profiling data.
  DataLayout* _data;

protected:
  DataLayout* data() { return _data; }

  enum {
    cell_size = DataLayout::cell_size
  };

public:
  // How many cells are in this?
  virtual int cell_count() {
    ShouldNotReachHere();
    return -1;
  }

  // Return the size of this data.
  int size_in_bytes() {
    return DataLayout::compute_size_in_bytes(cell_count());
  }

protected:
  // Low-level accessors for underlying data
  void set_intptr_at(int index, intptr_t value) {
    assert(0 <= index && index < cell_count(), "oob");
    data()->set_cell_at(index, value);
  }
  void release_set_intptr_at(int index, intptr_t value) {
    assert(0 <= index && index < cell_count(), "oob");
    data()->release_set_cell_at(index, value);
  }
  intptr_t intptr_at(int index) {
    assert(0 <= index && index < cell_count(), "oob");
    return data()->cell_at(index);
  }
  void set_uint_at(int index, uint value) {
    set_intptr_at(index, (intptr_t) value);
  }
  void release_set_uint_at(int index, uint value) {
    release_set_intptr_at(index, (intptr_t) value);
  }
  uint uint_at(int index) {
    return (uint)intptr_at(index);
  }
  void set_int_at(int index, int value) {
    set_intptr_at(index, (intptr_t) value);
  }
  void release_set_int_at(int index, int value) {
    release_set_intptr_at(index, (intptr_t) value);
  }
  int int_at(int index) {
    return (int)intptr_at(index);
  }
  int int_at_unchecked(int index) {
    return (int)data()->cell_at(index);
  }
  void set_oop_at(int index, oop value) {
    set_intptr_at(index, (intptr_t) value);
  }
  oop oop_at(int index) {
    return (oop)intptr_at(index);
  }
  oop* adr_oop_at(int index) {
    assert(0 <= index && index < cell_count(), "oob");
    return data()->adr_oop_at(index);
  }

  void set_flag_at(int flag_number) {
    data()->set_flag_at(flag_number);
  }
  bool flag_at(int flag_number) {
    return data()->flag_at(flag_number);
  }

  // two convenient imports for use by subclasses:
  static ByteSize cell_offset(int index) {
    return DataLayout::cell_offset(index);
  }
  static int flag_number_to_byte_constant(int flag_number) {
    return DataLayout::flag_number_to_byte_constant(flag_number);
  }

  ProfileData(DataLayout* data) {
    _data = data;
  }

public:
  // Constructor for invalid ProfileData.
  ProfileData();

  u2 bci() {
    return data()->bci();
  }

  address dp() {
    return (address)_data;
  }

  int trap_state() {
    return data()->trap_state();
  }
  void set_trap_state(int new_state) {
    data()->set_trap_state(new_state);
  }

  // Type checking
  virtual bool is_BitData()         { return false; }
  virtual bool is_CounterData()     { return false; }
  virtual bool is_JumpData()        { return false; }
  virtual bool is_ReceiverTypeData(){ return false; }
  virtual bool is_VirtualCallData() { return false; }
  virtual bool is_RetData()         { return false; }
  virtual bool is_BranchData()      { return false; }
  virtual bool is_ArrayData()       { return false; }
  virtual bool is_MultiBranchData() { return false; }
  virtual bool is_ArgInfoData()     { return false; }


  BitData* as_BitData() {
    assert(is_BitData(), "wrong type");
    return is_BitData() ? (BitData*) this : NULL;
  }
  CounterData* as_CounterData() {
    assert(is_CounterData(), "wrong type");
    return is_CounterData() ? (CounterData*) this : NULL;
  }
  JumpData* as_JumpData() {
    assert(is_JumpData(), "wrong type");
    return is_JumpData() ? (JumpData*) this : NULL;
  }
  ReceiverTypeData* as_ReceiverTypeData() {
    assert(is_ReceiverTypeData(), "wrong type");
    return is_ReceiverTypeData() ? (ReceiverTypeData*)this : NULL;
  }
  VirtualCallData* as_VirtualCallData() {
    assert(is_VirtualCallData(), "wrong type");
    return is_VirtualCallData() ? (VirtualCallData*)this : NULL;
  }
  RetData* as_RetData() {
    assert(is_RetData(), "wrong type");
    return is_RetData() ? (RetData*) this : NULL;
  }
  BranchData* as_BranchData() {
    assert(is_BranchData(), "wrong type");
    return is_BranchData() ? (BranchData*) this : NULL;
  }
  ArrayData* as_ArrayData() {
    assert(is_ArrayData(), "wrong type");
    return is_ArrayData() ? (ArrayData*) this : NULL;
  }
  MultiBranchData* as_MultiBranchData() {
    assert(is_MultiBranchData(), "wrong type");
    return is_MultiBranchData() ? (MultiBranchData*)this : NULL;
  }
  ArgInfoData* as_ArgInfoData() {
    assert(is_ArgInfoData(), "wrong type");
    return is_ArgInfoData() ? (ArgInfoData*)this : NULL;
  }

  // Subclass specific initialization
  virtual void post_initialize(BytecodeStream* stream, methodDataOop mdo) {}

  // GC support
  virtual void follow_contents() {}
  virtual void oop_iterate(OopClosure* blk) {}
  virtual void oop_iterate_m(OopClosure* blk, MemRegion mr) {}
  virtual void adjust_pointers() {}

#ifndef SERIALGC
  // Parallel old support
  virtual void follow_contents(ParCompactionManager* cm) {}
  virtual void update_pointers() {}
  virtual void update_pointers(HeapWord* beg_addr, HeapWord* end_addr) {}
#endif // SERIALGC

  // CI translation: ProfileData can represent both MethodDataOop data
  // as well as CIMethodData data.  This function is provided for translating
  // an oop in a ProfileData to the ci equivalent.  Generally speaking,
  // most ProfileData don't require any translation, so we provide the null
  // translation here, and the required translators are in the ci subclasses.
  virtual void translate_from(ProfileData* data) {}

  virtual void print_data_on(outputStream* st) {
    ShouldNotReachHere();
  }

#ifndef PRODUCT
  void print_shared(outputStream* st, const char* name);
  void tab(outputStream* st);
#endif
};

// BitData
//
// A BitData holds a flag or two in its header.
class BitData : public ProfileData {
protected:
  enum {
    // null_seen:
    //  saw a null operand (cast/aastore/instanceof)
    null_seen_flag = DataLayout::first_flag + 0
  };
  enum { bit_cell_count = 0 };  // no additional data fields needed.
public:
  BitData(DataLayout* layout) : ProfileData(layout) {
  }

  virtual bool is_BitData() { return true; }

  static int static_cell_count() {
    return bit_cell_count;
  }

  virtual int cell_count() {
    return static_cell_count();
  }

  // Accessor

  // The null_seen flag bit is specially known to the interpreter.
  // Consulting it allows the compiler to avoid setting up null_check traps.
  bool null_seen()     { return flag_at(null_seen_flag); }
  void set_null_seen() { set_flag_at(null_seen_flag); }


  // Code generation support
  static int null_seen_byte_constant() {
    return flag_number_to_byte_constant(null_seen_flag);
  }

  static ByteSize bit_data_size() {
    return cell_offset(bit_cell_count);
  }

#ifndef PRODUCT
  void print_data_on(outputStream* st);
#endif
};

// CounterData
//
// A CounterData corresponds to a simple counter.
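// It adds a single cell to BitData, count_off, which holds an execution
// count for the associated bci; see count() and count_offset() below.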
class CounterData : public BitData {
protected:
  enum {
    count_off,
    counter_cell_count
  };
public:
  CounterData(DataLayout* layout) : BitData(layout) {}

  virtual bool is_CounterData() { return true; }

  static int static_cell_count() {
    return counter_cell_count;
  }

  virtual int cell_count() {
    return static_cell_count();
  }

  // Direct accessor
  uint count() {
    return uint_at(count_off);
  }

  // Code generation support
  static ByteSize count_offset() {
    return cell_offset(count_off);
  }
  static ByteSize counter_data_size() {
    return cell_offset(counter_cell_count);
  }

#ifndef PRODUCT
  void print_data_on(outputStream* st);
#endif
};

// JumpData
//
// A JumpData is used to access profiling information for a direct
// branch.  It is a counter, used for counting the number of branches,
// plus a data displacement, used for realigning the data pointer to
// the corresponding target bci.
class JumpData : public ProfileData {
protected:
  enum {
    taken_off_set,
    displacement_off_set,
    jump_cell_count
  };

  void set_displacement(int displacement) {
    set_int_at(displacement_off_set, displacement);
  }

public:
  JumpData(DataLayout* layout) : ProfileData(layout) {
    assert(layout->tag() == DataLayout::jump_data_tag ||
           layout->tag() == DataLayout::branch_data_tag, "wrong type");
  }

  virtual bool is_JumpData() { return true; }

  static int static_cell_count() {
    return jump_cell_count;
  }

  virtual int cell_count() {
    return static_cell_count();
  }

  // Direct accessor
  uint taken() {
    return uint_at(taken_off_set);
  }
  // Saturating counter
  uint inc_taken() {
    uint cnt = taken() + 1;
    // Did we wrap? Will compiler screw us??
    if (cnt == 0) cnt--;
    set_uint_at(taken_off_set, cnt);
    return cnt;
  }

  int displacement() {
    return int_at(displacement_off_set);
  }

  // Code generation support
  static ByteSize taken_offset() {
    return cell_offset(taken_off_set);
  }

  static ByteSize displacement_offset() {
    return cell_offset(displacement_off_set);
  }

  // Specific initialization.
  void post_initialize(BytecodeStream* stream, methodDataOop mdo);

#ifndef PRODUCT
  void print_data_on(outputStream* st);
#endif
};

// ReceiverTypeData
//
// A ReceiverTypeData is used to access profiling information about a
// dynamic type check.  It consists of a counter which counts the total times
// that the check is reached, and a series of (klassOop, count) pairs
// which are used to store a type profile for the receiver of the check.
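// The rows are laid out as (receiver0, count0), (receiver1, count1), ...,
// with at most TypeProfileWidth rows (see row_limit() below); a row whose
// receiver cell is NULL is unused.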
class ReceiverTypeData : public CounterData {
protected:
  enum {
    receiver0_offset = counter_cell_count,
    count0_offset,
    receiver_type_row_cell_count = (count0_offset + 1) - receiver0_offset
  };

public:
  ReceiverTypeData(DataLayout* layout) : CounterData(layout) {
    assert(layout->tag() == DataLayout::receiver_type_data_tag ||
           layout->tag() == DataLayout::virtual_call_data_tag, "wrong type");
  }

  virtual bool is_ReceiverTypeData() { return true; }

  static int static_cell_count() {
    return counter_cell_count + (uint) TypeProfileWidth * receiver_type_row_cell_count;
  }

  virtual int cell_count() {
    return static_cell_count();
  }

  // Direct accessors
  static uint row_limit() {
    return TypeProfileWidth;
  }
  static int receiver_cell_index(uint row) {
    return receiver0_offset + row * receiver_type_row_cell_count;
  }
  static int receiver_count_cell_index(uint row) {
    return count0_offset + row * receiver_type_row_cell_count;
  }

  // Get the receiver at row.  The 'unchecked' version is needed by parallel old
  // gc; it does not assert the receiver is a klass.  During compaction of the
  // perm gen, the klass may already have moved, so the is_klass() predicate
  // would fail.  The 'normal' version should be used whenever possible.
  klassOop receiver_unchecked(uint row) {
    assert(row < row_limit(), "oob");
    oop recv = oop_at(receiver_cell_index(row));
    return (klassOop)recv;
  }

  klassOop receiver(uint row) {
    klassOop recv = receiver_unchecked(row);
    assert(recv == NULL || ((oop)recv)->is_klass(), "wrong type");
    return recv;
  }

  uint receiver_count(uint row) {
    assert(row < row_limit(), "oob");
    return uint_at(receiver_count_cell_index(row));
  }

  // Code generation support
  static ByteSize receiver_offset(uint row) {
    return cell_offset(receiver_cell_index(row));
  }
  static ByteSize receiver_count_offset(uint row) {
    return cell_offset(receiver_count_cell_index(row));
  }
  static ByteSize receiver_type_data_size() {
    return cell_offset(static_cell_count());
  }

  // GC support
  virtual void follow_contents();
  virtual void oop_iterate(OopClosure* blk);
  virtual void oop_iterate_m(OopClosure* blk, MemRegion mr);
  virtual void adjust_pointers();

#ifndef SERIALGC
  // Parallel old support
  virtual void follow_contents(ParCompactionManager* cm);
  virtual void update_pointers();
  virtual void update_pointers(HeapWord* beg_addr, HeapWord* end_addr);
#endif // SERIALGC

  oop* adr_receiver(uint row) {
    return adr_oop_at(receiver_cell_index(row));
  }

#ifndef PRODUCT
  void print_receiver_data_on(outputStream* st);
  void print_data_on(outputStream* st);
#endif
};

// VirtualCallData
//
// A VirtualCallData is used to access profiling information about a
// virtual call.  For now, it has nothing more than a ReceiverTypeData.
class VirtualCallData : public ReceiverTypeData {
public:
  VirtualCallData(DataLayout* layout) : ReceiverTypeData(layout) {
    assert(layout->tag() == DataLayout::virtual_call_data_tag, "wrong type");
  }

  virtual bool is_VirtualCallData() { return true; }

  static int static_cell_count() {
    // At this point we could add more profile state, e.g., for arguments.
    // But for now it's the same size as the base record type.
    return ReceiverTypeData::static_cell_count();
  }

  virtual int cell_count() {
    return static_cell_count();
  }

  // Direct accessors
  static ByteSize virtual_call_data_size() {
    return cell_offset(static_cell_count());
  }

#ifndef PRODUCT
  void print_data_on(outputStream* st);
#endif
};

// RetData
//
// A RetData is used to access profiling information for a ret bytecode.
// It is composed of a count of the number of times that the ret has
// been executed, followed by a series of triples of the form
// (bci, count, di) which count the number of times that some bci was the
// target of the ret and cache a corresponding data displacement.
class RetData : public CounterData {
protected:
  enum {
    bci0_offset = counter_cell_count,
    count0_offset,
    displacement0_offset,
    ret_row_cell_count = (displacement0_offset + 1) - bci0_offset
  };

  void set_bci(uint row, int bci) {
    assert((uint)row < row_limit(), "oob");
    set_int_at(bci0_offset + row * ret_row_cell_count, bci);
  }
  void release_set_bci(uint row, int bci) {
    assert((uint)row < row_limit(), "oob");
    // 'release' when setting the bci acts as a valid flag for other
    // threads wrt bci_count and bci_displacement.
    release_set_int_at(bci0_offset + row * ret_row_cell_count, bci);
  }
  void set_bci_count(uint row, uint count) {
    assert((uint)row < row_limit(), "oob");
    set_uint_at(count0_offset + row * ret_row_cell_count, count);
  }
  void set_bci_displacement(uint row, int disp) {
    set_int_at(displacement0_offset + row * ret_row_cell_count, disp);
  }

public:
  RetData(DataLayout* layout) : CounterData(layout) {
    assert(layout->tag() == DataLayout::ret_data_tag, "wrong type");
  }

  virtual bool is_RetData() { return true; }

  enum {
    no_bci = -1 // value of bci when bci1/2 are not in use.
  };

  static int static_cell_count() {
    return counter_cell_count + (uint) BciProfileWidth * ret_row_cell_count;
  }

  virtual int cell_count() {
    return static_cell_count();
  }

  static uint row_limit() {
    return BciProfileWidth;
  }
  static int bci_cell_index(uint row) {
    return bci0_offset + row * ret_row_cell_count;
  }
  static int bci_count_cell_index(uint row) {
    return count0_offset + row * ret_row_cell_count;
  }
  static int bci_displacement_cell_index(uint row) {
    return displacement0_offset + row * ret_row_cell_count;
  }

  // Direct accessors
  int bci(uint row) {
    return int_at(bci_cell_index(row));
  }
  uint bci_count(uint row) {
    return uint_at(bci_count_cell_index(row));
  }
  int bci_displacement(uint row) {
    return int_at(bci_displacement_cell_index(row));
  }

  // Interpreter Runtime support
  address fixup_ret(int return_bci, methodDataHandle mdo);

  // Code generation support
  static ByteSize bci_offset(uint row) {
    return cell_offset(bci_cell_index(row));
  }
  static ByteSize bci_count_offset(uint row) {
    return cell_offset(bci_count_cell_index(row));
  }
  static ByteSize bci_displacement_offset(uint row) {
    return cell_offset(bci_displacement_cell_index(row));
  }

  // Specific initialization.
  void post_initialize(BytecodeStream* stream, methodDataOop mdo);

#ifndef PRODUCT
  void print_data_on(outputStream* st);
#endif
};

// BranchData
//
// A BranchData is used to access profiling data for a two-way branch.
// It consists of taken and not_taken counts as well as a data displacement
// for the taken case.
class BranchData : public JumpData {
protected:
  enum {
    not_taken_off_set = jump_cell_count,
    branch_cell_count
  };

  void set_displacement(int displacement) {
    set_int_at(displacement_off_set, displacement);
  }

public:
  BranchData(DataLayout* layout) : JumpData(layout) {
    assert(layout->tag() == DataLayout::branch_data_tag, "wrong type");
  }

  virtual bool is_BranchData() { return true; }

  static int static_cell_count() {
    return branch_cell_count;
  }

  virtual int cell_count() {
    return static_cell_count();
  }

  // Direct accessor
  uint not_taken() {
    return uint_at(not_taken_off_set);
  }

  uint inc_not_taken() {
    uint cnt = not_taken() + 1;
    // Did we wrap? Will compiler screw us??
    if (cnt == 0) cnt--;
    set_uint_at(not_taken_off_set, cnt);
    return cnt;
  }

  // Code generation support
  static ByteSize not_taken_offset() {
    return cell_offset(not_taken_off_set);
  }
  static ByteSize branch_data_size() {
    return cell_offset(branch_cell_count);
  }

  // Specific initialization.
  void post_initialize(BytecodeStream* stream, methodDataOop mdo);

#ifndef PRODUCT
  void print_data_on(outputStream* st);
#endif
};

// ArrayData
//
// An ArrayData is a base class for accessing profiling data which does
// not have a statically known size.  It consists of an array length
// and an array start.
class ArrayData : public ProfileData {
protected:
  friend class DataLayout;

  enum {
    array_len_off_set,
    array_start_off_set
  };

  uint array_uint_at(int index) {
    int aindex = index + array_start_off_set;
    return uint_at(aindex);
  }
  int array_int_at(int index) {
    int aindex = index + array_start_off_set;
    return int_at(aindex);
  }
  oop array_oop_at(int index) {
    int aindex = index + array_start_off_set;
    return oop_at(aindex);
  }
  void array_set_int_at(int index, int value) {
    int aindex = index + array_start_off_set;
    set_int_at(aindex, value);
  }

  // Code generation support for subclasses.
  static ByteSize array_element_offset(int index) {
    return cell_offset(array_start_off_set + index);
  }

public:
  ArrayData(DataLayout* layout) : ProfileData(layout) {}

  virtual bool is_ArrayData() { return true; }

  static int static_cell_count() {
    return -1;
  }

  int array_len() {
    return int_at_unchecked(array_len_off_set);
  }

  virtual int cell_count() {
    return array_len() + 1;
  }

  // Code generation support
  static ByteSize array_len_offset() {
    return cell_offset(array_len_off_set);
  }
  static ByteSize array_start_offset() {
    return cell_offset(array_start_off_set);
  }
};

// MultiBranchData
//
// A MultiBranchData is used to access profiling information for
// a multi-way branch (*switch bytecodes).  It consists of a series
// of (count, displacement) pairs, which count the number of times each
// case was taken and specify the data displacement for each branch target.
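// The underlying array is laid out as [default_count, default_displacement,
// count_0, displacement_0, ..., count_n-1, displacement_n-1]; see the
// offset enums below.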
class MultiBranchData : public ArrayData {
protected:
  enum {
    default_count_off_set,
    default_disaplacement_off_set,
    case_array_start
  };
  enum {
    relative_count_off_set,
    relative_displacement_off_set,
    per_case_cell_count
  };

  void set_default_displacement(int displacement) {
    array_set_int_at(default_disaplacement_off_set, displacement);
  }
  void set_displacement_at(int index, int displacement) {
    array_set_int_at(case_array_start +
                     index * per_case_cell_count +
                     relative_displacement_off_set,
                     displacement);
  }

public:
  MultiBranchData(DataLayout* layout) : ArrayData(layout) {
    assert(layout->tag() == DataLayout::multi_branch_data_tag, "wrong type");
  }

  virtual bool is_MultiBranchData() { return true; }

  static int compute_cell_count(BytecodeStream* stream);

  int number_of_cases() {
    int alen = array_len() - 2; // get rid of default case here.
    assert(alen % per_case_cell_count == 0, "must be even");
    return (alen / per_case_cell_count);
  }

  uint default_count() {
    return array_uint_at(default_count_off_set);
  }
  int default_displacement() {
    return array_int_at(default_disaplacement_off_set);
  }

  uint count_at(int index) {
    return array_uint_at(case_array_start +
                         index * per_case_cell_count +
                         relative_count_off_set);
  }
  int displacement_at(int index) {
    return array_int_at(case_array_start +
                        index * per_case_cell_count +
                        relative_displacement_off_set);
  }

  // Code generation support
  static ByteSize default_count_offset() {
    return array_element_offset(default_count_off_set);
  }
  static ByteSize default_displacement_offset() {
    return array_element_offset(default_disaplacement_off_set);
  }
  static ByteSize case_count_offset(int index) {
    return case_array_offset() +
           (per_case_size() * index) +
           relative_count_offset();
  }
  static ByteSize case_array_offset() {
    return array_element_offset(case_array_start);
  }
  static ByteSize per_case_size() {
    return in_ByteSize(per_case_cell_count) * cell_size;
  }
  static ByteSize relative_count_offset() {
    return in_ByteSize(relative_count_off_set) * cell_size;
  }
  static ByteSize relative_displacement_offset() {
    return in_ByteSize(relative_displacement_off_set) * cell_size;
  }

  // Specific initialization.
  void post_initialize(BytecodeStream* stream, methodDataOop mdo);

#ifndef PRODUCT
  void print_data_on(outputStream* st);
#endif
};

class ArgInfoData : public ArrayData {

public:
  ArgInfoData(DataLayout* layout) : ArrayData(layout) {
    assert(layout->tag() == DataLayout::arg_info_data_tag, "wrong type");
  }

  virtual bool is_ArgInfoData() { return true; }


  int number_of_args() {
    return array_len();
  }

  uint arg_modified(int arg) {
    return array_uint_at(arg);
  }

  void set_arg_modified(int arg, uint val) {
    array_set_int_at(arg, val);
  }

#ifndef PRODUCT
  void print_data_on(outputStream* st);
#endif
};

// methodDataOop
//
// A methodDataOop holds information which has been collected about
// a method.  Its layout looks like this:
//
// -----------------------------
// | header                    |
// | klass                     |
// -----------------------------
// | method                    |
// | size of the methodDataOop |
// -----------------------------
// | Data entries...           |
// |   (variable size)         |
// |                           |
// .                           .
// .                           .
// .                           .
// |                           |
// -----------------------------
//
// The data entry area is a heterogeneous array of DataLayouts. Each
// DataLayout in the array corresponds to a specific bytecode in the
// method.  The entries in the array are sorted by the corresponding
// bytecode.  Access to the data is via resource-allocated ProfileData,
// which point to the underlying blocks of DataLayout structures.
//
// During interpretation, if profiling is enabled, the interpreter
// maintains a method data pointer (mdp), which points at the entry
// in the array corresponding to the current bci.  In the course of
// interpretation, when a bytecode is encountered that has profile data
// associated with it, the entry pointed to by mdp is updated, then the
// mdp is adjusted to point to the next appropriate DataLayout.  If mdp
// is NULL to begin with, the interpreter assumes that the current method
// is not (yet) being profiled.
//
// In methodDataOop parlance, "dp" is a "data pointer", the actual address
// of a DataLayout element.  A "di" is a "data index", the offset in bytes
// from the base of the data entry array.  A "displacement" is the byte offset
// in certain ProfileData objects that indicates the amount the mdp must be
// adjusted in the event of a change in control flow.
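//
// For example (an illustrative sketch, using the accessors declared below):
// given a dp for the current bci, di == dp_to_di(dp) == dp - (address)_data,
// and when the interpreter takes a profiled branch it bumps its mdp by the
// displacement recorded in the corresponding JumpData/BranchData entry.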
//

class methodDataOopDesc : public oopDesc {
  friend class VMStructs;
private:
  friend class ProfileData;

  // Back pointer to the methodOop
  methodOop _method;

  // Size of this oop in bytes
  int _size;

  // Cached hint for bci_to_dp and bci_to_data
  int _hint_di;

  // Whole-method sticky bits and flags
public:
  enum {
    _trap_hist_limit    = 16,   // decoupled from Deoptimization::Reason_LIMIT
    _trap_hist_mask     = max_jubyte,
    _extra_data_count   = 4     // extra DataLayout headers, for trap history
  }; // Public flag values
private:
  uint _nof_decompiles;             // count of all nmethod removals
  uint _nof_overflow_recompiles;    // recompile count, excluding recomp. bits
  uint _nof_overflow_traps;         // trap count, excluding _trap_hist
  union {
    intptr_t _align;
    u1 _array[_trap_hist_limit];
  } _trap_hist;

  // Support for interprocedural escape analysis, from Thomas Kotzmann.
  intx _eflags;        // flags on escape information
  intx _arg_local;     // bit set of non-escaping arguments
  intx _arg_stack;     // bit set of stack-allocatable arguments
  intx _arg_returned;  // bit set of returned arguments

  int _creation_mileage; // method mileage at MDO creation

  // Size of _data array in bytes.  (Excludes header and extra_data fields.)
  int _data_size;

  // Beginning of the data entries
  intptr_t _data[1];

  // Helper for size computation
  static int compute_data_size(BytecodeStream* stream);
  static int bytecode_cell_count(Bytecodes::Code code);
  enum { no_profile_data = -1, variable_cell_count = -2 };

  // Helper for initialization
  DataLayout* data_layout_at(int data_index) {
    assert(data_index % sizeof(intptr_t) == 0, "unaligned");
    return (DataLayout*) (((address)_data) + data_index);
  }

  // Initialize an individual data segment.  Returns the size of
  // the segment in bytes.
  int initialize_data(BytecodeStream* stream, int data_index);

  // Helper for data_at
  DataLayout* limit_data_position() {
    return (DataLayout*)((address)data_base() + _data_size);
  }
  bool out_of_bounds(int data_index) {
    return data_index >= data_size();
  }

  // Give each of the data entries a chance to perform specific
  // data initialization.
  void post_initialize(BytecodeStream* stream);

  // hint accessors
  int hint_di() const { return _hint_di; }
  void set_hint_di(int di) {
    assert(!out_of_bounds(di), "hint_di out of bounds");
    _hint_di = di;
  }
  ProfileData* data_before(int bci) {
    // avoid SEGV on this edge case
    if (data_size() == 0)
      return NULL;
    int hint = hint_di();
    if (data_layout_at(hint)->bci() <= bci)
      return data_at(hint);
    return first_data();
  }

  // What is the index of the first data entry?
  int first_di() { return 0; }

  // Find or create an extra ProfileData:
  ProfileData* bci_to_extra_data(int bci, bool create_if_missing);

  // return the argument info cell
  ArgInfoData *arg_info();

public:
  static int header_size() {
    return sizeof(methodDataOopDesc)/wordSize;
  }

  // Compute the size of a methodDataOop before it is created.
  static int compute_allocation_size_in_bytes(methodHandle method);
  static int compute_allocation_size_in_words(methodHandle method);
  static int compute_extra_data_count(int data_size, int empty_bc_count);

  // Determine if a given bytecode can have profile information.
  static bool bytecode_has_profile(Bytecodes::Code code) {
    return bytecode_cell_count(code) != no_profile_data;
  }

  // Perform initialization of a new methodDataOop
  void initialize(methodHandle method);

  // My size
  int object_size_in_bytes() { return _size; }
  int object_size() {
    return align_object_size(align_size_up(_size, BytesPerWord)/BytesPerWord);
  }

  int creation_mileage() const      { return _creation_mileage; }
  void set_creation_mileage(int x)  { _creation_mileage = x; }
  bool is_mature() const;  // consult mileage and ProfileMaturityPercentage
  static int mileage_of(methodOop m);

  // Support for interprocedural escape analysis, from Thomas Kotzmann.
  enum EscapeFlag {
    estimated         = 1 << 0,
    return_local      = 1 << 1,
    return_allocated  = 1 << 2,
    allocated_escapes = 1 << 3,
    unknown_modified  = 1 << 4
  };

  intx eflags()       { return _eflags; }
  intx arg_local()    { return _arg_local; }
  intx arg_stack()    { return _arg_stack; }
  intx arg_returned() { return _arg_returned; }
  uint arg_modified(int a) {
    ArgInfoData *aid = arg_info();
    assert(a >= 0 && a < aid->number_of_args(), "valid argument number");
    return aid->arg_modified(a);
  }

  void set_eflags(intx v)       { _eflags = v; }
  void set_arg_local(intx v)    { _arg_local = v; }
  void set_arg_stack(intx v)    { _arg_stack = v; }
  void set_arg_returned(intx v) { _arg_returned = v; }
  void set_arg_modified(int a, uint v) {
    ArgInfoData *aid = arg_info();
    assert(a >= 0 && a < aid->number_of_args(), "valid argument number");
    aid->set_arg_modified(a, v);
  }

  void clear_escape_info() { _eflags = _arg_local = _arg_stack = _arg_returned = 0; }

  // Location and size of data area
  address data_base() const {
    return (address) _data;
  }
  int data_size() {
    return _data_size;
  }

  // Accessors
  methodOop method() { return _method; }

  // Get the data at an arbitrary (sort of) data index.
  ProfileData* data_at(int data_index);

  // Walk through the data in order.
  ProfileData* first_data() { return data_at(first_di()); }
  ProfileData* next_data(ProfileData* current);
  bool is_valid(ProfileData* current) { return current != NULL; }

  // Convert a dp (data pointer) to a di (data index).
  int dp_to_di(address dp) {
    return dp - ((address)_data);
  }

  address di_to_dp(int di) {
    return (address)data_layout_at(di);
  }

  // bci to di/dp conversion.
  address bci_to_dp(int bci);
  int bci_to_di(int bci) {
    return dp_to_di(bci_to_dp(bci));
  }

  // Get the data at an arbitrary bci, or NULL if there is none.
  ProfileData* bci_to_data(int bci);

  // Same, but try to create an extra_data record if one is needed:
  ProfileData* allocate_bci_to_data(int bci) {
    ProfileData* data = bci_to_data(bci);
    return (data != NULL) ? data : bci_to_extra_data(bci, true);
  }

  // Add a handful of extra data records, for trap tracking.
  DataLayout* extra_data_base() { return limit_data_position(); }
  DataLayout* extra_data_limit() { return (DataLayout*)((address)this + object_size_in_bytes()); }
  int extra_data_size() { return (address)extra_data_limit()
                               - (address)extra_data_base(); }
  static DataLayout* next_extra(DataLayout* dp) { return (DataLayout*)((address)dp + in_bytes(DataLayout::cell_offset(0))); }

  // Return (uint)-1 for overflow.
  uint trap_count(int reason) const {
    assert((uint)reason < _trap_hist_limit, "oob");
    return (int)((_trap_hist._array[reason]+1) & _trap_hist_mask) - 1;
  }
  // For loops:
  static uint trap_reason_limit() { return _trap_hist_limit; }
  static uint trap_count_limit()  { return _trap_hist_mask; }
  uint inc_trap_count(int reason) {
    // Count another trap, anywhere in this method.
    assert(reason >= 0, "must be single trap");
    if ((uint)reason < _trap_hist_limit) {
      uint cnt1 = 1 + _trap_hist._array[reason];
      if ((cnt1 & _trap_hist_mask) != 0) {  // if no counter overflow...
        _trap_hist._array[reason] = cnt1;
        return cnt1;
      } else {
        return _trap_hist_mask + (++_nof_overflow_traps);
      }
    } else {
      // Could not represent the count in the histogram.
      return (++_nof_overflow_traps);
    }
  }

  uint overflow_trap_count() const {
    return _nof_overflow_traps;
  }
  uint overflow_recompile_count() const {
    return _nof_overflow_recompiles;
  }
  void inc_overflow_recompile_count() {
    _nof_overflow_recompiles += 1;
  }
  uint decompile_count() const {
    return _nof_decompiles;
  }
  void inc_decompile_count() {
    _nof_decompiles += 1;
  }

  // Support for code generation
  static ByteSize data_offset() {
    return byte_offset_of(methodDataOopDesc, _data[0]);
  }

  // GC support
  oop* adr_method() const { return (oop*)&_method; }
  bool object_is_parsable() const { return _size != 0; }
  void set_object_is_parsable(int object_size_in_bytes) { _size = object_size_in_bytes; }

#ifndef PRODUCT
  // printing support for method data
  void print_data_on(outputStream* st);
#endif

  // verification
  void verify_data_on(outputStream* st);
};
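
// Illustrative usage sketch (not part of the declared interface, and assuming
// an already-initialized methodDataOop mdo): the data entries can be walked
// in bci order with the iteration accessors declared above.
//
//   for (ProfileData* data = mdo->first_data();
//        mdo->is_valid(data);
//        data = mdo->next_data(data)) {
//     // data->bci(), data->trap_state(), and the as_*() downcasts give
//     // access to the type-specific profile at this bytecode.
//   }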