src/share/vm/code/stubs.cpp

changeset 0: f90c822e73f8
child 6876: 710a3c8b516e
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/share/vm/code/stubs.cpp	Wed Apr 27 01:25:04 2016 +0800
     1.3 @@ -0,0 +1,263 @@
     1.4 +/*
     1.5 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
     1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.7 + *
     1.8 + * This code is free software; you can redistribute it and/or modify it
     1.9 + * under the terms of the GNU General Public License version 2 only, as
    1.10 + * published by the Free Software Foundation.
    1.11 + *
    1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    1.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    1.15 + * version 2 for more details (a copy is included in the LICENSE file that
    1.16 + * accompanied this code).
    1.17 + *
    1.18 + * You should have received a copy of the GNU General Public License version
    1.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    1.21 + *
    1.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    1.23 + * or visit www.oracle.com if you need additional information or have any
    1.24 + * questions.
    1.25 + *
    1.26 + */
    1.27 +
    1.28 +#include "precompiled.hpp"
    1.29 +#include "code/codeBlob.hpp"
    1.30 +#include "code/stubs.hpp"
    1.31 +#include "memory/allocation.inline.hpp"
    1.32 +#include "oops/oop.inline.hpp"
    1.33 +#include "runtime/mutexLocker.hpp"
    1.34 +
    1.35 +
    1.36 +// Implementation of StubQueue
    1.37 +//
    1.38 +// Standard wrap-around queue implementation; the queue dimensions
    1.39 +// are specified by the _queue_begin & _queue_end indices. The queue
    1.40 +// can be in two states (transparent to the outside):
    1.41 +//
    1.42 +// a) contiguous state: all queue entries in one block (or empty)
    1.43 +//
    1.44 +// Queue: |...|XXXXXXX|...............|
    1.45 +//        ^0  ^begin  ^end            ^size = limit
    1.46 +//            |_______|
    1.47 +//            one block
    1.48 +//
    1.49 +// b) non-contiguous state: queue entries in two blocks
    1.50 +//
    1.51 +// Queue: |XXX|.......|XXXXXXX|.......|
    1.52 +//        ^0  ^end    ^begin  ^limit  ^size
    1.53 +//        |___|       |_______|
    1.54 +//         1st block  2nd block
    1.55 +//
    1.56 +// In the non-contiguous state, the wrap-around point is
    1.57 +// indicated via the _buffer_limit index since the last
     1.58 +// queue entry may not fill up the queue completely, in
    1.59 +// which case we need to know where the 2nd block's end
    1.60 +// is to do the proper wrap-around. When removing the
    1.61 +// last entry of the 2nd block, _buffer_limit is reset
    1.62 +// to _buffer_size.
    1.63 +//
    1.64 +// CAUTION: DO NOT MESS WITH THIS CODE IF YOU CANNOT PROVE
    1.65 +// ITS CORRECTNESS! THIS CODE IS MORE SUBTLE THAN IT LOOKS!
    1.66 +
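          +// Illustrative walk-through of the two states. The numbers below are made
          +// up for this comment only; they assume available_space() reports the total
          +// free space and ignore per-stub header overhead and alignment rounding.
          +//
          +//   buffer size 160, commit stubs A (64) and B (64)
          +//     => begin = 0,  end = 128, limit = 160          (contiguous)
          +//   remove_first() retires A
          +//     => begin = 64, end = 128, limit = 160          (contiguous)
          +//   request C (48): 128 + 48 > 160, so reduce limit and wrap around;
          +//   C is placed at offset 0 and committed
          +//     => begin = 64, end = 48,  limit = 128          (non-contiguous)
          +//   remove_first() retires B; begin reaches limit, so both are reset
          +//     => begin = 0,  end = 48,  limit = 160          (contiguous again)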
    1.67 +
    1.68 +StubQueue::StubQueue(StubInterface* stub_interface, int buffer_size,
    1.69 +                     Mutex* lock, const char* name) : _mutex(lock) {
    1.70 +  intptr_t size = round_to(buffer_size, 2*BytesPerWord);
    1.71 +  BufferBlob* blob = BufferBlob::create(name, size);
     1.72 +  if (blob == NULL) {
    1.73 +    vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, err_msg("CodeCache: no room for %s", name));
    1.74 +  }
    1.75 +  _stub_interface  = stub_interface;
    1.76 +  _buffer_size     = blob->content_size();
    1.77 +  _buffer_limit    = blob->content_size();
    1.78 +  _stub_buffer     = blob->content_begin();
    1.79 +  _queue_begin     = 0;
    1.80 +  _queue_end       = 0;
    1.81 +  _number_of_stubs = 0;
    1.82 +  register_queue(this);
    1.83 +}
    1.84 +
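          +// The constructor above leaves the queue empty and in the contiguous state:
          +// _queue_begin == _queue_end == 0 and _buffer_limit == _buffer_size, with
          +// the stub buffer backed by a BufferBlob allocated in the code cache.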
    1.85 +
    1.86 +StubQueue::~StubQueue() {
     1.87 +  // Note: Currently StubQueues are never destroyed, so nothing needs to be done here.
    1.88 +  //       If we want to implement the destructor, we need to release the BufferBlob
    1.89 +  //       allocated in the constructor (i.e., we need to keep it around or look it
     1.90 +  //       up via CodeCache::find_blob(...)).
    1.91 +  Unimplemented();
    1.92 +}
    1.93 +
    1.94 +
    1.95 +Stub* StubQueue::stub_containing(address pc) const {
    1.96 +  if (contains(pc)) {
    1.97 +    for (Stub* s = first(); s != NULL; s = next(s)) {
    1.98 +      if (stub_contains(s, pc)) return s;
    1.99 +    }
   1.100 +  }
   1.101 +  return NULL;
   1.102 +}
   1.103 +
   1.104 +
   1.105 +Stub* StubQueue::request_committed(int code_size) {
   1.106 +  Stub* s = request(code_size);
   1.107 +  CodeStrings strings;
   1.108 +  if (s != NULL) commit(code_size, strings);
   1.109 +  return s;
   1.110 +}
   1.111 +
   1.112 +
   1.113 +Stub* StubQueue::request(int requested_code_size) {
   1.114 +  assert(requested_code_size > 0, "requested_code_size must be > 0");
   1.115 +  if (_mutex != NULL) _mutex->lock();
   1.116 +  Stub* s = current_stub();
   1.117 +  int requested_size = round_to(stub_code_size_to_size(requested_code_size), CodeEntryAlignment);
   1.118 +  if (requested_size <= available_space()) {
   1.119 +    if (is_contiguous()) {
   1.120 +      // Queue: |...|XXXXXXX|.............|
   1.121 +      //        ^0  ^begin  ^end          ^size = limit
   1.122 +      assert(_buffer_limit == _buffer_size, "buffer must be fully usable");
   1.123 +      if (_queue_end + requested_size <= _buffer_size) {
   1.124 +        // code fits in at the end => nothing to do
   1.125 +        CodeStrings strings;
   1.126 +        stub_initialize(s, requested_size, strings);
   1.127 +        return s;
   1.128 +      } else {
   1.129 +        // stub doesn't fit in at the queue end
   1.130 +        // => reduce buffer limit & wrap around
   1.131 +        assert(!is_empty(), "just checkin'");
   1.132 +        _buffer_limit = _queue_end;
   1.133 +        _queue_end = 0;
   1.134 +      }
   1.135 +    }
   1.136 +  }
   1.137 +  if (requested_size <= available_space()) {
   1.138 +    assert(!is_contiguous(), "just checkin'");
   1.139 +    assert(_buffer_limit <= _buffer_size, "queue invariant broken");
   1.140 +    // Queue: |XXX|.......|XXXXXXX|.......|
   1.141 +    //        ^0  ^end    ^begin  ^limit  ^size
   1.142 +    s = current_stub();
   1.143 +    CodeStrings strings;
   1.144 +    stub_initialize(s, requested_size, strings);
   1.145 +    return s;
   1.146 +  }
   1.147 +  // Not enough space left
   1.148 +  if (_mutex != NULL) _mutex->unlock();
   1.149 +  return NULL;
   1.150 +}
   1.151 +
   1.152 +
   1.153 +void StubQueue::commit(int committed_code_size, CodeStrings& strings) {
   1.154 +  assert(committed_code_size > 0, "committed_code_size must be > 0");
   1.155 +  int committed_size = round_to(stub_code_size_to_size(committed_code_size), CodeEntryAlignment);
   1.156 +  Stub* s = current_stub();
   1.157 +  assert(committed_size <= stub_size(s), "committed size must not exceed requested size");
   1.158 +  stub_initialize(s, committed_size, strings);
   1.159 +  _queue_end += committed_size;
   1.160 +  _number_of_stubs++;
   1.161 +  if (_mutex != NULL) _mutex->unlock();
   1.162 +  debug_only(stub_verify(s);)
   1.163 +}
   1.164 +
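          +// Caller-side sketch of the two-step protocol above (illustrative only; the
          +// queue instance and size variables are hypothetical). request() leaves
          +// _mutex locked on success so the reserved space cannot be handed out twice;
          +// commit() fixes the final (possibly smaller) size and releases the lock.
          +//
          +//   Stub* s = queue->request(max_code_size);
          +//   if (s != NULL) {
          +//     // ... emit at most max_code_size bytes of stub code ...
          +//     CodeStrings strings;
          +//     queue->commit(actual_code_size, strings);  // actual_code_size <= max_code_size
          +//   }
          +//   // Or, if the final code size is known up front:
          +//   Stub* t = queue->request_committed(code_size);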
   1.165 +
   1.166 +void StubQueue::remove_first() {
   1.167 +  if (number_of_stubs() == 0) return;
   1.168 +  Stub* s = first();
   1.169 +  debug_only(stub_verify(s);)
   1.170 +  stub_finalize(s);
   1.171 +  _queue_begin += stub_size(s);
   1.172 +  assert(_queue_begin <= _buffer_limit, "sanity check");
   1.173 +  if (_queue_begin == _queue_end) {
   1.174 +    // buffer empty
   1.175 +    // => reset queue indices
   1.176 +    _queue_begin  = 0;
   1.177 +    _queue_end    = 0;
   1.178 +    _buffer_limit = _buffer_size;
   1.179 +  } else if (_queue_begin == _buffer_limit) {
   1.180 +    // buffer limit reached
   1.181 +    // => reset buffer limit & wrap around
   1.182 +    _buffer_limit = _buffer_size;
   1.183 +    _queue_begin = 0;
   1.184 +  }
   1.185 +  _number_of_stubs--;
   1.186 +}
   1.187 +
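          +// Note that stubs are only ever removed at the queue head (FIFO); this is
          +// what keeps the "at most two blocks" picture at the top of this file valid.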
   1.188 +
   1.189 +void StubQueue::remove_first(int n) {
   1.190 +  int i = MIN2(n, number_of_stubs());
   1.191 +  while (i-- > 0) remove_first();
   1.192 +}
   1.193 +
   1.194 +
    1.195 +void StubQueue::remove_all() {
   1.196 +  debug_only(verify();)
   1.197 +  remove_first(number_of_stubs());
   1.198 +  assert(number_of_stubs() == 0, "sanity check");
   1.199 +}
   1.200 +
   1.201 +
   1.202 +enum { StubQueueLimit = 10 };  // there are only a few in the world
   1.203 +static StubQueue* registered_stub_queues[StubQueueLimit];
   1.204 +
   1.205 +void StubQueue::register_queue(StubQueue* sq) {
   1.206 +  for (int i = 0; i < StubQueueLimit; i++) {
   1.207 +    if (registered_stub_queues[i] == NULL) {
   1.208 +      registered_stub_queues[i] = sq;
   1.209 +      return;
   1.210 +    }
   1.211 +  }
   1.212 +  ShouldNotReachHere();
   1.213 +}
   1.214 +
   1.215 +
   1.216 +void StubQueue::queues_do(void f(StubQueue* sq)) {
   1.217 +  for (int i = 0; i < StubQueueLimit; i++) {
   1.218 +    if (registered_stub_queues[i] != NULL) {
   1.219 +      f(registered_stub_queues[i]);
   1.220 +    }
   1.221 +  }
   1.222 +}
   1.223 +
   1.224 +
   1.225 +void StubQueue::stubs_do(void f(Stub* s)) {
   1.226 +  debug_only(verify();)
   1.227 +  MutexLockerEx lock(_mutex);
   1.228 +  for (Stub* s = first(); s != NULL; s = next(s)) f(s);
   1.229 +}
   1.230 +
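          +// Illustrative only: visiting every stub in every registered queue via the
          +// registry above (the callback names are hypothetical; queues_do is assumed
          +// to be a static member, as its use of the file-static registry suggests).
          +//
          +//   static void check_stub(Stub* s)        { /* ... */ }
          +//   static void check_queue(StubQueue* sq) { sq->stubs_do(check_stub); }
          +//   ...
          +//   StubQueue::queues_do(check_queue);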
   1.231 +
   1.232 +void StubQueue::verify() {
   1.233 +  // verify only if initialized
   1.234 +  if (_stub_buffer == NULL) return;
   1.235 +  MutexLockerEx lock(_mutex);
   1.236 +  // verify index boundaries
    1.237 +  guarantee(0 <= _buffer_size, "buffer size must be non-negative");
   1.238 +  guarantee(0 <= _buffer_limit && _buffer_limit <= _buffer_size , "_buffer_limit out of bounds");
   1.239 +  guarantee(0 <= _queue_begin  && _queue_begin  <  _buffer_limit, "_queue_begin out of bounds");
   1.240 +  guarantee(0 <= _queue_end    && _queue_end    <= _buffer_limit, "_queue_end   out of bounds");
   1.241 +  // verify alignment
   1.242 +  guarantee(_buffer_size  % CodeEntryAlignment == 0, "_buffer_size  not aligned");
   1.243 +  guarantee(_buffer_limit % CodeEntryAlignment == 0, "_buffer_limit not aligned");
   1.244 +  guarantee(_queue_begin  % CodeEntryAlignment == 0, "_queue_begin  not aligned");
   1.245 +  guarantee(_queue_end    % CodeEntryAlignment == 0, "_queue_end    not aligned");
   1.246 +  // verify buffer limit/size relationship
   1.247 +  if (is_contiguous()) {
   1.248 +    guarantee(_buffer_limit == _buffer_size, "_buffer_limit must equal _buffer_size");
   1.249 +  }
   1.250 +  // verify contents
   1.251 +  int n = 0;
   1.252 +  for (Stub* s = first(); s != NULL; s = next(s)) {
   1.253 +    stub_verify(s);
   1.254 +    n++;
   1.255 +  }
   1.256 +  guarantee(n == number_of_stubs(), "number of stubs inconsistent");
    1.257 +  guarantee(_queue_begin != _queue_end || n == 0, "_queue_begin == _queue_end is only allowed for an empty queue");
   1.258 +}
   1.259 +
   1.260 +
   1.261 +void StubQueue::print() {
   1.262 +  MutexLockerEx lock(_mutex);
   1.263 +  for (Stub* s = first(); s != NULL; s = next(s)) {
   1.264 +    stub_print(s);
   1.265 +  }
   1.266 +}
