src/share/vm/utilities/stack.inline.hpp

author:      shade
date:        Tue, 18 Jul 2017 09:53:54 +0200
changeset:   9997:c7ef664f8649
parent:      9316:a27880c1288b
child:       10015:eb7ce841ccec
permissions: -rw-r--r--

8184762: ZapStackSegments should use optimized memset
Reviewed-by: rkennke, mgerdin

/*
 * Copyright (c) 2009, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_UTILITIES_STACK_INLINE_HPP
#define SHARE_VM_UTILITIES_STACK_INLINE_HPP

#include "utilities/stack.hpp"
#include "utilities/copy.hpp"
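
// A Stack<E, F> stores its elements in a chain of heap-allocated segments.
// Each segment holds _seg_size elements of type E followed by a link pointer
// to the previous (older) segment; see link_offset() and segment_bytes()
// below.  Segments emptied by pop() can be kept in a small cache and reused
// by a later push_segment().
//
// Minimal usage sketch (illustrative only; the element type, the MEMFLAGS
// value and the reliance on the default constructor arguments declared in
// stack.hpp are examples, not requirements of this header):
//
//   Stack<oop, mtGC> stack;
//   stack.push(obj);
//   while (!stack.is_empty()) {
//     oop o = stack.pop();
//     // ... process o ...
//   }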

template <MEMFLAGS F> StackBase<F>::StackBase(size_t segment_size, size_t max_cache_size,
                     size_t max_size):
  _seg_size(segment_size),
  _max_cache_size(max_cache_size),
  _max_size(adjust_max_size(max_size, segment_size))
{
  assert(_max_size % _seg_size == 0, "not a multiple");
}
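
// Round max_size up to a whole number of segments.  Zero (or a value too
// large to round up without overflow) means "no limit" and is clamped to the
// largest representable multiple of seg_size.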
template <MEMFLAGS F> size_t StackBase<F>::adjust_max_size(size_t max_size, size_t seg_size)
{
  assert(seg_size > 0, "cannot be 0");
  assert(max_size >= seg_size || max_size == 0, "max_size too small");
  const size_t limit = max_uintx - (seg_size - 1);
  if (max_size == 0 || max_size > limit) {
    max_size = limit;
  }
  return (max_size + seg_size - 1) / seg_size * seg_size;
}

template <class E, MEMFLAGS F>
Stack<E, F>::Stack(size_t segment_size, size_t max_cache_size, size_t max_size):
  StackBase<F>(adjust_segment_size(segment_size), max_cache_size, max_size)
{
  reset(true);
}

template <class E, MEMFLAGS F>
void Stack<E, F>::push(E item)
{
  assert(!is_full(), "pushing onto a full stack");
  if (this->_cur_seg_size == this->_seg_size) {
    push_segment();
  }
  this->_cur_seg[this->_cur_seg_size] = item;
  ++this->_cur_seg_size;
}

template <class E, MEMFLAGS F>
E Stack<E, F>::pop()
{
  assert(!is_empty(), "popping from an empty stack");
  if (this->_cur_seg_size == 1) {
    E tmp = _cur_seg[--this->_cur_seg_size];
    pop_segment();
    return tmp;
  }
  return this->_cur_seg[--this->_cur_seg_size];
}

template <class E, MEMFLAGS F>
void Stack<E, F>::clear(bool clear_cache)
{
  free_segments(_cur_seg);
  if (clear_cache) free_segments(_cache);
  reset(clear_cache);
}
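
// When elements are smaller than a pointer, round the segment size up so that
// the elements exactly fill a pointer-aligned area and the link pointer that
// follows them needs no padding.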
template <class E, MEMFLAGS F>
size_t Stack<E, F>::adjust_segment_size(size_t seg_size)
{
  const size_t elem_sz = sizeof(E);
  const size_t ptr_sz = sizeof(E*);
  assert(elem_sz % ptr_sz == 0 || ptr_sz % elem_sz == 0, "bad element size");
  if (elem_sz < ptr_sz) {
    return align_size_up(seg_size * elem_sz, ptr_sz) / elem_sz;
  }
  return seg_size;
}
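
// The link to the previous segment is stored immediately after the elements:
// link_offset() is its byte offset within a segment and segment_bytes() is
// the total allocation size (elements plus link).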
template <class E, MEMFLAGS F>
size_t Stack<E, F>::link_offset() const
{
  return align_size_up(this->_seg_size * sizeof(E), sizeof(E*));
}

template <class E, MEMFLAGS F>
size_t Stack<E, F>::segment_bytes() const
{
  return link_offset() + sizeof(E*);
}

template <class E, MEMFLAGS F>
E** Stack<E, F>::link_addr(E* seg) const
{
  return (E**) ((char*)seg + link_offset());
}

template <class E, MEMFLAGS F>
E* Stack<E, F>::get_link(E* seg) const
{
  return *link_addr(seg);
}

template <class E, MEMFLAGS F>
E* Stack<E, F>::set_link(E* new_seg, E* old_seg)
{
  *link_addr(new_seg) = old_seg;
  return new_seg;
}

template <class E, MEMFLAGS F>
E* Stack<E, F>::alloc(size_t bytes)
{
  return (E*) NEW_C_HEAP_ARRAY(char, bytes, F);
}

template <class E, MEMFLAGS F>
void Stack<E, F>::free(E* addr, size_t bytes)
{
  FREE_C_HEAP_ARRAY(char, (char*) addr, F);
}
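
// Start a new current segment: reuse one from the cache if available,
// otherwise allocate (and, in debug builds, zap) a fresh one, then link it in
// front of the old current segment.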
template <class E, MEMFLAGS F>
void Stack<E, F>::push_segment()
{
  assert(this->_cur_seg_size == this->_seg_size, "current segment is not full");
  E* next;
  if (this->_cache_size > 0) {
    // Use a cached segment.
    next = _cache;
    _cache = get_link(_cache);
    --this->_cache_size;
  } else {
    next = alloc(segment_bytes());
    DEBUG_ONLY(zap_segment(next, true);)
  }
  const bool at_empty_transition = is_empty();
  this->_cur_seg = set_link(next, _cur_seg);
  this->_cur_seg_size = 0;
  this->_full_seg_size += at_empty_transition ? 0 : this->_seg_size;
  DEBUG_ONLY(verify(at_empty_transition);)
}
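
// Retire the (now empty) current segment: keep it in the cache if there is
// room, otherwise free it, and make the previous segment current again.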
template <class E, MEMFLAGS F>
void Stack<E, F>::pop_segment()
{
  assert(this->_cur_seg_size == 0, "current segment is not empty");
  E* const prev = get_link(_cur_seg);
  if (this->_cache_size < this->_max_cache_size) {
    // Add the current segment to the cache.
    DEBUG_ONLY(zap_segment(_cur_seg, false);)
    _cache = set_link(_cur_seg, _cache);
    ++this->_cache_size;
  } else {
    DEBUG_ONLY(zap_segment(_cur_seg, true);)
    free(_cur_seg, segment_bytes());
  }
  const bool at_empty_transition = prev == NULL;
  this->_cur_seg = prev;
  this->_cur_seg_size = this->_seg_size;
  this->_full_seg_size -= at_empty_transition ? 0 : this->_seg_size;
  DEBUG_ONLY(verify(at_empty_transition);)
}

template <class E, MEMFLAGS F>
void Stack<E, F>::free_segments(E* seg)
{
  const size_t bytes = segment_bytes();
  while (seg != NULL) {
    E* const prev = get_link(seg);
    free(seg, bytes);
    seg = prev;
  }
}

template <class E, MEMFLAGS F>
void Stack<E, F>::reset(bool reset_cache)
{
  this->_cur_seg_size = this->_seg_size; // So push() will alloc a new segment.
  this->_full_seg_size = 0;
  _cur_seg = NULL;
  if (reset_cache) {
    this->_cache_size = 0;
    _cache = NULL;
  }
}

#ifdef ASSERT
template <class E, MEMFLAGS F>
void Stack<E, F>::verify(bool at_empty_transition) const
{
  assert(size() <= this->max_size(), "stack exceeded bounds");
  assert(this->cache_size() <= this->max_cache_size(), "cache exceeded bounds");
  assert(this->_cur_seg_size <= this->segment_size(), "segment index exceeded bounds");

  assert(this->_full_seg_size % this->_seg_size == 0, "not a multiple");
  assert(at_empty_transition || is_empty() == (size() == 0), "mismatch");
  assert((_cache == NULL) == (this->cache_size() == 0), "mismatch");

  if (is_empty()) {
    assert(this->_cur_seg_size == this->segment_size(), "sanity");
  }
}

template <class E, MEMFLAGS F>
void Stack<E, F>::zap_segment(E* seg, bool zap_link_field) const
{
  if (!ZapStackSegments) return;
  const size_t zap_bytes = segment_bytes() - (zap_link_field ? 0 : sizeof(E*));
  Copy::fill_to_bytes(seg, zap_bytes, badStackSegVal);
}
#endif
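
// ResourceStack allocates its segments from the resource area
// (resource_allocate_bytes) instead of the C heap.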
template <class E, MEMFLAGS F>
E* ResourceStack<E, F>::alloc(size_t bytes)
{
  return (E*) resource_allocate_bytes(bytes);
}

template <class E, MEMFLAGS F>
void ResourceStack<E, F>::free(E* addr, size_t bytes)
{
  resource_free_bytes((char*) addr, bytes);
}
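
// Snapshot the stack's current position; next_addr() then walks the elements
// from the top of the stack down, following segment links, without modifying
// the stack itself.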
template <class E, MEMFLAGS F>
void StackIterator<E, F>::sync()
{
  _full_seg_size = _stack._full_seg_size;
  _cur_seg_size = _stack._cur_seg_size;
  _cur_seg = _stack._cur_seg;
}

template <class E, MEMFLAGS F>
E* StackIterator<E, F>::next_addr()
{
  assert(!is_empty(), "no items left");
  if (_cur_seg_size == 1) {
    E* addr = _cur_seg;
    _cur_seg = _stack.get_link(_cur_seg);
    _cur_seg_size = _stack.segment_size();
    _full_seg_size -= _stack.segment_size();
    return addr;
  }
  return _cur_seg + --_cur_seg_size;
}

#endif // SHARE_VM_UTILITIES_STACK_INLINE_HPP
