src/share/vm/memory/resourceArea.hpp

author       zgu
date         Wed, 27 Aug 2014 08:19:12 -0400
changeset    7074:833b0f92429a
parent       5409:dbc0b5dc08f5
child        7535:7ae4e26cb1e0
child        9055:e4e58811ed1b
permissions  -rw-r--r--

8046598: Scalable Native memory tracking development
Summary: Enhance scalability of native memory tracking
Reviewed-by: coleenp, ctornqvi, gtriantafill

/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_RESOURCEAREA_HPP
#define SHARE_VM_MEMORY_RESOURCEAREA_HPP

#include "memory/allocation.hpp"
#include "runtime/thread.inline.hpp"

// The resource area holds temporary data structures in the VM.
// The actual allocation areas are thread local. Typical usage:
//
//   ...
//   {
//     ResourceMark rm;
//     int* foo = NEW_RESOURCE_ARRAY(int, 64);
//     ...
//   }
//   ...

//------------------------------ResourceArea-----------------------------------
// A ResourceArea is an Arena that supports safe usage of ResourceMark.
class ResourceArea: public Arena {
  friend class ResourceMark;
  friend class DeoptResourceMark;
  friend class VMStructs;
  debug_only(int _nesting;)             // current # of nested ResourceMarks
  debug_only(static int _warned;)       // to suppress multiple warnings

public:
  ResourceArea() : Arena(mtThread) {
    debug_only(_nesting = 0;)
  }

  ResourceArea(size_t init_size) : Arena(mtThread, init_size) {
    debug_only(_nesting = 0;);
  }

  char* allocate_bytes(size_t size, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
#ifdef ASSERT
    if (_nesting < 1 && !_warned++)
      fatal("memory leak: allocating without ResourceMark");
    if (UseMallocOnly) {
      // use malloc, but save pointer in res. area for later freeing
      char** save = (char**)internal_malloc_4(sizeof(char*));
      return (*save = (char*)os::malloc(size, mtThread, CURRENT_PC));
    }
#endif
    return (char*)Amalloc(size, alloc_failmode);
  }

  debug_only(int nesting() const { return _nesting; });
};
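
// Illustrative sketch (not part of the original header): typical interplay
// between the thread-local ResourceArea and a ResourceMark. The helper name
// below is hypothetical and only shows the intended usage pattern.
//
//   void example(Thread* thread) {
//     ResourceMark rm(thread);          // records the resource area's high-water mark
//     char* buf = thread->resource_area()->allocate_bytes(128);
//     // ... use buf; the space is reclaimed when rm goes out of scope ...
//   }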

//------------------------------ResourceMark-----------------------------------
// A resource mark releases all resources allocated after it was constructed
// when the destructor is called.  Typically used as a local variable.
class ResourceMark: public StackObj {
protected:
  ResourceArea *_area;          // Resource area to stack allocate
  Chunk *_chunk;                // saved arena chunk
  char *_hwm, *_max;
  size_t _size_in_bytes;
#ifdef ASSERT
  Thread* _thread;
  ResourceMark* _previous_resource_mark;
#endif //ASSERT

  void initialize(Thread *thread) {
    _area = thread->resource_area();
    _chunk = _area->_chunk;
    _hwm = _area->_hwm;
    _max = _area->_max;
    _size_in_bytes = _area->size_in_bytes();
    debug_only(_area->_nesting++;)
    assert( _area->_nesting > 0, "must stack allocate RMs" );
#ifdef ASSERT
    _thread = thread;
    _previous_resource_mark = thread->current_resource_mark();
    thread->set_current_resource_mark(this);
#endif // ASSERT
  }
 public:

#ifndef ASSERT
  ResourceMark(Thread *thread) {
    assert(thread == Thread::current(), "not the current thread");
    initialize(thread);
  }
#else
  ResourceMark(Thread *thread);
#endif // ASSERT

  ResourceMark()               { initialize(Thread::current()); }

  ResourceMark( ResourceArea *r ) :
    _area(r), _chunk(r->_chunk), _hwm(r->_hwm), _max(r->_max) {
    _size_in_bytes = r->_size_in_bytes;
    debug_only(_area->_nesting++;)
    assert( _area->_nesting > 0, "must stack allocate RMs" );
#ifdef ASSERT
    Thread* thread = ThreadLocalStorage::thread();
    if (thread != NULL) {
      _thread = thread;
      _previous_resource_mark = thread->current_resource_mark();
      thread->set_current_resource_mark(this);
    } else {
      _thread = NULL;
      _previous_resource_mark = NULL;
    }
#endif // ASSERT
  }

  void reset_to_mark() {
    if (UseMallocOnly) free_malloced_objects();

    if( _chunk->next() ) {       // Delete later chunks
      // Reset the arena size before deleting the later chunks; otherwise
      // the recorded arena size could exceed the total chunk size.
      assert(_area->size_in_bytes() > size_in_bytes(), "Sanity check");
      _area->set_size_in_bytes(size_in_bytes());
      _chunk->next_chop();
    } else {
      assert(_area->size_in_bytes() == size_in_bytes(), "Sanity check");
    }
    _area->_chunk = _chunk;     // Roll back arena to saved chunk
    _area->_hwm = _hwm;
    _area->_max = _max;

    // clear out this chunk (to detect allocation bugs)
    if (ZapResourceArea) memset(_hwm, badResourceValue, _max - _hwm);
  }

  ~ResourceMark() {
    assert( _area->_nesting > 0, "must stack allocate RMs" );
    debug_only(_area->_nesting--;)
    reset_to_mark();
#ifdef ASSERT
    if (_thread != NULL) {
      _thread->set_current_resource_mark(_previous_resource_mark);
    }
#endif // ASSERT
  }


 private:
  void free_malloced_objects()                                         PRODUCT_RETURN;
  size_t size_in_bytes() { return _size_in_bytes; }
};
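
// Illustrative sketch (not part of the original header): nested ResourceMarks
// release only what was allocated after they were constructed, so pointers
// obtained under an inner mark must not escape to an outer scope.
//
//   {
//     ResourceMark rm1;
//     char* a = NEW_RESOURCE_ARRAY(char, 100);   // released when rm1 is destroyed
//     {
//       ResourceMark rm2;
//       char* b = NEW_RESOURCE_ARRAY(char, 100); // released when rm2 is destroyed
//     }                                          // 'b' is dead here; 'a' is still valid
//   }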

//------------------------------DeoptResourceMark-----------------------------------
// A deopt resource mark releases all resources allocated after it was constructed
// when the destructor is called.  Typically used as a local variable. It differs
// from a typical resource mark in that it is C-heap allocated, so that deoptimization
// can use data structures that are arena based but are not amenable to vanilla
// ResourceMarks, because deoptimization cannot use a stack allocated mark. During
// deoptimization we go through the following steps:
//
// 0: start in assembly stub and call either uncommon_trap/fetch_unroll_info
// 1: create the vframeArray (contains pointers to Resource allocated structures)
//    This allocates the DeoptResourceMark.
// 2: return to assembly stub and remove stub frame and deoptee frame and create
//    the new skeletal frames.
// 3: push new stub frame and call unpack_frames
// 4: retrieve information from the vframeArray to populate the skeletal frames
// 5: release the DeoptResourceMark
// 6: return to stub and eventually to interpreter
//
// With old-style eager deoptimization the vframeArray was created by the VM thread,
// so there was no way for the vframeArray to contain resource allocated objects;
// instead, a complex set of data structures simulating an array of vframes in
// C-heap memory was used. With new-style lazy deoptimization the vframeArray is
// created in the thread that will use it, and we can use a much simpler scheme
// for the vframeArray, leveraging existing data structures, if we simply create
// a way to manage this one special need for a ResourceMark. If ResourceMark simply
// inherited from CHeapObj, existing ResourceMarks would work fine, since no one
// uses new to allocate them and they would still be stack allocated. But that would
// leave open the possibility of accidental misuse, so we simply duplicate the
// ResourceMark functionality here.
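
// Illustrative sketch (not part of the original header): because the mark is
// C-heap allocated, it can be created in one step of deoptimization and
// released in a later one, unlike a stack allocated ResourceMark.
//
//   DeoptResourceMark* dmark = new DeoptResourceMark(thread); // step 1: before building the vframeArray
//   // ... resource allocations referenced by the vframeArray ...
//   delete dmark;                                             // step 5: rolls the resource area back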

class DeoptResourceMark: public CHeapObj<mtInternal> {
protected:
  ResourceArea *_area;          // Resource area to stack allocate
  Chunk *_chunk;                // saved arena chunk
  char *_hwm, *_max;
  size_t _size_in_bytes;

  void initialize(Thread *thread) {
    _area = thread->resource_area();
    _chunk = _area->_chunk;
    _hwm = _area->_hwm;
    _max = _area->_max;
    _size_in_bytes = _area->size_in_bytes();
    debug_only(_area->_nesting++;)
    assert( _area->_nesting > 0, "must stack allocate RMs" );
  }

 public:

#ifndef ASSERT
  DeoptResourceMark(Thread *thread) {
    assert(thread == Thread::current(), "not the current thread");
    initialize(thread);
  }
#else
  DeoptResourceMark(Thread *thread);
#endif // ASSERT

  DeoptResourceMark()               { initialize(Thread::current()); }

  DeoptResourceMark( ResourceArea *r ) :
    _area(r), _chunk(r->_chunk), _hwm(r->_hwm), _max(r->_max) {
    _size_in_bytes = _area->size_in_bytes();
    debug_only(_area->_nesting++;)
    assert( _area->_nesting > 0, "must stack allocate RMs" );
  }

  void reset_to_mark() {
    if (UseMallocOnly) free_malloced_objects();

    if( _chunk->next() ) {        // Delete later chunks
      // Reset the arena size before deleting the later chunks; otherwise
      // the recorded arena size could exceed the total chunk size.
      assert(_area->size_in_bytes() > size_in_bytes(), "Sanity check");
      _area->set_size_in_bytes(size_in_bytes());
      _chunk->next_chop();
    } else {
      assert(_area->size_in_bytes() == size_in_bytes(), "Sanity check");
    }
    _area->_chunk = _chunk;     // Roll back arena to saved chunk
    _area->_hwm = _hwm;
    _area->_max = _max;

    // clear out this chunk (to detect allocation bugs)
    if (ZapResourceArea) memset(_hwm, badResourceValue, _max - _hwm);
  }

  ~DeoptResourceMark() {
    assert( _area->_nesting > 0, "must stack allocate RMs" );
    debug_only(_area->_nesting--;)
    reset_to_mark();
  }


 private:
  void free_malloced_objects()                                         PRODUCT_RETURN;
  size_t size_in_bytes() { return _size_in_bytes; };
};

#endif // SHARE_VM_MEMORY_RESOURCEAREA_HPP
