src/share/vm/code/codeCache.hpp

Mon, 10 Jun 2013 11:30:51 +0200

author
sla
date
Mon, 10 Jun 2013 11:30:51 +0200
changeset 5237
f2110083203d
parent 5151
91eba9f82325
child 5792
510fbd28919c
permissions
-rw-r--r--

8005849: JEP 167: Event-Based JVM Tracing
Reviewed-by: acorn, coleenp, sla
Contributed-by: Karen Kinnear <karen.kinnear@oracle.com>, Bengt Rutisson <bengt.rutisson@oracle.com>, Calvin Cheung <calvin.cheung@oracle.com>, Erik Gahlin <erik.gahlin@oracle.com>, Erik Helin <erik.helin@oracle.com>, Jesper Wilhelmsson <jesper.wilhelmsson@oracle.com>, Keith McGuigan <keith.mcguigan@oracle.com>, Mattias Tobiasson <mattias.tobiasson@oracle.com>, Markus Gronlund <markus.gronlund@oracle.com>, Mikael Auno <mikael.auno@oracle.com>, Nils Eliasson <nils.eliasson@oracle.com>, Nils Loodin <nils.loodin@oracle.com>, Rickard Backman <rickard.backman@oracle.com>, Staffan Larsen <staffan.larsen@oracle.com>, Stefan Karlsson <stefan.karlsson@oracle.com>, Yekaterina Kantserova <yekaterina.kantserova@oracle.com>

     1 /*
     2  * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #ifndef SHARE_VM_CODE_CODECACHE_HPP
    26 #define SHARE_VM_CODE_CODECACHE_HPP
    28 #include "code/codeBlob.hpp"
    29 #include "memory/allocation.hpp"
    30 #include "memory/heap.hpp"
    31 #include "oops/instanceKlass.hpp"
    32 #include "oops/oopsHierarchy.hpp"
    34 // The CodeCache implements the code cache for various pieces of generated
    35 // code, e.g., compiled java methods, runtime stubs, transition frames, etc.
    36 // The entries in the CodeCache are all CodeBlob's.
    38 // Implementation:
    39 //   - Each CodeBlob occupies one chunk of memory.
    40 //   - Like the offset table in oldspace the zone has a table for
    41 //     locating a method given an address of an instruction.
    43 class OopClosure;
    44 class DepChange;
    46 class CodeCache : AllStatic {
    47   friend class VMStructs;
    48  private:
    49   // CodeHeap is malloc()'ed at startup and never deleted during shutdown,
    50   // so that the generated assembly code is always there when it's needed.
    51   // This may cause memory leak, but is necessary, for now. See 4423824,
    52   // 4422213 or 4436291 for details.
    53   static CodeHeap * _heap;
         // Counters for the kinds of CodeBlobs currently in the cache
         // (maintained by the allocation/free paths in the implementation file).
    54   static int _number_of_blobs;
    55   static int _number_of_adapters;
    56   static int _number_of_nmethods;
    57   static int _number_of_nmethods_with_dependencies;
         // True when inline caches should be cleaned; see needs_cache_clean()/
         // set_needs_cache_clean() accessors below.
    58   static bool _needs_cache_clean;
    59   static nmethod* _scavenge_root_nmethods;  // linked via nm->scavenge_root_link()
    60   static nmethod* _saved_nmethods;          // Linked list of speculatively disconnected nmethods.
         // Debug-only verification helpers; PRODUCT_RETURN makes these empty
         // (no-op) in product builds.
    62   static void verify_if_often() PRODUCT_RETURN;
    64   static void mark_scavenge_root_nmethods() PRODUCT_RETURN;
    65   static void verify_perm_nmethods(CodeBlobClosure* f_or_null) PRODUCT_RETURN;
         // Count of "code cache full" conditions; read via get_codemem_full_count(),
         // presumably bumped by report_codemem_full() -- confirm in the .cpp.
    67   static int _codemem_full_count;
    69  public:
    71   // Initialization
    72   static void initialize();
         // Called when the code cache runs out of space; see _codemem_full_count.
    74   static void report_codemem_full();
    76   // Allocation/administration
    77   static CodeBlob* allocate(int size, bool is_critical = false); // allocates a new CodeBlob
    78   static void commit(CodeBlob* cb);                 // called when the allocated CodeBlob has been filled
    79   static int alignment_unit();                      // guaranteed alignment of all CodeBlobs
    80   static int alignment_offset();                    // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header)
    81   static void free(CodeBlob* cb);                   // frees a CodeBlob
    82   static void flush();                              // flushes all CodeBlobs
    83   static bool contains(void *p);                    // returns whether p is included
    84   static void blobs_do(void f(CodeBlob* cb));       // iterates over all CodeBlobs
    85   static void blobs_do(CodeBlobClosure* f);         // iterates over all CodeBlobs
    86   static void nmethods_do(void f(nmethod* nm));     // iterates over all nmethods
    87   static void alive_nmethods_do(void f(nmethod* nm)); // iterates over all alive nmethods
    89   // Lookup
    90   static CodeBlob* find_blob(void* start);
    91   static nmethod*  find_nmethod(void* start);
    93   // Lookup that does not fail if you lookup a zombie method (if you call this, be sure to know
    94   // what you are doing)
         // Returns NULL if the address is not inside any CodeBlob (or if the
         // code cache does not exist yet).
    95   static CodeBlob* find_blob_unsafe(void* start) {
    96     // NMT can walk the stack before code cache is created
    97     if (_heap == NULL) return NULL;
    99     CodeBlob* result = (CodeBlob*)_heap->find_start(start);
   100     // this assert is too strong because the heap code will return the
   101     // heapblock containing start. That block can often be larger than
   102     // the codeBlob itself. If you look up an address that is within
   103     // the heapblock but not in the codeBlob you will assert.
   104     //
   105     // Most things will not lookup such bad addresses. However
   106     // AsyncGetCallTrace can see intermediate frames and get that kind
   107     // of invalid address and so can a developer using hsfind.
   108     //
   109     // The more correct answer is to return NULL if blob_contains() returns
   110     // false.
   111     // assert(result == NULL || result->blob_contains((address)start), "found wrong CodeBlob");
         // Filter out heapblock hits that are outside the blob proper (see above).
   113     if (result != NULL && !result->blob_contains((address)start)) {
   114       result = NULL;
   115     }
   116     return result;
   117   }
   119   // Iteration
   120   static CodeBlob* first();
   121   static CodeBlob* next (CodeBlob* cb);
   122   static CodeBlob* alive(CodeBlob *cb);
   123   static nmethod* alive_nmethod(CodeBlob *cb);
   124   static nmethod* first_nmethod();
   125   static nmethod* next_nmethod (CodeBlob* cb);
         // Accessors for the private counters above.
   126   static int       nof_blobs()                 { return _number_of_blobs; }
   127   static int       nof_adapters()              { return _number_of_adapters; }
   128   static int       nof_nmethods()              { return _number_of_nmethods; }
   130   // GC support
   131   static void gc_epilogue();
   132   static void gc_prologue();
   133   static void verify_oops();
   134   // If "unloading_occurred" is true, then unloads (i.e., breaks root links
   135   // to) any unmarked codeBlobs in the cache.  Sets "marked_for_unloading"
   136   // to "true" iff some code got unloaded.
         // NOTE(review): no "marked_for_unloading" parameter appears in this
         // signature -- the comment above may be stale; verify against the .cpp.
   137   static void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
         // Applies f to the oops of all blobs via a non-marking closure walk.
   138   static void oops_do(OopClosure* f) {
   139     CodeBlobToOopClosure oopc(f, /*do_marking=*/ false);
   140     blobs_do(&oopc);
   141   }
   142   static void asserted_non_scavengable_nmethods_do(CodeBlobClosure* f = NULL) PRODUCT_RETURN;
   143   static void scavenge_root_nmethods_do(CodeBlobClosure* f);
         // Management of the scavenge-root nmethod list (head in
         // _scavenge_root_nmethods, linked via nm->scavenge_root_link()).
   145   static nmethod* scavenge_root_nmethods()          { return _scavenge_root_nmethods; }
   146   static void set_scavenge_root_nmethods(nmethod* nm) { _scavenge_root_nmethods = nm; }
   147   static void add_scavenge_root_nmethod(nmethod* nm);
   148   static void drop_scavenge_root_nmethod(nmethod* nm);
   149   static void prune_scavenge_root_nmethods();
   151   // Printing/debugging
   152   static void print();                           // prints summary
   153   static void print_internals();
   154   static void verify();                          // verifies the code cache
   155   static void print_trace(const char* event, CodeBlob* cb, int size = 0) PRODUCT_RETURN;
   156   static void print_summary(outputStream* st, bool detailed = true); // Prints a summary of the code cache usage
   157   static void log_state(outputStream* st);
   159   // The full limits of the codeCache
   160   static address  low_bound()                    { return (address) _heap->low_boundary(); }
   161   static address  high_bound()                   { return (address) _heap->high_boundary(); }
   162   static address  high()                         { return (address) _heap->high(); }
   164   // Profiling
   165   static address first_address();                // first address used for CodeBlobs
   166   static address last_address();                 // last  address used for CodeBlobs
   167   static size_t  capacity()                      { return _heap->capacity(); }
   168   static size_t  max_capacity()                  { return _heap->max_capacity(); }
   169   static size_t  unallocated_capacity()          { return _heap->unallocated_capacity(); }
         // True when free space drops below CodeCacheFlushingMinimumFreeSpace.
   170   static bool    needs_flushing()                { return unallocated_capacity() < CodeCacheFlushingMinimumFreeSpace; }
   171   static double  reverse_free_ratio();
   173   static bool needs_cache_clean()                { return _needs_cache_clean; }
   174   static void set_needs_cache_clean(bool v)      { _needs_cache_clean = v;    }
   175   static void clear_inline_caches();             // clear all inline caches
         // Support for speculatively disconnected nmethods (see _saved_nmethods).
   177   static nmethod* reanimate_saved_code(Method* m);
   178   static void remove_saved_code(nmethod* nm);
   179   static void speculatively_disconnect(nmethod* nm);
   181   // Deoptimization
   182   static int  mark_for_deoptimization(DepChange& changes);
   183 #ifdef HOTSWAP
   184   static int  mark_for_evol_deoptimization(instanceKlassHandle dependee);
   185 #endif // HOTSWAP
   187   static void mark_all_nmethods_for_deoptimization();
   188   static int  mark_for_deoptimization(Method* dependee);
   189   static void make_marked_nmethods_zombies();
   190   static void make_marked_nmethods_not_entrant();
   192   // tells how many nmethods have dependencies
   193   static int number_of_nmethods_with_dependencies();
         // Number of times the code cache filled up (see report_codemem_full()).
   195   static int get_codemem_full_count() { return _codemem_full_count; }
   196 };
   198 #endif // SHARE_VM_CODE_CODECACHE_HPP

mercurial