src/share/vm/services/virtualMemoryTracker.cpp

author:      dbuck
date:        Wed, 31 Jan 2018 19:24:57 -0500
changeset:   9289:427b2fb1944f
parent:      7267:417e3b8d04c5
child:       9485:7a6239517d46
permissions: -rw-r--r--

8189170: Add option to disable stack overflow checking in primordial thread for use with JNI_CreateJavaJVM
Reviewed-by: dcubed

/*
 * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
    24 #include "precompiled.hpp"
    26 #include "runtime/threadCritical.hpp"
    27 #include "services/virtualMemoryTracker.hpp"
    29 size_t VirtualMemorySummary::_snapshot[CALC_OBJ_SIZE_IN_TYPE(VirtualMemorySnapshot, size_t)];

void VirtualMemorySummary::initialize() {
  assert(sizeof(_snapshot) >= sizeof(VirtualMemorySnapshot), "Sanity Check");
  // Use placement operator new to initialize static data area.
  ::new ((void*)_snapshot) VirtualMemorySnapshot();
}
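
// Implementation note: the snapshot lives in a statically sized array of
// size_t and is constructed with placement new, so the summary counters are
// usable before the C-heap allocators come up and are never freed. A minimal
// sketch of the idiom (illustrative only; SomeType and _storage are
// hypothetical names, not part of this file):
//
//   static size_t _storage[CALC_OBJ_SIZE_IN_TYPE(SomeType, size_t)];
//   ::new ((void*)_storage) SomeType();  // construct in place, no malloc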

SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>* VirtualMemoryTracker::_reserved_regions;

int compare_committed_region(const CommittedMemoryRegion& r1, const CommittedMemoryRegion& r2) {
  return r1.compare(r2);
}

int compare_reserved_region_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2) {
  return r1.compare(r2);
}
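
// These comparators drive the SortedLinkedList ordering: regions are kept
// sorted by base address, and region compare() (declared in
// virtualMemoryTracker.hpp) treats overlapping regions as equal. That is
// what lets find() locate the reserved region containing an arbitrary
// address; set_reserved_region_type() below relies on this by probing with
// a 1-byte region.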

bool ReservedMemoryRegion::add_committed_region(address addr, size_t size, const NativeCallStack& stack) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(contain_region(addr, size), "Does not contain this region");

  if (all_committed()) return true;

  CommittedMemoryRegion committed_rgn(addr, size, stack);
  LinkedListNode<CommittedMemoryRegion>* node = _committed_regions.find_node(committed_rgn);
  if (node != NULL) {
    CommittedMemoryRegion* rgn = node->data();
    if (rgn->same_region(addr, size)) {
      return true;
    }

    if (rgn->adjacent_to(addr, size)) {
      // Check whether the next region covers this committed region; the
      // regions may not have been merged because of different call stacks.
      LinkedListNode<CommittedMemoryRegion>* next = node->next();
      if (next != NULL && next->data()->contain_region(addr, size)) {
        if (next->data()->same_region(addr, size)) {
          next->data()->set_call_stack(stack);
        }
        return true;
      }
      if (rgn->call_stack()->equals(stack)) {
        VirtualMemorySummary::record_uncommitted_memory(rgn->size(), flag());
        // The two adjacent regions have the same call stack, so merge them.
        rgn->expand_region(addr, size);
        VirtualMemorySummary::record_committed_memory(rgn->size(), flag());
        return true;
      }
      VirtualMemorySummary::record_committed_memory(size, flag());
      if (rgn->base() > addr) {
        return _committed_regions.insert_before(committed_rgn, node) != NULL;
      } else {
        return _committed_regions.insert_after(committed_rgn, node) != NULL;
      }
    }
    assert(rgn->contain_region(addr, size), "Must cover this region");
    return true;
  } else {
    // New committed region.
    VirtualMemorySummary::record_committed_memory(size, flag());
    return add_committed_region(committed_rgn);
  }
}
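
// Worked example (hypothetical addresses): with a committed node
// [0x1000, 0x3000) recorded with call stack S, committing the adjacent
// range [0x3000, 0x4000):
//   - with the same stack S, the node is expanded to [0x1000, 0x4000) and
//     the summary is adjusted by uncommit(old size) then commit(new size);
//   - with a different stack, a separate node is inserted next to the
//     existing one so per-call-site attribution is preserved.
// Committing a range already covered by an existing node is a no-op.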

void ReservedMemoryRegion::set_all_committed(bool b) {
  if (all_committed() != b) {
    _all_committed = b;
    if (b) {
      VirtualMemorySummary::record_committed_memory(size(), flag());
    }
  }
}

bool ReservedMemoryRegion::remove_uncommitted_region(LinkedListNode<CommittedMemoryRegion>* node,
  address addr, size_t size) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");

  CommittedMemoryRegion* rgn = node->data();
  assert(rgn->contain_region(addr, size), "Has to be contained");
  assert(!rgn->same_region(addr, size), "Can not be the same region");

  if (rgn->base() == addr ||
      rgn->end() == addr + size) {
    rgn->exclude_region(addr, size);
    return true;
  } else {
    // Split this region.
    address top = rgn->end();
    // Use this region for the lower part.
    size_t exclude_size = rgn->end() - addr;
    rgn->exclude_region(addr, exclude_size);

    // Higher part.
    address high_base = addr + size;
    size_t  high_size = top - high_base;

    CommittedMemoryRegion high_rgn(high_base, high_size, *rgn->call_stack());
    LinkedListNode<CommittedMemoryRegion>* high_node = _committed_regions.add(high_rgn);
    assert(high_node == NULL || node->next() == high_node, "Should be right after");
    return (high_node != NULL);
  }

  return false;
}
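
// Split example (hypothetical addresses): uncommitting [0x2000, 0x3000)
// from a committed node [0x1000, 0x4000) shrinks the existing node to the
// lower part [0x1000, 0x2000) and adds a new node for the higher part
// [0x3000, 0x4000), which inherits the original node's call stack.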

bool ReservedMemoryRegion::remove_uncommitted_region(address addr, size_t sz) {
  // Uncommit stack guard pages.
  if (flag() == mtThreadStack && !same_region(addr, sz)) {
    return true;
  }

  assert(addr != NULL, "Invalid address");
  assert(sz > 0, "Invalid size");

  if (all_committed()) {
    assert(_committed_regions.is_empty(), "Sanity check");
    assert(contain_region(addr, sz), "Reserved region does not contain this region");
    set_all_committed(false);
    VirtualMemorySummary::record_uncommitted_memory(sz, flag());
    if (same_region(addr, sz)) {
      return true;
    } else {
      CommittedMemoryRegion rgn(base(), size(), *call_stack());
      if (rgn.base() == addr || rgn.end() == (addr + sz)) {
        rgn.exclude_region(addr, sz);
        return add_committed_region(rgn);
      } else {
        // Split this region.
        // Top of the whole region.
        address top = rgn.end();
        // Use this region for the lower part.
        size_t exclude_size = rgn.end() - addr;
        rgn.exclude_region(addr, exclude_size);
        if (add_committed_region(rgn)) {
          // Higher part.
          address high_base = addr + sz;
          size_t  high_size = top - high_base;
          CommittedMemoryRegion high_rgn(high_base, high_size, NativeCallStack::EMPTY_STACK);
          return add_committed_region(high_rgn);
        } else {
          return false;
        }
      }
    }
  } else {
    // We have to walk the whole list to remove the committed regions in the
    // specified range.
    LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
    LinkedListNode<CommittedMemoryRegion>* prev = NULL;
    VirtualMemoryRegion uncommitted_rgn(addr, sz);

    while (head != NULL && !uncommitted_rgn.is_empty()) {
      CommittedMemoryRegion* crgn = head->data();
      // This committed region overlaps the region to uncommit.
      if (crgn->overlap_region(uncommitted_rgn.base(), uncommitted_rgn.size())) {
        if (crgn->same_region(uncommitted_rgn.base(), uncommitted_rgn.size())) {
          // Found an exactly matching region; removing the node is all
          // that is needed.
          VirtualMemorySummary::record_uncommitted_memory(uncommitted_rgn.size(), flag());
          _committed_regions.remove_after(prev);
          return true;
        } else if (crgn->contain_region(uncommitted_rgn.base(), uncommitted_rgn.size())) {
          // This committed region contains the whole uncommitted region.
          VirtualMemorySummary::record_uncommitted_memory(uncommitted_rgn.size(), flag());
          return remove_uncommitted_region(head, uncommitted_rgn.base(), uncommitted_rgn.size());
        } else if (uncommitted_rgn.contain_region(crgn->base(), crgn->size())) {
          // This committed region is fully covered by the region to uncommit.
          size_t exclude_size = crgn->end() - uncommitted_rgn.base();
          uncommitted_rgn.exclude_region(uncommitted_rgn.base(), exclude_size);
          VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag());
          LinkedListNode<CommittedMemoryRegion>* tmp = head;
          head = head->next();
          _committed_regions.remove_after(prev);
          continue;
        } else if (crgn->contain_address(uncommitted_rgn.base())) {
          size_t toUncommitted = crgn->end() - uncommitted_rgn.base();
          crgn->exclude_region(uncommitted_rgn.base(), toUncommitted);
          uncommitted_rgn.exclude_region(uncommitted_rgn.base(), toUncommitted);
          VirtualMemorySummary::record_uncommitted_memory(toUncommitted, flag());
        } else if (uncommitted_rgn.contain_address(crgn->base())) {
          size_t toUncommitted = uncommitted_rgn.end() - crgn->base();
          crgn->exclude_region(crgn->base(), toUncommitted);
          uncommitted_rgn.exclude_region(uncommitted_rgn.end() - toUncommitted,
            toUncommitted);
          VirtualMemorySummary::record_uncommitted_memory(toUncommitted, flag());
        }
      }
      prev = head;
      head = head->next();
    }
  }

  return true;
}
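
// Overlap cases handled by the walk above, for a range R to uncommit and a
// committed node C (hypothetical ranges for illustration):
//   - C == R: remove the node and return;
//   - C contains R: split C via remove_uncommitted_region() above;
//   - R contains C: remove C, trim the front of R, keep walking;
//   - C straddles the start of R: trim the tail of C and the front of R;
//   - C straddles the end of R: trim the front of C and the tail of R.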

void ReservedMemoryRegion::move_committed_regions(address addr, ReservedMemoryRegion& rgn) {
  assert(addr != NULL, "Invalid address");

  // Split the committed regions at addr.
  LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
  LinkedListNode<CommittedMemoryRegion>* prev = NULL;

  while (head != NULL) {
    if (head->data()->base() >= addr) {
      break;
    }
    prev = head;
    head = head->next();
  }

  if (head != NULL) {
    if (prev != NULL) {
      // Detach the tail starting at head; everything from head onward
      // moves to rgn below.
      prev->set_next(NULL);
    } else {
      _committed_regions.set_head(NULL);
    }
  }

  rgn._committed_regions.set_head(head);
}
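
// Example (hypothetical addresses): with committed nodes based at 0x1000,
// 0x3000 and 0x5000, move_committed_regions(0x3000, rgn) keeps the node at
// 0x1000 on this region and hands the nodes at 0x3000 and 0x5000 to rgn.
// This supports splitting a reserved region in remove_released_region()
// below.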

size_t ReservedMemoryRegion::committed_size() const {
  if (all_committed()) {
    return size();
  } else {
    size_t committed = 0;
    LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
    while (head != NULL) {
      committed += head->data()->size();
      head = head->next();
    }
    return committed;
  }
}

void ReservedMemoryRegion::set_flag(MEMFLAGS f) {
  assert((flag() == mtNone || flag() == f), "Overwrite memory type");
  if (flag() != f) {
    VirtualMemorySummary::move_reserved_memory(flag(), f, size());
    VirtualMemorySummary::move_committed_memory(flag(), f, committed_size());
    _flag = f;
  }
}
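
// Retagging a region moves both its reserved and committed byte counts from
// the old memory type's bucket to the new one, so, for example, a region
// reserved as mtNone early in startup and later tagged as mtGC is reported
// under GC in the NMT summary.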

bool VirtualMemoryTracker::initialize(NMT_TrackingLevel level) {
  if (level >= NMT_summary) {
    VirtualMemorySummary::initialize();
  }
  return true;
}

bool VirtualMemoryTracker::late_initialize(NMT_TrackingLevel level) {
  if (level >= NMT_summary) {
    _reserved_regions = new (std::nothrow, ResourceObj::C_HEAP, mtNMT)
      SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>();
    return (_reserved_regions != NULL);
  }
  return true;
}
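
// Initialization is two-phase: initialize() only touches the statically
// allocated snapshot area and can run very early, while late_initialize()
// allocates the region list from the C heap (tagged mtNMT) once heap
// allocation is available.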

bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size,
   const NativeCallStack& stack, MEMFLAGS flag, bool all_committed) {
  assert(base_addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != NULL, "Sanity check");
  ReservedMemoryRegion  rgn(base_addr, size, stack, flag);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
  LinkedListNode<ReservedMemoryRegion>* node;
  if (reserved_rgn == NULL) {
    VirtualMemorySummary::record_reserved_memory(size, flag);
    node = _reserved_regions->add(rgn);
    if (node != NULL) {
      node->data()->set_all_committed(all_committed);
      return true;
    } else {
      return false;
    }
  } else {
    if (reserved_rgn->same_region(base_addr, size)) {
      reserved_rgn->set_call_stack(stack);
      reserved_rgn->set_flag(flag);
      return true;
    } else if (reserved_rgn->adjacent_to(base_addr, size)) {
      VirtualMemorySummary::record_reserved_memory(size, flag);
      reserved_rgn->expand_region(base_addr, size);
      reserved_rgn->set_call_stack(stack);
      return true;
    } else {
      // Overlapped reservation.
      // This can happen when the regions are thread stacks: a JNI thread
      // that exits without detaching from the VM leaks its JavaThread
      // object, including the stack region tracked here.
      if (reserved_rgn->flag() == mtThreadStack) {
        guarantee(!CheckJNICalls, "Attached JNI thread exited without being detached");
        // Overwrite with the new region.

        // Release the old region.
        VirtualMemorySummary::record_uncommitted_memory(reserved_rgn->committed_size(), reserved_rgn->flag());
        VirtualMemorySummary::record_released_memory(reserved_rgn->size(), reserved_rgn->flag());

        // Add the new region.
        VirtualMemorySummary::record_reserved_memory(rgn.size(), flag);

        *reserved_rgn = rgn;
        return true;
      }

      // CDS mapping region.
      // CDS reserves the whole region for mapping the CDS archive, then
      // maps each section into the region. NMT reports CDS as a whole.
      if (reserved_rgn->flag() == mtClassShared) {
        assert(reserved_rgn->contain_region(base_addr, size), "Reserved CDS region should contain this mapping region");
        return true;
      }

      ShouldNotReachHere();
      return false;
    }
  }
}
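
// Usage sketch (hypothetical caller; in HotSpot these entry points are
// normally reached through the MemTracker wrappers rather than called
// directly, and base, sz and commit_sz are made-up names for illustration):
//
//   address base = (address)os::reserve_memory(sz);
//   VirtualMemoryTracker::add_reserved_region(base, sz, CALLER_PC, mtTest);
//   VirtualMemoryTracker::add_committed_region(base, commit_sz, CALLER_PC);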

void VirtualMemoryTracker::set_reserved_region_type(address addr, MEMFLAGS flag) {
  assert(addr != NULL, "Invalid address");
  assert(_reserved_regions != NULL, "Sanity check");

  ReservedMemoryRegion   rgn(addr, 1);
  ReservedMemoryRegion*  reserved_rgn = _reserved_regions->find(rgn);
  if (reserved_rgn != NULL) {
    assert(reserved_rgn->contain_address(addr), "Containment");
    if (reserved_rgn->flag() != flag) {
      assert(reserved_rgn->flag() == mtNone, "Overwrite memory type");
      reserved_rgn->set_flag(flag);
    }
  }
}

bool VirtualMemoryTracker::add_committed_region(address addr, size_t size,
  const NativeCallStack& stack) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != NULL, "Sanity check");

  ReservedMemoryRegion  rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);

  assert(reserved_rgn != NULL, "No reserved region");
  assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
  return reserved_rgn->add_committed_region(addr, size, stack);
}

bool VirtualMemoryTracker::remove_uncommitted_region(address addr, size_t size) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != NULL, "Sanity check");

  ReservedMemoryRegion  rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
  assert(reserved_rgn != NULL, "No reserved region");
  assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
  return reserved_rgn->remove_uncommitted_region(addr, size);
}

bool VirtualMemoryTracker::remove_released_region(address addr, size_t size) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != NULL, "Sanity check");

  ReservedMemoryRegion  rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);

  assert(reserved_rgn != NULL, "No reserved region");

  // Uncommit regions within the released region.
  if (!reserved_rgn->remove_uncommitted_region(addr, size)) {
    return false;
  }

  VirtualMemorySummary::record_released_memory(size, reserved_rgn->flag());

  if (reserved_rgn->same_region(addr, size)) {
    return _reserved_regions->remove(rgn);
  } else {
    assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
    if (reserved_rgn->base() == addr ||
        reserved_rgn->end() == addr + size) {
      reserved_rgn->exclude_region(addr, size);
      return true;
    } else {
      address top = reserved_rgn->end();
      address high_base = addr + size;
      ReservedMemoryRegion high_rgn(high_base, top - high_base,
        *reserved_rgn->call_stack(), reserved_rgn->flag());

      // Use the original region for the lower region.
      reserved_rgn->exclude_region(addr, top - addr);
      LinkedListNode<ReservedMemoryRegion>* new_rgn = _reserved_regions->add(high_rgn);
      if (new_rgn == NULL) {
        return false;
      } else {
        reserved_rgn->move_committed_regions(addr, *new_rgn->data());
        return true;
      }
    }
  }
}
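
// Example (hypothetical addresses): releasing [0x2000, 0x3000) from a
// reserved region [0x1000, 0x4000) first uncommits anything in the released
// range, then trims the original node to [0x1000, 0x2000), adds a new
// reserved node [0x3000, 0x4000), and moves the committed nodes at or above
// the split point over to the new node.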

bool VirtualMemoryTracker::walk_virtual_memory(VirtualMemoryWalker* walker) {
  assert(_reserved_regions != NULL, "Sanity check");
  ThreadCritical tc;
  // Check that the _reserved_regions haven't been deleted.
  if (_reserved_regions != NULL) {
    LinkedListNode<ReservedMemoryRegion>* head = _reserved_regions->head();
    while (head != NULL) {
      const ReservedMemoryRegion* rgn = head->peek();
      if (!walker->do_allocation_site(rgn)) {
        return false;
      }
      head = head->next();
    }
  }
  return true;
}
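
// Illustrative walker (hypothetical subclass, not part of this file),
// showing how callers enumerate reserved regions:
//
//   class RegionPrinter : public VirtualMemoryWalker {
//    public:
//     bool do_allocation_site(const ReservedMemoryRegion* rgn) {
//       tty->print_cr("reserved " SIZE_FORMAT " bytes at " PTR_FORMAT,
//                     rgn->size(), p2i(rgn->base()));
//       return true;  // returning false stops the walk early
//     }
//   };
//
//   RegionPrinter rp;
//   VirtualMemoryTracker::walk_virtual_memory(&rp);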

// Transition virtual memory tracking level.
bool VirtualMemoryTracker::transition(NMT_TrackingLevel from, NMT_TrackingLevel to) {
  assert(from != NMT_minimal, "cannot convert from the lowest tracking level to anything");
  if (to == NMT_minimal) {
    assert(from == NMT_summary || from == NMT_detail, "Just check");
    // Clean up virtual memory tracking data structures.
    ThreadCritical tc;
    // Check for a potential race with another thread calling transition.
    if (_reserved_regions != NULL) {
      delete _reserved_regions;
      _reserved_regions = NULL;
    }
  }

  return true;
}
