src/share/vm/services/virtualMemoryTracker.cpp

author       zgu
date         Thu, 14 Aug 2014 09:02:51 -0400
changeset    7077:36c9011aaead
parent       7074:833b0f92429a
child        7078:c6211b707068
permissions  -rw-r--r--

8054368: nsk/jdi/VirtualMachine/exit/exit002 crash with detail tracking on (NMT2)
Summary: Dynamically allocate _reserved_regions instead of a static object to avoid racing during process exit
Reviewed-by: dholmes, coleenp
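
For context, the failure mode behind this fix can be shown with a minimal
standalone sketch (hypothetical names Tracker and worker; not HotSpot code;
assumes C++11 <thread>). A static object with a non-trivial destructor is
destructed by the runtime during process exit while another thread may still
be using it; a heap-allocated object that is deliberately never deleted
cannot be destructed underneath such a thread:

    #include <thread>
    #include <vector>

    struct Tracker {
      std::vector<int> regions;                  // non-trivial destructor
      void record(int v) { regions.push_back(v); }
    };

    // Racy: the runtime runs ~Tracker() during exit() while a detached
    // thread may still be inside record().
    //   static Tracker tracker;

    // Safe: allocated once, intentionally leaked, never destructed.
    static Tracker* tracker = nullptr;

    void worker() {
      for (int i = 0; i < 1000000; i++) {
        tracker->record(i);                      // may race with process exit
      }
    }

    int main() {
      tracker = new Tracker();                   // dynamic allocation
      std::thread t(worker);
      t.detach();                                // like a JNI thread, may outlive main()
      return 0;                                  // static destructors run here; ~Tracker() does not
    }

The same reasoning applies to _reserved_regions in the code below:
late_initialize() now allocates it with new on the C heap, and it is only
deleted deliberately, under ThreadCritical, in transition().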

/*
 * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"

#include "runtime/threadCritical.hpp"
#include "services/virtualMemoryTracker.hpp"
size_t VirtualMemorySummary::_snapshot[CALC_OBJ_SIZE_IN_TYPE(VirtualMemorySnapshot, size_t)];

void VirtualMemorySummary::initialize() {
  assert(sizeof(_snapshot) >= sizeof(VirtualMemorySnapshot), "Sanity Check");
  // Use placement operator new to initialize static data area.
  ::new ((void*)_snapshot) VirtualMemorySnapshot();
}
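
// All reserved regions, sorted by base address. Allocated dynamically on the
// C heap in late_initialize() rather than defined as a static object, so no
// static destructor can race with threads still running at process exit.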
SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>* VirtualMemoryTracker::_reserved_regions;

int compare_committed_region(const CommittedMemoryRegion& r1, const CommittedMemoryRegion& r2) {
  return r1.compare(r2);
}

int compare_reserved_region_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2) {
  return r1.compare(r2);
}
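
// Record a committed range within this reserved region. Adjacent committed
// regions are merged only when they share the same call stack.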
bool ReservedMemoryRegion::add_committed_region(address addr, size_t size, const NativeCallStack& stack) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(contain_region(addr, size), "Must contain this region");

  if (all_committed()) return true;

  CommittedMemoryRegion committed_rgn(addr, size, stack);
  LinkedListNode<CommittedMemoryRegion>* node = _committed_regions.find_node(committed_rgn);
  if (node != NULL) {
    CommittedMemoryRegion* rgn = node->data();
    if (rgn->same_region(addr, size)) {
      return true;
    }

    if (rgn->adjacent_to(addr, size)) {
      // Check if the next region covers this committed region; the regions
      // may not have been merged because they have different call stacks.
      LinkedListNode<CommittedMemoryRegion>* next = node->next();
      if (next != NULL && next->data()->contain_region(addr, size)) {
        if (next->data()->same_region(addr, size)) {
          next->data()->set_call_stack(stack);
        }
        return true;
      }
      if (rgn->call_stack()->equals(stack)) {
        VirtualMemorySummary::record_uncommitted_memory(rgn->size(), flag());
        // The two adjacent regions have the same call stack, merge them.
        rgn->expand_region(addr, size);
        VirtualMemorySummary::record_committed_memory(rgn->size(), flag());
        return true;
      }
      VirtualMemorySummary::record_committed_memory(size, flag());
      if (rgn->base() > addr) {
        return _committed_regions.insert_before(committed_rgn, node) != NULL;
      } else {
        return _committed_regions.insert_after(committed_rgn, node) != NULL;
      }
    }
    assert(rgn->contain_region(addr, size), "Must cover this region");
    return true;
  } else {
    // New committed region.
    VirtualMemorySummary::record_committed_memory(size, flag());
    return add_committed_region(committed_rgn);
  }
}
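
// Set whether the entire reserved region is committed. When it becomes
// all-committed, the full region size is recorded in the summary without
// tracking individual committed ranges.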
void ReservedMemoryRegion::set_all_committed(bool b) {
  if (all_committed() != b) {
    _all_committed = b;
    if (b) {
      VirtualMemorySummary::record_committed_memory(size(), flag());
    }
  }
}
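
// Remove an uncommitted range from the committed region held by 'node': trim
// the region if the range touches either end, otherwise split it in two.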
bool ReservedMemoryRegion::remove_uncommitted_region(LinkedListNode<CommittedMemoryRegion>* node,
  address addr, size_t size) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");

  CommittedMemoryRegion* rgn = node->data();
  assert(rgn->contain_region(addr, size), "Has to be contained");
  assert(!rgn->same_region(addr, size), "Can not be the same region");

  if (rgn->base() == addr ||
      rgn->end() == addr + size) {
    rgn->exclude_region(addr, size);
    return true;
  } else {
    // split this region
    address top = rgn->end();
    // use this region for the lower part
    size_t exclude_size = rgn->end() - addr;
    rgn->exclude_region(addr, exclude_size);

    // higher part
    address high_base = addr + size;
    size_t  high_size = top - high_base;

    CommittedMemoryRegion high_rgn(high_base, high_size, *rgn->call_stack());
    LinkedListNode<CommittedMemoryRegion>* high_node = _committed_regions.add(high_rgn);
    assert(high_node == NULL || node->next() == high_node, "Should be right after");
    return (high_node != NULL);
  }

  return false;
}
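
// Remove an uncommitted range from this reserved region, trimming, splitting
// or deleting the committed regions that overlap it.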
bool ReservedMemoryRegion::remove_uncommitted_region(address addr, size_t sz) {
  // uncommit stack guard pages
  if (flag() == mtThreadStack && !same_region(addr, sz)) {
    return true;
  }

  assert(addr != NULL, "Invalid address");
  assert(sz > 0, "Invalid size");

  if (all_committed()) {
    assert(_committed_regions.is_empty(), "Sanity check");
    assert(contain_region(addr, sz), "Reserved region does not contain this region");
    set_all_committed(false);
    VirtualMemorySummary::record_uncommitted_memory(sz, flag());
    if (same_region(addr, sz)) {
      return true;
    } else {
      CommittedMemoryRegion rgn(base(), size(), *call_stack());
      if (rgn.base() == addr || rgn.end() == (addr + sz)) {
        rgn.exclude_region(addr, sz);
        return add_committed_region(rgn);
      } else {
        // split this region
        // top of the whole region
        address top = rgn.end();
        // use this region for the lower part
        size_t exclude_size = rgn.end() - addr;
        rgn.exclude_region(addr, exclude_size);
        if (add_committed_region(rgn)) {
          // higher part
          address high_base = addr + sz;
          size_t  high_size = top - high_base;
          CommittedMemoryRegion high_rgn(high_base, high_size, emptyStack);
          return add_committed_region(high_rgn);
        } else {
          return false;
        }
      }
    }
  } else {
    // We have to walk the whole list to remove the committed regions in the
    // specified range.
    LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
    LinkedListNode<CommittedMemoryRegion>* prev = NULL;
    VirtualMemoryRegion uncommitted_rgn(addr, sz);

    while (head != NULL && !uncommitted_rgn.is_empty()) {
      CommittedMemoryRegion* crgn = head->data();
      // This committed region overlaps the region to uncommit.
      if (crgn->overlap_region(uncommitted_rgn.base(), uncommitted_rgn.size())) {
        if (crgn->same_region(uncommitted_rgn.base(), uncommitted_rgn.size())) {
          // Found the matching region; removing the node is all that is needed.
          VirtualMemorySummary::record_uncommitted_memory(uncommitted_rgn.size(), flag());
          _committed_regions.remove_after(prev);
          return true;
        } else if (crgn->contain_region(uncommitted_rgn.base(), uncommitted_rgn.size())) {
          // This committed region contains the whole uncommitted region.
          VirtualMemorySummary::record_uncommitted_memory(uncommitted_rgn.size(), flag());
          return remove_uncommitted_region(head, uncommitted_rgn.base(), uncommitted_rgn.size());
        } else if (uncommitted_rgn.contain_region(crgn->base(), crgn->size())) {
          // This whole committed region is being uncommitted.
          size_t exclude_size = crgn->end() - uncommitted_rgn.base();
          uncommitted_rgn.exclude_region(uncommitted_rgn.base(), exclude_size);
          VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag());
          head = head->next();
          _committed_regions.remove_after(prev);
          continue;
        } else if (crgn->contain_address(uncommitted_rgn.base())) {
          size_t toUncommitted = crgn->end() - uncommitted_rgn.base();
          crgn->exclude_region(uncommitted_rgn.base(), toUncommitted);
          uncommitted_rgn.exclude_region(uncommitted_rgn.base(), toUncommitted);
          VirtualMemorySummary::record_uncommitted_memory(toUncommitted, flag());
        } else if (uncommitted_rgn.contain_address(crgn->base())) {
          size_t toUncommitted = uncommitted_rgn.end() - crgn->base();
          crgn->exclude_region(crgn->base(), toUncommitted);
          uncommitted_rgn.exclude_region(uncommitted_rgn.end() - toUncommitted,
            toUncommitted);
          VirtualMemorySummary::record_uncommitted_memory(toUncommitted, flag());
        }
      }
      prev = head;
      head = head->next();
    }
  }

  return true;
}
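
// Detach all committed regions at or above 'addr' and hand them to 'rgn'.
// Used when a partial release splits a reserved region in two.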
void ReservedMemoryRegion::move_committed_regions(address addr, ReservedMemoryRegion& rgn) {
  assert(addr != NULL, "Invalid address");

  // split committed regions
  LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
  LinkedListNode<CommittedMemoryRegion>* prev = NULL;

  while (head != NULL) {
    if (head->data()->base() >= addr) {
      break;
    }
    prev = head;
    head = head->next();
  }

  if (head != NULL) {
    if (prev != NULL) {
      prev->set_next(head->next());
    } else {
      _committed_regions.set_head(NULL);
    }
  }

  rgn._committed_regions.set_head(head);
}
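
// Total number of committed bytes within this reserved region.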
size_t ReservedMemoryRegion::committed_size() const {
  if (all_committed()) {
    return size();
  } else {
    size_t committed = 0;
    LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
    while (head != NULL) {
      committed += head->data()->size();
      head = head->next();
    }
    return committed;
  }
}

void ReservedMemoryRegion::set_flag(MEMFLAGS f) {
  assert((flag() == mtNone || flag() == f), "Overwrite memory type");
  if (flag() != f) {
    VirtualMemorySummary::move_reserved_memory(flag(), f, size());
    VirtualMemorySummary::move_committed_memory(flag(), f, committed_size());
    _flag = f;
  }
}
bool VirtualMemoryTracker::initialize(NMT_TrackingLevel level) {
  if (level >= NMT_summary) {
    VirtualMemorySummary::initialize();
  }
  return true;
}
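
// Called once the C heap allocator is available. _reserved_regions is
// allocated dynamically here instead of being a static object (8054368), so
// there is no static destructor to race with threads during process exit.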
bool VirtualMemoryTracker::late_initialize(NMT_TrackingLevel level) {
  if (level >= NMT_summary) {
    _reserved_regions = new (std::nothrow, ResourceObj::C_HEAP, mtNMT)
      SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>();
    return (_reserved_regions != NULL);
  }
  return true;
}
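
// Record a virtual memory reservation. Overlapping reservations are only
// tolerated for thread stacks, where an exited-but-undetached JNI thread can
// leave a stale region behind.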
bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size,
   const NativeCallStack& stack, MEMFLAGS flag, bool all_committed) {
  assert(base_addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != NULL, "Sanity check");
  ReservedMemoryRegion  rgn(base_addr, size, stack, flag);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
  LinkedListNode<ReservedMemoryRegion>* node;
  if (reserved_rgn == NULL) {
    VirtualMemorySummary::record_reserved_memory(size, flag);
    node = _reserved_regions->add(rgn);
    if (node != NULL) {
      node->data()->set_all_committed(all_committed);
      return true;
    } else {
      return false;
    }
  } else {
    if (reserved_rgn->same_region(base_addr, size)) {
      reserved_rgn->set_call_stack(stack);
      reserved_rgn->set_flag(flag);
      return true;
    } else if (reserved_rgn->adjacent_to(base_addr, size)) {
      VirtualMemorySummary::record_reserved_memory(size, flag);
      reserved_rgn->expand_region(base_addr, size);
      reserved_rgn->set_call_stack(stack);
      return true;
    } else {
      // Overlapped reservation.
      // This can happen when the regions are thread stacks: a JNI thread
      // that does not detach from the VM before it exits leaks its
      // JavaThread object.
      if (reserved_rgn->flag() == mtThreadStack) {
        guarantee(!CheckJNICalls, "Attached JNI thread exited without being detached");
        // Overwrite with new region

        // Release old region
        VirtualMemorySummary::record_uncommitted_memory(reserved_rgn->committed_size(), reserved_rgn->flag());
        VirtualMemorySummary::record_released_memory(reserved_rgn->size(), reserved_rgn->flag());

        // Add new region
        VirtualMemorySummary::record_reserved_memory(rgn.size(), flag);

        *reserved_rgn = rgn;
        return true;
      } else {
        ShouldNotReachHere();
        return false;
      }
    }
  }
}
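
// Tag the reserved region containing 'addr' with a memory type once it is known.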
void VirtualMemoryTracker::set_reserved_region_type(address addr, MEMFLAGS flag) {
  assert(addr != NULL, "Invalid address");
  assert(_reserved_regions != NULL, "Sanity check");

  ReservedMemoryRegion   rgn(addr, 1);
  ReservedMemoryRegion*  reserved_rgn = _reserved_regions->find(rgn);
  if (reserved_rgn != NULL) {
    assert(reserved_rgn->contain_address(addr), "Containment");
    if (reserved_rgn->flag() != flag) {
      assert(reserved_rgn->flag() == mtNone, "Overwrite memory type");
      reserved_rgn->set_flag(flag);
    }
  }
}
bool VirtualMemoryTracker::add_committed_region(address addr, size_t size,
  const NativeCallStack& stack) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != NULL, "Sanity check");

  ReservedMemoryRegion  rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);

  assert(reserved_rgn != NULL, "No reserved region");
  assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
  return reserved_rgn->add_committed_region(addr, size, stack);
}

bool VirtualMemoryTracker::remove_uncommitted_region(address addr, size_t size) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != NULL, "Sanity check");

  ReservedMemoryRegion  rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
  assert(reserved_rgn != NULL, "No reserved region");
  assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
  return reserved_rgn->remove_uncommitted_region(addr, size);
}
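
// Record a release of reserved memory: uncommit everything in the range, then
// remove, trim or split the reserved region that contains it.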
bool VirtualMemoryTracker::remove_released_region(address addr, size_t size) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != NULL, "Sanity check");

  ReservedMemoryRegion  rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);

  assert(reserved_rgn != NULL, "No reserved region");

  // uncommit regions within the released region
  if (!reserved_rgn->remove_uncommitted_region(addr, size)) {
    return false;
  }

  VirtualMemorySummary::record_released_memory(size, reserved_rgn->flag());

  if (reserved_rgn->same_region(addr, size)) {
    return _reserved_regions->remove(rgn);
  } else {
    assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
    if (reserved_rgn->base() == addr ||
        reserved_rgn->end() == addr + size) {
      reserved_rgn->exclude_region(addr, size);
      return true;
    } else {
      address top = reserved_rgn->end();
      address high_base = addr + size;
      ReservedMemoryRegion high_rgn(high_base, top - high_base,
        *reserved_rgn->call_stack(), reserved_rgn->flag());

      // use original region for lower region
      reserved_rgn->exclude_region(addr, top - addr);
      LinkedListNode<ReservedMemoryRegion>* new_rgn = _reserved_regions->add(high_rgn);
      if (new_rgn == NULL) {
        return false;
      } else {
        reserved_rgn->move_committed_regions(addr, *new_rgn->data());
        return true;
      }
    }
  }
}
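
// Visit every reserved region under ThreadCritical protection, stopping early
// if the walker returns false.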
bool VirtualMemoryTracker::walk_virtual_memory(VirtualMemoryWalker* walker) {
  assert(_reserved_regions != NULL, "Sanity check");
  ThreadCritical tc;
  LinkedListNode<ReservedMemoryRegion>* head = _reserved_regions->head();
  while (head != NULL) {
    const ReservedMemoryRegion* rgn = head->peek();
    if (!walker->do_allocation_site(rgn)) {
      return false;
    }
    head = head->next();
  }
  return true;
}
// Transition virtual memory tracking level.
bool VirtualMemoryTracker::transition(NMT_TrackingLevel from, NMT_TrackingLevel to) {
  if (from == NMT_minimal) {
    assert(to == NMT_summary || to == NMT_detail, "Just check");
    VirtualMemorySummary::reset();
  } else if (to == NMT_minimal) {
    assert(from == NMT_summary || from == NMT_detail, "Just check");
    // Clean up virtual memory tracking data structures.
    ThreadCritical tc;
    if (_reserved_regions != NULL) {
      delete _reserved_regions;
      _reserved_regions = NULL;
    }
  }

  return true;
}
