src/share/vm/gc_implementation/parallelScavenge/objectStartArray.cpp

author      stefank
date        Fri, 31 May 2013 14:32:44 +0200
changeset   5515:9766f73e770d
parent      4993:746b070f5022
child       6198:55fb97c4c58d
permissions -rw-r--r--

8022880: False sharing between PSPromotionManager instances
Summary: Pad the PSPromotionManager instances in the manager array.
Reviewed-by: brutisso, jmasa
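
The fix itself lands in the PSPromotionManager code rather than in the file shown below; the summary only names the technique. As a rough, hypothetical sketch of what padding the instances in a manager array looks like (the names kCacheLineSize, Padded, and PromotionManagerLike are illustrative stand-ins, not HotSpot's actual padding helpers):

#include <cstddef>

// Assumed cache line size; the VM would query the platform for the real value.
constexpr std::size_t kCacheLineSize = 64;

// Align (and thereby size) every array slot to a whole cache line, so a
// write by one GC worker to its manager cannot invalidate the cache line
// holding a neighboring worker's manager (the false sharing in 8022880).
template <typename T>
struct alignas(kCacheLineSize) Padded {
  T elem;
};

struct PromotionManagerLike {     // stand-in for PSPromotionManager
  std::size_t claimed_stack_top;  // hot, frequently written per-worker state
};

// One padded slot per GC worker, mirroring the manager array.
static Padded<PromotionManagerLike> manager_array[8];

PromotionManagerLike* manager_for_worker(unsigned worker_id) {
  return &manager_array[worker_id].elem;
}

With unpadded elements, two adjacent managers can straddle one 64-byte line; padding trades a few bytes per worker for the elimination of that cross-thread cache traffic.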

/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/objectStartArray.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "services/memTracker.hpp"

void ObjectStartArray::initialize(MemRegion reserved_region) {
  // We're based on the assumption that we use the same
  // size blocks as the card table.
  assert((int)block_size == (int)CardTableModRefBS::card_size, "Sanity");
  assert((int)block_size <= 512, "block_size must be less than or equal to 512");

  // Calculate how much space must be reserved
  _reserved_region = reserved_region;

  size_t bytes_to_reserve = reserved_region.word_size() / block_size_in_words;
  assert(bytes_to_reserve > 0, "Sanity");

  bytes_to_reserve =
    align_size_up(bytes_to_reserve, os::vm_allocation_granularity());

  // Do not use large-pages for the backing store. The one large page region
  // will be used for the heap proper.
  ReservedSpace backing_store(bytes_to_reserve);
  if (!backing_store.is_reserved()) {
    vm_exit_during_initialization("Could not reserve space for ObjectStartArray");
  }
  MemTracker::record_virtual_memory_type((address)backing_store.base(), mtGC);

  // We do not commit any memory initially
  if (!_virtual_space.initialize(backing_store, 0)) {
    vm_exit_during_initialization("Could not commit space for ObjectStartArray");
  }

  _raw_base = (jbyte*)_virtual_space.low_boundary();

  if (_raw_base == NULL) {
    vm_exit_during_initialization("Could not get raw_base address");
  }

  MemTracker::record_virtual_memory_type((address)_raw_base, mtGC);


  _offset_base = _raw_base - (size_t(reserved_region.start()) >> block_shift);

  _covered_region.set_start(reserved_region.start());
  _covered_region.set_word_size(0);

  _blocks_region.set_start((HeapWord*)_raw_base);
  _blocks_region.set_word_size(0);
}

void ObjectStartArray::set_covered_region(MemRegion mr) {
  assert(_reserved_region.contains(mr), "MemRegion outside of reserved space");
  assert(_reserved_region.start() == mr.start(), "Attempt to move covered region");

  HeapWord* low_bound  = mr.start();
  HeapWord* high_bound = mr.end();
  assert((uintptr_t(low_bound)  & (block_size - 1))  == 0, "heap must start at block boundary");
  assert((uintptr_t(high_bound) & (block_size - 1))  == 0, "heap must end at block boundary");

  size_t requested_blocks_size_in_bytes = mr.word_size() / block_size_in_words;

  // Only commit memory in page sized chunks
  requested_blocks_size_in_bytes =
    align_size_up(requested_blocks_size_in_bytes, os::vm_page_size());

  _covered_region = mr;

  size_t current_blocks_size_in_bytes = _blocks_region.byte_size();

  if (requested_blocks_size_in_bytes > current_blocks_size_in_bytes) {
    // Expand
    size_t expand_by = requested_blocks_size_in_bytes - current_blocks_size_in_bytes;
    if (!_virtual_space.expand_by(expand_by)) {
      vm_exit_out_of_memory(expand_by, OOM_MMAP_ERROR, "object start array expansion");
    }
    // Clear *only* the newly allocated region
    memset(_blocks_region.end(), clean_block, expand_by);
  }

  if (requested_blocks_size_in_bytes < current_blocks_size_in_bytes) {
    // Shrink
    size_t shrink_by = current_blocks_size_in_bytes - requested_blocks_size_in_bytes;
    _virtual_space.shrink_by(shrink_by);
  }

  _blocks_region.set_word_size(requested_blocks_size_in_bytes / sizeof(HeapWord));

  assert(requested_blocks_size_in_bytes % sizeof(HeapWord) == 0, "Block table not expanded in word sized increment");
  assert(requested_blocks_size_in_bytes == _blocks_region.byte_size(), "Sanity");
  assert(block_for_addr(low_bound) == &_raw_base[0], "Checking start of map");
  assert(block_for_addr(high_bound-1) <= &_raw_base[_blocks_region.byte_size()-1], "Checking end of map");
}

void ObjectStartArray::reset() {
  memset(_blocks_region.start(), clean_block, _blocks_region.byte_size());
}


bool ObjectStartArray::object_starts_in_range(HeapWord* start_addr,
                                              HeapWord* end_addr) const {
  assert(start_addr <= end_addr, "range is wrong");
  if (start_addr > end_addr) {
    return false;
  }

  jbyte* start_block = block_for_addr(start_addr);
  jbyte* end_block = block_for_addr(end_addr);

  for (jbyte* block = start_block; block <= end_block; block++) {
    if (*block != clean_block) {
      return true;
    }
  }
  // No object starts in this slice; verify this using
  // more traditional methods:  Note that no object can
  // start before the start_addr.
  assert(end_addr == start_addr ||
         object_start(end_addr - 1) <= start_addr,
         "Oops an object does start in this slice?");
  return false;
}
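
A note on the biasing trick in initialize(): _offset_base is the raw table base minus (reserved_region.start() >> block_shift), so a block lookup becomes a single shift-and-index with no per-lookup subtraction of the heap start. block_for_addr() itself lives in objectStartArray.hpp; the following standalone sketch, with hypothetical names, shows the arithmetic it presumably reduces to:

#include <cstdint>

const unsigned kBlockShift = 9;  // 512-byte blocks, matching the assert above

// One-time bias: subtract the shifted start of the covered region from the
// raw table base (this mirrors the _offset_base line in initialize()).
signed char* bias_base(signed char* raw_base, const void* region_start) {
  return raw_base - (reinterpret_cast<std::uintptr_t>(region_start) >> kBlockShift);
}

// With a biased base, the entry for any covered address is reached by
// shifting the address alone, as block_for_addr() does.
signed char* block_for_addr(signed char* biased_base, const void* addr) {
  return &biased_base[reinterpret_cast<std::uintptr_t>(addr) >> kBlockShift];
}

This is the identity the final asserts in set_covered_region() exercise: with the biased base, block_for_addr(low_bound) comes out at &_raw_base[0], and block_for_addr(high_bound - 1) stays within the committed table.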
