/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_MUTABLESPACE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_SHARED_MUTABLESPACE_HPP

#include "gc_implementation/shared/immutableSpace.hpp"
#include "memory/memRegion.hpp"
#include "utilities/copy.hpp"

// A MutableSpace is a subtype of ImmutableSpace that supports the
// concept of allocation. This includes the concepts that a space may
// be only partially full, and the query methods that go with such
// an assumption.
// MutableSpace is also responsible for minimizing the
// page allocation time by having the memory pretouched (with
// AlwaysPretouch) and for optimizing page placement on NUMA systems
// by making the underlying region interleaved (with UseNUMA).
//
// Invariant: (ImmutableSpace +) bottom() <= top() <= end()
// top() is inclusive and end() is exclusive.

// Forward declaration; defined with the other space manglers.
class MutableSpaceMangler;

class MutableSpace: public ImmutableSpace {
  friend class VMStructs;

  // Helper for mangling unused space in debug builds
  MutableSpaceMangler* _mangler;
  // The last region which page had been setup to be interleaved.
  MemRegion _last_setup_region;
  // Alignment passed to the constructor (presumably the page size used
  // for pretouching/interleaving -- see MutableSpace(size_t page_size)).
  size_t _alignment;
 protected:
  // Current allocation high-water mark: [bottom(), _top) is in use.
  HeapWord* _top;

  MutableSpaceMangler* mangler() { return _mangler; }

  // Page setup for the NUMA (UseNUMA) and pretouch (AlwaysPretouch) cases.
  void numa_setup_pages(MemRegion mr, bool clear_space);
  void pretouch_pages(MemRegion mr);

  // Track the region whose pages have already been set up, so repeated
  // initialization need not redo the work.
  void set_last_setup_region(MemRegion mr) { _last_setup_region = mr; }
  MemRegion last_setup_region() const { return _last_setup_region; }

 public:
  virtual ~MutableSpace();
  // page_size is stored as the space's alignment (see alignment()).
  MutableSpace(size_t page_size);

  // Accessors
  HeapWord* top() const                 { return _top; }
  virtual void set_top(HeapWord* value) { _top = value; }

  // Addresses of the allocation fields, for lock-free allocators
  // (e.g. CAS-based TLAB allocation) that update them directly.
  HeapWord** top_addr() { return &_top; }
  HeapWord** end_addr() { return &_end; }

  virtual void set_bottom(HeapWord* value) { _bottom = value; }
  virtual void set_end(HeapWord* value)    { _end = value; }

  size_t alignment() { return _alignment; }

  // Returns a subregion containing all objects in this space.
  MemRegion used_region() { return MemRegion(bottom(), top()); }

  // Named constants for the setup_pages argument of initialize().
  static const bool SetupPages = true;
  static const bool DontSetupPages = false;

  // Initialization.
  //   mr           the region this space will cover
  //   clear_space  whether to clear (and optionally mangle) the memory
  //   mangle_space whether cleared space is mangled (debug builds)
  //   setup_pages  whether to run page setup (pretouch / NUMA interleave)
  virtual void initialize(MemRegion mr,
                          bool clear_space,
                          bool mangle_space,
                          bool setup_pages = SetupPages);

  virtual void clear(bool mangle_space);
  // Does the usual initialization but optionally resets top to bottom.
#if 0  // MANGLE_SPACE
  void initialize(MemRegion mr, bool clear_space, bool reset_top);
#endif
  // Hooks for subclasses; no-ops here.
  virtual void update() { }
  virtual void accumulate_statistics() { }

  // Methods used in mangling. See descriptions under SpaceMangler.
  // PRODUCT_RETURN: these are no-ops in product builds.
  virtual void mangle_unused_area() PRODUCT_RETURN;
  virtual void mangle_unused_area_complete() PRODUCT_RETURN;
  virtual void check_mangled_unused_area(HeapWord* limit) PRODUCT_RETURN;
  virtual void check_mangled_unused_area_complete() PRODUCT_RETURN;
  virtual void set_top_for_allocations(HeapWord* v) PRODUCT_RETURN;

  // Used to save the space's current top for later use during mangling.
  virtual void set_top_for_allocations() PRODUCT_RETURN;

  virtual void ensure_parsability() { }

  virtual void mangle_region(MemRegion mr) PRODUCT_RETURN;

  // Boolean queries.
  bool is_empty() const              { return used_in_words() == 0; }
  bool not_empty() const             { return used_in_words() > 0; }
  bool contains(const void* p) const { return _bottom <= p && p < _end; }

  // Size computations. Sizes are in bytes.
  size_t used_in_bytes() const { return used_in_words() * HeapWordSize; }
  size_t free_in_bytes() const { return free_in_words() * HeapWordSize; }

  // Size computations. Sizes are in heapwords.
  virtual size_t used_in_words() const { return pointer_delta(top(), bottom()); }
  virtual size_t free_in_words() const { return pointer_delta(end(), top()); }
  // TLAB sizing; the thread argument is unused in these defaults.
  virtual size_t tlab_capacity(Thread* thr) const         { return capacity_in_bytes(); }
  virtual size_t tlab_used(Thread* thr) const             { return used_in_bytes(); }
  virtual size_t unsafe_max_tlab_alloc(Thread* thr) const { return free_in_bytes(); }

  // Allocation (return NULL if full)
  virtual HeapWord* allocate(size_t word_size);
  virtual HeapWord* cas_allocate(size_t word_size);
  // Optional deallocation. Used in NUMA-allocator.
  bool cas_deallocate(HeapWord *obj, size_t size);

  // Iteration over the objects in [bottom(), top()).
  void oop_iterate(ExtendedOopClosure* cl);
  void oop_iterate_no_header(OopClosure* cl);
  void object_iterate(ObjectClosure* cl);

  // Debugging
  virtual void print() const;
  virtual void print_on(outputStream* st) const;
  virtual void print_short() const;
  virtual void print_short_on(outputStream* st) const;
  virtual void verify();
};

#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_MUTABLESPACE_HPP