Thu, 17 Jul 2008 10:26:33 -0700
6723228: NUMA allocator: assert(lgrp_id != -1, "No lgrp_id set")
6723229: NUMA allocator: assert(lgrp_num > 0, "There should be at least one locality group")
Summary: The fix addresses the assertions triggered during TLAB resizing after a topology reconfiguration. It also handles a defect in the topology graph, in which a single leaf node has no memory.
Reviewed-by: jmasa
1.1 --- a/src/os/solaris/vm/os_solaris.cpp Mon Jul 14 04:12:47 2008 -0700 1.2 +++ b/src/os/solaris/vm/os_solaris.cpp Thu Jul 17 10:26:33 2008 -0700 1.3 @@ -2658,6 +2658,12 @@ 1.4 top += r; 1.5 cur++; 1.6 } 1.7 + if (bottom == 0) { 1.8 + // Handle a situation, when the OS reports no memory available. 1.9 + // Assume UMA architecture. 1.10 + ids[0] = 0; 1.11 + return 1; 1.12 + } 1.13 return bottom; 1.14 } 1.15
2.1 --- a/src/share/vm/gc_implementation/shared/gcUtil.hpp Mon Jul 14 04:12:47 2008 -0700 2.2 +++ b/src/share/vm/gc_implementation/shared/gcUtil.hpp Thu Jul 17 10:26:33 2008 -0700 2.3 @@ -58,6 +58,12 @@ 2.4 _average(0.0), _sample_count(0), _weight(weight), _last_sample(0.0) { 2.5 } 2.6 2.7 + void clear() { 2.8 + _average = 0; 2.9 + _sample_count = 0; 2.10 + _last_sample = 0; 2.11 + } 2.12 + 2.13 // Accessors 2.14 float average() const { return _average; } 2.15 unsigned weight() const { return _weight; } 2.16 @@ -115,6 +121,12 @@ 2.17 float deviation() const { return _deviation; } 2.18 unsigned padding() const { return _padding; } 2.19 2.20 + void clear() { 2.21 + AdaptiveWeightedAverage::clear(); 2.22 + _padded_avg = 0; 2.23 + _deviation = 0; 2.24 + } 2.25 + 2.26 // Override 2.27 void sample(float new_sample); 2.28 };
3.1 --- a/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp Mon Jul 14 04:12:47 2008 -0700 3.2 +++ b/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp Thu Jul 17 10:26:33 2008 -0700 3.3 @@ -141,7 +141,20 @@ 3.4 size_t MutableNUMASpace::tlab_capacity(Thread *thr) const { 3.5 guarantee(thr != NULL, "No thread"); 3.6 int lgrp_id = thr->lgrp_id(); 3.7 - assert(lgrp_id != -1, "No lgrp_id set"); 3.8 + if (lgrp_id == -1) { 3.9 + // This case can occur after the topology of the system has 3.10 + // changed. Thread can change their location, the new home 3.11 + // group will be determined during the first allocation 3.12 + // attempt. For now we can safely assume that all spaces 3.13 + // have equal size because the whole space will be reinitialized. 3.14 + if (lgrp_spaces()->length() > 0) { 3.15 + return capacity_in_bytes() / lgrp_spaces()->length(); 3.16 + } else { 3.17 + assert(false, "There should be at least one locality group"); 3.18 + return 0; 3.19 + } 3.20 + } 3.21 + // That's the normal case, where we know the locality group of the thread. 3.22 int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals); 3.23 if (i == -1) { 3.24 return 0; 3.25 @@ -150,9 +163,17 @@ 3.26 } 3.27 3.28 size_t MutableNUMASpace::unsafe_max_tlab_alloc(Thread *thr) const { 3.29 + // Please see the comments for tlab_capacity(). 3.30 guarantee(thr != NULL, "No thread"); 3.31 int lgrp_id = thr->lgrp_id(); 3.32 - assert(lgrp_id != -1, "No lgrp_id set"); 3.33 + if (lgrp_id == -1) { 3.34 + if (lgrp_spaces()->length() > 0) { 3.35 + return free_in_bytes() / lgrp_spaces()->length(); 3.36 + } else { 3.37 + assert(false, "There should be at least one locality group"); 3.38 + return 0; 3.39 + } 3.40 + } 3.41 int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals); 3.42 if (i == -1) { 3.43 return 0; 3.44 @@ -250,10 +271,15 @@ 3.45 void MutableNUMASpace::update() { 3.46 if (update_layout(false)) { 3.47 // If the topology has changed, make all chunks zero-sized. 
3.48 + // And clear the alloc-rate statistics. 3.49 + // In future we may want to handle this more gracefully in order 3.50 + // to avoid the reallocation of the pages as much as possible. 3.51 for (int i = 0; i < lgrp_spaces()->length(); i++) { 3.52 - MutableSpace *s = lgrp_spaces()->at(i)->space(); 3.53 + LGRPSpace *ls = lgrp_spaces()->at(i); 3.54 + MutableSpace *s = ls->space(); 3.55 s->set_end(s->bottom()); 3.56 s->set_top(s->bottom()); 3.57 + ls->clear_alloc_rate(); 3.58 } 3.59 // A NUMA space is never mangled 3.60 initialize(region(),
4.1 --- a/src/share/vm/gc_implementation/shared/mutableNUMASpace.hpp Mon Jul 14 04:12:47 2008 -0700 4.2 +++ b/src/share/vm/gc_implementation/shared/mutableNUMASpace.hpp Thu Jul 17 10:26:33 2008 -0700 4.3 @@ -112,6 +112,7 @@ 4.4 int lgrp_id() const { return _lgrp_id; } 4.5 MutableSpace* space() const { return _space; } 4.6 AdaptiveWeightedAverage* alloc_rate() const { return _alloc_rate; } 4.7 + void clear_alloc_rate() { _alloc_rate->clear(); } 4.8 SpaceStats* space_stats() { return &_space_stats; } 4.9 void clear_space_stats() { _space_stats = SpaceStats(); } 4.10