author	Shaohua Li <shaohua.li@intel.com>	2011-11-11 01:54:14 -0500
committer	Pekka Enberg <penberg@kernel.org>	2011-12-13 15:27:09 -0500
commit	b13683d1cc14d1dd30b8e20f3ebea3f814ad029f (patch)
tree	29384daa693218952473ee9a41dab2045926c914
parent	8f1e33daeda6cd89753f9e77d174805a6f21db09 (diff)
slub: add missed accounting
With the per-cpu partial list, a slab is added to the partial list first and then moved to the node list. The __slab_free() code path for add/remove_partial is almost deprecated (except for slub debug), but we forgot to account add/remove_partial when moving per-cpu partial pages to the node list, so the statistics for these events are always 0. Add the corresponding accounting.

This is against the patch "slub: use correct parameter to add a page to partial list tail".

Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
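For context, stat() is SLUB's per-cpu event counter hook, and the events it records are compiled in only with CONFIG_SLUB_STATS. The sketch below approximates the helper as it appears in mm/slub.c of this era (consult the tree for the authoritative definition); it shows why the two stat() calls added by this patch are essentially free in production configurations.

/*
 * Approximate sketch of SLUB's event accounting, based on mm/slub.c of
 * this era.  FREE_ADD_PARTIAL and FREE_REMOVE_PARTIAL are members of
 * enum stat_item; without CONFIG_SLUB_STATS the call compiles away
 * entirely, so the added accounting costs nothing when stats are off.
 */
static inline void stat(const struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
	__this_cpu_inc(s->cpu_slab->stat[si]);	/* per-cpu counter, no locking */
#endif
}

When CONFIG_SLUB_STATS is enabled, these counters are exported per cache through sysfs (e.g. /sys/kernel/slab/&lt;cache&gt;/free_add_partial), which is where the missing unfreeze_partials() accounting showed up as a permanent 0.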
-rw-r--r--	mm/slub.c	7
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 4056d29e6610..8284a206f48d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1901,11 +1901,14 @@ static void unfreeze_partials(struct kmem_cache *s)
 			}
 
 			if (l != m) {
-				if (l == M_PARTIAL)
+				if (l == M_PARTIAL) {
 					remove_partial(n, page);
-				else
+					stat(s, FREE_REMOVE_PARTIAL);
+				} else {
 					add_partial(n, page,
 						DEACTIVATE_TO_TAIL);
+					stat(s, FREE_ADD_PARTIAL);
+				}
 
 				l = m;
 			}