author	Joonsoo Kim <iamjoonsoo.kim@lge.com>	2014-07-02 18:22:35 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-07-03 12:21:53 -0400
commit	8a5b20aebaa3d0ade5b8381e64d35fb777b7b355 (patch)
tree	aec5b72a829413901bb0c1a03371f7dc2b84709a /mm/slub.c
parent	dc78327c0ea7da5186d8cbc1647bd6088c5c9fa5 (diff)
slub: fix off by one in number of slab tests
min_partial is the minimum number of slabs cached on a node's partial list. So, if nr_partial is less than it, we keep a newly emptied slab on the node partial list rather than freeing it. But if nr_partial is equal to or greater than it, we already have enough partial slabs and should free the newly emptied slab.

The current implementation misses the equal case, so if min_partial is set to 0, at least one slab can remain cached. This is a critical problem for the kmemcg destruction logic, which does not work properly while any slabs stay cached. This patch fixes the off-by-one.

Fixes 91cb69620284 ("slub: make dead memcg caches discard free slabs immediately")

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
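To make the off-by-one concrete, here is a minimal userspace C sketch of the decision the patch corrects. should_free_empty_slab() is a hypothetical helper written only for illustration, not a kernel function; just the nr_partial/min_partial comparison mirrors the code in the diff below.

/*
 * Minimal standalone sketch (not kernel code) of the check this patch
 * corrects.  The field names mirror the kernel's, but the helper itself
 * is hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

/* Keep a newly emptied slab only while the node holds fewer than
 * min_partial partial slabs; otherwise it should be freed. */
static bool should_free_empty_slab(unsigned long nr_partial,
				   unsigned long min_partial)
{
	/* Old (buggy) test was nr_partial > min_partial.  With
	 * min_partial == 0 and nr_partial == 0 it returned false, so one
	 * empty slab stayed cached even though none should be. */
	return nr_partial >= min_partial;	/* corrected: >= instead of > */
}

int main(void)
{
	/* Boundary case kmemcg destruction relies on: min_partial == 0
	 * must mean "cache nothing", so an empty slab is freed at once. */
	printf("%d\n", should_free_empty_slab(0, 0));	/* prints 1 with >= */
	return 0;
}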
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	6
1 file changed, 3 insertions, 3 deletions
diff --git a/mm/slub.c b/mm/slub.c
index b2b047327d76..73004808537e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1881,7 +1881,7 @@ redo:
 
 	new.frozen = 0;
 
-	if (!new.inuse && n->nr_partial > s->min_partial)
+	if (!new.inuse && n->nr_partial >= s->min_partial)
 		m = M_FREE;
 	else if (new.freelist) {
 		m = M_PARTIAL;
@@ -1992,7 +1992,7 @@ static void unfreeze_partials(struct kmem_cache *s,
 				new.freelist, new.counters,
 				"unfreezing slab"));
 
-		if (unlikely(!new.inuse && n->nr_partial > s->min_partial)) {
+		if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) {
 			page->next = discard_page;
 			discard_page = page;
 		} else {
@@ -2620,7 +2620,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		return;
 	}
 
-	if (unlikely(!new.inuse && n->nr_partial > s->min_partial))
+	if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
 		goto slab_empty;
 
 	/*