author     Glauber Costa <glommer@parallels.com>            2012-12-18 17:22:50 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-12-18 18:02:14 -0500
commit     1f458cbf122288b23620ee822e19bcbb76c8d6ec (patch)
tree       fa434b9ff6d6bdfd2daaf24fd7812cc975cba7b7 /mm/slub.c
parent     d79923fad95b0cdf7770e024677180c734cb7148 (diff)
memcg: destroy memcg caches
Implement destruction of memcg caches. Right now, only caches whose
reference counter is the last one remaining are deleted. If there are any
other reference counters around, we just leave the caches lying around
until they go away. When that happens, a destruction function is called
from the cache code. Caches are only destroyed in process context, so in
the general case we queue them up for later processing.

Signed-off-by: Glauber Costa <glommer@parallels.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Frederic Weisbecker <fweisbec@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: JoonSoo Kim <js1304@gmail.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Rik van Riel <riel@redhat.com>
Cc: Suleiman Souhlal <suleiman@google.com>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
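A minimal sketch of the deferral the message describes, assuming field
names such as memcg_params->destroy and memcg_params->cachep that do not
appear in this mm/slub.c diff: since the last reference can be dropped
from atomic context, the actual kmem_cache_destroy() is handed to a work
item and runs later in process context.

    /*
     * Illustrative sketch only; memcg_params, ->destroy and ->cachep
     * are assumed names, not code taken from this diff.
     */
    static void kmem_cache_destroy_work_func(struct work_struct *w)
    {
    	struct memcg_cache_params *p;

    	p = container_of(w, struct memcg_cache_params, destroy);
    	/* Runs in process context, so destroying the cache is safe. */
    	kmem_cache_destroy(p->cachep);
    }

    void mem_cgroup_destroy_cache(struct kmem_cache *cachep)
    {
    	/*
    	 * The last slab page may be released from interrupt/atomic
    	 * context, so defer the destruction to a workqueue instead
    	 * of calling kmem_cache_destroy() directly.
    	 */
    	INIT_WORK(&cachep->memcg_params->destroy,
    		  kmem_cache_destroy_work_func);
    	schedule_work(&cachep->memcg_params->destroy);
    }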
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c  7
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/mm/slub.c b/mm/slub.c
index ef39e872b8eb..692177bebdf0 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1344,6 +1344,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	void *start;
 	void *last;
 	void *p;
+	int order;
 
 	BUG_ON(flags & GFP_SLAB_BUG_MASK);
 
@@ -1352,7 +1353,9 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	if (!page)
 		goto out;
 
+	order = compound_order(page);
 	inc_slabs_node(s, page_to_nid(page), page->objects);
+	memcg_bind_pages(s, order);
 	page->slab_cache = s;
 	__SetPageSlab(page);
 	if (page->pfmemalloc)
@@ -1361,7 +1364,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	start = page_address(page);
 
 	if (unlikely(s->flags & SLAB_POISON))
-		memset(start, POISON_INUSE, PAGE_SIZE << compound_order(page));
+		memset(start, POISON_INUSE, PAGE_SIZE << order);
 
 	last = start;
 	for_each_object(p, s, start, page->objects) {
@@ -1402,6 +1405,8 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 
 	__ClearPageSlabPfmemalloc(page);
 	__ClearPageSlab(page);
+
+	memcg_release_pages(s, order);
 	reset_page_mapcount(page);
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += pages;
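For context, a hedged sketch of what the two helpers called above
plausibly do; they are defined outside mm/slub.c in this series, and the
memcg_params/nr_pages names are assumptions for illustration.
memcg_bind_pages() takes one reference per page backing a new slab, and
memcg_release_pages() drops them, triggering the queued destruction once
the count reaches zero.

    /* Sketch; not part of this mm/slub.c diff. Field names assumed. */
    static inline void memcg_bind_pages(struct kmem_cache *s, int order)
    {
    	if (is_root_cache(s))
    		return;
    	/* One reference per page backing the freshly allocated slab. */
    	atomic_add(1 << order, &s->memcg_params->nr_pages);
    }

    static inline void memcg_release_pages(struct kmem_cache *s, int order)
    {
    	if (is_root_cache(s))
    		return;
    	/* Last page gone: hand the cache to the deferred destroyer. */
    	if (atomic_sub_and_test(1 << order, &s->memcg_params->nr_pages))
    		mem_cgroup_destroy_cache(s);
    }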