Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	94
1 file changed, 88 insertions(+), 6 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 2c3a2e0394db..e7667a3584bc 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -87,7 +87,6 @@
  */
 
 #include <linux/slab.h>
-#include "slab.h"
 #include <linux/mm.h>
 #include <linux/poison.h>
 #include <linux/swap.h>
@@ -128,6 +127,8 @@
 
 #include "internal.h"
 
+#include "slab.h"
+
 /*
  * DEBUG - 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
  *	  0 for faster, smaller code (especially in the critical paths).
@@ -641,6 +642,26 @@ static void init_node_lock_keys(int q)
 	}
 }
 
+static void on_slab_lock_classes_node(struct kmem_cache *cachep, int q)
+{
+	struct kmem_list3 *l3;
+	l3 = cachep->nodelists[q];
+	if (!l3)
+		return;
+
+	slab_set_lock_classes(cachep, &on_slab_l3_key,
+			&on_slab_alc_key, q);
+}
+
+static inline void on_slab_lock_classes(struct kmem_cache *cachep)
+{
+	int node;
+
+	VM_BUG_ON(OFF_SLAB(cachep));
+	for_each_node(node)
+		on_slab_lock_classes_node(cachep, node);
+}
+
 static inline void init_lock_keys(void)
 {
 	int node;
@@ -657,6 +678,14 @@ static inline void init_lock_keys(void)
 {
 }
 
+static inline void on_slab_lock_classes(struct kmem_cache *cachep)
+{
+}
+
+static inline void on_slab_lock_classes_node(struct kmem_cache *cachep, int node)
+{
+}
+
 static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
 {
 }
@@ -1385,6 +1414,9 @@ static int __cpuinit cpuup_prepare(long cpu)
 		free_alien_cache(alien);
 		if (cachep->flags & SLAB_DEBUG_OBJECTS)
 			slab_set_debugobj_lock_classes_node(cachep, node);
+		else if (!OFF_SLAB(cachep) &&
+			 !(cachep->flags & SLAB_DESTROY_BY_RCU))
+			on_slab_lock_classes_node(cachep, node);
 	}
 	init_node_lock_keys(node);
 
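
Note: taken with the two hunks above, this gives every on-slab cache that
is not SLAB_DESTROY_BY_RCU its own lockdep classes (on_slab_l3_key /
on_slab_alc_key), mirroring the annotation the kmalloc caches already had.
A minimal sketch of the per-node dispatch, assuming only the helpers in
this patch (the wrapper name is hypothetical):

	static void pick_node_lock_classes(struct kmem_cache *cachep, int node)
	{
		if (cachep->flags & SLAB_DEBUG_OBJECTS)
			/* debugobjects caches keep their dedicated classes */
			slab_set_debugobj_lock_classes_node(cachep, node);
		else if (!OFF_SLAB(cachep) &&
			 !(cachep->flags & SLAB_DESTROY_BY_RCU))
			/* on-slab, non-RCU caches get the new classes */
			on_slab_lock_classes_node(cachep, node);
	}
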
@@ -1863,6 +1895,7 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 		if (page->pfmemalloc)
 			SetPageSlabPfmemalloc(page + i);
 	}
+	memcg_bind_pages(cachep, cachep->gfporder);
 
 	if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
 		kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);
@@ -1899,9 +1932,11 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr)
 		__ClearPageSlab(page);
 		page++;
 	}
+
+	memcg_release_pages(cachep, cachep->gfporder);
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += nr_freed;
-	free_pages((unsigned long)addr, cachep->gfporder);
+	free_memcg_kmem_pages((unsigned long)addr, cachep->gfporder);
 }
 
 static void kmem_rcu_free(struct rcu_head *head)
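
Note: the two page-level hooks are a matched pair. kmem_getpages() now
charges freshly allocated slab pages to the cache's memcg via
memcg_bind_pages(), and kmem_freepages() drops that reference with
memcg_release_pages() before free_memcg_kmem_pages() uncharges and frees
them. A rough sketch of the lifetime under that reading (wrapper names
hypothetical):

	static void *grow_example(struct kmem_cache *cachep, gfp_t flags, int nid)
	{
		/* allocates pages and charges them: memcg_bind_pages() */
		return kmem_getpages(cachep, flags, nid);
	}

	static void shrink_example(struct kmem_cache *cachep, void *addr)
	{
		/* memcg_release_pages() + free_memcg_kmem_pages() */
		kmem_freepages(cachep, addr);
	}
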
@@ -2489,7 +2524,8 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 		WARN_ON_ONCE(flags & SLAB_DESTROY_BY_RCU);
 
 		slab_set_debugobj_lock_classes(cachep);
-	}
+	} else if (!OFF_SLAB(cachep) && !(flags & SLAB_DESTROY_BY_RCU))
+		on_slab_lock_classes(cachep);
 
 	return 0;
 }
@@ -3453,6 +3489,8 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	if (slab_should_failslab(cachep, flags))
 		return NULL;
 
+	cachep = memcg_kmem_get_cache(cachep, flags);
+
 	cache_alloc_debugcheck_before(cachep, flags);
 	local_irq_save(save_flags);
 
@@ -3538,6 +3576,8 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
 	if (slab_should_failslab(cachep, flags))
 		return NULL;
 
+	cachep = memcg_kmem_get_cache(cachep, flags);
+
 	cache_alloc_debugcheck_before(cachep, flags);
 	local_irq_save(save_flags);
 	objp = __do_cache_alloc(cachep, flags);
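
Note: both allocation entry points (slab_alloc_node() above and
slab_alloc() here) now route the cache through memcg_kmem_get_cache()
before any per-cpu state is touched, so a task inside a memcg is
transparently redirected to that memcg's clone of the cache. A hedged
sketch of the effect (the wrapper is hypothetical; memcg_kmem_get_cache()
itself short-circuits when kmem accounting is disabled):

	static inline struct kmem_cache *
	pick_alloc_cache(struct kmem_cache *cachep, gfp_t flags)
	{
		/*
		 * Returns cachep unchanged for root-memcg tasks or
		 * unaccounted allocations; otherwise returns (and
		 * lazily creates) the per-memcg child cache.
		 */
		return memcg_kmem_get_cache(cachep, flags);
	}
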
@@ -3851,6 +3891,9 @@ EXPORT_SYMBOL(__kmalloc);
 void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 {
 	unsigned long flags;
+	cachep = cache_from_obj(cachep, objp);
+	if (!cachep)
+		return;
 
 	local_irq_save(flags);
 	debug_check_no_locks_freed(objp, cachep->object_size);
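
Note: the free side has to cope with the redirection done at allocation
time. An object handed out by a per-memcg child cache may be freed
through the root cache pointer, so kmem_cache_free() first re-derives
the owning cache from the object's slab page via cache_from_obj().
Illustrative caller (hypothetical function, real APIs):

	static void memcg_free_example(struct kmem_cache *root_cachep)
	{
		/* may transparently come from a memcg child cache */
		void *obj = kmem_cache_alloc(root_cachep, GFP_KERNEL);

		if (!obj)
			return;

		/* safe: cache_from_obj() finds the real owner first */
		kmem_cache_free(root_cachep, obj);
	}
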
@@ -3998,7 +4041,7 @@ static void do_ccupdate_local(void *info)
 }
 
 /* Always called with the slab_mutex held */
-static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
+static int __do_tune_cpucache(struct kmem_cache *cachep, int limit,
 				int batchcount, int shared, gfp_t gfp)
 {
 	struct ccupdate_struct *new;
@@ -4041,12 +4084,49 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
 	return alloc_kmemlist(cachep, gfp);
 }
 
+static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
+				int batchcount, int shared, gfp_t gfp)
+{
+	int ret;
+	struct kmem_cache *c = NULL;
+	int i = 0;
+
+	ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
+
+	if (slab_state < FULL)
+		return ret;
+
+	if ((ret < 0) || !is_root_cache(cachep))
+		return ret;
+
+	VM_BUG_ON(!mutex_is_locked(&slab_mutex));
+	for_each_memcg_cache_index(i) {
+		c = cache_from_memcg(cachep, i);
+		if (c)
+			/* return value determined by the parent cache only */
+			__do_tune_cpucache(c, limit, batchcount, shared, gfp);
+	}
+
+	return ret;
+}
+
 /* Called with slab_mutex held always */
 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
 {
 	int err;
-	int limit, shared;
+	int limit = 0;
+	int shared = 0;
+	int batchcount = 0;
+
+	if (!is_root_cache(cachep)) {
+		struct kmem_cache *root = memcg_root_cache(cachep);
+		limit = root->limit;
+		shared = root->shared;
+		batchcount = root->batchcount;
+	}
 
+	if (limit && shared && batchcount)
+		goto skip_setup;
 	/*
 	 * The head array serves three purposes:
 	 * - create a LIFO ordering, i.e. return objects that are cache-warm
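
Note: do_tune_cpucache() is now a fan-out wrapper. Tuning a root cache
re-tunes every live memcg clone with the same limit/batchcount/shared,
while the return value reflects the root alone; the new prologue in
enable_cpucache() is the other half, letting a child created later
inherit the root's values instead of recomputing defaults from object
size. Sketch of a caller (hypothetical; do_tune_cpucache() is file-local
and expects slab_mutex held):

	static int retune_example(struct kmem_cache *root)
	{
		int err;

		mutex_lock(&slab_mutex);
		/* fans out to every cache_from_memcg(root, i) internally */
		err = do_tune_cpucache(root, 120, 60, 8, GFP_KERNEL);
		mutex_unlock(&slab_mutex);

		return err;	/* child failures deliberately ignored */
	}
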
@@ -4088,7 +4168,9 @@ static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
 	if (limit > 32)
 		limit = 32;
 #endif
-	err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared, gfp);
+	batchcount = (limit + 1) / 2;
+skip_setup:
+	err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
 	if (err)
 		printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
 			cachep->name, -err);
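
Note: for a memcg child cache the rewritten enable_cpucache() now
short-circuits. A worked trace, assuming the root was previously tuned
to limit=120, batchcount=60, shared=8:

	/* child cache enters enable_cpucache():
	 *   is_root_cache(cachep)                     -> false
	 *   root = memcg_root_cache(cachep);
	 *   limit = 120; shared = 8; batchcount = 60;    (inherited)
	 *   limit && shared && batchcount             -> goto skip_setup
	 *   do_tune_cpucache(cachep, 120, 60, 8, gfp);
	 * the size-based defaults between the goto and skip_setup are
	 * never computed.
	 */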