author     Christoph Lameter <cl@linux.com>       2012-07-06 16:25:12 -0400
committer  Pekka Enberg <penberg@kernel.org>      2012-07-09 05:13:41 -0400
commit     18004c5d4084d965aa1396392706b8688306427a (patch)
tree       ae480cb4514cbddf38ee43ec4513f59cfa42c3d7 /mm/slab.c
parent     97d06609158e61f6bdf538c4a6788e2de492236f (diff)
mm, sl[aou]b: Use a common mutex definition
Use the mutex definition from SLAB and make it the common way to take a sleeping lock.

This has the effect of using a mutex instead of a rw semaphore for SLUB.

SLOB gains the use of a mutex for kmem_cache_create serialization. Not needed now but SLOB may acquire some more features later (like slabinfo / sysfs support) through the expansion of the common code that will need this.

Reviewed-by: Glauber Costa <glommer@parallels.com>
Reviewed-by: Joonsoo Kim <js1304@gmail.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
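For context (not part of the mm/slab.c diff shown below): after this change SLAB no longer carries its own cache_chain / cache_chain_mutex; it references the shared slab_caches list and slab_mutex from the common slab code. A minimal sketch of what those shared pieces could look like, assuming (as elsewhere in this series) that the declarations sit in mm/slab.h and the single definitions in mm/slab_common.c:

    /* mm/slab.h -- declarations shared by slab/slub/slob (sketch only) */
    #include <linux/list.h>
    #include <linux/mutex.h>

    /* The list of all slab caches on the system */
    extern struct list_head slab_caches;

    /* The slab cache mutex protects the above list */
    extern struct mutex slab_mutex;

    /* mm/slab_common.c -- single shared definition (sketch only) */
    LIST_HEAD(slab_caches);
    DEFINE_MUTEX(slab_mutex);

Note that the SLAB side still initializes the list itself in kmem_cache_init() (see the INIT_LIST_HEAD(&slab_caches) hunk below), so nothing in slab.c relies on a static initializer for slab_caches.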
Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c | 108
1 file changed, 51 insertions(+), 57 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 59a466b85b0f..fd7dac67c26e 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -68,7 +68,7 @@
  * Further notes from the original documentation:
  *
  * 11 April '97. Started multi-threading - markhe
- * The global cache-chain is protected by the mutex 'cache_chain_mutex'.
+ * The global cache-chain is protected by the mutex 'slab_mutex'.
  * The sem is only needed when accessing/extending the cache-chain, which
  * can never happen inside an interrupt (kmem_cache_create(),
  * kmem_cache_shrink() and kmem_cache_reap()).
@@ -671,12 +671,6 @@ static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
 }
 #endif
 
-/*
- * Guard access to the cache-chain.
- */
-static DEFINE_MUTEX(cache_chain_mutex);
-static struct list_head cache_chain;
-
 static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
 
 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
@@ -1100,7 +1094,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
  * When hotplugging memory or a cpu, existing nodelists are not replaced if
  * already in use.
  *
- * Must hold cache_chain_mutex.
+ * Must hold slab_mutex.
  */
 static int init_cache_nodelists_node(int node)
 {
@@ -1108,7 +1102,7 @@ static int init_cache_nodelists_node(int node)
         struct kmem_list3 *l3;
         const int memsize = sizeof(struct kmem_list3);
 
-        list_for_each_entry(cachep, &cache_chain, list) {
+        list_for_each_entry(cachep, &slab_caches, list) {
                 /*
                  * Set up the size64 kmemlist for cpu before we can
                  * begin anything. Make sure some other cpu on this
@@ -1124,7 +1118,7 @@ static int init_cache_nodelists_node(int node)
 
                         /*
                          * The l3s don't come and go as CPUs come and
-                         * go. cache_chain_mutex is sufficient
+                         * go. slab_mutex is sufficient
                          * protection here.
                          */
                         cachep->nodelists[node] = l3;
@@ -1146,7 +1140,7 @@ static void __cpuinit cpuup_canceled(long cpu)
         int node = cpu_to_mem(cpu);
         const struct cpumask *mask = cpumask_of_node(node);
 
-        list_for_each_entry(cachep, &cache_chain, list) {
+        list_for_each_entry(cachep, &slab_caches, list) {
                 struct array_cache *nc;
                 struct array_cache *shared;
                 struct array_cache **alien;
@@ -1196,7 +1190,7 @@ free_array_cache:
          * the respective cache's slabs, now we can go ahead and
          * shrink each nodelist to its limit.
          */
-        list_for_each_entry(cachep, &cache_chain, list) {
+        list_for_each_entry(cachep, &slab_caches, list) {
                 l3 = cachep->nodelists[node];
                 if (!l3)
                         continue;
@@ -1225,7 +1219,7 @@ static int __cpuinit cpuup_prepare(long cpu)
          * Now we can go ahead with allocating the shared arrays and
          * array caches
          */
-        list_for_each_entry(cachep, &cache_chain, list) {
+        list_for_each_entry(cachep, &slab_caches, list) {
                 struct array_cache *nc;
                 struct array_cache *shared = NULL;
                 struct array_cache **alien = NULL;
@@ -1293,9 +1287,9 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
         switch (action) {
         case CPU_UP_PREPARE:
         case CPU_UP_PREPARE_FROZEN:
-                mutex_lock(&cache_chain_mutex);
+                mutex_lock(&slab_mutex);
                 err = cpuup_prepare(cpu);
-                mutex_unlock(&cache_chain_mutex);
+                mutex_unlock(&slab_mutex);
                 break;
         case CPU_ONLINE:
         case CPU_ONLINE_FROZEN:
@@ -1305,7 +1299,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
         case CPU_DOWN_PREPARE:
         case CPU_DOWN_PREPARE_FROZEN:
                 /*
-                 * Shutdown cache reaper. Note that the cache_chain_mutex is
+                 * Shutdown cache reaper. Note that the slab_mutex is
                  * held so that if cache_reap() is invoked it cannot do
                  * anything expensive but will only modify reap_work
                  * and reschedule the timer.
@@ -1332,9 +1326,9 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 #endif
         case CPU_UP_CANCELED:
         case CPU_UP_CANCELED_FROZEN:
-                mutex_lock(&cache_chain_mutex);
+                mutex_lock(&slab_mutex);
                 cpuup_canceled(cpu);
-                mutex_unlock(&cache_chain_mutex);
+                mutex_unlock(&slab_mutex);
                 break;
         }
         return notifier_from_errno(err);
@@ -1350,14 +1344,14 @@ static struct notifier_block __cpuinitdata cpucache_notifier = {
  * Returns -EBUSY if all objects cannot be drained so that the node is not
  * removed.
  *
- * Must hold cache_chain_mutex.
+ * Must hold slab_mutex.
  */
 static int __meminit drain_cache_nodelists_node(int node)
 {
         struct kmem_cache *cachep;
         int ret = 0;
 
-        list_for_each_entry(cachep, &cache_chain, list) {
+        list_for_each_entry(cachep, &slab_caches, list) {
                 struct kmem_list3 *l3;
 
                 l3 = cachep->nodelists[node];
@@ -1388,14 +1382,14 @@ static int __meminit slab_memory_callback(struct notifier_block *self,
 
         switch (action) {
         case MEM_GOING_ONLINE:
-                mutex_lock(&cache_chain_mutex);
+                mutex_lock(&slab_mutex);
                 ret = init_cache_nodelists_node(nid);
-                mutex_unlock(&cache_chain_mutex);
+                mutex_unlock(&slab_mutex);
                 break;
         case MEM_GOING_OFFLINE:
-                mutex_lock(&cache_chain_mutex);
+                mutex_lock(&slab_mutex);
                 ret = drain_cache_nodelists_node(nid);
-                mutex_unlock(&cache_chain_mutex);
+                mutex_unlock(&slab_mutex);
                 break;
         case MEM_ONLINE:
         case MEM_OFFLINE:
@@ -1499,8 +1493,8 @@ void __init kmem_cache_init(void)
         node = numa_mem_id();
 
         /* 1) create the cache_cache */
-        INIT_LIST_HEAD(&cache_chain);
-        list_add(&cache_cache.list, &cache_chain);
+        INIT_LIST_HEAD(&slab_caches);
+        list_add(&cache_cache.list, &slab_caches);
         cache_cache.colour_off = cache_line_size();
         cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
         cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
@@ -1642,11 +1636,11 @@ void __init kmem_cache_init_late(void)
         init_lock_keys();
 
         /* 6) resize the head arrays to their final sizes */
-        mutex_lock(&cache_chain_mutex);
-        list_for_each_entry(cachep, &cache_chain, list)
+        mutex_lock(&slab_mutex);
+        list_for_each_entry(cachep, &slab_caches, list)
                 if (enable_cpucache(cachep, GFP_NOWAIT))
                         BUG();
-        mutex_unlock(&cache_chain_mutex);
+        mutex_unlock(&slab_mutex);
 
         /* Done! */
         slab_state = FULL;
@@ -2253,10 +2247,10 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
          */
         if (slab_is_available()) {
                 get_online_cpus();
-                mutex_lock(&cache_chain_mutex);
+                mutex_lock(&slab_mutex);
         }
 
-        list_for_each_entry(pc, &cache_chain, list) {
+        list_for_each_entry(pc, &slab_caches, list) {
                 char tmp;
                 int res;
 
@@ -2500,10 +2494,10 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
         }
 
         /* cache setup completed, link it into the list */
-        list_add(&cachep->list, &cache_chain);
+        list_add(&cachep->list, &slab_caches);
 oops:
         if (slab_is_available()) {
-                mutex_unlock(&cache_chain_mutex);
+                mutex_unlock(&slab_mutex);
                 put_online_cpus();
         }
         return cachep;
@@ -2622,7 +2616,7 @@ out:
         return nr_freed;
 }
 
-/* Called with cache_chain_mutex held to protect against cpu hotplug */
+/* Called with slab_mutex held to protect against cpu hotplug */
 static int __cache_shrink(struct kmem_cache *cachep)
 {
         int ret = 0, i = 0;
@@ -2657,9 +2651,9 @@ int kmem_cache_shrink(struct kmem_cache *cachep)
         BUG_ON(!cachep || in_interrupt());
 
         get_online_cpus();
-        mutex_lock(&cache_chain_mutex);
+        mutex_lock(&slab_mutex);
         ret = __cache_shrink(cachep);
-        mutex_unlock(&cache_chain_mutex);
+        mutex_unlock(&slab_mutex);
         put_online_cpus();
         return ret;
 }
@@ -2687,15 +2681,15 @@ void kmem_cache_destroy(struct kmem_cache *cachep)
 
         /* Find the cache in the chain of caches. */
         get_online_cpus();
-        mutex_lock(&cache_chain_mutex);
+        mutex_lock(&slab_mutex);
         /*
          * the chain is never empty, cache_cache is never destroyed
          */
         list_del(&cachep->list);
         if (__cache_shrink(cachep)) {
                 slab_error(cachep, "Can't free all objects");
-                list_add(&cachep->list, &cache_chain);
-                mutex_unlock(&cache_chain_mutex);
+                list_add(&cachep->list, &slab_caches);
+                mutex_unlock(&slab_mutex);
                 put_online_cpus();
                 return;
         }
@@ -2704,7 +2698,7 @@ void kmem_cache_destroy(struct kmem_cache *cachep)
         rcu_barrier();
 
         __kmem_cache_destroy(cachep);
-        mutex_unlock(&cache_chain_mutex);
+        mutex_unlock(&slab_mutex);
         put_online_cpus();
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
@@ -4017,7 +4011,7 @@ static void do_ccupdate_local(void *info)
         new->new[smp_processor_id()] = old;
 }
 
-/* Always called with the cache_chain_mutex held */
+/* Always called with the slab_mutex held */
 static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
                             int batchcount, int shared, gfp_t gfp)
 {
@@ -4061,7 +4055,7 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
         return alloc_kmemlist(cachep, gfp);
 }
 
-/* Called with cache_chain_mutex held always */
+/* Called with slab_mutex held always */
 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
 {
         int err;
@@ -4163,11 +4157,11 @@ static void cache_reap(struct work_struct *w)
         int node = numa_mem_id();
         struct delayed_work *work = to_delayed_work(w);
 
-        if (!mutex_trylock(&cache_chain_mutex))
+        if (!mutex_trylock(&slab_mutex))
                 /* Give up. Setup the next iteration. */
                 goto out;
 
-        list_for_each_entry(searchp, &cache_chain, list) {
+        list_for_each_entry(searchp, &slab_caches, list) {
                 check_irq_on();
 
                 /*
@@ -4205,7 +4199,7 @@ next:
                 cond_resched();
         }
         check_irq_on();
-        mutex_unlock(&cache_chain_mutex);
+        mutex_unlock(&slab_mutex);
         next_reap_node();
 out:
         /* Set up the next iteration */
@@ -4241,21 +4235,21 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 {
         loff_t n = *pos;
 
-        mutex_lock(&cache_chain_mutex);
+        mutex_lock(&slab_mutex);
         if (!n)
                 print_slabinfo_header(m);
 
-        return seq_list_start(&cache_chain, *pos);
+        return seq_list_start(&slab_caches, *pos);
 }
 
 static void *s_next(struct seq_file *m, void *p, loff_t *pos)
 {
-        return seq_list_next(p, &cache_chain, pos);
+        return seq_list_next(p, &slab_caches, pos);
 }
 
 static void s_stop(struct seq_file *m, void *p)
 {
-        mutex_unlock(&cache_chain_mutex);
+        mutex_unlock(&slab_mutex);
 }
 
 static int s_show(struct seq_file *m, void *p)
@@ -4406,9 +4400,9 @@ static ssize_t slabinfo_write(struct file *file, const char __user *buffer,
                 return -EINVAL;
 
         /* Find the cache in the chain of caches. */
-        mutex_lock(&cache_chain_mutex);
+        mutex_lock(&slab_mutex);
         res = -EINVAL;
-        list_for_each_entry(cachep, &cache_chain, list) {
+        list_for_each_entry(cachep, &slab_caches, list) {
                 if (!strcmp(cachep->name, kbuf)) {
                         if (limit < 1 || batchcount < 1 ||
                                 batchcount > limit || shared < 0) {
@@ -4421,7 +4415,7 @@ static ssize_t slabinfo_write(struct file *file, const char __user *buffer,
                         break;
                 }
         }
-        mutex_unlock(&cache_chain_mutex);
+        mutex_unlock(&slab_mutex);
         if (res >= 0)
                 res = count;
         return res;
@@ -4444,8 +4438,8 @@ static const struct file_operations proc_slabinfo_operations = {
 
 static void *leaks_start(struct seq_file *m, loff_t *pos)
 {
-        mutex_lock(&cache_chain_mutex);
-        return seq_list_start(&cache_chain, *pos);
+        mutex_lock(&slab_mutex);
+        return seq_list_start(&slab_caches, *pos);
 }
 
 static inline int add_caller(unsigned long *n, unsigned long v)
@@ -4544,17 +4538,17 @@ static int leaks_show(struct seq_file *m, void *p)
         name = cachep->name;
         if (n[0] == n[1]) {
                 /* Increase the buffer size */
-                mutex_unlock(&cache_chain_mutex);
+                mutex_unlock(&slab_mutex);
                 m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
                 if (!m->private) {
                         /* Too bad, we are really out */
                         m->private = n;
-                        mutex_lock(&cache_chain_mutex);
+                        mutex_lock(&slab_mutex);
                         return -ENOMEM;
                 }
                 *(unsigned long *)m->private = n[0] * 2;
                 kfree(n);
-                mutex_lock(&cache_chain_mutex);
+                mutex_lock(&slab_mutex);
                 /* Now make sure this entry will be retried */
                 m->count = m->size;
                 return 0;