author     Christoph Lameter <cl@linux.com>   2012-07-06 16:25:12 -0400
committer  Pekka Enberg <penberg@kernel.org>  2012-07-09 05:13:41 -0400
commit     18004c5d4084d965aa1396392706b8688306427a (patch)
tree       ae480cb4514cbddf38ee43ec4513f59cfa42c3d7 /mm
parent     97d06609158e61f6bdf538c4a6788e2de492236f (diff)
mm, sl[aou]b: Use a common mutex definition
Use the mutex definition from SLAB and make it the common way to take a sleeping lock. This has the effect of using a mutex instead of a rw semaphore for SLUB. SLOB gains the use of a mutex for kmem_cache_create serialization. Not needed now but SLOB may acquire some more features later (like slabinfo / sysfs support) through the expansion of the common code that will need this.

Reviewed-by: Glauber Costa <glommer@parallels.com>
Reviewed-by: Joonsoo Kim <js1304@gmail.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
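For orientation before the diff: the lock and the cache list move into the common code, and every allocator then serializes on the same mutex. The sketch below is illustrative only; slab_mutex and slab_caches are the real symbols this patch adds to mm/slab.h and mm/slab_common.c, but the walker function and its name are hypothetical and not part of the patch.

/* Sketch of the shared locking pattern this patch converges on.
 * The externs mirror the declarations added to mm/slab.h below;
 * the helper itself is hypothetical. */
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

extern struct mutex slab_mutex;         /* defined once in mm/slab_common.c */
extern struct list_head slab_caches;    /* defined once in mm/slab_common.c */

static void sketch_for_each_cache(void (*fn)(struct kmem_cache *))
{
        struct kmem_cache *s;

        /* A plain sleeping mutex replaces SLUB's rw_semaphore (slub_lock)
         * and SLAB's cache_chain_mutex for cache-chain traversal. */
        mutex_lock(&slab_mutex);
        list_for_each_entry(s, &slab_caches, list)
                fn(s);
        mutex_unlock(&slab_mutex);
}

The same lock also gives SLOB a serialization point for kmem_cache_create, which the commit message notes will matter once more common code (slabinfo / sysfs) is shared.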
Diffstat (limited to 'mm')
-rw-r--r--  mm/slab.c         108
-rw-r--r--  mm/slab.h           4
-rw-r--r--  mm/slab_common.c    2
-rw-r--r--  mm/slub.c          54
4 files changed, 82 insertions, 86 deletions
diff --git a/mm/slab.c b/mm/slab.c
index 59a466b85b0f..fd7dac67c26e 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -68,7 +68,7 @@
  * Further notes from the original documentation:
  *
  * 11 April '97. Started multi-threading - markhe
- * The global cache-chain is protected by the mutex 'cache_chain_mutex'.
+ * The global cache-chain is protected by the mutex 'slab_mutex'.
  * The sem is only needed when accessing/extending the cache-chain, which
  * can never happen inside an interrupt (kmem_cache_create(),
  * kmem_cache_shrink() and kmem_cache_reap()).
@@ -671,12 +671,6 @@ static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
 }
 #endif

-/*
- * Guard access to the cache-chain.
- */
-static DEFINE_MUTEX(cache_chain_mutex);
-static struct list_head cache_chain;
-
 static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);

 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
@@ -1100,7 +1094,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
  * When hotplugging memory or a cpu, existing nodelists are not replaced if
  * already in use.
  *
- * Must hold cache_chain_mutex.
+ * Must hold slab_mutex.
  */
 static int init_cache_nodelists_node(int node)
 {
@@ -1108,7 +1102,7 @@ static int init_cache_nodelists_node(int node)
         struct kmem_list3 *l3;
         const int memsize = sizeof(struct kmem_list3);

-        list_for_each_entry(cachep, &cache_chain, list) {
+        list_for_each_entry(cachep, &slab_caches, list) {
                 /*
                  * Set up the size64 kmemlist for cpu before we can
                  * begin anything. Make sure some other cpu on this
@@ -1124,7 +1118,7 @@ static int init_cache_nodelists_node(int node)

                 /*
                  * The l3s don't come and go as CPUs come and
-                 * go. cache_chain_mutex is sufficient
+                 * go. slab_mutex is sufficient
                  * protection here.
                  */
                 cachep->nodelists[node] = l3;
@@ -1146,7 +1140,7 @@ static void __cpuinit cpuup_canceled(long cpu)
         int node = cpu_to_mem(cpu);
         const struct cpumask *mask = cpumask_of_node(node);

-        list_for_each_entry(cachep, &cache_chain, list) {
+        list_for_each_entry(cachep, &slab_caches, list) {
                 struct array_cache *nc;
                 struct array_cache *shared;
                 struct array_cache **alien;
@@ -1196,7 +1190,7 @@ free_array_cache:
          * the respective cache's slabs, now we can go ahead and
          * shrink each nodelist to its limit.
          */
-        list_for_each_entry(cachep, &cache_chain, list) {
+        list_for_each_entry(cachep, &slab_caches, list) {
                 l3 = cachep->nodelists[node];
                 if (!l3)
                         continue;
@@ -1225,7 +1219,7 @@ static int __cpuinit cpuup_prepare(long cpu)
          * Now we can go ahead with allocating the shared arrays and
          * array caches
          */
-        list_for_each_entry(cachep, &cache_chain, list) {
+        list_for_each_entry(cachep, &slab_caches, list) {
                 struct array_cache *nc;
                 struct array_cache *shared = NULL;
                 struct array_cache **alien = NULL;
@@ -1293,9 +1287,9 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
         switch (action) {
         case CPU_UP_PREPARE:
         case CPU_UP_PREPARE_FROZEN:
-                mutex_lock(&cache_chain_mutex);
+                mutex_lock(&slab_mutex);
                 err = cpuup_prepare(cpu);
-                mutex_unlock(&cache_chain_mutex);
+                mutex_unlock(&slab_mutex);
                 break;
         case CPU_ONLINE:
         case CPU_ONLINE_FROZEN:
@@ -1305,7 +1299,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
         case CPU_DOWN_PREPARE:
         case CPU_DOWN_PREPARE_FROZEN:
                 /*
-                 * Shutdown cache reaper. Note that the cache_chain_mutex is
+                 * Shutdown cache reaper. Note that the slab_mutex is
                  * held so that if cache_reap() is invoked it cannot do
                  * anything expensive but will only modify reap_work
                  * and reschedule the timer.
@@ -1332,9 +1326,9 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 #endif
         case CPU_UP_CANCELED:
         case CPU_UP_CANCELED_FROZEN:
-                mutex_lock(&cache_chain_mutex);
+                mutex_lock(&slab_mutex);
                 cpuup_canceled(cpu);
-                mutex_unlock(&cache_chain_mutex);
+                mutex_unlock(&slab_mutex);
                 break;
         }
         return notifier_from_errno(err);
@@ -1350,14 +1344,14 @@ static struct notifier_block __cpuinitdata cpucache_notifier = {
  * Returns -EBUSY if all objects cannot be drained so that the node is not
  * removed.
  *
- * Must hold cache_chain_mutex.
+ * Must hold slab_mutex.
  */
 static int __meminit drain_cache_nodelists_node(int node)
 {
         struct kmem_cache *cachep;
         int ret = 0;

-        list_for_each_entry(cachep, &cache_chain, list) {
+        list_for_each_entry(cachep, &slab_caches, list) {
                 struct kmem_list3 *l3;

                 l3 = cachep->nodelists[node];
@@ -1388,14 +1382,14 @@ static int __meminit slab_memory_callback(struct notifier_block *self,

         switch (action) {
         case MEM_GOING_ONLINE:
-                mutex_lock(&cache_chain_mutex);
+                mutex_lock(&slab_mutex);
                 ret = init_cache_nodelists_node(nid);
-                mutex_unlock(&cache_chain_mutex);
+                mutex_unlock(&slab_mutex);
                 break;
         case MEM_GOING_OFFLINE:
-                mutex_lock(&cache_chain_mutex);
+                mutex_lock(&slab_mutex);
                 ret = drain_cache_nodelists_node(nid);
-                mutex_unlock(&cache_chain_mutex);
+                mutex_unlock(&slab_mutex);
                 break;
         case MEM_ONLINE:
         case MEM_OFFLINE:
@@ -1499,8 +1493,8 @@ void __init kmem_cache_init(void)
         node = numa_mem_id();

         /* 1) create the cache_cache */
-        INIT_LIST_HEAD(&cache_chain);
-        list_add(&cache_cache.list, &cache_chain);
+        INIT_LIST_HEAD(&slab_caches);
+        list_add(&cache_cache.list, &slab_caches);
         cache_cache.colour_off = cache_line_size();
         cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
         cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
@@ -1642,11 +1636,11 @@ void __init kmem_cache_init_late(void)
         init_lock_keys();

         /* 6) resize the head arrays to their final sizes */
-        mutex_lock(&cache_chain_mutex);
-        list_for_each_entry(cachep, &cache_chain, list)
+        mutex_lock(&slab_mutex);
+        list_for_each_entry(cachep, &slab_caches, list)
                 if (enable_cpucache(cachep, GFP_NOWAIT))
                         BUG();
-        mutex_unlock(&cache_chain_mutex);
+        mutex_unlock(&slab_mutex);

         /* Done! */
         slab_state = FULL;
@@ -2253,10 +2247,10 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
          */
         if (slab_is_available()) {
                 get_online_cpus();
-                mutex_lock(&cache_chain_mutex);
+                mutex_lock(&slab_mutex);
         }

-        list_for_each_entry(pc, &cache_chain, list) {
+        list_for_each_entry(pc, &slab_caches, list) {
                 char tmp;
                 int res;

@@ -2500,10 +2494,10 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
         }

         /* cache setup completed, link it into the list */
-        list_add(&cachep->list, &cache_chain);
+        list_add(&cachep->list, &slab_caches);
 oops:
         if (slab_is_available()) {
-                mutex_unlock(&cache_chain_mutex);
+                mutex_unlock(&slab_mutex);
                 put_online_cpus();
         }
         return cachep;
@@ -2622,7 +2616,7 @@ out:
         return nr_freed;
 }

-/* Called with cache_chain_mutex held to protect against cpu hotplug */
+/* Called with slab_mutex held to protect against cpu hotplug */
 static int __cache_shrink(struct kmem_cache *cachep)
 {
         int ret = 0, i = 0;
@@ -2657,9 +2651,9 @@ int kmem_cache_shrink(struct kmem_cache *cachep)
         BUG_ON(!cachep || in_interrupt());

         get_online_cpus();
-        mutex_lock(&cache_chain_mutex);
+        mutex_lock(&slab_mutex);
         ret = __cache_shrink(cachep);
-        mutex_unlock(&cache_chain_mutex);
+        mutex_unlock(&slab_mutex);
         put_online_cpus();
         return ret;
 }
@@ -2687,15 +2681,15 @@ void kmem_cache_destroy(struct kmem_cache *cachep)

         /* Find the cache in the chain of caches. */
         get_online_cpus();
-        mutex_lock(&cache_chain_mutex);
+        mutex_lock(&slab_mutex);
         /*
          * the chain is never empty, cache_cache is never destroyed
          */
         list_del(&cachep->list);
         if (__cache_shrink(cachep)) {
                 slab_error(cachep, "Can't free all objects");
-                list_add(&cachep->list, &cache_chain);
-                mutex_unlock(&cache_chain_mutex);
+                list_add(&cachep->list, &slab_caches);
+                mutex_unlock(&slab_mutex);
                 put_online_cpus();
                 return;
         }
@@ -2704,7 +2698,7 @@ void kmem_cache_destroy(struct kmem_cache *cachep)
         rcu_barrier();

         __kmem_cache_destroy(cachep);
-        mutex_unlock(&cache_chain_mutex);
+        mutex_unlock(&slab_mutex);
         put_online_cpus();
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
@@ -4017,7 +4011,7 @@ static void do_ccupdate_local(void *info)
         new->new[smp_processor_id()] = old;
 }

-/* Always called with the cache_chain_mutex held */
+/* Always called with the slab_mutex held */
 static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
                             int batchcount, int shared, gfp_t gfp)
 {
@@ -4061,7 +4055,7 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
         return alloc_kmemlist(cachep, gfp);
 }

-/* Called with cache_chain_mutex held always */
+/* Called with slab_mutex held always */
 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
 {
         int err;
@@ -4163,11 +4157,11 @@ static void cache_reap(struct work_struct *w)
         int node = numa_mem_id();
         struct delayed_work *work = to_delayed_work(w);

-        if (!mutex_trylock(&cache_chain_mutex))
+        if (!mutex_trylock(&slab_mutex))
                 /* Give up. Setup the next iteration. */
                 goto out;

-        list_for_each_entry(searchp, &cache_chain, list) {
+        list_for_each_entry(searchp, &slab_caches, list) {
                 check_irq_on();

                 /*
@@ -4205,7 +4199,7 @@ next:
                 cond_resched();
         }
         check_irq_on();
-        mutex_unlock(&cache_chain_mutex);
+        mutex_unlock(&slab_mutex);
         next_reap_node();
 out:
         /* Set up the next iteration */
@@ -4241,21 +4235,21 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 {
         loff_t n = *pos;

-        mutex_lock(&cache_chain_mutex);
+        mutex_lock(&slab_mutex);
         if (!n)
                 print_slabinfo_header(m);

-        return seq_list_start(&cache_chain, *pos);
+        return seq_list_start(&slab_caches, *pos);
 }

 static void *s_next(struct seq_file *m, void *p, loff_t *pos)
 {
-        return seq_list_next(p, &cache_chain, pos);
+        return seq_list_next(p, &slab_caches, pos);
 }

 static void s_stop(struct seq_file *m, void *p)
 {
-        mutex_unlock(&cache_chain_mutex);
+        mutex_unlock(&slab_mutex);
 }

 static int s_show(struct seq_file *m, void *p)
@@ -4406,9 +4400,9 @@ static ssize_t slabinfo_write(struct file *file, const char __user *buffer,
                 return -EINVAL;

         /* Find the cache in the chain of caches. */
-        mutex_lock(&cache_chain_mutex);
+        mutex_lock(&slab_mutex);
         res = -EINVAL;
-        list_for_each_entry(cachep, &cache_chain, list) {
+        list_for_each_entry(cachep, &slab_caches, list) {
                 if (!strcmp(cachep->name, kbuf)) {
                         if (limit < 1 || batchcount < 1 ||
                                         batchcount > limit || shared < 0) {
@@ -4421,7 +4415,7 @@ static ssize_t slabinfo_write(struct file *file, const char __user *buffer,
                         break;
                 }
         }
-        mutex_unlock(&cache_chain_mutex);
+        mutex_unlock(&slab_mutex);
         if (res >= 0)
                 res = count;
         return res;
@@ -4444,8 +4438,8 @@ static const struct file_operations proc_slabinfo_operations = {

 static void *leaks_start(struct seq_file *m, loff_t *pos)
 {
-        mutex_lock(&cache_chain_mutex);
-        return seq_list_start(&cache_chain, *pos);
+        mutex_lock(&slab_mutex);
+        return seq_list_start(&slab_caches, *pos);
 }

 static inline int add_caller(unsigned long *n, unsigned long v)
@@ -4544,17 +4538,17 @@ static int leaks_show(struct seq_file *m, void *p)
         name = cachep->name;
         if (n[0] == n[1]) {
                 /* Increase the buffer size */
-                mutex_unlock(&cache_chain_mutex);
+                mutex_unlock(&slab_mutex);
                 m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
                 if (!m->private) {
                         /* Too bad, we are really out */
                         m->private = n;
-                        mutex_lock(&cache_chain_mutex);
+                        mutex_lock(&slab_mutex);
                         return -ENOMEM;
                 }
                 *(unsigned long *)m->private = n[0] * 2;
                 kfree(n);
-                mutex_lock(&cache_chain_mutex);
+                mutex_lock(&slab_mutex);
                 /* Now make sure this entry will be retried */
                 m->count = m->size;
                 return 0;
diff --git a/mm/slab.h b/mm/slab.h
index f9a9815cdc82..db7848caaa25 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -23,6 +23,10 @@ enum slab_state {

 extern enum slab_state slab_state;

+/* The slab cache mutex protects the management structures during changes */
+extern struct mutex slab_mutex;
+extern struct list_head slab_caches;
+
 struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
         size_t align, unsigned long flags, void (*ctor)(void *));

diff --git a/mm/slab_common.c b/mm/slab_common.c
index ca1aaf69a1f5..50e1ff10bff9 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -19,6 +19,8 @@
 #include "slab.h"

 enum slab_state slab_state;
+LIST_HEAD(slab_caches);
+DEFINE_MUTEX(slab_mutex);

 /*
  * kmem_cache_create - Create a cache.
diff --git a/mm/slub.c b/mm/slub.c
index 4c385164d9f7..8c4fd37541d7 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -36,13 +36,13 @@

 /*
  * Lock order:
- *   1. slub_lock (Global Semaphore)
+ *   1. slab_mutex (Global Mutex)
  *   2. node->list_lock
  *   3. slab_lock(page) (Only on some arches and for debugging)
  *
- * slub_lock
+ * slab_mutex
  *
- * The role of the slub_lock is to protect the list of all the slabs
+ * The role of the slab_mutex is to protect the list of all the slabs
  * and to synchronize major metadata changes to slab cache structures.
  *
  * The slab_lock is only used for debugging and on arches that do not
@@ -183,10 +183,6 @@ static int kmem_size = sizeof(struct kmem_cache);
 static struct notifier_block slab_notifier;
 #endif

-/* A list of all slab caches on the system */
-static DECLARE_RWSEM(slub_lock);
-static LIST_HEAD(slab_caches);
-
 /*
  * Tracking user of a slab.
  */
@@ -3177,11 +3173,11 @@ static inline int kmem_cache_close(struct kmem_cache *s)
  */
 void kmem_cache_destroy(struct kmem_cache *s)
 {
-        down_write(&slub_lock);
+        mutex_lock(&slab_mutex);
         s->refcount--;
         if (!s->refcount) {
                 list_del(&s->list);
-                up_write(&slub_lock);
+                mutex_unlock(&slab_mutex);
                 if (kmem_cache_close(s)) {
                         printk(KERN_ERR "SLUB %s: %s called for cache that "
                                 "still has objects.\n", s->name, __func__);
@@ -3191,7 +3187,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
                 rcu_barrier();
                 sysfs_slab_remove(s);
         } else
-                up_write(&slub_lock);
+                mutex_unlock(&slab_mutex);
 }
 EXPORT_SYMBOL(kmem_cache_destroy);

@@ -3253,7 +3249,7 @@ static struct kmem_cache *__init create_kmalloc_cache(const char *name,

         /*
          * This function is called with IRQs disabled during early-boot on
-         * single CPU so there's no need to take slub_lock here.
+         * single CPU so there's no need to take slab_mutex here.
          */
         if (!kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN,
                                                                 flags, NULL))
@@ -3538,10 +3534,10 @@ static int slab_mem_going_offline_callback(void *arg)
 {
         struct kmem_cache *s;

-        down_read(&slub_lock);
+        mutex_lock(&slab_mutex);
         list_for_each_entry(s, &slab_caches, list)
                 kmem_cache_shrink(s);
-        up_read(&slub_lock);
+        mutex_unlock(&slab_mutex);

         return 0;
 }
@@ -3562,7 +3558,7 @@ static void slab_mem_offline_callback(void *arg)
         if (offline_node < 0)
                 return;

-        down_read(&slub_lock);
+        mutex_lock(&slab_mutex);
         list_for_each_entry(s, &slab_caches, list) {
                 n = get_node(s, offline_node);
                 if (n) {
@@ -3578,7 +3574,7 @@ static void slab_mem_offline_callback(void *arg)
                         kmem_cache_free(kmem_cache_node, n);
                 }
         }
-        up_read(&slub_lock);
+        mutex_unlock(&slab_mutex);
 }

 static int slab_mem_going_online_callback(void *arg)
@@ -3601,7 +3597,7 @@ static int slab_mem_going_online_callback(void *arg)
          * allocate a kmem_cache_node structure in order to bring the node
          * online.
          */
-        down_read(&slub_lock);
+        mutex_lock(&slab_mutex);
         list_for_each_entry(s, &slab_caches, list) {
                 /*
                  * XXX: kmem_cache_alloc_node will fallback to other nodes
@@ -3617,7 +3613,7 @@ static int slab_mem_going_online_callback(void *arg)
                 s->node[nid] = n;
         }
 out:
-        up_read(&slub_lock);
+        mutex_unlock(&slab_mutex);
         return ret;
 }

@@ -3915,7 +3911,7 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
         struct kmem_cache *s;
         char *n;

-        down_write(&slub_lock);
+        mutex_lock(&slab_mutex);
         s = find_mergeable(size, align, flags, name, ctor);
         if (s) {
                 s->refcount++;
@@ -3930,7 +3926,7 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
                         s->refcount--;
                         goto err;
                 }
-                up_write(&slub_lock);
+                mutex_unlock(&slab_mutex);
                 return s;
         }

@@ -3943,9 +3939,9 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
                 if (kmem_cache_open(s, n,
                                 size, align, flags, ctor)) {
                         list_add(&s->list, &slab_caches);
-                        up_write(&slub_lock);
+                        mutex_unlock(&slab_mutex);
                         if (sysfs_slab_add(s)) {
-                                down_write(&slub_lock);
+                                mutex_lock(&slab_mutex);
                                 list_del(&s->list);
                                 kfree(n);
                                 kfree(s);
@@ -3957,7 +3953,7 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
         }
         kfree(n);
 err:
-        up_write(&slub_lock);
+        mutex_unlock(&slab_mutex);
         return s;
 }

@@ -3978,13 +3974,13 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
         case CPU_UP_CANCELED_FROZEN:
         case CPU_DEAD:
         case CPU_DEAD_FROZEN:
-                down_read(&slub_lock);
+                mutex_lock(&slab_mutex);
                 list_for_each_entry(s, &slab_caches, list) {
                         local_irq_save(flags);
                         __flush_cpu_slab(s, cpu);
                         local_irq_restore(flags);
                 }
-                up_read(&slub_lock);
+                mutex_unlock(&slab_mutex);
                 break;
         default:
                 break;
@@ -5360,11 +5356,11 @@ static int __init slab_sysfs_init(void)
         struct kmem_cache *s;
         int err;

-        down_write(&slub_lock);
+        mutex_lock(&slab_mutex);

         slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
         if (!slab_kset) {
-                up_write(&slub_lock);
+                mutex_unlock(&slab_mutex);
                 printk(KERN_ERR "Cannot register slab subsystem.\n");
                 return -ENOSYS;
         }
@@ -5389,7 +5385,7 @@ static int __init slab_sysfs_init(void)
                 kfree(al);
         }

-        up_write(&slub_lock);
+        mutex_unlock(&slab_mutex);
         resiliency_test();
         return 0;
 }
@@ -5415,7 +5411,7 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 {
         loff_t n = *pos;

-        down_read(&slub_lock);
+        mutex_lock(&slab_mutex);
         if (!n)
                 print_slabinfo_header(m);

@@ -5429,7 +5425,7 @@ static void *s_next(struct seq_file *m, void *p, loff_t *pos)

 static void s_stop(struct seq_file *m, void *p)
 {
-        up_read(&slub_lock);
+        mutex_unlock(&slab_mutex);
 }

 static int s_show(struct seq_file *m, void *p)