 mm/slab.c | 40 +++++++++++++++++++++++-------------------
 1 file changed, 21 insertions(+), 19 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index ff60a94142f9..3318252f657f 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -730,7 +730,10 @@ static inline void init_lock_keys(void)
 }
 #endif
 
-/* Guard access to the cache-chain. */
+/*
+ * 1. Guard access to the cache-chain.
+ * 2. Protect sanity of cpu_online_map against cpu hotplug events
+ */
 static DEFINE_MUTEX(cache_chain_mutex);
 static struct list_head cache_chain;
 
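The rewritten comment is the heart of the patch: cache_chain_mutex now serves double duty, guarding the cache chain and keeping the view of cpu_online_map stable against hotplug. Below is a minimal userspace sketch of that dual role, with a pthread mutex standing in for cache_chain_mutex and plain arrays for the map and the chain; every name in it is illustrative, none of it is kernel API.

#include <pthread.h>
#include <stdio.h>

#define NR_CPUS 4

static pthread_mutex_t chain_lock = PTHREAD_MUTEX_INITIALIZER;
static int cpu_online[NR_CPUS] = { 1, 1, 1, 1 }; /* stands in for cpu_online_map */
static int nr_caches;                            /* stands in for the cache chain */

/* Models kmem_cache_create(): needs the chain *and* the cpu map stable. */
static void *create_cache(void *unused)
{
        int cpu;

        (void)unused;
        pthread_mutex_lock(&chain_lock);
        nr_caches++;
        for (cpu = 0; cpu < NR_CPUS; cpu++)
                if (cpu_online[cpu])
                        printf("cache %d: per-cpu setup on cpu %d\n",
                               nr_caches, cpu);
        pthread_mutex_unlock(&chain_lock);
        return NULL;
}

/* Models a hotplug transition: the map only changes under the same lock. */
static void *take_cpu_down(void *unused)
{
        (void)unused;
        pthread_mutex_lock(&chain_lock);
        cpu_online[2] = 0;
        pthread_mutex_unlock(&chain_lock);
        return NULL;
}

int main(void)
{
        pthread_t creator, hotplug;

        pthread_create(&creator, NULL, create_cache, NULL);
        pthread_create(&hotplug, NULL, take_cpu_down, NULL);
        pthread_join(creator, NULL);
        pthread_join(hotplug, NULL);
        return 0;
}

Because both writers take the same lock, create_cache() can never observe a half-completed hotplug transition; that is the guarantee kmem_cache_create() relies on in the hunks further down.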
@@ -1230,12 +1233,18 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 			kfree(shared);
 			free_alien_cache(alien);
 		}
-		mutex_unlock(&cache_chain_mutex);
 		break;
 	case CPU_ONLINE:
+		mutex_unlock(&cache_chain_mutex);
 		start_cpu_timer(cpu);
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
+	case CPU_DOWN_PREPARE:
+		mutex_lock(&cache_chain_mutex);
+		break;
+	case CPU_DOWN_FAILED:
+		mutex_unlock(&cache_chain_mutex);
+		break;
 	case CPU_DEAD:
 		/*
 		 * Even if all the cpus of a node are down, we don't free the
@@ -1246,8 +1255,8 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 		 * gets destroyed at kmem_cache_destroy().
 		 */
 		/* fall thru */
+#endif
 	case CPU_UP_CANCELED:
-		mutex_lock(&cache_chain_mutex);
 		list_for_each_entry(cachep, &cache_chain, next) {
 			struct array_cache *nc;
 			struct array_cache *shared;
@@ -1308,11 +1317,9 @@ free_array_cache:
 		}
 		mutex_unlock(&cache_chain_mutex);
 		break;
-#endif
 	}
 	return NOTIFY_OK;
 bad:
-	mutex_unlock(&cache_chain_mutex);
 	return NOTIFY_BAD;
 }
 
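Taken together, the three hunks above rebalance the lock/unlock pairing inside cpuup_callback(): the mutex acquired earlier in the CPU_UP_PREPARE path (not shown in the context) now stays held until CPU_ONLINE, and CPU_DOWN_PREPARE acquires it until either CPU_DOWN_FAILED (an aborted offline) or the cleanup that ends the CPU_DEAD / CPU_UP_CANCELED cases releases it. A hedged sketch of that pairing as a plain C state walk, again with a pthread mutex as a stand-in for cache_chain_mutex:

#include <pthread.h>

/* Event names mirror the kernel's CPU_* notifier actions, but this is
 * only a model of the lock pairing, not the real notifier. */
enum hp_event {
        UP_PREPARE,     /* lock; released by ONLINE or UP_CANCELED */
        ONLINE,
        UP_CANCELED,
        DOWN_PREPARE,   /* lock; released by DOWN_FAILED or DEAD */
        DOWN_FAILED,
        DEAD
};

static pthread_mutex_t chain_lock = PTHREAD_MUTEX_INITIALIZER;

static void hotplug_event(enum hp_event ev)
{
        switch (ev) {
        case UP_PREPARE:
        case DOWN_PREPARE:
                /* start of a hotplug transaction: freeze chain and cpu map */
                pthread_mutex_lock(&chain_lock);
                break;
        case ONLINE:
        case UP_CANCELED:
        case DOWN_FAILED:
        case DEAD:
                /* end of the transaction, success or failure: thaw */
                pthread_mutex_unlock(&chain_lock);
                break;
        }
}

int main(void)
{
        /* a successful online, then an offline that fails and rolls back */
        hotplug_event(UP_PREPARE);
        hotplug_event(ONLINE);
        hotplug_event(DOWN_PREPARE);
        hotplug_event(DOWN_FAILED);
        return 0;
}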
@@ -2098,11 +2105,9 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	}
 
 	/*
-	 * Prevent CPUs from coming and going.
-	 * lock_cpu_hotplug() nests outside cache_chain_mutex
+	 * We use cache_chain_mutex to ensure a consistent view of
+	 * cpu_online_map as well. Please see cpuup_callback
 	 */
-	lock_cpu_hotplug();
-
 	mutex_lock(&cache_chain_mutex);
 
 	list_for_each_entry(pc, &cache_chain, next) {
@@ -2325,7 +2330,6 @@ oops:
 		panic("kmem_cache_create(): failed to create slab `%s'\n",
 		      name);
 	mutex_unlock(&cache_chain_mutex);
-	unlock_cpu_hotplug();
 	return cachep;
 }
 EXPORT_SYMBOL(kmem_cache_create);
@@ -2443,6 +2447,7 @@ out:
 	return nr_freed;
 }
 
+/* Called with cache_chain_mutex held to protect against cpu hotplug */
 static int __cache_shrink(struct kmem_cache *cachep)
 {
 	int ret = 0, i = 0;
@@ -2473,9 +2478,13 @@ static int __cache_shrink(struct kmem_cache *cachep)
  */
 int kmem_cache_shrink(struct kmem_cache *cachep)
 {
+	int ret;
 	BUG_ON(!cachep || in_interrupt());
 
-	return __cache_shrink(cachep);
+	mutex_lock(&cache_chain_mutex);
+	ret = __cache_shrink(cachep);
+	mutex_unlock(&cache_chain_mutex);
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_shrink);
 
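The shrink hunks split the work along the lock boundary: the exported kmem_cache_shrink() now takes the mutex itself, so __cache_shrink() can assume it is held, as the comment added above it states. A minimal model of that wrapper/helper split, with illustrative names and a pthread mutex as the stand-in:

#include <pthread.h>

static pthread_mutex_t chain_lock = PTHREAD_MUTEX_INITIALIZER;

/* Models __cache_shrink(): must only be entered with chain_lock held. */
static int shrink_locked(void)
{
        return 0;       /* pretend every slab was freed */
}

/* Models kmem_cache_shrink(): the exported wrapper owns the locking. */
static int cache_shrink(void)
{
        int ret;

        pthread_mutex_lock(&chain_lock);
        ret = shrink_locked();
        pthread_mutex_unlock(&chain_lock);
        return ret;
}

int main(void)
{
        return cache_shrink();
}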
@@ -2499,23 +2508,16 @@ void kmem_cache_destroy(struct kmem_cache *cachep)
 {
 	BUG_ON(!cachep || in_interrupt());
 
-	/* Don't let CPUs to come and go */
-	lock_cpu_hotplug();
-
 	/* Find the cache in the chain of caches. */
 	mutex_lock(&cache_chain_mutex);
 	/*
 	 * the chain is never empty, cache_cache is never destroyed
 	 */
 	list_del(&cachep->next);
-	mutex_unlock(&cache_chain_mutex);
-
 	if (__cache_shrink(cachep)) {
 		slab_error(cachep, "Can't free all objects");
-		mutex_lock(&cache_chain_mutex);
 		list_add(&cachep->next, &cache_chain);
 		mutex_unlock(&cache_chain_mutex);
-		unlock_cpu_hotplug();
 		return;
 	}
 
@@ -2523,7 +2525,7 @@ void kmem_cache_destroy(struct kmem_cache *cachep)
 		synchronize_rcu();
 
 	__kmem_cache_destroy(cachep);
-	unlock_cpu_hotplug();
+	mutex_unlock(&cache_chain_mutex);
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
 
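With lock_cpu_hotplug() gone, kmem_cache_destroy() holds cache_chain_mutex across the entire unlink-shrink-teardown sequence, restoring the cache to the chain only if the shrink fails. A rough userspace model of that control flow follows; the helpers are hypothetical, and the RCU wait is reduced to a comment.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t chain_lock = PTHREAD_MUTEX_INITIALIZER;
static int on_chain = 1;        /* is the cache still linked into the chain? */

static int shrink_locked(void)  /* nonzero would mean objects are still live */
{
        return 0;
}

static void cache_destroy(void)
{
        pthread_mutex_lock(&chain_lock);
        on_chain = 0;                   /* models list_del() */
        if (shrink_locked()) {
                on_chain = 1;           /* models list_add() on failure */
                pthread_mutex_unlock(&chain_lock);
                return;
        }
        /* the real code may also wait for RCU here before the final free */
        printf("cache torn down, still on chain: %d\n", on_chain);
        pthread_mutex_unlock(&chain_lock);
}

int main(void)
{
        cache_destroy();
        return 0;
}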