Diffstat (limited to 'mm/slab.c')
 mm/slab.c | 6 ++++++
 1 files changed, 6 insertions(+), 0 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 1a7a10de2a4d..6f3d6e240c61 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1190,6 +1190,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 		mutex_lock(&cache_chain_mutex);
 		break;
 	case CPU_UP_PREPARE:
+	case CPU_UP_PREPARE_FROZEN:
 		/*
 		 * We need to do this right in the beginning since
 		 * alloc_arraycache's are going to use this list.
@@ -1276,10 +1277,12 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 		}
 		break;
 	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
 		start_cpu_timer(cpu);
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_DOWN_PREPARE:
+	case CPU_DOWN_PREPARE_FROZEN:
 		/*
 		 * Shutdown cache reaper. Note that the cache_chain_mutex is
 		 * held so that if cache_reap() is invoked it cannot do
@@ -1291,9 +1294,11 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 		per_cpu(reap_work, cpu).work.func = NULL;
 		break;
 	case CPU_DOWN_FAILED:
+	case CPU_DOWN_FAILED_FROZEN:
 		start_cpu_timer(cpu);
 		break;
 	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
 		/*
 		 * Even if all the cpus of a node are down, we don't free the
 		 * kmem_list3 of any cache. This to avoid a race between
@@ -1305,6 +1310,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 		/* fall thru */
 #endif
 	case CPU_UP_CANCELED:
+	case CPU_UP_CANCELED_FROZEN:
 		list_for_each_entry(cachep, &cache_chain, next) {
 			struct array_cache *nc;
 			struct array_cache *shared;
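
For orientation (not part of the patch itself): each *_FROZEN case added above is, assuming the classic cpu-notifier event encoding of this kernel era, the corresponding base event with the CPU_TASKS_FROZEN bit set; it is delivered when a CPU comes or goes during suspend/resume while user tasks are frozen. This patch handles the frozen variants by listing both case labels for the same code. A hypothetical notifier that wants identical handling could instead mask the bit off once, as sketched below (example_cpu_callback is an illustrative name, not code from mm/slab.c):

#include <linux/notifier.h>
#include <linux/cpu.h>

/*
 * Sketch only -- not the slab notifier. Assumes the _FROZEN events are
 * defined as the base event OR'ed with CPU_TASKS_FROZEN, so masking the
 * bit folds both variants into one case label.
 */
static int example_cpu_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:		/* also CPU_UP_PREPARE_FROZEN */
		/* per-cpu setup would go here */
		break;
	case CPU_DEAD:			/* also CPU_DEAD_FROZEN */
		/* per-cpu teardown would go here */
		break;
	}
	return NOTIFY_OK;
}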