author     Rafael J. Wysocki <rjw@sisk.pl>    2007-05-09 05:35:10 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-05-09 15:30:56 -0400
commit     8bb7844286fb8c9fce6f65d8288aeb09d03a5e0d (patch)
tree       f4e305edaedbde05774bb1e4acd89a9475661d2e /mm
parent     f37bc2712b54ec641e0c0c8634f1a4b61d9956c0 (diff)
Add suspend-related notifications for CPU hotplug
Since nonboot CPUs are now disabled after tasks and devices have been frozen, and the CPU hotplug infrastructure is used for this purpose, we need special CPU hotplug notifications that help the CPU-hotplug-aware subsystems distinguish normal CPU hotplug events from CPU hotplug events related to a system-wide suspend or resume operation in progress.

This patch introduces such notifications and causes them to be used during suspend and resume transitions.  It also changes all of the CPU-hotplug-aware subsystems to take these notifications into consideration (for now they are handled in the same way as the corresponding "normal" ones).

[oleg@tv-sign.ru: cleanups]
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Cc: Gautham R Shenoy <ego@in.ibm.com>
Cc: Pavel Machek <pavel@ucw.cz>
Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
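For context, a minimal sketch of the pattern every mm/ callback below follows: a CPU hotplug notifier that treats the new suspend-time *_FROZEN events exactly like their normal counterparts. This is illustrative only and not part of the patch; the "example_" names are hypothetical, and the *_FROZEN constants are the ones this commit introduces outside the mm/ portion shown here.

	/*
	 * Illustrative only -- not from this patch.  A CPU hotplug notifier
	 * that handles the suspend-time *_FROZEN events alongside the normal
	 * ones.  All "example_" identifiers are hypothetical.
	 */
	#include <linux/cpu.h>
	#include <linux/kernel.h>
	#include <linux/notifier.h>

	static int example_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
	{
		long cpu = (long)hcpu;

		switch (action) {
		case CPU_UP_PREPARE:
		case CPU_UP_PREPARE_FROZEN:	/* CPU being brought up during resume */
			/* allocate per-CPU state for 'cpu' here */
			printk(KERN_DEBUG "example: preparing cpu %ld\n", cpu);
			break;
		case CPU_DEAD:
		case CPU_DEAD_FROZEN:		/* CPU torn down while tasks are frozen */
			/* release per-CPU state for 'cpu' here */
			printk(KERN_DEBUG "example: cpu %ld is gone\n", cpu);
			break;
		default:
			break;
		}
		return NOTIFY_OK;
	}

	static struct notifier_block example_cpu_notifier = {
		.notifier_call = example_cpu_callback,
	};

Registration would use the usual register_cpu_notifier(&example_cpu_notifier); the only new requirement is that the frozen variants fall through to the same handling until a subsystem has a reason to treat suspend-time hotplug differently, which matches how the hunks below extend each existing switch or condition.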
Diffstat (limited to 'mm')
-rw-r--r--   mm/page_alloc.c   5
-rw-r--r--   mm/slab.c         6
-rw-r--r--   mm/slub.c         2
-rw-r--r--   mm/swap.c         2
-rw-r--r--   mm/vmscan.c       2
-rw-r--r--   mm/vmstat.c       3
6 files changed, 17 insertions, 3 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6fd0b7455b0..d53cbf8acb8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2148,11 +2148,14 @@ static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
 
 	switch (action) {
 	case CPU_UP_PREPARE:
+	case CPU_UP_PREPARE_FROZEN:
 		if (process_zones(cpu))
 			ret = NOTIFY_BAD;
 		break;
 	case CPU_UP_CANCELED:
+	case CPU_UP_CANCELED_FROZEN:
 	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
 		free_zone_pagesets(cpu);
 		break;
 	default:
@@ -3012,7 +3015,7 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
 {
 	int cpu = (unsigned long)hcpu;
 
-	if (action == CPU_DEAD) {
+	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
 		local_irq_disable();
 		__drain_pages(cpu);
 		vm_events_fold_cpu(cpu);
diff --git a/mm/slab.c b/mm/slab.c
index 1a7a10de2a4..6f3d6e240c6 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1190,6 +1190,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 		mutex_lock(&cache_chain_mutex);
 		break;
 	case CPU_UP_PREPARE:
+	case CPU_UP_PREPARE_FROZEN:
 		/*
 		 * We need to do this right in the beginning since
 		 * alloc_arraycache's are going to use this list.
@@ -1276,10 +1277,12 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 		}
 		break;
 	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
 		start_cpu_timer(cpu);
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_DOWN_PREPARE:
+	case CPU_DOWN_PREPARE_FROZEN:
 		/*
 		 * Shutdown cache reaper. Note that the cache_chain_mutex is
 		 * held so that if cache_reap() is invoked it cannot do
@@ -1291,9 +1294,11 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 		per_cpu(reap_work, cpu).work.func = NULL;
 		break;
 	case CPU_DOWN_FAILED:
+	case CPU_DOWN_FAILED_FROZEN:
 		start_cpu_timer(cpu);
 		break;
 	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
 		/*
 		 * Even if all the cpus of a node are down, we don't free the
 		 * kmem_list3 of any cache. This to avoid a race between
@@ -1305,6 +1310,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 		/* fall thru */
 #endif
 	case CPU_UP_CANCELED:
+	case CPU_UP_CANCELED_FROZEN:
 		list_for_each_entry(cachep, &cache_chain, next) {
 			struct array_cache *nc;
 			struct array_cache *shared;
diff --git a/mm/slub.c b/mm/slub.c
index f7c120b93c4..a581fa8ae11 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2514,7 +2514,9 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
 
 	switch (action) {
 	case CPU_UP_CANCELED:
+	case CPU_UP_CANCELED_FROZEN:
 	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
 		for_all_slabs(__flush_cpu_slab, cpu);
 		break;
 	default:
diff --git a/mm/swap.c b/mm/swap.c
index 218c52a24a2..d3cb966fe99 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -488,7 +488,7 @@ static int cpu_swap_callback(struct notifier_block *nfb,
 	long *committed;
 
 	committed = &per_cpu(committed_space, (long)hcpu);
-	if (action == CPU_DEAD) {
+	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
 		atomic_add(*committed, &vm_committed_space);
 		*committed = 0;
 		__lru_add_drain((long)hcpu);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1c8e75a1cfc..1be5a6376ef 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1528,7 +1528,7 @@ static int __devinit cpu_callback(struct notifier_block *nfb,
 	pg_data_t *pgdat;
 	cpumask_t mask;
 
-	if (action == CPU_ONLINE) {
+	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
 		for_each_online_pgdat(pgdat) {
 			mask = node_to_cpumask(pgdat->node_id);
 			if (any_online_cpu(mask) != NR_CPUS)
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 6c488d6ac42..9a66dc4aed4 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -650,8 +650,11 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
 {
 	switch (action) {
 	case CPU_UP_PREPARE:
+	case CPU_UP_PREPARE_FROZEN:
 	case CPU_UP_CANCELED:
+	case CPU_UP_CANCELED_FROZEN:
 	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
 		refresh_zone_stat_thresholds();
 		break;
 	default: