aboutsummaryrefslogtreecommitdiffstats
path: root/mm/vmscan.c
diff options
context:
space:
mode:
authorKOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>2011-01-13 18:45:50 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2011-01-13 20:32:32 -0500
commitf0bc0a60b13f209df16062f94e9fb4b90dc08708 (patch)
treec876603d2cb17a3e2ed159ca5e08e4665cc09fe2 /mm/vmscan.c
parentc3f0da631539b3b8e17f6dda567af9958d49d14f (diff)
vmscan: factor out kswapd sleeping logic from kswapd()
Currently, kswapd() has deep nesting and is slightly hard to read. Clean this up. Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> Cc: Mel Gorman <mel@csn.ul.ie> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c | 92
1 file changed, 46 insertions(+), 46 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 86f8c3418795..cacdf6684971 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2371,6 +2371,50 @@ out:
2371 return sc.nr_reclaimed; 2371 return sc.nr_reclaimed;
2372} 2372}
2373 2373
/*
 * kswapd_try_to_sleep - put kswapd to sleep until it is needed again.
 * @pgdat: node whose kswapd is going idle
 * @order: allocation order kswapd was last woken to reclaim for
 *
 * Called from the kswapd main loop when there appears to be no more
 * reclaim work. Sleeps in two stages: first a short nap (HZ/10) to see
 * whether the node stays balanced, then — only if it does — a full,
 * open-ended sleep until explicitly woken. sleeping_prematurely() is the
 * gate for both stages; if it says the node is still under pressure the
 * sleep is skipped/aborted and a vm event is counted instead.
 */
static void kswapd_try_to_sleep(pg_data_t *pgdat, int order)
{
	long remaining = 0;
	DEFINE_WAIT(wait);

	/* Don't sleep here if we are being frozen or asked to exit. */
	if (freezing(current) || kthread_should_stop())
		return;

	prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);

	/* Try to sleep for a short interval */
	if (!sleeping_prematurely(pgdat, order, remaining)) {
		/* remaining != 0 means we were woken before the nap expired */
		remaining = schedule_timeout(HZ/10);
		finish_wait(&pgdat->kswapd_wait, &wait);
		prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
	}

	/*
	 * After a short sleep, check if it was a premature sleep. If not, then
	 * go fully to sleep until explicitly woken up.
	 */
	if (!sleeping_prematurely(pgdat, order, remaining)) {
		trace_mm_vmscan_kswapd_sleep(pgdat->node_id);

		/*
		 * vmstat counters are not perfectly accurate and the estimated
		 * value for counters such as NR_FREE_PAGES can deviate from the
		 * true value by nr_online_cpus * threshold. To avoid the zone
		 * watermarks being breached while under pressure, we reduce the
		 * per-cpu vmstat threshold while kswapd is awake and restore
		 * them before going back to sleep.
		 */
		set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
		schedule();
		set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
	} else {
		/*
		 * The sleep was premature: record whether we were kicked
		 * awake during the nap (low watermark hit again quickly) or
		 * never managed to nap at all (high watermark hit quickly).
		 */
		if (remaining)
			count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
		else
			count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
	}
	finish_wait(&pgdat->kswapd_wait, &wait);
}
2417
2374/* 2418/*
2375 * The background pageout daemon, started as a kernel thread 2419 * The background pageout daemon, started as a kernel thread
2376 * from the init process. 2420 * from the init process.
@@ -2389,7 +2433,7 @@ static int kswapd(void *p)
2389 unsigned long order; 2433 unsigned long order;
2390 pg_data_t *pgdat = (pg_data_t*)p; 2434 pg_data_t *pgdat = (pg_data_t*)p;
2391 struct task_struct *tsk = current; 2435 struct task_struct *tsk = current;
2392 DEFINE_WAIT(wait); 2436
2393 struct reclaim_state reclaim_state = { 2437 struct reclaim_state reclaim_state = {
2394 .reclaimed_slab = 0, 2438 .reclaimed_slab = 0,
2395 }; 2439 };
@@ -2421,7 +2465,6 @@ static int kswapd(void *p)
2421 unsigned long new_order; 2465 unsigned long new_order;
2422 int ret; 2466 int ret;
2423 2467
2424 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
2425 new_order = pgdat->kswapd_max_order; 2468 new_order = pgdat->kswapd_max_order;
2426 pgdat->kswapd_max_order = 0; 2469 pgdat->kswapd_max_order = 0;
2427 if (order < new_order) { 2470 if (order < new_order) {
@@ -2431,52 +2474,9 @@ static int kswapd(void *p)
2431 */ 2474 */
2432 order = new_order; 2475 order = new_order;
2433 } else { 2476 } else {
2434 if (!freezing(current) && !kthread_should_stop()) { 2477 kswapd_try_to_sleep(pgdat, order);
2435 long remaining = 0;
2436
2437 /* Try to sleep for a short interval */
2438 if (!sleeping_prematurely(pgdat, order, remaining)) {
2439 remaining = schedule_timeout(HZ/10);
2440 finish_wait(&pgdat->kswapd_wait, &wait);
2441 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
2442 }
2443
2444 /*
2445 * After a short sleep, check if it was a
2446 * premature sleep. If not, then go fully
2447 * to sleep until explicitly woken up
2448 */
2449 if (!sleeping_prematurely(pgdat, order, remaining)) {
2450 trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
2451
2452 /*
2453 * vmstat counters are not perfectly
2454 * accurate and the estimated value
2455 * for counters such as NR_FREE_PAGES
2456 * can deviate from the true value by
2457 * nr_online_cpus * threshold. To
2458 * avoid the zone watermarks being
2459 * breached while under pressure, we
2460 * reduce the per-cpu vmstat threshold
2461 * while kswapd is awake and restore
2462 * them before going back to sleep.
2463 */
2464 set_pgdat_percpu_threshold(pgdat,
2465 calculate_normal_threshold);
2466 schedule();
2467 set_pgdat_percpu_threshold(pgdat,
2468 calculate_pressure_threshold);
2469 } else {
2470 if (remaining)
2471 count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
2472 else
2473 count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
2474 }
2475 }
2476
2477 order = pgdat->kswapd_max_order; 2478 order = pgdat->kswapd_max_order;
2478 } 2479 }
2479 finish_wait(&pgdat->kswapd_wait, &wait);
2480 2480
2481 ret = try_to_freeze(); 2481 ret = try_to_freeze();
2482 if (kthread_should_stop()) 2482 if (kthread_should_stop())