Diffstat (limited to 'mm')
-rw-r--r--	mm/backing-dev.c	69
1 file changed, 58 insertions, 11 deletions
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index e104e32c2ee8..9c1c199f88ce 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -316,6 +316,18 @@ static void sync_supers_timer_fn(unsigned long unused)
 	bdi_arm_supers_timer();
 }
 
+/*
+ * Calculate the longest interval (jiffies) bdi threads are allowed to be
+ * inactive.
+ */
+static unsigned long bdi_longest_inactive(void)
+{
+	unsigned long interval;
+
+	interval = msecs_to_jiffies(dirty_writeback_interval * 10);
+	return max(5UL * 60 * HZ, interval);
+}
+
 static int bdi_forker_thread(void *ptr)
 {
 	struct bdi_writeback *me = ptr;
@@ -329,11 +341,12 @@ static int bdi_forker_thread(void *ptr)
 	set_user_nice(current, 0);
 
 	for (;;) {
-		struct task_struct *task;
+		struct task_struct *task = NULL;
 		struct backing_dev_info *bdi;
 		enum {
 			NO_ACTION,   /* Nothing to do */
 			FORK_THREAD, /* Fork bdi thread */
+			KILL_THREAD, /* Kill inactive bdi thread */
 		} action = NO_ACTION;
 
 		/*
@@ -346,10 +359,6 @@ static int bdi_forker_thread(void *ptr)
 		spin_lock_bh(&bdi_lock);
 		set_current_state(TASK_INTERRUPTIBLE);
 
-		/*
-		 * Check if any existing bdi's have dirty data without
-		 * a thread registered. If so, set that up.
-		 */
 		list_for_each_entry(bdi, &bdi_list, bdi_list) {
 			bool have_dirty_io;
 
@@ -376,6 +385,25 @@ static int bdi_forker_thread(void *ptr)
 				action = FORK_THREAD;
 				break;
 			}
+
+			spin_lock(&bdi->wb_lock);
+			/*
+			 * If there is no work to do and the bdi thread was
+			 * inactive long enough - kill it. The wb_lock is taken
+			 * to make sure no-one adds more work to this bdi and
+			 * wakes the bdi thread up.
+			 */
+			if (bdi->wb.task && !have_dirty_io &&
+			    time_after(jiffies, bdi->wb.last_active +
+						bdi_longest_inactive())) {
+				task = bdi->wb.task;
+				bdi->wb.task = NULL;
+				spin_unlock(&bdi->wb_lock);
+				set_bit(BDI_pending, &bdi->state);
+				action = KILL_THREAD;
+				break;
+			}
+			spin_unlock(&bdi->wb_lock);
 		}
 		spin_unlock_bh(&bdi_lock);
 
@@ -394,8 +422,20 @@ static int bdi_forker_thread(void *ptr)
 				 * the bdi from the thread.
 				 */
 				bdi_flush_io(bdi);
-			} else
+			} else {
+				/*
+				 * The spinlock makes sure we do not lose
+				 * wake-ups when racing with 'bdi_queue_work()'.
+				 */
+				spin_lock(&bdi->wb_lock);
 				bdi->wb.task = task;
+				spin_unlock(&bdi->wb_lock);
+			}
+			break;
+
+		case KILL_THREAD:
+			__set_current_state(TASK_RUNNING);
+			kthread_stop(task);
 			break;
 
 		case NO_ACTION:
@@ -407,6 +447,13 @@ static int bdi_forker_thread(void *ptr)
 			/* Back to the main loop */
 			continue;
 		}
+
+		/*
+		 * Clear pending bit and wakeup anybody waiting to tear us down.
+		 */
+		clear_bit(BDI_pending, &bdi->state);
+		smp_mb__after_clear_bit();
+		wake_up_bit(&bdi->state, BDI_pending);
 	}
 
 	return 0;
@@ -490,15 +537,15 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
 		return;
 
 	/*
-	 * If setup is pending, wait for that to complete first
+	 * Make sure nobody finds us on the bdi_list anymore
 	 */
-	wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait,
-			TASK_UNINTERRUPTIBLE);
+	bdi_remove_from_list(bdi);
 
 	/*
-	 * Make sure nobody finds us on the bdi_list anymore
+	 * If setup is pending, wait for that to complete first
 	 */
-	bdi_remove_from_list(bdi);
+	wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait,
+		    TASK_UNINTERRUPTIBLE);
 
 	/*
 	 * Finally, kill the kernel thread. We don't need to be RCU