 mm/backing-dev.c | 90 ++++++++++++++++++++++++++++++++++++++++--------------
 1 file changed, 68 insertions(+), 22 deletions(-)
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index d3ca0dac1111..3d3accb1f800 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -26,6 +26,12 @@ struct backing_dev_info default_backing_dev_info = {
 EXPORT_SYMBOL_GPL(default_backing_dev_info);
 
 static struct class *bdi_class;
+
+/*
+ * bdi_lock protects updates to bdi_list and bdi_pending_list, as well as
+ * reader side protection for bdi_pending_list. bdi_list has RCU reader side
+ * locking.
+ */
 DEFINE_SPINLOCK(bdi_lock);
 LIST_HEAD(bdi_list);
 LIST_HEAD(bdi_pending_list);
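As a point of reference for that new comment (a sketch, not part of this patch): with bdi_list converted to RCU, a reader traverses it without taking bdi_lock at all. walk_bdi_list() below is a hypothetical helper name.

#include <linux/backing-dev.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>

static void walk_bdi_list(void)
{
	struct backing_dev_info *bdi;

	rcu_read_lock();
	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		/* bdi cannot be freed under us: a writer that unlinks
		 * it must wait for this read-side section to end */
	}
	rcu_read_unlock();
}

Writers, as the hunks below show, still serialize on bdi_lock and must use the _rcu list primitives so concurrent readers always see a consistent list.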
@@ -284,9 +290,9 @@ static int bdi_start_fn(void *ptr)
 	/*
 	 * Add us to the active bdi_list
 	 */
-	spin_lock(&bdi_lock);
-	list_add(&bdi->bdi_list, &bdi_list);
-	spin_unlock(&bdi_lock);
+	spin_lock_bh(&bdi_lock);
+	list_add_rcu(&bdi->bdi_list, &bdi_list);
+	spin_unlock_bh(&bdi_lock);
 
 	bdi_task_init(bdi, wb);
 
@@ -389,7 +395,7 @@ static int bdi_forker_task(void *ptr)
 	if (wb_has_dirty_io(me) || !list_empty(&me->bdi->work_list))
 		wb_do_writeback(me, 0);
 
-	spin_lock(&bdi_lock);
+	spin_lock_bh(&bdi_lock);
 
 	/*
 	 * Check if any existing bdi's have dirty data without
@@ -410,7 +416,7 @@ static int bdi_forker_task(void *ptr)
 		if (list_empty(&bdi_pending_list)) {
 			unsigned long wait;
 
-			spin_unlock(&bdi_lock);
+			spin_unlock_bh(&bdi_lock);
 			wait = msecs_to_jiffies(dirty_writeback_interval * 10);
 			schedule_timeout(wait);
 			try_to_freeze();
@@ -426,7 +432,7 @@ static int bdi_forker_task(void *ptr)
 		bdi = list_entry(bdi_pending_list.next, struct backing_dev_info,
 				 bdi_list);
 		list_del_init(&bdi->bdi_list);
-		spin_unlock(&bdi_lock);
+		spin_unlock_bh(&bdi_lock);
 
 		wb = &bdi->wb;
 		wb->task = kthread_run(bdi_start_fn, wb, "flush-%s",
@@ -445,9 +451,9 @@ static int bdi_forker_task(void *ptr)
 			 * a chance to flush other bdi's to free
 			 * memory.
 			 */
-			spin_lock(&bdi_lock);
-			list_add_tail(&bdi->bdi_list, &bdi_pending_list);
-			spin_unlock(&bdi_lock);
+			spin_lock_bh(&bdi_lock);
+			list_add_tail(&bdi->bdi_list, &bdi_pending_list);
+			spin_unlock_bh(&bdi_lock);
 
 			bdi_flush_io(bdi);
 		}
@@ -456,6 +462,24 @@ static int bdi_forker_task(void *ptr)
 	return 0;
 }
 
+static void bdi_add_to_pending(struct rcu_head *head)
+{
+	struct backing_dev_info *bdi;
+
+	bdi = container_of(head, struct backing_dev_info, rcu_head);
+	INIT_LIST_HEAD(&bdi->bdi_list);
+
+	spin_lock(&bdi_lock);
+	list_add_tail(&bdi->bdi_list, &bdi_pending_list);
+	spin_unlock(&bdi_lock);
+
+	/*
+	 * We are now on the pending list, wake up bdi_forker_task()
+	 * to finish the job and add us back to the active bdi_list
+	 */
+	wake_up_process(default_backing_dev_info.wb.task);
+}
+
 /*
  * Add the default flusher task that gets created for any bdi
  * that has dirty data pending writeout
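Note that bdi_add_to_pending() takes plain spin_lock() where every other path now uses spin_lock_bh(); presumably this is because RCU callbacks run from softirq context, where bottom halves are already disabled. The deferral pattern itself, reduced to a minimal sketch (struct item, item_lock and the function names are hypothetical):

#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct item {
	struct list_head	list;
	struct rcu_head		rcu_head;
};

static DEFINE_SPINLOCK(item_lock);
static LIST_HEAD(pending);

static void item_requeue(struct rcu_head *head)
{
	struct item *it = container_of(head, struct item, rcu_head);

	/* a full grace period has elapsed, so no reader can still be
	 * looking at the old linkage and the list_head may be reused */
	spin_lock(&item_lock);
	list_add_tail(&it->list, &pending);
	spin_unlock(&item_lock);
}

static void item_move_to_pending(struct item *it)
{
	spin_lock_bh(&item_lock);
	list_del_rcu(&it->list);
	spin_unlock_bh(&item_lock);

	call_rcu(&it->rcu_head, item_requeue);	/* never blocks */
}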
@@ -478,16 +502,29 @@ void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
 	 * waiting for previous additions to finish.
 	 */
 	if (!test_and_set_bit(BDI_pending, &bdi->state)) {
-		list_move_tail(&bdi->bdi_list, &bdi_pending_list);
+		list_del_rcu(&bdi->bdi_list);
 
 		/*
-		 * We are now on the pending list, wake up bdi_forker_task()
-		 * to finish the job and add us back to the active bdi_list
+		 * We must wait for the current RCU period to end before
+		 * moving to the pending list. So schedule that operation
+		 * from an RCU callback.
 		 */
-		wake_up_process(default_backing_dev_info.wb.task);
+		call_rcu(&bdi->rcu_head, bdi_add_to_pending);
 	}
 }
 
+/*
+ * Remove bdi from bdi_list, and ensure that it is no longer visible
+ */
+static void bdi_remove_from_list(struct backing_dev_info *bdi)
+{
+	spin_lock_bh(&bdi_lock);
+	list_del_rcu(&bdi->bdi_list);
+	spin_unlock_bh(&bdi_lock);
+
+	synchronize_rcu();
+}
+
 int bdi_register(struct backing_dev_info *bdi, struct device *parent,
 		 const char *fmt, ...)
 {
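bdi_remove_from_list() is the blocking counterpart to the call_rcu() deferral above: synchronize_rcu() sleeps until every pre-existing reader has finished, so it is only usable from process context, which holds for both callers introduced below. The same pattern in isolation, again with a hypothetical struct item:

#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct item {
	struct list_head	list;
	struct rcu_head		rcu_head;
};

static DEFINE_SPINLOCK(item_lock);

static void item_remove_and_free(struct item *it)
{
	spin_lock_bh(&item_lock);
	list_del_rcu(&it->list);
	spin_unlock_bh(&item_lock);

	/* blocks until all readers that might still see 'it' are done */
	synchronize_rcu();

	kfree(it);
}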
@@ -506,9 +543,9 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
 		goto exit;
 	}
 
-	spin_lock(&bdi_lock);
-	list_add_tail(&bdi->bdi_list, &bdi_list);
-	spin_unlock(&bdi_lock);
+	spin_lock_bh(&bdi_lock);
+	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
+	spin_unlock_bh(&bdi_lock);
 
 	bdi->dev = dev;
 
@@ -526,9 +563,7 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
 			wb->task = NULL;
 			ret = -ENOMEM;
 
-			spin_lock(&bdi_lock);
-			list_del(&bdi->bdi_list);
-			spin_unlock(&bdi_lock);
+			bdi_remove_from_list(bdi);
 			goto exit;
 		}
 	}
@@ -565,9 +600,7 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
 	/*
 	 * Make sure nobody finds us on the bdi_list anymore
 	 */
-	spin_lock(&bdi_lock);
-	list_del(&bdi->bdi_list);
-	spin_unlock(&bdi_lock);
+	bdi_remove_from_list(bdi);
 
 	/*
 	 * Finally, kill the kernel threads. We don't need to be RCU
@@ -599,6 +632,7 @@ int bdi_init(struct backing_dev_info *bdi)
 	bdi->max_ratio = 100;
 	bdi->max_prop_frac = PROP_FRAC_BASE;
 	spin_lock_init(&bdi->wb_lock);
+	INIT_RCU_HEAD(&bdi->rcu_head);
 	INIT_LIST_HEAD(&bdi->bdi_list);
 	INIT_LIST_HEAD(&bdi->wb_list);
 	INIT_LIST_HEAD(&bdi->work_list);
@@ -634,7 +668,19 @@ void bdi_destroy(struct backing_dev_info *bdi)
 {
 	int i;
 
-	WARN_ON(bdi_has_dirty_io(bdi));
+	/*
+	 * Splice our entries to the default_backing_dev_info, if this
+	 * bdi disappears
+	 */
+	if (bdi_has_dirty_io(bdi)) {
+		struct bdi_writeback *dst = &default_backing_dev_info.wb;
+
+		spin_lock(&inode_lock);
+		list_splice(&bdi->wb.b_dirty, &dst->b_dirty);
+		list_splice(&bdi->wb.b_io, &dst->b_io);
+		list_splice(&bdi->wb.b_more_io, &dst->b_more_io);
+		spin_unlock(&inode_lock);
+	}
 
 	bdi_unregister(bdi);
 
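One aside on the splice above (an observation, not something the patch itself notes): list_splice() transplants the entries onto the destination but leaves the source list head in an undefined state, which is fine here only because the bdi is being destroyed. Code that keeps using the source list wants list_splice_init(), roughly:

#include <linux/list.h>

static void drain_to(struct list_head *src, struct list_head *dst)
{
	/* dst receives all entries; src is re-initialized to empty,
	 * unlike plain list_splice(), which leaves src unusable */
	list_splice_init(src, dst);
}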